| // Copyright 2014 Google Inc. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| // Author: nevena@google.com (Nevena Lazic) |
| |
| #include "lossmin/minimizers/gradient-evaluator.h" |
| |
| #include <functional> |
| |
| namespace lossmin { |
| |
| float GradientEvaluator::Loss(const Weights &weights) const { |
| // TODO(azani): Implement multi-threaded version. |
| return loss_function_->BatchLoss(weights, instances_, labels_) / |
| NumExamples(); |
| } |
| |
| float GradientEvaluator::Loss( |
| const Weights &weights, const InstanceSet &validation_instances, |
| const LabelSet &validation_labels) const { |
| return loss_function_->BatchLoss( |
| weights, validation_instances, validation_labels) / |
| validation_labels.rows(); |
| } |
| |
| float GradientEvaluator::Loss( |
| const Weights &weights, const std::vector<int> &example_indices) const { |
| float loss = 0.0f; |
| for (int example : example_indices) { |
| loss += loss_function_->ExampleLoss(weights, instances_, labels_, example); |
| } |
| return loss / example_indices.size(); |
| } |
| |
// Computes the average gradient of the loss at 'weights' over the full
// training set and writes it into *gradient. 'gradient' must be non-null
// and sized to match 'weights' (presumably handled by BatchGradient or the
// caller — confirm against the loss function implementations).
void GradientEvaluator::Gradient(const Weights &weights,
                                 Weights *gradient) const {
  //DCHECK(gradient != nullptr);
  // TODO(azani): Implement multi-threaded version.
  // BatchGradient accumulates the summed gradient; divide by the example
  // count to turn the sum into a mean, matching Loss() above.
  loss_function_->BatchGradient(weights, instances_, labels_, gradient);
  *gradient /= NumExamples();
}
| |
| } // namespace lossmin |