// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Author: nevena@google.com (Nevena Lazic)

#include "lossmin/minimizers/gradient-evaluator.h"

#include <functional>

namespace lossmin {

// Returns the training loss at 'weights', averaged over all examples.
float GradientEvaluator::Loss(const Weights &weights) const {
  // TODO(azani): Implement multi-threaded version.
  return loss_function_->BatchLoss(weights, instances_, labels_) /
         NumExamples();
}

// Returns the loss at 'weights', averaged over a held-out validation set.
float GradientEvaluator::Loss(
    const Weights &weights, const InstanceSet &validation_instances,
    const LabelSet &validation_labels) const {
  return loss_function_->BatchLoss(weights, validation_instances,
                                   validation_labels) /
         validation_labels.rows();
}

// Returns the loss at 'weights', averaged over the given subset of training
// examples (e.g. a mini-batch).
float GradientEvaluator::Loss(
    const Weights &weights, const std::vector<int> &example_indices) const {
  float loss = 0.0f;
  for (int example : example_indices) {
    loss += loss_function_->ExampleLoss(weights, instances_, labels_, example);
  }
  return loss / example_indices.size();
}
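
// Editor's usage sketch (not lossmin API; 'evaluator', 'weights' and 'k' are
// assumed to exist in the caller's scope): mean loss over a mini-batch made
// of the first k training examples.
//
//   std::vector<int> batch(k);
//   std::iota(batch.begin(), batch.end(), 0);  // requires <numeric>
//   const float batch_loss = evaluator.Loss(weights, batch);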

// Writes the gradient of the training loss at 'weights' into 'gradient',
// averaged over all examples.
void GradientEvaluator::Gradient(const Weights &weights,
                                 Weights *gradient) const {
  // DCHECK(gradient != nullptr);
  // TODO(azani): Implement multi-threaded version.
  loss_function_->BatchGradient(weights, instances_, labels_, gradient);
  *gradient /= NumExamples();
}

// Sets the number of worker threads and partitions the training examples into
// batches of at most 'batch_size' for them to process.
void GradientEvaluator::set_num_threads(int num_threads, int batch_size) {
  num_threads_ = num_threads;
  if (num_threads_ == 1) return;

  // Number of batches, rounding up so that every example is covered.
  num_batches_ = (NumExamples() + batch_size - 1) / batch_size;

  // Assign examples to batches round-robin.
  batch_examples_.assign(num_batches_, {});
  for (auto &batch : batch_examples_) batch.reserve(batch_size);
  for (int i = 0; i < NumExamples(); ++i) {
    batch_examples_[i % num_batches_].push_back(i);
  }
}
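
// Worked example of the assignment above: with NumExamples() == 10 and
// batch_size == 4, num_batches_ == (10 + 3) / 4 == 3 and the batches are
// {0, 3, 6, 9}, {1, 4, 7} and {2, 5, 8}.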

/*
void GradientEvaluator::ThreadLoss(
    const Weights *weights, int example, VectorXf *loss_values,
    BlockingCounter *num_jobs_remaining) const {
  // Compute example loss.
  loss_values->coeffRef(example) =
      loss_function_->ExampleLoss(*weights, instances_, labels_, example);

  // Decrement thread counter.
  num_jobs_remaining->DecrementCount();
}

void GradientEvaluator::ThreadGradient(
    const Weights *weights, int batch, Weights *gradient,
    BlockingCounter *num_jobs_remaining) const {
  // Add the example gradient.
  for (int example : batch_examples_[batch]) {
    loss_function_->AddExampleGradient(*weights, instances_, labels_, example,
                                       1.0f, 1.0f, gradient);
  }

  // Decrement thread counter.
  num_jobs_remaining->DecrementCount();
}
*/
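
// Editor's sketch (not part of the original implementation) of how the TODOs
// above might be realized: it consumes the batches built by set_num_threads(),
// spawning one std::thread per batch in place of the unavailable
// BlockingCounter machinery. The name ThreadedGradient and the use of <thread>
// are assumptions, not lossmin API; it is kept commented out, like the block
// above, so the file builds unchanged.
/*
#include <thread>  // would move to the top of the file

void GradientEvaluator::ThreadedGradient(const Weights &weights,
                                         Weights *gradient) const {
  // One private accumulator per batch avoids any locking.
  std::vector<Weights> partial(num_batches_, Weights::Zero(gradient->size()));
  std::vector<std::thread> workers;
  for (int batch = 0; batch < num_batches_; ++batch) {
    workers.emplace_back([this, &weights, &partial, batch] {
      for (int example : batch_examples_[batch]) {
        loss_function_->AddExampleGradient(weights, instances_, labels_,
                                           example, 1.0f, 1.0f,
                                           &partial[batch]);
      }
    });
  }
  for (std::thread &worker : workers) worker.join();

  // Combine the per-batch gradients and average over all examples.
  gradient->setZero();
  for (const Weights &part : partial) *gradient += part;
  *gradient /= NumExamples();
}
*/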
} // namespace lossmin