blob: ee6d31cf29cbb57c33d4a7bf4405f1c3677f0934 [file] [log] [blame]
// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <iostream>
#include <memory>
#include <random>
#include <vector>

#include "lossmin/eigen-types.h"
#include "lossmin/losses/inner-product-loss-function.h"
#include "lossmin/losses/loss-function.h"
#include "minimizers/gradient-evaluator.h"
#include "minimizers/loss-minimizer.h"
#include "minimizers/parallel-boosting-with-momentum.h"
using namespace lossmin;
int main(int argc, char **argv) {
LossFunction *loss_func = new LinearRegressionLossFunction();
const int rows_num = 10000;
const int features_num = 3;
InstanceSet instances(rows_num, features_num);
std::default_random_engine generator;
std::uniform_real_distribution<float> distribution(-1.0,1.0);
for (int row = 0; row < rows_num; row++) {
for (int feature = 0; feature < features_num; feature++) {
instances.insert(row, feature) = distribution(generator);
}
}
Weights real_weights(features_num, 1);
for (int feature = 0; feature < features_num; feature++) {
real_weights.coeffRef(feature, 0) = distribution(generator);
}
std::cout << "Real Weights:" << std::endl << real_weights << std::endl;
LabelSet labels = instances * real_weights;
GradientEvaluator gradient_evaluator(instances, labels, loss_func);
LossMinimizer *minimizer = new ParallelBoostingWithMomentum(
0.0, 0.0, gradient_evaluator);
minimizer->Setup();
Weights weights(features_num, 1);
std::vector<float> loss;
minimizer->Run(5000, &weights, &loss);
std::cout << "Learned Weights:" << std::endl << weights << std::endl;
}