Merge branch 'ismaelJimenez-added_lambdas'
diff --git a/README.md b/README.md
index 5be5153..e30052d 100644
--- a/README.md
+++ b/README.md
@@ -142,6 +142,14 @@
->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
```
+The following code specifies the asymptotic complexity with a lambda function,
+which can be used to customize how the high-order term is calculated.
+
+```c++
+BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
+  ->Range(1<<10, 1<<18)->Complexity([](int n) -> double { return n; });
+```
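+
+Any valid fitting curve can be supplied this way. As a sketch mirroring the
+library's own tests (and assuming `<cmath>` is available for `log2`), an
+N*lgN benchmark might specify:
+
+```c++
+BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
+    ->Range(1<<10, 1<<18)->Complexity([](int n) { return n * log2(n); });
+```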
+
### Templated benchmarks
Templated benchmarks work the same way: This example produces and consumes
messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the
diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h
index e705d75..f38dc97 100644
--- a/include/benchmark/benchmark_api.h
+++ b/include/benchmark/benchmark_api.h
@@ -247,9 +247,14 @@
oNCubed,
oLogN,
oNLogN,
- oAuto
+ oAuto,
+ oLambda
};
+// BigOFunc is passed to a benchmark to specify the asymptotic computational
+// complexity of the benchmark.
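+// For example, a linear curve can be expressed as
+//   [](int n) -> double { return n; }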
+typedef double(BigOFunc)(int);
+
// State is passed to a running Benchmark and contains state for the
// benchmark to use.
class State {
@@ -257,24 +262,24 @@
State(size_t max_iters, bool has_x, int x, bool has_y, int y,
int thread_i, int n_threads);
- // Returns true iff the benchmark should continue through another iteration.
+ // Returns true if the benchmark should continue through another iteration.
// NOTE: A benchmark may not return from the test until KeepRunning() has
// returned false.
bool KeepRunning() {
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
- assert(!finished_);
- started_ = true;
- ResumeTiming();
+ assert(!finished_);
+ started_ = true;
+ ResumeTiming();
}
bool const res = total_iterations_++ < max_iterations;
if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
- assert(started_ && (!finished_ || error_occurred_));
- if (!error_occurred_) {
- PauseTiming();
- }
- // Total iterations now is one greater than max iterations. Fix this.
- total_iterations_ = max_iterations;
- finished_ = true;
+ assert(started_ && (!finished_ || error_occurred_));
+ if (!error_occurred_) {
+ PauseTiming();
+ }
+ // Total iterations now is one greater than max iterations. Fix this.
+ total_iterations_ = max_iterations;
+ finished_ = true;
}
return res;
}
@@ -358,7 +363,7 @@
// family benchmark, then current benchmark will be part of the computation and complexity_n will
// represent the length of N.
BENCHMARK_ALWAYS_INLINE
- void SetComplexityN(size_t complexity_n) {
+ void SetComplexityN(int complexity_n) {
complexity_n_ = complexity_n;
}
@@ -439,7 +444,7 @@
size_t bytes_processed_;
size_t items_processed_;
- size_t complexity_n_;
+ int complexity_n_;
public:
// FIXME: Make this private somehow.
@@ -538,6 +543,10 @@
// the asymptotic computational complexity will be shown on the output.
Benchmark* Complexity(BigO complexity = benchmark::oAuto);
+  // Set the asymptotic computational complexity for the benchmark. If called,
+  // the asymptotic computational complexity will be shown in the output.
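+  //
+  // A sketch of the intended usage (mirroring the README):
+  //   ->Complexity([](int n) -> double { return n; });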
+ Benchmark* Complexity(BigOFunc* complexity);
+
// Support for running multiple copies of the same benchmark concurrently
// in multiple threads. This may be useful when measuring the scaling
// of some piece of code.
diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h
index 0262754..22c97a0 100644
--- a/include/benchmark/reporter.h
+++ b/include/benchmark/reporter.h
@@ -20,7 +20,7 @@
#include <utility>
#include <vector>
-#include "benchmark_api.h" // For forward declaration of BenchmarkReporter
+#include "benchmark_api.h" // For forward declaration of BenchmarkReporter
namespace benchmark {
@@ -85,7 +85,8 @@
double max_heapbytes_used;
// Keep track of arguments to compute asymptotic complexity
- BigO complexity;
+ BigO complexity;
+ BigOFunc* complexity_lambda;
int complexity_n;
// Inform print function whether the current run is a complexity report
@@ -147,7 +148,7 @@
// REQUIRES: 'out' is non-null.
static void PrintBasicContext(std::ostream* out, Context const& context);
-private:
+ private:
std::ostream* output_stream_;
std::ostream* error_stream_;
};
@@ -159,31 +160,31 @@
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
-protected:
+ protected:
virtual void PrintRunData(const Run& report);
size_t name_field_width_;
};
class JSONReporter : public BenchmarkReporter {
-public:
+ public:
JSONReporter() : first_report_(true) {}
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
virtual void Finalize();
-private:
+ private:
void PrintRunData(const Run& report);
bool first_report_;
};
class CSVReporter : public BenchmarkReporter {
-public:
+ public:
virtual bool ReportContext(const Context& context);
virtual void ReportRuns(const std::vector<Run>& reports);
-private:
+ private:
void PrintRunData(const Run& report);
};
@@ -200,7 +201,7 @@
}
inline double GetTimeUnitMultiplier(TimeUnit unit) {
- switch (unit) {
+ switch (unit) {
case kMillisecond:
return 1e3;
case kMicrosecond:
@@ -211,5 +212,5 @@
}
}
-} // end namespace benchmark
-#endif // BENCHMARK_REPORTER_H_
+} // end namespace benchmark
+#endif // BENCHMARK_REPORTER_H_
diff --git a/src/benchmark.cc b/src/benchmark.cc
index 599cda0..f6c4fc2 100644
--- a/src/benchmark.cc
+++ b/src/benchmark.cc
@@ -130,7 +130,7 @@
ThreadStats() : bytes_processed(0), items_processed(0), complexity_n(0) {}
int64_t bytes_processed;
int64_t items_processed;
- int complexity_n;
+ int complexity_n;
};
// Timer management class
@@ -287,7 +287,7 @@
};
phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp)
- return false;
+ return false;
// else (running_threads_ == entered_) and we are the last thread.
}
// Last thread has reached the barrier
@@ -317,6 +317,7 @@
bool use_real_time;
bool use_manual_time;
BigO complexity;
+ BigOFunc* complexity_lambda;
bool last_benchmark_instance;
int repetitions;
double min_time;
@@ -362,6 +363,7 @@
void UseRealTime();
void UseManualTime();
void Complexity(BigO complexity);
+ void ComplexityLambda(BigOFunc* complexity);
void Threads(int t);
void ThreadRange(int min_threads, int max_threads);
void ThreadPerCpu();
@@ -382,6 +384,7 @@
bool use_real_time_;
bool use_manual_time_;
BigO complexity_;
+ BigOFunc* complexity_lambda_;
std::vector<int> thread_counts_;
BenchmarkImp& operator=(BenchmarkImp const&);
@@ -446,6 +449,7 @@
instance.use_real_time = family->use_real_time_;
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
+ instance.complexity_lambda = family->complexity_lambda_;
instance.threads = num_threads;
instance.multithreaded = !(family->thread_counts_.empty());
@@ -573,6 +577,10 @@
complexity_ = complexity;
}
+void BenchmarkImp::ComplexityLambda(BigOFunc* complexity) {
+ complexity_lambda_ = complexity;
+}
+
void BenchmarkImp::Threads(int t) {
CHECK_GT(t, 0);
thread_counts_.push_back(t);
@@ -697,6 +705,12 @@
return this;
}
+Benchmark* Benchmark::Complexity(BigOFunc* complexity) {
+ imp_->Complexity(oLambda);
+ imp_->ComplexityLambda(complexity);
+ return this;
+}
+
Benchmark* Benchmark::Threads(int t) {
imp_->Threads(t);
return this;
@@ -855,6 +869,7 @@
report.items_per_second = items_per_second;
report.complexity_n = total.complexity_n;
report.complexity = b.complexity;
+ report.complexity_lambda = b.complexity_lambda;
if(report.complexity != oNone)
complexity_reports.push_back(report);
}
@@ -884,7 +899,7 @@
}
std::vector<BenchmarkReporter::Run> additional_run_stats = ComputeStats(reports);
reports.insert(reports.end(), additional_run_stats.begin(),
- additional_run_stats.end());
+ additional_run_stats.end());
if((b.complexity != oNone) && b.last_benchmark_instance) {
additional_run_stats = ComputeBigO(complexity_reports);
diff --git a/src/complexity.cc b/src/complexity.cc
index 3e42f5d..24f1cf4 100644
--- a/src/complexity.cc
+++ b/src/complexity.cc
@@ -17,31 +17,30 @@
#include "benchmark/benchmark_api.h"
-#include "complexity.h"
-#include "check.h"
-#include "stat.h"
-#include <cmath>
#include <algorithm>
-#include <functional>
+#include <cmath>
+#include "check.h"
+#include "complexity.h"
+#include "stat.h"
namespace benchmark {
-
+
// Internal function to calculate the different scalability forms
-std::function<double(int)> FittingCurve(BigO complexity) {
+BigOFunc* FittingCurve(BigO complexity) {
switch (complexity) {
case oN:
- return [](int n) {return n; };
+ return [](int n) -> double { return n; };
case oNSquared:
- return [](int n) {return n*n; };
+ return [](int n) -> double { return n * n; };
case oNCubed:
- return [](int n) {return n*n*n; };
+ return [](int n) -> double { return n * n * n; };
case oLogN:
- return [](int n) {return log2(n); };
+ return [](int n) { return log2(n); };
case oNLogN:
- return [](int n) {return n * log2(n); };
+ return [](int n) { return n * log2(n); };
case o1:
default:
- return [](int) {return 1; };
+ return [](int) { return 1.0; };
}
}
@@ -49,24 +48,24 @@
std::string GetBigOString(BigO complexity) {
switch (complexity) {
case oN:
- return "* N";
+ return "N";
case oNSquared:
- return "* N**2";
+ return "N^2";
case oNCubed:
- return "* N**3";
+ return "N^3";
case oLogN:
- return "* lgN";
+ return "lgN";
case oNLogN:
- return "* NlgN";
+ return "NlgN";
case o1:
- return "* 1";
+ return "(1)";
default:
- return "";
+ return "f(N)";
}
}
-// Find the coefficient for the high-order term in the running time, by
-// minimizing the sum of squares of relative error, for the fitting curve
+// Find the coefficient for the high-order term in the running time, by
+// minimizing the sum of squares of relative error, for the fitting curve
// given by the lambda expression.
// - n : Vector containing the size of the benchmark tests.
// - time : Vector containing the times for the benchmark tests.
@@ -75,21 +74,9 @@
// For a deeper explanation of the algorithm, see the README file at
// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
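+//
+// Note: for a model of the form time ~= coef * fitting_curve(n), minimizing
+// the squared error yields the closed form
+//   coef = sigma(time * g(n)) / sigma(g(n)^2)
+// which is what the accumulation loop below computes.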
-// This interface is currently not used from the oustide, but it has been
-// provided for future upgrades. If in the future it is not needed to support
-// Cxx03, then all the calculations could be upgraded to use lambdas because
-// they are more powerful and provide a cleaner inferface than enumerators,
-// but complete implementation with lambdas will not work for Cxx03
-// (e.g. lack of std::function).
-// In case lambdas are implemented, the interface would be like :
-// -> Complexity([](int n) {return n;};)
-// and any arbitrary and valid equation would be allowed, but the option to
-// calculate the best fit to the most common scalability curves will still
-// be kept.
-
-LeastSq CalculateLeastSq(const std::vector<int>& n,
- const std::vector<double>& time,
- std::function<double(int)> fitting_curve) {
+LeastSq MinimalLeastSq(const std::vector<int>& n,
+ const std::vector<double>& time,
+ BigOFunc* fitting_curve) {
double sigma_gn = 0.0;
double sigma_gn_squared = 0.0;
double sigma_time = 0.0;
@@ -105,6 +92,7 @@
}
LeastSq result;
+ result.complexity = oLambda;
// Calculate complexity.
result.coef = sigma_time_gn / sigma_gn_squared;
@@ -134,29 +122,29 @@
const std::vector<double>& time,
const BigO complexity) {
CHECK_EQ(n.size(), time.size());
- CHECK_GE(n.size(), 2); // Do not compute fitting curve is less than two benchmark runs are given
+  CHECK_GE(n.size(), 2);  // Do not compute fitting curve if fewer than two
+                          // benchmark runs are given
CHECK_NE(complexity, oNone);
LeastSq best_fit;
- if(complexity == oAuto) {
- std::vector<BigO> fit_curves = {
- oLogN, oN, oNLogN, oNSquared, oNCubed };
+ if (complexity == oAuto) {
+ std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
// Take o1 as default best fitting curve
- best_fit = CalculateLeastSq(n, time, FittingCurve(o1));
+ best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
best_fit.complexity = o1;
// Compute all possible fitting curves and stick to the best one
for (const auto& fit : fit_curves) {
- LeastSq current_fit = CalculateLeastSq(n, time, FittingCurve(fit));
+ LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
if (current_fit.rms < best_fit.rms) {
best_fit = current_fit;
best_fit.complexity = fit;
}
}
} else {
- best_fit = CalculateLeastSq(n, time, FittingCurve(complexity));
+ best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
best_fit.complexity = complexity;
}
@@ -164,14 +152,13 @@
}
std::vector<BenchmarkReporter::Run> ComputeStats(
- const std::vector<BenchmarkReporter::Run>& reports)
-{
+ const std::vector<BenchmarkReporter::Run>& reports) {
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
- auto error_count = std::count_if(
- reports.begin(), reports.end(),
- [](Run const& run) {return run.error_occurred;});
+ auto error_count =
+ std::count_if(reports.begin(), reports.end(),
+ [](Run const& run) { return run.error_occurred; });
if (reports.size() - error_count < 2) {
// We don't report aggregated data if there was a single run.
@@ -190,12 +177,11 @@
for (Run const& run : reports) {
CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
CHECK_EQ(run_iterations, run.iterations);
- if (run.error_occurred)
- continue;
+ if (run.error_occurred) continue;
real_accumulated_time_stat +=
- Stat1_d(run.real_accumulated_time/run.iterations, run.iterations);
+ Stat1_d(run.real_accumulated_time / run.iterations, run.iterations);
cpu_accumulated_time_stat +=
- Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations);
+ Stat1_d(run.cpu_accumulated_time / run.iterations, run.iterations);
items_per_second_stat += Stat1_d(run.items_per_second, run.iterations);
bytes_per_second_stat += Stat1_d(run.bytes_per_second, run.iterations);
}
@@ -204,10 +190,10 @@
Run mean_data;
mean_data.benchmark_name = reports[0].benchmark_name + "_mean";
mean_data.iterations = run_iterations;
- mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() *
- run_iterations;
- mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() *
- run_iterations;
+ mean_data.real_accumulated_time =
+ real_accumulated_time_stat.Mean() * run_iterations;
+ mean_data.cpu_accumulated_time =
+ cpu_accumulated_time_stat.Mean() * run_iterations;
mean_data.bytes_per_second = bytes_per_second_stat.Mean();
mean_data.items_per_second = items_per_second_stat.Mean();
@@ -224,10 +210,8 @@
stddev_data.benchmark_name = reports[0].benchmark_name + "_stddev";
stddev_data.report_label = mean_data.report_label;
stddev_data.iterations = 0;
- stddev_data.real_accumulated_time =
- real_accumulated_time_stat.StdDev();
- stddev_data.cpu_accumulated_time =
- cpu_accumulated_time_stat.StdDev();
+ stddev_data.real_accumulated_time = real_accumulated_time_stat.StdDev();
+ stddev_data.cpu_accumulated_time = cpu_accumulated_time_stat.StdDev();
stddev_data.bytes_per_second = bytes_per_second_stat.StdDev();
stddev_data.items_per_second = items_per_second_stat.StdDev();
@@ -237,8 +221,7 @@
}
std::vector<BenchmarkReporter::Run> ComputeBigO(
- const std::vector<BenchmarkReporter::Run>& reports)
-{
+ const std::vector<BenchmarkReporter::Run>& reports) {
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
@@ -252,19 +235,22 @@
// Populate the accumulators.
for (const Run& run : reports) {
n.push_back(run.complexity_n);
- real_time.push_back(run.real_accumulated_time/run.iterations);
- cpu_time.push_back(run.cpu_accumulated_time/run.iterations);
+ real_time.push_back(run.real_accumulated_time / run.iterations);
+ cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
}
- LeastSq result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
+ LeastSq result_cpu;
+ LeastSq result_real;
- // result_cpu.complexity is passed as parameter to result_real because in case
- // reports[0].complexity is oAuto, the noise on the measured data could make
- // the best fit function of Cpu and Real differ. In order to solve this, we
- // take the best fitting function for the Cpu, and apply it to Real data.
- LeastSq result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
-
- std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
+ if (reports[0].complexity == oLambda) {
+ result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
+ result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
+ } else {
+ result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
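+    // Pass the complexity chosen for the CPU data when fitting the real-time
+    // data: if reports[0].complexity is oAuto, noise in the measurements could
+    // otherwise make the best-fit curves for CPU and real time differ.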
+ result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
+ }
+ std::string benchmark_name =
+ reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o;
diff --git a/src/complexity.h b/src/complexity.h
index be095a9..85cc125 100644
--- a/src/complexity.h
+++ b/src/complexity.h
@@ -60,11 +60,5 @@
// Function to return a string for the calculated complexity
std::string GetBigOString(BigO complexity);
-// Find the coefficient for the high-order term in the running time, by
-// minimizing the sum of squares of relative error.
-LeastSq MinimalLeastSq(const std::vector<int>& n,
- const std::vector<double>& time,
- const BigO complexity = oAuto);
-
} // end namespace benchmark
#endif // COMPLEXITY_H_
diff --git a/src/console_reporter.cc b/src/console_reporter.cc
index 9b20ac8..080c324 100644
--- a/src/console_reporter.cc
+++ b/src/console_reporter.cc
@@ -15,9 +15,9 @@
#include "benchmark/reporter.h"
#include "complexity.h"
+#include <algorithm>
#include <cstdint>
#include <cstdio>
-#include <algorithm>
#include <iostream>
#include <string>
#include <tuple>
@@ -62,8 +62,8 @@
void ConsoleReporter::PrintRunData(const Run& result) {
auto& Out = GetOutputStream();
- auto name_color = (result.report_big_o || result.report_rms)
- ? COLOR_BLUE : COLOR_GREEN;
+ auto name_color =
+ (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
ColorPrintf(Out, name_color, "%-*s ", name_field_width_,
result.benchmark_name.c_str());
@@ -84,25 +84,25 @@
if (result.items_per_second > 0) {
items = StrCat(" ", HumanReadableNumber(result.items_per_second),
" items/s");
- }
+ }
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
- if(result.report_big_o) {
- std::string big_o = result.report_big_o ? GetBigOString(result.complexity) : "";
- ColorPrintf(Out, COLOR_YELLOW, "%10.4f %s %10.4f %s ",
- real_time, big_o.c_str(), cpu_time, big_o.c_str());
- } else if(result.report_rms) {
- ColorPrintf(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ",
- real_time * 100, cpu_time * 100);
+ if (result.report_big_o) {
+ std::string big_o = GetBigOString(result.complexity);
+ ColorPrintf(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time,
+ big_o.c_str(), cpu_time, big_o.c_str());
+ } else if (result.report_rms) {
+ ColorPrintf(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
+ cpu_time * 100);
} else {
const char* timeLabel = GetTimeUnitString(result.time_unit);
- ColorPrintf(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ",
- real_time, timeLabel, cpu_time, timeLabel);
+ ColorPrintf(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
+ cpu_time, timeLabel);
}
- if(!result.report_big_o && !result.report_rms) {
+ if (!result.report_big_o && !result.report_rms) {
ColorPrintf(Out, COLOR_CYAN, "%10lld", result.iterations);
}
diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc
index 5c18c9e..7bc7ef3 100644
--- a/src/csv_reporter.cc
+++ b/src/csv_reporter.cc
@@ -13,9 +13,10 @@
// limitations under the License.
#include "benchmark/reporter.h"
+#include "complexity.h"
-#include <cstdint>
#include <algorithm>
+#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
@@ -79,7 +80,7 @@
}
// Do not print iteration on bigO and RMS report
- if(!run.report_big_o && !run.report_rms) {
+ if (!run.report_big_o && !run.report_rms) {
Out << run.iterations;
}
Out << ",";
@@ -87,8 +88,10 @@
Out << run.GetAdjustedRealTime() << ",";
Out << run.GetAdjustedCPUTime() << ",";
- // Do not print timeLabel on RMS report
- if(!run.report_rms) {
+  // Do not print timeLabel on bigO and RMS reports
+ if (run.report_big_o) {
+ Out << GetBigOString(run.complexity);
+ } else if (!run.report_rms) {
Out << GetTimeUnitString(run.time_unit);
}
Out << ",";
@@ -108,7 +111,7 @@
ReplaceAll(&label, "\"", "\"\"");
Out << "\"" << label << "\"";
}
- Out << ",,"; // for error_occurred and error_message
+ Out << ",,"; // for error_occurred and error_message
Out << '\n';
}
diff --git a/src/json_reporter.cc b/src/json_reporter.cc
index 8ec18e0..485d305 100644
--- a/src/json_reporter.cc
+++ b/src/json_reporter.cc
@@ -13,9 +13,10 @@
// limitations under the License.
#include "benchmark/reporter.h"
+#include "complexity.h"
-#include <cstdint>
#include <algorithm>
+#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
@@ -99,24 +100,24 @@
first_report_ = false;
for (auto it = reports.begin(); it != reports.end(); ++it) {
- out << indent << "{\n";
- PrintRunData(*it);
- out << indent << '}';
- auto it_cp = it;
- if (++it_cp != reports.end()) {
- out << ",\n";
- }
+ out << indent << "{\n";
+ PrintRunData(*it);
+ out << indent << '}';
+ auto it_cp = it;
+ if (++it_cp != reports.end()) {
+ out << ",\n";
+ }
}
}
void JSONReporter::Finalize() {
- // Close the list of benchmarks and the top level object.
- GetOutputStream() << "\n ]\n}\n";
+ // Close the list of benchmarks and the top level object.
+ GetOutputStream() << "\n ]\n}\n";
}
void JSONReporter::PrintRunData(Run const& run) {
- std::string indent(6, ' ');
- std::ostream& out = GetOutputStream();
+ std::string indent(6, ' ');
+ std::ostream& out = GetOutputStream();
out << indent
<< FormatKV("name", run.benchmark_name)
<< ",\n";
@@ -128,33 +129,50 @@
<< FormatKV("error_message", run.error_message)
<< ",\n";
}
- if(!run.report_big_o && !run.report_rms) {
+ if (!run.report_big_o && !run.report_rms) {
out << indent
<< FormatKV("iterations", run.iterations)
<< ",\n";
- }
- out << indent
- << FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
- << ",\n";
- out << indent
- << FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
- if(!run.report_rms) {
+ out << indent
+ << FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime()))
+ << ",\n";
+ out << indent
+ << FormatKV("cpu_time", RoundDouble(run.GetAdjustedCPUTime()));
out << ",\n" << indent
<< FormatKV("time_unit", GetTimeUnitString(run.time_unit));
- }
- if (run.bytes_per_second > 0.0) {
- out << ",\n" << indent
- << FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
- }
- if (run.items_per_second > 0.0) {
- out << ",\n" << indent
- << FormatKV("items_per_second", RoundDouble(run.items_per_second));
- }
- if (!run.report_label.empty()) {
- out << ",\n" << indent
- << FormatKV("label", run.report_label);
- }
- out << '\n';
+ } else if (run.report_big_o) {
+ out << indent
+ << FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime()))
+ << ",\n";
+ out << indent
+ << FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime()))
+ << ",\n";
+ out << indent
+ << FormatKV("big_o", GetBigOString(run.complexity))
+ << ",\n";
+ out << indent
+ << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
+  } else if (run.report_rms) {
+ out << indent
+ << FormatKV("rms", RoundDouble(run.GetAdjustedCPUTime()*100))
+ << '%';
+ }
+ if (run.bytes_per_second > 0.0) {
+ out << ",\n"
+ << indent
+ << FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second));
+ }
+ if (run.items_per_second > 0.0) {
+ out << ",\n"
+ << indent
+ << FormatKV("items_per_second", RoundDouble(run.items_per_second));
+ }
+ if (!run.report_label.empty()) {
+ out << ",\n"
+ << indent
+ << FormatKV("label", run.report_label);
+ }
+ out << '\n';
}
-} // end namespace benchmark
+} // end namespace benchmark
diff --git a/test/complexity_test.cc b/test/complexity_test.cc
index 225a181..ee24202 100644
--- a/test/complexity_test.cc
+++ b/test/complexity_test.cc
@@ -1,12 +1,179 @@
-#include "benchmark/benchmark_api.h"
-
-#include <cstdlib>
-#include <string>
+#undef NDEBUG
+#include "benchmark/benchmark.h"
+#include "../src/check.h" // NOTE: check.h is for internal use only!
+#include "../src/re.h" // NOTE: re.h is for internal use only
+#include <cassert>
+#include <cstring>
+#include <iostream>
+#include <sstream>
#include <vector>
-#include <map>
+#include <utility>
#include <algorithm>
+namespace {
+
+// ========================================================================= //
+// -------------------------- Testing Case --------------------------------- //
+// ========================================================================= //
+
+enum MatchRules {
+ MR_Default, // Skip non-matching lines until a match is found.
+ MR_Next // Match must occur on the next line.
+};
+
+struct TestCase {
+ std::string regex;
+ int match_rule;
+
+ TestCase(std::string re, int rule = MR_Default) : regex(re), match_rule(rule) {}
+
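+  // Scan remaining_output line by line for a match of `regex`. With
+  // MR_Default non-matching lines are skipped; with MR_Next the very next
+  // line must match.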
+ void Check(std::stringstream& remaining_output) const {
+ benchmark::Regex r;
+ std::string err_str;
+ r.Init(regex, &err_str);
+ CHECK(err_str.empty()) << "Could not construct regex \"" << regex << "\""
+ << " got Error: " << err_str;
+
+ std::string line;
+ while (remaining_output.eof() == false) {
+ CHECK(remaining_output.good());
+ std::getline(remaining_output, line);
+ if (r.Match(line)) return;
+ CHECK(match_rule != MR_Next) << "Expected line \"" << line
+ << "\" to match regex \"" << regex << "\"";
+ }
+
+ CHECK(remaining_output.eof() == false)
+ << "End of output reached before match for regex \"" << regex
+ << "\" was found";
+ }
+};
+
+std::vector<TestCase> ConsoleOutputTests;
+std::vector<TestCase> JSONOutputTests;
+std::vector<TestCase> CSVOutputTests;
+
+// ========================================================================= //
+// -------------------------- Test Helpers --------------------------------- //
+// ========================================================================= //
+
+class TestReporter : public benchmark::BenchmarkReporter {
+public:
+ TestReporter(std::vector<benchmark::BenchmarkReporter*> reps)
+ : reporters_(reps) {}
+
+ virtual bool ReportContext(const Context& context) {
+ bool last_ret = false;
+ bool first = true;
+ for (auto rep : reporters_) {
+ bool new_ret = rep->ReportContext(context);
+ CHECK(first || new_ret == last_ret)
+ << "Reports return different values for ReportContext";
+ first = false;
+ last_ret = new_ret;
+ }
+ return last_ret;
+ }
+
+ virtual void ReportRuns(const std::vector<Run>& report) {
+ for (auto rep : reporters_)
+ rep->ReportRuns(report);
+ }
+
+ virtual void Finalize() {
+ for (auto rep : reporters_)
+ rep->Finalize();
+ }
+
+private:
+ std::vector<benchmark::BenchmarkReporter*> reporters_;
+};
+
+
+#define CONCAT2(x, y) x##y
+#define CONCAT(x, y) CONCAT2(x, y)
+
+#define ADD_CASES(...) \
+ int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)
+
+int AddCases(std::vector<TestCase>* out, std::initializer_list<TestCase> const& v) {
+ for (auto const& TC : v)
+ out->push_back(TC);
+ return 0;
+}
+
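+// join("a", "b") produces "a[ ]+b": the fragments are glued together with a
+// whitespace matcher, for building console-output regexes.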
+template <class First>
+std::string join(First f) { return f; }
+
+template <class First, class ...Args>
+std::string join(First f, Args&&... args) {
+ return std::string(std::move(f)) + "[ ]+" + join(std::forward<Args>(args)...);
+}
+
+std::string dec_re = "[0-9]+\\.[0-9]+";
+
+#define ADD_COMPLEXITY_CASES(...) \
+ int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
+
+int AddComplexityTest(std::vector<TestCase>* console_out, std::vector<TestCase>* json_out,
+ std::vector<TestCase>* csv_out, std::string big_o_test_name,
+ std::string rms_test_name, std::string big_o) {
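+  // big_o_str matches a coefficient followed by the big-O label, e.g. "12.3 N".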
+ std::string big_o_str = dec_re + " " + big_o;
+ AddCases(console_out, {
+ {join("^" + big_o_test_name + "", big_o_str, big_o_str) + "[ ]*$"},
+ {join("^" + rms_test_name + "", "[0-9]+ %", "[0-9]+ %") + "[ ]*$"}
+ });
+ AddCases(json_out, {
+ {"\"name\": \"" + big_o_test_name + "\",$"},
+ {"\"cpu_coefficient\": [0-9]+,$", MR_Next},
+ {"\"real_coefficient\": [0-9]{1,5},$", MR_Next},
+ {"\"big_o\": \"" + big_o + "\",$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next},
+ {"\"name\": \"" + rms_test_name + "\",$"},
+ {"\"rms\": [0-9]+%$", MR_Next},
+ {"}", MR_Next}
+ });
+ AddCases(csv_out, {
+ {"^\"" + big_o_test_name + "\",," + dec_re + "," + dec_re + "," + big_o + ",,,,,$"},
+ {"^\"" + rms_test_name + "\",," + dec_re + "," + dec_re + ",,,,,,$"}
+ });
+ return 0;
+}
+
+} // end namespace
+
+// ========================================================================= //
+// --------------------------- Testing BigO O(1) --------------------------- //
+// ========================================================================= //
+
+void BM_Complexity_O1(benchmark::State& state) {
+ while (state.KeepRunning()) {
+ }
+ state.SetComplexityN(state.range_x());
+}
+BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1);
+BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity([](int){return 1.0; });
+BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity();
+
+const char* big_o_1_test_name = "BM_Complexity_O1_BigO";
+const char* rms_o_1_test_name = "BM_Complexity_O1_RMS";
+const char* enum_auto_big_o_1 = "\\([0-9]+\\)";
+const char* lambda_big_o_1 = "f\\(N\\)";
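+// Per GetBigOString, an o1 fit is reported as "(1)" while a user-provided
+// lambda is reported as the generic "f(N)".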
+
+// Add enum tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+ big_o_1_test_name, rms_o_1_test_name, enum_auto_big_o_1);
+
+// Add lambda tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+ big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
+
+// ========================================================================= //
+// --------------------------- Testing BigO O(N) --------------------------- //
+// ========================================================================= //
+
std::vector<int> ConstructRandomVector(int size) {
std::vector<int> v;
v.reserve(size);
@@ -16,22 +183,7 @@
return v;
}
-std::map<int, int> ConstructRandomMap(int size) {
- std::map<int, int> m;
- for (int i = 0; i < size; ++i) {
- m.insert(std::make_pair(rand() % size, rand() % size));
- }
- return m;
-}
-
-void BM_Complexity_O1(benchmark::State& state) {
- while (state.KeepRunning()) {
- }
- state.SetComplexityN(state.range_x());
-}
-BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1);
-
-static void BM_Complexity_O_N(benchmark::State& state) {
+void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range_x());
const int item_not_in_vector = state.range_x()*2; // Test worst case scenario (item not in vector)
while (state.KeepRunning()) {
@@ -40,50 +192,25 @@
state.SetComplexityN(state.range_x());
}
BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN);
+BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](int n) -> double{return n; });
BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity();
-static void BM_Complexity_O_N_Squared(benchmark::State& state) {
- std::string s1(state.range_x(), '-');
- std::string s2(state.range_x(), '-');
- state.SetComplexityN(state.range_x());
- while (state.KeepRunning())
- for(char& c1 : s1) {
- for(char& c2 : s2) {
- benchmark::DoNotOptimize(c1 = 'a');
- benchmark::DoNotOptimize(c2 = 'b');
- }
- }
-}
-BENCHMARK(BM_Complexity_O_N_Squared) -> Range(1, 1<<8) -> Complexity(benchmark::oNSquared);
+const char* big_o_n_test_name = "BM_Complexity_O_N_BigO";
+const char* rms_o_n_test_name = "BM_Complexity_O_N_RMS";
+const char* enum_auto_big_o_n = "N";
+const char* lambda_big_o_n = "f\\(N\\)";
-static void BM_Complexity_O_N_Cubed(benchmark::State& state) {
- std::string s1(state.range_x(), '-');
- std::string s2(state.range_x(), '-');
- std::string s3(state.range_x(), '-');
- state.SetComplexityN(state.range_x());
- while (state.KeepRunning())
- for(char& c1 : s1) {
- for(char& c2 : s2) {
- for(char& c3 : s3) {
- benchmark::DoNotOptimize(c1 = 'a');
- benchmark::DoNotOptimize(c2 = 'b');
- benchmark::DoNotOptimize(c3 = 'c');
- }
- }
- }
-}
-BENCHMARK(BM_Complexity_O_N_Cubed) -> DenseRange(1, 8) -> Complexity(benchmark::oNCubed);
+// Add enum tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+ big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);
-static void BM_Complexity_O_log_N(benchmark::State& state) {
- auto m = ConstructRandomMap(state.range_x());
- const int item_not_in_vector = state.range_x()*2; // Test worst case scenario (item not in vector)
- while (state.KeepRunning()) {
- benchmark::DoNotOptimize(m.find(item_not_in_vector));
- }
- state.SetComplexityN(state.range_x());
-}
-BENCHMARK(BM_Complexity_O_log_N)
- -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oLogN);
+// Add lambda tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+ big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
+
+// ========================================================================= //
+// ------------------------- Testing BigO O(N*lgN) ------------------------- //
+// ========================================================================= //
static void BM_Complexity_O_N_log_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range_x());
@@ -93,14 +220,77 @@
state.SetComplexityN(state.range_x());
}
BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN);
+BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity([](int n) {return n * log2(n); });
BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity();
-// Test benchmark with no range and check no complexity is calculated.
-void BM_Extreme_Cases(benchmark::State& state) {
- while (state.KeepRunning()) {
- }
-}
-BENCHMARK(BM_Extreme_Cases) -> Complexity(benchmark::oNLogN);
-BENCHMARK(BM_Extreme_Cases) -> Arg(42) -> Complexity();
+const char* big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
+const char* rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
+const char* enum_auto_big_o_n_lg_n = "NlgN";
+const char* lambda_big_o_n_lg_n = "f\\(N\\)";
-BENCHMARK_MAIN()
+// Add enum tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+ big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);
+
+// Add lambda tests
+ADD_COMPLEXITY_CASES(&ConsoleOutputTests, &JSONOutputTests, &CSVOutputTests,
+ big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);
+
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+
+int main(int argc, char* argv[]) {
+ // Add --color_print=false to argv since we don't want to match color codes.
+ char new_arg[64];
+ char* new_argv[64];
+ std::copy(argv, argv + argc, new_argv);
+ new_argv[argc++] = std::strcpy(new_arg, "--color_print=false");
+ benchmark::Initialize(&argc, new_argv);
+
+ benchmark::ConsoleReporter CR;
+ benchmark::JSONReporter JR;
+ benchmark::CSVReporter CSVR;
+ struct ReporterTest {
+ const char* name;
+ std::vector<TestCase>& output_cases;
+ benchmark::BenchmarkReporter& reporter;
+ std::stringstream out_stream;
+ std::stringstream err_stream;
+
+ ReporterTest(const char* n,
+ std::vector<TestCase>& out_tc,
+ benchmark::BenchmarkReporter& br)
+ : name(n), output_cases(out_tc), reporter(br) {
+ reporter.SetOutputStream(&out_stream);
+ reporter.SetErrorStream(&err_stream);
+ }
+ } TestCases[] = {
+ {"ConsoleReporter", ConsoleOutputTests, CR},
+ {"JSONReporter", JSONOutputTests, JR},
+ {"CSVReporter", CSVOutputTests, CSVR}
+ };
+
+ // Create the test reporter and run the benchmarks.
+ std::cout << "Running benchmarks...\n";
+ TestReporter test_rep({&CR, &JR, &CSVR});
+ benchmark::RunSpecifiedBenchmarks(&test_rep);
+
+ for (auto& rep_test : TestCases) {
+ std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n";
+ std::string banner(msg.size() - 1, '-');
+ std::cout << banner << msg << banner << "\n";
+
+ std::cerr << rep_test.err_stream.str();
+ std::cout << rep_test.out_stream.str();
+
+ for (const auto& TC : rep_test.output_cases)
+ TC.Check(rep_test.out_stream);
+
+ std::cout << "\n";
+ }
+ return 0;
+}
+
diff --git a/test/reporter_output_test.cc b/test/reporter_output_test.cc
index c09fbb6..b3898ac 100644
--- a/test/reporter_output_test.cc
+++ b/test/reporter_output_test.cc
@@ -189,7 +189,7 @@
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1<<18)->Complexity(benchmark::o1);
-std::string bigOStr = "[0-9]+\\.[0-9]+ \\* [0-9]+";
+std::string bigOStr = "[0-9]+\\.[0-9]+ \\([0-9]+\\)";
ADD_CASES(&ConsoleOutputTests, {
{join("^BM_Complexity_O1_BigO", bigOStr, bigOStr) + "[ ]*$"},