Allow AddRange to work with int64_t. (#548)
* Allow AddRange to work with int64_t.
Fixes #516
Also, tweak how we manage per-test build needs, and create a standard
_gtest suffix for googletest-based tests to differentiate them from
non-googletest tests.
I also ran clang-format on the files I changed (but not the main
benchmark header or the core sources, as they have too many
pre-existing clang-format issues).
* Add benchmark_gtest to cmake
* Set(Items|Bytes)Processed now take int64_t
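
For illustration, the user-visible effect is that benchmark arguments and
processed-counts no longer truncate at 32 bits. A minimal sketch
(BM_MemFill is a hypothetical benchmark, not part of this patch):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    #include "benchmark/benchmark.h"

    static void BM_MemFill(benchmark::State& state) {
      // state.range(0) is now int64_t, so arguments above INT32_MAX
      // arrive intact.
      const int64_t size = state.range(0);
      std::vector<char> buf(static_cast<size_t>(size));
      for (auto _ : state) {
        std::fill(buf.begin(), buf.end(), 'x');
        benchmark::DoNotOptimize(buf.data());
      }
      // SetBytesProcessed() now takes int64_t as well, so this total
      // cannot wrap a 32-bit accumulator.
      state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * size);
    }
    // Range() now accepts int64_t endpoints; a limit of 1 << 32 was
    // previously unrepresentable here.
    BENCHMARK(BM_MemFill)->Range(1 << 10, int64_t{1} << 32);
    BENCHMARK_MAIN();
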
diff --git a/include/benchmark/benchmark.h b/include/benchmark/benchmark.h
index 04fbbf4..ea9743c 100644
--- a/include/benchmark/benchmark.h
+++ b/include/benchmark/benchmark.h
@@ -514,10 +514,10 @@
//
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
- void SetBytesProcessed(size_t bytes) { bytes_processed_ = bytes; }
+ void SetBytesProcessed(int64_t bytes) { bytes_processed_ = bytes; }
BENCHMARK_ALWAYS_INLINE
- size_t bytes_processed() const { return bytes_processed_; }
+ int64_t bytes_processed() const { return bytes_processed_; }
// If this routine is called with complexity_n > 0 and complexity report is
// requested for the
@@ -525,10 +525,10 @@
// and complexity_n will
// represent the length of N.
BENCHMARK_ALWAYS_INLINE
- void SetComplexityN(int complexity_n) { complexity_n_ = complexity_n; }
+ void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }
BENCHMARK_ALWAYS_INLINE
- int complexity_length_n() { return complexity_n_; }
+ int64_t complexity_length_n() { return complexity_n_; }
// If this routine is called with items > 0, then an items/s
// label is printed on the benchmark report line for the currently
@@ -537,10 +537,10 @@
//
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
- void SetItemsProcessed(size_t items) { items_processed_ = items; }
+ void SetItemsProcessed(int64_t items) { items_processed_ = items; }
BENCHMARK_ALWAYS_INLINE
- size_t items_processed() const { return items_processed_; }
+ int64_t items_processed() const { return items_processed_; }
// If this routine is called, the specified label is printed at the
// end of the benchmark report line for the currently executing
@@ -562,16 +562,16 @@
// Range arguments for this run. CHECKs if the argument has been set.
BENCHMARK_ALWAYS_INLINE
- int range(std::size_t pos = 0) const {
+ int64_t range(std::size_t pos = 0) const {
assert(range_.size() > pos);
return range_[pos];
}
BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
- int range_x() const { return range(0); }
+ int64_t range_x() const { return range(0); }
BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
- int range_y() const { return range(1); }
+ int64_t range_y() const { return range(1); }
BENCHMARK_ALWAYS_INLINE
size_t iterations() const {
@@ -598,12 +598,12 @@
bool error_occurred_;
private: // items we don't need on the first cache line
- std::vector<int> range_;
+ std::vector<int64_t> range_;
- size_t bytes_processed_;
- size_t items_processed_;
+ int64_t bytes_processed_;
+ int64_t items_processed_;
- int complexity_n_;
+ int64_t complexity_n_;
public:
// Container for user-defined counters.
@@ -615,7 +615,7 @@
// TODO(EricWF) make me private
- State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
+ State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager);
@@ -736,7 +736,7 @@
// Run this benchmark once with "x" as the extra argument passed
// to the function.
// REQUIRES: The function passed to the constructor must accept an arg1.
- Benchmark* Arg(int x);
+ Benchmark* Arg(int64_t x);
// Run this benchmark with the given time unit for the generated output report
Benchmark* Unit(TimeUnit unit);
@@ -744,23 +744,23 @@
// Run this benchmark once for a number of values picked from the
// range [start..limit]. (start and limit are always picked.)
// REQUIRES: The function passed to the constructor must accept an arg1.
- Benchmark* Range(int start, int limit);
+ Benchmark* Range(int64_t start, int64_t limit);
// Run this benchmark once for all values in the range [start..limit] with
// specific step
// REQUIRES: The function passed to the constructor must accept an arg1.
- Benchmark* DenseRange(int start, int limit, int step = 1);
+ Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
// Run this benchmark once with "args" as the extra arguments passed
// to the function.
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
- Benchmark* Args(const std::vector<int>& args);
+ Benchmark* Args(const std::vector<int64_t>& args);
// Equivalent to Args({x, y})
// NOTE: This is a legacy C++03 interface provided for compatibility only.
// New code should use 'Args'.
- Benchmark* ArgPair(int x, int y) {
- std::vector<int> args;
+ Benchmark* ArgPair(int64_t x, int64_t y) {
+ std::vector<int64_t> args;
args.push_back(x);
args.push_back(y);
return Args(args);
@@ -769,7 +769,7 @@
// Run this benchmark once for a number of values picked from the
// ranges [start..limit]. (starts and limits are always picked.)
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
- Benchmark* Ranges(const std::vector<std::pair<int, int> >& ranges);
+ Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
// Equivalent to ArgNames({name})
Benchmark* ArgName(const std::string& name);
@@ -781,8 +781,8 @@
// Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
// NOTE: This is a legacy C++03 interface provided for compatibility only.
// New code should use 'Ranges'.
- Benchmark* RangePair(int lo1, int hi1, int lo2, int hi2) {
- std::vector<std::pair<int, int> > ranges;
+ Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
+ std::vector<std::pair<int64_t, int64_t> > ranges;
ranges.push_back(std::make_pair(lo1, hi1));
ranges.push_back(std::make_pair(lo2, hi2));
return Ranges(ranges);
@@ -889,15 +889,13 @@
int ArgsCnt() const;
- static void AddRange(std::vector<int>* dst, int lo, int hi, int mult);
-
private:
friend class BenchmarkFamilies;
std::string name_;
ReportMode report_mode_;
std::vector<std::string> arg_names_; // Args for all benchmark runs
- std::vector<std::vector<int> > args_; // Args for all benchmark runs
+ std::vector<std::vector<int64_t> > args_; // Args for all benchmark runs
TimeUnit time_unit_;
int range_multiplier_;
double min_time_;
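
A short sketch of how the widened State accessors compose — range(0),
SetComplexityN(), and SetItemsProcessed() now all agree on int64_t, so a
whole reporting expression stays in the signed 64-bit domain (BM_Find is
hypothetical, not part of this patch):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    #include "benchmark/benchmark.h"

    static void BM_Find(benchmark::State& state) {
      const int64_t n = state.range(0);  // int64_t, was int
      std::vector<int64_t> v(static_cast<size_t>(n), 1);
      for (auto _ : state) {
        benchmark::DoNotOptimize(std::find(v.begin(), v.end(), 2));
      }
      // Both setters take int64_t, matching range(0).
      state.SetComplexityN(n);
      state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * n);
    }
    BENCHMARK(BM_Find)
        ->RangeMultiplier(8)
        ->Range(1 << 10, 1 << 24)
        ->Complexity(benchmark::oN);
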
diff --git a/src/benchmark.cc b/src/benchmark.cc
index 356ed54..7b0d113 100644
--- a/src/benchmark.cc
+++ b/src/benchmark.cc
@@ -290,7 +290,7 @@
} // namespace
} // namespace internal
-State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
+State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
int n_threads, internal::ThreadTimer* timer,
internal::ThreadManager* manager)
: total_iterations_(0),
diff --git a/src/benchmark_api_internal.h b/src/benchmark_api_internal.h
index d481dc5..dd7a3ff 100644
--- a/src/benchmark_api_internal.h
+++ b/src/benchmark_api_internal.h
@@ -17,7 +17,7 @@
std::string name;
Benchmark* benchmark;
ReportMode report_mode;
- std::vector<int> arg;
+ std::vector<int64_t> arg;
TimeUnit time_unit;
int range_multiplier;
bool use_real_time;
diff --git a/src/benchmark_register.cc b/src/benchmark_register.cc
index 59b3e4d..4fea6d9 100644
--- a/src/benchmark_register.cc
+++ b/src/benchmark_register.cc
@@ -12,9 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "benchmark/benchmark.h"
-#include "benchmark_api_internal.h"
-#include "internal_macros.h"
+#include "benchmark_register.h"
#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
@@ -36,13 +34,16 @@
#include <sstream>
#include <thread>
+#include "benchmark/benchmark.h"
+#include "benchmark_api_internal.h"
#include "check.h"
#include "commandlineflags.h"
#include "complexity.h"
-#include "statistics.h"
+#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
+#include "statistics.h"
#include "string_util.h"
#include "timers.h"
@@ -175,7 +176,7 @@
StrFormat("%s:", family->arg_names_[arg_i].c_str());
}
}
-
+
instance.name += StrFormat("%d", arg);
++arg_i;
}
@@ -246,30 +247,7 @@
Benchmark::~Benchmark() {}
-void Benchmark::AddRange(std::vector<int>* dst, int lo, int hi, int mult) {
- CHECK_GE(lo, 0);
- CHECK_GE(hi, lo);
- CHECK_GE(mult, 2);
-
- // Add "lo"
- dst->push_back(lo);
-
- static const int kint32max = std::numeric_limits<int32_t>::max();
-
- // Now space out the benchmarks in multiples of "mult"
- for (int32_t i = 1; i < kint32max / mult; i *= mult) {
- if (i >= hi) break;
- if (i > lo) {
- dst->push_back(i);
- }
- }
- // Add "hi" (if different from "lo")
- if (hi != lo) {
- dst->push_back(hi);
- }
-}
-
-Benchmark* Benchmark::Arg(int x) {
+Benchmark* Benchmark::Arg(int64_t x) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
return this;
@@ -280,20 +258,21 @@
return this;
}
-Benchmark* Benchmark::Range(int start, int limit) {
+Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
- std::vector<int> arglist;
+ std::vector<int64_t> arglist;
AddRange(&arglist, start, limit, range_multiplier_);
- for (int i : arglist) {
+ for (int64_t i : arglist) {
args_.push_back({i});
}
return this;
}
-Benchmark* Benchmark::Ranges(const std::vector<std::pair<int, int>>& ranges) {
+Benchmark* Benchmark::Ranges(
+ const std::vector<std::pair<int64_t, int64_t>>& ranges) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
- std::vector<std::vector<int>> arglists(ranges.size());
+ std::vector<std::vector<int64_t>> arglists(ranges.size());
std::size_t total = 1;
for (std::size_t i = 0; i < ranges.size(); i++) {
AddRange(&arglists[i], ranges[i].first, ranges[i].second,
@@ -304,7 +283,7 @@
std::vector<std::size_t> ctr(arglists.size(), 0);
for (std::size_t i = 0; i < total; i++) {
- std::vector<int> tmp;
+ std::vector<int64_t> tmp;
tmp.reserve(arglists.size());
for (std::size_t j = 0; j < arglists.size(); j++) {
@@ -336,17 +315,17 @@
return this;
}
-Benchmark* Benchmark::DenseRange(int start, int limit, int step) {
+Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
CHECK_GE(start, 0);
CHECK_LE(start, limit);
- for (int arg = start; arg <= limit; arg += step) {
+ for (int64_t arg = start; arg <= limit; arg += step) {
args_.push_back({arg});
}
return this;
}
-Benchmark* Benchmark::Args(const std::vector<int>& args) {
+Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
args_.push_back(args);
return this;
@@ -363,7 +342,6 @@
return this;
}
-
Benchmark* Benchmark::MinTime(double t) {
CHECK(t > 0.0);
CHECK(iterations_ == 0);
@@ -371,7 +349,6 @@
return this;
}
-
Benchmark* Benchmark::Iterations(size_t n) {
CHECK(n > 0);
CHECK(IsZero(min_time_));
diff --git a/src/benchmark_register.h b/src/benchmark_register.h
new file mode 100644
index 0000000..0705e21
--- /dev/null
+++ b/src/benchmark_register.h
@@ -0,0 +1,34 @@
+#ifndef BENCHMARK_REGISTER_H
+#define BENCHMARK_REGISTER_H
+
+#include <limits>
+#include <vector>
+
+#include "check.h"
+
+template <typename T>
+void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
+ CHECK_GE(lo, 0);
+ CHECK_GE(hi, lo);
+ CHECK_GE(mult, 2);
+
+ // Add "lo"
+ dst->push_back(lo);
+
+ static const T kmax = std::numeric_limits<T>::max();
+
+ // Now space out the benchmarks in multiples of "mult"
+ for (T i = 1; i < kmax / mult; i *= mult) {
+ if (i >= hi) break;
+ if (i > lo) {
+ dst->push_back(i);
+ }
+ }
+
+ // Add "hi" (if different from "lo")
+ if (hi != lo) {
+ dst->push_back(hi);
+ }
+}
+
+#endif // BENCHMARK_REGISTER_H
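
The template preserves the old algorithm — push lo, then every power of
mult strictly between lo and hi, then hi — but the overflow guard now
scales with T instead of being pinned to int32_t. One wrinkle: template
deduction conflicts when the vector's element type and the bounds
disagree, which is why the tests below cast their literals; an explicit
template argument works too. A worked sketch, assuming this header's
include path:

    #include <cstdint>
    #include <vector>

    #include "benchmark_register.h"

    void Sketch() {
      std::vector<int64_t> dst;
      // lo=5, then powers of 2 strictly between 5 and 15 (just 8), then
      // hi=15, so dst == {5, 8, 15}.
      AddRange<int64_t>(&dst, 5, 15, 2);

      dst.clear();
      // With T = int64_t the guard is numeric_limits<int64_t>::max() / mult,
      // so limits beyond INT32_MAX no longer overflow the loop:
      AddRange<int64_t>(&dst, 1, int64_t{1} << 32, 2);
      // dst == {1, 2, 4, ..., 1 << 31, 1 << 32}
    }
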
diff --git a/test/BUILD b/test/BUILD
index dfca7db..2b3a391 100644
--- a/test/BUILD
+++ b/test/BUILD
@@ -1,15 +1,28 @@
-NEEDS_GTEST_MAIN = [
- "statistics_test.cc",
-]
-
TEST_COPTS = [
"-pedantic",
"-pedantic-errors",
"-std=c++11",
+ "-Wall",
+ "-Wextra",
+ "-Wshadow",
+# "-Wshorten-64-to-32",
+ "-Wfloat-equal",
+ "-fstrict-aliasing",
]
+PER_SRC_COPTS = ({
+ "cxx03_test.cc": ["-std=c++03"],
+ # Some of the issues with DoNotOptimize only occur when optimization is enabled
+ "donotoptimize_test.cc": ["-O3"],
+})
+
+
TEST_ARGS = ["--benchmark_min_time=0.01"]
+PER_SRC_TEST_ARGS = ({
+ "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"],
+})
+
cc_library(
name = "output_test_helper",
testonly = 1,
@@ -22,27 +35,23 @@
],
)
-[cc_test(
+[
+ cc_test(
name = test_src[:-len(".cc")],
size = "small",
srcs = [test_src],
- args = TEST_ARGS + ({
- "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"],
- }).get(test_src, []),
- copts = TEST_COPTS + ({
- "cxx03_test.cc": ["-std=c++03"],
- # Some of the issues with DoNotOptimize only occur when optimization is enabled
- "donotoptimize_test.cc": ["-O3"],
- }).get(test_src, []),
+ args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []),
+ copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []),
deps = [
":output_test_helper",
"//:benchmark",
"//:benchmark_internal_headers",
"@com_google_googletest//:gtest",
] + (
- ["@com_google_googletest//:gtest_main"] if (test_src in NEEDS_GTEST_MAIN) else []
+ ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else []
),
-# FIXME: Add support for assembly tests to bazel.
-# See Issue #556
-# https://github.com/google/benchmark/issues/556
-) for test_src in glob(["*_test.cc"], exclude = ["*_assembly_test.cc"])]
+ # FIXME: Add support for assembly tests to bazel.
+ # See Issue #556
+ # https://github.com/google/benchmark/issues/556
+ ) for test_src in glob(["*test.cc"], exclude = ["*_assembly_test.cc"])
+]
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 7c6366f..63c0e58 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -162,7 +162,8 @@
add_test(${name} ${name})
endmacro()
- add_gtest(statistics_test)
+ add_gtest(benchmark_gtest)
+ add_gtest(statistics_gtest)
endif(BENCHMARK_ENABLE_GTEST_TESTS)
###############################################################################
diff --git a/test/benchmark_gtest.cc b/test/benchmark_gtest.cc
new file mode 100644
index 0000000..10683b4
--- /dev/null
+++ b/test/benchmark_gtest.cc
@@ -0,0 +1,33 @@
+#include <vector>
+
+#include "../src/benchmark_register.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+TEST(AddRangeTest, Simple) {
+ std::vector<int> dst;
+ AddRange(&dst, 1, 2, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(1, 2));
+}
+
+TEST(AddRangeTest, Simple64) {
+ std::vector<int64_t> dst;
+ AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
+ EXPECT_THAT(dst, testing::ElementsAre(1, 2));
+}
+
+TEST(AddRangeTest, Advanced) {
+ std::vector<int> dst;
+ AddRange(&dst, 5, 15, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
+}
+
+TEST(AddRangeTest, Advanced64) {
+ std::vector<int64_t> dst;
+ AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
+ EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
+}
+
+} // end namespace
diff --git a/test/benchmark_test.cc b/test/benchmark_test.cc
index 78802c8..3cd4f55 100644
--- a/test/benchmark_test.cc
+++ b/test/benchmark_test.cc
@@ -40,8 +40,8 @@
return (pi - 1.0) * 4;
}
-std::set<int> ConstructRandomSet(int size) {
- std::set<int> s;
+std::set<int64_t> ConstructRandomSet(int64_t size) {
+ std::set<int64_t> s;
for (int i = 0; i < size; ++i) s.insert(s.end(), i);
return s;
}
@@ -64,7 +64,7 @@
static void BM_CalculatePiRange(benchmark::State& state) {
double pi = 0.0;
- for (auto _ : state) pi = CalculatePi(state.range(0));
+ for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0)));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
@@ -74,7 +74,7 @@
static void BM_CalculatePi(benchmark::State& state) {
static const int depth = 1024;
for (auto _ : state) {
- benchmark::DoNotOptimize(CalculatePi(depth));
+ benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
}
}
BENCHMARK(BM_CalculatePi)->Threads(8);
@@ -82,7 +82,7 @@
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State& state) {
- std::set<int> data;
+ std::set<int64_t> data;
for (auto _ : state) {
state.PauseTiming();
data = ConstructRandomSet(state.range(0));
@@ -103,9 +103,9 @@
ValueType v = 42;
for (auto _ : state) {
Container c;
- for (int i = state.range(0); --i;) c.push_back(v);
+ for (int64_t i = state.range(0); --i;) c.push_back(v);
}
- const size_t items_processed = state.iterations() * state.range(0);
+ const int64_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v));
}
@@ -118,8 +118,9 @@
#endif
static void BM_StringCompare(benchmark::State& state) {
- std::string s1(state.range(0), '-');
- std::string s2(state.range(0), '-');
+ size_t len = static_cast<size_t>(state.range(0));
+ std::string s1(len, '-');
+ std::string s2(len, '-');
for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
@@ -154,13 +155,13 @@
BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
static void BM_ParallelMemset(benchmark::State& state) {
- int size = state.range(0) / static_cast<int>(sizeof(int));
- int thread_size = size / state.threads;
+ int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
+ int thread_size = static_cast<int>(size) / state.threads;
int from = thread_size * state.thread_index;
int to = from + thread_size;
if (state.thread_index == 0) {
- test_vector = new std::vector<int>(size);
+ test_vector = new std::vector<int>(static_cast<size_t>(size));
}
for (auto _ : state) {
@@ -178,8 +179,8 @@
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
static void BM_ManualTiming(benchmark::State& state) {
- size_t slept_for = 0;
- int microseconds = state.range(0);
+ int64_t slept_for = 0;
+ int64_t microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration{
static_cast<double>(microseconds)};
diff --git a/test/complexity_test.cc b/test/complexity_test.cc
index 89dfa58..aa35619 100644
--- a/test/complexity_test.cc
+++ b/test/complexity_test.cc
@@ -81,9 +81,9 @@
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //
-std::vector<int> ConstructRandomVector(int size) {
+std::vector<int> ConstructRandomVector(int64_t size) {
std::vector<int> v;
- v.reserve(size);
+  v.reserve(static_cast<size_t>(size));
for (int i = 0; i < size; ++i) {
v.push_back(std::rand() % size);
}
@@ -92,8 +92,8 @@
void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
- const int item_not_in_vector =
- state.range(0) * 2; // Test worst case scenario (item not in vector)
+ // Test worst case scenario (item not in vector)
+ const int64_t item_not_in_vector = state.range(0) * 2;
for (auto _ : state) {
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
diff --git a/test/map_test.cc b/test/map_test.cc
index 311d2d2..dbf7982 100644
--- a/test/map_test.cc
+++ b/test/map_test.cc
@@ -8,7 +8,7 @@
std::map<int, int> ConstructRandomMap(int size) {
std::map<int, int> m;
for (int i = 0; i < size; ++i) {
- m.insert(std::make_pair(rand() % size, rand() % size));
+ m.insert(std::make_pair(std::rand() % size, std::rand() % size));
}
return m;
}
@@ -17,14 +17,14 @@
// Basic version.
static void BM_MapLookup(benchmark::State& state) {
- const int size = state.range(0);
+ const int size = static_cast<int>(state.range(0));
std::map<int, int> m;
for (auto _ : state) {
state.PauseTiming();
m = ConstructRandomMap(size);
state.ResumeTiming();
for (int i = 0; i < size; ++i) {
- benchmark::DoNotOptimize(m.find(rand() % size));
+ benchmark::DoNotOptimize(m.find(std::rand() % size));
}
}
state.SetItemsProcessed(state.iterations() * size);
@@ -35,7 +35,7 @@
class MapFixture : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& st) {
- m = ConstructRandomMap(st.range(0));
+ m = ConstructRandomMap(static_cast<int>(st.range(0)));
}
void TearDown(const ::benchmark::State&) { m.clear(); }
@@ -44,10 +44,10 @@
};
BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
- const int size = state.range(0);
+ const int size = static_cast<int>(state.range(0));
for (auto _ : state) {
for (int i = 0; i < size; ++i) {
- benchmark::DoNotOptimize(m.find(rand() % size));
+ benchmark::DoNotOptimize(m.find(std::rand() % size));
}
}
state.SetItemsProcessed(state.iterations() * size);
diff --git a/test/multiple_ranges_test.cc b/test/multiple_ranges_test.cc
index 0a82382..c64acab 100644
--- a/test/multiple_ranges_test.cc
+++ b/test/multiple_ranges_test.cc
@@ -1,7 +1,9 @@
#include "benchmark/benchmark.h"
#include <cassert>
+#include <iostream>
#include <set>
+#include <vector>
class MultipleRangesFixture : public ::benchmark::Fixture {
public:
@@ -27,25 +29,46 @@
{7, 6, 3}}) {}
void SetUp(const ::benchmark::State& state) {
- std::vector<int> ranges = {state.range(0), state.range(1), state.range(2)};
+ std::vector<int64_t> ranges = {state.range(0), state.range(1),
+ state.range(2)};
assert(expectedValues.find(ranges) != expectedValues.end());
actualValues.insert(ranges);
}
+ // NOTE: This is not TearDown as we want to check after _all_ runs are
+ // complete.
virtual ~MultipleRangesFixture() {
assert(actualValues.size() == expectedValues.size());
+ if (actualValues.size() != expectedValues.size()) {
+ std::cout << "EXPECTED\n";
+ for (auto v : expectedValues) {
+ std::cout << "{";
+ for (int64_t iv : v) {
+ std::cout << iv << ", ";
+ }
+ std::cout << "}\n";
+ }
+ std::cout << "ACTUAL\n";
+ for (auto v : actualValues) {
+ std::cout << "{";
+ for (int64_t iv : v) {
+ std::cout << iv << ", ";
+ }
+ std::cout << "}\n";
+ }
+ }
}
- std::set<std::vector<int>> expectedValues;
- std::set<std::vector<int>> actualValues;
+ std::set<std::vector<int64_t>> expectedValues;
+ std::set<std::vector<int64_t>> actualValues;
};
BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
for (auto _ : state) {
- int product = state.range(0) * state.range(1) * state.range(2);
- for (int x = 0; x < product; x++) {
+ int64_t product = state.range(0) * state.range(1) * state.range(2);
+ for (int64_t x = 0; x < product; x++) {
benchmark::DoNotOptimize(x);
}
}
diff --git a/test/statistics_test.cc b/test/statistics_gtest.cc
similarity index 100%
rename from test/statistics_test.cc
rename to test/statistics_gtest.cc