[perftest] Add library for running performance tests
This is approximately a port to Zircon of the perf test runner from
garnet/bin/zircon_benchmarks/test_runner.cc. It differs from that
version in the following ways:
* Use a KeepRunning() interface similar to gbenchmark so that the
test function can own the loop. This is useful for test cases that
need to return to an event loop.
* Use a "bool" return type (rather than "void"), to allow propagating
errors. This will eventually allow compatibility with unittest.h's
ASSERT_*() macros, though these aren't supported with perftest yet.
* This version has some unit tests for the perf test runner.
* This version will print a table of summary statistics for the test
cases.
* This version mostly uses getopt_long() for command line argument
parsing (the initial "-p" mode flag is handled separately). In
contrast, zircon_benchmarks uses the gflags library, which isn't
available in the Zircon layer.
* This version leaves out producing trace events for test runs; I'll
add that back later.
ZX-1715
Change-Id: I90d313b4223b4a0548b2edc32ad77b6e6de0310a
diff --git a/system/ulib/perftest/include/perftest/perftest.h b/system/ulib/perftest/include/perftest/perftest.h
new file mode 100644
index 0000000..c6bcfc2
--- /dev/null
+++ b/system/ulib/perftest/include/perftest/perftest.h
@@ -0,0 +1,110 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#pragma once
+
+#include <stdint.h>
+
+#include <fbl/function.h>
+#include <perftest/results.h>
+
+// This is a library for writing performance tests. It supports
+// performance tests that involve running an operation repeatedly,
+// sequentially, and recording the times taken by each run of the
+// operation. (It does not yet support other types of performance test,
+// such as running an operation concurrently on multiple threads.)
+//
+// There are two ways to implement a test:
+//
+// 1) For tests that don't need to reuse any test fixtures across runs,
+// use RegisterSimpleTest():
+//
+// bool TestFooOp() {
+// FooOp(); // The operation that we are timing.
+// return true; // Indicate success.
+// }
+// void RegisterTests() {
+// perftest::RegisterSimpleTest<TestFooOp>("FooOp");
+// }
+// PERFTEST_CTOR(RegisterTests);
+//
+// 2) For tests that do need to reuse test fixtures across runs, use
+// the more general RegisterTest():
+//
+// bool TestFooObjectOp(perftest::RepeatState* state) {
+// FooObject obj; // Fixture that is reused across test runs.
+// while (state->KeepRunning()) {
+// obj.FooOp(); // The operation that we are timing.
+// }
+// return true; // Indicate success.
+// }
+// void RegisterTests() {
+// perftest::RegisterTest("FooObjectOp", TestFooObjectOp);
+// }
+// PERFTEST_CTOR(RegisterTests);
+//
+// Test registration is done using function calls in order to make it easy
+// to instantiate parameterized tests multiple times.
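+//
+// For example, a parameterized test can be registered once per
+// parameter value. (This is a sketch; TestBufferOp and the parameter
+// values are hypothetical.)
+//
+//     bool TestBufferOp(perftest::RepeatState* state, size_t size);
+//
+//     void RegisterTests() {
+//         for (size_t size : {size_t(128), size_t(1024)}) {
+//             auto name = fbl::StringPrintf("BufferOp/%zu", size);
+//             perftest::RegisterTest(
+//                 name.c_str(), [size](perftest::RepeatState* state) {
+//                     return TestBufferOp(state, size);
+//                 });
+//         }
+//     }
+//     PERFTEST_CTOR(RegisterTests);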
+//
+// Background: The "KeepRunning()" interface is based on the interface used
+// by the gbenchmark library (https://github.com/google/benchmark).
+
+namespace perftest {
+
+// This object is passed to the test function. It controls the iteration
+// of test runs and records the times taken by test runs.
+//
+// This is a pure virtual interface so that one can potentially use a test
+// runner other than the one provided by this library.
+class RepeatState {
+public:
+    // KeepRunning() should be called by test functions in a "while"
+    // loop, as shown above. A call to KeepRunning() indicates the start
+    // or end of a test run, or both. KeepRunning() returns a bool
+    // indicating whether the caller should do another test run.
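+    //
+    // For example, if the runner requests 2 test runs, the expected
+    // sequence of calls is: the 1st call to KeepRunning() starts run 1
+    // and returns true; the 2nd call ends run 1, starts run 2 and
+    // returns true; the 3rd call ends run 2 and returns false.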
+ virtual bool KeepRunning() = 0;
+};
+
+typedef bool TestFunc(RepeatState* state);
+typedef bool SimpleTestFunc();
+
+void RegisterTest(const char* name, fbl::Function<TestFunc> test_func);
+
+// Convenience routine for registering a perf test that is specified by a
+// function. This is for tests that don't set up any fixtures that are
+// shared across invocations of the function.
+//
+// This takes the function as a template parameter rather than as a value
+// parameter in order to avoid the potential cost of an indirect function
+// call.
+template <SimpleTestFunc test_func>
+void RegisterSimpleTest(const char* test_name) {
+ auto wrapper_func = [](RepeatState* state) {
+ while (state->KeepRunning()) {
+ if (!test_func()) {
+ return false;
+ }
+ }
+ return true;
+ };
+ RegisterTest(test_name, fbl::move(wrapper_func));
+}
+
+// Entry point for the perf test runner that a test executable should call
+// from main(). This will run the registered perf tests and/or unit tests,
+// based on the command line arguments. (See the "--help" output for more
+// details.)
+int PerfTestMain(int argc, char** argv);
+
+} // namespace perftest
+
+// This calls func() at startup time as a global constructor. This is
+// useful for registering perf tests. This is similar to declaring func()
+// with __attribute__((constructor)), but portable.
+#define PERFTEST_CTOR(func) \
+ namespace { \
+ struct FuncCaller_##func { \
+ FuncCaller_##func() { func(); } \
+    } global_##func; \
+ }
diff --git a/system/ulib/perftest/include/perftest/results.h b/system/ulib/perftest/include/perftest/results.h
index 9dd2f93..a08a665 100644
--- a/system/ulib/perftest/include/perftest/results.h
+++ b/system/ulib/perftest/include/perftest/results.h
@@ -41,6 +41,10 @@
: label_(label),
unit_(unit) {}
+ fbl::String label() const { return label_; }
+ fbl::String unit() const { return unit_; }
+ fbl::Vector<double>* values() { return &values_; }
+
void AppendValue(double value) {
values_.push_back(value);
}
@@ -63,11 +67,14 @@
// case affects a later test case.)
class ResultsSet {
public:
+ fbl::Vector<TestCaseResults>* results() { return &results_; }
+
TestCaseResults* AddTestCase(const fbl::String& label,
const fbl::String& unit);
// A caller may check for errors using ferror().
void WriteJSON(FILE* out_file) const;
+ void PrintSummaryStatistics(FILE* out_file) const;
private:
fbl::Vector<TestCaseResults> results_;
diff --git a/system/ulib/perftest/include/perftest/runner.h b/system/ulib/perftest/include/perftest/runner.h
new file mode 100644
index 0000000..ecf8574
--- /dev/null
+++ b/system/ulib/perftest/include/perftest/runner.h
@@ -0,0 +1,36 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#pragma once
+
+#include <fbl/vector.h>
+#include <perftest/perftest.h>
+
+namespace perftest {
+namespace internal {
+
+// Definitions used by the perf test runner. These are in a header file so
+// that the perf test runner can be tested by unit tests.
+
+struct NamedTest {
+ fbl::String name;
+ fbl::Function<TestFunc> test_func;
+};
+
+typedef fbl::Vector<NamedTest> TestList;
+
+bool RunTests(TestList* test_list, uint32_t run_count, const char* regex_string,
+ FILE* log_stream, ResultsSet* results_set);
+
+struct CommandArgs {
+ const char* output_filename = nullptr;
+ // Note that this default matches any string.
+ const char* filter_regex = "";
+ uint32_t run_count = 1000;
+};
+
+void ParseCommandArgs(int argc, char** argv, CommandArgs* dest);
+
+} // namespace internal
+} // namespace perftest
diff --git a/system/ulib/perftest/results.cpp b/system/ulib/perftest/results.cpp
index e0a58c9..dcb545f 100644
--- a/system/ulib/perftest/results.cpp
+++ b/system/ulib/perftest/results.cpp
@@ -121,4 +121,19 @@
fprintf(out_file, "]");
}
+void ResultsSet::PrintSummaryStatistics(FILE* out_file) const {
+ // Print table headings row.
+ fprintf(out_file, "%10s %10s %10s %10s %-12s %s\n",
+ "Mean", "Std dev", "Min", "Max", "Unit", "Test case");
+ if (results_.size() == 0) {
+ fprintf(out_file, "(No test results)\n");
+ }
+ for (const auto& test : results_) {
+ SummaryStatistics stats = test.GetSummaryStatistics();
+ fprintf(out_file, "%10.0f %10.0f %10.0f %10.0f %-12s %s\n",
+ stats.mean, stats.std_dev, stats.min, stats.max,
+ test.unit().c_str(), test.label().c_str());
+ }
+}
+
} // namespace perftest
diff --git a/system/ulib/perftest/rules.mk b/system/ulib/perftest/rules.mk
index 37f3632..72b47e5 100644
--- a/system/ulib/perftest/rules.mk
+++ b/system/ulib/perftest/rules.mk
@@ -10,10 +10,12 @@
MODULE_SRCS += \
$(LOCAL_DIR)/results.cpp \
+ $(LOCAL_DIR)/runner.cpp \
MODULE_LIBS := \
system/ulib/c \
system/ulib/fbl \
+ system/ulib/unittest \
system/ulib/zircon \
MODULE_PACKAGE := src
diff --git a/system/ulib/perftest/runner.cpp b/system/ulib/perftest/runner.cpp
new file mode 100644
index 0000000..7215f00
--- /dev/null
+++ b/system/ulib/perftest/runner.cpp
@@ -0,0 +1,296 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <perftest/runner.h>
+
+#include <errno.h>
+#include <getopt.h>
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <fbl/function.h>
+#include <fbl/string.h>
+#include <fbl/type_support.h>
+#include <fbl/unique_ptr.h>
+#include <fbl/vector.h>
+#include <unittest/unittest.h>
+#include <zircon/compiler.h>
+#include <zircon/syscalls.h>
+
+namespace perftest {
+namespace {
+
+// g_tests needs to be POD because this list is populated by constructors.
+// We don't want g_tests to have a constructor that might get run after
+// items have been added to the list, because that would clobber the list.
+internal::TestList* g_tests;
+
+class RepeatStateImpl : public RepeatState {
+public:
+ RepeatStateImpl(uint32_t run_count)
+ : run_count_(run_count) {
+ // Add 1 because we store timestamps for the start of each test run
+ // (which serve as timestamps for the end of the previous test
+ // run), plus one more timestamp for the end of the last test run.
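+        // For example, for run_count == 3 we store 4 timestamps,
+        // t0..t3, and CopyTimeResults() reports the elapsed times
+        // t1-t0, t2-t1 and t3-t2.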
+ size_t array_size = run_count + 1;
+ timestamps_.reset(new uint64_t[array_size]);
+ // Clear the array in order to fault in the pages. This should
+ // prevent page faults occurring as we cross page boundaries when
+ // writing a test's running times (which would affect the first
+ // test case but not later test cases).
+ memset(timestamps_.get(), 0, sizeof(timestamps_[0]) * array_size);
+ }
+
+ bool KeepRunning() override {
+ timestamps_[runs_started_] = zx_ticks_get();
+ if (unlikely(runs_started_ == run_count_)) {
+ ++finishing_calls_;
+ return false;
+ }
+ ++runs_started_;
+ return true;
+ }
+
+ bool Success() const {
+ return runs_started_ == run_count_ && finishing_calls_ == 1;
+ }
+
+ void CopyTimeResults(const char* test_name, ResultsSet* dest) const {
+ // Copy the timing results, converting timestamps to elapsed times.
+ double nanoseconds_per_tick =
+ 1e9 / static_cast<double>(zx_ticks_per_second());
+ TestCaseResults* results = dest->AddTestCase(test_name, "nanoseconds");
+ results->values()->reserve(run_count_);
+ for (uint32_t idx = 0; idx < run_count_; ++idx) {
+ uint64_t time_taken = timestamps_[idx + 1] - timestamps_[idx];
+ results->AppendValue(
+ static_cast<double>(time_taken) * nanoseconds_per_tick);
+ }
+ }
+
+private:
+ // Number of test runs that we intend to do.
+ uint32_t run_count_;
+ // Number of test runs started.
+ uint32_t runs_started_ = 0;
+ // Number of calls to KeepRunning() after the last test run has been
+ // started. This should be 1 when the test runs have finished. This
+ // is just used as a sanity check: It will be 0 if the test case failed
+ // to make the final call to KeepRunning() or >1 if it made unnecessary
+ // excess calls.
+ //
+ // Having this separate from runs_started_ removes the need for an
+ // extra comparison in the fast path of KeepRunning().
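+    //
+    // For example, a correct test with run_count_ == 5 makes 6 calls
+    // to KeepRunning(): 5 that return true, plus a final call that
+    // returns false.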
+ uint32_t finishing_calls_ = 0;
+ fbl::unique_ptr<uint64_t[]> timestamps_;
+};
+
+} // namespace
+
+void RegisterTest(const char* name, fbl::Function<TestFunc> test_func) {
+ if (!g_tests) {
+ g_tests = new internal::TestList;
+ }
+ internal::NamedTest new_test{name, fbl::move(test_func)};
+ g_tests->push_back(fbl::move(new_test));
+}
+
+namespace internal {
+
+bool RunTests(TestList* test_list, uint32_t run_count, const char* regex_string,
+ FILE* log_stream, ResultsSet* results_set) {
+ // Compile the regular expression.
+ regex_t regex;
+    int err = regcomp(&regex, regex_string, REG_EXTENDED);
+ if (err != 0) {
+ char msg[256];
+ msg[0] = '\0';
+        regerror(err, &regex, msg, sizeof(msg));
+ fprintf(log_stream,
+ "Compiling the regular expression \"%s\" failed: %s\n",
+ regex_string, msg);
+ return false;
+ }
+
+ bool found_regex_match = false;
+ bool ok = true;
+ for (const internal::NamedTest& test_case : *test_list) {
+ const char* test_name = test_case.name.c_str();
+        bool matched_regex = regexec(&regex, test_name, 0, nullptr, 0) == 0;
+ if (!matched_regex) {
+ continue;
+ }
+ found_regex_match = true;
+
+        // Log in a format similar to gtest's output, so that this will
+        // look familiar to readers and can be parsed by tools that
+        // understand gtest's output.
+ fprintf(log_stream, "[ RUN ] %s\n", test_name);
+
+ RepeatStateImpl state(run_count);
+ bool result = test_case.test_func(&state);
+
+ if (!result) {
+ fprintf(log_stream, "[ FAILED ] %s\n", test_name);
+ ok = false;
+ continue;
+ }
+ if (!state.Success()) {
+ fprintf(log_stream, "Excess or missing calls to KeepRunning()\n");
+ fprintf(log_stream, "[ FAILED ] %s\n", test_name);
+ ok = false;
+ continue;
+ }
+ fprintf(log_stream, "[ OK ] %s\n", test_name);
+
+ state.CopyTimeResults(test_name, results_set);
+ }
+
+    regfree(&regex);
+
+ if (!found_regex_match) {
+ // Report an error so that this doesn't fail silently if the regex
+ // is wrong.
+ fprintf(log_stream,
+ "The regular expression \"%s\" did not match any tests\n",
+ regex_string);
+ return false;
+ }
+ return ok;
+}
+
+void ParseCommandArgs(int argc, char** argv, CommandArgs* dest) {
+ static const struct option opts[] = {
+ {"out", required_argument, nullptr, 'o'},
+ {"filter", required_argument, nullptr, 'f'},
+ {"runs", required_argument, nullptr, 'r'},
+ };
+ optind = 1;
+ for (;;) {
+ int opt = getopt_long(argc, argv, "", opts, nullptr);
+ if (opt < 0) {
+ break;
+ }
+ switch (opt) {
+ case 'o':
+ dest->output_filename = optarg;
+ break;
+ case 'f':
+ dest->filter_regex = optarg;
+ break;
+ case 'r': {
+ // Convert string to number.
+ char* end;
+ long val = strtol(optarg, &end, 0);
+ // Check that the string contains only a positive number and
+ // that the number doesn't overflow.
+ if (val != static_cast<uint32_t>(val) || *end != '\0' ||
+ *optarg == '\0' || val == 0) {
+ fprintf(stderr, "Invalid argument for --runs: \"%s\"\n",
+ optarg);
+ exit(1);
+ }
+ dest->run_count = static_cast<uint32_t>(val);
+ break;
+ }
+ default:
+ // getopt_long() will have printed an error already.
+ exit(1);
+ }
+ }
+ if (optind < argc) {
+ fprintf(stderr, "Unrecognized argument: \"%s\"\n", argv[optind]);
+ exit(1);
+ }
+}
+
+} // namespace internal
+
+static bool PerfTestMode(int argc, char** argv) {
+ internal::CommandArgs args;
+ internal::ParseCommandArgs(argc, argv, &args);
+ ResultsSet results;
+    bool success = internal::RunTests(g_tests, args.run_count,
+                                      args.filter_regex, stdout, &results);
+
+ printf("\n");
+ results.PrintSummaryStatistics(stdout);
+ printf("\n");
+
+ if (args.output_filename) {
+ FILE* fh = fopen(args.output_filename, "w");
+ if (!fh) {
+ fprintf(stderr, "Failed to open output file \"%s\": %s\n",
+ args.output_filename, strerror(errno));
+ exit(1);
+ }
+ results.WriteJSON(fh);
+ fclose(fh);
+ }
+
+ return success;
+}
+
+int PerfTestMain(int argc, char** argv) {
+ if (argc == 2 && (strcmp(argv[1], "-h") == 0 ||
+ strcmp(argv[1], "--help") == 0)) {
+ printf("Usage:\n"
+ " %s -p [options] # run in \"perf test mode\"\n"
+ " %s # run in \"unit test mode\"\n"
+ "\n"
+ "\"Unit test mode\" runs perf tests as unit tests. "
+ "This means it only checks that the perf tests pass. "
+ "It only does a small number of runs of each test, and it "
+ "does not report their performance. Additionally, it runs "
+ "all of the unit tests in the executable (i.e. those that "
+ "use the unittest library).\n"
+ "\n"
+ "\"Perf test mode\" runs many iterations of each perf test, "
+ "and reports the performance results. It does not run any "
+ "unittest test cases.\n"
+ "\n"
+ "Options:\n"
+ " --out FILENAME\n"
+ " Filename to write JSON results data to. If this is "
+ "omitted, no JSON output is produced.\n"
+ " --filter REGEX\n"
+ " Regular expression that specifies a subset of tests "
+ "to run. By default, all the tests are run.\n"
+ " --runs NUMBER\n"
+ " Number of times to run each test.\n",
+ argv[0], argv[0]);
+ return 1;
+ }
+
+ bool success = true;
+
+ if (argc >= 2 && strcmp(argv[1], "-p") == 0) {
+ // Drop the "-p" argument. Keep argv[0] because getopt_long()
+ // prints it in error messages.
+ argv[1] = argv[0];
+ argc--;
+ argv++;
+ if (!PerfTestMode(argc, argv)) {
+ success = false;
+ }
+ } else {
+ printf("Running perf tests in unit test mode...\n");
+ {
+ // Run each test a small number of times to ensure that doing
+ // multiple runs works OK.
+ const int kRunCount = 3;
+ ResultsSet unused_results;
+            if (!internal::RunTests(g_tests, kRunCount, "", stdout,
+                                    &unused_results)) {
+ success = false;
+ }
+ }
+
+ // In unit test mode, we pass all command line arguments on to the
+ // unittest library.
+ printf("Running unit tests...\n");
+ if (!unittest_run_all_tests(argc, argv)) {
+ success = false;
+ }
+ }
+
+ return success ? 0 : 1;
+}
+
+} // namespace perftest
diff --git a/system/utest/perftest/results-test.cpp b/system/utest/perftest/results-test.cpp
index b38f79c..84a1701 100644
--- a/system/utest/perftest/results-test.cpp
+++ b/system/utest/perftest/results-test.cpp
@@ -69,8 +69,3 @@
RUN_TEST(test_json_output)
RUN_TEST(test_json_string_escaping)
END_TEST_CASE(perf_results_output_tests)
-
-int main(int argc, char** argv) {
- bool success = unittest_run_all_tests(argc, argv);
- return success ? 0 : -1;
-}
diff --git a/system/utest/perftest/rules.mk b/system/utest/perftest/rules.mk
index fc70f04..047fda4 100644
--- a/system/utest/perftest/rules.mk
+++ b/system/utest/perftest/rules.mk
@@ -10,8 +10,10 @@
MODULE_SRCS += \
$(LOCAL_DIR)/results-test.cpp \
+ $(LOCAL_DIR)/runner-test.cpp \
+ $(LOCAL_DIR)/syscalls-test.cpp \
-MODULE_NAME := perftest-results-test
+MODULE_NAME := perf-test
MODULE_STATIC_LIBS := \
system/ulib/fbl \
diff --git a/system/utest/perftest/runner-test.cpp b/system/utest/perftest/runner-test.cpp
new file mode 100644
index 0000000..750384a
--- /dev/null
+++ b/system/utest/perftest/runner-test.cpp
@@ -0,0 +1,145 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+#include <fbl/type_support.h>
+#include <perftest/perftest.h>
+#include <perftest/runner.h>
+#include <unittest/unittest.h>
+#include <zircon/assert.h>
+#include <zircon/compiler.h>
+
+// This is a helper for creating a FILE* that we can redirect output to, in
+// order to make the tests below less noisy. We don't look at the output
+// that is sent to the stream.
+class DummyOutputStream {
+public:
+ DummyOutputStream() {
+ fp_ = fmemopen(buf_, sizeof(buf_), "w+");
+ ZX_ASSERT(fp_);
+ }
+ ~DummyOutputStream() {
+ ZX_ASSERT(fclose(fp_) == 0);
+ }
+
+ FILE* fp() { return fp_; }
+
+private:
+ FILE* fp_;
+ // Non-zero-size dummy buffer that fmemopen() will accept.
+ char buf_[1];
+};
+
+// Example of a valid test that passes.
+static bool NoOpTest(perftest::RepeatState* state) {
+ while (state->KeepRunning()) {}
+ return true;
+}
+
+// Example of a test that fails by returning false.
+static bool FailingTest(perftest::RepeatState* state) {
+ while (state->KeepRunning()) {}
+ return false;
+}
+
+// Test that a successful run of a perf test produces sensible results.
+static bool test_results() {
+ BEGIN_TEST;
+
+ perftest::internal::TestList test_list;
+ perftest::internal::NamedTest test{"no_op_example_test", NoOpTest};
+ test_list.push_back(fbl::move(test));
+
+ const uint32_t kRunCount = 7;
+ perftest::ResultsSet results;
+ DummyOutputStream out;
+ EXPECT_TRUE(perftest::internal::RunTests(
+ &test_list, kRunCount, "", out.fp(), &results));
+
+ auto* test_cases = results.results();
+ ASSERT_EQ(test_cases->size(), 1);
+ // The output should have time values for the number of runs we requested.
+ auto* test_case = &(*test_cases)[0];
+ EXPECT_EQ(test_case->values()->size(), kRunCount);
+ // Sanity-check the times.
+ for (auto time_taken : *test_case->values()) {
+ EXPECT_GE(time_taken, 0);
+ }
+
+ END_TEST;
+}
+
+// Test that if a perf test fails by returning "false", the failure gets
+// propagated correctly.
+static bool test_failing_test() {
+ BEGIN_TEST;
+
+ perftest::internal::TestList test_list;
+ perftest::internal::NamedTest test{"example_test", FailingTest};
+ test_list.push_back(fbl::move(test));
+
+ const uint32_t kRunCount = 7;
+ perftest::ResultsSet results;
+ DummyOutputStream out;
+ EXPECT_FALSE(perftest::internal::RunTests(
+ &test_list, kRunCount, "", out.fp(), &results));
+ EXPECT_EQ(results.results()->size(), 0);
+
+ END_TEST;
+}
+
+// Test that we report a test as failed if it calls KeepRunning() too many
+// or too few times. Make sure that we don't overrun the array of
+// timestamps or report uninitialized data from that array.
+static bool test_bad_keeprunning_calls() {
+ BEGIN_TEST;
+
+ for (int actual_runs = 0; actual_runs < 10; ++actual_runs) {
+ // Example test function which might call KeepRunning() the wrong
+ // number of times.
+ auto test_func = [=](perftest::RepeatState* state) {
+ for (int i = 0; i < actual_runs + 1; ++i)
+ state->KeepRunning();
+ return true;
+ };
+
+ perftest::internal::TestList test_list;
+ perftest::internal::NamedTest test{"example_bad_test", test_func};
+ test_list.push_back(fbl::move(test));
+
+ const uint32_t kRunCount = 5;
+ perftest::ResultsSet results;
+ DummyOutputStream out;
+ bool success = perftest::internal::RunTests(
+ &test_list, kRunCount, "", out.fp(), &results);
+ EXPECT_EQ(success, kRunCount == actual_runs);
+ EXPECT_EQ(results.results()->size(),
+ (size_t)(kRunCount == actual_runs ? 1 : 0));
+ }
+
+ END_TEST;
+}
+
+static bool test_parsing_command_args() {
+ BEGIN_TEST;
+
+ const char* argv[] = {"unused_argv0", "--runs", "123", "--out", "dest_file",
+ "--filter", "some_regex"};
+ perftest::internal::CommandArgs args;
+ perftest::internal::ParseCommandArgs(
+ countof(argv), const_cast<char**>(argv), &args);
+ EXPECT_EQ(args.run_count, 123);
+ EXPECT_STR_EQ(args.output_filename, "dest_file");
+ EXPECT_STR_EQ(args.filter_regex, "some_regex");
+
+ END_TEST;
+}
+
+BEGIN_TEST_CASE(perftest_runner_test)
+RUN_TEST(test_results)
+RUN_TEST(test_failing_test)
+RUN_TEST(test_bad_keeprunning_calls)
+RUN_TEST(test_parsing_command_args)
+END_TEST_CASE(perftest_runner_test)
+
+int main(int argc, char** argv) {
+ return perftest::PerfTestMain(argc, argv);
+}
diff --git a/system/utest/perftest/syscalls-test.cpp b/system/utest/perftest/syscalls-test.cpp
new file mode 100644
index 0000000..95d645d
--- /dev/null
+++ b/system/utest/perftest/syscalls-test.cpp
@@ -0,0 +1,27 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <perftest/perftest.h>
+#include <zircon/assert.h>
+#include <zircon/syscalls.h>
+
+namespace {
+
+bool SyscallNullTest() {
+ ZX_ASSERT(zx_syscall_test_0() == 0);
+ return true;
+}
+
+bool SyscallManyArgsTest() {
+ ZX_ASSERT(zx_syscall_test_8(1, 2, 3, 4, 5, 6, 7, 8) == 36);
+ return true;
+}
+
+void RegisterTests() {
+ perftest::RegisterSimpleTest<SyscallNullTest>("Syscall/Null");
+ perftest::RegisterSimpleTest<SyscallManyArgsTest>("Syscall/ManyArgs");
+}
+PERFTEST_CTOR(RegisterTests);
+
+} // namespace