# Copyright 2016 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/components.gni")
import("//build/fidl/fidl.gni")
import("//build/testing/host_test.gni")
import("//build/testing/perf/test.gni")
import("//src/starnix/kernel/starnix.gni")
import("//src/starnix/tests/build/starnix_host_test_component.gni")
import("//src/starnix/tests/starnix_test_subpackages.gni")
import("//zircon/vdso/vdso.gni")
# This can be built as a Fuchsia executable or as a host executable.
# The latter allows running some of the benchmarks on Linux for
# comparison against Fuchsia.
#
# In principle this could be built for Mac too, but it may require
# changes to build and run successfully on Mac.
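# As a rough sketch (exact commands depend on your checkout and build
# arguments), the host copy can be built and run on Linux with something like:
#   fx set core.x64 --with //src/tests/microbenchmarks
#   fx build host_x64/fuchsia_microbenchmarks
#   out/default/host_x64/fuchsia_microbenchmarks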
fuchsia_microbenchmark_sources = [
"atomics.cc",
"clock_posix.cc",
"example.cc",
"filesystem.cc",
"main.cc",
"malloc.cc",
"memcpy.cc",
"null.cc",
"pthreads.cc",
"random_memcpy.cc",
"round_trips_futex.cc",
"round_trips_posix.cc",
"stdcompat.cc",
]
if (is_linux) {
fuchsia_microbenchmark_sources += [
"fork.cc",
# The getpid() microbenchmark would also work on Fuchsia, but running it
# there is not useful because Fuchsia's getpid() does not return a real
# process ID.
"getpid.cc",
"process_spawn_posix.cc",
]
}
if (is_fuchsia) {
fuchsia_microbenchmark_sources += [
"async_loop.cc",
"channels.cc",
"clock.cc",
"context_switch_overhead.cc",
"events.cc",
"fdio_spawn.cc",
"fifos.cc",
"get_info.cc",
"handle.cc",
"handle_creation.cc",
"inspect.cc",
"lazy_dir.cc",
"mem_alloc.cc",
"mmu.cc",
"mutex.cc",
"object_wait.cc",
"ports.cc",
"prng.cc",
"pseudo_dir.cc",
"round_trips.cc",
"sleep.cc",
"sockets.cc",
"streams.cc",
"threads.cc",
"timer.cc",
"tracing.cc",
"util.cc",
"vmar.cc",
"vmo.cc",
]
if (!exclude_testonly_syscalls) {
fuchsia_microbenchmark_sources += [
# Uses `zx_syscall_test_*()`.
"syscalls.cc",
]
}
}
fuchsia_microbenchmark_deps = [
"//sdk/lib/stdcompat",
"//sdk/lib/syslog/cpp",
"//src/lib/fxl",
"//zircon/system/ulib/fbl",
"//zircon/system/ulib/perftest",
]
if (is_fuchsia) {
fuchsia_microbenchmark_deps += [
":fuchsia.zircon.benchmarks_hlcpp",
"//sdk/lib/fdio",
"//sdk/lib/scheduler/cpp",
"//src/lib/fsl",
"//src/storage/lib/vfs/cpp",
"//src/zircon/lib/zircon",
"//zircon/system/ulib/async-loop:async-loop-cpp",
"//zircon/system/ulib/async-loop:async-loop-default",
"//zircon/system/ulib/inspect",
"//zircon/system/ulib/trace",
"//zircon/system/ulib/zx",
]
}
executable("fuchsia_microbenchmarks_bin") {
output_name = "fuchsia_microbenchmarks"
testonly = true
sources = fuchsia_microbenchmark_sources
if (is_fuchsia && !exclude_testonly_syscalls) {
sources += [
# Uses `zx_restricted_*()` which is available in the 'next' vdso.
"restricted_enter.cc",
]
}
deps = fuchsia_microbenchmark_deps
}
# An executable that immediately exits when run. The process-spawn benchmarks
# use it as the program they launch.
executable("no_op_executable") {
output_name = "no_op_executable"
testonly = true
sources = [ "no_op_executable.cc" ]
deps = []
public_deps = []
}
# "Helper" executable used to implement the context_switch_overhead benchmark.
executable("context_switch_overhead_helper") {
output_name = "context_switch_overhead_helper"
testonly = true
sources = [ "context_switch_overhead_helper.cc" ]
deps = [
"//sdk/lib/fdio",
"//sdk/lib/scheduler/cpp",
"//sdk/lib/sys/cpp",
"//sdk/lib/syslog/cpp",
"//zircon/system/ulib/zx",
]
}
# "Helper" executable used to implement the get_info benchmark.
executable("get_info_helper") {
output_name = "get_info_helper"
testonly = true
sources = [ "get_info_helper.cc" ]
deps = [
"//sdk/lib/fdio",
"//zircon/system/ulib/zx",
]
}
if (is_fuchsia) {
# "Helper" executable used to implement the round_trips benchmarks on Fuchsia.
executable("round_trips_helper") {
output_name = "round_trips_helper"
testonly = true
sources = [
"main.cc",
"round_trips.cc",
]
deps = [
":fuchsia.zircon.benchmarks_hlcpp",
"//sdk/lib/fdio",
"//sdk/lib/scheduler/cpp",
"//sdk/lib/syslog/cpp",
"//src/lib/fxl",
"//src/storage/lib/vfs/cpp",
"//src/zircon/lib/zircon",
"//zircon/system/ulib/async-loop:async-loop-cpp",
"//zircon/system/ulib/async-loop:async-loop-default",
"//zircon/system/ulib/inspect",
"//zircon/system/ulib/perftest",
"//zircon/system/ulib/zx",
]
}
}
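# Test-only FIDL library used by the benchmarks above; round_tripper.fidl
# backs the round_trips benchmarks and async_loop.fidl backs the async_loop
# benchmark.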
fidl("fuchsia.zircon.benchmarks") {
testonly = true
sources = [
"async_loop.fidl",
"round_tripper.fidl",
]
enable_hlcpp = true
}
fuchsia_unittest_package("fuchsia_microbenchmarks") {
package_name = "fuchsia_microbenchmarks"
manifest = "meta/fuchsia_microbenchmarks.cml"
deps = [
":context_switch_overhead_helper",
":fuchsia_microbenchmarks_bin",
":get_info_helper",
":no_op_executable",
":round_trips_helper",
]
test_type = "system"
}
fuchsia_component_perf_test("fuchsia_microbenchmarks_test") {
package = ":fuchsia_microbenchmarks"
component_name = "fuchsia_microbenchmarks"
test_type = "system"
# We run the fuchsia_microbenchmarks process multiple times. That is useful
# for tests that exhibit between-process variation in results (e.g. due to
# memory layout chosen when a process starts) -- it reduces the variation in
# the average that we report.
process_runs = 6
# Runs: we override the default number of within-process iterations of each
# test case and use a lower value. This reduces the overall time taken and
# reduces the chance that these invocations hit Infra Swarming tasks' IO
# timeout (swarming_io_timeout_secs -- the amount of time that a task is
# allowed to run without producing log output).
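# The remaining flags: "-p" tells the perftest runner to run the performance
# tests (rather than unit-test mode) and "--quiet" reduces per-test log output.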
test_component_args = [
"-p",
"--quiet",
"--runs",
"120",
]
results_path_test_arg = "--out"
expected_metric_names_filepath = "//src/tests/end_to_end/perf/expected_metric_names/fuchsia.microbenchmarks.txt"
}
if (is_linux) {
host_test("fuchsia_microbenchmarks_host") {
binary_path = "$root_out_dir/fuchsia_microbenchmarks"
deps = [
":fuchsia_microbenchmarks_bin",
":runtime_deps",
]
}
host_test_data("runtime_deps") {
sources = [ "$root_out_dir/no_op_executable" ]
deps = [ ":no_op_executable" ]
}
}
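# Starnix variants: the same microbenchmark sources are built as Linux
# binaries with the host toolchain and run inside a Starnix container on
# Fuchsia. These targets are skipped for ASan builds.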
if (host_os == "linux" && !is_asan) {
resource("no_op_executable_in_test_dir") {
testonly = true
out_dir = get_label_info(":no_op_executable", "root_out_dir")
sources = [ "${out_dir}/no_op_executable" ]
outputs = [ "data/tests/no_op_executable" ]
deps = [ ":no_op_executable" ]
}
executable("starnix_microbenchmarks_bin") {
output_name = "starnix_microbenchmarks"
testonly = true
sources = fuchsia_microbenchmark_sources + [ "starnix_filesystem.cc" ]
deps = fuchsia_microbenchmark_deps
}
starnix_host_test_component("starnix_microbenchmarks_component") {
test_label = ":starnix_microbenchmarks_bin"
test_binary = "starnix_microbenchmarks"
manifest = "meta/starnix_microbenchmarks.cml"
component_name = "starnix_microbenchmarks"
deps = [ ":no_op_executable_in_test_dir($host_toolchain_for_target_arch)" ]
test_type = "starnix"
}
fuchsia_test_package("starnix_microbenchmarks_unittestmode") {
test_components = [ ":starnix_microbenchmarks_component" ]
subpackages = starnix_test_subpackages +
[ "//src/starnix/containers/debian:debian_package" ]
deps = [ "//src/lib/testing/expectation:expectation_comparer" ]
}
fuchsia_shell_package("starnix_microbenchmarks_perftestmode") {
testonly = true
subpackages = starnix_test_subpackages +
[ "//src/starnix/containers/debian:debian_package" ]
deps = [ ":starnix_microbenchmarks_component" ]
# This package intentionally contains many non-Fuchsia ELF binaries that
# are not under /data/, and some of them are even unstripped, so disable
# these checks to allow the package to build successfully.
disable_elf_binaries_checks = true
}
fuchsia_component_perf_test("starnix_microbenchmarks_test") {
package = ":starnix_microbenchmarks_perftestmode"
component_name = "starnix_microbenchmarks"
test_type = "starnix"
# We run the fuchsia_microbenchmarks process multiple times. That is useful
# for tests that exhibit between-process variation in results (e.g. due to
# memory layout chosen when a process starts) -- it reduces the variation in
# the average that we report.
process_runs = 6
# Runs: we override the default number of within-process iterations of each
# test case and use a lower value. This reduces the overall time taken and
# reduces the chance that these invocations hit Infra Swarming tasks' IO
# timeout (swarming_io_timeout_secs -- the amount of time that a task is
# allowed to run without producing log output).
test_component_args = [
"-p",
"--quiet",
"--runs",
"120",
]
results_path_test_arg = "--out"
expected_metric_names_filepath = "//src/tests/end_to_end/perf/expected_metric_names/fuchsia.microbenchmarks.starnix.txt"
}
}
# This contains all the dependencies necessary for running
# fuchsia_microbenchmarks in unit test mode.
group("tests") {
testonly = true
deps = [ ":fuchsia_microbenchmarks" ]
if (host_os == "linux") {
deps += [ ":fuchsia_microbenchmarks_host($host_toolchain)" ]
if (!is_asan) {
deps += [
":starnix_microbenchmarks_perftestmode",
":starnix_microbenchmarks_unittestmode",
]
}
}
}
# Convenience target so that one can pass "--with //src/tests/microbenchmarks"
# to "fx set".
group("microbenchmarks") {
testonly = true
deps = [ ":tests" ]
}
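# Collects the perf-test variants of the benchmarks, as opposed to the
# unit-test-mode packages collected by the "tests" group above.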
group("benchmarks") {
testonly = true
deps = [ ":fuchsia_microbenchmarks_test" ]
if (!is_asan && host_os == "linux") {
deps += [ ":starnix_microbenchmarks_test" ]
}
}