blob: 4d8410c0a05ece35a12965798ab19a9c6007584a [file] [log] [blame]
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/testing/platforms.gni")
# TODO(joshuaseaton): Only used in package() today; consider a scheme in which
# test specs are re-used for host tests.
# Describes the target device environment in which a test should run. Associated
# JSON is written to $target_out_dir with file extension "spec.json".
#
# Parameters
#
# path
# Required: Full path to the test.
# output_dir
# Required: Where to write the test spec.
# environments
# Optional: Target device environments in which the test should run. Valid
# environments are element-wise subsets of the full platform environments
# specified in //build/testing/platforms.gni.
# A `label` may also be specified in a given environment, on which tests
# may be grouped to be run in a _custom_ builder. NOTE: If set, then the
# test will only be run in that environment with special support from the
# Infrastructure team.
#
template("test_spec") {
  # Both parameters are mandatory; fail fast with a clear message.
  assert(defined(invoker.path), "path must be defined.")
  assert(defined(invoker.output_dir), "output_dir must be defined.")

  if (defined(invoker.environments)) {
    environments = invoker.environments
  } else {
    # No environments were given: default to running the test under QEMU.
    environments = [
      {
        dimensions = {
          device_type = "QEMU"
        }
      },
    ]

    # TODO(joshuaseaton): This is temporary until we are confident that all
    # tests that need to specify hardware are doing so; then the default can
    # return to being QEMU alone.
    if (current_cpu == "x64") {
      environments += [
        {
          dimensions = {
            device_type = "Intel NUC Kit NUC7i5DNHE"
          }
        },
      ]
    }
  }

  # The spec that will be serialized to JSON below. `environments` starts
  # empty and is filled in only with environments validated against
  # //build/testing/platforms.gni.
  test_spec = {
    test = {
      # Fully-qualified GN label of the test target (no toolchain suffix).
      name = get_label_info(":$target_name", "label_no_toolchain")
      location = invoker.path
    }
    environments = []
  }

  # Call "expanding" the operation that takes a scope
  # {
  #   x = a
  #   y = b
  #   z = c
  #   ...
  # }
  # and converts it to a list [{x=a}, {y=b}, {z=c},...].
  #
  # Expanding both platform and environment dimensions lets us compare them
  # key-by-key with list arithmetic (see the subset check below) instead of
  # comparing whole scopes, which would require every key to be present.
  #
  # Expand each scope of test platform dimensions and group them by
  # architecture (i.e., cpu).
  target_platform_dims = []
  other_platform_dims = []
  foreach(platform, test_platforms) {
    platform_dims = []  # Clear from previous iteration.
    foreach(key, all_dimension_keys) {
      # forward_variables_from copies `key` only if it is set on `platform`;
      # otherwise this contributes an empty scope to the list.
      platform_dims += [
        {
          forward_variables_from(platform, [ key ])
        },
      ]
    }
    if (platform.cpu == current_cpu) {
      target_platform_dims += [ platform_dims ]
    } else {
      other_platform_dims += [ platform_dims ]
    }
  }

  foreach(env, environments) {
    # Likewise, expand each environment's dimensions.
    dims = []  # Clear from previous iteration.
    if (defined(env.dimensions)) {
      foreach(key, all_dimension_keys) {
        dims += [
          {
            forward_variables_from(env.dimensions, [ key ])
          },
        ]
      }
    }

    # Empty scopes may have been introduced to dims, corresponding to unset
    # keys; Add and then subtract an empty scope to remove them. (GN `-=`
    # removes all occurrences of an element, so appending one empty scope and
    # then subtracting it strips every empty scope from the list.)
    empty_dim = {  # Clear from previous iteration.
    }
    dims += [ empty_dim ]
    dims -= [ empty_dim ]

    # Check if the environment's dimensions match those of a platform of the
    # target architecture; if a match, include the environment among the
    # test spec's.
    # Note that in GN "A is a subset of B" is equivalent to `A + B - B == []`:
    # subtracting B removes every element of A that also occurs in B, so only
    # elements of A missing from B survive.
    match = false
    foreach(platform_dims, target_platform_dims) {
      if (dims + platform_dims - platform_dims == []) {
        match = true
        test_spec.environments += [ env ]
      }
    }

    # If the environment's dimensions do not match a target architecture,
    # ensure that they match those of a platform of another architecture.
    # Such environments are silently skipped (not an error): the test simply
    # does not run there in this build's architecture.
    if (!match) {
      foreach(platform_dims, other_platform_dims) {
        match = match || dims + platform_dims - platform_dims == []
      }

      # Dimensions matching no known platform at all are a spec error.
      if (!match) {
        print("Could not match environment specifications for '$target_name':")
        print("$env")
        assert(
            match,
            "Consult //build/testing/platforms.gni for all allowable specifications")
      }
    }
  }

  # In the case that target_name is given by a path (e.g., a rebase_path()
  # value), give the test spec its basename.
  target_base_name = get_path_info(target_name, "name")

  # Serialize the spec as "<name>.spec.json" in the caller-provided directory.
  write_file("${invoker.output_dir}/$target_base_name.spec.json",
             test_spec,
             "json")
}