# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
from recipe_engine import recipe_api
# System path at which authorized SSH keys are stored.
AUTHORIZED_KEY_PATH = "data/ssh/authorized_keys"
# The path to the botanist config on the host.
BOTANIST_DEVICE_CONFIG = "/etc/botanist/config.json"
# The log level to use for botanist invocations in test tasks. Can be one of
# "fatal", "error", "warning", "info", "debug", or "trace", where "trace" is
# the most verbose and "fatal" is the least.
BOTANIST_LOG_LEVEL = "trace"
# Name of image manifest produced by the build.
IMAGES_JSON = "images.json"
# The path in the BootFS manifest at which we want runcmds to show up.
RUNCMDS_BOOTFS_PATH = "infra/runcmds"
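# The image manifest key for the storage-full image; its absence indicates a
# storage-free (bringup) build.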
STORAGE_FULL_KEY = "blk/storage-full"
# The PCI address to use for the block device to contain test results.
TEST_FS_PCI_ADDR = "06.0"
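# The path at which the target's private SSH key is isolated for emulator
# shards.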
_PRIVATE_KEY_PATH = "private_key"
CATALYST_CIPD_REVISION = "git_revision:d0f1c217a82b83a5566b8a66427a8c80a75c0293"
GCSPROXY_CIPD_REVISION = "git_revision:0cc1113899f4b755d22fe878723270fb24e65af7"
SERIAL_LOG_NAME = "serial_log.txt"
SYSLOG_NAME = "syslog.txt"
SERIAL_NAME = "serial.log"
SNAPSHOT_NAME = "snapshot.zip"
class _TaskRequester(object):
"""Creates requests for swarming tasks that run tests."""
def __init__(
self,
api,
buildbucket_build,
per_test_timeout_secs,
pave,
pool,
swarming_expiration_timeout_secs,
swarming_io_timeout_secs,
timeout_secs,
use_runtests,
default_service_account,
targets_serial,
catapult_dashboard_master,
catapult_dashboard_bot,
release_version,
zircon_args,
):
self._api = api
self._buildbucket_build = buildbucket_build
self._per_test_timeout_secs = per_test_timeout_secs
self._pave = pave
self._pool = pool
self._swarming_expiration_timeout_secs = swarming_expiration_timeout_secs
self._swarming_io_timeout_secs = swarming_io_timeout_secs
self._timeout_secs = timeout_secs
self._use_runtests = use_runtests
self._default_service_account = default_service_account
self._targets_serial = targets_serial
self._catapult_dashboard_master = catapult_dashboard_master
self._catapult_dashboard_bot = catapult_dashboard_bot
self._release_version = release_version
self._zircon_args = zircon_args
def request(self, shard, build_results):
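"""Returns a swarming.TaskRequest that runs the tests in the given shard.
Args:
shard (api.testsharder.Shard): The shard of tests to request a task for.
build_results (FuchsiaBuildResults): The Fuchsia build results to test.
"""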
# Copy since we modify it for each shard.
build_results = copy.deepcopy(build_results)
# To freely archive files from the build directory, the source, and those we
# dynamically create, we create a tree of symlinks in a fresh directory and
# isolate that. This solves the problems of (a) finding a root directory
# that works for all artifacts, (b) being able to create files in that
# directory without fear of collision, and (c) not having to isolate
# extraneous files.
isolate_tree = self._api.file.symlink_tree(
root=self._api.path.mkdtemp("isolate")
)
test_manifest = "tests.json"
self._api.file.write_json(
"write test manifest",
isolate_tree.root.join(test_manifest),
[test.render_to_jsonish() for test in shard.tests],
indent=2,
)
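# The task's full command line and its expected output files, assembled
# incrementally below.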
cmd = []
outputs = []
ensure_file = self._api.cipd.EnsureFile()
dimensions = {"pool": self._pool}
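# The CPU of the bot that will run the test task; emulator shards override
# this below so that the bot's architecture matches the build target.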
test_bot_cpu = "x64"
is_emu_type = self._api.emu.is_emulator_type(shard.device_type)
# TODO(fxbug.dev/40840): In the hardware case, we cannot yet run the GCS
# proxy server in the task (it must be run outside of the container on the
# controller). In this case we rely on provisioning logic to set
# $GCS_PROXY_HOST in the environment so that we may pass this to botanist.
# Once we can scope the server in the task, delete this codepath and do as
# we do in the QEMU case.
#
# Note: in the QEMU case, we pass the placeholder of "localhost" to
# botanist. This is because we cannot know an absolute address ahead of
# time. Botanist has logic to resolve this address and scope it for the
# host and target accordingly.
if is_emu_type:
gcsproxy_port = "8080"
gcsproxy_host = "localhost:%s" % gcsproxy_port
else:
gcsproxy_host = "$GCS_PROXY_HOST"
# This command spins up a metadata server that allows its subcommands to
# automagically authenticate with LUCI auth, provided the sub-exec'ed tool
# was written in Go or Dart and respectively makes use of the standard
# cloud.google.com/go/compute/metadata or
# github.com/dart-lang/googleapis_auth authentication libraries. Such
# libraries look for a metadata server under environment variables
# like $GCE_METADATA_HOST, which LUCI emulates.
service_account = shard.service_account or self._default_service_account
if service_account:
# TODO(fxbug.dev/37142): Find a way to use the version that LUCI is
# currently using, instead of 'latest'.
ensure_file.add_package("infra/tools/luci-auth/${platform}", "latest")
cmd.extend(["./luci-auth", "context", "--"])
if is_emu_type:
dimensions.update(os="Debian", cpu=build_results.target, kvm="1")
# To take advantage of KVM, we execute QEMU-arm tasks on arm hardware.
test_bot_cpu = build_results.target
else:
if self._targets_serial:
dimensions["serial"] = "1"
dimensions.update(shard.dimensions)
# Ensure we use GCE VMs whenever possible.
is_linux = not shard.os or shard.os.lower() == "linux"
if (
(is_emu_type or not shard.device_type)
and test_bot_cpu == "x64"
and is_linux
):
dimensions["gce"] = "1"
image_manifest = "%s/%s" % (self._api.artifacts.image_url(), IMAGES_JSON)
if shard.targets_fuchsia:
if is_emu_type:
# TODO(fxbug.dev/40840): As mentioned above, once this bug is resolved,
# we should unconditionally run gcsproxy.
ensure_file.add_package(
"fuchsia/infra/gcsproxy/${platform}", GCSPROXY_CIPD_REVISION
)
cmd.extend(
["./gcsproxy", "-port", gcsproxy_port,]
)
else:
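# Download catalyst.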
ensure_file.add_package(
"fuchsia/infra/catalyst/${platform}", CATALYST_CIPD_REVISION
)
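# Construct the catalyst command.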
cmd.append("./catalyst")
botanist_cmd = [
"./botanist",
"-level",
BOTANIST_LOG_LEVEL,
"run",
"-images",
image_manifest,
"-timeout",
"%ds" % self._timeout_secs,
]
# In the emulator case, serial is redirected to stdio.
if not is_emu_type:
botanist_cmd.extend(["-serial-log", SERIAL_LOG_NAME])
outputs.append(SERIAL_LOG_NAME)
if self._pave:
botanist_cmd.extend(
[
"-syslog",
SYSLOG_NAME,
"-repo",
self._api.artifacts.package_repo_url(host=gcsproxy_host),
"-blobs",
self._api.artifacts.package_blob_url(host=gcsproxy_host),
]
)
outputs.append(SYSLOG_NAME)
if is_emu_type:
botanist_cmd.extend(["-ssh", _PRIVATE_KEY_PATH])
else:
botanist_cmd.append("-netboot")
for arg in self._zircon_args:
botanist_cmd.extend(["-zircon-args", arg])
config = BOTANIST_DEVICE_CONFIG
if is_emu_type:
config = "./qemu.json"
qemu_config = [
{
"type": shard.device_type.lower(),
"path": "./%s/bin" % shard.device_type.lower(),
"target": build_results.target,
"cpu": 4,
"memory": self._api.emu.get_memory_for_variant(build_results),
"kvm": True,
# Directs botanist to run the QEMU process in a way that synthesizes a
# 'serial device'. We need only do this in the bringup case, where serial
# is used for executing tests at that level; restricting this to the
# minimal case is especially important as this mode tends to slow certain
# processes down.
"serial": not self._pave,
# Isolated to the root below in _isolate_build_artifacts().
# Used to dynamically extend fvm.blk to fit downloaded test
# packages.
"fvm_tool": "fvm" if self._pave else "",
"logfile": SERIAL_NAME,
}
]
outputs.append(SERIAL_NAME)
if shard.device_type == "AEMU":
self._api.emu.add_aemu_to_ensure_file(
ensure_file,
checkout=build_results.checkout.root_dir,
subdir="aemu/bin",
)
elif shard.device_type == "QEMU":
self._api.emu.add_qemu_to_ensure_file(
ensure_file,
checkout=build_results.checkout.root_dir,
subdir="qemu",
)
self._api.file.write_json(
"write qemu config",
isolate_tree.root.join("qemu.json"),
qemu_config,
indent=2,
)
botanist_cmd.extend(["-config", config])
cmd.extend(botanist_cmd)
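# testrunner executes the tests in the manifest, writing results to the
# -out-dir directory and a debug snapshot to -snapshot-output.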
cmd.extend(
[
"./testrunner",
"-level",
"debug",
"-out-dir",
self._api.testing_requests.TEST_RESULTS_DIR_NAME,
"-snapshot-output",
SNAPSHOT_NAME,
]
)
if self._use_runtests:
cmd.append("-use-runtests")
if self._per_test_timeout_secs:
cmd.extend(["-per-test-timeout", "%ds" % self._per_test_timeout_secs])
cmd.append(test_manifest)
outputs.append(self._api.testing_requests.TEST_RESULTS_DIR_NAME)
isolated_hash = self._api.testing_requests._isolate_build_artifacts(
isolate_tree, build_results, shard=shard, test_bot_cpu=test_bot_cpu,
)
tags = self._api.testing_requests.test_task_tags(
self._buildbucket_build,
build_results,
env_name="%s-%s" % (shard.device_type or shard.os, build_results.target),
task_name=shard.name,
)
request = (
self._api.swarming.task_request()
.with_name(shard.name)
.with_tags(self._api.testing_requests.create_swarming_tags(tags))
)
if service_account:
request = request.with_service_account(service_account)
return request.with_slice(
0,
request[0]
.with_command(cmd)
.with_isolated(isolated_hash)
.with_dimensions(**dimensions)
.with_expiration_secs(self._swarming_expiration_timeout_secs)
.with_io_timeout_secs(self._swarming_io_timeout_secs)
.with_execution_timeout_secs(self._timeout_secs)
.with_outputs(outputs)
.with_cipd_ensure_file(ensure_file)
.with_env_vars(
**self._api.testing_requests.test_task_env_vars(
self._buildbucket_build,
shard.device_type,
build_results,
catapult_dashboard_master=self._catapult_dashboard_master,
catapult_dashboard_bot=self._catapult_dashboard_bot,
release_version=self._release_version,
)
),
)
class TestingRequestsApi(recipe_api.RecipeApi):
"""APIs for constructing Swarming task requests to test Fuchsia."""
SERIAL_LOG_NAME = SERIAL_LOG_NAME
TEST_RESULTS_MINFS_NAME = "output.fs"
TEST_RESULTS_DIR_NAME = "out"
# The name of the tag to set on every task request; its value is the name
# of the shard's environment (device type/OS and architecture).
TEST_ENVIRONMENT_TAG_NAME = "test_environment_name"
def task_requests(
self,
build_results,
buildbucket_build,
per_test_timeout_secs,
pool,
shards,
swarming_expiration_timeout_secs,
swarming_io_timeout_secs,
use_runtests,
timeout_secs,
default_service_account=None,
pave=True,
targets_serial=False,
catapult_dashboard_master=None,
catapult_dashboard_bot=None,
release_version=None,
zircon_args=(),
):
"""Returns a swarming.TaskRequest for each shard in build_artifact.shards.
Args:
build_results (FuchsiaBuildResults): The Fuchsia build results to test.
buildbucket_build (build_pb2.Build): The buildbucket build that is going
to orchestrate testing.
per_test_timeout_secs (int): Any test that executes for longer than this
will be considered failed.
pool (str): The Swarming pool to schedule test tasks in.
shards (list of testsharder.Shard): Test shards.
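swarming_expiration_timeout_secs (int): The maximum amount of time the task
may stay pending before it expires (enforced by swarming).
swarming_io_timeout_secs (int): The swarming task will be killed if it does
not produce any output for this long.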
use_runtests (bool): Whether to use runtests (or else run_test_component)
when executing tests on target.
timeout_secs (int): The number of seconds to wait for the tests to execute
before giving up.
default_service_account (str or None): The default service account to run the
test task with. This is required for fetching images from GCS.
pave (bool): Whether to pave (or else 'netboot') the system; this is
effectively equivalent to "not bringup" and is treated as such (even for
QEMU).
targets_serial (bool): Whether the task should target a bot with serial
enabled.
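catapult_dashboard_master (str or None): The name of the Catapult dashboard
master to upload performance test results to. Passed as a task environment
variable.
catapult_dashboard_bot (str or None): The name of the Catapult dashboard bot
to upload performance test results to. Passed as a task environment
variable.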
release_version (str or None): The release version that we checked out.
Passed as a task environment variable.
zircon_args (list(str)): Kernel command-line arguments to pass on boot.
"""
# Embed the authorized key into the appropriate ZBI. This enables us to SSH
# into QEMU instances, for which we are unable to supply the key at
# pave-time (as QEMU instances are not paved).
has_emu_shard = any(
self.m.emu.is_emulator_type(shard.device_type) for shard in shards
)
if pave and has_emu_shard:
self.m.zbi.zbi_path = build_results.zbi
zbi_key = self._zbi_key(build_results)
zbi_path = build_results.fuchsia_build_dir.join(
build_results.images[zbi_key]["path"]
)
self.m.zbi.copy_and_extend(
step_name="embed authorized key",
input_image=zbi_path,
output_image=zbi_path,
manifest={AUTHORIZED_KEY_PATH: build_results.authorized_key},
)
task_requester = _TaskRequester(
self.m,
buildbucket_build=buildbucket_build,
per_test_timeout_secs=per_test_timeout_secs,
pave=pave,
pool=pool,
swarming_expiration_timeout_secs=swarming_expiration_timeout_secs,
swarming_io_timeout_secs=swarming_io_timeout_secs,
timeout_secs=timeout_secs,
use_runtests=use_runtests,
default_service_account=default_service_account,
targets_serial=targets_serial,
catapult_dashboard_master=catapult_dashboard_master,
catapult_dashboard_bot=catapult_dashboard_bot,
release_version=release_version,
zircon_args=zircon_args,
)
task_requests = []
for s in shards:
with self.m.step.nest("shard %s" % s.name):
task_requests.append(task_requester.request(s, build_results))
return task_requests
def deprecated_task_requests(
self,
build_results,
buildbucket_build,
test_cmds,
device_type,
pool,
timeout_secs,
pave,
swarming_expiration_timeout_secs=18000,
swarming_io_timeout_secs=5 * 60,
default_service_account=None,
zircon_args=(),
):
"""Returns a swarming task request for testing in the deprecated way.
Args:
build_results (FuchsiaBuildResults): The Fuchsia build to test.
test_cmds (list[str]): Commands to have Fuchsia run on boot.
pool (str): Swarming pool from which the test task will be drawn.
timeout_secs (int): The number of seconds to wait for the tests to execute
before giving up.
pave (bool): Whether to pave the image to disk. Ignored if device_type ==
'QEMU'.
swarming_expiration_timeout_secs (int): The maximum amount of time the task
may stay pending before it expires (enforced by swarming).
swarming_io_timeout_secs (int): The swarming task will be killed if it does
not produce any output for this long.
default_service_account (str or None): The default service account to run the
task with.
zircon_args (list(str)): Kernel command-line arguments to pass on boot.
Returns:
A list of a single swarming.TaskRequest.
"""
assert test_cmds
assert device_type
self.m.minfs.minfs_path = build_results.minfs
self.m.zbi.zbi_path = build_results.zbi
# Copy build_results because we modify its contents below.
build_results = copy.deepcopy(build_results)
self._install_runcmds_files(
build_results, device_type=device_type, pave=pave, test_cmds=test_cmds,
)
if self.m.emu.is_emulator_type(device_type):
task = self._construct_legacy_qemu_task_request(
task_name="all tests",
build_results=build_results,
buildbucket_build=buildbucket_build,
pool=pool,
timeout_secs=timeout_secs,
swarming_expiration_timeout_secs=swarming_expiration_timeout_secs,
swarming_io_timeout_secs=swarming_io_timeout_secs,
qemu_type=device_type,
service_account=default_service_account,
zircon_args=zircon_args,
)
else:
task = self._construct_device_task_request(
task_name="all tests",
device_type=device_type,
build_results=build_results,
buildbucket_build=buildbucket_build,
pool=pool,
pave=pave,
timeout_secs=timeout_secs,
swarming_expiration_timeout_secs=swarming_expiration_timeout_secs,
swarming_io_timeout_secs=swarming_io_timeout_secs,
service_account=default_service_account,
zircon_args=zircon_args,
)
return [task]
def deprecated_test_cmds(self, test_spec):
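"""Returns a list containing a single runtests invocation to execute on
the target."""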
runtests_cmd_parts = [
"runtests",
"--all",
"--output",
self.results_dir_on_target,
]
if test_spec.per_test_timeout_secs:
runtests_cmd_parts.extend(["-i", "%d" % test_spec.per_test_timeout_secs])
runtests_cmd_parts.append(test_spec.runtests_args)
return [" ".join(runtests_cmd_parts)]
@property
def results_dir_on_target(self):
"""The directory on target to which target test results will be written."""
return "/tmp/infra-test-output"
def _construct_legacy_qemu_task_request(
self,
task_name,
build_results,
buildbucket_build,
pool,
timeout_secs,
swarming_expiration_timeout_secs,
swarming_io_timeout_secs,
qemu_type,
service_account,
zircon_args,
):
"""Constructs a Swarming task request which runs Fuchsia tests inside QEMU.
Expects the build and artifacts to be at the same place they were at
the end of the build.
Args:
build_results (FuchsiaBuildResults): The Fuchsia build to test.
pool (str): Swarming pool from which the test task will be drawn.
timeout_secs (int): The number of seconds to wait for the tests to execute
before giving up.
qemu_type (str): The type of emulator, either "QEMU" or "AEMU".
service_account (str or None): The service account to run the task with.
zircon_args (list(str)): Kernel command-line arguments to pass on boot.
Returns:
An api.swarming.TaskRequest representing the swarming task request.
"""
# To freely archive files from the build directory, the source, and those we
# dynamically create, we create a tree of symlinks in a fresh directory and
# isolate that. This solves the problems of (a) finding a root directory
# that works for all artifacts, (b) being able to create files in that
# directory without fear of collision, and (c) not having to isolate
# extraneous files.
isolate_tree = self.m.file.symlink_tree(root=self.m.path.mkdtemp("isolate"))
# As part of running tests, we'll send a MinFS image over to another machine
# which will be declared as a block device in QEMU, at which point Fuchsia
# will mount it and write test output to it. We choose 3.5G for the MinFS
# image arbitrarily, as it appears it can hold our test output comfortably
# without going overboard on size.
#
minfs_image_path = isolate_tree.root.join(self.TEST_RESULTS_MINFS_NAME)
self.m.minfs.create(minfs_image_path, "3584M", name="create test image")
cmd = []
ensure_file = self.m.cipd.EnsureFile()
if service_account:
# TODO(fxbug.dev/37142): Find a way to use the version that LUCI is
# currently using, instead of 'latest'.
ensure_file.add_package("infra/tools/luci-auth/${platform}", "latest")
cmd.extend(["./luci-auth", "context", "--"])
image_manifest = "%s/%s" % (self.m.artifacts.image_url(), IMAGES_JSON)
cmd.extend(
[
"./botanist",
"-level",
BOTANIST_LOG_LEVEL,
"qemu",
"-type",
"%s" % qemu_type.lower(),
"-qemu-dir",
"./%s/bin" % qemu_type.lower(),
"-images",
image_manifest,
"-arch",
build_results.target,
"-minfs",
self.TEST_RESULTS_MINFS_NAME,
"-pci-addr",
TEST_FS_PCI_ADDR,
"-use-kvm",
"-memory",
str(self.m.emu.get_memory_for_variant(build_results)),
]
)
# storage-full not being present signifies the exclusion of the system
# partition, which means `boot` (i.e. running on boot) must be used instead
# of `system` (i.e., running after the system partition is mounted).
storage_free_build = STORAGE_FULL_KEY not in build_results.images
arg_key = "zircon.autorun.%s" % ("boot" if storage_free_build else "system")
cmd.append("%s=/boot/bin/sh+/boot/%s" % (arg_key, RUNCMDS_BOOTFS_PATH))
isolated_hash = self._isolate_build_artifacts(
isolate_tree,
build_results,
# To take advantage of KVM, we execute QEMU-arm tasks on arm hardware.
test_bot_cpu=build_results.target,
)
cmd.extend(zircon_args)
if qemu_type == "AEMU":
self.m.emu.add_aemu_to_ensure_file(
ensure_file, checkout=build_results.checkout.root_dir, subdir="aemu/bin"
)
elif qemu_type == "QEMU":
self.m.emu.add_qemu_to_ensure_file(
ensure_file, checkout=build_results.checkout.root_dir, subdir="qemu"
)
env_name = "%s-%s" % (qemu_type, build_results.target)
tags = self.test_task_tags(
buildbucket_build, build_results, env_name=env_name, task_name=task_name
)
request = (
self.m.swarming.task_request()
.with_name(task_name)
.with_tags(self.create_swarming_tags(tags))
)
if service_account:
request = request.with_service_account(service_account)
return request.with_slice(
0,
request[0]
.with_command(cmd)
.with_isolated(isolated_hash)
.with_dimensions(pool=pool, os="Debian", cpu=build_results.target, kvm="1")
.with_io_timeout_secs(swarming_io_timeout_secs)
.with_execution_timeout_secs(timeout_secs)
.with_expiration_secs(swarming_expiration_timeout_secs)
.with_outputs([self.TEST_RESULTS_MINFS_NAME])
.with_cipd_ensure_file(ensure_file)
.with_env_vars(
**self.test_task_env_vars(buildbucket_build, qemu_type, build_results)
),
)
def _construct_device_task_request(
self,
task_name,
device_type,
build_results,
buildbucket_build,
pool,
pave,
timeout_secs,
swarming_expiration_timeout_secs,
swarming_io_timeout_secs,
service_account,
zircon_args,
):
"""Constructs a Swarming task request to run Fuchsia tests on a device.
Expects the build and artifacts to be at the same place they were at
the end of the build.
Args:
build_results (FuchsiaBuildResults): The Fuchsia build to test.
pool (str): Swarming pool from which the test task will be drawn.
pave (bool): Whether or not the build artifacts should be paved.
timeout_secs (int): The number of seconds to wait for the tests to execute
before giving up.
service_account (str or None): The service account to run the task with.
zircon_args (list(str)): Kernel command-line arguments to pass on boot.
Returns:
An api.swarming.TaskRequest representing the swarming task request.
"""
cmd = []
ensure_file = self.m.cipd.EnsureFile()
if service_account:
# TODO(fxbug.dev/37142): Find a way to use the version that LUCI is
# currently using, instead of 'latest'.
ensure_file.add_package("infra/tools/luci-auth/${platform}", "latest")
cmd.extend(["./luci-auth", "context", "--"])
image_manifest = "%s/%s" % (self.m.artifacts.image_url(), IMAGES_JSON)
# Download catalyst.
ensure_file.add_package(
"fuchsia/infra/catalyst/${platform}", CATALYST_CIPD_REVISION
)
# Construct the catalyst command.
cmd.append("./catalyst")
# Construct the botanist command.
cmd.extend(
[
"./botanist",
"-level",
BOTANIST_LOG_LEVEL,
"zedboot",
"-config",
BOTANIST_DEVICE_CONFIG,
"-images",
image_manifest,
"-results-dir",
self.results_dir_on_target,
"-out-dir",
self.TEST_RESULTS_DIR_NAME,
"-serial-log",
SERIAL_LOG_NAME,
]
)
if not pave:
cmd.append("-netboot")
# storage-full not being present signifies the exclusion of the system
# partition, which means `boot` (i.e. running on boot) must be used instead
# of `system` (i.e., running after the system partition is mounted).
storage_free_build = STORAGE_FULL_KEY not in build_results.images
arg_key = "zircon.autorun.%s" % ("boot" if storage_free_build else "system")
cmd.append("%s=/boot/bin/sh+/boot/%s" % (arg_key, RUNCMDS_BOOTFS_PATH))
cmd.extend(zircon_args)
# To freely archive files from the build directory, the source, and those we
# dynamically create, we create a tree of symlinks in a fresh directory and
# isolate that. This solves the problems of (a) finding a root directory
# that works for all artifacts, (b) being able to create files in that
# directory without fear of collision, and (c) not having to isolate
# extraneous files.
isolate_tree = self.m.file.symlink_tree(root=self.m.path.mkdtemp("isolate"))
isolated_hash = self._isolate_build_artifacts(isolate_tree, build_results)
dimensions = {
"pool": pool,
"device_type": device_type,
}
tags = self.test_task_tags(
buildbucket_build,
build_results,
env_name="%s-%s" % (device_type, build_results.target),
task_name=task_name,
)
request = (
self.m.swarming.task_request()
.with_name(task_name)
.with_tags(self.create_swarming_tags(tags))
)
if service_account:
request = request.with_service_account(service_account)
return request.with_slice(
0,
request[0]
.with_command(cmd)
.with_isolated(isolated_hash)
.with_dimensions(**dimensions)
.with_expiration_secs(swarming_expiration_timeout_secs)
.with_io_timeout_secs(swarming_io_timeout_secs)
.with_execution_timeout_secs(timeout_secs)
.with_outputs([self.TEST_RESULTS_DIR_NAME, SERIAL_LOG_NAME])
.with_cipd_ensure_file(ensure_file)
.with_env_vars(
**self.test_task_env_vars(buildbucket_build, device_type, build_results)
),
)
def test_task_env_vars(
self,
build,
device_type,
build_results,
image_manifest=None,
catapult_dashboard_master=None,
catapult_dashboard_bot=None,
release_version=None,
):
"""Returns the environment variables to be set for the test task.
Returns:
A dict mapping string env var names to string values.
"""
commit = build.input.gitiles_commit
llvm_symbolizer = self.m.path.basename(build_results.llvm_symbolizer)
env_vars = dict(
# `${ISOLATED_OUTDIR}` is a magic string that Swarming will replace
# with a temporary directory into which files will be automatically
# collected upon exit of a task.
FUCHSIA_TEST_OUTDIR="${ISOLATED_OUTDIR}",
# Don't set BUILDBUCKET_ID for builds run using led.
BUILDBUCKET_ID=str(build.id) if build.id else None,
BUILD_BOARD=build_results.board,
BUILD_TYPE=build_results.build_type,
BUILD_PRODUCT=build_results.product,
BUILD_TARGET=build_results.target,
BUILDBUCKET_BUCKET=build.builder.bucket,
# Used for symbolization:
ASAN_SYMBOLIZER_PATH=llvm_symbolizer,
UBSAN_SYMBOLIZER_PATH=llvm_symbolizer,
LSAN_SYMBOLIZER_PATH=llvm_symbolizer,
# Used for generating data to upload to the Catapult performance
# dashboard.
# TODO(fxb/50210): Don't fall back to time.time() once led starts
# setting create_time again.
BUILD_CREATE_TIME=str(build.create_time.seconds or int(self.m.time.time())),
BUILDER_NAME=build.builder.builder,
FUCHSIA_DEVICE_TYPE=device_type,
INPUT_COMMIT_HOST=commit.host,
INPUT_COMMIT_PROJECT=commit.project,
INPUT_COMMIT_REF=commit.ref,
RELEASE_VERSION=release_version,
BOOTSERVER_PATH="./"
+ self.m.path.basename(build_results.tool("bootserver_new")),
IMAGE_MANIFEST_PATH=image_manifest
or "%s/%s" % (self.m.artifacts.image_url(), IMAGES_JSON),
SWARMING_BOT_FILE="${SWARMING_BOT_FILE}",
)
env_vars.update(
self.get_catapult_dashboard_env_vars(
catapult_dashboard_master, catapult_dashboard_bot, commit.ref
)
)
# For some reason, empty string environment variables sent to the swarming
# API get interpreted as null and rejected. So don't bother sending them to
# avoid breaking the task request.
# TODO(olivernewman): Figure out whether this logic should be moved into
# the upstream swarming module (or obviated by fixing the "" -> null
# behavior).
return {k: v for k, v in env_vars.iteritems() if v}
def test_task_tags(self, buildbucket_build, build_results, env_name, task_name):
return {
"board": build_results.board,
"build_type": build_results.build_type,
"buildbucket_bucket": buildbucket_build.builder.bucket,
"buildbucket_builder": buildbucket_build.builder.builder,
"product": build_results.product,
"role": "tester",
"task_name": task_name,
# Consumed by google3 results uploader, and by the orchestrator
# when uploading to resultdb.
self.TEST_ENVIRONMENT_TAG_NAME: env_name,
"variants": list(build_results.variants),
}
def _create_runcmds_script(self, device_type, test_cmds, output_path):
"""Creates a script for running tests on boot."""
# The device topological path is the topological path to the block device
# which will contain test output.
device_topological_path = "/dev/sys/pci/00:%s/virtio-block/block" % (
TEST_FS_PCI_ADDR
)
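# With TEST_FS_PCI_ADDR = "06.0", this evaluates to
# /dev/sys/pci/00:06.0/virtio-block/block.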
# Script that mounts the block device to contain test output and runs tests,
# dropping test output into the block device.
results_dir = self.results_dir_on_target
runcmds = [
"mkdir %s" % results_dir,
]
if self.m.emu.is_emulator_type(device_type):
runcmds.extend(
[
# Wait until the MinFS test image shows up (max <timeout> ms).
"waitfor class=block topo=%s timeout=60000"
% device_topological_path,
"mount %s %s" % (device_topological_path, results_dir),
]
+ test_cmds
+ ["umount %s" % results_dir, "dm poweroff",]
)
else:
runcmds.extend(test_cmds)
runcmds_bytes = []
for line in runcmds:
if isinstance(line, unicode):
runcmds_bytes.append(line.encode("utf-8"))
elif isinstance(line, str):
runcmds_bytes.append(line)
else: # pragma: no cover
assert False, "line is not unicode or a str: %s, %s" % (
line,
type(line),
)
self.m.file.write_text("write runcmds", output_path, "\n".join(runcmds_bytes))
def _isolate_build_artifacts(
self, isolate_tree, build_results, shard=None, test_bot_cpu="x64",
):
"""Populates a tree with build artifacts and isolates it.
Specifically, the following is linked into or created within the tree:
- The images in the build are linked in and a manifest of them is created
in the root, if targeting a fuchsia device;
- The Linux/Mac tests in the shard and their runtime dependencies.
Args:
isolate_tree (api.file.SymlinkTree): A tree into which artifacts may be
linked.
build_results (FuchsiaBuildResults): The result of a fuchsia build.
shard (api.testsharder.Shard or None): A test shard.
test_bot_cpu (str or None): The host cpu of the bot running the test task.
Returns:
The isolated hash that may be used to reference and download the
artifacts.
"""
def register_link(relpath):
"""Prepares a symlink of a relative path within the build directory to the
tree."""
isolate_tree.register_link(
target=build_results.fuchsia_build_dir.join(relpath),
linkname=isolate_tree.root.join(relpath),
)
is_emu_type = shard and self.m.emu.is_emulator_type(shard.device_type)
if shard:
for dep in shard.deps:
register_link(dep)
# If targeting QEMU we include a private key corresponding to an authorized
# key already in the boot image; this is needed as we do not pave QEMU.
if is_emu_type:
isolate_tree.register_link(
target=build_results.private_key,
linkname=isolate_tree.root.join(_PRIVATE_KEY_PATH),
)
# TODO(fxb/38517): s/bootserver_new/bootserver.
tools = ["botanist", "testrunner", "bootserver_new"]
if test_bot_cpu == "x64":
# Relevant for automatic symbolization of things running on host. Only
# the x64 variation is available in the checkout and we have nothing
# that runs on an arm host that needs symbolizing.
tools.append("llvm-symbolizer")
if is_emu_type:
# Used to dynamically extend fvm.blk, which is relevant only to QEMU.
tools.append("fvm")
for tool_name in tools:
tool = build_results.tool(tool_name, cpu=test_bot_cpu)
isolate_tree.register_link(
target=tool, linkname=isolate_tree.root.join(tool_name)
)
isolate_tree.create_links("create tree of build artifacts")
isolated = self.m.isolated.isolated(isolate_tree.root)
isolated.add_dir(isolate_tree.root)
return isolated.archive("isolate build artifacts")
def _zbi_key(self, build_results, pave=True):
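"""Returns the image manifest key of the ZBI to boot, defaulting to
"zbi/zircon-a"."""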
zbi_key = "zbi/zircon-a"
bootserver_type = "bootserver_%s" % ("pave" if pave else "netboot")
for image in build_results.images.values():
if (image["type"] == "zbi") and (
"--boot" in image.get(bootserver_type, [])
):
zbi_key = image["type"] + "/" + image["name"]
break
return zbi_key
def _install_runcmds_files(
self,
build_results,
per_test_timeout_secs=None,
device_type=None,
pave=False,
test_cmds=None,
):
"""Creates the files used to invoke runtests on boot.
This is only necessary for QEMU shards (the only shards that use
runcmds) and for the non-sharding codepath.
"""
assert device_type and test_cmds
self.m.zbi.zbi_path = build_results.zbi
# TODO(fxbug.dev/41930): The deprecated QEMU codepath explicitly looks for
# a zircon-a. The attached bug will enable us to stop making these sorts of
# assumptions.
if self.m.emu.is_emulator_type(device_type):
zbi_key = "zbi/zircon-a"
else:
zbi_key = self._zbi_key(build_results, pave)
runcmds_path = self.m.path["cleanup"].join("runcmds")
self._create_runcmds_script(device_type, test_cmds, runcmds_path)
zbi_path = build_results.fuchsia_build_dir.join(
build_results.images[zbi_key]["path"]
)
# Inject the runcmds script into the bootfs image.
self.m.zbi.copy_and_extend(
step_name="create zbi",
input_image=zbi_path,
output_image=zbi_path,
manifest={RUNCMDS_BOOTFS_PATH: runcmds_path},
)
def create_swarming_tags(self, tags):
"""Creates a properly formatted tags dict to pass to a swarming task.
Args:
tags (dict): A dictionary of key-value pairs of the desired tags.
Returns:
A dictionary where the keys are strings and the values are lists of
strings.
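For example, {"role": "tester", "variants": ["asan"]} becomes
{"role": ["tester"], "variants": ["asan"]}.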
"""
swarming_tags = {}
for k in tags:
val = tags[k]
if not val:
val = []
if not isinstance(val, list):
val = [val]
swarming_tags[str(k)] = [str(i) for i in val]
return swarming_tags
# This is included in the API just so that it can be unit-tested.
def get_catapult_dashboard_env_vars(self, master_name, bot_name, commit_ref):
if not master_name and not bot_name:
# Uploading to Catapult is disabled.
return {}
if not (master_name and bot_name):
raise ValueError(
"Catapult master and bot names not set consistently: %r, %r"
% (master_name, bot_name)
)
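# For release branches, append the branch name to the master name; e.g. a
# ref of "refs/heads/releases/f1" yields a master name of "<master>.f1".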
prefix = "refs/heads/releases/"
if commit_ref.startswith(prefix):
branch_name = commit_ref[len(prefix) :]
master_name += "." + branch_name
elif commit_ref != "refs/heads/master":
# Unrecognized Git branch/tag name. Disable uploading to Catapult
# by not setting the env vars.
return {}
return dict(
CATAPULT_DASHBOARD_MASTER=master_name, CATAPULT_DASHBOARD_BOT=bot_name
)