blob: f4bd8f08f0c88751df2e08bff0a32ecab37a9c2c [file] [log] [blame]
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
from recipe_engine import recipe_api
# System path at which authorized SSH keys are stored.
AUTHORIZED_KEY_PATH = 'data/ssh/authorized_keys'

# The path to the botanist config on the host.
BOTANIST_DEVICE_CONFIG = '/etc/botanist/config.json'

# The log level to use for botanist invocations in test tasks. Can be one of
# "fatal", "error", "warning", "info", "debug", or "trace", where "trace" is
# the most verbose, and fatal is the least.
BOTANIST_LOG_LEVEL = 'debug'

# Name of image manifest produced by the build.
IMAGES_JSON = 'images.json'

# The path in the BootFS manifest that we want runcmds to show up at.
RUNCMDS_BOOTFS_PATH = 'infra/runcmds'

# Pinned CIPD version of secretshim, a wrapper that starts the secrets server
# before running the wrapped (botanist) command.
SECRETSHIM_CIPD_VERSION = 'git_revision:63ab3ac613fceb52ac49b63b43fce841a2585645'

# Image name whose presence in the build signals that a system partition is
# included (see the zircon.autorun.{boot,system} selection logic below).
STORAGE_FULL = 'storage-full'

# The PCI address to use for the block device to contain test results.
TEST_FS_PCI_ADDR = '06.0'
class _TaskRequester(object):
  """Creates requests for swarming tasks that run tests."""

  def __init__(self, api, buildbucket_build, per_test_timeout_secs, pool,
               swarming_expiration_timeout_secs, swarming_io_timeout_secs,
               timeout_secs, use_runtests):
    """Initializes the task requester.

    Args:
      api (RecipeApi): Recipe API object used to reach other recipe modules
        (testing_requests, build, file, cipd, emu, swarming, ...).
      buildbucket_build (build_pb2.Build): The buildbucket build that is going
        to orchestrate testing.
      per_test_timeout_secs (int): Any test that executes for longer than this
        will be considered failed.
      pool (str): The Swarming pool to schedule test tasks in.
      swarming_expiration_timeout_secs (int): Seconds before an unscheduled
        task request expires (passed to with_expiration_secs()).
      swarming_io_timeout_secs (int): The swarming task will be killed if it
        does not produce any output for this long.
      timeout_secs (int): The amount of seconds to wait for the tests to
        execute before giving up.
      use_runtests (bool): Whether to pass -use-runtests to testrunner.
    """
    self._api = api
    self._buildbucket_build = buildbucket_build
    self._per_test_timeout_secs = per_test_timeout_secs
    self._pool = pool
    self._swarming_expiration_timeout_secs = swarming_expiration_timeout_secs
    self._swarming_io_timeout_secs = swarming_io_timeout_secs
    self._timeout_secs = timeout_secs
    self._use_runtests = use_runtests

  def request(self, shard, build_artifacts):
    """Returns a ShardTaskRequest for the given shard.

    Legacy-QEMU shards are routed to the deprecated QEMU task constructor;
    all other shards get a regular test task request.

    Args:
      shard (api.testsharder.Shard): The shard of tests to create a task
        request for.
      build_artifacts (BuildArtifacts): The Fuchsia build artifacts to test.
    """
    # Copy the build_artifacts object to be modified for each shard.
    build_artifacts = copy.deepcopy(build_artifacts)
    if self._api.testing_requests._uses_legacy_qemu(shard):
      task_request = self._api.testing_requests._construct_legacy_qemu_task_request(
          task_name=shard.name,
          pool=self._pool,
          build_artifacts=build_artifacts,
          timeout_secs=self._timeout_secs,
          swarming_io_timeout_secs=self._swarming_io_timeout_secs,
          swarming_expiration_timeout_secs=(
              self._swarming_expiration_timeout_secs),
          # secret_bytes is only supported in the deprecated testing code path.
          secret_bytes='',
          qemu_type=shard.device_type,
          shard=shard,
      )
    else:
      task_request = self._construct_test_task_request(
          build_artifacts=build_artifacts, shard=shard)
    return self._api.build.ShardTaskRequest(shard, task_request)

  def _construct_test_task_request(self, build_artifacts, shard):
    """Constructs a Swarming task request to run a shard of Fuchsia tests.

    The task command is assembled in layers: an optional luci-auth wrapper,
    an optional botanist invocation (when the shard targets Fuchsia), and the
    testrunner invocation that consumes the written tests.json manifest.

    Args:
      build_artifacts (BuildArtifacts): The Fuchsia build artifacts to test.
      shard (api.testsharder.Shard): A shard of tests.

    Returns:
      An api.swarming.TaskRequest representing the swarming task request.
    """
    # To freely archive files from the build directory, the source, and those we
    # dynamically create, we create a tree of symlinks in a fresh directory and
    # isolate that. This solves the problems of (a) finding a root directory
    # that works for all artifacts, (b) being able to create files in that
    # directory without fear of collision, and (c) not having to isolate
    # extraneous files.
    isolate_tree = self._api.file.symlink_tree(
        root=self._api.path.mkdtemp('isolate'))

    # The tests.json manifest tells testrunner which tests to execute.
    test_manifest = 'tests.json'
    self._api.file.write_json(
        'write test manifest',
        isolate_tree.root.join(test_manifest),
        [test.render_to_jsonish() for test in shard.tests],
        indent=2)

    cmd = []
    outputs = []
    ensure_file = self._api.cipd.EnsureFile()
    dimensions = {'pool': self._pool}
    test_bot_cpu = 'x64'
    is_emu_type = self._api.emu.is_emulator_type(shard.device_type)

    # This command spins up a metadata server that allows its subcommands to
    # automagically authenticate with LUCI auth, provided the sub-exec'ed tool
    # was written in go or dart and respectively makes use of the standard
    # cloud.google.com/go/compute/metadata or
    # github.com/dart-lang/googleapis_auth authentication libraries. Such
    # libraries look for a metadata server under environment variables
    # like $GCE_METADATA_HOST, which LUCI emulates.
    if shard.service_account:
      # TODO(fxbug.dev/37142): Find a way to use the version that LUCI is
      # currently using, instead of 'latest'.
      ensure_file.add_package('infra/tools/luci-auth/${platform}', 'latest')
      cmd.extend(['./luci-auth', 'context', '--'])

    if is_emu_type:
      dimensions.update(os='Debian', cpu=build_artifacts.target, kvm='1')
      # To take advantage of KVM, we execute QEMU-arm tasks on arm hardware.
      test_bot_cpu = build_artifacts.target
    else:
      dimensions.update(shard.dimensions)

    if shard.targets_fuchsia:
      botanist_cmd = [
          './botanist',
          '-level', BOTANIST_LOG_LEVEL,
          'run',
          '-images', IMAGES_JSON,
          '-timeout', '%ds' % self._timeout_secs,
          '-syslog', self._api.testing_requests.SYSLOG_NAME,
          '-serial-log', self._api.testing_requests.SERIAL_LOG_NAME,
      ] # yapf: disable
      outputs.append(self._api.testing_requests.SYSLOG_NAME)
      outputs.append(self._api.testing_requests.SERIAL_LOG_NAME)

      # TODO(fxbug.dev/40840): Once we can scope the proxy server to a
      # an individual task, we can make free use of it in the emulator case.
      if not is_emu_type:
        botanist_cmd.extend([
            # For container networking and authentication reasons, we access GCS
            # via a proxy server running on the controller.
            '-repo', self._api.artifacts.package_repo_url(host='$GCS_PROXY_HOST'),
            '-blobs', self._api.artifacts.package_blob_url(host='$GCS_PROXY_HOST'),
        ]) # yapf: disable

      config = BOTANIST_DEVICE_CONFIG
      if self._api.emu.is_emulator_type(shard.device_type):
        # Emulator shards get a locally-written device config and an SSH key
        # (QEMU is not paved, so the key must be injected at task time).
        config = './qemu.json'
        botanist_cmd.extend(
            ['-ssh', build_artifacts.DEFAULT_ISOLATED_LAYOUT.private_key])
        qemu_config = [{
            'type': shard.device_type.lower(),
            'path': './%s/bin' % shard.device_type.lower(),
            'target': build_artifacts.target,
            'cpu': 8,
            'memory': 8192,
            'kvm': True,
        }]
        if shard.device_type == 'AEMU':
          self._api.emu.add_aemu_to_ensure_file(ensure_file, subdir='aemu/bin')
        elif shard.device_type == 'QEMU':
          self._api.emu.add_qemu_to_ensure_file(ensure_file, subdir='qemu')
        self._api.file.write_json(
            'write qemu config',
            isolate_tree.root.join('qemu.json'),
            qemu_config,
            indent=2)
      elif shard.netboot:
        botanist_cmd.append('-netboot')
      botanist_cmd.extend(['-config', config])
      cmd.extend(botanist_cmd)

    cmd.extend([
        './testrunner',
        '-archive',
        self._api.testing_requests.TEST_RESULTS_ARCHIVE_NAME,
    ])
    if self._use_runtests:
      cmd.append('-use-runtests')
    if self._per_test_timeout_secs:
      cmd.extend(['-per-test-timeout', '%ds' % self._per_test_timeout_secs])
    cmd.append(test_manifest)
    outputs.append(self._api.testing_requests.TEST_RESULTS_ARCHIVE_NAME)

    isolated_hash = self._api.testing_requests._isolate_build_artifacts(
        isolate_tree, build_artifacts, shard=shard, test_bot_cpu=test_bot_cpu)

    # test_environment_name is consumed by the google3 results uploader.
    env_name = '%s-%s' % (shard.device_type or shard.os, build_artifacts.target)
    tags = {'test_environment_name': [env_name]}
    request = (self._api.swarming.task_request().
               with_name(shard.name).
               with_service_account(shard.service_account).
               with_tags(tags)
              ) #yapf: disable
    return request.with_slice(0, request[0].
        with_command(cmd).
        with_isolated(isolated_hash).
        with_dimensions(**dimensions).
        with_expiration_secs(self._swarming_expiration_timeout_secs).
        with_io_timeout_secs(self._swarming_io_timeout_secs).
        with_execution_timeout_secs(self._timeout_secs).
        with_outputs(outputs).
        with_cipd_ensure_file(ensure_file).
        with_env_vars(**self._test_task_env_vars(shard, build_artifacts))
    ) #yapf: disable

  def _test_task_env_vars(self, shard, build_artifacts):
    """Returns the environment variables to be set for the test task.

    Args:
      shard (api.testsharder.Shard): The shard the task runs.
      build_artifacts (BuildArtifacts): The Fuchsia build artifacts under test.

    Returns:
      A dict mapping string env var names to string values.
    """
    build = self._buildbucket_build
    commit = build.input.gitiles_commit
    llvm_symbolizer = self._api.path.basename(build_artifacts.llvm_symbolizer)
    env_vars = dict(
        # `${ISOLATED_OUTDIR}` is a magic string that Swarming will replace
        # with a temporary directory into which files will be automatically
        # collected upon exit of a task.
        FUCHSIA_TEST_OUTDIR='${ISOLATED_OUTDIR}',
        BUILDBUCKET_ID=str(build.id),
        BUILD_BOARD=build_artifacts.board,
        BUILD_TYPE=build_artifacts.build_type,
        BUILD_PRODUCT=build_artifacts.product,
        BUILD_TARGET=build_artifacts.target,
        BUILDBUCKET_BUCKET=build.builder.bucket,
        # Used for symbolization:
        ASAN_SYMBOLIZER_PATH=llvm_symbolizer,
        UBSAN_SYMBOLIZER_PATH=llvm_symbolizer,
        LSAN_SYMBOLIZER_PATH=llvm_symbolizer,
        # Used by the catapult converter
        BUILD_CREATE_TIME=str(build.create_time.seconds),
        BUILDER_NAME=build.builder.builder,
        FUCHSIA_DEVICE_TYPE=shard.device_type,
        INPUT_COMMIT_HOST=commit.host,
        INPUT_COMMIT_PROJECT=commit.project,
        INPUT_COMMIT_REF=commit.ref,
    )
    # For some reason, empty string environment variables sent to the swarming
    # API get interpreted as null and rejected. So don't bother sending them to
    # avoid breaking the task request.
    # TODO(olivernewman): Figure out whether this logic should be moved into
    # the upstream swarming module (or obviated by fixing the "" -> null
    # behavior).
    # NOTE: iteritems() is Python 2; this module predates the py3 migration.
    return {k: v for k, v in env_vars.iteritems() if v}
class TestingRequestsApi(recipe_api.RecipeApi):
  """APIs for constructing Swarming task requests to test Fuchsia."""

  # Output file names collected from test tasks.
  SERIAL_LOG_NAME = 'serial.txt'
  SYSLOG_NAME = 'syslog.txt'
  TEST_RESULTS_ARCHIVE_NAME = 'out.tar'
  TEST_RESULTS_MINFS_NAME = 'output.fs'

  def shard_requests(
      self,
      build_artifacts,
      buildbucket_build,
      per_test_timeout_secs,
      pool,
      swarming_expiration_timeout_secs,
      swarming_io_timeout_secs,
      use_runtests,
      # TODO(garymm): Remove default value.
      # We should always get this from a spec.
      timeout_secs=40 * 60):
    """Returns a _ShardTaskRequest for each shard in build_artifact.shards.

    Args:
      build_artifacts (BuildArtifacts): The Fuchsia build artifacts to test.
      buildbucket_build (build_pb2.Build): The buildbucket build that is going
        to orchestrate testing.
      per_test_timeout_secs (int): Any test that executes for longer than this
        will be considered failed.
      pool (str): The Swarming pool to schedule test tasks in.
      swarming_expiration_timeout_secs (int): Seconds before an unscheduled
        task request expires.
      swarming_io_timeout_secs (int): The swarming task will be killed if it
        does not produce any output for this long.
      use_runtests (bool): Whether to use runtests (or else run_test_component)
        when executing tests on target.
      timeout_secs (int): The amount of seconds to wait for the tests to execute
        before giving up.
    """
    self.m.minfs.minfs_path = build_artifacts.minfs
    self.m.zbi.zbi_path = build_artifacts.zbi

    # This modifies the build artifacts so must be done before calling
    # task_requester.request().
    self._install_runcmds_files(
        build_artifacts,
        test_in_shards=True,
        per_test_timeout_secs=per_test_timeout_secs,
        in_place=True)

    task_requester = _TaskRequester(
        self.m,
        buildbucket_build=buildbucket_build,
        per_test_timeout_secs=per_test_timeout_secs,
        pool=pool,
        swarming_expiration_timeout_secs=swarming_expiration_timeout_secs,
        swarming_io_timeout_secs=swarming_io_timeout_secs,
        timeout_secs=timeout_secs,
        use_runtests=use_runtests,
    )
    shard_requests = []
    for s in build_artifacts.shards:
      with self.m.step.nest('shard %s' % s.name):
        shard_requests.append(task_requester.request(s, build_artifacts))
    return shard_requests

  def deprecated_shard_requests(self,
                                build_artifacts,
                                test_cmds,
                                device_type,
                                pool,
                                timeout_secs,
                                pave,
                                requires_secrets=False,
                                swarming_expiration_timeout_secs=18000,
                                swarming_io_timeout_secs=5 * 60):
    """Returns a swarming task request for testing in the deprecated way.

    Args:
      build_artifacts (BuildArtifacts): The Fuchsia build artifacts to test.
      test_cmds (list[str]): Command to have Fuchsia run on boot.
      device_type (str): The type of device to run tests on (e.g. 'QEMU').
      pool (str): Swarming pool from which the test task will be drawn.
      timeout_secs (int): The amount of seconds to wait for the tests to execute
        before giving up.
      pave (bool): Whether to pave the image to disk. Ignored if device_type ==
        'QEMU'.
      requires_secrets (bool): Whether tests require plaintext secrets; ignored
        if device_type != 'QEMU'. TODO(41665): Remove secrets support once done.
      swarming_expiration_timeout_secs (int): Maximum run time for the swarming
        task, once scheduled (enforced by swarming).
      swarming_io_timeout_secs (int): The swarming task will be killed if it does
        not produce any output for this long.

    Returns:
      A list of a single ShardTaskRequest.
    """
    assert test_cmds
    assert device_type
    self.m.minfs.minfs_path = build_artifacts.minfs
    self.m.zbi.zbi_path = build_artifacts.zbi

    # Copy build_artifacts because we modify its contents below.
    build_artifacts = copy.deepcopy(build_artifacts)
    self._install_runcmds_files(
        build_artifacts,
        device_type=device_type,
        pave=pave,
        test_cmds=test_cmds,
    )

    if self.m.emu.is_emulator_type(device_type):
      secret_bytes = ''
      if requires_secrets:
        secret_bytes = self.m.json.dumps(self._decrypt_secrets(build_artifacts))
      task = self._construct_legacy_qemu_task_request(
          task_name='all tests',
          build_artifacts=build_artifacts,
          pool=pool,
          timeout_secs=timeout_secs,
          swarming_expiration_timeout_secs=swarming_expiration_timeout_secs,
          swarming_io_timeout_secs=swarming_io_timeout_secs,
          secret_bytes=secret_bytes,
          qemu_type=device_type,
      )
    else:
      task = self._construct_device_task_request(
          task_name='all tests',
          device_type=device_type,
          build_artifacts=build_artifacts,
          pool=pool,
          pave=pave,
          timeout_secs=timeout_secs,
          swarming_expiration_timeout_secs=swarming_expiration_timeout_secs,
          swarming_io_timeout_secs=swarming_io_timeout_secs,
      )

    # In the deprecated testing code paths, shards are not used, but it makes
    # other code simpler to have a valid shard here.
    dummy_shard = self.m.testsharder.Shard('dummy', (), {})
    return [self.m.build.ShardTaskRequest(dummy_shard, task)]

  def deprecated_test_cmds(self, test_spec):
    """Returns the single runtests command line derived from the test spec."""
    runtests_cmd_parts = ['runtests', '-o', self.results_dir_on_target]
    if test_spec.per_test_timeout_secs:
      runtests_cmd_parts.extend(['-i', '%d' % test_spec.per_test_timeout_secs])
    runtests_cmd_parts.append(test_spec.runtests_args)
    return [' '.join(runtests_cmd_parts)]

  @property
  def results_dir_on_target(self):
    """The directory on target to which target test results will be written."""
    return '/tmp/infra-test-output'

  def _decrypt_secrets(self, build_artifacts):
    """Decrypts the secrets included in the build.

    Args:
      build_artifacts (BuildArtifacts): The build artifacts for which secret
        specs were generated.

    Returns:
      The dictionary that maps secret spec name to the corresponding plaintext.
    """
    self.m.cloudkms.ensure()
    secret_spec_dir = build_artifacts.secret_specs
    secrets_map = {}
    with self.m.step.nest('process secret specs'):
      secret_spec_files = self.m.file.listdir('list', secret_spec_dir)
      for secret_spec_file in secret_spec_files:
        basename = self.m.path.basename(secret_spec_file)
        # Skip the 'ciphertext' subdirectory.
        if basename == 'ciphertext':
          continue
        secret_name, _ = basename.split('.json', 1)
        secret_spec = self.m.json.read('read spec for %s' % secret_name,
                                       secret_spec_file).json.output
        # For each test spec file <name>.json in this directory, there is an
        # associated ciphertext file at ciphertext/<name>.ciphertext.
        ciphertext_file = secret_spec_dir.join('ciphertext',
                                               '%s.ciphertext' % secret_name)
        key_path = secret_spec['cloudkms_key_path']
        secrets_map[secret_name] = self.m.cloudkms.decrypt(
            'decrypt secret for %s' % secret_name, key_path, ciphertext_file,
            self.m.raw_io.output()).raw_io.output
    return secrets_map

  def _construct_legacy_qemu_task_request(self,
                                          task_name,
                                          build_artifacts,
                                          pool,
                                          timeout_secs,
                                          swarming_expiration_timeout_secs,
                                          swarming_io_timeout_secs,
                                          secret_bytes,
                                          qemu_type,
                                          shard=None):
    """Constructs a Swarming task request which runs Fuchsia tests inside QEMU.

    Expects the build and artifacts to be at the same place they were at
    the end of the build.

    Args:
      task_name (str): The name to give the swarming task.
      build_artifacts (BuildArtifacts): The Fuchsia build artifacts to test.
      pool (str): Swarming pool from which the test task will be drawn.
      timeout_secs (int): The amount of seconds to wait for the tests to execute
        before giving up.
      swarming_expiration_timeout_secs (int): Seconds before an unscheduled
        task request expires.
      swarming_io_timeout_secs (int): The swarming task will be killed if it
        does not produce any output for this long.
      secret_bytes (str): secret bytes to pass to the QEMU task.
      qemu_type (str): type of qemu, either QEMU or AEMU.
      shard (api.testsharder.Shard|None): The shard associated with the task or
        None if it's not a shard.

    Returns:
      An api.swarming.TaskRequest representing the swarming task request.
    """
    # To freely archive files from the build directory, the source, and those we
    # dynamically create, we create a tree of symlinks in a fresh directory and
    # isolate that. This solves the problems of (a) finding a root directory
    # that works for all artifacts, (b) being able to create files in that
    # directory without fear of collision, and (c) not having to isolate
    # extraneous files.
    isolate_tree = self.m.file.symlink_tree(root=self.m.path.mkdtemp('isolate'))

    # As part of running tests, we'll send a MinFS image over to another machine
    # which will be declared as a block device in QEMU, at which point
    # Fuchsia will mount it and write test output to. We choose 3.5G for the
    # MinFS image arbitrarily, as it appears it can hold our test output
    # comfortably without going overboard on size.
    #
    minfs_image_path = isolate_tree.root.join(self.TEST_RESULTS_MINFS_NAME)
    self.m.minfs.create(minfs_image_path, '3584M', name='create test image')

    ensure_file = self.m.cipd.EnsureFile()
    botanist_cmd = [
        './botanist',
        '-level', BOTANIST_LOG_LEVEL,
        'qemu',
        '-type', '%s' % qemu_type.lower(),
        '-qemu-dir', './%s/bin' % qemu_type.lower(),
        '-images', IMAGES_JSON,
        '-arch', build_artifacts.target,
        '-minfs', self.TEST_RESULTS_MINFS_NAME,
        '-pci-addr', TEST_FS_PCI_ADDR,
        '-use-kvm'
    ] # yapf: disable

    if secret_bytes:
      # Wrap botanist command with secretshim which starts the secrets server
      # before running the following command.
      botanist_cmd = ['./secretshim'] + botanist_cmd
      ensure_file.add_package('fuchsia/infra/secretshim/${platform}',
                              SECRETSHIM_CIPD_VERSION)

    # Sanitizer/profile variants need a beefier emulator configuration.
    if [v for v in ['asan', 'profile'] if v in build_artifacts.variants]:
      botanist_cmd.extend([
          '-cpu',
          str(8),
          '-memory',
          str(8192),
      ])

    # storage-full not being present signifies the exclusion of the system
    # partition, which means `boot` (i.e. running on boot) must be used instead
    # of `system` (i.e., running after the system partition is mounted).
    storage_free_build = STORAGE_FULL not in build_artifacts.images
    arg_key = 'zircon.autorun.%s' % ('boot' if storage_free_build else 'system')
    botanist_cmd.append('%s=/boot/bin/sh+/boot/%s' %
                        (arg_key, self._get_runcmds_path_per_shard(shard)))

    isolated_hash = self._isolate_build_artifacts(
        isolate_tree,
        build_artifacts,
        # To take advantage of KVM, we execute QEMU-arm tasks on arm hardware.
        test_bot_cpu=build_artifacts.target,
        legacy_qemu=True,
    )

    if qemu_type == 'AEMU':
      self.m.emu.add_aemu_to_ensure_file(ensure_file, subdir='aemu/bin')
    elif qemu_type == 'QEMU':
      self.m.emu.add_qemu_to_ensure_file(ensure_file, subdir='qemu')

    env_name = '%s-%s' % (qemu_type, build_artifacts.target)
    tags = {
        # consumed by google3 results uploader
        'test_environment_name': [env_name],
        # consumed by this recipe module
        'uses_legacy_qemu': ['true']
    }
    request = self.m.swarming.task_request().with_name(task_name).with_tags(
        tags)
    return (request.with_slice(0, request[0].
        with_command(botanist_cmd).
        with_isolated(isolated_hash).
        with_dimensions(pool=pool, os='Debian', cpu=build_artifacts.target, kvm='1').
        with_io_timeout_secs(swarming_io_timeout_secs).
        with_execution_timeout_secs(timeout_secs).
        with_expiration_secs(swarming_expiration_timeout_secs).
        with_secret_bytes(secret_bytes).
        with_outputs([self.TEST_RESULTS_MINFS_NAME]).
        with_cipd_ensure_file(ensure_file)
    )) #yapf: disable

  def _construct_device_task_request(self, task_name, device_type,
                                     build_artifacts, pool, pave, timeout_secs,
                                     swarming_expiration_timeout_secs,
                                     swarming_io_timeout_secs):
    """Constructs a Swarming task request to run Fuchsia tests on a device.

    Expects the build and artifacts to be at the same place they were at
    the end of the build.

    Args:
      task_name (str): The name to give the swarming task.
      device_type (str): The Swarming device_type dimension to target.
      build_artifacts (BuildArtifacts): The Fuchsia build artifacts to test.
      pool (str): Swarming pool from which the test task will be drawn.
      pave (bool): Whether or not the build artifacts should be paved.
      timeout_secs (int): The amount of seconds to wait for the tests to execute
        before giving up.
      swarming_expiration_timeout_secs (int): Seconds before an unscheduled
        task request expires.
      swarming_io_timeout_secs (int): The swarming task will be killed if it
        does not produce any output for this long.

    Returns:
      An api.swarming.TaskRequest representing the swarming task request.
    """
    # Construct the botanist command.
    botanist_cmd = [
        './botanist',
        '-level', BOTANIST_LOG_LEVEL,
        'zedboot',
        '-config', BOTANIST_DEVICE_CONFIG,
        '-images', IMAGES_JSON,
        '-results-dir', self.results_dir_on_target,
        '-out', self.TEST_RESULTS_ARCHIVE_NAME,
        '-serial-log', self.SERIAL_LOG_NAME,
    ] # yapf: disable
    if not pave:
      botanist_cmd.append('-netboot')

    # storage-full not being present signifies the exclusion of the system
    # partition, which means `boot` (i.e. running on boot) must be used instead
    # of `system` (i.e., running after the system partition is mounted).
    storage_free_build = STORAGE_FULL not in build_artifacts.images
    arg_key = 'zircon.autorun.%s' % ('boot' if storage_free_build else 'system')
    botanist_cmd.append('%s=/boot/bin/sh+/boot/%s' %
                        (arg_key, RUNCMDS_BOOTFS_PATH))

    # To freely archive files from the build directory, the source, and those we
    # dynamically create, we create a tree of symlinks in a fresh directory and
    # isolate that. This solves the problems of (a) finding a root directory
    # that works for all artifacts, (b) being able to create files in that
    # directory without fear of collision, and (c) not having to isolate
    # extraneous files.
    isolate_tree = self.m.file.symlink_tree(root=self.m.path.mkdtemp('isolate'))
    isolated_hash = self._isolate_build_artifacts(isolate_tree, build_artifacts)

    dimensions = {
        'pool': pool,
        'device_type': device_type,
    }
    # test_environment_name is consumed by the google3 results uploader.
    env_name = '%s-%s' % (device_type, build_artifacts.target)
    tags = {'test_environment_name': [env_name]}
    request = self.m.swarming.task_request().with_name(task_name).with_tags(
        tags)
    return (request.with_slice(0, request[0].
        with_command(botanist_cmd).
        with_isolated(isolated_hash).
        with_dimensions(**dimensions).
        with_expiration_secs(swarming_expiration_timeout_secs).
        with_io_timeout_secs(swarming_io_timeout_secs).
        with_execution_timeout_secs(timeout_secs).
        with_outputs([self.TEST_RESULTS_ARCHIVE_NAME, self.SERIAL_LOG_NAME])
    )) #yapf: disable

  def _create_runcmds_script(self, device_type, test_cmds, output_path):
    """Creates a script for running tests on boot."""
    # The device topological path is the toplogical path to the block device
    # which will contain test output.
    device_topological_path = '/dev/sys/pci/00:%s/virtio-block/block' % (
        TEST_FS_PCI_ADDR)

    # Script that mounts the block device to contain test output and runs tests,
    # dropping test output into the block device.
    results_dir = self.results_dir_on_target
    runcmds = [
        'mkdir %s' % results_dir,
    ]
    if self.m.emu.is_emulator_type(device_type):
      runcmds.extend([
          # Wait until the MinFS test image shows up (max <timeout> ms).
          'waitfor class=block topo=%s timeout=60000' % device_topological_path,
          'mount %s %s' % (device_topological_path, results_dir),
      ] + test_cmds + [
          'umount %s' % results_dir,
          'dm poweroff',
      ])
    else:
      runcmds.extend(test_cmds)

    # (Python 2) Normalize every line to a byte string before joining.
    runcmds_bytes = []
    for line in runcmds:
      if isinstance(line, unicode):
        runcmds_bytes.append(line.encode('utf-8'))
      elif isinstance(line, str):
        runcmds_bytes.append(line)
      else: # pragma: no cover
        assert False, 'line is not unicode or a str: %s, %s' % (line,
                                                                type(line))
    self.m.file.write_text('write runcmds', output_path,
                           '\n'.join(runcmds_bytes))

  def _isolate_build_artifacts(self,
                               isolate_tree,
                               build_artifacts,
                               shard=None,
                               test_bot_cpu='x64',
                               legacy_qemu=False):
    """Populates a tree with build artifacts and isolates it.

    Specifically, the following is linked into or created within the tree:
      - The images in the build are linked in and manifest of them is created
        in the root, if targeting a fuchsia device;
      - The Linux/Mac tests in the shard and their runtime dependencies.

    Args:
      isolate_tree (api.file.SymlinkTree): A tree into which artifacts may be
        linked.
      build_artifacts (BuildArtifacts): The result of a fuchsia build.
      shard (api.testsharder.Shard|None): A test shard.
      test_bot_cpu (str|None): The host cpu of the bot running the test task.
      legacy_qemu (bool): Whether to only isolate the images needed to run QEMU
        alone.

    Returns:
      The isolated hash that may be used to reference and download the
      artifacts.
    """

    def register_link(relpath):
      """Prepares a symlink of a relative path within the build directory to the tree."""
      isolate_tree.register_link(
          target=build_artifacts.fuchsia_build_dir.join(relpath),
          linkname=isolate_tree.root.join(relpath),
      )

    # TODO(IN-931): Remove `shard is None` condition once device and QEMU
    # codepaths are passing shard and using _construct_test_task_request().
    no_shard = shard is None
    if no_shard or shard.targets_fuchsia:
      image_list = build_artifacts.images.values()
      # In the case of an emulated target, we restrict what we isolate to the
      # bare essentials to avoid the needless downloading of several gigabytes
      # of images on the other end.
      is_emulated_target = (
          (no_shard and legacy_qemu) or
          (shard and self.m.emu.is_emulator_type(shard.device_type))
      ) # yapf: disable
      if is_emulated_target:
        image_list = [
            img for img in image_list
            if img['name'] in ['qemu-kernel', 'zircon-a', 'storage-full']
        ] # yapf: disable
      image_manifest_path = isolate_tree.root.join(IMAGES_JSON)
      self.m.file.write_json(
          'write image manifest', image_manifest_path, image_list, indent=2)
      for image in image_list:
        register_link(image['path'])

    if shard:
      for test in shard.tests:
        if test.os in ['linux', 'mac']:
          register_link(test.path)
      for dep in shard.deps:
        register_link(dep)

    # If targeting QEMU we include a private key corresponding to an authorized
    # key already in the boot image; this is needed as we do not pave QEMU.
    if shard and self.m.emu.is_emulator_type(shard.device_type):
      isolate_tree.register_link(
          target=build_artifacts.private_key,
          linkname=isolate_tree.root.join(
              build_artifacts.DEFAULT_ISOLATED_LAYOUT.private_key,),
      )

    # Host-side tools needed by the task command are linked in by basename so
    # they appear at the isolate root (e.g. ./botanist, ./testrunner).
    for tool in [
        build_artifacts.botanist(test_bot_cpu),
        build_artifacts.testrunner(test_bot_cpu),
        build_artifacts.llvm_symbolizer,
        build_artifacts.bootserver,
    ]:
      tool_name = self.m.path.basename(tool)
      isolate_tree.register_link(
          target=tool, linkname=isolate_tree.root.join(tool_name))

    isolate_tree.create_links('create tree of build artifacts')
    isolated = self.m.isolated.isolated(isolate_tree.root)
    isolated.add_dir(isolate_tree.root)
    return isolated.archive('isolate build artifacts')

  def _create_test_list(self, shard):
    """Writes the paths of the shard's tests to a file and returns its path."""
    test_locations = []
    for test in shard.tests:
      test_locations.append(test.path)
    test_list_path = self.m.path['cleanup'].join('tests-%s' %
                                                 self._normalize(shard.name))
    self.m.file.write_text(
        name='write test list',
        dest=test_list_path,
        text_data='\n'.join(test_locations) + '\n',
    )
    return test_list_path

  def _normalize(self, name):
    """Makes a shard name safe for use in file names (no spaces or parens)."""
    return name.replace(' ', '_').replace('(', '').replace(')', '')

  def _get_runcmds_path_per_shard(self, shard=None):
    """Returns the BootFS path of the runcmds script for the given shard."""
    if not shard:
      return RUNCMDS_BOOTFS_PATH
    return '%s-%s' % (RUNCMDS_BOOTFS_PATH, self._normalize(shard.name))

  def _uses_legacy_qemu(self, shard):
    """Whether the shard must use the legacy (non-SSH) QEMU code path."""
    return (not self.m.experimental.ssh_into_qemu and
            self.m.emu.is_emulator_type(shard.device_type))

  def _install_runcmds_files(self,
                             build_artifacts,
                             test_in_shards=False,
                             per_test_timeout_secs=None,
                             device_type=None,
                             pave=False,
                             test_cmds=None,
                             in_place=False):
    """Creates the files used to invoke runtests on boot.

    This is only necessary for QEMU shards, which are the only shards that
    use runcmds, and the non-sharding codepath.

    Args:
      build_artifacts (BuildArtifacts): The build artifacts whose ZBI is
        extended with the runcmds manifest; mutated when a new ZBI is written.
      test_in_shards (bool): Whether testing is done in shards; requires
        in_place.
      per_test_timeout_secs (int|None): Per-test timeout forwarded to runtests
        via -i (sharded legacy-QEMU path only).
      device_type (str|None): Device type for the non-sharded path; required
        when test_in_shards is False.
      pave (bool): Whether the device will be paved (non-sharded, non-emulator
        path); selects which bootserver args identify the boot image.
      test_cmds (list[str]|None): Boot-time test commands for the non-sharded
        path; required when test_in_shards is False.
      in_place (bool): Whether to modify the existing ZBI rather than writing
        a new 'test-infra.zbi'.
    """
    self.m.zbi.zbi_path = build_artifacts.zbi
    manifest = {}
    zbi_name = 'zircon-a'
    new_zbi_filename = None
    new_zbi_path = None
    if test_in_shards:
      # if testing in shards, zbi file should be modified once for all shards in
      # place before uploading through artifactory.
      assert in_place
      needs_key = False
      for shard in build_artifacts.shards:
        if self.m.emu.is_emulator_type(shard.device_type):
          if self._uses_legacy_qemu(shard):
            # Legacy QEMU shards boot into runtests driven by a per-shard
            # test-list file and a per-shard runcmds script.
            test_list_path = self._create_test_list(shard)
            runtests_file_bootfs_path = 'infra/shard-%s.run' % self._normalize(
                shard.name)
            runcmds_path = self.m.path['cleanup'].join(
                'runcmds-%s' % self._normalize(shard.name))
            runtests_cmd_parts = [
                'runtests', '-o', self.results_dir_on_target, '-f',
                '/boot/%s' % runtests_file_bootfs_path
            ]
            if per_test_timeout_secs:
              runtests_cmd_parts.extend(['-i', '%d' % per_test_timeout_secs])
            self._create_runcmds_script(
                device_type=shard.device_type,
                test_cmds=[' '.join(runtests_cmd_parts)],
                output_path=runcmds_path,
            )
            manifest[self._get_runcmds_path_per_shard(shard)] = runcmds_path
            manifest[runtests_file_bootfs_path] = test_list_path
          else:
            # Non-legacy emulator shards are reached over SSH, so the boot
            # image needs the authorized key instead of a runcmds script.
            needs_key = True
      if needs_key:
        manifest[AUTHORIZED_KEY_PATH] = build_artifacts.authorized_key
    else:
      assert device_type and test_cmds
      if not in_place:
        new_zbi_filename = 'test-infra.zbi'
        new_zbi_path = build_artifacts.fuchsia_build_dir.join(new_zbi_filename)
      if not self.m.emu.is_emulator_type(device_type):
        # Find the image that bootserver is told to --boot from, under the
        # netboot or pave argument set as appropriate.
        zbi_name = next(
            (image['name']
             for image in build_artifacts.images.values()
             if '--boot' in image.get(
                 'bootserver_%s' % ('netboot' if not pave else 'pave'), [])),
            None)
        assert zbi_name, 'Could not find kernel image to boot.'
      runcmds_path = self.m.path['cleanup'].join('runcmds')
      self._create_runcmds_script(device_type, test_cmds, runcmds_path)
      manifest[RUNCMDS_BOOTFS_PATH] = runcmds_path
    # Inject the runcmds script and/or authorized key into the bootfs image.
    if manifest and zbi_name in build_artifacts.images:
      self.m.zbi.copy_and_extend(
          step_name='create zbi',
          input_image=build_artifacts.fuchsia_build_dir.join(
              build_artifacts.images[zbi_name]['path']),
          output_image=new_zbi_path or build_artifacts.fuchsia_build_dir.join(
              build_artifacts.images[zbi_name]['path']),
          manifest=manifest,
      )
      if new_zbi_filename:
        build_artifacts.images[zbi_name]['path'] = new_zbi_filename