# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import attr
from recipe_engine import recipe_api
from RECIPE_MODULES.fuchsia.gce import api as gce_api
# The path to the botanist config on the host.
BOTANIST_DEVICE_CONFIG = "/etc/botanist/config.json"
# The log level to use for botanist invocations in test tasks. Can be one of
# "fatal", "error", "warning", "info", "debug", or "trace", where "trace" is
# the most verbose and "fatal" is the least.
BOTANIST_LOG_LEVEL = "debug"
# Name of image manifest produced by the build.
IMAGES_JSON = "images.json"
_PRIVATE_KEY_PATH = "private_key"
GCSPROXY_CIPD_REVISION = "git_revision:9f8534bc114000de0b4e3a85738bcb0e7af06109"
NSJAIL_CIPD_REVISION = "build_id:8840659429159557552"
@attr.s
class TaskRequester:
"""Creates requests for swarming tasks that run tests."""
_api = attr.ib()
_buildbucket_build = attr.ib()
_pave = attr.ib(type=bool)
_pool = attr.ib(type=str)
_swarming_expiration_timeout_secs = attr.ib(type=int)
_swarming_io_timeout_secs = attr.ib(type=int)
_timeout_secs = attr.ib(type=int)
_use_runtests = attr.ib(type=bool)
_default_service_account = attr.ib(type=str)
_targets_serial = attr.ib(type=bool)
_catapult_dashboard_master = attr.ib(type=str)
_catapult_dashboard_bot = attr.ib(type=str)
_release_branch = attr.ib(type=str)
_release_version = attr.ib(type=str)
_zircon_args = attr.ib(type=list)
_gcem_cloud_project = attr.ib(type=str, default=None)
_gcem_host = attr.ib(type=str, default=None)
_gcem_machine_shape = attr.ib(type=str, default=None)
_use_ffx = attr.ib(type=bool, default=False)
_ffx_experiment_level = attr.ib(type=int, default=0)
_use_cas = attr.ib(type=bool, default=False)
_enable_sandboxing = attr.ib(type=bool, default=False)
def botanist_request(self, shard, build_results):
"""Returns a swarming.TaskRequest that uses `botanist` or `testrunner`
to run tests.
        It constructs the appropriate flags to run botanist or testrunner
        and passes them to TaskRequester.request(), which creates the task request.
Args:
shard (testsharder.Shard): Test shard.
build_results (FuchsiaBuildResults): The Fuchsia build results to test.
"""
# Copy since we modify it for each shard.
build_results = copy.deepcopy(build_results)
        # To freely archive files from the build directory, the source checkout, and
        # those we dynamically create, we create a tree of files in a fresh directory
        # and upload that. This solves the problems of (a) finding a root directory
        # that works for all artifacts, (b) being able to create files in that
        # directory without fear of collision, and (c) not having to upload
        # extraneous files.
task_input_tree = self._api.cas_util.tree(
root=self._api.path.mkdtemp("isolate")
)
# Some artifacts are within the checkout root directory but not in the
# build directory. Thus we need to map the task input tree root to the
# checkout root directory instead. However, since the paths in the test
# manifest are relative to the build directory, we use the relative
# build directory as the relative cwd of the swarming task.
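        # For example (paths hypothetical): if the checkout root is /checkout
        # and the build directory is /checkout/out/default, relative_cwd will
        # be "out/default".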
relative_cwd = self._api.path.relpath(
build_results.build_dir, build_results.checkout.root_dir
)
self._api.file.ensure_directory(
"ensure relative cwd", task_input_tree.root.join(relative_cwd)
)
task_input_tools = ["testrunner"]
test_manifest = "tests.json"
self._api.file.write_json(
"write test manifest",
task_input_tree.root.join(relative_cwd, test_manifest),
shard.tests,
indent=2,
)
flags = [
"-out-dir",
            # testrunner writes the out-dir directly to the task outputs dir,
            # so there's no need to add it to the expected outputs. In fact,
            # the out-dir shares its name with a subdirectory of the task
            # inputs that gets mapped to the swarming task root directory, so
            # listing the out-dir as an expected output would cause that
            # directory in the root directory to be symlinked into the
            # outputs as well.
self._api.testing_requests.TEST_RESULTS_DIR_NAME,
"-snapshot-output",
self._api.testing_requests.SNAPSHOT_NAME,
]
image_manifest = "%s/%s" % (self._api.artifacts.image_url(), IMAGES_JSON)
# If we're using CAS for delivery, then the testsharder will have
# provisioned the images as part of the CAS inputs, so the image
# manifest will be present in the working directory.
if self._use_cas:
image_manifest = IMAGES_JSON
flags.append("-prefetch-packages")
if self._use_runtests:
flags.append("-use-runtests")
if "bringup" in build_results.set_metadata.product:
flags.append("-use-serial")
is_emu_type = self._api.emu.is_emulator_type(shard.device_type)
test_bot_cpu = build_results.set_metadata.target_arch if is_emu_type else "x64"
if test_bot_cpu == "x64":
# Relevant for automatic symbolization of things running on host. Only
# the x64 variation is available in the checkout and we have nothing
# that runs on an arm host that needs symbolizing.
task_input_tools.append("llvm-symbolizer")
is_linux = not shard.os or shard.os.lower() == "linux"
# TODO(rudymathu): Eventually we may want to distribute nsjail to ARM64
# bots, but we don't have an nsjail build on ARM set up yet.
# We also may want to eventually support doing this on device shards,
# but our on-device testing infrastructure doesn't support nsjail
# sandboxing yet.
if (
self._enable_sandboxing
and (is_emu_type or not shard.device_type)
and test_bot_cpu == "x64"
and is_linux
):
flags.extend(
[
"-nsjail",
"./nsjail",
"-nsjail-root",
# nsjail is run within relative_cwd but needs the root
# directory containing all the task inputs to mount.
self._api.path.relpath(
task_input_tree.root, task_input_tree.root.join(relative_cwd)
),
]
)
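            # For example (hypothetically), with relative_cwd "out/default"
            # the -nsjail-root flag resolves to "../..".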
flags.append(test_manifest)
        # If targeting emu, we include a private key corresponding to an authorized
        # key already in the boot image; this is needed as we do not pave emu.
if is_emu_type and self._pave:
task_input_tree.register_link(
target=build_results.private_key,
linkname=task_input_tree.root.join(relative_cwd, _PRIVATE_KEY_PATH),
)
# Used to dynamically extend fvm.blk, which is relevant only to emu.
task_input_tools.append("fvm")
return self.request(
shard,
build_results,
flags,
task_input_tree,
task_input_tools,
image_manifest,
BOTANIST_LOG_LEVEL,
relative_cwd,
)
def request(
self,
shard,
build_results,
flags,
task_input_tree,
task_input_tools,
image_manifest,
log_level,
relative_cwd,
subcmd_outputs=(),
):
"""Returns a swarming.TaskRequest for the specified shard.
This function handles all the common logic for setting up test tasks
such as isolating the necessary tools and constructing the proper
botanist command depending on the details of the provided shard and
build_results.
Args:
shard (testsharder.Shard): Test shard.
build_results (FuchsiaBuildResults): The Fuchsia build results to test.
flags (list(str)): The flags to run botanist or testrunner with on the
task.
task_input_tree (api.cas_util.ArchiveTree): The tree into which the
inputs for the task are linked or copied.
task_input_tools (list(str)): A list of tools to add to the task
inputs.
image_manifest (str): The image manifest path or URL to pass to the
botanist command.
log_level (str): Passed to the botanist command.
relative_cwd (str): The directory relative to the task input tree root
from where to run the swarming task command.
subcmd_outputs (list(str)): The expected outputs of the subcmd, added
to the expected outputs of the task.
"""
cmd = []
outputs = []
ensure_file = self._api.cipd.EnsureFile()
dimensions = {"pool": self._pool}
is_emu_type = self._api.emu.is_emulator_type(shard.device_type)
is_gce_type = shard.device_type == "GCE"
# To take advantage of KVM, we execute emu-arm tasks on arm hardware.
test_bot_cpu = build_results.set_metadata.target_arch if is_emu_type else "x64"
# Non-bringup products require ssh access to the target. The bringup
# product and zbi tests require serial. A zbi test shard is
# distinguished by a non-empty image_overrides field.
need_ssh = "bringup" not in build_results.set_metadata.product
need_serial = not need_ssh or bool(shard.image_overrides)
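        # For example, a "bringup" product needs serial but not ssh, while
        # other products need ssh (and serial only for zbi test shards).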
# This command spins up a metadata server that allows its subcommands to
        # automagically authenticate with LUCI auth, provided the sub-exec'ed tool
        # was written in Go or Dart and respectively makes use of the standard
# cloud.google.com/go/compute/metadata or
# github.com/dart-lang/googleapis_auth authentication libraries. Such
# libraries look for a metadata server under environment variables
# like $GCE_METADATA_HOST, which LUCI emulates.
service_account = shard.service_account or self._default_service_account
if service_account:
# TODO(fxbug.dev/37142): Find a way to use the version that LUCI is
# currently using, instead of 'latest'.
ensure_file.add_package(
"infra/tools/luci-auth/${platform}", "latest", subdir=relative_cwd
)
# We specify -scopes in order to append "https://www.googleapis.com/auth/nest-account".
# Default Scopes when using 'luci-auth context':
# https://source.chromium.org/chromium/infra/infra/+/main:go/src/go.chromium.org/luci/auth/client/authcli/authcli.go;l=229;drc=8e944005719b0d612a63263176ec2a75ee78a850
scopes = [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/firebase",
"https://www.googleapis.com/auth/gerritcodereview",
"https://www.googleapis.com/auth/userinfo.email",
"https://www.googleapis.com/auth/nest-account",
]
cmd.extend(["./luci-auth", "context", "-scopes", " ".join(scopes), "--"])
if is_emu_type:
dimensions.update(
os="Debian", cpu=build_results.set_metadata.target_arch, kvm="1"
)
elif is_gce_type:
# Have any GCE shards target the GCE executors, which are e2-2
# machines running Linux.
dimensions.update(os="Linux", cores="2", gce="1")
else:
# No device -> no serial.
if self._targets_serial and shard.device_type:
dimensions["serial"] = "1"
dimensions.update(shard.dimensions)
# Ensure we use GCE VMs whenever possible.
is_linux = not shard.os or shard.os.lower() == "linux"
if is_linux and not is_gce_type and test_bot_cpu == "x64":
if self._enable_sandboxing:
ensure_file.add_package(
"infra/3pp/tools/nsjail/${platform}",
NSJAIL_CIPD_REVISION,
subdir=relative_cwd,
)
dimensions["kvm"] = "1"
if (
(is_emu_type or not shard.device_type)
and test_bot_cpu == "x64"
and is_linux
):
dimensions["gce"] = "1"
dimensions["cores"] = "8"
# Default to the dynamically computed shard timeout, but allow
# overriding in case the dynamically computed timeouts are too short for
# some reason.
shard_timeout_secs = shard.timeout_secs
if self._timeout_secs:
shard_timeout_secs = self._timeout_secs
if shard.targets_fuchsia:
# EMU and non-EMU tasks all use the same serial log name and directory
# so add it to the expected outputs for all tasks that target Fuchsia.
outputs.append(self._api.testing_requests.SERIAL_LOG_DIR)
# We expect the serial log and directory to be in the swarming task
# root directory, but the botanist command is run within
# relative_cwd, so the path provided to botanist should be relative
# to the relative_cwd.
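            # For example (hypothetically), with relative_cwd "out/default"
            # serial_log_path resolves to "../../serial_logs/serial_log.txt".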
serial_log_path = self._api.path.relpath(
self._api.testing_requests.SERIAL_LOG_DIR + "/serial_log.txt",
relative_cwd,
)
serial_log_dir = self._api.path.relpath(
self._api.testing_requests.SERIAL_LOG_DIR,
relative_cwd,
)
cmd.extend(
[
"./botanist",
"-level",
log_level,
"run",
"-images",
image_manifest,
"-timeout",
"%ds" % shard_timeout_secs,
]
)
if shard.image_overrides:
cmd.extend(
["-image-overrides", self._api.json.dumps(shard.image_overrides)]
)
if self._use_ffx or self._ffx_experiment_level > 0:
cmd.extend(["-ffx", "./ffx"])
if self._ffx_experiment_level > 0:
cmd.extend(
["-ffx-experiment-level", str(self._ffx_experiment_level)]
)
task_input_tools.append("ffx")
if shard.pkg_repo:
outputs.append(self._api.testing_requests.BLOB_DOWNLOAD_MANIFEST)
blob_manifest_relpath = self._api.path.relpath(
self._api.testing_requests.BLOB_DOWNLOAD_MANIFEST,
relative_cwd,
)
cmd.extend(
[
"-local-repo",
shard.pkg_repo,
"-download-manifest",
blob_manifest_relpath,
]
)
# In the emulator case, serial is redirected to stdio.
if not is_emu_type:
cmd.extend(["-serial-log-dir", serial_log_dir])
if need_ssh:
# We expect the syslogs to be in the swarming task root
# directory, but the path provided to botanist should be
# relative to the relative_cwd.
cmd.extend(
[
"-syslog-dir",
self._api.path.relpath(
self._api.testing_requests.SYSLOG_DIR, relative_cwd
),
]
)
# Testing on GCE does not yet support ephemerality.
if not is_gce_type:
cmd.extend(
[
"-repo",
self._api.artifacts.package_repo_url(),
"-blobs",
self._api.artifacts.package_blob_url(),
]
)
outputs.append(self._api.testing_requests.SYSLOG_DIR)
if is_emu_type:
cmd.extend(["-ssh", _PRIVATE_KEY_PATH])
if not self._pave:
cmd.append("-netboot")
for arg in self._zircon_args:
cmd.extend(["-zircon-args", arg])
config = BOTANIST_DEVICE_CONFIG
if is_emu_type:
config = "./qemu.json"
qemu_config = [
{
"type": shard.device_type.lower(),
"path": "./%s/bin" % shard.device_type.lower(),
"edk2_dir": "./edk2",
"target": build_results.set_metadata.target_arch,
"cpu": 4,
"memory": self._api.emu.get_memory_for_variant(build_results),
"kvm": True,
                    # Directs botanist to run the emu process in a way that lets us
                    # synthesize a 'serial device'. We only need this when serial is
                    # used to execute tests (the bringup case and zbi tests);
                    # restricting it to that minimal case is especially important as
                    # this mode tends to slow certain processes down.
"serial": need_serial,
# Added to the root below in _upload_build_artifacts().
# Used to dynamically extend fvm.blk to fit downloaded test
# packages.
"fvm_tool": "fvm" if self._pave else "",
"logfile": serial_log_path,
}
]
if need_ssh:
# Used to embed the ssh key into the zbi.
qemu_config[0]["zbi_tool"] = "zbi"
task_input_tools.append("zbi")
# UEFI-related emulator firmware.
self._api.emu.add_edk2_to_ensure_file(
ensure_file,
checkout=build_results.checkout.root_dir,
subdir=self._api.path.join(relative_cwd, "edk2"),
)
if shard.device_type == "AEMU":
self._api.emu.add_aemu_to_ensure_file(
ensure_file,
checkout=build_results.checkout.root_dir,
subdir=self._api.path.join(relative_cwd, "aemu/bin"),
)
elif shard.device_type == "QEMU":
self._api.emu.add_qemu_to_ensure_file(
ensure_file,
checkout=build_results.checkout.root_dir,
subdir=self._api.path.join(relative_cwd, "qemu"),
)
self._api.file.write_json(
"write qemu config",
task_input_tree.root.join(relative_cwd, "qemu.json"),
qemu_config,
indent=2,
)
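            # qemu.json lands in relative_cwd alongside tests.json and is
            # referenced by the -config flag added below.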
elif is_gce_type:
config = "./gce.json"
ensure_file.add_package(
gce_api.GCEM_CLIENT_CIPD_PATH,
gce_api.GCEM_CLIENT_CIPD_REVISION,
subdir=relative_cwd,
)
self._api.gce.create_botanist_config(
self._gcem_host,
self._gcem_cloud_project,
self._gcem_machine_shape,
self._api.buildbucket.build.infra.swarming.parent_run_id,
task_input_tree.root.join(relative_cwd, "gce.json"),
)
cmd.extend(["-config", config])
else:
cmd.extend(["./testrunner", "-level", log_level])
cmd.extend(flags)
outputs.extend(subcmd_outputs)
task_inputs_digest = self._api.testing_requests._upload_build_artifacts(
task_input_tree,
relative_cwd,
build_results,
shard=shard,
test_bot_cpu=test_bot_cpu,
extra_tools=task_input_tools,
)
tags = self._api.testing_requests.test_task_tags(
self._buildbucket_build,
build_results,
env_name="%s-%s"
% (shard.device_type or shard.os, build_results.set_metadata.target_arch),
task_name=shard.name,
)
request = (
self._api.swarming.task_request()
.with_name(shard.name)
.with_tags(self._api.testing_requests.create_swarming_tags(tags))
)
if service_account:
request = request.with_service_account(service_account)
return request.with_slice(
0,
request[0]
.with_command(cmd)
.with_cas_input_root(task_inputs_digest)
.with_relative_cwd(relative_cwd)
.with_dimensions(**dimensions)
.with_expiration_secs(self._swarming_expiration_timeout_secs)
.with_io_timeout_secs(self._swarming_io_timeout_secs)
# Use a slightly longer timeout for the swarming task execution
# timeout to allow botanist to handle the timeout itself.
.with_execution_timeout_secs(shard_timeout_secs + 60)
.with_outputs(outputs)
.with_cipd_ensure_file(ensure_file)
.with_env_vars(
**self._api.testing_requests.test_task_env_vars(
self._buildbucket_build,
shard.device_type,
build_results,
catapult_dashboard_master=self._catapult_dashboard_master,
catapult_dashboard_bot=self._catapult_dashboard_bot,
release_branch=self._release_branch,
release_version=self._release_version,
image_manifest=image_manifest,
)
),
)
class TestingRequestsApi(recipe_api.RecipeApi):
"""APIs for constructing Swarming task requests to test Fuchsia."""
BLOB_DOWNLOAD_MANIFEST = "blob_downloads.json"
SNAPSHOT_NAME = "snapshot.zip"
SERIAL_LOG_DIR = "serial_logs"
SYSLOG_DIR = "syslogs"
    # What to name the file that contains the swarming task output. Not used
    # directly by this recipe module, but it is used by most clients of this
    # module, so it makes sense to define it here.
TEST_TASK_OUTPUT_FILE = "infra_and_test_std_and_klog.txt"
TEST_RESULTS_DIR_NAME = "out"
# The name of the tag to set on every task request that contains the name
# of the shard's environment (device type/OS and architecture).
TEST_ENVIRONMENT_TAG_NAME = "test_environment_name"
def task_requests(
self,
build_results,
buildbucket_build,
pool,
shards,
swarming_expiration_timeout_secs,
swarming_io_timeout_secs,
use_runtests,
timeout_secs,
default_service_account=None,
pave=True,
targets_serial=False,
catapult_dashboard_master=None,
catapult_dashboard_bot=None,
release_branch=None,
release_version=None,
test_on_gce=False,
zircon_args=(),
gcem_host=None,
gcem_cloud_project=None,
gcem_machine_shape=None,
use_ffx=False,
ffx_experiment_level=0,
use_cas=False,
enable_sandboxing=False,
):
"""Returns a swarming.TaskRequest for each shard in build_artifact.shards.
Args:
build_results (FuchsiaBuildResults): The Fuchsia build results to test.
buildbucket_build (build_pb2.Build): The buildbucket build that is going
to orchestrate testing.
pool (str): The Swarming pool to schedule test tasks in.
            shards (list of testsharder.Shard): Test shards.
            swarming_expiration_timeout_secs (int): The number of seconds a task
                may stay pending before swarming expires it.
            swarming_io_timeout_secs (int): The number of seconds a task may run
                without emitting output before swarming kills it.
            use_runtests (bool): Whether to use runtests (or else run_test_component)
                when executing tests on target.
            timeout_secs (int): The number of seconds to wait for the tests to
                execute before giving up.
default_service_account (str or None): The default service account to run the
test task with. This is required for fetching images from GCS.
pave (bool): Whether to pave (or else 'netboot') the system; this is
effectively equivalent to "not bringup" and is treated as such (even for
QEMU).
            targets_serial (bool): Whether the task should target a bot with serial
                enabled.
            catapult_dashboard_master (str or None): The Catapult dashboard master
                name; if set along with the bot name, passed to tasks as an env var
                to enable uploading test results to Catapult.
            catapult_dashboard_bot (str or None): The Catapult dashboard bot name;
                see catapult_dashboard_master.
release_branch (str or None): The release branch corresponding to
the checkout. Passed as a task environment variable.
release_version (release.ReleaseVersion or None): The release version
that we checked out. Passed as a task environment variable.
test_on_gce (bool): Whether to run shards with device_type=GCE.
zircon_args (list(str)): Kernel command-line arguments to pass on boot.
gcem_host (str): The endpoint the GCE Mediator can be found at.
gcem_cloud_project (str): The cloud project the Mediator should create VMs in.
gcem_machine_shape (str): The machine shape the Mediator should create.
use_ffx (bool): Whether to enable using ffx in infra.
ffx_experiment_level (int): The level of experimental ffx features to
enable in infra.
use_cas (bool): Whether to deliver images and blobs via CAS.
enable_sandboxing (bool): Whether to enable sandboxing for host tests.
"""
task_requester = self.get_task_requester(
buildbucket_build=buildbucket_build,
pave=pave,
pool=pool,
swarming_expiration_timeout_secs=swarming_expiration_timeout_secs,
swarming_io_timeout_secs=swarming_io_timeout_secs,
timeout_secs=timeout_secs,
use_runtests=use_runtests,
default_service_account=default_service_account,
targets_serial=targets_serial,
catapult_dashboard_master=catapult_dashboard_master,
catapult_dashboard_bot=catapult_dashboard_bot,
release_branch=release_branch,
release_version=release_version,
zircon_args=zircon_args,
gcem_host=gcem_host,
gcem_cloud_project=gcem_cloud_project,
gcem_machine_shape=gcem_machine_shape,
use_ffx=use_ffx,
ffx_experiment_level=ffx_experiment_level,
use_cas=use_cas,
enable_sandboxing=enable_sandboxing,
)
futures = []
for shard in shards:
if shard.device_type == "GCE" and not test_on_gce:
# Do not generate a task request for GCE shards if the
# test_on_gce flag is disabled.
continue
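            # Pass shard through spawn() below rather than closing over the
            # loop variable, so each future binds its own shard.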
def create_request(shard):
with self.m.step.nest("shard %s" % shard.name):
return task_requester.botanist_request(shard, build_results)
futures.append(self.m.futures.spawn(create_request, shard))
self.m.futures.wait(futures)
return [f.result() for f in futures]
def get_task_requester(self, **kwargs):
"""Returns a TaskRequester with self.m as the api.
This allows all dependencies of TaskRequester to only need to be
imported into the testing_requests module.
"""
return TaskRequester(self.m, **kwargs)
def test_task_env_vars(
self,
build,
device_type,
build_results,
image_manifest,
catapult_dashboard_master=None,
catapult_dashboard_bot=None,
release_branch=None,
release_version=None,
):
"""Returns the environment variables to be set for the test task.
Returns:
A dict mapping string env var names to string values.
"""
# Note that this will sometimes point to the wrong commit for tryjobs,
# which re-resolve HEAD on the fly rather than respecting the input
# commit. Therefore its `id` field should not be used.
commit = build.input.gitiles_commit
commit_host = commit.host
commit_ref = commit.ref
del commit
llvm_symbolizer = self.m.path.basename(build_results.tool("llvm-symbolizer"))
env_vars = dict(
# `${ISOLATED_OUTDIR}` is a magic string that Swarming will replace
# with a temporary directory whose contents will be automatically
# uploaded to CAS upon exit of a task.
FUCHSIA_TEST_OUTDIR="${ISOLATED_OUTDIR}",
# Used by performance tests and OTA tests.
BUILDBUCKET_ID=str(build.id) if build.id else None,
# Used for symbolization:
ASAN_SYMBOLIZER_PATH=llvm_symbolizer,
LSAN_SYMBOLIZER_PATH=llvm_symbolizer,
TSAN_OPTIONS="external_symbolizer_path=" + llvm_symbolizer,
UBSAN_SYMBOLIZER_PATH=llvm_symbolizer,
# Used by performance and e2e tests.
# TODO(fxbug.dev/50210): Don't fall back to time.time() once led
# starts setting create_time again.
BUILD_CREATE_TIME=str(build.create_time.seconds or int(self.m.time.time())),
# Used by e2e tests.
BUILDER_NAME=build.builder.builder,
# Used by e2e tests.
FUCHSIA_DEVICE_TYPE=device_type,
# Used by e2e tests.
INPUT_COMMIT_HOST=commit_host,
RELEASE_BRANCH=release_branch,
# Used by performance tests.
RELEASE_VERSION=str(release_version) if release_version else None,
# Used by the fuchsia-specific Swarming pre-task hook.
BOOTSERVER_PATH=(
"./" + self.m.path.basename(build_results.tool("bootserver_new"))
),
# Used by the fuchsia-specific Swarming pre-task hook.
IMAGE_MANIFEST_PATH=image_manifest,
SWARMING_BOT_FILE="${SWARMING_BOT_FILE}",
)
env_vars.update(
self.get_catapult_dashboard_env_vars(
catapult_dashboard_master, catapult_dashboard_bot, commit_ref
)
)
# For some reason, empty string environment variables sent to the swarming
# API get interpreted as null and rejected. So don't bother sending them to
# avoid breaking the task request.
# TODO(olivernewman): Figure out whether this logic should be moved into
# the upstream swarming module (or obviated by fixing the "" -> null
# behavior).
return {k: v for k, v in env_vars.items() if v}
def test_task_tags(self, buildbucket_build, build_results, env_name, task_name):
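        """Returns the tags to set on a test task's swarming request.
        Args:
            buildbucket_build (build_pb2.Build): The buildbucket build that is
                orchestrating testing.
            build_results (FuchsiaBuildResults): The Fuchsia build results under
                test.
            env_name (str): The name of the shard's test environment (device
                type/OS and architecture).
            task_name (str): The name of the swarming task (the shard name).
        """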
return {
"board": (
build_results.set_metadata.board
or build_results.set_metadata.target_arch
),
"build_type": build_results.set_metadata.optimize,
"buildbucket_bucket": buildbucket_build.builder.bucket,
"buildbucket_builder": buildbucket_build.builder.builder,
"product": build_results.set_metadata.product,
"role": "tester",
"task_name": task_name,
# Consumed by google3 results uploader, and by the orchestrator
# when uploading to resultdb.
self.TEST_ENVIRONMENT_TAG_NAME: env_name,
"variants": build_results.set_metadata.variants,
}
def _upload_build_artifacts(
self,
artifact_tree,
relative_cwd,
build_results,
shard=None,
test_bot_cpu="x64",
extra_tools=(),
):
"""Populates a tree with build artifacts and uploads it.
Specifically, the following is linked into or created within the tree:
        - The images in the build are linked in, and a manifest of them is
          created in the root, if targeting a Fuchsia device;
- The Linux/Mac tests in the shard and their runtime dependencies.
Args:
artifact_tree (api.cas_util.ArchiveTree): A tree into which artifacts may be
linked.
relative_cwd (str): The directory relative to the artifact tree root
from where the swarming task command will be run.
build_results (FuchsiaBuildResults): The result of a fuchsia build.
shard (api.testsharder.Shard or None): A test shard.
test_bot_cpu (str or None): The host cpu of the bot running the test task.
extra_tools (list(str)): A list of extra tools to add to the tree.
Returns:
The CAS digest that may be used to reference and download the
artifacts.
"""
if shard:
for dep in shard.deps:
# Prepare a link of a relative path within the build
# directory to the tree.
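                # For example (path hypothetical), a dep of "host_x64/foo_test"
                # links <build_dir>/host_x64/foo_test to
                # <tree root>/<relative_cwd>/host_x64/foo_test.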
target = build_results.build_dir.join(dep)
self.m.path.mock_add_paths(target)
if not self.m.path.exists(target): # pragma: no cover
raise self.m.step.StepFailure(
"Dependency for shard %s was not built: %s" % (shard.name, dep)
)
artifact_tree.register_link(
target=build_results.build_dir.join(dep),
linkname=artifact_tree.root.join(relative_cwd, dep),
)
# TODO(fxb/38517): s/bootserver_new/bootserver.
tools = ["botanist", "bootserver_new"] + extra_tools
for tool_name in tools:
tool = build_results.tool(tool_name, cpu=test_bot_cpu)
artifact_tree.register_link(
target=tool, linkname=artifact_tree.root.join(relative_cwd, tool_name)
)
artifact_tree.create_links(
"create tree of build artifacts",
# Some Fuchsia tests have dependencies that must be symlinks.
preserve_symlinks=True,
)
digest = self.m.cas_util.upload(
artifact_tree.root,
step_name="isolate build artifacts",
)
# When using CAS, files are copied into the artifact_tree instead of
# linked, so remove the tree after uploading it to save disk space.
self.m.file.rmtree("remove isolate tree", artifact_tree.root)
return digest
def create_swarming_tags(self, tags):
"""Creates a properly formatted tags dict to pass to a swarming task.
Args:
tags (dict): A dictionary of key-value pairs of the desired tags.
Returns:
A dictionary where the keys are strings and the values are lists of
strings.
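        Example:
            create_swarming_tags({"role": "tester", "variants": ["asan"]})
            => {"role": ["tester"], "variants": ["asan"]}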
"""
swarming_tags = {}
for k, val in tags.items():
if not val:
val = []
elif isinstance(val, str):
val = [val]
swarming_tags[str(k)] = [str(i) for i in val]
return swarming_tags
# This is included in the API just so that it can be unit-tested.
def get_catapult_dashboard_env_vars(self, master_name, bot_name, commit_ref):
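        """Computes Catapult dashboard env vars from the dashboard names.
        Example (names hypothetical):
            get_catapult_dashboard_env_vars(
                "fuchsia.ci", "fuchsia-x64", "refs/heads/releases/f5"
            )
            => {"CATAPULT_DASHBOARD_MASTER": "fuchsia.ci.f5",
                "CATAPULT_DASHBOARD_BOT": "fuchsia-x64"}
        """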
if not master_name and not bot_name:
# Uploading to Catapult is disabled.
return {}
if not (master_name and bot_name):
raise ValueError(
"Catapult master and bot names not set consistently: %r, %r"
% (master_name, bot_name)
)
prefix = "refs/heads/releases/"
if commit_ref.startswith(prefix):
branch_name = commit_ref[len(prefix) :]
master_name += "." + branch_name
elif commit_ref != "refs/heads/main":
# Unrecognized Git branch/tag name. Disable uploading to Catapult
# by not setting the env vars.
return {}
return dict(
CATAPULT_DASHBOARD_MASTER=master_name, CATAPULT_DASHBOARD_BOT=bot_name
)