# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Pylint is not smart enough to infer the return type of methods with a custom
# property decorator like @cached_property, so we have to disable some spurious
# warnings from cached property accesses. See
# https://github.com/PyCQA/pylint/issues/3484
#
# pylint: disable=no-member, not-an-iterable, not-context-manager
# pylint: disable=unsubscriptable-object
import collections
import contextlib
import copy
import functools
import re

import attr
from recipe_engine import recipe_api

from PB.go.fuchsia.dev.fuchsia.tools.integration.cmd.fint.proto import (
    context as context_pb2,
)
from RECIPE_MODULES.fuchsia.utils import cached_property

from .orchestration_inputs import _TestOrchestrationInputs
# Host architecture -> number of bits -> host platform name.
# Add to this dictionary as we support building on more devices.
HOST_PLATFORMS = {
"intel": {64: "x64",},
}
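# Example lookup (hypothetical): on a 64-bit Intel host,
# HOST_PLATFORMS["intel"][64] == "x64".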
# List of available targets.
TARGETS = ["x64", "arm64"]
# List of available build types.
BUILD_TYPES = ["debug", "release", "thinlto", "lto"]
# List of canonical names for archives that the build can produce.
ARCHIVES = [
"archive", # Images and scripts for paving/netbooting.
"packages", # Package metadata, blobs, and tools.
]
# Manifests produced by the build.
ARCHIVES_JSON = "archives.json"
IMAGES_JSON = "images.json"
PREBUILT_BINARIES_JSON = "prebuilt_binaries.json"
TESTS_JSON = "tests.json"
TOOL_PATHS_JSON = "tool_paths.json"
TRIAGE_SOURCES_JSON = "triage_sources.json"
ZIRCON_JSON = "zircon.json"
# Name of BigQuery project and table for uploading artifacts.
BIGQUERY_PROJECT = "fuchsia-infra"
BIGQUERY_ARTIFACTS_DATASET = "artifacts"
# The private and authorized SSH keys pulled down in the checkout, relative to
# the fuchsia root.
CHECKOUT_AUTHORIZED_KEY = ".ssh/authorized_keys"
CHECKOUT_PRIVATE_KEY = ".ssh/pkey"
# The path to a public key used to sign release builds. Only set on release
# builders.
RELEASE_PUBKEY_PATH = "/etc/release_keys/release_key_pub.pem"
# The name of the public key file uploaded in release builds.
RELEASE_PUBKEY_FILENAME = "publickey.pem"
QEMU_IMAGES = ("qemu-kernel", "zircon-a")
# Names of Ninja build metadata files.
COMPDB_FILENAME = "compile_commands.json"
GRAPH_FILENAME = "graph.dot"
NINJA_LOG_FILENAME = ".ninja_log"
BUILDSTATS_FILENAME = "buildstats.json"
# GN tracelogs, located in the root of the fuchsia build output directory.
ZIRCON_GN_TRACE = "zircon_gn_trace.json"
FUCHSIA_GN_TRACE = "gn_trace.json"
class NoSuchTool(Exception):
def __init__(self, name, cpu, os):
super(NoSuchTool, self).__init__(
"no such tool in %s: (name=%r, cpu=%r, os=%r)"
% (TOOL_PATHS_JSON, name, cpu, os)
)
@attr.s
class _FuchsiaBuildResults(object):
"""Represents a completed build of Fuchsia.

    Attributes:
      archives: Mapping from the canonical name of an archive produced by the
        Fuchsia build to the absolute path to that archive on the local disk.
      board: The board for this build.
      checkout: The Fuchsia checkout.
      fuchsia_build_dir: The directory where Fuchsia build artifacts may be found.
      images: Mapping from the canonical name of an image produced by the Fuchsia
        build to its path relative to the fuchsia build directory.
      product: The product for this build.
      target: The target CPU architecture for this build.
      variants: The variants for this build.
      gn_results: The GN gen step results.
    """
_api = attr.ib()
checkout = attr.ib()
target = attr.ib(validator=attr.validators.in_(TARGETS))
variants = attr.ib()
build_type = attr.ib()
fuchsia_build_dir = attr.ib()
zircon_build_dir = attr.ib()
board = attr.ib()
product = attr.ib()
gn_results = attr.ib()
images = attr.ib(factory=dict)
archives = attr.ib(factory=dict)
_size_check_failed = attr.ib(init=False, default=False)
authorized_key = attr.ib(init=False)
llvm_symbolizer = attr.ib(init=False)
minfs = attr.ib(init=False)
private_key = attr.ib(init=False)
zbi = attr.ib(init=False)
    # Use factory=list so instances do not share a single mutable default.
    fuchsia_targets = attr.ib(factory=list)
    zircon_targets = attr.ib(factory=list)
def __attrs_post_init__(self):
self.authorized_key = self.checkout.root_dir.join(CHECKOUT_AUTHORIZED_KEY)
self.llvm_symbolizer = self.tool("llvm-symbolizer")
self.minfs = self.tool("minfs")
self.private_key = self.checkout.root_dir.join(CHECKOUT_PRIVATE_KEY)
self.zbi = self.tool("zbi")
def __deepcopy__(self, memodict):
# Shallow copy first.
new = copy.copy(self)
# Only images needs to be a real deep copy.
new.images = copy.deepcopy(new.images, memodict)
return new
@property
def secret_specs(self):
return self.fuchsia_build_dir.join("secret_specs")
@cached_property
def _binary_sizes(self):
"""The binary sizes data produced by the build."""
check_sizes_result = self._api.step(
"size_checker",
[
self.tool("size_checker"),
"--build-dir",
self.fuchsia_build_dir,
"--sizes-json-out",
self._api.json.output(),
],
step_test_data=lambda: self._api.json.test_api.output({"component": 1}),
ok_ret="any",
)
self._size_check_failed = bool(check_sizes_result.exc_result.retcode)
return check_sizes_result.json.output
@cached_property
def generated_sources(self):
"""The paths to the generated source files relative to the checkout root."""
generated_sources = []
for path in self.gn_results.all_generated_sources:
abspath = self._api.path.abs_to_path(
self._api.path.realpath(self.fuchsia_build_dir.join(path.lstrip("/")))
)
self._api.path.mock_add_paths(abspath)
if self._api.path.exists(abspath):
generated_sources.append(
self._api.path.relpath(abspath, self.checkout.root_dir)
)
return generated_sources
@cached_property
def triage_sources(self):
"""The paths to the triage sources relative to the checkout root."""
return [
self._api.path.relpath(f, self.checkout.root_dir)
for f in self.gn_results.triage_sources
]
def tool(self, name, cpu="x64", os=None):
"""The path to a tool of the given name and cpu."""
return self.gn_results.tool(name, cpu, os)
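    # Example (hypothetical recipe usage, assuming `build_results` is a
    # _FuchsiaBuildResults and tool_paths.json lists a host "zbi" tool):
    #
    #   zbi_path = build_results.tool("zbi")  # defaults: cpu="x64", host os
    #
    # A (name, cpu, os) triple missing from the manifest raises NoSuchTool.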
def check_binary_sizes(self):
"""Checks that binary sizes are less than the maximum allowed.
Raises:
StepFailure if the size exceeds the maximum.
"""
# This property is read by the binary-size Gerrit plugin.
with self._api.step.nest("check binary sizes") as presentation:
if self._binary_sizes:
presentation.properties["binary_sizes"] = self._binary_sizes
if self._size_check_failed:
raise self._api.step.StepFailure("binary size checks failed")
def calculate_affected_tests(self, bb_input):
"""Returns (path of affected tests, whether all testing can be skipped)."""
no_work = False
affected_tests_file = self._api.path.mkstemp()
with self._api.step.nest("affected tests") as presentation:
changed_files = self.checkout.changed_files(bb_input)
# In CI runs we won't have changed files to analyze and this is expected.
if changed_files is None:
return affected_tests_file, no_work
            # The affected tests analysis is not valid for changes to the
            # integration repo, which in particular holds infra configs and
            # jiri manifests.
if bb_input.gerrit_changes[0].project == "integration":
return affected_tests_file, no_work
ninja_dry_run_file = self._api.path.mkstemp()
no_work_status_file = self._api.path.mkstemp()
result = self._api.python(
"find affected tests",
self._api.build.resource("affected_tests.py"),
["--changed-srcs",]
+ changed_files
+ [
"--tests-json",
self.gn_results.test_manifest_filename,
"--ninja",
self.tool("ninja"),
"--zircon_targets",
]
+ sorted(self.zircon_targets)
+ ["--fuchsia_targets",]
+ sorted(self.fuchsia_targets)
+ [
"--zircon-out-dir",
self.zircon_build_dir,
"--fuchsia-out-dir",
self.fuchsia_build_dir,
"--ninja-out",
ninja_dry_run_file,
"--no-work-status",
no_work_status_file,
],
stdout=self._api.raw_io.output(leak_to=affected_tests_file),
step_test_data=lambda: self._api.raw_io.test_api.stream_output(
"test1\ntest2\n"
),
)
presentation.logs["affected_tests.txt"] = result.stdout
            # Read the ninja dry run output so that it gets logged for debugging.
self._api.file.read_text("read ninja dry run output", ninja_dry_run_file)
no_work = self._api.json.read(
"read no work status",
no_work_status_file,
step_test_data=functools.partial(self._api.json.test_api.output, False),
).json.output
return affected_tests_file, no_work
def upload(self, gcs_bucket, is_release_version=False, namespace=None):
"""Uploads artifacts from the build to Google Cloud Storage.
Args:
gcs_bucket (str): GCS bucket name to upload build results to.
is_release_version (bool): True if checkout is a release version.
namespace (str|None): A unique namespace for the GCS upload location;
if None, the current build's ID is used.
"""
assert gcs_bucket
with self._api.step.nest("upload build results"):
self._api.build._upload_build_results(
self, gcs_bucket, is_release_version, namespace
)
self._api.build._upload_package_snapshot(self, gcs_bucket, namespace)
class _GNResults(object):
"""_GNResults is represents the result of a `gn gen` invocation in the fuchsia build.
It exposes the API of the build, which defines how one can invoke
ninja.
"""
def __init__(self, api, fuchsia_build_dir, compdb_exported=False):
self._api = api
self._fuchsia_build_dir = fuchsia_build_dir
zircon_ninja_instructions = api.json.read(
"read zircon ninja instructions",
fuchsia_build_dir.join(ZIRCON_JSON),
step_test_data=api.build.test_api.mock_zircon_instructions,
).json.output
self._zircon_build_dir = api.path.abs_to_path(
api.path.realpath(
fuchsia_build_dir.join(zircon_ninja_instructions["dir"]),
),
)
self._canonical_zircon_ninja_targets = zircon_ninja_instructions["targets"]
self._fuchsia_compdb = None
self._zircon_compdb = None
if compdb_exported:
self._fuchsia_compdb = self._fuchsia_build_dir.join(COMPDB_FILENAME)
api.path.mock_add_paths(self._fuchsia_compdb)
self._zircon_compdb = self._zircon_build_dir.join(COMPDB_FILENAME)
api.path.mock_add_paths(self._zircon_compdb)
@property
def fuchsia_build_dir(self):
"""Returns the fuchsia build directory (Path)."""
return self._fuchsia_build_dir
@property
def zircon_build_dir(self):
"""Returns the associated zircon build directory (Path)."""
return self._zircon_build_dir
@property
def canonical_zircon_ninja_targets(self):
"""Returns the canonical zircon ninja targets (list(string)) that the fuchsia
build is informed of."""
return self._canonical_zircon_ninja_targets
@cached_property
def image_manifest(self):
"""Returns the manifest of images (dict) in the GN graph.
TODO(fxbug.dev/3062): Point to the schema once there is one.
"""
return self._api.json.read(
"read image manifest",
self.fuchsia_build_dir.join(IMAGES_JSON),
step_test_data=self._api.build.test_api.mock_image_manifest,
).json.output
@cached_property
def archives(self):
"""Returns the archives (dict[str]str) in the GN graph.
Maps archive name to path relative to the fuchsia build
directory.
"""
all_entries = self._api.json.read(
"read archive manifest",
self.fuchsia_build_dir.join(ARCHIVES_JSON),
step_test_data=lambda: self._api.json.test_api.output(
[{"name": "archive", "path": "build-archive.tgz", "type": "tgz",},]
),
).json.output
return [entry for entry in all_entries if entry["type"] == "tgz"]
@cached_property
def test_manifest(self):
"""Returns the manifest of tests (dict) in the GN graph.
TODO(fxbug.dev/3062): Point to the schema once there is one.
"""
return self._api.json.read(
"read test spec manifest",
self.test_manifest_filename,
step_test_data=self._api.build.test_api.mock_test_spec_manifest,
).json.output
@property
def test_manifest_filename(self):
"""Returns the filename of the test manifest generated by GN."""
return self.fuchsia_build_dir.join(TESTS_JSON)
@cached_property
def all_generated_sources(self):
"""Returns all the generated source files (list(str)) from the zircon and
fuchsia builds.
The returned paths are relative to the fuchsia build directory.
TODO(fxb/53415): Remove generated_sources() and zircon_generated_sources()
and use this method instead once bug is fixed.
"""
return self._api.json.read(
"read generated sources",
self.fuchsia_build_dir.join("generated_sources.json"),
step_test_data=lambda: self._api.json.test_api.output(
["generated_header.h", "../default.zircon/generated_header.h"]
),
).json.output
@cached_property
def generated_sources(self):
"""Returns the generated source files (list(str)) from the fuchsia build.
The returned paths are relative to the fuchsia build directory.
"""
filter_prefixes = [
            # Remove zircon targets from fuchsia generated_sources, as they
            # will cause build errors.
self._api.path.relpath(self.zircon_build_dir, self.fuchsia_build_dir),
]
all_targets = self._api.json.read(
"read generated sources",
self.fuchsia_build_dir.join("generated_sources.json"),
step_test_data=lambda: self._api.json.test_api.output(
["generated_header.h", "../default.zircon/generated_header.h"]
),
).json.output
generated_sources = []
for target in all_targets:
for filter_prefix in filter_prefixes:
if target.startswith(filter_prefix):
break
else:
generated_sources.append(target)
return generated_sources
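    # Worked example of the filtering above (hypothetical layout): with the
    # zircon build dir at out/not-default.zircon and the fuchsia build dir at
    # out/not-default, filter_prefixes == ["../not-default.zircon"], so
    # "../not-default.zircon/generated_header.h" is dropped while
    # "generated_header.h" is kept.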
@cached_property
def zircon_generated_sources(self):
"""Returns the generated source files (list(str)) from the zircon build.
The returned paths are relative to the zircon build directory.
"""
return self._api.json.read(
"read zircon generated sources",
self.zircon_build_dir.join("generated_sources.json"),
step_test_data=lambda: self._api.json.test_api.output(
["//generated_header.h"]
),
).json.output
def tool_and_target(self, name, cpu="x64", os=None):
"""Returns the path to the specified tool provided from the tool_paths
manifest, along with its ninja build target."""
os = os or self._api.platform.name
try:
return self._tools[name, cpu, os]
except KeyError:
raise NoSuchTool(name, cpu, os)
def tool(self, name, cpu="x64", os=None):
"""Returns the path to the specified tool provided from the tool_paths
manifest."""
tool_path, _ = self.tool_and_target(name, cpu, os)
return tool_path
@cached_property
def _tools(self):
tools = {}
tool_paths_manifest = self._api.json.read(
"read tool_paths manifest",
self._fuchsia_build_dir.join(TOOL_PATHS_JSON),
step_test_data=self._api.build.test_api.mock_tool_paths_manifest,
).json.output
for tool in tool_paths_manifest:
key = (tool["name"], tool["cpu"], tool["os"])
assert key not in tools, (
"only one tool with (name, cpu, os) == (%s, %s, %s) is allowed" % key
)
tool_target = tool["path"]
tool_path = self._api.path.abs_to_path(
self._api.path.realpath(self._fuchsia_build_dir.join(tool_target))
)
tools[key] = (tool_path, tool_target)
return tools
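    # Sketch of the mapping built by _tools: a hypothetical manifest entry
    #   {"name": "zbi", "cpu": "x64", "os": "linux", "path": "host_x64/zbi"}
    # becomes
    #   tools[("zbi", "x64", "linux")] = (<abspath of host_x64/zbi>, "host_x64/zbi")
    # where the second element is the tool's ninja target.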
@cached_property
def triage_sources(self):
"""Returns the absolute paths of the files defined in the triage_sources
manifest."""
triage_sources_manifest = self._api.json.read(
"read triage_sources manifest",
self._fuchsia_build_dir.join(TRIAGE_SOURCES_JSON),
step_test_data=self._api.build.test_api.mock_triage_sources_manifest,
).json.output
return [
self._api.path.abs_to_path(
self._api.path.realpath(self._fuchsia_build_dir.join(source))
)
for source in triage_sources_manifest
]
@cached_property
def prebuilt_binaries(self):
return self._api.json.read(
"read prebuilt binary manifest",
self.fuchsia_build_dir.join(PREBUILT_BINARIES_JSON),
step_test_data=lambda: self._api.json.test_api.output(
[
{
"name": "prebuilt_binaries",
"manifest": "gen/prebuilt_binary_manifest.json",
}
]
),
).json.output
def filtered_compdb(self, filters):
"""The path to a merged compilation database, filtered via the passed
filters."""
with self._api.step.nest("merge compdbs"):
compdb = []
compdb += self._api.json.read(
"read zircon compdb",
self._zircon_compdb,
stdout=self._api.json.output(),
step_test_data=lambda: self._api.json.test_api.output(
[
{
"directory": "[START_DIR]/out/not-default.zircon",
"file": "../../zircon.cpp",
"command": "clang++ zircon.cpp",
}
]
),
).json.output
compdb += self._api.json.read(
"read compdb",
self._fuchsia_compdb,
stdout=self._api.json.output(),
step_test_data=lambda: self._api.json.test_api.output(
[
{
"directory": "[START_DIR]/out/not-default",
"file": "../../foo.cpp",
"command": "clang++ foo.cpp",
},
{
"directory": "[START_DIR]/out/not-default",
"file": "../../third_party/foo.cpp",
"command": "clang++ third_party/foo.cpp",
},
{
"directory": "[START_DIR]/out/not-default",
"file": "../../out/not-default/foo.cpp",
"command": "clang++ foo.cpp",
},
]
),
).json.output
def keep_in_compdb(entry):
# Filenames are relative to the build directory, and the build directory is absolute.
build_dir = self._api.path.abs_to_path(entry["directory"])
full_path = self._api.path.abs_to_path(
self._api.path.realpath(self._api.path.join(build_dir, entry["file"]))
)
if build_dir.is_parent_of(full_path):
return False
segments = entry["file"].split(self._api.path.sep)
            if any(bad_segment in segments for bad_segment in filters):
return False
return True
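        # Worked example (hypothetical entry, filters=["third_party"]): an
        # entry whose "file" is "../../third_party/foo.cpp" is dropped because
        # "third_party" is one of its path segments; an entry whose resolved
        # file lives inside the build directory (i.e. generated code) is
        # dropped by the is_parent_of() check.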
compdb_filtered = [entry for entry in compdb if keep_in_compdb(entry)]
compdb_path = self._api.path["cleanup"].join(COMPDB_FILENAME)
self._api.file.write_json("write merged compdb", compdb_path, compdb_filtered)
return compdb_path
@cached_property
def zbi_tests(self):
"""Returns the ZBI tests from the Fuchsia build directory."""
zbi_tests = self._api.json.read(
"read zbi test manifest",
self.fuchsia_build_dir.join("zbi_tests.json"),
step_test_data=lambda: self._api.json.test_api.output([]),
).json.output
return {zbi_test["name"]: zbi_test for zbi_test in zbi_tests}
class FuchsiaBuildApi(recipe_api.RecipeApi):
"""APIs for building Fuchsia."""
TEST_ORCHESTRATION_INPUTS_HASH_PROPERTY = _TestOrchestrationInputs.HASH_PROPERTY
NoSuchTool = NoSuchTool
def __init__(self, clang_toolchain, gcc_toolchain, rust_toolchain, *args, **kwargs):
super(FuchsiaBuildApi, self).__init__(*args, **kwargs)
# TODO(fxb/35063): Remove most of these properties, use a spec msg.
self._clang_toolchain = clang_toolchain
self._gcc_toolchain = gcc_toolchain
self._rust_toolchain = rust_toolchain
def gn_results(self, *args, **kwargs):
return _GNResults(self.m, *args, **kwargs)
def build_results(self, *args, **kwargs):
return _FuchsiaBuildResults(self.m, *args, **kwargs)
def download_test_orchestration_inputs(self, isolated_hash):
return _TestOrchestrationInputs.download(self.m, isolated_hash)
def test_orchestration_inputs_from_build_results(self, *args, **kwargs):
return _TestOrchestrationInputs.from_build_results(self.m, *args, **kwargs)
@staticmethod
def test_orchestration_inputs_property_name(without_cl):
if without_cl:
return _TestOrchestrationInputs.HASH_PROPERTY_WITHOUT_CL
else:
return _TestOrchestrationInputs.HASH_PROPERTY
@cached_property
def _clang_toolchain_dir(self):
if self._clang_toolchain:
return self._download_toolchain(
"clang", self._clang_toolchain, "third_party/clang"
)
return ""
@cached_property
def _gcc_toolchain_dir(self):
if self._gcc_toolchain:
return self._download_toolchain(
"gcc", self._gcc_toolchain, "third_party/gcc"
)
return ""
@cached_property
def _rust_toolchain_dir(self):
if self._rust_toolchain:
return self._download_toolchain("rust", self._rust_toolchain, "rust")
return ""
def _prebuilt_path(self, checkout_root, *path):
"""Returns the Path to the host-platform subdir under the given subdirs."""
path = list(path)
path.append(
"{os}-{arch}".format(
os=self.m.platform.name, arch={"intel": "x64"}[self.m.platform.arch],
)
)
return checkout_root.join("prebuilt", *path)
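    # Example (hypothetical, on a linux x64 host):
    #   self._prebuilt_path(checkout_root, "third_party", "gn")
    # returns checkout_root/prebuilt/third_party/gn/linux-x64.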
def from_spec(
self,
build_spec,
checkout,
pave,
sdk_id=None,
gcs_bucket=None,
buildstats_upload_namespace=None,
):
"""Builds Fuchsia from a Jiri checkout.
Args:
build_spec (fuchsia_pb2.Fuchsia.Build): The input build spec.
checkout (CheckoutApi.CheckoutResults): The Fuchsia checkout.
pave (bool): Whether the images produced by the build will be paved to
the target device.
sdk_id (str): If specified, set sdk_id in GN.
gcs_bucket (str or None): The GCS bucket to upload results to, if set.
buildstats_upload_namespace (str|None): The namespace to upload build
stats to.
Returns:
A FuchsiaBuildResults, representing the build.
"""
archives_to_build = []
ninja_targets = build_spec.ninja_targets
# TODO(fxb/43568): Remove once it is always false.
if build_spec.include_archives:
# If we are not building images, then we need not build package-related
# targets.
if not build_spec.exclude_images:
archives_to_build.extend(["archive", "packages"])
elif not build_spec.exclude_images:
# This will build the amber-files tree that was included in the
# packages archive.
ninja_targets.append("build/images:updates")
test_durations_file = build_spec.test_durations_file
# We might not yet have any historical test duration data for the tests
# built under the current build config, in which case a test duration
# file specific to the current build config would not yet exist in the
# checkout. So fall back to using a "default" test durations file whose
# data is not specific to the current build config, but which is
# guaranteed to exist.
#
# But if `test_durations_file` is empty to begin with, that means that
# this build doesn't require test durations at all, so don't fall back
# to `default_test_durations_file`.
if test_durations_file and not self.m.path.exists(
checkout.root_dir.join(test_durations_file)
):
test_durations_file = build_spec.default_test_durations_file
return self.with_options(
checkout=checkout,
target=build_spec.target,
build_type=build_spec.build_type,
packages=build_spec.packages,
cache_packages=build_spec.cache_packages,
universe_packages=build_spec.universe_packages,
variants=build_spec.variants,
gn_args=build_spec.gn_args,
use_goma=build_spec.use_goma,
sdk_id=sdk_id,
ninja_targets=ninja_targets,
board=build_spec.board,
product=build_spec.product,
collect_build_metrics=build_spec.upload_results,
build_host_tests=build_spec.run_tests,
build_images=not build_spec.exclude_images,
archives_to_build=tuple(archives_to_build),
gcs_bucket=gcs_bucket,
build_stats_gcs_bucket=build_spec.stats_gcs_bucket,
upload_namespace=buildstats_upload_namespace,
pave=pave,
skip_if_unaffected=build_spec.skip_if_unaffected,
test_durations_file=test_durations_file,
static_spec_path=checkout.root_dir.join(build_spec.static_spec_path),
fint_set=build_spec.fint_set,
fint_build=build_spec.fint_build,
)
def with_options(self, *args, **kwargs):
with self.m.step.nest("build"):
return self._with_options(*args, **kwargs)
def _with_options(
self,
checkout,
target,
build_type,
packages,
cache_packages=(),
universe_packages=(),
variants=(),
gn_args=(),
use_goma=True,
sdk_id=None,
ninja_targets=(),
board=None,
product=None,
collect_build_metrics=False,
build_host_tests=False,
build_images=True,
archives_to_build=(),
gcs_bucket=None,
build_stats_gcs_bucket=None,
upload_namespace=None,
pave=True,
skip_if_unaffected=False,
test_durations_file="",
static_spec_path=None,
fint_set=False,
fint_build=False,
):
"""Builds Fuchsia from a Jiri checkout.
Args:
checkout (CheckoutResult): The Fuchsia checkout result.
target (str): The build target, see TARGETS for allowed targets.
build_type (str): One of the build types in BUILD_TYPES
packages (sequence[str]): A sequence of packages to pass to GN to build.
cache_packages (sequence[str]): A sequence of packages to pass to GN
to build and add to the set of 'cache' packages.
          universe_packages (sequence[str]): A sequence of packages to pass to GN
            to build and add to the set of 'universe' packages.
          variants (sequence[str]): A sequence of build variant selectors to pass
            to GN in `select_variant`.
          gn_args (sequence[str]): Additional arguments to pass to GN.
          use_goma (bool): Whether to use goma when building fuchsia.
          ninja_targets (sequence[str]): Additional target args to pass to ninja.
          sdk_id (str): If specified, set sdk_id in GN.
          board (str): A board to pass to GN to build.
          product (str): A product to pass to GN to build.
collect_build_metrics (bool): Whether to collect build metrics.
build_host_tests (bool): Whether to build the host tests.
build_images (bool): Whether to build the basic images needed to boot
and test on fuchsia.
archives_to_build (seq(str)): A tuple of canonical names of archives to
build.
gcs_bucket (str): GCS bucket name to upload build results to.
build_stats_gcs_bucket (str): GCS bucket name to upload build stats to.
upload_namespace (str): The namespace within the build stats GCS bucket
to upload to.
pave (bool): Whether the resulting images will be paved (vs. netbooted).
skip_if_unaffected (bool): Whether to skip building if the current
gerrit change doesn't affect the build graph.
test_durations_file (str): Path in the checkout containing historical
test duration data for the current build config.
static_spec_path (Path): The absolute path to a platform spec
textproto to pass to fint. Required if `fint_set` or `fint_build`
is set.
fint_set (bool): Whether to use fint for running `gn gen` rather
than invoking GN directly.
fint_build (bool): Whether to use fint for running ninja rather
than invoking ninja directly.
Returns:
A FuchsiaBuildResults, representing the recently completed build.
"""
# TODO(olivernewman): Once the `fint build` command is implemented, add
# support for calling it instead of ninja based on this parameter.
del fint_build
assert target in TARGETS
assert build_type in BUILD_TYPES
assert all(archive in ARCHIVES for archive in archives_to_build)
bb_input = self.m.buildbucket.build.input
# Some parts of the build require the build dir to be two
# directories nested underneath the checkout dir.
# We choose the path to be intentionally different from
# "out/default" because that is what most developers use locally and we
# want to prevent the build from relying on those directory names.
fuchsia_build_dir = checkout.root_dir.join("out", "not-default")
fint_path = None
context_spec_path = None
if fint_set:
fint_path = self.m.path.mkdtemp("fint").join("fint")
bootstrap_path = checkout.root_dir.join(
"tools", "integration", "bootstrap.sh"
)
self.m.step("bootstrap fint", [bootstrap_path, "-o", fint_path])
context_spec = context_pb2.Context(
checkout_dir=str(checkout.root_dir),
build_dir=str(fuchsia_build_dir),
sdk_id=sdk_id,
changed_files=[
context_pb2.Context.ChangedFile(
path=self.m.path.relpath(path, checkout.root_dir)
)
for path in checkout.changed_files(bb_input) or []
],
cache_dir=str(self.m.path["cache"]),
release_version=checkout.release_version,
clang_toolchain_dir=str(self._clang_toolchain_dir),
gcc_toolchain_dir=str(self._gcc_toolchain_dir),
rust_toolchain_dir=str(self._rust_toolchain_dir),
)
context_spec_path = self.m.path.mkdtemp().join("context.textproto")
self.m.file.write_text(
"write context spec",
context_spec_path,
self.m.proto.encode(context_spec, codec="TEXTPB"),
)
gn_results = self.gen(
checkout=checkout,
fuchsia_build_dir=fuchsia_build_dir,
target=target,
build_type=build_type,
packages=packages,
cache_packages=cache_packages,
universe_packages=universe_packages,
variants=variants,
use_goma=use_goma,
args=gn_args,
board=board,
product=product,
record_tracelogs=collect_build_metrics,
sdk_id=sdk_id,
test_durations_file=test_durations_file,
fint_path=fint_path if fint_set else None,
static_spec_path=static_spec_path,
context_spec_path=context_spec_path,
)
if (
not self.is_affected(bb_input, checkout, fuchsia_build_dir, gn_results)
and skip_if_unaffected
and not self.m.recipe_testing.enabled
):
return None
def used_for_testing(image):
# If an image is used in paving or netbooting, its manifest entry will
# specify what flags to pass to the bootserver when doing so.
if "bootserver_pave_zedboot" in image: # Used by catalyst.
return True
elif pave and "bootserver_pave" in image: # Used for paving.
return True
elif not pave and "bootserver_netboot" in image: # Used for netboot.
return True
elif image["name"] in QEMU_IMAGES: # Used for QEMU.
return True
elif image["name"] == "uefi-disk": # Used for GCE.
return True
            # In order for a user to provision without Zedboot, the
            # scripts are needed too, so we want to include them such
            # that artifactory can upload them. This covers scripts
            # like "pave.sh", "flash.sh", and so on.
elif image["type"] == "script":
return True
return False
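        # Example (hypothetical manifest entries): with pave=True, an entry
        # with a "bootserver_pave" key is kept, as is one whose "name" is
        # "qemu-kernel"; an entry matching none of the checks above (e.g. a
        # raw "blk" image) is filtered out.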
images = {}
ninja_targets = list(ninja_targets)
if build_images:
images = {
image["type"] + "/" + image["name"]: image
for image in gn_results.image_manifest
if used_for_testing(image)
}
# Needed for size checks and tracking.
ninja_targets.append("build/images:record_filesystem_sizes")
ninja_targets.append("build/images:system_snapshot")
archives = {
archive["name"]: self.m.path.abspath(
fuchsia_build_dir.join(archive["path"])
)
for archive in gn_results.archives
if archive["name"] in archives_to_build
}
try:
zircon_targets, fuchsia_targets = self.ninja(
gn_results=gn_results,
gcs_bucket=gcs_bucket,
targets=ninja_targets,
build_images=build_images,
image_filter=used_for_testing,
archives_to_build=archives_to_build,
build_host_tests=build_host_tests,
# At present, this is not an option we care to surface here,
# especially as in the common case this is a no-op.
build_prebuilt_binaries=True,
use_goma=use_goma,
)
finally:
if build_stats_gcs_bucket and upload_namespace:
try:
self._upload_buildstats_output(
gn_results, build_stats_gcs_bucket, upload_namespace
)
except Exception as e:
step = self.m.step("upload buildstats failure", None)
step.presentation.logs["exception"] = str(e).splitlines()
return self.build_results(
target=target,
variants=variants,
build_type=build_type,
fuchsia_build_dir=fuchsia_build_dir,
zircon_build_dir=gn_results.zircon_build_dir,
checkout=checkout,
board=board,
product=product,
gn_results=gn_results,
images=images,
archives=archives,
fuchsia_targets=fuchsia_targets,
zircon_targets=zircon_targets,
)
def _run_ninja(
self,
step_name,
build_dir,
ninja_targets,
jobs,
build_id,
gcs_bucket,
tool=None,
output_path=None,
):
try:
self.m.ninja(
step_name=step_name,
build_dir=build_dir,
targets=ninja_targets,
job_count=jobs,
tool=tool,
stdout=(
self.m.raw_io.output(leak_to=output_path) if output_path else None
),
)
except self.m.step.StepFailure:
crashreports_dir = build_dir.join("clang-crashreports")
self.m.path.mock_add_paths(crashreports_dir)
if gcs_bucket and self.m.path.exists(crashreports_dir):
with self.m.step.nest("clang-crashreports"):
with self.m.context(infra_steps=True):
temp = self.m.path.mkdtemp("reproducers")
reproducers = self.m.file.glob_paths(
"find reproducers",
crashreports_dir,
"*.sh",
test_data=(crashreports_dir.join("foo.sh"),),
)
for reproducer in reproducers:
base = self.m.path.splitext(
self.m.path.basename(reproducer)
)[0]
files = self.m.file.glob_paths(
"find %s files" % base,
crashreports_dir,
base + ".*",
test_data=(
crashreports_dir.join("foo.sh"),
crashreports_dir.join("foo.cpp"),
),
)
tgz_basename = "%s.tar.gz" % base
tgz_path = temp.join(tgz_basename)
archive = self.m.tar.create(tgz_path, compression="gzip")
for f in files:
archive.add(f, crashreports_dir)
archive.tar("create %s" % tgz_basename)
self.m.upload.file_to_gcs(
source=tgz_path,
bucket=gcs_bucket,
subpath=tgz_basename,
namespace=build_id,
)
raise
def _gen_build_perf_data(
self, step_name, build_dir, ninja_targets, jobs, build_id, gcs_bucket
):
# Generate Ninja build graph and compdb for build performance analysis.
out = build_dir.join(GRAPH_FILENAME)
self._run_ninja(
"%s graph" % step_name,
build_dir=build_dir,
# Pass in all targets to generate a complete graph for build analysis.
ninja_targets=ninja_targets,
jobs=jobs,
build_id=build_id,
gcs_bucket=gcs_bucket,
tool="graph",
output_path=out,
)
out = build_dir.join(COMPDB_FILENAME)
self._run_ninja(
"%s compdb" % step_name,
build_dir=build_dir,
            # Omit targets so that compdb generates entries for all build edges.
ninja_targets=[],
jobs=jobs,
build_id=build_id,
gcs_bucket=gcs_bucket,
tool="compdb",
output_path=out,
)
def gen(
self,
checkout,
fuchsia_build_dir,
export_compdb=False,
fint_path=None,
static_spec_path=None,
context_spec_path=None,
**kwargs
):
"""Sets up and calls `gn gen`.
Args:
checkout (CheckoutApi.CheckoutResults): The checkout results.
fuchsia_build_dir (Path): The output directory for the fuchsia build.
export_compdb (bool): Whether to generate a compilation database.
fint_path (Path): Path to a fint executable. If set, use fint for
running `gn gen` rather than invoking GN directly.
static_spec_path (Path): The absolute path to a Static spec
textproto to pass to fint.
context_spec_path (Path): The absolute path to a Context spec
textproto to pass to fint.
          **kwargs (dict): Passed to _gn_gen_options() if invoking `gn gen`
            directly.
Returns:
A _GNResults object.
"""
# Set the path to the goma directory from the checkout.
# TODO(olivernewman): Move this into _gn_gen_options once `fint_set`
# implies `fint_build`. It's only necessary in the `fint_set` case as
# long as we still sometimes run ninja directly after `fint set`,
# because the `ninja()` function assumes that the `goma_dir` is set.
self.m.goma.set_path(
self._prebuilt_path(checkout.root_dir, "third_party", "goma")
)
with self.m.macos_sdk(), self.m.context(cwd=checkout.root_dir):
if fint_path:
self.m.step(
"fint set",
[
fint_path,
"set",
"-static",
static_spec_path,
"-context",
context_spec_path,
],
)
else:
self.m.gn(
"gen",
*self._gn_gen_options(
checkout=checkout,
fuchsia_build_dir=fuchsia_build_dir,
export_compdb=export_compdb,
**kwargs
)
)
return self.gn_results(fuchsia_build_dir, export_compdb)
def _gn_gen_options(
self,
checkout,
fuchsia_build_dir,
target,
build_type,
product,
board=None,
packages=(),
cache_packages=(),
universe_packages=(),
variants=(),
args=(),
use_goma=True,
record_tracelogs=False,
export_compdb=False,
export_project=False,
fail_on_unused_args=True,
sdk_id=None,
test_durations_file="",
):
"""Formats options to pass to `gn gen`.
Args:
checkout (CheckoutApi.CheckoutResults): The checkout results.
fuchsia_build_dir (Path): The output directory for the fuchsia build.
target (str): The build target, see TARGETS for allowed targets.
build_type (str): One of the build types in BUILD_TYPES.
product (str): A product to pass to GN to build.
board (str or None): A board to pass to GN to build.
packages (sequence[str]): A sequence of packages to pass to GN to build.
cache_packages (sequence[str]): A sequence of packages to pass to GN
to build and add to the set of 'cache' packages.
universe_packages (sequence[str]): A sequence of packages to pass to GN
to build and add to the set of 'universe' packages.
variants (sequence[str]): A sequence of build variant selectors to pass
to GN in `select_variant`.
          args (sequence[str]): Additional arguments to pass to GN.
          use_goma (bool): Whether to use goma when building fuchsia.
          record_tracelogs (bool): Whether to record tracelogs.
export_compdb (bool): Whether to generate a compilation database.
export_project (bool): Whether to generate project.json.
fail_on_unused_args (bool): Treat unused build args warnings as errors.
sdk_id (str): If specified, set sdk_id in GN.
test_durations_file (str): Path in the checkout containing historical
test duration data for the current build config.
Returns:
A list of strings suitable to pass as command-line args to `gn gen`.
"""
# Set the path to GN executable since it is not installed from CIPD.
self.m.gn.set_path(
self._prebuilt_path(checkout.root_dir, "third_party", "gn").join("gn")
)
# We need to modify the arguments when setting up toolchain so make a copy.
args = list(args)
if checkout.release_version:
args.append(
'build_info_version="{version}"'.format(
version=checkout.release_version
)
)
if product:
product_base = self.m.path.splitext(self.m.path.basename(product))[0]
args.append('build_info_product="%s"' % product_base)
if board:
board_base = self.m.path.splitext(self.m.path.basename(board))[0]
args.append('build_info_board="%s"' % board_base)
if sdk_id:
args.extend(["build_sdk_archives=true", 'sdk_id="%s"' % sdk_id])
if self._clang_toolchain_dir:
assert (
not use_goma
), "goma is not supported for builds using a custom clang toolchain"
args.append('clang_prefix="%s"' % self._clang_toolchain_dir.join("bin"))
if self._gcc_toolchain_dir:
assert (
not use_goma
), "goma is not supported for builds using a custom gcc toolchain"
args.append(
'zircon_extra_args.gcc_tool_dir = "%s"'
% self._gcc_toolchain_dir.join("bin")
)
if self._rust_toolchain_dir:
args.append('rustc_prefix="%s"' % self._rust_toolchain_dir.join("bin"))
args.extend(
[
'target_cpu="%s"' % target,
"is_debug=%s" % ("true" if build_type == "debug" else "false"),
]
)
if test_durations_file:
args.append('test_durations_file="%s"' % test_durations_file)
if board:
args.append('import("//%s") ' % board)
if product:
args.append('import("//%s") ' % product)
if packages:
base_package_labels_format = "base_package_labels=[%s]"
            # If product is set, append to base_package_labels.
if product:
base_package_labels_format = "base_package_labels+=[%s]"
args.append(
base_package_labels_format % ",".join('"%s"' % pkg for pkg in packages)
)
if cache_packages:
cache_package_labels_format = "cache_package_labels=[%s]"
            # If product is set, append to cache_package_labels.
if product:
cache_package_labels_format = "cache_package_labels+=[%s]"
args.append(
cache_package_labels_format
% ",".join('"%s"' % pkg for pkg in cache_packages)
)
if universe_packages:
universe_package_labels_format = "universe_package_labels=[%s]"
            # If product is set, append to universe_package_labels.
if product:
universe_package_labels_format = "universe_package_labels+=[%s]"
args.append(
universe_package_labels_format
% ",".join('"%s"' % pkg for pkg in universe_packages)
)
if variants:
            # Apply double-quotes to `select_variant_shortcut`s (strings,
            # e.g. host-asan), but not to `selector`s (scopes,
            # e.g. {variant="asan-fuzzer" target_type=["fuzzed_executable"]}).
formatted_variants = [
v if re.match("^{.*}$", v) else '"%s"' % v for v in variants
]
args.append("select_variant=[%s]" % ",".join(formatted_variants))
if "thinlto" in variants:
args.append(
'thinlto_cache_dir="%s"' % self.m.path["cache"].join("thinlto")
)
if record_tracelogs:
args.append(
'zircon_tracelog="%s"' % str(fuchsia_build_dir.join(ZIRCON_GN_TRACE))
)
if use_goma:
args.extend(["use_goma=true", 'goma_dir="%s"' % self.m.goma.goma_dir])
def sorted_with_imports_first(arglist):
"""Sorts arguments while ensuring that imports come first.
The `--args` passed to gen are sorted for a deterministic
ordering with imports coming first, as otherwise they might
blindly redefine variables set or modified by other
arguments). This will ease reviews, as a re-ordering of
build logic will yield the same GN invocation.
"""
import_args, normal_args = [], []
for arg in arglist:
if arg.startswith("import("):
import_args.append(arg)
else:
normal_args.append(arg)
return (
sorted(import_args)
# Initialize zircon_extra_args before any variable-setting
# args, so that it's safe for subsequent args to do things like
# `zircon_extra_args.foo = "bar"` without worrying about
# initializing zircon_extra_args if it hasn't yet been defined.
# But do it after all imports in case one of the imported files
# sets `zircon_extra_args`.
+ ["if (!defined(zircon_extra_args)) { zircon_extra_args = { } }"]
+ sorted(normal_args)
)
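        # Example: sorted_with_imports_first(['x=1', 'import("//a")']) returns
        #   ['import("//a")',
        #    'if (!defined(zircon_extra_args)) { zircon_extra_args = { } }',
        #    'x=1'].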
gen_options = [
fuchsia_build_dir,
"--check=system",
"--args=%s" % " ".join(sorted_with_imports_first(args)),
]
if export_compdb:
gen_options.append("--export-compile-commands")
if export_project:
gen_options.append("--ide=json")
if record_tracelogs:
gen_options.append(
"--tracelog=%s" % str(fuchsia_build_dir.join(FUCHSIA_GN_TRACE))
)
if fail_on_unused_args:
gen_options.append("--fail-on-unused-args")
return gen_options
def is_affected(self, bb_input, checkout, fuchsia_build_dir, gn_results):
"""Returns whether or not the build should continue.
Will return False iff bb_input indicates that we are testing a single
gerrit change in CQ, and the build graph indicates that change cannot
affect the build.
"""
changed_files = checkout.changed_files(bb_input)
# changed_files = None -> CI, which we always want to test.
        # The analysis is not valid for changes to the integration repo,
        # which in particular holds infra configs and jiri manifests.
if changed_files is None or bb_input.gerrit_changes[0].project == "integration":
return True
# Rebase paths to be relative to the checkout root, and prefix with "//"
# to indicate source-relative paths.
changed_files_fuchsia = [
"//" + self.m.path.relpath(changed, checkout.root_dir)
for changed in changed_files
]
changed_files_zircon = [
"//" + self.m.path.relpath(changed, checkout.root_dir.join("zircon"))
for changed in changed_files
]
with self.m.step.nest("check should build"):
result = self.should_build(
"zircon",
checkout,
gn_results.zircon_build_dir,
checkout.root_dir.join("zircon"),
changed_files_zircon,
)
result.presentation.logs["should_build_zircon"] = (
result.stderr.splitlines() + result.stdout.splitlines()
)
structured_output = self.m.json.loads(result.stdout)
result.presentation.properties[
"should_build_zircon_structured"
] = structured_output
build_zircon = structured_output["result"]
result = self.should_build(
"fuchsia",
checkout,
fuchsia_build_dir,
checkout.root_dir,
changed_files_fuchsia,
)
result.presentation.logs["should_build"] = (
result.stderr.splitlines() + result.stdout.splitlines()
)
structured_output = self.m.json.loads(result.stdout)
result.presentation.properties[
"should_build_structured"
] = structured_output
build_fuchsia = structured_output["result"]
return build_zircon != "NoBuild" or build_fuchsia != "NoBuild"
def should_build(
self, step_name, checkout, build_dir, source_dir, changed_files,
):
"""Runs the should_build.py script to analyze the provided build.
Returns the result of running the should_build.py script with the
provided changed_files against the specified build directory.
"""
return self.m.step(
step_name,
[
self.m.build.resource("should_build.py"),
"--build-directory",
build_dir,
"--source-directory",
source_dir,
"--gn-path",
self._prebuilt_path(checkout.root_dir, "third_party", "gn").join("gn"),
]
+ ["--changed-files",]
+ changed_files,
stdout=self.m.raw_io.output(),
stderr=self.m.raw_io.output(),
step_test_data=lambda: self.m.raw_io.test_api.stream_output(
"""Analyzing...
Build Directory: out/default
GN Path: prebuilt/third_party/gn/linux-x64/gn
Changed Files: ['//src/modular/bin/sessionmgr/sessionmgr_impl.rs']
""",
stream="stderr",
)
+ self.m.raw_io.test_api.stream_output(
"""{"result": "NoBuild", "explicitly_disabled_file_types": false}
"""
),
ok_ret="all",
)
def ninja(
self,
gn_results,
targets=(),
zircon_targets=(),
path_targets=(),
build_canonical_zircon_targets=True,
build_images=False,
image_filter=None,
archives_to_build=(),
build_host_tests=False,
build_prebuilt_binaries=False,
build_generated_sources=False,
build_zbi_tests=False,
gcs_bucket=None,
build_fuchsia=True,
build_zircon=True,
use_goma=True,
):
"""A high-level ninja abstraction that consumes GN build APIs - exposed
through _GNResults - in determining what to run.
Args:
gn_results (_GNResults): GN gen results.
targets (seq(str)): Fuchsia ninja targets.
zircon_targets (seq(str)): Zircon ninja targets.
path_targets (seq(path)): Files to build as ninja targets.
build_canonical_zircon_targets (bool): Whether to build the zircon ninja
targets given in the fuchsia build 'instructions' of zircon.json.
build_images (bool): Whether to build images within the GN graph.
          image_filter (lambda): A bool-valued function over the entries of
            gn_results.image_manifest. If build_images is true, then only the
            images for which this function returns True will be built.
archives_to_build (seq(str)): A list of archives in the GN graph to build.
build_host_tests (bool): Whether to build host test executables in the
GN graph.
build_prebuilt_binaries (bool): Whether to create manifests from supplied
prebuilt .build-id directories. In the common case, no such prebuilts
are supplied and this is a no-op. This variable name is a misnomer, but
hopefully forgivable as it keeps with the existing naming scheme.
build_generated_sources (bool): Whether to build generated sources.
build_zbi_tests (bool): Whether to build the zbi tests in the GN graph.
gcs_bucket (str or None): A GCS bucket to upload crash reports to.
          build_fuchsia (bool): Whether to run Ninja on Fuchsia. Set to False to
            build only Zircon; useful when targets = () means that no targets
            should be built for Fuchsia, rather than that all targets should be
            built.
          build_zircon (bool): Whether to run Ninja on Zircon. Set to False to
            build only Fuchsia; useful when zircon_targets = () means that no
            targets should be built for Zircon, rather than that all targets
            should be built.
use_goma (bool): Whether to use goma when building.
"""
# Set the path to Ninja executable since it is not installed from CIPD.
self.m.ninja.set_path(gn_results.tool("ninja"))
targets = list(targets)
zircon_targets = list(zircon_targets)
def append_target(path):
abspath = self.m.path.abs_to_path(
self.m.path.realpath(gn_results.fuchsia_build_dir.join(path))
)
if gn_results.zircon_build_dir.is_parent_of(abspath):
zircon_targets.append(
self.m.path.relpath(abspath, gn_results.zircon_build_dir)
)
else:
targets.append(path)
for path in path_targets: # pragma: nocover
append_target(path)
if build_images:
filtered_image_manifest = filter(image_filter, gn_results.image_manifest)
for image in filtered_image_manifest:
append_target(image["path"])
if archives_to_build:
targets.extend(
[
archive["path"]
for archive in gn_results.archives
if archive["name"] in archives_to_build
]
)
if build_host_tests:
for test_spec in gn_results.test_manifest:
test = self.m.testsharder.Test.from_jsonish(test_spec["test"])
if test.os != "fuchsia":
targets.append(test.path)
if build_prebuilt_binaries:
for bins in gn_results.prebuilt_binaries:
targets.append(bins["manifest"])
if build_generated_sources:
zircon_targets.extend(gn_results.zircon_generated_sources)
targets.extend(gn_results.generated_sources)
if build_zbi_tests:
for zbi_test in gn_results.zbi_tests.itervalues():
append_target(zbi_test["path"])
if build_canonical_zircon_targets:
zircon_targets.extend(gn_results.canonical_zircon_ninja_targets)
built_zircon_targets = []
built_fuchsia_targets = []
if use_goma:
goma_context = self.m.goma.build_with_goma
else:
@contextlib.contextmanager
def goma_context():
yield
with self.m.macos_sdk(), self.m.step.nest("ninja"), goma_context():
jobs = self.m.goma.jobs if use_goma else self.m.platform.cpu_count
if build_zircon:
built_zircon_targets = zircon_targets
try:
self._run_ninja(
"zircon",
gn_results.zircon_build_dir,
zircon_targets,
jobs,
self.m.buildbucket.build.id,
gcs_bucket,
)
finally:
self._gen_build_perf_data(
"zircon",
gn_results.zircon_build_dir,
zircon_targets,
jobs,
self.m.buildbucket.build.id,
gcs_bucket,
)
if build_fuchsia:
built_fuchsia_targets = targets
try:
self._run_ninja(
"fuchsia",
gn_results.fuchsia_build_dir,
targets,
jobs,
self.m.buildbucket.build.id,
gcs_bucket,
)
finally:
self._gen_build_perf_data(
"fuchsia",
gn_results.fuchsia_build_dir,
targets,
jobs,
self.m.buildbucket.build.id,
gcs_bucket,
)
return built_zircon_targets, built_fuchsia_targets
def _upload_build_results(
self, build_results, gcs_bucket, is_release_version, namespace
):
assert gcs_bucket
# Upload archives.
for name, path in build_results.archives.iteritems():
metadata = None
            # Try to sign the build archive. If we are on a release build and
            # the signing script returns a signature, add it to the metadata
            # and upload the public key for verification.
# TODO(fxb/51162): Remove once this is no longer used.
if is_release_version and name == "archive":
signature = self._try_sign_archive(path)
if signature:
# Add the signature to the metadata.
metadata = {
"x-goog-meta-signature": signature,
}
# Upload the public key to GCS.
# Note that RELEASE_PUBKEY_PATH should always exist because a
# signature should only be generated if RELEASE_PUBKEY_PATH exists.
self.m.upload.file_to_gcs(
source=RELEASE_PUBKEY_PATH,
bucket=gcs_bucket,
subpath=RELEASE_PUBKEY_FILENAME,
namespace=namespace,
)
# Upload the archive
self.m.upload.file_to_gcs(
source=path,
bucket=gcs_bucket,
subpath=self.m.path.basename(path),
namespace=namespace,
metadata=metadata,
)
# Upload build metrics.
self._upload_tracing_data(build_results, gcs_bucket, namespace)
self._upload_binary_sizes(build_results)
self._upload_blobstats_output(build_results, gcs_bucket, namespace)
def _try_sign_archive(self, archive_path):
args = [
"--archive-file",
archive_path,
]
return self.m.python(
"run signing script",
self.resource("sign.py"),
args,
venv=self.resource("sign.py.vpython"),
stdout=self.m.raw_io.output(),
).stdout
def _upload_package_snapshot(self, build_results, gcs_bucket, build_id):
assert gcs_bucket
snapshot_path = build_results.fuchsia_build_dir.join(
"obj", "build", "images", "system.snapshot"
)
if not self.m.path.exists(snapshot_path):
return
        # Upload a new table row for the system snapshot data generated during
        # this build.
snapshot = self.m.file.read_raw("read package snapshot file", snapshot_path)
build_packages_entry = {
"build_id": self.m.buildbucket.build.id,
"snapshot": snapshot,
}
basename = "system.snapshot.json"
build_packages_entry_file = self.m.path["tmp_base"].join(basename)
self.m.step(
"write build_packages_entry_file",
["cat", self.m.json.input(build_packages_entry)],
stdout=self.m.raw_io.output(leak_to=build_packages_entry_file),
)
self.m.upload.file_to_gcs(
source=build_packages_entry_file,
bucket=gcs_bucket,
subpath=basename,
namespace=build_id,
)
# Upload a new table row describing this particular build. Other tables' rows
# are linked into this table using the build id as a foreign key.
builds_entry = {
"bucket": self.m.buildbucket.bucket_v1,
"builder": self.m.buildbucket.builder_name,
"build_id": self.m.buildbucket.build.id,
"gitiles_commit": [self.m.buildbucket.gitiles_commit.id],
"datetime": str(self.m.buildbucket.build.create_time.ToDatetime()),
"start_time": str(self.m.buildbucket.build.start_time.ToDatetime()),
"repo": self.m.buildbucket.build_input.gitiles_commit.project,
"arch": build_results.target,
"product": build_results.product,
"board": build_results.board,
"channel": [""],
}
self.m.bqupload.insert(
step_name="add table row: %s/%s/builds_beta"
% (BIGQUERY_PROJECT, BIGQUERY_ARTIFACTS_DATASET),
project=BIGQUERY_PROJECT,
dataset=BIGQUERY_ARTIFACTS_DATASET,
table="builds_beta",
data_file=self.m.json.input(builds_entry),
)
def _upload_binary_sizes(self, build_results):
"""Uploads size checks to BigQuery.
The upload also includes metadata about this build so that the
data can be used to create a self-contained BigQuery table.
"""
if not build_results._binary_sizes: # pragma: no cover
return
metadata = {
"builder_name": self.m.buildbucket.builder_name,
# This field is set to a string in the BQ table schema because it's just
# an opaque ID. The conversion from int to string that happens on the
# BigQuery side is not what we want, so convert here.
"build_id": str(self.m.buildbucket.build.id),
"build_create_time_seconds": self.m.buildbucket.build.create_time.seconds,
"gitiles_commit_host": self.m.buildbucket.gitiles_commit.host,
"gitiles_commit_id": self.m.buildbucket.gitiles_commit.id,
"gitiles_commit_project": self.m.buildbucket.gitiles_commit.project,
}
size_data = []
for component, size in build_results._binary_sizes.items():
row = metadata.copy()
row["component"] = component
row["size"] = size
size_data.append(row)
bq_formatted_str = "\n".join(self.m.json.dumps(d) for d in size_data)
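        # Sketch of the payload: with _binary_sizes == {"component": 1}, the
        # upload body is a single newline-delimited JSON row merging the
        # metadata above with {"component": "component", "size": 1}.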
self.m.bqupload.insert(
step_name="upload size_checker output",
project=BIGQUERY_PROJECT,
dataset=BIGQUERY_ARTIFACTS_DATASET,
table="binary_sizes",
data_file=self.m.raw_io.input(bq_formatted_str),
)
def _upload_blobstats_output(self, build_results, gcs_bucket, build_id):
"""Runs the blobstats command and uploads the output files to GCS."""
dir_name = "blobstats"
blobstats_output_dir = self.m.path["cleanup"].join(dir_name)
with self.m.context(cwd=build_results.fuchsia_build_dir):
result = self.m.step(
"blobstats",
[build_results.tool("blobstats"), "--output=%s" % blobstats_output_dir],
ok_ret="any",
)
# If blobstats failed, it's probably because the build intentionally
# didn't produce the input files that blobstats requires. Blobstats is
# generally just a nice-to-have anyway, so either way it's probably okay
# to silently continue without uploading results if blobstats fails.
if result.retcode != 0:
return
self.m.upload.directory_to_gcs(
source=blobstats_output_dir,
bucket=gcs_bucket,
subpath=dir_name,
namespace=build_id,
)
def _upload_buildstats_output(self, gn_results, gcs_bucket, build_id):
"""Runs the buildstats command for Zircon and Fuchsia and uploads the output files to GCS."""
buildstats_binary_path = gn_results.tool("buildstats")
builds = collections.OrderedDict(
[
("zircon", gn_results.zircon_build_dir),
("fuchsia", gn_results.fuchsia_build_dir),
]
)
for name, build_dir in builds.items():
output_name = "%s-%s" % (name, BUILDSTATS_FILENAME)
buildstats_output_path = self.m.path["cleanup"].join(output_name)
if self._run_buildstats(
"%s buildstats" % name,
buildstats_binary_path,
build_dir,
buildstats_output_path,
):
self.m.upload.file_to_gcs(
source=buildstats_output_path,
bucket=gcs_bucket,
subpath=output_name,
namespace=build_id,
)
def _run_buildstats(
self, step_name, buildstats_binary_path, build_dir, output_path
):
command = [
buildstats_binary_path,
"--ninjalog",
build_dir.join(NINJA_LOG_FILENAME),
"--compdb",
build_dir.join(COMPDB_FILENAME),
"--graph",
build_dir.join(GRAPH_FILENAME),
"--output",
output_path,
]
with self.m.context(cwd=build_dir):
result = self.m.step(step_name, command, ok_ret="any")
return result.retcode == 0
def _upload_tracing_data(self, build_results, gcs_bucket, build_id):
"""Uploads GN and ninja tracing results for this build to GCS."""
paths_to_upload = [
self._extract_gn_tracing_data(build_results, FUCHSIA_GN_TRACE),
self._extract_gn_tracing_data(build_results, ZIRCON_GN_TRACE),
self._extract_ninja_tracing_data(
build_results, build_results.fuchsia_build_dir, "ninja_trace"
),
self._extract_ninja_tracing_data(
build_results, build_results.zircon_build_dir, "zircon_ninja_trace"
),
]
for path in paths_to_upload:
self.m.upload.file_to_gcs(
source=path,
bucket=gcs_bucket,
subpath=self.m.path.basename(path),
namespace=build_id,
)
def _extract_ninja_tracing_data(self, build_results, build_dir, trace_name):
"""Extracts the tracing data from the .ninja_log.
Args:
build_results (_FuchsiaBuildResults): The results of the build.
build_dir (Path): The build output directory.
trace_name (str): The name of both the input and output files (no
extension).
"""
trace = self.m.path["cleanup"].join("%s.json" % trace_name)
self.m.step(
"ninja tracing",
[
build_results.tool("ninjatrace"),
"-ninjalog",
build_dir.join(NINJA_LOG_FILENAME),
"-compdb",
build_dir.join(COMPDB_FILENAME),
"-graph",
build_dir.join(GRAPH_FILENAME),
"-critical-path",
"-trace-json",
trace,
],
stdout=self.m.raw_io.output(leak_to=trace),
)
return self._trace2html(
"ninja trace2html", trace, build_results.checkout.root_dir
)
def _extract_gn_tracing_data(self, build_results, trace_path):
"""Extracts the tracing data from this GN run.
Args:
build_results (_FuchsiaBuildResults): The results of the build.
trace_path (str): The path to the JSON output trace file.
"""
return self._trace2html(
"gn trace2html",
build_results.fuchsia_build_dir.join(trace_path),
build_results.checkout.root_dir,
)
def _trace2html(self, name, trace_path, checkout_root):
"""Converts an about:tracing file to HTML using the trace2html tool.
Returns:
          A Path to the file containing the tracing data in Chromium's
          about:tracing HTML format.
"""
        # Write the output to a file in a temporary directory, instead of a
        # top-level temporary file, so that it can always have the same name
        # and can be uploaded to a GCS file with that same name.
trace_name, _ = self.m.path.splitext(self.m.path.basename(trace_path))
output_path = self.m.path["cleanup"].join("%s.html" % trace_name)
self.m.python(
name=name,
script=checkout_root.join(
"third_party", "catapult", "tracing", "bin", "trace2html"
),
args=["--output", output_path, trace_path],
)
return output_path
def _download_toolchain(self, name, toolchain_info, cipd_package):
"""Downloads a prebuilt toolchain from isolate or CIPD.
Args:
name (str): Name of the toolchain (e.g. "clang").
toolchain_info (dict): Information about where to download the
toolchain from. Should contain a "type" field equal to "cipd"
or "isolated", and an "instance" field specifying the
isolated hash or CIPD package version.
cipd_package (str): The name of the CIPD package to download if
toolchain_info["type"] is "cipd".
Returns: A Path to the root of a temporary directory where the
toolchain was downloaded.
"""
with self.m.step.nest("%s_toolchain" % name), self.m.context(infra_steps=True):
instance = toolchain_info["instance"]
typ = toolchain_info["type"]
root_dir = self.m.path.mkdtemp(name)
if typ == "cipd":
pkgs = self.m.cipd.EnsureFile()
pkgs.add_package("fuchsia/%s/${platform}" % cipd_package, instance)
self.m.cipd.ensure(root_dir, pkgs)
elif typ == "isolated":
self.m.isolated.download(
"download", isolated_hash=instance, output_dir=root_dir
)
else: # pragma: no cover
raise KeyError('%s_toolchain type "%s" not recognized' % (name, typ))
return root_dir