# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from contextlib import contextmanager
from recipe_engine import recipe_api
import copy
import os
import re
from .build_artifacts import ShardTaskRequest
from .build_artifacts import TestOrchestrationInputs
from .fvm import FvmImage
# Host architecture -> number of bits -> host platform name.
# Add to this dictionary as we support building on more devices.
HOST_PLATFORMS = {
'intel': {
64: 'x64',
},
}
# List of available targets.
TARGETS = ['x64', 'arm64']
# List of available build types.
BUILD_TYPES = ['debug', 'release', 'thinlto', 'lto']
# List of canonical names for archives that the build can produce.
ARCHIVES = [
'archive', # Images and scripts for paving/netbooting.
'package-archive', # Package metadata, blobs, and tools.
'symbol-archive', # Metadata for symbolization.
'breakpad-symbol-archive', # Breakpad symbols.
]
# Canonical name for fvm.blk.
STORAGE_FULL = 'storage-full'
# Canonical name for fvm.sparse.blk.
STORAGE_SPARSE = 'storage-sparse'
# File in build out dir in which the sizes of filesystems are recorded.
FILESYSTEM_SIZES_FILENAME = 'filesystem_sizes.json'
# Manifests produced by the build.
IMAGES_JSON = 'images.json'
TESTS_JSON = 'tests.json'
ZIRCON_JSON = 'zircon.json'
TOOL_PATHS_JSON = 'tool_paths.json'
# Name of BigQuery project and table for uploading artifacts.
BIGQUERY_PROJECT = 'fuchsia-infra'
BIGQUERY_ARTIFACTS_DATASET = 'artifacts'
# Name of the build results manifest to isolate.
BUILD_RESULTS_JSON = 'build_results.json'
# The private and authorized SSH keys pulled down in the checkout, relative to
# the fuchsia root.
CHECKOUT_AUTHORIZED_KEY = '.ssh/authorized_keys'
CHECKOUT_PRIVATE_KEY = '.ssh/pkey'
# The name of the environment variable that contains the path to the public
# key corresponding to the key used to sign release builds. Only set on
# release builders.
RELEASE_PUBKEY_PATH = 'RELEASE_PUBKEY_PATH'
# The default path to the public key.
DEFAULT_PUBKEY_PATH = '/etc/release_keys/release_key_pub.pem'
# The name of the public key file uploaded in release builds.
RELEASE_PUBKEY_FILENAME = 'publickey.pem'
# The directory in the root of the fuchsia checkout holding test duration files
# to be consumed by testsharder.
DURATION_FILES_DIR = 'integration/infra/test_durations'
class FuchsiaBuildResults(object):
"""Represents a completed build of Fuchsia."""
# TODO(garymm): Use attr.ib
def __init__(self,
api,
checkout_root,
target,
variants,
build_type,
fuchsia_build_dir,
zircon_build_dir,
board,
product,
gn_results,
images=None,
archives=None):
assert target in TARGETS
self._api = api
self._checkout_root = checkout_root
self._fuchsia_build_dir = fuchsia_build_dir
self._zircon_build_dir = zircon_build_dir
self._target = target
self._variants = variants
self._build_type = build_type
self._filesystem_sizes = None
self._images = images if images else {}
self._archives = archives if archives else {}
self._board = board
self._product = product
self._gn_results = gn_results
self._storage_full = None
self.authorized_key = self.checkout_root.join(CHECKOUT_AUTHORIZED_KEY)
self.llvm_symbolizer = self.tool('llvm-symbolizer')
self.minfs = self.tool('minfs')
self.private_key = self.checkout_root.join(CHECKOUT_PRIVATE_KEY)
self.zbi = self.tool('zbi')
def __deepcopy__(self, memodict):
# Shallow copy first.
new = copy.copy(self)
# Only images needs to be a real deep copy.
new._images = copy.deepcopy(new._images, memodict)
return new
@property
def target(self):
"""The build target for this build."""
return self._target
@property
def variants(self):
"""The variants for this build."""
return self._variants
@property
def build_type(self):
"""The build type for this build."""
return self._build_type
@property
def zircon_build_dir(self):
"""The directory where Zircon build artifacts may be found."""
return self._zircon_build_dir
@property
def fuchsia_build_dir(self):
"""The directory where Fuchsia build artifacts may be found."""
return self._fuchsia_build_dir
@property
def tests_file(self):
return self._fuchsia_build_dir.join(TESTS_JSON)
@property
def ids(self):
return self._fuchsia_build_dir.join('ids.txt')
@property
def secret_specs(self):
return self._fuchsia_build_dir.join('secret_specs')
@property
def images(self):
"""Mapping between the canonical name of an image produced by the Fuchsia
build to its path relative to the fuchsia build directory."""
return self._images
@property
def archives(self):
"""Mapping between the canonical name of an archive produced by the Fuchsia
build to the absolute path to that archive on the local disk."""
return self._archives
@property
def checkout_root(self):
"""The Fuchsia checkout root."""
return self._checkout_root
@property
def board(self):
"""The board for this build."""
return self._board
@property
def product(self):
"""The product for this build."""
return self._product
@property
def storage_full(self):
"""The storage-full image (fvm.blk) produced by the build."""
if self._storage_full:
return self._storage_full
elif STORAGE_FULL not in self.images:
return None
path = self.fuchsia_build_dir.join(self.images[STORAGE_FULL]['path'])
self._storage_full = FvmImage(self._api, path, self.tool('fvm'))
return self._storage_full
@property
def filesystem_sizes(self):
"""The filesystem_sizes data produced by the build."""
if self._filesystem_sizes is None:
filesystem_sizes_file = self.fuchsia_build_dir.join(
FILESYSTEM_SIZES_FILENAME)
# Not all supported build configurations actually produce the file.
if self._api.path.exists(filesystem_sizes_file):
self._filesystem_sizes = self._api.file.read_json(
'read filesystem sizes', filesystem_sizes_file)
else:
self._filesystem_sizes = []
return self._filesystem_sizes
def tool(self, name, cpu='x64', os=None):
"""The path to a tool of the given name and cpu."""
return self._gn_results.tool(name, cpu, os)
def check_filesystem_sizes(self):
"""Checks that filesystem sizes are less than the maximum allowed
Raises:
StepFailure if the size exceeds the maximum.
"""
with self._api.step.nest('check filesystem sizes'):
error_messages = []
for fs_dict in self.filesystem_sizes:
fs_name = fs_dict['name']
with self._api.step.nest(fs_name) as size_step:
size = fs_dict['value']
limit = fs_dict['limit']
size_step.presentation.step_text = 'size, limit = %d, %d' % (
size,
limit,
)
if size > limit > 0:
size_step.presentation.status = self._api.step.FAILURE
error_message = '%s too large; was %d, but must be <= %d' % (
fs_name, size, limit)
if 'debug_instructions' in fs_dict:
error_message = '%s. %s' % (error_message,
fs_dict['debug_instructions'])
error_messages.append(error_message)
if error_messages:
raise self._api.step.StepFailure('. '.join(error_messages))
def upload_debug_symbols(self,
debug_symbol_gcs_bucket,
gcs_bucket=None,
namespace=None):
"""Uploads debug symbols to Google Cloud Storage.
Args:
debug_symbol_gcs_bucket (str): GCS bucket name to upload debug symbols to.
gcs_bucket (str or None): GCS bucket to upload debug-binaries.txt record.
If None, do not create or upload the record.
namespace (str or None): A unique namespace for the GCS debug-binaries
record upload location. If None, the current build's ID is used.
"""
assert debug_symbol_gcs_bucket
path = self._api.path['cleanup'].join(
'debug-binaries.txt') if gcs_bucket else None
self._api.upload_debug_symbols(
step_name='upload debug symbols',
upload_debug_symbols_path=self.tool('upload_debug_symbols'),
bucket=debug_symbol_gcs_bucket,
# Recursively walk checkout root to discover all debug symbols.
# NOTE: A subset of debug symbols are expected to be under the build
# directory, which is currently always under checkout root.
build_id_dirs=[self.checkout_root],
record=path,
)
if path:
self._api.upload.file_to_gcs(
source=path,
bucket=gcs_bucket,
subpath=self._api.path.basename(path),
namespace=namespace,
)
def upload_results(self, gcs_bucket, is_release_version=False,
namespace=None):
"""Uploads artifacts from the build to Google Cloud Storage.
Args:
gcs_bucket (str): GCS bucket name to upload build results to.
is_release_version (bool): True if checkout is a release version.
namespace (str|None): A unique namespace for the GCS upload location;
if None, the current build's ID is used.
"""
assert gcs_bucket
with self._api.step.nest('upload build results'):
self._api.build._upload_build_results(self, gcs_bucket,
is_release_version, namespace)
self._api.build._upload_package_snapshot(self, gcs_bucket, namespace)
class GNResults(object):
"""GNResults is represents the result of a `gn gen` invocation in the fuchsia
build.
It exposes the API of the build, which defines how one can invoke ninja.
"""
_COMPDB_FILENAME = 'compile_commands.json'
def __init__(self, api, fuchsia_build_dir, compdb_exported=False):
self._api = api
self._fuchsia_build_dir = fuchsia_build_dir
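# zircon.json is expected to provide a build 'dir' (joined relative to the
# fuchsia build dir below) and a list of canonical ninja 'targets', e.g.
# (illustrative): {'dir': '../default.zircon', 'targets': [...]}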
zircon_ninja_instructions = api.json.read(
'read zircon ninja instructions',
fuchsia_build_dir.join(ZIRCON_JSON),
step_test_data=api.build.test_api.mock_zircon_instructions,
).json.output
self._zircon_build_dir = api.path.abs_to_path(
api.path.realpath(
fuchsia_build_dir.join(zircon_ninja_instructions['dir']),),)
self._canonical_zircon_ninja_targets = zircon_ninja_instructions['targets']
self._image_manifest = None
self._archives = None
self._test_manifest = None
self._tools = None
self._generated_sources = None
self._zircon_generated_sources = None
self._fuchsia_compdb = None
self._zircon_compdb = None
self._zbi_tests = None
if compdb_exported:
self._fuchsia_compdb = self._fuchsia_build_dir.join(self._COMPDB_FILENAME)
api.path.mock_add_paths(self._fuchsia_compdb)
self._zircon_compdb = self._zircon_build_dir.join(self._COMPDB_FILENAME)
api.path.mock_add_paths(self._zircon_compdb)
@property
def fuchsia_build_dir(self):
"""Returns the fuchsia build directory (Path)."""
return self._fuchsia_build_dir
@property
def zircon_build_dir(self):
"""Returns the associated zircon build directory (Path)."""
return self._zircon_build_dir
@property
def canonical_zircon_ninja_targets(self):
"""Returns the canonical zircon ninja targets (list(string)) that the
fuchsia build is informed of."""
return self._canonical_zircon_ninja_targets
@property
def image_manifest(self):
"""Returns the manifest of images (dict) in the GN graph.
TODO(BLD-253): Point to the schema once there is one.
"""
if not self._image_manifest:
self._image_manifest = self._api.json.read(
'read image manifest',
self.fuchsia_build_dir.join(IMAGES_JSON),
step_test_data=self._api.build.test_api.mock_image_manifest,
).json.output
return self._image_manifest
@property
def archives(self):
"""Returns the archives (dict[str]str) in the GN graph.
Maps archive name to path relative to the fuchsia build directory.
TODO(IN-882): Make this backed by its own build API module.
"""
if not self._archives:
self._archives = {}
for image in self.image_manifest:
if image['type'] == 'tgz':
self._archives[image['name']] = image['path']
return self._archives
@property
def test_manifest(self):
"""Returns the manifest of tests (dict) in the GN graph.
TODO(BLD-253): Point to the schema once there is one.
"""
if not self._test_manifest:
self._test_manifest = self._api.json.read(
'read test spec manifest',
self.fuchsia_build_dir.join(TESTS_JSON),
step_test_data=self._api.build.test_api.mock_test_spec_manifest,
).json.output
return self._test_manifest
@property
def generated_sources(self):
"""Returns the generated source files (list(str)) from the fuchsia build.
The returned paths are relative to the fuchsia build directory.
"""
if not self._generated_sources:
self._generated_sources = self._api.file.read_raw(
'read generated sources',
self.fuchsia_build_dir.join('all_fidl_json.txt'),
'//generated_fidl.json',
).splitlines()
return self._generated_sources
@property
def zircon_generated_sources(self):
"""Returns the generated source files (list(str)) from the zircon build.
The returned paths are relative to the zircon build directory.
"""
if not self._zircon_generated_sources:
self._zircon_generated_sources = self._api.json.read(
'read zircon generated sources',
self.zircon_build_dir.join('generated_sources.json'),
step_test_data=lambda: self._api.json.test_api.output(
['//generated_header.h']),
).json.output
return self._zircon_generated_sources
def tool(self, name, cpu='x64', os=None):
"""Returns the path to the specified tool provided from the tool_paths
manifest.
"""
os = os or self._api.platform.name
if not self._tools:
self._tools = {}
tool_paths_manifest = self._api.json.read(
'read tool_paths manifest',
self._fuchsia_build_dir.join(TOOL_PATHS_JSON),
step_test_data=self._api.build.test_api.mock_tool_paths_manifest
).json.output
for tool in tool_paths_manifest:
key = (tool['name'], tool['cpu'], tool['os'])
assert key not in self._tools, (
'only one tool with (name, cpu, os) == (%s, %s, %s) is allowed' %
key)
tool_path = self._api.path.abs_to_path(
self._api.path.realpath(self._fuchsia_build_dir.join(tool['path'])))
self._tools[key] = tool_path
return self._tools.get((name, cpu, os), None)
def filtered_compdb(self, filters):
"""The path to a merged compilation database, filtered via the passed filters."""
with self._api.step.nest('merge compdbs'):
compdb = []
compdb += self._api.json.read(
'read zircon compdb',
self._zircon_compdb,
stdout=self._api.json.output(),
step_test_data=lambda: self._api.json.test_api.output([{
'directory': '[START_DIR]/out/default.zircon',
'file': '../../zircon.cpp',
'command': 'clang++ zircon.cpp',
}]),
).json.output
compdb += self._api.json.read(
'read compdb',
self._fuchsia_compdb,
stdout=self._api.json.output(),
step_test_data=lambda: self._api.json.test_api.output([{
'directory': '[START_DIR]/out/default',
'file': '../../foo.cpp',
'command': 'clang++ foo.cpp',
}, {
'directory': '[START_DIR]/out/default',
'file': '../../third_party/foo.cpp',
'command': 'clang++ third_party/foo.cpp',
}, {
'directory': '[START_DIR]/out/default',
'file': '../../out/default/foo.cpp',
'command': 'clang++ foo.cpp',
}]),
).json.output
def keep_in_compdb(entry):
# Filenames are relative to the build directory, and the build directory is absolute.
build_dir = self._api.path.abs_to_path(entry['directory'])
full_path = self._api.path.abs_to_path(
self._api.path.realpath(
self._api.path.join(build_dir, entry['file'])))
if build_dir.is_parent_of(full_path):
return False
segments = entry['file'].split(os.sep)
if any(bad_segments in segments for bad_segments in filters):
return False
return True
compdb_filtered = [entry for entry in compdb if keep_in_compdb(entry)]
compdb_path = self._api.path['cleanup'].join('compile_commands.json')
self._api.file.write_json('write merged compdb', compdb_path,
compdb_filtered)
return compdb_path
@property
def zbi_tests(self):
"""Returns the ZBI tests from the Fuchsia build directory."""
if not self._zbi_tests:
zbi_tests = self._api.json.read(
'read zbi test manifest',
self.fuchsia_build_dir.join('zbi_tests.json'),
step_test_data=lambda: self._api.json.test_api.output([]),
).json.output
self._zbi_tests = {zbi_test['name']: zbi_test for zbi_test in zbi_tests}
return self._zbi_tests
class ToolchainContext(object):
"""A ToolchainContext provides specifications for building with a specific
toolchain."""
def __init__(self, api):
def cipd(package, instance, root_dir):
pkgs = api.cipd.EnsureFile()
pkgs.add_package('fuchsia/%s/${platform}' % package, instance)
api.cipd.ensure(root_dir, pkgs)
def isolated(instance, root_dir):
api.isolated.download(
'download', isolated_hash=instance, output_dir=root_dir)
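# The *_toolchain properties consumed below are expected to be dicts of the
# form {'type': 'cipd' | 'isolated', 'instance': <CIPD version or isolated
# hash>}; any other 'type' raises a KeyError.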
self._gn_args = []
use_goma = True
if api.build._clang_toolchain:
use_goma = False
with api.step.nest('clang_toolchain'), api.context(infra_steps=True):
clang_instance = api.build._clang_toolchain['instance']
clang_type = api.build._clang_toolchain['type']
clang_dir = api.path.mkdtemp('clang')
if clang_type == 'cipd':
cipd('third_party/clang', clang_instance, clang_dir)
elif clang_type == 'isolated':
isolated(clang_instance, clang_dir)
else: # pragma: no cover
raise KeyError('clang_toolchain type "%s" not recognized' %
clang_type)
self._gn_args += ['clang_prefix="%s"' % clang_dir.join('bin')]
if api.build._gcc_toolchain:
use_goma = False
with api.step.nest('gcc_toolchain'), api.context(infra_steps=True):
gcc_instance = api.build._gcc_toolchain['instance']
gcc_type = api.build._gcc_toolchain['type']
gcc_dir = api.path.mkdtemp('gcc')
if gcc_type == 'cipd':
cipd('third_party/gcc', gcc_instance, gcc_dir)
elif gcc_type == 'isolated':
isolated(gcc_instance, gcc_dir)
else: # pragma: no cover
raise KeyError('gcc_toolchain type "%s" not recognized' % gcc_type)
self._gn_args += [
'''
if (!defined(zircon_extra_args)) { zircon_extra_args = {} }
zircon_extra_args.gcc_tool_dir = "%s"
''' % gcc_dir.join('bin')
]
if not use_goma:
@contextmanager
def null_context():
yield
self._context = null_context
self._ninja_jobs = api.platform.cpu_count
else:
api.goma.ensure()
# goma sometimes takes a while to start, so bump the timeout from the
# default of 20 seconds (it uses GOMA_PING_TIMEOUT_SEC + 20 as the actual
# timeout).
goma_env = {'GOMA_PING_TIMEOUT_SEC': '100'}
self._context = lambda: api.goma.build_with_goma(env=goma_env)
self._ninja_jobs = api.goma.jobs
self._gn_args += ['use_goma=true', 'goma_dir="%s"' % api.goma.goma_dir]
if api.build._rust_toolchain:
with api.step.nest('rust_toolchain'), api.context(infra_steps=True):
rust_instance = api.build._rust_toolchain['instance']
rust_type = api.build._rust_toolchain['type']
rust_dir = api.path.mkdtemp('rust')
if rust_type == 'cipd':
cipd('rust', rust_instance, rust_dir)
elif rust_type == 'isolated':
isolated(rust_instance, rust_dir)
else: # pragma: no cover
raise KeyError('rust_toolchain type "%s" not recognized' % rust_type)
self._gn_args += ['rustc_prefix="%s"' % rust_dir.join('bin')]
def __call__(self):
"""Returns a context for building with the associated toolchain."""
return self._context()
@property
def ninja_jobs(self):
"""Returns the number (int) of recommended ninja jobs."""
return self._ninja_jobs
@property
def gn_args(self):
"""Returns GN args list(str) contributed by the toolchain."""
return copy.copy(self._gn_args)
class FuchsiaBuildApi(recipe_api.RecipeApi):
"""APIs for building Fuchsia."""
FuchsiaBuildResults = FuchsiaBuildResults
GNResults = GNResults
TestOrchestrationInputs = TestOrchestrationInputs
ShardTaskRequest = ShardTaskRequest
def __init__(self, clang_toolchain, gcc_toolchain, rust_toolchain, *args,
**kwargs):
super(FuchsiaBuildApi, self).__init__(*args, **kwargs)
# TODO(fxb/35063): Remove most of these properties, use a spec msg.
self._clang_toolchain = clang_toolchain
self._gcc_toolchain = gcc_toolchain
self._rust_toolchain = rust_toolchain
self.__toolchain_context = None
@property
def _toolchain_context(self):
if not self.__toolchain_context:
self.__toolchain_context = ToolchainContext(self.m)
return self.__toolchain_context
def _prebuilt_path(self, checkout_root, *path):
"""Returns the Path to the host-platform subdir under the given subdirs."""
path = list(path)
path.append('{os}-{arch}'.format(
os=self.m.platform.name,
arch={'intel': 'x64'}[self.m.platform.arch],
))
return checkout_root.join('prebuilt', *path)
def from_spec(self,
build_spec,
checkout,
collect_build_metrics,
sdk_id=None,
gcs_bucket=None):
"""Builds Fuchsia from a Jiri checkout
Args:
build_spec (fuchsia_pb2.Fuchsia.Build): The input build spec.
checkout (CheckoutApi.CheckoutResults): The Fuchsia checkout.
collect_build_metrics (bool): Whether to upload build metrics to cloud
storage.
sdk_id (str): If specified, set sdk_id in GN.
gcs_bucket (str or None): The GCS bucket to upload results to, if set.
Returns:
A FuchsiaBuildResults, representing the build.
"""
archives_to_build = []
ninja_targets = build_spec.ninja_targets
# TODO(fxb/43568): Remove once it is always false.
if build_spec.include_archives:
# If we are not building images, then we need not build package-related
# targets.
if not build_spec.exclude_images:
archives_to_build.extend(['archive', 'package-archive'])
if build_spec.include_breakpad_symbols:
archives_to_build.append('breakpad-symbol-archive')
if build_spec.include_symbol_archive:
archives_to_build.append('symbol-archive')
elif not build_spec.exclude_images:
# This will build the amber-files tree that would otherwise be included in
# the package-archive.
ninja_targets.append('build/images:updates')
build_dir = checkout.root_dir.join('out')
return self.with_options(
build_dir=build_dir,
checkout=checkout,
target=build_spec.target,
build_type=build_spec.build_type,
packages=build_spec.packages,
universe_packages=build_spec.universe_packages,
variants=build_spec.variants,
gn_args=build_spec.gn_args,
sdk_id=sdk_id,
ninja_targets=ninja_targets,
board=build_spec.board,
product=build_spec.product,
collect_build_metrics=collect_build_metrics,
build_host_tests=build_spec.run_tests,
build_images=not build_spec.exclude_images,
archives_to_build=tuple(archives_to_build),
gcs_bucket=gcs_bucket,
)
def with_options(self,
build_dir,
checkout,
target,
build_type,
packages,
universe_packages=(),
variants=(),
gn_args=(),
sdk_id=None,
ninja_targets=(),
board=None,
product=None,
collect_build_metrics=False,
build_host_tests=False,
build_images=True,
archives_to_build=(),
gcs_bucket=None):
"""Builds Fuchsia from a Jiri checkout.
Args:
build_dir (Path): The directory to output build results into.
checkout (CheckoutResult): The Fuchsia checkout result.
target (str): The build target, see TARGETS for allowed targets.
build_type (str): One of the build types in BUILD_TYPES.
packages (sequence[str]): A sequence of packages to pass to GN to build.
universe_packages (sequence[str]): A sequence of packages to pass to GN
to build and add to the set of 'universe' packages.
variants (sequence[str]): A sequence of build variant selectors to pass
to GN in `select_variant`.
gn_args (sequence[str]): Additional arguments to pass to GN.
ninja_targets (sequence[str]): Additional target args to pass to ninja.
sdk_id (str): If specified, set sdk_id in GN.
board (str): A board to pass to GN to build.
product (str): A product to pass to GN to build.
collect_build_metrics (bool): Whether to collect build metrics.
build_host_tests (bool): Whether to build the host tests.
build_images (bool): Whether to build the basic images needed to boot
and test on fuchsia.
archives_to_build (seq(str)): A tuple of canonical names of archives to
build.
gcs_bucket (str): GCS bucket name to upload build results to.
Returns:
A FuchsiaBuildResults, representing the recently completed build.
"""
assert target in TARGETS
assert build_type in BUILD_TYPES
assert all(archive in ARCHIVES for archive in archives_to_build)
# We need to modify the arguments when setting up the toolchain, so make a
# mutable copy.
gn_args = list(gn_args) if gn_args else []
if checkout.release_version:
gn_args.append('build_info_version="{version}"'.format(
version=checkout.release_version))
if product:
product_base = os.path.splitext(os.path.basename(product))[0]
gn_args.append('build_info_product="%s"' % product_base)
if board:
board_base = os.path.splitext(os.path.basename(board))[0]
gn_args.append('build_info_board="%s"' % board_base)
if sdk_id:
gn_args.extend(['build_sdk_archives=true', 'sdk_id="%s"' % sdk_id])
with self.m.step.nest('build'):
return self._with_options(
checkout_root=checkout.root_dir,
fuchsia_build_dir=build_dir.join('default'),
build_type=build_type,
target=target,
packages=packages,
universe_packages=universe_packages,
variants=variants,
gn_args=gn_args,
ninja_targets=list(ninja_targets),
board=board,
product=product,
collect_build_metrics=collect_build_metrics,
build_host_tests=build_host_tests,
build_images=build_images,
archives_to_build=archives_to_build,
gcs_bucket=gcs_bucket,
)
def _run_ninja(self, step_name, build_dir, ninja_targets, jobs, build_id,
gcs_bucket):
try:
self.m.ninja(
step_name=step_name,
build_dir=build_dir,
targets=ninja_targets,
job_count=jobs,
)
except self.m.step.StepFailure:
crashreports_dir = build_dir.join('clang-crashreports')
self.m.path.mock_add_paths(crashreports_dir)
if gcs_bucket and self.m.path.exists(crashreports_dir):
with self.m.step.nest('clang-crashreports'):
with self.m.context(infra_steps=True):
temp = self.m.path.mkdtemp('reproducers')
reproducers = self.m.file.glob_paths(
'find reproducers',
crashreports_dir,
'*.sh',
test_data=(crashreports_dir.join('foo.sh'),))
for reproducer in reproducers:
base = self.m.path.splitext(self.m.path.basename(reproducer))[0]
files = self.m.file.glob_paths(
'find %s files' % base,
crashreports_dir,
base + '.*',
test_data=(crashreports_dir.join('foo.sh'),
crashreports_dir.join('foo.cpp')))
tgz_basename = '%s.tar.gz' % base
tgz_path = temp.join(tgz_basename)
archive = self.m.tar.create(tgz_path, compression='gzip')
for f in files:
archive.add(f, crashreports_dir)
archive.tar('create %s' % tgz_basename)
self.m.upload.file_to_gcs(
source=tgz_path,
bucket=gcs_bucket,
subpath=tgz_basename,
namespace=build_id,
)
raise
def gen(self,
checkout_root,
fuchsia_build_dir,
target,
build_type,
product,
board=None,
packages=(),
universe_packages=(),
variants=(),
args=(),
record_tracelogs=False,
export_compdb=False):
"""Sets up and calls `gn gen`.
Args:
checkout_root (Path): The fuchsia checkout root.
fuchsia_build_dir (Path): The directory to output build results into.
target (str): The build target, see TARGETS for allowed targets.
build_type (str): One of the build types in BUILD_TYPES.
product (str): A product to pass to GN to build.
board (str or None): A board to pass to GN to build.
packages (sequence[str]): A sequence of packages to pass to GN to build.
universe_packages (sequence[str]): A sequence of packages to pass to GN
to build and add to the set of 'universe' packages.
variants (sequence[str]): A sequence of build variant selectors to pass
to GN in `select_variant`.
args (sequence[str]): Additional arguments to pass to GN.
record_tracelogs (bool): Whether to record tracelogs.
export_compdb (bool): Whether to generate a compilation database.
Returns:
A GNResults object.
"""
# Set the path to GN executable since it is not installed from CIPD.
self.m.gn.set_path(
self._prebuilt_path(checkout_root, 'third_party', 'gn').join('gn'))
args = list(args)
args.extend(self._toolchain_context.gn_args)
args.extend([
'target_cpu="%s"' % target,
'is_debug=%s' % ('true' if build_type == 'debug' else 'false'),
])
# TODO(fxb/44227): Split test duration files by build configuration name
# instead of builder name.
build_config = self.m.buildbucket.builder_name
if build_config.endswith('-subbuild'):
build_config = build_config[:-len('-subbuild')]
durations_file = '%s/%s.json' % (DURATION_FILES_DIR, build_config)
self.m.path.mock_add_paths(checkout_root.join(durations_file))
if self.m.path.exists(checkout_root.join(durations_file)):
args.append('test_durations_file="%s"' % durations_file)
if board:
args.append('import("//%s") ' % board)
if product:
args.append('import("//%s") ' % product)
if packages:
base_package_labels_format = 'base_package_labels=[%s]'
# if product is set, append to base_package_labels.
if product:
base_package_labels_format = 'base_package_labels+=[%s]'
args.append(base_package_labels_format %
','.join('"%s"' % pkg for pkg in packages))
if universe_packages:
universe_package_labels_format = 'universe_package_labels=[%s]'
# if product is set, append to universe_package_labels.
if product:
universe_package_labels_format = 'universe_package_labels+=[%s]'
args.append(universe_package_labels_format %
','.join('"%s"' % pkg for pkg in universe_packages))
if variants:
# Apply double-quotes to `select_variant_shortcut`s (a string;
# e.g. host-asan), but not to `selector`s (a scope;
# e.g. {variant="asan-fuzzer" target_type=["fuzzed_executable"]}).
formatted_variants = [
v if re.match('^{.*}$', v) else '"%s"' % v for v in variants
]
args.append('select_variant=[%s]' % ','.join(formatted_variants))
if 'thinlto' in variants:
args.append('thinlto_cache_dir="%s"' %
self.m.path['cache'].join('thinlto'))
if record_tracelogs:
args.append("zircon_tracelog=\"%s\"" %
str(self.m.path['cleanup'].join('zircon_gn_trace.json')))
def sorted_with_imports_first(arglist):
"""Sorts arguments while ensuring that imports come first.
The `--args` passed to gen are sorted for a deterministic ordering with
imports coming first, as otherwise they might blindly redefine variables
set or modified by other arguments. This will ease reviews, as a
re-ordering of build logic will yield the same GN invocation.
"""
import_args, normal_args = [], []
for arg in arglist:
if arg.startswith('import('):
import_args.append(arg)
else:
normal_args.append(arg)
return sorted(import_args) + sorted(normal_args)
gen_options = [
fuchsia_build_dir,
'--check',
'--args=%s' % ' '.join(sorted_with_imports_first(args)),
]
if export_compdb:
gen_options.append('--export-compile-commands')
if record_tracelogs:
gen_options.append('--tracelog=%s' %
str(self.m.path['cleanup'].join('gn_trace.json')))
with self.m.macos_sdk(), self.m.context(cwd=checkout_root):
self.m.gn('gen', *gen_options)
return self.GNResults(self.m, fuchsia_build_dir, export_compdb)
def ninja(self,
gn_results,
targets=(),
zircon_targets=(),
build_canonical_zircon_targets=True,
build_images=False,
image_filter=None,
archives_to_build=(),
build_host_tests=False,
build_generated_sources=False,
build_zbi_tests=False,
gcs_bucket=None,
build_fuchsia=True,
build_zircon=True):
"""A high-level ninja abstraction that consumes GN build APIs - exposed
through GNResults - in determining what to run.
Args:
gn_results (GNResults): GN gen results.
targets (seq(str)): Fuchsia ninja targets.
zircon_targets (seq(str)): Zircon ninja targets.
build_canonical_zircon_targets (bool): Whether to build the zircon ninja
targets given in the fuchsia build 'instructions' of zircon.json.
build_images (bool): Whether to build images within the GN graph.
image_filter (lambda): A bool-valued predicate on the entries of
gn_results.image_manifest. If build_images is true, then only the images
for which this function returns true will be built.
archives_to_build (seq(str)): A list of archives in the GN graph to build.
build_host_tests (bool): Whether to build host test executables in the
GN graph.
build_generated_sources (bool): Whether to build generated sources.
build_zbi_tests (bool): Whether to build the zbi tests in the GN graph.
gcs_bucket (str or None): A GCS bucket to upload crash reports to.
build_fuchsia (bool): Whether to run Ninja for Fuchsia. Set to False when
targets = () means no targets should be built for Fuchsia, rather than
meaning that all targets should be built.
build_zircon (bool): Whether to run Ninja for Zircon. Set to False when
zircon_targets = () means no targets should be built for Zircon, rather
than meaning that all targets should be built.
"""
# Set the path to Ninja executable since it is not installed from CIPD.
self.m.ninja.set_path(gn_results.tool('ninja'))
targets = list(targets)
zircon_targets = list(zircon_targets)
def append_target(path):
abspath = self.m.path.abs_to_path(
self.m.path.realpath(gn_results.fuchsia_build_dir.join(path)))
if gn_results.zircon_build_dir.is_parent_of(abspath):
zircon_targets.append(
os.path.relpath(
str(abspath),
str(gn_results.zircon_build_dir),
))
else:
targets.append(path)
if build_images:
image_filter = image_filter or (lambda image: True)
filtered_image_manifest = filter(image_filter, gn_results.image_manifest)
for image in filtered_image_manifest:
append_target(image['path'])
if archives_to_build:
targets.extend([
archive for name, archive in gn_results.archives.iteritems()
if name in archives_to_build
])
if build_host_tests:
for test_spec in gn_results.test_manifest:
test = self.m.testsharder.Test.from_jsonish(test_spec['test'])
if test.os != 'fuchsia':
targets.append(test.path)
if build_generated_sources:
zircon_targets.extend(gn_results.zircon_generated_sources)
targets.extend(gn_results.generated_sources)
if build_zbi_tests:
for zbi_test in gn_results.zbi_tests.itervalues():
append_target(zbi_test['path'])
if build_canonical_zircon_targets:
zircon_targets.extend(gn_results.canonical_zircon_ninja_targets)
with self.m.macos_sdk(), self.m.step.nest('ninja'):
with self._toolchain_context():
jobs = self._toolchain_context.ninja_jobs
if build_zircon:
self._run_ninja('zircon', gn_results.zircon_build_dir, zircon_targets,
jobs, self.m.buildbucket.build.id, gcs_bucket)
if build_fuchsia:
self._run_ninja('fuchsia', gn_results.fuchsia_build_dir, targets,
jobs, self.m.buildbucket.build.id, gcs_bucket)
def _with_options(self, checkout_root, fuchsia_build_dir, build_type, target,
packages, universe_packages, variants, gn_args,
ninja_targets, board, product, collect_build_metrics,
build_host_tests, build_images, archives_to_build,
gcs_bucket):
with self.m.step.nest('build fuchsia'):
gn_results = self.gen(
checkout_root=checkout_root,
fuchsia_build_dir=fuchsia_build_dir,
target=target,
build_type=build_type,
packages=packages,
universe_packages=universe_packages,
variants=variants,
args=gn_args,
board=board,
product=product,
record_tracelogs=collect_build_metrics)
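# Entries in gn_results.image_manifest are expected to look roughly like
# (illustrative):
#   {'name': 'zircon-a', 'path': 'zircon-a.zbi', 'type': 'zbi',
#    'bootserver_pave': ['--zircona']}
# image_filter below keys off 'name', 'bootserver_pave', and
# 'bootserver_netboot'.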
def image_filter(image):
name = image['name']
# If an image is used in paving or netbooting, its manifest entry will
# specify what flags to pass to the bootserver when doing so;
# conversely, if these arguments are present, we know the image is
# needed for paving or netbooting.
used_for_testing = (
image.get('bootserver_pave', []) or # used to pave
image.get('bootserver_netboot', []) or # used to netboot
name in ['qemu-kernel', STORAGE_FULL] # used for qemu
)
return used_for_testing
images = {}
ninja_targets = copy.copy(ninja_targets)
if build_images:
images = {
image['name']: image
for image in gn_results.image_manifest
if image_filter(image)
}
# ids.txt is needed for symbolization.
ninja_targets.append('ids.txt')
# Needed for size checks and tracking.
ninja_targets.append('build/images:record_filesystem_sizes')
ninja_targets.append('build/images:system_snapshot')
archives = {
name: self.m.path.abspath(fuchsia_build_dir.join(archive))
for name, archive in gn_results.archives.iteritems()
if name in archives_to_build
}
# TODO(IN-882): Once the following two archives are added to images.json,
# the following logic will happen automatically in the above for-loop.
if 'package-archive' in archives_to_build:
ninja_targets.append('build/gn:package_archive')
archives['package-archive'] = fuchsia_build_dir.join('packages.tar.gz')
if 'breakpad-symbol-archive' in archives_to_build:
ninja_targets.append('build/gn:breakpad_symbols')
archives['breakpad-symbol-archive'] = (
fuchsia_build_dir.join('breakpad_symbols',
'breakpad_symbols.tar.gz'))
self.ninja(
gn_results=gn_results,
gcs_bucket=gcs_bucket,
targets=ninja_targets,
build_images=build_images,
image_filter=image_filter,
archives_to_build=archives_to_build,
build_host_tests=build_host_tests,
)
results = self.FuchsiaBuildResults(
api=self.m,
target=target,
variants=variants,
build_type=build_type,
fuchsia_build_dir=fuchsia_build_dir,
zircon_build_dir=gn_results.zircon_build_dir,
checkout_root=checkout_root,
board=board,
product=product,
gn_results=gn_results,
images=images,
archives=archives,
)
# The build produces an fvm.blk that is just big enough to fit its inputs.
# In a small number of cases, there is not enough padding for tests to run
# (on QEMU) and we see failures as a result. We guard against this by
# doubling the amount of free space.
# Note that this image is only used by QEMU and that no host side disk
# blocks are allocated right away, so the operation is cheap regardless of
# the size we extend to.
if results.storage_full:
results.storage_full.extend(2)
return results
def _upload_build_results(self, build_results, gcs_bucket, is_release_version,
build_id):
assert gcs_bucket
# Upload archives.
for name, path in build_results.archives.iteritems():
metadata = None
# Try to sign the build archive. If we are on a release build and the
# signing script returns a signature, add it to the metadata and
# upload the public key for verification.
if is_release_version and name == 'archive':
signature = self._try_sign_archive(path)
if signature:
# Add the signature to the metadata.
metadata = {
'x-goog-meta-signature': signature,
}
# Upload the public key to GCS.
# Note that we should never actually use the DEFAULT_PUBKEY_PATH here,
# because a signature should only be generated in the presence of the
# RELEASE_PUBKEY_PATH environment variable.
self.m.upload.file_to_gcs(
source=os.getenv(RELEASE_PUBKEY_PATH, DEFAULT_PUBKEY_PATH),
bucket=gcs_bucket,
subpath=RELEASE_PUBKEY_FILENAME,
namespace=build_id,
)
# Upload the archive
self.m.upload.file_to_gcs(
source=path,
bucket=gcs_bucket,
subpath=self.m.path.basename(path),
namespace=build_id,
metadata=metadata,
)
# Upload build metrics.
self._upload_tracing_data(build_results, gcs_bucket, build_id)
self._upload_filesystem_sizes(build_results)
self._upload_blobstats_output(build_results, gcs_bucket, build_id)
def _try_sign_archive(self, archive_path):
args = [
'--archive-file',
archive_path,
]
return self.m.python(
'run signing script',
self.resource('sign.py'),
args,
venv=self.resource('sign.py.vpython'),
stdout=self.m.raw_io.output(),
).stdout
def _upload_package_snapshot(self, build_results, gcs_bucket, build_id):
assert gcs_bucket
snapshot_path = build_results.fuchsia_build_dir.join(
'obj', 'build', 'images', 'system.snapshot')
if not self.m.path.exists(snapshot_path):
return
# Upload a new table row for the system snapshot data generated during this build
snapshot = self.m.file.read_raw('read package snapshot file', snapshot_path)
build_packages_entry = {
'build_id': self.m.buildbucket.build.id,
'snapshot': snapshot,
}
basename = 'system.snapshot.json'
build_packages_entry_file = self.m.path['tmp_base'].join(basename)
self.m.step(
'write build_packages_entry_file',
['cat', self.m.json.input(build_packages_entry)],
stdout=self.m.raw_io.output(leak_to=build_packages_entry_file))
self.m.upload.file_to_gcs(
source=build_packages_entry_file,
bucket=gcs_bucket,
subpath=basename,
namespace=build_id,
)
# Upload a new table row describing this particular build. Other tables' rows
# are linked into this table using the build id as a foreign key.
builds_entry = {
'bucket': self.m.buildbucket.bucket_v1,
'builder': self.m.buildbucket.builder_name,
'build_id': self.m.buildbucket.build.id,
'gitiles_commit': [self.m.buildbucket.gitiles_commit.id],
'datetime': str(self.m.buildbucket.build.create_time.ToDatetime()),
'start_time': str(self.m.buildbucket.build.start_time.ToDatetime()),
'repo': self.m.buildbucket.build_input.gitiles_commit.project,
'arch': build_results.target,
'product': build_results.product,
'board': build_results.board,
'channel': [''],
}
self.m.bqupload.insert(
step_name='add table row: %s/%s/builds_beta' %
(BIGQUERY_PROJECT, BIGQUERY_ARTIFACTS_DATASET),
project=BIGQUERY_PROJECT,
dataset=BIGQUERY_ARTIFACTS_DATASET,
table='builds_beta',
data_file=self.m.json.input(builds_entry),
)
def _upload_filesystem_sizes(self, build_results):
"""Uploads filesystem sizes to BigQuery.
The upload also includes metadata about this build so that the data can
be used to create a self-contained BigQuery table. See INTK-709 and
INTK-1153 for context.
"""
if not build_results.filesystem_sizes:
return
metadata = {
'builder_name':
self.m.buildbucket.builder_name,
# This field is set to a string in the BQ table schema because it's just
# an opaque ID. The conversion from int to string that happens on the
# BigQuery side is not what we want, so convert here.
'build_id':
str(self.m.buildbucket.build.id),
'build_create_time_seconds':
self.m.buildbucket.build.create_time.seconds,
'gitiles_commit_host':
self.m.buildbucket.gitiles_commit.host,
'gitiles_commit_id':
self.m.buildbucket.gitiles_commit.id,
'gitiles_commit_project':
self.m.buildbucket.gitiles_commit.project,
}
size_data = []
for fs_dict in build_results.filesystem_sizes:
datum = metadata.copy()
datum['image_name'] = fs_dict['name']
datum['image_size'] = fs_dict['value']
size_data.append(datum)
size_path = self.m.path['cleanup'].join('filesystem_sizes_bq.json')
bq_formatted_str = '\n'.join(self.m.json.dumps(d) for d in size_data)
with self.m.step.nest('upload filesystem sizes'):
self.m.file.write_raw('write BQ formatted file', size_path,
bq_formatted_str)
self.m.bqupload.insert('bqupload', 'fuchsia-infra', 'fuchsia_build',
'filesystem_sizes', size_path)
def _upload_blobstats_output(self, build_results, gcs_bucket, build_id):
"""Runs the blobstats command and uploads the output files to GCS."""
dir_name = 'blobstats'
blobstats_output_dir = self.m.path['cleanup'].join(dir_name)
# If blobstats fails, it's probably because the build intentionally didn't
# produce the input files that blobstats requires. Blobstats is generally
# just a nice-to-have anyway, so it's probably okay to silently continue
# without uploading results if it fails.
if self._run_blobstats(build_results, blobstats_output_dir):
self.m.upload.directory_to_gcs(
source=blobstats_output_dir,
bucket=gcs_bucket,
subpath=dir_name,
namespace=build_id,
)
def _run_blobstats(self, build_results, output_dir):
"""Runs the blobstats script for this build.
Args:
build_results (FuchsiaBuildResults): The results of the build on which
blobstats will be run.
output_dir (Path): The directory that the blobstats output files will be
written to.
Returns:
A boolean indicating whether blobstats completed successfully.
"""
root_dir = build_results.checkout_root
dart_binary_path = build_results.tool('dart')
blobstats_src_dir = root_dir.join('scripts', 'blobstats')
command = [
dart_binary_path,
'--packages=%s' % blobstats_src_dir.join('blobstats.packages'),
blobstats_src_dir.join('blobstats.dart'),
'--output=%s' % output_dir,
]
with self.m.context(cwd=build_results.fuchsia_build_dir):
result = self.m.step('blobstats', command, ok_ret='any')
return result.retcode == 0
def _upload_tracing_data(self, build_results, gcs_bucket, build_id):
"""Uploads GN and ninja tracing results for this build to GCS"""
# Only upload if the bucket is specified.
gn_data = self._extract_gn_tracing_data(build_results.checkout_root,
'gn_trace')
zircon_gn_data = self._extract_gn_tracing_data(build_results.checkout_root,
'zircon_gn_trace')
ninjatrace_tool = build_results.tool('ninjatrace')
ninja_data = self._extract_ninja_tracing_data(
ninjatrace_tool, build_results.fuchsia_build_dir,
build_results.checkout_root, 'ninja_trace')
zircon_ninja_data = self._extract_ninja_tracing_data(
ninjatrace_tool, build_results.zircon_build_dir,
build_results.checkout_root, 'zircon_ninja_trace')
for data in [gn_data, zircon_gn_data, ninja_data, zircon_ninja_data]:
self.m.upload.file_to_gcs(
source=data,
bucket=gcs_bucket,
subpath=self.m.path.basename(data),
namespace=build_id,
)
def _extract_ninja_tracing_data(self, ninjatrace_tool, build_dir,
checkout_root, filename):
"""Extracts the tracing data from the .ninja_log.
Args:
ninjatrace_tool (Path): The path to the ninjatrace tool.
build_dir (Path): The build directory.
checkout_root (Path): The checkout directory.
filename (str): The name of the output file (no extension).
Returns:
A Path to the file containing the ninja tracing data in Chromium's
about:tracing html format.
"""
trace = self.m.path['cleanup'].join('%s.json' % filename)
html = self.m.path['cleanup'].join('%s.html' % filename)
self.m.step(
'ninja tracing', [
ninjatrace_tool,
'-filename',
build_dir.join('.ninja_log'),
'-trace-json',
trace,
],
stdout=self.m.raw_io.output(leak_to=trace))
return self._trace2html('ninja trace2html', trace, html, checkout_root)
def _extract_gn_tracing_data(self, checkout_root, filename):
"""Extracts the tracing data from this GN run.
Args:
checkout_root (Path): The checkout directory.
filename (str): The name of the output file (no extension).
Returns:
A Path to the file containing the gn tracing data in Chromium's
about:tracing html format.
"""
return self._trace2html('gn trace2html',
self.m.path['cleanup'].join('%s.json' % filename),
self.m.path['cleanup'].join('%s.html' % filename),
checkout_root)
def _trace2html(self, name, trace, html, checkout_root):
"""Converts an about:tracing file to HTML using the trace2html tool"""
# Catapult is imported in manifest/garnet, so we abort if it wasn't included
# in this checkout.
# if self.m.path['third_party'].join('catapult') not in self.m.path:
# return
self.m.python(
name=name,
script=checkout_root.join('third_party', 'catapult', 'tracing', 'bin',
'trace2html'),
args=['--output', html, trace])
return html