| # Copyright 2019 The Fuchsia Authors. All rights reserved. |
| # Use of this source code is governed by a BSD-style license that can be |
| # found in the LICENSE file. |
| |
| from contextlib import contextmanager |
| from recipe_engine import recipe_api |
| |
| import copy |
| import os |
| import re |
| |
| from .build_artifacts import BuildArtifacts |
| from .fvm import FvmImage |
| |
| # Host architecture -> number of bits -> host platform name. |
| # Add to this dictionary as we support building on more devices. |
| HOST_PLATFORMS = { |
| 'intel': { |
| 64: 'x64', |
| }, |
| } |
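| # For example, HOST_PLATFORMS['intel'][64] == 'x64' for a 64-bit Intel host. |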
| |
| # List of available targets. |
| TARGETS = ['x64', 'arm64'] |
| |
| # List of available build types. |
| BUILD_TYPES = ['debug', 'release', 'thinlto', 'lto'] |
| |
| # List of canonical names for archives that the build can produce. |
| ARCHIVES = [ |
| 'archive', # Images and scripts for paving/netbooting. |
| 'package-archive', # Package metadata, blobs, and tools. |
| 'symbol-archive', # Metadata for symbolization. |
| 'breakpad-symbol-archive', # Breakpad symbols. |
| ] |
| |
| # Canonical name for fvm.blk. |
| STORAGE_FULL = 'storage-full' |
| # Canonical name for fvm.sparse.blk. |
| STORAGE_SPARSE = 'storage-sparse' |
| |
| # File in the build output directory that records filesystem sizes. |
| FILESYSTEM_SIZES_FILENAME = 'filesystem_sizes.json' |
| |
| # Manifests produced by the build. |
| IMAGES_JSON = 'images.json' |
| ZEDBOOT_IMAGES_JSON = 'zedboot_images.json' |
| TESTS_JSON = 'tests.json' |
| ZIRCON_JSON = 'zircon.json' |
| |
| # Name of BigQuery project and table for uploading artifacts. |
| BIGQUERY_PROJECT = 'fuchsia-infra' |
| BIGQUERY_ARTIFACTS_DATASET = 'artifacts' |
| |
| # Name of the build results manifest to isolate. |
| BUILD_RESULTS_JSON = 'build_results.json' |
| |
| # The private and authorized SSH keys pulled down in the checkout, relative to |
| # the fuchsia root. |
| CHECKOUT_AUTHORIZED_KEY = '.ssh/authorized_keys' |
| CHECKOUT_PRIVATE_KEY = '.ssh/pkey' |
| |
| # Please use led to test fuchsia recipe runs when updating these pins. |
| UPLOAD_DEBUG_SYMBOLS_CIPD_VERSION = 'git_revision:76afdab6dec30f87a8d08291922e7a572033d9bc' |
| |
| |
| class FuchsiaBuildResults(object): |
| """Represents a completed build of Fuchsia.""" |
| |
| def __init__(self, |
| api, |
| checkout_root, |
| target, |
| variants, |
| build_type, |
| fuchsia_build_dir, |
| zircon_build_dir, |
| board, |
| product, |
| images=None, |
| archives=None): |
| assert target in TARGETS |
| self._api = api |
| self._checkout_root = checkout_root |
| self._fuchsia_build_dir = fuchsia_build_dir |
| self._zircon_build_dir = zircon_build_dir |
| self._target = target |
| self._variants = variants |
| self._build_type = build_type |
| self._filesystem_sizes = None |
| self._images = images if images else {} |
| self._archives = archives if archives else {} |
| self._board = board |
| self._product = product |
| self._storage_full = None |
| |
| def get_artifacts(self, shards=None): |
| """Extracts BuildArtifacts from these FuchsiaBuildResults.""" |
| return BuildArtifacts( |
| fuchsia_build_dir=self._fuchsia_build_dir, |
| amber_files=self._fuchsia_build_dir.join('amber-files'), |
| symbolize_tool=self.symbolize_tool, |
| llvm_symbolizer=self.llvm_symbolizer, |
| llvm_profdata=self.llvm_profdata, |
| llvm_cov=self.llvm_cov, |
| minfs=self.minfs, |
| zbi=self.zbi, |
| target=self.target, |
| board=self.board, |
| product=self.product, |
| variants=self.variants, |
| build_type=self.build_type, |
| images=self.images, |
| shards=shards, |
| ids=self.ids, |
| authorized_key=self.checkout_root.join(CHECKOUT_AUTHORIZED_KEY), |
| private_key=self.checkout_root.join(CHECKOUT_PRIVATE_KEY), |
| secret_specs=self.secret_specs, |
| ) |
| |
| def prebuilt_path(self, *path): |
| """Returns the Path to the host-platform subdir under the given subdirs.""" |
| path = list(path) |
| path.append('{os}-{arch}'.format( |
| os=self._api.platform.name, |
| arch={'intel': 'x64'}[self._api.platform.arch], |
| )) |
| return self._checkout_root.join('prebuilt', *path) |
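| # For example (illustrative), on a 64-bit Intel Linux host, |
| # prebuilt_path('third_party', 'clang') resolves to |
| # <checkout_root>/prebuilt/third_party/clang/linux-x64. |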
| |
| @property |
| def target(self): |
| """The build target for this build.""" |
| return self._target |
| |
| @property |
| def variants(self): |
| """The variants for this build.""" |
| return self._variants |
| |
| @property |
| def build_type(self): |
| """The build type for this build.""" |
| return self._build_type |
| |
| @property |
| def zircon_build_dir(self): |
| """The directory where Zircon build artifacts may be found.""" |
| return self._zircon_build_dir |
| |
| @property |
| def fuchsia_build_dir(self): |
| """The directory where Fuchsia build artifacts may be found.""" |
| return self._fuchsia_build_dir |
| |
| @property |
| def ids(self): |
| return self._fuchsia_build_dir.join('ids.txt') |
| |
| @property |
| def secret_specs(self): |
| return self._fuchsia_build_dir.join('secret_specs') |
| |
| @property |
| def images(self): |
| """Mapping between the canonical name of an image produced by the Fuchsia |
| build to its path relative to the fuchsia build directory.""" |
| return self._images |
| |
| @property |
| def archives(self): |
| """Mapping between the canonical name of an archive produced by the Fuchsia |
| build to the absolute path to that archive on the local disk.""" |
| return self._archives |
| |
| @property |
| def checkout_root(self): |
| """The Fuchsia checkout root.""" |
| return self._checkout_root |
| |
| @property |
| def board(self): |
| """The board for this build.""" |
| return self._board |
| |
| @property |
| def product(self): |
| """The product for this build.""" |
| return self._product |
| |
| @property |
| def symbolize_tool(self): |
| """The path to the symbolize tool.""" |
| return self.prebuilt_path('tools', 'symbolize').join('symbolize') |
| |
| @property |
| def llvm_symbolizer(self): |
| """The path to the llvm_symbolizer tool.""" |
| return self.prebuilt_path('third_party', |
| 'clang').join('bin', 'llvm-symbolizer') |
| |
| @property |
| def llvm_profdata(self): |
| """The path to the llvm_profdata tool.""" |
| return self.prebuilt_path('third_party', |
| 'clang').join('bin', 'llvm-profdata') |
| |
| @property |
| def llvm_cov(self): |
| """The path to the llvm_cov tool.""" |
| return self.prebuilt_path('third_party', 'clang').join('bin', 'llvm-cov') |
| |
| @property |
| def zbi(self): |
| """The path to the zbi tool.""" |
| return self.zircon_build_dir.join('tools', 'zbi') |
| |
| @property |
| def minfs(self): |
| """The path to the minfs tool.""" |
| return self.zircon_build_dir.join('tools', 'minfs') |
| |
| @property |
| def fvm_tool(self): |
| """The path to the fvm tool.""" |
| return self.zircon_build_dir.join('tools', 'fvm') |
| |
| @property |
| def storage_full(self): |
| """The storage-full image (fvm.blk) produced by the build.""" |
| if self._storage_full: |
| return self._storage_full |
| elif STORAGE_FULL not in self.images: |
| return None |
| path = self.fuchsia_build_dir.join(self.images[STORAGE_FULL]['path']) |
| self._storage_full = FvmImage(self._api, path, self.fvm_tool) |
| return self._storage_full |
| |
| @property |
| def filesystem_sizes(self): |
| """The filesystem_sizes data produced by the build.""" |
| if self._filesystem_sizes is None: |
| filesystem_sizes_file = self.fuchsia_build_dir.join( |
| FILESYSTEM_SIZES_FILENAME) |
| # Not all supported build configurations actually produce the file. |
| if self._api.path.exists(filesystem_sizes_file): |
| self._filesystem_sizes = self._api.file.read_json( |
| 'read filesystem sizes', filesystem_sizes_file) |
| else: |
| self._filesystem_sizes = [] |
| return self._filesystem_sizes |
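| # Illustrative shape of the data read above (assumed from the fields consumed |
| # by check_filesystem_sizes below): |
| # [{'name': 'blob.blk', 'value': 123456, 'limit': 1048576}] |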
| |
| def check_filesystem_sizes(self): |
| """Checks that filesystem sizes are less than the maximum allowed |
| |
| Raises: |
| StepFailure if the size exceeds the maximum. |
| """ |
| with self._api.step.nest('check filesystem sizes'): |
| error_messages = [] |
| for fs_dict in self.filesystem_sizes: |
| fs_name = fs_dict['name'] |
| with self._api.step.nest(fs_name) as fs_step: |
| size = fs_dict['value'] |
| limit = fs_dict['limit'] |
| fs_step.presentation.step_text = 'size, limit = %d, %d' % ( |
| size, |
| limit, |
| ) |
| if size > limit > 0: |
| fs_step.presentation.status = self._api.step.FAILURE |
| error_messages.append( |
| '%s too large; was %d, but must be less than %d' % |
| (fs_name, size, limit)) |
| if error_messages: |
| raise self._api.step.StepFailure('. '.join(error_messages)) |
| |
| def upload_results(self, |
| gcs_bucket, |
| build_id=None, |
| upload_debug_symbols=False): |
| """Uploads artifacts from the build to Google Cloud Storage. |
| |
| Args: |
| gcs_bucket (str): GCS bucket name to upload build results to. |
| build_id (str|None): A buildbucket ID used to namespace the GCS |
| upload location; if None, the current build's ID is used. |
| upload_debug_symbols (bool): Whether to upload debug symbols. |
| """ |
| assert gcs_bucket |
| with self._api.step.nest('upload build results'): |
| self._api.build._upload_build_results(self, gcs_bucket, build_id, |
| upload_debug_symbols) |
| self._api.build._upload_package_snapshot(self, gcs_bucket, build_id) |
| |
| |
| class GNResults(object): |
| """GNResults is represents the result of a `gn gen` invocation in the fuchsia |
| build. |
| |
| It exposes the API of the build, which defines how one can invoke ninja. |
| """ |
| _COMPDB_FILENAME = 'compile_commands.json' |
| |
| def __init__(self, api, fuchsia_build_dir, compdb_exported=False): |
| self._api = api |
| self._fuchsia_build_dir = fuchsia_build_dir |
| |
| zircon_ninja_instructions = api.json.read( |
| 'read zircon ninja instructions', |
| fuchsia_build_dir.join(ZIRCON_JSON), |
| step_test_data=api.build.test_api.mock_zircon_instructions, |
| ).json.output |
| |
| self._zircon_build_dir = api.path.abs_to_path( |
| api.path.realpath( |
| fuchsia_build_dir.join(zircon_ninja_instructions['dir']),),) |
| self._canonical_zircon_ninja_targets = zircon_ninja_instructions['targets'] |
| |
| self._image_manifest = None |
| self._archives = None |
| self._test_manifest = None |
| self._generated_sources = None |
| self._zircon_generated_sources = None |
| self._fuchsia_compdb = None |
| self._zircon_compdb = None |
| self._zbi_tests = None |
| |
| if compdb_exported: |
| self._fuchsia_compdb = self._fuchsia_build_dir.join(self._COMPDB_FILENAME) |
| api.path.mock_add_paths(self._fuchsia_compdb) |
| self._zircon_compdb = self._zircon_build_dir.join(self._COMPDB_FILENAME) |
| api.path.mock_add_paths(self._zircon_compdb) |
| |
| @property |
| def fuchsia_build_dir(self): |
| """Returns the fuchsia build directory (Path).""" |
| return self._fuchsia_build_dir |
| |
| @property |
| def zircon_build_dir(self): |
| """Returns the associated zircon build directory (Path).""" |
| return self._zircon_build_dir |
| |
| @property |
| def canonical_zircon_ninja_targets(self): |
| """Returns the canonical zircon ninja targets (list(string)) that the |
| fuchsia build is informed of.""" |
| return self._canonical_zircon_ninja_targets |
| |
| @property |
| def image_manifest(self): |
| """Returns the manifest of images (dict) in the GN graph. |
| |
| TODO(BLD-253): Point to the schema once there is one. |
| """ |
| if not self._image_manifest: |
| self._image_manifest = self._api.json.read( |
| 'read image manifest', |
| self.fuchsia_build_dir.join(IMAGES_JSON), |
| step_test_data=self._api.build.test_api.mock_image_manifest, |
| ).json.output |
| |
| zedboot_image_manifest = self._api.json.read( |
| 'read zedboot image manifest', |
| self.fuchsia_build_dir.join(ZEDBOOT_IMAGES_JSON), |
| step_test_data=self._api.build.test_api.mock_zedboot_image_manifest, |
| ).json.output |
| |
| self._image_manifest.extend(zedboot_image_manifest) |
| return self._image_manifest |
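| # Illustrative entry shape (assumed from the fields consumed in this module): |
| # {'name': 'zircon-a', 'type': 'zbi', 'path': 'zircon-a.zbi', |
| # 'bootserver_pave': ['--zircona']} |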
| |
| @property |
| def archives(self): |
| """Returns the archives (dict[str]str) in the GN graph. |
| |
| Maps archive name to path relative to the fuchsia build directory. |
| |
| TODO(IN-882): Make this backed by its own build API module. |
| """ |
| if not self._archives: |
| self._archives = {} |
| for image in self.image_manifest: |
| if image['type'] == 'tgz': |
| self._archives[image['name']] = image['path'] |
| return self._archives |
| |
| @property |
| def test_manifest(self): |
| """Returns the manifest of tests (dict) in the GN graph. |
| |
| TODO(BLD-253): Point to the schema once there is one. |
| """ |
| if not self._test_manifest: |
| self._test_manifest = self._api.json.read( |
| 'read test spec manifest', |
| self.fuchsia_build_dir.join(TESTS_JSON), |
| step_test_data=self._api.build.test_api.mock_test_spec_manifest, |
| ).json.output |
| return self._test_manifest |
| |
| @property |
| def generated_sources(self): |
| """Returns the generated source files (list(str)) from the fuchsia build. |
| |
| The returned paths are relative to the fuchsia build directory. |
| """ |
| if not self._generated_sources: |
| self._generated_sources = self._api.file.read_raw( |
| 'read generated sources', |
| self.fuchsia_build_dir.join('all_fidl_json.txt'), |
| '//generated_fidl.json', |
| ).splitlines() |
| return self._generated_sources |
| |
| @property |
| def zircon_generated_sources(self): |
| """Returns the generated source files (list(str)) from the zircon build. |
| |
| The returned paths are relative to the zircon build directory. |
| """ |
| if not self._zircon_generated_sources: |
| self._zircon_generated_sources = self._api.json.read( |
| 'read zircon generated sources', |
| self.zircon_build_dir.join('generated_sources.json'), |
| step_test_data=lambda: self._api.json.test_api.output( |
| ['//generated_header.h']), |
| ).json.output |
| return self._zircon_generated_sources |
| |
| def filtered_compdb(self, filters): |
| """The path to a merged compilation database, filtered via the passed filters.""" |
| with self._api.step.nest('merge compdbs'): |
| compdb = [] |
| compdb += self._api.json.read( |
| 'read zircon compdb', |
| self._zircon_compdb, |
| stdout=self._api.json.output(), |
| step_test_data=lambda: self._api.json.test_api.output([{ |
| 'directory': '[START_DIR]/out/default.zircon', |
| 'file': '../../zircon.cpp', |
| 'command': 'clang++ zircon.cpp', |
| }]), |
| ).json.output |
| |
| compdb += self._api.json.read( |
| 'read compdb', |
| self._fuchsia_compdb, |
| stdout=self._api.json.output(), |
| step_test_data=lambda: self._api.json.test_api.output([{ |
| 'directory': '[START_DIR]/out/default', |
| 'file': '../../foo.cpp', |
| 'command': 'clang++ foo.cpp', |
| }, { |
| 'directory': '[START_DIR]/out/default', |
| 'file': '../../third_party/foo.cpp', |
| 'command': 'clang++ third_party/foo.cpp', |
| }, { |
| 'directory': '[START_DIR]/out/default', |
| 'file': '../../out/default/foo.cpp', |
| 'command': 'clang++ foo.cpp', |
| }]), |
| ).json.output |
| |
| def keep_in_compdb(entry): |
| # Filenames are relative to the build directory, and the build directory is absolute. |
| build_dir = self._api.path.abs_to_path(entry['directory']) |
| full_path = self._api.path.abs_to_path( |
| self._api.path.realpath( |
| self._api.path.join(build_dir, entry['file']))) |
| # Drop entries for generated files that live inside the build directory. |
| if build_dir.is_parent_of(full_path): |
| return False |
| |
| # Drop entries whose source path contains any of the filtered segments. |
| segments = entry['file'].split(os.sep) |
| if any(bad_segment in segments for bad_segment in filters): |
| return False |
| return True |
| |
| compdb_filtered = [entry for entry in compdb if keep_in_compdb(entry)] |
| compdb_path = self._api.path['cleanup'].join('compile_commands.json') |
| self._api.file.write_json('write merged compdb', compdb_path, |
| compdb_filtered) |
| return compdb_path |
| |
| @property |
| def zbi_tests(self): |
| """Returns the ZBI tests from the Fuchsia build directory.""" |
| if not self._zbi_tests: |
| zbi_tests = self._api.json.read( |
| 'read zbi test manifest', |
| self.fuchsia_build_dir.join('zbi_tests.json'), |
| step_test_data=lambda: self._api.json.test_api.output([]), |
| ).json.output |
| self._zbi_tests = {zbi_test['name']: zbi_test for zbi_test in zbi_tests} |
| return self._zbi_tests |
| |
| |
| class ToolchainContext(object): |
| """A ToolchainContext provides specifications for building with a specific |
| toolchain.""" |
| |
| def __init__(self, api): |
| if api.build._clang_toolchain: |
| # TODO: in this case, we should pass use_ccache=true as a GN arg, and |
| # update the context to contribute a definition of CCACHE_DIR in its |
| # environment. |
| with api.step.nest('clang_toolchain'), api.context(infra_steps=True): |
| clang_instance = api.build._clang_toolchain['instance'] |
| clang_type = api.build._clang_toolchain['type'] |
| clang_dir = api.path.mkdtemp('clang') |
| |
| def cipd(): |
| pkgs = api.cipd.EnsureFile() |
| pkgs.add_package('fuchsia/clang/${platform}', clang_instance) |
| api.cipd.ensure(clang_dir, pkgs) |
| |
| def isolated(): |
| api.isolated.download( |
| 'download', isolated_hash=clang_instance, output_dir=clang_dir) |
| |
| {'cipd': cipd, 'isolated': isolated}[clang_type]() |
| |
| @contextmanager |
| def null_context(): |
| yield |
| |
| self._context = null_context |
| self._ninja_jobs = api.platform.cpu_count |
| self._gn_args = ['clang_prefix="%s"' % clang_dir.join('bin')] |
| |
| else: |
| api.goma.ensure() |
| self._context = api.goma.build_with_goma |
| self._ninja_jobs = api.goma.jobs |
| self._gn_args = ['use_goma=true', 'goma_dir="%s"' % api.goma.goma_dir] |
| |
| def __call__(self): |
| """Returns a context for building with the associated toolchain.""" |
| return self._context() |
| |
| @property |
| def ninja_jobs(self): |
| """Returns the number (int) of recommended ninja jobs.""" |
| return self._ninja_jobs |
| |
| @property |
| def gn_args(self): |
| """Returns GN args list(str) contributed by the toolchain.""" |
| return copy.copy(self._gn_args) |
| |
| |
| class FuchsiaBuildApi(recipe_api.RecipeApi): |
| """APIs for building Fuchsia.""" |
| |
| FuchsiaBuildResults = FuchsiaBuildResults |
| BuildArtifacts = BuildArtifacts |
| GNResults = GNResults |
| |
| def __init__(self, clang_toolchain, target, build_type, packages, |
| universe_packages, variants, gn_args, ninja_targets, board, |
| product, exclude_images, run_tests, include_breakpad_symbols, |
| include_symbol_archive, debug_symbol_bucket, environment_tags, |
| *args, **kwargs): |
| super(FuchsiaBuildApi, self).__init__(*args, **kwargs) |
| self._clang_toolchain = clang_toolchain |
| self._target = target |
| self._build_type = build_type |
| self._packages = packages |
| self._universe_packages = universe_packages |
| self._variants = variants |
| self._gn_args = gn_args |
| self._ninja_targets = ninja_targets |
| self._board = board |
| self._product = product |
| self._exclude_images = exclude_images |
| self._include_breakpad_symbols = include_breakpad_symbols |
| self._include_symbol_archive = include_symbol_archive |
| self._run_tests = run_tests |
| self._debug_symbol_bucket = debug_symbol_bucket |
| self._environment_tags = environment_tags |
| self.__toolchain_context = None |
| |
| @property |
| def _toolchain_context(self): |
| if not self.__toolchain_context: |
| self.__toolchain_context = ToolchainContext(self.m) |
| return self.__toolchain_context |
| |
| @property |
| def variants(self): |
| return self._variants |
| |
| @property |
| def environment_tags(self): |
| return self._environment_tags |
| |
| @property |
| def build_type(self): |
| return self._build_type |
| |
| @property |
| def board(self): |
| return self._board |
| |
| @property |
| def product(self): |
| return self._product |
| |
| @property |
| def run_tests(self): |
| return self._run_tests |
| |
| @property |
| def debug_symbol_bucket(self): |
| """The GCS bucket to upload debug symbols""" |
| return self._debug_symbol_bucket |
| |
| def _prebuilt_path(self, checkout_root, *path): |
| """Returns the Path to the host-platform subdir under the given subdirs.""" |
| path = list(path) |
| path.append('{os}-{arch}'.format( |
| os=self.m.platform.name, |
| arch={'intel': 'x64'}[self.m.platform.arch], |
| )) |
| return checkout_root.join('prebuilt', *path) |
| |
| def __call__(self, checkout, collect_build_metrics): |
| """Builds Fuchsia from a Jiri checkout |
| |
| Args: |
| checkout (CheckoutApi.CheckoutResults): The Fuchsia checkout. |
| collect_build_metrics (bool): Whether to upload build metrics to cloud |
| storage. |
| |
| Returns: |
| A FuchsiaBuildResults, representing the build. |
| """ |
| archives_to_build = [] |
| # If we are not building images, then we need not build package-related |
| # targets. |
| if not self._exclude_images: |
| archives_to_build.extend(['archive', 'package-archive']) |
| if self._include_breakpad_symbols: |
| archives_to_build.append('breakpad-symbol-archive') |
| if self._include_symbol_archive: |
| archives_to_build.append('symbol-archive') |
| |
| build_dir = self.m.path['start_dir'].join('out') |
| |
| build = self.with_options( |
| build_dir=build_dir, |
| checkout=checkout, |
| target=self._target, |
| build_type=self._build_type, |
| packages=self._packages, |
| universe_packages=self._universe_packages, |
| variants=self._variants, |
| gn_args=self._gn_args, |
| ninja_targets=list(self._ninja_targets), |
| board=self._board, |
| product=self._product, |
| collect_build_metrics=collect_build_metrics, |
| build_images=not self._exclude_images, |
| archives_to_build=tuple(archives_to_build), |
| ) |
| |
| return build |
| |
| def with_options(self, |
| build_dir, |
| checkout, |
| target, |
| build_type, |
| packages, |
| universe_packages=None, |
| variants=(), |
| gn_args=(), |
| ninja_targets=(), |
| board=None, |
| product=None, |
| collect_build_metrics=False, |
| build_images=True, |
| archives_to_build=(), |
| gcs_bucket=None): |
| """Builds Fuchsia from a Jiri checkout. |
| |
| Args: |
| build_dir (Path): The output root; the fuchsia build directory is |
| <build_dir>/default. |
| checkout (CheckoutResult): The Fuchsia checkout result. |
| target (str): The build target; see TARGETS for allowed targets. |
| build_type (str): One of the build types in BUILD_TYPES. |
| packages (sequence[str]): A sequence of packages to pass to GN to build. |
| universe_packages (sequence[str]): A sequence of packages to pass to GN |
| to build and add to the set of 'universe' packages. |
| variants (sequence[str]): A sequence of build variant selectors to pass |
| to GN in `select_variant`. |
| gn_args (sequence[str]): Additional arguments to pass to GN. |
| ninja_targets (sequence[str]): Additional target args to pass to ninja. |
| board (str): A board to pass to GN to build. |
| product (str): A product to pass to GN to build. |
| collect_build_metrics (bool): Whether to collect build metrics. |
| build_images (bool): Whether to build the basic images needed to boot |
| and test on fuchsia. |
| archives_to_build (seq(str)): A sequence of canonical names of archives |
| to build. |
| gcs_bucket (str|None): GCS bucket name to upload build results to. |
| |
| Returns: |
| A FuchsiaBuildResults, representing the recently completed build. |
| """ |
| assert target in TARGETS |
| assert build_type in BUILD_TYPES |
| assert all(archive in ARCHIVES for archive in archives_to_build) |
| |
| if universe_packages is None: |
| # TODO(olivernewman): Determine if we can remove the default value for |
| # this parameter. Otherwise, add test coverage for this block. |
| universe_packages = [] # pragma: no cover |
| |
| # We may append additional arguments below, so make a copy to avoid mutating |
| # the caller's sequence. |
| gn_args = list(gn_args) if gn_args else [] |
| if checkout.release_version: |
| gn_args.append('build_info_version="{version}"'.format( |
| version=checkout.release_version)) |
| |
| # Parse product & board out of their paths |
| # TODO(PKG-854): OTA tests fail due to a downgrade to a 0 byte blob because |
| # these were previously unset. Only set for release builds until PKG-854 |
| # is fixed. |
| if checkout.checkout_snapshot or checkout.release_version: |
| if product: |
| product_base = os.path.splitext(os.path.basename(product))[0] |
| gn_args.append('build_info_product="%s"' % product_base) |
| if board: |
| board_base = os.path.splitext(os.path.basename(board))[0] |
| gn_args.append('build_info_board="%s"' % board_base) |
| |
| with self.m.step.nest('build'): |
| return self._with_options( |
| checkout_root=checkout.root_dir, |
| fuchsia_build_dir=build_dir.join('default'), |
| build_type=build_type, |
| target=target, |
| packages=packages, |
| universe_packages=universe_packages, |
| variants=variants, |
| gn_args=gn_args, |
| ninja_targets=list(ninja_targets), |
| board=board, |
| product=product, |
| collect_build_metrics=collect_build_metrics, |
| build_images=build_images, |
| archives_to_build=archives_to_build, |
| gcs_bucket=gcs_bucket, |
| ) |
| |
| def _run_ninja(self, step_name, build_dir, ninja_targets, jobs, build_id, |
| gcs_bucket): |
| try: |
| self.m.ninja( |
| step_name=step_name, |
| build_dir=build_dir, |
| targets=ninja_targets, |
| job_count=jobs, |
| ) |
| except self.m.step.StepFailure: |
| crashreports_dir = build_dir.join('clang-crashreports') |
| self.m.path.mock_add_paths(crashreports_dir) |
| if gcs_bucket and self.m.path.exists(crashreports_dir): |
| with self.m.step.nest('clang-crashreports'): |
| with self.m.context(infra_steps=True): |
| self.m.tar.ensure() |
| temp = self.m.path.mkdtemp('reproducers') |
| reproducers = self.m.file.glob_paths( |
| 'find reproducers', |
| crashreports_dir, |
| '*.sh', |
| test_data=(crashreports_dir.join('foo.sh'),)) |
| for reproducer in reproducers: |
| base = self.m.path.splitext(self.m.path.basename(reproducer))[0] |
| files = self.m.file.glob_paths( |
| 'find %s files' % base, |
| crashreports_dir, |
| base + '.*', |
| test_data=(crashreports_dir.join('foo.sh'), |
| crashreports_dir.join('foo.cpp'))) |
| tgz_basename = '%s.tar.gz' % base |
| tgz_path = temp.join(tgz_basename) |
| archive = self.m.tar.create(tgz_path, compression='gzip') |
| for f in files: |
| archive.add(f, crashreports_dir) |
| archive.tar('create %s' % tgz_basename) |
| self.m.upload.file_to_gcs( |
| source=tgz_path, |
| bucket=gcs_bucket, |
| subpath=tgz_basename, |
| build_id=build_id, |
| ) |
| raise |
| |
| def gen(self, |
| checkout_root, |
| fuchsia_build_dir, |
| target, |
| build_type, |
| product, |
| board=None, |
| packages=(), |
| universe_packages=(), |
| variants=(), |
| args=(), |
| record_tracelogs=False, |
| export_compdb=False): |
| """Sets up and calls `gn gen`. |
| |
| Args: |
| checkout_root (str): The fuchsia checkout root. |
| fuchsia_build_dir (Path): The directory in which to generate the build. |
| target (str): The build target; see TARGETS for allowed targets. |
| build_type (str): One of the build types in BUILD_TYPES. |
| product (str): A product to pass to GN to build. |
| board (str|None): A board to pass to GN to build. |
| packages (sequence[str]): A sequence of packages to pass to GN to build. |
| universe_packages (sequence[str]): A sequence of packages to pass to GN |
| to build and add to the set of 'universe' packages. |
| variants (sequence[str]): A sequence of build variant selectors to pass |
| to GN in `select_variant`. |
| args (sequence[str]): Additional arguments to pass to GN. |
| record_tracelogs (bool): Whether to record tracelogs. |
| export_compdb (bool): Whether to generate a compilation database. |
| |
| Returns: |
| A GNResults object. |
| """ |
| # Set the path to GN executable since it is not installed from CIPD. |
| self.m.gn.set_path( |
| self._prebuilt_path(checkout_root, 'third_party', 'gn').join('gn')) |
| |
| args = list(args) |
| args.extend(self._toolchain_context.gn_args) |
| args.extend([ |
| 'target_cpu="%s"' % target, |
| 'is_debug=%s' % ('true' if build_type == 'debug' else 'false'), |
| ]) |
| |
| if board: |
| args.append('import("//%s") ' % board) |
| if product: |
| args.append('import("//%s") ' % product) |
| if packages: |
| base_package_labels_format = 'base_package_labels=[%s]' |
| # if product is set, append to base_package_labels. |
| if product: |
| base_package_labels_format = 'base_package_labels+=[%s]' |
| |
| args.append(base_package_labels_format % |
| ','.join('"%s"' % pkg for pkg in packages)) |
| |
| if universe_packages: |
| universe_package_labels_format = 'universe_package_labels=[%s]' |
| # if product is set, append to universe_package_labels. |
| if product: |
| universe_package_labels_format = 'universe_package_labels+=[%s]' |
| |
| args.append(universe_package_labels_format % |
| ','.join('"%s"' % pkg for pkg in universe_packages)) |
| |
| if variants: |
| # Apply double-quotes to `select_variant_shortcut`s (strings, e.g. |
| # host-asan), but not to `selector`s (scopes, e.g. |
| # {variant="asan-fuzzer" target_type=["fuzzed_executable"]}). |
| formatted_variants = [ |
| v if re.match('^{.*}$', v) else '"%s"' % v for v in variants |
| ] |
| args.append('select_variant=[%s]' % ','.join(formatted_variants)) |
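| # Illustratively, variants = ['host-asan', |
| # '{variant="asan-fuzzer" target_type=["fuzzed_executable"]}'] yields |
| # select_variant=["host-asan",{variant="asan-fuzzer" target_type=["fuzzed_executable"]}] |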
| |
| if 'thinlto' in variants: |
| args.append('thinlto_cache_dir="%s"' % |
| self.m.path['cache'].join('thinlto')) |
| |
| if record_tracelogs: |
| args.append("zircon_tracelog=\"%s\"" % |
| str(self.m.path['cleanup'].join('zircon_gn_trace.json'))) |
| |
| def sorted_with_imports_first(arglist): |
| """Sorts arguments while ensuring that imports come first. |
| |
| The `--args` passed to gen are sorted for a deterministic ordering, with |
| imports coming first since they might otherwise blindly redefine variables |
| set or modified by other arguments. This also eases review, as a |
| re-ordering of build logic yields the same GN invocation. |
| """ |
| import_args, normal_args = [], [] |
| for arg in arglist: |
| if arg.startswith('import('): |
| import_args.append(arg) |
| else: |
| normal_args.append(arg) |
| return sorted(import_args) + sorted(normal_args) |
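| # For example, ['use_goma=true', 'import("//boards/x64.gni")', |
| # 'base_package_labels=["//foo"]'] (hypothetical args) sorts to |
| # ['import("//boards/x64.gni")', 'base_package_labels=["//foo"]', |
| # 'use_goma=true']. |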
| |
| gen_options = [ |
| fuchsia_build_dir, |
| '--check', |
| '--args=%s' % ' '.join(sorted_with_imports_first(args)), |
| ] |
| |
| if export_compdb: |
| gen_options.append('--export-compile-commands') |
| |
| if record_tracelogs: |
| gen_options.append('--tracelog=%s' % |
| str(self.m.path['cleanup'].join('gn_trace.json'))) |
| |
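| # The resulting invocation resembles (values hypothetical): |
| # gn gen <fuchsia_build_dir> --check |
| # --args='import("//products/core.gni") is_debug=true target_cpu="x64" ...' |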
| with self.m.macos_sdk(): |
| self.m.gn('gen', *gen_options) |
| return self.GNResults(self.m, fuchsia_build_dir, export_compdb) |
| |
| def ninja(self, |
| checkout_root, |
| gn_results, |
| targets=(), |
| zircon_targets=(), |
| build_canonical_zircon_targets=True, |
| build_images=False, |
| image_filter=None, |
| archives_to_build=(), |
| build_host_tests=False, |
| build_generated_sources=False, |
| build_zbi_tests=False, |
| gcs_bucket=None, |
| build_fuchsia=True, |
| build_zircon=True): |
| """A high-level ninja abstraction that consumes GN build APIs - exposed |
| through GNResults - in determining what to run. |
| |
| Args: |
| checkout_root (str): The fuchsia checkout root. |
| gn_results (GNResults): GN gen results. |
| targets (seq(str)): Fuchsia ninja targets. |
| zircon_targets (seq(str)): Zircon ninja targets. |
| build_canonical_zircon_targets (bool): Whether to build the zircon ninja |
| targets given in the fuchsia build 'instructions' of zircon.json. |
| build_images (bool): Whether to build images within the GN graph. |
| image_filter (callable): A predicate over entries of |
| gn_results.image_manifest. If build_images is true, only the images |
| for which this function returns True will be built. |
| archives_to_build (seq(str)): A list of archives in the GN graph to build. |
| build_host_tests (bool): Whether to build host test executables in the |
| GN graph. |
| build_generated_sources (bool): Whether to build generated sources. |
| build_zbi_tests (bool): Whether to build the zbi tests in the GN graph. |
| gcs_bucket (str|None): A GCS bucket to upload crash reports to. |
| build_fuchsia (bool): Whether to run ninja for Fuchsia. Set to False when |
| targets = () should mean that no Fuchsia targets are built, rather than |
| that all targets are built. |
| build_zircon (bool): Whether to run ninja for Zircon. Set to False when |
| zircon_targets = () should mean that no Zircon targets are built, rather |
| than that all targets are built. |
| """ |
| # Set the path to Ninja executable since it is not installed from CIPD. |
| self.m.ninja.set_path( |
| self._prebuilt_path(checkout_root, 'third_party', |
| 'ninja').join('ninja')) |
| |
| targets = list(targets) |
| zircon_targets = list(zircon_targets) |
| |
| def append_target(path): |
| abspath = self.m.path.abs_to_path( |
| self.m.path.realpath(gn_results.fuchsia_build_dir.join(path))) |
| if gn_results.zircon_build_dir.is_parent_of(abspath): |
| zircon_targets.append( |
| os.path.relpath( |
| str(abspath), |
| str(gn_results.zircon_build_dir), |
| )) |
| else: |
| targets.append(path) |
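| # For example (illustrative), an images.json path like |
| # '../default.zircon/foo.zbi' resolves under the zircon build directory and |
| # is rebased to the zircon ninja target 'foo.zbi'; other paths are passed |
| # through as fuchsia ninja targets. |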
| |
| if build_images: |
| image_filter = image_filter or (lambda image: True) |
| filtered_image_manifest = filter(image_filter, gn_results.image_manifest) |
| for image in filtered_image_manifest: |
| append_target(image['path']) |
| |
| if archives_to_build: |
| targets.extend([ |
| archive for name, archive in gn_results.archives.iteritems() |
| if name in archives_to_build |
| ]) |
| |
| if build_host_tests: |
| for test_spec in gn_results.test_manifest: |
| test = self.m.testsharder.Test.from_jsonish(test_spec['test']) |
| if test.os != 'fuchsia': |
| targets.append(test.path) |
| |
| if build_generated_sources: |
| zircon_targets.extend(gn_results.zircon_generated_sources) |
| targets.extend(gn_results.generated_sources) |
| |
| if build_zbi_tests: |
| for zbi_test in gn_results.zbi_tests.itervalues(): |
| append_target(zbi_test['path']) |
| |
| if build_canonical_zircon_targets: |
| zircon_targets.extend(gn_results.canonical_zircon_ninja_targets) |
| |
| with self.m.macos_sdk(), self.m.step.nest('ninja'): |
| with self._toolchain_context(): |
| jobs = self._toolchain_context.ninja_jobs |
| if build_zircon: |
| self._run_ninja('zircon', gn_results.zircon_build_dir, zircon_targets, |
| jobs, self.m.buildbucket.build.id, gcs_bucket) |
| if build_fuchsia: |
| self._run_ninja('fuchsia', gn_results.fuchsia_build_dir, targets, |
| jobs, self.m.buildbucket.build.id, gcs_bucket) |
| |
| def _with_options(self, checkout_root, fuchsia_build_dir, build_type, target, |
| packages, universe_packages, variants, gn_args, |
| ninja_targets, board, product, collect_build_metrics, |
| build_images, archives_to_build, gcs_bucket): |
| with self.m.step.nest('build fuchsia'): |
| gn_results = self.gen( |
| checkout_root=checkout_root, |
| fuchsia_build_dir=fuchsia_build_dir, |
| target=target, |
| build_type=build_type, |
| packages=packages, |
| universe_packages=universe_packages, |
| variants=variants, |
| args=gn_args, |
| board=board, |
| product=product, |
| record_tracelogs=collect_build_metrics) |
| |
| def image_filter(image): |
| name = image['name'] |
| # If an image is used in paving or netbooting, its manifest entry will |
| # specify what flags to pass to the bootserver when doing so; |
| # conversely, if these arguments are present, we know the image is |
| # needed for this. |
| used_for_testing = ( |
| image.get('bootserver_pave', []) or # used to pave |
| image.get('bootserver_netboot', []) or # used to netboot |
| name in ['qemu-kernel', STORAGE_FULL] # used for qemu |
| ) |
| return used_for_testing |
| |
| images = {} |
| ninja_targets = copy.copy(ninja_targets) |
| |
| if build_images: |
| images = { |
| image['name']: image |
| for image in gn_results.image_manifest |
| if image_filter(image) |
| } |
| # ids.txt is needed for symbolization. |
| ninja_targets.append('ids.txt') |
| # Needed for size checks and tracking. |
| ninja_targets.append('build/images:record_filesystem_sizes') |
| ninja_targets.append('build/images:system_snapshot') |
| |
| archives = { |
| name: self.m.path.abspath(fuchsia_build_dir.join(archive)) |
| for name, archive in gn_results.archives.iteritems() |
| if name in archives_to_build |
| } |
| # TODO(IN-882): Once the following two archives are added to images.json, |
| # the following logic will happen automatically in the above for-loop. |
| if 'package-archive' in archives_to_build: |
| ninja_targets.append('build/gn:package_archive') |
| archives['package-archive'] = fuchsia_build_dir.join('packages.tar.gz') |
| |
| if 'breakpad-symbol-archive' in archives_to_build: |
| ninja_targets.append('build/gn:breakpad_symbols') |
| archives['breakpad-symbol-archive'] = ( |
| fuchsia_build_dir.join('breakpad_symbols', |
| 'breakpad_symbols.tar.gz')) |
| |
| self.ninja( |
| checkout_root=checkout_root, |
| gn_results=gn_results, |
| gcs_bucket=gcs_bucket, |
| targets=ninja_targets, |
| build_images=build_images, |
| image_filter=image_filter, |
| archives_to_build=archives_to_build, |
| build_host_tests=True, |
| ) |
| |
| # NOTE: Until zircon is built with GN and we have a unified build |
| # directory, we simulate this by symlinking* zircon images into the |
| # fuchsia build directory. The upstream isolated recipe module |
| # requires isolating files at a fixed root, and this is most conveniently |
| # taken to be the fuchsia build directory. |
| # TODO(crbug.com/917604): *Actually we copy instead of symlink until the |
| # attached isolated symlinking bug is fixed, which prevents symlinked |
| # files from registering as downloadable from the isolate server. |
| for name, image in images.iteritems(): |
| current_path = self.m.path.abs_to_path( |
| self.m.path.abspath(fuchsia_build_dir.join(image['path']))) |
| if gn_results.zircon_build_dir.is_parent_of(current_path): |
| filename = self.m.path.basename(current_path) |
| new_filename = '%s-created_by_recipe%s' % os.path.splitext(filename) |
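| # e.g. (illustrative) 'zircon-a.zbi' becomes 'zircon-a-created_by_recipe.zbi'. |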
| new_path = fuchsia_build_dir.join(new_filename) |
| self.m.file.copy('copy %s to fuchsia build dir' % name, current_path, |
| new_path) |
| images[name]['path'] = new_filename |
| |
| results = self.FuchsiaBuildResults( |
| api=self.m, |
| target=target, |
| variants=variants, |
| build_type=build_type, |
| fuchsia_build_dir=fuchsia_build_dir, |
| zircon_build_dir=gn_results.zircon_build_dir, |
| checkout_root=checkout_root, |
| board=board, |
| product=product, |
| images=images, |
| archives=archives, |
| ) |
| |
| # The build produces an fvm.blk that is just big enough to fit its inputs. |
| # In a small number of cases, there is not enough padding for tests to run |
| # (on QEMU) and we see failures as a result. We guard against this by |
| # doubling the amount of free space. |
| # Note that this image is only used by QEMU and that no host side disk |
| # blocks are allocated right away, so the operation is cheap regardless of |
| # the size we extend to. |
| if results.storage_full: |
| results.storage_full.extend(2) |
| return results |
| |
| def _upload_build_results(self, build_results, gcs_bucket, build_id, |
| upload_debug_symbols): |
| assert gcs_bucket |
| # Upload archives. |
| for path in build_results.archives.itervalues(): |
| self.m.upload.file_to_gcs( |
| source=path, |
| bucket=gcs_bucket, |
| subpath=self.m.path.basename(path), |
| build_id=build_id, |
| ) |
| # Upload build metrics. |
| self._upload_tracing_data(build_results, gcs_bucket, build_id) |
| self._run_bloaty(build_results, gcs_bucket, build_id) |
| self._upload_filesystem_sizes(build_results) |
| self._upload_blobstats_output(build_results, gcs_bucket, build_id) |
| if upload_debug_symbols: |
| self._upload_debug_symbols(build_results) |
| |
| def _upload_debug_symbols(self, build_results): |
| with self.m.step.nest('ensure upload_debug_symbols'): |
| with self.m.context(infra_steps=True): |
| pkgs = self.m.cipd.EnsureFile() |
| pkgs.add_package('fuchsia/infra/upload_debug_symbols/${platform}', |
| UPLOAD_DEBUG_SYMBOLS_CIPD_VERSION) |
| cipd_dir = self.m.path['start_dir'].join('cipd', 'upload_debug_symbols') |
| self.m.cipd.ensure(cipd_dir, pkgs) |
| upload_debug_symbols_tool = cipd_dir.join('upload_debug_symbols') |
| cmd = [ |
| upload_debug_symbols_tool, |
| '-bucket=' + self.debug_symbol_bucket, |
| ] |
| dirs = [ |
| build_results.fuchsia_build_dir.join('.build-id'), |
| build_results.zircon_build_dir.join('.build-id'), |
| build_results.prebuilt_path('third_party', |
| 'clang').join('lib', 'debug', '.build-id'), |
| build_results.checkout_root.join('prebuilt', 'build_ids'), |
| ] |
| cmd.extend(dirs) |
| self.m.step('upload debug symbols', cmd) |
| |
| def _upload_package_snapshot(self, build_results, gcs_bucket, build_id): |
| assert gcs_bucket |
| snapshot_path = build_results.fuchsia_build_dir.join( |
| 'obj', 'build', 'images', 'system.snapshot') |
| if not self.m.path.exists(snapshot_path): |
| return |
| |
| # Upload a new table row for the system snapshot data generated during this build |
| snapshot = self.m.file.read_raw('read package snapshot file', snapshot_path) |
| build_packages_entry = { |
| 'build_id': self.m.buildbucket.build.id, |
| 'snapshot': snapshot, |
| } |
| |
| basename = 'system.snapshot.json' |
| build_packages_entry_file = self.m.path['tmp_base'].join(basename) |
| |
| self.m.step( |
| 'write build_packages_entry_file', |
| ['cat', self.m.json.input(build_packages_entry)], |
| stdout=self.m.raw_io.output(leak_to=build_packages_entry_file)) |
| |
| self.m.upload.file_to_gcs( |
| source=build_packages_entry_file, |
| bucket=gcs_bucket, |
| subpath=basename, |
| build_id=build_id, |
| ) |
| |
| # Upload a new table row describing this particular build. Other tables' rows |
| # are linked into this table using the build id as a foreign key. |
| builds_entry = { |
| 'bucket': self.m.buildbucket.bucket_v1, |
| 'builder': self.m.buildbucket.builder_name, |
| 'build_id': self.m.buildbucket.build.id, |
| 'gitiles_commit': [self.m.buildbucket.gitiles_commit.id], |
| 'datetime': str(self.m.buildbucket.build.create_time.ToDatetime()), |
| 'start_time': str(self.m.buildbucket.build.start_time.ToDatetime()), |
| 'repo': self.m.buildbucket.build_input.gitiles_commit.project, |
| 'arch': build_results.target, |
| 'product': build_results.product, |
| 'board': build_results.board, |
| 'channel': [''], |
| } |
| |
| self.m.bqupload.insert( |
| step_name='add table row: %s/%s/builds_beta' % |
| (BIGQUERY_PROJECT, BIGQUERY_ARTIFACTS_DATASET), |
| project=BIGQUERY_PROJECT, |
| dataset=BIGQUERY_ARTIFACTS_DATASET, |
| table='builds_beta', |
| data_file=self.m.json.input(builds_entry), |
| ) |
| |
| def _run_bloaty(self, build_results, gcs_bucket, build_id): |
| """Runs bloaty on the specified build results. |
| |
| The data is generated by running Bloaty McBloatface on the binaries in the |
| build results. If this is called more than once, it will only run once. |
| This function requires the build_metrics_gcs_bucket property to have been |
| set. |
| |
| Returns: |
| A Path to the file containing the resulting bloaty data. |
| """ |
| assert gcs_bucket |
| with self.m.step.nest('ensure bloaty'): |
| with self.m.context(infra_steps=True): |
| cipd_dir = self.m.path['cleanup'].join('cipd') |
| pkgs = self.m.cipd.EnsureFile() |
| pkgs.add_package('fuchsia/tools/bloatalyzer/${platform}', 'latest') |
| pkgs.add_package('fuchsia/third_party/bloaty/${platform}', 'latest') |
| self.m.cipd.ensure(cipd_dir, pkgs) |
| |
| bloaty_basename = 'bloaty.html' |
| bloaty_file = self.m.path['cleanup'].join(bloaty_basename) |
| self.m.step( |
| 'bloaty', |
| [ |
| cipd_dir.join('bloatalyzer'), |
| '-bloaty', |
| cipd_dir.join('bloaty'), |
| '-input', |
| build_results.ids, |
| '-output', |
| bloaty_file, |
| # We can't include all targets because the page won't load, so limit the output. |
| '-top-files', |
| '50', |
| '-top-syms', |
| '50', |
| '-format', |
| 'html', |
| # Limit the number of jobs so that we don't overwhelm the bot. |
| '-jobs', |
| str(min(self.m.platform.cpu_count, 32)), |
| ]) |
| self.m.upload.file_to_gcs( |
| source=bloaty_file, |
| bucket=gcs_bucket, |
| subpath=bloaty_basename, |
| build_id=build_id, |
| ) |
| |
| def _upload_filesystem_sizes(self, build_results): |
| """Uploads filesystem sizes to BigQuery. |
| |
| The upload also includes metadata about this build so that the data can |
| be used to create a self-contained BigQuery table. See INTK-709 and |
| INTK-1153 for context. |
| """ |
| if not build_results.filesystem_sizes: |
| return |
| |
| metadata = { |
| 'builder_name': |
| self.m.buildbucket.builder_name, |
| # This field is set to a string in the BQ table schema because it's just |
| # an opaque ID. The conversion from int to string that happens on the |
| # BigQuery side is not what we want, so convert here. |
| 'build_id': |
| str(self.m.buildbucket.build.id), |
| 'build_create_time_seconds': |
| self.m.buildbucket.build.create_time.seconds, |
| 'gitiles_commit_host': |
| self.m.buildbucket.gitiles_commit.host, |
| 'gitiles_commit_id': |
| self.m.buildbucket.gitiles_commit.id, |
| 'gitiles_commit_project': |
| self.m.buildbucket.gitiles_commit.project, |
| } |
| size_data = [] |
| for fs_dict in build_results.filesystem_sizes: |
| datum = metadata.copy() |
| datum['image_name'] = fs_dict['name'] |
| datum['image_size'] = fs_dict['value'] |
| size_data.append(datum) |
| size_path = self.m.path['cleanup'].join('filesystem_sizes_bq.json') |
| bq_formatted_str = '\n'.join(self.m.json.dumps(d) for d in size_data) |
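| # bq_formatted_str is newline-delimited JSON, one row per filesystem, e.g. |
| # (illustrative): {"builder_name": ..., "build_id": "123", |
| # "image_name": "blob.blk", "image_size": 123456} |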
| with self.m.step.nest('upload filesystem sizes'): |
| self.m.file.write_raw('write BQ formatted file', size_path, |
| bq_formatted_str) |
| self.m.bqupload.insert('bqupload', 'fuchsia-infra', 'fuchsia_build', |
| 'filesystem_sizes', size_path) |
| |
| def _upload_blobstats_output(self, build_results, gcs_bucket, build_id): |
| """Runs the blobstats command and uploads the output files to GCS.""" |
| dir_name = 'blobstats' |
| blobstats_output_dir = self.m.path['cleanup'].join(dir_name) |
| # If blobstats fails, it's probably because the build intentionally didn't |
| # produce the input files that blobstats requires. Blobstats is generally |
| # just a nice-to-have anyway, so it's okay to silently skip the upload in |
| # that case. |
| if self._run_blobstats(build_results, blobstats_output_dir): |
| self.m.upload.directory_to_gcs( |
| source=blobstats_output_dir, |
| bucket=gcs_bucket, |
| subpath=dir_name, |
| build_id=build_id, |
| ) |
| |
| def _run_blobstats(self, build_results, output_dir): |
| """Runs the blobstats script for this build. |
| |
| Args: |
| build_results (FuchsiaBuildResults): The results of the build on which |
| blobstats will be run. |
| output_dir (Path): The directory that the blobstats output files will be |
| written to. |
| |
| Returns: |
| A boolean indicating whether blobstats completed successfully. |
| """ |
| platform_dirname = '%s-x%d' % (self.m.platform.name, self.m.platform.bits) |
| root_dir = build_results.checkout_root |
| dart_binary_path = root_dir.join('topaz', 'tools', 'prebuilt-dart-sdk', |
| platform_dirname, 'bin', 'dart') |
| blobstats_src_dir = root_dir.join('scripts', 'blobstats') |
| command = [ |
| dart_binary_path, |
| '--packages=%s' % blobstats_src_dir.join('blobstats.packages'), |
| blobstats_src_dir.join('blobstats.dart'), |
| '--output=%s' % output_dir, |
| ] |
| with self.m.context(cwd=build_results.fuchsia_build_dir): |
| result = self.m.step('blobstats', command, ok_ret='any') |
| return result.retcode == 0 |
| |
| def _upload_tracing_data(self, build_results, gcs_bucket, build_id): |
| """Uploads GN and ninja tracing results for this build to GCS""" |
| with self.m.step.nest('ensure ninjatrace'): |
| with self.m.context(infra_steps=True): |
| cipd_dir = self.m.path['cleanup'].join('cipd') |
| pkgs = self.m.cipd.EnsureFile() |
| pkgs.add_package('fuchsia/tools/ninjatrace/${platform}', 'latest') |
| self.m.cipd.ensure(cipd_dir, pkgs) |
| |
| # Only upload if the bucket is specified. |
| gn_data = self._extract_gn_tracing_data(build_results.checkout_root, |
| 'gn_trace') |
| zircon_gn_data = self._extract_gn_tracing_data(build_results.checkout_root, |
| 'zircon_gn_trace') |
| ninja_data = self._extract_ninja_tracing_data( |
| cipd_dir, build_results.fuchsia_build_dir, build_results.checkout_root, |
| 'ninja_trace') |
| zircon_ninja_data = self._extract_ninja_tracing_data( |
| cipd_dir, build_results.zircon_build_dir, build_results.checkout_root, |
| 'zircon_ninja_trace') |
| |
| for data in [gn_data, zircon_gn_data, ninja_data, zircon_ninja_data]: |
| self.m.upload.file_to_gcs( |
| source=data, |
| bucket=gcs_bucket, |
| subpath=self.m.path.basename(data), |
| build_id=build_id, |
| ) |
| |
| def _extract_ninja_tracing_data(self, cipd_dir, build_dir, checkout_root, |
| filename): |
| """Extracts the tracing data from the .ninja_log. |
| |
| Args: |
| cipd_dir (Path): The CIPD directory. |
| build_dir (Path): The build directory. |
| checkout_root (Path): The checkout directory. |
| filename (str): The name of the output file (no extension). |
| |
| Returns: |
| A Path to the file containing the ninja tracing data in Chromium's |
| about:tracing html format. |
| """ |
| trace = self.m.path['cleanup'].join('%s.json' % filename) |
| html = self.m.path['cleanup'].join('%s.html' % filename) |
| self.m.step( |
| 'ninja tracing', [ |
| cipd_dir.join('ninjatrace'), |
| '-filename', |
| build_dir.join('.ninja_log'), |
| '-trace-json', |
| trace, |
| ], |
| stdout=self.m.raw_io.output(leak_to=trace)) |
| return self._trace2html('ninja trace2html', trace, html, checkout_root) |
| |
| def _extract_gn_tracing_data(self, checkout_root, filename): |
| """Extracts the tracing data from this GN run. |
| |
| Args: |
| checkout_root (Path): The checkout directory. |
| filename (str): The name of the output file (no extension). |
| |
| Returns: |
| A Path to the file containing the gn tracing data in Chromium's |
| about:tracing html format. |
| """ |
| return self._trace2html('gn trace2html', |
| self.m.path['cleanup'].join('%s.json' % filename), |
| self.m.path['cleanup'].join('%s.html' % filename), |
| checkout_root) |
| |
| def _trace2html(self, name, trace, html, checkout_root): |
| """Converts an about:tracing file to HTML using the trace2html tool""" |
| |
| # Catapult is imported in manifest/garnet. The check below would abort if it |
| # wasn't included in this checkout, but it is currently disabled. |
| # if self.m.path['third_party'].join('catapult') not in self.m.path: |
| # return |
| self.m.python( |
| name=name, |
| script=checkout_root.join('third_party', 'catapult', 'tracing', 'bin', |
| 'trace2html'), |
| args=['--output', html, trace]) |
| return html |