| # Copyright 2019 The Fuchsia Authors. All rights reserved. |
| # Use of this source code is governed by a BSD-style license that can be |
| # found in the LICENSE file. |
| |
| import contextlib |
| import copy |
| import functools |
| import json |
| |
| import attr |
| from google.protobuf import json_format as jsonpb |
| |
| from recipe_engine import recipe_api |
| |
| from .orchestration_inputs import _TestOrchestrationInputs |
| |
| from PB.go.chromium.org.luci.buildbucket.proto import ( |
| builder_common as builder_common_pb2, |
| ) |
| from PB.go.chromium.org.luci.resultdb.proto.v1.test_result import TestStatus |
| from PB.go.fuchsia.dev.fuchsia.tools.integration.fint.proto import ( |
| context as context_pb2, |
| set_artifacts as fint_set_artifacts_pb2, |
| build_artifacts as fint_build_artifacts_pb2, |
| ) |
| |
| # `fint set` and `fint build`, respectively, will write these files to the |
| # artifact directory specified by the context spec. |
| FINT_SET_ARTIFACTS = "set_artifacts.json" |
| FINT_BUILD_ARTIFACTS = "build_artifacts.json" |
| |
| # Manifests produced by the build. |
| BAZEL_SDK_INFO_JSON = "bazel_sdk_info.json" |
| BOARDS_JSON = "boards.json" |
| CIPD_ASSEMBLY_ARTIFACTS_JSON = "cipd_assembly_artifacts.json" |
| CTF_ARTIFACTS_JSON = "ctf_artifacts.json" |
| GERRIT_SIZE_REPORT_JSON = "gerrit_size_report.json" |
| DETAILED_SIZE_CHECKER_DATA_JSON = "detailed_size_checker_data.json" |
| ASSEMBLY_MANIFESTS_JSON = "assembly_manifests.json" |
| RBE_CONFIG_JSON = "rbe_config.json" |
| TOOL_PATHS_JSON = "tool_paths.json" |
| PACKAGE_ARCHIVES_JSON = "package_archives.json" |
| PARTITIONS_JSON = "partitions.json" |
| TRIAGE_SOURCES_JSON = "triage_sources.json" |
| VNAMES_CONFIG_JSON = "vnames_config.json" |
| |
# Name of the compdb generated by GN; it is expected in the root of the build
# directory.
| GN_COMPDB_FILENAME = "compile_commands.json" |
| |
| # Name of the log containing the build failure summary. |
| FAILURE_SUMMARY_LOG = "failure summary" |
| |
| # Set as an output property and consumed by the |
| # go/cq-incremental-builder-monitor_dev dashboard. |
| BUILD_FAILED_PROPERTY = "build_failed" |
| |
| |
| class NoSuchTool(Exception): |
| def __init__(self, name, cpu, os): |
| super().__init__( |
| f"no such tool in {TOOL_PATHS_JSON}: (name={name!r}, cpu={cpu!r}, os={os!r})" |
| ) |
| |
| |
| class NoSuchPackageArchive(Exception): |
| def __init__(self, name, cpu): |
| super().__init__( |
| f"no such package in {PACKAGE_ARCHIVES_JSON}: (name={name!r}, cpu={cpu!r})" |
| ) |
| |
| |
| @attr.s |
| class _FuchsiaBuildResults: |
| """Represents the outputs of a completed build of Fuchsia.""" |
| |
| _api = attr.ib() |
| checkout = attr.ib() |
| build_dir = attr.ib() |
| gn_results = attr.ib() |
| images = attr.ib(factory=list) |
| archives = attr.ib(factory=list) |
| _size_check_failed = attr.ib(init=False, default=False) |
| fint_build_artifacts = attr.ib( |
| type=fint_build_artifacts_pb2.BuildArtifacts, default=None |
| ) |
| |
| @property |
| def set_metadata(self): |
| return self.gn_results.fint_set_artifacts.metadata |
| |
| @property |
| def product_bundle_name(self): |
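        # E.g. a product of "products/core.gni" and a board of
        # "boards/x64.gni" yield "core.x64".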
| product = self.set_metadata.product.split("/")[-1].split(".")[0] |
| board = self.set_metadata.board.split("/")[-1].split(".")[0] |
| if product and board: |
| return product + "." + board |
| return None |
| |
| @property |
| def affected_tests(self): |
| """Returns a list of names of tests affected by the current gerrit change.""" |
| if self._api.build.can_exit_early_if_unaffected(self.checkout): |
| return self.fint_build_artifacts.affected_tests |
| return [] |
| |
| @property |
| def no_work(self): |
| """Returns whether all testing can be skipped.""" |
| if self._api.build.can_exit_early_if_unaffected(self.checkout): |
| return self.fint_build_artifacts.build_not_affected |
| return False |
| |
| @functools.cached_property |
| def bazel_sdk_path(self): |
| """The Bazel SDK path relative to the build output directory.""" |
| bazel_sdk_info = self._api.file.read_json( |
| f"read {BAZEL_SDK_INFO_JSON}", |
| self.build_dir / BAZEL_SDK_INFO_JSON, |
| test_data=[{"location": "gen/bazel_sdk"}], |
| ) |
| if len(bazel_sdk_info) != 1: # pragma: no cover |
| raise self._api.step.StepFailure( |
| f"expected exactly 1 Bazel SDK entry, got {len(bazel_sdk_info)}" |
| ) |
| return self._api.path.abs_to_path( |
| self._api.path.realpath(self.build_dir / bazel_sdk_info[0]["location"]) |
| ) |
| |
| @functools.cached_property |
| def assembly_manifest(self): |
| """The main assembly manifest produced by the build. |
| |
| Returns: |
| dict: Assembly Manifest. |
| """ |
| assembly_manifests = self._api.file.read_json( |
| f"read {ASSEMBLY_MANIFESTS_JSON}", |
| self.build_dir / ASSEMBLY_MANIFESTS_JSON, |
| test_data=[ |
| { |
| "image_name": "fuchsia", |
| "assembly_manifest_path": "assembly_manifest.json", |
| } |
| ], |
| ) |
| fuchsia_manifest = None |
| for manifest_data in assembly_manifests: |
| name = manifest_data["image_name"] |
| path = manifest_data["assembly_manifest_path"] |
            # Read the file so that it is visible in the LUCI output for debugging.
| self._api.file.read_json( |
| f"read {name} assembly manifest", |
| self.build_dir / path, |
| ) |
| if name == "fuchsia": |
| fuchsia_manifest = path |
| if not fuchsia_manifest: # pragma: no cover |
| raise self._api.step.StepFailure("expected fuchsia assembly manifest") |
| return fuchsia_manifest |
| |
| @functools.cached_property |
| def binary_sizes(self): |
| """The binary size report produced by the build. |
| |
| Returns: |
| dict: Binary size report. |
| """ |
| detailed_size_checker_data_items = self._api.file.read_json( |
| f"read {DETAILED_SIZE_CHECKER_DATA_JSON}", |
| self.build_dir / DETAILED_SIZE_CHECKER_DATA_JSON, |
| test_data=[{"name": "size_budgets", "path": "size_budgets.json"}], |
| ) |
| for detailed_size_checker_data in detailed_size_checker_data_items: |
| name = detailed_size_checker_data["name"] |
| path = detailed_size_checker_data["path"] |
| self._api.file.read_json( |
| f"read {name}: ({path})", |
| self.build_dir / path, |
| ) |
| |
| size_report_paths = self._api.file.read_json( |
| f"read {GERRIT_SIZE_REPORT_JSON}", |
| self.build_dir / GERRIT_SIZE_REPORT_JSON, |
| test_data=["size_report.json"], |
| ) |
| if len(size_report_paths) != 1: # pragma: no cover |
| raise self._api.step.StepFailure( |
| f"expected exactly 1 size report, got {len(size_report_paths)}" |
| ) |
| return self._api.file.read_json( |
| f"read {size_report_paths[0]}", |
| self.build_dir / size_report_paths[0], |
| test_data={"component": 1}, |
| ) |
| |
| @functools.cached_property |
| def cts_artifacts(self): |
| """The paths to CTS artifacts relative to the checkout root.""" |
| relpaths = self._api.file.read_json( |
| "read cts artifacts manifest", |
| self.build_dir / CTF_ARTIFACTS_JSON, |
| test_data=["foo.far", "bar.far"], |
| ) |
| return [ |
| self._api.path.abs_to_path( |
| self._api.path.realpath(self.build_dir / relpath) |
| ) |
| for relpath in relpaths |
| ] |
| |
| def cipd_assembly_artifacts(self, missing_ok=True): |
| """Paths to files to include in an assembly artifacts CIPD package. |
| |
| cipd_assembly_artifacts.json contains a list of paths to manifests |
| produced by the build, where each manifest itself contains a list of |
| dictionaries corresponding to files required for product assembly. |
| |
| Returns a dictionary mapping from relative path within the CIPD package |
| to the absolute path to the file that should be copied into that |
| destination. This dictionary is the result of merging all the manifests |
| referenced by cipd_assembly_inputs.json. |
| """ |
| |
| def abspath(relpath_in_build_dir): |
| return self._api.path.abs_to_path( |
| self._api.path.realpath(self.build_dir / relpath_in_build_dir) |
| ) |
| |
| assembly_manifests = [ |
| m["path"] |
| for m in self._api.file.read_json( |
| f"read {CIPD_ASSEMBLY_ARTIFACTS_JSON}", |
| abspath(CIPD_ASSEMBLY_ARTIFACTS_JSON), |
| test_data=[{"path": "obj/assembly_inputs.json"}], |
| ) |
| ] |
| copy_mapping = {} |
| for manifest in assembly_manifests: |
| self._api.path.mock_add_paths(abspath(manifest)) |
| if missing_ok and not self._api.path.exists(abspath(manifest)): |
| continue # pragma: no cover |
| inputs = self._api.file.read_json( |
| f"read {manifest}", |
| abspath(manifest), |
| test_data=[ |
| { |
| "source": "host_x64/tool", |
| "destination": "host_x64/tool", |
| }, |
| { |
| "source": "../../prebuilt/other-tool", |
| "destination": "prebuilt/other-tool", |
| }, |
| ], |
| ) |
| for inp in inputs: |
| source, dest = abspath(inp["source"]), inp["destination"] |
| if dest in copy_mapping: # pragma: no cover |
| raise self._api.step.StepFailure( |
| f"Multiple files have the same destination {dest}: {copy_mapping[dest]}, {source}" |
| ) |
| copy_mapping[dest] = source |
| return copy_mapping |
| |
| @functools.cached_property |
| def boards(self): |
| """Parses the `boards` build api module to create a mapping of board |
| names to their paths. |
| |
| Returns: |
| A mapping of released board name to absolute board artifact path. |
| """ |
| boards = self._api.file.read_json( |
| f"read {BOARDS_JSON}", |
| self.build_dir / BOARDS_JSON, |
| test_data=[{"name": "foo", "outdir": "obj/foo-board"}], |
| ) |
| return {board["name"]: self.build_dir / board["outdir"] for board in boards} |
| |
| @functools.cached_property |
| def partitions(self): |
| """Parses the `partitions` build api module to create a mapping of |
| partitions configuration names to their paths. |
| |
| Returns: |
| A mapping of released partition configuration name to absolute |
| artifact path. |
| """ |
| partitions = self._api.file.read_json( |
| f"read {PARTITIONS_JSON}", |
| self.build_dir / PARTITIONS_JSON, |
| test_data=[{"name": "foo", "outdir": "obj/foo-partition"}], |
| ) |
| return { |
| partition["name"]: self.build_dir / partition["outdir"] |
| for partition in partitions |
| } |
| |
| @functools.cached_property |
| def generated_sources(self): |
| """The paths to the generated source files relative to the checkout root.""" |
| generated_sources = [] |
| for path in self.gn_results.generated_sources: |
| try: |
| abspath = self._api.path.abs_to_path( |
| self._api.path.realpath(self.build_dir.joinpath(path.lstrip("/"))) |
| ) |
| except ValueError: # pragma: no cover |
| raise self._api.step.StepFailure( |
| f"Invalid path in generated_sources.json: {path}" |
| ) |
| self._api.path.mock_add_paths(abspath) |
| if self._api.path.exists(abspath): |
| generated_sources.append( |
| self._api.path.relpath(abspath, self.checkout.root_dir) |
| ) |
| return generated_sources |
| |
| @functools.cached_property |
| def triage_sources(self): |
| """The paths to the triage sources relative to the checkout root.""" |
| return [ |
| self._api.path.relpath(f, self.checkout.root_dir) |
| for f in self.gn_results.triage_sources |
| ] |
| |
| @functools.cached_property |
| def vnames_json_path(self): |
| """The path to the vnames.json relative to the checkout root.""" |
| return self.gn_results.vnames_json_path |
| |
| @property |
| def compdb_path(self): |
| return self.gn_results.compdb_path |
| |
| def tool(self, name, cpu=None, os=None, **kwargs): |
| """The path to a tool of the given name and cpu.""" |
| return self.gn_results.tool(name, cpu, os, **kwargs) |
| |
| def package_archive(self, name, cpu, **kwargs): |
| """The paths of a package archive for all api levels built given a name and cpu.""" |
| return self.gn_results.package_archive(name, cpu, **kwargs) |
| |
| def upload_artifacts(self, sign_artifacts=False): |
| """Upload artifacts to GCS based on the manifest emitted by the |
| artifactory tool. |
| |
| Args: |
| sign_artifacts (bool): Whether to sign the artifacts and attach the |
| signatures to the uploaded files. |
| """ |
| with self._api.step.nest("upload artifacts"): |
| cmd = [ |
| self.tool("artifactory"), |
| "up", |
| "-namespace", |
| self._api.artifacts.build_path(), |
| "-upload-manifest-json-output", |
| self._api.json.output(), |
| self.build_dir, |
| ] |
| upload_manifest = self._api.step( |
| "emit artifactory manifest", |
| cmd, |
| ).json.output |
| self._api.artifacts.verify_blobs( |
| "verify blobs", |
| self._api.json.input(upload_manifest), |
| ) |
| self._api.artifacts.upload( |
| "upload", |
| self._api.json.input(upload_manifest), |
| sign_artifacts=sign_artifacts, |
| ) |
| |
| def report_binary_sizes(self): |
| """Reports binary_sizes output property for consumption by Gerrit.""" |
| with self._api.step.nest("report binary sizes") as presentation: |
| presentation.properties["binary_sizes"] = self.binary_sizes |
| |
| def check_size_budgets(self): |
| """Checks whether binary size report has 1+ budgets exceeded.""" |
| return self._api.binary_size.check_budgets( |
| "check budgets", |
| self._api.json.input(self.binary_sizes), |
| ) |
| |
| def check_size_creep( |
| self, |
| gitiles_remote, |
| base_commit, |
| ci_bucket, |
| ci_builder=None, |
| gerrit_changes=None, |
| size_creep_label=None, |
| ): |
| """Attempts to compute the size diff against the matching CI builder |
| and checks for size creep failures. |
| |
| Args: |
| gitiles_remote (str): Gitiles remote for base commit. |
| base_commit (str): Base commit as sha1. |
| ci_bucket (str): Bucket name of the CI builder to inspect. |
| ci_builder (str): Builder name of the CI builder to inspect. |
| Defaults to the current builder's name. |
| gerrit_changes (seq<GerritChange>): Current Gerrit change(s). If |
| this argument is not set, size creep will not be enforced. |
| size_creep_label (str): Gerrit label which must be approved in order |
| to be exempt from size creep. If this argument is not set, size |
| creep will not be enforced. |
| |
| Raises: |
| StepFailure: One or more size creep budgets were exceeded. |
| """ |
| |
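        # The diff returned by `binary_size.diff_ci` is assumed to look
        # roughly like the following; only the fields used below are shown,
        # and the full schema is defined by the binary_size module:
        #   {
        #       "creep_budget_exceeded": <bool>,
        #       "component_diffs": [
        #           {"creep_budget_exceeded": <bool>, ...},
        #       ],
        #   }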
| def get_exceeded_components(diff): |
| return [ |
| component_diff |
| for component_diff in diff["component_diffs"] |
| if component_diff["creep_budget_exceeded"] |
| ] |
| |
| with self._api.step.nest("check size creep") as presentation: |
| diff_step, diff = self._api.binary_size.diff_ci( |
| "diff ci", |
| gitiles_remote, |
| base_commit, |
| builder_common_pb2.BuilderID( |
| project=self._api.buildbucket.build.builder.project, |
| bucket=ci_bucket, |
| builder=ci_builder or self._api.buildbucket.build.builder.builder, |
| ), |
| self._api.json.input(self.binary_sizes), |
| ) |
| |
| prepared_step = self._api.reported_step.prepare_step( |
| "check_size_creep", diff_step |
| ) |
| |
| try: |
| if ( |
| diff |
| and diff["creep_budget_exceeded"] |
| and gerrit_changes |
| and size_creep_label |
| ): |
| change_details = self._api.gerrit.change_details( |
| name="get change details", |
| change_id=str(gerrit_changes[0].change), |
| ).json.output |
| if ( |
| change_details.get("labels", {}) |
| .get(size_creep_label, {}) |
| .get("approved") |
| ): |
| return |
| |
| prepared_step.add_artifact( |
| "exceeded size creep budgets", |
| json.dumps(get_exceeded_components(diff)), |
| ) |
| |
| prepared_step.set_test_status(TestStatus.FAIL) |
| |
| presentation.status = self._api.step.FAILURE |
| raise self._api.step.StepFailure( |
| f"One or more binary size creep budgets were exceeded. " |
| f"See the binary size table in Gerrit for more details." |
| f"\n\n If this change cannot be reworked to fit under " |
| f"the creep budgets, request a {size_creep_label}+1 by " |
| f"adding fuchsia-size-reviews@google.com as a reviewer " |
| f"on your CL, and a size reviewer will be " |
| f"automatically assigned. Once you have " |
| f"{size_creep_label}+1, retry the build / CQ attempt." |
| ) |
| finally: |
| prepared_step.upload() |
| |
| def diff_product_size( |
| self, |
| gitiles_remote, |
| base_commit, |
| ci_bucket, |
| ci_builder=None, |
| ): |
| """Attempts to compute the product size diff against the matching CI |
| builder and writes the results to stdout log. |
| |
| Args: |
| gitiles_remote (str): Gitiles remote for base commit. |
| base_commit (str): Base commit as sha1. |
| ci_bucket (str): Bucket name of the CI builder to inspect. |
| ci_builder (str): Builder name of the CI builder to inspect. |
| Defaults to the current builder's name. |
| |
| Output: |
            Writes the results of the product size diff to stdout.
| """ |
| |
| with self._api.step.nest("diff product size"): |
| # The diff will be available in stdout. |
| # The creep limit will be enforced in check_size_creep. |
| self._api.binary_size.diff_product_size( |
| self.assembly_manifest, |
| gitiles_remote, |
| base_commit, |
| builder_common_pb2.BuilderID( |
| project=self._api.buildbucket.build.builder.project, |
| bucket=ci_bucket, |
| builder=ci_builder or self._api.buildbucket.build.builder.builder, |
| ), |
| self.tool("ffx"), |
| self.build_dir, |
| ) |
| |
| def upload(self, gcs_bucket, namespace=None): |
| """Uploads artifacts from the build to Google Cloud Storage. |
| |
| Args: |
| gcs_bucket (str): GCS bucket name to upload build results to. |
| namespace (str|None): A unique namespace for the GCS upload |
| location; if None, the current build's ID is used. |
| """ |
| assert gcs_bucket |
| with self._api.step.nest("upload build results"): |
| for archive in self.archives: |
| path = self.build_dir / archive["path"] |
| |
| # Upload the archive |
| self._api.gsutil.upload_namespaced_file( |
| source=path, |
| bucket=gcs_bucket, |
| subpath=self._api.path.basename(path), |
| namespace=namespace, |
| ) |
| |
| |
| @attr.s(frozen=True) |
| class _GNResults: |
| """_GNResults represents the result of a `gn gen` invocation in the fuchsia build. |
| |
| It exposes the API of the build, which defines how one can invoke |
| ninja. |
| """ |
| |
| _api = attr.ib() |
| build_dir = attr.ib() |
| fint_set_artifacts = attr.ib( |
| # Optional for convenience when writing tests. Production recipe code |
| # should always populate this field. |
| default=None, |
| # Proto objects are not hashable. |
| hash=False, |
| ) |
| # The following attributes are private because they are only intended to be |
| # used from within this recipe module, not by any recipes that use this |
| # recipe module. |
| _fint_path = attr.ib(default=None) |
| _fint_params_path = attr.ib(default=None) |
| _fint_context = attr.ib(default=None) |
| |
| def __attrs_post_init__(self): |
| # Eagerly read in the tools manifest so that it always has the same |
| # step name regardless of when the caller first accesses the manifest. |
| self._tools # pylint: disable=pointless-statement |
| |
| @property |
| def skip_build(self): |
| """Whether it's safe to skip doing a full build.""" |
| return self.fint_set_artifacts.skip_build |
| |
| @property |
| def gn_trace_path(self): |
| """The path to a GN trace file produced by `fint set`.""" |
| return self.fint_set_artifacts.gn_trace_path |
| |
| @functools.cached_property |
| def generated_sources(self): |
| """Returns the generated source files (list(str)) from the fuchsia build. |
| |
| The returned paths are relative to the fuchsia build directory. |
| """ |
| return self._api.file.read_json( |
| "read generated sources", |
| self.build_dir / "generated_sources.json", |
| test_data=["generated_header.h"], |
| ) |
| |
| @functools.cached_property |
| def sdk_archives(self): |
| """Returns a list of absolute paths to the SDK archives.""" |
| archives = self._api.file.read_json( |
| "read sdk_archives.json", |
| self.build_dir / "sdk_archives.json", |
| test_data=[{"path": "sdk_archives/foo_sdk.tar.gz"}], |
| ) |
| return [ |
| self._api.path.abs_to_path( |
| self._api.path.realpath( |
| self.build_dir.joinpath(archive["path"].lstrip("/")) |
| ) |
| ) |
| for archive in archives |
| ] |
| |
| def tool(self, name, cpu=None, os=None, mock_for_tests=True): |
| """Returns the path to the specified tool provided from the tool_paths |
| manifest. |
| |
| Args: |
| name (str): The short name of the tool, as it appears in |
| tool_paths.json (usually the same as the basename of the |
| executable). |
| cpu (str): The arch of the machine the tool will run on. |
| os (str): The OS of the machine the tool will run on. |
| mock_for_tests (bool): Whether to mock the tool info if it's not |
| found in tool_paths.json. Ignored in production, only |
| useful in recipe unit test mode for getting code coverage of |
| the missing tool code path. |
| """ |
| os = os or self._api.platform.name |
| cpu = cpu or {"arm": "arm64", "intel": "x64"}[self._api.platform.arch] |
| try: |
| tool_relpath = self._tools[name, cpu, os] |
| except KeyError: |
| # If we're in recipe unit testing mode, just return some mock info |
| # for the tool if it's not in tool_paths.json. Requiring that every |
| # tool used by the recipe shows up in the mock tool_paths.json is a |
| # maintenance burden, since adding a dependency on a new tool also |
| # requires modifying the mock tool_paths.json. It would also |
| # create much more noise in expectation files. |
| if self._api.build._test_data.enabled and mock_for_tests: |
| tool_relpath = f"{os}_{cpu}/{name}" |
| else: |
| raise NoSuchTool(name, cpu, os) |
| |
| try: |
            # Use normpath rather than realpath so symlinks are not expanded;
            # this matters for LLVM tools, which should be invoked through
            # their symlinks.
| return self._api.path.abs_to_path( |
| self._api.path.normpath(self.build_dir / tool_relpath) |
| ) |
| except ValueError: # pragma: no cover |
| raise self._api.step.StepFailure( |
| f"Invalid path in tool_paths.json: {tool_relpath}" |
| ) |
| |
| @functools.cached_property |
| def _tools(self): |
| tools = {} |
| tool_paths_manifest = self._api.file.read_json( |
| "read tool_paths manifest", |
| self.build_dir / TOOL_PATHS_JSON, |
| test_data=[ |
| {"name": "foo", "cpu": "x64", "os": "linux", "path": "linux_x64/foo"} |
| ], |
| ) |
| for tool in tool_paths_manifest: |
| key = (tool["name"], tool["cpu"], tool["os"]) |
| if key in tools: |
| raise self._api.step.StepFailure( |
| f"{TOOL_PATHS_JSON} contains multiple {key} entries, paths are {tools[key]!r} and {tool['path']!r}" |
| ) |
| tools[key] = tool["path"] |
| return tools |
| |
| def package_archive(self, name, cpu): |
| """Returns a mapping of all of the api levels built for the specified |
| package variant to their corresponding far file paths. |
| |
| Args: |
| name (str): The archived package's name, as referenced to by |
| package_archives.json. |
| cpu (str): The package's target cpu arch. |
| """ |
| try: |
| return self._package_archives[name, cpu] |
| except KeyError: |
| raise NoSuchPackageArchive(name, cpu) |
| |
| @functools.cached_property |
| def _package_archives(self): |
| # Get a list of individual package archive metadata JSON files. |
| package_archive_manifests = self._api.file.read_json( |
| "read package_archives manifest list", |
| self.build_dir / PACKAGE_ARCHIVES_JSON, |
| test_data=[ |
| "foo/bar/baz.json", |
| ], |
| ) |
| |
| # Get a list of package archive variants and their paths. |
| package_archives = [ |
| self._api.file.read_json( |
| "read package archive manifest", |
| self.build_dir / manifest, |
| test_data={ |
| "name": "foo", |
| "cpu": "x64", |
| "api_level": -1, |
| "path": "linux_x64/foo.far", |
| }, |
| ) |
| for manifest in package_archive_manifests |
| ] |
| |
| # Create a mapping of {(package_name, target_cpu): {api_level: far_path}}. |
| path_by_api_level_by_key = {} |
| for package_archive in package_archives: |
| key = (package_archive["name"], package_archive["cpu"]) |
| path_by_api_level = path_by_api_level_by_key.setdefault(key, {}) |
| api_level = package_archive["api_level"] |
| far_path = self.build_dir / package_archive["path"] |
| if api_level in path_by_api_level: |
| raise self._api.step.StepFailure( |
| f"{PACKAGE_ARCHIVES_JSON} @ {key} contains duplicate " |
| + f"configurations for api_level {api_level}, paths are " |
| + f"{path_by_api_level[api_level]!r} and {far_path!r}" |
| ) |
| path_by_api_level[api_level] = far_path |
| return path_by_api_level_by_key |
| |
| @functools.cached_property |
| def triage_sources(self): |
| """Returns the absolute paths of the files defined in the triage_sources |
| manifest.""" |
| triage_sources_manifest = self._api.file.read_json( |
| "read triage_sources manifest", |
| self.build_dir / TRIAGE_SOURCES_JSON, |
| test_data=self._api.build.test_api.mock_triage_sources_manifest(), |
| ) |
| return [ |
| self._api.path.abs_to_path(self._api.path.realpath(self.build_dir / source)) |
| for source in triage_sources_manifest |
| ] |
| |
| @functools.cached_property |
| def rbe_config_path(self): |
| """Returns the checkout relative path to the RBE config specified in |
| the Fuchsia source tree.""" |
| rbe_config_manifest = self._api.file.read_json( |
| "read rbe_config manifest", |
| self.build_dir / RBE_CONFIG_JSON, |
| test_data=[ |
| { |
| "path": "../../path/to/rbe/config.cfg", |
| }, |
| { |
| "path": "../../path/to/rbe/experimental.cfg", |
| }, |
| ], |
| ) |
| reproxy_config_paths = [ |
| self._api.path.abs_to_path( |
| self._api.path.realpath(self.build_dir / m["path"]) |
| ) |
| for m in rbe_config_manifest |
| ] |
| |
        # Concatenate all of the listed reproxy configurations into a single
        # joint config.
| all_config_text = [ |
| self._api.file.read_text(f"read proxy cfg {cfg}", cfg) |
| for cfg in reproxy_config_paths |
| ] |
| joint_config = self.build_dir / "joint_reproxy.cfg" |
| self._api.file.write_text( |
| "join reproxy configs", joint_config, "".join(all_config_text) |
| ) |
| return joint_config |
| |
| @functools.cached_property |
| def vnames_json_path(self): |
| """Returns the checkout relative path to the VNames config specified in |
| the Fuchsia source tree.""" |
| vnames_config_manifest = self._api.file.read_json( |
| "read vnames_config manifest", |
| self.build_dir / VNAMES_CONFIG_JSON, |
| test_data=[{"path": "../../path/to/infra/vnames.json"}], |
| ) |
| assert len(vnames_config_manifest) == 1 |
| vnames_json_path = self._api.path.abs_to_path( |
| self._api.path.realpath(self.build_dir / vnames_config_manifest[0]["path"]) |
| ) |
| return vnames_json_path |
| |
| @property |
| def compdb_path(self): |
| return self.build_dir / GN_COMPDB_FILENAME |
| |
| |
| class FuchsiaBuildApi(recipe_api.RecipeApi): |
| """APIs for building Fuchsia.""" |
| |
| # Name of the top-level nest step created by `with_options()` that contains |
| # all of the other build steps. Can be used by orchestrator recipes to |
| # retrieve logs attached to this step in subbuilds. |
| BUILD_STEP_NAME = "build" |
| |
| FINT_PARAMS_PATH_PROPERTY = "fint_params_path" |
| NoSuchTool = NoSuchTool |
| NoSuchPackageArchive = NoSuchPackageArchive |
| |
| def __init__(self, *args, **kwargs): |
| super().__init__(*args, **kwargs) |
| self._emitted_output_properties = False |
| |
| def with_options( |
| self, |
| checkout, |
| fint_params_path, |
| build_dir=None, |
| collect_coverage=False, |
| incremental=False, |
| sdk_id=None, |
| clean_check=True, |
| run_all_tests=False, |
| use_sandboxing=True, |
| **kwargs, |
| ): |
| """Builds Fuchsia from a Jiri checkout. |
| |
| Depending on the fint parameters, may or may not build |
| Fuchsia-the-operating-system (i.e. the Fuchsia images). |
| |
| Args: |
| checkout (CheckoutResult): The Fuchsia checkout result. |
| fint_params_path (str): The path, relative to the checkout root, |
| of a platform spec textproto to pass to fint. |
| build_dir (Path): The build output directory. |
| collect_coverage (bool): Whether to build for collecting |
| coverage. |
| incremental (bool): Whether or not to build incrementally. |
            sdk_id (str): If specified, set sdk_id in GN.
            clean_check (bool): Whether to verify that the build does not
                dirty the checkout.
| run_all_tests (bool): Whether to run all tests. If true, the build will NOT |
| be skipped even if it is determined to be unaffected. |
| use_sandboxing (bool): Whether to run the build in a sandbox. |
| **kwargs (dict): Passed through to _build(). |
| |
| Returns: |
| A FuchsiaBuildResults, representing the build. |
| """ |
| if incremental: |
| cache_ctx = self.m.cache.guard("incremental") |
| else: |
| cache_ctx = contextlib.nullcontext() |
| |
| with self.m.step.nest( |
| self.BUILD_STEP_NAME |
| ) as presentation, self.m.macos_sdk(), cache_ctx: |
| fint_params = self.m.file.read_text( |
| "read fint params", |
| checkout.root_dir / fint_params_path, |
| test_data='field: "value"', |
| ) |
| presentation.logs["fint_params"] = fint_params |
| # Write the fint params path to output properties so `fx repro` |
| # knows which file to use. |
| presentation.properties[self.FINT_PARAMS_PATH_PROPERTY] = fint_params_path |
| |
| gn_results = self.gen( |
| checkout=checkout, |
| fint_params_path=fint_params_path, |
| build_dir=build_dir, |
| sdk_id=sdk_id, |
| collect_coverage=collect_coverage, |
| presentation=presentation, |
| use_sandboxing=use_sandboxing, |
| ) |
| |
| # Upload tests.json to Logdog so it's available to aid in debugging. |
| self.m.file.read_text( |
| "read tests.json", gn_results.build_dir / "tests.json" |
| ) |
| |
| if ( |
| not run_all_tests |
| and self.can_exit_early_if_unaffected(checkout) |
| and gn_results.skip_build |
| ): |
| return None |
| |
| build_result = self._build( |
| checkout=checkout, |
| gn_results=gn_results, |
| presentation=presentation, |
| use_sandboxing=use_sandboxing, |
| **kwargs, |
| ) |
| |
| # Verify the checkout is not dirtied by a build. |
| if clean_check: |
| checkout.check_clean() |
| |
| return build_result |
| |
| def gen( |
| self, |
| checkout, |
| fint_params_path, |
| build_dir=None, |
| sdk_id=None, |
| collect_coverage=False, |
| presentation=None, |
| use_sandboxing=False, |
| ): |
| """Sets up and calls `gn gen`. |
| |
| Args: |
| checkout (CheckoutApi.CheckoutResults): The checkout results. |
| fint_params_path (str): The path to a build params textproto to |
| pass to fint, relative to the checkout directory. |
            build_dir (Path): The build directory; defaults to
                "out/not-default". Note that changing the build dir
                invalidates Goma caches, so it should ideally only be
                overridden when Goma is disabled.
            sdk_id (str): The current SDK ID.
| collect_coverage (bool): Whether to build for collecting coverage. |
| presentation (StepPresentation): Step to attach logs to. |
| use_sandboxing (bool): Whether to run the gen in a sandbox. |
| |
| Returns: |
| A _GNResults object. |
| """ |
| assert fint_params_path, "fint_params_path must be set" |
| |
| if not build_dir: |
| # Some parts of the build require the build dir to be two |
| # directories nested underneath the checkout dir. |
| # We choose the path to be intentionally different from |
| # "out/default" because that is what most developers use locally and we |
| # want to prevent the build from relying on those directory names. |
| build_dir = checkout.root_dir.joinpath("out", "not-default") |
| |
| # with_options() normally handles setting up the macOS SDK, but we need |
| # to ensure it's set up here to support recipes that call `gen()` |
| # directly. |
| with self.m.macos_sdk(), self.m.context(cwd=checkout.root_dir): |
| context = self._fint_context( |
| checkout=checkout, |
| build_dir=build_dir, |
| sdk_id=sdk_id, |
| collect_coverage=collect_coverage, |
| ) |
| fint_path = self._fint_path(checkout) |
| # We call this step `gn gen` even though it runs `fint set` |
| # to avoid confusion about when the recipe runs `gn gen`. |
| step = self._run_fint( |
| "gn gen", |
| "set", |
| fint_path, |
| checkout.root_dir / fint_params_path, |
| context, |
| use_sandboxing=use_sandboxing, |
| ) |
| |
| prepared_step = self.m.reported_step.prepare_step("gn_gen", step) |
| |
| # Artifacts should be produced even if some `fint set` steps |
| # failed, as the artifacts may contain useful logs. |
| try: |
| artifacts = self.m.file.read_proto( |
| "read fint set artifacts", |
| self.m.path.join(context.artifact_dir, FINT_SET_ARTIFACTS), |
| fint_set_artifacts_pb2.SetArtifacts, |
| "JSONPB", |
| test_proto=fint_set_artifacts_pb2.SetArtifacts( |
| gn_trace_path=self.m.path.join( |
| context.artifact_dir, "mock-gn-trace.json" |
| ), |
| use_goma=True, |
| enable_rbe=True, |
| metadata=dict( |
| board="boards/x64.gni", |
| optimize="debug", |
| product="products/core.gni", |
| target_arch="x64", |
| variants=["asan"], |
| ), |
| ), |
| ) |
| except self.m.file.Error: |
| if step.retcode == 0: # pragma: no cover |
| # If fint passed then we must be able to read the artifacts. |
| raise |
| artifacts = fint_set_artifacts_pb2.SetArtifacts() |
| |
| # Log the full failure summary so the entire text is visible even if |
| # it's truncated in the summary markdown. |
| if artifacts.failure_summary and presentation: |
| summary = artifacts.failure_summary.splitlines() |
| presentation.logs[FAILURE_SUMMARY_LOG] = summary |
| prepared_step.add_artifact(FAILURE_SUMMARY_LOG, summary) |
| |
| prepared_step.upload() |
| |
| if step.retcode: |
| if artifacts.failure_summary: |
| msg = self.m.buildbucket_util.summary_message( |
| artifacts.failure_summary, |
| f"(failure summary truncated, see the '{FAILURE_SUMMARY_LOG}' log for full failure details)", |
| ) |
| else: |
| msg = "Unrecognized fint set failure, see stdout for details." |
| msg += self._repro_line() |
| raise self.m.step.StepFailure(msg) |
| elif artifacts.failure_summary: |
| raise self.m.step.StepFailure( |
| "`fint set` emitted a failure summary but had a retcode of zero." |
| + self._repro_line() |
| ) |
| |
| return self.gn_results( |
| build_dir, |
| artifacts, |
| fint_path=fint_path, |
| fint_params_path=checkout.root_dir / fint_params_path, |
| fint_context=context, |
| ) |
| |
| def _build( |
| self, |
| checkout, |
| gn_results, |
| presentation, |
| allow_dirty=False, |
| artifact_gcs_bucket=None, |
| upload_namespace=None, |
| timeout_secs=90 * 60, |
| use_sandboxing=False, |
| ): |
| """Runs `fint build`. |
| |
| Fint build consumes the GN build APIs from the build directory as |
| well as the fint params path to determine what targets to build. |
| |
| Args: |
| checkout (CheckoutResult): The Fuchsia checkout result. |
| gn_results (_GNResults): GN gen results. |
| presentation (StepPresentation): Presentation to attach important |
| logs to. |
            allow_dirty (bool): Whether to skip the Ninja no-op check.
            artifact_gcs_bucket (str): GCS bucket name to upload build artifacts
                to.
            upload_namespace (str): The namespace within the build stats GCS
                bucket to upload to.
            timeout_secs (int): The timeout in seconds for running
                `fint build`.
            use_sandboxing (bool): Whether to run the build in a sandbox.
| |
| Returns: |
| A FuchsiaBuildResults, representing the build. |
| """ |
| if gn_results.fint_set_artifacts.use_goma: |
| self.m.goma.set_path(self.m.path.dirname(gn_results.tool("goma_ctl"))) |
| goma_context = self.m.goma() |
| else: |
| goma_context = contextlib.nullcontext() |
| |
| if gn_results.fint_set_artifacts.enable_rbe: |
| rbe_context = self.m.rbe( |
| config_path=gn_results.rbe_config_path, |
| reclient_path=self.m.path.dirname(gn_results.tool("rewrapper")), |
| # ninja builds should not expose absolute paths in remote actions |
| absolute_path_policy=self.m.rbe.AbsolutePathPolicy.REJECT, |
| ) |
| else: |
| rbe_context = contextlib.nullcontext() |
| |
| context = copy.deepcopy(gn_results._fint_context) |
| context.skip_ninja_noop_check = allow_dirty |
| |
| with goma_context, rbe_context: |
| fint_build_step = self._run_fint( |
| step_name="ninja", |
| command="build", |
| fint_path=gn_results._fint_path, |
| static_path=gn_results._fint_params_path, |
| context=context, |
| timeout=timeout_secs, |
| use_sandboxing=( |
| # We don't use sandboxing when goma is enabled because: |
| # 1. The location of the goma socket is dependent on the |
| # current user, which by design is not known inside the |
| # sandbox. |
| # 2. Only the GCC builders utilize goma - everything else |
| # uses RBE directly, which we do run inside a sandbox. |
| use_sandboxing |
| and not gn_results.fint_set_artifacts.use_goma |
| ), |
| use_buildproxywrap=True, |
| ) |
| |
| prepared_step = self.m.reported_step.prepare_step("ninja", fint_build_step) |
| |
| # Artifacts should be produced even if some `fint build` steps failed, |
| # as the artifacts may contain useful logs that will help understand |
| # the cause of failure. |
| try: |
| fint_build_artifacts = self.m.file.read_proto( |
| "read fint build artifacts", |
| self.m.path.join(context.artifact_dir, FINT_BUILD_ARTIFACTS), |
| fint_build_artifacts_pb2.BuildArtifacts, |
| "JSONPB", |
| test_proto=self.test_api.fint_build_artifacts_proto( |
| # This isn't super realistic because tests sometimes aren't |
| # included in the build graph, in which case there would never |
| # be any affected tests even in CQ with changed files. But it's |
| # simplest if we just configure the mock affected tests here. |
| affected_tests=( |
| ["test1", "test2"] if checkout.changed_files() else [] |
| ), |
| built_images=[{"name": "foo", "type": "blk", "path": "foo.img"}], |
| ), |
| ) |
| except self.m.file.Error: |
| if fint_build_step.retcode == 0: # pragma: no cover |
| # If fint passed then we must be able to read the artifacts. |
| raise |
| fint_build_artifacts = fint_build_artifacts_pb2.BuildArtifacts() |
| |
| # Log the full failure summary so the entire text is visible even if |
| # it's truncated in the summary markdown. |
| if fint_build_artifacts.failure_summary: |
| summary = fint_build_artifacts.failure_summary.splitlines() |
| presentation.logs[FAILURE_SUMMARY_LOG] = summary |
| prepared_step.add_artifact(FAILURE_SUMMARY_LOG, summary) |
| |
| prepared_step.upload() |
| |
| if fint_build_artifacts.log_files: |
| with self.m.step.nest("read fint log files"): |
| for name, path in fint_build_artifacts.log_files.items(): |
| presentation.logs[name] = self.m.file.read_text( |
| f"read {self.m.path.basename(path)}", path, include_log=False |
| ).splitlines() |
| |
| # Only emit the ninja duration once even if we build multiple times. |
| # The builders for which we care about tracking the Ninja duration all |
| # only build once. For builders like perfcompare that build twice, |
| # the second build is incremental, so it is very fast and its duration |
| # is less meaningful than the original build's duration. |
| # Also include metrics like action counts. |
| if not self._emitted_output_properties: |
| presentation.properties["ninja_duration_seconds"] = ( |
| fint_build_artifacts.ninja_duration_seconds |
| ) |
| presentation.properties["ninja_action_metrics"] = jsonpb.MessageToDict( |
| fint_build_artifacts.ninja_action_metrics, |
| including_default_value_fields=False, # avoid breaking tests on proto-update |
| preserving_proto_field_name=True, |
| ) |
| self._emitted_output_properties = True |
| |
| if ( |
| artifact_gcs_bucket |
| and upload_namespace |
| and fint_build_artifacts.debug_files |
| ): |
| with self.m.step.nest("upload build debug files"): |
| tree = self.m.file.symlink_tree(self.m.path.mkdtemp("build-debug")) |
| for df in fint_build_artifacts.debug_files: |
| tree.register_link( |
| self.m.path.abs_to_path(df.path), |
| linkname=tree.root / df.upload_dest, |
| ) |
| tree.create_links("create symlinks") |
| upload_step = self.m.gsutil.upload_namespaced_directory( |
| tree.root, |
| bucket=artifact_gcs_bucket, |
| # Upload build debug files to a dedicated subdirectory to |
| # avoid colliding with other files in this bucket. |
| subpath="build-debug", |
| namespace=upload_namespace, |
| link_name="build debug logs", |
| ) |
| # Copy through the GCS link to the top-level build step. |
| presentation.links.update(upload_step.presentation.links) |
| |
| if artifact_gcs_bucket and upload_namespace: |
| try: |
| self._upload_buildstats_output( |
| gn_results, |
| artifact_gcs_bucket, |
| upload_namespace, |
| fint_build_artifacts, |
| ) |
| except Exception as e: |
| self.m.step.empty("upload buildstats failure").presentation.logs[ |
| "exception" |
| ] = str(e).splitlines() |
| self._upload_tracing_data( |
| gn_results, artifact_gcs_bucket, upload_namespace, fint_build_artifacts |
| ) |
| |
| if fint_build_step.retcode: |
| if fint_build_artifacts.failure_summary: |
| msg = self.m.buildbucket_util.summary_message( |
| fint_build_artifacts.failure_summary, |
| f"(failure summary truncated, see the '{FAILURE_SUMMARY_LOG}' log for full failure details)", |
| ) |
| else: |
| msg = "Unrecognized fint build failure, see stdout for details." |
| msg += self._repro_line() |
| raise self.m.step.StepFailure(msg) |
| |
| if fint_build_artifacts.failure_summary: |
| raise self.m.step.StepFailure( |
| "`fint build` emitted a failure summary but had a retcode of zero." |
| + self._repro_line() |
| ) |
| |
| # Recursively convert to a dict so that we don't have to deal with |
| # proto Struct objects, which have some annoying properties like |
| # not showing the missing key value when raising a KeyError. |
| artifacts_dict = jsonpb.MessageToDict( |
| fint_build_artifacts, |
| including_default_value_fields=True, |
| preserving_proto_field_name=True, |
| ) |
| |
| return self.build_results( |
| build_dir=gn_results.build_dir, |
| checkout=checkout, |
| gn_results=gn_results, |
| images=artifacts_dict["built_images"], |
| archives=artifacts_dict["built_archives"], |
| fint_build_artifacts=fint_build_artifacts, |
| ) |
| |
| def can_exit_early_if_unaffected(self, checkout): |
| """Returns whether or not we can safely skip building (and testing).""" |
| return ( |
| # No changed_files -> CI, which we always want to test. fint should |
| # also take this into account and never indicate that testing can be |
| # skipped if there are no changed files, but we add an extra check |
| # here to be safe. |
| checkout.changed_files() |
| # Changes to integration, in particular for both infra configs and |
| # jiri manifests, can affect any stage of the build but generally |
| # won't cause changes to the build graph. |
| and not checkout.contains_integration_patch |
| # Recipe changes can also impact all stages of a build, but recipes |
| # aren't included in the build graph. |
| and not self.m.recipe_testing.enabled |
| ) |
| |
| def gn_results(self, *args, **kwargs): |
| return _GNResults(self.m, *args, **kwargs) |
| |
| def build_results(self, *args, **kwargs): |
| return _FuchsiaBuildResults(self.m, *args, **kwargs) |
| |
| def download_test_orchestration_inputs(self, digest): |
| return _TestOrchestrationInputs.download(self.m, digest) |
| |
| def test_orchestration_inputs_from_build_results(self, *args, **kwargs): |
| return _TestOrchestrationInputs.from_build_results(self.m, *args, **kwargs) |
| |
| def test_orchestration_inputs_property_name(self, without_cl): |
| if without_cl: |
| return _TestOrchestrationInputs.DIGEST_PROPERTY_WITHOUT_CL |
| else: |
| return _TestOrchestrationInputs.DIGEST_PROPERTY |
| |
| @functools.lru_cache(maxsize=None) |
| def _fint_context(self, checkout, build_dir, sdk_id, collect_coverage): |
| """Assembles a fint Context spec.""" |
| return context_pb2.Context( |
| checkout_dir=str(checkout.root_dir), |
| build_dir=str(build_dir), |
| artifact_dir=str(self.m.path.mkdtemp("fint_artifacts")), |
| sdk_id=sdk_id or "", |
| changed_files=[ |
| context_pb2.Context.ChangedFile( |
| path=self.m.path.relpath(path, checkout.root_dir) |
| ) |
| for path in checkout.changed_files() |
| ], |
| cache_dir=str(self.m.path.cache_dir), |
| release_version=( |
| str(checkout.release_version) if checkout.release_version else "" |
| ), |
| clang_toolchain_dir=str(checkout.clang_toolchain_dir), |
| gcc_toolchain_dir=str(checkout.gcc_toolchain_dir), |
| rust_toolchain_dir=str(checkout.rust_toolchain_dir), |
| pgo_profile_path=str(checkout.pgo_profile_path), |
| collect_coverage=collect_coverage, |
| goma_job_count=self.m.goma.jobs, |
| ) |
| |
| def _fint_path(self, checkout): |
| """Builds and returns the path to a fint executable.""" |
| fint_path = self.m.path.mkdtemp("fint") / "fint" |
| bootstrap_path = checkout.root_dir.joinpath( |
| "tools", "integration", "bootstrap.sh" |
| ) |
| self.m.step("bootstrap fint", [bootstrap_path, "-o", fint_path]) |
| return fint_path |
| |
| def _run_fint( |
| self, |
| step_name, |
| command, |
| fint_path, |
| static_path, |
| context, |
| timeout=None, |
| use_sandboxing=False, |
| use_buildproxywrap=False, |
| **kwargs, |
| ): |
| context_textproto = self.m.proto.encode(context, "TEXTPB") |
| context_path = self.m.path.mkstemp() |
| self.m.file.write_raw("write fint context", context_path, context_textproto) |
| # TODO(fxbug.dev/101594): These should be configured higher in the |
| # stack in one of {swarming_bot, bbagent, recipe_wrapper}. Set them here |
| # for now to unblock isolation work (b/234060366). |
| xdg_env_vars = [ |
| "HOME", |
| "XDG_CACHE_HOME", |
| "XDG_CONFIG_HOME", |
| "XDG_DATA_HOME", |
| "XDG_HOME", |
| "XDG_STATE_HOME", |
| ] |
| env = self.m.context.env |
| env["BUILDBUCKET_ID"] = self.m.buildbucket_util.id |
| env["BUILDBUCKET_BUILDER"] = self.m.buildbucket_util.full_builder_name() |
| fint_cmd = [ |
| fint_path, |
| command, |
| "-static", |
| static_path, |
| "-context", |
| context_path, |
| ] |
| bpw = None |
| if use_buildproxywrap: |
| bpw = self.m.buildproxywrap.new_instance() |
| env.update(bpw.env()) |
| fint_cmd = bpw.auth_proxied_cmd(fint_cmd) |
| # Never sandbox on macOS, because nsjail is Linux-specific. |
| if use_sandboxing and not self.m.platform.is_mac: |
| self.m.file.ensure_directory( |
| "ensure build directory exists", context.build_dir |
| ) |
| for env_var in xdg_env_vars: |
| env[env_var] = "$tmpdir" |
| fint_cmd = self.m.nsjail.sandboxed_cmd( |
| fint_cmd, |
| env=env, |
| symlinks={ |
| # Ninja requires that it be able to run /bin/sh. |
| "/bin/bash": "/bin/sh", |
| # Symlinking /dev/fd to /proc/self/fd allows for shell redirection. |
| "/proc/self/fd": "/dev/fd", |
| # Makes writing to /dev/stdout work. |
| "/proc/self/fd/1": "/dev/stdout", |
| }, |
| ro_mounts=filter( |
| # Filter out empty strings. |
| lambda s: s, |
| [ |
| str(context_path), |
| str(fint_path), |
| "/dev/zero", |
| # TODO(rudymathu): The Android avbtool invokes openssl. It |
| # would be nice if we could have it invoke a vendored |
| # version instead, at which point we can remove this mount. |
| "/usr/bin/openssl", |
| *(bpw.ro_mounts() if use_buildproxywrap else []), |
| ], |
| ), |
| rw_mounts=filter( |
| # Filter out empty strings. |
| lambda s: s, |
| [ |
| # TODO(rudymathu): Move the checkout dir to readonly |
| # mounts once we account for affected test analysis. |
| context.checkout_dir, |
| str(self.m.path.cache_dir), |
| context.artifact_dir, |
| context.build_dir, |
| "/dev/null", |
| "/dev/urandom", |
| env.get("RBE_socket_path"), |
| *(bpw.rw_mounts() if use_buildproxywrap else []), |
| ], |
| ), |
| use_linux_tools=True, |
| use_luci_git=True, |
| ) |
| ctx = contextlib.nullcontext() |
| else: |
| for env_var in xdg_env_vars: |
| env[env_var] = env.get("TMPDIR", str(self.m.path.cleanup_dir)) |
| ctx = self.m.context(env=env) |
| |
| with ctx: |
| if use_buildproxywrap: |
| step = bpw.proxied_step( |
| step_name, |
| inner_cmd=fint_cmd, |
| ok_ret="any", |
| timeout=timeout, |
| **kwargs, |
| ) |
| else: |
| step = self.m.step( |
| step_name, |
| fint_cmd, |
| ok_ret="any", |
| timeout=timeout, |
| **kwargs, |
| ) |
| |
| if step.retcode or step.exc_result.had_timeout: |
| step.presentation.status = self.m.step.FAILURE |
| step.presentation.properties[BUILD_FAILED_PROPERTY] = True |
| step.presentation.step_text = f"run by `fint {command}`" |
| step.presentation.logs["context.textproto"] = context_textproto |
| if step.exc_result.had_timeout: |
| timeout_string = f"{int(timeout)} seconds" |
| if timeout > 60: |
| timeout_string = f"{int(timeout / 60)} minutes" |
| |
| self.m.reported_step.upload(step_name, step) |
| |
| raise self.m.step.StepFailure( |
| f"`{step_name}` timed out after {timeout_string}." |
| ) |
| return step |
| |
| def _upload_buildstats_output( |
| self, gn_results, gcs_bucket, build_id, fint_build_artifacts |
| ): |
| """Runs the buildstats command for Fuchsia and uploads the output files to GCS.""" |
| buildstats_binary_path = gn_results.tool("buildstats") |
| self.m.path.mock_add_paths(buildstats_binary_path) |
| if not self.m.path.exists(buildstats_binary_path): # pragma: no cover |
| # We might be trying to run buildstats after catching a build |
| # failure, in which case ninja may not even have gotten as far as |
| # building the buildstats tool. |
| raise Exception("The build did not produce the buildstats tool") |
| |
| output_name = "fuchsia-buildstats.json" |
| output_path = self.m.path.mkdtemp("buildstats") / output_name |
| command = [ |
| buildstats_binary_path, |
| "--ninjalog", |
| fint_build_artifacts.ninja_log_path, |
| "--compdb", |
| fint_build_artifacts.ninja_compdb_path, |
| "--graph", |
| fint_build_artifacts.ninja_graph_path, |
| "--output", |
| output_path, |
| ] |
| with self.m.context(cwd=gn_results.build_dir): |
| self.m.step("fuchsia buildstats", command) |
| |
| self.m.gsutil.upload_namespaced_file( |
| source=output_path, |
| bucket=gcs_bucket, |
| subpath=output_name, |
| namespace=build_id, |
| ) |
| |
| def _upload_tracing_data( |
| self, gn_results, artifact_gcs_bucket, upload_namespace, fint_build_artifacts |
| ): |
| """Uploads GN and ninja tracing results for this build to GCS.""" |
| paths_to_upload = [] |
| if gn_results.gn_trace_path: |
| paths_to_upload += [ |
| ("fuchsia_gn_trace.json", gn_results.gn_trace_path), |
| ] |
| |
| ninja_trace = self.m.path.mkstemp("fuchsia_ninja_trace.json") |
| try: |
| self.m.step( |
| "ninjatrace", |
| [ |
| gn_results.tool("ninjatrace"), |
| "-ninjalog", |
| fint_build_artifacts.ninja_log_path, |
| "-compdb", |
| fint_build_artifacts.ninja_compdb_path, |
| "-graph", |
| fint_build_artifacts.ninja_graph_path, |
| "-critical-path", |
| "-trace-json", |
| ninja_trace, |
| ], |
| stdout=self.m.raw_io.output_text(leak_to=ninja_trace), |
| ) |
| except self.m.step.StepFailure: |
| pass |
| else: |
| paths_to_upload += [("fuchsia_ninja_trace.json", ninja_trace)] |
| |
| with self.m.step.nest("upload traces") as presentation: |
| for filename, path in paths_to_upload: |
| step = self.m.gsutil.upload_namespaced_file( |
| source=path, |
| bucket=artifact_gcs_bucket, |
| subpath=filename, |
| namespace=upload_namespace, |
| ) |
| # Perfetto needs an unauthenticated URL. |
| # TODO(fxbug.dev/66249): Perfetto cannot load non-public traces. |
| # Consider hiding this link in such cases. |
| step.presentation.links["perfetto_ui"] = ( |
| f"https://ui.perfetto.dev/#!?url={self.m.gsutil.unauthenticated_url(step.presentation.links[filename])}" |
| ) |
                # This link is shown as a workaround, hinting at how users can
                # load non-public traces.
| presentation.links["fuchsia.dev guide"] = ( |
| "https://fuchsia.dev/fuchsia-src/development/tracing/tutorial/converting-visualizing-a-trace#html-trace" |
| ) |
| |
| with self.m.step.nest("sponge links") as presentation: |
| # Link to find all of the bazel subinvocations' sponge pages |
| presentation.links["sponge invocations"] = ( |
| f"http://sponge/invocations/?q=BUILDBUCKET_ID:{self.m.buildbucket_util.id}" |
| ) |
| |
| def _repro_line(self): |
        # The extra newline characters produce a line break in the Markdown summary.
| return f"\n\nYou can reproduce this build by running `fx repro {self.m.buildbucket_util.id}`" |