| #!/usr/bin/env python3 |
| # Copyright 2022 The Fuchsia Authors. All rights reserved. |
| # Use of this source code is governed by a BSD-style license that can be |
| # found in the LICENSE file. |
| |
| "Run a Bazel command from Ninja. See bazel_action.gni for details." |
| |
| import argparse |
| import dataclasses |
| import errno |
| import filecmp |
| import json |
| import os |
| import shlex |
| import shutil |
| import stat |
| import sys |
| import typing as T |
| from pathlib import Path |
| |
| _SCRIPT_DIR = os.path.dirname(__file__) |
| sys.path.insert(0, _SCRIPT_DIR) |
| import build_utils |
| import thread_pool_helpers |
| import workspace_utils |
| from build_utils import FilePath |
| |
| _BUILD_API_DIR = os.path.join(_SCRIPT_DIR, "../../api") |
| sys.path.insert(0, _BUILD_API_DIR) |
| from debug_symbols import ( |
| DebugSymbolEntryType, |
| DebugSymbolExporter, |
| DebugSymbolsManifestParser, |
| ) |
| |
| _BUILD_BAZEL_DIR = os.path.dirname(_SCRIPT_DIR) |
| |
| # Directory where to find Starlark input files. |
| _STARLARK_DIR = os.path.join(_BUILD_BAZEL_DIR, "starlark") |
| |
| # Directory where to find templated files. |
| _TEMPLATE_DIR = os.path.join(_BUILD_BAZEL_DIR, "templates") |
| |
| # A list of built-in Bazel workspaces like @bazel_tools// which are actually |
| # stored in the prebuilt Bazel install_base directory with a timestamp *far* in |
| # the future, e.g. 2033-01-01. This is a hack that Bazel uses to determine when |
| # its install base has changed unexpectedly. |
| # |
# If any files from these directories are listed in a depfile, they will force
# rebuilds on *every* Ninja invocation, because Ninja will consider the
# outputs to always be older than these inputs (as 2022 < 2033).
| # |
# This list is thus used to remove such files from depfile inputs. Given that
# these files are part of Bazel's installation, their content hardly ever
# changes between build invocations anyway, so this is safe.
| # |
| _BAZEL_BUILTIN_REPOSITORIES = ( |
| "bazel_tools", |
| "bazel_features_globals", |
| "bazel_features_version", |
| "local_config_cc", |
| "local_config_platform", |
| # The two repositories below were added by Bazel 7.2. |
| "host_platform", |
| "internal_platforms_do_not_use", |
| # Created and used internally by @rules_python |
| "rules_python_internal", |
| # Introduced by bzlmod |
| "bazel_skylib", |
| "bazel_skylib+", |
| "platforms", |
| "rules_cc", |
| "rules_license", |
| "rules_license+", |
| "rules_python", |
| "rules_python+", |
| "pythons_hub", # A sub-repo created by rule_python+ |
| "rules_rust", |
| "io_bazel_rules_go", |
| # Created from in-tree top-level module |
| "fuchsia_sdk_common", |
| ) |
| |
| # A list of file extensions for files that should be ignored from depfiles. |
| _IGNORED_FILE_SUFFIXES = ( |
| # .pyc files contain their own timestamp which does not necessarily match |
| # their file timestamp, triggering the python interpreter to rewrite them |
| # more or less randomly. Apart from that, their content always matches the |
| # corresponding '.py' file which will always be listed as a real input, |
| # so ignoring them is always safe. |
| # |
| # See https://stackoverflow.com/questions/23775760/how-does-the-python-interpreter-know-when-to-compile-and-update-a-pyc-file |
| # |
| ".pyc", |
| ) |
| |
| # A set of labels that should be ignored from depfiles. |
| _IGNORED_LABELS = { |
| # This file does not exist. It is returned by the cquery due to an |
| # unfortunate sequence of events. |
| # |
| # See details in https://fxbug.dev/434864899. |
| "//third_party/rust_crates/vendor/ansi_term-0.12.1:LICENSE", |
| # Internal targets for building Go SDK from rules_go. |
| "@@rules_go++go_sdk+io_bazel_rules_nogo//:BUILD.bazel", |
| "@@rules_go++go_sdk+io_bazel_rules_nogo//:scope.bzl", |
| "@@rules_go++go_sdk+main___wrap_0//:ROOT", |
| } |
| |
# A list of external repository names which do not require a content hash
# file, i.e. their implementation should already record the right
# dependencies on their input files.
| _BAZEL_NO_CONTENT_HASH_REPOSITORIES = ( |
| "fuchsia_build_config", |
| "fuchsia_build_info", |
| "fuchsia_prebuilt_rust", |
| "gn_targets", |
| ) |
| |
| # Maps from apparent repo names to canonical repo names. |
| # |
| # This dictionary is created for determining the paths for external |
| # repositories, which use canonical repo names as directory names. |
| _APPARENT_REPO_NAME_TO_CANONICAL = { |
| "fuchsia_prebuilt_rust": "+_repo_rules+fuchsia_prebuilt_rust", |
| } |
| |
# Technical notes on input files (source and build files) located in Bazel
# external repositories.
| # |
| # This script uses Bazel queries to extract the list of implicit input files |
| # from Bazel. This list typically includes a very large number of files that |
| # are located in Bazel external repositories, for example, consider the |
| # following labels: |
| # |
| # @bazel_skylib//lib:BUILD |
| # @platforms//cpu:BUILD |
| # @rules_cc/cc:find_cc_toolchain.bzl |
| # @fuchsia_clang//:cc_toolchain_config.bzl |
| # @fuchsia_sdk//:BUILD.bazel |
| # @fuchsia_sdk//pkg/fdio:include/lib/fdio/vfs.h |
| # @fuchsia_sdk//fidl/fuchsia.unknown:unknown.fidl |
| # @internal_sdk//src/lib/fxl_fxl:command_line.cc |
| # |
| # These labels need to be converted to actual file paths in the Bazel |
# output_base, which consists of converting the repository name '@foo' into a
| # repository directory such as `$BAZEL_TOPDIR/output_base/external/foo`. |
| # |
# Simply listing these paths in the depfile unfortunately triggers flaky Ninja
# no-op check failures on our infra builders during incremental builds,
# because Bazel will sometimes decide to re-generate the content of a
# repository (by re-running its repository rule function) under certain
# conditions, which can trigger semi-randomly between two consecutive
# Ninja build actions.
| # |
# Also, many times a repository `@foo` is regenerated because one of its
# repository dependencies (e.g. `@bar`) has to be updated, but the new content
# for `@foo` would be identical nonetheless.
| # |
# This regeneration is mostly unpredictable, but can be worked around in the
| # following way, implemented in the script below: |
| # |
| # - Most repository directories are symlinks that simply point outside of the |
| # Bazel workspace. This can happen for two types of `bazel_dep`s declared in |
| # MODULE.bazel: |
| # |
| # 1) Repositories vendored in `third_party/bazel_vendor`, which |
| # automatically resolves to the vendored directory. For example, for |
| # @bazel_skylib: |
| # |
| # $TOPDIR/output_base/external/bazel_skylib+ |
| # --symlink--> $TOPDIR/workspace/third_party/bazel_vendor/bazel_skylib+ |
| # --symlink--> $FUCHSIA_DIR/third_party/bazel_vendor/bazel_skylib+ |
| # |
| # 2) Repositories declared with `local_path_override`, which resolves to |
| # the path defined in `local_path_override`. For example, for |
| # @rules_fuchsia: |
| # |
| # $TOPDIR/output_base/external/rules_fuchsia+ |
| # --symlink--> $TOPDIR/workspace/build/bazel_sdk/bazel_rules_fuchsia |
| # --symlink--> $FUCHSIA_DIR/build/bazel_sdk/bazel_rules_fuchsia |
| # |
| # In these case, replacing the repository directory with its final target |
| # directly is safe and cannot trigger no-op failures, so: |
| # |
| # @bazel_skylib//lib:BUILD |
| # ---mapped--> ../../third_party/bazel_skylib/lib/BUILD |
| # |
| # @platforms//cpu:BUILD |
| # ---mapped--> ../../third_party/bazel_platforms/cpu/BUILD |
| # |
| # - Many repositories, even if their content is generated by a repository rule, |
| # contain files or directories that are symlinks pointing outside of the |
| # Bazel workspace. For example: |
| # |
| # @fuchsia_sdk//pkg/fdio:include/lib/fdio/vfs.h |
| # --symlink--> $NINJA_OUTPUT_DIR/sdk/exported/in_tree_collection/pkg/fdio/include/lib/fdio/vfs.h |
| # --symlink--> $FUCHSIA_DIR/sdk/lib/fdio/include/lib/fdio/vfs.h |
| # |
| # @fuchsia_clang//:lib |
| # --symlink--> $FUCHSIA_DIR/prebuilt/third_party/clang/{host_tag}/lib |
| # |
| # It is sufficient to replace the label with the final link target, as in: |
| # |
| # @fuchsia_sdk//pkg/fdio:include/lib/fdio/vfs.h |
| # --mapped--> ../../sdk/lib/fdio/include/lib/fdio/vfs.h |
| # |
| # @fuchsia_clang//:lib/x86_64-unknown/fuchsia/libunwind.so.1 |
| # --mapped--> ../../prebuilt/third_party/clang/{host_tag}/lib/x86_64-unknown-fuchsia/libunwind.so.1 |
| # |
| # There is still a minor issue here: what if re-generating the repository |
| # would change the symlink's content (i.e. pointing to a different location, |
| # replacing the symlink with a regular file, or removing the symlink?). Then |
| # the depfile would have recorded a now stale, and thus incorrect, implicit |
| # input dependency. |
| # |
| # Most of the time, this will create un-necessary build actions on the next |
| # Ninja invocation, and in rare cases, may even break the build. However, |
| # this is no different from regular Ninja issues with depfiles (see |
| # https://fxbug.dev/42164069), which cannot guarantee, by design, correct |
| # incremental build. |
| # |
| # So resolving the symlink seems an acceptable solution, as it would not be |
| # worse than Ninja's current limitations. |
| # |
# - Other files are generated by a repository rule. For example,
#   @fuchsia_sdk//:BUILD.bazel is generated by the fuchsia_sdk_repository()
#   rule implementation function, which reads the content of many input JSON
#   metadata files to determine what goes in that file.
| # |
| # Fortunately, this rule also uses a special file containing a content hash |
| # for all these input metadata files, which is: |
| # |
| # $BAZEL_TOPDIR/workspace/fuchsia_build_generated/fuchsia_sdk.hash |
| # |
| # This file is generated by the `workspace_utils.py` script which is invoked |
| # from the //build/regenerator.py script invoked at `fx gen` time, which |
| # also ensures that all files used to compute the hash file are listed as |
| # implicit inputs for the Ninja build plan. |
| # |
| # Thus if any of the input manifest file changes, Ninja will force re-running |
| # the regenerator script, which will update the content hash file, which will |
| # force a Bazel build command invoked from this bazel_action.py script to |
| # regenerate all of @fuchsia_sdk//. |
| # |
| # For the depfile entries generated by this script, it is thus enough to |
| # replace any label to a generated (non-symlink) repository file, to the path |
| # to the corresponding content hash file, if it exists, e.g.: |
| # |
| # @fuchsia_sdk//:BUILD.bazel |
| # --mapped--> $BAZEL_TOPDIR/workspace/fuchsia_build_generated/fuchsia_sdk.hash |
| # |
| # @fuchsia_sdk//pkg/fdio:BUILD.bazel |
| # --mapped--> $BAZEL_TOPDIR/workspace/fuchsia_build_generated/fuchsia_sdk.hash |
| # |
| # There are several files under $BAZEL_TOPDIR/fuchsia_build_generated/, |
| # each one matching a given repository name. So the algorithm used to map a |
| # label like `@foo//:package/file` that does not point to a symlink is: |
| # |
| # 1) Lookup if a content hash file exists for repository @foo, e.g. look |
| # for $BAZEL_TOPDIR/fuchsia_build_generated/foo.hash |
| # |
| # 2) If the content hash file exists, map the label directly to that file. |
| # |
| # @foo//:package/file --mapped--> |
| # $BAZEL_TOPDIR/fuchsia_build_generated/foo.hash |
| # |
| # 3) If the content hash file does not exist, print an error message |
| # or ignore the file (controlled by the _ASSERT_ON_IGNORED_FILES |
| # definition below). Asserting is preferred to detect these cases |
| # as soon as possible, but can be disabled locally during development. |
| # |
| # - Some repository files are symlinks pointing to generated files belonging |
| # to other external repositories, for example: |
| # |
| # @fuchsia_sdk//tools/x64:cmc-meta.json |
| # --symlink--> $OUTPUT_BASE/external/fuchsia_idk/tools/x64/cmc-meta-json |
| # |
| # Which is a file generated by the @fuchsia_idk//tools/x64:cmc-meta.json |
| # target. |
| # |
| # In this case, the content hash file of the target repository should be used, |
| # if it exists, or an error should be reported. |
| # |
| # - Finally, files whose name is a content-based hash are ignored as well, |
| # because adding them to the depfile can only lead to stale depfile entries |
| # that will trigger Ninja errors. |
| # |
| # This is because if the corresponding content changes, the file's name |
| # also changes, and the file's timestamp becomes irrelevant and can only |
| # trigger un-necessary rebuilds. Moreover, a stale output dependency can |
| # appear as a Ninja no-op error that looks like: |
| # |
| # output obj/build/images/fuchsia/fuchsia/legacy/blobs/0a892655d873b0431095e9b51e1dad42b520af43db7ef76aa2a08074948a1565 of phony edge with no inputs doesn't exist |
| # |
| # Detecting content-based file names looks for files which only include |
| # hexadecimal chars, and of at least 16 chars long, or which include |
| # a .build-id/xx/ prefix, where <xx> is a 2-char hexadecimal string. |
| # |
| |
| # Set this to True to debug operations locally in this script. |
| # IMPORTANT: Setting this to True will result in Ninja timeouts in CQ |
| # due to the stdout/stderr logs being too large. |
| _DEBUG = False |
| |
| # Set this to True to debug .build-id copies from the Bazel output base |
| # to the Ninja build directory. |
| _DEBUG_BUILD_ID_COPIES = _DEBUG |
| |
| # Set this to True to debug the export of debug symbols. |
| _DEBUG_SYMBOL_EXPORT = _DEBUG |
| |
| # Set this to True to debug the bazel query cache. |
| _DEBUG_BAZEL_QUERIES = _DEBUG |
| |
| # Set this to True to assert when non-symlink repository files are found. |
# This is useful to find them when performing expensive builds on CQ.
| _ASSERT_ON_IGNORED_FILES = True |
| |
| # The name of an environment variable that will be checked. If set |
| # to "1", this adds `--sandbox_debug` to each `bazel build` invocation |
| # in order to allow developers to see the content of sandboxes for |
| # failed commands. |
| # See https://blog.bazel.build/2016/03/18/sandbox-easier-debug.html |
| _ENV_DEBUG_SANDBOX = "FUCHSIA_DEBUG_BAZEL_SANDBOX" |
| |
| |
| def debug(msg: str) -> None: |
    """Print a debug message to stderr if _DEBUG is True."""
| if _DEBUG: |
| print("BAZEL_ACTION_DEBUG: " + msg, file=sys.stderr) |
| |
| |
| def get_input_starlark_file_path(filename: FilePath) -> str: |
| """Return the path of a input starlark file for Bazel queries. |
| |
| Args: |
| filename: File name, searched in //build/bazel/starlark/ |
| Returns: |
| file path to the corresponding file. |
| """ |
| result = os.path.join(_STARLARK_DIR, filename) |
| assert os.path.isfile(result), f"Missing starlark input file: {result}" |
| return result |
| |
| |
| def make_writable(p: FilePath) -> None: |
| file_mode = os.stat(p).st_mode |
| is_readonly = file_mode & stat.S_IWUSR == 0 |
| if is_readonly: |
| os.chmod(p, file_mode | stat.S_IWUSR) |
| |
| |
| def copy_writable(src: FilePath, dst: FilePath) -> None: |
| os.makedirs(os.path.dirname(dst), exist_ok=True) |
| shutil.copy2(src, dst) |
| make_writable(dst) |
| |
| |
| def hardlink_or_copy_writable( |
| src_path: str, dst_path: str, bazel_output_base_dir: str |
| ) -> None: |
| # Use lexists to make sure broken symlinks are removed as well. |
| if os.path.lexists(dst_path): |
| os.remove(dst_path) |
| |
| # See https://fxbug.dev/42072059 for context. This logic is kept here |
| # to avoid incremental failures when performing copies across |
| # different revisions of the Fuchsia checkout (e.g. when bisecting |
| # or simply in CQ). |
| # |
| # If the file is writable, and not a directory, try to hard-link it |
| # directly. Otherwise, or if hard-linking fails due to a cross-device |
| # link, do a simple copy. |
| do_copy = True |
| file_mode = os.stat(src_path).st_mode |
| is_src_readonly = file_mode & stat.S_IWUSR == 0 |
| if not is_src_readonly: |
| try: |
| os.makedirs(os.path.dirname(dst_path), exist_ok=True) |
| |
| # Get realpath of src_path to avoid symlink chains, which |
            # os.link does not handle properly even with follow_symlinks=True.
| # |
| # NOTE: it is important to link to the final real file because |
| # intermediate links can be temporary. For example, the |
| # gn_targets repository is repopulated in every bazel_action, so |
| # any links pointing to symlinks in gn_targets can be |
| # invalidated during the build. |
| os.link(os.path.realpath(src_path), dst_path) |
| |
| # Update timestamp to avoid Ninja no-op failures that can |
| # happen because Bazel does not maintain consistent timestamps |
| # in the execroot when sandboxing or remote builds are enabled. |
| if os.path.realpath(src_path).startswith( |
| os.path.abspath(bazel_output_base_dir) |
| ): |
| os.utime(dst_path) |
| do_copy = False |
| except OSError as e: |
| if e.errno != errno.EXDEV: |
| raise |
| |
| if do_copy: |
| copy_writable(src_path, dst_path) |
| |
| |
| def copy_directory_if_changed( |
| src_dir: FilePath, dst_dir: FilePath, tracked_files: list[FilePath] |
| ) -> None: |
| """Copy directory from |src_path| to |dst_path| if |tracked_files| have different mtimes. |
| |
    NOTE this function deliberately uses the __mtime__, instead of the content,
    of tracked_files to determine whether directories need a re-copy. This
    follows the convention of many tools used in the build, which use the
    mtime of a file as a proxy for the freshness of a directory, because
    Ninja only understands timestamps.
| |
| See http://b/365838961 for details. |
| """ |
| assert os.path.isdir( |
| src_dir |
| ), "{} is not a dir, but copy dir is called.".format(src_dir) |
| |
| def all_tracked_files_unchanged( |
| src_dir: FilePath, dst_dir: FilePath, tracked_files: list[FilePath] |
| ) -> bool: |
| """Use __mtime__ to determine whether any tracke file has changed. |
| |
| Returns true iff mtimes of tracked files are identical in src_dir and |
| dst_dir. |
| """ |
| for tracked_file in tracked_files: |
| dst_tracked_file = os.path.join(dst_dir, tracked_file) |
| if not os.path.exists(dst_tracked_file): |
| return False |
| src_tracked_file = os.path.join(src_dir, tracked_file) |
| if os.path.getmtime(src_tracked_file) != os.path.getmtime( |
| dst_tracked_file |
| ): |
| return False |
| return True |
| |
| if all_tracked_files_unchanged(src_dir, dst_dir, tracked_files): |
| return |
| |
| if os.path.lexists(dst_dir): |
| rmtree_threaded(dst_dir) |
| |
| copy_directory_threaded(src_dir, dst_dir) |
| |
| |
| def rmtree_threaded(dirname: FilePath) -> None: |
| """Uses a threadpool to delete all the files in a directory tree faster than shutil.rmtree(). |
| |
| This is about 2x faster than using shutil.rmtree() on its own. |
| """ |
| |
    # Find all the files in the tree, from the bottom up, so that the
    # directories are emptied bottom-up (the order they will be deleted in).
| files: list[str] = [] |
| for root, _, filenames in os.walk(dirname, topdown=False): |
| files.extend([os.path.join(root, filename) for filename in filenames]) |
| |
| # Delete all the files in one big threadpool |
| thread_pool_helpers.map_threaded(os.remove, files) |
| |
    # Now delete all the (empty) directories.
| shutil.rmtree(dirname) |
| |
| |
| def copy_directory_threaded(src_dir: FilePath, dst_dir: FilePath) -> None: |
| directories: list[str] = [] |
| files: list[tuple[str, str]] = [] |
| for root, dirnames, filenames in os.walk(src_dir): |
| relroot = os.path.relpath(root, src_dir) |
| if relroot != ".": |
| directories.append(os.path.join(dst_dir, relroot)) |
| files.extend( |
| [ |
| ( |
| os.path.join(src_dir, relroot, filename), |
| os.path.join(dst_dir, relroot, filename), |
| ) |
| for filename in filenames |
| ] |
| ) |
| |
    # Benchmarking confirmed that it is faster to first create all the needed
    # directories, and then perform a multi-threaded copy of the files, than
    # to use a thread pool copy per directory.
| thread_pool_helpers.map_threaded(os.makedirs, directories) |
| thread_pool_helpers.starmap_threaded(copy_writable, files) |
| |
| |
| # filecmp uses a tiny buffer for comparisons, forcing it to a larger size will |
| # reduce the number of I/O operations and drastically speed it up (as much as 10x) |
| setattr(filecmp, "BUFSIZE", 256 * 1024) |
| |
| |
| def check_if_need_to_copy_file(args: tuple[str, str]) -> bool: |
| """Check if the file copy given as a src,dst tuple needs to be performed. |
| |
| This compares the files and returns true if they need to be copied. |
| """ |
| src_path, dst_path = args |
| assert os.path.isfile( |
| src_path |
| ), "{} is not a file, but copy file is called.".format(src_path) |
| |
| # NOTE: For some reason, filecmp.cmp() will return True if |
| # dst_path does not exist, even if src_path is not empty!? |
| if os.path.exists(dst_path) and filecmp.cmp( |
| src_path, dst_path, shallow=False |
| ): |
| return False |
| return True |
| |
| |
| def write_file_if_changed(dst_path: FilePath, content: str) -> None: |
| if os.path.exists(dst_path): |
| with open(dst_path, "rt") as f: |
| current_content = f.read() |
| if current_content == content: |
| return |
| |
| # Use lexists to make sure broken symlinks are removed as well. |
| if os.path.lexists(dst_path): |
| os.remove(dst_path) |
| |
| os.makedirs(os.path.dirname(dst_path), exist_ok=True) |
| with open(dst_path, "wt") as f: |
| f.write(content) |
| |
| |
| def depfile_quote(path: FilePath) -> str: |
| """Quote a path properly for depfiles, if necessary. |
| |
| shlex.quote() does not work because paths with spaces |
| are simply encased in single-quotes, while the Ninja |
| depfile parser only supports escaping single chars |
| (e.g. ' ' -> '\\ '). |
| |
| Args: |
| path: input file path. |
| Returns: |
| The input file path with proper quoting to be included |
| directly in a depfile. |
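
    For example, "out/my file.txt" becomes "out/my\\ file.txt", i.e. a
    single backslash is inserted before the space (path is illustrative).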
| """ |
| return str(path).replace("\\", "\\\\").replace(" ", "\\ ") |
| |
| |
| def get_build_id_dir_files_to_copy( |
| build_id_dir: FilePath, |
| ) -> list[tuple[Path, Path]]: |
| """Returns a list of debug symbol files from a source .build-id directory that need |
| to be copied to the top-level one.""" |
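    # A .build-id directory stores files under two-level paths such as
    # .build-id/ab/cdef01234567.debug (name is illustrative), where "ab"
    # is the first hex byte of the binary's GNU build-id.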
| files_to_copy = [] |
| for path in os.listdir(build_id_dir): |
| bid_path = os.path.join(build_id_dir, path) |
| if len(path) == 2 and os.path.isdir(bid_path): |
| for obj in os.listdir(bid_path): |
| src_path = os.path.join(bid_path, obj) |
| dst_path = os.path.join(".build-id", path, obj) |
| if _DEBUG_BUILD_ID_COPIES: |
| print(f"BUILD_ID {src_path} --> {dst_path}") |
| files_to_copy.append((Path(src_path), Path(dst_path))) |
| |
| return files_to_copy |
| |
| |
| def remove_gn_toolchain_suffix(gn_label: str) -> str: |
| """Remove the toolchain suffix of a GN label.""" |
| return gn_label.partition("(")[0] |
| |
| |
| class BazelLabelMapper(object): |
| """Provides a way to map Bazel labels to file bazel_paths. |
| |
| Usage is: |
| 1) Create instance, passing the path to the Bazel workspace. |
| 2) Call source_label_to_path(<label>) where the label comes from |
| a query. |
| """ |
| |
| def __init__(self, bazel_workspace: str, output_dir: str): |
        # Get the $OUTPUT_BASE/external directory from the $WORKSPACE_DIR.
        # The following only works in the context of the Fuchsia platform build
| # because the workspace/ and output_base/ directories are always |
| # parallel entries in the $BAZEL_TOPDIR. |
| # |
| # Another way to get it is to call `bazel info output_base` and append |
| # /external to the result, but this would slow down every call to this |
| # script, and is not worth it for now. |
| # |
| self._root_workspace = os.path.abspath(bazel_workspace) |
| self._output_dir = os.path.abspath(output_dir) |
| output_base = os.path.normpath( |
| os.path.join(bazel_workspace, "..", "output_base") |
| ) |
| self._output_base = output_base |
| |
| assert os.path.isdir(output_base), f"Missing directory: {output_base}" |
| self._external_dir_prefix = ( |
| os.path.realpath(os.path.join(output_base, "external")) + "/" |
| ) |
| |
| # Some repositories have generated files that are associated with |
| # a content hash file generated by //build/regenerator.py. This map is |
| # used to return the path to such file if it exists, or an empty |
| # string otherwise. |
| self._repository_hash_map: dict[str, str] = {} |
| |
| def _get_repository_content_hash(self, repository_name: str) -> str: |
| """Check whether a repository name has an associated content hash file. |
| |
| Args: |
| repository_name: Bazel repository name, must start with an @, |
| e.g. '@foo' or '@@foo.1.0' |
| |
| Returns: |
| If the corresponding repository has a content hash file, return |
| its path. Otherwise, return an empty string. |
| """ |
| # TODO(jayzhuang): Refine the logic for bazel_action.py incremental |
| # builds. |
| # |
| # Use the innermost repository name for finding content hash file. |
| # |
| # The call here is unfortunately a bit awkward given how these helper |
| # functions are defined. We are planning on overhauling the logic for |
| # incremental builds so leaving it as-is for now. |
| repository_name = "@" + workspace_utils.innermost_repository_name( |
| f"{repository_name}//:root" |
| ) |
| hash_file = self._repository_hash_map.get(repository_name, None) |
| if hash_file is None: |
| # Canonical names like @@foo.<version> need to be converted to just `foo` here. |
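            # E.g. "@@foo.2.0" and "@@foo" both become "foo", as does the
            # apparent name "@foo" (names are illustrative).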
| file_prefix = repository_name[1:] |
| if file_prefix.startswith("@"): |
| name, dot, version = file_prefix[1:].partition(".") |
| if dot == ".": |
| file_prefix = name |
| else: |
| # No version, get rid of initial @@ |
| file_prefix = file_prefix[1:] |
| |
| # First look into $BUILD_DIR/regenerator_outputs/bazel_content_hashes/ |
| # then into $WORKSPACE/fuchsia_build_generated/ which should contain |
| # symlinks to Ninja-generated content hashes. |
| hash_file = os.path.join( |
| self._output_dir, |
| "regenerator_outputs", |
| "bazel_content_hashes", |
| file_prefix + ".hash", |
| ) |
| if not os.path.exists(hash_file): |
| # LINT.IfChange(fuchsia_build_generated_hashes) |
| hash_file = os.path.join( |
| self._root_workspace, |
| "fuchsia_build_generated", |
| file_prefix + ".hash", |
| ) |
| if not os.path.exists(hash_file): |
| hash_file = "" |
| # LINT.ThenChange(//build/bazel/scripts/workspace_utils.py) |
| |
| self._repository_hash_map[repository_name] = hash_file |
| |
| return hash_file |
| |
| def source_label_to_path( |
| self, label: str, relative_to: str | None = None |
| ) -> str: |
| """Convert a Bazel label to a source file into the corresponding file path. |
| |
| Args: |
            label: A fully formed Bazel label, as returned by a query. If BzlMod
              is enabled, this expects canonical repository names to be present
              (e.g. '@foo.12//src/lib:foo.cc' and not '@foo//src/lib:foo.cc').
| relative_to: Optional directory path string. |
| Returns: |
| If relative_to is None, the absolute path to the corresponding source |
| file, otherwise, the same path relative to `relative_to`. |
| |
| This returns an empty string if the file should be ignored, i.e. |
| not added to the depfile. |
| """ |
| # |
| # NOTE: Only the following input label formats are supported |
| # |
| # //<package>:<target> |
| # @//<package>:<target> |
| # @<name>//<package>:<target> |
| # @@<name>//<package>:<target> |
| # @@<name>.<version>//<package>:<target> |
| # |
| repository, sep, package_label = label.partition("//") |
| assert sep == "//", f"Missing // in source label: {label}" |
| if repository == "" or repository == "@": |
| # @// references the root project workspace, it should normally |
| # not appear in queries, but handle it here just in case. |
| # |
| # // references a path relative to the current workspace, but the |
| # queries are always performed from the root project workspace, so |
            # it is equivalent to @// for this function.
| repository_dir = self._root_workspace |
| from_external_repository = False |
| else: |
| # A note on canonical repository directory names. |
| # |
| # An external repository named 'foo' in the project's WORKSPACE.bazel |
| # file will be stored under `$OUTPUT_BASE/external/foo` when BzlMod |
| # is not enabled. |
| # |
| # However, it will be stored under `$OUTPUT_BASE/external/@foo.<version>` |
| # instead when BzlMod is enabled, where <version> is determined statically |
            # by Bazel at startup after resolving the dependencies expressed in
| # the project's MODULE.bazel file. |
| # |
| # It is not possible to guess <version> here but queries will always |
| # return labels for items in the repository that look like: |
| # |
| # @@foo.<version>//... |
| # |
            # This is called a "canonical label". This allows the project to use
| # @foo to reference the repository in its own BUILD.bazel files, while |
| # a dependency module would call it @com_acme_foo instead. All three |
| # labels will point to the same location. |
| # |
| # Queries always return canonical labels, so removing the initial @ |
| # and the trailing // allows us to get the correct repository directory |
| # in all cases. |
| assert repository.startswith( |
| "@" |
| ), f"Invalid repository name in source label {label}" |
| |
| # @@ is used with canonical repo names, so remove both @@ and @. |
| repository_name = repository.removeprefix("@@").removeprefix("@") |
| repository_dir = ( |
| self._external_dir_prefix |
| + _APPARENT_REPO_NAME_TO_CANONICAL.get( |
| repository_name, repository_name |
| ) |
| ) |
| from_external_repository = True |
| |
| package, colon, target = package_label.partition(":") |
| assert colon == ":", f"Missing colon in source label {label}" |
| path = os.path.join(repository_dir, package, target) |
| |
| # Check whether this path is a symlink to something else. |
        # Use os.path.realpath(), which always returns an absolute path
| # after resolving all symlinks to their final destination, then |
| # compare it with os.path.abspath(path): |
| real_path = os.path.realpath(path) |
| if real_path != os.path.abspath(path): |
            # This is a symlink, so first resolve it to its destination.
| path = real_path |
| |
| # If the symlink points to another external repository, try |
| # to find a content hash file for it, or return an empty path. |
| if path.startswith(self._external_dir_prefix): |
| # This path points to generated files in another Bazel external |
| # repository. Check if the latter has a content hash file, or |
| # return an empty path. |
| repo_path = path[len(self._external_dir_prefix) :] |
| repo_name, sep, _ = repo_path.partition("/") |
| assert ( |
| sep == "/" |
| ), f"Unexpected external repository path: external/{repo_path} (from {label})" |
| path = self._get_repository_content_hash("@" + repo_name) |
| |
| elif from_external_repository: |
            # This is a generated file inside an external repository. Find a
            # content hash file for it, or return an empty path.
| path = self._get_repository_content_hash(repository) |
| |
| if path: |
| assert os.path.isabs( |
| path |
| ), f"Unexpected non-absolute path: {path} (from {label})" |
| |
| # Check that the translated path does not point into the output_base |
| # as timestamps in this directory are not guaranteed to be consistent. |
| assert not path.startswith( |
| self._output_base |
| ), f"Path should not be in Bazel output_base: {path} (from {label})" |
| |
| if relative_to: |
| path = os.path.relpath(path, relative_to) |
| |
| return path |
| |
| |
| def verify_unknown_gn_targets( |
| build_files_error: list[str], |
| gn_action_target: str, |
| bazel_targets: list[str], |
| ) -> int: |
| """Check for unknown @gn_targets// dependencies. |
| |
| Args: |
| build_files_error: list of error lines from bazel build or query. |
| gn_action_target: Label of the GN bazel_action() target for this invocation. |
| bazel_targets: list of Bazel targets invoked by the GN bazel_action() target. |
| |
| Returns: |
        On success, simply return 0. On failure, print a human-friendly
        error message explaining the situation to stderr, then return 1.
| """ |
| missing_ninja_outputs = set() |
| missing_ninja_packages = set() |
| build_dirs: set[str] = set() |
| for error_line in build_files_error: |
| if not ("ERROR: " in error_line and "@gn_targets//" in error_line): |
| continue |
| |
| pos = error_line.find("@@gn_targets//") |
| if pos < 0: |
| # Should not happen, do not assert and let the caller print the full error |
| # after this. |
| print(f"UNSUPPORTED ERROR LINE: {error_line}", file=sys.stderr) |
| return 0 |
| |
| ending_pos = error_line.find("'", pos) |
| if ending_pos < 0: |
| print(f"UNSUPPORTED ERROR LINE: {error_line}", file=sys.stderr) |
| return 0 |
| |
| label = error_line[pos + 1 : ending_pos] # skip first @. |
| if error_line[:pos].endswith(": no such package '"): |
            # The line looks like the following when a BUILD.bazel file references a label
| # that does not belong to a @gn_targets package. |
| # |
| # ERROR: <abspath>/BUILD.bazel:<line>:<column>: no such package '@@gn_targets//<dir>': ... |
| # |
            # This happens when the GN bazel_action() target fails to depend on the corresponding
            # bazel_input_file() or bazel_input_directory() target, and none of its other
| # dependencies expose other targets from the same package / directory. The error message |
| # does not give any information about the target name being evaluated by the query though. |
| missing_ninja_packages.add(label) |
| |
| elif error_line[:pos].endswith(": no such target '"): |
            # The line looks like this when a BUILD.bazel file references the wrong
| # label from a package exposed in @gn_targets//: |
| # ERROR: <abspath>/BUILD.bazel:<line>:<column>: no such target '@@gn_targets//<dir>:<name>' ... |
| missing_ninja_outputs.add(label) |
| |
| if not missing_ninja_outputs and not missing_ninja_packages: |
| return 0 |
| |
| missing_outputs = sorted(missing_ninja_outputs) |
| missing_packages = sorted(missing_ninja_packages) |
| |
| _ERROR = """ |
| BAZEL_ACTION_ERROR: Unknown @gn_targets targets. |
| |
| The following GN target: {gn_target} |
| Builds the following Bazel target(s): {bazel_targets} |
| """.format( |
| gn_target=gn_action_target, |
| bazel_targets=" ".join(bazel_targets), |
| ) |
| |
| if not missing_packages: |
| _ERROR += """Which references the following unknown @gn_targets labels: |
| |
| {missing_bazel_labels} |
| |
| To fix this, ensure that bazel_input_file() or bazel_input_directory() |
| targets are defined in the GN graph for: |
| |
| {missing_gn_labels} |
| |
| Then ensure that the GN target depends on them transitively. |
| """.format( |
| missing_bazel_labels="\n ".join(missing_outputs), |
| missing_gn_labels="\n ".join( |
| f"//{o.removeprefix('@gn_targets//')}" for o in missing_outputs |
| ), |
| ) |
| |
| else: |
| missing_labels = missing_outputs + missing_packages |
| missing_build_files = set() |
| for label in missing_labels: |
| label = label.removeprefix("@gn_targets//") |
| gn_dir, sep, gn_name = label.partition(":") |
| if sep != ":": |
| gn_dir = label |
| missing_build_files.add(f"//{gn_dir}/BUILD.gn") |
| |
| _ERROR += """Which references the following unknown @gn_targets labels or packages: |
| |
| {missing_bazel_labels} |
| |
| To fix this, ensure that bazel_input_file() or bazel_input_directory() |
| targets are defined in the following build files: |
| |
| {missing_build_files} |
| |
| Then ensure that the GN target depends on them transitively. |
| """.format( |
| missing_bazel_labels="\n ".join( |
| missing_outputs + missing_packages |
| ), |
| missing_build_files="\n ".join(sorted(missing_build_files)), |
| ) |
| |
| print(_ERROR, file=sys.stderr) |
| return 1 |
| |
| |
| def is_ignored_input_label(label: str) -> bool: |
| """Return True if the label of a build or source file should be ignored.""" |
| is_builtin = ( |
| workspace_utils.innermost_repository_name(label) |
| in _BAZEL_BUILTIN_REPOSITORIES |
| ) |
| is_ignored = ( |
| label.endswith(_IGNORED_FILE_SUFFIXES) or label in _IGNORED_LABELS |
| ) |
| return is_builtin or is_ignored |
| |
| |
| def label_requires_content_hash(label: str) -> bool: |
| """Return True if the label or source file belongs to a repository |
| that requires a content hash file.""" |
| return not ( |
| workspace_utils.innermost_repository_name(label) |
| in _BAZEL_NO_CONTENT_HASH_REPOSITORIES |
| ) |
| |
| |
| def generate_debug_symbols_manifest( |
| bazel_targets: list[str], |
| bazel_launcher: build_utils.BazelLauncher, |
| configured_args: list[str], |
| bazel_execroot: Path, |
| build_dir: Path, |
| manifest_output_path: Path, |
| time_profile: build_utils.TimeProfile, |
| ) -> tuple[bool, list[DebugSymbolEntryType]]: |
| """Generate a debug symbol manifest. |
| |
    This also ensures that all debug binaries the manifest points to are
| properly materialized in the Bazel output base, even when remote |
| caching is enabled. |
| |
| Args: |
| bazel_targets: List of top-level Bazel target labels. The debug |
| symbols for all their transitive dependencies will be included. |
| bazel_launcher: A BazelLauncher instance. |
| configured_args: List of Bazel configuration-specific arguments |
| to run bazel build or bazel cquery. |
| bazel_execroot: Path to Bazel execroot. |
        build_dir: Path to Ninja build directory.
| manifest_output_path: Path where the manifest will be written. |
          Only used in error messages; this function does not write
          the file itself.
| time_profile: A TimeProfile instance. |
| Returns: |
| In case of failure, return (False, []). |
        In case of success, return (True, debug_entries) where debug_entries
| is a list of dictionaries describing debug symbols according to the |
| //:debug_symbols schema. |
| """ |
| # Unfortunately, using --output_groups=+build_id_dirs only applies |
| # to the top-level Bazel targets being built, and not to their transitive |
| # dependencies. When building product bundles, this means that no |
| # debug symbols will be generated, as the build_id_dirs output groups |
| # are only supported by fuchsia_package() targets. |
| # |
| # To work around this, use an aspect to ensure that debug binaries are |
| # materialized in the execroot, and generate a debug_symbols.json manifest |
    # file from the dependencies of each top-level Bazel target.
| # |
| # Due to Bazel bugs / limitations, the stderr output of this command must |
| # be processed to retrieve the path of the generated manifest files. For |
| # details, read //build/bazel/debug_symbols/aspects.bzl. |
| # |
    # NOTE: Because capturing stderr is required here, this bazel build
| # invocation cannot be merged with the main one above. |
| # |
    # Then these manifests are processed to rebase their paths, and merged into
| # a single output debug_symbols.json file for the top-level bazel_action() |
| # GN target. |
| |
| time_profile.start( |
| "generate_debug_symbol_manifests", |
| "Generate debug symbol manifests from deps of top-level targets.", |
| ) |
| # First, build the debug binaries and target-specific manifests. |
| cmd_args = ( |
| ["build"] |
| + configured_args |
| + [ |
| "--output_groups=+debug_symbol_files", |
| "--aspects=//build/bazel/debug_symbols:aspects.bzl%generate_manifest", |
| ] |
| + bazel_targets |
| ) |
| # Always capture both output streams. |
| ret = bazel_launcher.run_bazel_command( |
| cmd_args, print_stdout=False, print_stderr=False |
| ) |
| time_profile.stop() |
| if ret.returncode != 0: |
| print( |
| "ERROR: Cannot generate debug symbols:\n%s\n%s" |
| % (ret.stdout, ret.stderr), |
| file=sys.stderr, |
| ) |
| return (False, []) |
| |
| if _DEBUG_SYMBOL_EXPORT: |
| print( |
| "DEBUG SYMBOLS STDERR: ====================\n%s\n======================\n" |
| % ret.stderr |
| ) |
| |
    # Second, extract paths to target-specific manifests, relative to the execroot.
| time_profile.start( |
| "merge_debug_symbol_manifests", |
| "Merge target-specific manifests into final version.", |
| ) |
| manifest_paths = [] |
| # LINT.IfChange(debug_symbols_manifest_prefix) |
| manifest_path_prefix = "DEBUG_SYMBOLS_MANIFEST_PATH=" |
| # LINT.ThenChange(//build/bazel/debug_symbols/aspects.bzl:debug_symbols_manifest_prefix) |
| for line in ret.stderr.splitlines(): |
        # Bazel starts every print() output line on stderr with `DEBUG: <filepath>:<line>:`.
        # This looks for a line like 'DEBUG: .... DEBUG_SYMBOLS_MANIFEST_PATH=<path>'.
| if not line.startswith("DEBUG:"): |
| continue |
| pos = line.find(manifest_path_prefix) |
| if pos < 0: |
| continue |
| manifest_paths.append(line[pos + len(manifest_path_prefix) :]) |
| |
| # Third, read the manifests, merging their content while rebasing paths. |
| def rebase_execroot_path(path: str) -> str: |
| """Convert an execroot-relative path to a build-dir relative one. |
| |
| Use os.path.realpath() to resolve symlinks properly. |
| """ |
| return os.path.relpath( |
| os.path.realpath(os.path.join(bazel_execroot, path)), |
| build_dir, |
| ) |
| |
| output_manifest = [] |
| recorded_entries: set[str] = set() |
| |
| for manifest_path in manifest_paths: |
| input_manifest_path = os.path.join(bazel_execroot, manifest_path) |
| with open(input_manifest_path, "rt") as f: |
| input_manifest = json.load(f) |
| |
| for input_entry in input_manifest: |
| src_debug = input_entry["debug"] |
| if src_debug in recorded_entries: |
| continue # Ignore duplicates. |
| |
| recorded_entries.add(src_debug) |
| |
| # Adjust paths |
| entry = input_entry.copy() |
| for key in ("debug", "stripped", "breakpad", "elf_build_id"): |
| src_path = entry.get(key, "") |
| if src_path: |
| entry[key] = rebase_execroot_path(src_path) |
| output_manifest.append(entry) |
| |
| time_profile.start( |
| "compute_elf_build_ids", |
| "Extra GNU build-id values from ELF binaries.", |
| ) |
| parser = DebugSymbolsManifestParser() |
| parser.enable_build_id_resolution() |
| parser.parse_manifest_json(output_manifest, manifest_output_path) |
| |
| if _DEBUG_SYMBOL_EXPORT: |
| print("DEBUG SYMBOLS:\n%s" % output_manifest) |
| |
| time_profile.stop() |
| return (True, parser.entries) |
| |
| |
| def copy_debug_symbols_to_build_dir( |
| build_dir: Path, debug_symbols_manifest: list[DebugSymbolEntryType] |
| ) -> None: |
| """Copy debug symbols from the Bazel execroot to {NINJA_BUILD_DIR}/.build-id |
| |
| Useful when performing local debugging and symbolization. This does not affect |
    infra builds, which use artifactory to upload the symbols to cloud
    storage instead.
| |
| Args: |
| build_dir: Path to Ninja build directory. |
| debug_symbols_manifest: A list of DebugSymbolEntryType values, similar |
| to what is returned by generate_debug_symbols_manifest(). |
| """ |
| exporter = DebugSymbolExporter( |
| build_dir, |
| log=lambda m: ( |
| print(f"DEBUG: {m}", file=sys.stderr) |
| if _DEBUG_SYMBOL_EXPORT |
| else None |
| ), |
| ) |
| exporter.parse_debug_symbols(debug_symbols_manifest) |
| debug_copies = exporter.get_debug_symbols_to_build_id_copies( |
| os.path.join(build_dir, ".build-id") |
| ) |
| |
| def copy_build_id_file(src_path: str, dst_path: str) -> None: |
| os.makedirs(os.path.dirname(dst_path), exist_ok=True) |
| shutil.copyfile(src_path, dst_path) |
| |
| thread_pool_helpers.starmap_threaded( |
| copy_build_id_file, |
| debug_copies, |
| ) |
| |
| |
| def list_to_pairs(l: T.Iterable[T.Any]) -> T.Iterable[tuple[T.Any, T.Any]]: |
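    """Group a flat iterable into consecutive (first, second) pairs.

    For example, [1, 2, 3, 4] yields (1, 2) then (3, 4). A trailing
    unpaired element is silently dropped, so callers should pass an even
    number of items.
    """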
| is_first = True |
| for val in l: |
| if is_first: |
| last_val = val |
| is_first = False |
| else: |
| yield (last_val, val) |
| is_first = True |
| |
| |
| @dataclasses.dataclass |
| class FileOutputs: |
| bazel_path: str |
| ninja_path: str |
| |
| |
| class FileOutputsAction(argparse.Action): |
| """ArgumentParser action class to convert --file-outputs arguments into FileOutputs instances.""" |
| |
| def __init__( # type: ignore |
| self, option_strings, dest, nargs=None, default=None, **kwargs |
| ): |
| if nargs is not None: |
| raise ValueError("nargs not allowed") |
| if default is not None: |
| raise ValueError("default not allowed") |
| super().__init__(option_strings, dest, nargs="+", default=[], **kwargs) |
| |
| def __call__(self, parser, namespace, values, option_string): # type: ignore |
| if len(values) < 2: |
| raise ValueError( |
| f"expected at least 2 arguments for {option_string}" |
| ) |
| if len(values) & 1 != 0: |
| raise ValueError( |
| f"expected an even number of arguments for {option_string}" |
| ) |
| dest_list = getattr(namespace, self.dest, []) |
| dest_list.extend( |
| [ |
| FileOutputs(bazel_path, ninja_path) |
| for bazel_path, ninja_path in list_to_pairs(values) |
| ] |
| ) |
| setattr(namespace, self.dest, dest_list) |
| |
| |
| @dataclasses.dataclass |
| class DirectoryOutputs: |
| bazel_path: str |
| ninja_path: str |
| tracked_files: list[str] = dataclasses.field(default_factory=list) |
| copy_debug_symbols: bool = False |
| |
| |
| class DirectoryOutputsAction(argparse.Action): |
| """ArgumentParser action class to convert --directory-outputs arguments into DirectoryOutputs instances.""" |
| |
| def __init__( # type: ignore |
| self, option_strings, dest, nargs=None, default=None, **kwargs |
| ): |
| if nargs is not None: |
| raise ValueError("nargs not allowed") |
| if default is not None: |
| raise ValueError("default not allowed") |
| super().__init__(option_strings, dest, nargs="+", default=[], **kwargs) |
| |
| def __call__(self, parser, namespace, values, option_string): # type: ignore |
| if len(values) < 3: |
| raise ValueError( |
| f"expected at least 3 arguments for {option_string}" |
| ) |
| dest_list = getattr(namespace, self.dest, []) |
| bazel_path = values[0] |
| ninja_path = values[1] |
| copy_debug_symbols = bool(values[2] == "true") |
| tracked_files = values[3:] |
| dest_list.append( |
| DirectoryOutputs( |
| bazel_path, ninja_path, tracked_files, copy_debug_symbols |
| ) |
| ) |
| setattr(namespace, self.dest, dest_list) |
| |
| |
| @dataclasses.dataclass |
| class FinalSymlinkOutputs: |
| bazel_path: str |
| ninja_path: str |
| |
| |
| class FinalSymlinkOutputsAction(argparse.Action): |
| """ArgumentParser action class to convert --final-symlink-outputs arguments into a FinalSymlinkOutputs instances.""" |
| |
| def __init__( # type: ignore |
| self, option_strings, dest, nargs=None, default=None, **kwargs |
| ): |
| if nargs is not None: |
| raise ValueError("nargs not allowed") |
| if default is not None: |
| raise ValueError("default not allowed") |
| super().__init__(option_strings, dest, nargs="+", default=[], **kwargs) |
| |
| def __call__(self, parser, namespace, values, option_string): # type: ignore |
| if len(values) != 2: |
| raise ValueError(f"expected 2 arguments for {option_string}") |
| dest_list = getattr(namespace, self.dest, []) |
| bazel_path = values[0] |
| ninja_path = values[1] |
| dest_list.append(FinalSymlinkOutputs(bazel_path, ninja_path)) |
| setattr(namespace, self.dest, dest_list) |
| |
| |
| @dataclasses.dataclass |
| class PackageOutputs: |
| package_label: str |
| archive_path: T.Optional[str] = None |
| manifest_path: T.Optional[str] = None |
| copy_debug_symbols: bool = False |
| |
| |
| class PackageOutputsAction(argparse.Action): |
| """ArgumentParser action class to convert --package-outputs arguments into PackageOutputs instances.""" |
| |
| def __init__( # type: ignore |
| self, option_strings, dest, nargs=None, default=None, **kwargs |
| ): |
| if nargs is not None: |
| raise ValueError("nargs not allowed") |
| if default is not None: |
| raise ValueError("default not allowed") |
| super().__init__(option_strings, dest, nargs=4, default=[], **kwargs) |
| |
| def __call__(self, parser, namespace, values, option_string): # type: ignore |
| assert len(values) == 4 |
| if not values[0]: |
| raise ValueError("expected non-empty Bazel package label.") |
| dest_list = getattr(namespace, self.dest, []) |
| package_label = values[0] |
| archive_path = values[1] if values[1] != "NONE" else None |
| manifest_path = values[2] if values[2] != "NONE" else None |
| copy_debug_symbols = bool(values[3] == "true") |
| dest_list.append( |
| PackageOutputs( |
| package_label, archive_path, manifest_path, copy_debug_symbols |
| ) |
| ) |
| setattr(namespace, self.dest, dest_list) |
| |
| |
| def main() -> int: |
| parser = argparse.ArgumentParser(description=__doc__) |
| parser.add_argument( |
| "--build-dir", |
| type=Path, |
| help="Specify Ninja build directory (defaults to current directory)", |
| ) |
| parser.add_argument( |
| "--fuchsia-dir", |
| type=Path, |
| help="Specify Fuchsia source directory (defaults to auto-detected)", |
| ) |
| parser.add_argument( |
| "--command", |
| required=True, |
| help="Bazel command, e.g. `build`, `run`, `test`", |
| ) |
| parser.add_argument( |
| "--gn-target-label", |
| required=True, |
| help="Label of GN target invoking this script.", |
| ) |
| parser.add_argument( |
| "--gn-targets-repository-dir", |
| type=Path, |
| help="Path of @gn_targets repository directory for this invocation.", |
| ) |
| parser.add_argument( |
| "--bazel-targets", |
| default=[], |
| nargs="*", |
| help="list of bazel target patterns.", |
| ) |
| parser.add_argument( |
| "--stamp-files", |
| type=Path, |
| default=[], |
| nargs="*", |
| help="list of stamp files to touch.", |
| ) |
| parser.add_argument( |
| "--file-outputs", |
| action=FileOutputsAction, |
| help="A list of (bazel_path, ninja_path) file bazel_paths.", |
| ) |
| parser.add_argument( |
| "--directory-outputs", |
| action=DirectoryOutputsAction, |
| help="4 or more arguments to specify a single Bazel output directory. Begins with (bazel_path, ninja_path, copy_debug_symbols) values, followed by one or more tracked relative file", |
| ) |
| parser.add_argument( |
| "--package-outputs", |
| action=PackageOutputsAction, |
| help="A tuple of four values describing Fuchsia package related outputs. Fields are Bazel package target label, archive output path or 'NONE', manifest output path or 'NONE', and copy debug symbols flag as either 'true' or 'false' string", |
| ) |
| parser.add_argument( |
| "--final-symlink-outputs", |
| action=FinalSymlinkOutputsAction, |
| help="A (bazel_path, ninja_path) pair to specify a single final symlink to a Bazel artifact.", |
| ) |
| |
| parser.add_argument( |
| "--bazel-build-events-log-json", |
| type=Path, |
| help="Path to JSON formatted event log for build actions.", |
| ) |
| |
| parser.add_argument("--depfile", help="Ninja depfile output path.") |
| parser.add_argument( |
| "--allow-directory-in-outputs", |
| action="store_true", |
| default=False, |
| help="Allow directory outputs in `--bazel-outputs`, NOTE timestamps on directories do NOT accurately reflect content freshness, which can lead to incremental build incorrectness.", |
| ) |
| parser.add_argument( |
| "--path-mapping", |
| help="If specified, write a mapping of Ninja outputs to realpaths Bazel outputs", |
| ) |
| parser.add_argument( |
| "--command-file", |
| help="If specified, write the command used to invoke Bazel to file.", |
| ) |
| parser.add_argument( |
| "--timings-file", |
| help="If specified, write timings of each step in this script to file.", |
| ) |
| parser.add_argument( |
| "--debug-symbols-manifest", |
| help="If specified, write debug symbols manifest to file.", |
| ) |
| parser.add_argument( |
| "--verbose_failures", |
| action="store_true", |
| default=False, |
| help="If specified, extra information is printed on Bazel failures.", |
| ) |
| parser.add_argument("extra_bazel_args", nargs=argparse.REMAINDER) |
| |
| args = parser.parse_args() |
| |
| if not args.bazel_targets: |
| return parser.error("A least one --bazel-targets value is needed!") |
| |
| _build_fuchsia_package = args.command == "build" and args.package_outputs |
| |
| if args.extra_bazel_args and args.extra_bazel_args[0] != "--": |
| return parser.error( |
| "Extra bazel args should be separated from script args using --" |
| ) |
| args.extra_bazel_args = args.extra_bazel_args[1:] |
| |
| try: |
| bazel_paths = build_utils.BazelPaths.new( |
| args.fuchsia_dir, args.build_dir |
| ) |
| except ValueError as e: |
| parser.error(str(e)) |
| |
| build_dir = bazel_paths.ninja_build_dir |
| workspace_dir = bazel_paths.workspace |
| if not workspace_dir.exists(): |
| return parser.error( |
| f"Workspace directory does not exist: {workspace_dir}" |
| ) |
| |
| if not bazel_paths.launcher.exists(): |
| return parser.error( |
| f"Bazel launcher does not exist: {bazel_paths.launcher}" |
| ) |
| |
| time_profile = build_utils.TimeProfile() |
| |
| bazel_output_base_dir = bazel_paths.output_base |
| |
| jobs = None |
| if "--config=remote" in args.extra_bazel_args: |
| cpus = os.cpu_count() |
| if cpus: |
| jobs = 10 * cpus |
| |
| if jobs is None: |
| # If an explicit job count was passed to `fx build`, tell Bazel to respect it. |
| # See https://fxbug.dev/351623259 |
| job_count = os.environ.get("FUCHSIA_BAZEL_JOB_COUNT") |
| if job_count: |
| jobs = int(job_count) |
| |
| if args.gn_targets_repository_dir: |
| time_profile.start( |
| "gn_targets_dir", "Updating @gn_targets directory symlink" |
| ) |
| # Update fuchsia_build_generated/gn_targets_dir to point |
        # to a new location that matches the content of @gn_targets
| # for the current bazel_action() target. |
| # LINT.IfChange(gn_targets_dir) |
| build_utils.force_symlink( |
| os.path.join( |
| workspace_dir, "fuchsia_build_generated/gn_targets_dir" |
| ), |
| os.path.realpath(args.gn_targets_repository_dir), |
| ) |
| # LINT.ThenChange(//build/bazel/toplevel.MODULE.bazel:gn_targets_dir) |
| |
| extract_debug_symbols = any( |
| entry.copy_debug_symbols |
| for entry in args.package_outputs + args.directory_outputs |
| ) |
| |
| bazel_launcher = build_utils.BazelLauncher( |
| bazel_paths.launcher, |
| log_err=lambda msg: ( |
| print(f"BAZEL_ACTION_ERROR: {msg}", file=sys.stderr) |
| if _DEBUG |
| else None |
| ), |
| ) |
| |
| time_profile.start("query_cache", "loading Bazel query cache") |
| query_cache = build_utils.BazelQueryCache( |
| workspace_dir / "fuchsia_build_generated/bazel_query_cache" |
| ) |
| |
| def run_bazel_query( |
| query_cmd: str, query_args: list[str] |
| ) -> T.Optional[list[str]]: |
| """Run a Bazel query, return output as list of lines. |
| |
| Args: |
| query_cmd: Query command ("query", "cquery" or "aquery"). |
| query_args: Query arguments. |
| Returns: |
| On success, a list of output lines. On failure return None. |
| """ |
| return query_cache.get_query_output( |
| query_cmd, |
| query_args, |
| bazel_launcher, |
| log=lambda m: ( |
| print(f"DEBUG: {m}", file=sys.stderr) |
| if _DEBUG_BAZEL_QUERIES |
| else None |
| ), |
| ) |
| |
| def run_starlark_cquery( |
| query_targets: list[str], starlark_filename: str |
| ) -> list[str]: |
| """Run a Bazel cquery and process its output with a starlark file. |
| |
| Args: |
| query_targets: A list of Bazel targets to run the query over. |
| starlark_filename: Name of starlark file from //build/bazel/starlark. |
| Returns: |
| A list of output lines. |
| Raises: |
| AssertionError in case of failure. |
| """ |
| result = run_bazel_query( |
| "cquery", |
| [ |
| "--config=quiet", |
| "--output=starlark", |
| "--starlark:file", |
| get_input_starlark_file_path(starlark_filename), |
| ] |
| + configured_args |
| + ["set(%s)" % " ".join(query_targets)], |
| ) |
| assert result is not None |
| return result |
| |
| configured_args = args.extra_bazel_args |
| |
| time_profile.start( |
| "buildfiles_genquery", "Generating buildfiles_genquery/BUILD.bazel" |
| ) |
| |
| # All bazel targets as a set() expression for Bazel queries below. |
| # See https://bazel.build/query/language#set |
| query_targets = "set(%s)" % " ".join(args.bazel_targets) |
| |
| # Generate the genquery target for listing all build files that we need to |
| # include in the depfile for this target, i.e. any changes in these build |
| # files should trigger a rebuild of this target. |
| genquery_tmpl = os.path.join(_TEMPLATE_DIR, "template.genrule.bazel") |
| with open(genquery_tmpl, "rt") as tmpl: |
| output_build_content = tmpl.read().format( |
| query_expression=f"buildfiles(deps({query_targets}))", |
| query_scopes=",".join((f'"{s}"' for s in args.bazel_targets)), |
| query_opts='"--output=label"', |
| ) |
| |
| output_build_file = workspace_dir / "buildfiles_genquery/BUILD.bazel" |
| write_file_if_changed(output_build_file, output_build_content) |
| |
| time_profile.start( |
| f"bazel {args.command}", "Invoking Bazel {args.command} command" |
| ) |
| |
| cmd_args = [args.command] |
| |
| if args.bazel_build_events_log_json: |
| # Create parent directory to avoid Bazel complaining it cannot |
| # write the events log file. |
| args.bazel_build_events_log_json.parent.mkdir( |
| parents=True, exist_ok=True |
| ) |
| cmd_args += [ |
| f"--build_event_json_file={args.bazel_build_events_log_json.resolve()}" |
| ] |
| |
| cmd_args += configured_args |
| cmd_args += ["//buildfiles_genquery:genquery"] |
| cmd_args += args.bazel_targets |
| if _DEBUG or args.verbose_failures: |
| cmd_args += ["--verbose_failures"] |
| |
| # Add --sandbox_debug if FUCHSIA_DEBUG_BAZEL_SANDBOX=1 is |
| # in the environment. |
| if os.environ.get(_ENV_DEBUG_SANDBOX, "0") == "1": |
| cmd_args.append("--sandbox_debug") |
| |
| if jobs: |
| cmd_args += [f"--jobs={jobs}"] |
| |
| quiet = os.environ.get("FX_BUILD_QUIET") == "1" |
| if quiet: |
| cmd_args += ["--config=quiet"] |
| |
| if _DEBUG: |
| debug( |
| "BUILD_CMD: " |
| + build_utils.cmd_args_to_string([bazel_paths.launcher] + cmd_args) |
| ) |
| |
| # Save the command.profile.gz data for analysis. |
| # Convert '//some/gn:label' into 'obj/some/gn/label.command.profile.gz' |
| command_profile_filename = ( |
| f"{args.gn_target_label[2:].replace(':', '/')}.command.profile.gz" |
| ) |
| cmd_args += ["--profile", f"{build_dir}/obj/{command_profile_filename}"] |
| |
| if args.command_file: |
| write_file_if_changed( |
| args.command_file, |
| " \\\n ".join( |
| shlex.quote(str(c)) for c in [bazel_paths.launcher] + cmd_args |
| ) |
| + "\n", |
| ) |
| |
| # When quiet is set, capture both stdout and stderr to avoid printing anything |
| # to the terminal. Even when using `--config=quiet`, Bazel insists on printing |
| # some output when it runs in an interactive terminal. |
| # |
| # When quiet is not set, print both stdout and stderr to the terminal to make
| # sure console output from Bazel is correctly printed out by Ninja.
| ret = bazel_launcher.run_bazel_command( |
| cmd_args, print_stdout=not quiet, print_stderr=not quiet |
| ) |
| |
| time_profile.stop() |
| |
| if ret.returncode != 0: |
| if quiet: |
| # Print the captured outputs in quiet mode to help debug build errors.
| if ret.stdout: |
| sys.stdout.buffer.write(ret.stdout) |
| if ret.stderr: |
| sys.stderr.buffer.write(ret.stderr) |
| |
| # Detect the error message corresponding to a Bazel target |
| # referencing a @gn_targets//<dir>:<name> label |
| # that does not exist. This happens when the GN bazel_action() |
| # fails to depend on the proper bazel_input_file() or |
| # bazel_input_directory() dependency. |
| # |
| # NOTE: Path to command.log should be stable, because we explicitly set |
| # output_base. See https://bazel.build/run/scripts#command-log. |
| if verify_unknown_gn_targets( |
| (bazel_output_base_dir / "command.log").read_text().splitlines(), |
| args.gn_target_label, |
| args.bazel_targets, |
| ): |
| return 1 |
| |
| # This is a different error, just print it as is.
| #
| # Note that most build users are not interested in invoking Bazel
| # directly, so this message is hidden behind a flag.
| if _DEBUG or args.verbose_failures: |
| print( |
| "\nERROR when calling Bazel. To reproduce, run this in the Ninja output directory:\n\n %s\n" |
| % " ".join(shlex.quote(c) for c in ret.args), |
| file=sys.stderr, |
| ) |
| return 1 |
| |
| if args.command == "build": |
| time_profile.start( |
| "list_sources", "Query the list of Bazel source files" |
| ) |
| |
| # Get the list of input build files from the output of the genquery
| # target that was built alongside the other requested Bazel targets.
| # Doing this is considerably faster than performing a separate query here.
| build_files_query_output = ( |
| workspace_dir / "bazel-bin/buildfiles_genquery/genquery" |
| ) |
| build_files = build_files_query_output.read_text().splitlines() |
| |
| # Perform a cquery to get all source inputs for the targets. This
| # returns a list of Bazel labels, each followed by "(null)" because
| # source files are never configured. E.g.:
| # |
| # //build/bazel/examples/hello_world:hello_world (null) |
| # |
| bazel_source_files = run_bazel_query( |
| "cquery", |
| [ |
| "--config=quiet", |
| "--output", |
| "label", |
| f'kind("source file", deps({query_targets}))', |
| ] |
| + configured_args, |
| ) |
| |
| if bazel_source_files is None: |
| return 1 |
| |
| if _DEBUG: |
| debug("SOURCE FILES:\n%s\n" % "\n".join(bazel_source_files)) |
| |
| # Remove the ' (null)' suffix of each result line. |
| source_files = [l.partition(" (null)")[0] for l in bazel_source_files] |
| time_profile.stop() |
| |
| time_profile.start( |
| "check_outputs_for_copying", |
| "Validate output files to copy are actually files.", |
| ) |
| |
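| # Collect (source, destination) pairs for --file-outputs, rejecting any
| # Bazel path that resolves to a directory.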
| file_copies: list[tuple[Path, Path]] = [] |
| unwanted_dirs = [] |
| |
| for file_output in args.file_outputs: |
| src_path = workspace_dir / file_output.bazel_path |
| if src_path.is_dir(): |
| unwanted_dirs.append(src_path) |
| continue |
| dst_path = Path(file_output.ninja_path) |
| file_copies.append((src_path, dst_path)) |
| |
| if unwanted_dirs: |
| print( |
| "\nDirectories are not allowed in --file-outputs Bazel paths, got directories:\n\n%s\n" |
| % "\n".join(str(d) for d in unwanted_dirs) |
| ) |
| return 1 |
| |
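| # For --final-symlink-outputs, resolve each Bazel path so the created
| # link points at the real file rather than through bazel-bin symlinks.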
| final_symlinks: list[tuple[Path, Path]] = [] |
| for final_symlink_output in args.final_symlink_outputs: |
| src_path = workspace_dir / final_symlink_output.bazel_path |
| target_path = src_path.resolve() |
| link_path = Path(final_symlink_output.ninja_path) |
| final_symlinks.append((target_path, link_path)) |
| |
| bazel_execroot = bazel_paths.execroot |
| |
| if _build_fuchsia_package: |
| time_profile.start( |
| "package_info", "Run cquery to extract Fuchsia package information" |
| ) |
| |
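| # For each package output, run a Starlark cquery to recover the package
| # archive, manifest, and debug symbol directories, then schedule the
| # requested copies.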
| for entry in args.package_outputs: |
| if not ( |
| entry.archive_path |
| or entry.manifest_path |
| or entry.copy_debug_symbols |
| ): |
| continue |
| |
| # Run a cquery to extract the FuchsiaPackageInfo and |
| # FuchsiaDebugSymbolInfo provider values. |
| query_result = run_starlark_cquery( |
| [entry.package_label], |
| "package_archive_manifest_and_debug_symbol_dirs.cquery", |
| ) |
| assert ( |
| len(query_result) > 2 |
| ), f"Unexpected FuchsiaPackageInfo cquery result: {query_result}" |
| |
| # Get all paths, which are relative to the Bazel execroot. |
| bazel_archive_path, bazel_manifest_path = query_result[:2] |
| bazel_debug_symbol_dirs = query_result[2:] |
| |
| if entry.archive_path: |
| file_copies.append( |
| ( |
| bazel_execroot / bazel_archive_path, |
| entry.archive_path, |
| ) |
| ) |
| |
| if entry.manifest_path: |
| file_copies.append( |
| ( |
| bazel_execroot / bazel_manifest_path, |
| entry.manifest_path, |
| ) |
| ) |
| |
| time_profile.stop() |
| |
| time_profile.start( |
| "check_output_directories", |
| "Validate that output directories are ready to be copied.", |
| ) |
| |
| dir_copies: list[tuple[Path, Path, list[FilePath]]] = [] |
| missing_directories: list[Path] = [] |
| unwanted_files: list[Path] = [] |
| invalid_tracked_files: list[Path] = [] |
| |
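| # Validate each --directory-outputs entry: the Bazel path must be an
| # existing directory, and every tracked file must exist inside it.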
| for dir_output in args.directory_outputs: |
| src_path = workspace_dir / dir_output.bazel_path |
| if not src_path.exists(): |
| missing_directories.append(src_path) |
| continue |
| if not src_path.is_dir(): |
| unwanted_files.append(src_path) |
| continue |
| for tracked_file in dir_output.tracked_files: |
| tracked_file = src_path / tracked_file |
| if not tracked_file.is_file(): |
| invalid_tracked_files.append(tracked_file) |
| dst_path = Path(dir_output.ninja_path) |
| dir_copies.append( |
| (src_path, dst_path, [Path(f) for f in dir_output.tracked_files]) |
| ) |
| |
| if missing_directories: |
| print( |
| "\nError: Directory provided to --directory-outputs is missing, got:\n\n%s\n" |
| % "\n".join(str(d) for d in missing_directories) |
| ) |
| return 1 |
| |
| if unwanted_files: |
| print( |
| "\nError: Non-directories are not allowed in --directory-outputs Bazel path, got:\n\n%s\n" |
| % "\n".join(str(f) for f in unwanted_files) |
| ) |
| return 1 |
| |
| if invalid_tracked_files: |
| print( |
| "\nError: Missing or non-directory tracked files from --directory-outputs Bazel path:\n\n%s\n" |
| % "\n".join(str(f) for f in invalid_tracked_files) |
| ) |
| return 1 |
| |
| if file_copies: |
| time_profile.start( |
| "check_copy_files", |
| "Check to see if files need to be copied or not.", |
| ) |
| |
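| # filter_threaded applies check_if_need_to_copy_file to each pair;
| # falsy results (copies that are not needed) are dropped.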
| files_to_copy = [ |
| file_copy |
| for file_copy in thread_pool_helpers.filter_threaded( |
| check_if_need_to_copy_file, file_copies |
| ) |
| if file_copy |
| ] |
| |
| if files_to_copy: |
| time_profile.start( |
| "copy_files", "Copy Bazel output files to Ninja build directory" |
| ) |
| |
| thread_pool_helpers.starmap_threaded( |
| hardlink_or_copy_writable, |
| [ |
| (src, dst, bazel_output_base_dir) |
| for src, dst in files_to_copy |
| ], |
| ) |
| |
| if final_symlinks: |
| time_profile.start("symlink_outputs", "Symlink output files.") |
| # This doesn't need a thread pool because, as of today, only one file
| # in a single action ever uses this codepath.
| for target_path, link_path in final_symlinks: |
| build_utils.force_symlink(link_path, target_path) |
| |
| if dir_copies: |
| time_profile.start( |
| "copy_directories", |
| "Copy Bazel output directories to Ninja build directory", |
| ) |
| for src_path, dst_path, tracked_files in dir_copies: |
| copy_directory_if_changed(src_path, dst_path, tracked_files) |
| |
| time_profile.stop() |
| |
| # Drop tracked_files from dir_copies so it can be concatenated with |
| # file_copies later. |
| all_copies = file_copies + [(src, dst) for src, dst, _ in dir_copies] |
| |
| success, debug_symbols_manifest = generate_debug_symbols_manifest( |
| bazel_targets=args.bazel_targets, |
| bazel_launcher=bazel_launcher, |
| configured_args=configured_args, |
| bazel_execroot=bazel_execroot, |
| build_dir=build_dir, |
| manifest_output_path=args.debug_symbols_manifest, |
| time_profile=time_profile, |
| ) |
| if not success: |
| return 1 |
| |
| if args.debug_symbols_manifest: |
| # Write the debug symbols manifest. This is referenced by {BUILD_DIR}/debug_symbols.json |
| # which will be used by artifactory to upload the symbols to cloud storage on infra |
| # builds. |
| with open(args.debug_symbols_manifest, "wt") as f: |
| json.dump(debug_symbols_manifest, f, indent=2) |
| |
| if extract_debug_symbols: |
| time_profile.start( |
| "copy_debug_symbols", "Copy debug symbols to Ninja build directory." |
| ) |
| copy_debug_symbols_to_build_dir(build_dir, debug_symbols_manifest) |
| |
| if args.path_mapping: |
| time_profile.start("file_mapping", "Write file mapping file") |
| # When determining the source path of a copied output, follow symlinks to
| # get out of bazel-bin, because the content of bazel-bin is not guaranteed
| # to be stable across subsequent `bazel` commands.
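| # Each output line has the form "<ninja_path>:<source_path>", with the
| # source path expressed relative to the build directory.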
| write_file_if_changed( |
| args.path_mapping, |
| "\n".join( |
| str(dst_path) |
| + ":" |
| + os.path.relpath(src_path.resolve(), build_dir) |
| for src_path, dst_path in all_copies |
| ), |
| ) |
| |
| if args.depfile: |
| time_profile.start("depfile", "Write Ninja depfile") |
| # Map the build file and source file labels collected above to paths
| # relative to the Ninja build directory, then record them as the
| # depfile inputs of all copied outputs.
| mapper = BazelLabelMapper(str(workspace_dir), str(build_dir)) |
| |
| all_inputs = [ |
| label |
| for label in build_files + source_files |
| if not is_ignored_input_label(label) |
| ] |
| |
| # Convert input labels to paths relative to the build directory.
| if _DEBUG: |
| debug("ALL INPUTS:\n%s\n" % "\n".join(all_inputs)) |
| |
| ignored_labels = [] |
| all_sources = set() |
| for label in all_inputs: |
| path = mapper.source_label_to_path( |
| label, relative_to=str(build_dir) |
| ) |
| if path: |
| if build_utils.is_likely_content_hash_path(path): |
| debug(f"{path} ::: IGNORED CONTENT HASH NAME") |
| else: |
| debug(f"{path} <-- {label}") |
| all_sources.add(path) |
| elif label_requires_content_hash(label): |
| debug(f"IGNORED: {label}") |
| ignored_labels.append(label) |
| |
| if _ASSERT_ON_IGNORED_FILES and ignored_labels: |
| print( |
| "ERROR: Found ignored external repository files:", |
| file=sys.stderr, |
| ) |
| for label in ignored_labels: |
| print(f" {label}", file=sys.stderr) |
| print( |
| """ |
| These files are likely generated by a Bazel repository rule which has |
| no associated content hash file. Due to this, Bazel may regenerate them |
| semi-randomly in ways that confuse Ninja dependency computations. |
| |
| To solve this issue, change the build/bazel/scripts/bazel_action.py script to |
| add corresponding entries to the _BAZEL_NO_CONTENT_HASH_REPOSITORIES list, to |
| track all input files that the repository rule may access when it is run. |
| """, |
| file=sys.stderr, |
| ) |
| return 1 |
| |
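| # Emit a single Makefile-style depfile line: every copied output depends
| # on all collected sources. Hypothetical example:
| #
| #   obj/foo/out.far: ../../src/foo/BUILD.bazel ../../src/foo/main.cc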
| depfile_content = "%s: %s\n" % ( |
| " ".join(depfile_quote(c) for _, c in all_copies), |
| " ".join(depfile_quote(c) for c in sorted(all_sources)), |
| ) |
| |
| if _DEBUG: |
| debug("DEPFILE[%s]\n" % depfile_content) |
| |
| write_file_if_changed(args.depfile, depfile_content) |
| |
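| # Create (or truncate) each stamp file so Ninja sees fresh outputs.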
| for stamp_file in args.stamp_files: |
| stamp_file.parent.mkdir(parents=True, exist_ok=True) |
| stamp_file.write_text("") |
| |
| time_profile.stop() |
| if _DEBUG: |
| time_profile.print(0.001) |
| |
| if args.timings_file: |
| os.makedirs(os.path.dirname(args.timings_file), exist_ok=True) |
| with open(args.timings_file, "wt") as f: |
| json.dump(time_profile.to_json_timings(), f) |
| |
| if args.command == "build": |
| # Update $BUILD_DIR/bazel_build_invocations.json with a JSON object
| # describing the current invocation. Since all bazel_action() targets
| # share a pool of depth 1, there is no need to lock the file.
| # Note that this file is always created as an empty list by `fx build` |
| # and `fint build` before invoking Ninja, so there is no need to check |
| # for its existence here. |
| build_utils.LastBazelBuildInvocations.append_to_build_dir( |
| build_dir, |
| build_utils.BazelBuildInvocation( |
| bazel_targets=args.bazel_targets, |
| build_args=configured_args, |
| gn_label=args.gn_target_label, |
| gn_targets_dir=str(args.gn_targets_repository_dir), |
| bazel_action_timings=time_profile.to_json_timings(), |
| ), |
| ) |
| |
| # Done! |
| return 0 |
| |
| |
| if __name__ == "__main__": |
| sys.exit(main()) |