# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("$zx/public/gn/build_api_module.gni")
import("$zx/public/gn/config/standard.gni")
import("$zx/public/gn/copied_files_dir.gni")
import("$zx/public/gn/prebuilt.gni")
import("$zx/public/gn/test/zbi_test.gni")
import("$zx/public/gn/toolchain/c_utils.gni")
import("$zx/public/gn/toolchain/environment_redirect.gni")
import("$zx/public/gn/zbi.gni")
declare_args() {
# Defines the `//:default` target: what `ninja` with no arguments does.
# TODO(BLD-353): This must be set by the controlling Fuchsia GN build.
default_deps = false
}
assert(default_deps != false,
"TODO(BLD-353): Zircon build must be driven by Fuchsia build")
###
### Build API modules.
###
# This is the top-level build API module that just lists all the others.
# Each element of the list is the simple name of the API module; the
# module's contents are found at "$root_build_dir/$target_name.json".
#
# TODO(BLD-353): Note this does not include build_api_module() uses meant
# just for legacy build integration. We don't consider those to be actual
# build API modules; using build_api_module() is just a convenience for
# creating them. Importantly, they are not part of the API contract with
# consumers of the overall build. They're purely implementation details of
# the temporary Frankenbuild arrangement.
#
# Type: list(string)
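#
# As a purely illustrative sketch, the resulting api.json is simply a JSON
# list of the module names contributed by the deps below, e.g.:
#
#   [ "args", "binaries", "generated_sources", "images", ... ]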
build_api_module("api") {
testonly = true
data_keys = [ "build_api_modules" ]
deps = [
":args",
":binaries",
":generated_sources",
":images",
":packages",
":tests",
":tool_paths",
":zbi_tests",
]
}
# See //:args in the Fuchsia build for a description.
build_api_module("args") {
contents = read_file("$root_build_dir/args.gn", "scope")
}
# This describes all the binaries linked by the build.
#
# This enumerates each linked binary (executable, shared library, or
# loadable/"plug-in" module). This includes host tools, kernels, boot
# loaders, and drivers, as well as normal executables. It does not yet include
# any non-native binary formats. This is meant to reach the entire
# dependency graph of all binaries that the build would ever produce. Not
# every binary described is necessarily produced by any given Ninja
# run. Either the $debug or the $dist file for any individual binary can
# be passed to Ninja as a specific target argument to ensure it's built and
# up to date before making use of that binary. Like all build_api_module()
# targets, the top-level "binaries" target serves as a Ninja target to
# request that every binary described be built.
#
# Type: list(scope)
#
# cpu
# Required: CPU architecture the binary is for, e.g. "arm64" or "x64".
# Type: string
#
# os
# Required: OS the binary is for, e.g. "fuchsia", "linux", or "mac".
# Type: string
#
# environment
# Required: The ${toolchain.environment} name of the specific execution
# environment this was built for, e.g. "user", "host", or "guest". The
# tuple of ($cpu, $os, $environment) should indicate what hardware and
# software environment this binary is compatible with.
# Type: string
#
# label
# Required: The GN label of the binary target.
# Type: label_with_toolchain
#
# type
# Required: The type of binary.
# Type: "executable" or "shared_library" or "loadable_module"
#
# debug
# Required: Path to where the unstripped or separate debug file is
# found, relative to $root_build_dir. If $dist is omitted, this
# is also the file that is used at runtime.
# Type: path relative to $root_build_dir
#
# dist
# Optional: Path to where the stripped binary for deployment/execution is
# found, relative to $root_build_dir. This binary may be required for
# some debugging tasks if $debug is a separate debug file rather than
# an unstripped file. It should exactly match the binary that will be
# seen on devices or run directly on hosts.
# Type: path relative to $root_build_dir
#
# elf_build_id
# Optional: Path to a file containing the lowercase ASCII hexadecimal
# representation of the ELF build ID in this binary. This is omitted
# for OS environments that don't use ELF. For an ELF binary that
# doesn't have a build ID note, this key will be present but point to
# an empty file.
# Type: path relative to $root_build_dir
#
# breakpad
# Optional: Path to the breakpad symbol file for the debug binary. This
# will only be present if $output_breakpad_syms was set.
# Type: path relative to $root_build_dir
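#
# As a purely illustrative sketch, one such scope might look like this
# (every value below is hypothetical, not a real label or output path):
#
#   {
#     cpu = "x64"
#     os = "fuchsia"
#     environment = "user"
#     label = "//some/dir:example(//some/toolchain:user-x64)"
#     type = "executable"
#     debug = "obj/some/dir/example.debug"
#     dist = "obj/some/dir/example"
#     elf_build_id = "obj/some/dir/example.build-id.txt"
#   }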
build_api_module("binaries") {
testonly = true
deps = [ ":everything" ]
data_keys = [ "binaries" ]
}
# This describes all the generated source files in the build.
#
# The intent is that asking Ninja to build all these individual files is
# the minimal work needed to make source code analysis of all the files
# described in the compilation database viable.
#
# Type: list(path relative to $root_build_dir)
#
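# As a purely illustrative sketch, the resulting generated_sources.json is a
# flat list of paths (hypothetical values):
#
#   [ "gen/some/dir/generated-header.h", "gen/some/dir/table.inc" ]
#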
build_api_module("generated_sources") {
testonly = true
deps = [ ":everything" ]
data_keys = [ "generated_sources" ]
}
# This describes all the "image" files the build can produce.
#
# Consumers of the build should look here for the images to be built.
# The $cpu, $name, and $type fields identify the purpose of each image.
# Consumers are expected to ignore extra images they have no use for or
# whose fields they don't understand.
#
# The $path field indicates where the file is found in the build
# directory. The presence of an image in the list means that the build
# *can* produce it, not that the build *will* produce it. Hence,
# consumers should use $path as an explicit argument to Ninja to ensure
# that each needed image gets built.
#
# Type: list(scope)
#
# cpu
# Required: CPU architecture the image is for, e.g. "arm64" or "x64".
# Type: string
#
# name
# Required: The primary way that this image is known to consumers.
# Note that the name need not be unique within the images list.
# The tuple of ($name, $type, $cpu) should be unique.
# Type: string
#
# label
# Required: The GN label of the image target.
# Type: label_with_toolchain
#
# path
# Required: Path to where the file is found, relative to $root_build_dir.
# This is also the argument to Ninja that ensures this image will be built.
# Type: path relative to $root_build_dir
#
# testonly
# Optional: This image includes test code/data not meant for production.
# Type: bool
# Default: false
#
# tags
# Optional: Tags associated with the image. Certain tags may indicate
# to the build API consumer what should be done with the image.
# Type: list(string)
#
# type
# Required: Type of file, e.g. "zbi". This often corresponds to the
# extension used on the image file name, but not always. For many
# miscellaneous formats, this is just "bin" and the consumer is
# expected to understand what the particular format is for particular
# $name, $cpu combinations. Other types include:
# * "zbi": the ZBI (<zircon/boot/image.h>) format
# * "efi": an EFI executable that an EFI boot loader can load
# * "kernel": some other format loaded by a boot loader or emulator
# * "blk": contents to be written to a storage device or partition
# Type: string
#
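# As a purely illustrative sketch, one such scope might look like this
# (every value below is hypothetical):
#
#   {
#     cpu = "arm64"
#     name = "example-image"
#     type = "zbi"
#     label = "//some/dir:example-image(//some/toolchain:user-arm64)"
#     path = "arm64/example-image.zbi"
#     testonly = false
#   }
#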
build_api_module("images") {
testonly = true
data_keys = [ "images" ]
deps = [ ":everything" ] + default_deps
}
# Packages defined in the build.
#
# Type: list(scope)
#
# * name
# - Required: Name of the package.
# - Type: string
#
# * label
# - Required: GN label of the package() target.
# - Type: label_with_toolchain
#
# * cpu, os
# - Required: $current_cpu and $current_os values, respectively, for
# which this package is intended.
# - Type: string
#
# * manifest
# - Required: Manifest file of the package's sandbox filesystem contents.
# - Type: path relative to $root_build_dir
#
# * tags
# - Optional: List of tags to associate with the package.
# - Type: list(string)
#
# * testonly
# - Optional: True if this package should never be in a production build.
# - Type: bool
# - Default: false
#
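# As a purely illustrative sketch, one such scope might look like this
# (every value below is hypothetical):
#
#   {
#     name = "example-package"
#     label = "//some/dir:example-package(//some/toolchain:user-x64)"
#     cpu = "x64"
#     os = "fuchsia"
#     manifest = "obj/some/dir/example-package.manifest"
#     testonly = true
#   }
#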
build_api_module("packages") {
testonly = true
data_keys = [ "packages" ]
deps = [ ":everything" ]
}
# Tests in the build.
#
# Below, the "testing root" refers to $root_build_dir on host, and the full
# filesystem path from the root on device.
#
# Type: list(scope)
#
# * name
# - Required: Name of test.
# - Type: string
#
# * label
# - Required: GN label associated with the test.
# - Type: label_with_toolchain
#
# * path
# - Required: Path to the test's executable.
# - Type: path relative to the testing root.
#
# * cpu, os
# - Required: $current_cpu and $current_os values, respectively, for
# which this test is intended.
# - Type: string
#
# * disabled
# - Optional: a free-form string indicating a reason for the test being
# disabled.
# - Type: string
#
# * runtime_deps
# - Optional: a JSON file containing a list of root_build_dir-relative
# paths defining ascribed runtime dependencies of the test. These
# dependencies are aggregated via the metadata graph of the associated
# test target under a data key of "test_runtime_deps".
# - Type: path relative to root_build_dir
#
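# As a purely illustrative sketch, one such scope might look like this
# (every value below is hypothetical):
#
#   {
#     name = "example-test"
#     label = "//some/dir:example-test(//some/toolchain:user-x64)"
#     path = "test/example-test"
#     cpu = "x64"
#     os = "fuchsia"
#   }
#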
build_api_module("tests") {
testonly = true
data_keys = [ "tests" ]
deps = default_deps
}
# Tools provided by the build or as prebuilts, to be used outside the build.
#
# Type: list(scope)
#
# * name
# - Required: Name of a host tool.
# - Type: string
#
# * label
# - Required: GN label associated with the tool.
# - Type: label_with_toolchain
#
# * path
# - Required: Path to the tool's executable for the build host.
# - Type: path relative to $root_build_dir
#
# * cpu, os
# - Required: $current_cpu and $current_os values, respectively, for
# which this tool is intended.
# - Type: string
#
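# For instance, the tool_paths.metadata group below contributes one entry per
# prebuilt tool, roughly of this shape (the exact path and label depend on
# the build host and checkout layout):
#
#   {
#     cpu = "x64"
#     label = "//:tool_paths.metadata(<default toolchain>)"
#     name = "gn"
#     os = "linux"
#     path = "../../prebuilt/third_party/gn/linux-x64/gn"
#   }
#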
build_api_module("tool_paths") {
testonly = true
deps = [
":tool_paths.metadata",
"$zx/public/gn/toolchain:llvm-tools",
"tools:all-hosts",
]
data_keys = [ "tool_paths" ]
}
group("tool_paths.metadata") {
visibility = [ ":tool_paths" ]
_eponymous_3p_tools = [
"gn",
"ninja",
]
metadata = {
tool_paths = []
foreach(tool, _eponymous_3p_tools) {
tool_paths += [
{
cpu = host_cpu
label = get_label_info(":$target_name", "label_with_toolchain")
name = tool
os = host_os
path = rebase_path(
"$prebuilt_dir/third_party/$tool/$host_platform/$tool",
root_build_dir)
},
]
}
}
}
# This tells the infra recipes how to run ZBI tests, defined with zbi_test(),
# which is a class of tests that are 'run' by booting an associated ZBI and
# listening on serial for a particular string indicating success.
#
# The schema produced here matches //:images with the addition of the
# `success_string` key.
#
# Type: list(scope)
#
# * cpu
# - Required: CPU architecture the image is for, e.g. "arm64" or "x64".
# - Type: string
#
# * name
# - Required: The primary way that this ZBI is known to consumers.
# Note that the name need not be unique within the images list.
# The tuple of ($name, $cpu) should be unique.
# - Type: string
#
# * label
# - Required: The GN label of the image target.
# - Type: label_with_toolchain
#
# * path
# - Required: Path to where the file is found, relative to $root_build_dir.
# This is also the argument to Ninja that ensures this image will be built.
# - Type: path relative to $root_build_dir
#
# * bootserver_netboot
# - Required: bootserver command-line option for booting the ZBI.
# - Type: string
#
# * success_string
# - Required: The string that the ZBI test outputs to indicate success.
# - Type: string
#
# * device_types
# - Required: The list of device types that this test should be run on.
# //build/testing/platforms.gni lists the allowed values; others will
# be ignored.
# - Type: list(string)
#
# * qemu_kernel_label
# - Optional: Label appearing in $label of an //:images entry.
# That image should be used in place of the usual `qemu-kernel` image.
# - Type: label_with_toolchain
#
# * timeout
# - Optional: Timeout for running the test, in seconds.
# - Type: seconds
#
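# As a purely illustrative sketch, one such scope might look like this
# (every value below is hypothetical):
#
#   {
#     cpu = "arm64"
#     name = "example-zbi-test"
#     label = "//some/dir:example-zbi-test(//some/toolchain:user-arm64)"
#     path = "arm64/example-zbi-test.zbi"
#     bootserver_netboot = "--boot"
#     success_string = "*** example test succeeded ***"
#     device_types = [ "QEMU" ]
#   }
#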
build_api_module("zbi_tests") {
testonly = true
data_keys = [ "zbi_tests" ]
deps = [ ":everything" ]
}
###
### Top-level targets.
###
# This collects all the top-level targets that should contribute to the
# "everything" target, which is the root of build_api_module() metadata walks.
_everything_deps = [ "tools:all-hosts" ]
# Just building this constitutes a useful test that the build system and
# compiler configurations work as intended. It may also entail building a
# lot of stuff that is not actually useful to build and/or cannot be tested
# easily (or at all).
group("build-tests") {
testonly = true
deps = [ "$zx/public/gn/toolchain:noop-test" ]
}
_everything_deps += [ ":build-tests" ]
# This is used in the top-level build_api_module() deps to reach "everything".
# Note that "all" is a magical target name to Ninja, hence this name instead.
group("everything") {
testonly = true
# TODO(mcgrathr): Infra depends on not seeing too many things in the
# build API, so only include things used by the legacy build for now.
#deps = _everything_deps
deps = default_deps
}
# This is the default target that Ninja builds when given no target arguments.
group("default") {
testonly = true
deps = default_deps
}
###
### TODO(BLD-353): Temporary hacks for integrating with the legacy Fuchsia
### GN build.
###
if (current_toolchain == default_toolchain) {
build_api_module("legacy_dirs") {
testonly = true
data_keys = [ "legacy_dirs" ]
walk_keys = [ "legacy_barrier" ]
deps = []
foreach(cpu, standard_fuchsia_cpus) {
deps += [ ":legacy-$cpu" ]
}
foreach(host, standard_build_hosts) {
deps += [ ":legacy_host_targets-${host.os}-${host.cpu}" ]
}
}
foreach(cpu, standard_fuchsia_cpus) {
group("legacy-$cpu") {
testonly = true
deps = [
":legacy-aux-$cpu",
":legacy-deps-$cpu",
":legacy-tests-$cpu",
":legacy_targets-$cpu",
]
}
build_api_module("legacy_targets-$cpu") {
testonly = true
data_keys = [ "legacy_targets" ]
walk_keys = [ "legacy_barrier" ]
deps = [
":all-ulib-$cpu",
"tools",
]
}
build_api_module("legacy_sysroot-$cpu") {
testonly = true
data_keys = [ "legacy_sysroot" ]
walk_keys = [ "legacy_barrier" ]
deps = [ ":all-ulib-$cpu" ]
}
build_api_module("legacy_fuzzers-$cpu") {
testonly = true
data_keys = [ "fuzzer" ]
deps = [ ":legacy-tests-$cpu" ]
}
}
foreach(host, standard_build_hosts) {
build_api_module("legacy_host_targets-${host.os}-${host.cpu}") {
testonly = true
data_keys = [ "legacy_targets" ]
walk_keys = [ "legacy_barrier" ]
deps = [ ":all-ulib-host-${host.os}-${host.cpu}" ]
}
}
}
foreach(host, standard_build_hosts) {
environment_redirect("all-ulib-host-${host.os}-${host.cpu}") {
testonly = true
cpu = host.cpu
os = host.os
environment_label = "$zx/public/gn/toolchain:host"
deps = [ "$zx/system/ulib:ulib-host" ]
}
}
foreach(cpu, standard_fuchsia_cpus) {
if (current_toolchain == default_toolchain) {
zbi_input("legacy-tests-$cpu") {
output_dir = root_build_dir
testonly = true
cpu = cpu
deps = [ ":legacy-tests-deps-$cpu" ]
}
# The legacy Fuchsia GN build needs to consume a manifest of libraries
# built for ASan (and all the libraries themselves). When the main
# build is ASan, it needs to find the non-ASan libraries here too.
manifest_file("legacy-aux-$cpu") {
testonly = true
deps = [
":all-ulib-$cpu",
":instrumented-ulib-$cpu",
# Include the tests so their libraries are in the aux manifest
# when individual binaries are drawn from the tests manifest.
":legacy-tests-deps-$cpu",
]
metadata = {
# This prevents build_api_module("legacy-$cpu") from visiting the
# ASan incarnations of all the libraries. The legacy build wants
# only the manifest of ASan shared libraries. It wants the targets
# themselves only from the primary build.
legacy_barrier = []
}
}
}
environment_redirect("legacy-deps-$cpu") {
visibility = [ ":*" ]
testonly = true
environment_label = "$zx/public/gn/toolchain:user"
cpu = cpu
deps = [
"$zx/bootloader",
"$zx/kernel",
]
metadata = {
legacy_barrier = []
}
}
environment_redirect("legacy-tests-deps-$cpu") {
visibility = [ ":*" ]
testonly = true
environment_label = "$zx/public/gn/toolchain:user"
cpu = cpu
direct = true
deps = [ "$zx/system/utest" ]
}
environment_redirect("all-ulib-$cpu") {
testonly = true
cpu = cpu
environment_label = "$zx/public/gn/toolchain:user"
shlib = true
exclude_variant_tags = [ "instrumented" ]
deps = [ "$zx/system/ulib" ]
}
environment_redirect("instrumented-ulib-$cpu") {
testonly = true
cpu = cpu
environment_label = "$zx/public/gn/toolchain:user"
deps = [ ":instrumented-ulib-redirect" ]
}
}
if (toolchain.environment == "user") {
instrumented_variants = []
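  # Collect the variants whose selectors carry the "instrumented" tag.  The
  # list arithmetic in the condition below is the GN idiom for a membership
  # test, and the add/remove/add sequence ensures each variant appears in
  # the list exactly once.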
foreach(selector, toolchain.variant_selectors) {
if (selector.tags + [ "instrumented" ] - [ "instrumented" ] !=
selector.tags) {
instrumented_variants += [ selector.variant ]
instrumented_variants -= [ selector.variant ]
instrumented_variants += [ selector.variant ]
}
}
group("instrumented-ulib-redirect") {
testonly = true
visibility = [ ":*" ]
deps = []
foreach(variant, instrumented_variants) {
deps += [ ":instrumented-ulib-redirect.$variant" ]
}
}
foreach(variant, instrumented_variants) {
environment_redirect("instrumented-ulib-redirect.$variant") {
testonly = true
visibility = [ ":instrumented-ulib-redirect" ]
variant = variant
deps = [
"$zx/public/gn/config:shared-libc++-deps",
"$zx/system/ulib",
]
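      # Entries in toolchain.implicit_deps may be plain label strings or
      # scopes carrying an `add` list; fold both forms into deps.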
foreach(dep, toolchain.implicit_deps) {
if (dep == "$dep") {
deps += [ dep ]
} else {
deps += dep.add
}
}
}
}
}
# This allows the arm64 boot shims to always be built and later surfaced by
# the unification build API.
environment_redirect("boot-shim-arm64") {
cpu = "arm64"
environment_label = "$zx/public/gn/toolchain:user"
deps = [ "//kernel/target/arm64/boot-shim" ]
}
build_api_module("legacy_images") {
testonly = true
data_keys = [ "images" ]
deps = [
":boot-shim-arm64",
":zbi_tests",
"tools",
]
foreach(cpu, standard_fuchsia_cpus) {
deps += [ ":legacy-$cpu" ]
}
}
# These targets complement legacy_images by listing the various elements
# that end up in the images.
foreach(cpu, standard_fuchsia_cpus) {
build_api_module("legacy_unification-$cpu") {
testonly = true
data_keys = [ "zx_manifest" ]
deps = [
":legacy-$cpu",
"tools",
]
if (cpu == "arm64") {
deps += [ ":boot-shim-arm64" ]
}
# Add the dependencies of legacy-aux directly, as that target sets a barrier
# that's not desirable here.
deps += [
":all-ulib-$cpu",
":instrumented-ulib-$cpu",
":legacy-tests-deps-$cpu",
]
}
}