# Copyright 2022 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""CI recipe for Bazel-SDK-based projects in the Fuchsia ecosystem.

This recipe supports projects that need the following steps in their CI jobs:
1. Checkout code.
2. Invoke the well-known Bazel bootstrapping script included in the Bazel SDK.
3. Build a specified fuchsia_builder_group() target with Bazel.
4. Upload build artifacts (images, blobs, drivers, etc.) per the upload
manifest(s) emitted by step 3.
5. Run tests in parallel shards on separate machines per the test manifest(s)
emitted by step 3.
6. Trigger tests in downstream projects, passing through the uploaded build
artifacts.

The recipe interacts with each project via a fuchsia_builder_group() target
specified as a property. This rule defines what to build, test, and upload.

Interacting with each project only via such a target has several benefits:
- Prevents project-specific implementation details from leaking into the recipe.
This keeps the recipe simple, and minimizes the need for soft transitions by
allowing project owners to make changes within the project without changing
the infrastructure.
- Ensures that continuous integration workflows can easily be reproduced
  locally by running the same commands as the recipe (see the example below).
- Enforces that each project's continuous integration workflow follows a
reasonable process that's consistent with Fuchsia's CI guidelines:
https://fuchsia.dev/fuchsia-src/contribute/governance/rfcs/0148_ci_guidelines
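
For example, a project's CI workflow can be reproduced locally with roughly
the following commands, where <build_options> and <build_target> stand in for
the recipe properties of the same names:

  scripts/bootstrap.sh
  tools/bazel build <build_options> <build_target>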
"""

from typing import Generator
from google.protobuf import json_format as jsonpb
from PB.infra.build_test_upload.test_manifest import TestManifest
from PB.recipe_modules.fuchsia.cipd_util.upload_manifest import CIPDUploadManifest
from PB.recipes.fuchsia.bazel_build_test_upload import BuilderManifest, InputProperties
from recipe_engine import config_types, recipe_api, recipe_test_api
from RECIPE_MODULES.fuchsia.swarming_retry import api as swarming_retry_api

DEPS = [
"fuchsia/artifacts",
"fuchsia/buildbucket_util",
"fuchsia/cas_util",
"fuchsia/cipd_util",
"fuchsia/dpi",
"fuchsia/fxt",
"fuchsia/git_checkout",
"fuchsia/presubmit_util",
"fuchsia/release",
"fuchsia/swarming_retry",
"recipe_engine/cipd",
"recipe_engine/context",
"recipe_engine/file",
"recipe_engine/json",
"recipe_engine/path",
"recipe_engine/properties",
"recipe_engine/proto",
"recipe_engine/raw_io",
"recipe_engine/resultdb",
"recipe_engine/step",
"recipe_engine/swarming",
]

PROPERTIES = InputProperties
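

# An illustrative InputProperties payload; the values are hypothetical and
# mirror the test data in GenTests below:
#
#     {
#         "remote": "https://fuchsia.googlesource.com/foo",
#         "build_target": "//src:infra",
#         "build_options": ["--config", "fuchsia_x64"],
#         "default_service_account": "artifact-readers@fuchsia-infra.iam.gserviceaccount.com"
#     }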
def RunSteps(api: recipe_api.RecipeScriptApi, props: InputProperties):
# Projects should not assume they'll always be checked out at the same
# absolute path, so do the checkout in a random temporary directory.
checkout_dir = api.path.mkdtemp("checkout")
_, git_revision = api.git_checkout(props.remote, path=checkout_dir)
upload_namespace = api.buildbucket_util.id
# TODO(fxbug.dev/101594): These should be configured higher in the stack in
# one of {swarming_bot, bbagent, recipe_wrapper}. Set them here for now to
# unblock isolation work (b/234060366).
xdg_env_vars = [
"HOME",
"XDG_CACHE_HOME",
"XDG_CONFIG_HOME",
"XDG_DATA_HOME",
"XDG_HOME",
"XDG_STATE_HOME",
]
env = api.context.env
for env_var in xdg_env_vars:
env[env_var] = env.get("TMPDIR", str(api.path.cleanup_dir))
# TODO(fxbug.dev/112403): Repos that use Bazel should pass the release
# version via a `--config` flag instead of an env var.
if release_version := api.release.ref_to_release_version(
ref="HEAD", repo_path=checkout_dir
):
env["RELEASE_VERSION"] = release_version
    # Tell Bazel not to try to detect a local C++ toolchain. If we don't, it
    # will try to configure one based on the local gcc, which is not
    # installed on the bots, so detection would fail.
env["BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN"] = "1"
build_dir = api.path.mkdtemp("build")
output_user_root_dir = build_dir / "user_root"
with api.context(cwd=checkout_dir, env=env):
        # Invoke the well-known bootstrapping script, which should install a
        # pinned version of Bazel from CIPD at tools/bazel.
api.step(
"bootstrap bazel",
[checkout_dir.joinpath("scripts", "bootstrap.sh")],
)
bazel_path = checkout_dir.joinpath("tools", "bazel")
# TODO(fxbug.dev/112403): Explicitly fetch dependencies first rather
# than implicitly depending on `bazel build` to fetch dependencies.
# Then, run `bazel build` in a sandbox.
api.step(
"bazel build",
[
bazel_path,
"--output_base",
build_dir,
"--output_user_root",
output_user_root_dir,
"build",
*props.build_options,
props.build_target,
],
)
        # The builder manifest is a file generated by the build that
        # specifies test and upload requests. The following cquery is a
        # stable way to discover the manifest's path.
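        # An illustrative builder manifest, showing the fields consumed below
        # (a real manifest need not set every field):
        #
        #     {
        #         "testing_manifest": "test_manifest.json",
        #         "gcs_manifests": ["gcs_manifest.json"],
        #         "cipd_manifests": ["cipd_manifest.json"]
        #     }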
query_step = api.step(
"query builder manifest path",
[
bazel_path,
"cquery",
*props.build_options,
props.build_target,
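                # Look up the provider under both its apparent (@) and
                # canonical (@@) repository names so the expression works
                # whether or not bzlmod-style canonical names are in use.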
"--starlark:expr",
"(providers(target).get('@fuchsia_infra//infra/private:providers.bzl%FuchsiaBuilderGroupInfo') or providers(target).get('@@fuchsia_infra//infra/private:providers.bzl%FuchsiaBuilderGroupInfo')).manifest.path",
"--output",
"starlark",
],
stdout=api.raw_io.output_text(),
step_test_data=lambda: api.raw_io.test_api.stream_output_text(
"builder_manifest.json",
),
)
query_step.presentation.logs["stdout"] = query_step.stdout
builder_manifest_path = checkout_dir.joinpath(query_step.stdout.strip())
builder_manifest = api.file.read_proto(
"read builder manifest",
builder_manifest_path,
BuilderManifest,
codec="JSONPB",
)
if builder_manifest.testing_manifest and props.testing_pool:
testing_manifest = api.file.read_proto(
f"read test manifest {builder_manifest.testing_manifest}",
checkout_dir / builder_manifest.testing_manifest,
TestManifest,
codec="JSONPB",
)
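            # An illustrative test manifest, mirroring the fields consumed by
            # create_task_requests() and the test data in GenTests:
            #
            #     {
            #         "root_dir": "/abs/path/to/runfiles/root",
            #         "test_groups": [{
            #             "name": "group1",
            #             "command": ["run-tests", "--verbose"],
            #             "exec_dir": "path/to/exec/dir",
            #             "scheduling_dimensions": {"foo": "bar"},
            #             "runfiles": ["some/runtime/dep"]
            #         }]
            #     }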
with api.step.nest("upload test dependencies"):
task_requests = create_task_requests(
api,
testing_manifest,
testing_pool=props.testing_pool,
default_service_account=props.default_service_account,
)
run_tests(api, task_requests)
gcs_manifests = [
api.file.read_json(
f"read gcs manifest {gcs_manifest_path}",
checkout_dir / gcs_manifest_path,
)
for gcs_manifest_path in builder_manifest.gcs_manifests
]
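        # Each GCS manifest is a JSON list of source/destination pairs, e.g.:
        #
        #     [{"source": "foo.txt", "destination": "foo.txt"}]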
api.artifacts.gcs_bucket = props.gcs_bucket
api.artifacts.namespace = upload_namespace
# TODO(fxbug.dev/112403): There should probably only be one GCS manifest
# allowed.
for gcs_manifest in gcs_manifests:
api.artifacts.upload(
"upload from manifest",
api.json.input(gcs_manifest),
sign_artifacts=props.sign_artifacts,
)
cipd_manifests = [
api.file.read_proto(
f"read cipd manifest {cipd_manifest_path}",
checkout_dir / cipd_manifest_path,
CIPDUploadManifest,
codec="JSONPB",
)
for cipd_manifest_path in builder_manifest.cipd_manifests
]
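        # An illustrative CIPD manifest, mirroring cipd_manifest_data() in
        # GenTests:
        #
        #     {
        #         "pkg_name": "fuchsia/foo",
        #         "files": [{"source": "local/foo/bar", "dest": "package/foobar"}]
        #     }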
cas_digests = {}
for cipd_manifest in cipd_manifests:
step_name = f"cipd upload {cipd_manifest.pkg_name}"
if not props.upload_to_cipd:
step_name += " (dry run)"
with api.step.nest(step_name):
api.cipd_util.upload_from_manifest(
cipd_manifest.pkg_name,
cipd_manifest,
build_dir=build_dir,
repository=props.remote,
git_revision=git_revision,
upload_to_cipd=props.upload_to_cipd,
cas_digests=cas_digests,
)
if props.mos_upload_options.repo_hostname:
api.dpi.upload(
"upload to MOS-TUF repos",
build_dir=build_dir,
options=props.mos_upload_options,
)
if props.gcs_bucket and props.HasField("fxt_options"):
api.fxt.orchestrate_fxt_tests(
bucket=props.gcs_bucket,
namespace=upload_namespace,
options=props.fxt_options,
)
if cas_digests and props.HasField("external_presubmit_options"):
api.presubmit_util.orchestrate(
options=props.external_presubmit_options,
cl_subject=f"[test] Dryrun build {api.buildbucket_util.id}",
package_overrides=cas_digests,
)


def create_task_requests(
api,
testing_manifest,
testing_pool,
default_service_account,
):
root_dir = api.path.abs_to_path(testing_manifest.root_dir)
task_requests = []
for test_group in testing_manifest.test_groups:
cas_digest = api.cas_util.upload(
staging_dir=root_dir,
upload_paths=[root_dir / f for f in test_group.runfiles],
step_name=f"upload files for {test_group.name}",
)
# TODO(fxbug.dev/106189): Pass `include=True` to `wrap()` so it creates
# a sub-invocation for each task. This will require granting the task
# service account permission to create new ResultDB invocations.
cmd = api.resultdb.wrap(list(test_group.command))
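        # The wrapped command looks roughly like:
        #
        #     rdb stream -- run-tests --verbose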
request = (
api.swarming.task_request()
.with_name(test_group.name)
.with_service_account(default_service_account)
.with_resultdb()
)
request = request.with_slice(
0,
request[0]
.with_command(cmd)
.with_relative_cwd(test_group.exec_dir)
.with_cas_input_root(cas_digest)
.with_cipd_ensure_file(
api.cipd.EnsureFile()
# TODO(fxbug.dev/106189): Pin these tools instead of using
# "latest".
.add_package(
"infra/tools/rdb/${platform}", "latest", subdir="luci-tools"
).add_package(
"infra/tools/luci-auth/${platform}", "latest", subdir="luci-tools"
)
)
            # The directory named by this environment variable is written to
            # by the Orchestrate tool:
            # https://cs.opensource.google/search?q=TEST_UNDECLARED_OUTPUTS_DIR&sq=&ss=fuchsia%2Ffuchsia:tools%2Forchestrate%2F
.with_env_vars(TEST_UNDECLARED_OUTPUTS_DIR="${ISOLATED_OUTDIR}")
# The command constructed by `api.resultdb.wrap()` assumes that
# `rdb` is on $PATH.
.with_env_prefixes(PATH=["luci-tools"])
.with_dimensions(pool=testing_pool, **test_group.scheduling_dimensions)
.with_execution_timeout_secs(test_group.timeout_secs)
.with_io_timeout_secs(5 * 60)
.with_expiration_secs(60 * 60),
)
task_requests.append(request)
return task_requests


def run_tests(api, task_requests):
tasks = [Task(request, api) for request in task_requests]
api.swarming_retry.run_and_present_tasks(
tasks, max_attempts=1, collect_output_dir=api.path.mkdtemp("testing_outputs")
)


class Task(swarming_retry_api.TriggeredTask):
def present_attempt(self, task_step_presentation, attempt, **kwargs):
if attempt.result.cas_outputs:
task_step_presentation.links["test artifacts"] = (
attempt.result.cas_outputs.url
)
super().present_attempt(task_step_presentation, attempt, **kwargs)


def GenTests(api) -> Generator[recipe_test_api.TestData, None, None]:
default_remote = "https://fuchsia.googlesource.com/foo"

    def properties(**kwargs):
props = {
"remote": default_remote,
"build_target": "//src:infra",
"build_options": ["--config", "fuchsia_x64"],
"default_service_account": "artifact-readers@fuchsia-infra.iam.gserviceaccount.com",
}
props.update(kwargs)
return api.properties(jsonpb.ParseDict(props, InputProperties()))

    def cipd_manifest_data(files=None):
if not files:
files = {"local/foo/bar": "package/foobar"}
return api.proto.output(
CIPDUploadManifest(
pkg_name="fuchsia/foo",
files=[
CIPDUploadManifest.FileToUpload(source=k, dest=v)
for k, v in files.items()
],
)
)

    yield (
api.buildbucket_util.test("fxt_tests", git_repo=default_remote)
+ properties(
gcs_bucket="foo-artifacts",
sign_artifacts=True,
fxt_options={"sdk_mode": True},
)
+ api.step_data(
"read builder manifest",
api.file.read_proto(
BuilderManifest(
gcs_manifests=["gcs_manifest.json"],
),
),
)
+ api.step_data(
"read gcs manifest gcs_manifest.json",
stdout=api.json.output([{"source": "foo.txt", "destination": "foo.txt"}]),
)
+ api.fxt.orchestrate_fxt_tests()
)

    yield (
api.buildbucket_util.test("mos_cipd", git_repo=default_remote)
+ properties(
mos_upload_options={
"repo_hostname": "test.fuchsia-update.googleusercontent.com",
"gcs_bucket": "discover-cloud.appspot.com",
"manifest_path": "path/to/mos/manifest",
},
upload_to_cipd=True,
)
+ api.release.ref_to_release_version("releases/0.20191018.0.1")
+ api.step_data(
"read builder manifest",
api.file.read_proto(
BuilderManifest(
cipd_manifests=["cipd_manifest.json"],
),
),
)
+ api.step_data(
"read cipd manifest cipd_manifest.json",
cipd_manifest_data(
# Cover the case where we upload an entire subdirectory into the
# root of the CIPD package.
{"dir/pkg": "."},
),
)
)

    yield (
api.buildbucket_util.test(
"invalid_cipd_upload",
git_repo=default_remote,
# The manifest should be validated even in presubmit.
tryjob=True,
status="FAILURE",
)
+ properties()
+ api.step_data(
"read builder manifest",
api.file.read_proto(
BuilderManifest(
cipd_manifests=["cipd_manifest.json"],
),
),
)
+ api.step_data(
"read cipd manifest cipd_manifest.json",
cipd_manifest_data(
# Paths outside the build directory are disallowed.
{"foo/../../checkout-path": "package_path/foo/bar"}
),
)
)

    yield (
api.buildbucket_util.test(
"cipd_upload_tryjob", git_repo=default_remote, tryjob=True
)
+ properties(
external_presubmit_options={
"gerrit_host": "foo-review.googlesource.com",
"gerrit_project": "bar",
},
)
+ api.step_data(
"read builder manifest",
api.file.read_proto(
BuilderManifest(
cipd_manifests=["cipd_manifest.json"],
),
),
)
+ api.step_data(
"read cipd manifest cipd_manifest.json",
cipd_manifest_data(),
)
+ api.presubmit_util.create_cl()
+ api.presubmit_util.wait_for_cq()
)

    yield (
api.buildbucket_util.test(
"failed_tests", git_repo=default_remote, status="FAILURE"
)
+ properties(
testing_pool="testpool",
)
+ api.step_data(
"read builder manifest",
api.file.read_proto(
BuilderManifest(
testing_manifest="test_manifest.json",
),
),
)
+ api.step_data(
"read test manifest test_manifest.json",
api.file.read_proto(
TestManifest(
root_dir="[START_DIR]/foo",
test_groups=[
TestManifest.TestGroup(
name="group1",
command=["run-tests", "--verbose"],
exec_dir="path/to/exec/dir",
scheduling_dimensions=[("foo", "bar")],
# The first file should be ignored since its entire
# directory is included.
runfiles=["some/runtime/dep", "some/runtime"],
)
],
)
),
)
+ api.swarming_retry.collect_data(
[api.swarming_retry.failed_task(name="group1", task_id=0)]
)
)