# Copyright 2022 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""CI recipe for Bazel-SDK-based projects in the Fuchsia ecosystem.
This recipe supports projects that need the following steps in their CI jobs:
1. Checkout code.
2. Invoke the well-known Bazel bootstrapping script included in the Bazel SDK.
3. Build a specified fuchsia_builder_group() target with Bazel.
4. Upload build artifacts (images, blobs, drivers, etc.) per the upload
manifest(s) emitted by step 3.
5. Run tests in parallel shards on separate machines per the test manifest(s)
emitted by step 3.
6. Trigger tests in downstream projects, passing through the uploaded build
artifacts.
The recipe interacts with each project via a fuchsia_builder_group() target
specified as a property. This rule defines what to build, test, and upload.
Interacting with each project only via such a target has several benefits:
- Prevents project-specific implementation details from leaking into the recipe.
  This keeps the recipe simple and minimizes the need for soft transitions by
allowing project owners to make changes within the project without changing
the infrastructure.
- Ensures that continuous integration workflows can easily be reproduced
  locally by running the same commands as the recipe.
- Enforces that each project's continuous integration workflow follows a
reasonable process that's consistent with Fuchsia's CI guidelines:
https://fuchsia.dev/fuchsia-src/contribute/governance/rfcs/0148_ci_guidelines
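
For illustration only, a project might define such a target in a BUILD.bazel
file roughly like the following minimal sketch (the load path and anything
beyond the `name` attribute are assumptions here, not the rule's documented
API):

    # Hypothetical sketch -- consult the Bazel SDK for the real signature.
    load("@fuchsia_infra//infra:infra.bzl", "fuchsia_builder_group")

    fuchsia_builder_group(
        name = "infra",  # referenced via the recipe's `build_target` property
    )

The recipe would then be configured with `build_target = "//src:infra"`.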
"""
from google.protobuf import json_format as jsonpb
from PB.recipes.fuchsia.bazel_build_test_upload import BuilderManifest, InputProperties
from PB.infra.build_test_upload.upload_manifest import CIPDUploadManifest
from PB.infra.build_test_upload.test_manifest import TestManifest
from RECIPE_MODULES.fuchsia.swarming_retry import api as swarming_retry_api
DEPS = [
"fuchsia/artifacts",
"fuchsia/buildbucket_util",
"fuchsia/cas_util",
"fuchsia/checkout",
"fuchsia/cipd_util",
"fuchsia/dpi",
"fuchsia/fxt",
"fuchsia/git",
"fuchsia/git_checkout",
"fuchsia/release",
"fuchsia/swarming_retry",
"recipe_engine/cipd",
"recipe_engine/context",
"recipe_engine/file",
"recipe_engine/json",
"recipe_engine/path",
"recipe_engine/properties",
"recipe_engine/proto",
"recipe_engine/raw_io",
"recipe_engine/resultdb",
"recipe_engine/step",
"recipe_engine/swarming",
]
PROPERTIES = InputProperties
def RunSteps(api, props):
# Projects should not assume they'll always be checked out at the same
# absolute path, so do the checkout in a random temporary directory.
checkout_dir = api.path.mkdtemp("checkout")
if props.jiri_manifest:
api.checkout.with_options(
path=checkout_dir,
manifest=props.jiri_manifest,
remote=props.remote,
project=props.jiri_project,
attributes=props.jiri_attributes,
)
with api.context(cwd=checkout_dir):
git_revision = api.git.rev_parse(
"HEAD",
step_name="resolve HEAD",
step_test_data=lambda: api.raw_io.test_api.stream_output_text("abc123"),
)
else:
_, git_revision = api.git_checkout(props.remote, path=checkout_dir)
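    # Namespace uploaded artifacts by the Buildbucket build ID so that
    # concurrent builds never collide.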
upload_namespace = api.buildbucket_util.id
# TODO(fxbug.dev/101594): These should be configured higher in the
# stack in one of {swarming_bot, bbagent, recipe_bootstrap}. Set them
# here for now to unblock isolation work (b/234060366).
xdg_env_vars = [
"HOME",
"XDG_CACHE_HOME",
"XDG_CONFIG_HOME",
"XDG_DATA_HOME",
"XDG_HOME",
"XDG_STATE_HOME",
]
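    # Point each of these variables at a per-build temporary directory so that
    # tools invoked during the build write there instead of the bot's default
    # home directory.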
env = api.context.env
for env_var in xdg_env_vars:
env[env_var] = env.get("TMPDIR", str(api.path["cleanup"]))
# TODO(fxbug.dev/112403): Repos that use Bazel should pass the release
# version via a `--config` flag instead of an env var.
if release_version := api.release.ref_to_release_version(
ref="HEAD", repo_path=checkout_dir
):
env["RELEASE_VERSION"] = release_version
build_dir = api.path.mkdtemp("build")
output_user_root_dir = build_dir.join("user_root")
with api.context(cwd=checkout_dir, env=env):
        # Invoke the well-known bootstrapping script, which should install a
        # pinned version of Bazel from CIPD at tools/bazel.
api.step(
"bootstrap bazel",
[checkout_dir.join("scripts", "bootstrap.sh")],
)
bazel_path = checkout_dir.join("tools", "bazel")
# TODO(fxbug.dev/112403): Explicitly fetch dependencies first rather
# than implicitly depending on `bazel build` to fetch dependencies.
# Then, run `bazel build` in a sandbox.
api.step(
"bazel build",
[
bazel_path,
"--output_base",
build_dir,
"--output_user_root",
output_user_root_dir,
"build",
*props.build_options,
props.build_target,
],
)
        # The builder manifest is a file generated by the build that specifies
        # test and upload requests. The following cquery is stable and
        # discovers the manifest's path.
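        # A builder manifest might look like the following (file names are
        # illustrative; paths are relative to the checkout root):
        #   {
        #     "test_manifests": ["test_manifest.json"],
        #     "gcs_manifests": ["gcs_manifest.json"],
        #     "cipd_manifests": ["cipd_manifest.json"]
        #   }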
query_step = api.step(
"query builder manifest path",
[
bazel_path,
"cquery",
*props.build_options,
props.build_target,
"--starlark:expr",
"providers(target).get('@fuchsia_infra//infra/private:providers.bzl%FuchsiaBuilderGroupInfo').manifest.path",
"--output",
"starlark",
],
stdout=api.raw_io.output_text(),
step_test_data=lambda: api.raw_io.test_api.stream_output_text(
"builder_manifest.json",
),
)
query_step.presentation.logs["stdout"] = query_step.stdout
builder_manifest_path = checkout_dir.join(query_step.stdout.strip())
builder_manifest = api.file.read_proto(
"read builder manifest",
builder_manifest_path,
BuilderManifest,
codec="JSONPB",
)
test_manifests = [
api.file.read_proto(
f"read test manifest {test_manifest_path}",
checkout_dir.join(test_manifest_path),
TestManifest,
codec="JSONPB",
)
for test_manifest_path in builder_manifest.test_manifests
]
# TODO(fxbug.dev/112403): Probably should not use a loop here.
for test_manifest in test_manifests:
with api.step.nest("upload test dependencies"):
task_requests = create_task_requests(
api,
test_manifest,
testing_pool=props.testing_pool,
default_service_account=props.default_service_account,
)
run_tests(api, task_requests)
gcs_manifests = [
api.file.read_json(
f"read gcs manifest {gcs_manifest_path}",
checkout_dir.join(gcs_manifest_path),
)
for gcs_manifest_path in builder_manifest.gcs_manifests
]
api.artifacts.gcs_bucket = props.gcs_bucket
api.artifacts.namespace = upload_namespace
# TODO(fxbug.dev/112403): There should probably only be one GCS manifest
# allowed.
for gcs_manifest in gcs_manifests:
api.artifacts.upload_from_manifest(
"upload from manifest",
api.json.input(gcs_manifest),
sign_artifacts=props.sign_artifacts,
)
cipd_manifests = [
api.file.read_proto(
f"read cipd manifest {cipd_manifest_path}",
checkout_dir.join(cipd_manifest_path),
CIPDUploadManifest,
codec="JSONPB",
)
for cipd_manifest_path in builder_manifest.cipd_manifests
]
for cipd_manifest in cipd_manifests:
with api.step.nest(f"cipd upload {cipd_manifest.pkg_name}"):
cipd_upload_from_manifest(
api,
cipd_manifest.pkg_name,
cipd_manifest,
build_dir=build_dir,
repository=props.remote,
git_revision=git_revision,
)
if props.mos_upload_options.repo_hostname:
api.dpi.upload(
"upload to MOS-TUF repos",
build_dir=build_dir,
options=props.mos_upload_options,
)
if props.gcs_bucket and props.HasField("fxt_options"):
api.fxt.orchestrate_fxt_tests(
bucket=props.gcs_bucket,
namespace=upload_namespace,
options=props.fxt_options,
)
def create_task_requests(
api,
test_manifest,
testing_pool,
default_service_account,
):
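    """Creates one Swarming task request per test group in the test manifest.

    Each group's runfiles are uploaded to CAS for the task to download, and
    the group's command is wrapped with `rdb` so that results are reported
    to ResultDB.
    """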
root_dir = api.path.abs_to_path(test_manifest.root_dir)
task_requests = []
for test_group in test_manifest.test_groups:
cas_digest = api.cas_util.upload(
staging_dir=root_dir,
upload_paths=[root_dir.join(f) for f in test_group.runfiles],
step_name=f"upload files for {test_group.name}",
)
# TODO(fxbug.dev/106189): Pass `include=True` to `wrap()` so it creates
# a sub-invocation for each task. This will require granting the task
# service account permission to create new ResultDB invocations.
cmd = api.resultdb.wrap(list(test_group.command))
request = (
api.swarming.task_request()
.with_name(test_group.name)
.with_service_account(default_service_account)
.with_resultdb()
)
request = request.with_slice(
0,
request[0]
.with_command(cmd)
.with_relative_cwd(test_group.exec_dir)
.with_cas_input_root(cas_digest)
.with_cipd_ensure_file(
api.cipd.EnsureFile()
# TODO(fxbug.dev/106189): Pin these tools instead of using
# "latest".
.add_package(
"infra/tools/rdb/${platform}", "latest", subdir="luci-tools"
).add_package(
"infra/tools/luci-auth/${platform}", "latest", subdir="luci-tools"
)
)
# The command constructed by `api.resultdb.wrap()` assumes that
# `rdb` is on $PATH.
.with_env_prefixes(PATH=["luci-tools"])
.with_dimensions(pool=testing_pool, **test_group.scheduling_dimensions)
.with_execution_timeout_secs(test_group.timeout_secs)
.with_io_timeout_secs(5 * 60)
.with_expiration_secs(60 * 60),
)
task_requests.append(request)
return task_requests
def run_tests(api, task_requests):
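    """Triggers all task requests in parallel and presents their results.

    `max_attempts=1` means failed tasks are not retried.
    """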
tasks = [Task(request, api) for request in task_requests]
api.swarming_retry.run_and_present_tasks(tasks, max_attempts=1)
class Task(swarming_retry_api.TriggeredTask):
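    """A swarming_retry-compatible task; the default TriggeredTask behavior
    is sufficient here."""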
pass
def cipd_upload_from_manifest(
api, cipd_package, cipd_manifest, build_dir, repository, git_revision
):
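    """Stages the files listed in a CIPD upload manifest and uploads them as
    a single CIPD package tagged with the source git revision.

    A CIPD upload manifest might look like this (values are illustrative):
      {
        "pkg_name": "fuchsia/foo",
        "files": [{"source": "local/foo/bar", "dest": "package/foobar"}]
      }
    """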
staging_dir = api.path.mkdtemp("cipd")
tree = api.cas_util.hardlink_tree(staging_dir)
for f in cipd_manifest.files:
        # Disallow uploading files from outside the build directory, to
        # ensure that everything flows through the checkout->build->upload
        # pipeline.
        #
        # Links to files outside the build directory are still allowed.
if ".." in f.source.split("/"):
raise api.step.StepFailure(
f"CIPD upload file source must within the build directory: {f.source}"
)
abs_source = build_dir.join(*f.source.split("/"))
# For convenience, projects can specify dest="." to upload an entire
# directory as the contents of the package.
if f.dest == ".":
if len(cipd_manifest.files) > 1: # pragma: no cover
raise api.step.StepFailure(
"Only one CIPD manifest entry is allowed if any entry's destination is '.'"
)
            # No need for a tree of hardlinks anymore; we can treat the
            # source directory itself as the staging directory.
staging_dir = abs_source
tree = None
break
abs_dest = tree.root.join(*f.dest.split("/"))
tree.register_link(abs_source, linkname=abs_dest)
if tree:
tree.create_links("create hardlinks")
api.cipd_util.upload_package(
cipd_package,
staging_dir,
search_tag={"git_revision": git_revision},
repository=repository,
)
def GenTests(api):
default_remote = "https://fuchsia.googlesource.com/foo"
def properties(**kwargs):
props = {
"remote": default_remote,
"build_target": "//src:infra",
"build_options": ["--config", "fuchsia_x64"],
"default_service_account": "artifact-readers@fuchsia-infra.iam.gserviceaccount.com",
}
props.update(kwargs)
return api.properties(jsonpb.ParseDict(props, InputProperties()))
def cipd_manifest_data(files=None):
if not files:
files = {"local/foo/bar": "package/foobar"}
return api.proto.output(
CIPDUploadManifest(
pkg_name="fuchsia/foo",
files=[
CIPDUploadManifest.FileToUpload(source=k, dest=v)
for k, v in files.items()
],
)
)
yield (
api.buildbucket_util.test("fxt_tests", git_repo=default_remote)
+ properties(
gcs_bucket="foo-artifacts",
sign_artifacts=True,
fxt_options={"image_name": "image-name"},
)
+ api.step_data(
"read builder manifest",
api.file.read_proto(
BuilderManifest(
gcs_manifests=["gcs_manifest.json"],
),
),
)
+ api.step_data(
"read gcs manifest gcs_manifest.json",
stdout=api.json.output([{"source": "foo.txt", "destination": "foo.txt"}]),
)
+ api.fxt.orchestrate_fxt_tests()
)
yield (
api.buildbucket_util.test("jiri__mos__cipd", git_repo=default_remote)
+ properties(
jiri_manifest="path/to/manifest",
mos_upload_options={
"repo_hostname": "test.fuchsia-update.googleusercontent.com",
"gcs_bucket": "discover-cloud.appspot.com",
"manifest_path": "path/to/mos/manifest",
},
)
+ api.release.ref_to_release_version("releases/0.20191018.0.1")
+ api.step_data(
"read builder manifest",
api.file.read_proto(
BuilderManifest(
cipd_manifests=["cipd_manifest.json"],
),
),
)
+ api.step_data(
"read cipd manifest cipd_manifest.json",
cipd_manifest_data(
# Cover the case where we upload an entire subdirectory into the
# root of the CIPD package.
{"dir/pkg": "."},
),
)
)
yield (
api.buildbucket_util.test(
"invalid_cipd_upload",
git_repo=default_remote,
# The manifest should be validated even in presubmit.
tryjob=True,
status="failure",
)
+ properties()
+ api.step_data(
"read builder manifest",
api.file.read_proto(
BuilderManifest(
cipd_manifests=["cipd_manifest.json"],
),
),
)
+ api.step_data(
"read cipd manifest cipd_manifest.json",
cipd_manifest_data(
# Paths outside the build directory are disallowed.
{"foo/../../checkout-path": "package_path/foo/bar"}
),
)
)
yield (
api.buildbucket_util.test(
"cipd_upload_tryjob", git_repo=default_remote, tryjob=True
)
+ properties()
+ api.step_data(
"read builder manifest",
api.file.read_proto(
BuilderManifest(
cipd_manifests=["cipd_manifest.json"],
),
),
)
+ api.step_data(
"read cipd manifest cipd_manifest.json",
cipd_manifest_data(),
)
)
yield (
api.buildbucket_util.test(
"failed_tests", git_repo=default_remote, status="failure"
)
+ properties(
testing_pool="testpool",
)
+ api.step_data(
"read builder manifest",
api.file.read_proto(
BuilderManifest(
test_manifests=["test_manifest.json"],
),
),
)
+ api.step_data(
"read test manifest test_manifest.json",
api.file.read_proto(
TestManifest(
root_dir="[START_DIR]/foo",
test_groups=[
TestManifest.TestGroup(
name="group1",
command=["run-tests", "--verbose"],
exec_dir="path/to/exec/dir",
scheduling_dimensions=[("foo", "bar")],
# The first file should be ignored since its entire
# directory is included.
runfiles=["some/runtime/dep", "some/runtime"],
)
],
)
),
)
+ api.swarming_retry.collect_data(
[api.swarming_retry.failed_task(name="group1", task_id=0)]
)
)