# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe for building Fuchsia and isolating build artifacts."""
from recipe_engine import post_process
from PB.go.chromium.org.luci.buildbucket.proto import build as build_pb2
from PB.infra.fuchsia import Fuchsia
from PB.recipes.fuchsia.fuchsia.build import InputProperties
PYTHON_VERSION_COMPATIBILITY = "PY3"
# These represent the location of the CAS digest in the output of this
# recipe when building SDK archives. Must be kept in sync with sdk.py.
SDK_ARCHIVE_STEP_NAME = "isolate artifacts"
SDK_ARCHIVE_OUTPUT_KEY = "isolated_output_hash"
DEPS = [
"fuchsia/artifacts",
"fuchsia/autocorrelator",
"fuchsia/build",
"fuchsia/buildbucket_util",
"fuchsia/cas_util",
"fuchsia/checkout",
"fuchsia/cipd_util",
"fuchsia/fuchsia",
"fuchsia/gce",
"fuchsia/gerrit",
"fuchsia/git",
"fuchsia/release",
"fuchsia/sso",
"fuchsia/swarming_retry",
"fuchsia/testing_requests",
"fuchsia/testsharder",
"recipe_engine/buildbucket",
"recipe_engine/context",
"recipe_engine/json",
"recipe_engine/path",
"recipe_engine/properties",
"recipe_engine/raw_io",
"recipe_engine/step",
"recipe_engine/swarming",
]
PROPERTIES = InputProperties
def RunSteps(api, props):
if props.parent_id:
# Present a link to make it easier to navigate to the parent's build
# results page.
with api.step.nest("parent build") as presentation:
presentation.links[props.parent_id] = parent_build_url(api, props.parent_id)
spec = props.spec
api.fuchsia.setup(spec)
bb_input = api.buildbucket.build.input
if spec.build.run_tests and not props.parent_id:
raise api.step.empty(
"no parent_id",
step_text="subbuilds can only be triggered by parent builds",
status=api.step.INFRA_FAILURE,
)
checkout = api.checkout.from_spec(
spec.checkout, use_incremental_cache=spec.build.incremental
)
with api.step.nest("got_revision") as presentation:
# Set got_revision to the baseline integration revision for the binary-size
# Gerrit plugin.
# TODO(olivernewman): Move this into the checkout recipe module, as it's
# not specific to this recipe.
presentation.properties[
api.checkout.GOT_REVISION_PROPERTY
] = checkout.integration_revision
# Load test modifiers before building so we can fail fast if the message is
# malformed, which avoids wasting a build.
multipliers = []
if bb_input.gerrit_changes and not props.collect_coverage:
commit_msg = get_commit_msg(api, bb_input, checkout)
with api.step.nest("test multipliers") as presentation:
multipliers = api.testsharder.extract_multipliers(commit_msg)
presentation.logs["multipliers"] = api.json.dumps(
[m.render_to_jsonish() for m in multipliers], indent=2
)
if api.testsharder.should_run_all_tests(commit_msg):
spec.test.skip_unaffected_tests = False
    # Give SDK subbuilds their own namespaces for upload so they do not
    # clobber each other when multiple subbuilds run.
if not props.parent_id or spec.build.sdk_subbuild:
upload_namespace = api.buildbucket_util.id
else:
upload_namespace = props.parent_id
if spec.artifact_gcs_bucket:
checkout.upload_results(spec.artifact_gcs_bucket, namespace=upload_namespace)
repo_path = None
if props.perfcompare:
# Do input checks to catch problems before doing the build.
assert (
bb_input.gerrit_changes
), "perfcompare mode is for CQ builds only, not CI: no Gerrit changes found"
project = bb_input.gerrit_changes[0].project
with api.context(cwd=checkout.root_dir):
repo_path = checkout.project(project)["path"]
# In perfcompare mode, we want each test to run a predictable number
# of times, regardless of the files being changed.
    # In coverage mode, we want to collect all possible coverage from all
    # tests, so we don't want to abort early if an affected test task fails.
use_affected_tests = not props.perfcompare and (
props.affected_tests_only or not props.collect_coverage
)
with api.autocorrelator.context(
ci_base_commit=checkout.integration_revision, ignore_skipped_build=True
):
run_build_steps(
api,
props,
spec,
upload_namespace,
checkout,
bb_input,
multipliers,
without_cl=False,
use_affected_tests=use_affected_tests,
)
if props.perfcompare:
with api.step.nest("build without CL"):
# Unapply the topmost Git commit that was applied from Gerrit. If
# the CQ is testing a stack of multiple CLs from Gerrit, the other
# CLs are left applied.
# TODO(mseaborn): This does not handle cases where the CL changed
# Jiri manifest files or contained a patches.json file.
api.git.raw_checkout(
step_name='git checkout of "without CL" revision',
ref="HEAD^",
directory=repo_path,
)
run_build_steps(
api,
props,
spec,
upload_namespace,
checkout,
bb_input,
multipliers,
without_cl=True,
use_affected_tests=use_affected_tests,
)
def run_build_steps(
api,
props,
spec,
upload_namespace,
checkout,
bb_input,
modifiers,
without_cl,
use_affected_tests,
):
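    """Runs the Fuchsia build and postprocesses its results.

    When without_cl is True, the build runs against the checkout with the
    topmost Gerrit CL unapplied (perfcompare mode), and outputs are uploaded
    under a separate "_without_cl" namespace.
    """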
if spec.build.upload_results:
assert spec.gcs_bucket, "gcs_bucket must be set if build.upload_results is"
if without_cl:
upload_namespace += "_without_cl"
build_results = api.build.with_options(
checkout,
spec.build.fint_params_path,
allow_dirty=without_cl,
collect_coverage=props.collect_coverage,
incremental=spec.build.incremental,
sdk_id=props.sdk_id if spec.build.sdk_subbuild else None,
artifact_gcs_bucket=spec.artifact_gcs_bucket,
timeout_secs=spec.build.timeout_secs or 90 * 60,
upload_namespace=upload_namespace,
)
postprocess_build(
api,
spec,
props.parent_id,
upload_namespace,
checkout,
bb_input,
modifiers,
without_cl,
props.comment_led,
use_affected_tests,
props.affected_tests_only,
build_results,
)
def postprocess_build(
api,
spec,
parent_id,
upload_namespace,
checkout,
bb_input,
modifiers,
without_cl,
comment_led,
use_affected_tests,
affected_tests_only,
build_results,
):
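    """Handles all post-build steps: sharding, uploads, and output properties.

    Returns early if the build was skipped as unaffected, or if affected-test
    analysis concluded that there is no work for this build to do.
    """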
had_input_multipliers = bool(modifiers)
with api.step.nest("check if build skipped") as presentation:
presentation.properties["skipped_because_unaffected"] = not build_results
if not build_results:
return
# Used by ManagedOS.
presentation.properties["target_arch"] = build_results.set_metadata.target_arch
# For simplicity we pass them into testsharder.execute() even if use_affected_tests
# is False; initialize them here so we can do that.
affected_tests = []
affected_tests_max_attempts = 0
if spec.build.run_tests and not without_cl:
affected_tests = build_results.affected_tests
with api.step.nest("record affected_tests_no_work") as presentation:
presentation.properties["affected_tests_no_work"] = build_results.no_work
if build_results.no_work:
build_results.upload_tracing_data(
gcs_bucket=spec.artifact_gcs_bucket,
namespace=upload_namespace,
)
if spec.build.upload_results:
assert spec.gcs_bucket
build_results.upload(
gcs_bucket=spec.gcs_bucket,
is_release_version=spec.checkout.is_release_version,
namespace=upload_namespace,
)
return
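        # Determine the default number of within-task attempts for each test:
        # one if failed tests will instead be retried in separate swarming
        # tasks, otherwise the spec's value or the swarming_retry default.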
max_attempts_per_test = 1
if not spec.test.retry_task_on_test_failure:
            max_attempts_per_test = (
                spec.test.max_attempts_per_test
                or api.swarming_retry.DEFAULT_MAX_ATTEMPTS
            )
affected_tests_max_attempts = max_attempts_per_test
# Add modifiers to specify that these tests are affected.
if (
not spec.test.affected_tests_multiply_threshold
and use_affected_tests
and affected_tests
):
modifiers.extend(
api.testsharder.affected_test_modifiers(
affected_tests, affected_tests_max_attempts
)
)
# Set max_attempts for unaffected tests from the spec.
max_attempts_per_test = (
spec.test.max_attempts_per_test or max_attempts_per_test
)
if use_affected_tests or not spec.test.retry_task_on_test_failure:
# Add a default modifier to set max attempts for any other tests.
modifiers.append(
api.testsharder.TestModifier(
name="*",
total_runs=-1,
max_attempts=max_attempts_per_test,
)
)
build_results.upload_tracing_data(
gcs_bucket=spec.artifact_gcs_bucket,
namespace=upload_namespace,
)
if spec.build.upload_results:
assert spec.gcs_bucket
build_results.upload(
gcs_bucket=spec.gcs_bucket,
is_release_version=spec.checkout.is_release_version,
namespace=upload_namespace,
)
if spec.build.assembly_artifacts_cipd_package:
with api.step.nest("publish assembly artifacts") as presentation:
# TODO(fxbug.dev/87010): Use a generic hardlinking solution
# instead of relying on a CAS utility for CIPD-related logic.
tree = api.cas_util.hardlink_tree(api.path.mkdtemp("assembly-artifacts"))
for dest, source in build_results.cipd_assembly_artifacts.items():
tree.register_link(source, tree.root.join(dest))
tree.create_links("create links")
            # Upload to CIPD for releases; otherwise upload to CAS so that it
            # may be used by downstream presubmits.
if spec.checkout.is_release_version:
integration_repo = checkout.root_dir.join("integration")
assert checkout.release_version, "failed to resolve release version"
cipd_ref = resolve_cipd_ref_to_update(
api,
spec.checkout.remote,
integration_repo,
checkout.release_version,
)
api.cipd_util.upload_package(
pkg_name=spec.build.assembly_artifacts_cipd_package,
pkg_root=tree.root,
refs=[cipd_ref] if cipd_ref else None,
metadata=[("version", str(checkout.release_version))],
search_tag={"git_revision": checkout.integration_revision},
)
else:
cas_digest = api.cas_util.upload(tree.root)
# This is named generically since we may upload more artifacts
# to CAS in the future.
# TODO(atyfto): Currently this property is only realistically
# set once, but theoretically it can be overwritten. Find a more
# robust way to handle this.
presentation.properties["cas_digests"] = {
spec.build.assembly_artifacts_cipd_package: cas_digest,
}
if spec.build.report_binary_sizes:
build_results.check_size_budgets()
# In SDK subbuild mode, upload SDK archive and ninja targets to CAS.
if spec.build.sdk_subbuild:
upload_paths = []
for archive in build_results.gn_results.sdk_archives:
api.path.mock_add_paths(archive)
# TODO(fxbug.dev/92108): It will be safe to assume all archives are
# built after sdk builders are migrated to build the high-level sdk
# archives target.
if api.path.exists(archive):
upload_paths.append(archive)
if upload_paths:
sdk_archive_digest = api.cas_util.upload(
# Assumes all SDK archives are in the same directory.
api.path.dirname(upload_paths[0]),
upload_paths,
step_name=SDK_ARCHIVE_STEP_NAME,
)
api.step.active_result.presentation.properties[
SDK_ARCHIVE_OUTPUT_KEY
] = sdk_archive_digest
# Must be set before testing_requests.task_requests() is called.
api.artifacts.gcs_bucket = spec.artifact_gcs_bucket
api.artifacts.namespace = upload_namespace
# If the user didn't specifically request multipliers, testsharder
# may still have produced them. In that case don't report anything.
should_report_multipliers = had_input_multipliers
if spec.build.run_tests:
if parent_id.isdigit():
# Use parent build so that testing task requests refer to
# that build, which actually orchestrates testing.
buildbucket_build = api.buildbucket.get(int(parent_id))
# If it's a try build, the parent build will not have its
# gitiles_commit populated (it's populated at runtime by
# recipe_bootstrap, but that doesn't change the input values stored
# in Buildbucket). But the commit will have been passed through to
# the subbuild via the Buildbucket ScheduleBuild API, so we can get
# it from the current build.
buildbucket_build.input.gitiles_commit.CopyFrom(bb_input.gitiles_commit)
else:
# When the parent was launched by led, it's not possible to retrieve
# the parent build, so we fall back to using our own build.
# This is technically incorrect and any tests that rely on having
# correct buildbucket metadata may fail when run via led. Ideally
# we wouldn't have any tests that knew about buildbucket, but
# for now this is OK since none of those tests run in recipes CQ,
# which uses led to test recipes changes.
buildbucket_build = api.buildbucket.build
if not comment_led:
should_report_multipliers = False
build_url = parent_build_url(api, parent_id)
shards = api.testsharder.execute(
"create test shards",
testsharder_path=build_results.tool("testsharder"),
build_dir=build_results.build_dir,
max_shard_size=spec.test.max_shard_size,
target_duration_secs=spec.test.target_shard_duration_secs,
per_test_timeout_secs=spec.test.per_test_timeout_secs,
max_shards_per_env=spec.test.max_shards_per_env,
modifiers=modifiers,
tags=spec.build.environment_tags,
# TODO(fxbug.dev/50301): Remove "and" once rolled out.
use_affected_tests=(
use_affected_tests and spec.test.affected_tests_multiply_threshold
),
affected_tests=affected_tests,
affected_tests_multiply_threshold=spec.test.affected_tests_multiply_threshold,
affected_tests_max_attempts=affected_tests_max_attempts,
affected_only=affected_tests_only and affected_tests,
image_deps=spec.test.use_cas_for_images,
hermetic_deps=spec.test.use_cas,
pave=spec.test.pave,
disabled_device_types=spec.test.disabled_device_types,
skip_unaffected_tests=(
spec.test.skip_unaffected_tests
# Recipe and integration changes can impact tests in ways that
# aren't encompassed by affected tests analysis, so we should
# run all tests on such changes.
and not checkout.contains_integration_patch
and not api.recipe_testing.enabled
),
)
if bb_input.gerrit_changes and should_report_multipliers:
gerrit_change = bb_input.gerrit_changes[0]
report_multipliers(api, shards, gerrit_change, build_url)
shards_to_run = [s for s in shards if not s.should_skip]
task_requests = api.testing_requests.task_requests(
build_results,
buildbucket_build,
spec.test.pool,
shards_to_run,
spec.test.swarming_expiration_timeout_secs,
spec.test.swarming_io_timeout_secs,
spec.test.use_runtests,
spec.test.timeout_secs,
default_service_account=spec.test.default_service_account,
pave=spec.test.pave,
targets_serial=spec.test.targets_serial,
catapult_dashboard_master=spec.test.catapult_dashboard_master,
catapult_dashboard_bot=spec.test.catapult_dashboard_bot,
release_branch=checkout.release_branch,
release_version=checkout.release_version,
test_on_gce=spec.test.test_on_gce,
zircon_args=spec.test.zircon_args,
gcem_host=spec.test.gce_mediator.endpoint,
gcem_cloud_project=spec.test.gce_mediator.cloud_project,
gcem_machine_shape=spec.test.gce_mediator.machine_shape,
use_ffx=spec.test.use_ffx,
ffx_experiment_level=spec.test.ffx_experiment_level,
use_cas=spec.test.use_cas or spec.test.use_cas_for_images,
)
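        # Bundle everything the parent build needs to orchestrate testing
        # (task requests, shards, and supporting build outputs), upload it to
        # CAS, and emit the digest as an output property for the parent.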
orchestration_inputs = api.build.test_orchestration_inputs_from_build_results(
build_results,
task_requests,
shards,
include_generated_sources=any(
v in build_results.set_metadata.variants
for v in ["coverage", "coverage-rust", "coverage-cts", "profile"]
),
)
orchestration_inputs_digest = orchestration_inputs.upload()
dest_property = api.build.test_orchestration_inputs_property_name(without_cl)
api.step.empty("emit orchestration_inputs_hash").presentation.properties[
dest_property
] = orchestration_inputs_digest
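    # Upload build artifacts to the artifact GCS bucket so they are available
    # to test tasks and other downstream consumers.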
if spec.artifact_gcs_bucket:
api.artifacts.upload(
"upload artifacts",
build_results,
upload_host_tests=spec.build.run_tests,
sign_artifacts=spec.build.sign_artifacts,
)
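    # Ask the GCE mediator to create a GCE image from this build's images so
    # that tests can run on GCE instances.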
if spec.test.test_on_gce and spec.build.run_tests:
api.gce.create_image(
spec.test.gce_mediator.endpoint,
spec.test.gce_mediator.cloud_project,
build_results.images,
api.buildbucket.build.infra.swarming.parent_run_id,
spec.artifact_gcs_bucket,
upload_namespace,
)
if spec.build.report_binary_sizes and spec.build.size_diff_ci_bucket:
# Run this check as late as possible to increase the odds that a
# baseline CI build is found.
build_results.check_size_creep(
api.sso.sso_to_https(spec.checkout.remote),
checkout.integration_revision,
spec.build.size_diff_ci_bucket,
ci_builder=spec.build.size_diff_ci_builder,
gerrit_changes=bb_input.gerrit_changes,
size_creep_label=spec.build.size_creep_label,
)
def parent_build_url(api, parent_id):
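    """Returns a link to the parent's results page.

    A numeric parent_id is a Buildbucket build ID; otherwise it's assumed to
    be the Swarming task ID of a led-launched parent.
    """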
if parent_id.isdigit():
return "https://ci.chromium.org/b/%s" % parent_id
return "https://ci.chromium.org/swarming/task/%s?server=%s" % (
api.swarming.task_id,
api.buildbucket.build.infra.swarming.hostname,
)
def get_commit_msg(api, bb_input, checkout):
"""Gets the commit message for a gerrit change from source info."""
gerrit_change = bb_input.gerrit_changes[0]
change_remote = "https://%s/%s" % (
gerrit_change.host.replace("-review", ""),
gerrit_change.project,
)
project_dir = None
for repo in checkout.source_info:
relpath = repo["relativePath"]
if api.sso.sso_to_https(repo["remote"]) == change_remote:
if relpath == ".":
project_dir = checkout.root_dir
else:
project_dir = checkout.root_dir.join(relpath)
break
if not project_dir:
return ""
with api.context(cwd=project_dir):
commit_msg = api.git.get_commit_message(step_name="get commit msg")
return commit_msg
def report_multipliers(api, shards, gerrit_change, build_url):
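    """Comments on the Gerrit change if any multiplier shards were created."""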
has_multiplier_shards = False
for shard in shards:
# A multiplier shard will start with "multiplied:".
# TODO(fxb/51896): Remove dependency on shard name.
if shard.name.startswith("multiplied:"):
has_multiplier_shards = True
break
if has_multiplier_shards:
set_gerrit_comment(
api,
"report multiplier shards",
gerrit_change,
(
"A builder created multiplier shards. Click the following "
"link for more details: %s" % build_url
),
)
def set_gerrit_comment(api, step_name, gerrit_change, message):
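    """Posts a comment on the Gerrit change, tolerating failures."""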
try:
api.gerrit.set_review(
step_name,
str(gerrit_change.change),
message=message,
test_data=api.json.test_api.output({}),
)
except api.step.StepFailure:
# Comment failures shouldn't fail the build.
pass
def resolve_cipd_ref_to_update(api, remote, integration_repo, release_version):
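    """Returns the CIPD ref to update, or None if no ref should be updated.

    "latest" is returned only if the release version is present on the canary
    release branch.
    """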
# TODO(fxbug.dev/99452): Configure the branch->ref mapping via properties,
# like the sdk recipe does.
canary_head = api.git.get_remote_branch_head(
api.sso.sso_to_https(remote), "refs/heads/releases/canary"
)
with api.context(cwd=integration_repo):
api.git.fetch("origin", refspec=canary_head)
canary_release_versions = api.release.get_release_versions(
ref=canary_head, repo_path=integration_repo
)
return "latest" if release_version in canary_release_versions else None
def GenTests(api):
def properties(
sdk_subbuild=False,
run_tests=True,
gcs_bucket=None,
pave=True,
catapult_dashboard_master=None,
catapult_dashboard_bot=None,
max_attempts_per_test=0,
retry_task_on_test_failure=False,
gce_mediator=None,
size_diff_ci_bucket=None,
size_creep_label=None,
test_on_gce=False,
build_timeout_secs=0,
# We rely on the buildbucket test API using this same
# ID for ci_build_message and the builds returned by get().
parent_id=str(api.buildbucket.ci_build_message().id),
is_release_version=False,
**kwargs
):
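        """Returns InputProperties test data with a realistic spec."""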
test_spec = None
if run_tests:
test_spec = Fuchsia.Test(
max_shard_size=0,
target_shard_duration_secs=10 * 60,
per_test_timeout_secs=5 * 60,
max_shards_per_env=8,
timeout_secs=30 * 60,
pool="fuchsia.tests",
swarming_expiration_timeout_secs=10 * 60,
swarming_io_timeout_secs=5 * 60,
default_service_account="service_account",
targets_serial=True,
test_on_gce=test_on_gce,
pave=pave,
catapult_dashboard_master=catapult_dashboard_master,
catapult_dashboard_bot=catapult_dashboard_bot,
max_attempts_per_test=max_attempts_per_test,
retry_task_on_test_failure=retry_task_on_test_failure,
gce_mediator=gce_mediator,
)
return api.properties(
parent_id=parent_id,
spec=Fuchsia(
checkout=Fuchsia.Checkout(
manifest="minimal",
project="integration",
remote="https://fuchsia.googlesource.com/manifest",
is_release_version=is_release_version,
),
build=Fuchsia.Build(
run_tests=run_tests,
sdk_subbuild=sdk_subbuild,
upload_results=bool(gcs_bucket),
timeout_secs=build_timeout_secs,
fint_params_path="fint_params/core.textproto",
report_binary_sizes=True,
size_diff_ci_bucket=size_diff_ci_bucket,
size_creep_label=size_creep_label,
assembly_artifacts_cipd_package="fuchsia/assembly-inputs/core.x64",
),
test=test_spec,
gcs_bucket=gcs_bucket,
artifact_gcs_bucket="fuchsia-infra-artifacts",
),
**kwargs
)
integration_remote = "https://fuchsia.googlesource.com/integration"
def test(name, status="success", tryjob=True, source_info=True, **kwargs):
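        """Returns a recipe test case with default source info test data."""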
ret = api.buildbucket_util.test(name, tryjob=tryjob, status=status, **kwargs)
if source_info:
ret += api.checkout.source_info(
[
{
"name": "integration",
"remote": integration_remote,
"revision": "a491082dc1b632bbcd60ba3618d20b503c2de738",
"relativePath": "integration",
},
{
"name": "fuchsia",
"remote": "https://fuchsia.googlesource.com/fuchsia",
"revision": "a491082dc1b632bbcd60ba3618d20b503c2de738",
"relativePath": ".",
},
]
)
return ret
yield (
test("default", tryjob=False)
+ properties(
gcs_bucket="fuchsia-infra",
run_tests=True,
)
+ api.build.fint_set_artifacts(metadata=dict(variants=["profile"]))
+ api.build.create_shards()
)
yield (
test("non_numeric_parent_id", tryjob=False)
+ properties(
parent_id="not-a-number", gcs_bucket="fuchsia-infra", run_tests=True
)
+ api.build.create_shards()
)
yield (
test("subbuild_no_parent_id", status="infra_failure", source_info=False)
+ properties(run_tests=True, parent_id="")
)
# Test the case where the test spec includes fields that enable uploading
# to the Catapult performance dashboard.
yield (
test("catapult_dashboard_upload_enabled", tryjob=False)
+ properties(
gcs_bucket="fuchsia-infra",
run_tests=True,
catapult_dashboard_master="example.fuchsia.global.ci",
catapult_dashboard_bot="example-core.x64-nuc",
)
+ api.build.create_shards()
)
yield (
test("default_cq", tryjob=True)
+ properties(
gcs_bucket="fuchsia-infra",
size_diff_ci_bucket="ci",
size_creep_label="Size-Review",
run_tests=True,
max_attempts_per_test=5,
build_timeout_secs=120 * 60,
)
+ api.build.create_shards()
+ api.step_data(
"check size creep.diff ci",
api.json.output(
{
"component_diffs": [
{
"name": "componentA",
"baseline_size": 16,
"size": 32,
"size_diff": 16,
"budget": 48,
"creep_budget": 8,
"budget_exceeded": False,
"creep_budget_exceeded": True,
},
],
"creep_budget_exceeded": True,
"baseline_build_id": 123456,
}
),
)
+ api.step_data(
"check size creep.get change details",
api.json.output(
{
"labels": {
"Size-Review": {
"approved": {
"email": "size-approver@google.com",
}
},
},
}
),
)
)
    # Test that max_attempts_per_test is ignored when no affected tests are
    # detected, and that the default number of within-task attempts is 1 when
    # retry_task_on_test_failure is True.
yield (
test("default_cq_no_affected_retry_task_on_test_failure", tryjob=True)
+ properties(
gcs_bucket="fuchsia-infra",
run_tests=True,
# This field will be ignored if no affected tests are detected.
max_attempts_per_test=5,
# This will cause default within-task attempts to be 1.
retry_task_on_test_failure=True,
)
+ api.build.fint_build_artifacts(affected_tests=[])
+ api.build.create_shards()
)
yield (
test("skip_if_unaffected", tryjob=True)
+ properties(gcs_bucket="fuchsia-infra")
+ api.build.fint_set_artifacts(skip_build=True)
)
yield (
test("affected_tests_no_work", tryjob=True)
+ properties(gcs_bucket="fuchsia-infra")
+ api.build.fint_build_artifacts(build_not_affected=True)
)
yield (
test(
"default_multipliers",
tryjob=True,
# Values chosen to match source_info so that we trigger the test
# multipliers code path.
project="integration",
git_repo=integration_remote,
)
+ properties(run_tests=True)
+ api.build.create_shards(with_multipliers=True)
+ api.step_data(
"get commit msg",
api.raw_io.stream_output_text("Foo\n\nMultiply: foo_tests: 123"),
)
+ api.step_data("report multiplier shards", retcode=1)
+ api.post_process(post_process.MustRun, "isolate test orchestration inputs")
)
yield (
test(
"run_all_tests",
tryjob=True,
# Values chosen to match source_info so that we trigger the
# run-all-tests code path.
project="integration",
git_repo=integration_remote,
)
+ properties(run_tests=True)
+ api.build.create_shards(with_multipliers=True)
+ api.step_data(
"get commit msg",
api.raw_io.stream_output_text("Foo\n\nRun-All-Tests: True"),
)
+ api.post_process(post_process.MustRun, "isolate test orchestration inputs")
)
yield (
test("sdk", tryjob=False)
+ properties(
run_tests=False, sdk_subbuild=True, sdk_id="sdk-id", is_release_version=True
)
+ api.release.ref_to_release_version(
"releases/0.20191018.0.1", nesting="checkout.resolve release version"
)
+ api.step_data(
"publish assembly artifacts.get release versions on h3ll0",
api.raw_io.stream_output_text(
"\n".join(
[
"releases/0.20191018.0.1",
"releases/0.20191018.0.2",
]
)
),
)
+ api.build.fint_build_artifacts()
)
yield (
test(
"cq_perfcompare",
tryjob=True,
repo="third_party/example_repo",
)
+ properties(
gcs_bucket="fuchsia-infra",
run_tests=True,
max_attempts_per_test=5,
perfcompare=True,
)
+ api.build.create_shards()
+ api.build.create_shards(nesting="build without CL")
)
yield (
test("test_on_gce", tryjob=False)
+ properties(
gcs_bucket="fuchsia-infra",
run_tests=True,
gce_mediator=Fuchsia.Test.GCEMediator(
endpoint="gcem-endpoint", cloud_project="gcem-cloud-project"
),
test_on_gce=True,
)
+ api.buildbucket.build(
build_pb2.Build(
infra=build_pb2.BuildInfra(
swarming=build_pb2.BuildInfra.Swarming(
hostname="chrome-swarming.appspot.com",
parent_run_id=" 50a11839dfdb5911",
)
)
)
)
+ api.build.create_shards()
)
yield (
test("failed_build_cq", tryjob=True, status="failure")
+ properties(
gcs_bucket="fuchsia-infra",
run_tests=False,
**{"$fuchsia/autocorrelator": {"ci_bucket": "ci", "ci_builder": "builder"}}
)
+ api.step_data("build.ninja", retcode=1)
+ api.autocorrelator.check_try(
[{"build_id": "456", "score": 0.98, "is_green": False}]
)
+ api.autocorrelator.check_ci(
{
"build_id": "789",
"score": 0.96,
"is_green": False,
"commit_dist": 0,
}
)
)