# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" fuchsia.py - Builds and optionally tests Fuchsia.
# Execution overview
## Checkout + Build
This recipe triggers a child build which runs the fuchsia/build recipe.
That recipe checks out the source code and builds it. This recipe
retrieves the data required to orchestrate tests via CAS.
## Test
If configured to run tests, this recipe uses the test orchestration data to run tests.
That logic is in the testing recipe module. Under the hood, that module
triggers Swarming tasks that do the actual testing, waits for them, and
reports the results.
## External Tests
If configured to run external tests, this recipe invokes various tools to pass
build artifacts to external infrastructures, trigger tests, waits for them, and
reports the results.
"""
from google.protobuf import json_format
from google.protobuf import struct_pb2
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
from PB.go.chromium.org.luci.buildbucket.proto import step as step_pb2
from PB.recipe_engine.result import RawResult
from PB.recipe_modules.recipe_engine.led.properties import (
InputProperties as LedInputProperties,
)
from PB.recipes.fuchsia.fuchsia.build import InputProperties as SubbuildInputProperties
from PB.recipes.fuchsia.fuchsia.fuchsia import InputProperties
from PB.recipes.fuchsia.fuchsia.spec import Fuchsia

DEPS = [
"fuchsia/artifacts",
"fuchsia/autocorrelator",
"fuchsia/build",
"fuchsia/buildbucket_util",
"fuchsia/cas_util",
"fuchsia/checkout",
"fuchsia/fuchsia",
"fuchsia/fxt",
"fuchsia/recipe_testing",
"fuchsia/subbuild",
"fuchsia/swarming_retry",
"fuchsia/testing",
"recipe_engine/buildbucket",
"recipe_engine/cas",
"recipe_engine/file",
"recipe_engine/led",
"recipe_engine/path",
"recipe_engine/properties",
"recipe_engine/step",
"recipe_engine/swarming",
]

PROPERTIES = InputProperties


def RunSteps(api, props):
spec = props.spec
api.fuchsia.setup(spec)
    # At some stage Buildbucket stores properties as google.protobuf.Value,
    # which converts all numbers (including ints) to floats. That conversion
    # is lossy for large ints, so we have to use a string property and
    # convert to int internally.
    child_build_id = int(props.child_build_id) if props.child_build_id else None
orchestrator_id = api.buildbucket_util.id
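    # External (FXT) tests run only when at least one TAP, Guitar, or Piper
    # presubmit option is set.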
run_fxt_tests = any(
[
props.fxt_options.tap_projects,
props.fxt_options.guitar_config,
props.fxt_options.piper_presubmit,
]
)
if not spec.build.run_tests and not run_fxt_tests:
raise api.step.InfraFailure(
"if not running tests, use the fuchsia/build recipe directly"
)
# Configure context of uploaded artifacts for test task construction and to
# get link to build artifacts.
api.artifacts.gcs_bucket = spec.artifact_gcs_bucket
api.artifacts.namespace = orchestrator_id
# Use status="last" in case the buildbucket collect step experiences an
# infra failure but we successfully retry it, in which case everything is
# fine so we shouldn't surface the failure any higher.
with api.step.nest("build", status="last") as presentation:
child_input_properties = SubbuildInputProperties(
parent_id=orchestrator_id,
# Set by the coverage.py recipe to be passed through to build.py.
affected_tests_only=props.affected_tests_only,
# Set by the coverage.py recipe to be passed through to build.py.
coverage_collection=props.coverage_collection,
)
child_build, child_props = run_build_steps(
api,
presentation,
child_build_id,
json_format.MessageToDict(
child_input_properties, preserving_proto_field_name=True
),
)
skipped_because_unaffected = bool(child_props.get("skipped_because_unaffected"))
affected_tests_no_work = bool(child_props.get("affected_tests_no_work"))
    if (
        skipped_because_unaffected or affected_tests_no_work
    ) and not api.recipe_testing.enabled:
# TODO(fxbug.dev/119485): Point to fuchsia.dev instead.
message = (
"Testing skipped because the change did not affect the build graph. "
"To run all tests in spite of affected test analysis, see "
"http://go/fxi-faq#how-do-i-skip-unaffected-test-analysis."
)
presentation.step_text = message
presentation.properties["skipped_because_unaffected"] = (
skipped_because_unaffected
)
presentation.properties["affected_tests_no_work"] = affected_tests_no_work
return RawResult(summary_markdown=message, status=common_pb2.SUCCESS)
# Present the link to the build artifacts uploaded by the child build.
if api.artifacts.gcs_bucket:
# If child_build_id is provided, the namespace used for the
# artifacts URL must be retrieved from the `parent_id` input
# property of the child build.
if child_build_id:
api.artifacts.namespace = child_input_properties.parent_id
presentation.links["build_artifacts"] = api.artifacts.cloud_storage_url()
# Reset the artifacts namespace to the current orchestrator id if
# changed above.
api.artifacts.namespace = orchestrator_id
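        # Download the test orchestration inputs (task requests, triage
        # sources, etc.) that the child build uploaded to CAS.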
if spec.build.run_tests:
(
orchestration_inputs,
orchestration_inputs_hash,
) = collect_test_orchestration_inputs(api, child_build, without_cl=False)
if props.perfcompare:
orchestration_inputs_without_cl, _ = collect_test_orchestration_inputs(
api, child_build, without_cl=True
)
# Copy to output properties so the coverage recipe can access it.
presentation.properties[
api.build.test_orchestration_inputs_property_name(False)
] = orchestration_inputs_hash
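    # Run the tests with the CL applied. The autocorrelator context checks
    # failures for correlation with nearby builds so that breakage unrelated
    # to the CL can be flagged.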
if spec.build.run_tests:
default_runs_per_shard = 5 if props.perfcompare else 1
runs_per_shard = props.runs_per_shard or default_runs_per_shard
try:
with api.autocorrelator.context(
ci_base_commit=child_props.get(api.checkout.GOT_REVISION_PROPERTY),
ignore_failed_build=True,
ignore_skipped_tests=True,
):
results_with_cl = run_test_steps(
api, props, orchestration_inputs, spec, runs_per_shard
)
except api.step.StepFailure as exc:
if (
spec.checkout.do_not_rebase_patch
and api.buildbucket_util.is_tryjob
and api.autocorrelator.AUTOCORRELATOR_HEADER not in exc.reason
):
summary_markdown = (
"This builder does not rebase on top of HEAD, which may have "
+ "caused the following failure(s). If it looks unrelated to "
+ "your CL, try rebasing the CL and relaunching the builder.\n\n"
+ exc.reason
)
raise api.step.StepFailure(summary_markdown)
raise exc
if props.perfcompare:
with api.step.nest("test without CL"):
results_without_cl = run_test_steps(
api,
props,
orchestration_inputs_without_cl,
spec,
runs_per_shard,
)
else:
results_without_cl = None
if results_with_cl:
display_performance_results(
api,
orchestration_inputs.perfcompare,
results_without_cl,
results_with_cl,
)
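    # Hand off build artifacts to the external (FXT) test infrastructure,
    # wait for those tests to finish, and report their results.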
if run_fxt_tests:
api.fxt.orchestrate_fxt_tests(
bucket=api.artifacts.gcs_bucket,
namespace=api.artifacts.namespace,
options=props.fxt_options,
guitar_test_filter_exprs=props.guitar_test_filter_exprs,
)
if spec.build.run_tests:
message = api.testing.passed_test_msg(results_with_cl)
return RawResult(summary_markdown=message, status=common_pb2.SUCCESS)


def run_build_steps(
api,
presentation,
child_build_id,
child_input_properties,
):
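    """Triggers and collects the child build, or reuses an existing one.

    Args:
        presentation (StepPresentation): The "build" step presentation to
            attach links, logs, and output properties to.
        child_build_id (int or None): If set, the ID of an already-launched
            child build to reuse instead of triggering a new one.
        child_input_properties (dict): Input properties to pass through to
            the child build.

    Returns:
        (Build, dict): The child build proto and its output properties.

    Raises:
        StepFailure or InfraFailure if the child build did not succeed.
    """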
builder_name = f"{api.buildbucket.build.builder.builder}-subbuild"
extra_fields = {
# We only care about these fields for the subbuild's top-level
# "build" step, but unfortunately there's no way to create a
# field mask that selects only steps with a certain name.
"steps.*.name",
"steps.*.summary_markdown",
"steps.*.logs.*.name",
"steps.*.logs.*.view_url",
}
if child_build_id:
        # Make it clear in the UI that an existing child build was reused
        # rather than a new one being triggered.
presentation.step_text = "Reusing child build instead of triggering"
output_build = api.buildbucket.get(
child_build_id, fields=api.buildbucket.DEFAULT_FIELDS.union(extra_fields)
)
build_url = f"https://ci.chromium.org/b/{int(child_build_id)}"
presentation.links[builder_name] = build_url
else:
builds = api.subbuild.launch(
[builder_name], presentation, extra_properties=child_input_properties
)
build_id = builds[builder_name].build_id
build_url = builds[builder_name].url
if not api.led.launched_by_led:
# Consumed by the performance test culprit finder
# (http://go/tq-culprit-finder).
presentation.properties["child_build_id"] = str(build_id)
builds = api.subbuild.collect([build_id], extra_fields=extra_fields)
output_build = builds[build_id].build_proto
# Copy various output properties from the child build to the parent so that
# tools don't need to look up the child build in order to get these values.
output_props = json_format.MessageToDict(output_build.output.properties)
for prop in (
# Emitted by fuchsia/build.py, and used by ManagedOS.
"target_arch",
api.artifacts.GCS_BUCKET_PROPERTY,
api.build.FINT_PARAMS_PATH_PROPERTY,
api.checkout.CHECKOUT_INFO_PROPERTY,
api.checkout.GOT_REVISION_PROPERTY,
api.checkout.REVISION_COUNT_PROPERTY,
api.checkout.CACHED_REVISION_PROPERTY,
# Used for fxbug.dev/132256.
"rpl_files",
):
if prop in output_props:
presentation.properties[prop] = output_props[prop]
# The subbuild attaches many useful logs to its top-level build step. Copy
# them through so they're accessible from the parent build without needing
# to click through to the subbuild.
for step in output_build.steps:
if step.name == api.build.BUILD_STEP_NAME:
presentation.links.update({log.name: log.view_url for log in step.logs})
# Step links are not part of the build protocol and are instead
# included in the step summary_markdown, so we must copy that over
# to get the links.
presentation.step_summary_text = step.summary_markdown
if output_build.status != common_pb2.SUCCESS:
presentation.properties["failed_to_build"] = True
if output_build.status == common_pb2.INFRA_FAILURE:
exception_type = api.step.InfraFailure
description = "raised infra failure"
else:
exception_type = api.step.StepFailure
description = "failed"
# Copy the child summary markdown into the parent summary markdown to
# better propagate error messages. If the child summary is multiple lines,
# start it on a new line.
subbuild_summary = output_build.summary_markdown.strip()
summary = f"[build]({build_url}) {description}"
if subbuild_summary:
summary += ":"
# If the subbuild summary is already multiple lines, start it on a
# new line. If it's one line, the final summary should also be one
# line.
summary += "\n\n" if "\n" in subbuild_summary else " "
summary += subbuild_summary
raise exception_type(summary)
return output_build, output_props


def run_test_steps(api, props, orchestration_inputs, spec, runs_per_shard):
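    """Runs the test tasks and uploads their results.

    Returns:
        The list of test results from skipped shards and successful attempts.

    Raises:
        StepFailure if the build produced no tests or if testing failed.
    """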
tryjob = api.buildbucket_util.is_tryjob
# It's very bad if a builder that's supposed to run tests stops running
# tests, so we want a noisy failure.
#
# Any builder that has a legitimate reason for not running tests should
# either use the fuchsia/build.py recipe instead if it's a build-only
# builder, or set `run_tests=False`.
if (
not orchestration_inputs.skipped_shards
and not orchestration_inputs.task_requests
):
raise api.step.StepFailure("No tests run. Did the build produce any tests?")
testing_tasks = api.testing.run_test_tasks(
debug_symbol_url=api.artifacts.debug_symbol_url(),
orchestration_inputs=orchestration_inputs,
max_attempts=spec.test.max_attempts,
runs_per_shard=runs_per_shard,
retry_task_on_test_failure=spec.test.retry_task_on_test_failure,
abort_early_if_failed=not props.coverage_collection,
)
all_results = api.testing.test_results_from_skipped_shards(
orchestration_inputs.skipped_shards
)
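    # Skipped shards have no Swarming attempts but still count as successful
    # results.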
successful_results = all_results[:]
for task in testing_tasks:
for attempt in task.attempts:
if attempt.test_results:
all_results.append(attempt.test_results)
if attempt.success:
assert attempt.test_results
successful_results.append(attempt.test_results)
# Upload test results
if spec.test.upload_results:
        assert (
            spec.artifact_gcs_bucket
        ), "artifact_gcs_bucket must be set if test.upload_results is set"
with api.step.nest("upload test results") as presentation:
swarming_task_ids = []
resultdb_base_variant = {
"board": orchestration_inputs.fint_set_metadata.board,
"build_type": orchestration_inputs.fint_set_metadata.optimize,
"product": orchestration_inputs.fint_set_metadata.product,
}
            # Upload all test results, including results from non-final
            # attempts that were retried, so that flakiness can be analyzed.
for result in all_results:
final_resultdb_base_variant = resultdb_base_variant.copy()
final_resultdb_base_variant["test_environment"] = result.env_name
tags = {"shard_name": result.shard_name}
                # Skipped test shards will not have a Swarming task ID or bot
                # ID set.
if not result.skipped:
swarming_task_ids.append(result.swarming_task_id)
tags.update(
swarming_task_id=result.swarming_task_id,
swarming_task_url=(
f"https://{result.swarming_host}/task?id={result.swarming_task_id}"
),
)
# Tasks that failed with NO_RESOURCE will not have a bot ID.
if result.swarming_bot_id:
tags.update(
swarming_bot_id=result.swarming_bot_id,
swarming_bot_url=(
f"https://{result.swarming_host}/bot?id={result.swarming_bot_id}"
),
)
if result.task_output_link:
tags.update(
cas_outputs=result.task_output_link,
)
result.upload_results(
gcs_bucket=spec.artifact_gcs_bucket,
upload_to_catapult=(
not tryjob
and spec.test.catapult_dashboard_master
and spec.test.catapult_dashboard_bot
),
orchestration_inputs=orchestration_inputs,
resultdb_base_variant=final_resultdb_base_variant,
resultdb_tags=tags,
)
# Consumed by the google3 results uploader and the coverage recipe.
presentation.properties["test-swarming-task-ids"] = swarming_task_ids
api.testing.raise_failures(testing_tasks)
return successful_results


def collect_test_orchestration_inputs(api, build_proto, without_cl):
"""Downloads archived orchestration inputs from a build.
Args:
build_proto (Build): The Build proto for the build that produced the
test orchestration inputs.
without_cl (bool): Whether to download the "without CL" build. If false,
this downloads the "with CL" build.
Returns:
FuchsiaBuildApi.TestOrchestrationInputs, hash (str)
Raises:
An InfraFailure if the required property is not found.
"""
prop_name = api.build.test_orchestration_inputs_property_name(without_cl)
orchestration_inputs_hash = api.subbuild.get_property(build_proto, prop_name)
return (
api.build.download_test_orchestration_inputs(orchestration_inputs_hash),
orchestration_inputs_hash,
)


def display_performance_results(
api, perfcompare_tool_dir, results_without_cl, results_with_cl
):
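    """Aggregates performance test results, uploads them to CAS, and runs the
    perfcompare.py tool to summarize (or compare) the results."""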
dest_dir = api.path.cleanup_dir / "perf_dataset"
with_cl_dir = dest_dir / "with_cl"
without_cl_dir = dest_dir / "without_cl"
if results_without_cl:
dir_to_upload = dest_dir
dirs_to_display = [without_cl_dir, with_cl_dir]
property_name = "perfcompare_dataset_digest"
display_step_name = "compare perf test results without and with CL"
else:
dir_to_upload = with_cl_dir
dirs_to_display = [with_cl_dir]
property_name = "perf_dataset_digest"
display_step_name = "summary of perf test results"
with api.step.nest("aggregate task outputs into single directory") as presentation:
api.file.ensure_directory("make directory", dest_dir)
if results_without_cl:
with api.step.nest('convert results for "without_cl" revision'):
make_perfcompare_dataset_dir(api, without_cl_dir, results_without_cl)
with api.step.nest('convert results for "with_cl" revision'):
make_perfcompare_dataset_dir(api, with_cl_dir, results_with_cl)
# Upload the with/without-CL dataset to CAS so that it can be
# easily downloaded for further analysis. The cost of this should be
# low because CAS uses share-by-hash (it is content-addressed) and
# the individual files already came from CAS.
dataset_hash = api.cas_util.upload(dir_to_upload)
presentation.properties[property_name] = dataset_hash
# Consumers also need to know *which* CAS instance the dataset is
# stored on.
presentation.properties["cas_instance"] = api.cas.instance
api.step(
display_step_name,
[
# Use vpython for running perfcompare.py, so that perfcompare's
# .vpython file gets used for specifying Python and library
# versions.
"vpython3",
perfcompare_tool_dir / "perfcompare.py",
"compare_perf",
]
+ dirs_to_display,
)


def make_perfcompare_dataset_dir(api, dest_dir, all_results):
    """Converts test results into the directory layout accepted by the
    perfcompare.py tool for a multi-boot dataset."""
by_boot_dir = dest_dir / "by_boot"
api.file.ensure_directory("make results directory", by_boot_dir)
for boot_idx, test_results in enumerate(all_results):
if not test_results.skipped:
api.file.copytree(
"copy perf test results",
test_results.results_dir,
by_boot_dir.joinpath(f"boot{int(boot_idx):06}"),
)


def GenTests(api):
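    # The helpers below fabricate the step data (task requests, Swarming
    # results, and test outputs) that the simulated builds consume.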
def download_step_data():
task_request_jsonish = api.testing.task_request_jsonish()
return api.testing.task_requests_step_data(
[task_request_jsonish],
"build.download test orchestration inputs.load task requests",
) + api.step_data(
"build.download test orchestration inputs.load triage sources",
api.file.read_json(["triage/config.triage", "other/triage/config.triage"]),
)
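    # Simulates triggering and collecting a single "QEMU" test shard,
    # optionally with failing tests.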
def test_step_data(id="610", failure=False):
shard_name = "QEMU"
outputs = ["out/path/to/output/file"]
return download_step_data() + (
api.testing.task_retry_step_data(
[api.swarming.task_result(id=id, name=shard_name, outputs=outputs)]
)
+ api.testing.test_step_data(shard_name=shard_name, failure=failure)
)
def properties(
run_tests=True,
run_fxt_tests=False,
artifact_gcs_bucket=None,
max_attempts=None,
retry_task_on_test_failure=False,
tap_projects=(),
guitar_config=None,
piper_presubmit=False,
do_not_rebase_patch=False,
**kwargs,
):
test_spec = None
if run_tests:
test_spec = Fuchsia.Test(
max_shard_size=0,
timeout_secs=30 * 60,
pool="fuchsia.tests",
swarming_expiration_timeout_secs=10 * 60,
swarming_io_timeout_secs=5 * 60,
swarming_grace_period_secs=30,
botanist_grace_period_secs=60,
upload_results=bool(artifact_gcs_bucket),
max_attempts=max_attempts,
retry_task_on_test_failure=retry_task_on_test_failure,
)
if run_fxt_tests:
kwargs["fxt_options"] = api.fxt.Options(
tap_projects=tap_projects,
guitar_config=guitar_config,
piper_presubmit=piper_presubmit,
timeout_secs=60 * 60,
)
kwargs["guitar_test_filter_exprs"] = ["deps(//foo)"]
return api.properties(
spec=Fuchsia(
checkout=Fuchsia.Checkout(
manifest="manifest",
remote="remote",
do_not_rebase_patch=do_not_rebase_patch,
),
build=Fuchsia.Build(
run_tests=run_tests,
upload_results=bool(artifact_gcs_bucket),
),
test=test_spec,
artifact_gcs_bucket=artifact_gcs_bucket,
),
**kwargs,
)
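    # A successful child build message shared by most of the test cases below.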
child_build = api.subbuild.ci_build_message(
builder="builder-subbuild",
input_props={"parent_id": "123123"},
output_props={
"integration-revision-count": 1,
"test_orchestration_inputs_digest": "abc",
"got_revision": "abcdef",
"checkout_info": {"manifest": "foo"},
},
status="SUCCESS",
)
child_build.steps.add().MergeFrom(
json_format.ParseDict(
{
"name": "build",
"logs": [
{"name": "foo.txt", "view_url": "https://logs.example.com/foo.txt"},
{"name": "bar.txt", "view_url": "https://logs.example.com/bar.txt"},
],
"summary_markdown": "a summary",
},
step_pb2.Step(),
)
)
failed_child_build = api.subbuild.ci_build_message(
builder="builder-subbuild",
output_props={
"integration-revision-count": 1,
"test_orchestration_inputs_digest": "abc",
"got_revision": "abcdef",
"rpl_files": ["log.rrpl"],
},
status="FAILURE",
)
failed_child_build.summary_markdown = "failed to build fuchsia"
skipped_child_build = api.subbuild.try_build_message(
builder="builder-subbuild",
output_props={"skipped_because_unaffected": True, "got_revision": "abcdef"},
status="SUCCESS",
)
failed_try_child_build = api.subbuild.try_build_message(
builder="builder-subbuild",
output_props={
"integration-revision-count": 1,
"test_orchestration_inputs_digest": "abc",
"got_revision": "abcdef",
},
status="FAILURE",
)
infra_failure_child_build = api.subbuild.ci_build_message(
builder="builder-subbuild",
output_props={
"integration-revision-count": 1,
"test_orchestration_inputs_digest": "abc",
"got_revision": "abcdef",
},
status="INFRA_FAILURE",
)
infra_failure_child_build.summary_markdown = "checkout timed out\nafter 30m"
yield (
api.buildbucket_util.test("successful_build_and_test")
+ properties(artifact_gcs_bucket="gcs-bucket")
+ api.subbuild.child_build_steps(builds=[child_build])
+ test_step_data()
)
yield (
api.buildbucket_util.test("skipped_because_unaffected")
+ properties()
+ api.subbuild.child_build_steps(builds=[skipped_child_build])
)
# Cover the case where the build succeeds but one or more tests fail.
yield (
api.buildbucket_util.test("failed_tests_cq", tryjob=True, status="FAILURE")
+ properties(
artifact_gcs_bucket="gcs-bucket",
max_attempts=1,
do_not_rebase_patch=True,
**{"$fuchsia/autocorrelator": {"ci_bucket": "ci", "ci_builder": "builder"}},
)
+ api.subbuild.child_build_steps(builds=[child_build])
# Pass max_attempts=1 because it keeps the test expectations
# simpler. Otherwise we would have to generate test expectations
# for a retry of the failed task.
+ test_step_data(failure=True)
+ api.autocorrelator.check_try(
[{"build_id": "456", "score": 0.98, "is_green": False}]
)
+ api.autocorrelator.check_ci(
{
"build_id": "789",
"score": 0.96,
"is_green": False,
"commit_dist": 0,
}
)
)
yield (
api.buildbucket_util.test(
"failed_tests_cq_no_rebase", tryjob=True, status="FAILURE"
)
+ properties(
artifact_gcs_bucket="gcs-bucket",
max_attempts=1,
do_not_rebase_patch=True,
**{"$fuchsia/autocorrelator": {"ci_bucket": "ci", "ci_builder": "builder"}},
)
+ api.subbuild.child_build_steps(builds=[child_build])
# Pass max_attempts=1 because it keeps the test expectations
# simpler. Otherwise we would have to generate test expectations
# for a retry of the failed task.
+ test_step_data(failure=True)
)
# Test that if one shard fails after max_attempts, there are no further
# retries of another shard that fails.
def test_stopping_after_max_attempts():
outputs = ["out/path/to/output/file"]
return (
api.buildbucket_util.test("stop_after_max_attempts", status="FAILURE")
+ properties(
artifact_gcs_bucket="gcs-bucket",
                # This will cause Shard2 to want to retry after it fails, but
                # it won't get the chance, because Shard1 will have already
                # failed the maximum number of attempts.
retry_task_on_test_failure=True,
)
+ api.subbuild.child_build_steps(builds=[child_build])
# Expectations for shard task descriptions.
+ api.testing.task_requests_step_data(
[
api.testing.task_request_jsonish(name="Shard1"),
api.testing.task_request_jsonish(name="Shard2"),
],
"build.download test orchestration inputs.load task requests",
)
+ api.step_data(
"build.download test orchestration inputs.load triage sources",
api.file.read_json(
["triage/config.triage", "other/triage/config.triage"]
),
)
# Expectations for the task launch steps.
+ api.swarming_retry.trigger_data("Shard1", "500", attempt=0)
+ api.swarming_retry.trigger_data("Shard2", "600", attempt=0)
# Shard1 fails. (Here the Swarming task returns a failure
# status.)
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id="500", name="Shard1", outputs=outputs, failure=True
)
],
iteration=0,
)
# Shard1 is retried and it fails a second time. It is not
# retried again because it has reached its limit of
# max_attempts=2.
+ api.swarming_retry.trigger_data("Shard1", "501", attempt=1, iteration=1)
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id="501", name="Shard1", outputs=outputs, failure=True
)
],
iteration=1,
)
# Shard2 fails. It is not retried because the other shard
# reached its max_attempts limit. (The Swarming task returns a
# success status but there are Fuchsia test failures.)
+ api.swarming_retry.collect_data(
[api.swarming.task_result(id="600", name="Shard2", outputs=outputs)],
iteration=2,
)
+ api.testing.test_step_data(shard_name="Shard2", failure=True, iteration=2)
)
yield test_stopping_after_max_attempts()
def test_stopping_after_failed_affected():
outputs = ["out/path/to/output/file"]
return (
api.buildbucket_util.test("stop_after_failed_affected", status="FAILURE")
+ properties(artifact_gcs_bucket="gcs-bucket")
+ api.subbuild.child_build_steps(builds=[child_build])
# Expectations for shard task descriptions.
+ api.testing.task_requests_step_data(
[
api.testing.task_request_jsonish(name="affected:Shard1"),
api.testing.task_request_jsonish(name="Shard2"),
],
"build.download test orchestration inputs.load task requests",
)
+ api.step_data(
"build.download test orchestration inputs.load triage sources",
api.file.read_json(
["triage/config.triage", "other/triage/config.triage"]
),
)
# Expectations for the task launch steps.
+ api.swarming_retry.trigger_data("affected:Shard1", "500", attempt=0)
+ api.swarming_retry.trigger_data("Shard2", "600", attempt=0)
# affected:Shard1 fails. (Here the Swarming task returns a failure
# status.)
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id="500", name="affected:Shard1", outputs=outputs, failure=True
)
],
iteration=0,
)
# affected:Shard1 is retried and it fails a second time.
+ api.swarming_retry.trigger_data(
"affected:Shard1", "501", attempt=1, iteration=1
)
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id="501", name="affected:Shard1", outputs=outputs
)
],
iteration=1,
)
+ api.testing.test_step_data(
shard_name="affected:Shard1",
failure=True,
iteration=1,
)
# Shard2 is not collected because the affected shards completed and
# failed.
)
yield test_stopping_after_failed_affected()
# This is similar to test_step_data(), except it generates expectations
# for multiple runs of the same shard.
def test_step_data_perfcompare(task_id_base):
shard_name = "QEMU"
outputs = ["out/path/to/output/file"]
task_id1 = str(task_id_base)
task_id2 = str(task_id_base + 1)
test = (
download_step_data()
# Expectations for the task launch steps.
+ api.swarming_retry.trigger_data("QEMU", task_id1, attempt=0)
+ api.swarming_retry.trigger_data("QEMU", task_id2, attempt=1)
# Expectations for the Swarming "collect" steps. The two tasks
# complete in different invocations of "collect".
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id=task_id1, name=shard_name, outputs=outputs
)
],
iteration=0,
)
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id=task_id2, name=shard_name, outputs=outputs
)
],
iteration=1,
)
# Expectations for the "process results" steps.
+ api.testing.test_step_data(shard_name=shard_name, iteration=0)
+ api.testing.test_step_data(shard_name=shard_name, iteration=1)
)
return test
# Test expectations for the "without CL" recipe steps of perfcompare mode.
def test_step_data_perfcompare_without_cl(task_id_base):
test = test_step_data_perfcompare(task_id_base)
def rename_step(old_name, new_name):
test.step_data[new_name] = test.step_data.pop(old_name)
rename_step(
"build.download test orchestration inputs.load task requests",
"build.download test orchestration inputs (2).load task requests",
)
rename_step(
"build.download test orchestration inputs.load triage sources",
"build.download test orchestration inputs (2).load triage sources",
)
launch_names = [
"launch/collect.0.launch.QEMU (attempt 0).trigger",
"launch/collect.0.launch.QEMU (attempt 1).trigger",
"launch/collect.0.collect",
"launch/collect.0.process results.QEMU.tefmocheck",
"launch/collect.0.process results.QEMU.get extracted files",
"launch/collect.1.collect",
"launch/collect.1.process results.QEMU.tefmocheck",
"launch/collect.1.process results.QEMU.get extracted files",
]
for name in launch_names:
rename_step(name, f"test without CL.{name}")
return test
yield (
api.buildbucket_util.test("successful_build_and_test_perfcompare")
# Pass a smaller value for runs_per_shard than the default for
# perfcompare mode in order to reduce the size of the test
# expectations output, but use a number >1 in order to test
# multiple boots.
+ properties(
artifact_gcs_bucket="gcs-bucket",
perfcompare=True,
runs_per_shard=2,
)
+ api.subbuild.child_build_steps(
builds=[
api.subbuild.ci_build_message(
builder="builder-subbuild",
output_props={
"integration-revision-count": 1,
"test_orchestration_inputs_digest": "abc",
"test_orchestration_inputs_digest_without_cl": "efg",
"got_revision": "abcdef",
},
status="SUCCESS",
)
]
)
+ test_step_data_perfcompare(task_id_base=9900100)
+ test_step_data_perfcompare_without_cl(task_id_base=9900200)
)
yield (
api.buildbucket_util.test("child_build_provided")
+ properties(
artifact_gcs_bucket="gcs-bucket",
child_build_id=str(child_build.id),
)
+ api.buildbucket.simulated_get(child_build, step_name="build.buildbucket.get")
+ test_step_data()
)
yield (
api.buildbucket_util.test("build_only_failed", status="INFRA_FAILURE")
+ properties(run_tests=False)
)
yield (
api.buildbucket_util.test("build_failed", status="FAILURE")
+ properties(artifact_gcs_bucket="gcs-bucket")
+ api.subbuild.child_build_steps(builds=[failed_child_build])
)
yield (
api.buildbucket_util.test("build_infra_failure", status="INFRA_FAILURE")
+ properties(artifact_gcs_bucket="gcs-bucket")
+ api.subbuild.child_build_steps(builds=[infra_failure_child_build])
)
yield (
api.buildbucket_util.test("build_with_led", status="FAILURE")
+ properties(
**{
"$recipe_engine/led": LedInputProperties(
led_run_id="led/user_example.com/abc123",
),
}
)
+ api.subbuild.child_led_steps(builds=[failed_child_build])
)
yield (
api.buildbucket_util.test(
"build_with_led_tryjob", tryjob=True, status="FAILURE"
)
+ properties(
**{
"$recipe_engine/led": LedInputProperties(
led_run_id="led/user_example.com/abc123",
),
}
)
+ api.subbuild.child_led_steps(builds=[failed_try_child_build])
)
yield (
api.buildbucket_util.test("successful_external_tests")
+ properties(
artifact_gcs_bucket="gcs-bucket",
run_tests=False,
run_fxt_tests=True,
tap_projects=["fuchsia.tap"],
guitar_config=json_format.ParseDict(
{"projectfoo": "bar"}, struct_pb2.Struct()
),
)
+ api.subbuild.child_build_steps(builds=[child_build])
+ api.fxt.orchestrate_fxt_tests()
)
yield (
api.buildbucket_util.test("successful_external_tests_tap_only")
+ properties(
artifact_gcs_bucket="gcs-bucket",
run_tests=False,
run_fxt_tests=True,
tap_projects=["fuchsia.tap"],
)
+ api.subbuild.child_build_steps(builds=[child_build])
+ api.fxt.orchestrate_fxt_tests(guitar_project_request_ids=[])
)
yield (
api.buildbucket_util.test("successful_external_tests_guitar_only")
+ properties(
artifact_gcs_bucket="gcs-bucket",
run_tests=False,
run_fxt_tests=True,
guitar_config=json_format.ParseDict(
{"projectfoo": "bar"}, struct_pb2.Struct()
),
)
+ api.subbuild.child_build_steps(builds=[child_build])
+ api.fxt.orchestrate_fxt_tests(tap_request_id="")
)
yield (
api.buildbucket_util.test("successful_external_tests_presubmit_only")
+ properties(
artifact_gcs_bucket="gcs-bucket",
run_tests=False,
run_fxt_tests=True,
piper_presubmit=True,
)
+ api.subbuild.child_build_steps(builds=[child_build])
+ api.fxt.orchestrate_fxt_tests(piper_presubmit=True)
)
yield (
api.buildbucket_util.test("failed_external_tests", status="FAILURE")
+ properties(
artifact_gcs_bucket="gcs-bucket",
run_tests=False,
run_fxt_tests=True,
tap_projects=["fuchsia.tap"],
guitar_config=json_format.ParseDict(
{"projectfoo": "bar"}, struct_pb2.Struct()
),
)
+ api.subbuild.child_build_steps(builds=[child_build])
+ api.fxt.orchestrate_fxt_tests(success=False)
)
yield (
api.buildbucket_util.test("no_tests_run", status="FAILURE")
+ properties(artifact_gcs_bucket="gcs-bucket")
+ api.subbuild.child_build_steps(builds=[child_build])
+ api.testing.task_requests_step_data(
[],
"build.download test orchestration inputs.load task requests",
)
)