# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" fuchsia.py - Builds and optionally tests Fuchsia.
# Execution overview
## Configuration
This recipe uses a protocol buffer message called a spec for most of its
configuration. The only PROPERTIES are those required to acquire the spec.
The recipe fetches the spec from the git repo |spec_remote|. It determines
the correct revision to use from the BuildBucket build input to ensure it
retrieves the correct config for a pending change vs a committed change.
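For illustration, the spec is acquired roughly like this (a simplified sketch
of the call made in RunSteps below):

    spec, spec_revision = api.fuchsia.setup_with_spec(spec_remote)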
## Checkout + Build
This recipe triggers a child build which runs the fuchsia/build recipe.
That recipe checks out the source code and builds it. This recipe
retrieves the data required to orchestrate tests via Isolate.
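For illustration, the child build is launched and collected roughly like this
(a simplified sketch of run_build_steps below):

    builds = api.subbuild.launch([builder_name], presentation, extra_properties=properties)
    build_id = builds[builder_name].build_id
    builds = api.subbuild.collect([build_id], presentation)
    output_build = builds[build_id].build_proto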
## Test
If configured to run tests, this recipe uses the test orchestration data to run tests.
That logic is in the testing recipe module. Under the hood, that module
triggers Swarming tasks that do the actual testing, waits for them, and
reports the results.
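For illustration, test tasks are triggered and checked roughly like this (a
simplified sketch of run_test_steps below; some arguments omitted):

    testing_tasks = api.testing.run_test_tasks(
        debug_symbol_url=api.artifacts.debug_symbol_url(),
        orchestration_inputs=orchestration_inputs,
        max_attempts=spec.test.max_attempts,
    )
    api.testing.raise_failures(testing_tasks)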
## External Tests
If configured to run external tests, this recipe invokes tools that pass build
artifacts to external infrastructure and trigger tests there, then waits for
those tests and reports the results.
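For illustration, external tests are launched and monitored roughly like this
(a simplified sketch of run_external_test_steps below):

    resp = api.fxt.launch(
        step_name="launch",
        bucket=api.artifacts.gcs_bucket,
        uuid=api.artifacts.uuid,
        name=spec.external_tests.system_image,
        projects=spec.external_tests.tap_projects,
        presentation=presentation,
    )
    api.fxt.monitor(step_name="monitor", request_id=resp["request_id"])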
"""
from google.protobuf import json_format
from recipe_engine.recipe_api import Property
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
from PB.infra.fuchsia import Fuchsia
from PB.recipe_modules.recipe_engine.led.properties import (
InputProperties as LedInputProperties,
)
DEPS = [
"fuchsia/artifacts",
"fuchsia/build",
"fuchsia/build_input_resolver",
"fuchsia/buildbucket_util",
"fuchsia/checkout",
"fuchsia/fuchsia",
"fuchsia/fxt",
"fuchsia/gitiles",
"fuchsia/spec",
"fuchsia/subbuild",
"fuchsia/swarming_retry",
"fuchsia/testing",
"recipe_engine/buildbucket",
"recipe_engine/file",
"recipe_engine/isolated",
"recipe_engine/led",
"recipe_engine/path",
"recipe_engine/properties",
"recipe_engine/python",
"recipe_engine/step",
"recipe_engine/swarming",
]
PROPERTIES = {
"child_build_id": Property(
kind=str,
help=(
"The buildbucket ID of the child build. If set, "
"will use this build instead of launching a new one."
),
default=None,
),
"spec_remote": Property(
kind=str,
help="URL of the specs git repository",
default="http://fuchsia.googlesource.com/integration",
),
"perfcompare": Property(
kind=bool,
help=(
"Enable perfcompare mode: Compare performance between the "
'"without CL" and "with CL" revisions. This involves '
"building and testing both of those revisions."
),
default=False,
),
# This property is not intended to be set by a config. It is just here
# so that a test case can pass a smaller number in order to make the
# test expectations simpler.
"boots_per_revision": Property(
kind=int,
help=(
"Number of boots of Fuchsia to run performance tests on, "
"in order to deal with cross-boot variation of performance. "
"Only applies when perfcompare mode is enabled."
),
default=5,
),
}
def RunSteps(api, child_build_id, spec_remote, perfcompare, boots_per_revision):
# At some stage BuildBucket stores properties as google.protobuf.Value,
# which converts all numbers (including ints) to floats and is therefore
# lossy, so we have to use a string property and convert to int
# internally.
child_build_id = int(child_build_id) if child_build_id else None
spec, spec_revision = api.fuchsia.setup_with_spec(spec_remote)
orchestrator_id = api.buildbucket_util.id
if not spec.build.run_tests and not spec.external_tests.tap_projects:
raise api.step.InfraFailure(
"if not running tests, use the fuchsia/build recipe directly"
)
with api.step.nest("build") as presentation:
child_build = run_build_steps(
api, presentation, child_build_id, spec_revision, orchestrator_id
)
if spec.build.run_tests:
child_props = json_format.MessageToDict(child_build.output.properties)
(
orchestration_inputs,
orchestration_inputs_hash,
) = collect_test_orchestration_inputs(api, child_props, without_cl=False)
if perfcompare:
orchestration_inputs_without_cl, _ = collect_test_orchestration_inputs(
api, child_props, without_cl=True
)
# Copy to output properties so the coverage recipe can access it.
presentation.properties[
api.build.TEST_ORCHESTRATION_INPUTS_HASH_PROPERTY
] = orchestration_inputs_hash
presentation.properties[api.checkout.ROOT_DIR_PROPERTY] = child_props[
api.checkout.ROOT_DIR_PROPERTY
]
# Copy to our own properties so the results uploader in google3 can find
# it without knowing about the child.
rev_count_prop = api.checkout.REVISION_COUNT_PROPERTY
if rev_count_prop in child_props:
presentation.properties[rev_count_prop] = child_props[rev_count_prop]
# Configure context of uploaded artifacts for test task construction.
api.artifacts.gcs_bucket = spec.artifact_gcs_bucket
api.artifacts.uuid = orchestrator_id
runs_per_shard = boots_per_revision if perfcompare else 1
if spec.build.run_tests:
results_with_cl = run_test_steps(
api, orchestration_inputs, spec, runs_per_shard
)
if spec.external_tests.tap_projects:
with api.step.nest("run external tests") as presentation:
run_external_test_steps(api, presentation, spec)
if perfcompare:
with api.step.nest("test without CL"):
results_without_cl = run_test_steps(
api, orchestration_inputs_without_cl, spec, runs_per_shard
)
compare_performance_results(
api, orchestration_inputs.perfcompare, results_without_cl, results_with_cl
)
def run_build_steps(api, presentation, child_build_id, spec_revision, orchestrator_id):
builder_name = "{}-subbuild".format(api.buildbucket.build.builder.builder)
if child_build_id:
# Step text clarifies that an existing child build is reused instead of triggering a new one.
presentation.step_text = "Reusing child build instead of triggering"
output_build = api.buildbucket.get(child_build_id)
build_url = "https://ci.chromium.org/b/%d" % child_build_id
presentation.links[builder_name] = build_url
else:
properties = {
"spec_revision": spec_revision,
"parent_id": orchestrator_id,
}
builds = api.subbuild.launch(
[builder_name], presentation, extra_properties=properties
)
build_id = builds[builder_name].build_id
build_url = builds[builder_name].url
if not api.led.launched_by_led:
# Consumed by the rerun recipe.
with api.step.nest("child build id") as pres:
# Passing build IDs directly as properties doesn't work because all
# numbers get cast to floats, which is lossy, so we convert to str.
pres.properties["child_build_id"] = str(build_id)
builds = api.subbuild.collect([build_id], presentation)
output_build = builds[build_id].build_proto
if output_build.status != common_pb2.SUCCESS:
if output_build.status == common_pb2.INFRA_FAILURE:
exception_type = api.step.InfraFailure
description = "raised infra failure"
else:
exception_type = api.step.StepFailure
description = "failed"
# Copy the child summary markdown into the parent summary markdown to
# better propagate error messages. If the child summary is multiple lines,
# start it on a new line.
subbuild_summary = output_build.summary_markdown
raise exception_type(
"[build](%s) %s:%s%s"
% (
build_url,
description,
"\n\n" if "\n" in subbuild_summary else " ",
subbuild_summary,
)
)
return output_build
def run_test_steps(api, orchestration_inputs, spec, runs_per_shard):
tryjob = api.buildbucket_util.is_tryjob
testing_tasks = api.testing.run_test_tasks(
debug_symbol_url=api.artifacts.debug_symbol_url(),
orchestration_inputs=orchestration_inputs,
max_attempts=spec.test.max_attempts,
rerun_budget_secs=spec.test.rerun_budget_secs,
runs_per_shard=runs_per_shard,
)
all_results = []
successful_results = []
for task in testing_tasks:
for attempt in task.get_all_attempts():
if attempt.test_results:
all_results.append(attempt.test_results)
for attempt in task.get_successful_attempts():
assert attempt.test_results
successful_results.append(attempt.test_results)
# Upload test results
if spec.test.upload_results:
assert spec.gcs_bucket, "gcs_bucket must be set if test.upload_results is set"
with api.step.nest("upload test results") as presentation:
link = "go/fuchsia-result-store/bid:%s" % api.buildbucket_util.id
presentation.links[link] = link.replace("go/", "https://goto.google.com/")
swarming_task_ids = []
# Upload all test results, including non-final attempts that were
# retried, so that flakiness can be analyzed.
for test_results in all_results:
test_results.upload_results(
gcs_bucket=spec.gcs_bucket,
upload_to_catapult=(
not tryjob
and spec.test.catapult_dashboard_master
and spec.test.catapult_dashboard_bot
),
orchestration_inputs=orchestration_inputs,
)
swarming_task_ids.append(test_results.swarming_task_id)
# Consumed by the google3 results uploader and the coverage recipe.
presentation.properties["test-swarming-task-ids"] = swarming_task_ids
api.testing.raise_failures(testing_tasks)
return successful_results
def run_external_test_steps(api, presentation, spec):
resp = api.fxt.launch(
step_name="launch",
bucket=api.artifacts.gcs_bucket,
uuid=api.artifacts.uuid,
name=spec.external_tests.system_image,
projects=spec.external_tests.tap_projects,
presentation=presentation,
)
api.fxt.monitor(step_name="monitor", request_id=resp["request_id"])
def collect_test_orchestration_inputs(api, build_props, without_cl):
"""Downloads isolated orchestration inputs from a build.
Args:
build_props (dict): The properties of the build that produced the test
orchestration inputs.
without_cl (bool): Whether to download the "without CL" build. If false,
this downloads the "with CL" build.
Returns:
FuchsiaBuildApi.TestOrchestrationInputs, hash (str)
Raises:
A StepFailure if the required property is not found.
"""
prop_name = api.build.test_orchestration_inputs_property_name(without_cl)
orchestration_inputs_hash = build_props.get(prop_name)
if not orchestration_inputs_hash:
raise api.step.StepFailure("no `%s` property found" % prop_name)
return (
api.build.download_test_orchestration_inputs(orchestration_inputs_hash),
orchestration_inputs_hash,
)
def compare_performance_results(
api, perfcompare_tool_dir, results_without_cl, results_with_cl
):
with api.step.nest(
"convert results to perfcompare directory layout"
) as presentation:
dest_dir = api.path["cleanup"].join("perf_dataset")
api.file.ensure_directory("make directory", dest_dir)
dir_args = [
make_perfcompare_dataset_dir(
api, dest_dir, results_without_cl, "without_cl"
),
make_perfcompare_dataset_dir(api, dest_dir, results_with_cl, "with_cl"),
]
# Upload the with/without-CL dataset to Isolate so that it can be
# easily downloaded for further analysis. The cost of this should be
# low because Isolate uses share-by-hash (it is content-addressed) and
# the individual files already came from Isolate.
isolated = api.isolated.isolated(dest_dir)
isolated.add_dir(dest_dir)
dataset_hash = isolated.archive("isolate")
presentation.properties["perfcompare_dataset_hash"] = dataset_hash
api.python(
"compare perf test results without and with CL",
perfcompare_tool_dir.join("perfcompare.py"),
["compare_perf"] + dir_args,
)
# Convert test results into the directory layout accepted by the
# perfcompare.py tool for a multi-boot dataset.
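# Each call produces a layout of roughly this shape (illustrative):
#
#   <dest_parent_dir>/<with_or_without_cl>/by_boot/boot000000/...
#   <dest_parent_dir>/<with_or_without_cl>/by_boot/boot000001/...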
def make_perfcompare_dataset_dir(api, dest_parent_dir, all_results, with_or_without_cl):
with api.step.nest('convert results for "%s" revision' % with_or_without_cl):
dest_dir = dest_parent_dir.join(with_or_without_cl)
by_boot_dir = dest_dir.join("by_boot")
api.file.ensure_directory("make results directory", by_boot_dir)
for boot_idx, test_results in enumerate(all_results):
api.file.copytree(
"copy perf test results",
test_results.output_dir,
by_boot_dir.join("boot%06d" % boot_idx),
)
return dest_dir
def GenTests(api):
def download_step_data(legacy_qemu):
task_request_jsonish = api.testing.task_request_jsonish(legacy_qemu=legacy_qemu)
return api.testing.task_requests_step_data(
[task_request_jsonish],
"build.download test orchestration inputs.load task requests",
)
def test_step_data(test_in_shards=True, id="610", failure=False):
shard_name = "QEMU"
if test_in_shards:
legacy_qemu = False
outputs = ["out/path/to/output/file"]
else:
legacy_qemu = True
outputs = ["output.fs"]
return download_step_data(legacy_qemu) + (
api.testing.task_retry_step_data(
[api.swarming.task_result(id=id, name=shard_name, outputs=outputs,),]
)
+ api.testing.test_step_data(
shard_name=shard_name, legacy_qemu=legacy_qemu, failure=failure
)
)
def external_tests_step_data(success=True):
return api.fxt.launch(
"run external tests.launch",
test_data={
"request_id": "test-id",
"workspace": "test-ws",
"change_num": 12345,
},
) + api.fxt.monitor("run external tests.monitor", success=success)
def spec_data(
variants=(),
device_type="QEMU",
run_tests=True,
run_external_tests=False,
test_in_shards=True,
gcs_bucket=None,
max_attempts=None,
):
test_spec = None
if run_tests:
test_spec = Fuchsia.Test(
device_type=device_type,
max_shard_size=0,
timeout_secs=30 * 60,
pool="fuchsia.tests",
test_in_shards=test_in_shards,
swarming_expiration_timeout_secs=10 * 60,
swarming_io_timeout_secs=5 * 60,
upload_results=bool(gcs_bucket),
use_runtests=True,
max_attempts=max_attempts,
)
external_tests_spec = None
if run_external_tests:
external_tests_spec = Fuchsia.ExternalTests(
system_image="core-x64", tap_projects=["fuchsia.tests"],
)
return api.spec.spec_loaded_ok(
step_name="load spec.build_init",
message=Fuchsia(
checkout=Fuchsia.Checkout(
manifest="manifest",
remote="remote",
upload_results=bool(gcs_bucket),
),
build=Fuchsia.Build(
variants=variants,
build_type="debug",
run_tests=run_tests,
board="boards/x64.gni",
product="products/core.gni",
target="x64",
upload_results=bool(gcs_bucket),
),
test=test_spec,
external_tests=external_tests_spec,
gcs_bucket=gcs_bucket,
artifact_gcs_bucket=gcs_bucket,
),
)
child_build = api.subbuild.ci_build_message(
builder="builder-subbuild",
output_props={
"checkout_root": "/checkout/root",
"integration-revision-count": 1,
"test_orchestration_inputs_hash": "abc",
},
status="SUCCESS",
)
failed_child_build = api.subbuild.ci_build_message(
builder="builder-subbuild",
output_props={
"checkout_root": "/checkout/root",
"integration-revision-count": 1,
"test_orchestration_inputs_hash": "abc",
},
status="FAILURE",
)
failed_child_build.summary_markdown = "failed to build fuchsia"
failed_try_child_build = api.subbuild.try_build_message(
builder="builder-subbuild",
output_props={
"checkout_root": "/checkout/root",
"integration-revision-count": 1,
"test_orchestration_inputs_hash": "abc",
},
status="FAILURE",
)
failed_try_child_build.summary_markdown = "failed to build fuchsia"
infra_failure_child_build = api.subbuild.ci_build_message(
builder="builder-subbuild",
output_props={
"checkout_root": "/checkout/root",
"integration-revision-count": 1,
"test_orchestration_inputs_hash": "abc",
},
status="INFRA_FAILURE",
)
infra_failure_child_build.summary_markdown = "checkout timed out\nafter 30m"
yield api.fuchsia.test(
"successful_build_and_test",
steps=[api.subbuild.child_build_steps(builds=[child_build])],
) + spec_data(gcs_bucket="gcs-bucket", variants=("profile",)) + test_step_data()
# Cover the case where the build succeeds but one or more tests fail.
yield (
api.fuchsia.test(
"failed_tests",
status="failure",
steps=[api.subbuild.child_build_steps(builds=[child_build])],
)
# Pass max_attempts=1 because it keeps the test expectations
# simpler. Otherwise we would have to generate test expectations
# for a retry of the failed task.
+ spec_data(gcs_bucket="gcs-bucket", variants=("profile",), max_attempts=1)
+ test_step_data(failure=True)
)
# Test that once one shard has failed max_attempts times, another shard
# that subsequently fails is not retried.
def test_stopping_after_max_attempts():
outputs = ["out/path/to/output/file"]
return (
api.fuchsia.test(
"stop_after_max_attempts",
status="failure",
steps=[api.subbuild.child_build_steps(builds=[child_build])],
)
+ spec_data(gcs_bucket="gcs-bucket", variants=("profile",))
# Expectations for shard task descriptions.
+ api.testing.task_requests_step_data(
[
api.testing.task_request_jsonish(legacy_qemu=False, name="Shard1"),
api.testing.task_request_jsonish(legacy_qemu=False, name="Shard2"),
],
"build.download test orchestration inputs.load task requests",
)
# Expectations for the task launch steps.
+ api.swarming_retry.trigger_data("Shard1", "500", attempt=0)
+ api.swarming_retry.trigger_data("Shard2", "600", attempt=0)
# Shard1 fails. (Here the Swarming task returns a failure
# status.)
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id="500", name="Shard1", outputs=outputs, failure=True
)
],
iteration=0,
)
# Shard1 is retried and it fails a second time. It is not
# retried again because it has reached its limit of
# max_attempts=2.
+ api.swarming_retry.trigger_data("Shard1", "501", attempt=1, iteration=1)
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id="501", name="Shard1", outputs=outputs, failure=True
)
],
iteration=1,
)
# Shard2 fails. It is not retried because the other shard
# reached its max_attempts limit. (The Swarming task returns a
# success status but there are Fuchsia test failures.)
+ api.swarming_retry.collect_data(
[api.swarming.task_result(id="600", name="Shard2", outputs=outputs)],
iteration=2,
)
+ api.testing.test_step_data(
shard_name="Shard2", legacy_qemu=False, failure=True, iteration=2
)
)
yield test_stopping_after_max_attempts()
def test_stopping_after_failed_affected():
outputs = ["out/path/to/output/file"]
return (
api.fuchsia.test(
"stop_after_failed_affected",
status="failure",
steps=[api.subbuild.child_build_steps(builds=[child_build])],
)
+ spec_data(gcs_bucket="gcs-bucket", variants=("profile",))
# Expectations for shard task descriptions.
+ api.testing.task_requests_step_data(
[
api.testing.task_request_jsonish(
legacy_qemu=False, name="affected:Shard1"
),
api.testing.task_request_jsonish(legacy_qemu=False, name="Shard2"),
],
"build.download test orchestration inputs.load task requests",
)
# Expectations for the task launch steps.
+ api.swarming_retry.trigger_data("affected:Shard1", "500", attempt=0)
+ api.swarming_retry.trigger_data("Shard2", "600", attempt=0)
# affected:Shard1 fails. (Here the Swarming task returns a failure
# status.)
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id="500", name="affected:Shard1", outputs=outputs, failure=True
)
],
iteration=0,
)
# affected:Shard1 is retried and it fails a second time.
+ api.swarming_retry.trigger_data(
"affected:Shard1", "501", attempt=1, iteration=1
)
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id="501", name="affected:Shard1", outputs=outputs
)
],
iteration=1,
)
+ api.testing.test_step_data(
shard_name="affected:Shard1",
legacy_qemu=False,
failure=True,
iteration=1,
)
# Shard2 is not collected because the affected shards completed and
# failed.
)
yield test_stopping_after_failed_affected()
# This is similar to test_step_data(), except it generates expectations
# for multiple runs of the same shard.
def test_step_data_perfcompare(task_id_base):
shard_name = "QEMU"
outputs = ["out/path/to/output/file"]
task_id1 = str(task_id_base)
task_id2 = str(task_id_base + 1)
test = (
download_step_data(legacy_qemu=False)
# Expectations for the task launch steps.
+ api.swarming_retry.trigger_data("QEMU", task_id1, attempt=0)
+ api.swarming_retry.trigger_data("QEMU", task_id2, attempt=1)
# Expectations for the Swarming "collect" steps. The two tasks
# complete in different invocations of "collect".
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id=task_id1, name=shard_name, outputs=outputs
)
],
iteration=0,
)
+ api.swarming_retry.collect_data(
[
api.swarming.task_result(
id=task_id2, name=shard_name, outputs=outputs
)
],
iteration=1,
)
# Expectations for the "process results" steps.
+ api.testing.test_step_data(
shard_name=shard_name, legacy_qemu=False, iteration=0,
)
+ api.testing.test_step_data(
shard_name=shard_name, legacy_qemu=False, iteration=1,
)
)
return test
# Test expectations for the "without CL" recipe steps of perfcompare mode.
def test_step_data_perfcompare_without_cl(task_id_base):
test = test_step_data_perfcompare(task_id_base)
def rename_step(old_name, new_name):
test.step_data[new_name] = test.step_data.pop(old_name)
rename_step(
"build.download test orchestration inputs.load task requests",
"build.download test orchestration inputs (2).load task requests",
)
launch_names = [
"launch/collect.0.launch.QEMU (attempt 0).trigger",
"launch/collect.0.launch.QEMU (attempt 1).trigger",
"launch/collect.0.collect",
"launch/collect.0.process results.QEMU.tefmocheck",
"launch/collect.0.process results.QEMU.get extracted files",
"launch/collect.1.collect",
"launch/collect.1.process results.QEMU.tefmocheck",
"launch/collect.1.process results.QEMU.get extracted files",
]
for name in launch_names:
rename_step(name, "test without CL.%s" % name)
return test
yield (
api.fuchsia.test(
"successful_build_and_test_perfcompare",
steps=[
api.subbuild.child_build_steps(
builds=[
api.subbuild.ci_build_message(
builder="builder-subbuild",
output_props={
"checkout_root": "/checkout/root",
"integration-revision-count": 1,
"test_orchestration_inputs_hash": "abc",
"test_orchestration_inputs_hash_without_cl": "efg",
},
status="SUCCESS",
)
]
),
],
)
+ spec_data(gcs_bucket="gcs-bucket", variants=("profile",))
+ test_step_data_perfcompare(task_id_base=9900100)
+ test_step_data_perfcompare_without_cl(task_id_base=9900200)
# Pass a smaller value for boots_per_revision than the default to
# reduce the size of the test expectations output, but use a number
# >1 in order to test multiple boots.
+ api.properties(perfcompare=True, boots_per_revision=2)
)
yield api.fuchsia.test(
"child_build_provided__test_not_in_shards",
steps=[
api.buildbucket.simulated_get(
child_build, step_name="build.buildbucket.get"
),
],
) + spec_data(gcs_bucket="gcs-bucket", test_in_shards=False) + test_step_data(
test_in_shards=False
) + api.properties(
child_build_id=str(child_build.id)
)
yield api.fuchsia.test("build_only_failed", status="infra_failure",) + spec_data(
run_tests=False
)
yield api.fuchsia.test(
"build_failed",
status="failure",
steps=[api.subbuild.child_build_steps(builds=[failed_child_build]),],
) + spec_data(gcs_bucket="gcs-bucket", variants=("profile",))
yield api.fuchsia.test(
"build_infra_failure",
status="infra_failure",
steps=[api.subbuild.child_build_steps(builds=[infra_failure_child_build]),],
) + spec_data(gcs_bucket="gcs-bucket", variants=("profile",))
yield api.fuchsia.test(
"build_with_led",
status="failure",
properties={
"$recipe_engine/led": LedInputProperties(
led_run_id="led/user_example.com/abc123",
isolated_input=LedInputProperties.IsolatedInput(
hash="abc123",
namespace="default-gzip",
server="isolateserver.appspot.com",
),
),
},
steps=[api.subbuild.child_led_steps(builds=[failed_child_build]),],
) + spec_data()
yield api.fuchsia.test(
"build_with_led_tryjob",
status="failure",
properties={
"$recipe_engine/led": LedInputProperties(
led_run_id="led/user_example.com/abc123",
isolated_input=LedInputProperties.IsolatedInput(
hash="abc123",
namespace="default-gzip",
server="isolateserver.appspot.com",
),
),
},
tryjob=True,
steps=[api.subbuild.child_led_steps(builds=[failed_try_child_build]),],
) + api.build_input_resolver.set_gerrit_branch() + api.gitiles.refs(
"refs", ["refs/heads/master", "deadbeef",]
) + spec_data()
yield api.fuchsia.test(
"build_passed_but_hash_is_missing",
status="failure",
steps=[
api.subbuild.child_build_steps(
builds=[
api.subbuild.ci_build_message(
builder="builder-subbuild", status="SUCCESS",
)
]
)
],
) + spec_data()
yield api.fuchsia.test(
"successful_external_tests",
steps=[api.subbuild.child_build_steps(builds=[child_build])],
) + spec_data(
gcs_bucket="gcs-bucket", run_tests=False, run_external_tests=True
) + external_tests_step_data()
yield api.fuchsia.test(
"failed_external_tests",
status="failure",
steps=[api.subbuild.child_build_steps(builds=[child_build])],
) + spec_data(
gcs_bucket="gcs-bucket", run_tests=False, run_external_tests=True
) + external_tests_step_data(
success=False
)