# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from recipe_engine import post_process
from recipe_engine.recipe_api import Property
from PB.infra.fuchsia import Fuchsia

DEPS = [
"fuchsia/artifacts",
"fuchsia/build",
"fuchsia/buildbucket_util",
"fuchsia/swarming_retry",
"fuchsia/testing",
"fuchsia/testing_requests",
"fuchsia/testsharder",
"recipe_engine/buildbucket",
"recipe_engine/json",
"recipe_engine/path",
"recipe_engine/step",
"recipe_engine/swarming",
"recipe_engine/time",
]

PROPERTIES = {
"gcs_bucket": Property(
kind=str,
help="GCS bucket for uploading checkout, build, and test results",
default="fuchsia_infra",
),
"device_type": Property(
kind=str,
help="Passed through to spec field Fuchsia.Test.device_type",
default="QEMU",
),
"pave": Property(
kind=bool, help="Passed through to spec field Fuchsia.Test.pave", default=True
),
"test_in_shards": Property(
kind=bool,
help="Passed through to spec field Fuchsia.Test.test_in_shards",
default=False,
),
"upload_to_catapult": Property(kind=bool, default=False),
"per_test_timeout_secs": Property(
kind=int,
help="Passed through to spec field Fuchsia.Test.per_test_timeout_secs",
default=0,
),
"rerun_budget_secs": Property(
kind=int,
help="Passed through to spec field Fuchsia.Test.rerun_budget_secs",
default=0,
),
"use_runtests": Property(kind=bool, help="Whether to use runtests", default=False),
"variants": Property(kind=list, help="GN variants", default=[]),
"max_attempts": Property(
kind=int,
help="Passed through to spec field Fuchsia.test.max_attempts",
default=1,
),
}


def RunSteps(
api,
gcs_bucket,
device_type,
pave,
test_in_shards,
upload_to_catapult,
per_test_timeout_secs,
rerun_budget_secs,
use_runtests,
variants,
max_attempts,
):
api.swarming_retry.prune_tracebacks_for_testing()
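    # Only upload results when a GCS bucket is provided.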
upload_results = bool(gcs_bucket)
# Intended to be a minimal amount of code to create a valid FuchsiaBuildResults object.
checkout_root = api.path["start_dir"]
fuchsia_build_dir = checkout_root.join("out", "default")
gn_results = api.build.gn_results(fuchsia_build_dir)
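    # Read the image manifest produced by the build and key each image by
    # "<type>/<name>".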
images_list = api.json.read(
"read images", fuchsia_build_dir.join("images.json")
).json.output
images_dict = {i["type"] + "/" + i["name"]: i for i in images_list}
build_results = api.build.build_results(
checkout_root,
"arm64",
variants,
"build-type",
fuchsia_build_dir,
checkout_root.join("out", "default.zircon"),
"//boards/foo.gni",
"//products/foo.gni",
gn_results,
images_dict,
)
    # Configure where build artifacts are uploaded so that test task requests
    # can reference them.
api.artifacts.gcs_bucket = "fuchsia-artifacts"
api.artifacts.uuid = api.buildbucket_util.id
test_spec = Fuchsia.Test(
device_type=device_type,
pave=pave,
pool="fuchsia.tests",
test_in_shards=test_in_shards,
per_test_timeout_secs=per_test_timeout_secs,
use_runtests=use_runtests,
default_service_account="default_service_account",
rerun_budget_secs=rerun_budget_secs,
)
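    # With a rerun budget, tasks are rerun within that time budget rather than
    # retried on failure, so limit each task to a single attempt.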
if rerun_budget_secs:
test_spec.max_attempts = 1
else:
test_spec.max_attempts = max_attempts
spec = Fuchsia(test=test_spec)
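    # Construct Swarming task requests, either from testsharder shards or via
    # the deprecated non-sharded path.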
if test_in_shards:
shards = api.testsharder.execute(
"load test shards", "testsharder", fuchsia_build_dir
)
task_requests = api.testing_requests.task_requests(
build_results,
api.buildbucket.build,
spec.test.per_test_timeout_secs,
spec.test.pool,
shards,
spec.test.swarming_expiration_timeout_secs,
spec.test.swarming_io_timeout_secs,
spec.test.use_runtests,
timeout_secs=spec.test.timeout_secs,
default_service_account=test_spec.default_service_account,
)
else:
task_requests = api.testing_requests.deprecated_task_requests(
build_results,
api.buildbucket.build,
api.testing_requests.deprecated_test_cmds(test_spec),
test_spec.device_type,
test_spec.pool,
test_spec.timeout_secs,
test_spec.pave,
swarming_expiration_timeout_secs=spec.test.swarming_expiration_timeout_secs,
swarming_io_timeout_secs=spec.test.swarming_io_timeout_secs,
default_service_account=test_spec.default_service_account,
)
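    # Bundle the build artifacts and task requests needed to run the test tasks.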
orchestration_inputs = api.build.test_orchestration_inputs_from_build_results(
build_results, task_requests
)
testing_tasks = api.testing.run_test_tasks(
debug_symbol_url=api.artifacts.debug_symbol_url(),
orchestration_inputs=orchestration_inputs,
max_attempts=spec.test.max_attempts,
rerun_budget_secs=spec.test.rerun_budget_secs,
)
    # Upload the test results from each task's final attempt.
if upload_results:
final_results = [
task.get_all_attempts()[-1].test_results
for task in testing_tasks
if task.get_all_attempts()[-1].test_results
]
for test_results in final_results:
test_results.upload_results(
gcs_bucket=gcs_bucket,
upload_to_catapult=upload_to_catapult,
orchestration_inputs=orchestration_inputs,
)
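    # Surface any task or test failures as a step failure.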
    api.testing.raise_failures(testing_tasks)


def GenTests(api):
# For coverage
api.testing.task_requests_step_data([api.testing.task_request_jsonish(False)], "")
def test(*args, **kwargs):
return api.testing.test(*args, **kwargs) + api.step_data(
"read images", api.build.mock_image_manifest()
)
# Test cases for deprecated testing functions.
yield test("deprecated")
# Test cases for testing in shards.
    # TODO(fxb/9784): during mass clean-up, move this into api.testing.test_api.
test_task_outputs = [
api.testing_requests.SYSLOG_NAME,
api.testing_requests.SERIAL_LOG_NAME,
# We need something under TEST_RESULTS_DIR_NAME so that the code under
# test finds summary.json. Because of how we've mocked this,
# the file doesn't actually need to be named summary.json.
# TODO(garymm): switch to summary.json for clarity.
api.testing_requests.TEST_RESULTS_DIR_NAME + "/path/to/output_file.txt",
"benchmark.catapult_json",
]
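    # Mocks Swarming results and per-shard test step data for the named shards;
    # shards with "EMU" in the name use the legacy QEMU result format.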
def test_task_data(*shard_names, **kwargs): # pylint: disable=invalid-name
iteration = kwargs.pop("iteration", 0)
assert not kwargs
results = []
step_data = api.step_data(None)
for idx, name in enumerate(shard_names):
results.append(
api.swarming.task_result(
id=str(idx), name=name, outputs=test_task_outputs
)
)
step_data += api.testing.test_step_data(
shard_name=name, legacy_qemu="EMU" in name
)
step_data += api.testing.task_retry_step_data(results, iteration=iteration)
return step_data
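    # Sharded testing with an empty shard list.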
yield test(
"test_with_no_shards",
clear_default_steps=True,
properties={"test_in_shards": True,},
steps=[api.testsharder.execute("load test shards", shards=())],
)
# fuchsia-0000 passes the first time.
# fuchsia-0001 has tests that always fail.
# fuchsia-0002 always times out.
# fuchsia-0003 has tests that fail the first time but pass the second time.
yield test(
"test_in_shards_mixed_failure",
status="failure",
clear_default_steps=True,
properties={
"max_attempts": 0, # 0 means default
"test_in_shards": True,
# Here to get coverage for this path without adding another test.
"per_test_timeout_secs": 1,
},
steps=[
api.testsharder.execute(
"load test shards",
shards=[
api.testsharder.shard(
name="fuchsia-0000",
tests=api.testing_requests.default_tests(),
dimensions=dict(device_type="QEMU"),
),
api.testsharder.shard(
name="fuchsia-0001",
tests=[api.testsharder.test("test1")],
dimensions=dict(device_type="NUC"),
),
api.testsharder.shard(
name="fuchsia-0002",
tests=api.testing_requests.default_tests(),
dimensions=dict(device_type="QEMU"),
),
api.testsharder.shard(
name="fuchsia-0003",
tests=[api.testsharder.test("test3")],
dimensions=dict(device_type="NUC"),
),
],
),
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="610", name="fuchsia-0000", outputs=test_task_outputs,
),
api.swarming.task_result(
id="710", name="fuchsia-0001", outputs=test_task_outputs,
),
api.swarming.task_result(
id="810",
name="fuchsia-0002",
state=api.swarming.TaskState.TIMED_OUT,
outputs=["serial.txt", "syslog.txt"],
),
api.swarming.task_result(
id="910", name="fuchsia-0003", outputs=test_task_outputs,
),
],
iteration=0,
),
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="711", name="fuchsia-0001", outputs=test_task_outputs,
),
api.swarming.task_result(
id="811",
name="fuchsia-0002",
state=api.swarming.TaskState.TIMED_OUT,
outputs=["serial.txt", "syslog.txt"],
),
api.swarming.task_result(
id="911", name="fuchsia-0003", outputs=test_task_outputs,
),
],
iteration=1,
),
api.testing.test_step_data(
# Test that multiplied shards (where the same test executable is
# run many times) are handled correctly.
shard_name="fuchsia-0000",
legacy_qemu=False,
tests_json=[
{
"test": {
"path": "host_x64/foo_test",
"name": "foo_test (%i)" % (i + 1),
"package_url": "fuchsia-pkg://fuchsia.com/foo_test",
},
}
for i in range(5)
],
),
api.testing.test_step_data(
shard_name="fuchsia-0001", legacy_qemu=False, failure=True, iteration=0
),
api.testing.test_step_data(
shard_name="fuchsia-0001", failure=True, iteration=1
),
api.testing.test_step_data(
shard_name="fuchsia-0003", legacy_qemu=False, failure=True, iteration=0
),
api.testing.test_step_data(
shard_name="fuchsia-0003", legacy_qemu=False, iteration=1
),
],
)
multiplied_tests = [api.testing_requests.default_tests()[0]] * 5
yield test(
"test_in_shards_single_attempt",
status="failure",
clear_default_steps=True,
properties={"test_in_shards": True, "per_test_timeout_secs": 1,},
steps=[
api.testsharder.execute(
"load test shards",
shards=[
api.testsharder.shard(
name="multiplied:fuchsia-0000",
tests=multiplied_tests,
dimensions=dict(device_type="QEMU"),
),
],
),
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="610",
name="multiplied:fuchsia-0000",
outputs=test_task_outputs,
),
],
iteration=0,
),
api.testing.test_step_data(
shard_name="multiplied:fuchsia-0000",
failure=True,
iteration=0,
tests_json=[{"test": t.render_to_jsonish()} for t in multiplied_tests],
),
],
)
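    # Cover the catapult upload path.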
yield test(
"upload_to_catapult",
clear_default_steps=True,
properties={"test_in_shards": True, "upload_to_catapult": True},
steps=[
api.testsharder.execute(
"load test shards",
shards=[
api.testsharder.shard(
name="Linux",
tests=api.testing_requests.default_tests(),
# For coverage of non-fuchsia specific code paths.
dimensions=dict(os="linux"),
),
],
),
test_task_data("Linux"),
],
)
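    # Cover the case where symbolizing a shard's syslog fails.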
yield test(
"symbolizer_failure",
expect_failure=True,
clear_default_steps=True,
properties={"test_in_shards": True},
steps=[
api.testsharder.execute(
"load test shards",
shards=[
api.testsharder.shard(
name="fuchsia-0000",
tests=api.testing_requests.default_tests(),
dimensions=dict(device_type="QEMU"),
),
],
),
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="611", name="fuchsia-0000", outputs=test_task_outputs,
),
],
iteration=0,
),
api.step_data(
"launch/collect.0.process results.fuchsia-0000.symbolize %s"
% api.testing_requests.SYSLOG_NAME,
retcode=1,
),
],
)
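    # Cover rerun mode, in which a shard is rerun until rerun_budget_secs is
    # exhausted.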
yield test(
"rerun",
expect_failure=True,
clear_default_steps=True,
properties={"test_in_shards": True, "rerun_budget_secs": 100,},
steps=[
api.testsharder.execute(
"load test shards",
shards=[
api.testsharder.shard(
name="fuchsia-0000",
tests=api.testing_requests.default_tests(),
dimensions=dict(device_type="QEMU"),
),
],
),
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="611", name="fuchsia-0000", outputs=test_task_outputs,
),
],
iteration=0,
),
api.testing.test_step_data(
shard_name="fuchsia-0000", iteration=0, failure=True
),
],
) + (
# One less than rerun_budget_secs. Should result in the
# testing task being run exactly once.
api.time.step(99)
)
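    # Build a shard with many tests to exercise the presentation of many failures.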
tests = [api.testsharder.test(name="test%02d" % i) for i in range(20)]
# Ensure that we cover the presentation logic for when a single test fails
# multiple times.
tests.append(tests[0])
yield test(
"many_tests_fail",
expect_failure=True,
clear_default_steps=True,
properties={"test_in_shards": True},
steps=[
api.testsharder.execute(
"load test shards",
shards=[
api.testsharder.shard(
name="fuchsia-0000", tests=tests, dimensions=dict(os="fuchsia"),
),
],
),
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="611", name="fuchsia-0000", outputs=test_task_outputs,
),
],
iteration=0,
),
api.testing.test_step_data(
shard_name="fuchsia-0000",
tests_json=[{"test": t.render_to_jsonish()} for t in tests],
iteration=0,
failure=True,
),
],
)
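    # Affected-test shards that still fail after the max attempts should cause
    # launching and collecting to stop early.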
yield test(
"failed_affected_tests_aborts_early",
status="failure",
clear_default_steps=True,
properties={
"max_attempts": 0, # 0 means default
"test_in_shards": True,
"run_affected": True,
},
steps=[
api.testsharder.execute(
"load test shards",
shards=[
api.testsharder.shard(
name="affected:fuchsia-0000",
tests=api.testing_requests.default_tests(),
dimensions=dict(device_type="QEMU"),
),
api.testsharder.shard(
name="fuchsia-0001",
tests=[api.testsharder.test("test1")],
dimensions=dict(device_type="NUC"),
),
api.testsharder.shard(
name="affected:fuchsia-0002",
tests=api.testing_requests.default_tests(),
dimensions=dict(device_type="QEMU"),
),
api.testsharder.shard(
name="fuchsia-0003",
tests=[api.testsharder.test("test3")],
dimensions=dict(device_type="NUC"),
),
],
),
# launch all tasks.
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="610",
name="affected:fuchsia-0000",
outputs=test_task_outputs,
),
api.swarming.task_result(
id="710", name="fuchsia-0001", outputs=test_task_outputs,
),
api.swarming.task_result(
id="810",
name="affected:fuchsia-0002",
outputs=test_task_outputs,
),
api.swarming.task_result(
id="910", name="fuchsia-0003", outputs=test_task_outputs,
),
],
iteration=0,
collect_iteration=-1,
),
# affected:fuchsia-0000 and fuchsia-0001 fail.
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="610",
name="affected:fuchsia-0000",
outputs=test_task_outputs,
),
api.swarming.task_result(
id="710", name="fuchsia-0001", outputs=test_task_outputs,
),
],
iteration=-1,
collect_iteration=0,
),
# relaunch failed tasks.
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="611",
name="affected:fuchsia-0000",
outputs=test_task_outputs,
),
api.swarming.task_result(
id="711", name="fuchsia-0001", outputs=test_task_outputs,
),
],
iteration=1,
collect_iteration=-1,
),
# affected:fuchsia-0000 fails and fuchsia-0003 passes.
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="611",
name="affected:fuchsia-0000",
outputs=test_task_outputs,
),
api.swarming.task_result(
id="910", name="fuchsia-0003", outputs=test_task_outputs,
),
],
iteration=-1,
collect_iteration=1,
),
# affected:fuchsia-0002 passes. We stop launching and collecting
# since all affected tasks have completed and one failed after the
# max attempts.
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="810",
name="affected:fuchsia-0002",
outputs=test_task_outputs,
),
],
iteration=-1,
collect_iteration=2,
),
api.testing.test_step_data(
shard_name="affected:fuchsia-0000", failure=True, iteration=0,
),
api.testing.test_step_data(
shard_name="affected:fuchsia-0000", failure=True, iteration=1
),
api.testing.test_step_data(
shard_name="fuchsia-0001", failure=True, iteration=0
),
api.testing.test_step_data(shard_name="affected:fuchsia-0002", iteration=2),
api.testing.test_step_data(shard_name="fuchsia-0003", iteration=1),
],
)