blob: 1dc1b096150747e8ea2f1fd2c501db8c9bc4f3f2 [file] [log] [blame]
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.recipe_api import Property
from PB.go.fuchsia.dev.fuchsia.tools.integration.fint.proto import (
set_artifacts as fint_set_artifacts_pb2,
)
from PB.infra.fuchsia import Fuchsia
# Recipe modules this recipe depends on; resolved by the recipe engine at
# load time and exposed on the `api` object passed to RunSteps/GenTests.
DEPS = [
    "fuchsia/artifacts",
    "fuchsia/build",
    "fuchsia/buildbucket_util",
    "fuchsia/checkout",
    "fuchsia/swarming_retry",
    "fuchsia/testing",
    "fuchsia/testing_requests",
    "fuchsia/testsharder",
    "recipe_engine/buildbucket",
    "recipe_engine/path",
    "recipe_engine/swarming",
    "recipe_engine/time",
]
# Input properties for this recipe; each key is passed to RunSteps as a
# keyword argument of the same name.
PROPERTIES = {
    "gcs_bucket": Property(
        kind=str,
        help="GCS bucket for uploading checkout, build, and test results",
        default="fuchsia_infra",
    ),
    "pave": Property(
        kind=bool, help="Passed through to spec field Fuchsia.Test.pave", default=True
    ),
    "upload_to_catapult": Property(kind=bool, default=False),
    "per_test_timeout_secs": Property(
        kind=int,
        help="Passed through to spec field Fuchsia.Test.per_test_timeout_secs",
        default=0,
    ),
    "rerun_budget_secs": Property(
        kind=int,
        help="Passed through to spec field Fuchsia.Test.rerun_budget_secs",
        default=0,
    ),
    "use_runtests": Property(kind=bool, help="Whether to use runtests", default=False),
    "max_attempts": Property(
        kind=int,
        # Casing fixed for consistency: sibling help strings all reference
        # Fuchsia.Test.<field>, not Fuchsia.test.<field>.
        help="Passed through to spec field Fuchsia.Test.max_attempts",
        default=1,
    ),
}
def RunSteps(
    api,
    gcs_bucket,
    pave,
    upload_to_catapult,
    per_test_timeout_secs,
    rerun_budget_secs,
    use_runtests,
    max_attempts,
):
    """Shard, run, and (optionally) upload results for Fuchsia tests.

    Builds the minimal fixtures needed by the testing modules, launches the
    sharded test tasks via swarming, uploads each task's final results when a
    GCS bucket is configured, and raises on any test failures.
    """
    api.swarming_retry.prune_tracebacks_for_testing()

    should_upload = bool(gcs_bucket)

    # Intended to be a minimal amount of code to create a valid
    # FuchsiaBuildResults object.
    checkout_root = api.path["start_dir"]
    checkout = api.checkout.CheckoutResults(
        checkout_root,
        snapshot_file=api.path["start_dir"].join("snapshot"),
        release_branch=None,
        release_version=None,
        source_info={},
    )
    build_dir = checkout_root.join("out", "not-default")
    gn_results = api.build.gn_results(
        build_dir, fint_set_artifacts=fint_set_artifacts_pb2.SetArtifacts()
    )
    build_results = api.build.build_results(
        checkout=checkout,
        build_dir=build_dir,
        gn_results=gn_results,
        images=[{"name": "zircon-a", "type": "zbi", "path": "fuchsia.zbi"}],
    )

    # Configure context of uploaded artifacts for test task construction.
    api.artifacts.gcs_bucket = "fuchsia-artifacts"
    api.artifacts.uuid = api.buildbucket_util.id

    test_spec = Fuchsia.Test(
        pave=pave,
        pool="fuchsia.tests",
        per_test_timeout_secs=per_test_timeout_secs,
        use_runtests=use_runtests,
        default_service_account="default_service_account",
        rerun_budget_secs=rerun_budget_secs,
    )
    # A rerun budget forces exactly one attempt per task; otherwise honor the
    # max_attempts property.
    test_spec.max_attempts = 1 if rerun_budget_secs else max_attempts
    spec = Fuchsia(test=test_spec)

    shards = api.testsharder.execute("load test shards", "testsharder", build_dir)
    task_requests = api.testing_requests.task_requests(
        build_results,
        api.buildbucket.build,
        spec.test.per_test_timeout_secs,
        spec.test.pool,
        shards,
        spec.test.swarming_expiration_timeout_secs,
        spec.test.swarming_io_timeout_secs,
        spec.test.use_runtests,
        timeout_secs=spec.test.timeout_secs,
        default_service_account=test_spec.default_service_account,
    )
    orchestration_inputs = api.build.test_orchestration_inputs_from_build_results(
        build_results, task_requests
    )
    testing_tasks = api.testing.run_test_tasks(
        debug_symbol_url=api.artifacts.debug_symbol_url(),
        orchestration_inputs=orchestration_inputs,
        max_attempts=spec.test.max_attempts,
        rerun_budget_secs=spec.test.rerun_budget_secs,
        retry_task_on_test_failure=False,
        per_test_timeout_secs=spec.test.per_test_timeout_secs,
    )

    # Upload the results of each task's last attempt, skipping tasks whose
    # final attempt produced no results.
    if should_upload:
        for task in testing_tasks:
            last_attempt_results = task.get_all_attempts()[-1].test_results
            if not last_attempt_results:
                continue
            last_attempt_results.upload_results(
                gcs_bucket=gcs_bucket,
                upload_to_catapult=upload_to_catapult,
                orchestration_inputs=orchestration_inputs,
                resultdb_base_variant={
                    "test_environment": last_attempt_results.env_name
                },
                resultdb_tags=["key1:var1", "key2:var2"],
            )

    api.testing.raise_failures(testing_tasks)
def GenTests(api):
    """Yields recipe test cases exercising the sharded-testing code paths.

    Each yielded case mocks the testsharder output, the swarming task
    results for one or more collect iterations, and the per-shard test
    result step data.
    """
    # For coverage
    api.testing.task_requests_step_data([api.testing.task_request_jsonish(False)], "")
    # TODO(fxb/9784): during mass clean-up, move into api.testing.test_api.
    test_task_outputs = [
        api.testing_requests.SYSLOG_NAME,
        api.testing_requests.SERIAL_LOG_NAME,
        # We need something under TEST_RESULTS_DIR_NAME so that the code under
        # test finds summary.json. Because of how we've mocked this,
        # the file doesn't actually need to be named summary.json.
        # TODO(olivernewman): switch to summary.json for clarity.
        api.testing_requests.TEST_RESULTS_DIR_NAME + "/path/to/output_file.txt",
        "benchmark.catapult_json",
    ]

    def test_task_data(*shard_names, **kwargs):  # pylint: disable=invalid-name
        # Helper: builds combined step data mocking one passing swarming task
        # (and its per-shard test results) for each name in `shard_names`.
        iteration = kwargs.pop("iteration", 0)
        assert not kwargs
        results = []
        # Start from an empty step-data aggregate and accumulate onto it.
        step_data = api.step_data(None)
        for idx, name in enumerate(shard_names):
            results.append(
                api.swarming.task_result(
                    id=str(idx), name=name, outputs=test_task_outputs
                )
            )
            step_data += api.testing.test_step_data(shard_name=name)
        step_data += api.testing.task_retry_step_data(results, iteration=iteration)
        return step_data

    yield api.testing.test(
        "test_with_no_shards",
        clear_default_steps=True,
        steps=[api.testsharder.execute("load test shards", shards=())],
    )

    # fuchsia-0000 passes the first time.
    # fuchsia-0001 has tests that always fail.
    # fuchsia-0002 always times out.
    # fuchsia-0003 has tests that fail the first time but pass the second time.
    yield api.testing.test(
        "mixed_failure",
        status="failure",
        clear_default_steps=True,
        properties={
            "max_attempts": 0,  # 0 means default
            # Here to get coverage for this path without adding another test.
            "per_test_timeout_secs": 1,
        },
        steps=[
            api.testsharder.execute(
                "load test shards",
                shards=[
                    api.testsharder.shard(
                        name="fuchsia-0000", dimensions=dict(device_type="QEMU")
                    ),
                    api.testsharder.shard(
                        name="fuchsia-0001", dimensions=dict(device_type="NUC")
                    ),
                    api.testsharder.shard(
                        name="fuchsia-0002", dimensions=dict(device_type="QEMU")
                    ),
                    api.testsharder.shard(
                        name="fuchsia-0003", dimensions=dict(device_type="NUC")
                    ),
                ],
            ),
            # First iteration: all four shards complete; fuchsia-0002 hits a
            # swarming-level timeout and so produces only log outputs.
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="610", name="fuchsia-0000", outputs=test_task_outputs,
                    ),
                    api.swarming.task_result(
                        id="710", name="fuchsia-0001", outputs=test_task_outputs,
                    ),
                    api.swarming.task_result(
                        id="810",
                        name="fuchsia-0002",
                        state=api.swarming.TaskState.TIMED_OUT,
                        outputs=["serial.txt", "syslog.txt"],
                    ),
                    api.swarming.task_result(
                        id="910", name="fuchsia-0003", outputs=test_task_outputs,
                    ),
                ],
                iteration=0,
            ),
            # Second iteration (retries): fuchsia-0002 fails at the task level
            # this time instead of timing out.
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="711", name="fuchsia-0001", outputs=test_task_outputs,
                    ),
                    api.swarming.task_result(
                        id="811",
                        name="fuchsia-0002",
                        failure=True,
                        outputs=["serial.txt", "syslog.txt"],
                    ),
                    api.swarming.task_result(
                        id="911", name="fuchsia-0003", outputs=test_task_outputs,
                    ),
                ],
                iteration=1,
            ),
            api.testing.test_step_data(
                # Test that multiplied shards (where the same test executable is
                # run many times) are handled correctly.
                shard_name="fuchsia-0000",
                tests=["foo_test (%i)" % (i + 1) for i in range(5)],
            ),
            api.testing.test_step_data(
                shard_name="fuchsia-0001",
                failure=True,
                tefmocheck_failure=True,
                iteration=0,
            ),
            api.testing.test_step_data(
                shard_name="fuchsia-0001",
                failure=True,
                tefmocheck_failure=True,
                iteration=1,
            ),
            api.testing.test_step_data(
                shard_name="fuchsia-0003",
                failure=True,
                tefmocheck_failure=True,
                iteration=0,
            ),
            api.testing.test_step_data(shard_name="fuchsia-0003", iteration=1),
        ],
    )

    # The same test name repeated 5 times, as produced by shard multiplication.
    multiplied_tests = ["test0"] * 5

    yield api.testing.test(
        "single_attempt",
        status="failure",
        clear_default_steps=True,
        properties={"per_test_timeout_secs": 1,},
        steps=[
            api.testsharder.execute(
                "load test shards",
                shards=[
                    api.testsharder.shard(
                        name="multiplied:fuchsia-0000",
                        tests=multiplied_tests,
                        dimensions=dict(device_type="QEMU"),
                    ),
                ],
            ),
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="610",
                        name="multiplied:fuchsia-0000",
                        outputs=test_task_outputs,
                    ),
                ],
                iteration=0,
            ),
            api.testing.test_step_data(
                shard_name="multiplied:fuchsia-0000",
                failure=True,
                iteration=0,
                tests=multiplied_tests,
            ),
        ],
    )

    yield api.testing.test(
        "upload_to_catapult",
        clear_default_steps=True,
        properties={"upload_to_catapult": True},
        steps=[
            api.testsharder.execute(
                "load test shards",
                shards=[
                    api.testsharder.shard(
                        name="Linux",
                        tests=multiplied_tests,
                        # For coverage of non-fuchsia specific code paths.
                        dimensions=dict(os="linux"),
                    ),
                ],
            ),
            test_task_data("Linux"),
            api.testing.test_step_data(shard_name="Linux", tests=multiplied_tests,),
        ],
    )

    # The symbolizer step fails (retcode 1) while processing the syslog.
    yield api.testing.test(
        "symbolizer_failure",
        expect_failure=True,
        clear_default_steps=True,
        steps=[
            api.testsharder.execute(
                "load test shards",
                shards=[
                    api.testsharder.shard(
                        name="fuchsia-0000", dimensions=dict(device_type="QEMU"),
                    ),
                ],
            ),
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="611", name="fuchsia-0000", outputs=test_task_outputs,
                    ),
                ],
                iteration=0,
            ),
            api.step_data(
                "launch/collect.0.process results.fuchsia-0000.symbolize %s"
                % api.testing_requests.SYSLOG_NAME,
                retcode=1,
            ),
        ],
    )

    yield api.testing.test(
        "rerun",
        expect_failure=True,
        clear_default_steps=True,
        properties={"rerun_budget_secs": 100,},
        steps=[
            api.testsharder.execute(
                "load test shards",
                shards=[
                    api.testsharder.shard(
                        name="fuchsia-0000", dimensions=dict(device_type="QEMU"),
                    ),
                ],
            ),
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="611", name="fuchsia-0000", outputs=test_task_outputs,
                    ),
                ],
                iteration=0,
            ),
            api.testing.test_step_data(
                shard_name="fuchsia-0000", iteration=0, failure=True
            ),
        ],
    ) + (
        # One less than rerun_budget_secs. Should result in the
        # testing task being run exactly once.
        api.time.step(99)
    )

    tests = ["test%02d" % i for i in range(20)]
    # Ensure that we cover the presentation logic for when a single test fails
    # multiple times.
    tests.append(tests[0])
    yield api.testing.test(
        "many_tests_fail",
        expect_failure=True,
        clear_default_steps=True,
        steps=[
            api.testsharder.execute(
                "load test shards",
                shards=[
                    api.testsharder.shard(
                        name="fuchsia-0000", tests=tests, dimensions=dict(os="fuchsia"),
                    ),
                ],
            ),
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="611", name="fuchsia-0000", outputs=test_task_outputs
                    ),
                ],
                iteration=0,
            ),
            api.testing.test_step_data(
                shard_name="fuchsia-0000", tests=tests, iteration=0, failure=True,
            ),
        ],
    )

    # NOTE(review): the iteration/collect_iteration pairs below appear to
    # distinguish the launch iteration from the collect iteration, with -1
    # presumably meaning "not applicable to this step" — confirm against
    # api.testing.task_retry_step_data.
    yield api.testing.test(
        "failed_affected_tests_aborts_early",
        status="failure",
        clear_default_steps=True,
        properties={"max_attempts": 0},  # 0 means default
        steps=[
            api.testsharder.execute(
                "load test shards",
                shards=[
                    api.testsharder.shard(
                        name="affected:fuchsia-0000",
                        dimensions=dict(device_type="QEMU"),
                    ),
                    api.testsharder.shard(
                        name="fuchsia-0001", dimensions=dict(device_type="NUC"),
                    ),
                    api.testsharder.shard(
                        name="affected:fuchsia-0002",
                        dimensions=dict(device_type="QEMU"),
                    ),
                    api.testsharder.shard(
                        name="fuchsia-0003", dimensions=dict(device_type="NUC"),
                    ),
                ],
            ),
            # launch all tasks.
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="610",
                        name="affected:fuchsia-0000",
                        outputs=test_task_outputs,
                    ),
                    api.swarming.task_result(
                        id="710", name="fuchsia-0001", outputs=test_task_outputs,
                    ),
                    api.swarming.task_result(
                        id="810",
                        name="affected:fuchsia-0002",
                        outputs=test_task_outputs,
                    ),
                    api.swarming.task_result(
                        id="910", name="fuchsia-0003", outputs=test_task_outputs,
                    ),
                ],
                iteration=0,
                collect_iteration=-1,
            ),
            # fuchsia-0001 fails with a task failure.
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="710",
                        name="fuchsia-0001",
                        outputs=test_task_outputs,
                        failure=True,
                    ),
                ],
                iteration=-1,
                collect_iteration=0,
            ),
            # relaunch failed tasks.
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="711", name="fuchsia-0001", outputs=test_task_outputs,
                    ),
                ],
                iteration=1,
                collect_iteration=-1,
            ),
            # affected:fuchsia-0000 fails and fuchsia-0003 passes.
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="610",
                        name="affected:fuchsia-0000",
                        outputs=test_task_outputs,
                    ),
                    api.swarming.task_result(
                        id="910", name="fuchsia-0003", outputs=test_task_outputs,
                    ),
                ],
                iteration=-1,
                collect_iteration=1,
            ),
            # affected:fuchsia-0000 failed with a non-tefmocheck failure, so we
            # don't relaunch.
            # affected:fuchsia-0002 passes. We stop collecting since all
            # affected tasks have completed and one failed after the max
            # attempts.
            api.testing.task_retry_step_data(
                [
                    api.swarming.task_result(
                        id="810",
                        name="affected:fuchsia-0002",
                        outputs=test_task_outputs,
                    ),
                ],
                iteration=-1,
                collect_iteration=2,
            ),
            api.testing.test_step_data(
                shard_name="affected:fuchsia-0000", failure=True, iteration=1
            ),
            api.testing.test_step_data(shard_name="affected:fuchsia-0002", iteration=2),
            api.testing.test_step_data(
                shard_name="fuchsia-0003", iteration=1, run_count_per_test=2
            ),
        ],
    )