# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe for processing code coverage.
# Execution overview
## Get build results
This recipe gets or triggers coverage builders and collects the results.
## Checkout + Build
It creates a checkout and builds the generated files so that these
can be uploaded with the coverage report to GCS.
## Process coverage
It collects the test results from all the coverage builders and generates a
coverage report based on the coverage data.
"""
import functools
from google.protobuf import json_format
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
from PB.recipe_engine.result import RawResult
from PB.infra.coverage import CoverageCollectionType
from PB.recipe_modules.recipe_engine.led.properties import (
InputProperties as LedInputProperties,
)
from PB.recipes.fuchsia.fuchsia.coverage import InputProperties
from PB.recipes.fuchsia.fuchsia.fuchsia import InputProperties as FuchsiaInputProperties
DEPS = [
"fuchsia/artifacts",
"fuchsia/build",
"fuchsia/buildbucket_util",
"fuchsia/cas_util",
"fuchsia/checkout",
"fuchsia/cipd_ensure",
"fuchsia/gerrit",
"fuchsia/gsutil",
"fuchsia/python3",
"fuchsia/recipe_testing",
"fuchsia/subbuild",
"fuchsia/testing",
"fuchsia/utils",
"recipe_engine/buildbucket",
"recipe_engine/context",
"recipe_engine/file",
"recipe_engine/json",
"recipe_engine/path",
"recipe_engine/platform",
"recipe_engine/properties",
"recipe_engine/step",
"recipe_engine/swarming",
]
PROPERTIES = InputProperties
# The property name for the list of test task ids run by a fuchsia builder.
TASK_IDS_PROPERTY = "test-swarming-task-ids"
COVARGS_LOG_LEVEL = "debug"
COVARGS_OUTPUT_JSON = "covargs-output.json"
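# GCS bucket and path template for coverage metadata consumed by the Chromium
# coverage service.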
CODE_COVERAGE_BUCKET = "code-coverage-data"
CODE_COVERAGE_PATH = "{type}/{host}/{project}/{change}/{bucket}/{builder}/{id}/metadata"
COVERAGE_REPORT_NAME = "index.html"
COVERAGE_DATA_JSON = "coverage.json"
CTF_COVERAGE_BUILDER = "fuchsia-ctf-coverage"
SDK_SOURCE_LIST = "sdk_source_set_list.json"
FIDL_API_MAPPING_LIST = "ctf_fidl_mangled_to_api_mapping.json"
PLASA_LIST = "test_coverage_report.plasa.json"
ABSOLUTE_UPLOADER_TOOL_NAME = "raw_coverage_cloud_uploader"
INCREMENTAL_UPLOADER_TOOL_NAME = "cloud_client"
# Version types to pass with the -summary and -llvm-profdata flags for covargs.
CLANG_TYPE = "clang"
RUST_TYPE = "rust"
def RunSteps(api, props):
# Collect non-fatal errors here to return at the end.
errors = []
if not props.child_build_ids:
with api.step.nest("get builders to launch") as pres:
            # For CTF coverage and absolute coverage, we don't care about the
            # changed files and always launch all the coverage builders.
launch_all = props.collect_absolute or is_ctf_coverage(api)
builder_names = builders_by_filetype(
api, props.coverage_builders, launch_all
)
if not builder_names:
pres.properties["no_builders_collect_coverage"] = True
message = (
"No builders collect coverage for the changed files: no "
"work to do."
)
return RawResult(summary_markdown=message, status=common_pb2.SUCCESS)
# TODO(fxbug.dev/75085): Update/remove list once issues are fixed.
malformed_tests_allowlist = []
build_ids = list(props.child_build_ids)
if not props.child_build_ids:
build_ids = get_builds(
api,
builder_names,
affected_tests_only=props.affected_tests_only,
collect_absolute=props.collect_absolute,
)
assert build_ids, "failed to get builds"
checkout = api.checkout.fuchsia_with_options(
manifest=props.manifest,
remote=props.remote,
rebase_patch=not api.buildbucket_util.is_tryjob,
)
    # Run api.build.gen() to generate tool_paths.json, which is needed to
    # access the LLVM tools, and to generate the profile source list for CTF
    # coverage.
gn_results = api.build.gen(
checkout=checkout,
fint_params_path=props.fint_params_path,
)
instrumentation_files = [] if props.collect_absolute else checkout.changed_files()
if is_ctf_coverage(api):
profile_source_list = api.file.read_json(
"read profile source list",
gn_results.build_dir / SDK_SOURCE_LIST,
test_data=["file1", "file2"],
)
        # The file paths are relative to the checkout root, but they must be
        # either absolute or relative to the build directory, so join them with
        # the checkout root to make them absolute.
instrumentation_files = [checkout.root_dir / f for f in profile_source_list]
builds = collect_builds(api, build_ids)
# Get orchestration_inputs from any build. The artifacts used from here
# are host tools that should be the same across all builders.
orchestration_inputs = None
# Collect ctf_fidl_mangled_to_api_mapping.json for CTF coverage.
fidl_api_mapping_list = None
# Collect test_coverage_report.plasa.json for CTF coverage.
plasa_list = None
exception_type = api.step.InfraFailure
testing_skipped = True
failed_builds_summaries = []
build_ids_to_labels = {}
for build in builds:
skipped_because_unaffected = get_output_property(
build, "skipped_because_unaffected"
)
affected_tests_no_work = get_output_property(build, "affected_tests_no_work")
task_ids = get_output_property(build, TASK_IDS_PROPERTY)
        # When only affected tests are run, the build may not have run any
        # tests at all if there were no affected tests.
no_tests_run = (
props.affected_tests_only
and build.status == common_pb2.SUCCESS
and (not task_ids or len(list(task_ids)) == 0)
)
if (
skipped_because_unaffected or affected_tests_no_work or no_tests_run
) and not api.recipe_testing.enabled:
continue
testing_skipped = False
orchestration_inputs_hash = get_output_property(
build, api.build.test_orchestration_inputs_property_name(False)
)
if not orchestration_inputs_hash:
# Only return an InfraFailure if all failures are infra failures.
# Otherwise, return a StepFailure.
if build.status == common_pb2.FAILURE:
exception_type = api.step.StepFailure
failed_builds_summaries.append(build.summary_markdown)
continue
if not orchestration_inputs:
orchestration_inputs = api.build.download_test_orchestration_inputs(
orchestration_inputs_hash
)
# Collect generated sources from all builds.
if api.path.exists(orchestration_inputs.generated_sources_root):
api.python3(
"copy generated sources to checkout",
[
api.resource("copy_sources.py"),
"--source_dir",
orchestration_inputs.generated_sources_root,
"--dest_dir",
checkout.root_dir,
],
)
# Collect build-ids.json for all builds.
build_ids_file = api.path.mkstemp()
api.artifacts.gcs_bucket = get_output_property(build, "artifact_gcs_bucket")
api.artifacts.namespace = build.id
api.artifacts.download(
"get build-ids.json",
"build-ids.json",
build_ids_file,
)
build_ids_json = api.file.read_json(
"read build-ids.json",
build_ids_file,
test_data={
"malformed_id": "//src/sys/appmgr/integration_tests/logs:log_tests_bin",
"valid_id": "//expected/valid/test",
},
)
build_ids_to_labels.update(build_ids_json)
# Collect FIDL mapping and PlaSA definition for CTF coverage.
if is_ctf_coverage(api):
if fidl_api_mapping_list is None:
fidl_api_mapping_list = download_artifact(
api, FIDL_API_MAPPING_LIST, build.id, errors
)
if plasa_list is None:
plasa_list = download_artifact(api, PLASA_LIST, build.id, errors)
if testing_skipped:
# TODO(fxbug.dev/119485): Point to fuchsia.dev instead.
message = (
"Testing skipped because the change did not affect the build graph. "
"To run all tests in spite of affected test analysis, see "
"http://go/fxi-faq#how-do-i-skip-unaffected-test-analysis."
)
# Ideally we would also bubble up the "skipped_because_unaffected" and
# "affected_tests_no_work" properties, but they are difficult to
# synthesize into a single bool in the multi-build case.
return RawResult(summary_markdown=message, status=common_pb2.SUCCESS)
if not orchestration_inputs:
raise exception_type(
"\n".join(failed_builds_summaries)
if failed_builds_summaries
else "no orchestration inputs found"
)
    # Configure the artifacts GCS bucket so the debug symbol URL can be derived.
api.artifacts.gcs_bucket = props.artifact_gcs_bucket
summary_files = {CLANG_TYPE: [], RUST_TYPE: []}
covargs_inputs_tree = api.file.symlink_tree(api.path.mkdtemp("covargs_inputs"))
for build in builds:
builder = build.builder.builder
version_type = CLANG_TYPE
if ".rs" in props.coverage_builders[builder]:
version_type = RUST_TYPE
task_ids = get_output_property(build, TASK_IDS_PROPERTY)
if not task_ids:
continue
# All tasks should have been completed, so there's no need to have a timeout.
results = api.swarming.collect(
"collect",
list(task_ids),
output_dir=api.path.mkdtemp("swarming"),
)
for result in results:
if not result.success:
continue
with api.step.nest(f"process result for {result.name}"):
results_dir = api.testing.extract_test_results(
step_name="extract",
task_result=result,
)
summary_files[version_type].append(
results_dir / api.testing.TEST_SUMMARY_JSON
)
covargs_inputs_tree.register_link(
target=results_dir,
linkname=covargs_inputs_tree.root.joinpath(
f"{build.builder.builder}-{result.name}"
),
)
if not summary_files[CLANG_TYPE] and not summary_files[RUST_TYPE]:
raise api.step.StepFailure("no summary files could be retrieved")
# The covargs tool must be run from the build dir since it deals with
# relative filepaths to the build dir. The build dir is expected to be
# "checkout_root/out/not-default" as set in api.build.gen().
build_dir = "out/not-default"
abs_build_dir = checkout.root_dir / build_dir
api.file.ensure_directory("ensure build directory", abs_build_dir)
with api.context(cwd=abs_build_dir):
try:
process_coverage(
api=api,
checkout_root=checkout.root_dir,
covargs_path=orchestration_inputs.covargs,
summary_files=summary_files,
debug_symbol_url=api.artifacts.debug_symbol_url(),
llvm_profdata={
CLANG_TYPE: gn_results.tool("llvm-profdata"),
RUST_TYPE: gn_results.tool("llvm-profdata-rust"),
},
llvm_cov=gn_results.tool("llvm-cov"),
gcs_bucket=props.artifact_gcs_bucket,
build_dir=build_dir,
absolute_coverage_args=props.absolute_coverage_args,
covargs_inputs_tree=covargs_inputs_tree,
instrumentation_files=instrumentation_files,
build_ids_to_labels=build_ids_to_labels,
malformed_tests_allowlist=malformed_tests_allowlist,
fidl_api_mapping_list=fidl_api_mapping_list,
plasa_list=plasa_list,
use_authenticated_url=props.use_authenticated_url,
)
except Exception as e:
errors.append(str(e))
finally:
covargs_inputs_tree.create_links("create tree of covargs inputs")
api.cas_util.upload(
covargs_inputs_tree.root, step_name="upload covargs inputs"
)
if errors:
raise api.step.StepFailure("\n".join(errors))
def get_output_property(build, property_name):
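    """Returns the named output property of `build`, or None if unset."""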
property_value = None
if property_name in build.output.properties:
property_value = build.output.properties[property_name]
return property_value
def download_artifact(api, artifact_name, bid, errors):
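    """Downloads the named artifact from build `bid` to a temporary file.

    Returns the local path, or None (and records an error in `errors`) if the
    download fails.
    """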
local_path = None
try:
local_path = api.path.mkstemp()
api.artifacts.download(
f"get {artifact_name}",
artifact_name,
local_path,
)
except Exception:
local_path = None
errors.append(f"{artifact_name} not found from build {bid}")
return local_path
def get_builds(api, builder_names, affected_tests_only=False, collect_absolute=False):
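    """Launches the given coverage builders and returns their build IDs."""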
    # TODO(fxb/9847): Instead of launching all builds, get existing builds
    # corresponding to the same gerrit_change or gitiles_commit.
builders_to_launch = list(builder_names)
# launch builders_to_launch
with api.step.nest("launch builders") as presentation:
properties = FuchsiaInputProperties(
affected_tests_only=affected_tests_only,
            # The `coverage_collection` property tells the launched builder that
            # it was launched to collect coverage and should behave accordingly.
            # For example, coverage builders should not multiply tests or abort
            # early if a task fails. A SELECTIVE type tells the builder to use
            # selective instrumentation when run on a gerrit change.
coverage_collection=(
CoverageCollectionType.FULL
if collect_absolute
else CoverageCollectionType.SELECTIVE
),
)
builds = api.subbuild.launch(
builders_to_launch,
presentation,
extra_properties=json_format.MessageToDict(
properties, preserving_proto_field_name=True
),
            # The coverage recipe doesn't care whether the coverage builders it
            # launched pass all their tests, so this build will pass even if the
            # subbuilds fail. Show the subbuilds in Gerrit so that CL authors
            # are still informed of test failures.
hide_in_gerrit=False,
)
return [build.build_id for build in builds.values()]
def collect_builds(api, build_ids):
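    """Collects the given subbuilds and returns their Build protos."""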
with api.step.nest("collect builds"):
builds = api.subbuild.collect(build_ids)
return [build.build_proto for build in builds.values()]
def is_ctf_coverage(api):
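    """Returns True if the current builder collects CTF coverage."""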
return api.buildbucket.build.builder.builder == CTF_COVERAGE_BUILDER
def upload_incremental(api, test_coverage, build_dir):
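    """Uploads per-change coverage data to the incremental coverage service."""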
change = api.buildbucket.build.input.gerrit_changes[0]
tool_path = api.cipd_ensure(
api.resource("incremental_uploader/coverage_cipd.ensure"),
"fuchsia/coverage/incremental_uploader/${platform}",
executable_path=INCREMENTAL_UPLOADER_TOOL_NAME,
)
cmd = [
tool_path,
"--env=prod",
"--timeout=60s",
f"--host={change.host}",
f"--project={change.project}",
f"--change_id={change.change}",
f"--patchset={int(change.patchset)}",
f"--uploader_name={api.buildbucket.build.builder.builder}",
f"--uploader_id={api.buildbucket_util.id}",
"--format=LLVM",
f"--coverage_file={test_coverage}",
f"--insert_dir_prefix={build_dir}",
]
api.step("upload to incremental coverage service", cmd)
def upload_absolute(
api, test_coverage, absolute_coverage_args, build_dir, gitiles_commit, requests_log
):
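    """Uploads coverage for `gitiles_commit` to the absolute coverage service."""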
tool_path = api.cipd_ensure(
api.resource("absolute_uploader/coverage_cipd.ensure"),
"fuchsia/coverage/absolute_uploader/${platform}",
executable_path=ABSOLUTE_UPLOADER_TOOL_NAME,
)
cmd = [
tool_path,
"--absolute_coverage_service_env=prod",
f"--host={absolute_coverage_args.host}",
f"--project={absolute_coverage_args.project}",
f"--ref={gitiles_commit.ref}",
f"--uploader_name={api.buildbucket.build.builder.builder}",
f"--uploader_id={api.buildbucket_util.id}",
"--format=LLVM",
f"--coverage_file={test_coverage}",
f"--insert_path_prefix={build_dir}",
f"--requests_log={requests_log}",
"--timeout=5m",
]
api.step(
f"upload to absolute coverage service for {gitiles_commit.id}",
cmd + [f"--commit_id={gitiles_commit.id}"],
)
def process_coverage(
api,
checkout_root,
covargs_path,
summary_files,
debug_symbol_url,
llvm_profdata,
llvm_cov,
gcs_bucket,
build_dir,
absolute_coverage_args,
covargs_inputs_tree,
instrumentation_files,
build_ids_to_labels,
malformed_tests_allowlist,
fidl_api_mapping_list=None,
plasa_list=None,
use_authenticated_url=False,
):
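    """Generates coverage reports with covargs and uploads them.

    Reports are uploaded to GCS and, where enabled, to the incremental and
    absolute coverage services.
    """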
output_dir = api.path.cleanup_dir / "coverage"
report_dir = api.path.cleanup_dir / "metadata"
temp_dir = api.path.mkdtemp("covargs")
# TODO(fxbug.dev/86596): Once llvm tools are statically linked, we can just
# upload the llvm-profdata tool by itself.
for tool in [covargs_path, llvm_cov]:
covargs_inputs_tree.register_link(
target=tool,
linkname=covargs_inputs_tree.root.joinpath(api.path.basename(tool)),
)
for version, tool in llvm_profdata.items():
covargs_inputs_tree.register_link(
target=tool,
linkname=covargs_inputs_tree.root / f"llvm-profdata-{version}",
)
cmd = [
# For debugging covargs changes, replace `covargs_path` with
# `api.resource("covargs")` and copy the new covargs into the
# recipes/fuchsia/coverage.resources directory.
covargs_path,
"-level",
COVARGS_LOG_LEVEL,
"-json-output",
api.json.output(
name=COVARGS_OUTPUT_JSON, leak_to=temp_dir / COVARGS_OUTPUT_JSON
),
"-output-dir",
output_dir,
"-llvm-profdata",
f"{llvm_profdata[RUST_TYPE]}={RUST_TYPE}",
"-llvm-profdata",
f"{llvm_profdata[CLANG_TYPE]}={CLANG_TYPE}",
"-llvm-profdata",
llvm_profdata[CLANG_TYPE],
"-llvm-cov",
llvm_cov,
"-symbol-server",
debug_symbol_url,
"-symbol-cache",
api.path.cache_dir / "symbol",
# TODO(phosek): remove this flag when debugging phase is over.
"-save-temps",
temp_dir,
"-report-dir",
report_dir,
"-base",
checkout_root,
"-compilation-dir",
checkout_root / build_dir,
]
# TODO(gulfem): When debuginfod is supported in internal builders, remove internal
# builder check, and add debuginfod flags to all coverage builders.
if not use_authenticated_url:
cmd.extend(
[
"-debuginfod-server",
api.gsutil.http_url(gcs_bucket, "", unauthenticated_url=True),
"-debuginfod-cache",
temp_dir / "debuginfod",
]
)
for version_type, summaries in summary_files.items():
for summary in summaries:
cmd.extend(["-summary", f"{summary}={version_type}"])
for instrumentation_file in instrumentation_files:
cmd.extend(["-src-file", instrumentation_file])
if is_ctf_coverage(api):
cmd.append("-skip-functions=false")
try:
api.step("covargs", cmd)
finally:
api.cas_util.upload(temp_dir, step_name="upload temporary files")
# Upload the coverage report to the Chromium coverage service.
gitiles_commit = api.buildbucket.build.input.gitiles_commit
if not is_ctf_coverage(api):
if (
not api.buildbucket.build.input.gerrit_changes
and gitiles_commit.host
and gitiles_commit.project
and gitiles_commit.id
):
dst = CODE_COVERAGE_PATH.format(
type="postsubmit",
host=gitiles_commit.host,
project=gitiles_commit.project,
change=gitiles_commit.id,
bucket=api.buildbucket.build.builder.bucket,
builder=api.buildbucket.build.builder.builder,
id=api.buildbucket.build.id,
)
step_result = api.gsutil.rsync(
name="upload report",
src=report_dir,
bucket=CODE_COVERAGE_BUCKET,
dst=dst,
recursive=True,
options={
"parallel_process_count": api.platform.cpu_count,
"parallel_thread_count": 1,
},
multithreaded=True,
)
step_result.presentation.properties.update(
{
"coverage_metadata_gs_paths": [dst],
"mimic_builder_names": [api.buildbucket.build.builder.builder],
"coverage_gs_bucket": CODE_COVERAGE_BUCKET,
"coverage_is_presubmit": False,
}
)
    # Upload the coverage data to the incremental or absolute coverage service.
# TODO(ihuh): Output the coverage.json to the output directory instead of
# the temp directory used for debugging purposes.
test_coverage = api.path.join(temp_dir, COVERAGE_DATA_JSON)
# TODO(fxbug.dev/70826): Don't upload to incremental/absolute coverage
# service for internal builders until permissions are granted.
if api.buildbucket_util.is_dev_or_try and not use_authenticated_url:
upload_incremental(api, test_coverage, build_dir)
elif (
absolute_coverage_args
and not is_ctf_coverage(api)
and not use_authenticated_url
):
# TODO(fxbug.dev/90510): Remove when issue is fixed.
# Split test_coverage to upload in smaller chunks.
# Data looks like {"data": [{"files": []}]}. We want to split by files.
data = api.file.read_json(
"read coverage data",
test_coverage,
include_log=False,
test_data={"version": "1", "data": [{"files": [{"filename": "file1"}]}]},
)
split_files = []
requests_logs_dir = api.path.mkdtemp("requests_logs")
# The chunk size is an empirically chosen value to avoid timing out in
# the upload step.
chunk_size = 7000
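        # Carry over the top-level metadata (everything except "data"); each
        # chunk below gets its own "data" list.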
new_data = {k: v for k, v in data.items() if k != "data"}
for i, d in enumerate(data["data"]):
files = d["files"]
split_data = [
files[j : j + chunk_size] for j in range(0, len(files), chunk_size)
]
for j, chunk in enumerate(split_data):
new_data["data"] = []
new_data["data"].append({"files": chunk})
tmp = api.path.join(temp_dir, f"{int(i)}_{int(j)}_{COVERAGE_DATA_JSON}")
api.file.write_json(
f"write {api.path.basename(tmp)}",
tmp,
new_data,
include_log=False,
)
split_files.append(tmp)
for i, coverage_file in enumerate(split_files):
requests_log = api.path.join(requests_logs_dir, f"{int(i)}_requests_log")
api.utils.retry(
functools.partial(
upload_absolute,
api,
coverage_file,
absolute_coverage_args,
build_dir,
gitiles_commit,
requests_log,
),
max_attempts=3,
)
api.cas_util.upload(requests_logs_dir, step_name="requests_logs")
# Upload the coverage report to our own bucket.
# TODO(ihuh): move this into gsutil module/deduplicate this with other GCS logic
dst = f"builds/{api.buildbucket_util.id}/coverage"
try:
# For CTF coverage, upload the test coverage.json to our GCS bucket to
# be available for postprocessing. Also upload the source list to see
# which files are missing tests completely and are thus not included in
# the coverage report.
if is_ctf_coverage(api):
api.file.move("move test coverage to output dir", test_coverage, output_dir)
api.file.move(
"move source list to output dir",
checkout_root.joinpath(build_dir, SDK_SOURCE_LIST),
output_dir,
)
if fidl_api_mapping_list is not None:
api.file.move(
"move FIDL API mapping list to output dir",
fidl_api_mapping_list,
output_dir / FIDL_API_MAPPING_LIST,
)
if plasa_list is not None:
api.file.move(
"move PlaSA list to output dir",
plasa_list,
output_dir / PLASA_LIST,
)
api.gsutil.upload(
name="upload coverage",
src=output_dir,
bucket=gcs_bucket,
dst=dst,
recursive=True,
gzip_exts=["html"],
options={
"parallel_process_count": api.platform.cpu_count,
"parallel_thread_count": 1,
},
multithreaded=True,
no_clobber=True,
)
finally:
coverage_report_step = api.step.empty("coverage report")
link = api.gsutil.http_url(
gcs_bucket,
api.gsutil.join(dst, COVERAGE_REPORT_NAME),
unauthenticated_url=not use_authenticated_url,
)
coverage_report_step.presentation.links[COVERAGE_REPORT_NAME] = link
with api.step.nest("check malformed binaries"):
malformed = api.file.read_text(
"read malformed binaries",
temp_dir / "malformed_binaries.txt",
).splitlines()
malformed_test_labels = [
# The labels are in the form of //path/to/target($toolchain). Strip
# the toolchain to match the labels in the allowlist.
build_ids_to_labels.get(str(m), "").rsplit("(", 1)[0]
for m in malformed
]
unexpected_valid_tests = []
# There are currently no tests in the malformed_tests_allowlist, so the
# following block won't be covered.
for test in malformed_tests_allowlist: # pragma: nocover
if test in malformed_test_labels:
malformed_test_labels.remove(test)
                continue
unexpected_valid_tests.append(test)
if len(malformed_test_labels) > 3:
step = api.step("malformed test labels", None)
step.presentation.step_text = "\n".join(malformed_test_labels)
raise api.step.StepFailure(
"more than 3 malformed tests. See `malformed test labels` step for full list."
)
if malformed_test_labels or unexpected_valid_tests:
raise api.step.StepFailure(
f"unexpected malformed tests: {list(malformed_test_labels)!r}\nunexpected valid tests: {list(unexpected_valid_tests)!r}"
)
def builders_by_filetype(api, builders_to_filetypes, launch_all):
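    """Returns the builders that collect coverage for the changed file types.

    For non-tryjobs, or when `launch_all` is set, all builders are returned.
    """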
if not api.buildbucket_util.is_tryjob or launch_all:
return builders_to_filetypes.keys()
gerrit_change = api.buildbucket.build.input.gerrit_changes[0]
change_details = api.gerrit.change_details(
name="get change details",
change_id=f"{gerrit_change.project}~{gerrit_change.change}",
host=gerrit_change.host,
query_params=["ALL_REVISIONS", "ALL_FILES"],
test_data=api.json.test_api.output(
{
"branch": "main",
"revisions": {
"d4e5f6": {
"_number": 3,
"ref": "refs/changes/00/100/3",
"files": {"file.cc": {}},
},
"a1b2c3": {
"_number": 7,
"ref": "refs/changes/00/100/7",
"files": {"file.cc": {}, "file.c": {}, "file.h": {}},
},
"g7h8i9": {
"_number": 9,
"ref": "refs/changes/00/100/9",
"files": {"unsupported": {}},
},
},
}
),
).json.output
patchsets = [
rev
for rev in change_details["revisions"].values()
if rev["_number"] == gerrit_change.patchset
]
assert len(patchsets) == 1
changed_files = patchsets[0].get("files", {}).keys()
changed_filetypes = {api.path.splitext(f)[1] for f in changed_files}
return [
builder
for builder, filetypes in builders_to_filetypes.items()
if changed_filetypes & set(filetypes)
]
def GenTests(api):
collect_steps = (
api.step_data(
"collect",
api.swarming.collect(
[
api.swarming.task_result(
id="610",
name="Linux",
outputs=["out/path/to/output/file"],
),
api.swarming.task_result(
id="710",
failure=True,
name="QEMU",
outputs=["out/path/to/output/file"],
),
api.swarming.task_result(
id="810",
name="QEMU-(2)",
outputs=["out/summary.json", "serial_log.txt"],
),
api.swarming.task_result(
id="910",
name="QEMU-(3)",
outputs=["out/summary.json"],
),
]
),
)
+ api.testing.task_requests_step_data(
[api.testing.task_request_jsonish()],
"download test orchestration inputs.load task requests",
)
+ api.step_data(
"download test orchestration inputs.load triage sources",
api.file.read_json(["triage/config.triage", "other/triage/config.triage"]),
)
)
def properties(**kwargs):
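        """Returns default test input properties, updated with kwargs."""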
props = {
"manifest": "minimal",
"remote": "https://fuchsia.googlesource.com/manifest",
"fint_params_path": "fint_params/coverage.textproto",
"coverage_builders": {
"coverage-builder": [".cc"],
"coverage-builder2": [".h", ".c"],
"coverage-rust-builder": [".rs"],
},
"absolute_coverage_args": {"host": "fuchsia", "project": "fuchsia"},
"artifact_gcs_bucket": "fuchsia-infra-artifacts",
}
props.update(kwargs)
return api.properties(**props)
coverage_build = api.subbuild.ci_build_message(
build_id=8945511751514863184,
builder="coverage-builder",
output_props={
"test-swarming-task-ids": ["610", "710", "810", "910"],
"test_orchestration_inputs_digest": "abc",
},
status="SUCCESS",
)
coverage_led_build = api.subbuild.ci_build_message(
builder="coverage-builder",
output_props={
"test-swarming-task-ids": ["610", "710", "810", "910"],
"test_orchestration_inputs_digest": "abc",
},
input_props={
"$recipe_engine/led": {
"led_run_id": "led/user_example.com/abc123",
},
},
status="SUCCESS",
)
coverage_rust_build = api.subbuild.ci_build_message(
build_id=8945511751514863184,
builder="coverage-rust-builder",
output_props={
"test-swarming-task-ids": ["610", "710", "810", "910"],
"test_orchestration_inputs_digest": "abc",
},
status="SUCCESS",
)
coverage_build_without_tasks = api.subbuild.ci_build_message(
build_id=8945511751514863185,
builder="coverage-builder2",
output_props={
"test_orchestration_inputs_digest": "abc",
},
status="SUCCESS",
)
coverage_build_with_infra_failure = api.subbuild.ci_build_message(
build_id=8945511751514863186,
builder="coverage-builder3",
status="INFRA_FAILURE",
)
coverage_build_with_infra_failure.summary_markdown = "raised infra failure"
coverage_build_with_failure = api.subbuild.ci_build_message(
build_id=8945511751514863187,
builder="coverage-builder4",
status="FAILURE",
)
coverage_build_with_failure.summary_markdown = "failed to build fuchsia"
coverage_build_with_no_affected_tests = api.subbuild.ci_build_message(
build_id=8945511751514863188,
builder="coverage-builder5",
output_props={
"affected_tests_no_work": True,
"test_orchestration_inputs_digest": "abc",
},
status="SUCCESS",
)
coverage_build_with_skipped_build = api.subbuild.ci_build_message(
build_id=8945511751514863189,
builder="coverage-builder6",
output_props={"skipped_because_unaffected": True},
status="SUCCESS",
)
yield (
api.buildbucket_util.test("default", tryjob=False)
+ properties()
+ collect_steps
+ api.subbuild.child_build_steps(
builds=[coverage_build, coverage_build_without_tasks],
launch_step="launch builders",
collect_step="collect builds",
)
)
yield (
api.buildbucket_util.test(
"default_ctf",
tryjob=False,
builder="fuchsia-ctf-coverage",
status="FAILURE",
)
+ properties()
+ collect_steps
+ api.subbuild.child_build_steps(
builds=[coverage_build, coverage_build_without_tasks],
launch_step="launch builders",
collect_step="collect builds",
)
+ api.step_data(f"get {FIDL_API_MAPPING_LIST}", retcode=1)
+ api.step_data(f"get {PLASA_LIST}", retcode=1)
)
yield (
api.buildbucket_util.test("default_led", tryjob=True)
+ properties(
**{
"$recipe_engine/led": LedInputProperties(
led_run_id="led/user_example.com/abc123",
)
}
)
+ collect_steps
+ api.subbuild.child_led_steps(
builds=[coverage_led_build, coverage_build_without_tasks],
collect_step="collect builds",
)
)
yield (
api.buildbucket_util.test("led_with_child_build_ids", tryjob=True)
+ properties(
child_build_ids=["8945511751514863184", "8945511751514863185"],
**{
"$recipe_engine/led": LedInputProperties(
led_run_id="led/user_example.com/abc123",
)
},
)
+ collect_steps
+ api.buildbucket.simulated_collect_output(
step_name="collect builds.collect",
builds=[coverage_build, coverage_build_without_tasks],
)
)
yield (
api.buildbucket_util.test("no_test_tasks", tryjob=False, status="FAILURE")
+ properties()
+ api.subbuild.child_build_steps(
builds=[coverage_build_without_tasks],
launch_step="launch builders",
collect_step="collect builds",
)
)
yield (
api.buildbucket_util.test(
"no_orchestration_inputs", tryjob=False, status="FAILURE"
)
+ properties()
+ api.subbuild.child_build_steps(
builds=[coverage_build_with_infra_failure, coverage_build_with_failure],
launch_step="launch builders",
collect_step="collect builds",
)
)
yield (
api.buildbucket_util.test("skipped_testing", tryjob=True, patch_set=3)
+ properties()
+ api.subbuild.child_build_steps(
builds=[coverage_build_with_no_affected_tests],
launch_step="launch builders",
collect_step="collect builds",
)
)
yield (
api.buildbucket_util.test("skipped_build", tryjob=True, patch_set=7)
+ properties()
+ api.subbuild.child_build_steps(
builds=[coverage_build_with_skipped_build],
launch_step="launch builders",
collect_step="collect builds",
)
)
yield (
api.buildbucket_util.test("unsupported_changed_files", tryjob=True, patch_set=9)
+ properties()
)
yield (
api.buildbucket_util.test(
"unexpected_malformed_tests", tryjob=False, status="FAILURE"
)
+ properties()
+ collect_steps
+ api.subbuild.child_build_steps(
builds=[coverage_rust_build, coverage_build_without_tasks],
launch_step="launch builders",
collect_step="collect builds",
)
+ api.step_data(
"check malformed binaries.read malformed binaries",
api.file.read_text("valid_id"),
)
)
yield (
api.buildbucket_util.test(
"too_many_unexpected_malformed_tests", tryjob=False, status="FAILURE"
)
+ properties()
+ collect_steps
+ api.subbuild.child_build_steps(
builds=[coverage_build, coverage_build_without_tasks],
launch_step="launch builders",
collect_step="collect builds",
)
+ api.step_data(
"read build-ids.json",
api.file.read_json(
{
"valid_id": "//expected/valid/test",
"valid_id2": "//expected/valid/test2",
"valid_id3": "//expected/valid/test3",
"valid_id4": "//expected/valid/test4",
}
),
)
+ api.step_data(
"check malformed binaries.read malformed binaries",
api.file.read_text("valid_id\nvalid_id2\nvalid_id3\nvalid_id4"),
)
)
yield (
api.buildbucket_util.test("collect_absolute", tryjob=True, patch_set=9)
+ properties(collect_absolute=True, use_authenticated_url=True)
+ collect_steps
+ api.subbuild.child_build_steps(
builds=[coverage_build],
launch_step="launch builders",
collect_step="collect builds",
)
)