| # Copyright 2020 The Fuchsia Authors. All rights reserved. |
| # Use of this source code is governed by a BSD-style license that can be |
| # found in the LICENSE file. |
| """Recipe for processing code coverage. |
| |
| # Execution overview |
| |
| ## Get build results |
| |
| This recipe gets or triggers coverage builders and collects the results. |
| |
| ## Checkout + Build |
| |
| It creates a checkout and builds the generated files so that these |
| can be uploaded with the coverage report to GCS. |
| |
| ## Process coverage |
| |
| It collects the test results from all the coverage builders and generates a |
| coverage report based on the coverage data. |
| """ |
| |
| from recipe_engine.config import Dict, List |
| from recipe_engine.recipe_api import Property |
| |
| from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2 |
| from PB.infra.coverage import CoverageCollectionType |
| from PB.recipe_modules.recipe_engine.led.properties import ( |
| InputProperties as LedInputProperties, |
| ) |
| |
# Declares recipe-engine compatibility with Python 3 only.
PYTHON_VERSION_COMPATIBILITY = "PY3"

# Recipe modules this recipe depends on, resolved by the recipe engine.
DEPS = [
    "fuchsia/artifacts",
    "fuchsia/build",
    "fuchsia/buildbucket_util",
    "fuchsia/cas_util",
    "fuchsia/checkout",
    "fuchsia/gerrit",
    "fuchsia/gsutil",
    "fuchsia/recipe_testing",
    "fuchsia/subbuild",
    "fuchsia/testing",
    "fuchsia/utils",
    "recipe_engine/buildbucket",
    "recipe_engine/cipd",
    "recipe_engine/context",
    "recipe_engine/file",
    "recipe_engine/json",
    "recipe_engine/path",
    "recipe_engine/platform",
    "recipe_engine/properties",
    "recipe_engine/step",
    "recipe_engine/swarming",
]

# Input properties accepted by this recipe; see each `help` string.
PROPERTIES = {
    "manifest": Property(kind=str, help="Jiri manifest to use"),
    "remote": Property(kind=str, help="Remote manifest repository"),
    "fint_params_path": Property(
        kind=str,
        help="Path to a fint params file",
        default=None,
    ),
    "artifact_gcs_bucket": Property(
        kind=str,
        help="GCS bucket to upload build artifacts to",
        default=None,
    ),
    "coverage_builders": Property(
        kind=Dict(),
        help="The map of builders to process coverage on and the filetypes they collect coverage for",
        default=None,
    ),
    "absolute_coverage_args": Property(
        kind=Dict(),
        help="Extra args to pass to the absolute coverage uploader tool",
        default=None,
    ),
    "affected_tests_only": Property(
        kind=bool,
        help="Whether to only run affected tests",
        default=False,
    ),
    "child_build_ids": Property(
        kind=List(str),
        help="A list of child build ids to process coverage on instead of launching new coverage builders",
        default=None,
    ),
    "collect_absolute": Property(
        kind=bool,
        help="Whether to launch all coverage_builders and collect the absolute coverage report regardless of changed filetype",
        default=False,
    ),
    "use_authenticated_url": Property(
        kind=bool,
        help="Whether to use the authenticated URL for viewing the HTML report in GCS",
        default=False,
    ),
}

# The property name for the list of test task ids run by a fuchsia builder.
TASK_IDS_PROPERTY = "test-swarming-task-ids"

# covargs invocation constants and GCS destinations for its outputs.
COVARGS_LOG_LEVEL = "debug"
COVARGS_OUTPUT_JSON = "covargs-output.json"
CODE_COVERAGE_BUCKET = "code-coverage-data"
CODE_COVERAGE_PATH = "{type}/{host}/{project}/{change}/{bucket}/{builder}/{id}/metadata"
COVERAGE_REPORT_NAME = "index.html"
COVERAGE_DATA_JSON = "coverage.json"
CTS_COVERAGE_BUILDER = "fuchsia-cts-coverage"
SDK_SOURCE_LIST = "sdk_source_set_list.json"
FIDL_API_MAPPING_LIST = "fidl_mangled_to_api_mapping.json"
PLASA_LIST = "test_coverage_report.plasa.json"


# CIPD packages and pinned versions for the coverage uploader tools.
ABSOLUTE_COVERAGE_UPLOADER_CIPD_PKG = "fuchsia/coverage/upload-absolute-amd64"
ABSOLUTE_COVERAGE_UPLOADER_CIPD_VERSION = "absolute_coverage_google3_cl:416501045"
INCREMENTAL_COVERAGE_UPLOADER_CIPD_PKG = "fuchsia/coverage/upload-incremental-amd64"
INCREMENTAL_COVERAGE_UPLOADER_CIPD_VERSION = "incremental_coverage_google3_cl:351634601"
COVERAGE_UPLOADER_NAME = "llvm_cloud_uploader"

# Version types to pass with the -summary and -llvm-profdata flags for covargs.
CLANG_TYPE = "clang"
RUST_TYPE = "rust"
| |
| |
| def _get_output_property(build, property_name): |
| property_value = None |
| if property_name in build.output.properties: |
| property_value = build.output.properties[property_name] |
| return property_value |
| |
| |
| def _download_artifact(api, artifact_name, bid, errors): |
| local_path = None |
| try: |
| local_path = api.path.mkstemp() |
| api.artifacts.download( |
| "get %s" % artifact_name, |
| artifact_name, |
| local_path, |
| ) |
| except: |
| local_path = None |
| errors.append("%s not found from build %s" % (artifact_name, bid)) |
| return local_path |
| |
| |
def _get_builds(api, builder_names, affected_tests_only=False, collect_absolute=False):
    """Launch the coverage subbuilds and return their build ids."""
    # TODO(fxb/9847): Instead of launching all builds, get existing builds
    # corresponding to same gerrit_change or gitiles_commit.
    #
    # The `coverage_collection` property tells each launched builder that it
    # was launched to collect coverage and should behave accordingly (e.g. no
    # test multiplication, no early abort on task failure). SELECTIVE asks the
    # builder to use selective instrumentation when run on a gerrit change.
    if collect_absolute:
        collection_type = CoverageCollectionType.FULL
    else:
        collection_type = CoverageCollectionType.SELECTIVE

    with api.step.nest("launch builders") as presentation:
        launched = api.subbuild.launch(
            list(builder_names),
            presentation,
            extra_properties={
                "affected_tests_only": affected_tests_only,
                "coverage_collection": collection_type,
            },
            # This recipe passes even when subbuilds fail their tests, so the
            # subbuilds must stay visible in Gerrit to surface test failures
            # to CL authors.
            hide_in_gerrit=False,
        )
    return [launched_build.build_id for launched_build in launched.values()]
| |
| |
| def _collect_builds(api, build_ids, launched_by_led): |
| with api.step.nest("collect builds"): |
| builds = api.subbuild.collect(build_ids, launched_by_led) |
| return [build.build_proto for build in builds.values()] |
| |
| |
def _is_cts_coverage(api):
    """Return True when the current builder is the CTS coverage builder."""
    current_builder = api.buildbucket.build.builder.builder
    return current_builder == CTS_COVERAGE_BUILDER
| |
| |
def upload_incremental(api, test_coverage, build_dir):
    """Upload per-change LLVM coverage data to the incremental coverage service.

    Identifies the upload by the gerrit change/patchset currently being built.
    """
    change = api.buildbucket.build.input.gerrit_changes[0]
    uploader = api.cipd.ensure_tool(
        INCREMENTAL_COVERAGE_UPLOADER_CIPD_PKG,
        INCREMENTAL_COVERAGE_UPLOADER_CIPD_VERSION,
        executable_path=COVERAGE_UPLOADER_NAME,
    )
    api.step(
        "upload to incremental coverage service",
        [
            uploader,
            "--env=prod",
            "--host=%s" % change.host,
            "--project=%s" % change.project,
            "--change_id=%s" % change.change,
            "--patchset=%d" % change.patchset,
            "--uploader_name=%s" % api.buildbucket.build.builder.builder,
            "--uploader_id=%s" % api.buildbucket_util.id,
            "--format=LLVM",
            "--coverage_file=%s" % test_coverage,
            "--insert_dir_prefix=%s" % build_dir,
        ],
    )
| |
| |
def upload_absolute(
    api, test_coverage, absolute_coverage_args, build_dir, gitiles_commit
):
    """Upload full-tree LLVM coverage data to the absolute coverage service.

    Identifies the upload by the given gitiles commit.
    """
    uploader = api.cipd.ensure_tool(
        ABSOLUTE_COVERAGE_UPLOADER_CIPD_PKG,
        ABSOLUTE_COVERAGE_UPLOADER_CIPD_VERSION,
        executable_path=COVERAGE_UPLOADER_NAME,
    )

    base_cmd = [
        uploader,
        "--absolute_coverage_service_env=prod",
        "--host=%s" % absolute_coverage_args["host"],
        "--project=%s" % absolute_coverage_args["project"],
        "--ref=%s" % gitiles_commit.ref,
        "--uploader_name=%s" % api.buildbucket.build.builder.builder,
        "--uploader_id=%s" % api.buildbucket_util.id,
        "--llvm_json_file=%s" % test_coverage,
        "--insert_path_prefix=%s" % build_dir,
        "--timeout=5m",
    ]

    api.step(
        "upload to absolute coverage service for %s" % gitiles_commit.id,
        base_cmd + ["--commit_id=%s" % gitiles_commit.id],
    )
| |
| |
def process_coverage(
    api,
    checkout_root,
    covargs_path,
    summary_files,
    debug_symbol_url,
    llvm_profdata,
    llvm_cov,
    gcs_bucket,
    build_dir,
    absolute_coverage_args,
    covargs_inputs_tree,
    instrumentation_files=None,
    build_ids_to_labels=None,
    malformed_tests_allowlist=None,
    fidl_api_mapping_list=None,
    plasa_list=None,
    use_authenticated_url=False,
):
    """Generate the coverage report and upload it and the coverage data.

    Runs covargs over the collected test summaries to produce the HTML report
    and metadata, uploads the metadata to the Chromium coverage service bucket
    (postsubmit only), uploads the raw coverage data to the incremental or
    absolute coverage service, uploads the report to our own GCS bucket, and
    finally fails if any unexpected malformed test binaries were found.

    Args:
      api: The recipe API object.
      checkout_root: Root path of the Fuchsia checkout.
      covargs_path: Path to the covargs tool.
      summary_files: Dict mapping version type (clang/rust) to lists of
        summary.json paths.
      debug_symbol_url: URL of the debug symbol server.
      llvm_profdata: Dict mapping version type to llvm-profdata tool paths.
      llvm_cov: Path to the llvm-cov tool.
      gcs_bucket: GCS bucket to upload the coverage report to.
      build_dir: Build directory, relative to the checkout root.
      absolute_coverage_args: Extra args for the absolute coverage uploader.
      covargs_inputs_tree: cas_util tree used to upload covargs inputs.
      instrumentation_files: Source files to pass via -src-file.
      build_ids_to_labels: Dict mapping build ids to GN test labels.
      malformed_tests_allowlist: Test labels allowed to be malformed.
      fidl_api_mapping_list: Path to the FIDL API mapping list (CTS only).
      plasa_list: Path to the PlaSA list (CTS only).
      use_authenticated_url: Whether to link the authenticated GCS report URL.

    Raises:
      StepFailure: If covargs fails, no report can be produced, or unexpected
        malformed tests are found.
    """
    # Normalize None to fresh containers instead of using mutable default
    # arguments, which are shared across calls.
    if instrumentation_files is None:
        instrumentation_files = []
    if build_ids_to_labels is None:
        build_ids_to_labels = {}
    if malformed_tests_allowlist is None:
        malformed_tests_allowlist = []

    output_dir = api.path["cleanup"].join("coverage")
    # llvm-cov does not properly handle `../..` in filepaths, so in the html
    # report it just appends that to the output dir. This means we need the
    # actual output dir to be two subdirectories within the output_dir that we
    # upload to GCS. However the html files for the source files then reference
    # the style.css file considering the `../..` as two more directories. For
    # example, if a filepath is `../../path/to/file`, the index.html would be
    # written to output_dir/index.html and reference the source file html as
    # `coverage/../../path/to/file.html` which would be written to
    # output_dir/coverage/../../path/to/file.html or
    # output_dir/../path/to/file.html. Then that html would reference style.css
    # which was written at output_dir/style.css as `../../../../../style.css` (a
    # ../ for every directory in coverage/../../path/to/file.html) which would
    # expect the style.css at the actual path of
    # output_dir/../../../../style.css. Thus the actual output directory we need
    # to pass to llvm-cov should be four subdirectories within the directory
    # that we upload to GCS, and we'd need to copy the style.css into the
    # top-level output directory.
    # TODO(fxbug.dev/72177): Remove hack once llvm tools can handle ../ properly.
    llvm_output_dir = output_dir.join("coverage/coverage/coverage/coverage")
    report_dir = api.path["cleanup"].join("metadata")
    temp_dir = api.path.mkdtemp("covargs")

    # TODO(fxbug.dev/86596): Once llvm tools are statically linked, we can just
    # upload the llvm-profdata tool by itself.
    for tool in [covargs_path, llvm_cov]:
        covargs_inputs_tree.register_link(
            target=tool,
            linkname=covargs_inputs_tree.root.join(api.path.basename(tool)),
        )
    for version, tool in llvm_profdata.items():
        covargs_inputs_tree.register_link(
            target=tool,
            linkname=covargs_inputs_tree.root.join("llvm-profdata-%s" % version),
        )

    cmd = [
        # For debugging covargs changes, replace `covargs_path` with
        # `api.resource("covargs")` and copy the new covargs into the
        # recipes/fuchsia/coverage.resources directory.
        covargs_path,
        "-level",
        COVARGS_LOG_LEVEL,
        "-json-output",
        api.json.output(
            name=COVARGS_OUTPUT_JSON, leak_to=temp_dir.join(COVARGS_OUTPUT_JSON)
        ),
        "-output-dir",
        llvm_output_dir,
        "-llvm-profdata",
        "%s=%s" % (llvm_profdata[RUST_TYPE], RUST_TYPE),
        "-llvm-profdata",
        "%s=%s" % (llvm_profdata[CLANG_TYPE], CLANG_TYPE),
        # NOTE(review): the clang llvm-profdata is also passed a second time
        # without a version suffix — presumably as the default/untyped tool;
        # confirm against covargs' flag handling before removing.
        "-llvm-profdata",
        llvm_profdata[CLANG_TYPE],
        "-llvm-cov",
        llvm_cov,
        "-symbol-server",
        debug_symbol_url,
        "-symbol-cache",
        api.path["cache"].join("symbol"),
        # TODO(phosek): remove this flag when debugging phase is over.
        "-save-temps",
        temp_dir,
        "-report-dir",
        report_dir,
        # filepaths are relative to the build directory so source files in the
        # checkout root are prepended with `../..` and need to be mapped to the
        # checkout root.
        "-path-equivalence",
        "../..,%s" % checkout_root,
        "-base",
        checkout_root,
    ]

    for version_type, summaries in summary_files.items():
        for summary in summaries:
            cmd.extend(["-summary", "%s=%s" % (summary, version_type)])
    for instrumentation_file in instrumentation_files:
        cmd.extend(["-src-file", instrumentation_file])

    if _is_cts_coverage(api):
        cmd.append("-skip-functions=false")

    try:
        api.step("covargs", cmd)
    finally:
        # Upload covargs temps even on failure to aid debugging.
        api.cas_util.upload(temp_dir)

    # Upload the coverage report to the Chromium coverage service.
    gitiles_commit = api.buildbucket.build.input.gitiles_commit
    if not _is_cts_coverage(api):
        # Only upload postsubmit metadata: requires a gitiles commit and no
        # gerrit changes.
        if (
            not api.buildbucket.build.input.gerrit_changes
            and gitiles_commit.host
            and gitiles_commit.project
            and gitiles_commit.id
        ):
            dst = CODE_COVERAGE_PATH.format(
                type="postsubmit",
                host=gitiles_commit.host,
                project=gitiles_commit.project,
                change=gitiles_commit.id,
                bucket=api.buildbucket.build.builder.bucket,
                builder=api.buildbucket.build.builder.builder,
                id=api.buildbucket.build.id,
            )
            step_result = api.gsutil.rsync(
                name="upload report",
                src=report_dir,
                bucket=CODE_COVERAGE_BUCKET,
                dst=dst,
                recursive=True,
                options={
                    "parallel_process_count": api.platform.cpu_count,
                    "parallel_thread_count": 1,
                },
                multithreaded=True,
            )
            # These output properties are consumed by the coverage service to
            # locate the uploaded metadata.
            step_result.presentation.properties.update(
                {
                    "coverage_metadata_gs_paths": [dst],
                    "mimic_builder_names": [api.buildbucket.build.builder.builder],
                    "coverage_gs_bucket": CODE_COVERAGE_BUCKET,
                    "coverage_is_presubmit": False,
                }
            )

    # Upload the coverage data to the absolute coverage service.
    # TODO(ihuh): Output the coverage.json to the output directory instead of
    # the temp directory used for debugging purposes.
    test_coverage = api.path.join(temp_dir, COVERAGE_DATA_JSON)
    # TODO(fxbug.dev/70826): Don't upload to incremental/absolute coverage
    # service for internal builders until permissions are granted.
    if api.buildbucket_util.is_tryjob and not use_authenticated_url:
        upload_incremental(api, test_coverage, build_dir)
    elif (
        absolute_coverage_args
        and not _is_cts_coverage(api)
        and not use_authenticated_url
    ):
        # TODO(fxbug.dev/90510): Remove when issue is fixed.
        # Split test_coverage to upload in smaller chunks.
        # Data looks like {"data": [{"files": []}]}. We want to split by files.
        data = api.file.read_json(
            "read coverage data",
            test_coverage,
            include_log=False,
            test_data={"version": "1", "data": [{"files": [{"filename": "file1"}]}]},
        )
        split_files = []
        # The chunk size is an empirically chosen value to avoid timing out in
        # the upload step.
        chunk_size = 7000
        new_data = {k: v for k, v in data.items() if k != "data"}
        for i, d in enumerate(data["data"]):
            files = d["files"]
            split_data = [
                files[j : j + chunk_size] for j in range(0, len(files), chunk_size)
            ]
            for j, chunk in enumerate(split_data):
                new_data["data"] = []
                new_data["data"].append({"files": chunk})
                tmp = api.path.join(temp_dir, "%d_%d_%s" % (i, j, COVERAGE_DATA_JSON))
                api.file.write_json(
                    "write %s" % api.path.basename(tmp),
                    tmp,
                    new_data,
                    include_log=False,
                )
                split_files.append(tmp)

        # `coverage_file` is late-bound: the closure reads the loop variable
        # assigned below, so each retry attempt uploads the current chunk.
        def do_upload_absolute():
            upload_absolute(
                api,
                coverage_file,
                absolute_coverage_args,
                build_dir,
                gitiles_commit,
            )

        for coverage_file in split_files:
            api.utils.retry(do_upload_absolute, max_attempts=3)

    # Upload the coverage report to our own bucket.
    # TODO(ihuh): move this into gsutil module/deduplicate this with other GCS logic
    dst = "builds/%s/coverage" % api.buildbucket_util.id
    try:
        # For CTS coverage, upload the test coverage.json to our GCS bucket to
        # be available for postprocessing. Also upload the source list to see
        # which files are missing tests completely and are thus not included in
        # the coverage report.
        if _is_cts_coverage(api):
            api.file.move("move test coverage to output dir", test_coverage, output_dir)
            api.file.move(
                "move source list to output dir",
                checkout_root.join(build_dir, SDK_SOURCE_LIST),
                output_dir,
            )
            if fidl_api_mapping_list is not None:
                api.file.move(
                    "move FIDL API mapping list to output dir",
                    fidl_api_mapping_list,
                    output_dir.join(FIDL_API_MAPPING_LIST),
                )
            if plasa_list is not None:
                api.file.move(
                    "move PlaSA list to output dir",
                    plasa_list,
                    output_dir.join(PLASA_LIST),
                )
        # As mentioned in the comment about llvm_output_dir, copy style.css to
        # the output_dir to make the relative paths in the html files work.
        api.file.copy("copy style.css", llvm_output_dir.join("style.css"), output_dir)
        api.gsutil.upload(
            name="upload coverage",
            src=output_dir,
            bucket=gcs_bucket,
            dst=dst,
            recursive=True,
            gzip_exts=["html"],
            options={
                "parallel_process_count": api.platform.cpu_count,
                "parallel_thread_count": 1,
            },
            multithreaded=True,
            no_clobber=True,
        )
    finally:
        # Always link the report, even if the upload step failed partway.
        coverage_report_step = api.step.empty("coverage report")
        link = api.gsutil.http_url(
            gcs_bucket,
            api.gsutil.join(
                dst, api.path.relpath(llvm_output_dir, output_dir), COVERAGE_REPORT_NAME
            ),
            unauthenticated_url=not use_authenticated_url,
        )
        coverage_report_step.presentation.links[COVERAGE_REPORT_NAME] = link

    with api.step.nest("check malformed binaries"):
        malformed = api.file.read_text(
            "read malformed binaries",
            temp_dir.join("malformed_binaries.txt"),
        ).splitlines()
        malformed_test_labels = [
            # The labels are in the form of //path/to/target($toolchain). Strip
            # the toolchain to match the labels in the allowlist.
            build_ids_to_labels.get(str(m), "").rsplit("(", 1)[0]
            for m in malformed
        ]
        unexpected_valid_tests = []
        # There are currently no tests in the malformed_tests_allowlist, so the
        # following block won't be covered.
        # NOTE(review): the `break` exits after the first allowlisted match, so
        # later allowlist entries are never checked against the malformed set —
        # confirm this is intended before populating the allowlist.
        for test in malformed_tests_allowlist:  # pragma: nocover
            if test in malformed_test_labels:
                malformed_test_labels.remove(test)
                break
            unexpected_valid_tests.append(test)

        if len(malformed_test_labels) > 3:
            step = api.step("malformed test labels", None)
            step.presentation.step_text = "\n".join(malformed_test_labels)
            raise api.step.StepFailure(
                "more than 3 malformed tests. See `malformed test labels` step for full list."
            )
        if malformed_test_labels or unexpected_valid_tests:
            raise api.step.StepFailure(
                "unexpected malformed tests: %r\nunexpected valid tests: %r"
                % (list(malformed_test_labels), list(unexpected_valid_tests))
            )
| |
| |
def builders_by_filetype(api, builders_to_filetypes, launch_all):
    """Select which coverage builders are relevant to the current build.

    Outside of tryjobs, or when `launch_all` is set, every builder is
    selected. Otherwise only builders whose configured filetypes intersect
    the extensions of the files touched by the current patchset are selected.
    """
    if not api.buildbucket_util.is_tryjob or launch_all:
        return builders_to_filetypes.keys()

    gerrit_change = api.buildbucket.build.input.gerrit_changes[0]
    change_details = api.gerrit.change_details(
        name="get change details",
        change_id="%s~%s" % (gerrit_change.project, gerrit_change.change),
        host=gerrit_change.host,
        query_params=["ALL_REVISIONS", "ALL_FILES"],
        test_data=api.json.test_api.output(
            {
                "branch": "main",
                "revisions": {
                    "d4e5f6": {
                        "_number": 3,
                        "ref": "refs/changes/00/100/3",
                        "files": {"file.cc": {}},
                    },
                    "a1b2c3": {
                        "_number": 7,
                        "ref": "refs/changes/00/100/7",
                        "files": {"file.cc": {}, "file.c": {}, "file.h": {}},
                    },
                    "g7h8i9": {
                        "_number": 9,
                        "ref": "refs/changes/00/100/9",
                        "files": {"unsupported": {}},
                    },
                },
            }
        ),
    ).json.output

    # Exactly one revision should correspond to the patchset being built.
    matching_revisions = [
        revision
        for revision in change_details["revisions"].values()
        if revision["_number"] == gerrit_change.patchset
    ]
    assert len(matching_revisions) == 1
    touched_files = matching_revisions[0].get("files", {}).keys()
    touched_exts = {api.path.splitext(path)[1] for path in touched_files}

    selected = []
    for builder, filetypes in builders_to_filetypes.items():
        if touched_exts & set(filetypes):
            selected.append(builder)
    return selected
| |
| |
def RunSteps(
    api,
    manifest,
    remote,
    fint_params_path,
    coverage_builders,
    absolute_coverage_args,
    affected_tests_only,
    artifact_gcs_bucket,
    child_build_ids,
    collect_absolute,
    use_authenticated_url,
):
    """Entry point: collect coverage builds, build locally, process coverage.

    Launches (or reuses, via `child_build_ids`) the coverage builders, checks
    out and GN-gens the Fuchsia tree to obtain llvm tools and generated
    sources, collects the test results from the coverage builders' swarming
    tasks, and hands everything to process_coverage().
    """
    # Collect non-fatal errors here to return at the end.
    errors = []

    if not child_build_ids:
        with api.step.nest("get builders to launch") as pres:
            # For CTS coverage, we don't care about the changed files and always
            # want to launch the coverage builders.
            launch_all = collect_absolute or _is_cts_coverage(api)
            builder_names = builders_by_filetype(api, coverage_builders, launch_all)
            if not builder_names:
                pres.step_text = "no builders collect coverage for the changed files"
                return

    # TODO(fxbug.dev/75085): Update/remove list once issues are fixed.
    malformed_tests_allowlist = []

    build_ids = child_build_ids
    if not child_build_ids:
        build_ids = _get_builds(
            api,
            builder_names,
            affected_tests_only=affected_tests_only,
            collect_absolute=collect_absolute,
        )
    assert build_ids, "failed to get builds"

    checkout = api.checkout.fuchsia_with_options(manifest=manifest, remote=remote)

    # Need to run api.build.gen() to generate the tool_paths.json to access the
    # llvm tools and to generate the profile source list for cts coverage.
    gn_results = api.build.gen(
        checkout=checkout,
        fint_params_path=fint_params_path,
    )

    # For absolute coverage, no source filter is applied; otherwise restrict
    # the report to the files changed by the current CL.
    instrumentation_files = [] if collect_absolute else checkout.changed_files()
    if _is_cts_coverage(api):
        profile_source_list = api.file.read_json(
            "read profile source list",
            gn_results.build_dir.join(SDK_SOURCE_LIST),
            test_data=["file1", "file2"],
        )
        # The file paths are relative to the checkout root, but they should
        # either be absolute paths or relative to the build directory, so we
        # append the paths to the checkout root to make them absolute.
        instrumentation_files = [checkout.root_dir.join(f) for f in profile_source_list]

    launched_by_led = None
    if child_build_ids:
        # Try to determine whether the build_ids are buildbucket build ids or
        # led swarming task ids. Buildbucket build ids are only composed of
        # digits, but swarming task ids can contain letters as well. This will
        # not work in the case that the build_ids happen to all be swarming task
        # ids with only digits, but it should be good enough in most cases.
        launched_by_led = any(not build_id.isdigit() for build_id in child_build_ids)
    builds = _collect_builds(api, build_ids, launched_by_led)
    # Get orchestration_inputs from any build. The artifacts used from here
    # are host tools that should be the same across all builders.
    orchestration_inputs = None
    # Collect fidl_mangled_to_api_mapping.json for CTS coverage.
    fidl_api_mapping_list = None
    # Collect test_coverage_report.plasa.json for CTS coverage.
    plasa_list = None
    exception_type = api.step.InfraFailure
    testing_skipped = True
    failed_builds_summaries = []
    build_ids_to_labels = {}
    for build in builds:
        skipped_because_unaffected = _get_output_property(
            build, "skipped_because_unaffected"
        )
        affected_tests_no_work = _get_output_property(build, "affected_tests_no_work")
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # `skipped or (no_work and not recipe_testing.enabled)` — confirm that
        # `skipped_because_unaffected` is intentionally unconditional.
        if (
            skipped_because_unaffected
            or affected_tests_no_work
            and not api.recipe_testing.enabled
        ):
            continue
        testing_skipped = False
        orchestration_inputs_hash = _get_output_property(
            build, api.build.test_orchestration_inputs_property_name(False)
        )
        if not orchestration_inputs_hash:
            # Only return an InfraFailure if all failures are infra failures.
            # Otherwise, return a StepFailure.
            if build.status == common_pb2.FAILURE:
                exception_type = api.step.StepFailure
                failed_builds_summaries.append(build.summary_markdown)
            continue
        if not orchestration_inputs:
            orchestration_inputs = api.build.download_test_orchestration_inputs(
                orchestration_inputs_hash
            )
        # Collect generated sources from all builds.
        if api.path.exists(orchestration_inputs.generated_sources_root):
            # NOTE(review): invokes `python`, not `python3` — relies on the
            # swarming bot environment providing a suitable interpreter.
            api.step(
                "copy generated sources to checkout",
                cmd=[
                    "python",
                    api.resource("copy_sources.py"),
                    "--source_dir",
                    orchestration_inputs.generated_sources_root,
                    "--dest_dir",
                    checkout.root_dir,
                ],
            )
        # Collect build-ids.json for all builds.
        build_ids_file = api.path.mkstemp()
        # Point the artifacts module at this build's bucket/namespace before
        # downloading.
        api.artifacts.gcs_bucket = _get_output_property(build, "artifact_gcs_bucket")
        api.artifacts.namespace = build.id
        api.artifacts.download(
            "get build-ids.json",
            "build-ids.json",
            build_ids_file,
        )
        build_ids_json = api.file.read_json(
            "read build-ids.json",
            build_ids_file,
            test_data={
                "malformed_id": "//src/sys/appmgr/integration_tests/logs:log_tests_bin",
                "valid_id": "//expected/valid/test",
            },
        )
        build_ids_to_labels.update(build_ids_json)

        # Collect FIDL mapping and PlaSA definition for CTS coverage.
        if _is_cts_coverage(api):
            if fidl_api_mapping_list is None:
                fidl_api_mapping_list = _download_artifact(
                    api, FIDL_API_MAPPING_LIST, build.id, errors
                )
            if plasa_list is None:
                plasa_list = _download_artifact(api, PLASA_LIST, build.id, errors)

    # All subbuilds skipped testing: nothing to process.
    if testing_skipped:
        return

    if not orchestration_inputs:
        raise exception_type(
            "\n".join(failed_builds_summaries)
            if failed_builds_summaries
            else "no orchestration inputs found"
        )

    # Configure context of uploaded artifacts to get debug_symbol url.
    api.artifacts.gcs_bucket = artifact_gcs_bucket
    summary_files = {CLANG_TYPE: [], RUST_TYPE: []}
    covargs_inputs_tree = api.cas_util.tree(api.path.mkdtemp("covargs_inputs"))
    for build in builds:
        builder = build.builder.builder
        # A builder is treated as rust-coverage if it collects coverage for
        # .rs files (per the coverage_builders property).
        version_type = CLANG_TYPE
        if ".rs" in coverage_builders[builder]:
            version_type = RUST_TYPE
        task_ids = _get_output_property(build, TASK_IDS_PROPERTY)
        if not task_ids:
            continue
        # All tasks should have been completed, so there's no need to have a timeout.
        results = api.swarming.collect(
            "collect",
            list(task_ids),
            output_dir=api.path.mkdtemp("swarming"),
        )
        for result in results:
            if not result.success:
                continue
            with api.step.nest("process result for %s" % result.name) as presentation:
                results_dir = api.testing.extract_test_results(
                    step_name="extract",
                    task_result=result,
                )
                summary_files[version_type].append(
                    results_dir.join(api.testing.TEST_SUMMARY_JSON)
                )
                covargs_inputs_tree.register_link(
                    target=results_dir,
                    linkname=covargs_inputs_tree.root.join(
                        "%s-%s" % (build.builder.builder, result.name)
                    ),
                )

    if not summary_files[CLANG_TYPE] and not summary_files[RUST_TYPE]:
        raise api.step.StepFailure("no summary files could be retrieved")

    # The covargs tool must be run from the build dir since it deals with
    # relative filepaths to the build dir. The build dir is expected to be
    # "checkout_root/out/not-default" as set in api.build.gen().
    build_dir = "out/not-default"
    abs_build_dir = checkout.root_dir.join(build_dir)
    api.file.ensure_directory("ensure build directory", abs_build_dir)
    with api.context(cwd=abs_build_dir):
        try:
            process_coverage(
                api=api,
                checkout_root=checkout.root_dir,
                covargs_path=orchestration_inputs.covargs,
                summary_files=summary_files,
                debug_symbol_url=api.artifacts.debug_symbol_url(),
                llvm_profdata={
                    CLANG_TYPE: gn_results.tool("llvm-profdata"),
                    RUST_TYPE: gn_results.tool("llvm-profdata-rust"),
                },
                llvm_cov=gn_results.tool("llvm-cov"),
                gcs_bucket=artifact_gcs_bucket,
                build_dir=build_dir,
                absolute_coverage_args=absolute_coverage_args,
                covargs_inputs_tree=covargs_inputs_tree,
                instrumentation_files=instrumentation_files,
                build_ids_to_labels=build_ids_to_labels,
                malformed_tests_allowlist=malformed_tests_allowlist,
                fidl_api_mapping_list=fidl_api_mapping_list,
                plasa_list=plasa_list,
                use_authenticated_url=use_authenticated_url,
            )
        except Exception as e:
            # Non-fatal here; re-raised as a single StepFailure below so the
            # covargs inputs still get uploaded.
            errors.append(str(e))
        finally:
            covargs_inputs_tree.create_links("create tree of covargs inputs")
            api.cas_util.upload(
                covargs_inputs_tree.root, step_name="upload covargs inputs"
            )
    if errors:
        raise api.step.StepFailure("\n".join(errors))
| |
| |
| def GenTests(api): |
| collect_steps = ( |
| api.step_data( |
| "collect", |
| api.swarming.collect( |
| [ |
| api.swarming.task_result( |
| id="610", |
| name="Linux", |
| outputs=["out/path/to/output/file"], |
| ), |
| api.swarming.task_result( |
| id="710", |
| failure=True, |
| name="QEMU", |
| outputs=["out/path/to/output/file"], |
| ), |
| api.swarming.task_result( |
| id="810", |
| name="QEMU-(2)", |
| outputs=["out/summary.json", "serial_log.txt"], |
| ), |
| api.swarming.task_result( |
| id="910", |
| name="QEMU-(3)", |
| outputs=["out/summary.json"], |
| ), |
| ] |
| ), |
| ) |
| + api.testing.task_requests_step_data( |
| [api.testing.task_request_jsonish()], |
| "download test orchestration inputs.load task requests", |
| ) |
| + api.step_data( |
| "download test orchestration inputs.load triage sources", |
| api.file.read_json(["triage/config.triage", "other/triage/config.triage"]), |
| ) |
| ) |
| |
| def properties(**kwargs): |
| props = { |
| "manifest": "minimal", |
| "remote": "https://fuchsia.googlesource.com/manifest", |
| "fint_params_path": "fint_params/coverage.textproto", |
| "coverage_builders": { |
| "coverage-builder": [".cc"], |
| "coverage-builder2": [".h", ".c"], |
| "coverage-rust-builder": [".rs"], |
| }, |
| "absolute_coverage_args": {"host": "fuchsia", "project": "fuchsia"}, |
| "artifact_gcs_bucket": "fuchsia-infra-artifacts", |
| } |
| props.update(kwargs) |
| return api.properties(**props) |
| |
| coverage_build = api.subbuild.ci_build_message( |
| build_id=8945511751514863184, |
| builder="coverage-builder", |
| output_props={ |
| "test-swarming-task-ids": ["610", "710", "810", "910"], |
| "test_orchestration_inputs_digest": "abc", |
| }, |
| status="SUCCESS", |
| ) |
| coverage_rust_build = api.subbuild.ci_build_message( |
| build_id=8945511751514863184, |
| builder="coverage-rust-builder", |
| output_props={ |
| "test-swarming-task-ids": ["610", "710", "810", "910"], |
| "test_orchestration_inputs_digest": "abc", |
| }, |
| status="SUCCESS", |
| ) |
| coverage_build_without_tasks = api.subbuild.ci_build_message( |
| build_id=8945511751514863185, |
| builder="coverage-builder2", |
| output_props={ |
| "test_orchestration_inputs_digest": "abc", |
| }, |
| status="SUCCESS", |
| ) |
| coverage_build_with_infra_failure = api.subbuild.ci_build_message( |
| build_id=8945511751514863186, |
| builder="coverage-builder3", |
| status="INFRA_FAILURE", |
| ) |
| coverage_build_with_infra_failure.summary_markdown = "raised infra failure" |
| coverage_build_with_failure = api.subbuild.ci_build_message( |
| build_id=8945511751514863187, |
| builder="coverage-builder4", |
| status="FAILURE", |
| ) |
| coverage_build_with_failure.summary_markdown = "failed to build fuchsia" |
| coverage_build_with_no_affected_tests = api.subbuild.ci_build_message( |
| build_id=8945511751514863188, |
| builder="coverage-builder5", |
| output_props={ |
| "affected_tests_no_work": True, |
| "test_orchestration_inputs_digest": "abc", |
| }, |
| status="SUCCESS", |
| ) |
| coverage_build_with_skipped_build = api.subbuild.ci_build_message( |
| build_id=8945511751514863189, |
| builder="coverage-builder6", |
| output_props={"skipped_because_unaffected": True}, |
| status="SUCCESS", |
| ) |
| |
# Happy-path CI (non-tryjob) case: two successful child builds are launched
# and collected, one with test tasks and one without.
yield (
api.buildbucket_util.test("default", tryjob=False)
+ properties()
+ collect_steps
+ api.subbuild.child_build_steps(
builds=[coverage_build, coverage_build_without_tasks],
launch_step="launch builders",
collect_step="collect builds",
)
)
| |
# CTS variant of the default case, run on the "fuchsia-cts-coverage" builder
# and expected to end in failure: the "get <FIDL_API_MAPPING_LIST>" and
# "get <PLASA_LIST>" steps are simulated to exit non-zero (retcode=1).
# (FIDL_API_MAPPING_LIST / PLASA_LIST are constants defined earlier in this
# file — presumably GCS object names; verify there.)
yield (
api.buildbucket_util.test(
"default_cts",
tryjob=False,
builder="fuchsia-cts-coverage",
status="failure",
)
+ properties()
+ collect_steps
+ api.subbuild.child_build_steps(
builds=[coverage_build, coverage_build_without_tasks],
launch_step="launch builders",
collect_step="collect builds",
)
+ api.step_data("get %s" % FIDL_API_MAPPING_LIST, retcode=1)
+ api.step_data("get %s" % PLASA_LIST, retcode=1)
)
| |
# led-launched tryjob: LedInputProperties marks the run as a led job, so
# child builds are launched via led (child_led_steps) instead of buildbucket.
yield (
api.buildbucket_util.test("default_led", tryjob=True)
+ properties(
**{
"$recipe_engine/led": LedInputProperties(
led_run_id="led/user_example.com/abc123",
)
}
)
+ collect_steps
+ api.subbuild.child_led_steps(
builds=[coverage_build, coverage_build_without_tasks],
collect_step="collect builds",
)
)
| |
# led tryjob where explicit child_build_ids are provided as properties: the
# recipe skips launching and only collects the pre-existing builds, hence the
# simulated_collect_output on the "collect builds.collect" step.
yield (
api.buildbucket_util.test("led_with_child_build_ids", tryjob=True)
+ properties(
child_build_ids=["8945511751514863184", "8945511751514863185"],
**{
"$recipe_engine/led": LedInputProperties(
led_run_id="led/user_example.com/abc123",
)
}
)
+ collect_steps
+ api.buildbucket.simulated_collect_output(
step_name="collect builds.collect",
builds=[coverage_build, coverage_build_without_tasks],
)
)
| |
# Failure case: the only child build reports no "test-swarming-task-ids", so
# there are no test tasks to process and the run is expected to fail.
yield (
api.buildbucket_util.test("no_test_tasks", tryjob=False, status="failure")
+ properties()
+ api.subbuild.child_build_steps(
builds=[coverage_build_without_tasks],
launch_step="launch builders",
collect_step="collect builds",
)
)
| |
# Failure case: both child builds failed (one INFRA_FAILURE, one FAILURE), so
# no child provides a test_orchestration_inputs_digest.
yield (
api.buildbucket_util.test(
"no_orchestration_inputs", tryjob=False, status="failure"
)
+ properties()
+ api.subbuild.child_build_steps(
builds=[coverage_build_with_infra_failure, coverage_build_with_failure],
launch_step="launch builders",
collect_step="collect builds",
)
)
| |
# Tryjob whose child reports "affected_tests_no_work": testing is skipped but
# the run still succeeds. (patch_set values here presumably select different
# mocked changed-file sets elsewhere in this file — verify against the
# properties()/gerrit test data above.)
yield (
api.buildbucket_util.test("skipped_testing", tryjob=True, patch_set=3)
+ properties()
+ api.subbuild.child_build_steps(
builds=[coverage_build_with_no_affected_tests],
launch_step="launch builders",
collect_step="collect builds",
)
)
| |
# Tryjob whose child skipped its build entirely ("skipped_because_unaffected");
# the parent run still succeeds.
yield (
api.buildbucket_util.test("skipped_build", tryjob=True, patch_set=7)
+ properties()
+ api.subbuild.child_build_steps(
builds=[coverage_build_with_skipped_build],
launch_step="launch builders",
collect_step="collect builds",
)
)
| |
# Tryjob with no child_build_steps at all: with this patch_set the recipe is
# expected to bail out before launching any builders (presumably because the
# mocked changed files are of unsupported types — confirm against the
# patch_set-keyed test data earlier in this file).
yield (
api.buildbucket_util.test("unsupported_changed_files", tryjob=True, patch_set=9)
+ properties()
)
| |
# Failure case: the Rust coverage build succeeds, but the "check malformed
# binaries" step reports "valid_id" as malformed, which is unexpected and
# fails the run.
yield (
api.buildbucket_util.test(
"unexpected_malformed_tests", tryjob=False, status="failure"
)
+ properties()
+ collect_steps
+ api.subbuild.child_build_steps(
builds=[coverage_rust_build, coverage_build_without_tasks],
launch_step="launch builders",
collect_step="collect builds",
)
+ api.step_data(
"check malformed binaries.read malformed binaries",
api.file.read_text("valid_id"),
)
)
| |
# Failure case: build-ids.json maps four build ids to tests, and ALL four are
# reported malformed — presumably exceeding the recipe's tolerated fraction of
# malformed binaries (threshold is defined earlier in this file; verify).
yield (
api.buildbucket_util.test(
"too_many_unexpected_malformed_tests", tryjob=False, status="failure"
)
+ properties()
+ collect_steps
+ api.subbuild.child_build_steps(
builds=[coverage_build, coverage_build_without_tasks],
launch_step="launch builders",
collect_step="collect builds",
)
+ api.step_data(
"read build-ids.json",
api.file.read_json(
{
"valid_id": "//expected/valid/test",
"valid_id2": "//expected/valid/test2",
"valid_id3": "//expected/valid/test3",
"valid_id4": "//expected/valid/test4",
}
),
)
+ api.step_data(
"check malformed binaries.read malformed binaries",
api.file.read_text("valid_id\nvalid_id2\nvalid_id3\nvalid_id4"),
)
)
| |
# Tryjob exercising the collect_absolute and use_authenticated_url property
# flags with a single successful child build.
yield (
api.buildbucket_util.test("collect_absolute", tryjob=True, patch_set=9)
+ properties(collect_absolute=True, use_authenticated_url=True)
+ collect_steps
+ api.subbuild.child_build_steps(
builds=[coverage_build],
launch_step="launch builders",
collect_step="collect builds",
)
)