| # Copyright 2019 The Fuchsia Authors. All rights reserved. |
| # Use of this source code is governed by a BSD-style license that can be |
| # found in the LICENSE file. |
| """ fuchsia.py - Builds and optionally tests Fuchsia. |
| |
| # Execution overview |
| |
| ## Configuration |
| |
This recipe uses a protocol buffer message called a spec for most of its
configuration; the PROPERTIES are largely limited to what is required to
acquire the spec. The spec is either passed in directly via the |spec|
property or, if |spec_remote| is set, fetched from that git repo at the
revision determined from the BuildBucket build input, which ensures the
correct config is retrieved for a pending change vs. a committed change.
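
For illustration, a minimal spec might be constructed like this
(mirroring the test-only spec() helper in GenTests below; the field
values are hypothetical):

    Fuchsia(
        checkout=Fuchsia.Checkout(manifest="manifest", remote="remote"),
        build=Fuchsia.Build(run_tests=True),
        test=Fuchsia.Test(pool="fuchsia.tests", timeout_secs=30 * 60),
        artifact_gcs_bucket="gcs-bucket",
    )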
| |
| ## Checkout + Build |
| |
This recipe triggers a child build which runs the fuchsia/build recipe.
That recipe checks out the source code and builds it. This recipe then
retrieves the data required to orchestrate tests from CAS, where the
child build uploaded it.
| |
| ## Test |
| |
If configured to run tests, this recipe uses the test orchestration data
to run tests. That logic is in the testing recipe module. Under the hood,
that module triggers Swarming tasks that do the actual testing, waits for
them, and reports the results.
| |
| ## External Tests |
| |
If configured to run external tests, this recipe invokes various tools to
pass build artifacts to external infrastructure, trigger tests, wait for
them, and report the results.
| """ |
| |
| from google.protobuf import json_format |
| |
| from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2 |
| from PB.infra.fuchsia import Fuchsia |
| from PB.recipe_engine.result import RawResult |
| from PB.recipe_modules.recipe_engine.led.properties import ( |
| InputProperties as LedInputProperties, |
| ) |
| from PB.recipes.fuchsia.fuchsia.fuchsia import InputProperties |
| |
| DEPS = [ |
| "fuchsia/archive", |
| "fuchsia/artifacts", |
| "fuchsia/autocorrelator", |
| "fuchsia/build", |
| "fuchsia/buildbucket_util", |
| "fuchsia/checkout", |
| "fuchsia/fuchsia", |
| "fuchsia/fxt", |
| "fuchsia/recipe_testing", |
| "fuchsia/spec", |
| "fuchsia/subbuild", |
| "fuchsia/swarming_retry", |
| "fuchsia/testing", |
| "recipe_engine/buildbucket", |
| "recipe_engine/cas", |
| "recipe_engine/file", |
| "recipe_engine/led", |
| "recipe_engine/path", |
| "recipe_engine/properties", |
| "recipe_engine/python", |
| "recipe_engine/step", |
| "recipe_engine/swarming", |
| ] |
| |
| PROPERTIES = InputProperties |
| |
| |
| def RunSteps(api, props): |
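    """Orchestrates the flow described in the module docstring: acquires
    the spec, delegates checkout and build to a child build, then runs
    tests and/or external tests against the child build's outputs."""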
| # TODO(fxbug.dev/77879): Stop accepting checked-in specs and only pass them |
| # via properties. |
| if props.spec_remote: |
| spec, spec_revision = api.fuchsia.setup_with_spec(props.spec_remote) |
| else: |
| api.fuchsia.setup() |
| spec = props.spec |
| spec_revision = None |
| |
| boots_per_revision = props.boots_per_revision or 5 |
    # At some stage BuildBucket stores properties as google.protobuf.Value,
    # which represents all numbers (including ints) as float64. Build IDs
    # are ~2**63, above float64's 2**53 exact-integer limit, so the
    # conversion is lossy; we have to use a string property and convert to
    # int internally.
| child_build_id = int(props.child_build_id) if props.child_build_id else None |
| |
| orchestrator_id = api.buildbucket_util.id |
| if not spec.build.run_tests and not spec.external_tests.tap_projects: |
| raise api.step.InfraFailure( |
| "if not running tests, use the fuchsia/build recipe directly" |
| ) |
| |
| # Configure context of uploaded artifacts for test task construction and to |
| # get link to build artifacts. |
| api.artifacts.gcs_bucket = spec.artifact_gcs_bucket |
| api.artifacts.namespace = orchestrator_id |
| |
| with api.step.nest("build") as presentation: |
| child_input_properties = { |
| "parent_id": orchestrator_id, |
| # Set by the coverage.py recipe to be passed through to build.py. |
| "affected_tests_only": props.affected_tests_only, |
| # Set by the coverage.py recipe to be passed through to build.py. |
| "collect_coverage": props.collect_coverage, |
| } |
| if spec_revision: |
| child_input_properties["spec_revision"] = spec_revision |
| child_build = run_build_steps( |
| api, |
| presentation, |
| child_build_id, |
| child_input_properties, |
| ) |
| child_props = json_format.MessageToDict(child_build.output.properties) |
| if child_props.get(api.build.FINT_PARAMS_PROPERTY): |
| presentation.logs["fint_params"] = child_props[ |
| api.build.FINT_PARAMS_PROPERTY |
| ] |
| |
| got_revision = child_props.get("got_revision") |
| presentation.properties["got_revision"] = got_revision |
| |
| for prop in ( |
| api.checkout.CHECKOUT_INFO_PROPERTY, |
| api.build.FINT_PARAMS_PATH_PROPERTY, |
| ): |
| if prop in child_props: |
| presentation.properties[prop] = child_props[prop] |
| |
| if spec.build.run_tests or spec.external_tests.tap_projects: |
| skipped_because_unaffected = bool( |
| child_props.get("skipped_because_unaffected") |
| ) |
| affected_tests_no_work = bool(child_props.get("affected_tests_no_work")) |
        if (
            skipped_because_unaffected or affected_tests_no_work
        ) and not api.recipe_testing.enabled:
| message = ( |
| "Testing skipped because the change did not affect the build graph." |
| ) |
| presentation.step_text = message |
| presentation.properties[ |
| "skipped_because_unaffected" |
| ] = skipped_because_unaffected |
| presentation.properties[ |
| "affected_tests_no_work" |
| ] = affected_tests_no_work |
| return RawResult(summary_markdown=message, status=common_pb2.SUCCESS) |
| |
| # Present the link to the build artifacts uploaded by the child build. |
| if api.artifacts.gcs_bucket: |
| # If child_build_id is provided, the namespace used for the |
| # artifacts URL must be retrieved from the `parent_id` input |
| # property of the child build. |
| if child_build_id: |
| api.artifacts.namespace = json_format.MessageToDict( |
| child_build.input.properties |
| ).get("parent_id") |
| presentation.links["build_artifacts"] = api.artifacts.cloud_storage_url() |
| # Reset the artifacts namespace to the current orchestrator id if |
| # changed above. |
| api.artifacts.namespace = orchestrator_id |
| |
| if spec.build.run_tests: |
| ( |
| orchestration_inputs, |
| orchestration_inputs_hash, |
| ) = collect_test_orchestration_inputs(api, child_build, without_cl=False) |
| if props.perfcompare: |
| orchestration_inputs_without_cl, _ = collect_test_orchestration_inputs( |
| api, child_build, without_cl=True |
| ) |
| |
| # Copy to output properties so the coverage recipe can access it. |
| presentation.properties[ |
| api.build.test_orchestration_inputs_property_name(False) |
| ] = orchestration_inputs_hash |
| # Copy to our own properties so the results uploader in google3 can find |
| # it without knowing about the child. |
| rev_count_prop = api.checkout.REVISION_COUNT_PROPERTY |
| if rev_count_prop in child_props: |
| presentation.properties[rev_count_prop] = child_props[rev_count_prop] |
| |
| if spec.build.run_tests: |
| runs_per_shard = boots_per_revision if props.perfcompare else 1 |
| try: |
| results_with_cl = run_test_steps( |
| api, orchestration_inputs, spec, runs_per_shard |
| ) |
| except api.step.StepFailure as exc: |
| if api.buildbucket_util.is_tryjob: |
| with api.step.nest("check for correlated failures") as parent_step: |
| api.autocorrelator.check_try( |
| "check try", |
| exc, |
| exc.reason, |
| ignore_failed_build=True, |
| ignore_skipped_tests=True, |
| ) |
| api.autocorrelator.check_ci( |
| "check ci", |
| got_revision, |
| exc, |
| exc.reason, |
| ) |
| api.autocorrelator.set_properties(parent_step) |
| raise api.autocorrelator.compose_exception(exc) |
| |
| if props.perfcompare: |
| with api.step.nest("test without CL"): |
| results_without_cl = run_test_steps( |
| api, |
| orchestration_inputs_without_cl, |
| spec, |
| runs_per_shard, |
| ) |
| else: |
| results_without_cl = None |
| display_performance_results( |
| api, orchestration_inputs.perfcompare, results_without_cl, results_with_cl |
| ) |
| |
| if spec.external_tests.tap_projects: |
| with api.step.nest("run external tests") as presentation: |
| run_external_test_steps(api, presentation, spec) |
| |
| |
| def run_build_steps( |
| api, |
| presentation, |
| child_build_id, |
| child_input_properties, |
| ): |
| builder_name = "{}-subbuild".format(api.buildbucket.build.builder.builder) |
| if child_build_id: |
        # Make it clear in the UI that we reused an existing child build
        # rather than triggering a new one.
| presentation.step_text = "Reusing child build instead of triggering" |
| output_build = api.buildbucket.get(child_build_id) |
| build_url = "https://ci.chromium.org/b/%d" % child_build_id |
| presentation.links[builder_name] = build_url |
| else: |
| builds = api.subbuild.launch( |
| [builder_name], presentation, extra_properties=child_input_properties |
| ) |
| build_id = builds[builder_name].build_id |
| build_url = builds[builder_name].url |
| if not api.led.launched_by_led: |
| # Consumed by rerun recipe. Passing build IDs directly as |
| # properties doesn't work because all numbers get cast to floats, |
| # which is lossy, so we convert to str. |
| presentation.properties["child_build_id"] = str(build_id) |
| builds = api.subbuild.collect([build_id], presentation) |
| output_build = builds[build_id].build_proto |
| |
| if output_build.status != common_pb2.SUCCESS: |
| presentation.properties["failed_to_build"] = True |
| if output_build.status == common_pb2.INFRA_FAILURE: |
| exception_type = api.step.InfraFailure |
| description = "raised infra failure" |
| else: |
| exception_type = api.step.StepFailure |
| description = "failed" |
| |
        # Copy the child summary markdown into the parent summary markdown
        # to better propagate error messages.
| subbuild_summary = output_build.summary_markdown.strip() |
| summary = "[build](%s) %s" % (build_url, description) |
| if subbuild_summary: |
| summary += ":" |
| # If the subbuild summary is already multiple lines, start it on a |
| # new line. If it's one line, the final summary should also be one |
| # line. |
| summary += "\n\n" if "\n" in subbuild_summary else " " |
| summary += subbuild_summary |
| raise exception_type(summary) |
| |
| return output_build |
| |
| |
| def run_test_steps(api, orchestration_inputs, spec, runs_per_shard): |
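    """Runs the test tasks described by the orchestration inputs.

    Triggers the test Swarming tasks via the testing module, collects
    their results, uploads the results if so configured, and raises if
    any task ultimately failed.

    Returns:
        A list of test results, one per successful task attempt.
    """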
| tryjob = api.buildbucket_util.is_tryjob |
| |
| testing_tasks = api.testing.run_test_tasks( |
| debug_symbol_url=api.artifacts.debug_symbol_url(), |
| orchestration_inputs=orchestration_inputs, |
| max_attempts=spec.test.max_attempts, |
| rerun_budget_secs=spec.test.rerun_budget_secs, |
| runs_per_shard=runs_per_shard, |
| retry_task_on_test_failure=spec.test.retry_task_on_test_failure, |
| per_test_timeout_secs=spec.test.per_test_timeout_secs, |
| ) |
| |
| all_results = [] |
| successful_results = [] |
| for task in testing_tasks: |
| for attempt in task.get_all_attempts(): |
| if attempt.test_results: |
| all_results.append(attempt.test_results) |
| for attempt in task.get_successful_attempts(): |
| assert attempt.test_results |
| successful_results.append(attempt.test_results) |
| |
| # Upload test results |
| if spec.test.upload_results: |
| assert ( |
| spec.artifact_gcs_bucket |
| ), "artifact_gcs_bucket must be set if test.upload_results is" |
| with api.step.nest("upload test results") as presentation: |
| link = "go/fuchsia-result-store/bid:%s" % api.buildbucket_util.id |
| presentation.links[link] = link.replace("go/", "https://goto.google.com/") |
| swarming_task_ids = [] |
| resultdb_base_variant = { |
| "board": orchestration_inputs.fint_set_metadata.board, |
| "build_type": orchestration_inputs.fint_set_metadata.optimize, |
| "product": orchestration_inputs.fint_set_metadata.product, |
| } |
            # Upload all test results, including non-final attempts of
            # tasks that were retried, so that flakiness can be analyzed.
| for test_results in all_results: |
| final_resultdb_base_variant = resultdb_base_variant.copy() |
| final_resultdb_base_variant["test_environment"] = test_results.env_name |
| tags = [ |
| "%s:%s" % ("swarming_task_id", test_results.swarming_task_id), |
| "%s:%s" % ("swarming_bot_id", test_results.swarming_bot_id), |
| ] |
| test_results.upload_results( |
| gcs_bucket=spec.artifact_gcs_bucket, |
| upload_to_catapult=( |
| not tryjob |
| and spec.test.catapult_dashboard_master |
| and spec.test.catapult_dashboard_bot |
| ), |
| orchestration_inputs=orchestration_inputs, |
| resultdb_base_variant=final_resultdb_base_variant, |
| resultdb_tags=tags, |
| ) |
| swarming_task_ids.append(test_results.swarming_task_id) |
| # Consumed by the google3 results uploader and the coverage recipe. |
| presentation.properties["test-swarming-task-ids"] = swarming_task_ids |
| |
| api.testing.raise_failures(testing_tasks) |
| |
| return successful_results |
| |
| |
| def run_external_test_steps(api, presentation, spec): |
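    """Runs tests in an external infrastructure via the fxt module.

    Launches the tests against the uploaded build artifacts, monitors
    them until completion, and cleans up the remote workspace even if
    monitoring fails.
    """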
| api.fxt.use_staging_host = spec.external_tests.use_staging_host |
| resp = api.fxt.launch( |
| step_name="launch", |
| bucket=api.artifacts.gcs_bucket, |
| namespace=api.artifacts.namespace, |
| name=spec.external_tests.system_image, |
| projects=spec.external_tests.tap_projects, |
| presentation=presentation, |
| ) |
| try: |
| api.fxt.monitor(step_name="monitor", request_id=resp["request_id"]) |
| finally: |
| api.fxt.cleanup(step_name="cleanup", workspace=resp["workspace"]) |
| |
| |
| def collect_test_orchestration_inputs(api, build_proto, without_cl): |
| """Downloads archived orchestration inputs from a build. |
| |
| Args: |
| build_proto (Build): The Build proto for the build that produced the |
| test orchestration inputs. |
| without_cl (bool): Whether to download the "without CL" build. If false, |
| this downloads the "with CL" build. |
| |
| Returns: |
| FuchsiaBuildApi.TestOrchestrationInputs, hash (str) |
| |
| Raises: |
| An InfraFailure if the required property is not found. |
| """ |
| prop_name = api.build.test_orchestration_inputs_property_name(without_cl) |
| orchestration_inputs_hash = api.subbuild.get_property(build_proto, prop_name) |
| return ( |
| api.build.download_test_orchestration_inputs(orchestration_inputs_hash), |
| orchestration_inputs_hash, |
| ) |
| |
| |
| def display_performance_results( |
| api, perfcompare_tool_dir, results_without_cl, results_with_cl |
| ): |
| dest_dir = api.path["cleanup"].join("perf_dataset") |
| with_cl_dir = dest_dir.join("with_cl") |
| without_cl_dir = dest_dir.join("without_cl") |
| if results_without_cl: |
| dir_to_upload = dest_dir |
| dirs_to_display = [without_cl_dir, with_cl_dir] |
| property_name = "perfcompare_dataset_digest" |
| display_step_name = "compare perf test results without and with CL" |
| else: |
| dir_to_upload = with_cl_dir |
| dirs_to_display = [with_cl_dir] |
| property_name = "perf_dataset_digest" |
| display_step_name = "summary of perf test results" |
| |
| with api.step.nest("aggregate task outputs into single directory") as presentation: |
| api.file.ensure_directory("make directory", dest_dir) |
| if results_without_cl: |
| with api.step.nest('convert results for "without_cl" revision'): |
| make_perfcompare_dataset_dir(api, without_cl_dir, results_without_cl) |
| with api.step.nest('convert results for "with_cl" revision'): |
| make_perfcompare_dataset_dir(api, with_cl_dir, results_with_cl) |
| # Upload the with/without-CL dataset to CAS so that it can be |
| # easily downloaded for further analysis. The cost of this should be |
| # low because CAS uses share-by-hash (it is content-addressed) and |
| # the individual files already came from CAS. |
| dataset_hash = api.archive.upload(dir_to_upload) |
| presentation.properties[property_name] = dataset_hash |
| # Consumers also need to know *which* CAS instance the dataset is |
| # stored on. |
| presentation.properties["cas_instance"] = api.cas.instance |
| |
| api.python( |
| display_step_name, |
| perfcompare_tool_dir.join("perfcompare.py"), |
| ["compare_perf"] + dirs_to_display, |
| ) |
| |
| |
def make_perfcompare_dataset_dir(api, dest_dir, all_results):
    """Converts test results into the directory layout accepted by the
    perfcompare.py tool for a multi-boot dataset."""
| by_boot_dir = dest_dir.join("by_boot") |
| api.file.ensure_directory("make results directory", by_boot_dir) |
| for boot_idx, test_results in enumerate(all_results): |
| api.file.copytree( |
| "copy perf test results", |
| test_results.results_dir, |
| by_boot_dir.join("boot%06d" % boot_idx), |
| ) |
| |
| |
| def GenTests(api): |
| def download_step_data(): |
| task_request_jsonish = api.testing.task_request_jsonish() |
| return api.testing.task_requests_step_data( |
| [task_request_jsonish], |
| "build.download test orchestration inputs.load task requests", |
| ) + api.step_data( |
| "build.download test orchestration inputs.load triage sources", |
| api.file.read_json(["triage/config.triage", "other/triage/config.triage"]), |
| ) |
| |
| def swarming_task_result(**kwargs): |
| return api.swarming.task_result(use_cas=True, **kwargs) |
| |
| def test_step_data(id="610", failure=False): |
| shard_name = "QEMU" |
| outputs = ["out/path/to/output/file"] |
| return download_step_data() + ( |
| api.testing.task_retry_step_data( |
| [swarming_task_result(id=id, name=shard_name, outputs=outputs)] |
| ) |
| + api.testing.test_step_data(shard_name=shard_name, failure=failure) |
| ) |
| |
| def external_tests_step_data(success=True): |
| return ( |
| api.fxt.launch( |
| "run external tests.launch", |
| test_data={ |
| "request_id": "test-id", |
| "workspace": "test-ws", |
| "change_num": 12345, |
| }, |
| ) |
| + api.fxt.monitor("run external tests.monitor", success=success) |
| ) |
| |
| def spec( |
| run_tests=True, |
| run_external_tests=False, |
| artifact_gcs_bucket=None, |
| max_attempts=None, |
| retry_task_on_test_failure=False, |
| ): |
| test_spec = None |
| if run_tests: |
| test_spec = Fuchsia.Test( |
| max_shard_size=0, |
| timeout_secs=30 * 60, |
| pool="fuchsia.tests", |
| swarming_expiration_timeout_secs=10 * 60, |
| swarming_io_timeout_secs=5 * 60, |
| upload_results=bool(artifact_gcs_bucket), |
| use_runtests=True, |
| max_attempts=max_attempts, |
| retry_task_on_test_failure=retry_task_on_test_failure, |
| ) |
| external_tests_spec = None |
| if run_external_tests: |
| external_tests_spec = Fuchsia.ExternalTests( |
| system_image="core-x64", |
| tap_projects=["fuchsia.tests"], |
| ) |
| return Fuchsia( |
| checkout=Fuchsia.Checkout(manifest="manifest", remote="remote"), |
| build=Fuchsia.Build( |
| run_tests=run_tests, |
| upload_results=bool(artifact_gcs_bucket), |
| ), |
| test=test_spec, |
| external_tests=external_tests_spec, |
| artifact_gcs_bucket=artifact_gcs_bucket, |
| ) |
| |
| def properties( |
| run_tests=True, |
| run_external_tests=False, |
| artifact_gcs_bucket=None, |
| max_attempts=None, |
| retry_task_on_test_failure=False, |
| **kwargs |
| ): |
| return api.properties( |
| spec=spec( |
| run_tests=run_tests, |
| run_external_tests=run_external_tests, |
| artifact_gcs_bucket=artifact_gcs_bucket, |
| max_attempts=max_attempts, |
| retry_task_on_test_failure=retry_task_on_test_failure, |
| ), |
| **kwargs |
| ) |
| |
| child_build = api.subbuild.ci_build_message( |
| builder="builder-subbuild", |
| input_props={"parent_id": "123123"}, |
| output_props={ |
| "integration-revision-count": 1, |
| "test_orchestration_inputs_digest": "abc", |
| "fint_params": 'field: "value"', |
| "got_revision": "abcdef", |
| "checkout_info": {"manifest": "foo"}, |
| }, |
| status="SUCCESS", |
| ) |
| failed_child_build = api.subbuild.ci_build_message( |
| builder="builder-subbuild", |
| output_props={ |
| "integration-revision-count": 1, |
| "test_orchestration_inputs_digest": "abc", |
| "got_revision": "abcdef", |
| }, |
| status="FAILURE", |
| ) |
| failed_child_build.summary_markdown = "failed to build fuchsia" |
| |
| skipped_child_build = api.subbuild.try_build_message( |
| builder="builder-subbuild", |
| output_props={"skipped_because_unaffected": True, "got_revision": "abcdef"}, |
| status="SUCCESS", |
| ) |
| |
| failed_try_child_build = api.subbuild.try_build_message( |
| builder="builder-subbuild", |
| output_props={ |
| "integration-revision-count": 1, |
| "test_orchestration_inputs_digest": "abc", |
| "got_revision": "abcdef", |
| }, |
| status="FAILURE", |
| ) |
| |
| infra_failure_child_build = api.subbuild.ci_build_message( |
| builder="builder-subbuild", |
| output_props={ |
| "integration-revision-count": 1, |
| "test_orchestration_inputs_digest": "abc", |
| "got_revision": "abcdef", |
| }, |
| status="INFRA_FAILURE", |
| ) |
| infra_failure_child_build.summary_markdown = "checkout timed out\nafter 30m" |
| |
| yield ( |
| api.buildbucket_util.test("successful_build_and_test") |
| + properties(artifact_gcs_bucket="gcs-bucket") |
| + api.subbuild.child_build_steps(builds=[child_build]) |
| + test_step_data() |
| ) |
| |
| yield ( |
| api.buildbucket_util.test("skipped_because_unaffected") |
| # Cover the code path for reading a spec from integration.git. |
| + properties(spec_remote="https://fuchsia.googlesource.com/integration") |
| + api.spec.spec_loaded_ok(step_name="load spec.read spec", message=spec()) |
| + api.subbuild.child_build_steps(builds=[skipped_child_build]) |
| ) |
| |
| # Cover the case where the build succeeds but one or more tests fail. |
| yield ( |
| api.buildbucket_util.test("failed_tests_cq", tryjob=True, status="failure") |
| + properties( |
| artifact_gcs_bucket="gcs-bucket", |
| max_attempts=1, |
| **{"$fuchsia/autocorrelator": {"ci_bucket": "ci", "ci_builder": "builder"}} |
| ) |
| + api.subbuild.child_build_steps(builds=[child_build]) |
| # Pass max_attempts=1 because it keeps the test expectations |
| # simpler. Otherwise we would have to generate test expectations |
| # for a retry of the failed task. |
| + test_step_data(failure=True) |
| + api.autocorrelator.check_try( |
| "check for correlated failures.check try", |
| test_data=[{"build_id": "456", "score": 0.98, "is_green": False}], |
| ) |
| + api.autocorrelator.check_ci( |
| "check for correlated failures.check ci", |
| test_data={ |
| "build_id": "789", |
| "score": 0.96, |
| "is_green": False, |
| "commit_dist": 0, |
| }, |
| ) |
| ) |
| |
| # Test that if one shard fails after max_attempts, there are no further |
| # retries of another shard that fails. |
| def test_stopping_after_max_attempts(): |
| outputs = ["out/path/to/output/file"] |
| return ( |
| api.buildbucket_util.test("stop_after_max_attempts", status="failure") |
| + properties( |
| artifact_gcs_bucket="gcs-bucket", |
| # This will cause Shard2 to want to retry after it fails, but it |
| # won't get to because Shard1 will have already failed the max |
| # attempts. |
| retry_task_on_test_failure=True, |
| ) |
| + api.subbuild.child_build_steps(builds=[child_build]) |
| # Expectations for shard task descriptions. |
| + api.testing.task_requests_step_data( |
| [ |
| api.testing.task_request_jsonish(name="Shard1"), |
| api.testing.task_request_jsonish(name="Shard2"), |
| ], |
| "build.download test orchestration inputs.load task requests", |
| ) |
| + api.step_data( |
| "build.download test orchestration inputs.load triage sources", |
| api.file.read_json( |
| ["triage/config.triage", "other/triage/config.triage"] |
| ), |
| ) |
| # Expectations for the task launch steps. |
| + api.swarming_retry.trigger_data("Shard1", "500", attempt=0) |
| + api.swarming_retry.trigger_data("Shard2", "600", attempt=0) |
| # Shard1 fails. (Here the Swarming task returns a failure |
| # status.) |
| + api.swarming_retry.collect_data( |
| [ |
| swarming_task_result( |
| id="500", name="Shard1", outputs=outputs, failure=True |
| ) |
| ], |
| iteration=0, |
| ) |
| # Shard1 is retried and it fails a second time. It is not |
| # retried again because it has reached its limit of |
| # max_attempts=2. |
| + api.swarming_retry.trigger_data("Shard1", "501", attempt=1, iteration=1) |
| + api.swarming_retry.collect_data( |
| [ |
| swarming_task_result( |
| id="501", name="Shard1", outputs=outputs, failure=True |
| ) |
| ], |
| iteration=1, |
| ) |
| # Shard2 fails. It is not retried because the other shard |
| # reached its max_attempts limit. (The Swarming task returns a |
| # success status but there are Fuchsia test failures.) |
| + api.swarming_retry.collect_data( |
| [swarming_task_result(id="600", name="Shard2", outputs=outputs)], |
| iteration=2, |
| ) |
| + api.testing.test_step_data(shard_name="Shard2", failure=True, iteration=2) |
| ) |
| |
| yield test_stopping_after_max_attempts() |
| |
| def test_stopping_after_failed_affected(): |
| outputs = ["out/path/to/output/file"] |
| return ( |
| api.buildbucket_util.test("stop_after_failed_affected", status="failure") |
| + properties(artifact_gcs_bucket="gcs-bucket") |
| + api.subbuild.child_build_steps(builds=[child_build]) |
| # Expectations for shard task descriptions. |
| + api.testing.task_requests_step_data( |
| [ |
| api.testing.task_request_jsonish(name="affected:Shard1"), |
| api.testing.task_request_jsonish(name="Shard2"), |
| ], |
| "build.download test orchestration inputs.load task requests", |
| ) |
| + api.step_data( |
| "build.download test orchestration inputs.load triage sources", |
| api.file.read_json( |
| ["triage/config.triage", "other/triage/config.triage"] |
| ), |
| ) |
| # Expectations for the task launch steps. |
| + api.swarming_retry.trigger_data("affected:Shard1", "500", attempt=0) |
| + api.swarming_retry.trigger_data("Shard2", "600", attempt=0) |
| # affected:Shard1 fails. (Here the Swarming task returns a failure |
| # status.) |
| + api.swarming_retry.collect_data( |
| [ |
| swarming_task_result( |
| id="500", name="affected:Shard1", outputs=outputs, failure=True |
| ) |
| ], |
| iteration=0, |
| ) |
| # affected:Shard1 is retried and it fails a second time. |
| + api.swarming_retry.trigger_data( |
| "affected:Shard1", "501", attempt=1, iteration=1 |
| ) |
| + api.swarming_retry.collect_data( |
| [ |
| swarming_task_result( |
| id="501", name="affected:Shard1", outputs=outputs |
| ) |
| ], |
| iteration=1, |
| ) |
| + api.testing.test_step_data( |
| shard_name="affected:Shard1", |
| failure=True, |
| iteration=1, |
| ) |
| # Shard2 is not collected because the affected shards completed and |
| # failed. |
| ) |
| |
| yield test_stopping_after_failed_affected() |
| |
| # This is similar to test_step_data(), except it generates expectations |
| # for multiple runs of the same shard. |
| def test_step_data_perfcompare(task_id_base): |
| shard_name = "QEMU" |
| outputs = ["out/path/to/output/file"] |
| task_id1 = str(task_id_base) |
| task_id2 = str(task_id_base + 1) |
| test = ( |
| download_step_data() |
| # Expectations for the task launch steps. |
| + api.swarming_retry.trigger_data("QEMU", task_id1, attempt=0) |
| + api.swarming_retry.trigger_data("QEMU", task_id2, attempt=1) |
| # Expectations for the Swarming "collect" steps. The two tasks |
| # complete in different invocations of "collect". |
| + api.swarming_retry.collect_data( |
| [swarming_task_result(id=task_id1, name=shard_name, outputs=outputs)], |
| iteration=0, |
| ) |
| + api.swarming_retry.collect_data( |
| [swarming_task_result(id=task_id2, name=shard_name, outputs=outputs)], |
| iteration=1, |
| ) |
| # Expectations for the "process results" steps. |
| + api.testing.test_step_data(shard_name=shard_name, iteration=0) |
| + api.testing.test_step_data(shard_name=shard_name, iteration=1) |
| ) |
| return test |
| |
| # Test expectations for the "without CL" recipe steps of perfcompare mode. |
| def test_step_data_perfcompare_without_cl(task_id_base): |
| test = test_step_data_perfcompare(task_id_base) |
| |
| def rename_step(old_name, new_name): |
| test.step_data[new_name] = test.step_data.pop(old_name) |
| |
| rename_step( |
| "build.download test orchestration inputs.load task requests", |
| "build.download test orchestration inputs (2).load task requests", |
| ) |
| rename_step( |
| "build.download test orchestration inputs.load triage sources", |
| "build.download test orchestration inputs (2).load triage sources", |
| ) |
| launch_names = [ |
| "launch/collect.0.launch.QEMU (attempt 0).trigger", |
| "launch/collect.0.launch.QEMU (attempt 1).trigger", |
| "launch/collect.0.collect", |
| "launch/collect.0.process results.QEMU.tefmocheck", |
| "launch/collect.0.process results.QEMU.get extracted files", |
| "launch/collect.1.collect", |
| "launch/collect.1.process results.QEMU.tefmocheck", |
| "launch/collect.1.process results.QEMU.get extracted files", |
| ] |
| for name in launch_names: |
| rename_step(name, "test without CL.%s" % name) |
| return test |
| |
| yield ( |
| api.buildbucket_util.test("successful_build_and_test_perfcompare") |
| # Pass a smaller value for boots_per_revision than the default to |
| # reduce the size of the test expectations output, but use a number |
| # >1 in order to test multiple boots. |
| + properties( |
| artifact_gcs_bucket="gcs-bucket", |
| perfcompare=True, |
| boots_per_revision=2, |
| ) |
| + api.subbuild.child_build_steps( |
| builds=[ |
| api.subbuild.ci_build_message( |
| builder="builder-subbuild", |
| output_props={ |
| "integration-revision-count": 1, |
| "test_orchestration_inputs_digest": "abc", |
| "test_orchestration_inputs_digest_without_cl": "efg", |
| "got_revision": "abcdef", |
| }, |
| status="SUCCESS", |
| ) |
| ] |
| ) |
| + test_step_data_perfcompare(task_id_base=9900100) |
| + test_step_data_perfcompare_without_cl(task_id_base=9900200) |
| ) |
| |
| yield ( |
| api.buildbucket_util.test("child_build_provided") |
| + properties( |
| artifact_gcs_bucket="gcs-bucket", |
| child_build_id=str(child_build.id), |
| ) |
| + api.buildbucket.simulated_get(child_build, step_name="build.buildbucket.get") |
| + test_step_data() |
| ) |
| |
| yield ( |
| api.buildbucket_util.test("build_only_failed", status="infra_failure") |
| + properties(run_tests=False) |
| ) |
| |
| yield ( |
| api.buildbucket_util.test("build_failed", status="failure") |
| + properties(artifact_gcs_bucket="gcs-bucket") |
| + api.subbuild.child_build_steps(builds=[failed_child_build]) |
| ) |
| |
| yield ( |
| api.buildbucket_util.test("build_infra_failure", status="infra_failure") |
| + properties(artifact_gcs_bucket="gcs-bucket") |
| + api.subbuild.child_build_steps(builds=[infra_failure_child_build]) |
| ) |
| |
| yield ( |
| api.buildbucket_util.test("build_with_led", status="failure") |
| + properties( |
| **{ |
| "$recipe_engine/led": LedInputProperties( |
| led_run_id="led/user_example.com/abc123", |
| ), |
| } |
| ) |
| + api.subbuild.child_led_steps(builds=[failed_child_build]) |
| ) |
| |
| yield ( |
| api.buildbucket_util.test( |
| "build_with_led_tryjob", tryjob=True, status="failure" |
| ) |
| + properties( |
| **{ |
| "$recipe_engine/led": LedInputProperties( |
| led_run_id="led/user_example.com/abc123", |
| ), |
| } |
| ) |
| + api.subbuild.child_led_steps(builds=[failed_try_child_build]) |
| ) |
| |
| yield ( |
| api.buildbucket_util.test("successful_external_tests") |
| + properties( |
| artifact_gcs_bucket="gcs-bucket", |
| run_tests=False, |
| run_external_tests=True, |
| ) |
| + api.subbuild.child_build_steps(builds=[child_build]) |
| + external_tests_step_data() |
| ) |
| |
| yield ( |
| api.buildbucket_util.test("failed_external_tests", status="failure") |
| + properties( |
| artifact_gcs_bucket="gcs-bucket", |
| run_tests=False, |
| run_external_tests=True, |
| ) |
| + api.subbuild.child_build_steps(builds=[child_build]) |
| + external_tests_step_data(success=False) |
| ) |