# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe for building Fuchsia and running performance tests.
This differs from the fuchsia recipe in the following ways:
* Performance Tests are run instead of unit tests.
* Tests are always run (this recipe is not used to verify builds).
* Test results are uploaded to the catapult dashboard after execution.
"""
from recipe_engine.config import Enum, List, Single
from recipe_engine.recipe_api import Property
from PB.recipe_modules.recipe_engine.led.properties import (
InputProperties as LedInputProperties,
)

TARGETS = ["arm64", "x64"]
BUILD_TYPES = ["debug", "release", "thinlto", "lto"]

DEPS = [
"fuchsia/artifacts",
"fuchsia/build",
"fuchsia/buildbucket_util",
"fuchsia/catapult",
"fuchsia/checkout",
"fuchsia/fuchsia",
"fuchsia/testing",
"fuchsia/testing_requests",
"recipe_engine/buildbucket",
"recipe_engine/file",
"recipe_engine/json",
"recipe_engine/led",
"recipe_engine/path",
"recipe_engine/raw_io",
"recipe_engine/step",
"recipe_engine/swarming",
"recipe_engine/time",
]

PROPERTIES = {
"manifest": Property(kind=str, help="Jiri manifest to use"),
"remote": Property(kind=str, help="Remote manifest repository"),
"target": Property(kind=Enum(*TARGETS), help="Target to build"),
"build_type": Property(
kind=Enum(*BUILD_TYPES), help="The build type", default="debug"
),
"packages": Property(kind=List(basestring), help="Packages to build", default=[]),
"variants": Property(
kind=List(basestring),
help="--variant arguments to GN in `select_variant`",
default=[],
),
"gn_args": Property(
kind=List(basestring), help="Extra args to pass to GN", default=[]
),
"ninja_targets": Property(
kind=List(basestring), help="Extra target args to pass to ninja", default=[]
),
"board": Property(kind=str, help="Board to build", default=None),
"product": Property(kind=str, help="Product to build", default=None),
"test_pool": Property(
kind=str,
help="Swarming pool from which a test task will be drawn",
default="fuchsia.tests",
),
"device_type": Property(
kind=str,
help="The type of device to execute tests on, if the value is"
" not QEMU it will be passed to Swarming as the device_type"
" dimension",
default="QEMU",
),
"pave": Property(
kind=bool,
help="Whether to pave images the device for testing. (Ignored if"
" device_type == QEMU)",
default=True,
),
# Each layer should have a Fuchsia package containing a single benchmarks.sh which
# runs all benchmarks. For more information, see the following documentation:
# https://fuchsia.googlesource.com/docs/+/master/development/benchmarking/running_on_ci.md
"benchmarks_package": Property(
kind=str, help="The name of the package containing benchmarks.sh"
),
# Performance dashboard information.
#
# These values are the search terms that will be used when finding graphs in
# the Catapult dashboard. TODO(IN-336): Link to docs once they're public.
#
# Explicitly passing these values prevents BuildBucketApi changes, builder
# renames, or other unexpected changes from affecting the data in the
# dashboard.
"dashboard_masters_name": Property(
kind=str, help='The name of the "masters" field in the performance dashboard'
),
"dashboard_bots_name": Property(
kind=str, help='The name of the "bots" field in the performance dashboard'
),
"upload_to_dashboard": Property(
kind=bool,
help="Whether to upload benchmark results. Make sure you set this to false when testing",
default=True,
),
"test_timeout_secs": Property(
kind=Single((int, float)),
help="How long to wait until timing out on tests",
default=40 * 60,
),
"gcs_bucket": Property(
kind=str, help="GCS bucket for uploading test results", default=""
),
"artifact_gcs_bucket": Property(
kind=str, help="GCS bucket to upload to and read build artifacts from"
),
"test_task_service_account": Property(
kind=str, help="The service account to run test tasks with", default=""
),
}


def RunSteps(
api,
manifest,
remote,
target,
build_type,
packages,
variants,
gn_args,
ninja_targets,
test_pool,
upload_to_dashboard,
device_type,
pave,
dashboard_masters_name,
dashboard_bots_name,
benchmarks_package,
board,
product,
test_timeout_secs,
gcs_bucket,
artifact_gcs_bucket,
test_task_service_account,
):
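    # The timeout property may arrive as an int or a float; normalize it to an
    # int before handing it to the testing requests below.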
test_timeout_secs = int(test_timeout_secs)
checkout = api.checkout.fuchsia_with_options(
path=api.checkout.default_root_dir,
build=api.buildbucket.build,
manifest=manifest,
remote=remote,
)
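    # Record when this run executed; the value is passed to the Catapult
    # converter below so uploaded samples carry the correct timestamp.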
execution_timestamp_ms = api.time.ms_since_epoch()
if api.led.launched_by_led:
log_url = "https://ci.chromium.org/swarming/task/%s" % api.led.run_id
else:
# Get the LUCI build log URL to attach to the perf data. This might be empty
# or None because of an infra failure.
build_id = api.buildbucket.build_id
# Although it's unusual, BuildBucketApi returns parsed JSON as the step
# result's stdout.
build_json = api.buildbucket.get_build(build_id).stdout
log_url = build_json.get("build", {}).get("url", None)
assert log_url, (
"Couldn't fetch info for build %s. BuildBucket API returned: %s"
% (build_id, build_json)
)
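    # The single test command runs benchmarks.sh on the target; everything
    # after --catapult-converter-args tags the converted results with the
    # dashboard metadata and log URL gathered above.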
test_cmds = [
" ".join(
[
"/pkgfs/packages/%s/0/bin/benchmarks.sh" % benchmarks_package,
api.testing_requests.results_dir_on_target,
"--catapult-converter-args",
"--bots",
dashboard_bots_name,
"--masters",
dashboard_masters_name,
"--execution-timestamp-ms",
"%d" % execution_timestamp_ms,
"--log-url",
log_url,
]
)
]
build = api.build.with_options(
checkout=checkout,
target=target,
build_type=build_type,
packages=packages,
variants=variants,
gn_args=gn_args,
ninja_targets=ninja_targets,
board=board,
product=product,
pave=pave,
)
    # Must be set before testing_requests.deprecated_task_requests() is called.
api.artifacts.gcs_bucket = artifact_gcs_bucket
api.artifacts.uuid = api.buildbucket_util.id
task_requests = api.testing_requests.deprecated_task_requests(
build,
api.buildbucket.build,
test_cmds,
device_type,
test_pool,
test_timeout_secs,
pave,
default_service_account=test_task_service_account,
)
    # Must be done after testing_requests.deprecated_task_requests() is called,
    # because that modifies the filesystem images.
# TODO(garymm,joshuaseaton): once legacy_qemu code paths are removed, remove
# this comment as it will become false.
api.artifacts.upload("upload artifacts", build)
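    # Bundle the build results and task requests into the orchestration inputs
    # that the testing module uses to schedule and collect the Swarming task.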
orchestration_inputs = api.build.test_orchestration_inputs_from_build_results(
build, task_requests
)
testing_tasks = api.testing.run_test_tasks(
api.artifacts.debug_symbol_url(),
orchestration_inputs=orchestration_inputs,
max_attempts=1, # Don't retry tests in case of failures.
)
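    # Retries are disabled above, so expect exactly one task with exactly one
    # attempt and read the test results from that attempt.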
assert len(testing_tasks) == 1, "%d != 1" % len(testing_tasks)
testing_task = testing_tasks[0]
attempts = testing_task.get_all_attempts()
assert len(attempts) == 1, "%d != 1" % len(attempts)
test_results = attempts[0].test_results
# Upload results for all of the benchmarks that ran successfully.
if test_results and not api.buildbucket_util.is_tryjob:
for test in test_results.passed_tests:
if api.catapult.is_catapult_file(test["name"]):
# Save Catapult files to the test results output dir so they get
# uploaded by upload_results().
api.file.write_text(
"save catapult output for %s" % test["name"],
test_results.output_dir.join(test["name"]),
test_results.get_output(test["output_file"]),
)
test_results.upload_results(
gcs_bucket,
upload_to_catapult=upload_to_dashboard,
orchestration_inputs=orchestration_inputs,
)
api.testing.raise_failures(testing_tasks)


def GenTests(api):
# Test API response for a call to the BuildBucket API's `get` method, which
# returns JSON information for a single build.
#
# TODO(kjharland): This should be amended upstream in BuildbucketTestApi.
buildbucket_get_response = api.step_data(
"buildbucket.get",
stdout=api.raw_io.output_text(
api.json.dumps(
{
"build": {
"id": "123",
"status": "SCHEDULED",
"url": "https://ci.chromium.org/p/fuchsia/builds/b123",
"bucket": "luci.fuchsia.ci",
}
}
)
),
)
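    # Minimal tests.json entry describing a single Catapult output file; used
    # to seed the test step data in the cases below.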
tests_json = [
{
"test": {
"name": "benchmark.catapult_json",
"os": "fuchsia",
"label": "asdf",
"path": "benchmark.catapult_json",
},
},
]
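    # Wrapper around api.fuchsia.test that seeds Swarming task results and test
    # step data appropriate for either QEMU or hardware device runs.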
def test(*args, **kwargs):
on_device = kwargs["properties"].get("device_type", "QEMU") != "QEMU"
task_state = kwargs.pop("task_state", api.swarming.TaskState.COMPLETED)
outputs = ["out/output"] if on_device else ["output.fs"]
extra_steps = [
api.testing.task_retry_step_data(
[
api.swarming.task_result(
id="0", name="all tests", outputs=outputs, state=task_state
),
]
),
]
output_dir_contents = kwargs.pop("output_dir_contents", None)
if task_state == api.swarming.TaskState.COMPLETED:
extra_steps.append(
api.testing.test_step_data(
legacy_qemu=not on_device,
output_dir_contents=output_dir_contents,
tests_json=tests_json,
shard_name="all tests",
)
)
kwargs["steps"] = extra_steps + kwargs.get("steps", [])
return api.fuchsia.test(*args, **kwargs)
# Test cases for running Fuchsia performance tests as a swarming task.
yield test(
"successful_run",
properties=dict(
dashboard_masters_name="fuchsia.ci",
dashboard_bots_name="topaz-builder",
artifact_gcs_bucket="fuchsia-infra-artifacts",
benchmarks_package="topaz_benchmarks",
run_tests=True,
test_task_service_account="service_account",
),
steps=[buildbucket_get_response],
)
yield test(
"run_with_led",
properties={
"dashboard_masters_name": "fuchsia.ci",
"dashboard_bots_name": "topaz-builder",
"artifact_gcs_bucket": "fuchsia-infra-artifacts",
"benchmarks_package": "topaz_benchmarks",
"run_tests": True,
"test_task_service_account": "service_account",
"$recipe_engine/led": LedInputProperties(
led_run_id="led/user_example.com/abc123",
isolated_input=LedInputProperties.IsolatedInput(
hash="abc123",
namespace="default-gzip",
server="isolateserver.appspot.com",
),
),
},
)
yield test(
"failed_run",
status="failure",
properties=dict(
dashboard_masters_name="fuchsia.ci",
dashboard_bots_name="topaz-builder",
artifact_gcs_bucket="fuchsia-infra-artifacts",
benchmarks_package="topaz_benchmarks",
run_tests=True,
test_task_service_account="service_account",
),
steps=[
buildbucket_get_response,
api.testing.test_step_data(
failure=True, tests_json=tests_json, shard_name="all tests"
),
],
)
    # Tests running this recipe with a pending Gerrit change. Note that
    # upload_to_dashboard is false; be sure to set it to false when testing
    # patches.
yield test(
"with_patch",
tryjob=True,
properties=dict(
run_tests=True,
upload_to_dashboard=False,
dashboard_masters_name="fuchsia.try",
dashboard_bots_name="topaz-builder",
artifact_gcs_bucket="fuchsia-infra-artifacts",
benchmarks_package="topaz_benchmarks",
test_task_service_account="service_account",
),
steps=[buildbucket_get_response],
)
yield test(
"device_tests",
properties=dict(
dashboard_masters_name="fuchsia.ci",
dashboard_bots_name="topaz-builder",
artifact_gcs_bucket="fuchsia-infra-artifacts",
benchmarks_package="topaz_benchmarks",
run_tests=True,
device_type="Intel NUC Kit NUC7i5DNHE",
test_task_service_account="service_account",
),
steps=[buildbucket_get_response],
)