# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe for building Fuchsia and running performance tests.
This differs from the fuchsia recipe in the following ways:
* Performance Tests are run instead of unit tests.
* Tests are always run (this recipe is not used to verify builds).
* Test results are uploaded to the catapult dashboard after execution.
"""

from recipe_engine.config import Enum, List, Single
from recipe_engine.recipe_api import Property

TARGETS = ['arm64', 'x64']
BUILD_TYPES = ['debug', 'release', 'thinlto', 'lto']

DEPS = [
    'fuchsia/build',
    'fuchsia/buildbucket_util',
    'fuchsia/catapult',
    'fuchsia/checkout',
    'fuchsia/fuchsia',
    'fuchsia/minfs',
    'fuchsia/testing',
    'fuchsia/testing_requests',
    'fuchsia/testsharder',
    'fuchsia/upload',
    'recipe_engine/buildbucket',
    'recipe_engine/context',
    'recipe_engine/file',
    'recipe_engine/json',
    'recipe_engine/path',
    'recipe_engine/properties',
    'recipe_engine/raw_io',
    'recipe_engine/step',
    'recipe_engine/time',
]

PROPERTIES = {
    'project':
        Property(kind=str, help='Jiri remote manifest project', default=None),
    'manifest':
        Property(kind=str, help='Jiri manifest to use'),
    'remote':
        Property(kind=str, help='Remote manifest repository'),
    'target':
        Property(kind=Enum(*TARGETS), help='Target to build'),
    'build_type':
        Property(
            kind=Enum(*BUILD_TYPES), help='The build type', default='debug'),
    'packages':
        Property(kind=List(basestring), help='Packages to build', default=[]),
    'variants':
        Property(
            kind=List(basestring),
            help='--variant arguments to GN in `select_variant`',
            default=[]),
    'gn_args':
        Property(
            kind=List(basestring), help='Extra args to pass to GN', default=[]),
    'ninja_targets':
        Property(
            kind=List(basestring),
            help='Extra targets to pass to ninja',
            default=[]),
    'board':
        Property(kind=str, help='Board to build', default=None),
    'product':
        Property(kind=str, help='Product to build', default=None),
    'test_pool':
        Property(
            kind=str,
            help='Swarming pool from which a test task will be drawn',
            default='fuchsia.tests'),
    'device_type':
        Property(
            kind=str,
            help='The type of device to execute tests on. If the value is'
            ' not QEMU, it will be passed to Swarming as the device_type'
            ' dimension',
            default='QEMU'),
    'pave':
        Property(
            kind=bool,
            help='Whether to pave images to the device for testing. (Ignored'
            ' if device_type == QEMU)',
            default=True),
    # Each layer should have a Fuchsia package containing a single
    # benchmarks.sh script that runs all benchmarks. For more information, see
    # the following documentation:
    # https://fuchsia.googlesource.com/docs/+/master/development/benchmarking/running_on_ci.md
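    #
    # For example, with benchmarks_package='topaz_benchmarks' (as in GenTests
    # below), the runner is expected at
    # /pkgfs/packages/topaz_benchmarks/0/bin/benchmarks.sh, and this recipe
    # invokes it with the on-target results directory as its first argument
    # (see test_cmds in RunSteps).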
    'benchmarks_package':
        Property(
            kind=str, help='The name of the package containing benchmarks.sh'),
    # Performance dashboard information.
    #
    # These values are the search terms that will be used when finding graphs
    # in the Catapult dashboard. TODO(IN-336): Link to docs once they're
    # public.
    #
    # Explicitly passing these values prevents BuildBucketApi changes, builder
    # renames, or other unexpected changes from affecting the data in the
    # dashboard.
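    #
    # For example, the CI test cases in GenTests below pass
    # dashboard_masters_name='fuchsia.ci' and
    # dashboard_bots_name='topaz-builder'.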
    'dashboard_masters_name':
        Property(
            kind=str,
            help='The name of the "masters" field in the performance dashboard'
        ),
    'dashboard_bots_name':
        Property(
            kind=str,
            help='The name of the "bots" field in the performance dashboard'),
    'upload_to_dashboard':
        Property(
            kind=bool,
            help='Whether to upload benchmark results. Set this to False when'
            ' testing',
            default=True),
    'test_timeout_secs':
        Property(
            kind=Single((int, float)),
            help='How long to wait before timing out on tests',
            default=40 * 60),
    'gcs_bucket':
        Property(
            kind=str, help='GCS bucket for uploading test results', default=''),
    'debug_symbol_gcs_bucket':
        Property(
            kind=str,
            help='GCS bucket to upload debug symbols to and read them from'),
}


def RunSteps(api, project, manifest, remote, target, build_type, packages,
             variants, gn_args, ninja_targets, test_pool, upload_to_dashboard,
             device_type, pave, dashboard_masters_name, dashboard_bots_name,
             benchmarks_package, board, product, test_timeout_secs, gcs_bucket,
             debug_symbol_gcs_bucket):
  test_timeout_secs = int(test_timeout_secs)
  checkout_root = api.path['start_dir'].join('fuchsia')
  checkout = api.checkout.fuchsia_with_options(
      path=checkout_root,
      build=api.buildbucket.build,
      manifest=manifest,
      remote=remote,
      project=project,
  )
  execution_timestamp_ms = api.time.ms_since_epoch()

  # Get the LUCI build log URL to attach to the perf data. This might be empty
  # or None because of an infra failure.
  build_id = api.buildbucket.build_id

  # Although it's unusual, BuildBucketApi returns parsed JSON as the step
  # result's stdout.
  build_json = api.buildbucket.get_build(build_id).stdout
  log_url = build_json.get('build', {}).get('url', None)
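  # Here build_json is expected to look roughly like
  # {'build': {'id': ..., 'status': ..., 'url': ..., 'bucket': ...}}; see the
  # test data in GenTests below for a concrete example.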
  assert log_url, "Couldn't fetch info for build %s. BuildBucket API returned: %s" % (
      build_id, build_json)

  # yapf: disable
  test_cmds = [
      ' '.join(['/pkgfs/packages/%s/0/bin/benchmarks.sh' % benchmarks_package,
                api.testing_requests.results_dir_on_target,
                '--catapult-converter-args',
                '--bots', dashboard_bots_name,
                '--masters', dashboard_masters_name,
                '--execution-timestamp-ms', '%d' % execution_timestamp_ms,
                '--log-url', log_url])
  ]
  # yapf: enable
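  # With the example properties used in GenTests, the resulting command looks
  # roughly like:
  #   /pkgfs/packages/topaz_benchmarks/0/bin/benchmarks.sh <results_dir> \
  #     --catapult-converter-args --bots topaz-builder --masters fuchsia.ci \
  #     --execution-timestamp-ms <ms_since_epoch> --log-url <LUCI build URL>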
  build_dir = checkout.root_dir.join('out')
  build = api.build.with_options(
      build_dir=build_dir,
      checkout=checkout,
      target=target,
      build_type=build_type,
      packages=packages,
      variants=variants,
      gn_args=gn_args,
      ninja_targets=ninja_targets,
      board=board,
      product=product,
  )
  build_artifacts = build.get_artifacts()
  build.upload_debug_symbols(debug_symbol_gcs_bucket=debug_symbol_gcs_bucket)

  shard_requests = api.testing_requests.deprecated_shard_requests(
      build_artifacts, test_cmds, device_type, test_pool, test_timeout_secs,
      pave)
  orchestration_inputs = api.build.TestOrchestrationInputs(
      build_artifacts.llvm_symbolizer, build_artifacts.minfs,
      build_artifacts.symbolize_tool, shard_requests,
      build_artifacts.tests_file)
  test_results = api.testing.deprecated_test(
      debug_symbol_gcs_bucket,
      device_type,
      orchestration_inputs,
      max_attempts=1,  # Don't retry tests in case of failures.
      overwrite_summary=False,
  )

  # Upload results for all of the benchmarks that ran successfully.
  if not api.buildbucket_util.is_tryjob:
    for test_name, file_data in test_results.passed_test_outputs.iteritems():
      if api.catapult.is_catapult_file(test_name):
        # Save Catapult files to the test results output dir so they get
        # uploaded by upload_results().
        api.file.write_text(
            'save catapult output for %s' % test_name,
            test_results.output_dir.join(test_name),
            file_data,
        )
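    # A passed output named like 'benchmark.catapult_json' (see tests_json in
    # GenTests) thus lands in test_results.output_dir and is included in the
    # upload below.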
    test_results.upload_results(
        gcs_bucket, upload_to_catapult=upload_to_dashboard)

  with api.step.defer_results():
    test_results.raise_failures()


def GenTests(api):
  # Test API response for a call to the BuildBucket API's `get` method, which
  # returns JSON information for a single build.
  #
  # TODO(kjharland): This should be amended upstream in BuildbucketTestApi.
  buildbucket_get_response = api.step_data(
      'buildbucket.get',
      stdout=api.raw_io.output_text(
          api.json.dumps({
              'build': {
                  'id': '123',
                  'status': 'SCHEDULED',
                  'url': 'https://ci.chromium.org/p/fuchsia/builds/b123',
                  'bucket': 'luci.fuchsia.ci',
              }
          })))

  tests_json = [
      {
          'test': {
              'name': 'benchmark.catapult_json',
              'os': 'fuchsia',
              'label': 'asdf',
              'path': 'benchmark.catapult_json',
          },
      },
  ]

  # Test cases for running Fuchsia performance tests as a swarming task.
  yield api.fuchsia.test(
      'successful_run',
      properties=dict(
          dashboard_masters_name='fuchsia.ci',
          dashboard_bots_name='topaz-builder',
          debug_symbol_gcs_bucket='debug-symbols',
          benchmarks_package='topaz_benchmarks',
          run_tests=True,
      ),
      tests_json=tests_json,
      steps=[
          buildbucket_get_response,
      ],
  )

  yield api.fuchsia.test(
      'failed_run',
      status='failure',
      properties=dict(
          dashboard_masters_name='fuchsia.ci',
          dashboard_bots_name='topaz-builder',
          debug_symbol_gcs_bucket='debug-symbols',
          benchmarks_package='topaz_benchmarks',
          run_tests=True,
      ),
      tests_json=tests_json,
      steps=[
          buildbucket_get_response,
          api.testing.test_step_data(failure=True, tests_json=tests_json),
      ],
  )

  # Tests running this recipe with a pending Gerrit change. Note that
  # upload_to_dashboard is False; be sure to set it to False when testing
  # patches.
  yield api.fuchsia.test(
      'with_patch',
      tryjob=True,
      properties=dict(
          run_tests=True,
          upload_to_dashboard=False,
          dashboard_masters_name='fuchsia.try',
          dashboard_bots_name='topaz-builder',
          debug_symbol_gcs_bucket='debug-symbols',
          benchmarks_package='topaz_benchmarks',
      ),
      tests_json=tests_json,
      steps=[
          buildbucket_get_response,
      ],
  )

  yield api.fuchsia.test(
      'device_tests',
      properties=dict(
          dashboard_masters_name='fuchsia.ci',
          dashboard_bots_name='topaz-builder',
          debug_symbol_gcs_bucket='debug-symbols',
          benchmarks_package='topaz_benchmarks',
          run_tests=True,
          device_type='Intel NUC Kit NUC7i5DNHE',
      ),
      tests_json=tests_json,
      steps=[
          buildbucket_get_response,
      ],
  )

  yield api.fuchsia.test(
      'missing_test_results',
      status='failure',
      properties=dict(
          dashboard_masters_name='fuchsia.ci',
          dashboard_bots_name='topaz-builder',
          debug_symbol_gcs_bucket='debug-symbols',
          benchmarks_package='topaz_benchmarks',
          run_tests=True,
      ),
      tests_json=tests_json,
      steps=[
          buildbucket_get_response,
          api.step_data('run tests.attempt 0.extract results',
                        api.raw_io.output_dir({})),
      ],
  )