# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from recipe_engine import recipe_api


class UploadPath(object):
    """Represents a path to upload."""

    def __init__(self, path):
        self._path = path

    @property
    def path(self):
"""The Path associated with this object."""
        return self._path


class DirectoryPath(UploadPath):
    """Represents a Path to a directory."""

    def add_to_package(self, pkg):
        pkg.add_dir(self.path)


class FilePath(UploadPath):
    """Represents a Path to a file."""

    def add_to_package(self, pkg):
        pkg.add_file(self.path)


class UploadApi(recipe_api.RecipeApi):
    """API for uploading build and test results."""

    FilePath = FilePath
    DirectoryPath = DirectoryPath

    def file_to_gcs(
self, source, bucket, subpath, namespace=None, metadata=None, no_clobber=True
):
"""Uploads a file to GCS under a subpath specific to the given build.
Will upload the file to:
gs://fuchsia-infra/<bucket>/<build id>/<subpath or basename of file>
Args:
path (Path): A path to the file to upload.
bucket (str): The name of the GCS bucket to upload to.
subpath (str): The end of the destination path within the
build-specific subdirectory.
namespace (str or None): A unique ID for this build. Defaults to the current
build ID or led run ID.
metadata (dict): A dictionary of metatadata values to upload along with
the file.
no_clobber (bool): Skip upload if destination path already exists in GCS.
Returns:
The upload step.
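
        Example of calling this from a recipe (illustrative; the bucket and
        file names are hypothetical):

            api.upload.file_to_gcs(
                source=api.path["cleanup"].join("summary.json"),
                bucket="my-artifact-bucket",
                subpath="summary.json",
            )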
"""
return self.m.gsutil.upload(
bucket=bucket,
src=source,
dst=self._absolute_gcs_path(namespace, subpath),
link_name=subpath,
metadata=metadata,
no_clobber=no_clobber,
name="upload %s to %s" % (subpath, bucket),
)

    def directory_to_gcs(self, source, bucket, subpath, namespace=None, rsync=True):
        """Uploads a directory to GCS under a subpath specific to the given build.

        Will upload the directory to:
            gs://<bucket>/builds/<namespace>/<subpath>

        This operation preserves directory structure. When `rsync` is true, it
        uses `gsutil rsync` rather than `gsutil cp`, which also makes the
        upload idempotent.

        Args:
            source (Path): A path to the directory to upload.
            bucket (str): The name of the GCS bucket to upload to.
            subpath (str): The end of the destination path within the
                build-specific subdirectory.
            namespace (str or None): A unique ID for this build. Defaults to
                the current build ID or led run ID.
            rsync (bool): Whether to upload with `gsutil rsync` instead of
                `gsutil cp`.

Returns:
The upload step.
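
        Example of calling this from a recipe (illustrative; the bucket and
        directory names are hypothetical):

            api.upload.directory_to_gcs(
                source=api.path["cleanup"].join("archives"),
                bucket="my-artifact-bucket",
                subpath="archives",
            )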
"""
kwargs = dict(
bucket=bucket,
src=source,
dst=self._absolute_gcs_path(namespace, subpath),
link_name=subpath,
recursive=True,
multithreaded=True,
no_clobber=True,
name="upload %s to %s" % (subpath, bucket),
)
if rsync:
return self.m.gsutil.rsync(**kwargs)
return self.m.gsutil.upload(**kwargs)

    def _absolute_gcs_path(self, namespace, relative_path):
        namespace = namespace or self.m.buildbucket_util.id
        return "builds/%s/%s" % (namespace, relative_path)

    def test_outputs_to_catapult(self, output_dir):
        """Uploads test outputs to Catapult from a specified directory.

        Uploads only Catapult HistogramSet JSON files with the expected
        Catapult extension.

        Args:
            output_dir (Path): A directory containing Catapult files produced
                by the tests.
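
        Example of calling this from a recipe (illustrative; the directory
        name is hypothetical):

            api.upload.test_outputs_to_catapult(
                output_dir=api.path["cleanup"].join("test_outputs")
            )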
"""
for filepath in self.m.file.glob_paths(
"locate catapult files",
output_dir,
pattern=self.m.path.join("**", self.m.catapult.FILE_PATTERN),
test_data=["benchmark.catapult_json"],
):
self._upload_file_to_catapult(filepath)

    def _upload_file_to_catapult(self, filepath):
basename = self.m.path.basename(filepath)
with self.m.step.nest("upload %s" % basename):
self.m.catapult.upload(input_file=filepath, timeout="60s")

    def test_results_to_resultdb(
        self, resultdb_bin_path, summary_filepaths, outputs_dir, base_variant
    ):
        """Uploads summary.json file contents to ResultDB.

        Requires the builder to have ResultDB enabled.

        Args:
            resultdb_bin_path (Path): Absolute path to the resultdb tool in
                the isolate archive.
            summary_filepaths (list(Path)): The summary.json file paths.
            outputs_dir (Path): Path to the directory that contains test
                outputs. This value is joined with the "output_file" field in
                summary.json.
            base_variant (dict): Variant tags to upload to ResultDB.
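
        Example of calling this from a recipe (illustrative; the paths and
        variant values are hypothetical):

            api.upload.test_results_to_resultdb(
                resultdb_bin_path=cipd_dir.join("resultdb"),
                summary_filepaths=[outputs_dir.join("summary.json")],
                outputs_dir=outputs_dir,
                base_variant={"device_type": "QEMU"},
            )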
"""
if self.m.resultdb.enabled and resultdb_bin_path and summary_filepaths:
cmd = [resultdb_bin_path]
base_variant = base_variant.copy()
base_variant.update(
{
"bucket": self.m.buildbucket.build.builder.bucket,
"builder": self.m.buildbucket.build.builder.builder,
}
)
if outputs_dir:
cmd.append("--output=%s" % outputs_dir)
for summary in summary_filepaths:
cmd.append("--summary=%s" % summary)
self.m.step(
"resultdb", self.m.resultdb.wrap(cmd, base_variant=base_variant)
)

    def cipd_package(
        self,
        pkg_name,
        pkg_root,
        pkg_paths,
        search_tag,
        repository=None,
        install_mode="copy",
        refs=("latest",),
        extra_tags=None,
    ):
        """Creates and uploads a CIPD package containing the given paths.

        The package is published to CIPD under the path pkg_name.

        Args:
            pkg_name (str): The CIPD package to publish to.
            pkg_root (Path): The absolute path to the parent directory of the
                package.
            pkg_paths (list(UploadPath)): A list of UploadPath objects which
                specify the paths to directories or files to upload.
            search_tag (dict): The tag to search for the CIPD pin with. This
                should contain exactly one element, keyed by either
                `git_revision` or `version`.
            repository (str or None): The git repository where code for the
                package lives.
            install_mode (str or None): The install mode for the package.
            refs (seq(str)): Refs to set on the package.
            extra_tags (dict or None): Extra tags to add to the package.

Returns:
The CIPDApi.Pin instance_id.
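
        Example of calling this from a recipe (illustrative; the package name,
        paths, and revision are hypothetical):

            instance_id = api.upload.cipd_package(
                pkg_name="fuchsia/tools/foo/linux-amd64",
                pkg_root=pkg_dir,
                pkg_paths=[api.upload.FilePath(pkg_dir.join("foo"))],
                search_tag={"git_revision": revision},
                repository="https://fuchsia.googlesource.com/foo",
            )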
"""
pkg_def = self.m.cipd.PackageDefinition(
package_name=str(pkg_name), package_root=pkg_root, install_mode=install_mode
)
for path in pkg_paths:
path.add_to_package(pkg_def)
# E.g., "fuchsia/go/linux-amd64" -> "go".
name = str(pkg_name.split("/")[-2])
pkg_def.add_version_file(".versions/%s.cipd_version" % name)
cipd_pkg_file = self.m.path["cleanup"].join("%s.cipd" % name)
with self.m.step.nest("cipd") as step:
self.m.cipd.build_from_pkg(
pkg_def=pkg_def, output_package=cipd_pkg_file,
)
assert (
len(search_tag) == 1
), "search_tag must contain one (key: value) pair to search for."
            search_tag_key = list(search_tag)[0]
search_tag_value = search_tag[search_tag_key]
cipd_pins = self.m.cipd.search(
pkg_name, "%s:%s" % (search_tag_key, search_tag_value)
)
if cipd_pins:
self.m.step("Package is up-to-date", cmd=None)
assert len(cipd_pins) == 1, "%s has too many pins" % pkg_name
return cipd_pins[0].instance_id
tags = {}
tags.update(search_tag)
if repository:
tags["git_repository"] = repository
if extra_tags:
tags.update(extra_tags)
cipd_pin = self.m.cipd.register(
package_name=pkg_name, package_path=cipd_pkg_file, refs=refs, tags=tags,
)
step.presentation.properties.update(cipd_pin._asdict())
if search_tag:
            # Surface search_tag in the output properties so it can be
            # used by builders like goma_toolchain.
step.presentation.properties.update(search_tag)
return cipd_pin.instance_id

    def upload_isolated(self, staging_dir, upload_paths=None):
        """Returns the hash of the isolated tree created from the provided
        staging_dir.

        Args:
            staging_dir (Path): The directory to isolate.
            upload_paths (list(UploadPath) or None): The paths to include in
                the isolated tree. Defaults to the entire staging_dir.
        """
isolated = self.m.isolated.isolated(staging_dir)
upload_paths = upload_paths or [DirectoryPath(staging_dir)]
for path in upload_paths:
path.add_to_package(isolated)
with self.m.step.nest("isolated") as step:
isolated_hash = isolated.archive("isolate")
step.presentation.properties["isolated"] = isolated_hash
return isolated_hash