[benchmarking] Remove runbenchmarks library

Now that all other layers have been converted to use the benchmarking
library, we can remove runbenchmarks.

PT-73 #comment patch

Test: CQ

Change-Id: I6e2672789973dd83ba5295f2178f36f86a4b9830
diff --git a/garnet/packages/benchmarks/BUILD.gn b/garnet/packages/benchmarks/BUILD.gn
index a4d435c..ae26865 100644
--- a/garnet/packages/benchmarks/BUILD.gn
+++ b/garnet/packages/benchmarks/BUILD.gn
@@ -18,7 +18,6 @@
   testonly = true
   public_deps = [
     "//garnet/bin/catapult_converter",
-    "//garnet/testing/runbenchmarks",
   ]
 }
 
diff --git a/garnet/testing/benchmarking/README.md b/garnet/testing/benchmarking/README.md
new file mode 100644
index 0000000..8444aae
--- /dev/null
+++ b/garnet/testing/benchmarking/README.md
@@ -0,0 +1,38 @@
+benchmarking
+==
+
+## Overview
+
+`benchmarking` is a C++ library for driving Fuchsia benchmarks.  Most of the
+programs written using this library are used by Fuchsia infrastructure, which
+executes the benchmarks and uploads the results to the performance dashboard.
+
+For example usage see `//garnet/tests/benchmarks`.
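+
+## Example
+
+The snippet below is a minimal sketch of a wrapper binary, not a verbatim
+copy of an existing one.  `BenchmarksRunner`, the `Add*` methods, and
+`Finish` come from `benchmarking.h`; the `benchmarking` namespace, the
+`Create` factory, and the `AddTspecBenchmark` name are illustrative
+assumptions rather than confirmed API.  See `//garnet/tests/benchmarks`
+for real usage.
+
+```cpp
+#include "garnet/testing/benchmarking/benchmarking.h"
+
+int main(int argc, const char** argv) {
+  // Parse the arguments supplied by the buildbot recipe (output directory,
+  // catapult converter arguments, ...) into a runner.
+  auto maybe_runner = benchmarking::BenchmarksRunner::Create(argc, argv);
+  if (!maybe_runner) {
+    return 1;
+  }
+  auto& runner = *maybe_runner;
+
+  // Register each benchmark to drive, then run them all and write results
+  // (including summary.json) to the output directory.
+  runner.AddTspecBenchmark("my_benchmark", "/path/to/my_benchmark.tspec");
+  runner.Finish();
+}
+```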
diff --git a/garnet/testing/benchmarking/benchmarking.h b/garnet/testing/benchmarking/benchmarking.h
index 577aafd1..3e230fe 100644
--- a/garnet/testing/benchmarking/benchmarking.h
+++ b/garnet/testing/benchmarking/benchmarking.h
@@ -16,7 +16,17 @@
 // specific wrapper binary creating a |BenchmarksRunner| from command line
 // arguments supplied by a buildbot recipe, adding all desired benchmarks via
 // the |BenchmarksRunner::Add*| methods, and then calling
-// |BenchmarksRunner::Finish|.
+// |BenchmarksRunner::Finish|.  See the //garnet/tests/benchmarks package for
+// example usage.
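+//
+// A minimal sketch of such a wrapper; |Create| and |AddTspecBenchmark| are
+// illustrative stand-ins for the factory and one of the |Add*| methods:
+//
+//   int main(int argc, const char** argv) {
+//     auto runner = benchmarking::BenchmarksRunner::Create(argc, argv);
+//     runner->AddTspecBenchmark("my_benchmark", "/path/to/my.tspec");
+//     runner->Finish();
+//   }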
 class BenchmarksRunner {
  public:
   // Create a new |BenchmarksRunner| using arguments supplied via buildbot.
diff --git a/garnet/testing/runbenchmarks/BUILD.gn b/garnet/testing/runbenchmarks/BUILD.gn
deleted file mode 100644
index aedc3e2..0000000
--- a/garnet/testing/runbenchmarks/BUILD.gn
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2018 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/package.gni")
-
-copy("copy_runbenchmarks") {
-  sources = [
-    "runbenchmarks.sh",
-  ]
-  outputs = [
-    "$root_out_dir/{{source_file_part}}",
-  ]
-}
-
-package("runbenchmarks") {
-  testonly = true
-
-  deps = [
-    ":copy_runbenchmarks",
-  ]
-
-  resources = [
-    {
-      path = "runbenchmarks.sh"
-      dest = "runbenchmarks.sh"
-    },
-  ]
-}
diff --git a/garnet/testing/runbenchmarks/OWNERS b/garnet/testing/runbenchmarks/OWNERS
deleted file mode 100644
index 5bf79d5..0000000
--- a/garnet/testing/runbenchmarks/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-kjharland@google.com
-mseaborn@google.com
-*
diff --git a/garnet/testing/runbenchmarks/README.md b/garnet/testing/runbenchmarks/README.md
deleted file mode 100644
index 02030d0..0000000
--- a/garnet/testing/runbenchmarks/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-runbenchmarks
-==
-
-## Overview
-
-`runbenchmarks` is a shell library for driving Fuchsia benchmarks.  Most of the
-programs written using this library are used by Fuchsia infrastructure which
-executes the benchmarks and uploads the results to the performance dashboard.
-
-For example usage see `//garnet/tests/benchmarks`.
-
-## API Reference
-
-### runbench_exec
-
-```sh
-runbench_exec <results_file> <arg0> <arg1> ...
-```
-
-#### Description
-
-Runs the command specified by `<arg0> <arg1> ...` and verifies that `<results_file>`
-was produced as a result.
-
-### runbench_exit
-
-```sh
-runbench_exit <output_dir>
-```
-
-#### Description
-
-Exits the current process, performing any cleanup work necessary. `<output_dir>` should
-be the same directory that test results were written to.
diff --git a/garnet/testing/runbenchmarks/runbenchmarks.sh b/garnet/testing/runbenchmarks/runbenchmarks.sh
deleted file mode 100644
index efca00f..0000000
--- a/garnet/testing/runbenchmarks/runbenchmarks.sh
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/boot/bin/sh
-#
-# Copyright 2018 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# This script defines a set of utility functions for running benchmarks on
-# Fuchsia.  For example usage, see the package //garnet/tests/benchmarks. For
-# detailed instructions see README.md.
-
-# Enable stricter error handling:
-# - e  Fast fail on errors.
-# - u  Fail when attempting to use undefined variables.
-#
-# This is a Dash script, and does not use the same syntax as Bash. See
-# https://wiki.ubuntu.com/DashAsBinSh for more info.
-set -ue
-
-# Whether any errors occurred while running benchmarks.
-_got_errors=0
-
-# A string of JSON objects representing benchmark results.
-#
-# The contents of this string are written to a `summary.json` file after all
-# benchmarks have run.  Infra uses this file when running benchmarks on hardware
-# as a sort of manifest.  It indicates which tests ran, where their output files
-# are located, and whether a test passed or failed.  `runbench_exec` records
-# each run in this summary.  The summary's schema is defined at:
-# https://fuchsia.googlesource.com/fuchsia/+/master/zircon/system/uapp/runtests/summary-schema.json
-_benchmark_summaries=""
-
-# This parses command line arguments:
-#  * It sets ${OUT_DIR} to the output directory specified on the command line.
-#  * It sets ${benchmarks_bot_name} to the name of the bot specified on the
-#    command line.
-#  * It reads other arguments that are used by the Catapult converter.
-#
-# This will normally be invoked as:
-#   runbench_read_arguments "$@"
-#
-# A script that uses runbench_read_arguments should be invoked as follows:
-#   benchmarks.sh <output-dir> --catapult-converter-args <args>
-runbench_read_arguments() {
-    if [ $# -lt 2 ] || [ "$2" != "--catapult-converter-args" ]; then
-        echo "Error: Missing '--catapult-converter-args' argument"
-        echo "Usage: $0 <output-dir> --catapult-converter-args <args>"
-        exit 1
-    fi
-    OUT_DIR="$1"
-    shift 2
-    _catapult_converter_args="$@"
-
-    # Parse the catapult_converter argument to find the bot name.
-    while [ $# -gt 0 ]; do
-        local arg="$1"
-        shift
-        case "${arg}" in
-            --execution-timestamp-ms|--masters|--log-url)
-                shift;;
-            --bots)
-                benchmarks_bot_name="$1"
-                shift;;
-            *)
-                _runbench_log "Error: Unrecognized argument: ${arg}"
-                exit 1;;
-        esac
-    done
-}
-
-# Runs a command and expects a results file to be produced.
-#
-# If the results file was not generated, an error message is logged and a
-# benchmark failure is recorded.
-#
-# @param {string} The expected results file.
-# @param {...} Set of arguments that make up the command-to-run.
-#
-# Example:
-#
-#    runbench_exec "$OUT_DIR/my_benchmark" trace record  \
-#        --spec-file=/path/to/my_benchmark.tspec         \
-#        --benchmark-results-file="$OUT_DIR/my_benchmark"
-#
-runbench_exec() {
-    local results_file="$1"
-    shift
-
-    # Ensure the results file doesn't already exist.
-    rm -f "${results_file}"
-
-    # Create the results file so that it's always present, even if the test
-    # crashes. Infra expects this file to always be present because it is
-    # listed in summary.json.
-    touch "${results_file}"
-
-    # Run the benchmark.
-    _runbench_log "running $@"
-    if ! "$@"; then
-        # Record that an error occurred, globally.
-        _got_errors=1
-        # Record that an error occurred for this particular benchmark.
-        _write_summary_entry "${results_file}" "FAIL"
-        return
-    fi
-
-    # Ensure the output file was actually generated.
-    if [ ! -f "${results_file}" ]; then
-        _runbench_log "error: missing file ${results_file}"
-        _got_errors=1
-        _write_summary_entry "${results_file}" "FAIL"
-        return
-    fi
-
-    # Record a successful run.
-    _write_summary_entry "${results_file}" "PASS"
-
-    # Convert the results file to a Catapult Histogram JSON file.
-    local base_name="$(basename ${results_file} .json).catapult_json"
-    local catapult_file="$(dirname ${results_file})/$base_name"
-    /bin/catapult_converter \
-        --input ${results_file} \
-        --output ${catapult_file} \
-        ${_catapult_converter_args}
-    _write_summary_entry "${catapult_file}" "PASS"
-}
-
-# Exits the current process with a code indicating whether any errors occurred.
-#
-# Additionally writes a summary.json file to the given output directory, which
-# lists all of the benchmarks that ran, along with their results.  This should
-# be the same output directory that results files have been written to.
-#
-# @param{string} Path to the output directory.
-#
-# Example: runbench_finish /path/to/dir
-runbench_finish() {
-    local out_dir="$1"
-    _write_summary_file "${out_dir}"
-
-    exit $_got_errors
-}
-
-# Logs a message to stderr.
-_runbench_log() {
-    echo "runbench: $@" >&2
-}
-
-# Records the result of running a benchmark in the summary.
-#
-# @param {string} Path to the results file.
-# @param {string} Either "PASS" or "FAIL".
-#
-# Example: _write_summary_entry /path/to/res/file "PASS"
-_write_summary_entry() {
-    local results_file="$1"
-    local pass_fail="$2"
-
-    # Build the JSON summary entry.  The name of the results file is used as
-    # `name` and `output_file`.  `name` must be unique within the summary.
-    # Since results file names also must be unique, using the filename is fine.
-    # `output_file` must be a path relative to summary.json.  We'll emit
-    # summary.json in the same directory as results files, so the filename is
-    # also fine to use here.
-
-    # Extract the last element of the pathname, e.g. "cc" from "aa/bb/cc"
-    local results_filename="${results_file##*/}"
-
-    # Create the JSON fields.
-    local name="\"name\":\"${results_filename}\""
-    local output_file="\"output_file\":\"${results_filename}\""
-    local result="\"result\":\"${pass_fail}\""
-
-    # Append the entry to the summary string.
-    _benchmark_summaries="${_benchmark_summaries}{${name},${output_file},${result}},\n"
-}
-
-# Writes summary.json in the given directory.
-#
-# @param{string} Path to the directory.
-#
-# Example: _write_summary_file /path/to/dir
-_write_summary_file() {
-    local out_dir="$1"
-    local summary_filepath="${out_dir}/summary.json"
-
-    # Strip trailing ",\n"
-    _benchmark_summaries=${_benchmark_summaries%,\\n}
-    local summary="{\"tests\":[\n${_benchmark_summaries}\n]}"
-
-    _runbench_log "writing summary.json to ${summary_filepath}"
-    echo "${summary}" > "${summary_filepath}"
-}