[benchmarking] Clean up: delete the disabled startup_benchmark

In the time since this benchmark was disabled, the code it was
testing has been moved or removed, so delete the benchmark itself.

BLD-324 #done.

Change-Id: I35787e30bbcbffcc3a4827f7e20f937c1a000a25
diff --git a/packages/benchmarks/topaz b/packages/benchmarks/topaz
index ad5ae7d..b9ccd42 100644
--- a/packages/benchmarks/topaz
+++ b/packages/benchmarks/topaz
@@ -3,7 +3,6 @@
         "garnet/packages/benchmarks/buildbot"
     ],
     "packages": [
-        "//topaz/tests/benchmarks:topaz_benchmarks",
-        "//topaz/tests/benchmarks/story_startup:startup_benchmarks"
+        "//topaz/tests/benchmarks:topaz_benchmarks"
     ]
 }
diff --git a/tests/benchmarks/story_startup/BUILD.gn b/tests/benchmarks/story_startup/BUILD.gn
deleted file mode 100644
index 91fc2cc..0000000
--- a/tests/benchmarks/story_startup/BUILD.gn
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2018 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/go/go_binary.gni")
-import("//build/go/go_library.gni")
-import("//build/go/toolchain.gni")
-import("//build/package.gni")
-
-package("startup_benchmarks") {
-  testonly = true
-
-  deps = [
-    ":process_startup_trace",
-  ]
-
-  binaries = [
-    {
-      name = "process_startup_trace"
-    },
-    {
-      name = "run_startup_benchmark.sh"
-      source = rebase_path("run_startup_benchmark.sh")
-    },
-    {
-      name = rebase_path("topaz_startup_benchmarks.sh")
-      dest = "startup_benchmarks.sh"
-    },
-  ]
-}
-
-go_library("process_startup_trace_lib") {
-  name = "process_startup_trace"
-  deps = [
-    "//garnet/go/src/benchmarking",
-  ]
-}
-
-go_binary("process_startup_trace") {
-  output_name = "process_startup_trace"
-
-  gopackage = "process_startup_trace"
-
-  deps = [
-    ":process_startup_trace_lib",
-  ]
-}
diff --git a/tests/benchmarks/story_startup/process_startup_trace.go b/tests/benchmarks/story_startup/process_startup_trace.go
deleted file mode 100644
index 24246ba..0000000
--- a/tests/benchmarks/story_startup/process_startup_trace.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// process_startup_trace.go
-//
-// Usage:
-//
-// /pkgfs/packages/scenic_benchmarks/0/bin/process_startup_trace      \
-//    [-test_suite_name=label] [-benchmarks_out_filename=output_file] \
-//    -flutter_app_name=app_name trace_filename
-//
-// label = Optional: The name of the test suite.
-// output_file = Optional: A file to output results to.
-// app_name = The name of the flutter app to measure fps for.
-// trace_filename = The input trace files.
-//
-// The output is a JSON file with benchmark statistics.
-
-package main
-
-import (
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-
-	"fuchsia.googlesource.com/benchmarking"
-)
-
-const OneMsecInUsecs float64 = 1000
-
-var (
-	verbose = false
-)
-
-func check(e error) {
-	if e != nil {
-		panic(e)
-	}
-}
-
-func getThreadsWithName(model benchmarking.Model, name string) []benchmarking.Thread {
-	threads := make([]benchmarking.Thread, 0)
-	for _, process := range model.Processes {
-		for _, thread := range process.Threads {
-			if thread.Name == name {
-				threads = append(threads, thread)
-			}
-		}
-	}
-	return threads
-}
-
-func reportEvent(event benchmarking.Event, label string, testSuite string, testResultsFile *benchmarking.TestResultsFile) {
-	dur := event.Dur / OneMsecInUsecs
-	fmt.Printf("%-35s: %.4gms\n", label, dur)
-	testResultsFile.Add(&benchmarking.TestCaseResults{
-		Label:     label,
-		TestSuite: testSuite,
-		Unit:      benchmarking.Unit(benchmarking.Milliseconds),
-		Values:    []float64{dur},
-	})
-}
-
-func reportEventsTotal(events []benchmarking.Event, label string, testSuite string, testResultsFile *benchmarking.TestResultsFile) {
-	label = "Total time in " + label
-	total := benchmarking.AvgDuration(events) * float64(len(events)) / OneMsecInUsecs
-	fmt.Printf("%-35s: %.4gms\n", label, total)
-	testResultsFile.Add(&benchmarking.TestCaseResults{
-		Label:     label,
-		TestSuite: testSuite,
-		Unit:      benchmarking.Unit(benchmarking.Milliseconds),
-		Values:    []float64{total},
-	})
-}
-
-func reportStartupMetrics(model benchmarking.Model, appName string, testSuite string, testResultsFile *benchmarking.TestResultsFile) {
-	fmt.Printf("=== Startup Metrics ===\n")
-	createStoryCallStr := "SessionStorage::CreateStoryCall"
-	createStoryCallEvent := model.FindEvents(benchmarking.EventsFilter{Name: &createStoryCallStr})[0]
-	storyStartTime := createStoryCallEvent.Start
-	addModStr := "StoryCommand::AddMod"
-	addModEvent := model.FindEvents(benchmarking.EventsFilter{Name: &addModStr})[0]
-
-	flutterUIThread := getThreadsWithName(model, appName+".ui")[0]
-
-	createRootIsolateStr := "DartIsolate::CreateRootIsolate"
-	createRootIsolateEvent := flutterUIThread.FindEvents(benchmarking.EventsFilter{Name: &createRootIsolateStr})[0]
-	serviceIsolateStartupStr := "ServiceIsolateStartup"
-	serviceIsolateStartupEvents := model.FindEvents(benchmarking.EventsFilter{Pid: &createRootIsolateEvent.Pid, Name: &serviceIsolateStartupStr})
-	// This event is only emitted if flutter profiling is enabled.
-	var serviceIsolateStartupEvent benchmarking.Event
-	if len(serviceIsolateStartupEvents) > 0 {
-		serviceIsolateStartupEvent = serviceIsolateStartupEvents[0]
-	}
-
-	flutterGPUThread := getThreadsWithName(model, appName+".gpu")[0]
-	sessionPresentStr := "SessionPresent"
-	firstSessionPresentEvent := flutterGPUThread.FindEvents(benchmarking.EventsFilter{Name: &sessionPresentStr})[0]
-
-	startupEndTime := firstSessionPresentEvent.Start + firstSessionPresentEvent.Dur
-	totalDuration := startupEndTime - storyStartTime
-
-	ledgerStr := "ledger"
-	ledgerGetPageStr := "ledger_get_page"
-	ledgerGetPageEvents := model.FindEvents(benchmarking.EventsFilter{Cat: &ledgerStr, Name: &ledgerGetPageStr})
-	ledgerBatchUploadStr := "batch_upload"
-	ledgerBatchUploadEvents := model.FindEvents(benchmarking.EventsFilter{Cat: &ledgerStr, Name: &ledgerBatchUploadStr})
-	ledgerEvents := append(ledgerGetPageEvents, ledgerBatchUploadEvents...)
-
-	fileGetVmoStr := "FileGetVmo"
-	fileGetVmoEvents := model.FindEvents(benchmarking.EventsFilter{Name: &fileGetVmoStr})
-
-	reportEvent(createStoryCallEvent, "CreateStoryCall", testSuite, testResultsFile)
-	reportEvent(addModEvent, "AddMod", testSuite, testResultsFile)
-	if serviceIsolateStartupEvent.Name != "" {
-		reportEvent(serviceIsolateStartupEvent, "ServiceIsolateStartup", testSuite, testResultsFile)
-	}
-	reportEvent(createRootIsolateEvent, "CreateRootIsolate", testSuite, testResultsFile)
-	reportEventsTotal(ledgerEvents, "Ledger", testSuite, testResultsFile)
-	reportEventsTotal(fileGetVmoEvents, "FileGetVmo", testSuite, testResultsFile)
-
-	total := totalDuration / OneMsecInUsecs
-	fmt.Printf("%-35s: %.4gms\n", "Total Startup Time", total)
-	testResultsFile.Add(&benchmarking.TestCaseResults{
-		Label:     "Total",
-		TestSuite: testSuite,
-		Unit:      benchmarking.Unit(benchmarking.Milliseconds),
-		Values:    []float64{total},
-	})
-}
-
-func main() {
-	// Argument handling.
-	verbosePtr := flag.Bool("v", false, "Run with verbose logging")
-	flutterAppNamePtr := flag.String("flutter_app_name", "", "The name of the flutter app to measure fps for.")
-	testSuitePtr := flag.String("test_suite_name", "", "Optional: The name of the test suite.")
-	outputFilenamePtr := flag.String("benchmarks_out_filename", "", "Optional: A file to output results to.")
-
-	flag.Parse()
-	if flag.NArg() == 0 || *flutterAppNamePtr == "" {
-		flag.Usage()
-		println("  trace_filename: The input trace file.")
-		os.Exit(1)
-	}
-
-	verbose = *verbosePtr
-	inputFilename := flag.Args()[0]
-	flutterAppName := *flutterAppNamePtr
-	testSuite := *testSuitePtr
-	outputFilename := *outputFilenamePtr
-
-	traceFile, err := ioutil.ReadFile(inputFilename)
-	check(err)
-
-	// Creating the trace model.
-	var model benchmarking.Model
-	model, err = benchmarking.ReadTrace(traceFile)
-	check(err)
-
-	if len(model.Processes) == 0 {
-		panic("No processes found in the model")
-	}
-
-	var testResultsFile benchmarking.TestResultsFile
-	reportStartupMetrics(model, flutterAppName, testSuite, &testResultsFile)
-
-	if outputFilename != "" {
-		outputFile, err := os.Create(outputFilename)
-		if err != nil {
-			log.Fatalf("failed to create file %s", outputFilename)
-		}
-
-		if err := testResultsFile.Encode(outputFile); err != nil {
-			log.Fatalf("failed to write results to %s: %v", outputFilename, err)
-		}
-
-		fmt.Printf("\n\nWrote benchmark values to file '%s'.\n", outputFilename)
-	}
-}
diff --git a/tests/benchmarks/story_startup/run_startup_benchmark.sh b/tests/benchmarks/story_startup/run_startup_benchmark.sh
deleted file mode 100644
index 7c2aed5..0000000
--- a/tests/benchmarks/story_startup/run_startup_benchmark.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/boot/bin/sh
-#
-# Usage: run_startup_benchmark.sh                   \
-#          --out_dir <trace output dir>             \
-#          --out_file <benchmark output file path>  \
-#          --benchmark_label <benchmark label>      \
-#          --cmd <cmd to benchmark>                 \
-#          --flutter_app_name <flutter application name>
-#
-
-while [ "$1" != "" ]; do
-  case "$1" in
-    --out_dir)
-      OUT_DIR="$2"
-      shift
-      ;;
-    --out_file)
-      OUT_FILE="$2"
-      shift
-      ;;
-    --benchmark_label)
-      BENCHMARK_LABEL="$2"
-      shift
-      ;;
-    --cmd)
-      CMD="$2"
-      shift
-      ;;
-    --flutter_app_name)
-      FLUTTER_APP_NAME="$2"
-      shift
-      ;;
-    *)
-      break
-      ;;
-  esac
-  shift
-done
-
-DATE=`date +%Y-%m-%dT%H:%M:%S`
-TRACE_FILE=$OUT_DIR/trace.$DATE.json
-
-echo "== $BENCHMARK_LABEL: Killing processes..."
-killall root_presenter*; killall scenic*; killall basemgr*
-killall view_manager*; killall flutter*; killall set_root_view*
-
-echo "== $BENCHMARK_LABEL: Tracing..."
-echo $TRACE_FILE
-
-trace record --categories=dart,flutter,ledger,modular,vfs --duration=10 \
-  --buffer-size=12 --output-file=$TRACE_FILE $CMD
-
-echo "== $BENCHMARK_LABEL: Processing trace..."
-/pkgfs/packages/startup_benchmarks/0/bin/process_startup_trace  \
-  -test_suite_name="${BENCHMARK_LABEL}"                         \
-  -flutter_app_name="${FLUTTER_APP_NAME}"                       \
-  -benchmarks_out_filename="${OUT_FILE}" "${TRACE_FILE}"
diff --git a/tests/benchmarks/story_startup/topaz_startup_benchmarks.sh b/tests/benchmarks/story_startup/topaz_startup_benchmarks.sh
deleted file mode 100644
index 3df3171..0000000
--- a/tests/benchmarks/story_startup/topaz_startup_benchmarks.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/boot/bin/sh
-#
-# Copyright 2018 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# This script runs all startup benchmarks for the Topaz layer. It is called by
-# benchmarks.sh.
-
-# bench(): Helper function for running Startup benchmarks.
-# Arguments:
-#     $1         Module to run.
-#     $2         Label for benchmark.
-
-bench() {
-    MODULE=$1
-    BENCHMARK=$2
-    COMMAND="fuchsia-pkg://fuchsia.com/basemgr#meta/basemgr.cmx "`
-      `"--test --enable_presenter --account_provider=fuchsia-pkg://fuchsia.com/dev_token_manager#meta/dev_token_manager.cmx "`
-      `"--base_shell=fuchsia-pkg://fuchsia.com/dev_base_shell#meta/dev_base_shell.cmx --base_shell_args=--test_timeout_ms=60000 "`
-      `"--session_shell=fuchsia-pkg://fuchsia.com/dev_session_shell#meta/dev_session_shell.cmx --session_shell_args=--root_module=${MODULE} --story_shell=fuchsia-pkg://fuchsia.com/mondrian#meta/mondrian.cmx"
-
-    runbench_exec "${OUT_DIR}/${BENCHMARK}.json"                           \
-      "/pkgfs/packages/startup_benchmarks/0/bin/run_startup_benchmark.sh"  \
-      --out_dir "${OUT_DIR}"                                               \
-      --out_file "${OUT_DIR}/${BENCHMARK}.json"                            \
-      --benchmark_label "${BENCHMARK}"                                     \
-      --flutter_app_name "${MODULE}"                                       \
-      --cmd "${COMMAND}"
-}
-
-# dashboard
-bench "dashboard" "fuchsia.startup.dashboard"
diff --git a/tests/benchmarks/topaz_benchmarks.sh b/tests/benchmarks/topaz_benchmarks.sh
index b3adbba..944840d 100644
--- a/tests/benchmarks/topaz_benchmarks.sh
+++ b/tests/benchmarks/topaz_benchmarks.sh
@@ -20,8 +20,6 @@
   # Run the gfx benchmarks in the current shell environment, because they write
   # to (hidden) global state used by runbench_finish.
 
-  # DISABLED: See BLD-324
-  # . /pkgfs/packages/startup_benchmarks/0/bin/startup_benchmarks.sh "$@"
   # DISABLED: See SCN-1223
   # . /pkgfs/packages/topaz_benchmarks/0/bin/gfx_benchmarks.sh "$@"
   echo 'Graphics performance tests disabled'