[fidl][benchmarks] Generate small summary versions of fuchsiaperf files

Change the wrapper for libperftest-based FIDL microbenchmarks so that
the *.fuchsiaperf.json files it generates are small summary versions,
rather than full unsummarized datasets.
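
For context, "summarized" here means collapsing each test case's raw
per-iteration values into a small number of statistics before the
fuchsiaperf.json file is written out. A rough sketch of that idea in
Dart (hypothetical code, not the actual helper; the real logic lives
in the shared perf test helpers):

  // Hypothetical sketch: collapse each fuchsiaperf entry's raw values
  // to a single mean so the output file stays small.
  import 'dart:convert' show jsonDecode, jsonEncode;
  import 'dart:io' show File;

  Future<void> summarizeFuchsiaPerf(File input, File output) async {
    final List<dynamic> entries = jsonDecode(await input.readAsString());
    final summarized = entries.map((entry) {
      final values = (entry['values'] as List).cast<num>();
      final mean = values.reduce((a, b) => a + b) / values.length;
      return {...entry, 'values': [mean]};
    }).toList();
    await output.writeAsString(jsonEncode(summarized));
  }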

This is basically the same as what we did earlier for
fuchsia_microbenchmarks in the following change, which has a fuller
explanation:
https://fuchsia-review.googlesource.com/c/fuchsia/+/504550

Bug: 68471
Test: fx set terminal.x64 --with src/tests/end_to_end/perf:tests
  + fx test --e2e fidl_microbenchmarks_test
Change-Id: I823c2573350ffed34f81640458139b57e9ea4b91
Reviewed-on: https://fuchsia-review.googlesource.com/c/fuchsia/+/514047
Reviewed-by: Nathan Rogers <nathanrogers@google.com>
Commit-Queue: Mark Seaborn <mseaborn@google.com>
diff --git a/src/tests/end_to_end/perf/test/fidl_microbenchmarks_test.dart b/src/tests/end_to_end/perf/test/fidl_microbenchmarks_test.dart
index 44dce20..8b0324f 100644
--- a/src/tests/end_to_end/perf/test/fidl_microbenchmarks_test.dart
+++ b/src/tests/end_to_end/perf/test/fidl_microbenchmarks_test.dart
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+import 'dart:io' show File;
+
 import 'package:test/test.dart';
 
 import 'helpers.dart';
@@ -30,29 +32,24 @@
 // Runs a benchmark that uses the C++ perftest runner.
 // It is believed that benchmarks converge to different means in different
 // process runs (and reboots). Since each of these benchmarks is currently
-// fast to run (a few secs), run the binary several times for more stability
-// in perfcompare results.
-// However, this is currently not possible with catapult_converter, so only
-// report the first result to catapult.
+// fast to run (a few secs), run the binary several times for more stability.
 void runPerftestFidlBenchmark(String benchmarkBinary) {
   final resultsFile = tmpPerfResultsJson(benchmarkBinary);
   _tests.add(() {
     test(benchmarkBinary, () async {
       final helper = await PerfTestHelper.make();
-      final result = await helper.sl4fDriver.ssh
-          .run('/bin/$benchmarkBinary -p --quiet --out $resultsFile');
-      expect(result.exitCode, equals(0));
-      // This makes the results visible to both perfcompare and Catapult.
-      await helper.processResults(resultsFile);
 
-      for (var process = 0; process < perftestProcessRuns - 1; ++process) {
+      final List<File> resultsFiles = [];
+      for (var process = 0; process < perftestProcessRuns; ++process) {
         final result = await helper.sl4fDriver.ssh
             .run('/bin/$benchmarkBinary -p --quiet --out $resultsFile');
         expect(result.exitCode, equals(0));
-        // This makes the results visible to perfcompare but not Catapult.
-        await helper.storage.dumpFile(resultsFile,
-            'results_fidl_microbenchmarks_process$process', 'fuchsiaperf.json');
+        resultsFiles.add(await helper.storage.dumpFile(
+            resultsFile,
+            'results_fidl_microbenchmarks_process$process',
+            'fuchsiaperf_full.json'));
       }
+      await helper.processResultsSummarized(resultsFiles);
     }, timeout: Timeout.none);
   });
 }
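
The processResultsSummarized helper called above is defined in
helpers.dart and is not part of this diff. As an illustration only, a
helper of that shape might merge the per-process fuchsiaperf_full.json
files and emit a single small summary file, along these lines
(hypothetical sketch; names, keys, and output path are assumptions):

  // Hypothetical sketch of a summarizing helper; the real
  // implementation lives in helpers.dart and may differ. Assumes the
  // standard fuchsiaperf entry keys: test_suite, label, unit, values.
  import 'dart:convert' show jsonDecode, jsonEncode;
  import 'dart:io' show File;

  Future<void> processResultsSummarized(List<File> fullResultsFiles) async {
    final byTest = <String, Map<String, dynamic>>{};
    for (final file in fullResultsFiles) {
      final List<dynamic> entries = jsonDecode(await file.readAsString());
      for (final entry in entries) {
        final key = '${entry['test_suite']}: ${entry['label']}';
        final merged =
            byTest.putIfAbsent(key, () => {...entry, 'values': <num>[]});
        // Record one mean per process run instead of every raw value.
        final values = (entry['values'] as List).cast<num>();
        (merged['values'] as List<num>)
            .add(values.reduce((a, b) => a + b) / values.length);
      }
    }
    // Hypothetical output path for the merged summary.
    await File('results_summary.fuchsiaperf.json')
        .writeAsString(jsonEncode(byTest.values.toList()));
  }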