[test-suite] Update test-suite microbenchmarks to use JSON (fix bug 41327)

Google Benchmark is planning to drop the CSV output format. This updates the microbenchmark module to use the JSON output instead.

This fixes PR41327.
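
For reference, a minimal sketch of how the new code path consumes the benchmark output. The benchmark name and timing numbers below are invented for illustration and are not from a real run:

    import json

    # Trimmed example of what `--benchmark_format=json` writes to stdout.
    sample_output = '''
    {
      "context": {"date": "2019-04-04 12:00:00", "num_cpus": 8},
      "benchmarks": [
        {"name": "BM_example/64", "iterations": 1000,
         "real_time": 125.0, "cpu_time": 120.0, "time_unit": "ns"}
      ]
    }
    '''

    data = json.loads(sample_output)
    for benchmark in data['benchmarks']:
        # The module records each entry's cpu_time as its exec_time metric.
        print(benchmark['name'], benchmark['cpu_time'])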

Reviewers: lebedev.ri

https://reviews.llvm.org/D60205

llvm-svn: 357704
diff --git a/litsupport/modules/microbenchmark.py b/litsupport/modules/microbenchmark.py
index 5cef745..c7b9c3d 100644
--- a/litsupport/modules/microbenchmark.py
+++ b/litsupport/modules/microbenchmark.py
@@ -1,17 +1,17 @@
 '''Test module to collect google benchmark results.'''
 from litsupport import shellcommand
 from litsupport import testplan
-import csv
+import json
 import lit.Test
 
 
 def _mutateCommandLine(context, commandline):
     cmd = shellcommand.parse(commandline)
-    cmd.arguments.append("--benchmark_format=csv")
+    cmd.arguments.append("--benchmark_format=json")
-    # We need stdout outself to get the benchmark csv data.
+    # We need stdout ourselves to get the benchmark JSON data.
     if cmd.stdout is not None:
         raise Exception("Rerouting stdout not allowed for microbenchmarks")
-    benchfile = context.tmpBase + '.bench.csv'
+    benchfile = context.tmpBase + '.bench.json'
     cmd.stdout = benchfile
     context.microbenchfiles.append(benchfile)
 
@@ -25,18 +25,18 @@
 def _collectMicrobenchmarkTime(context, microbenchfiles):
     for f in microbenchfiles:
         content = context.read_result_file(context, f)
-        lines = csv.reader(content.splitlines())
-        # First line: "name,iterations,real_time,cpu_time,time_unit..."
-        for line in lines:
-            if line[0] == 'name':
-                continue
+        data = json.loads(content)
+
+        # Create a micro_result for each benchmark
+        for benchmark in data['benchmarks']:
             # Name for MicroBenchmark
-            name = line[0]
+            name = benchmark['name']
+
             # Create Result object with PASS
             microBenchmark = lit.Test.Result(lit.Test.PASS)
 
-            # Index 3 is cpu_time
-            exec_time_metric = lit.Test.toMetricValue(float(line[3]))
+            # Add the exec_time metric for this result
+            exec_time_metric = lit.Test.toMetricValue(benchmark['cpu_time'])
             microBenchmark.addMetric('exec_time', exec_time_metric)
 
             # Add Micro Result