Merge pull request #21477 from palimondo/against-the-dark

[benchmark] BenchmarkDriver check --markdown
diff --git a/benchmark/scripts/Benchmark_Driver b/benchmark/scripts/Benchmark_Driver
index 6df7916..230e180 100755
--- a/benchmark/scripts/Benchmark_Driver
+++ b/benchmark/scripts/Benchmark_Driver
@@ -330,11 +330,15 @@
         super(BenchmarkDoctor, self).__init__()
         self.driver = driver or BenchmarkDriver(args)
         self.results = {}
-        self.console_handler = logging.StreamHandler(sys.stdout)
-        self.console_handler.setLevel(logging.DEBUG if args.verbose else
-                                      logging.INFO)
-        self.console_handler.setFormatter(
-            LoggingReportFormatter(use_color=sys.stdout.isatty()))
+
+        if hasattr(args, 'markdown') and args.markdown:
+            self.console_handler = MarkdownReportHandler(sys.stdout)
+        else:
+            self.console_handler = logging.StreamHandler(sys.stdout)
+            self.console_handler.setFormatter(
+                LoggingReportFormatter(use_color=sys.stdout.isatty()))
+            self.console_handler.setLevel(logging.DEBUG if args.verbose else
+                                          logging.INFO)
         self.log.addHandler(self.console_handler)
         self.log.debug('Checking tests: %s', ', '.join(self.driver.tests))
         self.requirements = [
@@ -350,6 +354,7 @@
         """Close log handlers on exit."""
         for handler in list(self.log.handlers):
             handler.close()
+        self.log.removeHandler(self.console_handler)
 
     benchmark_naming_convention_re = re.compile(r'[A-Z][a-zA-Z0-9\-.!?]+')
     camel_humps_re = re.compile(r'[a-z][A-Z]')
@@ -703,9 +708,13 @@
         'check',
         help='',
         parents=[shared_benchmarks_parser])
-    check_parser.add_argument(
+    check_group = check_parser.add_mutually_exclusive_group()
+    check_group.add_argument(
         '-v', '--verbose', action='store_true',
-        help='show more details during benchmark analysis',)
+        help='show more details during benchmark analysis')
+    check_group.add_argument(
+        '-md', '--markdown', action='store_true',
+        help='format report as Markdown table')
     check_parser.set_defaults(func=BenchmarkDoctor.run_check)
 
     compare_parser = subparsers.add_parser(
diff --git a/benchmark/scripts/test_Benchmark_Driver.py b/benchmark/scripts/test_Benchmark_Driver.py
index f0194ce..5ecf76f 100644
--- a/benchmark/scripts/test_Benchmark_Driver.py
+++ b/benchmark/scripts/test_Benchmark_Driver.py
@@ -120,6 +120,20 @@
         self.assertTrue(parse_args(['check', '-v']).verbose)
         self.assertTrue(parse_args(['check', '--verbose']).verbose)
 
+    def test_check_supports_markdown_output(self):
+        self.assertFalse(parse_args(['check']).markdown)
+        self.assertTrue(parse_args(['check', '-md']).markdown)
+        self.assertTrue(parse_args(['check', '--markdown']).markdown)
+
+    def test_check_flags_are_mutually_exclusive(self):
+        with captured_output() as (out, err):
+            self.assertRaises(SystemExit,
+                              parse_args, ['check', '-md', '-v'])
+        self.assert_contains(
+            ['error:', 'argument -v/--verbose: ' +
+             'not allowed with argument -md/--markdown'],
+            err.getvalue())
+
 
 class ArgsStub(object):
     def __init__(self):
@@ -497,7 +511,7 @@
 
     def setUp(self):
         super(TestBenchmarkDoctor, self).setUp()
-        self.args = Stub(verbose=False)
+        self.args = Stub(verbose=False, markdown=False)
         self._doctor_log_handler.reset()
         self.logs = self._doctor_log_handler.messages
 
@@ -516,8 +530,9 @@
     def test_supports_verbose_output(self):
         driver = BenchmarkDriverMock(tests=['B1', 'B2'])
         driver.verbose = True
+        self.args.verbose = True
         with captured_output() as (out, _):
-            BenchmarkDoctor(Stub(verbose=True), driver)
+            BenchmarkDoctor(self.args, driver)
         self.assert_contains(['Checking tests: B1, B2'], out.getvalue())
 
     def test_uses_report_formatter(self):
@@ -528,6 +543,14 @@
         self.assertTrue(isinstance(console_handler.formatter,
                                    LoggingReportFormatter))
 
+    def test_uses_optional_markdown_report_formatter(self):
+        self.args.markdown = True
+        with captured_output() as (_, _):
+            doc = BenchmarkDoctor(self.args, BenchmarkDriverMock(tests=['B1']))
+        self.assertTrue(doc)
+        console_handler = logging.getLogger('BenchmarkDoctor').handlers[1]
+        self.assertTrue(isinstance(console_handler, MarkdownReportHandler))
+
     def test_measure_10_independent_1s_benchmark_series(self):
         """Measurement strategy takes 5 i2 and 5 i1 series.