Teach mojo_benchmark to produce chart_data for the perf dashboard.
This patch adds the ability to encode the benchmark results in the 'chart_data'
format understood by the Chromium Performance Dashboard.
Fixes domokit/devtools#52.
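
For illustration, a minimal usage sketch of the new recorder. The chart and
value names below are hypothetical and not taken from any real benchmark:

  from devtoolslib.perf_dashboard import ChartDataRecorder

  # Record two hypothetical scalar measurements in one chart and print the
  # 'chart_data' JSON that mojo_benchmark writes via --chart-data-output-file.
  recorder = ChartDataRecorder()
  recorder.record_scalar('startup', 'time_to_first_frame', 'ms', 42)
  recorder.record_scalar('startup', 'time_to_interactive', 'ms', 123)
  print recorder.get_json()
  # Prints something like (key order may vary):
  # {"startup": [{"type": "scalar", "name": "time_to_first_frame",
  #               "units": "ms", "value": 42},
  #              {"type": "scalar", "name": "time_to_interactive",
  #               "units": "ms", "value": 123}]}
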
R=viettrungluu@chromium.org
Review URL: https://codereview.chromium.org/1406063002 .
diff --git a/mojo/devtools/common/devtoolslib/perf_dashboard.py b/mojo/devtools/common/devtoolslib/perf_dashboard.py
new file mode 100644
index 0000000..aa11ff6
--- /dev/null
+++ b/mojo/devtools/common/devtoolslib/perf_dashboard.py
@@ -0,0 +1,35 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Disable the line-too-long warning.
+# pylint: disable=C0301
+"""This module implements the Chromium Performance Dashboard JSON v1.0 data
+format.
+
+See http://www.chromium.org/developers/speed-infra/performance-dashboard/sending-data-to-the-performance-dashboard.
+"""
+
+import json
+from collections import defaultdict
+
+class ChartDataRecorder(object):
+  """Allows one to record measurement values one by one and then generate the
+  JSON string that represents them in the 'chart_data' format expected by the
+  performance dashboard.
+  """
+
+  def __init__(self):
+    self.charts = defaultdict(list)
+
+  def record_scalar(self, chart_name, value_name, units, value):
+    """Records a single measurement value of a scalar type."""
+    self.charts[chart_name].append({
+        'type': 'scalar',
+        'name': value_name,
+        'units': units,
+        'value': value})
+
+  def get_json(self):
+    """Returns the JSON string representing the recorded chart data."""
+    return json.dumps(self.charts)
diff --git a/mojo/devtools/common/devtoolslib/perf_dashboard_unittest.py b/mojo/devtools/common/devtoolslib/perf_dashboard_unittest.py
new file mode 100644
index 0000000..61809c3
--- /dev/null
+++ b/mojo/devtools/common/devtoolslib/perf_dashboard_unittest.py
@@ -0,0 +1,68 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for the Chromium Performance Dashboard data format implementation."""
+
+import imp
+import json
+import os.path
+import sys
+import unittest
+
+try:
+  imp.find_module("devtoolslib")
+except ImportError:
+  sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from devtoolslib.perf_dashboard import ChartDataRecorder
+
+class ChartDataRecorderTest(unittest.TestCase):
+  """Tests the chart data recorder."""
+
+  def test_empty(self):
+    """Tests chart data with no charts."""
+    recorder = ChartDataRecorder()
+    result = json.loads(recorder.get_json())
+    self.assertEquals(0, len(result))
+
+  def test_one_chart(self):
+    """Tests chart data with two samples in one chart."""
+    recorder = ChartDataRecorder()
+    recorder.record_scalar('chart', 'val1', 'ms', 1)
+    recorder.record_scalar('chart', 'val2', 'ms', 2)
+
+    result = json.loads(recorder.get_json())
+    self.assertEquals(1, len(result))
+    self.assertEquals(2, len(result['chart']))
+    self.assertEquals({
+        'type': 'scalar',
+        'name': 'val1',
+        'units': 'ms',
+        'value': 1}, result['chart'][0])
+    self.assertEquals({
+        'type': 'scalar',
+        'name': 'val2',
+        'units': 'ms',
+        'value': 2}, result['chart'][1])
+
+  def test_two_charts(self):
+    """Tests chart data with two samples over two charts."""
+    recorder = ChartDataRecorder()
+    recorder.record_scalar('chart1', 'val1', 'ms', 1)
+    recorder.record_scalar('chart2', 'val2', 'ms', 2)
+
+    result = json.loads(recorder.get_json())
+    self.assertEquals(2, len(result))
+    self.assertEquals(1, len(result['chart1']))
+    self.assertEquals({
+        'type': 'scalar',
+        'name': 'val1',
+        'units': 'ms',
+        'value': 1}, result['chart1'][0])
+    self.assertEquals(1, len(result['chart2']))
+    self.assertEquals({
+        'type': 'scalar',
+        'name': 'val2',
+        'units': 'ms',
+        'value': 2}, result['chart2'][0])
diff --git a/mojo/devtools/common/mojo_benchmark b/mojo/devtools/common/mojo_benchmark
index 0f2126b..5ffdbe1 100755
--- a/mojo/devtools/common/mojo_benchmark
+++ b/mojo/devtools/common/mojo_benchmark
@@ -14,6 +14,7 @@
 from devtoolslib import shell_arguments
 from devtoolslib import shell_config
+from devtoolslib import perf_dashboard
 
 _DESCRIPTION = """Runner for Mojo application benchmarks.
@@ -175,6 +176,9 @@
                       help='a file listing benchmarks to run')
   parser.add_argument('--save-traces', action='store_true',
                       help='save the traces produced by benchmarks to disk')
+  parser.add_argument('--chart-data-output-file', type=argparse.FileType('w'),
+                      help='file to write chart data for the performance '
+                           'dashboard to')
 
   # Common shell configuration arguments.
   shell_config.add_shell_arguments(parser)
@@ -191,6 +195,10 @@
   benchmark_list_params = {"target_os": target_os}
   exec script_args.benchmark_list_file in benchmark_list_params
 
+  chart_data_recorder = None
+  if script_args.chart_data_output_file:
+    chart_data_recorder = perf_dashboard.ChartDataRecorder()
+
   exit_code = 0
   for benchmark_spec in benchmark_list_params['benchmarks']:
     for variant_spec in _generate_benchmark_variants(benchmark_spec):
@@ -213,8 +221,13 @@
         # results and preserve the required order.
         for measurement_spec in measurements:
           if measurement_spec in measurement_results:
-            print '%s %s' % (measurement_spec,
-                             measurement_results[measurement_spec])
+            result = measurement_results[measurement_spec]
+            print '%s %s' % (measurement_spec, result)
+
+            if chart_data_recorder:
+              measurement_name = measurement_spec.replace('/', '-')
+              chart_data_recorder.record_scalar(name, measurement_name, 'ms',
+                                                result)
           else:
             print '%s ?' % measurement_spec
             some_measurements_failed = True
@@ -230,6 +243,9 @@
         print '-' * 72
         exit_code = 1
 
+  if script_args.chart_data_output_file:
+    script_args.chart_data_output_file.write(chart_data_recorder.get_json())
+
   return exit_code
 
 if __name__ == '__main__':
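
As a sanity check, the chart data written via --chart-data-output-file can be
parsed back with the standard json module. This is only an illustrative sketch;
'chart_data.json' is a placeholder for whatever path was passed to the flag:

  import json

  # 'chart_data.json' stands in for the file given to --chart-data-output-file.
  with open('chart_data.json') as chart_data_file:
    chart_data = json.load(chart_data_file)

  # Each chart name maps to the list of scalar values recorded for it.
  for chart_name, values in chart_data.items():
    for value in values:
      print '%s/%s: %s %s' % (chart_name, value['name'],
                              value['value'], value['units'])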