mojo_benchmark: improve chart data recording for the perf dashboard.
This patch:
- adds the necessary metadata to the `chart_data` being recorded
- records a separate `chart_data` for each benchmark, so that
measurements from all benchmarks are no longer lumped into one
collective chart (see the sketch below)
- fixes an import error introduced in
64736fae560422778d97e5e747458e34697cd24e
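As an illustrative sketch of the new output shape (the benchmark, chart,
and value names here are hypothetical, not part of this patch), the
recorder now wraps recorded values in the metadata expected by the
dashboard:

  from devtoolslib import perf_dashboard

  recorder = perf_dashboard.ChartDataRecorder('my_benchmark')
  recorder.record_scalar('startup', 'time_to_main', 'ms', 42)
  print recorder.get_json()
  # Emits (key order may vary):
  # {"format_version": "1.0", "benchmark_name": "my_benchmark",
  #  "charts": {"startup": [{"type": "scalar", "name": "time_to_main",
  #                          "units": "ms", "value": 42}]}}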
R=etiennej@chromium.org
Review URL: https://codereview.chromium.org/1412113003 .
Cr-Mirrored-From: https://github.com/domokit/mojo
Cr-Mirrored-Commit: 7615d79003f86ad1958e4a0c2d9a613d01a50569
diff --git a/devtoolslib/perf_dashboard.py b/devtoolslib/perf_dashboard.py
index aa11ff6..74eb8ca 100644
--- a/devtoolslib/perf_dashboard.py
+++ b/devtoolslib/perf_dashboard.py
@@ -13,14 +13,16 @@
import json
from collections import defaultdict
+
class ChartDataRecorder(object):
"""Allows one to record measurement values one by one and then generate the
JSON string that represents them in the 'chart_data' format expected by the
performance dashboard.
"""
- def __init__(self):
+ def __init__(self, benchmark_name):
self.charts = defaultdict(list)
+ self.benchmark_name = benchmark_name
def record_scalar(self, chart_name, value_name, units, value):
"""Records a single measurement value of a scalar type."""
@@ -31,5 +33,11 @@
'value': value})
def get_json(self):
- """Returns the JSON string representing the recorded chart data."""
- return json.dumps(self.charts)
+ """Returns the JSON string representing the recorded chart data, wrapping
+ it with the required meta data."""
+ chart_data = {
+ 'format_version': '1.0',
+ 'benchmark_name': self.benchmark_name,
+ 'charts': self.charts
+ }
+ return json.dumps(chart_data)
diff --git a/devtoolslib/perf_dashboard_unittest.py b/devtoolslib/perf_dashboard_unittest.py
index 61809c3..b9c3b17 100644
--- a/devtoolslib/perf_dashboard_unittest.py
+++ b/devtoolslib/perf_dashboard_unittest.py
@@ -17,52 +17,64 @@
from devtoolslib.perf_dashboard import ChartDataRecorder
+
class ChartDataRecorderTest(unittest.TestCase):
"""Tests the chart data recorder."""
def test_empty(self):
"""Tests chart data with no charts."""
- recorder = ChartDataRecorder()
+ recorder = ChartDataRecorder('benchmark')
result = json.loads(recorder.get_json())
- self.assertEquals(0, len(result))
+ self.assertEquals({
+ 'format_version': '1.0',
+ 'benchmark_name': 'benchmark',
+ 'charts': {}}, result)
def test_one_chart(self):
"""Tests chart data with two samples in one chart."""
- recorder = ChartDataRecorder()
+ recorder = ChartDataRecorder('benchmark')
recorder.record_scalar('chart', 'val1', 'ms', 1)
recorder.record_scalar('chart', 'val2', 'ms', 2)
result = json.loads(recorder.get_json())
- self.assertEquals(1, len(result))
- self.assertEquals(2, len(result['chart']))
+ self.assertEquals('1.0', result['format_version'])
+ self.assertEquals('benchmark', result['benchmark_name'])
+
+ charts = result['charts']
+ self.assertEquals(1, len(charts))
+ self.assertEquals(2, len(charts['chart']))
self.assertEquals({
'type': 'scalar',
'name': 'val1',
'units': 'ms',
- 'value': 1}, result['chart'][0])
+ 'value': 1}, charts['chart'][0])
self.assertEquals({
'type': 'scalar',
'name': 'val2',
'units': 'ms',
- 'value': 2}, result['chart'][1])
+ 'value': 2}, charts['chart'][1])
def test_two_charts(self):
"""Tests chart data with two samples over two charts."""
- recorder = ChartDataRecorder()
+ recorder = ChartDataRecorder('benchmark')
recorder.record_scalar('chart1', 'val1', 'ms', 1)
recorder.record_scalar('chart2', 'val2', 'ms', 2)
result = json.loads(recorder.get_json())
- self.assertEquals(2, len(result))
- self.assertEquals(1, len(result['chart1']))
+ self.assertEquals('1.0', result['format_version'])
+ self.assertEquals('benchmark', result['benchmark_name'])
+
+ charts = result['charts']
+ self.assertEquals(2, len(charts))
+ self.assertEquals(1, len(charts['chart1']))
self.assertEquals({
'type': 'scalar',
'name': 'val1',
'units': 'ms',
- 'value': 1}, result['chart1'][0])
- self.assertEquals(1, len(result['chart2']))
+ 'value': 1}, charts['chart1'][0])
+ self.assertEquals(1, len(charts['chart2']))
self.assertEquals({
'type': 'scalar',
'name': 'val2',
'units': 'ms',
- 'value': 2}, result['chart2'][0])
+ 'value': 2}, charts['chart2'][0])
diff --git a/mojo_benchmark b/mojo_benchmark
index 5ffdbe1..8413490 100755
--- a/mojo_benchmark
+++ b/mojo_benchmark
@@ -14,7 +14,7 @@
from devtoolslib import shell_arguments
from devtoolslib import shell_config
-from devtoolslib import performance_dashboard
+from devtoolslib import perf_dashboard
_DESCRIPTION = """Runner for Mojo application benchmarks.
@@ -195,10 +195,6 @@
benchmark_list_params = {"target_os": target_os}
exec script_args.benchmark_list_file in benchmark_list_params
- chart_data_recorder = None
- if script_args.chart_data_output_file:
- chart_data_recorder = performance_dashboard.ChartDataRecorder()
-
exit_code = 0
for benchmark_spec in benchmark_list_params['benchmarks']:
for variant_spec in _generate_benchmark_variants(benchmark_spec):
@@ -207,6 +203,10 @@
duration = variant_spec['duration']
shell_args = variant_spec.get('shell-args', []) + common_shell_args
measurements = variant_spec['measurements']
+
+ chart_data_recorder = None
+ if script_args.chart_data_output_file:
+ chart_data_recorder = perf_dashboard.ChartDataRecorder(name)
benchmark_succeeded, benchmark_error, output = _run_benchmark(
shell, shell_args, name, app, duration, measurements,
script_args.verbose, script_args.android,
@@ -243,8 +243,9 @@
print '-' * 72
exit_code = 1
- if script_args.chart_data_output_file:
- script_args.chart_data_output_file.write(chart_data_recorder.get_json())
+ if script_args.chart_data_output_file:
+ script_args.chart_data_output_file.write(chart_data_recorder.get_json())
+ script_args.chart_data_output_file.write('\n')
return exit_code