#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runner for Mojo application benchmarks."""

import argparse
import logging
import os.path
import re
import sys
import time

from devtoolslib import perf_dashboard
from devtoolslib import shell_arguments
from devtoolslib import shell_config


_DESCRIPTION = """Runner for Mojo application benchmarks.

|benchmark_list_file| has to be a valid Python program that sets a |benchmarks|
variable: a list of dictionaries, each describing one benchmark to run. For a
description of the required format, see
https://github.com/domokit/devtools/blob/master/docs/mojo_benchmark.md .
"""

_logger = logging.getLogger()

_BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo'
_CACHE_SERVICE_URL = 'mojo:url_response_disk_cache'
_NETWORK_SERVICE_URL = 'mojo:network_service'

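# Shell arguments used for the cold start variant: passing '--clear' asks the
# url response cache and the network service to drop their persisted state.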
_COLD_START_SHELL_ARGS = [
    '--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear'),
    '--args-for=%s %s' % (_NETWORK_SERVICE_URL, '--clear'),
]

# Additional time in seconds allocated per shell run to accommodate start-up.
# The shell should terminate before hitting this timeout; it is an error if it
# doesn't.
_EXTRA_TIMEOUT = 20

_MEASUREMENT_RESULT_FORMAT = r"""
^             # Beginning of the line.
measurement:  # Hard-coded tag.
\s+(\S+)      # Match measurement spec.
\s+(\S+)      # Match measurement result.
$             # End of the line.
"""

_MEASUREMENT_REGEX = re.compile(_MEASUREMENT_RESULT_FORMAT, re.VERBOSE)
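# For example, a line such as 'measurement: some/spec 42.5' (spec and value
# are illustrative) yields the groups ('some/spec', '42.5').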


def _generate_benchmark_variants(benchmark_spec):
  """Generates benchmark specifications for the individual variants of the
  given benchmark: a cold start variant (run with _COLD_START_SHELL_ARGS) and
  a warm start variant.

  Args:
    benchmark_spec: Dictionary describing the benchmark. It has to carry the
        'app', 'duration' and 'measurements' keys; the 'shell-args' key is
        optional.

  Returns:
    A list of benchmark specs corresponding to individual variants of the given
    benchmark.
  """
  variants = []
  variants.append({
      'variant_name': 'cold start',
      'app': benchmark_spec['app'],
      'duration': benchmark_spec['duration'],
      'measurements': benchmark_spec['measurements'],
      'shell-args':
          benchmark_spec.get('shell-args', []) + _COLD_START_SHELL_ARGS})
  variants.append({
      'variant_name': 'warm start',
      'app': benchmark_spec['app'],
      'duration': benchmark_spec['duration'],
      'measurements': benchmark_spec['measurements'],
      'shell-args': benchmark_spec.get('shell-args', [])})
  return variants


def _run_benchmark(shell, shell_args, app, duration_seconds, measurements,
                   verbose, android, output_file):
  """Runs the given benchmark by running `benchmark.mojo` in the mojo shell
  with the appropriate arguments, and returns the produced output.

  Returns:
    A tuple of (succeeded, error_msg, output).
  """
  timeout = duration_seconds + _EXTRA_TIMEOUT
  benchmark_args = []
  benchmark_args.append('--app=' + app)
  benchmark_args.append('--duration=' + str(duration_seconds))

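  # When tracing is requested on Android the trace is written to the device's
  # tmp directory and pulled back to |output_file| after the run; on Linux it
  # is written directly to |output_file|.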
  device_output_file = None
  if output_file:
    if android:
      device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file)
      benchmark_args.append('--trace-output=' + device_output_file)
    else:
      benchmark_args.append('--trace-output=' + output_file)

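  # Each measurement spec is forwarded verbatim to benchmark.mojo; the results
  # it prints are later extracted by _parse_measurement_results().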
  for measurement in measurements:
    benchmark_args.append(measurement['spec'])

  shell_args = list(shell_args)
  shell_args.append(_BENCHMARK_APP)
  shell_args.append('--force-offline-by-default')
  shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,
                                          ' '.join(benchmark_args)))

  if verbose:
    print 'shell arguments: ' + str(shell_args)
  return_code, output, did_time_out = shell.run_and_get_output(
      shell_args, timeout=timeout)

  if did_time_out:
    return False, 'timed out', output
  if return_code:
    return False, 'return code: ' + str(return_code), output

  # Pull the trace file even if some measurements are missing, as it can be
  # useful in debugging.
  if device_output_file:
    shell.pull_file(device_output_file, output_file, remove_original=True)

  return True, None, output


def _parse_measurement_results(output):
  """Parses the measurement results present in the benchmark output.

  Returns:
    A dictionary mapping each recognized measurement spec to its result parsed
    as a float. Lines that do not match the expected format and results that
    cannot be parsed as floats are skipped.
  """
  measurement_results = {}
  output_lines = [line.strip() for line in output.split('\n')]
  for line in output_lines:
    match = _MEASUREMENT_REGEX.match(line)
    if match:
      measurement_spec = match.group(1)
      measurement_result = match.group(2)
      try:
        measurement_results[measurement_spec] = float(measurement_result)
      except ValueError:
        pass
  return measurement_results


def main():
  parser = argparse.ArgumentParser(
      formatter_class=argparse.RawDescriptionHelpFormatter,
      description=_DESCRIPTION)
  parser.add_argument('benchmark_list_file', type=file,
                      help='a file listing benchmarks to run')
  parser.add_argument('--save-all-traces', action='store_true',
                      help='save the traces produced by benchmarks to disk')
  perf_dashboard.add_argparse_server_arguments(parser)

  # Common shell configuration arguments.
  shell_config.add_shell_arguments(parser)
  script_args = parser.parse_args()
  config = shell_config.get_shell_config(script_args)

  try:
    shell, common_shell_args = shell_arguments.get_shell(config, [])
  except shell_arguments.ShellConfigurationException as e:
    print e
    return 1

  target_os = 'android' if script_args.android else 'linux'
  benchmark_list_params = {'target_os': target_os}
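  # The benchmark list file is executed as Python with |target_os| available
  # in its globals; it has to set the |benchmarks| variable (see the sketch
  # near the top of this file).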
  exec script_args.benchmark_list_file in benchmark_list_params

  # Use a single recorder for the whole run, so that chart data from all
  # benchmark variants is uploaded together at the end.
  chart_data_recorder = None
  if script_args.upload:
    chart_data_recorder = perf_dashboard.ChartDataRecorder(
        script_args.test_name)

  exit_code = 0
  for benchmark_spec in benchmark_list_params['benchmarks']:
    benchmark_name = benchmark_spec['name']

    for variant_spec in _generate_benchmark_variants(benchmark_spec):
      variant_name = variant_spec['variant_name']
      app = variant_spec['app']
      duration = variant_spec['duration']
      shell_args = variant_spec.get('shell-args', []) + common_shell_args
      measurements = variant_spec['measurements']

      output_file = None
      if script_args.save_all_traces:
        output_file = 'benchmark-%s-%s-%s.trace' % (
            benchmark_name.replace(' ', '_'),
            variant_name.replace(' ', '_'),
            time.strftime('%Y%m%d%H%M%S'))

      benchmark_succeeded, benchmark_error, output = _run_benchmark(
          shell, shell_args, app, duration, measurements, script_args.verbose,
          script_args.android, output_file)

      print '[ %s ] %s' % (benchmark_name, variant_name)

      some_measurements_failed = False
      if benchmark_succeeded:
        measurement_results = _parse_measurement_results(output)
        # Iterate over the list of specs, not the dictionary, to detect missing
        # results and preserve the required order.
        for measurement in measurements:
          if measurement['spec'] in measurement_results:
            result = measurement_results[measurement['spec']]
            print '%10.4f %s' % (result, measurement['name'])

            if chart_data_recorder:
              chart_name = benchmark_name + '__' + variant_name
              chart_data_recorder.record_scalar(
                  perf_dashboard.normalize_label(chart_name),
                  perf_dashboard.normalize_label(measurement['name']),
                  'ms', result)
          else:
            print '? %s' % measurement['name']
            some_measurements_failed = True

      if not benchmark_succeeded or some_measurements_failed:
        if not benchmark_succeeded:
          print 'benchmark failed: ' + benchmark_error
        if some_measurements_failed:
          print 'some measurements failed'
        print 'output: '
        print '-' * 72
        print output
        print '-' * 72
        exit_code = 1

  if script_args.upload:
    if not perf_dashboard.upload_chart_data(
        script_args.master_name, script_args.bot_name,
        script_args.test_name, script_args.builder_name,
        script_args.build_number, chart_data_recorder.get_chart_data(),
        script_args.server_url, script_args.dry_run):
      exit_code = 1

  return exit_code


if __name__ == '__main__':
  sys.exit(main())