Changeset 190779 in webkit
- Timestamp:
- Oct 8, 2015 10:53:12 PM (9 years ago)
- Location:
- trunk/Tools
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Tools/ChangeLog
r190776 r190779 1 2015-10-08 Dewei Zhu <dewei_zhu@apple.com> 2 3 Extend run-benchmark script to support human-readable results conversion. 4 https://bugs.webkit.org/show_bug.cgi?id=149944 5 6 Reviewed by Ryosuke Niwa. 7 8 Add '--read-results-json' and '--no-adjust-unit' options. 9 '--read-results-json' option converts result file to human readable format. 10 '--no-adjust-unit' option skips scientific notation conversion. 11 '--platform' defaults to 'osx' and '--browser' defaults to 'safari'. 12 13 * Scripts/webkitpy/benchmark_runner/benchmark_results.py: 14 (BenchmarkResults.format): 15 (BenchmarkResults._format_tests): 16 (BenchmarkResults._format_values): 17 * Scripts/webkitpy/benchmark_runner/benchmark_runner.py: 18 (BenchmarkRunner.__init__): 19 (BenchmarkRunner._run_benchmark): 20 (BenchmarkRunner._dump): 21 (BenchmarkRunner.show_results): 22 (BenchmarkRunner._show_results): Deleted. 23 * Scripts/webkitpy/benchmark_runner/run_benchmark.py: 24 (parse_args): 25 (start): 26 1 27 2015-10-08 Daniel Bates <dabates@apple.com> 2 28 -
trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py
r185178 r190779 49 49 self._results = self._aggregate_results(results) 50 50 51 def format(self ):52 return self._format_tests(self._results )53 54 @classmethod 55 def _format_tests( self, tests, indent=''):51 def format(self, scale_unit): 52 return self._format_tests(self._results, scale_unit) 53 54 @classmethod 55 def _format_tests(cls, tests, scale_unit, indent=''): 56 56 output = '' 57 57 config_name = 'current' … … 72 72 if aggregator_name: 73 73 output += aggregator_name + ':' 74 output += ' ' + self._format_values(metric_name, metric[aggregator_name][config_name]) + '\n'74 output += ' ' + cls._format_values(metric_name, metric[aggregator_name][config_name], scale_unit) + '\n' 75 75 if 'tests' in test: 76 output += self._format_tests(test['tests'], indent=(indent + ' ' * len(test_name)))76 output += cls._format_tests(test['tests'], scale_unit, indent=(indent + ' ' * len(test_name))) 77 77 return output 78 78 79 79 @classmethod 80 def _format_values(cls, metric_name, values ):80 def _format_values(cls, metric_name, values, scale_unit): 81 81 values = map(float, values) 82 82 total = sum(values) … … 93 93 94 94 unit = cls._unit_from_metric(metric_name) 95 96 if not scale_unit: 97 return ('{mean:.3f}{unit} stdev={delta:.1%}').format(mean=mean, delta=sample_stdev / mean, unit=unit) 95 98 96 99 if unit == 'ms': -
trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py
r188237 r190779 25 25 class BenchmarkRunner(object): 26 26 27 def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, device_id=None):27 def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, scale_unit=True, device_id=None): 28 28 try: 29 29 plan_file = self._find_plan_file(plan_file) … … 40 40 self._build_dir = os.path.abspath(build_dir) if build_dir else None 41 41 self._output_file = output_file 42 self._scale_unit = scale_unit 42 43 self._device_id = device_id 43 44 except IOError as error: … … 81 82 results = self._wrap(results) 82 83 self._dump(results, self._output_file if self._output_file else self._plan['output_file']) 83 self. _show_results(results)84 self.show_results(results, self._scale_unit) 84 85 85 86 def execute(self): … … 95 96 except IOError as error: 96 97 _log.error('Cannot open output file: {output_file} - Error: {error}'.format(output_file=output_file, error=error)) 97 _log.error('Results are:\n {result}'.format( json.dumps(results)))98 _log.error('Results are:\n {result}'.format(result=json.dumps(results))) 98 99 99 100 @classmethod … … 130 131 131 132 @classmethod 132 def _show_results(cls, results):133 def show_results(cls, results, scale_unit=True): 133 134 results = BenchmarkResults(results) 134 print results.format( )135 print results.format(scale_unit) -
trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py
r188092 r190779 2 2 3 3 import argparse 4 import json 4 5 import logging 5 6 import platform … 17 18 parser.add_argument('--output-file', dest='output', default=None) 18 19 parser.add_argument('--build-directory', dest='buildDir', help='Path to the browser executable. e.g. WebKitBuild/Release/') 19 parser.add_argument('--plan', dest='plan', required=True, help='Benchmark plan to run. e.g. speedometer, jetstream') 20 parser.add_argument('--platform', dest='platform', required=True, choices=BrowserDriverFactory.available_platforms()) 20 parser.add_argument('--platform', dest='platform', default='osx', choices=BrowserDriverFactory.available_platforms()) 21 21 # FIXME: Should we add chrome as an option? Well, chrome uses webkit in iOS. 22 parser.add_argument('--browser', dest='browser', required=True, choices=BrowserDriverFactory.available_browsers())22 parser.add_argument('--browser', dest='browser', default='safari', choices=BrowserDriverFactory.available_browsers()) 23 23 parser.add_argument('--debug', action='store_true') 24 24 parser.add_argument('--local-copy', dest='localCopy', help='Path to a local copy of the benchmark. e.g. PerformanceTests/SunSpider/') 25 25 parser.add_argument('--count', dest='countOverride', type=int, help='Number of times to run the benchmark. e.g. 5') 26 26 parser.add_argument('--device-id', dest='device_id', default=None) 27 parser.add_argument('--no-adjust-unit', dest='scale_unit', action='store_false') 28 mutual_group = parser.add_mutually_exclusive_group(required=True) 29 mutual_group.add_argument('--read-results-json', dest='json_file', help='Specify file you want to format') 30 mutual_group.add_argument('--plan', dest='plan', help='Benchmark plan to run. e.g. speedometer, jetstream') 27 31 28 32 args = parser.parse_args() … 39 43 40 44 def start(args): 41 runner = BenchmarkRunner(args.plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.device_id) 45 if args.json_file: 46 BenchmarkRunner.show_results(json.load(open(args.json_file, 'r')), args.scale_unit) 47 return 48 runner = BenchmarkRunner(args.plan, args.localCopy, args.countOverride, args.buildDir, args.output, args.platform, args.browser, args.scale_unit, args.device_id) 42 49 runner.execute() 43 50
Note: See TracChangeset
for help on using the changeset viewer.