Changeset 115466 in webkit
Timestamp: Apr 27, 2012 12:21:16 PM
Location: trunk/Tools
Files: 6 edited
trunk/Tools/ChangeLog
r115453 → r115466

+2012-04-27  Ryosuke Niwa  <rniwa@webkit.org>
+
+        Chromium Windows Perf bots timeout due to no output
+        https://bugs.webkit.org/show_bug.cgi?id=84940
+
+        Reviewed by Dirk Pranke.
+
+        Dirk and I investigated the issue on the bot, but we couldn't figure out what was going wrong.
+        Since run-perf-tests doesn't need any of the fancy features the printer provides, just use Python's
+        built-in logging module instead. Printing to stdout and stderr seems to work, so hopefully
+        this will fix the issue on the bot.
+
+        * Scripts/run-perf-tests:
+        * Scripts/webkitpy/performance_tests/perftest.py:
+        (PerfTest.run):
+        (PerfTest.run_failed):
+        (PerfTest.parse_output):
+        (PerfTest.output_statistics):
+        (ChromiumStylePerfTest.parse_output):
+        (PageLoadingPerfTest.run):
+        * Scripts/webkitpy/performance_tests/perftest_unittest.py:
+        (MainTest.test_parse_output):
+        (MainTest.test_parse_output_with_failing_line):
+        (TestPageLoadingPerfTest):
+        (TestPageLoadingPerfTest.test_run):
+        (TestPageLoadingPerfTest.test_run_with_bad_output):
+        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+        (PerfTestsRunner.__init__):
+        (PerfTestsRunner._parse_args):
+        (PerfTestsRunner.run):
+        (PerfTestsRunner._upload_json):
+        (PerfTestsRunner._print_status):
+        (PerfTestsRunner._run_tests_set):
+        (PerfTestsRunner._run_single_test):
+        * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
+        (create_runner):
+        (test_run_test_set):
+        (test_run_test_set_kills_drt_per_run):
+        (test_run_test_pause_before_testing):
+        (test_run_test_set_for_parser_tests):
+        (test_run_test_set_with_json_output):
+        (test_run_test_set_with_json_source):
+        (test_run_test_set_with_multiple_repositories):
+        (test_upload_json):
+        (test_parse_args):
+
 2012-04-27  Dirk Pranke  <dpranke@chromium.org>
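The heart of the patch is swapping webkitpy's Printer object for the standard logging module. A minimal, runnable sketch of the pattern the patch adopts (the report() helper is illustrative; only the logging calls and the basicConfig line come from the actual change):

    import logging

    # Module-level logger, as perftest.py and perftestsrunner.py now do.
    _log = logging.getLogger(__name__)

    def report(test_name, value, unit):
        # Results that previously went to a buildbot_output stream now go
        # through the logger at INFO level.
        _log.info('RESULT %s= %s %s' % (test_name, value, unit))

    if __name__ == '__main__':
        # The same configuration run-perf-tests installs: INFO level,
        # message-only format, so output looks like plain prints.
        logging.basicConfig(level=logging.INFO, format="%(message)s")
        report('some-test', 1100.0, 'ms')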
trunk/Tools/Scripts/run-perf-tests
r104856 → r115466

 #!/usr/bin/env python
-# Copyright (C) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2012 Google Inc. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
…
 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
 
-_log = logging.getLogger(__name__)
-
 if '__main__' == __name__:
     logging.basicConfig(level=logging.INFO, format="%(message)s")
trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py
r115410 → r115466

+import logging
 import math
 import re
 
 from webkitpy.layout_tests.port.driver import DriverInput
+
+
+_log = logging.getLogger(__name__)
…
         return self._path_or_url
 
-    def run(self, driver, timeout_ms, printer, buildbot_output):
+    def run(self, driver, timeout_ms):
         output = driver.run_test(DriverInput(self.path_or_url(), timeout_ms, None, False))
-        if self.run_failed(output, printer):
+        if self.run_failed(output):
             return None
-        return self.parse_output(output, printer, buildbot_output)
-
-    def run_failed(self, output, printer):
+        return self.parse_output(output)
+
+    def run_failed(self, output):
         if output.text == None or output.error:
             pass
         elif output.timeout:
-            printer.write('timeout: %s' % self.test_name())
+            _log.error('timeout: %s' % self.test_name())
         elif output.crash:
-            printer.write('crash: %s' % self.test_name())
+            _log.error('crash: %s' % self.test_name())
         else:
             return False
 
         if output.error:
-            printer.write('error: %s\n%s' % (self.test_name(), output.error))
+            _log.error('error: %s\n%s' % (self.test_name(), output.error))
 
         return True
…
         return False
 
-    def parse_output(self, output, printer, buildbot_output):
+    def parse_output(self, output):
         got_a_result = False
         test_failed = False
…
             if not self._should_ignore_line_in_parser_test_result(line):
                 test_failed = True
-                printer.write("%s" % line)
+                _log.error(line)
 
         if test_failed or set(self._statistics_keys) != set(results.keys()):
…
         test_name = re.sub(r'\.\w+$', '', self._test_name)
-        self.output_statistics(test_name, results, buildbot_output)
+        self.output_statistics(test_name, results)
 
         return {test_name: results}
 
-    def output_statistics(self, test_name, results, buildbot_output):
+    def output_statistics(self, test_name, results):
         unit = results['unit']
-        buildbot_output.write('RESULT %s= %s %s\n' % (test_name.replace('/', ': '), results['avg'], unit))
-        buildbot_output.write(', '.join(['%s= %s %s' % (key, results[key], unit) for key in self._statistics_keys[1:]]) + '\n')
+        _log.info('RESULT %s= %s %s' % (test_name.replace('/', ': '), results['avg'], unit))
+        _log.info(', '.join(['%s= %s %s' % (key, results[key], unit) for key in self._statistics_keys[1:]]))
…
         super(ChromiumStylePerfTest, self).__init__(test_name, path_or_url)
 
-    def parse_output(self, output, printer, buildbot_output):
+    def parse_output(self, output):
         test_failed = False
         got_a_result = False
…
                 # FIXME: Store the unit
                 results[self.test_name() + ':' + resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
-                buildbot_output.write("%s\n" % line)
+                _log.info(line)
             elif not len(line) == 0:
                 test_failed = True
-                printer.write("%s" % line)
+                _log.error(line)
         return results if results and not test_failed else None
…
         super(PageLoadingPerfTest, self).__init__(test_name, path_or_url)
 
-    def run(self, driver, timeout_ms, printer, buildbot_output):
+    def run(self, driver, timeout_ms):
         test_times = []
 
         for i in range(0, 20):
             output = driver.run_test(DriverInput(self.path_or_url(), timeout_ms, None, False))
-            if self.run_failed(output, printer):
+            if self.run_failed(output):
                 return None
             if i == 0:
…
             'stdev': math.sqrt(squareSum),
             'unit': 'ms'}
-        self.output_statistics(self.test_name(), results, buildbot_output)
+        self.output_statistics(self.test_name(), results)
         return {self.test_name(): results}
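Boiled down, the new statistics output path looks like the sketch below. The key ordering is an assumption inferred from the expected strings in the unit tests, not copied from PerfTest itself:

    import logging

    logging.basicConfig(level=logging.INFO, format="%(message)s")
    _log = logging.getLogger(__name__)

    # Assumed ordering of PerfTest._statistics_keys (hypothetical here).
    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max']

    def output_statistics(test_name, results):
        # Mirrors the post-patch method: _log.info instead of buildbot_output.write.
        unit = results['unit']
        _log.info('RESULT %s= %s %s' % (test_name.replace('/', ': '), results['avg'], unit))
        _log.info(', '.join(['%s= %s %s' % (key, results[key], unit) for key in _statistics_keys[1:]]))

    output_statistics('some-test', {'avg': 1100.0, 'median': 1101.0, 'stdev': 11.0,
                                    'min': 1080.0, 'max': 1120.0, 'unit': 'ms'})
    # Prints:
    # RESULT some-test= 1100.0 ms
    # median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms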
trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
r115410 → r115466

 import unittest
 
+from webkitpy.common.system.outputcapture import OutputCapture
 from webkitpy.layout_tests.port.driver import DriverOutput
 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
…
-class MockPrinter(object):
-    def __init__(self):
-        self.written_lines = []
-
-    def write(self, line):
-        self.written_lines.append(line)
-
-
 class MainTest(unittest.TestCase):
     def test_parse_output(self):
-        printer = MockPrinter()
-        buildbot_output = StringIO.StringIO()
         output = DriverOutput('\n'.join([
             'Running 20 times',
…
             'min 1080',
             'max 1120']), image=None, image_hash=None, audio=None)
-        test = PerfTest('some-test', '/path/some-dir/some-test')
-        self.assertEqual(test.parse_output(output, printer, buildbot_output),
-            {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
-        self.assertEqual(printer.written_lines, [])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            test = PerfTest('some-test', '/path/some-dir/some-test')
+            self.assertEqual(test.parse_output(output),
+                {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
 
     def test_parse_output_with_failing_line(self):
-        printer = MockPrinter()
-        buildbot_output = StringIO.StringIO()
         output = DriverOutput('\n'.join([
             'Running 20 times',
…
             'min 1080',
             'max 1120']), image=None, image_hash=None, audio=None)
-        test = PerfTest('some-test', '/path/some-dir/some-test')
-        self.assertEqual(test.parse_output(output, printer, buildbot_output), None)
-        self.assertEqual(printer.written_lines, ['some-unrecognizable-line'])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            test = PerfTest('some-test', '/path/some-dir/some-test')
+            self.assertEqual(test.parse_output(output), None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'some-unrecognizable-line\n')
 
 
 class TestPageLoadingPerfTest(unittest.TestCase):
-    def assertWritten(self, stream, contents):
-        self.assertEquals(stream.buflist, contents)
-
     class MockDriver(object):
         def __init__(self, values):
…
     def test_run(self):
-        printer = MockPrinter()
-        buildbot_output = StringIO.StringIO()
         test = PageLoadingPerfTest('some-test', '/path/some-dir/some-test')
         driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
-        self.assertEqual(test.run(driver, None, printer, buildbot_output),
-            {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms'}})
-        self.assertEqual(printer.written_lines, [])
-        self.assertWritten(buildbot_output, ['RESULT some-test= 11000.0 ms\n', 'median= 11000 ms, stdev= 23874.6727726 ms, min= 2000 ms, max= 20000 ms\n'])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(test.run(driver, None),
+                {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms'}})
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 23874.6727726 ms, min= 2000 ms, max= 20000 ms\n')
 
     def test_run_with_bad_output(self):
-        printer = MockPrinter()
-        buildbot_output = StringIO.StringIO()
-        test = PageLoadingPerfTest('some-test', '/path/some-dir/some-test')
-        driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
-        self.assertEqual(test.run(driver, None, printer, buildbot_output), None)
-        self.assertEqual(printer.written_lines, ['error: some-test\nsome error'])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            test = PageLoadingPerfTest('some-test', '/path/some-dir/some-test')
+            driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
+            self.assertEqual(test.run(driver, None), None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'error: some-test\nsome error\n')
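The tests above assert on log output captured by webkitpy's OutputCapture. With only the standard library, the same idea looks roughly like this; LogCapture is a hypothetical stand-in, not OutputCapture's real implementation (for one thing, it does not restore the previous log level):

    import logging
    import unittest
    from io import StringIO

    class LogCapture(object):
        """Collect log records via a temporary handler on the root logger."""

        def capture_output(self):
            self._buffer = StringIO()
            self._handler = logging.StreamHandler(self._buffer)
            self._handler.setFormatter(logging.Formatter("%(message)s"))
            logging.getLogger().addHandler(self._handler)
            logging.getLogger().setLevel(logging.INFO)

        def restore_output(self):
            logging.getLogger().removeHandler(self._handler)
            return self._buffer.getvalue()

    class ExampleTest(unittest.TestCase):
        def test_logs(self):
            capture = LogCapture()
            capture.capture_output()
            try:
                logging.getLogger('example').info('RESULT some-test= 1100.0 ms')
            finally:
                logs = capture.restore_output()
            self.assertEqual(logs, 'RESULT some-test= 1100.0 ms\n')

    if __name__ == '__main__':
        unittest.main()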
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
r115410 → r115466

     _EXIT_CODE_FAILED_UPLOADING = -3
 
-    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
-        self._buildbot_output = buildbot_output
+    def __init__(self, args=None, port=None):
         self._options, self._args = PerfTestsRunner._parse_args(args)
         if port:
…
         self._port = self._host.port_factory.get(self._options.platform, self._options)
         self._host._initialize_scm()
-        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output)
         self._webkit_base_dir_len = len(self._port.webkit_base())
         self._base_path = self._port.perf_tests_dir()
…
     @staticmethod
     def _parse_args(args=None):
-        print_options = printing.print_options()
-
         perf_option_list = [
             optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
…
                 help="Use WebKitTestRunner rather than DumpRenderTree."),
         ]
-
-        option_list = (perf_option_list + print_options)
-        return optparse.OptionParser(option_list=option_list).parse_args(args)
+        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
 
     def _collect_tests(self):
…
     def run(self):
-        if self._options.help_printing:
-            self._printer.help_printing()
-            self._printer.cleanup()
-            return 0
-
         if not self._port.check_build(needs_http=False):
             _log.error("Build not up to date for %s" % self._port._path_to_driver())
…
         # in a try/finally to ensure that we clean up the logging configuration.
         unexpected = -1
-        try:
-            tests = self._collect_tests()
-            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
-        finally:
-            self._printer.cleanup()
+        tests = self._collect_tests()
+        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
 
         options = self._options
…
             return False
 
-        self._printer.write("JSON file uploaded.")
+        _log.info("JSON file uploaded.")
         return True
…
         if unexpected:
             status += " (%d didn't run)" % unexpected
-        self._printer.write(status)
+        _log.info(status)
 
     def _run_tests_set(self, tests, port):
…
             return unexpected
 
-        self._printer.write('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
+        _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
         if self._run_single_test(test, driver):
             expected = expected + 1
…
             unexpected = unexpected + 1
 
-        self._printer.write('')
+        _log.info('')
 
         driver.stop()
…
         start_time = time.time()
 
-        new_results = test.run(driver, self._options.time_out_ms, self._printer, self._buildbot_output)
+        new_results = test.run(driver, self._options.time_out_ms)
         if new_results:
             self._results.update(new_results)
         else:
-            self._printer.write('FAILED')
-
-        self._printer.write("Finished: %f s" % (time.time() - start_time))
+            _log.error('FAILED')
+
+        _log.debug("Finished: %f s" % (time.time() - start_time))
 
         return new_results != None
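A side effect of the conversion is that verbosity becomes a matter of log level: per-test progress is logged at INFO, failures at ERROR, and per-test timing at DEBUG, so the timing lines are invisible under the default INFO configuration. A runnable sketch of that _run_single_test flow (run_test here is a stand-in for the real driver invocation):

    import logging
    import time

    _log = logging.getLogger(__name__)

    def run_single_test(run_test):
        # run_test: any callable returning a results dict, or None on failure.
        start_time = time.time()
        new_results = run_test()
        if new_results is None:
            _log.error('FAILED')
        # DEBUG-level, so hidden unless the level is lowered below INFO.
        _log.debug("Finished: %f s" % (time.time() - start_time))
        return new_results is not None

    if __name__ == '__main__':
        logging.basicConfig(level=logging.DEBUG, format="%(message)s")
        run_single_test(lambda: {'some-test': {'avg': 42.0, 'unit': 'ms'}})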
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
r115410 → r115466

         """do nothing"""
 
-    def create_runner(self, buildbot_output=None, args=[], regular_output=None, driver_class=TestDriver):
-        buildbot_output = buildbot_output or StringIO.StringIO()
-        regular_output = regular_output or StringIO.StringIO()
-
+    def create_runner(self, args=[], driver_class=TestDriver):
         options, parsed_args = PerfTestsRunner._parse_args(args)
         test_port = TestPort(host=MockHost(), options=options)
         test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
 
-        runner = PerfTestsRunner(regular_output, buildbot_output, args=args, port=test_port)
+        runner = PerfTestsRunner(args=args, port=test_port)
         runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
         runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
…
     def test_run_test_set(self):
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output)
+        runner = self.create_runner()
         tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
             'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
-        unexpected_result_count = runner._run_tests_set(tests, runner._port)
-        self.assertEqual(unexpected_result_count, len(tests) - 1)
-        self.assertWritten(buildbot_output, ['RESULT group_name: test_name= 42 ms\n'])
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner._run_tests_set(tests, runner._port)
+        finally:
+            stdout, stderr, log = output.restore_output()
+        self.assertEqual(unexpected_result_count, len(tests) - 1)
+        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
 
     def test_run_test_set_kills_drt_per_run(self):
 
         class TestDriverWithStopCount(MainTest.TestDriver):
             stop_count = 0
 
             def stop(self):
                 TestDriverWithStopCount.stop_count += 1
 
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output, driver_class=TestDriverWithStopCount)
+        runner = self.create_runner(driver_class=TestDriverWithStopCount)
+
         tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
             'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
-
         unexpected_result_count = runner._run_tests_set(tests, runner._port)
+
         self.assertEqual(TestDriverWithStopCount.stop_count, 6)
…
                 TestDriverWithStartCount.start_count += 1
 
-        buildbot_output = StringIO.StringIO()
-        regular_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output, args=["--pause-before-testing"], regular_output=regular_output, driver_class=TestDriverWithStartCount)
+        runner = self.create_runner(args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
         tests = self._tests_for_runner(runner, ['inspector/pass.html'])
 
+        output = OutputCapture()
+        output.capture_output()
         try:
-            output = OutputCapture()
-            output.capture_output()
             unexpected_result_count = runner._run_tests_set(tests, runner._port)
             self.assertEqual(TestDriverWithStartCount.start_count, 1)
         finally:
-            _, stderr, _ = output.restore_output()
-
-        self.assertTrue("Running inspector/pass.html (1 of 1)" in regular_output.getvalue())
+            stdout, stderr, log = output.restore_output()
+        self.assertEqual(stderr, "Ready to run test?\n")
+        self.assertEqual(log, "Running inspector/pass.html (1 of 1)\nRESULT group_name: test_name= 42 ms\n\n")
 
     def test_run_test_set_for_parser_tests(self):
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output)
+        runner = self.create_runner()
         tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
-        unexpected_result_count = runner._run_tests_set(tests, runner._port)
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner._run_tests_set(tests, runner._port)
+        finally:
+            stdout, stderr, log = output.restore_output()
         self.assertEqual(unexpected_result_count, 0)
-        self.assertWritten(buildbot_output, ['RESULT Bindings: event-target-wrapper= 1489.05 ms\n',
-                                             'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms\n',
-                                             'RESULT Parser: some-parser= 1100.0 ms\n',
-                                             'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n'])
+        self.assertEqual(log, '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
+                                         'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+                                         'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+                                         '',
+                                         'Running Parser/some-parser.html (2 of 2)',
+                                         'RESULT Parser: some-parser= 1100.0 ms',
+                                         'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
+                                         '', '']))
 
     def test_run_test_set_with_json_output(self):
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json'])
+        runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
         runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
         runner._host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
         runner._timestamp = 123456789
-        self.assertEqual(runner.run(), 0)
-        self.assertWritten(buildbot_output, ['RESULT Bindings: event-target-wrapper= 1489.05 ms\n',
-                                             'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms\n',
-                                             'RESULT group_name: test_name= 42 ms\n'])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(runner.run(), 0)
+        finally:
+            stdout, stderr, logs = output_capture.restore_output()
+
+        self.assertEqual(logs,
+            '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
+                       'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+                       'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+                       '',
+                       'Running inspector/pass.html (2 of 2)',
+                       'RESULT group_name: test_name= 42 ms',
+                       '', '']))
 
         self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
…
     def test_run_test_set_with_json_source(self):
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json',
-            '--source-json-path=/mock-checkout/source.json'])
+        runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json', '--source-json-path=/mock-checkout/source.json'])
         runner._host.filesystem.files['/mock-checkout/source.json'] = '{"key": "value"}'
         runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
         runner._host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
         runner._timestamp = 123456789
-        self.assertEqual(runner.run(), 0)
-        self.assertWritten(buildbot_output, ['RESULT Bindings: event-target-wrapper= 1489.05 ms\n',
-                                             'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms\n',
-                                             'RESULT group_name: test_name= 42 ms\n'])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(runner.run(), 0)
+        finally:
+            stdout, stderr, logs = output_capture.restore_output()
+
+        self.assertEqual(logs, '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
+                                          'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+                                          'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+                                          '',
+                                          'Running inspector/pass.html (2 of 2)',
+                                          'RESULT group_name: test_name= 42 ms',
+                                          '', '']))
 
         self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
…
     def test_run_test_set_with_multiple_repositories(self):
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json'])
+        runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
         runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
         runner._timestamp = 123456789
         runner._port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
         self.assertEqual(runner.run(), 0)
-
         self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
             "timestamp": 123456789, "results": {"inspector/pass.html:group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
…
     def test_upload_json(self):
-        regular_output = StringIO.StringIO()
-        runner = self.create_runner(regular_output=regular_output)
+        runner = self.create_runner()
         runner._host.filesystem.files['/mock-checkout/some.json'] = 'some content'
…
         runner = self.create_runner()
         options, args = PerfTestsRunner._parse_args([
-                '--verbose',
                 '--build-directory=folder42',
                 '--platform=platform42',
…
                 '--source-json-path=a/source.json',
                 '--test-results-server=somehost',
-                '--debug', 'an_arg'])
+                '--debug'])
         self.assertEqual(options.build, True)
-        self.assertEqual(options.verbose, True)
-        self.assertEqual(options.help_printing, None)
         self.assertEqual(options.build_directory, 'folder42')
         self.assertEqual(options.platform, 'platform42')
…
         self.assertEqual(options.time_out_ms, '42')
         self.assertEqual(options.configuration, 'Debug')
-        self.assertEqual(options.print_options, None)
         self.assertEqual(options.output_json_path, 'a/output.json')
         self.assertEqual(options.source_json_path, 'a/source.json')
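With the printer's print_options gone, _parse_args reduces to a plain optparse option list. A self-contained sketch of the pattern, using a subset of the real options (help strings are approximate):

    import optparse

    perf_option_list = [
        optparse.make_option('--debug', action='store_const', const='Debug',
                             dest='configuration', help='Use a Debug build'),
        optparse.make_option('--platform', help='Platform to use, e.g. chromium'),
        optparse.make_option('--output-json-path', help='Where to write results JSON'),
    ]
    options, args = optparse.OptionParser(option_list=perf_option_list).parse_args(
        ['--debug', '--platform=platform42', '--output-json-path=a/output.json'])

    assert options.configuration == 'Debug'
    assert options.platform == 'platform42'
    assert options.output_json_path == 'a/output.json'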