Changeset 79837 in WebKit
- Timestamp:
- Feb 27, 2011 7:55:14 PM (13 years ago)
- Location:
- trunk/Tools
- Files:
- 7 edited
Legend:
- Unmodified
- Added
- Removed
trunk/Tools/ChangeLog
r79833 r79837 1 2011-02-25 Ojan Vafai <ojan@chromium.org> 2 3 Reviewed by Tony Chang. 4 5 Change results.json format to the one used by unexpected_results.json 6 https://bugs.webkit.org/show_bug.cgi?id=52267 7 8 Also add runtimes in milliseconds to the JSON and make the output format more compact. 9 Named the file full_results.json to avoid conflicting with the results.json 10 file the test-results server currently serves up. 11 12 * Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py: 13 * Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py: 14 * Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py: 15 * Scripts/webkitpy/layout_tests/layout_package/printing.py: 16 * Scripts/webkitpy/layout_tests/layout_package/result_summary.py: 17 * Scripts/webkitpy/layout_tests/layout_package/test_runner.py: 18 1 19 2011-02-27 Adam Roben <aroben@apple.com> 2 20 -
trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
r78012 r79837 43 43 WONTFIX = "wontfixCounts" 44 44 45 # Note that we omit test_expectations.FAIL from this list because46 # it should never show up (it's a legacy input expectation, never47 # an output expectation).48 FAILURE_TO_CHAR = {test_expectations.CRASH: "C",45 FAILURE_TO_CHAR = {test_expectations.PASS: json_results_generator.JSONResultsGeneratorBase.PASS_RESULT, 46 test_expectations.SKIP: json_results_generator.JSONResultsGeneratorBase.SKIP_RESULT, 47 test_expectations.FAIL: "Y", 48 test_expectations.CRASH: "C", 49 49 test_expectations.TIMEOUT: "T", 50 50 test_expectations.IMAGE: "I", -
trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
r78012 r79837 110 110 111 111 RESULTS_FILENAME = "results.json" 112 FULL_RESULTS_FILENAME = "full_results.json" 112 113 INCREMENTAL_RESULTS_FILENAME = "incremental_results.json" 113 114 … … 152 153 self._builder_base_url = builder_base_url 153 154 self._results_directory = results_file_base_path 154 self._results_file_path = self._fs.join(results_file_base_path,155 self.RESULTS_FILENAME)156 self._incremental_results_file_path = self._fs.join(157 results_file_base_path, self.INCREMENTAL_RESULTS_FILENAME)158 155 159 156 self._test_results_map = test_results_map … … 173 170 json = self.get_json() 174 171 if json: 175 self._generate_json_file( 176 json, self._incremental_results_file_path) 172 self._generate_json_file(json, self.INCREMENTAL_RESULTS_FILENAME) 173 174 def generate_full_results_file(self): 175 # Use the same structure as the compacted version of TestRunner.summarize_results. 176 # For now we only include the times as this is only used for treemaps and 177 # expected/actual don't make sense for gtests. 178 results = {} 179 results['version'] = 1 180 181 tests = {} 182 183 for test in self._test_results_map: 184 time_seconds = self._test_results_map[test].time 185 tests[test] = {} 186 tests[test]['t'] = int(1000 * time_seconds) 187 188 results['tests'] = tests 189 self._generate_json_file(results, self.FULL_RESULTS_FILENAME) 177 190 178 191 def get_json(self): … … 250 263 _log.info("JSON files uploaded.") 251 264 252 def _generate_json_file(self, json, file _path):265 def _generate_json_file(self, json, filename): 253 266 # Specify separators in order to get compact encoding. 254 267 json_data = simplejson.dumps(json, separators=(',', ':')) 255 268 json_string = self.JSON_PREFIX + json_data + self.JSON_SUFFIX 269 file_path = self._fs.join(self._results_directory, filename) 256 270 self._fs.write_text_file(file_path, json_string) 257 271 -
trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
r78012 r79837 107 107 1) 108 108 109 # We don't verify the results here, but at least we make sure the code runs without errors. 110 generator.generate_json_output() 111 generator.generate_full_results_file() 112 109 113 def _verify_json_results(self, tests_set, test_timings, failed_count_map, 110 114 PASS_count, DISABLED_count, FLAKY_count, -
trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/printing.py
r76288 r79837 424 424 self._current_progress_str += "." 425 425 426 if (next_test in result_summary.unexpected_results and426 if (next_test in result_summary.unexpected_results.type and 427 427 self.enabled('unexpected')): 428 428 self._meter.write("%s\n" % self._current_progress_str) -
trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py
r74525 r79837 82 82 self.expected += 1 83 83 else: 84 self.unexpected_results[result.filename] = result .type84 self.unexpected_results[result.filename] = result 85 85 self.unexpected += 1 86 86 if len(result.failures): -
trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
r78012 r79837 38 38 from __future__ import with_statement 39 39 40 import copy 40 41 import errno 41 42 import logging … … 69 70 70 71 71 def summarize_unexpected_results(port_obj, expectations, result_summary, 72 retry_summary): 72 def summarize_results(port_obj, expectations, result_summary, retry_summary, test_timings, only_unexpected): 73 73 """Summarize any unexpected results as a dict. 74 74 … … 80 80 result_summary: summary object from initial test runs 81 81 retry_summary: summary object from final test run of retried tests 82 test_timings: a list of TestResult objects which contain test runtimes in seconds 83 only_unexpected: whether to return a summary only for the unexpected results 82 84 Returns: 83 85 A dictionary containing a summary of the unexpected results from the … … 89 91 'num_flaky': # of flaky failures 90 92 'num_passes': # of unexpected passes 91 'tests': a dict of tests -> {'expected': '...', 'actual': '...' }93 'tests': a dict of tests -> {'expected': '...', 'actual': '...', 'time_ms': ...} 92 94 """ 93 95 results = {} 94 96 results['version'] = 1 97 98 test_timings_map = dict((test_result.filename, test_result.test_run_time) for test_result in test_timings) 95 99 96 100 tbe = result_summary.tests_by_expectation … … 105 109 num_regressions = 0 106 110 keywords = {} 107 for k, v in TestExpectationsFile.EXPECTATIONS.iteritems(): 108 keywords[v] = k.upper() 111 for expecation_string, expectation_enum in TestExpectationsFile.EXPECTATIONS.iteritems(): 112 keywords[expectation_enum] = expecation_string.upper() 113 114 for modifier_string, modifier_enum in TestExpectationsFile.MODIFIERS.iteritems(): 115 keywords[modifier_enum] = modifier_string.upper() 109 116 110 117 tests = {} 111 for filename, result in result_summary.unexpected_results.iteritems(): 118 original_results = result_summary.unexpected_results if only_unexpected else result_summary.results 119 120 for filename, result in original_results.iteritems(): 112 121 # Note that if a test 
crashed in the original run, we ignore 113 122 # whether or not it crashed when we retried it (if we retried it), … … 115 124 test = port_obj.relative_test_filename(filename) 116 125 expected = expectations.get_expectations_string(filename) 117 actual = [keywords[result]] 118 119 if result == test_expectations.PASS: 126 result_type = result.type 127 actual = [keywords[result_type]] 128 129 if result_type == test_expectations.PASS: 120 130 num_passes += 1 121 elif result == test_expectations.CRASH:131 elif result_type == test_expectations.CRASH: 122 132 num_regressions += 1 123 el se:133 elif filename in result_summary.unexpected_results: 124 134 if filename not in retry_summary.unexpected_results: 125 actual.extend(expectations.get_expectations_string( 126 filename).split(" ")) 135 actual.extend(expectations.get_expectations_string(filename).split(" ")) 127 136 num_flaky += 1 128 137 else: 129 retry_result = retry_summary.unexpected_results[filename]130 if result != retry_result:131 actual.append(keywords[retry_result ])138 retry_result_type = retry_summary.unexpected_results[filename].type 139 if result_type != retry_result_type: 140 actual.append(keywords[retry_result_type]) 132 141 num_flaky += 1 133 142 else: … … 137 146 tests[test]['expected'] = expected 138 147 tests[test]['actual'] = " ".join(actual) 148 149 if filename in test_timings_map: 150 time_seconds = test_timings_map[filename] 151 tests[test]['time_ms'] = int(1000 * time_seconds) 139 152 140 153 results['tests'] = tests … … 684 697 result_summary.unexpected) 685 698 686 unexpected_results = summarize_ unexpected_results(self._port,687 self._expectations, result_summary, retry_summary )699 unexpected_results = summarize_results(self._port, 700 self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=True) 688 701 self._printer.print_unexpected_results(unexpected_results) 689 702 … … 694 707 # Write the same data to log files and upload generated JSON files 695 708 # 
to appengine server. 696 self._upload_json_files(unexpected_results, result_summary, 709 summarized_results = summarize_results(self._port, 710 self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=False) 711 self._upload_json_files(unexpected_results, summarized_results, result_summary, 697 712 individual_test_timings) 698 713 … … 783 798 failed_results = {} 784 799 for test, result in result_summary.unexpected_results.iteritems(): 785 if (result == test_expectations.PASS or786 result == test_expectations.CRASH and not include_crashes):800 if (result.type == test_expectations.PASS or 801 result.type == test_expectations.CRASH and not include_crashes): 787 802 continue 788 failed_results[test] = result 803 failed_results[test] = result.type 789 804 790 805 return failed_results 791 806 792 def _upload_json_files(self, unexpected_results, result_summary, 807 def _char_for_result(self, result): 808 result = result.lower() 809 if result in TestExpectationsFile.EXPECTATIONS: 810 result_enum_value = TestExpectationsFile.EXPECTATIONS[result] 811 else: 812 result_enum_value = TestExpectationsFile.MODIFIERS[result] 813 return json_layout_results_generator.JSONLayoutResultsGenerator.FAILURE_TO_CHAR[result_enum_value] 814 815 def _dump_summarized_result(self, filename, results): 816 """Compacts the results and dumps them to a file as JSON. 817 818 Args: 819 filename: filename to dump the JSON to 820 results: dict of results as returned by the summarize_results function 821 """ 822 new_results = copy.deepcopy(results) 823 824 # Compact the results since we'll be uploading this to the test-results server. 825 # This shrinks the file size by ~20%. 
826 # actual --> a 827 # expected --> e 828 # time --> t 829 # The results are shrunken as per the FAILURE_TO_CHAR map, e.g., "PASS CRASH" --> "PC" 830 for test in new_results['tests']: 831 result = new_results['tests'][test] 832 833 result['a'] = ''.join([self._char_for_result(actual) for actual in result['actual'].split(' ')]) 834 del(result['actual']) 835 836 result['e'] = ''.join([self._char_for_result(expected) for expected in result['expected'].split(' ')]) 837 del(result['expected']) 838 839 if 'time_ms' in result: 840 result['t'] = result['time_ms'] 841 del(result['time_ms']) 842 843 unexpected_json_path = self._fs.join(self._options.results_directory, filename) 844 with self._fs.open_text_file_for_writing(unexpected_json_path) as file: 845 simplejson.dump(new_results, file, sort_keys=True, separators=(',', ':')) 846 847 def _upload_json_files(self, unexpected_results, summarized_results, result_summary, 793 848 individual_test_timings): 794 849 """Writes the results of the test run as JSON files into the results … … 804 859 Args: 805 860 unexpected_results: dict of unexpected results 861 summarized_results: dict of results 806 862 result_summary: full summary object 807 863 individual_test_timings: list of test times (used by the flakiness 808 864 dashboard). 809 865 """ 810 results_directory = self._options.results_directory 811 _log.debug("Writing JSON files in %s." % results_directory) 812 unexpected_json_path = self._fs.join(results_directory, "unexpected_results.json") 813 with self._fs.open_text_file_for_writing(unexpected_json_path) as file: 814 simplejson.dump(unexpected_results, file, sort_keys=True, indent=2) 866 _log.debug("Writing JSON files in %s." 
% self._options.results_directory) 867 868 self._dump_summarized_result("unexpected_results.json", unexpected_results) 869 self._dump_summarized_result("full_results.json", summarized_results) 815 870 816 871 # Write a json file of the test_expectations.txt file for the layout 817 872 # tests dashboard. 818 expectations_path = self._fs.join( results_directory, "expectations.json")873 expectations_path = self._fs.join(self._options.results_directory, "expectations.json") 819 874 expectations_json = \ 820 875 self._expectations.get_expectations_json_for_all_platforms() … … 833 888 _log.debug("Finished writing JSON files.") 834 889 835 json_files = ["expectations.json", "incremental_results.json" ]890 json_files = ["expectations.json", "incremental_results.json", "full_results.json"] 836 891 837 892 generator.upload_json_files(json_files)
Note: See TracChangeset for help on using the changeset viewer.