Changeset 79837 in webkit


Timestamp: Feb 27, 2011 7:55:14 PM
Author: ojan@chromium.org
Message:

2011-02-25 Ojan Vafai <ojan@chromium.org>

Reviewed by Tony Chang.

Change results.json format to the one used by unexpected_results.json
https://bugs.webkit.org/show_bug.cgi?id=52267

Also add runtimes in milliseconds to the JSON and make the output format more compact.
Named the file full_results.json to avoid conflicting with the results.json
file the test-results server currently serves up.

  • Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py:
  • Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py:
  • Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py:
  • Scripts/webkitpy/layout_tests/layout_package/printing.py:
  • Scripts/webkitpy/layout_tests/layout_package/result_summary.py:
  • Scripts/webkitpy/layout_tests/layout_package/test_runner.py:
Location: trunk/Tools
Files: 7 edited
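
As a rough sketch of the new format (hypothetical test name and timing, and assuming the single-character result codes and the "e"/"a"/"t" keys introduced in the diffs below), the compacted full_results.json payload looks something like:

    # Hypothetical example of the compacted format; name and values invented.
    # "e" = expected, "a" = actual (single-char result codes), "t" = runtime in ms.
    example = {
        "version": 1,
        "tests": {
            "fast/dom/example.html": {"e": "P", "a": "C", "t": 1200},
        },
    }

The uncompacted dict built by summarize_results spells out "expected"/"actual"/"time_ms" in full; the compaction in test_runner.py shrinks it before upload.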

  • trunk/Tools/ChangeLog (r79833 → r79837)

+2011-02-25  Ojan Vafai  <ojan@chromium.org>
+
+        Reviewed by Tony Chang.
+
+        Change results.json format to the one used by unexpected_results.json
+        https://bugs.webkit.org/show_bug.cgi?id=52267
+
+        Also add runtimes in milliseconds to the JSON and make the output format more compact.
+        Named the file full_results.json to avoid conflicting with the results.json
+        file the test-results server currently serves up.
+
+        * Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py:
+        * Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py:
+        * Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py:
+        * Scripts/webkitpy/layout_tests/layout_package/printing.py:
+        * Scripts/webkitpy/layout_tests/layout_package/result_summary.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_runner.py:
+
 2011-02-27  Adam Roben  <aroben@apple.com>
  • trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py (r78012 → r79837)

     WONTFIX = "wontfixCounts"

-    # Note that we omit test_expectations.FAIL from this list because
-    # it should never show up (it's a legacy input expectation, never
-    # an output expectation).
-    FAILURE_TO_CHAR = {test_expectations.CRASH: "C",
+    FAILURE_TO_CHAR = {test_expectations.PASS: json_results_generator.JSONResultsGeneratorBase.PASS_RESULT,
+                       test_expectations.SKIP: json_results_generator.JSONResultsGeneratorBase.SKIP_RESULT,
+                       test_expectations.FAIL: "Y",
+                       test_expectations.CRASH: "C",
                        test_expectations.TIMEOUT: "T",
                        test_expectations.IMAGE: "I",
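
For orientation, FAILURE_TO_CHAR collapses each result type to one character so a sequence of results becomes a short string. A minimal sketch of the idea, with plain ints standing in for the test_expectations enums (the real map keys off those constants and takes its PASS/SKIP codes from JSONResultsGeneratorBase):

    # Simplified stand-ins, not the real test_expectations values.
    PASS, CRASH, TIMEOUT, IMAGE = range(4)
    FAILURE_TO_CHAR = {PASS: "P", CRASH: "C", TIMEOUT: "T", IMAGE: "I"}

    def encode(results):
        # e.g. [PASS, CRASH] -> "PC"
        return "".join(FAILURE_TO_CHAR[r] for r in results)

    assert encode([PASS, CRASH]) == "PC"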
  • trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py (r78012 → r79837)

     RESULTS_FILENAME = "results.json"
+    FULL_RESULTS_FILENAME = "full_results.json"
     INCREMENTAL_RESULTS_FILENAME = "incremental_results.json"
…
         self._builder_base_url = builder_base_url
         self._results_directory = results_file_base_path
-        self._results_file_path = self._fs.join(results_file_base_path,
-            self.RESULTS_FILENAME)
-        self._incremental_results_file_path = self._fs.join(
-            results_file_base_path, self.INCREMENTAL_RESULTS_FILENAME)

         self._test_results_map = test_results_map
…
         json = self.get_json()
         if json:
-            self._generate_json_file(
-                json, self._incremental_results_file_path)
+            self._generate_json_file(json, self.INCREMENTAL_RESULTS_FILENAME)
+
+    def generate_full_results_file(self):
+        # Use the same structure as the compacted version of TestRunner.summarize_results.
+        # For now we only include the times as this is only used for treemaps and
+        # expected/actual don't make sense for gtests.
+        results = {}
+        results['version'] = 1
+
+        tests = {}
+
+        for test in self._test_results_map:
+            time_seconds = self._test_results_map[test].time
+            tests[test] = {}
+            tests[test]['t'] = int(1000 * time_seconds)
+
+        results['tests'] = tests
+        self._generate_json_file(results, self.FULL_RESULTS_FILENAME)

     def get_json(self):
…
         _log.info("JSON files uploaded.")

-    def _generate_json_file(self, json, file_path):
+    def _generate_json_file(self, json, filename):
         # Specify separators in order to get compact encoding.
         json_data = simplejson.dumps(json, separators=(',', ':'))
         json_string = self.JSON_PREFIX + json_data + self.JSON_SUFFIX
+        file_path = self._fs.join(self._results_directory, filename)
         self._fs.write_text_file(file_path, json_string)
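
To see what generate_full_results_file produces, here is a standalone sketch of the same computation (a bare seconds value stands in for the TestResult objects held in self._test_results_map, and the JSON_PREFIX/JSON_SUFFIX wrapping applied by _generate_json_file is omitted):

    import json  # the real code uses simplejson with the same separators

    # test name -> runtime in seconds (stub for self._test_results_map)
    test_results_map = {"fast/dom/example.html": 0.1234}

    results = {"version": 1, "tests": {}}
    for test, time_seconds in test_results_map.items():
        results["tests"][test] = {"t": int(1000 * time_seconds)}

    # Compact encoding, as in _generate_json_file:
    print(json.dumps(results, separators=(",", ":")))
    # -> {"version":1,"tests":{"fast/dom/example.html":{"t":123}}}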
  • trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py (r78012 → r79837)

             1)

+        # We don't verify the results here, but at least we make sure the code runs without errors.
+        generator.generate_json_output()
+        generator.generate_full_results_file()
+
     def _verify_json_results(self, tests_set, test_timings, failed_count_map,
                              PASS_count, DISABLED_count, FLAKY_count,
  • trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/printing.py (r76288 → r79837)

                 self._current_progress_str += "."

-            if (next_test in result_summary.unexpected_results and
+            if (next_test in result_summary.unexpected_results.type and
                 self.enabled('unexpected')):
                 self._meter.write("%s\n" % self._current_progress_str)
  • trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py (r74525 → r79837)

             self.expected += 1
         else:
-            self.unexpected_results[result.filename] = result.type
+            self.unexpected_results[result.filename] = result
             self.unexpected += 1
             if len(result.failures):
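
Since unexpected_results now maps filenames to whole result objects rather than bare type enums, consumers read the type off the object and can reach its other fields too. A tiny sketch with a hypothetical stand-in class:

    class TestResult(object):  # simplified stand-in for the real TestResult
        def __init__(self, filename, type, time):
            self.filename, self.type, self.time = filename, type, time

    unexpected_results = {}
    result = TestResult("fast/dom/example.html", "TEXT", 0.12)
    unexpected_results[result.filename] = result          # was: = result.type
    assert unexpected_results[result.filename].type == "TEXT"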
  • trunk/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py (r78012 → r79837)

 from __future__ import with_statement

+import copy
 import errno
 import logging
…

-def summarize_unexpected_results(port_obj, expectations, result_summary,
-                                 retry_summary):
+def summarize_results(port_obj, expectations, result_summary, retry_summary, test_timings, only_unexpected):
     """Summarize any unexpected results as a dict.
…
         result_summary: summary object from initial test runs
         retry_summary: summary object from final test run of retried tests
+        test_timings: a list of TestResult objects which contain test runtimes in seconds
+        only_unexpected: whether to return a summary only for the unexpected results
     Returns:
         A dictionary containing a summary of the unexpected results from the
…
         'num_flaky': # of flaky failures
         'num_passes': # of unexpected passes
-        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+        'tests': a dict of tests -> {'expected': '...', 'actual': '...', 'time_ms': ...}
     """
     results = {}
     results['version'] = 1
+
+    test_timings_map = dict((test_result.filename, test_result.test_run_time) for test_result in test_timings)

     tbe = result_summary.tests_by_expectation
…
     num_regressions = 0
     keywords = {}
-    for k, v in TestExpectationsFile.EXPECTATIONS.iteritems():
-        keywords[v] = k.upper()
+    for expecation_string, expectation_enum in TestExpectationsFile.EXPECTATIONS.iteritems():
+        keywords[expectation_enum] = expecation_string.upper()
+
+    for modifier_string, modifier_enum in TestExpectationsFile.MODIFIERS.iteritems():
+        keywords[modifier_enum] = modifier_string.upper()

     tests = {}
-    for filename, result in result_summary.unexpected_results.iteritems():
+    original_results = result_summary.unexpected_results if only_unexpected else result_summary.results
+
+    for filename, result in original_results.iteritems():
         # Note that if a test crashed in the original run, we ignore
         # whether or not it crashed when we retried it (if we retried it),
…
         test = port_obj.relative_test_filename(filename)
         expected = expectations.get_expectations_string(filename)
-        actual = [keywords[result]]
-
-        if result == test_expectations.PASS:
+        result_type = result.type
+        actual = [keywords[result_type]]
+
+        if result_type == test_expectations.PASS:
             num_passes += 1
-        elif result == test_expectations.CRASH:
+        elif result_type == test_expectations.CRASH:
             num_regressions += 1
-        else:
+        elif filename in result_summary.unexpected_results:
             if filename not in retry_summary.unexpected_results:
-                actual.extend(expectations.get_expectations_string(
-                    filename).split(" "))
+                actual.extend(expectations.get_expectations_string(filename).split(" "))
                 num_flaky += 1
             else:
-                retry_result = retry_summary.unexpected_results[filename]
-                if result != retry_result:
-                    actual.append(keywords[retry_result])
+                retry_result_type = retry_summary.unexpected_results[filename].type
+                if result_type != retry_result_type:
+                    actual.append(keywords[retry_result_type])
                     num_flaky += 1
                 else:
…
         tests[test]['expected'] = expected
         tests[test]['actual'] = " ".join(actual)
+
+        if filename in test_timings_map:
+            time_seconds = test_timings_map[filename]
+            tests[test]['time_ms'] = int(1000 * time_seconds)

     results['tests'] = tests
…
                                              result_summary.unexpected)

-        unexpected_results = summarize_unexpected_results(self._port,
-            self._expectations, result_summary, retry_summary)
+        unexpected_results = summarize_results(self._port,
+            self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=True)
         self._printer.print_unexpected_results(unexpected_results)
…
             # Write the same data to log files and upload generated JSON files
             # to appengine server.
-            self._upload_json_files(unexpected_results, result_summary,
+            summarized_results = summarize_results(self._port,
+                self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=False)
+            self._upload_json_files(unexpected_results, summarized_results, result_summary,
                                     individual_test_timings)
…
         failed_results = {}
         for test, result in result_summary.unexpected_results.iteritems():
-            if (result == test_expectations.PASS or
-                result == test_expectations.CRASH and not include_crashes):
+            if (result.type == test_expectations.PASS or
+                result.type == test_expectations.CRASH and not include_crashes):
                 continue
-            failed_results[test] = result
+            failed_results[test] = result.type

         return failed_results

-    def _upload_json_files(self, unexpected_results, result_summary,
+    def _char_for_result(self, result):
+        result = result.lower()
+        if result in TestExpectationsFile.EXPECTATIONS:
+            result_enum_value = TestExpectationsFile.EXPECTATIONS[result]
+        else:
+            result_enum_value = TestExpectationsFile.MODIFIERS[result]
+        return json_layout_results_generator.JSONLayoutResultsGenerator.FAILURE_TO_CHAR[result_enum_value]
+
+    def _dump_summarized_result(self, filename, results):
+        """Compacts the results and dumps them to a file as JSON.
+
+        Args:
+          filename: filename to dump the JSON to
+          results: dict of results as returned by the summarize_results function
+        """
+        new_results = copy.deepcopy(results)
+
+        # Compact the results since we'll be uploading this to the test-results server.
+        # This shrinks the file size by ~20%.
+        # actual --> a
+        # expected --> e
+        # time --> t
+        # The results are shrunken as per the FAILURE_TO_CHAR map, e.g., "PASS CRASH" --> "PC"
+        for test in new_results['tests']:
+            result = new_results['tests'][test]
+
+            result['a'] = ''.join([self._char_for_result(actual) for actual in result['actual'].split(' ')])
+            del(result['actual'])
+
+            result['e'] = ''.join([self._char_for_result(expected) for expected in result['expected'].split(' ')])
+            del(result['expected'])
+
+            if 'time_ms' in result:
+                result['t'] = result['time_ms']
+                del(result['time_ms'])
+
+        unexpected_json_path = self._fs.join(self._options.results_directory, filename)
+        with self._fs.open_text_file_for_writing(unexpected_json_path) as file:
+            simplejson.dump(new_results, file, sort_keys=True, separators=(',', ':'))
+
+    def _upload_json_files(self, unexpected_results, summarized_results, result_summary,
                            individual_test_timings):
         """Writes the results of the test run as JSON files into the results
…
         Args:
           unexpected_results: dict of unexpected results
+          summarized_results: dict of results
           result_summary: full summary object
           individual_test_timings: list of test times (used by the flakiness
             dashboard).
         """
-        results_directory = self._options.results_directory
-        _log.debug("Writing JSON files in %s." % results_directory)
-        unexpected_json_path = self._fs.join(results_directory, "unexpected_results.json")
-        with self._fs.open_text_file_for_writing(unexpected_json_path) as file:
-            simplejson.dump(unexpected_results, file, sort_keys=True, indent=2)
+        _log.debug("Writing JSON files in %s." % self._options.results_directory)
+
+        self._dump_summarized_result("unexpected_results.json", unexpected_results)
+        self._dump_summarized_result("full_results.json", summarized_results)

         # Write a json file of the test_expectations.txt file for the layout
         # tests dashboard.
-        expectations_path = self._fs.join(results_directory, "expectations.json")
+        expectations_path = self._fs.join(self._options.results_directory, "expectations.json")
         expectations_json = \
             self._expectations.get_expectations_json_for_all_platforms()
…
         _log.debug("Finished writing JSON files.")

-        json_files = ["expectations.json", "incremental_results.json"]
+        json_files = ["expectations.json", "incremental_results.json", "full_results.json"]

         generator.upload_json_files(json_files)
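
To make the _dump_summarized_result compaction concrete, a self-contained sketch of the same transformation (the char map below is a simplified stand-in for the FAILURE_TO_CHAR lookup done via _char_for_result, and json stands in for simplejson):

    import copy
    import json

    CHAR_FOR_RESULT = {"PASS": "P", "CRASH": "C", "TEXT": "F"}  # simplified stand-in

    def compact(results):
        new_results = copy.deepcopy(results)
        for result in new_results["tests"].values():
            # actual --> a, expected --> e (space-separated keywords to char strings)
            result["a"] = "".join(CHAR_FOR_RESULT[r] for r in result.pop("actual").split(" "))
            result["e"] = "".join(CHAR_FOR_RESULT[r] for r in result.pop("expected").split(" "))
            # time_ms --> t
            if "time_ms" in result:
                result["t"] = result.pop("time_ms")
        return new_results

    summarized = {"version": 1, "tests": {
        "fast/dom/example.html": {"expected": "PASS", "actual": "TEXT", "time_ms": 120}}}
    print(json.dumps(compact(summarized), sort_keys=True, separators=(",", ":")))
    # -> {"tests":{"fast/dom/example.html":{"a":"F","e":"P","t":120}},"version":1}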