Changeset 122659 in WebKit


Timestamp:
Jul 13, 2012 8:45:48 PM
Author:
dpranke@chromium.org
Message:

run-webkit-tests outputs the wrong number of tests executed when some are skipped.
https://bugs.webkit.org/show_bug.cgi?id=89894

Reviewed by Ojan Vafai.

Fix the logging of the actual number of tests run so that tests
that are skipped aren't included.

Also revamp the 'expected' output so we distinguish the number
of tests found from the number of tests run (to account for
--repeat-each and --iterations).

Covered by existing tests.
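
The counting distinction described above can be illustrated with a small, self-contained sketch (the helper name summarize_counts and its arguments are illustrative only, not part of the patch): skipped tests are subtracted from the "run" figure, while --repeat-each and --iterations only multiply how often each remaining test executes.

    # Illustrative sketch only; mirrors the counting in manager.py but is not the patch itself.
    def summarize_counts(num_all_test_files, num_to_run, repeat_each=1, iterations=1):
        num_skipped = num_all_test_files - num_to_run            # found but skipped
        runs_per_test = (repeat_each or 1) * (iterations or 1)   # --repeat-each * --iterations
        return {'found': num_all_test_files,
                'run': num_to_run,
                'skipped': num_skipped,
                'invocations': num_to_run * runs_per_test}

    # e.g. 100 tests found, 10 skipped, --repeat-each=2:
    # summarize_counts(100, 90, repeat_each=2)
    # -> {'found': 100, 'run': 90, 'skipped': 10, 'invocations': 180}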

  • Scripts/webkitpy/layout_tests/controllers/manager.py:

(Manager.prepare_lists_and_print_output):
(Manager._log_num_workers):
(Manager.run):
(Manager._print_result_summary):

  • Scripts/webkitpy/layout_tests/models/result_summary.py:

(ResultSummary.__init__):
(ResultSummary.add):

  • Scripts/webkitpy/layout_tests/views/printing.py:

(Printer.print_one_line_summary):

  • Scripts/webkitpy/layout_tests/views/printing_unittest.py:

(Testprinter.test_print_one_line_summary):

Location:
trunk/Tools
Files:
5 edited

  • trunk/Tools/ChangeLog

(r122657 → r122659)

+2012-07-13  Dirk Pranke  <dpranke@chromium.org>
+
+        run-webkit-test outputs the wrong number of tests executed when some are skipped.
+        https://bugs.webkit.org/show_bug.cgi?id=89894
+
+        Reviewed by Ojan Vafai.
+
+        Fix the logging of the actual number of tests run so that tests
+        that are skipped aren't included.
+
+        Also revamp the 'expected' output so we distinguish the number
+        of tests found from the number of tests run (to account for
+        --repeat-each and --iterations).
+
+        Covered by existing tests.
+
+        * Scripts/webkitpy/layout_tests/controllers/manager.py:
+        (Manager.prepare_lists_and_print_output):
+        (Manager._log_num_workers):
+        (Manager.run):
+        (Manager._print_result_summary):
+        * Scripts/webkitpy/layout_tests/models/result_summary.py:
+        (ResultSummary.__init__):
+        (ResultSummary.add):
+        * Scripts/webkitpy/layout_tests/views/printing.py:
+        (Printer.print_one_line_summary):
+        * Scripts/webkitpy/layout_tests/views/printing_unittest.py:
+        (Testprinter.test_print_one_line_summary):
+
 2012-07-13  Dirk Pranke  <dpranke@chromium.org>
 
  • trunk/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py

(r122615 → r122659)

         self._test_files.update(found_test_files.intersection(self._paths))
 
-        if not num_all_test_files:
+        num_to_run = len(self._test_files)
+        num_skipped = num_all_test_files - num_to_run
+
+        if not num_to_run:
             _log.critical('No tests to run.')
             return None
-
-        num_skipped = num_all_test_files - len(self._test_files)
-        if num_skipped:
-            self._printer.print_expected("Running %s (found %d, skipping %d)." % (
-                grammar.pluralize('test', num_all_test_files - num_skipped),
-                num_all_test_files, num_skipped))
-        elif len(self._test_files) > 1:
-            self._printer.print_expected("Running all %d tests." % len(self._test_files))
-        else:
-            self._printer.print_expected("Running 1 test.")
 
         # Create a sorted list of test files so the subset chunk,
…
             (self._options.iterations if self._options.iterations else 1)
         result_summary = ResultSummary(self._expectations, self._test_files | skipped, iterations)
+
+        self._printer.print_expected('Found %s.' % grammar.pluralize('test', num_all_test_files))
         self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes")
         self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures")
…
                 result = test_results.TestResult(test)
                 result.type = test_expectations.SKIP
-                iterations =  \
-                    (self._options.repeat_each if self._options.repeat_each else 1) * \
-                    (self._options.iterations if self._options.iterations else 1)
                 for iteration in range(iterations):
                     result_summary.add(result, expected=True)
         self._printer.print_expected('')
 
-        # Check to make sure we didn't filter out all of the tests.
-        if not len(self._test_files):
-            _log.info("All tests are being skipped")
-            return None
+        if self._options.repeat_each > 1:
+            self._printer.print_expected('Running each test %d times.' % self._options.repeat_each)
+        if self._options.iterations > 1:
+            self._printer.print_expected('Running %d iterations of the tests.' % self._options.iterations)
+        if iterations > 1:
+            self._printer.print_expected('')
 
         return result_summary
…
         driver_name = self._port.driver_name()
         if num_workers == 1:
-            self._printer.print_config("Running 1 %s over %s" %
+            self._printer.print_config("Running 1 %s over %s." %
                 (driver_name, grammar.pluralize('shard', num_shards)))
         else:
-            self._printer.print_config("Running %d %ss in parallel over %d shards (%d locked)" %
+            self._printer.print_config("Running %d %ss in parallel over %d shards (%d locked)." %
                 (num_workers, driver_name, num_shards, num_locked_shards))
+        self._printer.print_config('')
 
     def _run_tests(self, file_list, result_summary, num_workers):
…
         self._print_result_summary(result_summary)
 
-        self._printer.print_one_line_summary(result_summary.total, result_summary.expected, result_summary.unexpected)
+        self._printer.print_one_line_summary(result_summary.total - result_summary.expected_skips, result_summary.expected - result_summary.expected_skips, result_summary.unexpected)
 
         unexpected_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=True, interrupted=interrupted)
…
         """
         failed = result_summary.total_failures
-        skipped = result_summary.total_tests_by_expectation[test_expectations.SKIP]
-        total = result_summary.total
-        passed = total - failed - skipped
+        total = result_summary.total - result_summary.expected_skips
+        passed = total - failed
         pct_passed = 0.0
         if total > 0:
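
For illustration only (invented numbers, not captured output): with 100 tests found, 10 of them skipped, and --repeat-each=2, the revised expected-results preamble reads roughly as follows, rather than the old "Running 90 tests (found 100, skipping 10)." line.

    Found 100 tests.
    ...                          <- expected passes/failures/flaky breakdown
    Running each test 2 times.
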
  • trunk/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py

(r106784 → r122659)

         self.failures = {}
         self.total_failures = 0
+        self.expected_skips = 0
         self.total_tests_by_expectation[SKIP] = 0
         self.tests_by_expectation[SKIP] = set()
…
         if expected:
             self.expected += 1
+            if test_result.type == SKIP:
+                self.expected_skips += 1
         else:
             self.unexpected_results[test_result.test_name] = test_result
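
A minimal standalone sketch of the new bookkeeping (the class and constant below are placeholders, not the real ResultSummary): an expected result bumps expected, and an expected SKIP additionally bumps expected_skips, which the manager later subtracts when reporting how many tests actually ran.

    # Toy illustration of the counter added above; not the real ResultSummary.
    SKIP = 'SKIP'   # stand-in for test_expectations.SKIP

    class ToySummary(object):
        def __init__(self):
            self.expected = 0
            self.expected_skips = 0

        def add(self, result_type, expected):
            if expected:
                self.expected += 1
                if result_type == SKIP:
                    self.expected_skips += 1

    s = ToySummary()
    s.add('PASS', expected=True)
    s.add(SKIP, expected=True)
    assert s.expected - s.expected_skips == 1   # only one test actually ran
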
  • trunk/Tools/Scripts/webkitpy/layout_tests/views/printing.py

(r120619 → r122659)

 import optparse
 
+from webkitpy.tool import grammar
 from webkitpy.common.net import resultsjsonparser
 from webkitpy.layout_tests.models.test_expectations import TestExpectations
…
 
         incomplete = total - expected - unexpected
+        incomplete_str = ''
         if incomplete:
             self._write("")
             incomplete_str = " (%d didn't run)" % incomplete
-            expected_str = str(expected)
+
+        if unexpected == 0:
+            if expected == total:
+                if expected > 1:
+                    self._write("All %d tests ran as expected." % expected)
+                else:
+                    self._write("The test ran as expected.")
+            else:
+                self._write("%s ran as expected%s." % (grammar.pluralize('test', expected), incomplete_str))
         else:
-            incomplete_str = ""
-            expected_str = "All %d" % expected
-
-        if unexpected == 0:
-            self._write("%s tests ran as expected%s." %
-                        (expected_str, incomplete_str))
-        elif expected == 1:
-            self._write("1 test ran as expected, %d didn't%s:" %
-                        (unexpected, incomplete_str))
-        else:
-            self._write("%d tests ran as expected, %d didn't%s:" %
-                        (expected, unexpected, incomplete_str))
+            self._write("%s ran as expected, %d didn't%s:" % (grammar.pluralize('test', expected), unexpected, incomplete_str))
         self._write("")
 
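
For reference, the new branch logic yields one-line summaries along these lines (hand-derived from the code above, with printer obtained as in the unit tests below; not captured output):

    printer.print_one_line_summary(1, 1, 0)   # "The test ran as expected."
    printer.print_one_line_summary(2, 2, 0)   # "All 2 tests ran as expected."
    printer.print_one_line_summary(3, 2, 0)   # "2 tests ran as expected (1 didn't run)."
    printer.print_one_line_summary(3, 2, 1)   # "2 tests ran as expected, 1 didn't:"
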
  • trunk/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py

(r117805 → r122659)

         printer, err, out = self.get_printer(['--print', 'one-line-summary'])
         printer.print_one_line_summary(1, 1, 0)
-        self.assertWritten(err, ["All 1 tests ran as expected.\n", "\n"])
+        self.assertWritten(err, ["The test ran as expected.\n", "\n"])
 
         printer, err, out = self.get_printer(['--print', 'everything'])
         printer.print_one_line_summary(1, 1, 0)
-        self.assertWritten(err, ["All 1 tests ran as expected.\n", "\n"])
+        self.assertWritten(err, ["The test ran as expected.\n", "\n"])
 
         printer, err, out = self.get_printer(['--print', 'everything'])