Changeset 73211 in webkit


Timestamp: Dec 2, 2010 3:55:31 PM
Author: dpranke@chromium.org
Message:

2010-12-02 Dirk Pranke <dpranke@chromium.org>

Reviewed by Tony Chang.

This change is a bunch of cleanup / refactoring of the file
below. It moves a bunch of free functions to methods on the
TestShellThread class, and restructures other methods to be
closer to the structure we'll need for the multiprocessing
rewrite.
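
To illustrate the pattern (a condensed sketch of the diff below, not the
complete code): a module-level helper that took every collaborator as an
argument becomes a TestShellThread method that reads the same state from
self.

    # Before: free function; every collaborator is a parameter.
    def _process_output(port, options, test_input, test_types, test_args,
                        test_output, worker_name):
        failures = []
        ...

    # After: method on TestShellThread; port, options, and friends
    # come from self, so the signature shrinks to what actually varies.
    def _process_output(self, test_filename, test_output):
        failures = []
        ...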

It also makes the logic of --run-singly a *lot* easier to follow
by nestling all of the separate-thread logic into a single
routine.
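
Condensed from the new _run_test() in the diff below, the consolidated
flow is roughly:

    def _run_test(self, test_input):
        self._set_up_test(test_input)
        # DumpRenderTree's own watchdog uses 2.5x the per-test timeout,
        # so 3x is a safe hard limit; the wrapper thread gets an extra
        # second of padding. (Timeouts arrive as strings in milliseconds.)
        driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
        thread_timeout_sec = driver_timeout_sec + 1.0
        if self._options.run_singly:
            result = self._run_test_in_another_thread(test_input,
                                                      thread_timeout_sec)
        else:
            result = self._run_test_in_this_thread(test_input)
        self._tear_down_test(test_input, result)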

There should be no semantic changes in this patch, just cleanup.

https://bugs.webkit.org/show_bug.cgi?id=50367

  • Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
Location: trunk/WebKitTools
Files: 2 edited

Legend: lines prefixed with "+" were added, lines prefixed with "-" were removed; unprefixed lines are unmodified context.
  • trunk/WebKitTools/ChangeLog

    r73207 → r73211: adds the ChangeLog entry quoted in the commit message above, ahead of the existing "2010-12-02  Tony Chang  <tony@chromium.org>" entry.
  • trunk/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py

    r72708 → r73211

-def _expected_test_output(port, filename):
-    """Returns an expected TestOutput object."""
-    return test_output.TestOutput(port.expected_text(filename),
-                                  port.expected_image(filename),
-                                  port.expected_checksum(filename))
-
-def _process_output(port, options, test_input, test_types, test_args,
-                    test_output, worker_name):
-    """Receives the output from a DumpRenderTree process, subjects it to a
-    number of tests, and returns a list of failure types the test produced.
-
-    Args:
-      port: port-specific hooks
-      options: command line options argument from optparse
-      proc: an active DumpRenderTree process
-      test_input: Object containing the test filename and timeout
-      test_types: list of test types to subject the output to
-      test_args: arguments to be passed to each test
-      test_output: a TestOutput object containing the output of the test
-      worker_name: worker name for logging
-
-    Returns: a TestResult object
-    """
-    failures = []
-
-    if test_output.crash:
-        failures.append(test_failures.FailureCrash())
-    if test_output.timeout:
-        failures.append(test_failures.FailureTimeout())
-
-    test_name = port.relative_test_filename(test_input.filename)
-    if test_output.crash:
-        _log.debug("%s Stacktrace for %s:\n%s" % (worker_name, test_name,
-                                                  test_output.error))
-        filename = os.path.join(options.results_directory, test_name)
-        filename = os.path.splitext(filename)[0] + "-stack.txt"
-        port.maybe_make_directory(os.path.split(filename)[0])
-        with codecs.open(filename, "wb", "utf-8") as file:
-            file.write(test_output.error)
-    elif test_output.error:
-        _log.debug("%s %s output stderr lines:\n%s" % (worker_name, test_name,
-                                                       test_output.error))
-
-    expected_test_output = _expected_test_output(port, test_input.filename)
-
-    # Check the output and save the results.
-    start_time = time.time()
-    time_for_diffs = {}
-    for test_type in test_types:
-        start_diff_time = time.time()
-        new_failures = test_type.compare_output(port, test_input.filename,
-                                                test_args, test_output,
-                                                expected_test_output)
-        # Don't add any more failures if we already have a crash, so we don't
-        # double-report those tests. We do double-report for timeouts since
-        # we still want to see the text and image output.
-        if not test_output.crash:
-            failures.extend(new_failures)
-        time_for_diffs[test_type.__class__.__name__] = (
-            time.time() - start_diff_time)
-
-    total_time_for_all_diffs = time.time() - start_diff_time
-    return test_results.TestResult(test_input.filename, failures, test_output.test_time,
-                                   total_time_for_all_diffs, time_for_diffs)
-
-
-def _pad_timeout(timeout):
-    """Returns a safe multiple of the per-test timeout value to use
-    to detect hung test threads.
-
-    """
-    # When we're running one test per DumpRenderTree process, we can
-    # enforce a hard timeout.  The DumpRenderTree watchdog uses 2.5x
-    # the timeout; we want to be larger than that.
-    return timeout * 3
-
-
-def _milliseconds_to_seconds(msecs):
-    return float(msecs) / 1000.0
-
-
-def _should_fetch_expected_checksum(options):
-    return options.pixel_tests and not (options.new_baseline or options.reset_results)
-
-
-def _run_single_test(port, options, test_input, test_types, test_args, driver, worker_name):
-    # FIXME: Pull this into TestShellThread._run().
-
-    # The image hash is used to avoid doing an image dump if the
-    # checksums match, so it should be set to a blank value if we
-    # are generating a new baseline.  (Otherwise, an image from a
-    # previous run will be copied into the baseline."""
-    if _should_fetch_expected_checksum(options):
-        test_input.image_hash = port.expected_checksum(test_input.filename)
-    test_output = driver.run_test(test_input)
-    return _process_output(port, options, test_input, test_types, test_args,
-                           test_output, worker_name)
-
-
-class SingleTestThread(threading.Thread):
-    """Thread wrapper for running a single test file."""
-
-    def __init__(self, port, options, worker_number, worker_name,
-                 test_input, test_types, test_args):
-        """
-        Args:
-          port: object implementing port-specific hooks
-          options: command line argument object from optparse
-          worker_number: worker number for tests
-          worker_name: for logging
-          test_input: Object containing the test filename and timeout
-          test_types: A list of TestType objects to run the test output
-              against.
-          test_args: A TestArguments object to pass to each TestType.
-        """
-
-        threading.Thread.__init__(self)
-        self._port = port
-        self._options = options
-        self._test_input = test_input
-        self._test_types = test_types
-        self._test_args = test_args
-        self._driver = None
-        self._worker_number = worker_number
-        self._name = worker_name
-
-    def run(self):
-        self._covered_run()
-
-    def _covered_run(self):
-        # FIXME: this is a separate routine to work around a bug
-        # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
-        self._driver = self._port.create_driver(self._worker_number)
-        self._driver.start()
-        self._test_result = _run_single_test(self._port, self._options,
-                                             self._test_input, self._test_types,
-                                             self._test_args, self._driver,
-                                             self._name)
-        self._driver.stop()
-
-    def get_test_result(self):
-        return self._test_result
-
-
 class WatchableThread(threading.Thread):
     """This class abstracts an interface used by
     
         self._filename_list_queue = filename_list_queue
         self._result_queue = result_queue
-        self._filename_list = []
+
+        self._batch_count = 0
+        self._batch_size = self._options.batch_size
         self._driver = None
-        self._test_group_timing_stats = {}
+        self._have_http_lock = False
+
+        self._test_runner = None
+        self._result_summary = None
+        self._test_list_timing_stats = {}
         self._test_results = []
         self._num_tests = 0
         self._start_time = 0
         self._stop_time = 0
-        self._have_http_lock = False
         self._http_lock_wait_begin = 0
         self._http_lock_wait_end = 0
     
         self._test_args = self._get_test_args(worker_number)

-        # Current group of tests we're running.
-        self._current_group = None
-        # Number of tests in self._current_group.
-        self._num_tests_in_current_group = None
-        # Time at which we started running tests from self._current_group.
-        self._current_group_start_time = None
+        # Append tests we're running to the existing tests_run.txt file.
+        # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
+        tests_run_filename = os.path.join(self._options.results_directory,
+                                          "tests_run.txt")
+        self._tests_run_file = codecs.open(tests_run_filename, "a", "utf-8")
+
+    def __del__(self):
+        self._cleanup()

     def _get_test_args(self, worker_number):
     
         """Returns a dictionary mapping test group to a tuple of
         (number of tests in that group, time to run the tests)"""
-        return self._test_group_timing_stats
+        return self._test_list_timing_stats

     def get_test_results(self):
     
     def get_num_tests(self):
         return self._num_tests
+
+    def next_timeout(self):
+        """Return the time the test is supposed to finish by."""
+        if self._next_timeout:
+            return self._next_timeout + self._http_lock_wait_time()
+        return self._next_timeout

     def run(self):
     
         self._thread_id = thread.get_ident()
         self._start_time = time.time()
-        self._num_tests = 0
         try:
-            _log.debug('%s starting' % (self.getName()))
+            _log.debug('%s starting' % (self._name))
             self._run(test_runner=None, result_summary=None)
-            _log.debug('%s done (%d tests)' % (self.getName(),
-                       self.get_num_tests()))
+            _log.debug('%s done (%d tests)' % (self._name, self._num_tests))
         except KeyboardInterrupt:
             self._exception_info = sys.exc_info()
-            _log.debug("%s interrupted" % self.getName())
+            _log.debug("%s interrupted" % self._name)
         except:
             # Save the exception for our caller to see.
             self._exception_info = sys.exc_info()
             self._stop_time = time.time()
-            _log.error('%s dying, exception raised' % self.getName())
+            _log.error('%s dying, exception raised' % self._name)

         self._stop_time = time.time()
     
         self._run(test_runner, result_summary)

-    def cancel(self):
-        """Clean up http lock and set a flag telling this thread to quit."""
-        self._stop_servers_with_lock()
-        WatchableThread.cancel(self)
-
-    def next_timeout(self):
-        """Return the time the test is supposed to finish by."""
-        if self._next_timeout:
-            return self._next_timeout + self._http_lock_wait_time()
-        return self._next_timeout
-
-    def _http_lock_wait_time(self):
-        """Return the time what http locking takes."""
-        if self._http_lock_wait_begin == 0:
-            return 0
-        if self._http_lock_wait_end == 0:
-            return time.time() - self._http_lock_wait_begin
-        return self._http_lock_wait_end - self._http_lock_wait_begin
-
     def _run(self, test_runner, result_summary):
         """Main work entry point of the thread. Basically we pull urls from the
     

         If test_runner is not None, then we call test_runner.UpdateSummary()
-        with the results of each test."""
-        batch_size = self._options.batch_size
-        batch_count = 0
-
-        # Append tests we're running to the existing tests_run.txt file.
-        # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
-        tests_run_filename = os.path.join(self._options.results_directory,
-                                          "tests_run.txt")
-        tests_run_file = codecs.open(tests_run_filename, "a", "utf-8")
-
-        while True:
+        with the results of each test during _tear_down_test(), below."""
+        self._test_runner = test_runner
+        self._result_summary = result_summary
+
+        while not self._canceled:
+            try:
+                current_group, filename_list = \
+                    self._filename_list_queue.get_nowait()
+                self.handle_test_list(current_group, filename_list)
+            except Queue.Empty:
+                break
+
+        if self._canceled:
+            _log.debug('Testing canceled')
+
+        self._cleanup()
+
+    def _cleanup(self):
+        self._kill_dump_render_tree()
+        if self._have_http_lock:
+            self._stop_servers_with_lock()
+        if self._tests_run_file:
+            self._tests_run_file.close()
+            self._tests_run_file = None
+
+    def handle_test_list(self, list_name, test_list):
+        if list_name == "tests_to_http_lock":
+            self._start_servers_with_lock()
+
+        start_time = time.time()
+        num_tests = 0
+        for test_input in test_list:
+            self._run_test(test_input)
             if self._canceled:
-                _log.debug('Testing cancelled')
-                tests_run_file.close()
-                return
-
-            if len(self._filename_list) is 0:
-                if self._current_group is not None:
-                    self._test_group_timing_stats[self._current_group] = \
-                        (self._num_tests_in_current_group,
-                         time.time() - self._current_group_start_time)
-
-                try:
-                    self._current_group, self._filename_list = \
-                        self._filename_list_queue.get_nowait()
-                except Queue.Empty:
-                    self._stop_servers_with_lock()
-                    self._kill_dump_render_tree()
-                    tests_run_file.close()
-                    return
-
-                if self._current_group == "tests_to_http_lock":
-                    self._start_servers_with_lock()
-                elif self._have_http_lock:
-                    self._stop_servers_with_lock()
-
-                self._num_tests_in_current_group = len(self._filename_list)
-                self._current_group_start_time = time.time()
-
-            test_input = self._filename_list.pop()
-
-            # We have a url, run tests.
-            batch_count += 1
-            self._num_tests += 1
-            if self._options.run_singly:
-                result = self._run_test_in_another_thread(test_input)
-            else:
-                result = self._run_test_in_this_thread(test_input)
-
-            filename = test_input.filename
-            tests_run_file.write(filename + "\n")
-            if result.failures:
-                # Check and kill DumpRenderTree if we need to.
-                if len([1 for f in result.failures
-                        if f.should_kill_dump_render_tree()]):
-                    self._kill_dump_render_tree()
-                    # Reset the batch count since the shell just bounced.
-                    batch_count = 0
-                # Print the error message(s).
-                error_str = '\n'.join(['  ' + f.message() for
-                                       f in result.failures])
-                _log.debug("%s %s failed:\n%s" % (self.getName(),
-                           self._port.relative_test_filename(filename),
-                           error_str))
-            else:
-                _log.debug("%s %s passed" % (self.getName(),
-                           self._port.relative_test_filename(filename)))
-            self._result_queue.put(result.dumps())
-
-            if batch_size > 0 and batch_count >= batch_size:
-                # Bounce the shell and reset count.
-                self._kill_dump_render_tree()
-                batch_count = 0
-
-            if test_runner:
-                test_runner.update_summary(result_summary)
-
-    def _run_test_in_another_thread(self, test_input):
+                break
+            num_tests += 1
+
+        elapsed_time = time.time() - start_time
+
+        if self._have_http_lock:
+            self._stop_servers_with_lock()
+
+        self._test_list_timing_stats[list_name] = \
+           (num_tests, elapsed_time)
+
+    def _run_test(self, test_input):
+        self._set_up_test(test_input)
+
+        # We calculate how long we expect the test to take.
+        #
+        # The DumpRenderTree watchdog uses 2.5x the timeout; we want to be
+        # larger than that. We also add a little more padding if we're
+        # running tests in a separate thread.
+        #
+        # Note that we need to convert the test timeout from a
+        # string value in milliseconds to a float for Python.
+        driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
+        thread_padding_sec = 1.0
+        thread_timeout_sec = driver_timeout_sec + thread_padding_sec
+        if self._options.run_singly:
+            test_timeout_sec = thread_timeout_sec
+        else:
+            test_timeout_sec = driver_timeout_sec
+
+        start = time.time()
+        self._next_timeout = start + test_timeout_sec
+
+        if self._options.run_singly:
+            result = self._run_test_in_another_thread(test_input,
+                                                      thread_timeout_sec)
+        else:
+            result = self._run_test_in_this_thread(test_input)
+
+        self._tear_down_test(test_input, result)
+
+    def _set_up_test(self, test_input):
+        test_input.uri = self._port.filename_to_uri(test_input.filename)
+        if self._should_fetch_expected_checksum():
+            test_input.image_checksum = self._port.expected_checksum(
+                test_input.filename)
+
+    def _should_fetch_expected_checksum(self):
+        return (self._options.pixel_tests and not
+                (self._options.new_baseline or self._options.reset_results))
+
+    def _run_test_in_another_thread(self, test_input, thread_timeout_sec):
         """Run a test in a separate thread, enforcing a hard time limit.

     
         Args:
           test_input: Object containing the test filename and timeout
-
+          thread_timeout_sec: time to wait before killing the driver process.
         Returns:
           A TestResult
         """
-        worker = SingleTestThread(self._port,
-                                  self._options,
-                                  self._worker_number,
-                                  self._name,
-                                  test_input,
-                                  self._test_types,
-                                  self._test_args)
-
-        worker.start()
-
-        thread_timeout = _milliseconds_to_seconds(
-            _pad_timeout(int(test_input.timeout)))
-        thread._next_timeout = time.time() + thread_timeout
-        worker.join(thread_timeout)
-        if worker.isAlive():
+        worker = self
+        result = None
+
+        driver = worker._port.create_driver(worker._worker_number)
+        driver.start()
+
+        class SingleTestThread(threading.Thread):
+            def run(self):
+                result = worker._run_single_test(driver, test_input)
+
+        thread = SingleTestThread()
+        thread.start()
+        thread.join(thread_timeout_sec)
+        if thread.isAlive():
             # If join() returned with the thread still running, the
             # DumpRenderTree is completely hung and there's nothing
     
             # thread's results.
             _log.error('Test thread hung: killing all DumpRenderTrees')
-            if worker._driver:
-                worker._driver.stop()
-
-        try:
-            result = worker.get_test_result()
-        except AttributeError, e:
-            # This gets raised if the worker thread has already exited.
-            failures = []
-            _log.error('Cannot get results of test: %s' %
-                       test_input.filename)
+
+        driver.stop()
+
+        if not result:
             result = test_results.TestResult(test_input.filename, failures=[],
                 test_run_time=0, total_time_for_all_diffs=0, time_for_diffs={})
     

         Returns: a TestResult object.
-        """
-        self._ensure_dump_render_tree_is_running()
-        thread_timeout = _milliseconds_to_seconds(
-             _pad_timeout(int(test_input.timeout)))
-        self._next_timeout = time.time() + thread_timeout
-        test_result = _run_single_test(self._port, self._options, test_input,
-                                       self._test_types, self._test_args,
-                                       self._driver, self._name)
-        self._test_results.append(test_result)
-        return test_result
-
-    def _ensure_dump_render_tree_is_running(self):
-        """Start the shared DumpRenderTree, if it's not running.
-
-        This is not for use when running tests singly, since those each start
-        a separate DumpRenderTree in their own thread.
-
         """
         # poll() is not threadsafe and can throw OSError due to:
     
             self._driver.start()

+        test_result = self._run_single_test(test_input, self._driver)
+        self._test_results.append(test_result)
+        return test_result
+
+    def _run_single_test(self, test_input, driver):
+        # The image hash is used to avoid doing an image dump if the
+        # checksums match, so it should be set to a blank value if we
+        # are generating a new baseline.  (Otherwise, an image from a
+        # previous run will be copied into the baseline."""
+        if self._should_fetch_expected_checksum():
+            test_input.image_hash = self._port.expected_checksum(
+                test_input.filename)
+        test_output = driver.run_test(test_input)
+        return self._process_output(test_input.filename, test_output)
+
+    def _process_output(self, test_filename, test_output):
+        """Receives the output from a DumpRenderTree process, subjects it to a
+        number of tests, and returns a list of failure types the test produced.
+
+        Args:
+        test_filename: full path to the test in question.
+        test_output: a TestOutput object containing the output of the test
+
+        Returns: a TestResult object
+        """
+        failures = []
+
+        if test_output.crash:
+            failures.append(test_failures.FailureCrash())
+        if test_output.timeout:
+            failures.append(test_failures.FailureTimeout())
+
+        test_name = self._port.relative_test_filename(test_filename)
+        if test_output.crash:
+            _log.debug("%s Stacktrace for %s:\n%s" %
+                       (self._name, test_name, test_output.error))
+            filename = os.path.join(self._options.results_directory, test_name)
+            filename = os.path.splitext(filename)[0] + "-stack.txt"
+            self._port.maybe_make_directory(os.path.split(filename)[0])
+            with codecs.open(filename, "wb", "utf-8") as file:
+                file.write(test_output.error)
+        elif test_output.error:
+            _log.debug("%s %s output stderr lines:\n%s" %
+                       (self._name, test_name, test_output.error))
+
+        expected_test_output = self._expected_test_output(test_filename)
+
+        # Check the output and save the results.
+        start_time = time.time()
+        time_for_diffs = {}
+        for test_type in self._test_types:
+            start_diff_time = time.time()
+            new_failures = test_type.compare_output(self._port,
+                                                    test_filename,
+                                                    self._test_args,
+                                                    test_output,
+                                                    expected_test_output)
+            # Don't add any more failures if we already have a crash, so we
+            # don't double-report those tests. We do double-report for timeouts
+            # since we still want to see the text and image output.
+            if not test_output.crash:
+                failures.extend(new_failures)
+            time_for_diffs[test_type.__class__.__name__] = (
+                time.time() - start_diff_time)
+
+        total_time_for_all_diffs = time.time() - start_diff_time
+        return test_results.TestResult(test_filename,
+                                       failures,
+                                       test_output.test_time,
+                                       total_time_for_all_diffs,
+                                       time_for_diffs)
+
+    def _expected_test_output(self, filename):
+        """Returns an expected TestOutput object."""
+        return test_output.TestOutput(self._port.expected_text(filename),
+                                    self._port.expected_image(filename),
+                                    self._port.expected_checksum(filename))
+
+    def _tear_down_test(self, test_input, result):
+        self._num_tests += 1
+        self._batch_count += 1
+        self._tests_run_file.write(test_input.filename + "\n")
+        test_name = self._port.relative_test_filename(test_input.filename)
+
+        if result.failures:
+            # Check and kill DumpRenderTree if we need to.
+            if any([f.should_kill_dump_render_tree() for f in result.failures]):
+                self._kill_dump_render_tree()
+                # Reset the batch count since the shell just bounced.
+                self._batch_count = 0
+
+            # Print the error message(s).
+            _log.debug("%s %s failed:" % (self._name, test_name))
+            for f in result.failures:
+                _log.debug("%s  %s" % (self._name, f.message()))
+        else:
+            _log.debug("%s %s passed" % (self._name, test_name))
+
+        self._result_queue.put(result.dumps())
+
+        if self._batch_size > 0 and self._batch_count >= self._batch_size:
+            # Bounce the shell and reset count.
+            self._kill_dump_render_tree()
+            self._batch_count = 0
+
+        if self._test_runner:
+            self._test_runner.update_summary(self._result_summary)
+
     def _start_servers_with_lock(self):
-        """Acquire http lock and start the servers."""
         self._http_lock_wait_begin = time.time()
-        _log.debug('Acquire http lock ...')
+        _log.debug('Acquiring http lock ...')
         self._port.acquire_http_lock()
         _log.debug('Starting HTTP server ...')
     
         self._http_lock_wait_end = time.time()
         self._have_http_lock = True
+
+    def _http_lock_wait_time(self):
+        """Return the time what http locking takes."""
+        if self._http_lock_wait_begin == 0:
+            return 0
+        if self._http_lock_wait_end == 0:
+            return time.time() - self._http_lock_wait_begin
+        return self._http_lock_wait_end - self._http_lock_wait_begin

     def _stop_servers_with_lock(self):
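
The hard time limit behind --run-singly rests on a standard Python idiom:
run the work on a separate thread, join() it with a timeout, and treat a
thread that is still alive afterwards as hung. A minimal, self-contained
sketch of that idiom (generic names, not the WebKit code):

    import threading
    import time

    def run_with_hard_timeout(work, timeout_sec):
        """Run work() on a worker thread, giving up after timeout_sec.

        Returns (finished, result). finished is False if the worker was
        still running at the deadline; the caller must then clean up any
        external process the worker drives, since the thread itself
        cannot be killed.
        """
        slot = {}

        def target():
            slot['result'] = work()

        thread = threading.Thread(target=target)
        thread.daemon = True
        thread.start()
        thread.join(timeout_sec)
        if thread.is_alive():
            return False, None
        return True, slot.get('result')

    print(run_with_hard_timeout(lambda: 42, 1.0))             # (True, 42)
    print(run_with_hard_timeout(lambda: time.sleep(5), 1.0))  # (False, None)

Writing the result into a mutable container sidesteps the pitfall of
rebinding a local name inside the nested function, which would leave the
caller's variable untouched.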