Diff
Modified: trunk/Tools/ChangeLog (125811 => 125812)
--- trunk/Tools/ChangeLog 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/ChangeLog 2012-08-16 21:45:12 UTC (rev 125812)
@@ -1,3 +1,82 @@
+2012-08-16 Dirk Pranke <[email protected]>
+
+ NRWT cutting off the output from LayoutTest run under Valgrind
+ https://bugs.webkit.org/show_bug.cgi?id=94011
+
+ Reviewed by Ojan Vafai.
+
+ Make NRWT work with valgrind again ... I needed to rework the
+ driver infrastructure so that we could get the stderr written
+ between a test completing and a process being stopped and
+ associate it with the DriverOutput for the test; this meant that
+ run_test() needed to stop the driver at the end of the test
+ directly if/when appropriate. This also entailed reworking
+ run_test() so that we would gather stderr and stdout
+ consistently regardless of whether this was a normal test, or
+ stop_when_done, or a crash or timeout.
+
+ Also, I had to rework the process_kill_time() (and renamed it to
+ driver_stop_timeout) so that it would be longer if --time-out-ms
+ was long as well (so that valgrind would get enough time to
+ run), and I reworked driver.stop(kill_directly=True) to just
+ driver.stop(timeout=0.0).
+
+ Lastly, adding the new stop_when_done parameter entailed
+ touching a lot of test mock functions :(.
+
+ This change appeared to be well-covered by existing tests.
+
+ * Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py:
+ (Worker._run_test):
+ (Worker._run_test_with_timeout):
+ (Worker._run_test_in_another_thread):
+ (Worker._run_test_in_another_thread.SingleTestThread.run):
+ (Worker._run_test_in_this_thread):
+ (Worker._run_single_test):
+ * Scripts/webkitpy/layout_tests/controllers/single_test_runner.py:
+ (run_single_test):
+ (SingleTestRunner.__init__):
+ (SingleTestRunner._run_compare_test):
+ (SingleTestRunner._run_rebaseline):
+ (SingleTestRunner._run_reftest):
+ * Scripts/webkitpy/layout_tests/port/base.py:
+ (Port.driver_stop_timeout):
+ (Port.variable.default_configuration):
+ * Scripts/webkitpy/layout_tests/port/chromium_android.py:
+ (ChromiumAndroidPort.driver_stop_timeout):
+ (ChromiumAndroidDriver.stop):
+ * Scripts/webkitpy/layout_tests/port/driver.py:
+ (Driver.run_test):
+ (Driver.stop):
+ (DriverProxy.run_test):
+ * Scripts/webkitpy/layout_tests/port/driver_unittest.py:
+ (DriverTest.test_check_for_driver_crash.FakeServerProcess.stop):
+ * Scripts/webkitpy/layout_tests/port/server_process.py:
+ (ServerProcess.write):
+ (ServerProcess._wait_for_data_and_update_buffers_using_select):
+ (ServerProcess.stop):
+ (ServerProcess.kill):
+ (ServerProcess):
+ (ServerProcess._kill):
+ * Scripts/webkitpy/layout_tests/port/server_process_unittest.py:
+ (TrivialMockPort.__init__):
+ (MockProc.wait):
+ (TestServerProcess.test_basic):
+ * Scripts/webkitpy/layout_tests/port/test.py:
+ (TestDriver.run_test):
+ * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
+ (get_tests_run.RecordingTestDriver.run_test):
+ * Scripts/webkitpy/performance_tests/perftest.py:
+ (PerfTest.run_single):
+ * Scripts/webkitpy/performance_tests/perftest_unittest.py:
+ (TestPageLoadingPerfTest.MockDriver.run_test):
+ (TestReplayPerfTest.ReplayTestPort.__init__.ReplayTestDriver.run_test):
+ (TestReplayPerfTest.test_run_single.run_test):
+ (TestReplayPerfTest.test_run_single_fails_when_output_has_error.run_test):
+ (TestReplayPerfTest.test_prepare.run_test):
+ * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
+ (MainTest.TestDriver.run_test):
+
2012-08-16 Roger Fong <[email protected]>
Typo in old-run-webkit-tests script from https://bugs.webkit.org/show_bug.cgi?id=93904.
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -315,12 +315,19 @@
test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
def _run_test(self, test_input):
+ self._batch_count += 1
+
+ stop_when_done = False
+ if self._batch_size > 0 and self._batch_count >= self._batch_size:
+ self._batch_count = 0
+ stop_when_done = True
+
self._update_test_input(test_input)
test_timeout_sec = self._timeout(test_input)
start = time.time()
self._caller.post('started_test', test_input, test_timeout_sec)
- result = self._run_test_with_timeout(test_input, test_timeout_sec)
+ result = self._run_test_with_timeout(test_input, test_timeout_sec, stop_when_done)
elapsed_time = time.time() - start
self._caller.post('finished_test', result, elapsed_time)
@@ -359,13 +366,12 @@
_log.debug("%s killing driver" % self._name)
driver.stop()
- def _run_test_with_timeout(self, test_input, timeout):
+ def _run_test_with_timeout(self, test_input, timeout, stop_when_done):
if self._options.run_singly:
- return self._run_test_in_another_thread(test_input, timeout)
- return self._run_test_in_this_thread(test_input)
+ return self._run_test_in_another_thread(test_input, timeout, stop_when_done)
+ return self._run_test_in_this_thread(test_input, stop_when_done)
def _clean_up_after_test(self, test_input, result):
- self._batch_count += 1
test_name = test_input.test_name
self._tests_run_file.write(test_name + "\n")
@@ -385,11 +391,7 @@
else:
_log.debug("%s %s passed" % (self._name, test_name))
- if self._batch_size > 0 and self._batch_count >= self._batch_size:
- self._kill_driver()
- self._batch_count = 0
-
- def _run_test_in_another_thread(self, test_input, thread_timeout_sec):
+ def _run_test_in_another_thread(self, test_input, thread_timeout_sec, stop_when_done):
"""Run a test in a separate thread, enforcing a hard time limit.
Since we can only detect the termination of a thread, not any internal
@@ -412,7 +414,7 @@
self.result = None
def run(self):
- self.result = worker._run_single_test(driver, test_input)
+ self.result = worker._run_single_test(driver, test_input, stop_when_done)
thread = SingleTestThread()
thread.start()
@@ -435,7 +437,7 @@
result = test_results.TestResult(test_input.test_name, failures=[], test_run_time=0)
return result
- def _run_test_in_this_thread(self, test_input):
+ def _run_test_in_this_thread(self, test_input, stop_when_done):
"""Run a single test file using a shared DumpRenderTree process.
Args:
@@ -447,11 +449,11 @@
self._kill_driver()
if not self._driver:
self._driver = self._port.create_driver(self._worker_number)
- return self._run_single_test(self._driver, test_input)
+ return self._run_single_test(self._driver, test_input, stop_when_done)
- def _run_single_test(self, driver, test_input):
+ def _run_single_test(self, driver, test_input, stop_when_done):
return single_test_runner.run_single_test(self._port, self._options,
- test_input, driver, self._name)
+ test_input, driver, self._name, stop_when_done)
class TestShard(object):
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -41,15 +41,15 @@
_log = logging.getLogger(__name__)
-def run_single_test(port, options, test_input, driver, worker_name):
- runner = SingleTestRunner(options, port, driver, test_input, worker_name)
+def run_single_test(port, options, test_input, driver, worker_name, stop_when_done):
+ runner = SingleTestRunner(options, port, driver, test_input, worker_name, stop_when_done)
return runner.run()
class SingleTestRunner(object):
(ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')
- def __init__(self, options, port, driver, test_input, worker_name):
+ def __init__(self, options, port, driver, test_input, worker_name, stop_when_done):
self._options = options
self._port = port
self._filesystem = port.host.filesystem
@@ -59,6 +59,7 @@
self._test_name = test_input.test_name
self._should_run_pixel_test = test_input.should_run_pixel_test
self._reference_files = test_input.reference_files
+ self._stop_when_done = stop_when_done
if self._reference_files:
# Detect and report a test which has a wrong combination of expectation files.
@@ -102,7 +103,7 @@
return self._run_compare_test()
def _run_compare_test(self):
- driver_output = self._driver.run_test(self._driver_input())
+ driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
expected_driver_output = self._expected_driver_output()
if self._options.ignore_metrics:
@@ -116,7 +117,7 @@
return test_result
def _run_rebaseline(self):
- driver_output = self._driver.run_test(self._driver_input())
+ driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
failures = self._handle_error(driver_output)
test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, driver_output, None, failures)
# FIXME: It the test crashed or timed out, it might be better to avoid
@@ -279,7 +280,7 @@
return failures
def _run_reftest(self):
- test_output = self._driver.run_test(self._driver_input())
+ test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
total_test_time = 0
reference_output = None
test_result = None
@@ -293,7 +294,7 @@
putAllMismatchBeforeMatch = sorted
for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
reference_test_name = self._port.relative_test_filename(reference_filename)
- reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, test_output.image_hash, should_run_pixel_test=True))
+ reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, test_output.image_hash, should_run_pixel_test=True), self._stop_when_done)
test_result = self._compare_output_with_reference(test_output, reference_output, reference_filename, expectation == '!=')
if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/port/base.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/layout_tests/port/base.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/port/base.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -159,6 +159,12 @@
return 50 * 1000
return 35 * 1000
+ def driver_stop_timeout(self):
+ """ Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
+ # We want to wait for at least 3 seconds, but if we are really slow, we want to be slow on cleanup as
+ # well (for things like ASAN, Valgrind, etc.)
+ return 3.0 * float(self.get_option('time_out_ms', '0')) / self.default_timeout_ms()
+
def wdiff_available(self):
if self._wdiff_available is None:
self._wdiff_available = self.check_wdiff(logging=False)
@@ -1108,15 +1114,6 @@
def default_configuration(self):
return self._config.default_configuration()
- def process_kill_time(self):
- """ Returns the amount of time in seconds to wait before killing the process.
-
- Within server_process.stop there is a time delta before the test is explictly
- killed. By changing this the time can be extended in case the process needs
- more time to cleanly exit on its own.
- """
- return 3.0
-
#
# PROTECTED ROUTINES
#
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -173,6 +173,10 @@
# marked as slow tests on desktop platforms.
return 10 * 1000
+ def driver_stop_timeout(self):
+ # DRT doesn't respond to closing stdin, so we might as well stop the driver immediately.
+ return 0.0
+
def default_child_processes(self):
return len(self._get_devices())
@@ -659,15 +663,10 @@
self._read_stderr_process.kill()
self._read_stderr_process = None
- # Stop and kill server_process because our pipe reading/writing processes won't quit
- # by itself on close of the pipes.
- if self._server_process:
- self._server_process.stop(kill_directly=True)
- self._server_process = None
super(ChromiumAndroidDriver, self).stop()
if self._forwarder_process:
- self._forwarder_process.stop(kill_directly=True)
+ self._forwarder_process.kill()
self._forwarder_process = None
if not ChromiumAndroidDriver._loop_with_timeout(self._remove_all_pipes, DRT_START_STOP_TIMEOUT_SECS):
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/port/driver.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/layout_tests/port/driver.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/port/driver.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -136,7 +136,7 @@
def __del__(self):
self.stop()
- def run_test(self, driver_input):
+ def run_test(self, driver_input, stop_when_done):
"""Run a single test and return the results.
Note that it is okay if a test times out or crashes and leaves
@@ -158,14 +158,19 @@
text, audio = self._read_first_block(deadline) # First block is either text or audio
image, actual_image_hash = self._read_optional_image_block(deadline) # The second (optional) block is image data.
- # We may not have read all of the output if an error (crash) occured.
- # Since some platforms output the stacktrace over error, we should
- # dump any buffered error into self.error_from_test.
- # FIXME: We may need to also read stderr until the process dies?
- self.error_from_test += self._server_process.pop_all_buffered_stderr()
+ crashed = self.has_crashed()
+ timed_out = self._server_process.timed_out
+ if stop_when_done or crashed or timed_out:
+ # We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
+ # In the timeout case, we kill the hung process as well.
+ out, err = self._server_process.stop(self._port.driver_stop_timeout() if stop_when_done else 0.0)
+ text += out
+ self.error_from_test += err
+ self._server_process = None
+
crash_log = None
- if self.has_crashed():
+ if crashed:
self.error_from_test, crash_log = self._get_crash_log(text, self.error_from_test, newer_than=start_time)
# If we don't find a crash log use a placeholder error message instead.
@@ -175,15 +180,9 @@
if self._subprocess_was_unresponsive:
crash_log += ' Process failed to become responsive before timing out.'
- timeout = self._server_process.timed_out
- if timeout:
- # DRT doesn't have a built in timer to abort the test, so we might as well
- # kill the process directly and not wait for it to shut down cleanly (since it may not).
- self._server_process.kill()
-
return DriverOutput(text, image, actual_image_hash, audio,
- crash=self.has_crashed(), test_time=time.time() - test_begin_time,
- timeout=timeout, error=self.error_from_test,
+ crash=crashed, test_time=time.time() - test_begin_time,
+ timeout=timed_out, error=self.error_from_test,
crashed_process_name=self._crashed_process_name,
crashed_pid=self._crashed_pid, crash_log=crash_log)
@@ -273,7 +272,7 @@
def stop(self):
if self._server_process:
- self._server_process.stop()
+ self._server_process.stop(self._port.driver_stop_timeout())
self._server_process = None
if self._driver_tempdir:
@@ -476,20 +475,20 @@
def uri_to_test(self, uri):
return self._driver.uri_to_test(uri)
- def run_test(self, driver_input):
+ def run_test(self, driver_input, stop_when_done):
base = self._port.lookup_virtual_test_base(driver_input.test_name)
if base:
virtual_driver_input = copy.copy(driver_input)
virtual_driver_input.test_name = base
virtual_driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
- return self.run_test(virtual_driver_input)
+ return self.run_test(virtual_driver_input, stop_when_done)
pixel_tests_needed = driver_input.should_run_pixel_test
cmd_line_key = self._cmd_line_as_key(pixel_tests_needed, driver_input.args)
if not cmd_line_key in self._running_drivers:
self._running_drivers[cmd_line_key] = self._make_driver(pixel_tests_needed)
- return self._running_drivers[cmd_line_key].run_test(driver_input)
+ return self._running_drivers[cmd_line_key].run_test(driver_input, stop_when_done)
def start(self):
# FIXME: Callers shouldn't normally call this, since this routine
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -182,7 +182,7 @@
def has_crashed(self):
return self.crashed
- def stop(self):
+ def stop(self, timeout):
pass
def assert_crash(driver, error_line, crashed, name, pid, unresponsive=False):
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/port/server_process.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/layout_tests/port/server_process.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/port/server_process.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -140,7 +140,7 @@
try:
self._proc.stdin.write(bytes)
except IOError, e:
- self.stop()
+ self.stop(0.0)
# stop() calls _reset(), so we have to set crashed to True after calling stop().
self._crashed = True
@@ -218,7 +218,7 @@
err_fd = self._proc.stderr.fileno()
select_fds = (out_fd, err_fd)
try:
- read_fds, _, _ = select.select(select_fds, [], select_fds, deadline - time.time())
+ read_fds, _, _ = select.select(select_fds, [], select_fds, max(deadline - time.time(), 0))
except select.error, e:
# We can ignore EINVAL since it's likely the process just crashed and we'll
# figure that out the next time through the loop in _read().
@@ -307,42 +307,45 @@
if not self._proc:
self._start()
- def stop(self, kill_directly=False):
+ def stop(self, timeout_secs=3.0):
if not self._proc:
- return
+ return (None, None)
- # Only bother to check for leaks if the process is still running.
+ # Only bother to check for leaks or stderr if the process is still running.
if self.poll() is None:
self._port.check_for_leaks(self.name(), self.pid())
+ now = time.time()
self._proc.stdin.close()
- self._proc.stdout.close()
- if self._proc.stderr:
- self._proc.stderr.close()
-
- if kill_directly:
- self.kill()
+ if not timeout_secs:
+ self._kill()
elif not self._host.platform.is_win():
- # Closing stdin/stdout/stderr hangs sometimes on OS X,
- # and anyway we don't want to hang the harness if DumpRenderTree
- # is buggy, so we wait a couple seconds to give DumpRenderTree a
- # chance to clean up, but then force-kill the process if necessary.
- timeout = time.time() + self._port.process_kill_time()
- while self._proc.poll() is None and time.time() < timeout:
+ # FIXME: Why aren't we calling this on win?
+ deadline = now + timeout_secs
+ while self._proc.poll() is None and time.time() < deadline:
time.sleep(0.01)
if self._proc.poll() is None:
_log.warning('stopping %s timed out, killing it' % self._name)
- self.kill()
+ self._kill()
_log.warning('killed')
+
+ # read any remaining data on the pipes and return it.
+ if self._use_win32_apis:
+ self._wait_for_data_and_update_buffers_using_win32_apis(now)
+ else:
+ self._wait_for_data_and_update_buffers_using_select(now)
+ out, err = self._output, self._error
self._reset()
+ return (out, err)
def kill(self):
- if self._proc:
- self._host.executive.kill_process(self._proc.pid)
- if self._proc.poll() is not None:
- self._proc.wait()
- self._reset()
+ self.stop(0.0)
+ def _kill(self):
+ self._host.executive.kill_process(self._proc.pid)
+ if self._proc.poll() is not None:
+ self._proc.wait()
+
def replace_outputs(self, stdout, stderr):
assert self._proc
if stdout:
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -41,6 +41,8 @@
class TrivialMockPort(object):
def __init__(self):
self.host = MockSystemHost()
+ self.host.executive.kill_process = lambda x: None
+ self.host.executive.kill_process = lambda x: None
def results_directory(self):
return "/mock-results"
@@ -77,7 +79,10 @@
def poll(self):
return 1
+ def wait(self):
+ return 0
+
class FakeServerProcess(server_process.ServerProcess):
def _start(self):
self._proc = MockProc(self)
@@ -114,7 +119,7 @@
if line:
self.assertEquals(line.strip(), "stderr")
- proc.stop()
+ proc.stop(0)
def test_broken_pipe(self):
port_obj = TrivialMockPort()
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/port/test.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/layout_tests/port/test.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/port/test.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -525,7 +525,7 @@
pixel_tests_flag = '-p' if pixel_tests else ''
return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
- def run_test(self, test_input):
+ def run_test(self, test_input, stop_when_done):
start_time = time.time()
test_name = test_input.test_name
test_args = test_input.args or []
@@ -563,6 +563,9 @@
crash_logs = CrashLogs(self._port.host)
crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
+ if stop_when_done:
+ self.stop()
+
return DriverOutput(actual_text, test.actual_image, test.actual_checksum, audio,
crash=test.crash or test.web_process_crash, crashed_process_name=crashed_process_name,
crashed_pid=crashed_pid, crash_log=crash_log,
Modified: trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -147,7 +147,7 @@
def stop(self):
self._current_test_batch = None
- def run_test(self, test_input):
+ def run_test(self, test_input, stop_when_done):
if self._current_test_batch is None:
self._current_test_batch = []
test_batches.append(self._current_test_batch)
@@ -159,7 +159,7 @@
dirname, filename = filesystem.split(test_name)
if include_reference_html or not Port.is_reference_html_file(filesystem, dirname, filename):
self._current_test_batch.append(test_name)
- return TestDriver.run_test(self, test_input)
+ return TestDriver.run_test(self, test_input, stop_when_done)
class RecordingTestPort(TestPort):
def create_driver(self, worker_number):
Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -74,7 +74,7 @@
return self.parse_output(output)
def run_single(self, driver, path_or_url, time_out_ms, should_run_pixel_test=False):
- return driver.run_test(DriverInput(path_or_url, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test))
+ return driver.run_test(DriverInput(path_or_url, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test), stop_when_done=False)
def run_failed(self, output):
if output.text == None or output.error:
Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -99,7 +99,7 @@
self._values = values
self._index = 0
- def run_test(self, input):
+ def run_test(self, input, stop_when_done):
value = self._values[self._index]
self._index += 1
if isinstance(value, str):
@@ -141,8 +141,8 @@
def __init__(self, custom_run_test=None):
class ReplayTestDriver(TestDriver):
- def run_test(self, text_input):
- return custom_run_test(text_input) if custom_run_test else None
+ def run_test(self, text_input, stop_when_done):
+ return custom_run_test(text_input, stop_when_done) if custom_run_test else None
self._custom_driver_class = ReplayTestDriver
super(self.__class__, self).__init__(host=MockHost())
@@ -174,7 +174,7 @@
loaded_pages = []
- def run_test(test_input):
+ def run_test(test_input, stop_when_done):
if test_input.test_name != "about:blank":
self.assertEqual(test_input.test_name, 'http://some-test/')
loaded_pages.append(test_input)
@@ -243,7 +243,7 @@
loaded_pages = []
- def run_test(test_input):
+ def run_test(test_input, stop_when_done):
loaded_pages.append(test_input)
self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
return DriverOutput('actual text', 'actual image', 'actual checksum',
@@ -270,7 +270,7 @@
output_capture = OutputCapture()
output_capture.capture_output()
- def run_test(test_input):
+ def run_test(test_input, stop_when_done):
self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
return DriverOutput('actual text', 'actual image', 'actual checksum',
audio=None, crash=False, timeout=False, error=False)
Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py (125811 => 125812)
--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py 2012-08-16 20:54:21 UTC (rev 125811)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py 2012-08-16 21:45:12 UTC (rev 125812)
@@ -49,7 +49,7 @@
self.assertEquals(stream.buflist, contents)
class TestDriver:
- def run_test(self, driver_input):
+ def run_test(self, driver_input, stop_when_done):
text = ''
timeout = False
crash = False