Diff
Modified: trunk/Tools/ChangeLog (106517 => 106518)
--- trunk/Tools/ChangeLog 2012-02-02 01:40:47 UTC (rev 106517)
+++ trunk/Tools/ChangeLog 2012-02-02 01:56:58 UTC (rev 106518)
@@ -1,3 +1,40 @@
+2012-02-01 Adam Barth <[email protected]>
+
+ Mac-ews logs are huge
+ https://bugs.webkit.org/show_bug.cgi?id=77045
+
+ Reviewed by Eric Seidel.
+
+ In order to get the cr-linux-ews bot to run tests on EC2, we created
+ the chromium-xvfb port, which contained a number of tweaks to the
+ Chromium port's behavior. This patch refactors those tweaks so they
+ can be shared with the mac-ews, mostly by moving them into the
+ non-interactive mode of RunTests.
+
+ * Scripts/webkitpy/common/config/ports.py:
+ (WebKitPort):
+ (WebKitPort.layout_tests_results_path):
+ (ChromiumPort.run_javascriptcore_tests_command):
+ (ChromiumXVFBPort):
+ (ChromiumXVFBPort.run_webkit_tests_command):
+ * Scripts/webkitpy/common/config/ports_mock.py:
+ (MockPort):
+ (MockPort.layout_tests_results_path):
+ * Scripts/webkitpy/common/config/ports_unittest.py:
+ (WebKitPortTest.test_chromium_xvfb_port):
+ * Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py:
+ (LayoutTestResultsReaderTest.test_missing_layout_test_results):
+ * Scripts/webkitpy/tool/commands/queues_unittest.py:
+ (CommitQueueTest.test_commit_queue):
+ (test_rollout):
+ (test_manual_reject_during_processing):
+ * Scripts/webkitpy/tool/steps/runtests.py:
+ (RunTests.run):
+ * Scripts/webkitpy/tool/steps/runtests_unittest.py:
+ (RunTestsTest.test_no_unit_tests):
+ * Scripts/webkitpy/tool/steps/steps_unittest.py:
+ (StepsTest.test_runtests_args):
+
2012-02-01 Ryosuke Niwa <[email protected]>
Roll out r106442. It made some tests more stable but others less stable.
Modified: trunk/Tools/Scripts/webkitpy/common/config/ports.py (106517 => 106518)
--- trunk/Tools/Scripts/webkitpy/common/config/ports.py 2012-02-02 01:40:47 UTC (rev 106517)
+++ trunk/Tools/Scripts/webkitpy/common/config/ports.py 2012-02-02 01:56:58 UTC (rev 106518)
@@ -36,6 +36,8 @@
class WebKitPort(object):
+ results_directory = "/tmp/layout-test-results"
+
# We might need to pass scm into this function for scm.checkout_root
@classmethod
def script_path(cls, script_name):
@@ -122,7 +124,7 @@
@classmethod
def layout_tests_results_path(cls):
- return "/tmp/layout-test-results/results.html"
+ return os.path.join(cls.results_directory, "full_results.json")
class MacPort(WebKitPort):
@@ -256,34 +258,12 @@
return None
-# FIXME: This port is a bit of a hack to get our infrastructure running on EC2.
class ChromiumXVFBPort(ChromiumPort):
- results_directory = "/tmp/layout-test-results"
-
@classmethod
def flag(cls):
return "--port=chromium-xvfb"
@classmethod
def run_webkit_tests_command(cls):
- # FIXME: We should find a better way to do this. Some of these options
- # are specific to new-run-webkit-tests and some of them are due to
- # running in non-interactive mode.
- return ["xvfb-run"] + ChromiumPort.run_webkit_tests_command() + [
- "--results-directory=%s" % cls.results_directory,
- "--skip-failing-tests",
- "--print=actual,config,expected,misc,slowest,unexpected,unexpected-results",
- ]
-
- @classmethod
- def run_python_unittests_command(cls):
- return None
-
- @classmethod
- def run_perl_unittests_command(cls):
- return None
-
- @classmethod
- def layout_tests_results_path(cls):
- return os.path.join(cls.results_directory, "full_results.json")
+ return ["xvfb-run"] + ChromiumPort.run_webkit_tests_command()
Modified: trunk/Tools/Scripts/webkitpy/common/config/ports_mock.py (106517 => 106518)
--- trunk/Tools/Scripts/webkitpy/common/config/ports_mock.py 2012-02-02 01:40:47 UTC (rev 106517)
+++ trunk/Tools/Scripts/webkitpy/common/config/ports_mock.py 2012-02-02 01:56:58 UTC (rev 106518)
@@ -28,11 +28,13 @@
class MockPort(object):
+ results_directory = "/mock-results"
+
def name(self):
return "MockPort"
def layout_tests_results_path(self):
- return "/mock-results/results.html"
+ return "/mock-results/full_results.json"
def check_webkit_style_command(self):
return ["mock-check-webkit-style"]
Modified: trunk/Tools/Scripts/webkitpy/common/config/ports_unittest.py (106517 => 106518)
--- trunk/Tools/Scripts/webkitpy/common/config/ports_unittest.py 2012-02-02 01:40:47 UTC (rev 106517)
+++ trunk/Tools/Scripts/webkitpy/common/config/ports_unittest.py 2012-02-02 01:56:58 UTC (rev 106518)
@@ -70,7 +70,7 @@
self.assertEquals(ChromiumPort.update_webkit_command(), WebKitPort.script_shell_command("update-webkit") + ["--chromium"])
def test_chromium_xvfb_port(self):
- self.assertEquals(ChromiumXVFBPort.run_webkit_tests_command(), ['xvfb-run'] + WebKitPort.script_shell_command('new-run-webkit-tests') + ['--chromium', '--skip-failing-tests', '--results-directory=/tmp/layout-test-results', '--skip-failing-tests', '--print=actual,config,expected,misc,slowest,unexpected,unexpected-results'])
+ self.assertEquals(ChromiumXVFBPort.run_webkit_tests_command(), ['xvfb-run'] + WebKitPort.script_shell_command('new-run-webkit-tests') + ['--chromium', '--skip-failing-tests'])
if __name__ == '__main__':
unittest.main()
Modified: trunk/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py (106517 => 106518)
--- trunk/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py 2012-02-02 01:40:47 UTC (rev 106517)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py 2012-02-02 01:56:58 UTC (rev 106518)
@@ -39,7 +39,7 @@
def test_missing_layout_test_results(self):
tool = MockTool()
reader = LayoutTestResultsReader(tool, "/var/logs")
- results_path = '/mock-results/results.html'
+ results_path = '/mock-results/full_results.json'
tool.filesystem = MockFileSystem({results_path: None})
# Make sure that our filesystem mock functions as we expect.
self.assertRaises(IOError, tool.filesystem.read_text_file, results_path)
Modified: trunk/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py (106517 => 106518)
--- trunk/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py 2012-02-02 01:40:47 UTC (rev 106517)
+++ trunk/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py 2012-02-02 01:56:58 UTC (rev 106518)
@@ -237,6 +237,8 @@
return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
def test_commit_queue(self):
+ tool = MockTool()
+ tool.filesystem.write_text_file('/mock-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("commit-queue"),
"should_proceed_with_work_item": "MOCK: update_status: commit-queue Processing patch\n",
@@ -254,7 +256,7 @@
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'Mock error message'\n",
"handle_script_error": "ScriptError error message\n",
}
- self.assert_queue_outputs(CommitQueue(), expected_stderr=expected_stderr)
+ self.assert_queue_outputs(CommitQueue(), tool=tool, expected_stderr=expected_stderr)
def test_commit_queue_failure(self):
expected_stderr = {
@@ -317,7 +319,7 @@
def test_rollout(self):
tool = MockTool(log_executive=True)
- tool.filesystem.write_text_file('/mock-results/results.html', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
+ tool.filesystem.write_text_file('/mock-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
tool.buildbot.light_tree_on_fire()
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("commit-queue"),
@@ -390,7 +392,7 @@
def test_manual_reject_during_processing(self):
queue = SecondThoughtsCommitQueue(MockTool())
queue.begin_work_queue()
- queue._tool.filesystem.write_text_file('/mock-results/results.html', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
+ queue._tool.filesystem.write_text_file('/mock-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
queue._options = Mock()
queue._options.port = None
expected_stderr = """MOCK: update_status: commit-queue Cleaned working directory
Modified: trunk/Tools/Scripts/webkitpy/tool/steps/runtests.py (106517 => 106518)
--- trunk/Tools/Scripts/webkitpy/tool/steps/runtests.py 2012-02-02 01:40:47 UTC (rev 106517)
+++ trunk/Tools/Scripts/webkitpy/tool/steps/runtests.py 2012-02-02 01:56:58 UTC (rev 106518)
@@ -46,34 +46,42 @@
if not self._options.test:
return
- python_unittests_command = self._tool.port().run_python_unittests_command()
- if python_unittests_command:
- log("Running Python unit tests")
- self._tool.executive.run_and_throw_if_fail(python_unittests_command, cwd=self._tool.scm().checkout_root)
+ if not self._options.non_interactive:
+ # FIXME: We should teach the commit-queue and the EWS how to run these tests.
- perl_unittests_command = self._tool.port().run_perl_unittests_command()
- if perl_unittests_command:
- log("Running Perl unit tests")
- self._tool.executive.run_and_throw_if_fail(perl_unittests_command, cwd=self._tool.scm().checkout_root)
+ python_unittests_command = self._tool.port().run_python_unittests_command()
+ if python_unittests_command:
+ log("Running Python unit tests")
+ self._tool.executive.run_and_throw_if_fail(python_unittests_command, cwd=self._tool.scm().checkout_root)
- javascriptcore_tests_command = self._tool.port().run_javascriptcore_tests_command()
- if javascriptcore_tests_command:
- log("Running JavaScriptCore tests")
- self._tool.executive.run_and_throw_if_fail(javascriptcore_tests_command, quiet=True, cwd=self._tool.scm().checkout_root)
+ perl_unittests_command = self._tool.port().run_perl_unittests_command()
+ if perl_unittests_command:
+ log("Running Perl unit tests")
+ self._tool.executive.run_and_throw_if_fail(perl_unittests_command, cwd=self._tool.scm().checkout_root)
- webkit_unit_tests_command = self._tool.port().run_webkit_unit_tests_command()
- if webkit_unit_tests_command:
- log("Running WebKit unit tests")
- self._tool.executive.run_and_throw_if_fail(webkit_unit_tests_command, cwd=self._tool.scm().checkout_root)
+ javascriptcore_tests_command = self._tool.port().run_javascriptcore_tests_command()
+ if javascriptcore_tests_command:
+ log("Running JavaScriptCore tests")
+ self._tool.executive.run_and_throw_if_fail(javascriptcore_tests_command, quiet=True, cwd=self._tool.scm().checkout_root)
+ webkit_unit_tests_command = self._tool.port().run_webkit_unit_tests_command()
+ if webkit_unit_tests_command:
+ log("Running WebKit unit tests")
+ self._tool.executive.run_and_throw_if_fail(webkit_unit_tests_command, cwd=self._tool.scm().checkout_root)
+
log("Running run-webkit-tests")
args = self._tool.port().run_webkit_tests_command()
if self._options.non_interactive:
- args.append("--no-new-test-results")
- args.append("--no-launch-safari")
- args.append("--exit-after-n-failures=%s" % self.NON_INTERACTIVE_FAILURE_LIMIT_COUNT)
+ args.extend([
+ "--no-new-test-results",
+ "--no-launch-safari",
+ "--skip-failing-tests",
+ "--exit-after-n-failures=%s" % self.NON_INTERACTIVE_FAILURE_LIMIT_COUNT,
+ "--results-directory=%s" % self._tool.port().results_directory,
+ # We customize the printing options to avoid generating massive logs on the EWS and commit-queue.
+ "--print=actual,config,expected,misc,slowest,unexpected,unexpected-results",
+ ])
if self._options.quiet:
args.append("--quiet")
self._tool.executive.run_and_throw_if_fail(args, cwd=self._tool.scm().checkout_root)
-
Modified: trunk/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py (106517 => 106518)
--- trunk/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py 2012-02-02 01:40:47 UTC (rev 106517)
+++ trunk/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py 2012-02-02 01:56:58 UTC (rev 106518)
@@ -34,12 +34,11 @@
class RunTestsTest(unittest.TestCase):
def test_no_unit_tests(self):
- tool = MockTool()
+ tool = MockTool(log_executive=True)
tool._deprecated_port.run_python_unittests_command = lambda: None
tool._deprecated_port.run_perl_unittests_command = lambda: None
step = RunTests(tool, MockOptions(test=True, non_interactive=True, quiet=False))
- expected_stderr = """Running JavaScriptCore tests
-Running WebKit unit tests
-Running run-webkit-tests
+ expected_stderr = """Running run-webkit-tests
+MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-launch-safari', '--skip-failing-tests', '--exit-after-n-failures=30', '--results-directory=/mock-results', '--print=actual,config,expected,misc,slowest,unexpected,unexpected-results'], cwd=/mock-checkout
"""
OutputCapture().assert_outputs(self, step.run, [{}], expected_stderr=expected_stderr)
Modified: trunk/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py (106517 => 106518)
--- trunk/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py 2012-02-02 01:40:47 UTC (rev 106517)
+++ trunk/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py 2012-02-02 01:56:58 UTC (rev 106518)
@@ -97,6 +97,7 @@
def test_runtests_args(self):
mock_options = self._step_options()
+ mock_options.non_interactive = False
step = steps.RunTests(MockTool(log_executive=True), mock_options)
# FIXME: We shouldn't use a real port-object here, but there is too much to mock at the moment.
mock_port = WebKitPort()
@@ -111,6 +112,6 @@
Running JavaScriptCore tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/run-_javascript_core-tests'.replace('_javascript_core', 'javascriptcore')], cwd=/mock-checkout
Running run-webkit-tests
-MOCK run_and_throw_if_fail: ['Tools/Scripts/run-webkit-tests', '--no-new-test-results', '--no-launch-safari', '--exit-after-n-failures=30', '--quiet'], cwd=/mock-checkout
+MOCK run_and_throw_if_fail: ['Tools/Scripts/run-webkit-tests', '--quiet'], cwd=/mock-checkout
"""
OutputCapture().assert_outputs(self, step.run, [{}], expected_stderr=expected_stderr)