Diff
Modified: trunk/Tools/ChangeLog (175062 => 175063)
--- trunk/Tools/ChangeLog 2014-10-22 19:44:03 UTC (rev 175062)
+++ trunk/Tools/ChangeLog 2014-10-22 20:04:37 UTC (rev 175063)
@@ -1,3 +1,59 @@
+2014-10-22 Jake Nielsen <[email protected]>
+
+ PatchAnalysisTask._test_patch() needs refactoring
+ https://bugs.webkit.org/show_bug.cgi?id=137904
+
+ Reviewed by Alexey Proskuryakov.
+
+ * Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py:
+ Fixes several small bugs, and makes minor changes to accommodate the
+ changes in PatchAnalysisTask.
+ (MockCommitQueue.refetch_patch):
+ (MockCommitQueue.test_results):
+ (test_flaky_test_failure):
+ (test_failed_archive):
+ (test_very_red_tree_retry):
+ (_expect_validate.MockDelegate.refetch_patch):
+ (MockCommitQueue.expected_failures): Deleted.
+ (_expect_validate.MockDelegate.expected_failures): Deleted.
+ * Scripts/webkitpy/tool/bot/expectedfailures.py: Removed.
+ * Scripts/webkitpy/tool/bot/expectedfailures_unittest.py: Removed.
+ * Scripts/webkitpy/tool/bot/patchanalysistask.py:
+ Removes the unexpected_failures member, adds the _clean_tree_results
+ member, adds a helper function for _test_patch, and refactors
+ _test_patch to be more readable.
+ (PatchAnalysisTask.__init__):
+ (PatchAnalysisTask._continue_testing_patch_that_exceeded_failure_limit_on_first_or_second_try):
+ (PatchAnalysisTask._test_patch):
+ (PatchAnalysisTask.results_from_test_run_without_patch):
+ * Scripts/webkitpy/tool/commands/earlywarningsystem.py:
+ Removes AbstractEarlyWarningSystem's dependency on ExpectedFailures.
+ (AbstractEarlyWarningSystem.begin_work_queue):
+ (AbstractEarlyWarningSystem._failing_tests_message):
+ (AbstractEarlyWarningSystem.command_failed):
+ (AbstractEarlyWarningSystem.expected_failures): Deleted.
+ * Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py:
+ Makes changes to accommodate the changes made to PatchAnalysisTask.
+ (AbstractEarlyWarningSystemTest.test_failing_tests_message):
+ * Scripts/webkitpy/tool/commands/perfalizer.py:
+ Removes Perfalizer's dependency on ExpectedFailures.
+ (PerfalizerTask.refetch_patch):
+ (PerfalizerTask.expected_failures): Deleted.
+ * Scripts/webkitpy/tool/commands/queues.py:
+ Removes CommitQueue's dependency on ExpectedFailures, and adds an
+ initializer to allow for mock injection.
+ (CommitQueue.__init__):
+ (CommitQueue.begin_work_queue):
+ (CommitQueue.process_work_item):
+ (CommitQueue._failing_tests_message):
+ * Scripts/webkitpy/tool/commands/queues_unittest.py:
+ Removes dependency on ExpectedFailures, and instead uses mock
+ injection.
+ (MockCommitQueueTask):
+ (MockCommitQueueTask.results_from_patch_test_run):
+ (MockCommitQueueTask.results_from_test_run_without_patch):
+ (mock_run_webkit_patch):
+
2014-10-22 Rohit Kumar <[email protected]>
[EFL][WK2] Minibrowser : Add support for mouse middle button to open links in new window
Modified: trunk/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py (175062 => 175063)
--- trunk/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py 2014-10-22 19:44:03 UTC (rev 175062)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py 2014-10-22 20:04:37 UTC (rev 175063)
@@ -38,7 +38,6 @@
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.bot.commitqueuetask import *
-from webkitpy.tool.bot.expectedfailures import ExpectedFailures
from webkitpy.tool.mocktool import MockTool
_log = logging.getLogger(__name__)
@@ -69,11 +68,8 @@
def refetch_patch(self, patch):
return patch
- def expected_failures(self):
- return ExpectedFailures()
-
def test_results(self):
- return LayoutTestResults(test_results=[], did_exceed_test_failure_limit=True)
+ return LayoutTestResults(test_results=[], did_exceed_test_failure_limit=False)
def report_flaky_tests(self, patch, flaky_results, results_archive):
flaky_tests = [result.test_name for result in flaky_results]
@@ -281,7 +277,7 @@
])
# CommitQueueTask will only report flaky tests if we successfully parsed
# results.json and returned a LayoutTestResults object, so we fake one.
- commit_queue.test_results = lambda: LayoutTestResults(test_results=[], did_exceed_test_failure_limit=True)
+ commit_queue.test_results = lambda: LayoutTestResults(test_results=[], did_exceed_test_failure_limit=False)
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
@@ -312,7 +308,7 @@
None,
ScriptError("MOCK tests failure"),
])
- commit_queue.test_results = lambda: LayoutTestResults(test_results=[], did_exceed_test_failure_limit=True)
+ commit_queue.test_results = lambda: LayoutTestResults(test_results=[], did_exceed_test_failure_limit=False)
# It's possible delegate to fail to archive layout tests, don't try to report
# flaky tests when that happens.
commit_queue.archive_last_test_results = lambda patch: None
@@ -409,6 +405,7 @@
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
+archive_last_test_results: patch='10000'
"""
tool = MockTool(log_executive=True)
patch = tool.bugs.fetch_attachment(10000)
@@ -497,12 +494,10 @@
None,
None,
ScriptError("MOCK test failure"),
- ScriptError("MOCK test failure again"),
ScriptError("MOCK clean test failure"),
], [
lots_of_failing_tests,
lots_of_failing_tests,
- lots_of_failing_tests,
])
# Tests always fail, and return so many failures that we do not
@@ -521,9 +516,6 @@
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
-run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
-command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
"""
@@ -604,9 +596,6 @@
def refetch_patch(self, patch):
return patch
- def expected_failures(self):
- return ExpectedFailures()
-
task = CommitQueueTask(MockDelegate(), patch)
self.assertEqual(task.validate(), is_valid)
Deleted: trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py (175062 => 175063)
--- trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py 2014-10-22 19:44:03 UTC (rev 175062)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py 2014-10-22 20:04:37 UTC (rev 175063)
@@ -1,60 +0,0 @@
-# Copyright (c) 2011 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class ExpectedFailures(object):
- def __init__(self):
- self._failures = set()
- self._is_trustworthy = True
-
- @classmethod
- def _has_failures(cls, results):
- return bool(results and results.failing_tests())
-
- @classmethod
- def _should_trust(cls, results):
- return bool(results and not results.did_exceed_test_failure_limit())
-
- def failures_were_expected(self, results):
- if not self._is_trustworthy:
- return False
- if not self._should_trust(results):
- return False
- return set(results.failing_tests()) <= self._failures
-
- def unexpected_failures_observed(self, results):
- if not self._is_trustworthy:
- return None
- if not self._has_failures(results):
- return None
- return set(results.failing_tests()) - self._failures
-
- def update(self, results):
- if results:
- self._failures = set(results.failing_tests())
- self._is_trustworthy = self._should_trust(results)
Deleted: trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py (175062 => 175063)
--- trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py 2014-10-22 19:44:03 UTC (rev 175062)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py 2014-10-22 20:04:37 UTC (rev 175063)
@@ -1,95 +0,0 @@
-# Copyright (c) 2009 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest
-
-from webkitpy.tool.bot.expectedfailures import ExpectedFailures
-
-
-class MockResults(object):
- def __init__(self, failing_tests=[], did_exceed_test_failure_limit=False):
- self._failing_tests = failing_tests
- self._did_exceed_test_failure_limit = did_exceed_test_failure_limit
-
- def did_exceed_test_failure_limit(self):
- return self._did_exceed_test_failure_limit
-
- def failing_tests(self):
- return self._failing_tests
-
-
-class ExpectedFailuresTest(unittest.TestCase):
- def _assert_can_trust(self, results, can_trust):
- self.assertEqual(ExpectedFailures._should_trust(results), can_trust)
-
- def test_can_trust_results(self):
- self._assert_can_trust(None, False)
- self._assert_can_trust(MockResults(failing_tests=[], did_exceed_test_failure_limit=False), True)
- self._assert_can_trust(MockResults(failing_tests=[], did_exceed_test_failure_limit=True), False)
- self._assert_can_trust(MockResults(failing_tests=[1], did_exceed_test_failure_limit=False), True)
- self._assert_can_trust(MockResults(failing_tests=[1], did_exceed_test_failure_limit=True), False)
- self._assert_can_trust(MockResults(failing_tests=[1, 2], did_exceed_test_failure_limit=False), True)
- self._assert_can_trust(MockResults(failing_tests=[1, 2], did_exceed_test_failure_limit=True), False)
-
- def _assert_expected(self, expected_failures, failures, expected):
- self.assertEqual(expected_failures.failures_were_expected(MockResults(failures)), expected)
-
- def test_failures_were_expected(self):
- failures = ExpectedFailures()
- failures.update(MockResults(['foo.html']))
- self._assert_expected(failures, ['foo.html'], True)
- self._assert_expected(failures, ['bar.html'], False)
- self._assert_expected(failures, ['bar.html', 'foo.html'], False)
-
- failures.update(MockResults(['baz.html']))
- self._assert_expected(failures, ['baz.html'], True)
- self._assert_expected(failures, ['foo.html'], False)
-
- failures.update(MockResults([]))
- self._assert_expected(failures, ['baz.html'], False)
- self._assert_expected(failures, ['foo.html'], False)
-
- def test_unexpected_failures_observed(self):
- failures = ExpectedFailures()
- failures.update(MockResults(['foo.html']))
- self.assertEqual(failures.unexpected_failures_observed(MockResults(['foo.html', 'bar.html'])), set(['bar.html']))
- self.assertEqual(failures.unexpected_failures_observed(MockResults(['baz.html'])), set(['baz.html']))
- unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], did_exceed_test_failure_limit=True)
- self.assertEqual(failures.unexpected_failures_observed(unbounded_results), set(['baz.html', 'qux.html', 'taco.html']))
- unbounded_results_with_existing_failure = MockResults(['foo.html', 'baz.html', 'qux.html', 'taco.html'], did_exceed_test_failure_limit=True)
- self.assertEqual(failures.unexpected_failures_observed(unbounded_results_with_existing_failure), set(['baz.html', 'qux.html', 'taco.html']))
-
- def test_unexpected_failures_observed_when_tree_is_hosed(self):
- failures = ExpectedFailures()
- failures.update(MockResults(['foo.html', 'banana.html'], did_exceed_test_failure_limit=True))
- self.assertEqual(failures.unexpected_failures_observed(MockResults(['foo.html', 'bar.html'])), None)
- self.assertEqual(failures.unexpected_failures_observed(MockResults(['baz.html'])), None)
- unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], did_exceed_test_failure_limit=True)
- self.assertEqual(failures.unexpected_failures_observed(unbounded_results), None)
- unbounded_results_with_existing_failure = MockResults(['foo.html', 'baz.html', 'qux.html', 'taco.html'], did_exceed_test_failure_limit=True)
- self.assertEqual(failures.unexpected_failures_observed(unbounded_results_with_existing_failure), None)
Modified: trunk/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py (175062 => 175063)
--- trunk/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py 2014-10-22 19:44:03 UTC (rev 175062)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py 2014-10-22 20:04:37 UTC (rev 175063)
@@ -82,7 +82,7 @@
self._script_error = None
self._results_archive_from_patch_test_run = None
self._results_from_patch_test_run = None
- self._expected_failures = delegate.expected_failures()
+ self._clean_tree_results = None
def _run_command(self, command, success_message, failure_message):
try:
@@ -183,6 +183,19 @@
second_failing_tests = [] if not second else second.failing_tests()
return first_failing_tests != second_failing_tests
+ def _continue_testing_patch_that_exceeded_failure_limit_on_first_or_second_try(self, results, results_archive, script_error):
+ self._build_and_test_without_patch()
+ self._clean_tree_results = self._delegate.test_results()
+
+ # If we've made it here, then many (500) tests are failing with the patch applied, but
+ # if the clean tree is also failing many tests, even if it's not quite as many (495),
+ # then we can't be certain that the discrepancy isn't due to flakiness, and hence we must
+ # defer judgement.
+ if (len(results.failing_tests()) - len(self._clean_tree_results.failing_tests())) <= 5:
+ return False
+
+ return self.report_failure(results_archive, results, script_error)
+
def _test_patch(self):
if self._test():
return True
@@ -194,8 +207,8 @@
first_script_error = self._script_error
first_failure_status_id = self.failure_status_id
- if self._expected_failures.failures_were_expected(first_results):
- return True
+ if first_results.did_exceed_test_failure_limit():
+ return self._continue_testing_patch_that_exceeded_failure_limit_on_first_or_second_try(first_results, first_results_archive, first_script_error)
if self._test():
# Only report flaky tests if we were successful at parsing results.json and archiving results.
@@ -204,10 +217,14 @@
return True
second_results = self._delegate.test_results()
+ second_results_archive = self._delegate.archive_last_test_results(self._patch)
+ second_script_error = self._script_error
+ second_failure_status_id = self.failure_status_id
- if (not first_results.did_exceed_test_failure_limit() and
- not second_results.did_exceed_test_failure_limit() and
- self._results_failed_different_tests(first_results, second_results)):
+ if second_results.did_exceed_test_failure_limit():
+ return self._continue_testing_patch_that_exceeded_failure_limit_on_first_or_second_try(second_results, second_results_archive, second_script_error)
+
+ if self._results_failed_different_tests(first_results, second_results):
# We could report flaky tests here, but we would need to be careful
# to use similar checks to ExpectedFailures._can_trust_results
# to make sure we don't report constant failures as flakes when
@@ -215,30 +232,23 @@
# See https://bugs.webkit.org/show_bug.cgi?id=51272
return False
- # Archive (and remove) second results so test_results() after
- # build_and_test_without_patch won't use second results instead of the clean-tree results.
- second_results_archive = self._delegate.archive_last_test_results(self._patch)
-
if self._build_and_test_without_patch():
# The error from the previous ._test() run is real, report it.
return self.report_failure(first_results_archive, first_results, first_script_error)
- clean_tree_results = self._delegate.test_results()
- self._expected_failures.update(clean_tree_results)
+ self._clean_tree_results = self._delegate.test_results()
- # Re-check if the original results are now to be expected to avoid a full re-try.
- if self._expected_failures.failures_were_expected(first_results):
- return True
+ if self._clean_tree_results.did_exceed_test_failure_limit():
+ return False
- # Now that we have updated information about failing tests with a clean checkout, we can
- # tell if our original failures were unexpected and fail the patch if necessary.
- if self._expected_failures.unexpected_failures_observed(first_results):
+ if set(first_results.failing_tests()) - set(self._clean_tree_results.failing_tests()):
self.failure_status_id = first_failure_status_id
return self.report_failure(first_results_archive, first_results, first_script_error)
- # We don't know what's going on. The tree is likely very red (beyond our layout-test-results
- # failure limit), just keep retrying the patch. until someone fixes the tree.
- return False
+ # At this point, we know that the first and second runs had the exact same failures,
+ # and that those failures are all present on the clean tree, so we can say with certainty
+ # that the patch is good.
+ return True
def results_archive_from_patch_test_run(self, patch):
assert(self._patch.id() == patch.id()) # PatchAnalysisTask is not currently re-useable.
@@ -248,6 +258,10 @@
assert(self._patch.id() == patch.id()) # PatchAnalysisTask is not currently re-useable.
return self._results_from_patch_test_run
+ def results_from_test_run_without_patch(self, patch):
+ assert(self._patch.id() == patch.id()) # PatchAnalysisTask is not currently re-useable.
+ return self._clean_tree_results
+
def report_failure(self, results_archive=None, results=None, script_error=None):
if not self.validate():
return False
Modified: trunk/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py (175062 => 175063)
--- trunk/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py 2014-10-22 19:44:03 UTC (rev 175062)
+++ trunk/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py 2014-10-22 20:04:37 UTC (rev 175063)
@@ -35,7 +35,6 @@
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.earlywarningsystemtask import EarlyWarningSystemTask, EarlyWarningSystemTaskDelegate
-from webkitpy.tool.bot.expectedfailures import ExpectedFailures
from webkitpy.tool.bot.layouttestresultsreader import LayoutTestResultsReader
from webkitpy.tool.bot.patchanalysistask import UnableToApplyPatch, PatchIsNotValid
from webkitpy.tool.bot.queueengine import QueueEngine
@@ -55,12 +54,15 @@
def begin_work_queue(self):
AbstractReviewQueue.begin_work_queue(self)
- self._expected_failures = ExpectedFailures()
self._layout_test_results_reader = LayoutTestResultsReader(self._tool, self._port.results_directory(), self._log_directory())
def _failing_tests_message(self, task, patch):
results = task.results_from_patch_test_run(patch)
- unexpected_failures = self._expected_failures.unexpected_failures_observed(results)
+ clean_results = task.results_from_test_run_without_patch(patch)
+
+ unexpected_failures = None
+ if results and clean_results:
+ unexpected_failures = list(set(results.failing_tests()) - set(clean_results.failing_tests()))
if not unexpected_failures:
return None
if results and results.did_exceed_test_failure_limit():
@@ -118,9 +120,6 @@
failure_log = self._log_from_script_error_for_upload(script_error)
return self._update_status(message, patch=patch, results_file=failure_log)
- def expected_failures(self):
- return self._expected_failures
-
def test_results(self):
return self._layout_test_results_reader.results()
Modified: trunk/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py (175062 => 175063)
--- trunk/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py 2014-10-22 19:44:03 UTC (rev 175062)
+++ trunk/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py 2014-10-22 20:04:37 UTC (rev 175063)
@@ -27,7 +27,10 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.thirdparty.mock import Mock
+from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_failures
from webkitpy.tool.bot.queueengine import QueueEngine
from webkitpy.tool.commands.earlywarningsystem import *
from webkitpy.tool.commands.queuestest import QueuesTest
@@ -44,9 +47,11 @@
ews.bind_to_tool(MockTool())
ews._options = MockOptions(port=None, confirm=False)
OutputCapture().assert_outputs(self, ews.begin_work_queue, expected_logs=self._default_begin_work_queue_logs(ews.name))
- ews._expected_failures.unexpected_failures_observed = lambda results: set(["foo.html", "bar.html"])
task = Mock()
- task.results_from_patch_test_run = lambda a: None
+ task.results_from_patch_test_run = lambda a: LayoutTestResults([test_results.TestResult("foo.html", failures=[test_failures.FailureTextMismatch()]),
+ test_results.TestResult("bar.html", failures=[test_failures.FailureTextMismatch()])],
+ did_exceed_test_failure_limit=False)
+ task.results_from_test_run_without_patch = lambda a: LayoutTestResults([], did_exceed_test_failure_limit=False)
patch = ews._tool.bugs.fetch_attachment(10000)
self.assertMultiLineEqual(ews._failing_tests_message(task, patch), "New failing tests:\nbar.html\nfoo.html")
Modified: trunk/Tools/Scripts/webkitpy/tool/commands/perfalizer.py (175062 => 175063)
--- trunk/Tools/Scripts/webkitpy/tool/commands/perfalizer.py 2014-10-22 19:44:03 UTC (rev 175062)
+++ trunk/Tools/Scripts/webkitpy/tool/commands/perfalizer.py 2014-10-22 20:04:37 UTC (rev 175063)
@@ -28,7 +28,6 @@
import logging
-from webkitpy.tool.bot.expectedfailures import ExpectedFailures
from webkitpy.tool.bot.irc_command import IRCCommand
from webkitpy.tool.bot.irc_command import Help
from webkitpy.tool.bot.irc_command import Hi
@@ -151,9 +150,6 @@
def refetch_patch(self, patch):
return self._tool.bugs.fetch_attachment(patch.id())
- def expected_failures(self):
- return ExpectedFailures()
-
def build_style(self):
return "release"
Modified: trunk/Tools/Scripts/webkitpy/tool/commands/queues.py (175062 => 175063)
--- trunk/Tools/Scripts/webkitpy/tool/commands/queues.py 2014-10-22 19:44:03 UTC (rev 175062)
+++ trunk/Tools/Scripts/webkitpy/tool/commands/queues.py 2014-10-22 20:04:37 UTC (rev 175063)
@@ -46,7 +46,6 @@
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.botinfo import BotInfo
from webkitpy.tool.bot.commitqueuetask import CommitQueueTask, CommitQueueTaskDelegate
-from webkitpy.tool.bot.expectedfailures import ExpectedFailures
from webkitpy.tool.bot.feeders import CommitQueueFeeder, EWSFeeder
from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter
from webkitpy.tool.bot.layouttestresultsreader import LayoutTestResultsReader
@@ -299,6 +298,10 @@
class CommitQueue(PatchProcessingQueue, StepSequenceErrorHandler, CommitQueueTaskDelegate):
+ def __init__(self, commit_queue_task_class=CommitQueueTask):
+ self._commit_queue_task_class = commit_queue_task_class
+ PatchProcessingQueue.__init__(self)
+
name = "commit-queue"
port_name = "mac-mountainlion"
@@ -307,7 +310,6 @@
def begin_work_queue(self):
PatchProcessingQueue.begin_work_queue(self)
self.committer_validator = CommitterValidator(self._tool)
- self._expected_failures = ExpectedFailures()
self._layout_test_results_reader = LayoutTestResultsReader(self._tool, self._port.results_directory(), self._log_directory())
def next_work_item(self):
@@ -315,7 +317,7 @@
def process_work_item(self, patch):
self._cc_watchers(patch.bug_id())
- task = CommitQueueTask(self, patch)
+ task = self._commit_queue_task_class(self, patch)
try:
if task.run():
self._did_pass(patch)
@@ -336,7 +338,12 @@
def _failing_tests_message(self, task, patch):
results = task.results_from_patch_test_run(patch)
- unexpected_failures = self._expected_failures.unexpected_failures_observed(results)
+ clean_results = task.results_from_test_run_without_patch(patch)
+
+ unexpected_failures = None
+ if results and clean_results:
+ unexpected_failures = list(set(results.failing_tests()) - set(clean_results.failing_tests()))
+ # FIXME: Can this ever happen?
if not unexpected_failures:
return None
if results and results.did_exceed_test_failure_limit():
Modified: trunk/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py (175062 => 175063)
--- trunk/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py 2014-10-22 19:44:03 UTC (rev 175062)
+++ trunk/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py 2014-10-22 20:04:37 UTC (rev 175063)
@@ -31,6 +31,7 @@
from webkitpy.common.checkout.scm import CheckoutNeedsUpdate
from webkitpy.common.checkout.scm.scm_mock import MockSCM
+from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models import test_results
@@ -295,7 +296,6 @@
MOCK: update_status: commit-queue Patch does not apply
MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nNew failing tests:
mock_test_name.html
-another_test_name.html
Full output: http://dummy_url'
MOCK: update_status: commit-queue Fail
MOCK: release_work_item: commit-queue 10000
@@ -303,14 +303,21 @@
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
- queue = CommitQueue()
+ class MockCommitQueueTask(CommitQueueTask):
+ def results_from_patch_test_run(self, patch):
+ return LayoutTestResults([test_results.TestResult("mock_test_name.html", failures=[test_failures.FailureTextMismatch()])], did_exceed_test_failure_limit=False)
+
+ def results_from_test_run_without_patch(self, patch):
+ return LayoutTestResults([], did_exceed_test_failure_limit=False)
+
+ queue = CommitQueue(MockCommitQueueTask)
+
def mock_run_webkit_patch(command):
if command[0] == 'clean' or command[0] == 'update':
# We want cleaning to succeed so we can error out on a step
# that causes the commit-queue to reject the patch.
return
- queue._expected_failures.unexpected_failures_observed = lambda results: ["mock_test_name.html", "another_test_name.html"]
raise ScriptError('MOCK script error')
queue.run_webkit_patch = mock_run_webkit_patch