Diff
Modified: trunk/Tools/ChangeLog (174129 => 174130)
--- trunk/Tools/ChangeLog 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/ChangeLog 2014-09-30 22:58:25 UTC (rev 174130)
@@ -1,3 +1,90 @@
+2014-09-30 Jake Nielsen <[email protected]>
+
+ LayoutTestResults and ExpectedFailures should know about the
+ interrupted flag from the json results file
+ https://bugs.webkit.org/show_bug.cgi?id=137229
+
+ Reviewed by Daniel Bates.
+
+ Changes LayoutTestResults to use the interrupted flag instead of
+ counting failures.
+
+ * Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py:
+ (BuilderTest._install_fetch_build._mock_fetch_build):
+ (BuilderTest.test_latest_layout_test_results):
+ * Scripts/webkitpy/common/net/layouttestresults.py:
+ Removes notion of failure_limit_count, and adds
+ did_exceed_test_failure_limit.
+
+ (LayoutTestResults.results_from_string):
+ (LayoutTestResults.__init__):
+ (LayoutTestResults.did_exceed_test_failure_limit):
+ (LayoutTestResults): Deleted.
+ (LayoutTestResults.set_failure_limit_count): Deleted.
+ (LayoutTestResults.failure_limit_count): Deleted.
+ * Scripts/webkitpy/common/net/layouttestresults_unittest.py:
+ Removes unit test for failure_limit_count logic.
+
+ (LayoutTestResultsTest.test_set_failure_limit_count): Deleted.
+ * Scripts/webkitpy/common/net/resultsjsonparser.py:
+ Instead of providing a class method that returns a list of
+ TestResults objects, resultsjsonparser now provides a class
+ ParsedJSONResults that serves as an interface between the results.json
+ file and the rest of webkitpy.
+
+ (ParsedJSONResults):
+ (ParsedJSONResults.__init__):
+ (ParsedJSONResults.did_exceed_test_failure_limit):
+ (ParsedJSONResults.test_results):
+ (ResultsJSONParser): Deleted.
+ (ResultsJSONParser.parse_results_json): Deleted.
+ * Scripts/webkitpy/common/net/resultsjsonparser_unittest.py:
+ Updates the unit test so that it can test the new ParsedJSONResults
+ class.
+
+ (ParsedJSONResultsTest):
+ (test_basic):
+ (ResultsJSONParserTest): Deleted.
+ * Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py:
+ Updates the unit test to instantiate ResultsJSONParser objects rather
+ than simple lists of TestResult objects.
+
+ (MockCommitQueue.test_results):
+ (FailingTestCommitQueue.test_results):
+ (test_flaky_test_failure):
+ (test_failed_archive):
+ * Scripts/webkitpy/tool/bot/expectedfailures.py:
+ Updates ExpectedFailures to use the did_exceed_test_failure_limit
+ method rather than counting the number of failed tests.
+
+ (ExpectedFailures._should_trust):
+ * Scripts/webkitpy/tool/bot/expectedfailures_unittest.py:
+ Updates MockResults to more closely resemble the updated
+ LayoutTestResults class.
+
+ (MockResults.__init__):
+ (MockResults.did_exceed_test_failure_limit):
+ (ExpectedFailuresTest.test_can_trust_results):
+ (ExpectedFailuresTest.test_unexpected_failures_observed):
+ (ExpectedFailuresTest.test_unexpected_failures_observed_when_tree_is_hosed):
+ (MockResults.failure_limit_count): Deleted.
+ * Scripts/webkitpy/tool/bot/layouttestresultsreader.py:
+ Removes a FIXME regarding the use of the
+ NON_INTERACTIVE_FAILURE_LIMIT_COUNT value.
+
+ (LayoutTestResultsReader.results):
+ * Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py:
+ Updates test to not check the now-nonexistent failure_limit_count
+ method.
+
+ (test_missing_unit_test_results_path):
+ (test_layout_test_results):
+ * Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py:
+ Accounts for name change: ResultsJSONParserTest ->
+ ParsedJSONResultsTest.
+
+ (RebaselineTestTest.test_gather_baselines):
+
2014-09-30 Gyuyoung Kim <[email protected]>
[EFL] Rename TEST_THEME_DIR macro
Modified: trunk/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -48,7 +48,7 @@
is_green=build_number < 4
)
results = [self._mock_test_result(testname) for testname in failure(build_number)]
- layout_test_results = LayoutTestResults(results)
+ layout_test_results = LayoutTestResults(test_results=results, did_exceed_test_failure_limit=False)
def mock_layout_test_results():
return layout_test_results
build.layout_test_results = mock_layout_test_results
@@ -61,7 +61,7 @@
self._install_fetch_build(lambda build_number: ["test1", "test2"])
def test_latest_layout_test_results(self):
- self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults([self._mock_test_result(testname) for testname in ["test1", "test2"]])
+ self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults(test_results=[self._mock_test_result(testname) for testname in ["test1", "test2"]], did_exceed_test_failure_limit=False)
self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
self.assertTrue(self.builder.latest_layout_test_results())
Modified: trunk/Tools/Scripts/webkitpy/common/net/layouttestresults.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/common/net/layouttestresults.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/common/net/layouttestresults.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -28,7 +28,7 @@
import logging
-from webkitpy.common.net.resultsjsonparser import ResultsJSONParser
+from webkitpy.common.net.resultsjsonparser import ParsedJSONResults
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, SoupStrainer
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
@@ -50,28 +50,17 @@
def results_from_string(cls, string):
if not string:
return None
- test_results = ResultsJSONParser.parse_results_json(string)
- if not test_results:
- return None
- return cls(test_results)
+ parsed_results = ParsedJSONResults(string)
+ return cls(parsed_results.test_results(), parsed_results.did_exceed_test_failure_limit())
- def __init__(self, test_results):
- self._test_results = test_results
- self._failure_limit_count = None
+ def __init__(self, test_results, did_exceed_test_failure_limit):
self._unit_test_failures = []
+ self._test_results = test_results
+ self._did_exceed_test_failure_limit = did_exceed_test_failure_limit
- # FIXME: run-webkit-tests should store the --exit-after-N-failures value
- # (or some indication of early exit) somewhere in the results.json
- # file. Until it does, callers should set the limit to
- # --exit-after-N-failures value used in that run. Consumers of LayoutTestResults
- # may use that value to know if absence from the failure list means PASS.
- # https://bugs.webkit.org/show_bug.cgi?id=58481
- def set_failure_limit_count(self, limit):
- self._failure_limit_count = limit
+ def did_exceed_test_failure_limit(self):
+ return self._did_exceed_test_failure_limit
- def failure_limit_count(self):
- return self._failure_limit_count
-
def test_results(self):
return self._test_results
Modified: trunk/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -36,11 +36,6 @@
class LayoutTestResultsTest(unittest.TestCase):
- def test_set_failure_limit_count(self):
- results = LayoutTestResults([])
- self.assertIsNone(results.failure_limit_count())
- results.set_failure_limit_count(10)
- self.assertEqual(results.failure_limit_count(), 10)
def test_results_from_string(self):
self.assertIsNone(LayoutTestResults.results_from_string(None))
Modified: trunk/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/common/net/resultsjsonparser.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -137,9 +137,8 @@
return test_results.TestResult(self._test_name, self._failures())
-class ResultsJSONParser(object):
- @classmethod
- def parse_results_json(cls, json_string):
+class ParsedJSONResults(object):
+ def __init__(self, json_string):
if not json_results_generator.has_json_wrapper(json_string):
return None
@@ -149,7 +148,19 @@
json_results = []
for_each_test(json_dict['tests'], lambda test, result: json_results.append(JSONTestResult(test, result)))
- # FIXME: What's the short sexy python way to filter None?
- # I would use [foo.bar() for foo in foos if foo.bar()] but bar() is expensive.
- unexpected_failures = [result.test_result() for result in json_results if not result.did_pass_or_run_as_expected()]
- return filter(lambda a: a, unexpected_failures)
+ unexpected_failures = []
+ for json_result in json_results:
+ if json_result.did_pass_or_run_as_expected():
+ continue
+ test_result = json_result.test_result()
+ if test_result:
+ unexpected_failures.append(test_result)
+
+ self._test_results = unexpected_failures
+ self._did_exceed_test_failure_limit = json_dict["interrupted"]
+
+ def did_exceed_test_failure_limit(self):
+ return self._did_exceed_test_failure_limit
+
+ def test_results(self):
+ return self._test_results
Modified: trunk/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -28,12 +28,12 @@
import unittest2 as unittest
-from webkitpy.common.net.resultsjsonparser import ResultsJSONParser
+from webkitpy.common.net.resultsjsonparser import ParsedJSONResults
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
-class ResultsJSONParserTest(unittest.TestCase):
+class ParsedJSONResultsTest(unittest.TestCase):
# The real files have no whitespace, but newlines make this much more readable.
_example_full_results_json = """ADD_RESULTS({
@@ -72,6 +72,7 @@
}
}
},
+ "interrupted": true,
"skipped": 450,
"num_regressions": 15,
"layout_tests_dir": "\/b\/build\/slave\/Webkit_Mac10_5\/build\/src\/third_party\/WebKit\/LayoutTests",
@@ -83,10 +84,63 @@
"uses_expectations_file": true
});"""
+ _not_interrupted_example_full_results_json = """ADD_RESULTS({
+ "tests": {
+ "fast": {
+ "dom": {
+ "prototype-inheritance.html": {
+ "expected": "PASS",
+ "actual": "FAIL"
+ },
+ "prototype-banana.html": {
+ "expected": "FAIL",
+ "actual": "PASS"
+ },
+ "prototype-taco.html": {
+ "expected": "PASS",
+ "actual": "PASS FAIL"
+ },
+ "prototype-chocolate.html": {
+ "expected": "FAIL",
+ "actual": "FAIL"
+ },
+ "prototype-strawberry.html": {
+ "expected": "PASS",
+ "actual": "FAIL PASS"
+ }
+ }
+ },
+ "svg": {
+ "dynamic-updates": {
+ "SVGFEDropShadowElement-dom-stdDeviation-attr.html": {
+ "expected": "PASS",
+ "actual": "IMAGE",
+ "has_stderr": true
+ }
+ }
+ }
+ },
+ "interrupted": false,
+ "skipped": 450,
+ "num_regressions": 15,
+ "layout_tests_dir": "\/b\/build\/slave\/Webkit_Mac10_5\/build\/src\/third_party\/WebKit\/LayoutTests",
+ "version": 3,
+ "num_passes": 77,
+ "has_pretty_patch": false,
+ "fixable": 1220,
+ "num_flaky": 0,
+ "uses_expectations_file": true
+});"""
+
def test_basic(self):
expected_results = [
test_results.TestResult("svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html", [test_failures.FailureImageHashMismatch()], 0),
test_results.TestResult("fast/dom/prototype-inheritance.html", [test_failures.FailureTextMismatch(), test_failures.FailureImageHashMismatch(), test_failures.FailureAudioMismatch()], 0),
]
- results = ResultsJSONParser.parse_results_json(self._example_full_results_json)
- self.assertEqual(expected_results, results)
+ parsed_results = ParsedJSONResults(self._example_full_results_json)
+ self.assertEqual(expected_results, parsed_results.test_results())
+ self.assertTrue(parsed_results.did_exceed_test_failure_limit())
+
+ def test_not_interrupted(self):
+ parsed_results = ParsedJSONResults(self._not_interrupted_example_full_results_json)
+ self.assertFalse(parsed_results.did_exceed_test_failure_limit())
Modified: trunk/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -73,7 +73,7 @@
return ExpectedFailures()
def test_results(self):
- return None
+ return LayoutTestResults(test_results=[], did_exceed_test_failure_limit=True)
def report_flaky_tests(self, patch, flaky_results, results_archive):
flaky_tests = [result.filename for result in flaky_results]
@@ -110,9 +110,7 @@
# Doesn't make sense to ask for the test_results until the tests have run at least once.
assert(self._test_run_counter >= 0)
failures_for_run = self._test_failure_plan[self._test_run_counter]
- results = LayoutTestResults(map(self._mock_test_result, failures_for_run))
- # This makes the results trustable by ExpectedFailures.
- results.set_failure_limit_count(10)
+ results = LayoutTestResults(test_results=map(self._mock_test_result, failures_for_run), did_exceed_test_failure_limit=(len(self._test_failure_plan[self._test_run_counter]) >= 10))
return results
@@ -282,7 +280,7 @@
])
# CommitQueueTask will only report flaky tests if we successfully parsed
# results.json and returned a LayoutTestResults object, so we fake one.
- commit_queue.test_results = lambda: LayoutTestResults([])
+ commit_queue.test_results = lambda: LayoutTestResults(test_results=[], did_exceed_test_failure_limit=True)
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
@@ -313,7 +311,7 @@
None,
ScriptError("MOCK tests failure"),
])
- commit_queue.test_results = lambda: LayoutTestResults([])
+ commit_queue.test_results = lambda: LayoutTestResults(test_results=[], did_exceed_test_failure_limit=True)
# It's possible delegate to fail to archive layout tests, don't try to report
# flaky tests when that happens.
commit_queue.archive_last_test_results = lambda patch: None
Modified: trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -38,7 +38,7 @@
@classmethod
def _should_trust(cls, results):
- return bool(cls._has_failures(results) and results.failure_limit_count() and len(results.failing_tests()) < results.failure_limit_count())
+ return bool(results and not results.did_exceed_test_failure_limit())
def failures_were_expected(self, results):
if not self._is_trustworthy:
Modified: trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -32,12 +32,12 @@
class MockResults(object):
- def __init__(self, failing_tests=[], failure_limit=10):
+ def __init__(self, failing_tests=[], did_exceed_test_failure_limit=False):
self._failing_tests = failing_tests
- self._failure_limit_count = failure_limit
+ self._did_exceed_test_failure_limit = did_exceed_test_failure_limit
- def failure_limit_count(self):
- return self._failure_limit_count
+ def did_exceed_test_failure_limit(self):
+ return self._did_exceed_test_failure_limit
def failing_tests(self):
return self._failing_tests
@@ -49,12 +49,12 @@
def test_can_trust_results(self):
self._assert_can_trust(None, False)
- self._assert_can_trust(MockResults(failing_tests=[], failure_limit=None), False)
- self._assert_can_trust(MockResults(failing_tests=[], failure_limit=10), False)
- self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=None), False)
- self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=2), True)
- self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=1), False)
- self._assert_can_trust(MockResults(failing_tests=[1, 2], failure_limit=1), False)
+ self._assert_can_trust(MockResults(failing_tests=[], did_exceed_test_failure_limit=False), True)
+ self._assert_can_trust(MockResults(failing_tests=[], did_exceed_test_failure_limit=True), False)
+ self._assert_can_trust(MockResults(failing_tests=[1], did_exceed_test_failure_limit=False), True)
+ self._assert_can_trust(MockResults(failing_tests=[1], did_exceed_test_failure_limit=True), False)
+ self._assert_can_trust(MockResults(failing_tests=[1, 2], did_exceed_test_failure_limit=False), True)
+ self._assert_can_trust(MockResults(failing_tests=[1, 2], did_exceed_test_failure_limit=True), False)
def _assert_expected(self, expected_failures, failures, expected):
self.assertEqual(expected_failures.failures_were_expected(MockResults(failures)), expected)
@@ -79,17 +79,17 @@
failures.update(MockResults(['foo.html']))
self.assertEqual(failures.unexpected_failures_observed(MockResults(['foo.html', 'bar.html'])), set(['bar.html']))
self.assertEqual(failures.unexpected_failures_observed(MockResults(['baz.html'])), set(['baz.html']))
- unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], failure_limit=3)
+ unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], did_exceed_test_failure_limit=True)
self.assertEqual(failures.unexpected_failures_observed(unbounded_results), set(['baz.html', 'qux.html', 'taco.html']))
- unbounded_results_with_existing_failure = MockResults(['foo.html', 'baz.html', 'qux.html', 'taco.html'], failure_limit=4)
+ unbounded_results_with_existing_failure = MockResults(['foo.html', 'baz.html', 'qux.html', 'taco.html'], did_exceed_test_failure_limit=True)
self.assertEqual(failures.unexpected_failures_observed(unbounded_results_with_existing_failure), set(['baz.html', 'qux.html', 'taco.html']))
def test_unexpected_failures_observed_when_tree_is_hosed(self):
failures = ExpectedFailures()
- failures.update(MockResults(['foo.html', 'banana.html'], failure_limit=2))
+ failures.update(MockResults(['foo.html', 'banana.html'], did_exceed_test_failure_limit=True))
self.assertEqual(failures.unexpected_failures_observed(MockResults(['foo.html', 'bar.html'])), None)
self.assertEqual(failures.unexpected_failures_observed(MockResults(['baz.html'])), None)
- unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], failure_limit=3)
+ unbounded_results = MockResults(['baz.html', 'qux.html', 'taco.html'], did_exceed_test_failure_limit=True)
self.assertEqual(failures.unexpected_failures_observed(unbounded_results), None)
- unbounded_results_with_existing_failure = MockResults(['foo.html', 'baz.html', 'qux.html', 'taco.html'], failure_limit=4)
+ unbounded_results_with_existing_failure = MockResults(['foo.html', 'baz.html', 'qux.html', 'taco.html'], did_exceed_test_failure_limit=True)
self.assertEqual(failures.unexpected_failures_observed(unbounded_results_with_existing_failure), None)
Modified: trunk/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -70,10 +70,6 @@
layout_test_results = self._create_layout_test_results()
unit_test_results = self._create_unit_test_results()
if layout_test_results:
- # FIXME: This is used to detect if we had N failures due to
- # N tests failing, or if we hit the "exit-after-n-failures" limit.
- # These days we could just check for the "interrupted" key in results.json instead!
- layout_test_results.set_failure_limit_count(RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT)
if unit_test_results:
layout_test_results.add_unit_test_failures(unit_test_results)
return layout_test_results
Modified: trunk/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -68,7 +68,7 @@
def test_missing_unit_test_results_path(self):
host = MockHost()
reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
- reader._create_layout_test_results = lambda: LayoutTestResults([])
+ reader._create_layout_test_results = lambda: LayoutTestResults(test_results=[], did_exceed_test_failure_limit=False)
reader._create_unit_test_results = lambda: None
# layout_test_results shouldn't raise even if the unit tests xml file is missing.
self.assertIsNotNone(reader.results(), None)
@@ -81,10 +81,9 @@
self.assertIsNone(reader.results())
reader._read_file_contents = lambda path: ""
self.assertIsNone(reader.results())
- reader._create_layout_test_results = lambda: LayoutTestResults([])
+ reader._create_layout_test_results = lambda: LayoutTestResults(test_results=[], did_exceed_test_failure_limit=False)
results = reader.results()
self.assertIsNotNone(results)
- self.assertEqual(results.failure_limit_count(), 30) # This value matches RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT
def test_archive_last_layout_test_results(self):
host = MockHost()
Modified: trunk/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py (174129 => 174130)
--- trunk/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py 2014-09-30 22:56:08 UTC (rev 174129)
+++ trunk/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py 2014-09-30 22:58:25 UTC (rev 174130)
@@ -205,7 +205,7 @@
])
def test_gather_baselines(self):
- example_json = resultsjsonparser_unittest.ResultsJSONParserTest._example_full_results_json
+ example_json = resultsjsonparser_unittest.ParsedJSONResultsTest._example_full_results_json
results_json = json.loads(strip_json_wrapper(example_json))
server = RebaselineServer()
server._test_config = get_test_config()