Title: [144255] trunk/Tools
Revision: 144255
Author: [email protected]
Date: 2013-02-27 18:29:04 -0800 (Wed, 27 Feb 2013)

Log Message

Merge PageLoadingPerfTest into ReplayPerfTest
https://bugs.webkit.org/show_bug.cgi?id=111027

Reviewed by Dirk Pranke.

Merged the two classes.

We should really move ahead with bug 100991 and get rid of the PageLoadingPerfTest
part of ReplayPerfTest. (A sketch of the merged measurement loop follows the
annotations below.)

* Scripts/webkitpy/performance_tests/perftest.py:
(ReplayPerfTest): Moved _FORCE_GC_FILE here.
(ReplayPerfTest.__init__):
(ReplayPerfTest._run_with_driver): Moved from PageLoadingPerfTest.
(ReplayPerfTest.run_single): Load the GC page first, as PageLoadingPerfTest.run_single did.
* Scripts/webkitpy/performance_tests/perftest_unittest.py:
(TestReplayPerfTest):
(TestReplayPerfTest.test_run_single.run_test):
(TestReplayPerfTest.test_run_single): Make sure test_time is passed down properly from output.
(TestReplayPerfTest.test_run_with_driver_accumulates_results): Renamed from
TestPageLoadingPerfTest.test_run.
(TestReplayPerfTest.test_run_with_driver_accumulates_results.mock_run_signle):
(TestReplayPerfTest.test_run_with_driver_accumulates_memory_results): Renamed from
TestPageLoadingPerfTest.test_run_with_memory_output.
(TestReplayPerfTest.test_run_with_driver_accumulates_memory_results.mock_run_signle):
(TestReplayPerfTest.test_prepare_calls_run_single):
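
For reference, here is a minimal, self-contained sketch of the measurement loop that
moved into ReplayPerfTest._run_with_driver. The PerfTestMetric stub and the
run_with_driver helper below are simplified stand-ins for illustration, not
webkitpy's real classes:

    # Simplified stand-in for webkitpy's PerfTestMetric; illustration only.
    class PerfTestMetric(object):
        def __init__(self, name):
            self._name = name
            self._values = []

        def append(self, value):
            self._values.append(value)

        def has_values(self):
            return bool(self._values)

    # Sketch of the sampling loop now in ReplayPerfTest._run_with_driver: run 20
    # iterations, drop the first (warm-up) sample, and accumulate Time (ms),
    # Malloc, and JSHeap metrics from each driver output.
    def run_with_driver(test, driver, time_out_ms):
        times, malloc, js_heap = PerfTestMetric('Time'), PerfTestMetric('Malloc'), PerfTestMetric('JSHeap')
        for i in range(20):
            output = test.run_single(driver, test.test_path(), time_out_ms)
            if not output or test.run_failed(output):
                return None
            if i == 0:
                continue  # The first run only warms caches; its numbers are dropped.
            times.append(output.test_time * 1000)  # test_time is in seconds.
            for metric, result in (output.measurements or {}).items():
                (malloc if metric == 'Malloc' else js_heap).append(result)
        return [metric for metric in (times, malloc, js_heap) if metric.has_values()]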

Modified Paths

trunk/Tools/ChangeLog
trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py
trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py

Diff

Modified: trunk/Tools/ChangeLog (144254 => 144255)


--- trunk/Tools/ChangeLog	2013-02-28 02:24:16 UTC (rev 144254)
+++ trunk/Tools/ChangeLog	2013-02-28 02:29:04 UTC (rev 144255)
@@ -1,3 +1,32 @@
+2013-02-27  Ryosuke Niwa  <[email protected]>
+
+        Merge PageLoadingPerfTest into ReplayPerfTest
+        https://bugs.webkit.org/show_bug.cgi?id=111027
+
+        Reviewed by Dirk Pranke.
+
+        Merged the two classes.
+
+        We should really move ahead with bug 100991 and get rid of the PageLoadingPerfTest
+        part of ReplayPerfTest.
+
+        * Scripts/webkitpy/performance_tests/perftest.py:
+        (ReplayPerfTest): Moved _FORCE_GC_FILE here.
+        (ReplayPerfTest.__init__):
+        (ReplayPerfTest._run_with_driver): Moved from PageLoadingPerfTest.
+        (ReplayPerfTest.run_single): Load the GC page first, as PageLoadingPerfTest.run_single did.
+        * Scripts/webkitpy/performance_tests/perftest_unittest.py:
+        (TestReplayPerfTest):
+        (TestReplayPerfTest.test_run_single.run_test):
+        (TestReplayPerfTest.test_run_single): Make sure test_time is passed down properly from output.
+        (TestReplayPerfTest.test_run_with_driver_accumulates_results): Renamed from
+        TestPageLoadingPerfTest.test_run.
+        (TestReplayPerfTest.test_run_with_driver_accumulates_results.mock_run_signle):
+        (TestReplayPerfTest.test_run_with_driver_accumulates_memory_results): Renamed from
+        TestPageLoadingPerfTest.test_run_with_memory_output.
+        (TestReplayPerfTest.test_run_with_driver_accumulates_memory_results.mock_run_signle):
+        (TestReplayPerfTest.test_prepare_calls_run_single):
+
 2013-02-27  Eric Seidel  <[email protected]>
 
         Add --additional-drt-flag option to run-perf-tests to make it easy to test runtime options

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py (144254 => 144255)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py	2013-02-28 02:24:16 UTC (rev 144254)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py	2013-02-28 02:29:04 UTC (rev 144255)
@@ -291,44 +291,6 @@
         return results if results and not test_failed else None
 
 
-class PageLoadingPerfTest(PerfTest):
-    _FORCE_GC_FILE = 'resources/force-gc.html'
-
-    def __init__(self, port, test_name, test_path):
-        super(PageLoadingPerfTest, self).__init__(port, test_name, test_path)
-        self.force_gc_test = self._port.host.filesystem.join(self._port.perf_tests_dir(), self._FORCE_GC_FILE)
-
-    def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False):
-        # Force GC to prevent pageload noise. See https://bugs.webkit.org/show_bug.cgi?id=98203
-        super(PageLoadingPerfTest, self).run_single(driver, self.force_gc_test, time_out_ms, False)
-        return super(PageLoadingPerfTest, self).run_single(driver, test_path, time_out_ms, should_run_pixel_test)
-
-    def _run_with_driver(self, driver, time_out_ms):
-        times = PerfTestMetric('Time')
-        malloc = PerfTestMetric('Malloc')
-        js_heap = PerfTestMetric('JSHeap')
-
-        for i in range(0, 20):
-            output = self.run_single(driver, self.test_path(), time_out_ms)
-            if not output or self.run_failed(output):
-                return None
-            if i == 0:
-                continue
-
-            times.append(output.test_time * 1000)
-            if not output.measurements:
-                continue
-
-            for metric, result in output.measurements.items():
-                assert metric == 'Malloc' or metric == 'JSHeap'
-                if metric == 'Malloc':
-                    malloc.append(result)
-                else:
-                    js_heap.append(result)
-
-        return filter(lambda metric: metric.has_values(), [times, malloc, js_heap])
-
-
 class ReplayServer(object):
     def __init__(self, archive, record):
         self._process = None
@@ -364,9 +326,12 @@
         self.stop()
 
 
-class ReplayPerfTest(PageLoadingPerfTest):
+class ReplayPerfTest(PerfTest):
+    _FORCE_GC_FILE = 'resources/force-gc.html'
+
     def __init__(self, port, test_name, test_path):
         super(ReplayPerfTest, self).__init__(port, test_name, test_path)
+        self.force_gc_test = self._port.host.filesystem.join(self._port.perf_tests_dir(), self._FORCE_GC_FILE)
 
     def _start_replay_server(self, archive, record):
         try:
@@ -405,6 +370,31 @@
 
         return True
 
+    def _run_with_driver(self, driver, time_out_ms):
+        times = PerfTestMetric('Time')
+        malloc = PerfTestMetric('Malloc')
+        js_heap = PerfTestMetric('JSHeap')
+
+        for i in range(0, 20):
+            output = self.run_single(driver, self.test_path(), time_out_ms)
+            if not output or self.run_failed(output):
+                return None
+            if i == 0:
+                continue
+
+            times.append(output.test_time * 1000)
+            if not output.measurements:
+                continue
+
+            for metric, result in output.measurements.items():
+                assert metric == 'Malloc' or metric == 'JSHeap'
+                if metric == 'Malloc':
+                    malloc.append(result)
+                else:
+                    js_heap.append(result)
+
+        return filter(lambda metric: metric.has_values(), [times, malloc, js_heap])
+
     def run_single(self, driver, url, time_out_ms, record=False):
         server = self._start_replay_server(self._archive_path, record)
         if not server:
@@ -418,6 +408,8 @@
                 return None
 
             _log.debug("Web page replay started. Loading the page.")
+            # Force GC to prevent pageload noise. See https://bugs.webkit.org/show_bug.cgi?id=98203
+            super(ReplayPerfTest, self).run_single(driver, self.force_gc_test, time_out_ms, False)
             output = super(ReplayPerfTest, self).run_single(driver, self._url, time_out_ms, should_run_pixel_test=True)
             if self.run_failed(output):
                 return None

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py (144254 => 144255)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py	2013-02-28 02:24:16 UTC (rev 144254)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py	2013-02-28 02:29:04 UTC (rev 144255)
@@ -37,7 +37,6 @@
 from webkitpy.layout_tests.port.test import TestDriver
 from webkitpy.layout_tests.port.test import TestPort
 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
-from webkitpy.performance_tests.perftest import PageLoadingPerfTest
 from webkitpy.performance_tests.perftest import PerfTest
 from webkitpy.performance_tests.perftest import PerfTestMetric
 from webkitpy.performance_tests.perftest import PerfTestFactory
@@ -234,89 +233,7 @@
         self.assertEqual(actual_logs, '')
 
 
-class TestPageLoadingPerfTest(unittest.TestCase):
-    class MockDriver(object):
-        def __init__(self, values, test, measurements=None):
-            self._values = values
-            self._index = 0
-            self._test = test
-            self._measurements = measurements
-
-        def run_test(self, input, stop_when_done):
-            if input.test_name == self._test.force_gc_test:
-                return
-            value = self._values[self._index]
-            self._index += 1
-            if isinstance(value, str):
-                return DriverOutput('some output', image=None, image_hash=None, audio=None, error=value)
-            else:
-                return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=self._values[self._index - 1], measurements=self._measurements)
-
-    def test_run(self):
-        port = MockPort()
-        test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
-        driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test)
-        output_capture = OutputCapture()
-        output_capture.capture_output()
-        try:
-            metrics = test._run_with_driver(driver, None)
-        finally:
-            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
-
-        self.assertEqual(actual_stdout, '')
-        self.assertEqual(actual_stderr, '')
-        self.assertEqual(actual_logs, '')
-
-        self.assertEqual(len(metrics), 1)
-        self.assertEqual(metrics[0].metric(), 'Time')
-        self.assertEqual(metrics[0].to_dict(), {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
-            'values': [float(i * 1000) for i in range(2, 21)]})
-
-    def test_run_with_memory_output(self):
-        port = MockPort()
-        test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
-        memory_results = {'Malloc': 10, 'JSHeap': 5}
-        self.maxDiff = None
-        driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test, memory_results)
-        output_capture = OutputCapture()
-        output_capture.capture_output()
-        try:
-            metrics = test._run_with_driver(driver, None)
-        finally:
-            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
-
-        self.assertEqual(actual_stdout, '')
-        self.assertEqual(actual_stderr, '')
-        self.assertEqual(actual_logs, '')
-
-        self.assertEqual(len(metrics), 3)
-        self.assertEqual(metrics[0].metric(), 'Time')
-        self.assertEqual(metrics[0].to_dict(), {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
-            'values': [float(i * 1000) for i in range(2, 21)]})
-        self.assertEqual(metrics[1].metric(), 'Malloc')
-        self.assertEqual(metrics[1].to_dict(), {'max': 10, 'avg': 10.0, 'median': 10, 'min': 10, 'stdev': 0.0, 'unit': 'bytes',
-            'values': [float(10)] * 19})
-        self.assertEqual(metrics[2].metric(), 'JSHeap')
-        self.assertEqual(metrics[2].to_dict(), {'max': 5, 'avg': 5.0, 'median': 5, 'min': 5, 'stdev': 0.0, 'unit': 'bytes',
-            'values': [float(5)] * 19})
-
-    def test_run_with_bad_output(self):
-        output_capture = OutputCapture()
-        output_capture.capture_output()
-        try:
-            port = MockPort()
-            test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
-            driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], test)
-            self.assertIsNone(test._run_with_driver(driver, None))
-        finally:
-            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
-        self.assertEqual(actual_stdout, '')
-        self.assertEqual(actual_stderr, '')
-        self.assertEqual(actual_logs, 'error: some-test\nsome error\n')
-
-
 class TestReplayPerfTest(unittest.TestCase):
-
     class ReplayTestPort(MockPort):
         def __init__(self, custom_run_test=None):
 
@@ -363,7 +280,7 @@
             loaded_pages.append(test_input)
             self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
             return DriverOutput('actual text', 'actual image', 'actual checksum',
-                audio=None, crash=False, timeout=False, error=False)
+                audio=None, crash=False, timeout=False, error=False, test_time=12345)
 
         test, port = self._setup_test(run_test)
         test._archive_path = '/path/some-dir/some-test.wpr'
@@ -371,7 +288,8 @@
 
         try:
             driver = port.create_driver(worker_number=1, no_timeout=True)
-            self.assertTrue(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100))
+            output = test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100)
+            self.assertTrue(output)
         finally:
             actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
 
@@ -382,6 +300,7 @@
         self.assertEqual(actual_stderr, '')
         self.assertEqual(actual_logs, '')
         self.assertEqual(port.host.filesystem.read_binary_file('/path/some-dir/some-test-actual.png'), 'actual image')
+        self.assertEqual(output.test_time, 12345)
 
     def test_run_single_fails_without_webpagereplay(self):
         output_capture = OutputCapture()
@@ -401,6 +320,66 @@
         self.assertEqual(actual_stderr, '')
         self.assertEqual(actual_logs, "Web page replay didn't start.\n")
 
+    def test_run_with_driver_accumulates_results(self):
+        port = MockPort()
+        test, port = self._setup_test()
+        counter = [0]
+
+        def mock_run_signle(drive, path, timeout):
+            counter[0] += 1
+            return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=counter[0], measurements={})
+
+        test.run_single = mock_run_signle
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            driver = port.create_driver(worker_number=1, no_timeout=True)
+            metrics = test._run_with_driver(driver, None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, '')
+
+        self.assertEqual(len(metrics), 1)
+        self.assertEqual(metrics[0].metric(), 'Time')
+        self.assertEqual(metrics[0].to_dict(), {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
+            'values': [float(i * 1000) for i in range(2, 21)]})
+
+    def test_run_with_driver_accumulates_memory_results(self):
+        port = MockPort()
+        test, port = self._setup_test()
+        counter = [0]
+
+        def mock_run_signle(drive, path, timeout):
+            counter[0] += 1
+            return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=counter[0], measurements={'Malloc': 10, 'JSHeap': 5})
+
+        test.run_single = mock_run_signle
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            driver = port.create_driver(worker_number=1, no_timeout=True)
+            metrics = test._run_with_driver(driver, None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, '')
+
+        self.assertEqual(len(metrics), 3)
+        self.assertEqual(metrics[0].metric(), 'Time')
+        self.assertEqual(metrics[0].to_dict(), {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
+            'values': [float(i * 1000) for i in range(2, 21)]})
+        self.assertEqual(metrics[1].metric(), 'Malloc')
+        self.assertEqual(metrics[1].to_dict(), {'max': 10, 'avg': 10.0, 'median': 10, 'min': 10, 'stdev': 0.0, 'unit': 'bytes',
+            'values': [float(10)] * 19})
+        self.assertEqual(metrics[2].metric(), 'JSHeap')
+        self.assertEqual(metrics[2].to_dict(), {'max': 5, 'avg': 5.0, 'median': 5, 'min': 5, 'stdev': 0.0, 'unit': 'bytes',
+            'values': [float(5)] * 19})
+
     def test_prepare_fails_when_wait_until_ready_fails(self):
         output_capture = OutputCapture()
         output_capture.capture_output()
@@ -495,6 +474,7 @@
         self.assertEqual(actual_stderr, '')
         self.assertEqual(actual_logs, "Preparing replay for some-test.replay\nFailed to prepare a replay for some-test.replay\n")
 
+
 class TestPerfTestFactory(unittest.TestCase):
     def test_regular_test(self):
         test = PerfTestFactory.create_perf_test(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
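
As a quick sanity check on the aggregates asserted by the renamed tests above: the
mocked run_single returns test_time counting 1..20 seconds, the warm-up sample is
discarded, and 19 values remain. This hypothetical snippet verifies the expected
statistics:

    # Hypothetical check of the aggregates asserted by the renamed tests.
    # Mocked test_time counts 1..20 s; the warm-up sample (1 s) is dropped.
    values = [float(i * 1000) for i in range(2, 21)]    # 2000.0 .. 20000.0 ms
    assert min(values) == 2000.0 and max(values) == 20000.0
    assert sum(values) / len(values) == 11000.0         # avg over 19 samples
    assert sorted(values)[len(values) // 2] == 11000.0  # median of 19 values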