Title: [104856] trunk/Tools
Revision: 104856
Author: rn...@webkit.org
Date: 2012-01-12 14:08:41 -0800 (Thu, 12 Jan 2012)

Log Message

Need a script to run tests in PerformanceTests
https://bugs.webkit.org/show_bug.cgi?id=76132

Reviewed by Adam Barth.

Add run-perf-tests to run performance tests, including tests that use parser/resources/runner.js.
Unfortunately, there isn't an easy way to tell which test uses which output format, so hard-code
the directories whose tests use the Chromium perf-bot style (only the inspector perf tests for now).

All test outputs are re-formatted to match the Chromium perf-bot style.
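Inspector tests already print perf-bot "RESULT" lines, which the runner passes through as-is. As a
rough sketch of the conversion for runner.js-style tests (not the committed implementation; the
helper name is made up, and the sample numbers mirror the some-parser.html fixture in the unit
test below), the avg/median/stdev/min/max block could be turned into "RESULT" lines like this:

    import re

    def format_parser_result_for_perf_bot(category, test_name, output_text):
        # Collect the five summary scores printed by parser/resources/runner.js.
        keys = ['avg', 'median', 'stdev', 'min', 'max']
        score_regex = re.compile(r'^(' + '|'.join(keys) + r')\s+([0-9.]+)')
        results = {}
        for line in output_text.split('\n'):
            match = score_regex.match(line)
            if match:
                results[match.group(1)] = match.group(2)
        if set(results.keys()) != set(keys):
            return None  # a missing score is treated as a failed test
        # The first line carries the average; the remaining scores go on a second line.
        lines = ['RESULT %s: %s= %s ms' % (category, test_name, results['avg'])]
        lines.append(', '.join('%s= %s ms' % (key, results[key]) for key in keys[1:]))
        return '\n'.join(lines)

    # format_parser_result_for_perf_bot('Parser', 'some-parser',
    #     'avg 1100\nmedian 1101\nstdev 11\nmin 1080\nmax 1120\n')
    # -> 'RESULT Parser: some-parser= 1100 ms'
    #    'median= 1101 ms, stdev= 11 ms, min= 1080 ms, max= 1120 ms'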

* Scripts/run-inspector-perf-tests.py:
* Scripts/run-perf-tests: Added.
* Scripts/webkitpy/performance_tests/perftestsrunner.py:
(PerfTestsRunner):
(PerfTestsRunner.__init__):
(PerfTestsRunner._collect_tests):
(PerfTestsRunner.run):
(PerfTestsRunner._print_status):
(PerfTestsRunner._run_tests_set):
(PerfTestsRunner._process_chromium_style_test_result):
(PerfTestsRunner._should_ignore_line_in_parser_test_result):
(PerfTestsRunner._process_parser_test_result):
(PerfTestsRunner._run_single_test):
* Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
(MainTest.TestDriver.run_test):
(create_runner):
(run_test):
(test_run_test_set):
(test_run_test_set_for_parser_tests):
(test_collect_tests):

Modified Paths

trunk/Tools/ChangeLog
trunk/Tools/Scripts/run-inspector-perf-tests.py
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py

Added Paths

trunk/Tools/Scripts/run-perf-tests

Diff

Modified: trunk/Tools/ChangeLog (104855 => 104856)


--- trunk/Tools/ChangeLog	2012-01-12 21:39:54 UTC (rev 104855)
+++ trunk/Tools/ChangeLog	2012-01-12 22:08:41 UTC (rev 104856)
@@ -1,3 +1,37 @@
+2012-01-12  Ryosuke Niwa  <rn...@webkit.org>
+
+        Need a script to run tests in PerformanceTests
+        https://bugs.webkit.org/show_bug.cgi?id=76132
+
+        Reviewed by Adam Barth.
+
+        Add run-perf-tests to run performance tests, including tests that use parser/resources/runner.js.
+        Unfortunately, there isn't an easy way to tell which test uses which output format, so hard-code
+        the directories whose tests use the Chromium perf-bot style (only the inspector perf tests for now).
+
+        All test outputs are re-formatted to match the Chromium perf-bot style.
+
+        * Scripts/run-inspector-perf-tests.py:
+        * Scripts/run-perf-tests: Added.
+        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+        (PerfTestsRunner):
+        (PerfTestsRunner.__init__):
+        (PerfTestsRunner._collect_tests):
+        (PerfTestsRunner.run):
+        (PerfTestsRunner._print_status):
+        (PerfTestsRunner._run_tests_set):
+        (PerfTestsRunner._process_chromium_style_test_result):
+        (PerfTestsRunner._should_ignore_line_in_parser_test_result):
+        (PerfTestsRunner._process_parser_test_result):
+        (PerfTestsRunner._run_single_test):
+        * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
+        (MainTest.TestDriver.run_test):
+        (create_runner):
+        (run_test):
+        (test_run_test_set):
+        (test_run_test_set_for_parser_tests):
+        (test_collect_tests):
+
 2012-01-12  Dirk Pranke  <dpra...@chromium.org>
 
         webkitpy: clean up port unit tests in preparation for making port_name mandatory

Modified: trunk/Tools/Scripts/run-inspector-perf-tests.py (104855 => 104856)


--- trunk/Tools/Scripts/run-inspector-perf-tests.py	2012-01-12 21:39:54 UTC (rev 104855)
+++ trunk/Tools/Scripts/run-inspector-perf-tests.py	2012-01-12 22:08:41 UTC (rev 104856)
@@ -38,4 +38,4 @@
 
 if '__main__' == __name__:
     logging.basicConfig(level=logging.INFO, format="%(message)s")
-    sys.exit(PerfTestsRunner('inspector').run())
+    sys.exit(PerfTestsRunner(args=['inspector']).run())

Added: trunk/Tools/Scripts/run-perf-tests (0 => 104856)


--- trunk/Tools/Scripts/run-perf-tests	                        (rev 0)
+++ trunk/Tools/Scripts/run-perf-tests	2012-01-12 22:08:41 UTC (rev 104856)
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run performance tests."""
+
+import logging
+import sys
+
+from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
+
+_log = logging.getLogger(__name__)
+
+if '__main__' == __name__:
+    logging.basicConfig(level=logging.INFO, format="%(message)s")
+    sys.exit(PerfTestsRunner().run())
Property changes on: trunk/Tools/Scripts/run-perf-tests
___________________________________________________________________

Added: svn:executable

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py (104855 => 104856)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py	2012-01-12 21:39:54 UTC (rev 104855)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py	2012-01-12 22:08:41 UTC (rev 104856)
@@ -44,10 +44,9 @@
 
 class PerfTestsRunner(object):
     _perf_tests_base_dir = 'PerformanceTests'
-    _result_regex = re.compile('^RESULT .*$')
+    _test_directories_for_chromium_style_tests = ['inspector']
 
-    def __init__(self, perf_tests_dir, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
-        self._perf_tests_dir = perf_tests_dir
+    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
         self._buildbot_output = buildbot_output
         self._options, self._args = self._parse_args(args)
         self._host = Host()
@@ -55,6 +54,7 @@
         self._port = self._host.port_factory.get(self._options.platform, self._options)
         self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
         self._webkit_base_dir_len = len(self._port.webkit_base())
+        self._base_path = self._host.filesystem.join(self._port.webkit_base(), self._perf_tests_base_dir)
 
     def _parse_args(self, args=None):
         print_options = printing.print_options()
@@ -75,15 +75,13 @@
         option_list = (perf_option_list + print_options)
         return optparse.OptionParser(option_list=option_list).parse_args(args)
 
-    def _collect_tests(self, webkit_base, filesystem=None):
+    def _collect_tests(self):
         """Return the list of tests found."""
 
         def _is_test_file(filesystem, dirname, filename):
             return filename.endswith('.html')
 
-        filesystem = filesystem or self._host.filesystem
-        base_dir = filesystem.join(webkit_base, self._perf_tests_base_dir, self._perf_tests_dir)
-        return find_files.find(filesystem, base_dir, paths=self._args, file_filter=_is_test_file)
+        return find_files.find(self._host.filesystem, self._base_path, paths=self._args, file_filter=_is_test_file)
 
     def run(self):
         if self._options.help_printing:
@@ -99,18 +97,26 @@
         # in a try/finally to ensure that we clean up the logging configuration.
         unexpected = -1
         try:
-            tests = self._collect_tests(self._port.webkit_base())
+            tests = self._collect_tests()
             unexpected = self._run_tests_set(tests, self._port)
         finally:
             self._printer.cleanup()
 
         return unexpected
 
+    def _print_status(self, tests, expected, unexpected):
+        if len(tests) == expected + unexpected:
+            status = "Ran %d tests" % len(tests)
+        else:
+            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
+        if unexpected:
+            status += " (%d didn't run)" % unexpected
+        self._printer.write(status)
+
     def _run_tests_set(self, tests, port):
         result_count = len(tests)
         expected = 0
         unexpected = 0
-        self._printer.print_one_line_summary(result_count, 0, 0)
         driver_need_restart = False
         driver = None
 
@@ -122,21 +128,78 @@
             if not driver:
                 driver = port.create_driver(worker_number=1)
 
-            test_failed, driver_need_restart = self._run_single_test(test, driver)
+            relative_test_path = self._host.filesystem.relpath(test, self._base_path)
+            self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests)))
+
+            is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests
+            test_failed, driver_need_restart = self._run_single_test(test, driver, is_chromium_style)
             if test_failed:
                 unexpected = unexpected + 1
             else:
                 expected = expected + 1
 
-            self._printer.print_one_line_summary(result_count, expected, unexpected)
+            self._printer.write('')
 
         if driver:
             driver.stop()
 
         return unexpected
 
-    def _run_single_test(self, test, driver):
+    _inspector_result_regex = re.compile('^RESULT .*$')
+
+    def _process_chromium_style_test_result(self, test, output):
         test_failed = False
+        got_a_result = False
+        for line in re.split('\n', output.text):
+            if self._inspector_result_regex.match(line):
+                self._buildbot_output.write("%s\n" % line)
+                got_a_result = True
+            elif not len(line) == 0:
+                test_failed = True
+                self._printer.write("%s" % line)
+        return test_failed or not got_a_result
+
+    _lines_to_ignore_in_parser_result = [
+        re.compile(r'^Running \d+ times$'),
+        re.compile(r'^Ignoring warm-up '),
+        re.compile(r'^\d+$'),
+    ]
+
+    def _should_ignore_line_in_parser_test_result(self, line):
+        if not line:
+            return True
+        for regex in self._lines_to_ignore_in_parser_result:
+            if regex.match(line):
+                return True
+        return False
+
+    def _process_parser_test_result(self, test, output):
+        got_a_result = False
+        test_failed = False
+        filesystem = self._host.filesystem
+        category, test_name = filesystem.split(filesystem.relpath(test, self._base_path))
+        test_name = filesystem.splitext(test_name)[0]
+        results = {}
+        keys = ['avg', 'median', 'stdev', 'min', 'max']
+        score_regex = re.compile(r'^(' + r'|'.join(keys) + r')\s+([0-9\.]+)')
+        for line in re.split('\n', output.text):
+            score = score_regex.match(line)
+            if score:
+                results[score.group(1)] = score.group(2)
+                continue
+
+            if not self._should_ignore_line_in_parser_test_result(line):
+                test_failed = True
+                self._printer.write("%s" % line)
+
+        if test_failed or set(keys) != set(results.keys()):
+            return True
+        self._buildbot_output.write('RESULT %s: %s= %s ms\n' % (category, test_name, results['avg']))
+        self._buildbot_output.write(', '.join(['%s= %s ms' % (key, results[key]) for key in keys[1:]]) + '\n')
+        return False
+
+    def _run_single_test(self, test, driver, is_chromium_style):
+        test_failed = False
         driver_need_restart = False
         output = driver.run_test(DriverInput(test, self._options.time_out_ms, None, False))
 
@@ -151,18 +214,16 @@
             driver_need_restart = True
             test_failed = True
         else:
-            got_a_result = False
-            for line in re.split('\n', output.text):
-                if self._result_regex.match(line):
-                    self._buildbot_output.write("%s\n" % line)
-                    got_a_result = True
-                elif not len(line) == 0:
-                    test_failed = True
-                    self._printer.write("%s" % line)
-            test_failed = test_failed or not got_a_result
+            if is_chromium_style:
+                test_failed = self._process_chromium_style_test_result(test, output)
+            else:
+                test_failed = self._process_parser_test_result(test, output)
 
         if len(output.error):
             self._printer.write('error:\n%s' % output.error)
             test_failed = True
 
+        if test_failed:
+            self._printer.write('FAILED')
+
         return test_failed, driver_need_restart

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py (104855 => 104856)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py	2012-01-12 21:39:54 UTC (rev 104855)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py	2012-01-12 22:08:41 UTC (rev 104856)
@@ -48,16 +48,56 @@
             text = ''
             timeout = False
             crash = False
-            if driver_input.test_name == 'pass.html':
+            if driver_input.test_name.endswith('pass.html'):
                 text = 'RESULT group_name: test_name= 42 ms'
-            elif driver_input.test_name == 'timeout.html':
+            elif driver_input.test_name.endswith('timeout.html'):
                 timeout = True
-            elif driver_input.test_name == 'failed.html':
+            elif driver_input.test_name.endswith('failed.html'):
                 text = None
-            elif driver_input.test_name == 'tonguey.html':
+            elif driver_input.test_name.endswith('tonguey.html'):
                 text = 'we are not expecting an output from perf tests but RESULT blablabla'
-            elif driver_input.test_name == 'crash.html':
+            elif driver_input.test_name.endswith('crash.html'):
                 crash = True
+            elif driver_input.test_name.endswith('event-target-wrapper.html'):
+                text = """Running 20 times
+Ignoring warm-up run (1502)
+1504
+1505
+1510
+1504
+1507
+1509
+1510
+1487
+1488
+1472
+1472
+1488
+1473
+1472
+1475
+1487
+1486
+1486
+1475
+1471
+
+avg 1489.05
+median 1487
+stdev 14.46
+min 1471
+max 1510
+"""
+            elif driver_input.test_name.endswith('some-parser.html'):
+                text = """Running 20 times
+Ignoring warm-up run (1115)
+
+avg 1100
+median 1101
+stdev 11
+min 1080
+max 1120
+"""
             return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
 
         def stop(self):
@@ -66,12 +106,12 @@
     def create_runner(self, buildbot_output=None):
         buildbot_output = buildbot_output or array_stream.ArrayStream()
         regular_output = array_stream.ArrayStream()
-        return PerfTestsRunner('', regular_output, buildbot_output, args=[])
+        return PerfTestsRunner(regular_output, buildbot_output, args=[])
 
     def run_test(self, test_name):
         runner = self.create_runner()
         driver = MainTest.TestDriver()
-        return runner._run_single_test(test_name, driver)
+        return runner._run_single_test(test_name, driver, is_chromium_style=True)
 
     def test_run_passing_test(self):
         test_failed, driver_need_restart = self.run_test('pass.html')
@@ -106,20 +146,38 @@
     def test_run_test_set(self):
         buildbot_output = array_stream.ArrayStream()
         runner = self.create_runner(buildbot_output)
+        runner._base_path = '/test.checkout/PerformanceTests'
         port = MainTest.TestPort()
-        tests = ['pass.html', 'silent.html', 'failed.html', 'tonguey.html', 'timeout.html', 'crash.html']
+        dirname = runner._base_path + '/inspector/'
+        tests = [dirname + 'pass.html', dirname + 'silent.html', dirname + 'failed.html',
+            dirname + 'tonguey.html', dirname + 'timeout.html', dirname + 'crash.html']
         unexpected_result_count = runner._run_tests_set(tests, port)
         self.assertEqual(unexpected_result_count, len(tests) - 1)
         self.assertEqual(len(buildbot_output.get()), 1)
         self.assertEqual(buildbot_output.get()[0], 'RESULT group_name: test_name= 42 ms\n')
 
+    def test_run_test_set_for_parser_tests(self):
+        buildbot_output = array_stream.ArrayStream()
+        runner = self.create_runner(buildbot_output)
+        runner._base_path = '/test.checkout/PerformanceTests/'
+        port = MainTest.TestPort()
+        tests = [runner._base_path + 'Bindings/event-target-wrapper.html', runner._base_path + 'Parser/some-parser.html']
+        unexpected_result_count = runner._run_tests_set(tests, port)
+        self.assertEqual(unexpected_result_count, 0)
+        self.assertEqual(buildbot_output.get()[0], 'RESULT Bindings: event-target-wrapper= 1489.05 ms\n')
+        self.assertEqual(buildbot_output.get()[1], 'median= 1487 ms, stdev= 14.46 ms, min= 1471 ms, max= 1510 ms\n')
+        self.assertEqual(buildbot_output.get()[2], 'RESULT Parser: some-parser= 1100 ms\n')
+        self.assertEqual(buildbot_output.get()[3], 'median= 1101 ms, stdev= 11 ms, min= 1080 ms, max= 1120 ms\n')
+
     def test_collect_tests(self):
         runner = self.create_runner()
-        webkit_base = '/test.checkout'
+        runner._base_path = '/test.checkout/PerformanceTests'
         filesystem = MockFileSystem()
-        filename = filesystem.join(webkit_base, 'PerformanceTests', 'a_file.html')
+        filename = filesystem.join(runner._base_path, 'inspector', 'a_file.html')
+        filesystem.maybe_make_directory(runner._base_path, 'inspector')
         filesystem.files[filename] = 'a content'
-        tests = runner._collect_tests(webkit_base, filesystem)
+        runner._host.filesystem = filesystem
+        tests = runner._collect_tests()
         self.assertEqual(len(tests), 1)
 
     def test_parse_args(self):