Title: [159617] trunk/Tools
Revision: 159617
Author: [email protected]
Date: 2013-11-21 05:22:44 -0800 (Thu, 21 Nov 2013)

Log Message

PerfTestRunner._generate_results_dict shouldn't depend on test objects
https://bugs.webkit.org/show_bug.cgi?id=124623

Removed the dependency on test objects from results JSON generation.
This allows a single test.run call to return metrics for multiple tests.

Reviewed by Antti Koivisto.

* Scripts/webkitpy/performance_tests/perftest.py:
(PerfTestMetric.__init__): Takes the test path and test name.
(PerfTestMetric.path): Added.
(PerfTestMetric.test_file_name): Added.
(PerfTest.run): Accumulate PerfTestMetric objects instead of raw values.
(PerfTest._ensure_metrics): Instantiate PerfTestMetric with the test path and test name.
The path is going to have the names of subtests at the end once we support them.
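
For illustration, here is a minimal usage sketch (not code from this patch) of the
revised PerfTestMetric interface; the path list and file name are the same
placeholder values the unit tests below use:

    # Sketch only: assumes webkitpy is importable on the Python path.
    from webkitpy.performance_tests.perftest import PerfTestMetric

    # New signature: (path, test_file_name, metric, unit=None, iterations=None).
    metric = PerfTestMetric(['some', 'test'], 'some/test.html', 'Time', iterations=[1, 2, 3])

    metric.name()            # 'Time' (unit defaults to 'ms' for Time metrics)
    metric.path()            # ['some', 'test']; will later carry subtest names too
    metric.test_file_name()  # 'some/test.html'
    metric.has_values()      # True, because iterations were supplied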

* Scripts/webkitpy/performance_tests/perftest_unittest.py:
(TestPerfTestMetric.test_init_set_missing_unit): Specify the test path and test name.
(TestPerfTestMetric.test_init_set_time_metric): Ditto.
(TestPerfTestMetric.test_has_values): Ditto.
(TestPerfTestMetric.test_append): Ditto.

* Scripts/webkitpy/performance_tests/perftestsrunner.py:
(_generate_results_dict): Only use metrics.
(_run_tests_set): Accumulate metrics as opposed to (test, metrics) pairs.
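
For reference, a sketch (illustrative values only) of the nested structure that
_generate_results_dict now builds under contents['tests'] for a single
PerfTestMetric whose path is ['some', 'test'] and whose metric name is 'Time';
the 'url' strings and iteration numbers below are placeholders:

    # Each intermediate path component gets a 'tests' child; the final
    # component gets a 'metrics' entry keyed by metric name, with the grouped
    # iteration values stored under 'current'.
    expected_tests = {
        'some': {
            'url': '<url for the some directory>',
            'tests': {
                'test': {
                    'url': '<url for some/test.html>',
                    'metrics': {
                        'Time': {'current': [[1, 2, 3], [4, 5, 6]]},
                    },
                },
            },
        },
    }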

Modified Paths

trunk/Tools/ChangeLog
trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py
trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py

Diff

Modified: trunk/Tools/ChangeLog (159616 => 159617)


--- trunk/Tools/ChangeLog	2013-11-21 12:26:06 UTC (rev 159616)
+++ trunk/Tools/ChangeLog	2013-11-21 13:22:44 UTC (rev 159617)
@@ -1,3 +1,31 @@
+2013-11-21  Ryosuke Niwa  <[email protected]>
+
+        PerfTestRunner._generate_results_dict shouldn't depend on test objects
+        https://bugs.webkit.org/show_bug.cgi?id=124623
+
+        Removed the dependency on test objects from results JSON generation.
+        This allows a single test.run call to return metrics for multiple tests.
+
+        Reviewed by Antti Koivisto.
+
+        * Scripts/webkitpy/performance_tests/perftest.py:
+        (PerfTestMetric.__init__): Takes the test path and test name.
+        (PerfTestMetric.path): Added.
+        (PerfTestMetric.test_file_name): Added.
+        (PerfTest.run): Accumulate PerfTestMetric objects instead of raw values.
+        (PerfTest._ensure_metrics): Instantiate PerfTestMetric with the test path and test name.
+        The path is going to have the names of subtests at the end once we support them.
+
+        * Scripts/webkitpy/performance_tests/perftest_unittest.py:
+        (TestPerfTestMetric.test_init_set_missing_unit): Specify the test path and test name.
+        (TestPerfTestMetric.test_init_set_time_metric): Ditto.
+        (TestPerfTestMetric.test_has_values): Ditto.
+        (TestPerfTestMetric.test_append): Ditto.
+
+        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+        (_generate_results_dict): Only use metrics.
+        (_run_tests_set): Accumulate metrics as opposed to (test, metrics) pairs.
+
 2013-11-21  Manuel Rego Casasnovas  <[email protected]>
 
         REGRESSION(r159599): webkitdirs.pm spits out warnings at lines 851 and 852

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py (159616 => 159617)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py	2013-11-21 12:26:06 UTC (rev 159616)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py	2013-11-21 13:22:44 UTC (rev 159617)
@@ -55,15 +55,23 @@
 
 
 class PerfTestMetric(object):
-    def __init__(self, metric, unit=None, iterations=None):
+    def __init__(self, path, test_file_name, metric, unit=None, iterations=None):
         # FIXME: Fix runner.js to report correct metric names
         self._iterations = iterations or []
         self._unit = unit or self.metric_to_unit(metric)
         self._metric = self.time_unit_to_metric(self._unit) if metric == 'Time' else metric
+        self._path = path
+        self._test_file_name = test_file_name
 
     def name(self):
         return self._metric
 
+    def path(self):
+        return self._path
+
+    def test_file_name(self):
+        return self._test_file_name
+
     def has_values(self):
         return bool(self._iterations)
 
@@ -132,10 +140,10 @@
         if should_log and self._description:
             _log.info('DESCRIPTION: %s' % self._description)
 
-        results = {}
+        results = []
         for metric_name in self._ordered_metrics_name:
             metric = self._metrics[metric_name]
-            results[metric.name()] = metric.grouped_iteration_values()
+            results.append(metric)
             if should_log:
                 legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ')
                 self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(),
@@ -193,7 +201,7 @@
 
     def _ensure_metrics(self, metric_name, unit=None):
         if metric_name not in self._metrics:
-            self._metrics[metric_name] = PerfTestMetric(metric_name, unit)
+            self._metrics[metric_name] = PerfTestMetric(self.test_name_without_file_extension().split('/'), self._test_name, metric_name, unit)
             self._ordered_metrics_name.append(metric_name)
         return self._metrics[metric_name]
 

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py (159616 => 159617)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py	2013-11-21 12:26:06 UTC (rev 159616)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py	2013-11-21 13:22:44 UTC (rev 159617)
@@ -50,22 +50,22 @@
 
 class TestPerfTestMetric(unittest.TestCase):
     def test_init_set_missing_unit(self):
-        self.assertEqual(PerfTestMetric('Time', iterations=[1, 2, 3, 4, 5]).unit(), 'ms')
-        self.assertEqual(PerfTestMetric('Malloc', iterations=[1, 2, 3, 4, 5]).unit(), 'bytes')
-        self.assertEqual(PerfTestMetric('JSHeap', iterations=[1, 2, 3, 4, 5]).unit(), 'bytes')
+        self.assertEqual(PerfTestMetric(['some', 'test'], 'some/test.html', 'Time', iterations=[1, 2, 3, 4, 5]).unit(), 'ms')
+        self.assertEqual(PerfTestMetric(['some', 'test'], 'some/test.html', 'Malloc', iterations=[1, 2, 3, 4, 5]).unit(), 'bytes')
+        self.assertEqual(PerfTestMetric(['some', 'test'], 'some/test.html', 'JSHeap', iterations=[1, 2, 3, 4, 5]).unit(), 'bytes')
 
     def test_init_set_time_metric(self):
-        self.assertEqual(PerfTestMetric('Time', 'ms').name(), 'Time')
-        self.assertEqual(PerfTestMetric('Time', 'fps').name(), 'FrameRate')
-        self.assertEqual(PerfTestMetric('Time', 'runs/s').name(), 'Runs')
+        self.assertEqual(PerfTestMetric(['some', 'test'], 'some/test.html', 'Time', 'ms').name(), 'Time')
+        self.assertEqual(PerfTestMetric(['some', 'test'], 'some/test.html', 'Time', 'fps').name(), 'FrameRate')
+        self.assertEqual(PerfTestMetric(['some', 'test'], 'some/test.html', 'Time', 'runs/s').name(), 'Runs')
 
     def test_has_values(self):
-        self.assertFalse(PerfTestMetric('Time').has_values())
-        self.assertTrue(PerfTestMetric('Time', iterations=[1]).has_values())
+        self.assertFalse(PerfTestMetric(['some', 'test'], 'some/test.html', 'Time').has_values())
+        self.assertTrue(PerfTestMetric(['some', 'test'], 'some/test.html', 'Time', iterations=[1]).has_values())
 
     def test_append(self):
-        metric = PerfTestMetric('Time')
-        metric2 = PerfTestMetric('Time')
+        metric = PerfTestMetric(['some', 'test'], 'some/test.html', 'Time')
+        metric2 = PerfTestMetric(['some', 'test'], 'some/test.html', 'Time')
         self.assertFalse(metric.has_values())
         self.assertFalse(metric2.has_values())
 

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py (159616 => 159617)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py	2013-11-21 12:26:06 UTC (rev 159616)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py	2013-11-21 13:22:44 UTC (rev 159617)
@@ -261,26 +261,22 @@
             if value:
                 contents[key] = value
 
-        for test, metrics in self._results:
-            for metric_name, iteration_values in metrics.iteritems():
-                if not isinstance(iteration_values, list):  # We can't reports results without individual measurements.
-                    continue
+        for metric in self._results:
+            tests = contents['tests']
+            path = metric.path()
+            for i in range(0, len(path)):
+                is_last_token = i + 1 == len(path)
+                url = view_source_url('PerformanceTests/' + (metric.test_file_name() if is_last_token else '/'.join(path[0:i + 1])))
+                tests.setdefault(path[i], {'url': url})
+                current_test = tests[path[i]]
+                if is_last_token:
+                    current_test.setdefault('metrics', {})
+                    assert metric.name() not in current_test['metrics']
+                    current_test['metrics'][metric.name()] = {'current': metric.grouped_iteration_values()}
+                else:
+                    current_test.setdefault('tests', {})
+                    tests = current_test['tests']
 
-                tests = contents['tests']
-                path = test.test_name_without_file_extension().split('/')
-                for i in range(0, len(path)):
-                    is_last_token = i + 1 == len(path)
-                    url = view_source_url('PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
-                    tests.setdefault(path[i], {'url': url})
-                    current_test = tests[path[i]]
-                    if is_last_token:
-                        current_test.setdefault('metrics', {})
-                        assert metric_name not in current_test['metrics']
-                        current_test['metrics'][metric_name] = {'current': iteration_values}
-                    else:
-                        current_test.setdefault('tests', {})
-                        tests = current_test['tests']
-
         return contents
 
     @staticmethod
@@ -348,7 +344,7 @@
             start_time = time.time()
             metrics = test.run(self._options.time_out_ms)
             if metrics:
-                self._results.append((test, metrics))
+                self._results += metrics
             else:
                 failures += 1
                 _log.error('FAILED')