Diff
Modified: trunk/Tools/ChangeLog (144509 => 144510)
--- trunk/Tools/ChangeLog 2013-03-01 22:53:30 UTC (rev 144509)
+++ trunk/Tools/ChangeLog 2013-03-01 22:55:03 UTC (rev 144510)
@@ -1,3 +1,28 @@
+2013-03-01 Ryosuke Niwa <[email protected]>
+
+ Don't use legacy test names when returning results from PerfTest.run
+ https://bugs.webkit.org/show_bug.cgi?id=111148
+
+ Reviewed by Dirk Pranke.
+
+ * Scripts/webkitpy/performance_tests/perftest.py:
+ (PerfTestMetric.name): Renamed from metric.
+ (PerfTest.test_name_without_file_extension): Extracted from legacy_chromium_bot_compatible_test_name.
+ (PerfTest.run): Use metric name instead of the legacy name to store iteration values.
+ (ChromiumStylePerfTest.parse_and_log_output): Use the metric name to store results as done in PerfTest.run.
+
+ * Scripts/webkitpy/performance_tests/perftest_unittest.py:
+ (TestPerfTestMetric.test_init_set_time_metric):
+ (TestPerfTestMetric.legacy_chromium_bot_compatible_test_name): Removed. The integration tests test this.
+ (TestReplayPerfTest.test_run_with_driver_accumulates_results):
+ (TestReplayPerfTest.test_run_with_driver_accumulates_memory_results):
+
+ * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+ (PerfTestsRunner.__init__):
+ (PerfTestsRunner._generate_results_dict): Updated to iterate over (test, metrics) pair. Use view_source_url
+ to obtain the trac URL instead of hard coding it.
+ (PerfTestsRunner._run_tests_set):
+
2013-03-01 Roger Fong <[email protected]>
Unreviewed. Add an extra project that assembles all project build logs on Windows into a single file.
Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py (144509 => 144510)
--- trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py 2013-03-01 22:53:30 UTC (rev 144509)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftest.py 2013-03-01 22:55:03 UTC (rev 144510)
@@ -59,17 +59,12 @@
self._unit = unit or self.metric_to_unit(metric)
self._metric = self.time_unit_to_metric(self._unit) if metric == 'Time' else metric
- def metric(self):
+ def name(self):
return self._metric
def has_values(self):
return bool(self._iterations)
- # FIXME: We don't need to support this anymore. Make outputs more human friendly.
- def legacy_chromium_bot_compatible_test_name(self, test_name_with_extension):
- test_name = re.sub(r'\.\w+$', '', test_name_with_extension)
- return test_name + ':' + self._metric
-
def append(self, value):
self._iterations.append(value)
@@ -99,6 +94,9 @@
def test_name(self):
return self._test_name
+ def test_name_without_file_extension(self):
+ return re.sub(r'\.\w+$', '', self.test_name())
+
def test_path(self):
return self._test_path
@@ -127,10 +125,10 @@
results = {}
for metric in metrics:
- legacy_test_name = metric.legacy_chromium_bot_compatible_test_name(self.test_name())
- results[legacy_test_name] = metric.iteration_values()
+ results[metric.name()] = metric.iteration_values()
if should_log:
- self.log_statistics(legacy_test_name.replace(':', ': ').replace('/', ': '),
+ legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ')
+ self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(),
metric.iteration_values(), metric.unit())
return results
@@ -275,7 +273,7 @@
resultLine = ChromiumStylePerfTest._chromium_style_result_regex.match(line)
if resultLine:
# FIXME: Store the unit
- results[self.test_name() + ':' + resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
+ results[resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
_log.info(line)
elif not len(line) == 0:
test_failed = True
Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py (144509 => 144510)
--- trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py 2013-03-01 22:53:30 UTC (rev 144509)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py 2013-03-01 22:55:03 UTC (rev 144510)
@@ -55,16 +55,10 @@
self.assertEqual(PerfTestMetric('JSHeap', iterations=[1, 2, 3, 4, 5]).unit(), 'bytes')
def test_init_set_time_metric(self):
- self.assertEqual(PerfTestMetric('Time', 'ms').metric(), 'Time')
- self.assertEqual(PerfTestMetric('Time', 'fps').metric(), 'FrameRate')
- self.assertEqual(PerfTestMetric('Time', 'runs/s').metric(), 'Runs')
+ self.assertEqual(PerfTestMetric('Time', 'ms').name(), 'Time')
+ self.assertEqual(PerfTestMetric('Time', 'fps').name(), 'FrameRate')
+ self.assertEqual(PerfTestMetric('Time', 'runs/s').name(), 'Runs')
- def test_legacy_chromium_bot_compatible_test_name(self):
- self.assertEqual(PerfTestMetric('Time').legacy_chromium_bot_compatible_test_name('test'), 'test:Time')
- self.assertEqual(PerfTestMetric('Malloc').legacy_chromium_bot_compatible_test_name('test'), 'test:Malloc')
- self.assertEqual(PerfTestMetric('JSHeap').legacy_chromium_bot_compatible_test_name('test'), 'test:JSHeap')
- self.assertEqual(PerfTestMetric('FontSize', unit='em').legacy_chromium_bot_compatible_test_name('test'), 'test:FontSize')
-
def test_has_values(self):
self.assertFalse(PerfTestMetric('Time').has_values())
self.assertTrue(PerfTestMetric('Time', iterations=[1]).has_values())
@@ -316,7 +310,7 @@
self.assertEqual(actual_logs, '')
self.assertEqual(len(metrics), 1)
- self.assertEqual(metrics[0].metric(), 'Time')
+ self.assertEqual(metrics[0].name(), 'Time')
self.assertEqual(metrics[0].iteration_values(), [float(i * 1000) for i in range(2, 21)])
def test_run_with_driver_accumulates_memory_results(self):
@@ -342,11 +336,11 @@
self.assertEqual(actual_logs, '')
self.assertEqual(len(metrics), 3)
- self.assertEqual(metrics[0].metric(), 'Time')
+ self.assertEqual(metrics[0].name(), 'Time')
self.assertEqual(metrics[0].iteration_values(), [float(i * 1000) for i in range(2, 21)])
- self.assertEqual(metrics[1].metric(), 'Malloc')
+ self.assertEqual(metrics[1].name(), 'Malloc')
self.assertEqual(metrics[1].iteration_values(), [float(10)] * 19)
- self.assertEqual(metrics[2].metric(), 'JSHeap')
+ self.assertEqual(metrics[2].name(), 'JSHeap')
self.assertEqual(metrics[2].iteration_values(), [float(5)] * 19)
def test_prepare_fails_when_wait_until_ready_fails(self):
Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py (144509 => 144510)
--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py 2013-03-01 22:53:30 UTC (rev 144509)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py 2013-03-01 22:55:03 UTC (rev 144510)
@@ -37,6 +37,7 @@
from webkitpy.common import find_files
from webkitpy.common.checkout.scm.detection import SCMDetector
+from webkitpy.common.config.urls import view_source_url
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.performance_tests.perftest import PerfTestFactory
@@ -66,10 +67,10 @@
self._host.initialize_scm()
self._webkit_base_dir_len = len(self._port.webkit_base())
self._base_path = self._port.perf_tests_dir()
- self._results = {}
self._timestamp = time.time()
self._utc_timestamp = datetime.datetime.utcnow()
+
@staticmethod
def _parse_args(args=None):
def _expand_path(option, opt_str, value, parser):
@@ -233,10 +234,6 @@
self._port.show_results_html_file(results_page_path)
def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
- contents = {'tests': {}}
- if description:
- contents['description'] = description
-
revisions = {}
for (name, path) in self._port.repository_paths():
scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
@@ -244,43 +241,38 @@
revisions[name] = {'revision': str(revision), 'timestamp': scm.timestamp_of_latest_commit(path, revision)}
meta_info = {
+ 'description': description,
'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
'platform': platform,
'revisions': revisions,
'builderName': builder_name,
'buildNumber': int(build_number) if build_number else None}
+ contents = {'tests': {}}
for key, value in meta_info.items():
if value:
contents[key] = value
- # FIXME: Make this function shorter once we've transitioned to use perf.webkit.org.
- for metric_full_name, iteration_values in self._results.iteritems():
- if not isinstance(iteration_values, list): # We can't report results without individual measurements.
- continue
+ for test, metrics in self._results:
+ for metric_name, iteration_values in metrics.iteritems():
+ if not isinstance(iteration_values, list): # We can't report results without individual measurements.
+ continue
- assert metric_full_name.count(':') <= 1
- test_full_name, _, metric = metric_full_name.partition(':')
+ tests = contents['tests']
+ path = test.test_name_without_file_extension().split('/')
+ for i in range(0, len(path)):
+ is_last_token = i + 1 == len(path)
+ url = view_source_url('PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
+ tests.setdefault(path[i], {'url': url})
+ current_test = tests[path[i]]
+ if is_last_token:
+ current_test.setdefault('metrics', {})
+ assert metric_name not in current_test['metrics']
+ current_test['metrics'][metric_name] = {'current': iteration_values}
+ else:
+ current_test.setdefault('tests', {})
+ tests = current_test['tests']
- tests = contents['tests']
- path = test_full_name.split('/')
- for i in range(0, len(path)):
- # FIXME: We shouldn't assume HTML extension.
- is_last_token = i + 1 == len(path)
- url = 'http://trac.webkit.org/browser/trunk/PerformanceTests/' + '/'.join(path[0:i + 1])
- if is_last_token:
- url += '.html'
-
- tests.setdefault(path[i], {'url': url})
- current_test = tests[path[i]]
- if is_last_token:
- current_test.setdefault('metrics', {})
- assert metric not in current_test['metrics']
- current_test['metrics'][metric] = {'current': iteration_values}
- else:
- current_test.setdefault('tests', {})
- tests = current_test['tests']
-
return contents
@staticmethod
@@ -357,13 +349,14 @@
def _run_tests_set(self, tests):
result_count = len(tests)
failures = 0
+ self._results = []
for i, test in enumerate(tests):
_log.info('Running %s (%d of %d)' % (test.test_name(), i + 1, len(tests)))
start_time = time.time()
- new_results = test.run(self._options.time_out_ms)
- if new_results:
- self._results.update(new_results)
+ metrics = test.run(self._options.time_out_ms)
+ if metrics:
+ self._results.append((test, metrics))
else:
failures += 1
_log.error('FAILED')