Modified: trunk/Tools/ChangeLog (235754 => 235755)
--- trunk/Tools/ChangeLog 2018-09-06 20:42:48 UTC (rev 235754)
+++ trunk/Tools/ChangeLog 2018-09-06 20:47:55 UTC (rev 235755)
@@ -1,3 +1,17 @@
+2018-08-29 Dewei Zhu <[email protected]>
+
+ BenchmarkResults.format should support specifying depth of tests to show.
+ https://bugs.webkit.org/show_bug.cgi?id=189135
+
+ Reviewed by Ryosuke Niwa.
+
+ Added the option to specify the depth of tests to show.
+
+ * Scripts/webkitpy/benchmark_runner/benchmark_results.py:
+ (BenchmarkResults):
+ (BenchmarkResults.format): Added 'max_depth' option.
+ (BenchmarkResults._format_tests): Added 'max_depth' argument which limits the depth of tests to show; recursion stops once the remaining depth reaches zero.
+
+ * Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py:
+ (BenchmarkResultsTest.test_format_with_depth_limit): Added unit tests for 'max_depth'.
+
2018-09-06 Thomas Denney <[email protected]>
[WHLSL] Call arguments should be copied as soon as they are evaluated
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py (235754 => 235755)
--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py 2018-09-06 20:42:48 UTC (rev 235754)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py 2018-09-06 20:47:55 UTC (rev 235755)
@@ -48,11 +48,13 @@
self._lint_results(results)
self._results = self._aggregate_results(results)
- def format(self, scale_unit=True, show_iteration_values=False):
- return self._format_tests(self._results, scale_unit, show_iteration_values)
+ def format(self, scale_unit=True, show_iteration_values=False, max_depth=None):
+ return self._format_tests(self._results, scale_unit, show_iteration_values, max_depth)
@classmethod
- def _format_tests(cls, tests, scale_unit, show_iteration_values, indent=''):
+ def _format_tests(cls, tests, scale_unit, show_iteration_values, max_depth, indent=''):
+ if max_depth is not None and max_depth <= 0:
+ return ''
output = ''
config_name = 'current'
for test_name in sorted(tests.keys()):
@@ -73,7 +75,7 @@
output += aggregator_name + ':'
output += ' ' + cls._format_values(metric_name, metric[aggregator_name][config_name], scale_unit, show_iteration_values) + '\n'
if 'tests' in test:
- output += cls._format_tests(test['tests'], scale_unit, show_iteration_values, indent=(indent + ' ' * len(test_name)))
+ output += cls._format_tests(test['tests'], scale_unit, show_iteration_values, max_depth - 1 if max_depth else None, indent=(indent + ' ' * len(test_name)))
return output
@classmethod
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py (235754 => 235755)
--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py 2018-09-06 20:42:48 UTC (rev 235754)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py 2018-09-06 20:47:55 UTC (rev 235755)
@@ -55,6 +55,18 @@
SubTest2:Time: 5.0ms stdev=20.0%
'''[1:])
+ def test_format_with_depth_limit(self):
+ result = BenchmarkResults({'SomeTest': {
+ 'metrics': {'Time': ['Total', 'Arithmetic']},
+ 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': [1, 2, 3]}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [4, 5, 6]}}}}}})
+ self.assertEqual(result.format(max_depth=1), '''
+SomeTest:Time:Arithmetic: 3.0ms stdev=33.3%
+ :Time:Total: 7.0ms stdev=28.6%
+'''[1:])
+ self.assertEqual(result.format(max_depth=0), "")
+
def test_format_values_with_large_error(self):
self.assertEqual(BenchmarkResults._format_values('Runs', [1, 2, 3]), '2.0/s stdev=50.0%')
self.assertEqual(BenchmarkResults._format_values('Runs', [10, 20, 30]), '20/s stdev=50.0%')