Title: [124349] trunk/Tools
Revision
124349
Author
[email protected]
Date
2012-08-01 11:23:25 -0700 (Wed, 01 Aug 2012)

Log Message

run-perf-tests throws an exception when the output json is malformed
https://bugs.webkit.org/show_bug.cgi?id=92887

Reviewed by Dirk Pranke.

Catch exceptions and gracefully fail. Also split _generate_json into smaller methods.

* Scripts/webkitpy/performance_tests/perftestsrunner.py:
(PerfTestsRunner):
(PerfTestsRunner.run):
(PerfTestsRunner._generate_output): Extracted from _generate_json.
(PerfTestsRunner._merge_source_json): Ditto; catch all exceptions since there are too many
exceptions to consider here.
(PerfTestsRunner._merge_outputs): Ditto.
(PerfTestsRunner._generate_output_files): Extracted from _generate_json.
* Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
(_test_run_with_json_output): Don't assert logs when we expect a non-zero exit code.
(create_runner_and_setup_results_template): Extracted from test_run_generates_results_page.
(test_run_generates_results_page):
(test_run_with_bad_output_json): Added.
(test_run_with_bad_json_source): Added.
(test_run_with_upload_json):

Modified Paths

Diff

Modified: trunk/Tools/ChangeLog (124348 => 124349)


--- trunk/Tools/ChangeLog	2012-08-01 18:17:42 UTC (rev 124348)
+++ trunk/Tools/ChangeLog	2012-08-01 18:23:25 UTC (rev 124349)
@@ -1,3 +1,28 @@
+2012-08-01  Ryosuke Niwa  <[email protected]>
+
+        run-perf-tests throws an exception when the output json is malformed
+        https://bugs.webkit.org/show_bug.cgi?id=92887
+
+        Reviewed by Dirk Pranke.
+
+        Catch exceptions and gracefully fail. Also split _generate_json into smaller methods.
+
+        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+        (PerfTestsRunner):
+        (PerfTestsRunner.run):
+        (PerfTestsRunner._generate_output): Extracted from _generate_json.
+        (PerfTestsRunner._merge_source_json): Ditto; catch all exceptions since there are too many
+        exceptions to consider here.
+        (PerfTestsRunner._merge_outputs): Ditto.
+        (PerfTestsRunner._generate_output_files): Extracted from _generate_json.
+        * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
+        (_test_run_with_json_output): Don't assert logs when we expect a non-zero exit code.
+        (create_runner_and_setup_results_template): Extracted from test_run_generates_results_page.
+        (test_run_generates_results_page):
+        (test_run_with_bad_output_json): Added.
+        (test_run_with_bad_json_source): Added.
+        (test_run_with_upload_json):
+
 2012-08-01  Thiago Marcos P. Santos  <[email protected]>
 
         Regression(r124135): nrwt: --verbose logging does not work right on windows

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py (124348 => 124349)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py	2012-08-01 18:17:42 UTC (rev 124348)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py	2012-08-01 18:23:25 UTC (rev 124349)
@@ -49,10 +49,11 @@
 
 class PerfTestsRunner(object):
     _default_branch = 'webkit-trunk'
-    _EXIT_CODE_BAD_BUILD = -1
-    _EXIT_CODE_BAD_JSON = -2
-    _EXIT_CODE_FAILED_UPLOADING = -3
-    _EXIT_CODE_BAD_PREPARATION = -4
+    EXIT_CODE_BAD_BUILD = -1
+    EXIT_CODE_BAD_SOURCE_JSON = -2
+    EXIT_CODE_BAD_MERGE = -3
+    EXIT_CODE_FAILED_UPLOADING = -4
+    EXIT_CODE_BAD_PREPARATION = -5
 
     def __init__(self, args=None, port=None):
         self._options, self._args = PerfTestsRunner._parse_args(args)
@@ -142,14 +143,14 @@
     def run(self):
         if not self._port.check_build(needs_http=False):
             _log.error("Build not up to date for %s" % self._port._path_to_driver())
-            return self._EXIT_CODE_BAD_BUILD
+            return self.EXIT_CODE_BAD_BUILD
 
         tests = self._collect_tests()
         _log.info("Running %d tests" % len(tests))
 
         for test in tests:
             if not test.prepare(self._options.time_out_ms):
-                return self._EXIT_CODE_BAD_PREPARATION
+                return self.EXIT_CODE_BAD_PREPARATION
 
         unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
 
@@ -157,58 +158,63 @@
         if self._options.output_json_path:
             # FIXME: Add --branch or auto-detect the branch we're in
             test_results_server = options.test_results_server
-            branch = self._default_branch if test_results_server else None
-            build_number = int(options.build_number) if options.build_number else None
 
-            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
-                not test_results_server,
-                branch, options.platform, options.builder_name, build_number) and not unexpected:
-                return self._EXIT_CODE_BAD_JSON
+            output = self._generate_output(self._timestamp, options.platform, options.builder_name, options.build_number)
 
+            if options.source_json_path:
+                output = self._merge_source_json(options.source_json_path, output)
+                if not output:
+                    return self.EXIT_CODE_BAD_SOURCE_JSON
+
+            if not test_results_server:
+                output = self._merge_outputs(self._options.output_json_path, output)
+                if not output:
+                    return self.EXIT_CODE_BAD_MERGE
+
+            self._generate_output_files(self._options.output_json_path, output, not test_results_server)
+
             if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
-                return self._EXIT_CODE_FAILED_UPLOADING
+                return self.EXIT_CODE_FAILED_UPLOADING
 
         return unexpected
 
-    def _generate_json(self, timestamp, output_json_path, source_json_path, should_generate_results_page,
-        branch, platform, builder_name, build_number):
-
-        contents = {'timestamp': int(timestamp), 'results': self._results}
+    def _generate_output(self, timestamp, platform, builder_name, build_number):
+        contents = {'results': self._results}
         for (name, path) in self._port.repository_paths():
             contents[name + '-revision'] = self._host.scm().svn_revision(path)
 
-        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
+        for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
+            'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
             if value:
                 contents[key] = value
 
+        return contents
+
+    def _merge_source_json(self, source_json_path, output):
+        try:
+            source_json_file = self._host.filesystem.open_text_file_for_reading(source_json_path)
+            source_json = json.load(source_json_file)
+            return dict(source_json.items() + output.items())
+        except Exception, error:
+            _log.error("Failed to merge source JSON file %s: %s" % (source_json_path, error))
+        return None
+
+    def _merge_outputs(self, output_json_path, output):
+        if not self._host.filesystem.isfile(output_json_path):
+            return [output]
+        try:
+            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
+            return existing_outputs + [output]
+        except Exception, error:
+            _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
+        return None
+
+    def _generate_output_files(self, output_json_path, output, should_generate_results_page):
         filesystem = self._host.filesystem
-        succeeded = False
-        if source_json_path:
-            try:
-                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
-                source_json = json.load(source_json_file)
-                contents = dict(source_json.items() + contents.items())
-                succeeded = True
-            except IOError, error:
-                _log.error("Failed to read %s: %s" % (source_json_path, error))
-            except ValueError, error:
-                _log.error("Failed to parse %s: %s" % (source_json_path, error))
-            except TypeError, error:
-                _log.error("Failed to merge JSON files: %s" % error)
-            if not succeeded:
-                return False
 
-        if should_generate_results_page:
-            if filesystem.isfile(output_json_path):
-                existing_contents = json.loads(filesystem.read_text_file(output_json_path))
-                existing_contents.append(contents)
-                contents = existing_contents
-            else:
-                contents = [contents]
+        json_output = json.dumps(output)
+        filesystem.write_text_file(output_json_path, json_output)
 
-        serialized_contents = json.dumps(contents)
-        filesystem.write_text_file(output_json_path, serialized_contents)
-
         if should_generate_results_page:
             jquery_path = filesystem.join(self._port.perf_tests_dir(), 'Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js')
             jquery = filesystem.read_text_file(jquery_path)
@@ -217,12 +223,10 @@
             template = filesystem.read_text_file(template_path)
 
             results_page = template.replace('<?WebKitPerfTestRunnerInsertionPoint?>',
-                '<script>%s</script><script id="json">%s</script>' % (jquery, serialized_contents))
+                '<script>%s</script><script id="json">%s</script>' % (jquery, json_output))
 
             filesystem.write_text_file(filesystem.splitext(output_json_path)[0] + '.html', results_page)
 
-        return True
-
     def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
         uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
         try:

Modified: trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py (124348 => 124349)


--- trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py	2012-08-01 18:17:42 UTC (rev 124348)
+++ trunk/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py	2012-08-01 18:23:25 UTC (rev 124349)
@@ -246,16 +246,17 @@
         finally:
             stdout, stderr, logs = output_capture.restore_output()
 
-        self.assertEqual(logs, '\n'.join([
-            'Running 2 tests',
-            'Running Bindings/event-target-wrapper.html (1 of 2)',
-            'RESULT Bindings: event-target-wrapper= 1489.05 ms',
-            'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
-            '',
-            'Running inspector/pass.html (2 of 2)',
-            'RESULT group_name: test_name= 42 ms',
-            '',
-            '']))
+        if not expected_exit_code:
+            self.assertEqual(logs, '\n'.join([
+                'Running 2 tests',
+                'Running Bindings/event-target-wrapper.html (1 of 2)',
+                'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+                'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+                '',
+                'Running inspector/pass.html (2 of 2)',
+                'RESULT group_name: test_name= 42 ms',
+                '',
+                '']))
 
         return uploaded[0]
 
@@ -269,19 +270,21 @@
             "inspector/pass.html:group_name:test_name": 42},
             "webkit-revision": 5678, "branch": "webkit-trunk"})
 
+    def create_runner_and_setup_results_template(self, args):
+        runner, port = self.create_runner(args)
+        filesystem = port.host.filesystem
+        filesystem.write_text_file(runner._base_path + '/resources/results-template.html', 'BEGIN<?WebKitPerfTestRunnerInsertionPoint?>END')
+        filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
+        return runner, port
+
     def test_run_generates_results_page(self):
-        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
         filesystem = port.host.filesystem
-        filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
-            'BEGIN<?WebKitPerfTestRunnerInsertionPoint?>END')
-        filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js',
-            'jquery content')
-
         self._test_run_with_json_output(runner, filesystem)
 
         expected_entry = {"timestamp": 123456789, "results": {"Bindings/event-target-wrapper":
             {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
-            "inspector/pass.html:group_name:test_name": 42}, "webkit-revision": 5678}
+            "inspector/pass.html:group_name:test_name": 42}, "webkit-revision": 5678, "branch": "webkit-trunk"}
 
         self.maxDiff = None
         json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
@@ -295,6 +298,13 @@
         self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
             'BEGIN<script>jquery content</script><script id="json">' + json_output + '</script>END')
 
+    def test_run_with_bad_output_json(self):
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+        port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+        port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+
     def test_run_with_json_source(self):
         runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
             '--source-json-path=/mock-checkout/source.json', '--test-results-server=some.host'])
@@ -307,6 +317,15 @@
             "webkit-revision": 5678, "branch": "webkit-trunk",
             "key": "value"})
 
+    def test_run_with_bad_json_source(self):
+        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+            '--source-json-path=/mock-checkout/source.json', '--test-results-server=some.host'])
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+        port.host.filesystem.write_text_file('/mock-checkout/source.json', 'bad json')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+        port.host.filesystem.write_text_file('/mock-checkout/source.json', '["another bad json"]')
+        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+
     def test_run_with_multiple_repositories(self):
         runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
             '--test-results-server=some.host'])
@@ -328,7 +347,7 @@
         self.assertEqual(generated_json['builder-name'], 'builder1')
         self.assertEqual(generated_json['build-number'], 123)
 
-        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=-3)
+        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
 
     def test_upload_json(self):
         runner, port = self.create_runner()
_______________________________________________
webkit-changes mailing list
[email protected]
http://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to