Title: [261231] trunk/Tools
Revision: 261231
Author: [email protected]
Date: 2020-05-06 08:39:48 -0700 (Wed, 06 May 2020)

Log Message

Python3: Support Python3 in Tools/webkitpy/benchmark_runner
https://bugs.webkit.org/show_bug.cgi?id=211249

Reviewed by Jonathan Bedard.

* Scripts/webkitpy/benchmark_runner/benchmark_builder.py:
(BenchmarkBuilder._fetch_remote_archive):
* Scripts/webkitpy/benchmark_runner/benchmark_results.py:
(BenchmarkResults._format_values):
(BenchmarkResults._subtest_values_by_config_iteration):
* Scripts/webkitpy/benchmark_runner/benchmark_runner.py:
(istext):
(BenchmarkRunner._run_benchmark):
(BenchmarkRunner._merge):
(BenchmarkRunner.show_results):
* Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py:
(OSXSafariDriver.launch_url):
* Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py:
(HTTPServerDriver):
* Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py:
(SimpleHTTPServerDriver.serve):
(SimpleHTTPServerDriver._wait_for_http_server):
* Scripts/webkitpy/benchmark_runner/run_benchmark.py:
(parse_args):
(list_benchmark_plans):
* Scripts/webkitpy/style/checker.py:
(CheckerDispatcher._create_checker):

Modified Paths

trunk/Tools/ChangeLog
trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_builder.py
trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py
trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py
trunk/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py
trunk/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py
trunk/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py
trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py
trunk/Tools/Scripts/webkitpy/style/checker.py

Diff

Modified: trunk/Tools/ChangeLog (261230 => 261231)


--- trunk/Tools/ChangeLog	2020-05-06 15:32:00 UTC (rev 261230)
+++ trunk/Tools/ChangeLog	2020-05-06 15:39:48 UTC (rev 261231)
@@ -1,3 +1,33 @@
+2020-05-06  Pablo Saavedra  <[email protected]>
+
+        Python3: Support Python3 in Tools/webkitpy/benchmark_runner
+        https://bugs.webkit.org/show_bug.cgi?id=211249
+
+        Reviewed by Jonathan Bedard.
+
+        * Scripts/webkitpy/benchmark_runner/benchmark_builder.py:
+        (BenchmarkBuilder._fetch_remote_archive):
+        * Scripts/webkitpy/benchmark_runner/benchmark_results.py:
+        (BenchmarkResults._format_values):
+        (BenchmarkResults._subtest_values_by_config_iteration):
+        * Scripts/webkitpy/benchmark_runner/benchmark_runner.py:
+        (istext):
+        (BenchmarkRunner._run_benchmark):
+        (BenchmarkRunner._merge):
+        (BenchmarkRunner.show_results):
+        * Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py:
+        (OSXSafariDriver.launch_url):
+        * Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py:
+        (HTTPServerDriver):
+        * Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py:
+        (SimpleHTTPServerDriver.serve):
+        (SimpleHTTPServerDriver._wait_for_http_server):
+        * Scripts/webkitpy/benchmark_runner/run_benchmark.py:
+        (parse_args):
+        (list_benchmark_plans):
+        * Scripts/webkitpy/style/checker.py:
+        (CheckerDispatcher._create_checker):
+
 2020-05-06  Aakash Jain  <[email protected]>
 
         Delete code for QueueStatusServer

Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_builder.py (261230 => 261231)


--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_builder.py	2020-05-06 15:32:00 UTC (rev 261230)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_builder.py	2020-05-06 15:39:48 UTC (rev 261231)
@@ -3,15 +3,20 @@
 import logging
 import tempfile
 import os
-import urllib
 import shutil
 import subprocess
+import sys
 import tarfile
 
 from webkitpy.benchmark_runner.utils import get_path_from_project_root, force_remove
 from zipfile import ZipFile
 
+if sys.version_info > (3, 0):
+    from urllib.request import urlretrieve
+else:
+    from urllib import urlretrieve
 
+
 _log = logging.getLogger(__name__)
 
 
@@ -70,7 +75,7 @@
 
         archive_path = os.path.join(self._web_root, 'archive.' + archive_type)
         _log.info('Downloading %s to %s' % (archive_url, archive_path))
-        urllib.urlretrieve(archive_url, archive_path)
+        urlretrieve(archive_url, archive_path)
 
         if archive_type == 'zip':
             with ZipFile(archive_path, 'r') as archive:
@@ -79,7 +84,7 @@
             with tarfile.open(archive_path, 'r:gz') as archive:
                 archive.extractall(self._dest)
 
-        unarchived_files = filter(lambda name: not name.startswith('.'), os.listdir(self._dest))
+        unarchived_files = [name for name in os.listdir(self._dest) if not name.startswith('.')]
         if len(unarchived_files) == 1:
             first_file = os.path.join(self._dest, unarchived_files[0])
             if os.path.isdir(first_file):

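Both hunks above are classic Python 3 pitfalls: urllib.urlretrieve moved to
urllib.request.urlretrieve (the version-gated import could equivalently be
spelled "from six.moves.urllib.request import urlretrieve", since the patch
already depends on six elsewhere), and filter() now returns a lazy iterator,
so the len() call a few lines later would raise a TypeError. A minimal sketch
of the second pitfall, with a made-up directory listing:

    entries = ['.DS_Store', 'benchmark']    # hypothetical os.listdir() result

    lazy = filter(lambda name: not name.startswith('.'), entries)
    # Python 2: lazy is a list, so len(lazy) == 1
    # Python 3: lazy is an iterator; len(lazy) raises TypeError

    unarchived_files = [name for name in entries if not name.startswith('.')]
    assert len(unarchived_files) == 1       # works on both versions
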
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py (261230 => 261231)


--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py	2020-05-06 15:32:00 UTC (rev 261230)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py	2020-05-06 15:39:48 UTC (rev 261231)
@@ -85,7 +85,7 @@
         values = list(map(float, values))
         total = sum(values)
         mean = total / len(values)
-        square_sum = sum(map(lambda x: x * x, values))
+        square_sum = sum([x * x for x in values])
         sample_count = len(values)
 
         # With sum and sum of squares, we can compute the sample standard deviation in O(1).
@@ -101,13 +101,13 @@
         if not scale_unit:
             formatted_value = '{mean:.3f}{unit} stdev={delta:.1%}'.format(mean=mean, delta=sample_stdev / mean, unit=unit)
             if show_iteration_values:
-                formatted_value += ' [' + ', '.join(map(lambda value: '{value:.3f}'.format(value=value), values)) + ']'
+                formatted_value += ' [' + ', '.join(['{value:.3f}'.format(value=value) for value in values]) + ']'
             return formatted_value
 
         if unit == 'ms':
             unit = 's'
             mean = float(mean) / 1000
-            values = list(map(lambda value: float(value) / 1000, values))
+            values = list([float(value) / 1000 for value in values])
             sample_stdev /= 1000
 
         base = 1024 if unit == 'B' else 1000
@@ -126,7 +126,7 @@
 
         formatted_value = '{mean}{prefix}{unit} stdev={delta:.1%}'.format(mean=format_scaled(scaled_mean), delta=sample_stdev / mean, prefix=SI_prefix, unit=unit)
         if show_iteration_values:
-            formatted_value += ' [' + ', '.join(map(lambda value: format_scaled(value * scaling_factor), values)) + ']'
+            formatted_value += ' [' + ', '.join([format_scaled(value * scaling_factor) for value in values]) + ']'
         return formatted_value
 
     @classmethod
@@ -182,7 +182,7 @@
                 results_for_aggregator = results_for_metric.get(aggregator)
             elif None in results_for_metric:
                 results_for_aggregator = results_for_metric.get(None)
-            elif len(results_for_metric.keys()) == 1:
+            elif len(list(results_for_metric.keys())) == 1:
                 results_for_aggregator = results_for_metric.get(list(results_for_metric.keys())[0])
             else:
                 results_for_aggregator = {}

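The square_sum hunk preserves the one-pass trick the inline comment refers
to: given the sum and the sum of squares, the sample standard deviation falls
out in O(1) as s = sqrt((square_sum - n * mean^2) / (n - 1)). A standalone
sketch of that identity (the function name and test data are illustrative,
not from webkitpy):

    import math

    def sample_stdev(values):
        # One pass collects the sum and the sum of squares.
        values = list(map(float, values))
        n = len(values)
        mean = sum(values) / n
        square_sum = sum(x * x for x in values)
        return math.sqrt((square_sum - n * mean * mean) / (n - 1))

    assert abs(sample_stdev([2, 4, 4, 4, 5, 5, 7, 9]) - 2.138) < 0.001
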
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py (261230 => 261231)


--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py	2020-05-06 15:32:00 UTC (rev 261230)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py	2020-05-06 15:39:48 UTC (rev 261231)
@@ -8,7 +8,6 @@
 import sys
 import tempfile
 import time
-import types
 import os
 
 from webkitpy.benchmark_runner.benchmark_builder import BenchmarkBuilder
@@ -15,6 +14,15 @@
 from webkitpy.benchmark_runner.benchmark_results import BenchmarkResults
 from webkitpy.benchmark_runner.browser_driver.browser_driver_factory import BrowserDriverFactory
 
+
+if sys.version_info > (3, 0):
+    def istext(a):
+        return isinstance(a, bytes) or isinstance(a, str)
+else:
+    def istext(a):
+        return isinstance(a, str) or isinstance(a, unicode)
+
+
 _log = logging.getLogger(__name__)
 
 
@@ -78,7 +86,7 @@
         debug_outputs = []
         try:
             self._browser_driver.prepare_initial_env(self._config)
-            for iteration in xrange(1, count + 1):
+            for iteration in range(1, count + 1):
                 _log.info('Start the iteration {current_iteration} of {iterations} for current benchmark'.format(current_iteration=iteration, iterations=count))
                 try:
                     self._browser_driver.prepare_env(self._config)
@@ -142,16 +150,16 @@
         assert(isinstance(a, type(b)))
         arg_type = type(a)
         # Special-case lists; this must be handled before the equality check
-        if arg_type == types.ListType and len(a) and (type(a[0]) == types.StringType or type(a[0]) == types.UnicodeType):
+        if arg_type == list and len(a) and istext(a[0]):
             return a
-        if arg_type == types.DictType:
+        if arg_type == dict:
             result = {}
-            for key, value in a.items():
+            for key, value in list(a.items()):
                 if key in b:
                     result[key] = cls._merge(value, b[key])
                 else:
                     result[key] = value
-            for key, value in b.items():
+            for key, value in list(b.items()):
                 if key not in result:
                     result[key] = value
             return result

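The new istext() helper collapses the old types.StringType/types.UnicodeType
checks into a single call that names each interpreter's two text types:
str/unicode on Python 2, str/bytes on Python 3. A sketch of how it feeds
_merge(), using made-up inputs and only the behavior visible in the hunk
above:

    istext(u'WebKit')    # True on both versions
    istext(b'WebKit')    # True (str on Python 2, bytes on Python 3)
    istext(['WebKit'])   # False; lists get their own branch in _merge()

    a = {'tests': {'Suite': {'metrics': ['Time']}}}
    b = {'tests': {'Suite': {'unit': 'ms'}}}
    BenchmarkRunner._merge(a, b)
    # Dicts merge recursively, and a list whose first element is text is
    # returned as-is, so this yields:
    # {'tests': {'Suite': {'metrics': ['Time'], 'unit': 'ms'}}}
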
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py (261230 => 261231)


--- trunk/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py	2020-05-06 15:32:00 UTC (rev 261230)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/browser_driver/osx_safari_driver.py	2020-05-06 15:39:48 UTC (rev 261231)
@@ -32,7 +32,7 @@
             safari_app_in_build_path = os.path.join(browser_build_absolute_path, 'Safari.app/Contents/MacOS/Safari')
             has_safari_app = os.path.exists(safari_app_in_build_path)
             content_in_path = os.listdir(browser_build_absolute_path)
-            contains_frameworks = any(itertools.imap(lambda entry: entry.endswith('.framework'), os.listdir(browser_build_absolute_path)))
+            contains_frameworks = any(map(lambda entry: entry.endswith('.framework'), os.listdir(browser_build_absolute_path)))
 
             if has_safari_app:
                 args = [safari_app_in_build_path]

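itertools.imap is gone in Python 3 because the builtin map() is already lazy
there, and any() short-circuits over either, so the substitution cannot
change the result. A generator expression would be the fully idiomatic
endpoint (the directory entries below are hypothetical):

    entries = ['JavaScriptCore.framework', 'Safari.app']
    contains_frameworks = any(entry.endswith('.framework') for entry in entries)
    assert contains_frameworks
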
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py (261230 => 261231)


--- trunk/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py	2020-05-06 15:32:00 UTC (rev 261230)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/http_server_driver.py	2020-05-06 15:39:48 UTC (rev 261231)
@@ -1,13 +1,12 @@
 #!/usr/bin/env python
 
 from abc import abstractmethod, ABCMeta
+from six import with_metaclass
 
 
-class HTTPServerDriver(object):
+class HTTPServerDriver(with_metaclass(ABCMeta, object)):
     platforms = []
 
-    __metaclass__ = ABCMeta
-
     @abstractmethod
     def serve(self, webRoot):
         pass

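The removed __metaclass__ attribute is silently ignored by Python 3, which
would have left the @abstractmethod decorators unenforced there;
six.with_metaclass is the standard bridge that works on both interpreters.
Once Python 2 support is dropped, the native Python 3 spelling would be:

    from abc import ABCMeta, abstractmethod

    class HTTPServerDriver(object, metaclass=ABCMeta):  # Python 3 only
        platforms = []

        @abstractmethod
        def serve(self, webRoot):
            pass
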
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py (261230 => 261231)


--- trunk/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py	2020-05-06 15:32:00 UTC (rev 261230)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/http_server_driver/simple_http_server_driver.py	2020-05-06 15:39:48 UTC (rev 261231)
@@ -40,7 +40,7 @@
         _log.info('Starting to fetch the port number of the HTTP server')
         try:
             import psutil
-            for attempt in xrange(max_attempt):
+            for attempt in range(max_attempt):
                 connections = psutil.Process(self._server_process.pid).connections()
                 if connections and connections[0].laddr and connections[0].laddr[1] and connections[0].status == 'LISTEN':
                     self._server_port = connections[0].laddr[1]
@@ -52,7 +52,7 @@
             else:
                 raise Exception("Server is not listening on port, max tries exceeded. HTTP server may be installing dependent modules.")
         except ImportError:
-            for attempt in xrange(max_attempt):
+            for attempt in range(max_attempt):
                 try:
                     output = subprocess.check_output(['/usr/sbin/lsof', '-a', '-P', '-iTCP', '-sTCP:LISTEN', '-p', str(self._server_process.pid)])
                     self._server_port = int(re.search('TCP .*:(\d+) \(LISTEN\)', output).group(1))
@@ -71,7 +71,7 @@
     def _wait_for_http_server(self):
         max_attempt = 5
         # Wait for server to be up completely before exiting
-        for attempt in xrange(max_attempt):
+        for attempt in range(max_attempt):
             try:
                 subprocess.check_call(["curl", "--silent", "--head", "--fail", "--output", "/dev/null", self.base_url()])
                 return

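All three hunks in this file are the same mechanical xrange-to-range swap
inside polling loops. (One caveat worth noting: on Python 3,
subprocess.check_output() returns bytes, so the str regex in the lsof branch
would additionally need a decode.) The wait loop has roughly this shape; the
sleep interval and the failure path are assumptions, since the diff is
truncated at this point:

    import subprocess
    import time

    def wait_for_http_server(base_url, max_attempt=5):
        # Poll with a curl HEAD request until the server answers.
        for attempt in range(max_attempt):
            try:
                subprocess.check_call(['curl', '--silent', '--head', '--fail',
                                       '--output', '/dev/null', base_url])
                return
            except subprocess.CalledProcessError:
                time.sleep(1)  # assumed back-off
        raise RuntimeError('HTTP server did not respond after %d attempts' % max_attempt)
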
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py (261230 => 261231)


--- trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py	2020-05-06 15:32:00 UTC (rev 261230)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py	2020-05-06 15:39:48 UTC (rev 261231)
@@ -41,7 +41,7 @@
     mutual_group.add_argument('--read-results-json', dest='json_file', help='Instead of running a benchmark, format the output saved in JSON_FILE.')
     parser.add_argument('--output-file', default=None, help='Save detailed results to OUTPUT in JSON format. By default, results will not be saved.')
     parser.add_argument('--count', type=int, help='Number of times to run the benchmark (e.g. 5).')
-    parser.add_argument('--driver', default=WebServerBenchmarkRunner.name, choices=benchmark_runner_subclasses.keys(), help='Use the specified benchmark driver. Defaults to %s.' % WebServerBenchmarkRunner.name)
+    parser.add_argument('--driver', default=WebServerBenchmarkRunner.name, choices=list(benchmark_runner_subclasses.keys()), help='Use the specified benchmark driver. Defaults to %s.' % WebServerBenchmarkRunner.name)
     parser.add_argument('--browser', default=default_browser(), choices=BrowserDriverFactory.available_browsers(), help='Browser to run the benchmark in. Defaults to %s.' % default_browser())
     parser.add_argument('--platform', default=default_platform(), choices=BrowserDriverFactory.available_platforms(), help='Platform that this script is running on. Defaults to %s.' % default_platform())
     parser.add_argument('--local-copy', help='Path to a local copy of the benchmark (e.g. PerformanceTests/SunSpider/).')
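
dict.keys() returns a live view on Python 3 rather than a list; argparse only
iterates and membership-tests the choices container, which a view supports,
so the list(...) wrapper mainly restores the concrete-list behavior Python 2
callers saw. A small illustration with a made-up registry:

    benchmark_runner_subclasses = {'server': object, 'webdriver': object}
    keys = benchmark_runner_subclasses.keys()
    'server' in keys                           # True on both versions
    isinstance(keys, list)                     # True on 2, False on 3
    list(benchmark_runner_subclasses.keys())   # a real list on both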