- Revision
- 234764
- Author
- [email protected]
- Date
- 2018-08-10 11:23:50 -0700 (Fri, 10 Aug 2018)
Log Message
Add ability to ignore process prewarming for launch time benchmark
https://bugs.webkit.org/show_bug.cgi?id=188462
Patch by Ben Richards <[email protected]> on 2018-08-10
Reviewed by Ryosuke Niwa.
Added a flag to the new tab benchmark that will open a blank tab before the tab to be measured in order to ignore process prewarming.
* LaunchTime/launch_time.py:
(DefaultLaunchTimeHandler):
(DefaultLaunchTimeHandler.on_receive_stop_time): Deleted.
(DefaultLaunchTimeHandler.on_receive_stop_signal):
(DefaultLaunchTimeHandler.do_HEAD):
(DefaultLaunchTimeHandler.do_GET):
(DefaultLaunchTimeHandler.do_POST):
(LaunchTimeBenchmark):
(LaunchTimeBenchmark._standard_deviation): Fixed divide by zero bug when '-n' is set to 1
(LaunchTimeBenchmark.open_tab): Added option to open a blank tab
(LaunchTimeBenchmark.run):
* LaunchTime/new_tab.py:
(NewTabBenchmark.initialize):
(NewTabBenchmark.run_iteration):
(NewTabBenchmark.will_parse_arguments):
(NewTabBenchmark.did_parse_arguments):
(NewTabBenchmark.ResponseHandler.Handler.get_test_page):
(NewTabBenchmark.ResponseHandler.Handler.on_receive_stop_time): Deleted.
(NewTabBenchmark.ResponseHandler.Handler.on_receive_stop_signal):
(NewTabBenchmark):
* LaunchTime/startup.py:
(StartupBenchmark.ResponseHandler.Handler.get_test_page):
(StartupBenchmark.ResponseHandler.Handler.on_receive_stop_time): Deleted.
(StartupBenchmark.ResponseHandler.Handler.on_receive_stop_signal):
(StartupBenchmark):
Modified Paths
Diff
Modified: trunk/PerformanceTests/ChangeLog (234763 => 234764)
--- trunk/PerformanceTests/ChangeLog 2018-08-10 17:42:32 UTC (rev 234763)
+++ trunk/PerformanceTests/ChangeLog 2018-08-10 18:23:50 UTC (rev 234764)
@@ -1,3 +1,38 @@
+2018-08-10 Ben Richards <[email protected]>
+
+ Add ability to ignore process prewarming for launch time benchmark
+ https://bugs.webkit.org/show_bug.cgi?id=188462
+
+ Reviewed by Ryosuke Niwa.
+
+ Added a flag to the new tab benchmark that will open a blank tab before the tab to be measured in order to ignore process prewarming.
+
+ * LaunchTime/launch_time.py:
+ (DefaultLaunchTimeHandler):
+ (DefaultLaunchTimeHandler.on_receive_stop_time): Deleted.
+ (DefaultLaunchTimeHandler.on_receive_stop_signal):
+ (DefaultLaunchTimeHandler.do_HEAD):
+ (DefaultLaunchTimeHandler.do_GET):
+ (DefaultLaunchTimeHandler.do_POST):
+ (LaunchTimeBenchmark):
+ (LaunchTimeBenchmark._standard_deviation): Fixed divide by zero bug when '-n' is set to 1
+ (LaunchTimeBenchmark.open_tab): Added option to open a blank tab
+ (LaunchTimeBenchmark.run):
+ * LaunchTime/new_tab.py:
+ (NewTabBenchmark.initialize):
+ (NewTabBenchmark.run_iteration):
+ (NewTabBenchmark.will_parse_arguments):
+ (NewTabBenchmark.did_parse_arguments):
+ (NewTabBenchmark.ResponseHandler.Handler.get_test_page):
+ (NewTabBenchmark.ResponseHandler.Handler.on_receive_stop_time): Deleted.
+ (NewTabBenchmark.ResponseHandler.Handler.on_receive_stop_signal):
+ (NewTabBenchmark):
+ * LaunchTime/startup.py:
+ (StartupBenchmark.ResponseHandler.Handler.get_test_page):
+ (StartupBenchmark.ResponseHandler.Handler.on_receive_stop_time): Deleted.
+ (StartupBenchmark.ResponseHandler.Handler.on_receive_stop_signal):
+ (StartupBenchmark):
+
2018-08-10 Truitt Savell <[email protected]>
Unreviewed, rolling out r234750.
Modified: trunk/PerformanceTests/LaunchTime/launch_time.py (234763 => 234764)
--- trunk/PerformanceTests/LaunchTime/launch_time.py 2018-08-10 17:42:32 UTC (rev 234763)
+++ trunk/PerformanceTests/LaunchTime/launch_time.py 2018-08-10 18:23:50 UTC (rev 234764)
@@ -41,21 +41,12 @@
</html>
'''
- def get_blank_page(self):
- return '''<!DOCTYPE html>
- <html>
- <head>
- <title>Launch Time Benchmark</title>
- <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- </head>
- </html>'''
-
- def on_receive_stop_time(self, time):
+ def on_receive_stop_signal(self, data):
pass
def do_HEAD(self):
self.send_response(200)
- self.send_header('Content-type', 'text/hetml')
+ self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
@@ -62,7 +53,8 @@
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
- self.wfile.write(self.get_blank_page() if self.path == '/blank' else self.get_test_page())
+ if not self.path.startswith('/blank'):
+ self.wfile.write(self.get_test_page())
self.wfile.close()
def do_POST(self):
@@ -73,10 +65,8 @@
self.wfile.close()
data_string = self.rfile.read(int(self.headers['Content-Length']))
- time = float(data_string)
+ self.on_receive_stop_signal(data_string)
- self.on_receive_stop_time(time)
-
def log_message(self, format, *args):
pass
@@ -177,7 +167,8 @@
def _standard_deviation(self, results, mean=None):
if mean is None:
mean = sum(results) / float(len(results))
- variance = sum((x - mean) ** 2 for x in results) / float(len(results) - 1)
+ divisor = float(len(results) - 1) if len(results) > 1 else float(len(results))
+ variance = sum((x - mean) ** 2 for x in results) / divisor
return sqrt(variance)
def _compute_results(self, results):
@@ -197,9 +188,13 @@
for i in range(self.iteration_groups):
yield self.wait_time_low + increment_per_group * i
- def open_tab(self):
- call(['open', '-a', self._browser_bundle_path,
- 'http://localhost:{}/{}'.format(self._port, self._open_count)])
+ def open_tab(self, blank=False):
+ if blank:
+ call(['open', '-a', self._browser_bundle_path,
+ 'http://localhost:{}/blank/{}'.format(self._port, self._open_count)])
+ else:
+ call(['open', '-a', self._browser_bundle_path,
+ 'http://localhost:{}/{}'.format(self._port, self._open_count)])
self._open_count += 1
def launch_browser(self):
@@ -273,8 +268,8 @@
results_by_iteration_number[i].append(result_in_ms)
except KeyboardInterrupt:
raise KeyboardInterrupt
- except:
- self._exit_due_to_exception('(Test {} failed)\n'.format(i + 1 if self._verbose else i))
+ except Exception as error:
+ self._exit_due_to_exception('(Test {} failed) {}: {}\n'.format(i + 1 if self._verbose else i, type(error).__name__, error))
if not self._verbose:
print ''
Modified: trunk/PerformanceTests/LaunchTime/new_tab.py (234763 => 234764)
--- trunk/PerformanceTests/LaunchTime/new_tab.py 2018-08-10 17:42:32 UTC (rev 234763)
+++ trunk/PerformanceTests/LaunchTime/new_tab.py 2018-08-10 18:23:50 UTC (rev 234764)
@@ -36,16 +36,22 @@
self.start_time = None
self.stop_time = None
self.stop_signal_was_received = Event()
+ self.allow_prewarm = True
def run_iteration(self):
+ tabs_to_open = 1 if self.allow_prewarm else 2
+ self.stop_time = None
+ for _ in range(tabs_to_open - 1):
+ self.open_tab(blank=True)
self.start_time = time.time() * 1000
self.open_tab()
while self.stop_time is None:
self.stop_signal_was_received.wait()
result = self.stop_time - self.start_time
- self.stop_time = None
self.stop_signal_was_received.clear()
- self.close_tab()
+ for _ in range(tabs_to_open):
+ self.close_tab()
+
return result
def group_init(self):
@@ -56,6 +62,8 @@
help='number of groups of iterations to run (default: {})'.format(self.iteration_groups))
self.argument_parser.add_argument('-w', '--wait-time', type=self._parse_wait_time,
help='wait time to use between iterations or range to scan (format is "N" or "N:M" where N < M, default: {}:{})'.format(self.wait_time_low, self.wait_time_high))
+ self.argument_parser.add_argument('--no-prewarm', action='store_true',
+ help='attempt to ignore process prewarming (will most likely raise standard deviation)')
def did_parse_arguments(self, args):
if args.groups:
@@ -62,6 +70,8 @@
self.iteration_groups = args.groups
if args.wait_time:
self.wait_time_low, self.wait_time_high = args.wait_time
+ if args.no_prewarm:
+ self.allow_prewarm = False
@staticmethod
def ResponseHandler(new_tab_benchmark):
@@ -74,8 +84,8 @@
<meta http-equiv="Content-Type" content="text/html" />
<script>
function sendDone() {
- var time = performance.timing.navigationStart
- var request = new XMLHttpRequest();
+ const time = performance.timing.navigationStart
+ const request = new XMLHttpRequest();
request.open("POST", "done", false);
request.setRequestHeader('Content-Type', 'application/json');
request.send(JSON.stringify(time));
@@ -89,8 +99,8 @@
</html>
'''
- def on_receive_stop_time(self, stop_time):
- new_tab_benchmark.stop_time = stop_time
+ def on_receive_stop_signal(self, data):
+ new_tab_benchmark.stop_time = float(data)
new_tab_benchmark.stop_signal_was_received.set()
return Handler
Modified: trunk/PerformanceTests/LaunchTime/startup.py (234763 => 234764)
--- trunk/PerformanceTests/LaunchTime/startup.py 2018-08-10 17:42:32 UTC (rev 234763)
+++ trunk/PerformanceTests/LaunchTime/startup.py 2018-08-10 18:23:50 UTC (rev 234764)
@@ -50,8 +50,8 @@
</html>
'''
- def on_receive_stop_time(self, stop_time):
- startup_benchmark.stop_time = stop_time
+ def on_receive_stop_signal(self, data):
+ startup_benchmark.stop_time = float(data)
startup_benchmark.stop_signal_was_received.set()
return Handler