Diff
Modified: trunk/Tools/ChangeLog (293108 => 293109)
--- trunk/Tools/ChangeLog 2022-04-20 18:36:56 UTC (rev 293108)
+++ trunk/Tools/ChangeLog 2022-04-20 19:30:06 UTC (rev 293109)
@@ -1,3 +1,27 @@
+2022-04-18 Justin Michaud <[email protected]>
+
+ [PGO] run-benchmark should allow pgo profiles to be collected during a benchmark run
+ https://bugs.webkit.org/show_bug.cgi?id=239313
+
+ Reviewed by Dewei Zhu.
+
+ Add a --profile-output-directory to run-benchmark.
+ Add a helper script pgo-profile with merge, summarize, combine, and compress subcommands to simplify handling profile output.
+ Add a script called build-and-collect-pgo-profiles that automates everything.
+
+ * Scripts/build-and-collect-pgo-profiles: Added.
+ * Scripts/pgo-profile: Added.
+ * Scripts/webkitpy/benchmark_runner/benchmark_runner.py:
+ (BenchmarkRunner.__init__):
+ * Scripts/webkitpy/benchmark_runner/run_benchmark.py:
+ (config_argument_parser):
+ (parse_args):
+ (run_benchmark_plan):
+ * Scripts/webkitpy/benchmark_runner/webserver_benchmark_runner.py:
+ (WebServerBenchmarkRunner.__init__):
+ (WebServerBenchmarkRunner._get_result):
+ (WebServerBenchmarkRunner._run_one_test): Deleted.
+
2022-04-20 Myles C. Maxfield <[email protected]>
[WebGPU] Update WebGPU CTS to revision 549d7c2f1637321a9247f0a3ab022f919e5ca5da
Added: trunk/Tools/Scripts/build-and-collect-pgo-profiles (0 => 293109)
--- trunk/Tools/Scripts/build-and-collect-pgo-profiles (rev 0)
+++ trunk/Tools/Scripts/build-and-collect-pgo-profiles 2022-04-20 19:30:06 UTC (rev 293109)
@@ -0,0 +1,48 @@
+#!/bin/bash
+set -e
+set -o pipefail
+
+# Build WebKit, run benchmarks, and spit out compressed PGO profiles
+BASE=${1:-/Volumes/WebKit/BenchmarkProfiles/}
+echo "Using output directory: $BASE"
+
+while true; do
+ read -p "Have you read the source of this script, and do you understand that it is potentially destructive? [y/N]" yn
+ case $yn in
+ [Yy]* ) break;;
+ [Nn]* ) exit;;
+ * ) echo "Please answer yes or no.";;
+ esac
+done
+
+if [[ ! -d "$BASE" ]] ; then
+ echo "$BASE is missing, aborting."
+ exit
+fi
+
+rm -rf "$BASE"/*
+
+mkdir -p "$BASE/speedometer"
+mkdir -p "$BASE/jetstream"
+mkdir -p "$BASE/output"
+mkdir -p "$BASE/Internal/WebKit/WebKitAdditions/Profiling/"
+
+cd ../Internal
+
+rm -rf ../OpenSource/WebKitBuild
+make release WK_LTO_MODE=thin ENABLE_LLVM_PROFILE_GENERATION=ON
+
+run-benchmark --plan jetstream2 --diagnose-directory=/Volumes/WebKit/BenchmarkProfiles/jetstream --generate-profiles --build-directory=../OpenSource/WebKitBuild/Release --count 1
+pgo-profile merge "$BASE/jetstream"
+
+run-benchmark --plan speedometer --diagnose-directory=/Volumes/WebKit/BenchmarkProfiles/speedometer --generate-profiles --build-directory=../OpenSource/WebKitBuild/Release --count 1
+pgo-profile merge "$BASE/speedometer"
+
+rm *.result
+
+pgo-profile combine --jetstream "$BASE/jetstream" --speedometer "$BASE/speedometer" --output "$BASE/output"
+
+pgo-profile compress --input "$BASE/output" --output "$BASE/Internal/WebKit/WebKitAdditions/Profiling/"
+
+echo "Done! Find your profiles in $BASE/Internal/WebKit/WebKitAdditions/Profiling/"
+echo "To check these in, do: 'cp -r /Volumes/WebKit/BenchmarkProfiles/Internal/ ../Internal/'"
\ No newline at end of file
Property changes on: trunk/Tools/Scripts/build-and-collect-pgo-profiles
___________________________________________________________________
Added: svn:executable
+*
\ No newline at end of property
Added: trunk/Tools/Scripts/pgo-profile (0 => 293109)
--- trunk/Tools/Scripts/pgo-profile (rev 0)
+++ trunk/Tools/Scripts/pgo-profile 2022-04-20 19:30:06 UTC (rev 293109)
@@ -0,0 +1,163 @@
+#!/usr/bin/env python3
+
+import argparse
+import glob
+import math
+import subprocess
+import os
+
+PROFILED_DYLIBS = ["JavaScriptCore", "WebCore", "WebKit"]
+BENCHMARK_GROUP_WEIGHTS = [("speedometer", 0.6), ("jetstream", 0.4)]
+
+def pad(string, max_length):
+ if len(string) > max_length:
+ return string[:max_length - 1] + u"…"
+ return string.ljust(max_length)
+
+
+def shell(command):
+ return subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).strip()
+
+
+def shortened(name):
+ return name if len(name) < 200 else name[0:99] + u"…" + name[-98:]
+
+
+def assert_directory(directory_path):
+ assert os.path.isdir(directory_path), f"No directory at {directory_path}"
+ return directory_path
+
+
+def assert_file(file_path):
+ assert os.path.isfile(file_path), f"No file at {file_path}"
+ return file_path
+
+
+def summarize_parser(subparsers):
+ parser = subparsers.add_parser("summarize", help="Dumps function names in a given .profraw file, sorted in descending order by function count")
+ parser.add_argument("file", help="Path to the .profraw file")
+ parser.set_defaults(func=summarize)
+ return parser
+
+
+def summarize(args):
+ file = assert_file(args.file)
+
+ lines = shell(f"xcrun -sdk macosx llvm-profdata show --all-functions --value-cutoff=10 \"{file}\" | c++filt -n").decode("utf-8").splitlines()
+
+ counts_and_functions = []
+
+ for line_number in range(len(lines)):
+ line = lines[line_number].strip()
+ if line.startswith("Function count: "):
+ count = int(line.split()[-1])
+ symbol = lines[line_number - 3].strip()[:-1]
+ counts_and_functions.append((count, symbol))
+
+ counts_and_functions.sort(reverse=True)
+ for count, name in counts_and_functions:
+ print(pad(str(count), 15), shortened(name))
+
+
+def merge_parser(subparsers):
+ parser = subparsers.add_parser("merge", help="Merge a pile of *.profraw files into the *.profdata files we can build with.")
+ parser.add_argument("directory", help="Path to the directory containing the *.profraw files")
+ parser.set_defaults(func=merge)
+ return parser
+
+
+def merge(args):
+ path = assert_directory(args.directory)
+
+ for lib in PROFILED_DYLIBS:
+ print("Merging", lib)
+ inputs = glob.glob(os.path.join(path, lib) + "*.profraw")
+ inputs = [f"\"{i}\"" for i in inputs]
+ inputs = " ".join(inputs)
+ output_file = os.path.join(path, lib + '.profdata')
+ print(shell(f"xcrun -sdk macosx.internal llvm-profdata merge {inputs} -output=\"{output_file}\""))
+
+
+def combine_parser(subparsers):
+ parser = subparsers.add_parser("combine", help="Combine directories containing *.profdata files from different platforms together.")
+ parser.add_argument("--speedometer", default=None, help="Path to the directory containing the *.profdata files from a speedometer run.")
+ parser.add_argument("--jetstream", default=None, help="Path to the directory containing the *.profdata files from a jetstream run.")
+ parser.add_argument("--output", help="Path to the directory where the output will be placed.")
+ parser.set_defaults(func=combine)
+ return parser
+
+
+def combine(args):
+ assert args.output, "Must specify output directory."
+ out = assert_directory(args.output)
+ args = vars(args)
+ group_paths = []
+
+ for i in range(0, len(BENCHMARK_GROUP_WEIGHTS)):
+ group, weight = BENCHMARK_GROUP_WEIGHTS[i]
+ if args[group]:
+ path = assert_directory(args[group])
+ group_paths.append((path, weight))
+
+ assert len(group_paths) > 0, "Must specify at least one group."
+
+ sum = 0
+ max = 0
+ # We need to turn percentages into weights > 1, but we don't want crazy high multipliers.
+ # For example, if we have weights 0.35 and 0.65, we don't need a 7:13 ratio when 5:9 is good enough.
+ max_multiplier = 15
+ for group, weight in group_paths:
+ sum = sum + weight
+ if weight > max:
+ max = weight
+
+ gcd = int(max * max_multiplier)
+ for group, weight in group_paths:
+ gcd = math.gcd(gcd, int((weight / sum) * max_multiplier))
+ for i in range(0, len(group_paths)):
+ group, weight = group_paths[i]
+ group_paths[i] = (group, int((weight / sum) * max_multiplier) // gcd)
+
+ print("Simplified group weights: ", group_paths)
+
+ for lib in PROFILED_DYLIBS:
+ print("Merging", lib)
+ group_input = ["--weighted-input={},\"{}\"".format(weight, os.path.join(path, lib + ".profdata")) for path, weight in group_paths]
+ group_input = " ".join(group_input)
+ output_file = os.path.join(out, lib + '.profdata')
+ print(shell(f"xcrun -sdk macosx.internal llvm-profdata merge {group_input} -output=\"{output_file}\""))
+
+
+def compress_parser(subparsers):
+ parser = subparsers.add_parser("compress", help="Compress *.profdata files so that they can be checked in.")
+ parser.add_argument("--input", help="Path to the directory containing the input *.profdata files.")
+ parser.add_argument("--output", help="Path to the directory where the output will be placed.")
+ parser.set_defaults(func=compress)
+ return parser
+
+
+def compress(args):
+ out = assert_directory(args.output)
+ input_directory = assert_directory(args.input)
+
+ for lib in PROFILED_DYLIBS:
+ print("Compressing", lib)
+ input_file = os.path.join(input_directory, lib + ".profdata")
+ output_file = os.path.join(out, lib + ".profdata.compressed")
+ print(shell(f"compression_tool -encode -i \"{input_file}\" -o \"{output_file}\" -a lzfse"))
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(prog='pgo-profile')
+ subparsers = parser.add_subparsers(help='valid sub-commands', required=True, dest='sub command')
+ merge_parser(subparsers)
+ summarize_parser(subparsers)
+ combine_parser(subparsers)
+ compress_parser(subparsers)
+
+ args = parser.parse_args()
+ try:
+ args.func(args)
+ except subprocess.CalledProcessError as e:
+ print(e.stdout)
+ raise e
\ No newline at end of file
Property changes on: trunk/Tools/Scripts/pgo-profile
___________________________________________________________________
Added: svn:executable
+*
\ No newline at end of property
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py (293108 => 293109)
--- trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py 2022-04-20 18:36:56 UTC (rev 293108)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/benchmark_runner.py 2022-04-20 19:30:06 UTC (rev 293109)
@@ -22,7 +22,7 @@
class BenchmarkRunner(object):
name = 'benchmark_runner'
- def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit=True, show_iteration_values=False, device_id=None, diagnose_dir=None):
+ def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit=True, show_iteration_values=False, device_id=None, diagnose_dir=None, profile_output_directory=None):
try:
plan_file = self._find_plan_file(plan_file)
with open(plan_file, 'r') as fp:
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py (293108 => 293109)
--- trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py 2022-04-20 18:36:56 UTC (rev 293108)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/run_benchmark.py 2022-04-20 19:30:06 UTC (rev 293109)
@@ -55,6 +55,7 @@
parser.add_argument('--diagnose-directory', dest='diagnose_dir', default=diagnose_directory, help='Directory for storing diagnose information on test failure. Defaults to {}.'.format(diagnose_directory))
parser.add_argument('--no-adjust-unit', dest='scale_unit', action='store_false', help="Don't convert to scientific notation.")
parser.add_argument('--show-iteration-values', dest='show_iteration_values', action='store_true', help="Show the measured value for each iteration in addition to averages.")
+ parser.add_argument('--generate-profiles', dest='generate_profiles', action='store_true', help="Collect LLVM profiles for PGO, and copy them to the diagnose directory.")
group = parser.add_mutually_exclusive_group()
group.add_argument('--browser-path', help='Specify the path to a non-default copy of the target browser as a path to the .app.')
@@ -76,12 +77,24 @@
_log.debug('\tbuild directory\t: %s' % args.build_dir)
_log.debug('\tplan name\t: %s', args.plan)
+ if args.generate_profiles and not os.path.isdir(args.diagnose_dir):
+ _log.error("No diagnose directory to dump profiles to: {}".format(args.diagnose_dir))
+ exit()
+
+ if args.generate_profiles and args.platform != 'osx':
+ _log.error("Profile generation is currently only supported on macOS.")
+ exit()
+
return args
def run_benchmark_plan(args, plan):
benchmark_runner_class = benchmark_runner_subclasses[args.driver]
- runner = benchmark_runner_class(plan, args.local_copy, args.count, args.build_dir, args.output_file, args.platform, args.browser, args.browser_path, args.scale_unit, args.show_iteration_values, args.device_id, args.diagnose_dir)
+ runner = benchmark_runner_class(plan,
+ args.local_copy, args.count, args.build_dir, args.output_file,
+ args.platform, args.browser, args.browser_path, args.scale_unit,
+ args.show_iteration_values, args.device_id, args.diagnose_dir,
+ args.diagnose_dir if args.generate_profiles else None)
runner.execute()
Modified: trunk/Tools/Scripts/webkitpy/benchmark_runner/webserver_benchmark_runner.py (293108 => 293109)
--- trunk/Tools/Scripts/webkitpy/benchmark_runner/webserver_benchmark_runner.py 2022-04-20 18:36:56 UTC (rev 293108)
+++ trunk/Tools/Scripts/webkitpy/benchmark_runner/webserver_benchmark_runner.py 2022-04-20 19:30:06 UTC (rev 293109)
@@ -1,7 +1,10 @@
import json
import logging
import os
+import subprocess
+import signal
import sys
+import time
from webkitcorepy import Timeout
@@ -19,10 +22,11 @@
class WebServerBenchmarkRunner(BenchmarkRunner):
name = 'webserver'
- def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit=True, show_iteration_values=False, device_id=None, diagnose_dir=None):
+ def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit=True, show_iteration_values=False, device_id=None, diagnose_dir=None, profile_output_directory=None):
self._http_server_driver = HTTPServerDriverFactory.create(platform)
self._http_server_driver.set_device_id(device_id)
- super(WebServerBenchmarkRunner, self).__init__(plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit, show_iteration_values, device_id, diagnose_dir)
+ self._profile_output_directory = profile_output_directory
+ super(WebServerBenchmarkRunner, self).__init__(plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit, show_iteration_values, device_id, diagnose_dir, profile_output_directory)
if self._diagnose_dir:
self._http_server_driver.set_http_log(os.path.join(self._diagnose_dir, 'run-benchmark-http.log'))
@@ -29,6 +33,18 @@
def _get_result(self, test_url):
result = self._browser_driver.add_additional_results(test_url, self._http_server_driver.fetch_result())
assert(not self._http_server_driver.get_return_code())
+
+ if self._profile_output_directory:
+ _log.info("Getting benchmark profile results for {} and copying them to {}.".format(test_url, self._profile_output_directory))
+ copy_output = subprocess.Popen(r"""log stream --style json --color none | perl -mFile::Basename -mFile::Copy -nle 'if (m/<WEBKIT_LLVM_PROFILE>.*<BEGIN>(.*)<END>/) { (my $l = $1) =~ s/\\\//\//g; my $b = File::Basename::basename($l); my $d = """ + "\"" + self._profile_output_directory + """/$b"; print "Moving $l to $d"; File::Copy::move($l, $d); }'""", shell=True, bufsize=0, preexec_fn=os.setsid)
+ time.sleep(1)
+ subprocess.call(["notifyutil", "-p", "com.apple.WebKit.profiledata"])
+ time.sleep(7)
+ # We can kill the shell with kill(), but killing children is harder.
+ os.killpg(os.getpgid(copy_output.pid), signal.SIGINT)
+ copy_output.kill()
+ time.sleep(1)
+ _log.debug("Hopefully the benchmark profile has finished writing to disk, moving on.")
return result
def _run_one_test(self, web_root, test_file):