Author: Matti Picus <[email protected]>
Branch: py3
Changeset: r390:a668bb880172
Date: 2019-12-31 17:00 +0200
http://bitbucket.org/pypy/benchmarks/changeset/a668bb880172/

Log:    update runner, bm_ai for python3
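
The patch keeps the benchmark harness runnable on both Python 2 and Python 3
by applying a small set of single-source idioms rather than dropping Python 2.
A minimal, self-contained sketch of those idioms (illustrative only, not part
of the diff; the fetch() helper and the sample dict are made up):

    # Illustrative sketch of the py2/py3 idioms used throughout this patch.
    from __future__ import print_function      # py2 gets the py3 print() function

    try:
        import urllib2                          # Python 2
    except ImportError:
        import urllib.request as urllib2        # Python 3: keep the old name usable

    def fetch(url):
        """Return the body of url, or None on failure (works on py2 and py3)."""
        try:
            return urllib2.urlopen(url).read()
        except urllib2.URLError as e:           # "except URLError, e" is py2-only syntax
            print("failed:", getattr(e, "reason", e))
            return None

    names = list({"a": 1, "b": 2}.keys())       # dict views and map() are lazy on py3,
    print(sorted(names))                        # so wrap them in list() before reuse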

diff --git a/benchmarks.py b/benchmarks.py
--- a/benchmarks.py
+++ b/benchmarks.py
@@ -1,3 +1,5 @@
+from __future__ import division, print_function
+
 import os
 import logging
 from unladen_swallow.perf import SimpleBenchmark, MeasureGeneric
@@ -48,7 +50,7 @@
         try:
             base_data = benchmark_function(base_python, options,
                                            *args, **kwargs)
-        except subprocess.CalledProcessError, e:
+        except subprocess.CalledProcessError as e:
             return ResultError(e)
         return SimpleComparisonResult(avg(base_data[0]), -1, -1)
     BM.func_name = 'BM_' + bm_name
@@ -173,11 +175,11 @@
     retcode = proc.poll()
     if retcode != 0:
         if out is not None:
-            print '---------- stdout ----------'
-            print out
+            print('---------- stdout ----------')
+            print(out)
         if err is not None:
-            print '---------- stderr ----------'
-            print err
+            print('---------- stderr ----------')
+            print(err)
         raise Exception("translate.py failed, retcode %r" % (retcode,))
 
     lines = err.splitlines()
@@ -213,8 +215,8 @@
         out, err = proc.communicate()
         retcode = proc.poll()
         if retcode != 0:
-            print out
-            print err
+            print(out)
+            print(err)
             raise Exception("sphinx-build.py failed")
         t.append(float(out.splitlines()[-1]))
     return RawResult([t[0]], [t[1]])
diff --git a/runner.py b/runner.py
--- a/runner.py
+++ b/runner.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 """ Usage: runner.py <result filename> <path to pypy-c> <revnumber>
 """
+from __future__ import division, print_function
 
 import json
 import socket
@@ -316,8 +317,8 @@
             # prevent to upload results from the nullpython dummy
             host = force_host if force_host else socket.gethostname()
             for url in urls:
-                print save(project, revision, results, executable, host, url,
-                           changed=(run == CHANGED), branch=branch)
+                print(save(project, revision, results, executable, host, url,
+                           changed=(run == CHANGED), branch=branch))
 
 
 if __name__ == '__main__':
diff --git a/saveresults.py b/saveresults.py
--- a/saveresults.py
+++ b/saveresults.py
@@ -20,13 +20,17 @@
   $ ./saveresults.py result.json -r '45757:fabe4fc0dc08' -n pypy-c-jit-64 \
     -H tannit
 """
+from __future__ import division, print_function
 
 from datetime import datetime
 import optparse
 import sys
 import time
 import urllib
-import urllib2
+try:
+    import urllib2
+except ImportError:
+    import urllib.request as urllib2
 import json
 
 
@@ -73,7 +77,7 @@
             'branch': branch,
         }]
         if value is None:
-            print "Ignoring skipped result", data
+            print("Ignoring skipped result", data)
             continue
         if res_type == "ComparisonResult":
             if changed:
@@ -117,9 +121,9 @@
                 if not retries:
                     raise
                 d = retries.pop(0)
-                print "retrying in %d seconds..." % d
+                print("retrying in %d seconds..." % d)
                 time.sleep(d)
-    except urllib2.URLError, e:
+    except urllib2.URLError as e:
         if hasattr(e, 'reason'):
             response = '\n  We failed to reach a server\n'
             response += '  Reason: ' + str(e.reason)
@@ -127,13 +131,13 @@
             response = '\n  The server couldn\'t fulfill the request'
         if hasattr(e, 'readlines'):
             response = "".join([response] + e.readlines())
-        print response
+        print(response)
         with open('error.html', 'w') as error_file:
             error_file.write(response)
         print("Server (%s) response written to error.html" % (url,))
         print('  Error code: %s\n' % (e,))
         return 1
-    print "saved correctly!\n"
+    print("saved correctly!", end='\n\n')
     return 0
 
 
@@ -142,10 +146,10 @@
     with open(jsonfile) as f:
         data = simplejson.load(f)
     results = data['results']
-    print 'uploading results...',
+    print('uploading results...', end='')
     save(options.project, options.revision, results, options.executable,
                 options.host, options.url, changed=options.changed)
-    print 'done'
+    print('done')
 
 
 if __name__ == '__main__':
diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py
--- a/unladen_swallow/perf.py
+++ b/unladen_swallow/perf.py
@@ -44,7 +44,7 @@
   changed_python -A -B the_benchmark.py
 """
 
-from __future__ import division, with_statement
+from __future__ import division, print_function
 
 __author__ = "[email protected] (Jeffrey Yasskin)"
 
@@ -62,7 +62,10 @@
 import tempfile
 import time
 import threading
-import urllib2
+try:
+    import urllib2
+except ImportError:
+    import urllib.request as urllib2
 try:
     import multiprocessing
 except ImportError:
@@ -301,7 +304,7 @@
         future = MemoryUsageFuture(some_pid)
         ...
         usage = future.GetMemoryUsage()
-        print max(usage)
+        print(max(usage))
 
     Note that calls to GetMemoryUsage() will block until the process exits.
     """
@@ -459,7 +462,7 @@
                                           *args, **kwargs)
         base_data = benchmark_function(base_python, options,
                                        *args, **kwargs)
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         return ResultError(e)
 
     return CompareBenchmarkData(base_data, changed_data, options)
@@ -588,9 +591,9 @@
 
 
 def LogCall(command):
-    command = map(str, command)
-    info("Running %s", " ".join(command))
-    return command
+    cmd = list(map(str, command))
+    info("Running %s", " ".join(cmd))
+    return cmd
 
 
 try:
@@ -672,12 +675,12 @@
         human consumption.
     """
     if len(base_times) != len(changed_times):
-        print "Base:"
-        print base_times
-        print "Changed:"
-        print changed_times
+        print("Base:")
+        print(base_times)
+        print("Changed:")
+        print(changed_times)
         # XXX <arigo> hacked.  Got this error *once*, don't want to care
-        print "WARNING: length did not match"
+        print("WARNING: length did not match")
         l = min(len(base_times), len(changed_times))
         base_times = base_times[:l]
         changed_times = changed_times[:l]
@@ -770,7 +773,7 @@
         future = MemoryUsageFuture(subproc.pid)
     result, err = subproc.communicate()
     if subproc.returncode != 0:
-        print result
+        print(result)
         raise RuntimeError("Benchmark died (returncode: %d): %s" %
                            (subproc.returncode, err))
     if track_memory:
@@ -887,7 +890,7 @@
             result, err = comparer.communicate()
             if comparer.returncode != 0:
                 return "pybench died: " + err
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         return str(e)
 
     if options.verbose:
@@ -1091,7 +1094,7 @@
                                        spitfire_env, extra_args)
         base_data = MeasureSpitfire(base_python, options,
                                     spitfire_env, extra_args)
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         return str(e)
 
     return CompareBenchmarkData(base_data, changed_data, options)
@@ -1448,7 +1451,7 @@
 
 def _FindAllBenchmarks(namespace):
     return dict((name[3:].lower(), func)
-                for (name, func) in sorted(namespace.iteritems())
+                for (name, func) in sorted(namespace.items())
                 if name.startswith("BM_"))
 
 BENCH_FUNCS = _FindAllBenchmarks(globals())
@@ -1548,7 +1551,7 @@
 
 def main(argv, bench_funcs=BENCH_FUNCS, bench_groups=BENCH_GROUPS):
     bench_groups = bench_groups.copy()
-    all_benchmarks = bench_funcs.keys()
+    all_benchmarks = list(bench_funcs.keys())
     bench_groups["all"] = all_benchmarks
 
     parser = optparse.OptionParser(
@@ -1580,7 +1583,7 @@
                             " benchmarks except the negative arguments. " +
                             " Otherwise we run only the positive arguments. " +
                             " Valid benchmarks are: " +
-                            ", ".join(bench_groups.keys() + all_benchmarks)))
+                            ", ".join(list(bench_groups.keys()) + all_benchmarks)))
     parser.add_option("--inherit_env", metavar="ENVVARS", type="string", action="callback",
                       callback=ParseEnvVars, default=[],
                       help=("Comma-separated list of environment variable names"
@@ -1618,7 +1621,7 @@
     results = []
     for name in sorted(should_run):
         func = bench_funcs[name]
-        print "Running %s..." % name
+        print("Running %s..." % name)
         # PyPy specific modification: let the func to return a list of results
         # for sub-benchmarks
         bench_result = func(base_cmd_prefix, changed_cmd_prefix, options)
@@ -1631,13 +1634,13 @@
             results.append((name, bench_result))
 
-    print
+    print()
-    print "Report on %s" % " ".join(platform.uname())
+    print("Report on %s" % " ".join(platform.uname()))
     if multiprocessing:
-        print "Total CPU cores:", multiprocessing.cpu_count()
+        print("Total CPU cores:", multiprocessing.cpu_count())
     for name, result in results:
-        print
-        print "###", name, "###"
-        print result.string_representation()
+        print()
+        print("###", name, "###")
+        print(result.string_representation())
     return results
 
 if __name__ == "__main__":
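
Several perf.py hunks above wrap map() and dict.keys() in list() because both
return lazy iterators/views on Python 3 rather than lists. A short sketch of
why that matters (illustrative only; the command and the function names in the
dict are made up):

    # Illustrative sketch: lazy map()/keys() results must become lists before reuse.
    command = ["python", "bm_ai.py", 50]
    cmd = list(map(str, command))        # map() is a one-shot iterator on Python 3
    print(" ".join(cmd))                 # a real list can be joined here ...
    print(len(cmd))                      # ... and still be inspected afterwards

    funcs = {"BM_ai": None, "BM_spitfire": None}
    names = list(funcs.keys())           # keys() is a view on Python 3, not a list,
    print(", ".join(sorted(names + ["all"])))   # so concatenation needs list() first
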
diff --git a/unladen_swallow/performance/bm_ai.py b/unladen_swallow/performance/bm_ai.py
--- a/unladen_swallow/performance/bm_ai.py
+++ b/unladen_swallow/performance/bm_ai.py
@@ -10,7 +10,7 @@
 """
 
 # Wanted by the alphametics solver.
-from __future__ import division
+from __future__ import division, print_function
 
 __author__ = "[email protected] (Collin Winter)"
 
@@ -19,11 +19,16 @@
 import re
 import string
 import time
+import sys
+
+if sys.version_info[0] < 3:
+    range = xrange
 
 # Local imports
 import util
 
 
+
 # Pure-Python implementation of itertools.permutations().
 def permutations(iterable, r=None):
     """permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)"""
@@ -31,8 +36,8 @@
     n = len(pool)
     if r is None:
         r = n
-    indices = range(n)
-    cycles = range(n-r+1, n+1)[::-1]
+    indices = list(range(n))
+    cycles = list(range(n-r+1, n+1))[::-1]
     yield tuple(pool[i] for i in indices[:r])
     while n:
         for i in reversed(range(r)):
@@ -75,7 +80,7 @@
     list(n_queens(8))
 
     times = []
-    for _ in xrange(iterations):
+    for _ in range(iterations):
         t0 = time.time()
         list(n_queens(8))
         t1 = time.time()
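
The bm_ai.py hunk above aliases range to xrange on Python 2 so the benchmark
loop stays lazy on both interpreters, while explicit list(range(...)) calls are
used where a real sequence is needed. A standalone sketch of that guard
(illustrative only, not part of the patch; count_calls() is made up):

    # Illustrative sketch: a major-version guard for single-source py2/py3 code.
    import sys

    if sys.version_info[0] < 3:          # major version: 2 on Python 2, 3 on Python 3
        range = xrange                   # noqa: F821 - xrange exists only on Python 2

    def count_calls(iterations):
        total = 0
        for _ in range(iterations):      # lazy on both interpreters
            total += 1
        return total

    print(count_calls(10))               # -> 10
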
diff --git a/unladen_swallow/performance/util.py b/unladen_swallow/performance/util.py
--- a/unladen_swallow/performance/util.py
+++ b/unladen_swallow/performance/util.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 
 """Utility code for benchmark scripts."""
+from __future__ import division, print_function
 
 __author__ = "[email protected] (Collin Winter)"
 
@@ -27,10 +28,10 @@
         data = bench_func(num_runs, *args)
         if options.take_geo_mean:
             product = reduce(operator.mul, data, 1)
-            print math.pow(product, 1.0 / len(data))
+            print(math.pow(product, 1.0 / len(data)))
         else:
             for x in data:
-                print x
+                print(x)
 
 
 def add_standard_options_to(parser):