Author: Matti Picus <matti.pi...@gmail.com>
Branch: chameleon
Changeset: r433:e07bc02ab2e8
Date: 2020-01-07 17:41 +0200
http://bitbucket.org/pypy/benchmarks/changeset/e07bc02ab2e8/

Log:    fix up runners to rename spitfire -> spitfire2 since the benchmark
        has now changed significantly

diff --git a/benchmarks.py b/benchmarks.py
--- a/benchmarks.py
+++ b/benchmarks.py
@@ -113,9 +113,9 @@
                      globals(), bm_env={'PYTHONPATH': os.pathsep.join(TWISTED)},
                                  iteration_scaling=iteration_scaling)
 
-_register_new_bm('spitfire', 'spitfire', globals(),
-    extra_args=['--benchmark=spitfire_o4'])
-_register_new_bm('spitfire', 'spitfire_cstringio', globals(),
+_register_new_bm('spitfire', 'spitfire2', globals(),
+    extra_args=['--benchmark=spitfire_o3'])
+_register_new_bm('spitfire', 'spitfire_cstringio2', globals(),
     extra_args=['--benchmark=python_cstringio'])
 
 # =========================================================================
diff --git a/own/spitfire.py b/own/spitfire.py
--- a/own/spitfire.py
+++ b/own/spitfire.py
@@ -12,17 +12,12 @@
 def relative(*args):
     return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args)
 
-class FakePsyco(object):
-    def bind(self, *args, **kwargs):
-        pass
-sys.modules["psyco"] = FakePsyco()
-
 testdir = relative('..', 'unladen_swallow', 'lib', 'spitfire', 'tests', 'perf')
 sys.path.insert(0, testdir)
 sys.path.insert(0, relative('..', 'unladen_swallow', 'lib', 'spitfire'))
 import bigtable
 # bummer, timeit module is stupid
-from bigtable import test_python_cstringio, test_spitfire_o4, test_spitfire
+from bigtable import test_python_cstringio, test_spitfire_o3, test_spitfire
 
 def runtest(n, benchmark):
     times = []
@@ -38,9 +33,9 @@
         usage="%prog [options]",
         description="Test the performance of the spitfire benchmark")
     parser.add_option('--benchmark', type="choice",
-                      choices=['python_cstringio', 'spitfire_o4'],
-                      default="spitfire_o4",
-                      help="choose between cstringio and spitfire_o4")
+                      choices=['python_cstringio', 'spitfire_o3'],
+                      default="spitfire_o3",
+                      help="choose between cstringio and spitfire_o3")
     util.add_standard_options_to(parser)
     options, args = parser.parse_args(sys.argv)
     util.run_benchmark(options, options.num_runs, runtest, options.benchmark)
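
A note on the option handling above: type="choice" makes optparse reject any
--benchmark value outside the listed choices. A minimal standalone sketch of
the same pattern, independent of the benchmark harness (the argument list
passed to parse_args is illustrative):

    # Minimal sketch of the optparse "choice" pattern used in own/spitfire.py.
    # A value outside `choices` makes optparse exit with a usage error.
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option('--benchmark', type="choice",
                      choices=['python_cstringio', 'spitfire_o3'],
                      default="spitfire_o3",
                      help="choose between cstringio and spitfire_o3")

    options, args = parser.parse_args(['--benchmark', 'python_cstringio'])
    print(options.benchmark)  # -> python_cstringio
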
diff --git a/runner.py b/runner.py
--- a/runner.py
+++ b/runner.py
@@ -12,7 +12,7 @@
 from saveresults import save
 from unladen_swallow import perf
 
-BENCHMARK_SET = ['richards', 'slowspitfire', 'django',
+BENCHMARK_SET = ['richards', 'spitfire2', 'django',
                  'html5lib', 'ai']
 if sys.version_info[0] < 3:
     BENCHMARK_SET += [
diff --git a/unladen_swallow/lib/spitfire/tests/perf/bigtable.py b/unladen_swallow/lib/spitfire/tests/perf/bigtable.py
--- a/unladen_swallow/lib/spitfire/tests/perf/bigtable.py
+++ b/unladen_swallow/lib/spitfire/tests/perf/bigtable.py
@@ -10,11 +10,18 @@
 #
 # Author: Jonas Borgström <jo...@edgewall.com>
 
+from __future__ import print_function
+
 import cgi
 import sys
 import timeit
-import StringIO
-import cStringIO
+if sys.version_info[0] < 3:
+    import StringIO
+    import cStringIO
+else:
+    import io as StringIO
+    cStringIO = StringIO
+
 try:
     import genshi
     from genshi.builder import tag
@@ -121,7 +128,7 @@
     def test_mako():
         """Mako Template"""
         data = mako_tmpl.render(table=table)
-        #print "mako", len(data)
+        #print("mako", len(data))
 
 if SpitfireTemplate:
     import spitfire.compiler.analyzer
@@ -141,37 +148,37 @@
         spitfire_src, 'spitfire_tmpl')
 
     spitfire_tmpl_o1 = spitfire.compiler.util.load_template(
-        spitfire_src, 'spitfire_tmpl_o1', spitfire.compiler.analyzer.o1_options,
+        spitfire_src, 'spitfire_tmpl_o1', spitfire.compiler.options.o1_options,
         {'enable_filters':enable_filters})
 
     spitfire_tmpl_o2 = spitfire.compiler.util.load_template(
-        spitfire_src, 'spitfire_tmpl_o2', spitfire.compiler.analyzer.o2_options,
+        spitfire_src, 'spitfire_tmpl_o2', spitfire.compiler.options.o2_options,
         {'enable_filters':enable_filters})
 
     spitfire_tmpl_o3 = spitfire.compiler.util.load_template(
-        spitfire_src, 'spitfire_tmpl_o3', spitfire.compiler.analyzer.o3_options,
+        spitfire_src, 'spitfire_tmpl_o3', spitfire.compiler.options.o3_options,
         {'enable_filters':enable_filters})
 
 
     def test_spitfire():
         """Spitfire template"""
         data = spitfire_tmpl(search_list=[{'table':table}]).main()
-        #print "spitfire", len(data)
+        #print("spitfire", len(data))
 
     def test_spitfire_o1():
         """Spitfire template -O1"""
         data = spitfire_tmpl_o1(search_list=[{'table':table}]).main()
-        #print "spitfire -O1", len(data)
+        #print("spitfire -O1", len(data))
 
     def test_spitfire_o2():
         """Spitfire template -O2"""
         data = spitfire_tmpl_o2(search_list=[{'table':table}]).main()
-        #print "spitfire -O2", len(data)
+        #print("spitfire -O2", len(data))
 
     def test_spitfire_o3():
         """Spitfire template -O3"""
         data = spitfire_tmpl_o3(search_list=[{'table':table}]).main()
-        #print "spitfire -O3", len(data)
+        #print("spitfire -O3", len(data))
 
 if CheetahTemplate:
     cheetah_src = """<table>
@@ -187,30 +194,30 @@
     cheetah_template = CheetahTemplate.Template(cheetah_src, searchList=[{'table':table}])
     # force compile
     post = set([k for k, v in sys.modules.iteritems() if v])
-    #print post - pre
+    #print(post - pre)
 
-    #print type(cheetah_template)
+    #print(type(cheetah_template))
     cheetah_template.respond()
     cheetah_template = type(cheetah_template)
 
     def test_cheetah():
         """Cheetah template"""
         data = cheetah_template(searchList=[{'table':table}]).respond()
-        #print "cheetah", len(data)
+        #print("cheetah", len(data))
 
 if genshi:
     def test_genshi():
         """Genshi template"""
         stream = genshi_tmpl.generate(table=table)
         data = stream.render('html', strip_whitespace=False)
-        #print "genshi", len(data)
+        #print("genshi", len(data))
 
     def disabled_test_genshi_text():
         """Genshi text template"""
         stream = genshi_text_tmpl.generate(table=table)
-        print "test_genshi_text", stream
+        print("test_genshi_text", stream)
         data = stream.render('text')
-        print "test_genshi_text", 'data', stream
+        print("test_genshi_text", 'data', stream)
 
     def test_genshi_builder():
         """Genshi template + tag builder"""
@@ -305,7 +312,7 @@
     write('<table>\n')
     for row in table:
         write('<tr>\n')
-        for col in row.itervalues():
+        for col in row.values():
             write('<td>\n')
             write('%s' % col)
             write('\n</td>\n')
@@ -321,7 +328,7 @@
     write('<table>\n')
     for row in table:
         write('<tr>\n')
-        for col in row.itervalues():
+        for col in row.values():
             write('<td>\n')
             write('%s' % col)
             write('\n</td>\n')
@@ -337,7 +344,7 @@
     write('<table>\n')
     for row in table:
         write('<tr>\n')
-        for col in row.itervalues():
+        for col in row.values():
             write('<td>\n')
             write('%s' % col)
             write('\n</td>\n')
@@ -369,7 +376,7 @@
             result = '   (not installed?)'
         else:
             result = '%16.2f ms' % (1000 * time)
-        print '%-35s %s' % (getattr(sys.modules[__name__], test).__doc__, result)
+        print('%-35s %s' % (getattr(sys.modules[__name__], test).__doc__, result))
 
 
 if __name__ == '__main__':
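
The bigtable.py changes above are a straightforward 2/3 port: print statements
become the print() function, the StringIO/cStringIO imports gain a Python 3
fallback to io, and dict.itervalues() becomes dict.values(). A self-contained
sketch of the combined idiom, runnable on both major versions (the sample row
data is illustrative):

    # Sketch of the 2/3 compatibility idioms applied in the bigtable.py port.
    from __future__ import print_function
    import sys

    if sys.version_info[0] < 3:
        import cStringIO as StringIO   # fast C implementation, Python 2 only
    else:
        import io as StringIO          # io.StringIO is the Python 3 equivalent

    buf = StringIO.StringIO()
    row = {'a': 1, 'b': 2}
    for col in row.values():           # itervalues() does not exist on Python 3
        buf.write('<td>%s</td>\n' % col)
    print(buf.getvalue())
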
diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py
--- a/unladen_swallow/perf.py
+++ b/unladen_swallow/perf.py
@@ -12,7 +12,7 @@
 --help to get a full list of options that can be passed to -b.
 
 Omitting the -b option will result in the default group of benchmarks being run
-This currently consists of: 2to3, django, nbody, slowspitfire, slowpickle,
+This currently consists of: 2to3, django, nbody, spitfire, slowpickle,
 slowunpickle, spambayes. Omitting -b is the same as specifying `-b default`.
 
 To run every benchmark perf.py knows about, use `-b all`. To see a full list of
@@ -1036,41 +1036,13 @@
     return MeasureGeneric(python, options, bm_path, env, extra_args)
 
 
-def MeasureSpitfireWithPsyco(python, options):
-    """Use Spitfire to measure Python's performance.
+def BM_Spitfire2(*args, **kwargs):
+    """ Table size was changed in Jan 2020, so the name changed too"""
+    return SimpleBenchmark(MeasureSpitfire, *args, **kwargs)
 
-    Args:
-        python: prefix of a command line for the Python binary.
-        options: optparse.Values instance.
 
-    Returns:
-        (perf_data, mem_usage), where perf_data is a list of floats, each the
-        time it took to run the Spitfire test once; mem_usage is a list of
-        memory usage samples in kilobytes.
-    """
-    SPITFIRE_DIR = Relative("lib/spitfire")
-
-    psyco_dir = ""
-    if not _ComesWithPsyco(python):
-        psyco_dir = _BuildPsyco(python)
-
-    env_dirs = filter(bool, [SPITFIRE_DIR, psyco_dir])
-    spitfire_env = {"PYTHONPATH": os.pathsep.join(env_dirs)}
-
-    try:
-        return MeasureSpitfire(python, options, spitfire_env)
-    finally:
-        try:
-            shutil.rmtree(psyco_dir)
-        except OSError:
-            pass
-
-
-def BM_Spitfire(*args, **kwargs):
-    return SimpleBenchmark(MeasureSpitfireWithPsyco, *args, **kwargs)
-
-
-def BM_SlowSpitfire(base_python, changed_python, options):
+def BM_SlowSpitfire2(base_python, changed_python, options):
+    """ Table size was changed in Jan 2020, so the name changed too"""
     extra_args = ["--disable_psyco"]
     spitfire_env = {"PYTHONPATH": Relative("lib/spitfire")}
 
@@ -1447,7 +1419,7 @@
 # If you update the default group, be sure to update the module docstring, too.
 # An "all" group which includes every benchmark perf.py knows about is 
generated
 # automatically.
-BENCH_GROUPS = {"default": ["2to3", "django", "nbody", "slowspitfire",
+BENCH_GROUPS = {"default": ["2to3", "django", "nbody", "spitfire",
                             "slowpickle", "slowunpickle", "spambayes"],
                 "startup": ["normal_startup", "startup_nosite"],
                 "regex": ["regex_v8", "regex_effbot", "regex_compile"],
diff --git a/unladen_swallow/performance/bm_spitfire.py b/unladen_swallow/performance/bm_spitfire.py
--- a/unladen_swallow/performance/bm_spitfire.py
+++ b/unladen_swallow/performance/bm_spitfire.py
@@ -47,7 +47,7 @@
     # conceivably be interesting to stress Spitfire's lower optimization
     # levels, we assume no-one will be running a production system with those
     # settings.
-    spitfire_tmpl_o4 = spitfire.compiler.util.load_template(
+    spitfire_tmpl_o3 = spitfire.compiler.util.load_template(
         SPITFIRE_SRC,
         "spitfire_tmpl_o3",
         spitfire.compiler.options.o3_options,
@@ -57,7 +57,7 @@
 
     # Warm up Spitfire.
-    zzz = spitfire_tmpl_o4(search_list=[{"table": table}]).main()
-    spitfire_tmpl_o4(search_list=[{"table": table}]).main()
+    zzz = spitfire_tmpl_o3(search_list=[{"table": table}]).main()
+    spitfire_tmpl_o3(search_list=[{"table": table}]).main()
 
     times = []
     for _ in xrange(count):
diff --git a/unladen_swallow/test_perf.py b/unladen_swallow/test_perf.py
--- a/unladen_swallow/test_perf.py
+++ b/unladen_swallow/test_perf.py
@@ -114,7 +114,7 @@
         # perf.py, no -b option.
         should_run = perf.ParseBenchmarksOption("")
         self.assertEqual(should_run, set(["2to3", "django", "slowpickle",
-                                          "slowspitfire", "slowunpickle"]))
+                                          "spitfire", "slowunpickle"]))
 
         # perf.py -b 2to3
         should_run = perf.ParseBenchmarksOption("2to3")
@@ -126,7 +126,7 @@
 
         # perf.py -b -2to3
         should_run = perf.ParseBenchmarksOption("-2to3")
-        self.assertEqual(should_run, set(["django", "slowspitfire",
+        self.assertEqual(should_run, set(["django", "spitfire",
                                           "slowpickle", "slowunpickle"]))
 
         # perf.py -b all
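
The updated assertions pin down the selection semantics of
perf.ParseBenchmarksOption: an empty string selects the default group (which
now contains "spitfire" instead of "slowspitfire"), and a leading "-" excludes
a benchmark from that group. A usage sketch, assuming perf.py is importable as
in the test module:

    # Usage sketch for perf.ParseBenchmarksOption, mirroring the test above.
    from unladen_swallow import perf

    print(perf.ParseBenchmarksOption(""))       # the default group
    print(perf.ParseBenchmarksOption("2to3"))   # run only the 2to3 benchmark
    print(perf.ParseBenchmarksOption("-2to3"))  # default group minus 2to3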