Author: Maciej Fijalkowski <[email protected]>
Branch: single-run
Changeset: r322:fc84a3202e27
Date: 2015-04-08 11:49 +0200
http://bitbucket.org/pypy/benchmarks/changeset/fc84a3202e27/

Log:    improve the json format

diff --git a/bench-data.json b/bench-data.json
--- a/bench-data.json
+++ b/bench-data.json
@@ -9,6 +9,19 @@
  },
  "bm_mako": {
  },
+ "bm_dulwich_log": {
+ },
+ "bm_icbd": {
+ },
+ "bm_krakatau": {
+ },
+ "bm_mdp": {
+ },
+ "deltablue": {
+ },
+ "pypy_interp": {
+   "description": "interpreting py.py"
+ },
  "chaos": {
    "description": "Creates chaosgame-like fractals"
  },
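
For reference, after this change bench-data.json still simply maps each benchmark
name to a (possibly empty) metadata dict. A minimal sketch of the merged file,
showing only a few entries (the new bm_* entries carry no description yet):

    # Rough shape of bench-data.json after this change (excerpt, not the
    # full file); the new placeholder entries sit next to the documented ones.
    {
        "bm_dulwich_log": {},
        "bm_krakatau": {},
        "pypy_interp": {"description": "interpreting py.py"},
        "chaos": {"description": "Creates chaosgame-like fractals"}
    }
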
diff --git a/runner.py b/runner.py
--- a/runner.py
+++ b/runner.py
@@ -6,6 +6,7 @@
 import socket
 import sys
 import os
+import time
 
 import benchmarks
 from saveresults import save
@@ -18,10 +19,20 @@
 class WrongBenchmark(Exception):
     pass
 
+def convert_results(result_list):
+    r = []
+    for bench, cls, dict, t0 in result_list:
+        runs = []
+        cur_time = t0
+        for t in dict['times']:
+            runs.append({"start_timestamp": cur_time, "duration": t})
+            cur_time += t
+        r.append({"name": bench, "runs": runs, "events": {}})
+    return r
 
 def run_and_store(benchmark_set, result_filename, path, revision=0,
                   options='', branch='default', args='', upload=False,
-                  fast=False, full_store=False):
+                  fast=False, full_store=False, parser_options=None):
     _funcs = perf.BENCH_FUNCS.copy()
     _funcs.update(perf._FindAllBenchmarks(benchmarks.__dict__))
     bench_data = json.load(open('bench-data.json'))
@@ -38,16 +49,24 @@
     if full_store:
         opts += ['--no_statistics']
     opts += [path]
+    start_time = time.time()
     results = perf.main(opts, funcs)
+    end_time = time.time()
     f = open(str(result_filename), "w")
-    results = [(name, result.__class__.__name__, result.__dict__)
-           for name, result in results]
+    results = [(name, result.__class__.__name__, result.__dict__, t0)
+           for name, result, t0 in results]
+    force_host = parser_options.force_host
     f.write(json.dumps({
         'revision': revision,
-        'results': results,
+        'results': convert_results(results),
+        "interpreter": parser_options.python,
+        "machine": force_host if force_host else socket.gethostname(),
+        "protocol_version_no": "1",
+        "start_timestamp": start_time,
+        "end_timestamp": end_time,
         'options': options,
         'branch': branch,
-        }))
+        }, indent=4))
     f.close()
     return results
 
@@ -168,7 +187,8 @@
 
     results = run_and_store(benchmarks, output_filename, path,
                             revision, args=args, fast=fast,
-                            full_store=full_store, branch=branch)
+                            full_store=full_store, branch=branch,
+                            parser_options=options)
 
     if options.upload_url:
         branch = options.upload_branch or 'default'
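
To make the new per-run layout concrete, here is a minimal standalone sketch of
what the convert_results() helper above produces. The benchmark name, class name
and timings are invented, and the loop variable is called attrs here instead of
dict purely to avoid shadowing the builtin:

    import json
    import time

    def convert_results(result_list):
        # Mirrors the helper added to runner.py: each raw tuple of
        # (name, result class name, result __dict__, start time) becomes a
        # {"name", "runs", "events"} record, where consecutive runs get
        # start_timestamp values offset by the previous durations.
        r = []
        for bench, cls, attrs, t0 in result_list:
            runs = []
            cur_time = t0
            for t in attrs['times']:
                runs.append({"start_timestamp": cur_time, "duration": t})
                cur_time += t
            r.append({"name": bench, "runs": runs, "events": {}})
        return r

    # Invented sample data: one benchmark with three timed runs.
    fake_results = [("chaos", "RawResult", {"times": [0.21, 0.20, 0.19]},
                     time.time())]
    print json.dumps(convert_results(fake_results), indent=4)
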
diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py
--- a/unladen_swallow/perf.py
+++ b/unladen_swallow/perf.py
@@ -779,13 +779,8 @@
     return SimpleBenchmark(MeasureDjango, *args, **kwargs)
 
 
-<<<<<<< local
 def MeasureRietveld(python, options, bench_data):
-    PYTHONPATH = ":".join([DJANGO_DIR,
-=======
-def MeasureRietveld(python, options):
     PYTHONPATH = os.pathsep.join([DJANGO_DIR,
->>>>>>> other
                            # These paths are lifted from
                            # lib/google_appengine.appcfg.py.  Note that we use
                            # our own version of Django instead of Appengine's.
@@ -1133,6 +1128,7 @@
     for name in sorted(should_run):
         func, bench_data = bench_funcs[name]
         print "Running %s..." % name
+        t0 = time.time()
         # PyPy specific modification: let the func to return a list of results
         # for sub-benchmarks
         bench_result = func(base_cmd_prefix, options, bench_data)
@@ -1140,15 +1136,15 @@
         if isinstance(bench_result, list):
             for subname, subresult in bench_result:
                 fullname = '%s_%s' % (name, subname)
-                results.append((fullname, subresult))
+                results.append((fullname, subresult, t0))
         else:
-            results.append((name, bench_result))
+            results.append((name, bench_result, t0))
 
     print
     print "Report on %s" % " ".join(platform.uname())
     if multiprocessing:
         print "Total CPU cores:", multiprocessing.cpu_count()
-    for name, result in results:
+    for name, result, start_time in results:
         print
         print "###", name, "###"
         print result.string_representation()
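
Putting the runner.py pieces together, the result file written by run_and_store
now has roughly the shape sketched below. Every concrete value is an invented
placeholder; the comments map each field back to where it gets filled in:

    # Rough shape of the JSON that run_and_store() writes after this change.
    # All values below are made up for illustration.
    {
        "revision": 0,
        "results": [                          # output of convert_results()
            {
                "name": "chaos",
                "runs": [
                    {"start_timestamp": 1428486540.0, "duration": 0.21},
                    {"start_timestamp": 1428486540.21, "duration": 0.20}
                ],
                "events": {}
            }
        ],
        "interpreter": "pypy-c",              # parser_options.python
        "machine": "benchmarker",             # force_host if set, else socket.gethostname()
        "protocol_version_no": "1",
        "start_timestamp": 1428486540.0,      # time.time() before perf.main()
        "end_timestamp": 1428486600.0,        # time.time() after perf.main()
        "options": "",
        "branch": "default"
    }
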
