Author: Maciej Fijalkowski <fij...@gmail.com>
Branch: optresult-unroll
Changeset: r78480:127dc1dae3be
Date: 2015-07-06 18:49 +0200
http://bitbucket.org/pypy/pypy/changeset/127dc1dae3be/

Log:    fix for guard_value

diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
--- a/rpython/jit/metainterp/compile.py
+++ b/rpython/jit/metainterp/compile.py
@@ -75,7 +75,7 @@
 
 class UnrolledLoopData(CompileData):
     """ This represents label() ops jump with extra info that's from the
-    run of LoopCompileData
+    run of LoopCompileData. The jump goes back to the same label.
     """
     def __init__(self, start_label, end_jump, operations, state,
                  call_pure_results=None):
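
For readers who don't live in the JIT, here is a minimal, self-contained sketch of the shape UnrolledLoopData describes: a label, the peeled trace, and a jump whose target is that same label. FakeOp is a toy stand-in for ResOperation, not the real rpython.jit.metainterp class.

    class FakeOp:
        """Toy stand-in for ResOperation: an op name plus argument boxes."""
        def __init__(self, opname, args):
            self.opname = opname
            self.args = list(args)

        def getarglist(self):
            return self.args

    start_label = FakeOp("LABEL", ["i0", "i1"])      # loop header
    operations = [FakeOp("INT_ADD", ["i0", "i1"])]   # peeled loop body
    end_jump = FakeOp("JUMP", ["i1", "i0"])          # targets start_label above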
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_util.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py
@@ -445,12 +445,14 @@
         preamble_data.forget_optimization_info()
         end_label = ResOperation(rop.LABEL, start_state.end_args)
         loop_data = compile.UnrolledLoopData(end_label, jump_op,
-                                             ops + [jump_op], start_state)
+                                             ops, start_state)
         _, ops = self._do_optimize_loop(loop_data, call_pure_results)
         preamble = TreeLoop('preamble')
         preamble.inputargs = start_label.getarglist()
         preamble.operations = [start_label] + preamble_ops
-        loop.operations = [end_label] + ops
+        emit_end_label = ResOperation(rop.LABEL, start_state.end_args)
+        loop.inputargs = start_state.end_args
+        loop.operations = [emit_end_label] + ops
         return Info(preamble)
 
     def foo(self):
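
A guess at the intent of the harness change, sketched with toy classes (the real code builds ResOperation objects): the emitted loop gets its own LABEL instance built from the same end args, so the preamble and the loop never share one op object, and loop.inputargs is kept in sync with that label.

    class Op:
        def __init__(self, name, args):
            self.name, self.args = name, list(args)

    end_args = ["i2", "i3"]
    end_label = Op("LABEL", end_args)        # handed to UnrolledLoopData
    emit_end_label = Op("LABEL", end_args)   # fresh instance for the emitted loop

    loop_inputargs = end_args                # mirrors loop.inputargs above
    loop_operations = [emit_end_label]       # followed by the optimized ops
    assert emit_end_label is not end_label   # same args, distinct op objects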
diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py
--- a/rpython/jit/metainterp/optimizeopt/unroll.py
+++ b/rpython/jit/metainterp/optimizeopt/unroll.py
@@ -79,6 +79,12 @@
         self._check_no_forwarding([[start_label, end_jump], ops])
         self.import_state(start_label, state)
         self.optimizer.propagate_all_forward(start_label.getarglist()[:], ops)
+        jump_args = [self.get_box_replacement(op)
+                     for op in end_jump.getarglist()]
+        jump_args = state.virtual_state.make_inputargs(jump_args,
+                                                       self.optimizer)
+        jump_op = ResOperation(rop.JUMP, jump_args)
+        self.optimizer._newoperations.append(jump_op)
         return None, self.optimizer._newoperations
 
     def random_garbage(self):
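
The new jump construction does three things: map each jump argument to its current optimized box, filter the result through the virtual state, and append a fresh JUMP. A hand-rolled sketch with strings standing in for boxes; get_box_replacement and make_inputargs here are toy stand-ins and do not match the real signatures.

    replacements = {"i0": "i0_opt", "p1": "p1_opt"}

    def get_box_replacement(box):
        # One-step lookup; the real helper chases a forwarded pointer.
        return replacements.get(box, box)

    def make_inputargs(args):
        # Stand-in for virtual_state.make_inputargs: keep only the boxes
        # the virtual state says must survive (here: anything non-virtual).
        return [a for a in args if not a.startswith("v")]

    end_jump_args = ["i0", "p1", "v2"]
    jump_args = [get_box_replacement(b) for b in end_jump_args]
    jump_args = make_inputargs(jump_args)
    print(jump_args)   # ['i0_opt', 'p1_opt']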
@@ -186,7 +192,8 @@
         infos = {}
         for arg in end_args:
             infos[arg] = self.optimizer.getinfo(arg)
-        return ExportedState(end_args, inparg_mapping, virtual_state, infos,
+        label_args = virtual_state.make_inputargs(end_args, self.optimizer)
+        return ExportedState(label_args, inparg_mapping, virtual_state, infos,
                              sb.short_boxes)
 
 
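Reading the two unroll.py hunks together, export_state now runs end_args through the same make_inputargs filter that the jump rewrite uses, which should keep the exported label and the later jump agreeing on their argument lists. A tiny self-contained check of that invariant, using the same toy filter as in the sketch above:

    def make_inputargs(args):
        # Toy stand-in for virtual_state.make_inputargs: drop virtual boxes.
        return [a for a in args if not a.startswith("v")]

    end_args = ["i0", "v1", "p2"]
    label_args = make_inputargs(end_args)   # exported in ExportedState
    jump_args = make_inputargs(end_args)    # built when closing the loop
    assert label_args == jump_args == ["i0", "p2"]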