Author: Maciej Fijalkowski <[email protected]>
Branch: conditional_call_value
Changeset: r77046:7ee1ac2e53a0
Date: 2015-05-04 18:58 +0200
http://bitbucket.org/pypy/pypy/changeset/7ee1ac2e53a0/
Log: support cond_call_value in the x86 backend (I hope)
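
In plain Python, the operation this changeset wires up behaves roughly as
follows. This is only an illustrative sketch of the operand roles used in
the diff below (condition, default value, function, call arguments); the
names are made up and it is not code from the changeset:

    def cond_call_value(cond, default_value, func, *args):
        # Sketch only: call the function when the condition is nonzero,
        # otherwise keep the default value.  The x86 code below keeps
        # whichever of the two ends up as the result in eax.
        if cond:
            return func(*args)
        return default_value
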
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -109,10 +109,16 @@
kind='unicode')
else:
self.malloc_slowpath_unicode = None
- self.cond_call_slowpath = [self._build_cond_call_slowpath(False,
- False),
- self._build_cond_call_slowpath(False, True),
- self._build_cond_call_slowpath(True, False),
- self._build_cond_call_slowpath(True, True)]
+ self.cond_call_slowpath = [
+ self._build_cond_call_slowpath(False, False, False),
+ self._build_cond_call_slowpath(False, True, False),
+ self._build_cond_call_slowpath(True, False, False),
+ self._build_cond_call_slowpath(True, True, False),
+ self._build_cond_call_slowpath(False, False, True),
+ self._build_cond_call_slowpath(False, True, True),
+ self._build_cond_call_slowpath(True, False, True),
+ self._build_cond_call_slowpath(True, True, True),
+ ]
self._build_stack_check_slowpath()
self._build_release_gil(gc_ll_descr.gcrootmap)
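
The eight slowpath variants above are laid out so that the x86 backend can
pick one with the flat index 'res * 4 + floats * 2 + callee_only' computed
further down in this diff. A small sketch of that mapping (illustrative
helper, not part of the changeset):

    def slowpath_index(supports_floats, callee_only, has_result):
        # Matches the order of the list above: the first four entries
        # are built with has_result=False, the last four with True.
        return has_result * 4 + supports_floats * 2 + callee_only

    # e.g. (True, False, True) -> 1*4 + 1*2 + 0 == 6, the seventh entry.
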
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -152,7 +152,8 @@
mc.RET()
self._frame_realloc_slowpath = mc.materialize(self.cpu, [])
- def _build_cond_call_slowpath(self, supports_floats, callee_only):
+ def _build_cond_call_slowpath(self, supports_floats, callee_only,
+ has_result):
""" This builds a general call slowpath, for whatever call happens to
come.
"""
@@ -161,7 +162,8 @@
# 'cond_call_register_arguments' and eax, because these have already
# been saved by the caller. Note that this is not symmetrical:
# these 5 registers are saved by the caller but restored here at
- # the end of this function.
+ # the end of this function. If has_result is True, we don't restore
+ # eax, as we use the result of the function.
self._push_all_regs_to_frame(mc, cond_call_register_arguments + [eax],
supports_floats, callee_only)
if IS_X86_64:
@@ -182,7 +184,11 @@
mc.ADD(esp, imm(WORD * 7))
self.set_extra_stack_depth(mc, 0)
self._reload_frame_if_necessary(mc, align_stack=True)
- self._pop_all_regs_from_frame(mc, [], supports_floats, callee_only)
+ if has_result:
+ lst = [eax]
+ else:
+ lst = []
+ self._pop_all_regs_from_frame(mc, lst, supports_floats, callee_only)
self.pop_gcmap(mc) # push_gcmap(store=True) done by the caller
mc.RET()
return mc.materialize(self.cpu, [])
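
The only behavioural difference between the has_result variants of the
slowpath is which registers get popped back from the jitframe on the way
out. A minimal sketch of that split, assuming the usual x86-64 argument
registers for 'cond_call_register_arguments' (stand-in strings, not the
backend's register objects):

    COND_CALL_REGISTER_ARGUMENTS = ['edi', 'esi', 'edx', 'ecx']

    def slowpath_register_lists(has_result):
        # Registers the slowpath does not push itself: the caller is
        # expected to have saved these (see the comment at the top of
        # _build_cond_call_slowpath above).
        not_pushed = COND_CALL_REGISTER_ARGUMENTS + ['eax']
        # Registers the slowpath does not pop: eax is left alone when
        # it carries the result of the call.
        not_popped = ['eax'] if has_result else []
        return not_pushed, not_popped
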
@@ -2223,7 +2229,9 @@
def label(self):
self._check_frame_depth_debug(self.mc)
- def cond_call(self, op, gcmap, loc_cond, imm_func, arglocs):
+ def cond_call(self, op, gcmap, loc_cond, loc_def, imm_func, arglocs):
+ if loc_def is not None:
+ self.mc.MOV(eax, loc_def)
self.mc.TEST(loc_cond, loc_cond)
self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later
jmp_adr = self.mc.get_relative_pos()
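
The J_il8(..., 0) above emits the conditional jump with a dummy 8-bit
offset and records the position right after it; the offset is overwritten
once the end of the conditional block is known (that patching step is not
part of this hunk). A self-contained toy sketch of the forward-patching
pattern, not backend code:

    class ForwardJumpSketch(object):
        # Toy model of the MachineCodeBuilder-style forward jump: emit a
        # placeholder offset byte, remember the position just after it,
        # and fix the byte up when the jump target is reached.
        def __init__(self):
            self.code = []

        def get_relative_pos(self):
            return len(self.code)

        def emit_jz_placeholder(self):
            self.code.append('JZ')
            self.code.append(0)              # offset byte, patched later
            return self.get_relative_pos()   # like jmp_adr above

        def patch_jump_to_here(self, jmp_adr):
            # rel8 offset = target position - position after the jump
            self.code[jmp_adr - 1] = self.get_relative_pos() - jmp_adr
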
@@ -2231,10 +2239,15 @@
self.push_gcmap(self.mc, gcmap, store=True)
#
# first save away the 4 registers from 'cond_call_register_arguments'
- # plus the register 'eax'
+ # plus the register 'eax', if res is False
base_ofs = self.cpu.get_baseofs_of_frame_field()
should_be_saved = self._regalloc.rm.reg_bindings.values()
- for gpr in cond_call_register_arguments + [eax]:
+ res = loc_def is not None
+ if res:
+ extra = [eax]
+ else:
+ extra = []
+ for gpr in cond_call_register_arguments + extra:
if gpr not in should_be_saved:
continue
v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value]
@@ -2260,7 +2273,8 @@
callee_only = True
if self._regalloc.xrm.reg_bindings:
floats = True
- cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only]
+ cond_call_adr = self.cond_call_slowpath[res * 4 + floats * 2 +
+ callee_only]
self.mc.CALL(imm(follow_jump(cond_call_adr)))
# restoring the registers saved above, and doing pop_gcmap(), is left
# to the cond_call_slowpath helper. We never have any result value.
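
For the new value variant the pre-existing comment above no longer quite
applies: the result does stay in eax. Putting the pieces of this hunk
together, the emitted code follows roughly this shape (a Python stand-in
for orientation, with eax modelled as a local variable):

    def emitted_shape(cond, default_value, call_slowpath):
        eax = default_value      # MOV eax, loc_def
        if cond:                 # TEST loc_cond, loc_cond / JZ skip
            # push gcmap, save the argument registers into the jitframe,
            # then CALL cond_call_slowpath[res * 4 + floats * 2 + callee_only]
            eax = call_slowpath()
            # the slowpath restores every saved register except eax
        return eax               # 'skip:' target; result or default
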
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -869,7 +869,33 @@
arglocs = [self.loc(args[i]) for i in range(2, len(args))]
gcmap = self.get_gcmap()
self.rm.possibly_free_var(args[0])
- self.assembler.cond_call(op, gcmap, loc_cond, imm_func, arglocs)
+ self.assembler.cond_call(op, gcmap, loc_cond, None, imm_func, arglocs)
+
+ def consider_cond_call_value(self, op):
+ # A 32-bit-only, asmgcc-only issue: 'cond_call_register_arguments'
+ # contains edi and esi, which are also in asmgcroot.py:ASM_FRAMEDATA.
+ # We must make sure that edi and esi do not contain GC pointers.
+ if IS_X86_32 and self.assembler._is_asmgcc():
+ for box, loc in self.rm.reg_bindings.items():
+ if (loc == edi or loc == esi) and box.type == REF:
+ self.rm.force_spill_var(box)
+ assert box not in self.rm.reg_bindings
+ #
+ assert op.result is not None
+ args = op.getarglist()
+ assert 3 <= len(args) <= 4 + 3 # maximum 4 arguments
+ self.rm.force_allocate_reg(op.result, selected_reg=eax)
+ loc_cond = self.make_sure_var_in_reg(args[0], args + [op.result])
+ loc_def = self.loc(args[1])
+ v = args[2]
+ assert isinstance(v, Const)
+ imm_func = self.rm.convert_to_imm(v)
+ arglocs = [self.loc(args[i]) for i in range(3, len(args))]
+ gcmap = self.get_gcmap()
+ self.rm.possibly_free_var(args[0])
+ self.rm.possibly_free_var(args[1])
+ self.assembler.cond_call(op, gcmap, loc_cond, loc_def, imm_func,
+ arglocs)
def consider_call_malloc_nursery(self, op):
size_box = op.getarg(0)
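
For orientation, the operand layout that consider_cond_call_value assumes
can be spelled out like this (an illustrative unpacking using the same
accessor, not regalloc code):

    def unpack_cond_call_value(op):
        args = op.getarglist()
        condition = args[0]      # tested with TEST/JZ; call if nonzero
        default_value = args[1]  # preloaded into eax
        function = args[2]       # must be a Const, turned into imm_func
        call_args = args[3:]     # at most 4 extra call arguments
        return condition, default_value, function, call_args
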
diff --git a/rpython/jit/backend/x86/test/test_call.py b/rpython/jit/backend/x86/test/test_call.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/x86/test/test_call.py
@@ -0,0 +1,8 @@
+
+from rpython.jit.backend.x86.test.test_basic import Jit386Mixin
+from rpython.jit.metainterp.test import test_call
+
+class TestCall(Jit386Mixin, test_call.CallTest):
+ # for the individual tests see
+ # ====> ../../../metainterp/test/test_call.py
+ pass
diff --git a/rpython/jit/metainterp/test/test_call.py b/rpython/jit/metainterp/test/test_call.py
--- a/rpython/jit/metainterp/test/test_call.py
+++ b/rpython/jit/metainterp/test/test_call.py
@@ -2,7 +2,7 @@
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.rlib import jit
-class TestCall(LLJitMixin):
+class CallTest(object):
def test_indirect_call(self):
@jit.dont_look_inside
def f1(x):
@@ -81,3 +81,6 @@
assert self.meta_interp(main, [10]) == 42
self.check_resops(guard_no_exception=0)
+
+class TestCall(LLJitMixin, CallTest):
+ pass