Author: Armin Rigo <[email protected]>
Branch:
Changeset: r76235:dd560e38c568
Date: 2015-03-04 02:00 +0100
http://bitbucket.org/pypy/pypy/changeset/dd560e38c568/
Log:	issue #1990: fix trouble caused by dde0fac9f1a4. See the issue for
	details. The fix is to ask for follow_jump() explicitly, which we
	now do often but not always.
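
Background, judging from the diff below: the implicit jump-following that
regloc.py used to do whenever a CALL or JMP targeted an immediate address
is removed, and an explicit follow_jump() helper is added to
callbuilder.py, called at exactly the sites that still want it. As a
standalone sketch of the same pointer-chasing logic, illustrative only,
using ctypes instead of RPython's rffi and a hypothetical name:

    import ctypes

    def follow_jump_sketch(addr):
        # While the code at 'addr' starts with the byte 0xE9 (JMP rel32),
        # resolve the jump: the target is the end of the 5-byte
        # instruction plus the signed 32-bit displacement stored in its
        # last four bytes.
        while ctypes.c_ubyte.from_address(addr).value == 0xE9:
            disp = ctypes.c_int32.from_address(addr + 1).value
            addr += 5 + disp
        return addr
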
diff --git a/rpython/jit/backend/x86/assembler.py
b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -26,6 +26,7 @@
imm0, imm1, FloatImmedLoc, RawEbpLoc, RawEspLoc)
from rpython.rlib.objectmodel import we_are_translated
from rpython.jit.backend.x86 import rx86, codebuf, callbuilder
+from rpython.jit.backend.x86.callbuilder import follow_jump
from rpython.jit.metainterp.resoperation import rop
from rpython.jit.backend.x86 import support
from rpython.rlib.debug import debug_print, debug_start, debug_stop
@@ -245,7 +246,7 @@
mc.MOV_rr(esi.value, eax.value) # tid
mc.MOV_rs(edi.value, WORD * 3) # load the itemsize
self.set_extra_stack_depth(mc, 16)
- mc.CALL(imm(addr))
+ mc.CALL(imm(follow_jump(addr)))
mc.ADD_ri(esp.value, 16 - WORD)
mc.TEST_rr(eax.value, eax.value)
mc.J_il(rx86.Conditions['Z'], 0xfffff) # patched later
@@ -312,7 +313,7 @@
mc.PUSH_r(esp.value)
#
# esp is now aligned to a multiple of 16 again
- mc.CALL(imm(slowpathaddr))
+ mc.CALL(imm(follow_jump(slowpathaddr)))
#
if IS_X86_32:
mc.ADD_ri(esp.value, 3*WORD) # alignment
@@ -819,7 +820,7 @@
newlooptoken.compiled_loop_token.update_frame_info(
oldlooptoken.compiled_loop_token, baseofs)
mc = codebuf.MachineCodeBlockWrapper()
- mc.JMP(imm(target))
+ mc.JMP(imm(follow_jump(target)))
if WORD == 4: # keep in sync with prepare_loop()
assert mc.get_relative_pos() == 5
else:
@@ -2228,7 +2229,7 @@
if self._regalloc.xrm.reg_bindings:
floats = True
cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only]
- self.mc.CALL(imm(cond_call_adr))
+ self.mc.CALL(imm(follow_jump(cond_call_adr)))
# restoring the registers saved above, and doing pop_gcmap(), is left
# to the cond_call_slowpath helper. We never have any result value.
offset = self.mc.get_relative_pos() - jmp_adr
@@ -2246,7 +2247,7 @@
jmp_adr = self.mc.get_relative_pos()
# save the gcmap
self.push_gcmap(self.mc, gcmap, mov=True)
- self.mc.CALL(imm(self.malloc_slowpath))
+ self.mc.CALL(imm(follow_jump(self.malloc_slowpath)))
offset = self.mc.get_relative_pos() - jmp_adr
assert 0 < offset <= 127
self.mc.overwrite(jmp_adr-1, chr(offset))
@@ -2267,7 +2268,7 @@
jmp_adr = self.mc.get_relative_pos()
# save the gcmap
self.push_gcmap(self.mc, gcmap, mov=True)
- self.mc.CALL(imm(self.malloc_slowpath))
+ self.mc.CALL(imm(follow_jump(self.malloc_slowpath)))
offset = self.mc.get_relative_pos() - jmp_adr
assert 0 < offset <= 127
self.mc.overwrite(jmp_adr-1, chr(offset))
@@ -2332,7 +2333,7 @@
assert kind == rewrite.FLAG_UNICODE
addr = self.malloc_slowpath_unicode
self.mc.MOV(edi, lengthloc)
- self.mc.CALL(imm(addr))
+ self.mc.CALL(imm(follow_jump(addr)))
self.mc.JMP_l8(0) # jump to done, patched later
jmp_location = self.mc.get_relative_pos()
#
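
Each assembler.py call site above now spells out the same pattern,
mc.CALL(imm(follow_jump(addr))). A hypothetical convenience wrapper, not
part of this commit, could centralize it:

    def call_following_jumps(mc, addr):
        # Emit a CALL to 'addr', first resolving any chain of JMP
        # trampolines at the target.
        mc.CALL(imm(follow_jump(addr)))

Keeping the call explicit at each site, as the commit does, makes it
visible exactly where jump-following is wanted, which is the point of
the fix.
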
diff --git a/rpython/jit/backend/x86/callbuilder.py
b/rpython/jit/backend/x86/callbuilder.py
--- a/rpython/jit/backend/x86/callbuilder.py
+++ b/rpython/jit/backend/x86/callbuilder.py
@@ -1,6 +1,7 @@
import sys
from rpython.rlib.clibffi import FFI_DEFAULT_ABI
from rpython.rlib.objectmodel import we_are_translated
+from rpython.rlib.rarithmetic import intmask
from rpython.jit.metainterp.history import INT, FLOAT
from rpython.jit.backend.x86.arch import (WORD, IS_X86_64, IS_X86_32,
PASS_ON_MY_FRAME, FRAME_FIXED_SIZE,
@@ -25,6 +26,14 @@
def align_stack_words(words):
return (words + CALL_ALIGN - 1) & ~(CALL_ALIGN-1)
+def follow_jump(addr):
+ # If the code at 'addr' immediately starts with another JMP
+ # instruction, follow it now. 'addr' is an absolute address here.
+ while rffi.cast(rffi.CCHARP, addr)[0] == '\xE9': # JMP <4 bytes>
+ addr += 5
+ addr += intmask(rffi.cast(rffi.INTP, addr - 4)[0])
+ return addr
+
class CallBuilderX86(AbstractCallBuilder):
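
To make the rel32 arithmetic concrete: a 5-byte JMP encoded as
E9 D3 FF FF FF carries the little-endian signed displacement -45, so a
copy of it at the hypothetical address 0x1000 would land at
0x1000 + 5 - 45 = 0xfd8. (The test_regloc.py hunk further down checks
exactly these bytes.)

    >>> import struct
    >>> struct.unpack('<i', b'\xd3\xff\xff\xff')[0]
    -45
    >>> hex(0x1000 + 5 + (-45))
    '0xfd8'
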
@@ -42,8 +51,11 @@
resloc, restype, ressize)
# Avoid tons of issues with a non-immediate fnloc by sticking it
# as an extra argument if needed
- self.fnloc_is_immediate = isinstance(fnloc, ImmedLoc)
- if not self.fnloc_is_immediate:
+ if isinstance(fnloc, ImmedLoc):
+ self.fnloc_is_immediate = True
+ self.fnloc = imm(follow_jump(fnloc.value))
+ else:
+ self.fnloc_is_immediate = False
self.fnloc = None
self.arglocs = arglocs + [fnloc]
self.current_esp = 0 # 0 or (usually) negative, counted in bytes
@@ -203,7 +215,7 @@
tlofsreg = self.get_tlofs_reg() # => esi, callee-saved
self.save_stack_position() # => edi, callee-saved
mc.PUSH_m((tlofsreg.value, lasterror))
- mc.CALL(imm(SetLastError_addr))
+ mc.CALL(imm(follow_jump(SetLastError_addr)))
# restore the stack position without assuming a particular
# calling convention of _SetLastError()
self.mc.MOV(esp, self.saved_stack_position_reg)
@@ -271,7 +283,7 @@
lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu)
self.save_result_value(save_edx=True) # save eax/edx/xmm0
self.result_value_saved_early = True
- mc.CALL(imm(GetLastError_addr))
+ mc.CALL(imm(follow_jump(GetLastError_addr)))
#
tlofsreg = self.get_tlofs_reg() # => esi (possibly reused)
mc.MOV32_mr((tlofsreg.value, lasterror), eax.value)
@@ -352,7 +364,7 @@
mc.MOV_sr(4, old_value.value)
mc.MOV_sr(0, css_value.value)
# on X86_64, they are already in the right registers
- mc.CALL(imm(self.asm.reacqgil_addr))
+ mc.CALL(imm(follow_jump(self.asm.reacqgil_addr)))
if not self.result_value_saved_early:
self.restore_result_value(save_edx=False)
#
diff --git a/rpython/jit/backend/x86/regloc.py
b/rpython/jit/backend/x86/regloc.py
--- a/rpython/jit/backend/x86/regloc.py
+++ b/rpython/jit/backend/x86/regloc.py
@@ -516,10 +516,7 @@
if code == possible_code:
val = getattr(loc, "value_" + possible_code)()
if possible_code == 'i':
- # This is for CALL or JMP only. If target is
- # immediately starting with another JMP instruction,
- # follow it now.
- val = self._follow_jump_instructions(val)
+ # This is for CALL or JMP only.
if self.WORD == 4:
_rx86_getattr(self, name + "_l")(val)
self.add_pending_relocation()
@@ -537,17 +534,6 @@
return func_with_new_name(INSN, "INSN_" + name)
- _do_follow_jump_instructions = True
-
- def _follow_jump_instructions(self, addr):
- if not self._do_follow_jump_instructions or addr == 0: # for tests
- return addr
- # 'addr' is an absolute address here
- while rffi.cast(rffi.CCHARP, addr)[0] == '\xE9': # JMP <4 bytes>
- addr += 5
- addr += intmask(rffi.cast(rffi.INTP, addr - 4)[0])
- return addr
-
def _addr_as_reg_offset(self, addr):
# Encodes a (64-bit) address as an offset from the scratch register.
# If we are within a "reuse_scratch_register" block, we remember the
diff --git a/rpython/jit/backend/x86/test/test_callbuilder.py
b/rpython/jit/backend/x86/test/test_callbuilder.py
--- a/rpython/jit/backend/x86/test/test_callbuilder.py
+++ b/rpython/jit/backend/x86/test/test_callbuilder.py
@@ -20,7 +20,12 @@
def test_base_case(call_release_gil_mode=False):
asm = FakeAssembler()
- cb = callbuilder.CallBuilder64(asm, ImmedLoc(12345), [ebx, ebx])
+ old_follow_jump = callbuilder.follow_jump
+ try:
+ callbuilder.follow_jump = lambda addr: addr
+ cb = callbuilder.CallBuilder64(asm, ImmedLoc(12345), [ebx, ebx])
+ finally:
+ callbuilder.follow_jump = old_follow_jump
if call_release_gil_mode:
cb.select_call_release_gil_mode()
cb.prepare_arguments()
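
The try/finally above stubs out follow_jump so the fake address 12345 is
never dereferenced; it patches by hand rather than using pytest's
monkeypatch fixture, presumably because test_base_case() is also called
directly with call_release_gil_mode=True. Under that assumption, and
reusing the test file's imports, a fixture-based variant would look like
this (hypothetical name, not part of the commit):

    def test_base_case_sketch(monkeypatch):
        # monkeypatch undoes the setattr automatically when the test
        # ends, even on failure, replacing the try/finally above.
        monkeypatch.setattr(callbuilder, 'follow_jump', lambda addr: addr)
        asm = FakeAssembler()
        cb = callbuilder.CallBuilder64(asm, ImmedLoc(12345), [ebx, ebx])
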
diff --git a/rpython/jit/backend/x86/test/test_regloc.py
b/rpython/jit/backend/x86/test/test_regloc.py
--- a/rpython/jit/backend/x86/test/test_regloc.py
+++ b/rpython/jit/backend/x86/test/test_regloc.py
@@ -5,6 +5,7 @@
from rpython.jit.backend.x86.assembler import heap
from rpython.jit.backend.x86.arch import IS_X86_64, IS_X86_32
from rpython.jit.backend.x86 import codebuf
+from rpython.jit.backend.x86.callbuilder import follow_jump
from rpython.rlib.rarithmetic import intmask
import py.test
@@ -67,7 +68,6 @@
if target > sys.maxint:
continue
mc = codebuf.MachineCodeBlockWrapper()
- mc._do_follow_jump_instructions = False
mc.CALL(ImmedLoc(target))
length = mc.get_relative_pos()
buf = lltype.malloc(rffi.CCHARP.TO, length, flavor='raw')
@@ -104,6 +104,7 @@
mc.RET()
mc.copy_to_raw_memory(raw)
mc = codebuf.MachineCodeBlockWrapper(); mc.WORD = 4; mc.relocations = []
+ assert follow_jump(raw) == raw
mc.JMP(imm(raw))
mc.copy_to_raw_memory(raw + 20)
assert buf[20] == '\xE9' # JMP
@@ -112,21 +113,15 @@
assert buf[23] == '\xFF'
assert buf[24] == '\xFF'
mc = codebuf.MachineCodeBlockWrapper(); mc.WORD = 4; mc.relocations = []
- mc.JMP(imm(raw + 20))
+ assert follow_jump(raw + 20) == raw
+ mc.JMP(imm(raw))
mc.copy_to_raw_memory(raw + 40)
assert buf[40] == '\xE9' # JMP
assert buf[41] == '\xD3' # -45
assert buf[42] == '\xFF'
assert buf[43] == '\xFF'
assert buf[44] == '\xFF'
- mc = codebuf.MachineCodeBlockWrapper(); mc.WORD = 4; mc.relocations = []
- mc.CALL(imm(raw + 40))
- mc.copy_to_raw_memory(raw + 60)
- assert buf[60] == '\xE8' # CALL
- assert buf[61] == '\xBF' # -65
- assert buf[62] == '\xFF'
- assert buf[63] == '\xFF'
- assert buf[64] == '\xFF'
+ assert follow_jump(raw + 40) == raw
lltype.free(buf, flavor='raw')