Author: Richard Plangger <[email protected]>
Branch: s390x-backend
Changeset: r80512:e4a706c55bfc
Date: 2015-11-03 13:08 +0100
http://bitbucket.org/pypy/pypy/changeset/e4a706c55bfc/
Log: implemented patching of pending failure recoveries after a guard
has failed and a bridge is attached (r13 is not yet restored
correctly on a bridge-to-label jump)
diff --git a/rpython/jit/backend/zarch/assembler.py
b/rpython/jit/backend/zarch/assembler.py
--- a/rpython/jit/backend/zarch/assembler.py
+++ b/rpython/jit/backend/zarch/assembler.py
@@ -1,4 +1,5 @@
-from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler
+from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler,
+ debug_bridge, DEBUG_COUNTER)
from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper
from rpython.jit.backend.llsupport import jitframe, rewrite
from rpython.jit.backend.model import CompiledLoopToken
@@ -208,11 +209,11 @@
"""
descrs = self.cpu.gc_ll_descr.getframedescrs(self.cpu)
ofs = self.cpu.unpack_fielddescr(descrs.arraydescr.lendescr)
- mc.LG(r.r2, l.addr(ofs, r.SPP))
+ #mc.LG(r.r2, l.addr(ofs, r.SPP))
patch_pos = mc.currpos()
- mc.TRAP2() # placeholder for cmpdi(0, r2, ...)
- mc.TRAP2() # placeholder for bge
- mc.TRAP2() # placeholder for li(r0, ...)
+ #mc.TRAP2() # placeholder for cmpdi(0, r2, ...)
+ #mc.TRAP2() # placeholder for bge
+ #mc.TRAP2() # placeholder for li(r0, ...)
#mc.load_imm(r.SCRATCH2, self._frame_realloc_slowpath)
#mc.mtctr(r.SCRATCH2.value)
#self.load_gcmap(mc, r.r2, gcmap)
@@ -254,7 +255,7 @@
self.update_frame_depth(frame_depth_no_fixed_size +
JITFRAME_FIXED_SIZE)
#
size_excluding_failure_stuff = self.mc.get_relative_pos()
- self.pool.post_assemble(self.mc, self.pending_guard_tokens)
+ self.pool.post_assemble(self)
self.write_pending_failure_recoveries()
full_size = self.mc.get_relative_pos()
#
@@ -313,12 +314,14 @@
self.current_clt.allgcrefs,
self.current_clt.frame_info)
self._check_frame_depth(self.mc, regalloc.get_gcmap())
+ self.pool.pre_assemble(self, operations)
frame_depth_no_fixed_size = self._assemble(regalloc, inputargs,
operations)
codeendpos = self.mc.get_relative_pos()
+ self.pool.post_assemble(self)
self.write_pending_failure_recoveries()
fullsize = self.mc.get_relative_pos()
#
- self.patch_stack_checks(frame_depth_no_fixed_size +
JITFRAME_FIXED_SIZE)
+ # TODO self.patch_stack_checks(frame_depth_no_fixed_size +
JITFRAME_FIXED_SIZE)
rawstart = self.materialize_loop(original_loop_token)
debug_bridge(descr_number, rawstart, codeendpos)
self.patch_pending_failure_recoveries(rawstart)
@@ -335,6 +338,18 @@
self.teardown()
return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos)
+ def patch_jump_for_descr(self, faildescr, adr_new_target):
+ # 'faildescr.adr_jump_offset' is the address of an instruction that is
a
+ # conditional jump. We must patch this conditional jump to go
+ # to 'adr_new_target'.
+ # Updates the pool address
+ mc = InstrBuilder()
+ mc.write_i64(adr_new_target)
+ print "addr is", hex(adr_new_target), "writing to",
hex(faildescr.adr_jump_offset)
+ mc.copy_to_raw_memory(faildescr.adr_jump_offset)
+ assert faildescr.adr_jump_offset != 0
+ faildescr.adr_jump_offset = 0 # means "patched"
+
def fixup_target_tokens(self, rawstart):
for targettoken in self.target_tokens_currently_compiling:
targettoken._ll_loop_code += rawstart
@@ -475,9 +490,9 @@
for tok in self.pending_guard_tokens:
addr = rawstart + tok.pos_jump_offset
#
- # XXX see patch_jump_for_descr()
- tok.faildescr.adr_jump_offset = rawstart + tok.pos_recovery_stub
- #
+ tok.faildescr.adr_jump_offset = rawstart + \
+ self.pool.pool_start + tok._pool_offset + \
+ RECOVERY_TARGET_POOL_OFFSET
relative_target = tok.pos_recovery_stub - tok.pos_jump_offset
#
if not tok.guard_not_invalidated():
@@ -526,12 +541,12 @@
self.mc.LMG(r.r6, r.r15, l.addr(upoffset, r.SP))
self.jmpto(r.r14)
- def _push_core_regs_to_jitframe(self, mc, includes=r.MANAGED_REGS):
+ def _push_core_regs_to_jitframe(self, mc, includes=r.registers):
base_ofs = self.cpu.get_baseofs_of_frame_field()
assert len(includes) == 16
mc.STMG(r.r0, r.r15, l.addr(base_ofs, r.SPP))
- def _push_fp_regs_to_jitframe(self, mc, includes=r.MANAGED_FP_REGS):
+ def _push_fp_regs_to_jitframe(self, mc, includes=r.fpregisters):
base_ofs = self.cpu.get_baseofs_of_frame_field()
assert len(includes) == 16
v = 16
@@ -562,7 +577,10 @@
if descr in self.target_tokens_currently_compiling:
self.mc.b_offset(descr._ll_loop_code)
else:
- self.mc.b_abs(descr._ll_loop_code)
+ offset = self.pool.get_descr_offset(descr)
+ self.mc.b_abs(l.pool(offset))
+ print "writing", hex(descr._ll_loop_code)
+ self.pool.overwrite_64(self.mc, offset, descr._ll_loop_code)
def emit_finish(self, op, arglocs, regalloc):
diff --git a/rpython/jit/backend/zarch/codebuilder.py
b/rpython/jit/backend/zarch/codebuilder.py
--- a/rpython/jit/backend/zarch/codebuilder.py
+++ b/rpython/jit/backend/zarch/codebuilder.py
@@ -36,6 +36,17 @@
self._pool_offset = -1
class AbstractZARCHBuilder(object):
+
+ def write_i64(self, word):
+ self.writechar(chr((word >> 56) & 0xFF))
+ self.writechar(chr((word >> 48) & 0xFF))
+ self.writechar(chr((word >> 40) & 0xFF))
+ self.writechar(chr((word >> 32) & 0xFF))
+ self.writechar(chr((word >> 24) & 0xFF))
+ self.writechar(chr((word >> 16) & 0xFF))
+ self.writechar(chr((word >> 8) & 0xFF))
+ self.writechar(chr(word & 0xFF))
+
def write_i32(self, word):
self.writechar(chr((word >> 24) & 0xFF))
self.writechar(chr((word >> 16) & 0xFF))
@@ -104,7 +115,11 @@
def b_offset(self, reladdr):
offset = reladdr - self.get_relative_pos()
- self.BRC(l.imm(0xf), l.imm(offset))
+ self.BRC(c.ANY, l.imm(offset))
+
+ def b_abs(self, pooled):
+ self.LG(r.r10, pooled)
+ self.BCR(c.ANY, r.r10)
def reserve_guard_branch(self):
print "reserve!", self.get_relative_pos()
diff --git a/rpython/jit/backend/zarch/conditions.py
b/rpython/jit/backend/zarch/conditions.py
--- a/rpython/jit/backend/zarch/conditions.py
+++ b/rpython/jit/backend/zarch/conditions.py
@@ -9,6 +9,7 @@
LE = loc.imm(EQ.value | LT.value)
GE = loc.imm(EQ.value | GT.value)
NE = loc.imm(LT.value | GT.value)
+ANY = loc.imm(0xf)
cond_none = loc.imm(0x0)
diff --git a/rpython/jit/backend/zarch/pool.py
b/rpython/jit/backend/zarch/pool.py
--- a/rpython/jit/backend/zarch/pool.py
+++ b/rpython/jit/backend/zarch/pool.py
@@ -12,6 +12,8 @@
self.size = 0
# the offset to index the pool
self.pool_start = 0
+ self.label_offset = 0
+ self.label_count = 0
self.offset_map = {}
def ensure_can_hold_constants(self, asm, op):
@@ -20,24 +22,36 @@
# 1x target address
self.offset_map[op.getdescr()] = self.size
self.reserve_literal(2 * 8)
- if op.getopnum() == rop.JUMP:
+ elif op.getopnum() == rop.JUMP:
descr = op.getdescr()
if descr not in asm.target_tokens_currently_compiling:
# this is a 'long' jump instead of a relative jump
+ self.offset_map[descr] = self.size
self.reserve_literal(8)
+ elif op.getopnum() == rop.LABEL:
+ descr = op.getdescr()
+ if descr not in asm.target_tokens_currently_compiling:
+ # this is a 'long' jump instead of a relative jump
+ descr._ll_loop_code = self.pool_start
+ self.offset_map[descr] = self.size
+ self.reserve_literal(asm.BRAS_byte_count)
for arg in op.getarglist():
if arg.is_constant():
self.offset_map[arg] = self.size
self.reserve_literal(8)
+ def get_descr_offset(self, descr):
+ return self.offset_map[descr]
+
def reserve_literal(self, size):
self.size += size
print "resized to", self.size, "(+",size,")"
def reset(self):
self.pool_start = 0
+ self.label_offset = 0
self.size = 0
- self.offset = 0
+ self.offset_map.clear()
def pre_assemble(self, asm, operations):
self.reset()
@@ -60,8 +74,10 @@
if self.size == 0:
# no pool needed!
return
- if self.size % 2 == 1:
- self.size += 1
+ self.size += 8
+ assert self.size % 2 == 0
+ #if self.size % 2 == 1:
+ # self.size += 1
assert self.size < 2**16-1
asm.mc.BRAS(r.POOL, l.imm(self.size+asm.mc.BRAS_byte_count))
self.pool_start = asm.mc.get_relative_pos()
@@ -80,7 +96,9 @@
mc.overwrite(index+6, chr(value >> 8 & 0xff))
mc.overwrite(index+7, chr(value & 0xff))
- def post_assemble(self, mc, pending_guard_tokens):
+ def post_assemble(self, asm):
+ mc = asm.mc
+ pending_guard_tokens = asm.pending_guard_tokens
if self.size == 0:
return
for val, offset in self.offset_map.items():
@@ -101,5 +119,4 @@
guard_token._pool_offset = offset
ptr = rffi.cast(lltype.Signed, guard_token.gcmap)
self.overwrite_64(mc, offset + RECOVERY_GCMAP_POOL_OFFSET, ptr)
- self.offset_map.clear()
diff --git a/rpython/jit/backend/zarch/regalloc.py
b/rpython/jit/backend/zarch/regalloc.py
--- a/rpython/jit/backend/zarch/regalloc.py
+++ b/rpython/jit/backend/zarch/regalloc.py
@@ -121,6 +121,7 @@
def ensure_reg(self, box):
if isinstance(box, Const):
+ xxx
loc = self.get_scratch_reg()
immvalue = self.convert_to_int(box)
self.assembler.mc.load_imm(loc, immvalue)
diff --git a/rpython/jit/backend/zarch/registers.py
b/rpython/jit/backend/zarch/registers.py
--- a/rpython/jit/backend/zarch/registers.py
+++ b/rpython/jit/backend/zarch/registers.py
@@ -7,8 +7,8 @@
[r0,r1,r2,r3,r4,r5,r6,r7,r8,
r9,r10,r11,r12,r13,r14,r15] = registers
-MANAGED_REGS = registers
-VOLATILES = [r6,r7,r8,r9,r10,r11,r12,r13,r14,r15]
+MANAGED_REGS = [r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r12]
+VOLATILES = [r6,r7,r8,r9,r10,r12]
SP = r15
RETURN = r14
POOL = r13
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit