Author: Richard Plangger <[email protected]>
Branch: s390x-backend
Changeset: r80514:07e92acaeeec
Date: 2015-11-03 15:23 +0100
http://bitbucket.org/pypy/pypy/changeset/07e92acaeeec/
Log:	jumping to the loop header from a bridge now works correctly
diff --git a/rpython/jit/backend/zarch/assembler.py
b/rpython/jit/backend/zarch/assembler.py
--- a/rpython/jit/backend/zarch/assembler.py
+++ b/rpython/jit/backend/zarch/assembler.py
@@ -314,7 +314,7 @@
self.current_clt.allgcrefs,
self.current_clt.frame_info)
self._check_frame_depth(self.mc, regalloc.get_gcmap())
- self.pool.pre_assemble(self, operations)
+ self.pool.pre_assemble(self, operations, bridge=True)
frame_depth_no_fixed_size = self._assemble(regalloc, inputargs,
operations)
codeendpos = self.mc.get_relative_pos()
self.pool.post_assemble(self)
@@ -577,8 +577,9 @@
if descr in self.target_tokens_currently_compiling:
self.mc.b_offset(descr._ll_loop_code)
else:
+ # restore the pool address
offset = self.pool.get_descr_offset(descr)
- self.mc.b_abs(l.pool(offset))
+ self.mc.b_abs(l.pool(offset), restore_pool=True)
print "writing", hex(descr._ll_loop_code)
self.pool.overwrite_64(self.mc, offset, descr._ll_loop_code)
diff --git a/rpython/jit/backend/zarch/codebuilder.py
b/rpython/jit/backend/zarch/codebuilder.py
--- a/rpython/jit/backend/zarch/codebuilder.py
+++ b/rpython/jit/backend/zarch/codebuilder.py
@@ -117,8 +117,9 @@
offset = reladdr - self.get_relative_pos()
self.BRC(c.ANY, l.imm(offset))
- def b_abs(self, pooled):
+ def b_abs(self, pooled, restore_pool=False):
self.LG(r.r10, pooled)
+ self.LG(r.POOL, l.pool(0))
self.BCR(c.ANY, r.r10)
def reserve_guard_branch(self):
diff --git a/rpython/jit/backend/zarch/pool.py
b/rpython/jit/backend/zarch/pool.py
--- a/rpython/jit/backend/zarch/pool.py
+++ b/rpython/jit/backend/zarch/pool.py
@@ -34,7 +34,6 @@
# this is a 'long' jump instead of a relative jump
descr._ll_loop_code = self.pool_start
self.offset_map[descr] = self.size
- self.reserve_literal(asm.BRAS_byte_count)
for arg in op.getarglist():
if arg.is_constant():
self.offset_map[arg] = self.size
@@ -53,7 +52,7 @@
self.size = 0
self.offset_map.clear()
- def pre_assemble(self, asm, operations):
+ def pre_assemble(self, asm, operations, bridge=True):
self.reset()
# O(len(operations)). I do not think there is a way
# around this.
@@ -69,6 +68,8 @@
# the current solution (gcc does the same), use a literal pool
# located at register r13. This one can easily offset with 20
# bit signed values (should be enough)
+ if bridge:
+ self.reserve_literal(8)
for op in operations:
self.ensure_can_hold_constants(asm, op)
if self.size == 0:
@@ -79,9 +80,13 @@
#if self.size % 2 == 1:
# self.size += 1
assert self.size < 2**16-1
+ if bridge:
+ asm.mc.LGR(r.SCRATCH, r.r13)
asm.mc.BRAS(r.POOL, l.imm(self.size+asm.mc.BRAS_byte_count))
self.pool_start = asm.mc.get_relative_pos()
- asm.mc.write('\x00' * self.size)
+ asm.mc.write('\xFF' * self.size)
+ if bridge:
+ asm.mc.STG(r.SCRATCH, l.pool(0))
print "pool with %d quad words" % (self.size // 8)
def overwrite_64(self, mc, index, value):
diff --git a/rpython/jit/backend/zarch/registers.py
b/rpython/jit/backend/zarch/registers.py
--- a/rpython/jit/backend/zarch/registers.py
+++ b/rpython/jit/backend/zarch/registers.py
@@ -7,13 +7,13 @@
[r0,r1,r2,r3,r4,r5,r6,r7,r8,
r9,r10,r11,r12,r13,r14,r15] = registers
-MANAGED_REGS = [r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r12]
-VOLATILES = [r6,r7,r8,r9,r10,r12]
+MANAGED_REGS = [r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10]
+VOLATILES = [r6,r7,r8,r9,r10]
SP = r15
RETURN = r14
POOL = r13
SPP = r11
-SCRATCH = r0
+SCRATCH = r12
[f0,f1,f2,f3,f4,f5,f6,f7,f8,
f9,f10,f11,f12,f13,f14,f15] = fpregisters
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit