Author: Richard Plangger <[email protected]>
Branch: s390x-backend
Changeset: r80720:ad3440cae96e
Date: 2015-11-16 18:39 +0100
http://bitbucket.org/pypy/pypy/changeset/ad3440cae96e/
Log: basic int_mul_ovf implemented, there is a quirk in the logic that
does not allow correct execution yet
diff --git a/rpython/jit/backend/zarch/conditions.py b/rpython/jit/backend/zarch/conditions.py
--- a/rpython/jit/backend/zarch/conditions.py
+++ b/rpython/jit/backend/zarch/conditions.py
@@ -1,7 +1,14 @@
from rpython.jit.backend.zarch import locations as loc
from rpython.rlib.objectmodel import specialize
+# CGIJ for instance has another mask encoding prefixed with J
+J_EQ = loc.imm(0x1)
+J_LT = loc.imm(0x2)
+J_LE = loc.imm(0x2 | 0x1)
+J_GT = loc.imm(0x4)
+J_GE = loc.imm(0x4 | 0x1)
+# normal branch instructions
EQ = loc.imm(0x8)
LT = loc.imm(0x4)
GT = loc.imm(0x2)
diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py
--- a/rpython/jit/backend/zarch/helper/regalloc.py
+++ b/rpython/jit/backend/zarch/helper/regalloc.py
@@ -41,6 +41,20 @@
self.free_op_vars()
return [l0, l1]
+def prepare_int_mul_ovf(self, op):
+ a0 = op.getarg(0)
+ a1 = op.getarg(1)
+ if check_imm32(a0):
+ a0, a1 = a1, a0
+ lr,lq = self.rm.ensure_even_odd_pair(a0, bind_first=False)
+ if check_imm32(a1):
+ l1 = imm(a1.getint())
+ else:
+ l1 = self.ensure_reg(a1)
+ self.force_result_in_reg(op, a0)
+ self.free_op_vars()
+ return [lr, lq, l1]
+
def prepare_int_div(self, op):
a0 = op.getarg(0)
a1 = op.getarg(1)
diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py
--- a/rpython/jit/backend/zarch/instruction_builder.py
+++ b/rpython/jit/backend/zarch/instruction_builder.py
@@ -123,8 +123,8 @@
self.writechar(chr(imm))
return encode_i
-def build_rr(mnemonic, (opcode,)):
- @builder.arguments('r,r')
+def build_rr(mnemonic, (opcode,), argtypes='r,r'):
+ @builder.arguments(argtypes)
def encode_rr(self, reg1, reg2):
self.writechar(opcode)
operands = ((reg1 & 0x0f) << 4) | (reg2 & 0xf)
@@ -338,6 +338,20 @@
build_rie_g = build_rie_a
+def build_rie_c(mnemonic, (opcode1,opcode2)):
+ br = is_branch_relative(mnemonic)
+ @builder.arguments('r,i8,r/m,i16')
+ def encode_rie_c(self, reg1, imm8, mask, imm16):
+ self.writechar(opcode1)
+ byte = (reg1 & BIT_MASK_4) << 4 | (mask & BIT_MASK_4)
+ self.writechar(chr(byte))
+ if br:
+ imm16 = imm16 >> 1
+ self.write_i16(imm16 & BIT_MASK_16)
+ self.writechar(chr(imm8 & 0xff))
+ self.writechar(opcode2)
+ return encode_rie_c
+
@always_inline
def _encode_rrf(self, opcode1, opcode2, r1, r2, rm3, rm4):
self.writechar(opcode1)
@@ -398,7 +412,7 @@
return function
def is_branch_relative(name):
- return name.startswith('BR')
+ return name.startswith('BR') or name.endswith('J')
def build_instr_codes(clazz):
for mnemonic, params in all_mnemonic_codes.items():
diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py
--- a/rpython/jit/backend/zarch/instructions.py
+++ b/rpython/jit/backend/zarch/instructions.py
@@ -24,6 +24,7 @@
'MSGR': ('rre', ['\xB9','\x0C']),
'MSG': ('rxy', ['\xE3','\x0C']),
'MSGFI': ('ril', ['\xC2','\x00']),
+ 'MLGR': ('rre', ['\xB9','\x86']),
# div/mod
'DSGR': ('rre', ['\xB9','\x0D'], 'eo,r'),
'DSG': ('rxy', ['\xE3','\x0D'], 'eo,bidl'),
@@ -60,6 +61,7 @@
'CLG': ('rxy', ['\xE3','\x21']),
'CGHI': ('ri', ['\xA7','\x0F']),
'CGFI': ('ril', ['\xC2','\x0C']),
+ 'CGIJ': ('rie_c', ['\xEC','\x7E']),
}
logic_mnemonic_codes = {
@@ -111,9 +113,10 @@
# load memory
'LMD': ('sse', ['\xEF']),
- 'LMG': ('rsy_a', ['\xEB','\x04']),
+ 'LMG': ('rsy_a', ['\xEB','\x04']),
'LHI': ('ri', ['\xA7','\x08']),
'LGHI': ('ri', ['\xA7','\x09']),
+ 'LGFI': ('ril', ['\xC0','\x01']),
'LR': ('rr', ['\x18']),
'LGR': ('rre', ['\xB9','\x04']),
'LG': ('rxy', ['\xE3','\x04']),
@@ -133,6 +136,9 @@
'STE': ('rx', ['\x70']),
'STD': ('rx', ['\x60']),
+ 'SPM': ('rr', ['\x04'], 'r,-'),
+ 'IPM': ('rre', ['\xB2','\x22'], 'r,-'),
+
# load binary float
# E -> short (32bit),
diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
--- a/rpython/jit/backend/zarch/opassembler.py
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -15,6 +15,52 @@
emit_int_sub = gen_emit_rr_or_rpool('SGR', 'SG')
emit_int_sub_ovf = emit_int_sub
emit_int_mul = gen_emit_imm_pool_rr('MSGFI', 'MSG', 'MSGR')
+ def emit_int_mul_ovf(self, op, arglocs, regalloc):
+ lr, lq, l1 = arglocs
+ if l1.is_in_pool():
+ self.mc.LTGR(r.SCRATCH, l1)
+ l1 = r.SCRATCH
+ elif l1.is_imm():
+ self.mc.LGFI(r.SCRATCH, l1)
+ l1 = r.SCRATCH
+
+ mc = self.mc
+ bc_one_signed = mc.CGIJ_byte_count + \
+ mc.LPGR_byte_count * 2 + \
+ mc.MLGR_byte_count + \
+ mc.XG_byte_count + \
+ mc.CGIJ_byte_count * 2 + \
+ mc.BRC_byte_count
+ bc_none_signed = mc.MLGR_byte_count + mc.CGIJ_byte_count * 2 + mc.BRC_byte_count
+ bc_set_overflow = mc.IPM_byte_count + mc.OIHL_byte_count + mc.SPM_byte_count
+
+ # check left neg
+ mc.CGIJ(lq, l.imm(0), c.J_LT, l.imm(mc.CGIJ_byte_count*2))
+ mc.CGIJ(l1, l.imm(0), c.J_GE, l.imm(bc_one_signed))
+ # left or right is negative
+ mc.LPGR(lq, lq)
+ mc.LPGR(l1, l1)
+ mc.MLGR(lr, l1)
+ off = mc.CGIJ_byte_count * 2 + mc.XG_byte_count + mc.BRC_byte_count + bc_none_signed
+ mc.CGIJ(lr, l.imm(0), c.J_LT, l.imm(off)) # jump to overflow
+ mc.CGIJ(lq, l.imm(0), c.J_LT, l.imm(off - mc.CGIJ_byte_count)) # jump to over overflow
+ mc.XG(lq, l.pool(self.pool.constant_64_sign_bit))
+ mc.BRC(c.ANY, l.imm(mc.BRC_byte_count + bc_set_overflow + bc_none_signed)) # no overflow happened
+
+ # both are positive
+ mc.MLGR(lr, l1)
+ mc.CGIJ(lq, l.imm(0), c.LT, l.imm(mc.CGIJ_byte_count * 2 + mc.BRC_byte_count)) # jump to over overflow
+ mc.CGIJ(lr, l.imm(0), c.GT, l.imm(mc.CGIJ_byte_count + mc.BRC_byte_count)) # jump to overflow
+ mc.BRC(c.ANY, l.imm(mc.BRC_byte_count + bc_set_overflow)) # no overflow happened
+
+ # set overflow!
+ mc.IPM(r.SCRATCH)
+ mc.XGR(r.SCRATCH, r.SCRATCH)
+ mc.OILH(r.SCRATCH, l.imm(0xf000)) # sets OF
+ mc.SPM(r.SCRATCH)
+
+ # no overflow happened
+ # import pdb; pdb.set_trace()
emit_int_floordiv = gen_emit_pool_or_rr_evenodd('DSG','DSGR')
emit_uint_floordiv = gen_emit_pool_or_rr_evenodd('DLG','DLGR')
diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py
--- a/rpython/jit/backend/zarch/pool.py
+++ b/rpython/jit/backend/zarch/pool.py
@@ -17,6 +17,7 @@
self.offset_map = {}
self.constant_64_zeros = -1
self.constant_64_ones = -1
+ self.constant_64_sign_bit = -1
def ensure_can_hold_constants(self, asm, op):
if op.is_guard():
@@ -38,6 +39,8 @@
self.offset_map[descr] = self.size
elif op.getopnum() == rop.INT_INVERT:
self.constant_64_ones = 1 # we need constant ones!!!
+ elif op.getopnum() == rop.INT_MUL_OVF:
+ self.constant_64_sign_bit = 1
for arg in op.getarglist():
if arg.is_constant():
self.offset_map[arg] = self.size
@@ -91,6 +94,10 @@
asm.mc.write('\x00' * 8)
self.constant_64_zeros = self.size
written += 8
+ if self.constant_64_sign_bit:
+ asm.mc.write('\x80' + '\x00' * 7)
+ self.constant_64_sign_bit = self.size
+ written += 8
self.size += written
print "pool with %d quad words" % (self.size // 8)
diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py
--- a/rpython/jit/backend/zarch/regalloc.py
+++ b/rpython/jit/backend/zarch/regalloc.py
@@ -185,7 +185,7 @@
i = self.free_regs.index(odd)
del self.free_regs[i]
return even, odd
- i += 1
+ i -= 1
import pdb; pdb.set_trace()
xxx
@@ -538,6 +538,7 @@
prepare_int_sub = helper.prepare_int_sub
prepare_int_sub_ovf = helper.prepare_int_sub
prepare_int_mul = helper.prepare_int_mul
+ prepare_int_mul_ovf = helper.prepare_int_mul_ovf
prepare_int_floordiv = helper.prepare_int_div
prepare_uint_floordiv = helper.prepare_int_div
prepare_int_mod = helper.prepare_int_mod
diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py
--- a/rpython/jit/backend/zarch/test/test_runner.py
+++ b/rpython/jit/backend/zarch/test/test_runner.py
@@ -72,10 +72,18 @@
(2**63-2,'i1 = int_add_ovf(i0, 1)',0,'guard_no_overflow'),
(2**63-2,'i1 = int_add_ovf(i0, 1)',1,'guard_overflow'),
(2**63-1,'i1 = int_add_ovf(i0, 1)',0,'guard_overflow'),
+
(-2**63, 'i1 = int_sub_ovf(i0, 1)',1,'guard_no_overflow'),
(-2**63+1,'i1 = int_sub_ovf(i0, 1)',0,'guard_no_overflow'),
(-2**63+1,'i1 = int_sub_ovf(i0, 1)',1,'guard_overflow'),
(-2**63, 'i1 = int_sub_ovf(i0, 1)',0,'guard_overflow'),
+
+ (-2**63, 'i1 = int_mul_ovf(i0, 2)',1,'guard_no_overflow'),
+ #(-2**15, 'i1 = int_mul_ovf(i0, 2)',0,'guard_no_overflow'),
+ #(-2**63, 'i1 = int_mul_ovf(i0, 0)',0,'guard_no_overflow'),
+ #(-2**15, 'i1 = int_mul_ovf(i0, 2)',1,'guard_overflow'),
+ #(-2**63, 'i1 = int_mul_ovf(i0, 2)',0,'guard_overflow'),
+ #(-2**63, 'i1 = int_mul_ovf(i0, 0)',1,'guard_overflow'),
])
def test_int_arithmetic_overflow(self, value, opcode, result, guard):
# result == 1 means branch has been taken of the guard
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit