Author: Richard Plangger <[email protected]>
Branch: s390x-backend
Changeset: r80755:86247434b6b3
Date: 2015-11-18 14:59 +0100
http://bitbucket.org/pypy/pypy/changeset/86247434b6b3/

Log:    test_int_operations is now passing

diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
--- a/rpython/jit/backend/zarch/assembler.py
+++ b/rpython/jit/backend/zarch/assembler.py
@@ -371,10 +371,10 @@
             # sadly we cannot use LOCGHI
             # it is included in an extension that seems NOT to be installed
             # by default.
-            self.mc.LGHI(r.SCRATCH, l.imm(1))
-            self.mc.LOCGR(result_loc, r.SCRATCH, condition)
-            self.mc.LGHI(r.SCRATCH, l.imm(0))
-            self.mc.LOCGR(result_loc, r.SCRATCH, c.negate(condition))
+            self.mc.LGHI(result_loc, l.imm(1))
+            off = self.mc.XGR_byte_count + self.mc.BRC_byte_count
+            self.mc.BRC(condition, l.imm(off)) # branch over the XGR below
+            self.mc.XGR(result_loc, result_loc)
 
 
     def _assemble(self, regalloc, inputargs, operations):
@@ -408,6 +408,11 @@
                     self.mc.store(r.SCRATCH.value, r.SPP, offset)
                 return
             assert 0, "not supported location"
+        elif prev_loc.is_in_pool():
+            if loc.is_reg():
+                self.mc.LG(loc, prev_loc)
+            else:
+                assert 0, "pool to non-register moves not supported"
         elif prev_loc.is_stack():
             offset = prev_loc.value
             # move from memory to register
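
For reference, a minimal Python model of the new LGHI/BRC/XGR sequence above
(illustrative only; it mirrors the emitted semantics, not the real emitter):

    def set_bool(condition_holds):
        result = 1                    # LGHI result_loc, imm(1)
        if not condition_holds:       # BRC skips the XGR when the cc matches
            result = result ^ result  # XGR result_loc, result_loc -> 0
        return result

    assert set_bool(True) == 1 and set_bool(False) == 0
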
diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py
--- a/rpython/jit/backend/zarch/codebuilder.py
+++ b/rpython/jit/backend/zarch/codebuilder.py
@@ -129,21 +129,21 @@
             if signed:
                 if pool:
                     # 64 bit immediate signed
-                    self.CLG(a, b)
+                    self.CG(a, b)
                 elif imm:
-                    self.CGHI(a, b)
+                    self.CGFI(a, b)
                 else:
                     # 64 bit signed
-                    self.CLGR(a, b)
+                    self.CGR(a, b)
             else:
                 if pool:
                     # 64 bit immediate unsigned
-                    self.CG(a, b)
+                    self.CLG(a, b)
                 elif imm:
-                    raise NotImplementedError
+                    self.CLGFI(a, b)
                 else:
                     # 64 bit unsigned
-                    self.CGR(a, b)
+                    self.CLGR(a, b)
 
 
     def load_imm(self, dest_reg, word):
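
The corrected selection in cmp_op can be summarized as a table (sketch;
64-bit compares only, mnemonics as in the hunk above):

    # (signed, operand form) -> z/Arch compare mnemonic
    CMP_INSN = {
        (True,  'pool'): 'CG',     # signed, operand in the literal pool
        (True,  'imm'):  'CGFI',   # signed, 32-bit immediate
        (True,  'reg'):  'CGR',    # signed, register-register
        (False, 'pool'): 'CLG',    # unsigned ("logical") variants
        (False, 'imm'):  'CLGFI',
        (False, 'reg'):  'CLGR',
    }
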
diff --git a/rpython/jit/backend/zarch/helper/assembler.py b/rpython/jit/backend/zarch/helper/assembler.py
--- a/rpython/jit/backend/zarch/helper/assembler.py
+++ b/rpython/jit/backend/zarch/helper/assembler.py
@@ -6,34 +6,6 @@
 from rpython.jit.metainterp.resoperation import rop
 from rpython.rtyper.lltypesystem import rffi, lltype
 
-def flush_cc(asm, condition, result_loc):
-    # After emitting an instruction that leaves a boolean result in
-    # a condition code (cc), call this.  In the common case, result_loc
-    # will be set to SPP by the regalloc, which in this case means
-    # "propagate it between this operation and the next guard by keeping
-    # it in the cc".  In the uncommon case, result_loc is another
-    # register, and we emit a load from the cc into this register.
-    assert asm.guard_success_cc == c.cond_none
-    if result_loc is r.SPP:
-        asm.guard_success_cc = condition
-    else:
-        # Possibly invert the bit in the CR
-        bit, invert = c.encoding[condition]
-        assert 0 <= bit <= 3
-        if invert == 12:
-            pass
-        elif invert == 4:
-            asm.mc.crnor(bit, bit, bit)
-        else:
-            assert 0
-
-        resval = result_loc.value
-        # move the content of the CR to resval
-        asm.mc.mfcr(resval)
-        # zero out everything except of the result
-        asm.mc.rlwinm(resval, resval, 1 + bit, 31, 31)
-
-
 def do_emit_cmp_op(self, arglocs, condition, signed, fp):
     l0 = arglocs[0]
     l1 = arglocs[1]
@@ -41,13 +13,8 @@
     # do the comparison
     self.mc.cmp_op(l0, l1, pool=l1.is_in_pool(), imm=l1.is_imm(), signed=signed, fp=fp)
 
-    # CR bits:
-    #     0: LT
-    #     1: GT
-    #     2: EQ
-    #     3: UNordered
-
     if fp:
+        assert 0, "fp compare support not implemented yet"
         # Support for NaNs: with LE or GE, if one of the operands is a
         # NaN, we get CR=1,0,0,0 (unordered bit only).  We're about to
         # check "not GT" or "not LT", but in case of NaN we want to
@@ -59,8 +26,7 @@
         #    self.mc.crnor(0, 0, 3)
         #    condition = c.LT
         pass
-
-    flush_cc(self, condition, r.SPP)
+    self.flush_cc(condition, arglocs[2])
 
 
 def gen_emit_cmp_op(condition, signed=True, fp=False):
@@ -82,7 +48,7 @@
         l0, l1 = arglocs
         if l1.is_imm() and not l1.is_in_pool():
             assert 0, "logical imm must reside in pool!"
-        elif l1.is_in_pool():
+        if l1.is_in_pool():
             getattr(self.mc, rp_func)(l0, l1)
         else:
             getattr(self.mc, rr_func)(l0, l1)
@@ -108,6 +74,7 @@
         # the remainder is always an even register (r0, r2, ..., r14)
         assert lr.is_even()
         assert lq.is_odd()
+        self.mc.XGR(lr, lr)
         if l1.is_in_pool():
             getattr(self.mc, pool_func)(lr, l1)
         else:
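
A toy model of why the new XGR clears the even register before the divide
(assuming the divide treats the even/odd pair as one wide dividend, as the
logical divide family does; illustrative only):

    def div_even_odd_pair(even, odd, divisor):
        dividend = (even << 64) | odd   # the pair forms the dividend
        return dividend % divisor, dividend // divisor  # -> even, odd

    # with the even half zeroed, only the odd register's value is divided
    assert div_even_odd_pair(0, 30, 2) == (0, 15)
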
diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py
--- a/rpython/jit/backend/zarch/helper/regalloc.py
+++ b/rpython/jit/backend/zarch/helper/regalloc.py
@@ -1,5 +1,7 @@
-from rpython.jit.metainterp.history import ConstInt, FLOAT
+from rpython.jit.metainterp.history import ConstInt, FLOAT, Const
 from rpython.jit.backend.zarch.locations import imm, addr
+from rpython.jit.backend.llsupport.regalloc import TempVar
+import rpython.jit.backend.zarch.registers as r
 
 def check_imm(arg, lower_bound=-2**15, upper_bound=2**15-1):
     if isinstance(arg, ConstInt):
@@ -55,33 +57,38 @@
     self.free_op_vars()
     return [lr, lq, l1]
 
-def prepare_int_div(self, op):
-    a0 = op.getarg(0)
-    a1 = op.getarg(1)
-    lr,lq = self.rm.ensure_even_odd_pair(a0, bind_first=False)
-    l1 = self.ensure_reg(a1)
-    self.rm.force_result_in_reg(op, a0)
-    self.free_op_vars()
-    self.rm._check_invariants()
-    return [lr, lq, l1]
+def generate_div_mod(modulus):
+    def f(self, op):
+        a0 = op.getarg(0)
+        a1 = op.getarg(1)
+        if isinstance(a0, Const):
+            poolloc = self.ensure_reg(a0)
+            lr, lq = self.rm.ensure_even_odd_pair(op, bind_first=modulus, must_exist=False)
+            self.assembler.mc.LG(lq, poolloc)
+        else:
+            lr, lq = self.rm.ensure_even_odd_pair(a0, bind_first=modulus)
+            self.rm.force_result_in_reg(op, a0)
+        l1 = self.ensure_reg(a1)
+        self.free_op_vars()
+        self.rm._check_invariants()
+        return [lr, lq, l1]
+    return f
 
-def prepare_int_mod(self, op):
-    a0 = op.getarg(0)
-    a1 = op.getarg(1)
-    lr,lq = self.rm.ensure_even_odd_pair(a0, bind_first=True)
-    l1 = self.ensure_reg(a1)
-    self.rm.force_result_in_reg(op, a0)
-    self.free_op_vars()
-    return [lr, lq, l1]
+prepare_int_div = generate_div_mod(False)
+prepare_int_mod = generate_div_mod(True)
 
 def prepare_int_sub(self, op):
     a0 = op.getarg(0)
     a1 = op.getarg(1)
-    if isinstance(a0, ConstInt):
-        a0, a1 = a1, a0
+    # sub is not commutative, so we cannot swap the operands
+    l1 = self.ensure_reg(a1)
     l0 = self.ensure_reg(a0)
-    l1 = self.ensure_reg(a1)
-    self.force_result_in_reg(op, a0)
+    if isinstance(a0, Const):
+        loc = self.force_allocate_reg(op)
+        self.assembler.mc.LG(loc, l0)
+        l0 = loc
+    else:
+        self.rm.force_result_in_reg(op, a0)
     self.free_op_vars()
     return [l0, l1]
 
@@ -99,29 +106,42 @@
 def prepare_int_shift(self, op):
     a0 = op.getarg(0)
     a1 = op.getarg(1)
-    assert isinstance(a1, ConstInt)
-    assert check_imm20(a1)
+    if isinstance(a1, ConstInt):
+        # note that the shift value is stored
+        # in the addr part of the instruction
+        l1 = addr(a1.getint())
+    else:
+        self.rm.ensure_in_reg(a1, r.SCRATCH)
+        l1 = addr(0, r.SCRATCH)
     l0 = self.ensure_reg(a0)
-    # note that the shift value is stored
-    # in the addr part of the instruction
-    l1 = addr(a1.getint())
-    self.force_result_in_reg(op, a0)
+    if l0.is_in_pool():
+        loc = self.force_allocate_reg(op)
+        self.assembler.mc.LG(loc, l0)
+        l0 = loc
+    else:
+        self.force_result_in_reg(op, a0)
     self.free_op_vars()
     return [l0, l1]
 
-def prepare_cmp_op(self, op):
-    a0 = op.getarg(0)
-    a1 = op.getarg(1)
-    if check_imm(a0):
-        a0, a1 = a1, a0
-    l0 = self.ensure_reg(a0)
-    if check_imm(a1):
-        l1 = imm(a1.getint())
-    else:
-        l1 = self.ensure_reg(a1)
-    self.force_result_in_reg(op, a0)
-    self.free_op_vars()
-    return [l0, l1]
+def generate_cmp_op(signed=True):
+    def prepare_cmp_op(self, op):
+        a0 = op.getarg(0)
+        a1 = op.getarg(1)
+        invert = imm(0)
+        l0 = self.ensure_reg(a0)
+        if signed and check_imm32(a1):
+            l1 = imm(a1.getint())
+        else:
+            l1 = self.ensure_reg(a1)
+        if l0.is_in_pool():
+            poolloc = l0
+            l0 = self.force_allocate_reg(op)
+            self.assembler.mc.LG(l0, poolloc)
+        res = self.force_allocate_reg_or_cc(op)
+        #self.force_result_in_reg(op, a0)
+        self.free_op_vars()
+        return [l0, l1, res, invert]
+    return prepare_cmp_op
 
 def prepare_binary_op(self, op):
     a0 = op.getarg(0)
@@ -137,5 +157,6 @@
     assert not isinstance(a0, ConstInt)
     l0 = self.ensure_reg(a0)
     self.force_result_in_reg(op, a0)
+    res = self.force_allocate_reg_or_cc(op)
     self.free_op_vars()
-    return [l0]
+    return [l0, res]
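
The two deleted div/mod preparers only differed in which register of the
pair binds the result, hence the closure factory. A toy model of the
pattern (stand-in names, not the real helpers):

    def generate_div_mod(modulus):
        def prepare(dividend, divisor):
            quotient, remainder = divmod(dividend, divisor)
            return remainder if modulus else quotient
        return prepare

    int_div = generate_div_mod(False)
    int_mod = generate_div_mod(True)
    assert int_div(30, 2) == 15 and int_mod(31, 2) == 1
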
diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py
--- a/rpython/jit/backend/zarch/instructions.py
+++ b/rpython/jit/backend/zarch/instructions.py
@@ -59,6 +59,7 @@
     'CLG':     ('rxy',    ['\xE3','\x21']),
     'CGHI':    ('ri',     ['\xA7','\x0F']),
     'CGFI':    ('ril',    ['\xC2','\x0C']),
+    'CLGFI':   ('ril',    ['\xC2','\x0E']),
     'CGIJ':    ('rie_c',  ['\xEC','\x7C']),
     'CLGIJ':   ('rie_c',  ['\xEC','\x7D'], 'r,u8,r/m,i16'),
     'CGIB':    ('ris',    ['\xEC','\xFC']),
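
For the newly added CLGFI, the assumed RIL-a layout is: 8-bit opcode,
4-bit R1, 4-bit opcode extension, then a 32-bit immediate (sketch only,
not the real encoder in codebuilder.py):

    import struct

    def encode_ril(op, ext, r1, imm32):
        return chr(op) + chr((r1 << 4) | ext) + struct.pack('>I', imm32)

    # CLGFI r2, 100 -> '\xc2\x2e\x00\x00\x00\x64' under this layout
    assert encode_ril(0xC2, 0x0E, 2, 100) == '\xc2\x2e\x00\x00\x00\x64'
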
diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py
--- a/rpython/jit/backend/zarch/locations.py
+++ b/rpython/jit/backend/zarch/locations.py
@@ -213,7 +213,7 @@
         return self.isfloat
 
     def __repr__(self):
-        return "pool(i,%d)" %  self.value
+        return "pool(i,%d)" %  self.displace
 
 
 def addr(displace, basereg=None, indexreg=None, length=None):
diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
--- a/rpython/jit/backend/zarch/opassembler.py
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -12,8 +12,18 @@
 
     emit_int_add = gen_emit_imm_pool_rr('AGFI','AG','AGR')
     emit_int_add_ovf = emit_int_add
-    emit_int_sub = gen_emit_rr_or_rpool('SGR', 'SG')
+
+    def emit_int_sub(self, op, arglocs, regalloc):
+        l0, l1 = arglocs
+        if l1.is_imm() and not l1.is_in_pool():
+            assert 0, "logical imm must reside in pool!"
+        if l1.is_in_pool():
+            self.mc.SG(l0, l1)
+        else:
+            self.mc.SGR(l0, l1)
+
     emit_int_sub_ovf = emit_int_sub
+
     emit_int_mul = gen_emit_imm_pool_rr('MSGFI', 'MSG', 'MSGR')
     def emit_int_mul_ovf(self, op, arglocs, regalloc):
         lr, lq, l1 = arglocs
@@ -104,23 +114,25 @@
             #self.mc.AGR(lr, l1)
 
     def emit_int_invert(self, op, arglocs, regalloc):
-        l0 = arglocs[0]
+        l0, res = arglocs
         assert not l0.is_imm()
-        self.mc.XG(l0, l.pool(self.pool.constant_64_ones))
+        if l0 != res:
+            self.mc.LGR(res, l0)
+        self.mc.XG(res, l.pool(self.pool.constant_64_ones))
 
     def emit_int_neg(self, op, arglocs, regalloc):
-        l0 = arglocs[0]
-        self.mc.LNGR(l0, l0)
+        l0, res = arglocs
+        self.mc.LCGR(res, l0)
 
     def emit_int_is_zero(self, op, arglocs, regalloc):
-        l0 = arglocs[0]
-        self.mc.CGHI(l0, l.imm(0))
-        self.flush_cc(c.EQ, r.SPP)
+        l0, res = arglocs
+        self.mc.CGHI(l0, l.imm(0))
+        self.flush_cc(c.EQ, res)
 
     def emit_int_is_true(self, op, arglocs, regalloc):
-        l0 = arglocs[0]
+        l0, res = arglocs
         self.mc.CGHI(l0, l.imm(0))
-        self.flush_cc(c.NE, r.SPP)
+        self.flush_cc(c.NE, res)
 
     emit_int_and = gen_emit_rr_or_rpool("NGR", "NG")
     emit_int_or  = gen_emit_rr_or_rpool("OGR", "OG")
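
Pure-Python models of the invert/neg emitters above (illustrative; XG
against the pool constant of 64 one-bits is bitwise NOT, LCGR is
two's-complement negation):

    MASK64 = (1 << 64) - 1

    def int_invert(x):
        return (x ^ MASK64) & MASK64   # XG reg, pool(constant_64_ones)

    def int_neg(x):
        return (-x) & MASK64           # LCGR dst, src

    assert int_invert(0) == MASK64
    assert int_neg(1) == MASK64        # -1 as an unsigned 64-bit pattern
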
diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py
--- a/rpython/jit/backend/zarch/pool.py
+++ b/rpython/jit/backend/zarch/pool.py
@@ -47,6 +47,10 @@
             self.constant_max_64_positive = 1
         elif opnum == rop.INT_RSHIFT or opnum == rop.INT_LSHIFT or \
              opnum == rop.UINT_RSHIFT:
+            a0 = op.getarg(0)
+            if a0.is_constant():
+                self.offset_map[a0] = self.size
+                self.reserve_literal(8)
             return
         for arg in op.getarglist():
             if arg.is_constant():
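
A minimal sketch of the pool bookkeeping the hunk relies on (assumed
shape; the real class lives in pool.py): every constant operand gets an
8-byte slot, and offset_map records where the slot starts.

    class LiteralPoolSketch(object):
        def __init__(self):
            self.size = 0
            self.offset_map = {}

        def reserve_literal(self, nbytes):
            self.size += nbytes

        def note_constant(self, const):
            if const not in self.offset_map:
                self.offset_map[const] = self.size
                self.reserve_literal(8)
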
diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py
--- a/rpython/jit/backend/zarch/regalloc.py
+++ b/rpython/jit/backend/zarch/regalloc.py
@@ -115,7 +115,7 @@
             return rffi.cast(lltype.Signed, c.value)
 
     def convert_to_imm(self, c):
-        val = self.convert_to_int(c)
+        val = self.convert_to_int(c)
         return l.ImmLocation(val)
 
     def ensure_reg(self, box):
@@ -134,9 +134,9 @@
         self.temp_boxes.append(box)
         return reg
 
-    def ensure_even_odd_pair(self, var, bind_first=True):
+    def ensure_even_odd_pair(self, var, bind_first=True, must_exist=True):
         self._check_type(var)
-        prev_loc = self.loc(var, must_exist=True)
+        prev_loc = self.loc(var, must_exist=must_exist)
         var2 = TempVar()
         self.temp_boxes.append(var2)
         if prev_loc is self.frame_reg:
@@ -262,6 +262,22 @@
             raise NoVariableToSpill()
         return even, odd
 
+    def ensure_in_reg(self, var, reg):
+        """ opposed to ensure_reg, this loads the contents of the variable
+            directly into reg """
+        if isinstance(var, ConstInt):
+            if -2**15 <= var.value and var.value <= 2*15-1:
+                self.assembler.mc.LGHI(reg, l.imm(var.value))
+            elif -2**31 <= var.value and var.value <= 2*31-1:
+                self.assembler.mc.LGFI(reg, l.imm(var.value))
+            else:
+                poolloc = self.ensure_reg(a1)
+                self.assembler.mc.LG(reg, poolloc)
+        else:
+            loc = self.loc(var, must_exist=True)
+            if loc is not reg:
+                self.assembler.regalloc_mov(loc, reg)
+            return reg
 
     def force_result_in_even_reg(self, result_v, loc, forbidden_vars=[]):
         pass
@@ -415,7 +431,7 @@
             return r.SPP
         else:
             # else, return a regular register (not SPP).
-            return self.force_allocate_reg(var)
+            return self.rm.force_allocate_reg(var)
 
     def walk_operations(self, inputargs, operations):
         from rpython.jit.backend.zarch.assembler import (
@@ -616,17 +632,17 @@
     prepare_int_lshift  = helper.prepare_int_shift
     prepare_uint_rshift = helper.prepare_int_shift
 
-    prepare_int_le = helper.prepare_cmp_op
-    prepare_int_lt = helper.prepare_cmp_op
-    prepare_int_ge = helper.prepare_cmp_op
-    prepare_int_gt = helper.prepare_cmp_op
-    prepare_int_eq = helper.prepare_cmp_op
-    prepare_int_ne = helper.prepare_cmp_op
+    prepare_int_le = helper.generate_cmp_op()
+    prepare_int_lt = helper.generate_cmp_op()
+    prepare_int_ge = helper.generate_cmp_op()
+    prepare_int_gt = helper.generate_cmp_op()
+    prepare_int_eq = helper.generate_cmp_op()
+    prepare_int_ne = helper.generate_cmp_op()
 
-    prepare_uint_le = helper.prepare_cmp_op
-    prepare_uint_lt = helper.prepare_cmp_op
-    prepare_uint_ge = helper.prepare_cmp_op
-    prepare_uint_gt = helper.prepare_cmp_op
+    prepare_uint_le = helper.generate_cmp_op(signed=False)
+    prepare_uint_lt = helper.generate_cmp_op(signed=False)
+    prepare_uint_ge = helper.generate_cmp_op(signed=False)
+    prepare_uint_gt = helper.generate_cmp_op(signed=False)
 
     prepare_int_is_zero = helper.prepare_unary_op
     prepare_int_is_true = helper.prepare_unary_op
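
The three immediate-size tiers that ensure_in_reg picks between (bounds
as fixed above) can be modeled in isolation:

    def pick_load_insn(value):
        if -2**15 <= value <= 2**15 - 1:
            return 'LGHI'     # 16-bit signed immediate
        if -2**31 <= value <= 2**31 - 1:
            return 'LGFI'     # 32-bit signed immediate
        return 'LG'           # 64-bit: load from the literal pool

    assert pick_load_insn(1) == 'LGHI'
    assert pick_load_insn(2**20) == 'LGFI'
    assert pick_load_insn(2**40) == 'LG'
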
diff --git a/rpython/jit/backend/zarch/test/test_int.py b/rpython/jit/backend/zarch/test/test_int.py
--- a/rpython/jit/backend/zarch/test/test_int.py
+++ b/rpython/jit/backend/zarch/test/test_int.py
@@ -19,96 +19,6 @@
     cpu = CPU_S390_64(rtyper=None, stats=FakeStats())
     cpu.setup_once()
 
-    @py.test.mark.parametrize('value,opcode,result',
-        [ (30,'i1 = int_mul(i0, 2)',60),
-          (30,'i1 = int_floordiv(i0, 2)',15),
-          (2**31,'i1 = int_floordiv(i0, 15)',2**31//15),
-          (0,'i1 = int_floordiv(i0, 1)', 0),
-          (1,'i1 = int_floordiv(i0, 1)', 1),
-          (0,'i1 = uint_floordiv(i0, 1)', 0),
-          (1,'i1 = uint_floordiv(i0, 1)', 1),
-          (30,'i1 = int_mod(i0, 2)', 0),
-          (1,'i1 = int_mod(i0, 2)', 1),
-          (1,'i1 = int_lshift(i0, 4)', 16),
-          (1,'i1 = int_lshift(i0, 0)', 1),
-          (4,'i1 = int_rshift(i0, 0)', 4),
-          (4,'i1 = int_rshift(i0, 1)', 2),
-          (-1,'i1 = int_rshift(i0, 0)', -1),
-          (-1,'i1 = int_lshift(i0, 1)', -2),
-          (-2**35,'i1 = int_lshift(i0, 1)', (-2**35)*2),
-          (2**64-1,'i1 = uint_rshift(i0, 2)', (2**64-1)//4),
-          (-1,'i1 = int_neg(i0)', -1),
-          (1,'i1 = int_neg(i0)', -1),
-          (2**63-1,'i1 = int_neg(i0)', -(2**63-1)),
-          (1,'i1 = int_invert(i0)', ~1),
-          (15,'i1 = int_invert(i0)', ~15),
-          (-1,'i1 = int_invert(i0)', ~(-1)),
-          (0,'i1 = int_is_zero(i0)', 1),
-          (50,'i1 = int_is_zero(i0)', 0),
-          (-1,'i1 = int_is_true(i0)', 1),
-          (0,'i1 = int_is_true(i0)', 0),
-        ])
-    def test_int_arithmetic_and_logic(self, value, opcode, result):
-        loop = parse("""
-        [i0]
-        {opcode}
-        finish(i1, descr=faildescr)
-        """.format(opcode=opcode),namespace={"faildescr": BasicFinalDescr(1)})
-        looptoken = JitCellToken()
-        self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
-        deadframe = self.cpu.execute_token(looptoken, value)
-        fail = self.cpu.get_latest_descr(deadframe)
-        res = self.cpu.get_int_value(deadframe, 0)
-        assert res == result
-        assert fail.identifier == 1 
-
-    @py.test.mark.parametrize('value,opcode,result,guard',
-        [ (2**63-1,'i1 = int_add_ovf(i0, 1)',1,'guard_no_overflow'),
-          (2**63-2,'i1 = int_add_ovf(i0, 1)',0,'guard_no_overflow'),
-          (2**63-2,'i1 = int_add_ovf(i0, 1)',1,'guard_overflow'),
-          (2**63-1,'i1 = int_add_ovf(i0, 1)',0,'guard_overflow'),
-
-          (-2**63,  'i1 = int_sub_ovf(i0, 1)',1,'guard_no_overflow'),
-          (-2**63+1,'i1 = int_sub_ovf(i0, 1)',0,'guard_no_overflow'),
-          (-2**63+1,'i1 = int_sub_ovf(i0, 1)',1,'guard_overflow'),
-          (-2**63,  'i1 = int_sub_ovf(i0, 1)',0,'guard_overflow'),
-
-          (-2**63,  'i1 = int_mul_ovf(i0, 2)',1,'guard_no_overflow'),
-          (-2**63,  'i1 = int_mul_ovf(i0, -2)',1,'guard_no_overflow'),
-          (-2**15,  'i1 = int_mul_ovf(i0, 2)',0,'guard_no_overflow'),
-          (-2**63,  'i1 = int_mul_ovf(i0, 0)',0,'guard_no_overflow'),
-          (-2**63,  'i1 = int_mul_ovf(i0, 2)',0,'guard_overflow'),
-          (-2**63,  'i1 = int_mul_ovf(i0, -2)',0,'guard_overflow'),
-          (-2**63,  'i1 = int_mul_ovf(i0, 0)',1,'guard_overflow'),
-          # positive!
-          (2**63-1,  'i1 = int_mul_ovf(i0, 33)',1,'guard_no_overflow'),
-          (2**63-1,  'i1 = int_mul_ovf(i0, -2)',1,'guard_no_overflow'),
-          (2**15,  'i1 = int_mul_ovf(i0, 2)',0,'guard_no_overflow'),
-          (2**63-1,  'i1 = int_mul_ovf(i0, 0)',0,'guard_no_overflow'),
-          (2**63-1,  'i1 = int_mul_ovf(i0, 99)',0,'guard_overflow'),
-          (2**63-1,  'i1 = int_mul_ovf(i0, 3323881828381)',0,'guard_overflow'),
-          (2**63-1,  'i1 = int_mul_ovf(i0, 0)',1,'guard_overflow'),
-        ])
-    def test_int_arithmetic_overflow(self, value, opcode, result, guard):
-        # result == 1 means the guard's branch has been taken
-        code = """
-        [i0]
-        {opcode}
-        {guard}() [i0]
-        i2 = int_xor(i1,i1)
-        finish(i2, descr=faildescr)
-        """.format(opcode=opcode,guard=guard)
-        loop = parse(code, namespace={"faildescr": BasicFinalDescr(1)})
-        looptoken = JitCellToken()
-        self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
-        deadframe = self.cpu.execute_token(looptoken, value)
-        fail = self.cpu.get_latest_descr(deadframe)
-        res = self.cpu.get_int_value(deadframe, 0)
-        if result == 1:
-            assert res == value
-        else:
-            assert res == 0
-
     def test_double_evenodd_pair(self):
         code = """
         [i0]