Author: Richard Plangger <planri...@gmail.com>
Branch: s390x-backend
Changeset: r81672:39f75f1f6c6b
Date: 2016-01-11 14:27 +0100
http://bitbucket.org/pypy/pypy/changeset/39f75f1f6c6b/

Log:	guard_subclass, guard_is_object ported to s390x
	simplified the code in int_shift_left (use some old code of the regalloc)
	impl of stack check slowpath and stack check
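
The heart of the guard_subclass port in the diff below is the single
unsigned range comparison noted in its comment, "(tmp - min) < (max - min)":
the guard passes when the object's class-range value lies inside the range
owned by the class checked against. A minimal standalone Python sketch of
that idea follows; the helper name and the range numbers are invented, and
the relation check_diff = max - min - 1 is an assumption for illustration,
not the backend's actual code.

    # Illustrative sketch only -- not code from this commit or the backend.
    def passes_guard_subclass(subclassrange_min, check_min, check_diff):
        # The emitted code subtracts check_min and does a single *unsigned*
        # compare against check_diff; values below check_min wrap to huge
        # unsigned numbers and fail.  Plain Python emulates that with a
        # two-sided signed check.
        return 0 <= (subclassrange_min - check_min) <= check_diff

    # Hypothetical numbering: the class checked against owns [10, 20); one
    # of its subclasses was assigned subclassrange_min = 12, while an
    # unrelated class got 25.
    check_min, check_max = 10, 20
    check_diff = check_max - check_min - 1
    print(passes_guard_subclass(12, check_min, check_diff))  # True, guard passes
    print(passes_guard_subclass(25, check_min, check_diff))  # False, guard fails

In the generated machine code the same check is an AGHI (add the negated
minimum) followed by an unsigned cmp_op against check_diff, with the guard
succeeding on "below or equal".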
diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
--- a/rpython/jit/backend/zarch/assembler.py
+++ b/rpython/jit/backend/zarch/assembler.py
@@ -529,12 +529,11 @@
         #
         mc._pop_core_regs_from_jitframe([r.r14]) # restore the link on the jit frame
         # So we return to our caller, conditionally if "EQ"
-        # mc.LG(r.r14, l.addr(14*WORD, r.SP))
         mc.BCR(c.EQ, r.r14)
         #
         # Else, jump to propagate_exception_path
         assert self.propagate_exception_path
-        mc.b_abs(self.propagate_exception_path)
+        mc.branch_absolute(self.propagate_exception_path)
         #
         rawstart = mc.materialize(self.cpu, [])
         self.stack_check_slowpath = rawstart
diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py
--- a/rpython/jit/backend/zarch/helper/regalloc.py
+++ b/rpython/jit/backend/zarch/helper/regalloc.py
@@ -112,15 +112,10 @@
         # in the addr part of the instruction
         l1 = addr(a1.getint())
     else:
-        self.rm.ensure_in_reg(a1, r.SCRATCH)
-        l1 = addr(0, r.SCRATCH)
-    l0 = self.ensure_reg(a0)
-    if l0.is_in_pool():
-        loc = self.force_allocate_reg(op)
-        self.assembler.mc.LG(loc, l0)
-        l0 = loc
-    else:
-        self.force_result_in_reg(op, a0)
+        tmp = self.rm.ensure_reg(a1, force_in_reg=True)
+        l1 = addr(0, tmp)
+    l0 = self.ensure_reg(a0, force_in_reg=True)
+    self.force_result_in_reg(op, a0)
     self.free_op_vars()
     return [l0, l1]
 
diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
--- a/rpython/jit/backend/zarch/opassembler.py
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -634,10 +634,7 @@
         # Note that the typeid half-word is at offset 0 on a little-endian
         # machine; it is at offset 2 or 4 on a big-endian machine.
         assert self.cpu.supports_guard_gc_type
-        if IS_PPC_32:
-            self.mc.lhz(targetreg.value, loc_ptr.value, 2 * IS_BIG_ENDIAN)
-        else:
-            self.mc.lwz(targetreg.value, loc_ptr.value, 4 * IS_BIG_ENDIAN)
+        self.mc.LGF(targetreg, l.addr(4, loc_ptr))
 
     def _cmp_guard_gc_type(self, loc_ptr, expected_typeid):
         self._read_typeid(r.SCRATCH2, loc_ptr)
@@ -666,9 +663,11 @@
 
         self._read_typeid(r.SCRATCH2, loc_object)
         self.mc.load_imm(r.SCRATCH, base_type_info + infobits_offset)
-        assert shift_by == 0     # on PPC64; fixme for PPC32
-        self.mc.lbzx(r.SCRATCH2.value, r.SCRATCH2.value, r.SCRATCH.value)
-        self.mc.andix(r.SCRATCH2.value, r.SCRATCH2.value, IS_OBJECT_FLAG & 0xff)
+        assert shift_by == 0
+        self.mc.AGR(r.SCRATCH, r.SCRATCH2)
+        self.mc.LLGC(r.SCRATCH2, l.addr(0, r.SCRATCH))
+        self.mc.LGHI(r.SCRATCH, l.imm(IS_OBJECT_FLAG & 0xff))
+        self.mc.NGR(r.SCRATCH2, r.SCRATCH)
         self.guard_success_cc = c.NE
         self._emit_guard(op, arglocs[1:])
 
@@ -683,7 +682,7 @@
             self.mc.LG(r.SCRATCH2, l.addr(offset, loc_object))
             # read the vtable's subclassrange_min field
            assert check_imm(offset2)
-            self.mc.LG(r.SCRATCH2.value, r.SCRATCH2.value, offset2)
+            self.mc.load(r.SCRATCH2, r.SCRATCH2, offset2)
         else:
             # read the typeid
             self._read_typeid(r.SCRATCH, loc_object)
@@ -692,8 +691,11 @@
             base_type_info, shift_by, sizeof_ti = (
                 self.cpu.gc_ll_descr.get_translated_info_for_typeinfo())
             self.mc.load_imm(r.SCRATCH2, base_type_info + sizeof_ti + offset2)
-            assert shift_by == 0     # on PPC64; fixme for PPC32
-            self.mc.ldx(r.SCRATCH2.value, r.SCRATCH2.value, r.SCRATCH.value)
+            assert shift_by == 0
+            # add index manually
+            # we cannot use r0 in l.addr(...)
+            self.mc.AGR(r.SCRATCH, r.SCRATCH2)
+            self.mc.load(r.SCRATCH2, r.SCRATCH, 0)
         # get the two bounds to check against
         vtable_ptr = loc_check_against_class.getint()
         vtable_ptr = rffi.cast(rclass.CLASSTYPE, vtable_ptr)
@@ -706,8 +708,8 @@
         assert 0 <= check_min <= 0x7fff
         assert 0 <= check_diff <= 0xffff
         # check by doing the unsigned comparison (tmp - min) < (max - min)
-        self.mc.subi(r.SCRATCH2.value, r.SCRATCH2.value, check_min)
-        self.mc.cmp_op(0, r.SCRATCH2.value, check_diff, imm=True, signed=False)
+        self.mc.AGHI(r.SCRATCH2, l.imm(-check_min))
+        self.mc.cmp_op(r.SCRATCH2, l.imm(check_diff), imm=True, signed=False)
         # the guard passes if we get a result of "below or equal"
         self.guard_success_cc = c.LE
         self._emit_guard(op, arglocs[2:])
@@ -831,7 +833,7 @@
             addr_loc = l.addr(offset_loc.value, base_loc, index_loc)
         else:
             self.mc.LGR(r.SCRATCH, index_loc)
-            slef.mc.AGR(r.SCRATCH, offset_loc)
+            self.mc.AGR(r.SCRATCH, offset_loc)
             addr_loc = l.addr(0, base_loc, r.SCRATCH)
         self._memory_read(result_loc, addr_loc, size_loc.value, sign_loc.value)
 
diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py
--- a/rpython/jit/backend/zarch/regalloc.py
+++ b/rpython/jit/backend/zarch/regalloc.py
@@ -286,31 +286,12 @@
             raise NoVariableToSpill()
         return even, odd
 
-    def ensure_in_reg(self, var, reg):
-        """ opposed to ensure_reg, this loads the contents of the variable
-            directly into reg """
-        if isinstance(var, ConstInt):
-            if -2**15 <= var.value and var.value <= 2*15-1:
-                self.assembler.mc.LGHI(reg, l.imm(var.value))
-            elif -2**31 <= var.value and var.value <= 2*31-1:
-                self.assembler.mc.LGFI(reg, l.imm(var.value))
-            else:
-                poolloc = self.ensure_reg(a1)
-                self.assembler.mc.LG(reg, poolloc)
-        else:
-            loc = self.loc(var, must_exist=True)
-            if loc is not reg:
-                self.assembler.regalloc_mov(loc, reg)
-        return reg
-
     def force_result_in_even_reg(self, result_v, loc, forbidden_vars=[]):
         pass
 
     def force_result_in_odd_reg(self, result_v, loc, forbidden_vars=[]):
         pass
 
-
-
 class ZARCHFrameManager(FrameManager):
     def __init__(self, base_ofs):
         FrameManager.__init__(self)
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit