Author: Richard Plangger <[email protected]>
Branch: s390x-backend
Changeset: r80459:c877ffac4111
Date: 2015-10-26 11:52 +0100
http://bitbucket.org/pypy/pypy/changeset/c877ffac4111/
Log: adding and adjusting structure while working through the assemble_loop method
diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
--- a/rpython/jit/backend/zarch/assembler.py
+++ b/rpython/jit/backend/zarch/assembler.py
@@ -7,13 +7,14 @@
from rpython.jit.backend.zarch import locations as loc
from rpython.jit.backend.zarch.codebuilder import InstrBuilder
from rpython.jit.backend.zarch.arch import WORD
+from rpython.jit.backend.zarch.opassembler import IntOpAssembler
from rpython.jit.backend.zarch.regalloc import Regalloc
from rpython.jit.metainterp.resoperation import rop
from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id
from rpython.rlib import rgc
from rpython.rtyper.lltypesystem import lltype, rffi, llmemory
-class AssemblerZARCH(BaseAssembler):
+class AssemblerZARCH(BaseAssembler, IntOpAssembler):
def __init__(self, cpu, translate_support_code=False):
BaseAssembler.__init__(self, cpu, translate_support_code)
@@ -178,21 +179,107 @@
frame_depth = max(frame_depth, target_frame_depth)
return frame_depth
+ def regalloc_mov(self, prev_loc, loc):
+ if prev_loc.is_imm():
+ value = prev_loc.getint()
+ # move immediate value to register
+ if loc.is_core_reg():
+ self.mc.load_imm(loc, value)
+ return
+ # move immediate value to memory
+ elif loc.is_stack():
+ with scratch_reg(self.mc):
+ offset = loc.value
+ self.mc.load_imm(r.SCRATCH, value)
+ self.mc.store(r.SCRATCH.value, r.SPP, offset)
+ return
+ assert 0, "not supported location"
+ elif prev_loc.is_stack():
+ offset = prev_loc.value
+ # move from memory to register
+ if loc.is_core_reg():
+ self.mc.load(loc, r.SPP, offset)
+ return
+ # move in memory
+ elif loc.is_stack():
+ target_offset = loc.value
+ with scratch_reg(self.mc):
+ self.mc.load(r.SCRATCH.value, r.SPP, offset)
+ self.mc.store(r.SCRATCH.value, r.SPP, target_offset)
+ return
+ # move from memory to fp register
+ elif loc.is_fp_reg():
+ assert prev_loc.type == FLOAT, 'source not float location'
+ self.mc.lfd(loc, r.SPP, offset)
+ return
+ assert 0, "not supported location"
+ elif prev_loc.is_core_reg():
+ reg = prev_loc.value
+ # move to another register
+ if loc.is_core_reg():
+ other_reg = loc.value
+ self.mc.mr(other_reg, reg)
+ return
+ # move to memory
+ elif loc.is_stack():
+ offset = loc.value
+ self.mc.store(reg, r.SPP, offset)
+ return
+ assert 0, "not supported location"
+ elif prev_loc.is_imm_float():
+ value = prev_loc.getint()
+ # move immediate value to fp register
+ if loc.is_fp_reg():
+ with scratch_reg(self.mc):
+ self.mc.load_imm(r.SCRATCH, value)
+ self.mc.lfdx(loc.value, 0, r.SCRATCH.value)
+ return
+ # move immediate value to memory
+ elif loc.is_stack():
+ with scratch_reg(self.mc):
+ offset = loc.value
+ self.mc.load_imm(r.SCRATCH, value)
+ self.mc.lfdx(r.FP_SCRATCH.value, 0, r.SCRATCH.value)
+ self.mc.stfd(r.FP_SCRATCH.value, r.SPP.value, offset)
+ return
+ assert 0, "not supported location"
+ elif prev_loc.is_fp_reg():
+ reg = prev_loc.value
+ # move to another fp register
+ if loc.is_fp_reg():
+ other_reg = loc.value
+ self.mc.fmr(other_reg, reg)
+ return
+ # move from fp register to memory
+ elif loc.is_stack():
+ assert loc.type == FLOAT, "target not float location"
+ offset = loc.value
+ self.mc.stfd(reg, r.SPP.value, offset)
+ return
+ assert 0, "not supported location"
+ assert 0, "not supported location"
+
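
For orientation, regalloc_mov above is a pure dispatch on the kinds of its two locations. A rough summary of which branch handles which move (SPP is used as the base register for stack slots and SCRATCH as a spare register, exactly as in the method itself; this summary is editorial, not part of the changeset):

    # imm        -> core reg : load_imm(reg, value)
    # imm        -> stack    : load_imm into SCRATCH, then store(SCRATCH, SPP, offset)
    # stack      -> core reg : load(reg, SPP, offset)
    # stack      -> stack    : load into SCRATCH, then store(SCRATCH, SPP, target_offset)
    # stack      -> fp reg   : lfd(fpreg, SPP, offset)
    # core reg   -> core reg : mr(dst, src)
    # core reg   -> stack    : store(reg, SPP, offset)
    # imm float  -> fp reg   : load_imm the address into SCRATCH, then lfdx
    # imm float  -> stack    : lfdx into FP_SCRATCH, then stfd(FP_SCRATCH, SPP, offset)
    # fp reg     -> fp reg   : fmr(dst, src)
    # fp reg     -> stack    : stfd(reg, SPP, offset)
    # anything else falls through to the "not supported location" assertions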
# ________________________________________
# ASSEMBLER EMISSION
- def emit_op_int_add(self, op):
- pass
+ def emit_increment_debug_counter(self, op, arglocs, regalloc):
+ pass # TODO
-def notimplemented_op(self, op, arglocs, regalloc, fcond):
+ def emit_finish(self, op, arglocs, regalloc):
+ pass # TODO
+
+def notimplemented_op(asm, op, arglocs, regalloc):
print "[ZARCH/asm] %s not implemented" % op.getopname()
raise NotImplementedError(op)
asm_operations = [notimplemented_op] * (rop._LAST + 1)
asm_extra_operations = {}
-for name, value in AssemblerZARCH.__dict__.iteritems():
- if name.startswith('emit_op_'):
- opname = name[len('emit_op_'):]
- num = getattr(rop, opname.upper())
- asm_operations[num] = value
+for key, value in rop.__dict__.items():
+ key = key.lower()
+ if key.startswith('_'):
+ continue
+ methname = 'emit_%s' % key
+ if hasattr(AssemblerZARCH, methname):
+ func = getattr(AssemblerZARCH, methname).im_func
+ asm_operations[value] = func
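
The rewritten table-building loop at the end of assembler.py inverts the old lookup: instead of scanning AssemblerZARCH for emit_op_* names, it walks the rop namespace and installs any matching emit_<opname> method, leaving notimplemented_op everywhere else. A stand-alone Python 2 sketch of the pattern, using made-up FakeRop/FakeAssembler stand-ins rather than the real classes:

    class FakeRop(object):                 # stand-in for resoperation.rop
        INT_ADD = 0
        FINISH = 1
        _LAST = 1

    class FakeAssembler(object):           # stand-in for AssemblerZARCH
        def emit_int_add(self, op, arglocs, regalloc):
            print "emit int_add", arglocs

    def notimplemented_op(asm, op, arglocs, regalloc):
        raise NotImplementedError(op)

    asm_operations = [notimplemented_op] * (FakeRop._LAST + 1)
    for key, value in FakeRop.__dict__.items():
        key = key.lower()
        if key.startswith('_'):            # skips _LAST, __module__, ...
            continue
        methname = 'emit_%s' % key
        if hasattr(FakeAssembler, methname):
            asm_operations[value] = getattr(FakeAssembler, methname).im_func

    # INT_ADD now dispatches; FINISH (no emit_finish in this sketch) stays on
    # notimplemented_op:
    asm_operations[FakeRop.INT_ADD](FakeAssembler(), None, ['r2', 'r3'], None)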
diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py
--- a/rpython/jit/backend/zarch/codebuilder.py
+++ b/rpython/jit/backend/zarch/codebuilder.py
@@ -84,6 +84,9 @@
self.clear_cache(addr)
self._dump(addr, "jit-backend-dump", "s390x")
+ def load(self, treg, sreg, offset):
+ self.LG(treg, loc.addr(offset, sreg))
+
def currpos(self):
return self.get_relative_pos()
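
The new load() helper is the read counterpart of the store() calls issued from regalloc_mov above: it emits the RXY-format LG instruction, a 64-bit load from base register plus displacement. Illustrative only, with made-up register numbers:

    # something like  self.load(2, 11, 16)  should assemble to
    #     LG  %r2, 16(%r11)
    # i.e. load the 8-byte word at offset 16 from %r11 into %r2.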
diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py
--- a/rpython/jit/backend/zarch/helper/regalloc.py
+++ b/rpython/jit/backend/zarch/helper/regalloc.py
@@ -0,0 +1,22 @@
+from rpython.jit.metainterp.history import ConstInt, FLOAT
+from rpython.jit.backend.zarch.locations import imm
+
+def check_imm(arg, lower_bound=-2**15, upper_bound=2**15-1):
+ if isinstance(arg, ConstInt):
+ i = arg.getint()
+ return lower_bound <= i <= upper_bound
+ return False
+
+def _prepare_binary_arith(self, op):
+ a0 = op.getarg(0)
+ a1 = op.getarg(1)
+ if check_imm(a0):
+ a0, a1 = a1, a0
+ l0 = self.ensure_reg(a0)
+ if check_imm(a1):
+ l1 = imm(a1.getint())
+ else:
+ l1 = self.ensure_reg(a1)
+ self.free_op_vars()
+ self.force_result_in_reg(op, a0)
+ return [l0, l1]
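
The default bounds are the signed 16-bit range, i.e. what a halfword immediate field can hold. A quick illustration of how the helper classifies constants, assuming the new module imports as written (Python 2, like the rest of the code base):

    from rpython.jit.metainterp.history import ConstInt
    from rpython.jit.backend.zarch.helper.regalloc import check_imm

    print check_imm(ConstInt(100))       # True:  fits the signed 16-bit range
    print check_imm(ConstInt(2**15))     # False: one past the upper bound
    print check_imm(ConstInt(-2**15))    # True:  the lower bound is inclusive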
diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py
--- a/rpython/jit/backend/zarch/instructions.py
+++ b/rpython/jit/backend/zarch/instructions.py
@@ -20,8 +20,7 @@
'AG': ('rxy', ['\xE3','\x08']),
'AGF': ('rxy', ['\xE3','\x18']),
'AHI': ('ri', ['\xA7','\x0A']),
-
- # floating point
+ 'AGHI': ('ri', ['\xA7','\x0B']),
}
logic_mnemonic_codes = {
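
The newly listed AGHI ("add halfword immediate", 64-bit) is what emit_int_add below uses for constant operands. For context, the RI format that the ('ri', ['\xA7','\x0B']) entry describes looks roughly like this (my reading of the z/Architecture layout, not part of the changeset):

    # bits  0-7   opcode        0xA7
    # bits  8-11  R1            target register (also first operand)
    # bits 12-15  opcode ext.   0xB
    # bits 16-31  I2            signed 16-bit immediate
    #
    # The 16-bit I2 field is why check_imm() in helper/regalloc.py limits
    # immediates to -2**15 .. 2**15-1 before an AGHI can be chosen over AGR.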
diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -0,0 +1,12 @@
+
+class IntOpAssembler(object):
+ _mixin_ = True
+
+ def emit_int_add(self, op, arglocs, regalloc):
+ l0, l1 = arglocs
+ assert not l0.is_imm()
+ if l1.is_imm():
+ self.mc.AGHI(l0, l1)
+ else:
+ self.mc.AGR(l0, l1)
+
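
Putting prepare and emit together, a hypothetical trace for two traced operations (the register names depend on what the allocator picked and are made up here):

    #   i2 = int_add(i0, i1)    prepare_int_add -> [r2, r3]
    #                           emit_int_add    -> AGR  r2, r3
    #
    #   i2 = int_add(i0, 100)   prepare_int_add -> [r2, imm(100)]
    #                           emit_int_add    -> AGHI r2, 100
    #
    # In both cases i2 ends up in r2: _prepare_binary_arith calls
    # force_result_in_reg(op, a0), so the result reuses (and clobbers) the
    # first operand's register, matching the two-operand AGR/AGHI forms.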
diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py
--- a/rpython/jit/backend/zarch/regalloc.py
+++ b/rpython/jit/backend/zarch/regalloc.py
@@ -16,6 +16,7 @@
from rpython.jit.backend.llsupport.descr import ArrayDescr
import rpython.jit.backend.zarch.registers as r
import rpython.jit.backend.zarch.conditions as c
+import rpython.jit.backend.zarch.helper.regalloc as regallochelp
from rpython.jit.backend.llsupport.descr import unpack_arraydescr
from rpython.jit.backend.llsupport.descr import unpack_fielddescr
from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr
@@ -252,6 +253,14 @@
var = op.getarg(i)
self.possibly_free_var(var)
+ def force_result_in_reg(self, var, loc):
+ if var.type == FLOAT:
+ forbidden_vars = self.fprm.temp_boxes
+ return self.fprm.force_result_in_reg(var, loc, forbidden_vars)
+ else:
+ forbidden_vars = self.rm.temp_boxes
+ return self.rm.force_result_in_reg(var, loc, forbidden_vars)
+
def force_allocate_reg(self, var):
if var.type == FLOAT:
forbidden_vars = self.fprm.temp_boxes
@@ -450,6 +459,14 @@
# * P R E P A R E O P E R A T I O N S *
# ******************************************************
+ def prepare_increment_debug_counter(self, op):
+ pass # XXX
+
+ prepare_int_add = regallochelp._prepare_binary_arith
+
+ def prepare_finish(self, op):
+ return []
+
def notimplemented(self, op):
msg = '[S390X/regalloc] %s not implemented\n' % op.getopname()
if we_are_translated():
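
One Python detail worth spelling out: prepare_int_add is not written out in the class body, it is the module-level helper _prepare_binary_arith assigned under a new name, so several arithmetic ops can later share one prepare routine. A tiny stand-alone Python 2 sketch of the pattern, with made-up names:

    def _prepare_binary(self, op):           # module-level helper taking self
        return 'prepared %s for %s' % (op, self.name)

    class FakeRegalloc(object):              # stand-in for Regalloc
        prepare_int_add = _prepare_binary    # one helper, several op names
        prepare_int_sub = _prepare_binary

        def __init__(self, name):
            self.name = name

    ra = FakeRegalloc('loop0')
    print ra.prepare_int_add('int_add')      # prepared int_add for loop0
    print ra.prepare_int_sub('int_sub')      # prepared int_sub for loop0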