Author: Armin Rigo <[email protected]>
Branch:
Changeset: r81509:c46508226ad9
Date: 2015-12-31 10:46 +0000
http://bitbucket.org/pypy/pypy/changeset/c46508226ad9/
Log: Remove the hacks in optimizeopt introduced by 'faster-rstruct' and
instead add a 'llop.gc_load_indexed' operation that turns into a
regular GC_LOAD_INDEXED resop.
diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py
b/pypy/module/pypyjit/test_pypy_c/test_struct.py
--- a/pypy/module/pypyjit/test_pypy_c/test_struct.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py
@@ -45,7 +45,7 @@
# the newstr and the strsetitems are because the string is forced,
# which is in turn because the optimizer doesn't know how to handle a
- # getarrayitem_gc_i on a virtual string. It could be improved, but it
+ # gc_load_indexed_i on a virtual string. It could be improved, but it
# is also true that in real life cases struct.unpack is called on
# strings which come from the outside, so it's a minor issue.
assert loop.match_by_id("unpack", """
@@ -55,17 +55,17 @@
strsetitem(p88, 1, i14)
strsetitem(p88, 2, i17)
strsetitem(p88, 3, i20)
- i91 = getarrayitem_gc_i(p88, 0, descr=<ArrayS 4>)
+ i91 = gc_load_indexed_i(p88, 0, 1, _, -4)
""")
def test_struct_object(self):
def main(n):
import struct
- s = struct.Struct("i")
+ s = struct.Struct("ii")
i = 1
while i < n:
- buf = s.pack(i) # ID: pack
- x = s.unpack(buf)[0] # ID: unpack
+ buf = s.pack(-1, i) # ID: pack
+ x = s.unpack(buf)[1] # ID: unpack
i += x / i
return i
@@ -88,10 +88,15 @@
assert loop.match_by_id('unpack', """
# struct.unpack
- p88 = newstr(4)
- strsetitem(p88, 0, i11)
- strsetitem(p88, 1, i14)
- strsetitem(p88, 2, i17)
- strsetitem(p88, 3, i20)
- i91 = getarrayitem_gc_i(p88, 0, descr=<ArrayS 4>)
+ p88 = newstr(8)
+ strsetitem(p88, 0, 255)
+ strsetitem(p88, 1, 255)
+ strsetitem(p88, 2, 255)
+ strsetitem(p88, 3, 255)
+ strsetitem(p88, 4, i11)
+ strsetitem(p88, 5, i14)
+ strsetitem(p88, 6, i17)
+ strsetitem(p88, 7, i20)
+ i90 = gc_load_indexed_i(p88, 0, 1, _, -4)
+ i91 = gc_load_indexed_i(p88, 4, 1, _, -4)
""")
diff --git a/rpython/jit/backend/llgraph/runner.py
b/rpython/jit/backend/llgraph/runner.py
--- a/rpython/jit/backend/llgraph/runner.py
+++ b/rpython/jit/backend/llgraph/runner.py
@@ -13,6 +13,7 @@
from rpython.rtyper.llinterp import LLInterpreter, LLException
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr
+from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper import rclass
from rpython.rlib.clibffi import FFI_DEFAULT_ABI
@@ -638,18 +639,9 @@
return array.getlength()
def bh_getarrayitem_gc(self, a, index, descr):
+ a = support.cast_arg(lltype.Ptr(descr.A), a)
+ array = a._obj
assert index >= 0
- if descr.A is descr.OUTERA:
- a = support.cast_arg(lltype.Ptr(descr.A), a)
- else:
- # we use rffi.cast instead of support.cast_arg because the types
- # might not be "compatible" enough from the lltype point of
- # view. In particular, this happens when we use
- # str_storage_getitem, in which an rpy_string is casted to
- # rpy_string_as_Signed (or similar)
- a = rffi.cast(lltype.Ptr(descr.OUTERA), a)
- a = getattr(a, descr.OUTERA._arrayfld)
- array = a._obj
return support.cast_result(descr.A.OF, array.getitem(index))
bh_getarrayitem_gc_pure_i = bh_getarrayitem_gc
@@ -714,6 +706,24 @@
else:
return self.bh_raw_load_i(struct, offset, descr)
+ def bh_gc_load_indexed_i(self, struct, index, scale, base_ofs, bytes):
+ if bytes == 1: T = rffi.UCHAR
+ elif bytes == 2: T = rffi.USHORT
+ elif bytes == 4: T = rffi.UINT
+ elif bytes == 8: T = rffi.ULONGLONG
+ elif bytes == -1: T = rffi.SIGNEDCHAR
+ elif bytes == -2: T = rffi.SHORT
+ elif bytes == -4: T = rffi.INT
+ elif bytes == -8: T = rffi.LONGLONG
+ else: raise NotImplementedError(bytes)
+ x = llop.gc_load_indexed(T, struct, index, scale, base_ofs)
+ return lltype.cast_primitive(lltype.Signed, x)
+
+ def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes):
+ if bytes != 8:
+ raise Exception("gc_load_indexed_f is only for 'double'!")
+ return llop.gc_load_indexed(rffi.DOUBLE, struct, index, scale,
base_ofs)
+
def bh_increment_debug_counter(self, addr):
p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr)
p[0] += 1
diff --git a/rpython/jit/backend/llsupport/llmodel.py
b/rpython/jit/backend/llsupport/llmodel.py
--- a/rpython/jit/backend/llsupport/llmodel.py
+++ b/rpython/jit/backend/llsupport/llmodel.py
@@ -725,6 +725,16 @@
def bh_raw_load_f(self, addr, offset, descr):
return self.read_float_at_mem(addr, offset)
+ def bh_gc_load_indexed_i(self, addr, index, scale, base_ofs, bytes):
+ offset = base_ofs + scale * index
+ return self.read_int_at_mem(addr, offset, abs(bytes), bytes < 0)
+
+ def bh_gc_load_indexed_f(self, addr, index, scale, base_ofs, bytes):
+ # only for 'double'!
+ assert bytes == rffi.sizeof(lltype.Float)
+ offset = base_ofs + scale * index
+ return self.read_float_at_mem(addr, offset)
+
def bh_new(self, sizedescr):
return self.gc_ll_descr.gc_malloc(sizedescr)
diff --git a/rpython/jit/codewriter/jtransform.py
b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -1021,18 +1021,20 @@
kind = getkind(op.result.concretetype)[0]
return SpaceOperation('getinteriorfield_gc_%s' % kind, args,
op.result)
- elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct):
- # special-case 2: GcStruct with Array field
- v_inst, c_field, v_index = op.args
- STRUCT = v_inst.concretetype.TO
- ARRAY = getattr(STRUCT, c_field.value)
- assert isinstance(ARRAY, lltype.Array)
- arraydescr = self.cpu.arraydescrof(STRUCT)
- kind = getkind(op.result.concretetype)[0]
- assert kind in ('i', 'f')
- return SpaceOperation('getarrayitem_gc_%s' % kind,
- [op.args[0], v_index, arraydescr],
- op.result)
+ #elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct):
+ # # special-case 2: GcStruct with Array field
+ # ---was added in the faster-rstruct branch,---
+ # ---no longer directly supported---
+ # v_inst, c_field, v_index = op.args
+ # STRUCT = v_inst.concretetype.TO
+ # ARRAY = getattr(STRUCT, c_field.value)
+ # assert isinstance(ARRAY, lltype.Array)
+ # arraydescr = self.cpu.arraydescrof(STRUCT)
+ # kind = getkind(op.result.concretetype)[0]
+ # assert kind in ('i', 'f')
+ # return SpaceOperation('getarrayitem_gc_%s' % kind,
+ # [op.args[0], v_index, arraydescr],
+ # op.result)
else:
assert False, 'not supported'
@@ -1084,6 +1086,25 @@
return SpaceOperation('raw_load_%s' % kind,
[op.args[0], op.args[1], descr], op.result)
+ def rewrite_op_gc_load_indexed(self, op):
+ T = op.result.concretetype
+ kind = getkind(T)[0]
+ assert kind != 'r'
+ descr = self.cpu.arraydescrof(rffi.CArray(T))
+ if (not isinstance(op.args[2], Constant) or
+ not isinstance(op.args[3], Constant)):
+ raise NotImplementedError("gc_load_indexed: 'scale' and 'base_ofs'"
+ " should be constants")
+ # xxx hard-code the size in bytes at translation time, which is
+ # probably fine and avoids lots of issues later
+ bytes = descr.get_item_size_in_bytes()
+ if descr.is_item_signed():
+ bytes = -bytes
+ c_bytes = Constant(bytes, lltype.Signed)
+ return SpaceOperation('gc_load_indexed_%s' % kind,
+ [op.args[0], op.args[1],
+ op.args[2], op.args[3], c_bytes], op.result)
+
def _rewrite_equality(self, op, opname):
arg0, arg1 = op.args
if isinstance(arg0, Constant) and not arg0.value:
diff --git a/rpython/jit/metainterp/blackhole.py
b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -1434,6 +1434,13 @@
def bhimpl_raw_load_f(cpu, addr, offset, arraydescr):
return cpu.bh_raw_load_f(addr, offset, arraydescr)
+ @arguments("cpu", "r", "i", "i", "i", "i", returns="i")
+ def bhimpl_gc_load_indexed_i(cpu, addr, index, scale, base_ofs, bytes):
+ return cpu.bh_gc_load_indexed_i(addr, index,scale,base_ofs, bytes)
+ @arguments("cpu", "r", "i", "i", "i", "i", returns="f")
+ def bhimpl_gc_load_indexed_f(cpu, addr, index, scale, base_ofs, bytes):
+ return cpu.bh_gc_load_indexed_f(addr, index,scale,base_ofs, bytes)
+
@arguments("r", "d", "d")
def bhimpl_record_quasiimmut_field(struct, fielddescr, mutatefielddescr):
pass
diff --git a/rpython/jit/metainterp/optimizeopt/heap.py
b/rpython/jit/metainterp/optimizeopt/heap.py
--- a/rpython/jit/metainterp/optimizeopt/heap.py
+++ b/rpython/jit/metainterp/optimizeopt/heap.py
@@ -535,16 +535,10 @@
cf.do_setfield(self, op)
def optimize_GETARRAYITEM_GC_I(self, op):
- # When using str_storage_getitem it might happen that op.getarg(0) is
- # a virtual string, NOT an array. In that case, we cannot cache the
- # getarrayitem as if it were an array, obviously. In theory we could
- # improve by writing special code to interpter the buffer of the
- # virtual string as if it were an array, but it looks complicate,
- # fragile and not worth it.
arrayinfo = self.ensure_ptr_info_arg0(op)
indexb = self.getintbound(op.getarg(1))
cf = None
- if indexb.is_constant() and not arrayinfo.is_vstring():
+ if indexb.is_constant():
index = indexb.getint()
arrayinfo.getlenbound(None).make_gt_const(index)
# use the cache on (arraydescr, index), which is a constant
@@ -561,7 +555,7 @@
self.make_nonnull(op.getarg(0))
self.emit_operation(op)
# the remember the result of reading the array item
- if cf is not None and not arrayinfo.is_vstring():
+ if cf is not None:
arrayinfo.setitem(op.getdescr(), indexb.getint(),
self.get_box_replacement(op.getarg(0)),
self.get_box_replacement(op), cf,
diff --git a/rpython/jit/metainterp/optimizeopt/info.py
b/rpython/jit/metainterp/optimizeopt/info.py
--- a/rpython/jit/metainterp/optimizeopt/info.py
+++ b/rpython/jit/metainterp/optimizeopt/info.py
@@ -24,9 +24,6 @@
def is_virtual(self):
return False
- def is_vstring(self):
- return False
-
def is_precise(self):
return False
diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py
b/rpython/jit/metainterp/optimizeopt/virtualize.py
--- a/rpython/jit/metainterp/optimizeopt/virtualize.py
+++ b/rpython/jit/metainterp/optimizeopt/virtualize.py
@@ -277,10 +277,8 @@
self.emit_operation(op)
def optimize_GETARRAYITEM_GC_I(self, op):
- # When using str_storage_getitem we op.getarg(0) is a string, NOT an
- # array, hence the check. In that case, it will be forced
opinfo = self.getptrinfo(op.getarg(0))
- if opinfo and opinfo.is_virtual() and not opinfo.is_vstring():
+ if opinfo and opinfo.is_virtual():
indexbox = self.get_constant_box(op.getarg(1))
if indexbox is not None:
item = opinfo.getitem(op.getdescr(), indexbox.getint())
diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py
b/rpython/jit/metainterp/optimizeopt/vstring.py
--- a/rpython/jit/metainterp/optimizeopt/vstring.py
+++ b/rpython/jit/metainterp/optimizeopt/vstring.py
@@ -62,9 +62,6 @@
self.mode = mode
self.length = length
- def is_vstring(self):
- return True
-
def getlenbound(self, mode):
from rpython.jit.metainterp.optimizeopt import intutils
diff --git a/rpython/jit/metainterp/pyjitpl.py
b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -810,6 +810,27 @@
return self.execute_with_descr(rop.RAW_LOAD_F, arraydescr,
addrbox, offsetbox)
+ def _remove_symbolics(self, c):
+ if not we_are_translated():
+ from rpython.rtyper.lltypesystem import ll2ctypes
+ assert isinstance(c, ConstInt)
+ c = ConstInt(ll2ctypes.lltype2ctypes(c.value))
+ return c
+
+ @arguments("box", "box", "box", "box", "box")
+ def opimpl_gc_load_indexed_i(self, addrbox, indexbox,
+ scalebox, baseofsbox, bytesbox):
+ return self.execute(rop.GC_LOAD_INDEXED_I, addrbox, indexbox,
+ self._remove_symbolics(scalebox),
+ self._remove_symbolics(baseofsbox), bytesbox)
+
+ @arguments("box", "box", "box", "box", "box")
+ def opimpl_gc_load_indexed_f(self, addrbox, indexbox,
+ scalebox, baseofsbox, bytesbox):
+ return self.execute(rop.GC_LOAD_INDEXED_F, addrbox, indexbox,
+ self._remove_symbolics(scalebox),
+ self._remove_symbolics(baseofsbox), bytesbox)
+
@arguments("box")
def opimpl_hint_force_virtualizable(self, box):
self.metainterp.gen_store_back_in_vable(box)
diff --git a/rpython/jit/metainterp/test/test_strstorage.py
b/rpython/jit/metainterp/test/test_strstorage.py
--- a/rpython/jit/metainterp/test/test_strstorage.py
+++ b/rpython/jit/metainterp/test/test_strstorage.py
@@ -19,7 +19,7 @@
res = self.interp_operations(f, [], supports_singlefloats=True)
#
kind = getkind(TYPE)[0] # 'i' or 'f'
- self.check_operations_history({'getarrayitem_gc_%s' % kind: 1,
+ self.check_operations_history({'gc_load_indexed_%s' % kind: 1,
'finish': 1})
#
if TYPE == lltype.SingleFloat:
@@ -29,8 +29,8 @@
return longlong.int2singlefloat(res)
return res
- def str_storage_supported(self, TYPE):
- py.test.skip('this is not a JIT test')
+ #def str_storage_supported(self, TYPE):
+ # py.test.skip('this is not a JIT test')
def test_force_virtual_str_storage(self):
byteorder = sys.byteorder
@@ -48,6 +48,6 @@
'strsetitem': 1, # str forcing
'call_pure_r': 1, # str forcing (copystrcontent)
'guard_no_exception': 1, # str forcing
- 'getarrayitem_gc_i': 1, # str_storage_getitem
+ 'gc_load_indexed_i': 1, # str_storage_getitem
'finish': 1
})
diff --git a/rpython/rlib/rstruct/nativefmttable.py
b/rpython/rlib/rstruct/nativefmttable.py
--- a/rpython/rlib/rstruct/nativefmttable.py
+++ b/rpython/rlib/rstruct/nativefmttable.py
@@ -11,7 +11,6 @@
from rpython.rlib.rstruct.standardfmttable import native_is_bigendian
from rpython.rlib.rstruct.error import StructError
from rpython.rlib.unroll import unrolling_iterable
-from rpython.rlib.strstorage import str_storage_getitem
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.tool import rffi_platform
from rpython.translator.tool.cbuild import ExternalCompilationInfo
diff --git a/rpython/rlib/rstruct/standardfmttable.py
b/rpython/rlib/rstruct/standardfmttable.py
--- a/rpython/rlib/rstruct/standardfmttable.py
+++ b/rpython/rlib/rstruct/standardfmttable.py
@@ -12,7 +12,7 @@
from rpython.rlib.rstruct import ieee
from rpython.rlib.rstruct.error import StructError, StructOverflowError
from rpython.rlib.unroll import unrolling_iterable
-from rpython.rlib.strstorage import str_storage_getitem, str_storage_supported
+from rpython.rlib.strstorage import str_storage_getitem
from rpython.rlib import rarithmetic
from rpython.rtyper.lltypesystem import rffi
@@ -185,13 +185,14 @@
data = fmtiter.read(size)
fmtiter.appendobj(ieee.unpack_float(data, fmtiter.bigendian))
return
- if not str_storage_supported(TYPE):
- # this happens e.g. on win32 and ARM32: we cannot read the string
- # content as an array of doubles because it's not properly
- # aligned. But we can read a longlong and convert to float
- assert TYPE == rffi.DOUBLE
- assert rffi.sizeof(TYPE) == 8
- return unpack_longlong2float(fmtiter)
+ ## XXX check if the following code is still needed
+ ## if not str_storage_supported(TYPE):
+ ## # this happens e.g. on win32 and ARM32: we cannot read the
string
+ ## # content as an array of doubles because it's not properly
+ ## # aligned. But we can read a longlong and convert to float
+ ## assert TYPE == rffi.DOUBLE
+ ## assert rffi.sizeof(TYPE) == 8
+ ## return unpack_longlong2float(fmtiter)
try:
# fast path
val = unpack_fastpath(TYPE)(fmtiter)
@@ -246,7 +247,7 @@
@specialize.argtype(0)
def unpack_int_fastpath_maybe(fmtiter):
- if fmtiter.bigendian != native_is_bigendian or not
str_storage_supported(TYPE):
+ if fmtiter.bigendian != native_is_bigendian or not native_is_ieee754:
## or not str_storage_supported(TYPE):
return False
try:
intvalue = unpack_fastpath(TYPE)(fmtiter)
diff --git a/rpython/rlib/strstorage.py b/rpython/rlib/strstorage.py
--- a/rpython/rlib/strstorage.py
+++ b/rpython/rlib/strstorage.py
@@ -9,54 +9,31 @@
# rstr.py:copy_string_contents), which has no chance to work during
# tracing
#
-# 2. use llop.raw_load: despite the name, llop.raw_load DOES support reading
-# from GC pointers. However:
-#
-# a. we would like to use a CompositeOffset as the offset (using the
-# same logic as in rstr.py:_get_raw_str_buf), but this is not (yet)
-# supported before translation: it works only if you pass an actual
-# integer
-#
-# b. raw_load from a GC pointer is not (yet) supported by the
-# JIT. There are plans to introduce a gc_load operation: when it
-# will be there, we could fix the issue above and actually use it to
-# implement str_storage_getitem
-#
-# 3. the actual solution: cast rpy_string to a GcStruct which has the very
+# 2. cast rpy_string to a GcStruct which has the very
# same layout, with the only difference that its 'chars' field is no
# longer an Array(Char) but e.g. an Array(Signed). Then, we just need to
-# read the appropriate index into the array
+# read the appropriate index into the array. To support this solution,
+# the JIT's optimizer needed a few workarounds. These have been removed.
+#
+# 3. use the newly introduced 'llop.gc_load_indexed'.
+#
-from rpython.rtyper.lltypesystem import lltype, rffi, llmemory
-from rpython.rtyper.lltypesystem.rstr import STR, _get_raw_str_buf
+
+from rpython.rtyper.lltypesystem import lltype, llmemory
+from rpython.rtyper.lltypesystem.lloperation import llop
+from rpython.rtyper.lltypesystem.rstr import STR
from rpython.rtyper.annlowlevel import llstr
-from rpython.rlib.objectmodel import specialize, we_are_translated
+from rpython.rlib.objectmodel import specialize
[email protected]()
-def _rpy_string_as_type(TP):
- # sanity check that STR is actually what we think it is
- assert STR._flds == {
- 'hash': lltype.Signed,
- 'chars': lltype.Array(lltype.Char, hints={'immutable': True})
- }
- STR_AS_TP = lltype.GcStruct('rpy_string_as_%s' % TP,
- ('hash', lltype.Signed),
- ('chars', lltype.Array(TP, hints={'immutable':
True})))
- return STR_AS_TP
-
[email protected](0)
-def str_storage_supported(TP):
- # on some architectures (e.g. win32 and arm32) an array of longlongs needs
- # to be aligned at 8 bytes boundaries, so we cannot safely cast from STR
- # to STR_AS_TP. In that case, we str_storage_getitem is simply not
- # supported
- return rffi.sizeof(TP) <= rffi.sizeof(lltype.Signed)
@specialize.ll()
-def str_storage_getitem(TP, s, index):
- assert str_storage_supported(TP) # sanity check
- STR_AS_TP = _rpy_string_as_type(TP)
+def str_storage_getitem(TP, s, byte_offset):
+ # WARNING: the 'byte_offset' is, as its name says, measured in bytes;
+ # however, it should be aligned for TP, otherwise on some platforms this
+ # code will crash!
lls = llstr(s)
- str_as_tp = rffi.cast(lltype.Ptr(STR_AS_TP), lls)
- index = index / rffi.sizeof(TP)
- return str_as_tp.chars[index]
+ base_ofs = (llmemory.offsetof(STR, 'chars') +
+ llmemory.itemoffsetof(STR.chars, 0))
+ scale_factor = llmemory.sizeof(lltype.Char)
+ return llop.gc_load_indexed(TP, lls, byte_offset,
+ scale_factor, base_ofs)
diff --git a/rpython/rlib/test/test_strstorage.py
b/rpython/rlib/test/test_strstorage.py
--- a/rpython/rlib/test/test_strstorage.py
+++ b/rpython/rlib/test/test_strstorage.py
@@ -2,7 +2,7 @@
import sys
import struct
from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.rlib.strstorage import str_storage_getitem, str_storage_supported
+from rpython.rlib.strstorage import str_storage_getitem
from rpython.rlib.rarithmetic import r_singlefloat
from rpython.rtyper.test.tool import BaseRtypingTest
@@ -10,14 +10,14 @@
class BaseStrStorageTest:
- def test_str_getitem_supported(self):
- if IS_32BIT:
- expected = False
- else:
- expected = True
- #
- assert self.str_storage_supported(rffi.LONGLONG) == expected
- assert self.str_storage_supported(rffi.DOUBLE) == expected
+ ## def test_str_getitem_supported(self):
+ ## if IS_32BIT:
+ ## expected = False
+ ## else:
+ ## expected = True
+ ## #
+ ## assert self.str_storage_supported(rffi.LONGLONG) == expected
+ ## assert self.str_storage_supported(rffi.DOUBLE) == expected
def test_signed(self):
buf = struct.pack('@ll', 42, 43)
@@ -34,8 +34,8 @@
assert int(x) == 43
def test_float(self):
- if not str_storage_supported(lltype.Float):
- py.test.skip('str_storage_getitem(lltype.Float) not supported on
this machine')
+ ## if not str_storage_supported(lltype.Float):
+ ## py.test.skip('str_storage_getitem(lltype.Float) not supported
on this machine')
buf = struct.pack('@dd', 12.3, 45.6)
size = struct.calcsize('@d')
assert self.str_storage_getitem(lltype.Float, buf, 0) == 12.3
@@ -52,20 +52,45 @@
class TestDirect(BaseStrStorageTest):
- def str_storage_supported(self, TYPE):
- return str_storage_supported(TYPE)
+ ## def str_storage_supported(self, TYPE):
+ ## return str_storage_supported(TYPE)
def str_storage_getitem(self, TYPE, buf, offset):
return str_storage_getitem(TYPE, buf, offset)
class TestRTyping(BaseStrStorageTest, BaseRtypingTest):
- def str_storage_supported(self, TYPE):
- def fn():
- return str_storage_supported(TYPE)
- return self.interpret(fn, [])
+ ## def str_storage_supported(self, TYPE):
+ ## def fn():
+ ## return str_storage_supported(TYPE)
+ ## return self.interpret(fn, [])
def str_storage_getitem(self, TYPE, buf, offset):
def fn(offset):
return str_storage_getitem(TYPE, buf, offset)
return self.interpret(fn, [offset])
+
+
+class TestCompiled(BaseStrStorageTest):
+ cache = {}
+
+ def str_storage_getitem(self, TYPE, buf, offset):
+ if TYPE not in self.cache:
+ from rpython.translator.c.test.test_genc import compile
+
+ assert isinstance(TYPE, lltype.Primitive)
+ if TYPE in (lltype.Float, lltype.SingleFloat):
+ TARGET_TYPE = lltype.Float
+ else:
+ TARGET_TYPE = lltype.Signed
+
+ def llf(buf, offset):
+ x = str_storage_getitem(TYPE, buf, offset)
+ return lltype.cast_primitive(TARGET_TYPE, x)
+
+ fn = compile(llf, [str, int])
+ self.cache[TYPE] = fn
+ #
+ fn = self.cache[TYPE]
+ x = fn(buf, offset)
+ return lltype.cast_primitive(TYPE, x)
diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py
b/rpython/rtyper/lltypesystem/ll2ctypes.py
--- a/rpython/rtyper/lltypesystem/ll2ctypes.py
+++ b/rpython/rtyper/lltypesystem/ll2ctypes.py
@@ -902,6 +902,14 @@
llobj = ctypes.sizeof(get_ctypes_type(llobj.TYPE)) *
llobj.repeat
elif isinstance(llobj, ComputedIntSymbolic):
llobj = llobj.compute_fn()
+ elif isinstance(llobj, llmemory.CompositeOffset):
+ llobj = sum([lltype2ctypes(c) for c in llobj.offsets])
+ elif isinstance(llobj, llmemory.FieldOffset):
+ CSTRUCT = get_ctypes_type(llobj.TYPE)
+ llobj = getattr(CSTRUCT, llobj.fldname).offset
+ elif isinstance(llobj, llmemory.ArrayItemsOffset):
+ CARRAY = get_ctypes_type(llobj.TYPE)
+ llobj = CARRAY.items.offset
else:
raise NotImplementedError(llobj) # don't know about symbolic
value
diff --git a/rpython/rtyper/lltypesystem/lloperation.py
b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -417,6 +417,7 @@
'raw_load': LLOp(sideeffects=False, canrun=True),
'raw_store': LLOp(canrun=True),
'bare_raw_store': LLOp(),
+ 'gc_load_indexed': LLOp(sideeffects=False, canrun=True),
'stack_malloc': LLOp(), # mmh
'track_alloc_start': LLOp(),
'track_alloc_stop': LLOp(),
diff --git a/rpython/rtyper/lltypesystem/opimpl.py
b/rpython/rtyper/lltypesystem/opimpl.py
--- a/rpython/rtyper/lltypesystem/opimpl.py
+++ b/rpython/rtyper/lltypesystem/opimpl.py
@@ -702,6 +702,17 @@
return p[0]
op_raw_load.need_result_type = True
+def op_gc_load_indexed(TVAL, p, index, scale, base_ofs):
+ # 'base_ofs' should be a CompositeOffset(..., ArrayItemsOffset).
+ # 'scale' should be a llmemory.sizeof().
+ from rpython.rtyper.lltypesystem import rffi
+ ofs = base_ofs + scale * index
+ if isinstance(ofs, int):
+ return op_raw_load(TVAL, p, ofs)
+ p = rffi.cast(rffi.CArrayPtr(TVAL), llmemory.cast_ptr_to_adr(p) + ofs)
+ return p[0]
+op_gc_load_indexed.need_result_type = True
+
def op_likely(x):
assert isinstance(x, bool)
return x
diff --git a/rpython/rtyper/lltypesystem/rbytearray.py
b/rpython/rtyper/lltypesystem/rbytearray.py
--- a/rpython/rtyper/lltypesystem/rbytearray.py
+++ b/rpython/rtyper/lltypesystem/rbytearray.py
@@ -8,10 +8,10 @@
def mallocbytearray(size):
return lltype.malloc(BYTEARRAY, size)
-_, _, _, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY,
BYTEARRAY,
+_, _, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY,
BYTEARRAY,
lltype.Char,
'bytearray')
-_, _, _, copy_bytearray_contents_from_str =
rstr._new_copy_contents_fun(rstr.STR,
+_, _, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR,
BYTEARRAY,
lltype.Char,
'bytearray_from_str')
diff --git a/rpython/rtyper/lltypesystem/rstr.py
b/rpython/rtyper/lltypesystem/rstr.py
--- a/rpython/rtyper/lltypesystem/rstr.py
+++ b/rpython/rtyper/lltypesystem/rstr.py
@@ -136,15 +136,13 @@
copy_raw_to_string = func_with_new_name(copy_raw_to_string,
'copy_raw_to_%s' % name)
- return _get_raw_buf, copy_string_to_raw, copy_raw_to_string,
copy_string_contents
+ return copy_string_to_raw, copy_raw_to_string, copy_string_contents
-(_get_raw_str_buf,
- copy_string_to_raw,
+(copy_string_to_raw,
copy_raw_to_string,
copy_string_contents) = _new_copy_contents_fun(STR, STR, Char, 'string')
-(_get_raw_unicode_buf,
- copy_unicode_to_raw,
+(copy_unicode_to_raw,
copy_raw_to_unicode,
copy_unicode_contents) = _new_copy_contents_fun(UNICODE, UNICODE, UniChar,
'unicode')
diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py
--- a/rpython/translator/c/funcgen.py
+++ b/rpython/translator/c/funcgen.py
@@ -299,7 +299,7 @@
def gen_op(self, op):
macro = 'OP_%s' % op.opname.upper()
line = None
- if op.opname.startswith('gc_'):
+ if op.opname.startswith('gc_') and op.opname != 'gc_load_indexed':
meth = getattr(self.gcpolicy, macro, None)
if meth:
line = meth(self, op)
@@ -709,6 +709,19 @@
"%(result)s = ((%(typename)s) (((char *)%(addr)s) + %(offset)s))[0];"
% locals())
+ def OP_GC_LOAD_INDEXED(self, op):
+ addr = self.expr(op.args[0])
+ index = self.expr(op.args[1])
+ scale = self.expr(op.args[2])
+ base_ofs = self.expr(op.args[3])
+ result = self.expr(op.result)
+ TYPE = op.result.concretetype
+ typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '')
+ return (
+ "%(result)s = ((%(typename)s) (((char *)%(addr)s) + "
+ "%(base_ofs)s + %(scale)s * %(index)s))[0];"
+ % locals())
+
def OP_CAST_PRIMITIVE(self, op):
TYPE = self.lltypemap(op.result)
val = self.expr(op.args[0])
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit