Author: Ronan Lamy <[email protected]>
Branch: unicode-dtype
Changeset: r78001:32e9412990e1
Date: 2015-06-09 16:20 +0100
http://bitbucket.org/pypy/pypy/changeset/32e9412990e1/
Log: hg merge default diff too long, truncating to 2000 out of 2175 lines diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.1.0 +Version: 1.1.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.1.0" -__version_info__ = (1, 1, 0) +__version__ = "1.1.2" +__version_info__ = (1, 1, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/gc_weakref.py b/lib_pypy/cffi/gc_weakref.py --- a/lib_pypy/cffi/gc_weakref.py +++ b/lib_pypy/cffi/gc_weakref.py @@ -2,18 +2,23 @@ class GcWeakrefs(object): - # code copied and adapted from WeakKeyDictionary. - def __init__(self, ffi): self.ffi = ffi - self.data = data = {} - def remove(k): - destructor, cdata = data.pop(k) - destructor(cdata) - self.remove = remove + self.data = {} + self.nextindex = 0 def build(self, cdata, destructor): # make a new cdata of the same type as the original one new_cdata = self.ffi.cast(self.ffi._backend.typeof(cdata), cdata) - self.data[ref(new_cdata, self.remove)] = destructor, cdata + # + def remove(key): + # careful, this function is not protected by any lock + old_key = self.data.pop(index) + assert old_key is key + destructor(cdata) + # + key = ref(new_cdata, remove) + index = self.nextindex + self.nextindex = index + 1 # we're protected by the lock here + self.data[index] = key return new_cdata diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -775,7 +775,8 @@ try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double - prnt(' (void)((p->%s) << 1);' % fname) + prnt(" (void)((p->%s) << 1); /* check that '%s.%s' is " + "an integer */" % (fname, cname, fname)) continue # only accept exactly the type declared, except that '[]' # is interpreted as a '*' and so will match any array length. @@ -949,7 +950,7 @@ prnt('{') prnt(' int n = (%s) <= 0;' % (name,)) prnt(' *o = (unsigned long long)((%s) << 0);' - ' /* check that we get an integer */' % (name,)) + ' /* check that %s is an integer */' % (name, name)) if check_value is not None: if check_value > 0: check_value = '%dU' % (check_value,) @@ -1088,8 +1089,9 @@ self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index) def _emit_bytecode_UnknownIntegerType(self, tp, index): - s = '_cffi_prim_int(sizeof(%s), (((%s)-1) << 0) <= 0)' % ( - tp.name, tp.name) + s = ('_cffi_prim_int(sizeof(%s), (\n' + ' ((%s)-1) << 0 /* check that %s is an integer type */\n' + ' ) <= 0)' % (tp.name, tp.name, tp.name)) self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) def _emit_bytecode_RawFunctionType(self, tp, index): diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py --- a/lib_pypy/cffi/setuptools_ext.py +++ b/lib_pypy/cffi/setuptools_ext.py @@ -18,7 +18,9 @@ # __init__.py files may already try to import the file that # we are generating. 
with open(filename) as f: - code = compile(f.read(), filename, 'exec') + src = f.read() + src += '\n' # Python 2.6 compatibility + code = compile(src, filename, 'exec') exec(code, glob, glob) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -402,12 +402,16 @@ else: assert tp is not None assert check_value is None - prnt(tp.get_c_name(' %s(void)' % funcname, name),) - prnt('{') if category == 'var': ampersand = '&' else: ampersand = '' + extra = '' + if category == 'const' and isinstance(tp, model.StructOrUnion): + extra = 'const *' + ampersand = '&' + prnt(tp.get_c_name(' %s%s(void)' % (extra, funcname), name)) + prnt('{') prnt(' return (%s%s);' % (ampersand, name)) prnt('}') prnt() @@ -436,9 +440,14 @@ value += (1 << (8*self.ffi.sizeof(BLongLong))) else: assert check_value is None - BFunc = self.ffi._typeof_locked(tp.get_c_name('(*)(void)', name))[0] + fntypeextra = '(*)(void)' + if isinstance(tp, model.StructOrUnion): + fntypeextra = '*' + fntypeextra + BFunc = self.ffi._typeof_locked(tp.get_c_name(fntypeextra, name))[0] function = module.load_function(BFunc, funcname) value = function() + if isinstance(tp, model.StructOrUnion): + value = value[0] return value def _loaded_gen_constant(self, tp, name, module, library): diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -536,16 +536,17 @@ return self def __repr__(self): + module = "datetime." if self.__class__ is timedelta else "" if self._microseconds: - return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__, + return "%s(%d, %d, %d)" % (module + self.__class__.__name__, self._days, self._seconds, self._microseconds) if self._seconds: - return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__, + return "%s(%d, %d)" % (module + self.__class__.__name__, self._days, self._seconds) - return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days) + return "%s(%d)" % (module + self.__class__.__name__, self._days) def __str__(self): mm, ss = divmod(self._seconds, 60) @@ -798,7 +799,8 @@ >>> repr(dt) 'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)' """ - return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__, + module = "datetime." if self.__class__ is date else "" + return "%s(%d, %d, %d)" % (module + self.__class__.__name__, self._year, self._month, self._day) @@ -1286,7 +1288,8 @@ s = ", %d" % self._second else: s = "" - s= "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__, + module = "datetime." if self.__class__ is time else "" + s= "%s(%d, %d%s)" % (module + self.__class__.__name__, self._hour, self._minute, s) if self._tzinfo is not None: assert s[-1:] == ")" @@ -1698,7 +1701,8 @@ if L[-1] == 0: del L[-1] s = ", ".join(map(str, L)) - s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s) + module = "datetime." 
if self.__class__ is datetime else "" + s = "%s(%s)" % (module + self.__class__.__name__, s) if self._tzinfo is not None: assert s[-1:] == ")" s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")" diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -1,3 +1,4 @@ +import os import sys import py @@ -38,7 +39,7 @@ "_csv", "cppyy", "_pypyjson" ]) -if sys.platform.startswith('linux') and sys.maxint > 2147483647: +if sys.platform.startswith('linux') and os.uname()[4] == 'x86_64': working_modules.add('_vmprof') translation_modules = default_modules.copy() diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -21,7 +21,10 @@ this_dir = os.path.dirname(sys.argv[0]) def debug(msg): - os.write(2, "debug: " + msg + '\n') + try: + os.write(2, "debug: " + msg + '\n') + except OSError: + pass # bah, no working stderr :-( # __________ Entry point __________ diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload -VERSION = "1.1.0" +VERSION = "1.1.2" class Module(MixedModule): diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3335,4 +3335,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "1.1.0" + assert __version__ == "1.1.2" diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -28,6 +28,7 @@ class W_BaseConnection(W_Root): BUFFER_SIZE = 1024 + buffer = lltype.nullptr(rffi.CCHARP.TO) def __init__(self, flags): self.flags = flags @@ -35,7 +36,8 @@ flavor='raw') def __del__(self): - lltype.free(self.buffer, flavor='raw') + if self.buffer: + lltype.free(self.buffer, flavor='raw') try: self.do_close() except OSError: @@ -204,6 +206,7 @@ class W_FileConnection(W_BaseConnection): INVALID_HANDLE_VALUE = -1 + fd = INVALID_HANDLE_VALUE if sys.platform == 'win32': def WRITE(self, data): diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -189,9 +189,11 @@ assert data2 == '\x00\x00\x00\x04defg' def test_repr(self): - import _multiprocessing - c = _multiprocessing.Connection(1) - assert repr(c) == '<read-write Connection, handle 1>' + import _multiprocessing, os + fd = os.dup(1) # closed by Connection.__del__ + c = _multiprocessing.Connection(fd) + assert repr(c) == '<read-write Connection, handle %d>' % fd if hasattr(_multiprocessing, 'PipeConnection'): - c = _multiprocessing.PipeConnection(1) - assert repr(c) == '<read-write PipeConnection, handle 1>' + fd = os.dup(1) # closed by PipeConnection.__del__ + c = _multiprocessing.PipeConnection(fd) + assert repr(c) == '<read-write PipeConnection, handle %d>' % fd diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ 
b/pypy/module/cpyext/ndarrayobject.py @@ -65,15 +65,7 @@ @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_FLAGS(space, w_array): assert isinstance(w_array, W_NDimArray) - flags = NPY_BEHAVED_NS - if isinstance(w_array.implementation, ConcreteArray): - flags |= NPY_OWNDATA - if len(w_array.get_shape()) < 2: - flags |= NPY_CONTIGUOUS - elif w_array.implementation.order == 'C': - flags |= NPY_C_CONTIGUOUS - else: - flags |= NPY_F_CONTIGUOUS + flags = NPY_BEHAVED_NS | w_array.get_flags() return flags @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -30,7 +30,15 @@ IMP_HOOK = 9 SO = '.pyd' if _WIN32 else '.so' -DEFAULT_SOABI = 'pypy-%d%d' % PYPY_VERSION[:2] + +# this used to change for every minor version, but no longer does: there +# is little point any more, as the so's tend to be cross-version- +# compatible, more so than between various versions of CPython. Be +# careful if we need to update it again: it is now used for both cpyext +# and cffi so's. If we do have to update it, we'd likely need a way to +# split the two usages again. +#DEFAULT_SOABI = 'pypy-%d%d' % PYPY_VERSION[:2] +DEFAULT_SOABI = 'pypy-26' @specialize.memo() def get_so_extension(space): diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -10,6 +10,7 @@ from rpython.rlib.rarithmetic import LONG_BIT from rpython.rlib.rstring import StringBuilder from rpython.rlib.objectmodel import specialize +from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy import constants as NPY @@ -66,7 +67,8 @@ assert isinstance(multiarray, MixedModule) scalar = multiarray.get("scalar") - ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(self.raw_str())])]) + ret = space.newtuple([scalar, space.newtuple( + [space.wrap(self._get_dtype(space)), space.wrap(self.raw_str())])]) return ret @@ -368,13 +370,11 @@ if dtype.elsize != self.get_dtype(space).elsize: raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) - if dtype.is_str_or_unicode(): - return dtype.coerce(space, space.wrap(self.raw_str())) - elif dtype.is_record(): + if dtype.is_record(): raise OperationError(space.w_NotImplementedError, space.wrap( "viewing scalar as record not implemented")) else: - return dtype.itemtype.runpack_str(space, self.raw_str()) + return dtype.runpack_str(space, self.raw_str()) def descr_self(self, space): return self @@ -536,8 +536,20 @@ def get_dtype(self, space): return self.dtype + @jit.unroll_safe def raw_str(self): - return self.arr.dtype.itemtype.to_str(self) + builder = StringBuilder() + i = self.ofs + end = i + self.dtype.elsize + with self.arr as storage: + while i < end: + assert isinstance(storage[i], str) + if storage[i] == '\x00': + break + builder.append(storage[i]) + i += 1 + return builder.build() + class W_VoidBox(W_FlexibleBox): def descr_getitem(self, space, w_item): @@ -562,7 +574,7 @@ if isinstance(dtype.itemtype, VoidType): read_val = dtype.itemtype.readarray(self.arr, self.ofs, ofs, dtype) else: - read_val = dtype.itemtype.read(self.arr, self.ofs, ofs, dtype) + read_val = dtype.read(self.arr, self.ofs, ofs) if isinstance (read_val, W_StringBox): # StringType returns a str 
return space.wrap(dtype.itemtype.to_str(read_val)) @@ -582,7 +594,7 @@ raise oefmt(space.w_IndexError, "222only integers, slices (`:`), " "ellipsis (`...`), numpy.newaxis (`None`) and integer or " "boolean arrays are valid indices") - dtype.itemtype.store(self.arr, self.ofs, ofs, + dtype.store(self.arr, self.ofs, ofs, dtype.coerce(space, w_value)) def convert_to(self, space, dtype): diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -17,7 +17,6 @@ from rpython.rtyper.annlowlevel import cast_gcref_to_instance from pypy.interpreter.baseobjspace import W_Root - class BaseConcreteArray(object): _immutable_fields_ = ['dtype?', 'storage', 'start', 'size', 'shape[*]', 'strides[*]', 'backstrides[*]', 'order', 'gcstruct', @@ -44,13 +43,13 @@ return backstrides def getitem(self, index): - return self.dtype.itemtype.read(self, index, 0) + return self.dtype.read(self, index, 0) def getitem_bool(self, index): - return self.dtype.itemtype.read_bool(self, index, 0) + return self.dtype.read_bool(self, index, 0) def setitem(self, index, value): - self.dtype.itemtype.store(self, index, 0, value) + self.dtype.store(self, index, 0, value) @jit.unroll_safe def setslice(self, space, arr): @@ -334,12 +333,19 @@ def get_buffer(self, space, readonly): return ArrayBuffer(self, readonly) - def astype(self, space, dtype): + def astype(self, space, dtype, order): # copy the general pattern of the strides # but make the array storage contiguous in memory shape = self.get_shape() strides = self.get_strides() - if len(strides) > 0: + if order not in ('C', 'F'): + raise oefmt(space.w_ValueError, "Unknown order %s in astype", order) + if len(strides) == 0: + t_strides = [] + backstrides = [] + elif order != self.order: + t_strides, backstrides = calc_strides(shape, dtype, order) + else: mins = strides[0] t_elsize = dtype.elsize for s in strides: @@ -347,10 +353,7 @@ mins = s t_strides = [s * t_elsize / mins for s in strides] backstrides = calc_backstrides(t_strides, shape) - else: - t_strides = [] - backstrides = [] - impl = ConcreteArray(shape, dtype, self.order, t_strides, backstrides) + impl = ConcreteArray(shape, dtype, order, t_strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) return impl @@ -376,7 +379,7 @@ gc._trace_callback(callback, arg, storage) storage += step i += 1 - + lambda_customtrace = lambda: customtrace def _setup(): @@ -409,8 +412,9 @@ self.gcstruct = V_OBJECTSTORE def fill(self, space, box): - self.dtype.itemtype.fill(self.storage, self.dtype.elsize, - box, 0, self.size, 0, self.gcstruct) + self.dtype.itemtype.fill( + self.storage, self.dtype.elsize, self.dtype.is_native(), + box, 0, self.size, 0, self.gcstruct) def set_shape(self, space, orig_array, new_shape): strides, backstrides = calc_strides(new_shape, self.dtype, @@ -440,7 +444,7 @@ gcstruct = V_OBJECTSTORE flags = NPY.ARRAY_ALIGNED | NPY.ARRAY_WRITEABLE if storage == lltype.nullptr(RAW_STORAGE): - length = support.product(shape) + length = support.product(shape) if dtype.num == NPY.OBJECT: storage = dtype.itemtype.malloc(length * dtype.elsize, zero=True) gcstruct = _create_objectstore(storage, length, dtype.elsize) @@ -502,7 +506,7 @@ ConcreteArray.__init__(self, shape, dtype, order, strides, backstrides, storage, zero) self.flags &= ~ NPY.ARRAY_WRITEABLE - + def descr_setitem(self, space, orig_array, w_index, w_value): raise OperationError(space.w_ValueError, space.wrap( "assignment destination is 
read-only")) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -5,8 +5,10 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) +from rpython.annotator.model import SomeChar from rpython.rlib import jit -from rpython.rlib.objectmodel import specialize, compute_hash, we_are_translated +from rpython.rlib.objectmodel import ( + specialize, compute_hash, we_are_translated, enforceargs) from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from pypy.module.micronumpy import types, boxes, support, constants as NPY from .base import W_NDimArray @@ -38,6 +40,15 @@ out = W_NDimArray.from_shape(space, shape, dtype) return out +def byteorder_w(space, w_str): + order = space.str_w(w_str) + if len(order) != 1: + raise oefmt(space.w_ValueError, + "endian is not 1-char string in Numpy dtype unpickling") + endian = order[0] + if endian not in (NPY.LITTLE, NPY.BIG, NPY.NATIVE, NPY.IGNORE): + raise oefmt(space.w_ValueError, "Invalid byteorder %s", endian) + return endian class W_Dtype(W_Root): @@ -45,15 +56,13 @@ "itemtype?", "w_box_type", "byteorder?", "names?", "fields?", "elsize?", "alignment?", "shape?", "subdtype?", "base?"] - def __init__(self, itemtype, w_box_type, byteorder=None, names=[], + @enforceargs(byteorder=SomeChar()) + def __init__(self, itemtype, w_box_type, byteorder=NPY.NATIVE, names=[], fields={}, elsize=None, shape=[], subdtype=None): self.itemtype = itemtype self.w_box_type = w_box_type - if byteorder is None: - if itemtype.get_element_size() == 1 or isinstance(itemtype, types.ObjectType): - byteorder = NPY.IGNORE - else: - byteorder = NPY.NATIVE + if itemtype.get_element_size() == 1 or isinstance(itemtype, types.ObjectType): + byteorder = NPY.IGNORE self.byteorder = byteorder self.names = names self.fields = fields @@ -137,7 +146,8 @@ return bool(self.fields) def is_native(self): - return self.byteorder in (NPY.NATIVE, NPY.NATBYTE) + # Use ord() to ensure that self.byteorder is a char and JITs properly + return ord(self.byteorder) in (ord(NPY.NATIVE), ord(NPY.NATBYTE)) def as_signed(self, space): """Convert from an unsigned integer dtype to its signed partner""" @@ -397,6 +407,20 @@ return space.wrap(0) return space.wrap(len(self.fields)) + def runpack_str(self, space, s): + if self.is_str_or_unicode(): + return self.coerce(space, space.wrap(s)) + return self.itemtype.runpack_str(space, s, self.is_native()) + + def store(self, arr, i, offset, value): + return self.itemtype.store(arr, i, offset, value, self.is_native()) + + def read(self, arr, i, offset): + return self.itemtype.read(arr, i, offset, self) + + def read_bool(self, arr, i, offset): + return self.itemtype.read_bool(arr, i, offset, self) + def descr_reduce(self, space): w_class = space.type(self) builder_args = space.newtuple([ @@ -432,7 +456,7 @@ "can't handle version %d of numpy.dtype pickle", version) - endian = space.str_w(space.getitem(w_data, space.wrap(1))) + endian = byteorder_w(space, space.getitem(w_data, space.wrap(1))) if endian == NPY.NATBYTE: endian = NPY.NATIVE @@ -492,11 +516,10 @@ endian = NPY.OPPBYTE if self.is_native() else NPY.NATBYTE elif newendian != NPY.IGNORE: endian = newendian - itemtype = self.itemtype.__class__(space, endian in (NPY.NATIVE, NPY.NATBYTE)) fields = self.fields if fields is None: fields = {} - return W_Dtype(itemtype, + return 
W_Dtype(self.itemtype, self.w_box_type, byteorder=endian, elsize=self.elsize, names=self.names, fields=fields, shape=self.shape, subdtype=self.subdtype) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -566,10 +566,7 @@ while not ai.done(state): fromstring_driver.jit_merge_point(dtype=dtype, itemsize=itemsize) sub = s[i*itemsize:i*itemsize + itemsize] - if dtype.is_str_or_unicode(): - val = dtype.coerce(space, space.wrap(sub)) - else: - val = dtype.itemtype.runpack_str(space, sub) + val = dtype.runpack_str(space, sub) ai.setitem(state, val) state = ai.next(state) i += 1 diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -20,6 +20,7 @@ from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.strides import get_shape_from_iterable, \ shape_agreement, shape_agreement_multiple, is_c_contiguous, is_f_contiguous +from pypy.module.micronumpy.casting import can_cast_array def _match_dot_shapes(space, left, right): @@ -43,7 +44,6 @@ raise oefmt(space.w_ValueError, "objects are not aligned") return out_shape, right_critical_dim - class __extend__(W_NDimArray): @jit.unroll_safe def descr_get_shape(self, space): @@ -279,7 +279,7 @@ s.append(separator) s.append(' ') if self.is_scalar() and dtype.is_str(): - s.append(dtype.itemtype.to_str(i.getitem(state))) + s.append(i.getitem(state).raw_str()) else: s.append(dtype.itemtype.str_format(i.getitem(state), add_quotes=True)) state = i.next(state) @@ -592,10 +592,11 @@ if self.is_scalar(): return space.wrap(0) dtype = self.get_dtype().descr_newbyteorder(space, NPY.NATIVE) - contig = self.implementation.astype(space, dtype) + contig = self.implementation.astype(space, dtype, self.get_order()) return contig.argsort(space, w_axis) - def descr_astype(self, space, w_dtype): + @unwrap_spec(order=str, casting=str, subok=bool, copy=bool) + def descr_astype(self, space, w_dtype, order='K', casting='unsafe', subok=True, copy=True): cur_dtype = self.get_dtype() new_dtype = space.interp_w(descriptor.W_Dtype, space.call_function( space.gettypefor(descriptor.W_Dtype), w_dtype)) @@ -603,13 +604,32 @@ raise oefmt(space.w_NotImplementedError, "astype(%s) not implemented yet", new_dtype.get_name()) - if new_dtype.num == NPY.STRING and new_dtype.elsize == 0: - if cur_dtype.num == NPY.STRING: - new_dtype = descriptor.variable_dtype( - space, 'S' + str(cur_dtype.elsize)) + if new_dtype.is_str() and new_dtype.elsize == 0: + elsize = 0 + itype = cur_dtype.itemtype + for i in range(self.get_size()): + elsize = max(elsize, len(itype.str_format(self.implementation.getitem(i), add_quotes=False))) + new_dtype = descriptor.variable_dtype( + space, 'S' + str(elsize)) + + if not can_cast_array(space, self, new_dtype, casting): + raise oefmt(space.w_TypeError, "Cannot cast array from %s to %s" + "according to the rule %s", + space.str_w(self.get_dtype().descr_repr(space)), + space.str_w(new_dtype.descr_repr(space)), casting) + order = support.get_order_as_CF(self.get_order(), order) + if (not copy and new_dtype == self.get_dtype() and order == self.get_order() + and (subok or type(self) is W_NDimArray)): + return self impl = self.implementation - new_impl = impl.astype(space, new_dtype) - return wrap_impl(space, space.type(self), self, new_impl) + new_impl = impl.astype(space, new_dtype, order) + if new_impl is None: + return self + if subok: + 
w_type = space.type(self) + else: + w_type = None + return wrap_impl(space, w_type, self, new_impl) def descr_get_base(self, space): impl = self.implementation diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -460,17 +460,18 @@ # handle w_op_dtypes part 2: copy where needed if possible if len(self.dtypes) > 0: for i in range(len(self.seq)): - selfd = self.dtypes[i] + self_d = self.dtypes[i] seq_d = self.seq[i].get_dtype() - if not selfd: + if not self_d: self.dtypes[i] = seq_d - elif selfd != seq_d: + elif self_d != seq_d: if not 'r' in self.op_flags[i].tmp_copy: raise oefmt(space.w_TypeError, "Iterator operand required copying or " "buffering for operand %d", i) impl = self.seq[i].implementation - new_impl = impl.astype(space, selfd) + order = support.get_order_as_CF(impl.order, self.order) + new_impl = impl.astype(space, self_d, order) self.seq[i] = W_NDimArray(new_impl) else: #copy them from seq diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -161,3 +161,14 @@ w_priority_r = space.findattr(w_rhs, space.wrap('__array_priority__')) or w_zero # XXX what is better, unwrapping values or space.gt? return space.is_true(space.gt(w_priority_r, w_priority_l)) + +def get_order_as_CF(proto_order, req_order): + if req_order == 'C': + return 'C' + elif req_order == 'F': + return 'F' + elif req_order == 'K': + return proto_order + elif req_order == 'A': + return proto_order + diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2249,6 +2249,15 @@ assert c.shape == b.shape assert c.strides == (8,) + exc = raises(TypeError, a.astype, 'i8', casting='safe') + assert exc.value.message.startswith( + "Cannot cast array from dtype('complex128') to dtype('int64')") + a = arange(6, dtype='f4').reshape(2, 3) + b = a.astype('f4', copy=False) + assert a is b + b = a.astype('f4', order='C', copy=False) + assert a is b + def test_base(self): from numpy import array assert array(1).base is None diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -83,8 +83,8 @@ def test_complex_op(self): import numpy as np import sys - a = np.array(['abc', 'def'], dtype=object) - b = np.array([1, 2, 3], dtype=object) + a = np.array(['abc', 'def'], dtype=object) + b = np.array([1, 2, 3], dtype=object) c = np.array([complex(1, 1), complex(1, -1)], dtype=object) for arg in (a,b,c): assert (arg == np.real(arg)).all() @@ -164,3 +164,11 @@ a = np.array([(1, 'object')], dt) # Wrong way - should complain about writing buffer to object dtype raises(ValueError, np.array, [1, 'object'], dt) + + def test_astype(self): + import numpy as np + a = np.array([b'a' * 100], dtype='O') + assert 'a' * 100 in str(a) + b = a.astype('S') + assert 'a' * 100 in str(b) + diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -133,12 +133,11 @@ return dispatcher class BaseType(object): - _immutable_fields_ = ['native', 'space'] + _immutable_fields_ = ['space'] strlen = 0 # chars needed to print any possible 
value of the type - def __init__(self, space, native=True): + def __init__(self, space): assert isinstance(space, ObjSpace) - self.native = native self.space = space def __repr__(self): @@ -199,37 +198,38 @@ def default_fromstring(self, space): raise NotImplementedError - def _read(self, storage, i, offset): + def _read(self, storage, i, offset, native): res = raw_storage_getitem_unaligned(self.T, storage, i + offset) - if not self.native: + if not native: res = byteswap(res) return res - def _write(self, storage, i, offset, value): - if not self.native: + def _write(self, storage, i, offset, value, native): + if not native: value = byteswap(value) raw_storage_setitem_unaligned(storage, i + offset, value) - def read(self, arr, i, offset, dtype=None): + def read(self, arr, i, offset, dtype): with arr as storage: - return self.box(self._read(storage, i, offset)) - - def read_bool(self, arr, i, offset): + return self.box(self._read(storage, i, offset, dtype.is_native())) + + def read_bool(self, arr, i, offset, dtype): with arr as storage: - return bool(self.for_computation(self._read(storage, i, offset))) - - def store(self, arr, i, offset, box): + return bool(self.for_computation( + self._read(storage, i, offset, dtype.is_native()))) + + def store(self, arr, i, offset, box, native): with arr as storage: - self._write(storage, i, offset, self.unbox(box)) - - def fill(self, storage, width, box, start, stop, offset, gcstruct): + self._write(storage, i, offset, self.unbox(box), native) + + def fill(self, storage, width, native, box, start, stop, offset, gcstruct): value = self.unbox(box) for i in xrange(start, stop, width): - self._write(storage, i, offset, value) - - def runpack_str(self, space, s): + self._write(storage, i, offset, value, native) + + def runpack_str(self, space, s, native): v = rffi.cast(self.T, runpack(self.format_code, s)) - if not self.native: + if not native: v = byteswap(v) return self.box(v) @@ -1058,10 +1058,10 @@ def box(self, value): return self.BoxType(rffi.cast(rffi.DOUBLE, value)) - def runpack_str(self, space, s): + def runpack_str(self, space, s, native): assert len(s) == 2 fval = self.box(unpack_float(s, native_is_bigendian)) - if not self.native: + if not native: fval = self.byteswap(fval) return fval @@ -1074,19 +1074,19 @@ swapped = byteswap(rffi.cast(self._STORAGE_T, hbits)) return self.box(float_unpack(r_ulonglong(swapped), 2)) - def _read(self, storage, i, offset): + def _read(self, storage, i, offset, native): hbits = raw_storage_getitem_unaligned(self._STORAGE_T, storage, i + offset) - if not self.native: + if not native: hbits = byteswap(hbits) return float_unpack(r_ulonglong(hbits), 2) - def _write(self, storage, i, offset, value): + def _write(self, storage, i, offset, value, native): try: hbits = float_pack(value, 2) except OverflowError: hbits = float_pack(rfloat.INFINITY, 2) hbits = rffi.cast(self._STORAGE_T, hbits) - if not self.native: + if not native: hbits = byteswap(hbits) raw_storage_setitem_unaligned(storage, i + offset, hbits) @@ -1148,14 +1148,14 @@ op = '+' if imag >= 0 or rfloat.isnan(imag) else '' return ''.join(['(', real_str, op, imag_str, ')']) - def runpack_str(self, space, s): - comp = self.ComponentBoxType._get_dtype(space).itemtype + def runpack_str(self, space, s, native): + comp = self.ComponentBoxType._get_dtype(space) l = len(s) // 2 real = comp.runpack_str(space, s[:l]) imag = comp.runpack_str(space, s[l:]) - if not self.native: - real = comp.byteswap(real) - imag = comp.byteswap(imag) + if not native: + real = 
comp.itemtype.byteswap(real) + imag = comp.itemtype.byteswap(imag) return self.composite(real, imag) @staticmethod @@ -1174,9 +1174,10 @@ real, imag = self.for_computation(self.unbox(v)) return bool(real) or bool(imag) - def read_bool(self, arr, i, offset): + def read_bool(self, arr, i, offset, dtype): with arr as storage: - v = self.for_computation(self._read(storage, i, offset)) + v = self.for_computation( + self._read(storage, i, offset, dtype.is_native())) return bool(v[0]) or bool(v[1]) def get_element_size(self): @@ -1219,35 +1220,35 @@ assert isinstance(box, self.BoxType) return box.real, box.imag - def _read(self, storage, i, offset): + def _read(self, storage, i, offset, native): real = raw_storage_getitem_unaligned(self.T, storage, i + offset) imag = raw_storage_getitem_unaligned(self.T, storage, i + offset + rffi.sizeof(self.T)) - if not self.native: + if not native: real = byteswap(real) imag = byteswap(imag) return real, imag - def read(self, arr, i, offset, dtype=None): + def read(self, arr, i, offset, dtype): with arr as storage: - real, imag = self._read(storage, i, offset) + real, imag = self._read(storage, i, offset, dtype.is_native()) return self.box_complex(real, imag) - def _write(self, storage, i, offset, value): + def _write(self, storage, i, offset, value, native): real, imag = value - if not self.native: + if not native: real = byteswap(real) imag = byteswap(imag) raw_storage_setitem_unaligned(storage, i + offset, real) raw_storage_setitem_unaligned(storage, i + offset + rffi.sizeof(self.T), imag) - def store(self, arr, i, offset, box): + def store(self, arr, i, offset, box, native): with arr as storage: - self._write(storage, i, offset, self.unbox(box)) - - def fill(self, storage, width, box, start, stop, offset, gcstruct): + self._write(storage, i, offset, self.unbox(box), native) + + def fill(self, storage, width, native, box, start, stop, offset, gcstruct): value = self.unbox(box) for i in xrange(start, stop, width): - self._write(storage, i, offset, value) + self._write(storage, i, offset, value, native) @complex_binary_op def add(self, v1, v2): @@ -1745,10 +1746,10 @@ char = NPY.LONGDOUBLELTR BoxType = boxes.W_FloatLongBox - def runpack_str(self, space, s): + def runpack_str(self, space, s, native): assert len(s) == boxes.long_double_size fval = self.box(unpack_float80(s, native_is_bigendian)) - if not self.native: + if not native: fval = self.byteswap(fval) return fval @@ -1788,14 +1789,14 @@ # return the item itself return self.unbox(self.box(w_item)) - def store(self, arr, i, offset, box): + def store(self, arr, i, offset, box, native): if arr.gcstruct is V_OBJECTSTORE: raise oefmt(self.space.w_NotImplementedError, "cannot store object in array with no gc hook") self._write(arr.storage, i, offset, self.unbox(box), arr.gcstruct) - def read(self, arr, i, offset, dtype=None): + def read(self, arr, i, offset, dtype): return self.box(self._read(arr.storage, i, offset)) def byteswap(self, w_v): @@ -1814,7 +1815,7 @@ raw_storage_setitem_unaligned(storage, i + offset, value) @jit.dont_look_inside - def _read(self, storage, i, offset): + def _read(self, storage, i, offset, native=True): res = raw_storage_getitem_unaligned(self.T, storage, i + offset) if we_are_translated(): gcref = rffi.cast(llmemory.GCREF, res) @@ -1823,7 +1824,7 @@ w_obj = _all_objs_for_tests[res] return w_obj - def fill(self, storage, width, box, start, stop, offset, gcstruct): + def fill(self, storage, width, native, box, start, stop, offset, gcstruct): value = self.unbox(box) for i in 
xrange(start, stop, width): self._write(storage, i, offset, value, gcstruct) @@ -1866,7 +1867,7 @@ def str_format(self, box, add_quotes=True): return self.space.str_w(self.space.repr(self.unbox(box))) - def runpack_str(self, space, s): + def runpack_str(self, space, s, native): raise oefmt(space.w_NotImplementedError, "fromstring not implemented for object type") @@ -2051,20 +2052,8 @@ def get_element_size(self): return rffi.sizeof(self.T) - @jit.unroll_safe def to_str(self, item): - builder = StringBuilder() - assert isinstance(item, boxes.W_FlexibleBox) - i = item.ofs - end = i + item.dtype.elsize - with item.arr as storage: - while i < end: - assert isinstance(storage[i], str) - if storage[i] == '\x00': - break - builder.append(storage[i]) - i += 1 - return builder.build() + return item.raw_str() def str_unary_op(func): specialize.argtype(1)(func) @@ -2105,7 +2094,7 @@ storage[j] = '\x00' return boxes.W_StringBox(arr, 0, arr.dtype) - def store(self, arr, i, offset, box): + def store(self, arr, i, offset, box, native): assert isinstance(box, boxes.W_StringBox) size = min(arr.dtype.elsize - offset, box.arr.size - box.ofs) with arr as storage: @@ -2118,9 +2107,7 @@ for k in range(size): storage[k + offset + i] = box_storage[k + box.ofs] - def read(self, arr, i, offset, dtype=None): - if dtype is None: - dtype = arr.dtype + def read(self, arr, i, offset, dtype): return boxes.W_StringBox(arr, i + offset, dtype) def str_format(self, item, add_quotes=True): @@ -2185,7 +2172,7 @@ def bool(self, v): return bool(self.to_str(v)) - def fill(self, storage, width, box, start, stop, offset, gcstruct): + def fill(self, storage, width, native, box, start, stop, offset, gcstruct): for i in xrange(start, stop, width): self._store(storage, i, offset, box, width) @@ -2205,7 +2192,7 @@ value = space.unicode_w(w_item) return boxes.W_UnicodeBox(value) - def store(self, arr, i, offset, box): + def store(self, arr, i, offset, box, native): assert isinstance(box, boxes.W_UnicodeBox) value = box._value for k in range(len(value)): @@ -2213,7 +2200,7 @@ data = rffi.cast(Int32.T, ord(box._value[k])) raw_storage_setitem_unaligned(arr.storage, index, data) - def read(self, arr, i, offset, dtype=None): + def read(self, arr, i, offset, dtype): if dtype is None: dtype = arr.dtype size = dtype.elsize // 4 @@ -2269,7 +2256,7 @@ def bool(self, v): raise NotImplementedError - def fill(self, storage, width, box, start, stop, offset, gcstruct): + def fill(self, storage, width, native, box, start, stop, offset, gcstruct): raise NotImplementedError @@ -2293,8 +2280,8 @@ itemtype = subdtype.itemtype if len(shape) <= 1: for i in range(len(items_w)): - w_box = itemtype.coerce(space, subdtype, items_w[i]) - itemtype.store(arr, 0, ofs, w_box) + w_box = subdtype.coerce(space, items_w[i]) + subdtype.store(arr, 0, ofs, w_box) ofs += itemtype.get_element_size() else: for w_item in items_w: @@ -2311,13 +2298,13 @@ return boxes.W_VoidBox(arr, 0, dtype) @jit.unroll_safe - def store(self, arr, i, ofs, box): + def store(self, arr, i, offset, box, native): assert i == 0 assert isinstance(box, boxes.W_VoidBox) assert box.dtype is box.arr.dtype with arr as arr_storage, box.arr as box_storage: for k in range(box.arr.dtype.elsize): - arr_storage[k + ofs] = box_storage[k + box.ofs] + arr_storage[k + offset] = box_storage[k + box.ofs] def readarray(self, arr, i, offset, dtype=None): from pypy.module.micronumpy.base import W_NDimArray @@ -2330,9 +2317,7 @@ dtype.subdtype) return W_NDimArray(implementation) - def read(self, arr, i, offset, dtype=None): 
- if dtype is None: - dtype = arr.dtype + def read(self, arr, i, offset, dtype): return boxes.W_VoidBox(arr, i + offset, dtype) @jit.unroll_safe @@ -2351,10 +2336,11 @@ ret_unwrapped = [] for name in dt.names: ofs, dtype = dt.fields[name] + # XXX: code duplication with W_VoidBox.descr_getitem() if isinstance(dtype.itemtype, VoidType): read_val = dtype.itemtype.readarray(item.arr, ofs, 0, dtype) else: - read_val = dtype.itemtype.read(item.arr, ofs, 0, dtype) + read_val = dtype.read(item.arr, ofs, 0) if isinstance (read_val, boxes.W_StringBox): # StringType returns a str read_val = space.wrap(dtype.itemtype.to_str(read_val)) @@ -2373,9 +2359,7 @@ kind = NPY.VOIDLTR char = NPY.VOIDLTR - def read(self, arr, i, offset, dtype=None): - if dtype is None: - dtype = arr.dtype + def read(self, arr, i, offset, dtype): return boxes.W_VoidBox(arr, i + offset, dtype) @jit.unroll_safe @@ -2410,22 +2394,21 @@ arr = VoidBoxStorage(dtype.elsize, dtype) for i in range(len(dtype.fields)): ofs, subdtype = dtype.fields[dtype.names[i]] - itemtype = subdtype.itemtype try: - w_box = itemtype.coerce(space, subdtype, items_w[i]) + w_box = subdtype.coerce(space, items_w[i]) except IndexError: - w_box = itemtype.coerce(space, subdtype, None) - itemtype.store(arr, 0, ofs, w_box) + w_box = subdtype.coerce(space, None) + subdtype.store(arr, 0, ofs, w_box) return boxes.W_VoidBox(arr, 0, dtype) - def runpack_str(self, space, s): + def runpack_str(self, space, s, native): raise oefmt(space.w_NotImplementedError, "fromstring not implemented for record types") - def store(self, arr, i, ofs, box): + def store(self, arr, i, offset, box, native): assert isinstance(box, boxes.W_VoidBox) with arr as storage: - self._store(storage, i, ofs, box, box.dtype.elsize) + self._store(storage, i, offset, box, box.dtype.elsize) @jit.unroll_safe def _store(self, storage, i, ofs, box, size): @@ -2433,7 +2416,7 @@ for k in range(size): storage[k + i + ofs] = box_storage[k + box.ofs] - def fill(self, storage, width, box, start, stop, offset, gcstruct): + def fill(self, storage, width, native, box, start, stop, offset, gcstruct): assert isinstance(box, boxes.W_VoidBox) assert width == box.dtype.elsize for i in xrange(start, stop, width): @@ -2449,9 +2432,8 @@ dtype = box.dtype for name in dtype.names: ofs, subdtype = dtype.fields[name] - itemtype = subdtype.itemtype - subbox = itemtype.read(box.arr, box.ofs, ofs, subdtype) - items.append(itemtype.to_builtin_type(space, subbox)) + subbox = subdtype.read(box.arr, box.ofs, ofs) + items.append(subdtype.itemtype.to_builtin_type(space, subbox)) return space.newtuple(items) @jit.unroll_safe @@ -2461,12 +2443,12 @@ first = True for name in box.dtype.names: ofs, subdtype = box.dtype.fields[name] - tp = subdtype.itemtype if first: first = False else: pieces.append(", ") - val = tp.read(box.arr, box.ofs, ofs, subdtype) + val = subdtype.read(box.arr, box.ofs, ofs) + tp = subdtype.itemtype pieces.append(tp.str_format(val, add_quotes=add_quotes)) pieces.append(")") return "".join(pieces) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1458,6 +1458,63 @@ import gc; gc.collect(); gc.collect(); gc.collect() assert seen == [1] + def test_gc_2(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int *", 123) + seen = [] + q1 = ffi.gc(p, lambda p: seen.append(1)) + q2 = ffi.gc(q1, lambda p: 
seen.append(2)) + import gc; gc.collect() + assert seen == [] + del q1, q2 + import gc; gc.collect(); gc.collect(); gc.collect(); gc.collect() + assert seen == [2, 1] + + def test_gc_3(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int *", 123) + r = ffi.new("int *", 123) + seen = [] + seen_r = [] + q1 = ffi.gc(p, lambda p: seen.append(1)) + s1 = ffi.gc(r, lambda r: seen_r.append(4)) + q2 = ffi.gc(q1, lambda p: seen.append(2)) + s2 = ffi.gc(s1, lambda r: seen_r.append(5)) + q3 = ffi.gc(q2, lambda p: seen.append(3)) + import gc; gc.collect() + assert seen == [] + assert seen_r == [] + del q1, q2, q3, s2, s1 + import gc; gc.collect(); gc.collect(); gc.collect(); gc.collect() + assert seen == [3, 2, 1] + assert seen_r == [5, 4] + + def test_gc_4(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int *", 123) + seen = [] + q1 = ffi.gc(p, lambda p: seen.append(1)) + q2 = ffi.gc(q1, lambda p: seen.append(2)) + q3 = ffi.gc(q2, lambda p: seen.append(3)) + import gc; gc.collect() + assert seen == [] + del q1, q3 # q2 remains, and has a hard ref to q1 + import gc; gc.collect(); gc.collect(); gc.collect() + assert seen == [3] + + def test_gc_finite_list(self): + ffi = FFI(backend=self.Backend()) + p = ffi.new("int *", 123) + keepalive = [] + for i in range(10): + keepalive.append(ffi.gc(p, lambda p: None)) + assert len(ffi.gc_weakrefs.data) == i + 1 #should be a private attr + del keepalive[:] + import gc; gc.collect(); gc.collect() + for i in range(10): + keepalive.append(ffi.gc(p, lambda p: None)) + assert len(ffi.gc_weakrefs.data) == 10 + def test_CData_CType(self): ffi = FFI(backend=self.Backend()) assert isinstance(ffi.cast("int", 0), ffi.CData) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -2228,3 +2228,11 @@ ffi.cdef("static const int FOO = 123;") e = py.test.raises(VerificationError, ffi.verify, "#define FOO 124") assert str(e.value).endswith("FOO has the real value 124, not 123") + +def test_const_struct_global(): + ffi = FFI() + ffi.cdef("typedef struct { int x; ...; } T; const T myglob;") + lib = ffi.verify("typedef struct { double y; int x; } T;" + "const T myglob = { 0.1, 42 };") + assert ffi.typeof(lib.myglob) == ffi.typeof("T") + assert lib.myglob.x == 42 diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -1,6 +1,5 @@ # Generated by pypy/tool/import_cffi.py import py, os, sys, shutil -import imp import subprocess from pypy.module.test_lib_pypy.cffi_tests.udir import udir @@ -16,28 +15,12 @@ except OSError as e: py.test.skip("Cannot execute virtualenv: %s" % (e,)) - try: - deepcopy = os.symlink - except: - import shutil, errno - def deepcopy(src, dst): - try: - shutil.copytree(src, dst) - except OSError as e: - if e.errno in (errno.ENOTDIR, errno.EINVAL): - shutil.copy(src, dst) - else: - print('got errno') - print(e.errno) - print('not') - print(errno.ENOTDIR) - raise - site_packages = None for dirpath, dirnames, filenames in os.walk(str(tmpdir)): if os.path.basename(dirpath) == 'site-packages': site_packages = dirpath break + paths = "" if site_packages: try: from cffi import _pycparser @@ -50,15 +33,22 @@ pass else: modules 
+= ('ply',) # needed for older versions of pycparser + paths = [] for module in modules: - target = imp.find_module(module)[1] - deepcopy(target, os.path.join(site_packages, - os.path.basename(target))) - return tmpdir + target = __import__(module, None, None, []) + src = os.path.abspath(target.__file__) + for end in ['__init__.pyc', '__init__.pyo', '__init__.py']: + if src.lower().endswith(end): + src = src[:-len(end)-1] + break + paths.append(os.path.dirname(src)) + paths = os.pathsep.join(paths) + return tmpdir, paths SNIPPET_DIR = py.path.local(__file__).join('..', 'snippets') -def really_run_setup_and_program(dirname, venv_dir, python_snippet): +def really_run_setup_and_program(dirname, venv_dir_and_paths, python_snippet): + venv_dir, paths = venv_dir_and_paths def remove(dir): dir = str(SNIPPET_DIR.join(dirname, dir)) shutil.rmtree(dir, ignore_errors=True) @@ -76,9 +66,11 @@ else: bindir = 'bin' vp = str(venv_dir.join(bindir).join('python')) - subprocess.check_call((vp, 'setup.py', 'clean')) - subprocess.check_call((vp, 'setup.py', 'install')) - subprocess.check_call((vp, str(python_f))) + env = os.environ.copy() + env['PYTHONPATH'] = paths + subprocess.check_call((vp, 'setup.py', 'clean'), env=env) + subprocess.check_call((vp, 'setup.py', 'install'), env=env) + subprocess.check_call((vp, str(python_f)), env=env) finally: os.chdir(olddir) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py +import py, sys import _cffi_backend as _cffi1_backend @@ -66,6 +66,7 @@ ffi = _cffi1_backend.FFI() p = ffi.new("char[]", init=b"foobar\x00baz") assert ffi.string(p) == b"foobar" + assert ffi.string(cdata=p, maxlen=3) == b"foo" def test_ffi_errno(): # xxx not really checking errno, just checking that we can read/write it @@ -158,11 +159,19 @@ assert str(e.value) == ("undefined struct/union name\n" "struct never_heard_of_s\n" " ^") + e = py.test.raises(ffi.error, ffi.cast, "\t\n\x01\x1f~\x7f\x80\xff", 0) + marks = "?" if sys.version_info < (3,) else "??" 
+ assert str(e.value) == ("identifier expected\n" + " ??~?%s%s\n" + " ^" % (marks, marks)) + e = py.test.raises(ffi.error, ffi.cast, "X" * 600, 0) + assert str(e.value) == ("undefined type name") def test_ffi_buffer(): ffi = _cffi1_backend.FFI() a = ffi.new("signed char[]", [5, 6, 7]) assert ffi.buffer(a)[:] == b'\x05\x06\x07' + assert ffi.buffer(cdata=a, size=2)[:] == b'\x05\x06' def test_ffi_from_buffer(): import array @@ -179,3 +188,11 @@ ffi = _cffi1_backend.FFI() assert isinstance(ffi.cast("int", 42), CData) assert isinstance(ffi.typeof("int"), CType) + +def test_ffi_getwinerror(): + if sys.platform != "win32": + py.test.skip("for windows") + ffi = _cffi1_backend.FFI() + n = (1 << 29) + 42 + code, message = ffi.getwinerror(code=n) + assert code == n diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py @@ -33,7 +33,9 @@ struct ab { int a, b; }; struct abc { int a, b, c; }; - enum foq { A0, B0, CC0, D0 }; + /* don't use A0, B0, CC0, D0 because termios.h might be included + and it has its own #defines for these names */ + enum foq { cffiA0, cffiB0, cffiCC0, cffiD0 }; enum bar { A1, B1=-2, CC1, D1, E1 }; enum baz { A2=0x1000, B2=0x2000 }; enum foo2 { A3, B3, C3, D3 }; @@ -879,9 +881,9 @@ def test_enum(self): # enum foq { A0, B0, CC0, D0 }; - assert ffi.string(ffi.cast("enum foq", 0)) == "A0" - assert ffi.string(ffi.cast("enum foq", 2)) == "CC0" - assert ffi.string(ffi.cast("enum foq", 3)) == "D0" + assert ffi.string(ffi.cast("enum foq", 0)) == "cffiA0" + assert ffi.string(ffi.cast("enum foq", 2)) == "cffiCC0" + assert ffi.string(ffi.cast("enum foq", 3)) == "cffiD0" assert ffi.string(ffi.cast("enum foq", 4)) == "4" # enum bar { A1, B1=-2, CC1, D1, E1 }; assert ffi.string(ffi.cast("enum bar", 0)) == "A1" @@ -1408,6 +1410,47 @@ import gc; gc.collect(); gc.collect(); gc.collect() assert seen == [1] + def test_gc_2(self): + p = ffi.new("int *", 123) + seen = [] + q1 = ffi.gc(p, lambda p: seen.append(1)) + q2 = ffi.gc(q1, lambda p: seen.append(2)) + import gc; gc.collect() + assert seen == [] + del q1, q2 + import gc; gc.collect(); gc.collect(); gc.collect(); gc.collect() + assert seen == [2, 1] + + def test_gc_3(self): + p = ffi.new("int *", 123) + r = ffi.new("int *", 123) + seen = [] + seen_r = [] + q1 = ffi.gc(p, lambda p: seen.append(1)) + s1 = ffi.gc(r, lambda r: seen_r.append(4)) + q2 = ffi.gc(q1, lambda p: seen.append(2)) + s2 = ffi.gc(s1, lambda r: seen_r.append(5)) + q3 = ffi.gc(q2, lambda p: seen.append(3)) + import gc; gc.collect() + assert seen == [] + assert seen_r == [] + del q1, q2, q3, s2, s1 + import gc; gc.collect(); gc.collect(); gc.collect(); gc.collect() + assert seen == [3, 2, 1] + assert seen_r == [5, 4] + + def test_gc_4(self): + p = ffi.new("int *", 123) + seen = [] + q1 = ffi.gc(p, lambda p: seen.append(1)) + q2 = ffi.gc(q1, lambda p: seen.append(2)) + q3 = ffi.gc(q2, lambda p: seen.append(3)) + import gc; gc.collect() + assert seen == [] + del q1, q3 # q2 remains, and has a hard ref to q1 + import gc; gc.collect(); gc.collect(); gc.collect() + assert seen == [3] + def test_CData_CType(self): assert isinstance(ffi.cast("int", 0), ffi.CData) assert isinstance(ffi.new("int *"), ffi.CData) @@ -1534,8 +1577,8 @@ assert p.a == -52525 # p = ffi.cast("enum foq", 2) - assert ffi.string(p) == "CC0" - assert ffi2.sizeof("char[CC0]") == 2 + assert ffi.string(p) == 
"cffiCC0" + assert ffi2.sizeof("char[cffiCC0]") == 2 # p = ffi.new("anon_foo_t *", [-52526]) assert p.a == -52526 diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py @@ -8,6 +8,7 @@ def setup_module(mod): SRC = """ + #include <string.h> #define FOOBAR (-42) static const int FOOBAZ = -43; #define BIGPOS 420000000000L @@ -54,6 +55,7 @@ struct foo_s; typedef struct bar_s { int x; signed char a[]; } bar_t; enum foo_e { AA, BB, CC }; + int strlen(const char *); """) ffi.set_source('re_python_pysrc', None) ffi.emit_python_code(str(tmpdir.join('re_python_pysrc.py'))) @@ -82,10 +84,20 @@ def test_function_with_varargs(): import _cffi_backend from re_python_pysrc import ffi - lib = ffi.dlopen(extmod) + lib = ffi.dlopen(extmod, 0) assert lib.add43(45, ffi.cast("int", -5)) == 45 assert type(lib.add43) is _cffi_backend.FFI.CData +def test_dlopen_none(): + import _cffi_backend + from re_python_pysrc import ffi + name = None + if sys.platform == 'win32': + import ctypes.util + name = ctypes.util.find_msvcrt() + lib = ffi.dlopen(name) + assert lib.strlen(b"hello") == 5 + def test_dlclose(): import _cffi_backend from re_python_pysrc import ffi diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -993,3 +993,13 @@ ffi.typeof('function_t*') lib.function(ffi.NULL) # assert did not crash + +def test_alignment_of_longlong(): + ffi = FFI() + x1 = ffi.alignof('unsigned long long') + assert x1 in [4, 8] + ffi.cdef("struct foo_s { unsigned long long x; };") + lib = verify(ffi, 'test_alignment_of_longlong', + "struct foo_s { unsigned long long x; };") + assert ffi.alignof('unsigned long long') == x1 + assert ffi.alignof('struct foo_s') == x1 diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -2118,25 +2118,19 @@ try: ffi1 = FFI() ffi1.cdef("int foo_verify_dlopen_flags;") - - sys.setdlopenflags(ffi1.RTLD_GLOBAL | ffi1.RTLD_LAZY) + sys.setdlopenflags(ffi1.RTLD_GLOBAL | ffi1.RTLD_NOW) lib1 = ffi1.verify("int foo_verify_dlopen_flags;") - lib2 = get_second_lib() - - lib1.foo_verify_dlopen_flags = 42 - assert lib2.foo_verify_dlopen_flags == 42 - lib2.foo_verify_dlopen_flags += 1 - assert lib1.foo_verify_dlopen_flags == 43 finally: sys.setdlopenflags(old) -def get_second_lib(): - # Hack, using modulename makes the test fail ffi2 = FFI() - ffi2.cdef("int foo_verify_dlopen_flags;") - lib2 = ffi2.verify("int foo_verify_dlopen_flags;", - flags=ffi2.RTLD_GLOBAL | ffi2.RTLD_LAZY) - return lib2 + ffi2.cdef("int *getptr(void);") + lib2 = ffi2.verify(""" + extern int foo_verify_dlopen_flags; + static int *getptr(void) { return &foo_verify_dlopen_flags; } + """) + p = lib2.getptr() + assert ffi1.addressof(lib1, 'foo_verify_dlopen_flags') == p def test_consider_not_implemented_function_type(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py --- 
a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_zdist.py @@ -30,13 +30,17 @@ if hasattr(self, 'saved_cwd'): os.chdir(self.saved_cwd) - def run(self, args): + def run(self, args, cwd=None): env = os.environ.copy() - newpath = self.rootdir - if 'PYTHONPATH' in env: - newpath += os.pathsep + env['PYTHONPATH'] - env['PYTHONPATH'] = newpath - subprocess.check_call([self.executable] + args, env=env) + # a horrible hack to prevent distutils from finding ~/.pydistutils.cfg + # (there is the --no-user-cfg option, but not in Python 2.6...) + env['HOME'] = '/this/path/does/not/exist' + if cwd is None: + newpath = self.rootdir + if 'PYTHONPATH' in env: + newpath += os.pathsep + env['PYTHONPATH'] + env['PYTHONPATH'] = newpath + subprocess.check_call([self.executable] + args, cwd=cwd, env=env) def _prepare_setuptools(self): if hasattr(TestDist, '_setuptools_ready'): @@ -45,8 +49,7 @@ import setuptools except ImportError: py.test.skip("setuptools not found") - subprocess.check_call([self.executable, 'setup.py', 'egg_info'], - cwd=self.rootdir) + self.run(['setup.py', 'egg_info'], cwd=self.rootdir) TestDist._setuptools_ready = True def check_produced_files(self, content, curdir=None): diff --git a/pypy/module/test_lib_pypy/test_datetime.py b/pypy/module/test_lib_pypy/test_datetime.py --- a/pypy/module/test_lib_pypy/test_datetime.py +++ b/pypy/module/test_lib_pypy/test_datetime.py @@ -6,9 +6,40 @@ class BaseTestDatetime: def test_repr(self): - print datetime - expected = "datetime.datetime(1, 2, 3, 0, 0)" - assert repr(datetime.datetime(1,2,3)) == expected + checks = ( + (datetime.date(2015, 6, 8), "datetime.date(2015, 6, 8)"), + (datetime.datetime(2015, 6, 8, 12, 34, 56), "datetime.datetime(2015, 6, 8, 12, 34, 56)"), + (datetime.time(12, 34, 56), "datetime.time(12, 34, 56)"), + (datetime.timedelta(1), "datetime.timedelta(1)"), + (datetime.timedelta(1, 2), "datetime.timedelta(1, 2)"), + (datetime.timedelta(1, 2, 3), "datetime.timedelta(1, 2, 3)"), + ) + for obj, expected in checks: + assert repr(obj) == expected + + def test_repr_overridden(self): + class date_safe(datetime.date): + pass + + class datetime_safe(datetime.datetime): + pass + + class time_safe(datetime.time): + pass + + class timedelta_safe(datetime.timedelta): + pass + + checks = ( + (date_safe(2015, 6, 8), "date_safe(2015, 6, 8)"), + (datetime_safe(2015, 6, 8, 12, 34, 56), "datetime_safe(2015, 6, 8, 12, 34, 56)"), + (time_safe(12, 34, 56), "time_safe(12, 34, 56)"), + (timedelta_safe(1), "timedelta_safe(1)"), + (timedelta_safe(1, 2), "timedelta_safe(1, 2)"), + (timedelta_safe(1, 2, 3), "timedelta_safe(1, 2, 3)"), + ) + for obj, expected in checks: + assert repr(obj) == expected def test_attributes(self): for x in [datetime.date.today(), diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -21,6 +21,7 @@ from rpython.annotator.argument import simple_args from rpython.rlib.objectmodel import r_dict, r_ordereddict, Symbolic from rpython.tool.algo.unionfind import UnionFind +from rpython.tool.flattenrec import FlattenRecursion from rpython.rtyper import extregistry @@ -425,6 +426,8 @@ self.methoddescs[key] = result return result + _see_mutable_flattenrec = FlattenRecursion() + def see_mutable(self, x): key = (x.__class__, x) if key in self.seen_mutable: @@ -433,8 +436,11 @@ self.seen_mutable[key] = True self.event('mutable', x) source = InstanceSource(self, x) - for attr in 
-        for attr in source.all_instance_attributes():
-            clsdef.add_source_for_attribute(attr, source)  # can trigger reflowing
+        def delayed():
+            for attr in source.all_instance_attributes():
+                clsdef.add_source_for_attribute(attr, source)
+                # ^^^ can trigger reflowing
+        self._see_mutable_flattenrec(delayed)

     def valueoftype(self, t):
         return annotationoftype(t, self)
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -109,10 +109,13 @@
                                                           kind='unicode')
         else:
             self.malloc_slowpath_unicode = None
-        self.cond_call_slowpath = [self._build_cond_call_slowpath(False, False),
-                                   self._build_cond_call_slowpath(False, True),
-                                   self._build_cond_call_slowpath(True, False),
-                                   self._build_cond_call_slowpath(True, True)]
+        lst = [0, 0, 0, 0]
+        lst[0] = self._build_cond_call_slowpath(False, False)
+        lst[1] = self._build_cond_call_slowpath(False, True)
+        if self.cpu.supports_floats:
+            lst[2] = self._build_cond_call_slowpath(True, False)
+            lst[3] = self._build_cond_call_slowpath(True, True)
+        self.cond_call_slowpath = lst

         self._build_stack_check_slowpath()
         self._build_release_gil(gc_ll_descr.gcrootmap)
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -382,7 +382,8 @@
         # we have one word to align
         mc.SUB_ri(esp.value, 7 * WORD)      # align and reserve some space
         mc.MOV_sr(WORD, eax.value)          # save for later
-        mc.MOVSD_sx(2 * WORD, xmm0.value)   # 32-bit: also 3 * WORD
+        if self.cpu.supports_floats:
+            mc.MOVSD_sx(2 * WORD, xmm0.value)   # 32-bit: also 3 * WORD
         if IS_X86_32:
             mc.MOV_sr(4 * WORD, edx.value)
         mc.MOV_sr(0, ebp.value)
@@ -423,7 +424,8 @@
         else:
             if IS_X86_32:
                 mc.MOV_rs(edx.value, 4 * WORD)
-            mc.MOVSD_xs(xmm0.value, 2 * WORD)
+            if self.cpu.supports_floats:
+                mc.MOVSD_xs(xmm0.value, 2 * WORD)
             mc.MOV_rs(eax.value, WORD)  # restore
         self._restore_exception(mc, exc0, exc1)
         mc.MOV(exc0, RawEspLoc(WORD * 5, REF))
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
--- a/rpython/jit/metainterp/compile.py
+++ b/rpython/jit/metainterp/compile.py
@@ -103,6 +103,7 @@

 # ____________________________________________________________

+
 def compile_loop(metainterp, greenkey, start,
                  inputargs, jumpargs,
                  full_preamble_needed=True,
@@ -148,27 +149,28 @@
     if part.quasi_immutable_deps:
         loop.quasi_immutable_deps.update(part.quasi_immutable_deps)
     if part.operations[-1].getopnum() == rop.LABEL:
-        inliner = Inliner(inputargs, jumpargs)
-        part.quasi_immutable_deps = None
-        part.operations = [part.operations[-1]] + \
-                          [inliner.inline_op(h_ops[i]) for i in range(start, len(h_ops))] + \
-                          [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jumpargs],
-                                        None, descr=jitcell_token)]
-        target_token = part.operations[0].getdescr()
-        assert isinstance(target_token, TargetToken)
-        all_target_tokens.append(target_token)
-        inputargs = jumpargs
-        jumpargs = part.operations[-1].getarglist()
+        if start_state is not None:
+            inliner = Inliner(inputargs, jumpargs)
+            part.quasi_immutable_deps = None
+            part.operations = [part.operations[-1]] + \
+                              [inliner.inline_op(h_ops[i]) for i in range(start, len(h_ops))] + \
+                              [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jumpargs],
+                                            None, descr=jitcell_token)]
+            target_token = part.operations[0].getdescr()
+            assert isinstance(target_token, TargetToken)
+            all_target_tokens.append(target_token)
+            inputargs = jumpargs
+            jumpargs = part.operations[-1].getarglist()

-        try:
-            optimize_trace(metainterp_sd, jitdriver_sd, part, enable_opts,
-                           start_state=start_state, export_state=False)
-        except InvalidLoop:
-            return None
+            try:
+                optimize_trace(metainterp_sd, jitdriver_sd, part, enable_opts,
+                               start_state=start_state, export_state=False)
+            except InvalidLoop:
+                return None

-        loop.operations = loop.operations[:-1] + part.operations
-        if part.quasi_immutable_deps:
-            loop.quasi_immutable_deps.update(part.quasi_immutable_deps)
+            loop.operations = loop.operations[:-1] + part.operations
+            if part.quasi_immutable_deps:
+                loop.quasi_immutable_deps.update(part.quasi_immutable_deps)

     assert part.operations[-1].getopnum() != rop.LABEL
     if not loop.quasi_immutable_deps:
diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py
--- a/rpython/jit/metainterp/optimizeopt/unroll.py
+++ b/rpython/jit/metainterp/optimizeopt/unroll.py
@@ -154,6 +154,22 @@
         loop.operations = self.optimizer.get_newoperations()

         if export_state:
+            jd_sd = self.optimizer.jitdriver_sd
+            try:
+                threshold = jd_sd.warmstate.disable_unrolling_threshold
+            except AttributeError:    # tests only
+                threshold = sys.maxint
+            if len(loop.operations) > threshold:
+                if loop.operations[0].getopnum() == rop.LABEL:
+                    # abandoning unrolling, too long
+                    new_descr = stop_label.getdescr()
+                    if loop.operations[0].getopnum() == rop.LABEL:
+                        new_descr = loop.operations[0].getdescr()
+                    stop_label = stop_label.copy_and_change(rop.JUMP,
+                                                            descr=new_descr)
+                    self.optimizer.send_extra_operation(stop_label)
+                    loop.operations = self.optimizer.get_newoperations()
+                    return None
             final_state = self.export_state(stop_label)
         else:
             final_state = None
diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py
--- a/rpython/jit/metainterp/warmspot.py
+++ b/rpython/jit/metainterp/warmspot.py
@@ -70,7 +70,7 @@
 def jittify_and_run(interp, graph, args, repeat=1, graph_and_interp_only=False,
                     backendopt=False, trace_limit=sys.maxint, inline=False,
                     loop_longevity=0, retrace_limit=5,
-                    function_threshold=4,
+                    function_threshold=4, disable_unrolling=sys.maxint,
                     enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15,
                     max_unroll_recursion=7, **kwds):
     from rpython.config.config import ConfigError
@@ -95,6 +95,7 @@
     jd.warmstate.set_param_max_retrace_guards(max_retrace_guards)
     jd.warmstate.set_param_enable_opts(enable_opts)
     jd.warmstate.set_param_max_unroll_recursion(max_unroll_recursion)
+    jd.warmstate.set_param_disable_unrolling(disable_unrolling)
     warmrunnerdesc.finish()
     if graph_and_interp_only:
         return interp, graph
diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py
--- a/rpython/jit/metainterp/warmstate.py
+++ b/rpython/jit/metainterp/warmstate.py
@@ -256,6 +256,9 @@
     def set_param_inlining(self, value):
         self.inlining = value

+    def set_param_disable_unrolling(self, value):
+        self.disable_unrolling_threshold = value
+
     def set_param_enable_opts(self, value):
         from rpython.jit.metainterp.optimizeopt import ALL_OPTS_DICT, ALL_OPTS_NAMES

diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
--- a/rpython/rlib/jit.py
+++ b/rpython/rlib/jit.py
@@ -549,6 +549,7 @@
     'retrace_limit': 'how many times we can try retracing before giving up',
     'max_retrace_guards': 'number of extra guards a retrace can cause',
     'max_unroll_loops': 'number of extra unrollings a loop can cause',
+    'disable_unrolling': 'after how many operations we should not unroll',
     'enable_opts': 'INTERNAL USE ONLY (MAY NOT WORK OR LEAD TO CRASHES): '
                    'optimizations to enable, or all = %s' % ENABLE_ALL_OPTS,
     'max_unroll_recursion': 'how many levels deep to unroll a recursive function'
@@ -564,6 +565,7 @@
               'retrace_limit': 5,
               'max_retrace_guards': 15,
               'max_unroll_loops': 0,
+              'disable_unrolling': 100,
               'enable_opts': 'all',
               'max_unroll_recursion': 7,
               }
diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py
--- a/rpython/rtyper/rclass.py
+++ b/rpython/rtyper/rclass.py
@@ -7,6 +7,7 @@
 from rpython.rlib.objectmodel import UnboxedValue
 from rpython.tool.pairtype import pairtype, pair
 from rpython.tool.identity_dict import identity_dict
+from rpython.tool.flattenrec import FlattenRecursion
 from rpython.rtyper.extregistry import ExtRegistryEntry
 from rpython.rtyper.error import TyperError
 from rpython.rtyper.lltypesystem import lltype
@@ -767,11 +768,14 @@
         self.initialize_prebuilt_data(Ellipsis, self.classdef, result)
         return result

+    _initialize_data_flattenrec = FlattenRecursion()
+
     def initialize_prebuilt_instance(self, value, classdef, result):
         # must fill in the hash cache before the other ones
         # (see test_circular_hash_initialization)
         self.initialize_prebuilt_hash(value, result)
-        self.initialize_prebuilt_data(value, classdef, result)
+        self._initialize_data_flattenrec(self.initialize_prebuilt_data,
+                                         value, classdef, result)

     def get_ll_hash_function(self):
         return ll_inst_hash
diff --git a/rpython/rtyper/test/test_rclass.py b/rpython/rtyper/test/test_rclass.py
--- a/rpython/rtyper/test/test_rclass.py
+++ b/rpython/rtyper/test/test_rclass.py
@@ -1279,3 +1279,16 @@
             return cls[k](a, b).b

         assert self.interpret(f, [1, 4, 7]) == 7
+
+    def test_flatten_convert_const(self):
+        # check that we can convert_const() a chain of more than 1000
+        # instances
+        class A(object):
+            def __init__(self, next):
+                self.next = next
+        a = None
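
Not part of the changeset above: the bookkeeper.py and rclass.py hunks both route a recursive step through a FlattenRecursion instance from rpython.tool.flattenrec, and the (truncated) test_flatten_convert_const test builds a chain of well over 1000 instances to exercise it. Below is a minimal, self-contained sketch of that call-flattening pattern; FlattenRecursionSketch, Node and visit are hypothetical names invented for illustration, and the real rpython.tool.flattenrec helper may be implemented differently.

# Hypothetical sketch only -- not the rpython.tool.flattenrec implementation.
# The idea: the instance is called like a function; nested calls made while an
# outer call is running are queued and drained by the outermost call, so a
# deeply recursive walk turns into a flat loop and cannot overflow the stack.

class FlattenRecursionSketch(object):
    def __init__(self):
        self.pending = None          # None means no call is currently running

    def __call__(self, func, *args):
        if self.pending is not None:
            # re-entrant call: defer it instead of recursing deeper
            self.pending.append((func, args))
            return
        self.pending = []
        try:
            func(*args)
            while self.pending:
                f, a = self.pending.pop(0)
                f(*a)
        finally:
            self.pending = None


class Node(object):                  # stands in for the chained prebuilt instances
    def __init__(self, next):
        self.next = next

flattenrec = FlattenRecursionSketch()
visited = []

def visit(node):
    if node is None:
        return
    visited.append(node)
    flattenrec(visit, node.next)     # a plain recursive call here would blow the stack

chain = None
for _ in range(5000):                # far deeper than CPython's default recursion limit
    chain = Node(chain)
visit(chain)
assert len(visited) == 5000

The hunks above use the real helper in the same way: one instance is created at class level, and the instance is then called with the function and its arguments instead of calling the function directly.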
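Also not part of the changeset: the warmspot.py, warmstate.py and rlib/jit.py hunks introduce a 'disable_unrolling' JIT parameter (default 100), which unroll.py reads as warmstate.disable_unrolling_threshold to abandon unrolling once a loop grows past that many operations. The following is a hedged sketch of how an RPython interpreter might tune it, assuming the existing rpython.rlib.jit.set_param entry point; the driver and the value 50 are made up for illustration.

from rpython.rlib import jit

driver = jit.JitDriver(greens=[], reds='auto')

def configure_jit():
    # Hypothetical tuning call: stop unrolling traces longer than 50
    # operations.  Passing None instead of `driver` would apply the
    # setting to every JitDriver.
    jit.set_param(driver, 'disable_unrolling', 50)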
