Author: mattip <[email protected]>
Branch: numpypy.float16
Changeset: r58550:0a4fa60c1207
Date: 2012-10-28 23:26 +0200
http://bitbucket.org/pypy/pypy/changeset/0a4fa60c1207/
Log: merge default into branch
diff --git a/py/_code/source.py b/py/_code/source.py
--- a/py/_code/source.py
+++ b/py/_code/source.py
@@ -118,7 +118,7 @@
# 1. find the start of the statement
from codeop import compile_command
end = None
- for start in range(lineno, -1, max(-1, lineno - 10)):
+ for start in range(lineno, -1, -1):
if assertion:
line = self.lines[start]
# the following lines are not fully tested, change with care
@@ -135,9 +135,9 @@
compile_command(trysource)
except (SyntaxError, OverflowError, ValueError):
continue
-
+
# 2. find the end of the statement
- for end in range(lineno+1, min(len(self)+1, lineno + 10)):
+ for end in range(lineno+1, len(self)+1):
trysource = self[start:end]
if trysource.isparseable():
return start, end
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py
--- a/pypy/jit/metainterp/optimizeopt/optimizer.py
+++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
@@ -1,17 +1,17 @@
from pypy.jit.metainterp import jitprof, resume, compile
from pypy.jit.metainterp.executor import execute_nonspec
-from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF, INT
+from pypy.jit.metainterp.history import BoxInt, BoxFloat, Const, ConstInt, REF
from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \
ImmutableIntUnbounded, \
IntLowerBound, MININT, MAXINT
-from pypy.jit.metainterp.optimizeopt.util import (make_dispatcher_method,
- args_dict)
+from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
from pypy.jit.metainterp.resoperation import rop, ResOperation, AbstractResOp
from pypy.jit.metainterp.typesystem import llhelper, oohelper
from pypy.tool.pairtype import extendabletype
-from pypy.rlib.debug import debug_start, debug_stop, debug_print
+from pypy.rlib.debug import debug_print
from pypy.rlib.objectmodel import specialize
+
LEVEL_UNKNOWN = '\x00'
LEVEL_NONNULL = '\x01'
LEVEL_KNOWNCLASS = '\x02' # might also mean KNOWNARRAYDESCR, for arrays
@@ -20,6 +20,8 @@
MODE_ARRAY = '\x00'
MODE_STR = '\x01'
MODE_UNICODE = '\x02'
+
+
class LenBound(object):
def __init__(self, mode, descr, bound):
self.mode = mode
diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py
--- a/pypy/jit/metainterp/optimizeopt/rewrite.py
+++ b/pypy/jit/metainterp/optimizeopt/rewrite.py
@@ -1,8 +1,11 @@
from pypy.jit.codewriter.effectinfo import EffectInfo
-from pypy.jit.metainterp.history import ConstInt, make_hashable_int
+from pypy.jit.metainterp import compile
+from pypy.jit.metainterp.history import (Const, ConstInt, BoxInt, BoxFloat,
+ BoxPtr, make_hashable_int)
from pypy.jit.metainterp.optimize import InvalidLoop
from pypy.jit.metainterp.optimizeopt.intutils import IntBound
-from pypy.jit.metainterp.optimizeopt.optimizer import *
+from pypy.jit.metainterp.optimizeopt.optimizer import (Optimization, REMOVED,
+ CONST_0, CONST_1)
from pypy.jit.metainterp.optimizeopt.util import _findall, make_dispatcher_method
from pypy.jit.metainterp.resoperation import (opboolinvers, opboolreflex, rop, ResOperation)
@@ -426,14 +429,31 @@
source_start_box = self.get_constant_box(op.getarg(3))
dest_start_box = self.get_constant_box(op.getarg(4))
length = self.get_constant_box(op.getarg(5))
- if (source_value.is_virtual() and source_start_box and dest_start_box
- and length and (dest_value.is_virtual() or length.getint() <= 8)):
+ if (source_start_box and dest_start_box
+ and length and (dest_value.is_virtual() or length.getint() <= 8) and
+ (source_value.is_virtual() or length.getint() <= 8)):
from pypy.jit.metainterp.optimizeopt.virtualize import VArrayValue
- assert isinstance(source_value, VArrayValue)
source_start = source_start_box.getint()
dest_start = dest_start_box.getint()
for index in range(length.getint()):
- val = source_value.getitem(index + source_start)
+ # XXX fish fish fish
+ arraydescr = op.getdescr().get_extra_info().write_descrs_arrays[0]
+ if source_value.is_virtual():
+ assert isinstance(source_value, VArrayValue)
+ val = source_value.getitem(index + source_start)
+ else:
+ if arraydescr.is_array_of_pointers():
+ resbox = BoxPtr()
+ elif arraydescr.is_array_of_floats():
+ resbox = BoxFloat()
+ else:
+ resbox = BoxInt()
+ newop = ResOperation(rop.GETARRAYITEM_GC,
+ [op.getarg(1),
+ ConstInt(index + source_start)], resbox,
+ descr=arraydescr)
+ self.optimizer.propagate_forward(newop)
+ val = self.getvalue(resbox)
if dest_value.is_virtual():
dest_value.setitem(index + dest_start, val)
else:
@@ -441,7 +461,7 @@
[op.getarg(2),
ConstInt(index + dest_start),
val.get_key_box()], None,
- descr=source_value.arraydescr)
+ descr=arraydescr)
self.emit_operation(newop)
return True
if length and length.getint() == 0:
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -3239,6 +3239,42 @@
'''
self.optimize_loop(ops, expected)
+ def test_arraycopy_not_virtual_2(self):
+ ops = '''
+ [p0]
+ p1 = new_array(3, descr=arraydescr)
+ call(0, p0, p1, 0, 0, 3, descr=arraycopydescr)
+ i0 = getarrayitem_gc(p1, 0, descr=arraydescr)
+ jump(i0)
+ '''
+ expected = '''
+ [p0]
+ i0 = getarrayitem_gc(p0, 0, descr=arraydescr)
+ i1 = getarrayitem_gc(p0, 1, descr=arraydescr) # removed by the backend
+ i2 = getarrayitem_gc(p0, 2, descr=arraydescr) # removed by the backend
+ jump(i0)
+ '''
+ self.optimize_loop(ops, expected)
+
+ def test_arraycopy_not_virtual_3(self):
+ ops = '''
+ [p0, p1]
+ call(0, p0, p1, 0, 0, 3, descr=arraycopydescr)
+ i0 = getarrayitem_gc(p1, 0, descr=arraydescr)
+ jump(i0)
+ '''
+ expected = '''
+ [p0, p1]
+ i0 = getarrayitem_gc(p0, 0, descr=arraydescr)
+ i1 = getarrayitem_gc(p0, 1, descr=arraydescr)
+ i2 = getarrayitem_gc(p0, 2, descr=arraydescr)
+ setarrayitem_gc(p1, 0, i0, descr=arraydescr)
+ setarrayitem_gc(p1, 1, i1, descr=arraydescr)
+ setarrayitem_gc(p1, 2, i2, descr=arraydescr)
+ jump(i0)
+ '''
+ self.optimize_loop(ops, expected)
+
def test_arraycopy_no_elem(self):
""" this was actually observed in the wild
"""
diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py
--- a/pypy/jit/metainterp/optimizeopt/unroll.py
+++ b/pypy/jit/metainterp/optimizeopt/unroll.py
@@ -4,6 +4,7 @@
from pypy.jit.metainterp.history import TreeLoop, TargetToken, JitCellToken
from pypy.jit.metainterp.jitexc import JitException
from pypy.jit.metainterp.optimize import InvalidLoop
+from pypy.rlib.debug import debug_print, debug_start, debug_stop
from pypy.jit.metainterp.optimizeopt.optimizer import *
from pypy.jit.metainterp.optimizeopt.generalize import KillHugeIntBounds
from pypy.jit.metainterp.inliner import Inliner
diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py
--- a/pypy/jit/metainterp/test/test_list.py
+++ b/pypy/jit/metainterp/test/test_list.py
@@ -128,6 +128,17 @@
res = self.interp_operations(f, [], listops=True)
assert res == 10
+ def test_arraycopy_bug(self):
+ def f():
+ l = [1, 2, 3, 4]
+ l2 = [1, 2, 3, 4]
+ l[2] = 13
+ l2[0:len(l2)] = l[:]
+ return l2[0] + l2[1] + l2[2] + l2[3]
+
+ res = self.interp_operations(f, [], listops=True)
+ assert res == f()
+
def test_arraycopy_full(self):
jitdriver = JitDriver(greens = [], reds = ['n'])
def f(n):
diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py
--- a/pypy/module/micronumpy/arrayimpl/concrete.py
+++ b/pypy/module/micronumpy/arrayimpl/concrete.py
@@ -10,6 +10,7 @@
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.rlib import jit
from pypy.rlib.rawstorage import free_raw_storage
+from pypy.rlib.debug import make_sure_not_resized
class ConcreteArrayIterator(base.BaseArrayIterator):
def __init__(self, array):
@@ -379,6 +380,9 @@
class ConcreteArray(BaseConcreteArray):
def __init__(self, shape, dtype, order, strides, backstrides):
+ make_sure_not_resized(shape)
+ make_sure_not_resized(strides)
+ make_sure_not_resized(backstrides)
self.shape = shape
self.size = support.product(shape) * dtype.get_size()
self.storage = dtype.itemtype.malloc(self.size)
@@ -387,8 +391,8 @@
self.strides = strides
self.backstrides = backstrides
- def create_iter(self, shape):
- if shape == self.get_shape():
+ def create_iter(self, shape=None):
+ if shape is None or shape == self.get_shape():
return ConcreteArrayIterator(self)
r = calculate_broadcast_strides(self.strides, self.backstrides,
self.get_shape(), shape)
@@ -424,8 +428,8 @@
def fill(self, box):
loop.fill(self, box.convert_to(self.dtype))
- def create_iter(self, shape):
- if shape != self.get_shape():
+ def create_iter(self, shape=None):
+ if shape is not None and shape != self.get_shape():
r = calculate_broadcast_strides(self.strides, self.backstrides,
self.get_shape(), shape)
return MultiDimViewIterator(self.parent,
diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py
--- a/pypy/module/micronumpy/arrayimpl/scalar.py
+++ b/pypy/module/micronumpy/arrayimpl/scalar.py
@@ -34,7 +34,7 @@
def get_shape(self):
return []
- def create_iter(self, shape):
+ def create_iter(self, shape=None):
return ScalarIterator(self.value)
def get_scalar_value(self):
diff --git a/pypy/module/micronumpy/dot.py b/pypy/module/micronumpy/dot.py
--- a/pypy/module/micronumpy/dot.py
+++ b/pypy/module/micronumpy/dot.py
@@ -11,12 +11,12 @@
right_critical_dim = len(right_shape) - 2
right_critical_dim_size = right_shape[right_critical_dim]
assert right_critical_dim >= 0
- out_shape += left_shape[:-1] + \
- right_shape[0:right_critical_dim] + \
- right_shape[right_critical_dim + 1:]
+ out_shape = out_shape + left_shape[:-1] + \
+ right_shape[0:right_critical_dim] + \
+ right_shape[right_critical_dim + 1:]
elif len(right_shape) > 0:
#dot does not reduce for scalars
- out_shape += left_shape[:-1]
+ out_shape = out_shape + left_shape[:-1]
if my_critical_dim_size != right_critical_dim_size:
raise OperationError(space.w_ValueError, space.wrap(
"objects are not aligned"))
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -25,7 +25,7 @@
shape = []
for w_item in space.fixedview(w_size):
shape.append(space.int_w(w_item))
- return shape
+ return shape[:]
class __extend__(W_NDimArray):
@jit.unroll_safe
@@ -190,7 +190,7 @@
return space.call_function(cache.w_array_str, self)
def dump_data(self):
- i = self.create_iter(self.get_shape())
+ i = self.create_iter()
first = True
dtype = self.get_dtype()
s = StringBuilder()
@@ -206,8 +206,6 @@
return s.build()
def create_iter(self, shape=None):
- if shape is None:
- shape = self.get_shape()
return self.implementation.create_iter(shape)
def create_axis_iter(self, shape, dim):
@@ -396,7 +394,7 @@
if self.get_size() > 1:
raise OperationError(space.w_ValueError, space.wrap(
"The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()"))
- iter = self.create_iter(self.get_shape())
+ iter = self.create_iter()
return space.wrap(space.is_true(iter.getitem()))
def _binop_impl(ufunc_name):
@@ -681,7 +679,7 @@
if ndmin > len(shape):
shape = [1] * (ndmin - len(shape)) + shape
arr = W_NDimArray.from_shape(shape, dtype, order=order)
- arr_iter = arr.create_iter(arr.get_shape())
+ arr_iter = arr.create_iter()
for w_elem in elems_w:
arr_iter.setitem(dtype.coerce(space, w_elem))
arr_iter.next()
diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py
--- a/pypy/module/micronumpy/loop.py
+++ b/pypy/module/micronumpy/loop.py
@@ -89,7 +89,7 @@
reds = ['obj', 'obj_iter', 'cur_value'])
def compute_reduce(obj, calc_dtype, func, done_func, identity):
- obj_iter = obj.create_iter(obj.get_shape())
+ obj_iter = obj.create_iter()
if identity is None:
cur_value = obj_iter.getitem().convert_to(calc_dtype)
obj_iter.next()
@@ -109,7 +109,7 @@
return cur_value
def fill(arr, box):
- arr_iter = arr.create_iter(arr.get_shape())
+ arr_iter = arr.create_iter()
while not arr_iter.done():
arr_iter.setitem(box)
arr_iter.next()
@@ -159,7 +159,7 @@
def do_axis_reduce(shape, func, arr, dtype, axis, out, identity):
out_iter = out.create_axis_iter(arr.get_shape(), axis)
- arr_iter = arr.create_iter(arr.get_shape())
+ arr_iter = arr.create_iter()
if identity is not None:
identity = identity.convert_to(dtype)
shapelen = len(shape)
@@ -192,7 +192,7 @@
result = 0
idx = 1
dtype = arr.get_dtype()
- iter = arr.create_iter(arr.get_shape())
+ iter = arr.create_iter()
cur_best = iter.getitem()
iter.next()
shapelen = len(arr.get_shape())
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -15,16 +15,22 @@
jit.isconstant(len(chunks))
)
def calculate_slice_strides(shape, start, strides, backstrides, chunks):
- rstrides = []
- rbackstrides = []
+ size = 0
+ for chunk in chunks:
+ if chunk.step != 0:
+ size += 1
+ rstrides = [0] * size
+ rbackstrides = [0] * size
rstart = start
- rshape = []
+ rshape = [0] * size
i = -1
+ j = 0
for i, chunk in enumerate_chunks(chunks):
if chunk.step != 0:
- rstrides.append(strides[i] * chunk.step)
- rbackstrides.append(strides[i] * (chunk.lgt - 1) * chunk.step)
- rshape.append(chunk.lgt)
+ rstrides[j] = strides[i] * chunk.step
+ rbackstrides[j] = strides[i] * (chunk.lgt - 1) * chunk.step
+ rshape[j] = chunk.lgt
+ j += 1
rstart += strides[i] * chunk.start
# add a reminder
s = i + 1
@@ -64,13 +70,13 @@
while True:
new_batch = []
if not batch:
- return shape, []
+ return shape[:], []
if is_single_elem(space, batch[0], is_rec_type):
for w_elem in batch:
if not is_single_elem(space, w_elem, is_rec_type):
raise OperationError(space.w_ValueError, space.wrap(
"setting an array element with a sequence"))
- return shape, batch
+ return shape[:], batch
size = space.len_w(batch[0])
for w_elem in batch:
if (is_single_elem(space, w_elem, is_rec_type) or
@@ -255,19 +261,19 @@
cur_step = steps[oldI]
n_old_elems_to_use *= old_shape[oldI]
assert len(new_strides) == len(new_shape)
- return new_strides
+ return new_strides[:]
def calculate_dot_strides(strides, backstrides, res_shape, skip_dims):
- rstrides = []
- rbackstrides = []
- j=0
+ rstrides = [0] * len(res_shape)
+ rbackstrides = [0] * len(res_shape)
+ j = 0
for i in range(len(res_shape)):
if i in skip_dims:
- rstrides.append(0)
- rbackstrides.append(0)
+ rstrides[i] = 0
+ rbackstrides[i] = 0
else:
- rstrides.append(strides[j])
- rbackstrides.append(backstrides[j])
+ rstrides[i] = strides[j]
+ rbackstrides[i] = backstrides[j]
j += 1
return rstrides, rbackstrides
diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py
--- a/pypy/rlib/objectmodel.py
+++ b/pypy/rlib/objectmodel.py
@@ -188,7 +188,7 @@
src = py.code.Source("""
def %(name)s(%(arglist)s):
if not we_are_translated():
- typecheck(%(arglist)s)
+ typecheck(%(arglist)s) # pypy.rlib.objectmodel
return %(name)s_original(%(arglist)s)
""" % dict(name=f.func_name, arglist=arglist))
#
diff --git a/pypy/rlib/rgc.py b/pypy/rlib/rgc.py
--- a/pypy/rlib/rgc.py
+++ b/pypy/rlib/rgc.py
@@ -145,8 +145,11 @@
from pypy.rlib.objectmodel import keepalive_until_here
# XXX: Hack to ensure that we get a proper effectinfo.write_descrs_arrays
- if NonConstant(False):
- dest[dest_start] = source[source_start]
+ # and also, maybe, speed up very small cases
+ if length <= 1:
+ if length == 1:
+ dest[dest_start] = source[source_start]
+ return
# supports non-overlapping copies only
if not we_are_translated():
diff --git a/pypy/rlib/test/test_rgc.py b/pypy/rlib/test/test_rgc.py
--- a/pypy/rlib/test/test_rgc.py
+++ b/pypy/rlib/test/test_rgc.py
@@ -134,6 +134,23 @@
assert check.called
+def test_ll_arraycopy_small():
+ TYPE = lltype.GcArray(lltype.Signed)
+ for length in range(5):
+ a1 = lltype.malloc(TYPE, 10)
+ a2 = lltype.malloc(TYPE, 6)
+ org1 = range(20, 30)
+ org2 = range(50, 56)
+ for i in range(len(a1)): a1[i] = org1[i]
+ for i in range(len(a2)): a2[i] = org2[i]
+ rgc.ll_arraycopy(a1, a2, 4, 2, length)
+ for i in range(10):
+ assert a1[i] == org1[i]
+ for i in range(6):
+ if 2 <= i < 2 + length:
+ assert a2[i] == a1[i+2]
+ else:
+ assert a2[i] == org2[i]
def test_ll_shrink_array_1():
diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py
--- a/pypy/rpython/lltypesystem/lltype.py
+++ b/pypy/rpython/lltypesystem/lltype.py
@@ -1651,10 +1651,7 @@
if n < 0:
raise ValueError, "negative array length"
_parentable.__init__(self, TYPE)
- try:
- myrange = range(n)
- except OverflowError:
- raise MemoryError("definitely too many items")
+ myrange = self._check_range(n)
self.items = [TYPE.OF._allocate(initialization=initialization,
parent=self, parentindex=j)
for j in myrange]
@@ -1664,6 +1661,14 @@
def __repr__(self):
return '<%s>' % (self,)
+ def _check_range(self, n):
+ # checks that it's ok to make an array of size 'n', and returns
+ # range(n). Explicitly overridden by some tests.
+ try:
+ return range(n)
+ except OverflowError:
+ raise MemoryError("definitely too many items")
+
def _str_item(self, item):
if isinstance(item, _uninitialized):
return '#'
diff --git a/pypy/rpython/lltypesystem/rdict.py b/pypy/rpython/lltypesystem/rdict.py
--- a/pypy/rpython/lltypesystem/rdict.py
+++ b/pypy/rpython/lltypesystem/rdict.py
@@ -4,6 +4,7 @@
rtype_newdict)
from pypy.rpython.lltypesystem import lltype
from pypy.rlib import objectmodel, jit
+from pypy.rlib.debug import ll_assert
from pypy.rlib.rarithmetic import r_uint, intmask, LONG_BIT
from pypy.rpython import rmodel
from pypy.rpython.error import TyperError
@@ -462,22 +463,30 @@
def _ll_dict_setitem_lookup_done(d, key, value, hash, i):
valid = (i & HIGHEST_BIT) == 0
i = i & MASK
- everused = d.entries.everused(i)
- # set up the new entry
ENTRY = lltype.typeOf(d.entries).TO.OF
entry = d.entries[i]
- entry.value = value
- if valid:
- return
+ if not d.entries.everused(i):
+ # a new entry that was never used before
+ ll_assert(not valid, "valid but not everused")
+ rc = d.resize_counter - 3
+ if rc <= 0: # if needed, resize the dict -- before the insertion
+ ll_dict_resize(d)
+ i = ll_dict_lookup_clean(d, hash) # then redo the lookup for 'key'
+ entry = d.entries[i]
+ rc = d.resize_counter - 3
+ ll_assert(rc > 0, "ll_dict_resize failed?")
+ d.resize_counter = rc
+ if hasattr(ENTRY, 'f_everused'): entry.f_everused = True
+ entry.value = value
+ else:
+ # override an existing or deleted entry
+ entry.value = value
+ if valid:
+ return
entry.key = key
if hasattr(ENTRY, 'f_hash'): entry.f_hash = hash
if hasattr(ENTRY, 'f_valid'): entry.f_valid = True
d.num_items += 1
- if not everused:
- if hasattr(ENTRY, 'f_everused'): entry.f_everused = True
- d.resize_counter -= 3
- if d.resize_counter <= 0:
- ll_dict_resize(d)
def ll_dict_insertclean(d, key, value, hash):
# Internal routine used by ll_dict_resize() to insert an item which is
@@ -534,8 +543,9 @@
# make a 'new_size' estimate and shrink it if there are many
# deleted entry markers. See CPython for why it is a good idea to
# quadruple the dictionary size as long as it's not too big.
- if d.num_items > 50000: new_estimate = d.num_items * 2
- else: new_estimate = d.num_items * 4
+ num_items = d.num_items + 1
+ if num_items > 50000: new_estimate = num_items * 2
+ else: new_estimate = num_items * 4
new_size = DICT_INITSIZE
while new_size <= new_estimate:
new_size *= 2
diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py
--- a/pypy/rpython/test/test_rdict.py
+++ b/pypy/rpython/test/test_rdict.py
@@ -970,6 +970,39 @@
DICT = lltype.typeOf(llres.item1)
assert sorted(DICT.TO.entries.TO.OF._flds) == ['f_hash', 'key', 'value']
+ def test_memoryerror_should_not_insert(self):
+ # This shows a misbehaviour that also exists in CPython 2.7, but not
+ # any more in CPython 3.3. The behaviour is that even if a dict
+ # insertion raises MemoryError, the new item is still inserted.
+ # If we catch the MemoryError, we can keep inserting new items until
+ # the dict table is completely full. Then the next insertion loops
+ # forever. This test only checks that after a MemoryError the
+ # new item was not inserted.
+ def _check_small_range(self, n):
+ if n >= 128:
+ raise MemoryError
+ return range(n)
+ original_check_range = lltype._array._check_range
+ try:
+ lltype._array._check_range = _check_small_range
+ #
+ def do_insert(d, i):
+ d[i] = i
+ def func():
+ d = {}
+ i = 0
+ while True:
+ try:
+ do_insert(d, i)
+ except MemoryError:
+ return (i in d)
+ i += 1
+ res = self.interpret(func, [])
+ assert res == 0
+ #
+ finally:
+ lltype._array._check_range = original_check_range
+
# ____________________________________________________________
diff --git a/pypy/translator/goal/test2/test_app_main.py b/pypy/translator/goal/test2/test_app_main.py
--- a/pypy/translator/goal/test2/test_app_main.py
+++ b/pypy/translator/goal/test2/test_app_main.py
@@ -689,6 +689,10 @@
child_out_err.close()
def test_proper_sys_path(self, tmpdir):
+ data = self.run('-c "import _ctypes"', python_flags='-S')
+ if data.startswith('Traceback'):
+ py.test.skip("'python -S' cannot import extension modules: "
+ "see probably http://bugs.python.org/issue586680")
@contextmanager
def chdir_and_unset_pythonpath(new_cwd):
_______________________________________________
pypy-commit mailing list
[email protected]
http://mail.python.org/mailman/listinfo/pypy-commit