Author: Carl Friedrich Bolz <[email protected]>
Branch: typed-cells
Changeset: r75651:153478e63e23
Date: 2015-02-02 16:31 +0100
http://bitbucket.org/pypy/pypy/changeset/153478e63e23/

Log:    merge default

diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -197,6 +197,55 @@
 (now-dead) object are still true about the new object.
 
 
+
+Would type annotations help PyPy's performance?
+-----------------------------------------------
+
+Two examples of type annotations that are being proposed for improved
+performance are `Cython types`__ and `PEP 484 - Type Hints`__.
+
+.. __: 
http://docs.cython.org/src/reference/language_basics.html#declaring-data-types
+.. __: https://www.python.org/dev/peps/pep-0484/
+
+**Cython types** are, by construction, similar to C declarations.  For
+example, a local variable or an instance attribute can be declared
+``"cdef int"`` to force a machine word to be used.  This changes the
+usual Python semantics (e.g. no overflow checks, and errors when
+trying to write other types of objects there).  It gives some extra
+performance, but the exact benefits are unclear: right now
+(January 2015) for example we are investigating a technique that would
+store machine-word integers directly on instances, giving part of the
+benefits without the user-supplied ``"cdef int"``.
+
+**PEP 484 - Type Hints,** on the other hand, is almost entirely
+useless if you're looking at performance.  First, as the name implies,
+they are *hints:* they must still be checked at runtime, like PEP 484
+says.  Or maybe you're fine with a mode in which you get very obscure
+crashes when the type annotations are wrong; but even in that case the
+speed benefits would be extremely minor.
+
+There are several reasons why.  One of them is that annotations
+are at the wrong level (e.g. a PEP 484 "int" corresponds to Python 3's
+int type, which does not necessarily fit inside one machine word;
+even worse, an "int" annotation allows arbitrary int subclasses).
+Another is that a lot more information is needed to produce good code
+(e.g. "this ``f()`` called here really means this function there, and
+will never be monkey-patched" -- same with ``len()`` or ``list()``,
+btw).  The third reason is that some "guards" in PyPy's JIT traces
+don't really have an obvious corresponding type (e.g. "this dict is so
+far using keys which don't override ``__hash__`` so a more efficient
+implementation was used").  Many guards don't even have any correspondence
+with types at all ("this class attribute was not modified"; "the loop
+counter did not reach zero so we don't need to release the GIL"; and
+so on).
+
+As PyPy works right now, it is able to derive far more useful
+information than can ever be given by PEP 484, and it works
+automatically.  As far as we know, this is true even if we would add
+other techniques to PyPy, like a fast first-pass JIT.
+
+
+
 .. _`prolog and javascript`:
 
 Can I use PyPy's translation toolchain for other languages besides Python?
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1017,6 +1017,9 @@
     def newlist_unicode(self, list_u):
         return self.newlist([self.wrap(u) for u in list_u])
 
+    def newlist_int(self, list_i):
+        return self.newlist([self.wrap(i) for i in list_i])
+
     def newlist_hint(self, sizehint):
         from pypy.objspace.std.listobject import make_empty_list_with_size
         return make_empty_list_with_size(self, sizehint)
diff --git a/pypy/module/cpyext/include/patchlevel.h 
b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -29,7 +29,7 @@
 #define PY_VERSION             "2.7.8"
 
 /* PyPy version as a string */
-#define PYPY_VERSION "2.5.0-alpha0"
+#define PYPY_VERSION "2.6.0-alpha0"
 
 /* Subversion Revision number of this file (not of the repository).
  * Empty since Mercurial migration. */
diff --git a/pypy/module/cpyext/ndarrayobject.py 
b/pypy/module/cpyext/ndarrayobject.py
--- a/pypy/module/cpyext/ndarrayobject.py
+++ b/pypy/module/cpyext/ndarrayobject.py
@@ -291,6 +291,6 @@
               Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t], 
PyObject)
 def PyUFunc_FromFuncAndData(space, funcs, data, types, ntypes,
                     nin, nout, identity, name, doc, check_return):
-    w_signature = ""
+    w_signature =  ','.join(['()'] * nin) + '->' + ','.join(['()'] * nout)
     return do_ufunc(space, funcs, data, types, ntypes, nin, nout, identity,
                     name, doc, check_return, w_signature)
diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py
--- a/pypy/module/micronumpy/ctors.py
+++ b/pypy/module/micronumpy/ctors.py
@@ -82,9 +82,18 @@
             return w_object.descr_copy(space, w_order)
         elif not copy and (subok or type(w_object) is W_NDimArray):
             return w_object
-
-    # not an array or incorrect dtype
-    shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype)
+    if isinstance(w_object, W_NDimArray) and copy and not subok:
+        # TODO do the loop.assign without copying elems_w
+        shape = w_object.get_shape()
+        _elems_w = w_object.reshape(space, space.wrap(-1))
+        elems_w = [None] * w_object.get_size()
+        for i in range(len(elems_w)):
+            elems_w[i] = _elems_w.descr_getitem(space, space.wrap(i))
+        if space.is_none(w_dtype):
+            dtype = w_object.get_dtype()
+    else:
+        # not an array
+        shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype)
     if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1):
         dtype = strides.find_dtype_for_seq(space, elems_w, dtype)
         if dtype is None:
diff --git a/pypy/module/micronumpy/ndarray.py 
b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -430,9 +430,15 @@
             order = 'C'
         else:
             order = space.str_w(w_order)
+        if order == 'K' and is_c_contiguous(self.implementation):
+            for s in  self.implementation.get_strides():
+                if s < 0:
+                    break
+            else:
+                order = 'C'
         if order != 'C':
             raise OperationError(space.w_NotImplementedError, space.wrap(
-                "order not implemented"))
+                "order != 'C' only partially implemented"))
         return self.reshape(space, space.wrap(-1))
 
     @unwrap_spec(w_axis=WrappedDefault(None),
diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py
--- a/pypy/module/micronumpy/nditer.py
+++ b/pypy/module/micronumpy/nditer.py
@@ -252,10 +252,6 @@
     # Copy logic from npyiter_coalesce_axes, used in ufunc iterators
     # and in nditer's with 'external_loop' flag
     can_coalesce = True
-    if it.order == 'F':
-        fastest = 0
-    else:
-        fastest = -1
     for idim in range(it.ndim - 1):
         for op_it, _ in it.iters:
             if op_it is None:
@@ -275,7 +271,7 @@
         if can_coalesce:
             for i in range(len(it.iters)):
                 new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], it,
-                                         it.order, fastest)
+                                         it.order)
                 it.iters[i] = (new_iter, new_iter.reset())
             if len(it.shape) > 1:
                 if it.order == 'F':
@@ -289,7 +285,7 @@
             break
     # Always coalesce at least one
     for i in range(len(it.iters)):
-        new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], it, 'C', -1)
+        new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], it, 'C')
         it.iters[i] = (new_iter, new_iter.reset())
     if len(it.shape) > 1:
         if it.order == 'F':
@@ -300,12 +296,11 @@
         it.shape = [1]
 
 
-def coalesce_iter(old_iter, op_flags, it, order, fastest=-1, flat=True):
+def coalesce_iter(old_iter, op_flags, it, order, flat=True):
     '''
     We usually iterate through an array one value at a time.
     But after coalesce(), getoperand() will return a slice by removing
-    the fastest varying dimension from the beginning or end of the shape.
-    XXX - what happens on swapaxis arrays?
+    the fastest varying dimension(s) from the beginning or end of the shape.
     If flat is true, then the slice will be 1d, otherwise stack up the shape of
     the fastest varying dimension in the slice, so an iterator of a  'C' array 
     of shape (2,4,3) after two calls to coalesce will iterate 2 times over a 
slice
@@ -319,6 +314,9 @@
         new_strides = strides[1:]
         new_backstrides = backstrides[1:]
         _stride = old_iter.slice_stride + [strides[0]]
+        _shape =  old_iter.slice_shape + [shape[0]]
+        _backstride = old_iter.slice_backstride + [strides[0] * (shape[0] - 1)]
+        fastest = shape[0]
     else:
         new_shape = shape[:-1]
         new_strides = strides[:-1]
@@ -326,14 +324,15 @@
         # use the operand's iterator's rightmost stride,
         # even if it is not the fastest (for 'F' or swapped axis)
         _stride = [strides[-1]] + old_iter.slice_stride
-    _shape = [shape[fastest]]  + old_iter.slice_shape
-    _backstride = [(_shape[fastest] - 1) * _stride[0]] + 
old_iter.slice_backstride
+        _shape = [shape[-1]]  + old_iter.slice_shape
+        _backstride = [(shape[-1] - 1) * strides[-1]] + 
old_iter.slice_backstride
+        fastest = shape[-1]
     if flat:
         _shape = [support.product(_shape)]
         if len(_stride) > 1:
             _stride = [min(_stride[0], _stride[1])]
         _backstride = [(shape[0] - 1) * _stride[0]]
-    return SliceIter(old_iter.array, old_iter.size / shape[fastest],
+    return SliceIter(old_iter.array, old_iter.size / fastest,
                 new_shape, new_strides, new_backstrides,
                 _shape, _stride, _backstride, op_flags, it)
 
diff --git a/pypy/module/micronumpy/test/test_ndarray.py 
b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -2994,6 +2994,7 @@
         assert (arange(3).ravel() == arange(3)).all()
         assert (arange(6).reshape(2, 3).ravel() == arange(6)).all()
         assert (arange(6).reshape(2, 3).T.ravel() == [0, 3, 1, 4, 2, 5]).all()
+        assert (arange(3).ravel('K') == arange(3)).all()
 
     def test_nonzero(self):
         from numpy import array
diff --git a/pypy/module/micronumpy/test/test_subtype.py 
b/pypy/module/micronumpy/test/test_subtype.py
--- a/pypy/module/micronumpy/test/test_subtype.py
+++ b/pypy/module/micronumpy/test/test_subtype.py
@@ -304,10 +304,13 @@
                         out.shape = (sh, 1)
                     else:
                         out.shape = (1, sh)
-                print 'out, shape was',old_shape,'now',out.shape
+                #print 'out, shape was',old_shape,'now',out.shape,'out',out
                 return out
-        a = matrix([[1., 2.]])
+        a = matrix([[1., 2.], [3., 4.]])
         b = N.array([a])
+        assert (b == a).all()
+        b = N.array(a)
+        assert len(b.shape) == 2
 
     def test_setstate_no_version(self):
         # Some subclasses of ndarray, like MaskedArray, do not use
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py 
b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -7,6 +7,7 @@
 from pypy.module.micronumpy.concrete import VoidBoxStorage
 from pypy.interpreter.gateway import interp2app
 from pypy.conftest import option
+from pypy.interpreter.error import OperationError
 
 
 class TestUfuncCoercion(object):
@@ -129,7 +130,10 @@
                                              '', ufunc.dtypes)
         assert index == 0
         assert dtypes == [f32_dtype, c64_dtype]
-
+        raises(OperationError, ufunc.type_resolver, space, [f32_array], [None],
+                                'u->u', ufunc.dtypes)
+        exc = raises(OperationError, ufunc.type_resolver, space, [f32_array], 
[None],
+                                'i->i', ufunc.dtypes)
 
 class AppTestUfuncs(BaseNumpyAppTest):
     def test_constants(self):
@@ -169,8 +173,7 @@
                             dtypes=[int, int, int, float, float, float])
             int_func22 = frompyfunc([int, int], 2, 2, 
signature='(i),(i)->(i),(i)',
                                     dtypes=['match'])
-            int_func12 = frompyfunc([int], 1, 2, signature='(i)->(i),(i)',
-                                    dtypes=['match'])
+            int_func12 = frompyfunc([int], 1, 2, dtypes=['match'])
             retype = dtype(int)
         a = arange(10)
         assert isinstance(adder_ufunc1, ufunc)
@@ -223,6 +226,7 @@
             assert len(in_array.shape) == 2
             assert in_array.shape == out_array.shape
             out_array[:] = in_array * 2
+
         from numpy import frompyfunc, dtype, arange
         ufunc = frompyfunc([times_2], 1, 1,
                             signature='(m,n)->(n,m)',
@@ -233,6 +237,7 @@
         ai3 = ufunc(ai[0,:,:])
         ai2 = ufunc(ai)
         assert (ai2 == ai * 2).all()
+
         ufunc = frompyfunc([times_2], 1, 1,
                             signature='(m,m)->(m,m)',
                             dtypes=[dtype(int), dtype(int)],
@@ -245,6 +250,21 @@
         ai2 = ufunc(ai)
         assert (ai2 == ai * 2).all()
 
+    def test_frompyfunc_needs_nditer(self):
+        def summer(in0):
+            print 'in summer, in0=',in0,'in0.shape=',in0.shape
+            return in0.sum()
+
+        from numpy import frompyfunc, dtype, arange
+        ufunc = frompyfunc([summer], 1, 1,
+                            signature='(m,m)->()',
+                            dtypes=[dtype(int), dtype(int)],
+                            stack_inputs=False,
+                          )
+        ai = arange(12, dtype=int).reshape(3, 2, 2)
+        ao = ufunc(ai)
+        assert ao.size == 3
+
     def test_frompyfunc_sig_broadcast(self):
         def sum_along_0(in_array, out_array):
             out_array[...] = in_array.sum(axis=0)
@@ -269,6 +289,26 @@
         aout = ufunc_sum(ai)
         assert aout.shape == (3, 3)
 
+    def test_frompyfunc_fortran(self):
+        import numpy as np
+        def tofrom_fortran(in0, out0):
+            out0[:] = in0.T
+
+        def lapack_like_times2(in0, out0):
+            a = np.empty(in0.T.shape, in0.dtype)
+            tofrom_fortran(in0, a)
+            a *= 2
+            tofrom_fortran(a, out0)
+
+        times2 = np.frompyfunc([lapack_like_times2], 1, 1,
+                            signature='(m,n)->(m,n)',
+                            dtypes=[np.dtype(float), np.dtype(float)],
+                            stack_inputs=True,
+                          )
+        in0 = np.arange(3300, dtype=float).reshape(100, 33)
+        out0 = times2(in0)
+        assert out0.shape == in0.shape
+        assert (out0 == in0 * 2).all()
 
     def test_ufunc_kwargs(self):
         from numpy import ufunc, frompyfunc, arange, dtype
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -520,7 +520,9 @@
     '''
     _immutable_fields_ = ["funcs", "dtypes", "data", "match_dtypes"]
 
-    def __init__(self, space, funcs, name, identity, nin, nout, dtypes, 
signature, match_dtypes=False, stack_inputs=False):
+    def __init__(self, space, funcs, name, identity, nin, nout, dtypes,
+                 signature, match_dtypes=False, stack_inputs=False,
+                 external_loop=False):
         # XXX make sure funcs, signature, dtypes, nin, nout are consistent
 
         # These don't matter, we use the signature and dtypes for determining
@@ -549,6 +551,7 @@
         self.core_num_dims = [0] * self.nargs  # number of core dimensions of 
each nargs
         self.core_offsets = [0] * self.nargs
         self.core_dim_ixs = [] # indices into unique shapes for each arg
+        self.external_loop = external_loop
 
     def reduce(self, space, w_obj, w_axis, keepdims=False, out=None, 
dtype=None,
                cumulative=False):
@@ -586,29 +589,21 @@
                     _dtypes.append(_dtypes[0])
         index, dtypes = self.type_resolver(space, inargs, outargs, sig, 
_dtypes)
         func = self.funcs[index]
-        if not self.core_enabled:
-            # func is going to do all the work, it must accept W_NDimArray args
-            inargs0 = inargs[0]
-            assert isinstance(inargs0, W_NDimArray)
-            arg_shapes = [inargs0.get_shape()] * self.nargs
-            inargs, outargs, need_to_cast = self.alloc_args(space, inargs, 
outargs,
-                                              dtypes, arg_shapes)
-            for tf in need_to_cast:
-                if tf:
-                    raise oefmt(space.w_NotImplementedError, "casting not 
supported yet")
-            if self.stack_inputs:
-                arglist = space.newlist(list(inargs + outargs))
-                space.call_args(func, Arguments.frompacked(space, arglist))
-            else:
-                arglist = space.newlist(inargs)
-                outargs = space.call_args(func, Arguments.frompacked(space, 
arglist))
-                return outargs
-            if len(outargs) < 2:
-                return outargs[0]
-            return space.newtuple(outargs)
         iter_shape, arg_shapes, matched_dims = self.verify_args(space, inargs, 
outargs)
         inargs, outargs, need_to_cast = self.alloc_args(space, inargs, 
outargs, dtypes,
                                           arg_shapes)
+        if not self.external_loop:
+            inargs0 = inargs[0]
+            outargs0 = outargs[0]
+            assert isinstance(inargs0, W_NDimArray)
+            assert isinstance(outargs0, W_NDimArray)
+            res_dtype = outargs0.get_dtype()
+            new_shape = inargs0.get_shape()
+            if len(outargs) < 2:
+                return loop.call_many_to_one(space, new_shape, func,
+                                             res_dtype, inargs, outargs[0])
+            return loop.call_many_to_many(space, new_shape, func,
+                                             res_dtype, inargs, outargs)
         for tf in need_to_cast:
             if tf:
                 raise oefmt(space.w_NotImplementedError, "casting not 
supported yet")
@@ -619,48 +614,44 @@
         w_casting = space.w_None
         w_op_axes = space.w_None
 
+        #print '\nsignature', sig
+        #print [(d, getattr(self,d)) for d in dir(self) if 'core' in d or 
'broad' in d]
+        #print [(d, locals()[d]) for d in locals() if 'core' in d or 'broad' 
in d]
+        #print 'shapes',[d.get_shape() for d in inargs + outargs]
+        #print 'steps',[d.implementation.strides for d in inargs + outargs]
+        if isinstance(func, W_GenericUFuncCaller):
+            # Use GeneralizeUfunc interface with signature
+            # Unlike numpy, we will not broadcast dims before
+            # the core_ndims rather we use nditer iteration
+            # so dims[0] == 1
+            dims = [1] + matched_dims
+            steps = []
+            allargs = inargs + outargs
+            for i in range(len(allargs)):
+                steps.append(0)
+            for i in range(len(allargs)):
+                _arg = allargs[i]
+                assert isinstance(_arg, W_NDimArray)
+                start_dim = len(iter_shape)
+                steps += _arg.implementation.strides[start_dim:]
+            func.set_dims_and_steps(space, dims, steps)
+        else:
+            # it is a function, ready to be called by the iterator,
+            # from frompyfunc
+            pass
+        # mimic NpyIter_AdvancedNew with a nditer
+        w_itershape = space.newlist([space.wrap(i) for i in iter_shape]) 
+        nd_it = W_NDIter(space, space.newlist(inargs + outargs), w_flags,
+                      w_op_flags, w_op_dtypes, w_casting, w_op_axes,
+                      w_itershape)
+        # coalesce each iterators, according to inner_dimensions
+        for i in range(len(inargs) + len(outargs)):
+            for j in range(self.core_num_dims[i]):
+                new_iter = coalesce_iter(nd_it.iters[i][0], nd_it.op_flags[i],
+                                nd_it, nd_it.order, flat=False)
+                nd_it.iters[i] = (new_iter, new_iter.reset())
+            # do the iteration
         if self.stack_inputs:
-            #print '\nsignature', sig
-            #print [(d, getattr(self,d)) for d in dir(self) if 'core' in d or 
'broad' in d]
-            #print [(d, locals()[d]) for d in locals() if 'core' in d or 
'broad' in d]
-            #print 'shapes',[d.get_shape() for d in inargs + outargs]
-            #print 'steps',[d.implementation.strides for d in inargs + outargs]
-            if isinstance(func, W_GenericUFuncCaller):
-                # Use GeneralizeUfunc interface with signature
-                # Unlike numpy, we will not broadcast dims before
-                # the core_ndims rather we use nditer iteration
-                # so dims[0] == 1
-                dims = [1] + matched_dims
-                steps = []
-                allargs = inargs + outargs
-                for i in range(len(allargs)):
-                    steps.append(0)
-                for i in range(len(allargs)):
-                    _arg = allargs[i]
-                    assert isinstance(_arg, W_NDimArray)
-                    start_dim = len(iter_shape)
-                    steps += _arg.implementation.strides[start_dim:]
-                func.set_dims_and_steps(space, dims, steps)
-            else:
-                # it is a function, ready to be called by the iterator,
-                # from frompyfunc
-                pass
-            # mimic NpyIter_AdvancedNew with a nditer
-            w_itershape = space.newlist([space.wrap(i) for i in iter_shape]) 
-            nd_it = W_NDIter(space, space.newlist(inargs + outargs), w_flags,
-                          w_op_flags, w_op_dtypes, w_casting, w_op_axes,
-                          w_itershape)
-            # coalesce each iterators, according to inner_dimensions
-            if nd_it.order == 'F':
-                fastest = 0
-            else:
-                fastest = -1
-            for i in range(len(inargs) + len(outargs)):
-                for j in range(self.core_num_dims[i]):
-                    new_iter = coalesce_iter(nd_it.iters[i][0], 
nd_it.op_flags[i],
-                                    nd_it, nd_it.order, fastest, flat=False)
-                    nd_it.iters[i] = (new_iter, new_iter.reset())
-            # do the iteration
             while not nd_it.done:
                 # XXX jit me
                 for it, st in nd_it.iters:
@@ -674,20 +665,35 @@
                     args.append(nd_it.getitem(it, st))
                     nd_it.iters[i] = (it, it.next(st))
                 space.call_args(func, Arguments.frompacked(space, 
space.newlist(args)))
-            if len(outargs) > 1:
-                return space.newtuple([convert_to_array(space, o) for o in 
outargs])
-            return outargs[0]
-        inargs0 = inargs[0]
-        outargs0 = outargs[0]
-        assert isinstance(inargs0, W_NDimArray)
-        assert isinstance(outargs0, W_NDimArray)
-        res_dtype = outargs0.get_dtype()
-        new_shape = inargs0.get_shape()
-        if len(outargs) < 2:
-            return loop.call_many_to_one(space, new_shape, func,
-                                         res_dtype, inargs, outargs[0])
-        return loop.call_many_to_many(space, new_shape, func,
-                                         res_dtype, inargs, outargs)
+        else:
+            # do the iteration
+            while not nd_it.done:
+                # XXX jit me
+                for it, st in nd_it.iters:
+                    if not it.done(st):
+                        break
+                else:
+                    nd_it.done = True
+                    break
+                initers = []
+                outiters = []
+                nin = len(inargs)
+                for i, (it, st) in enumerate(nd_it.iters[:nin]):
+                    initers.append(nd_it.getitem(it, st))
+                    nd_it.iters[i] = (it, it.next(st))
+                for i, (it, st) in enumerate(nd_it.iters[nin:]):
+                    outiters.append(nd_it.getitem(it, st))
+                    nd_it.iters[i + nin] = (it, it.next(st))
+                outs = space.call_args(func, Arguments.frompacked(space, 
space.newlist(initers)))
+                if len(outiters) < 2:
+                    outiters[0].descr_setitem(space, space.w_Ellipsis, outs)
+                else:
+                    for i in range(self.nout):
+                        w_val = space.getitem(outs, space.wrap(i))
+                        outiters[i].descr_setitem(space, space.w_Ellipsis, 
w_val)
+        if len(outargs) > 1:
+            return space.newtuple([convert_to_array(space, o) for o in 
outargs])
+        return outargs[0]
 
     def parse_kwargs(self, space, kwargs_w):
         w_subok, w_out, casting, sig, extobj = \
@@ -727,20 +733,24 @@
         nop = len(inargs) + len(outargs)
         dtypes = []
         if isinstance(type_tup, str) and len(type_tup) > 0:
-            if len(type_tup) == 1:
-                dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * 
self.nargs
-            elif len(type_tup) == self.nargs + 2:
-                for i in range(self.nin):
-                    
dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]])
-                #skip the '->' in the signature
-                for i in range(self.nout):
-                    j = i + self.nin + 2
-                    
dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]])
-            else:
-                raise oefmt(space.w_TypeError, "a type-string for %s " \
-                    "requires 1 typecode or %d typecode(s) before and %d" \
-                    " after the -> sign, not '%s'", self.name, self.nin, 
-                    self.nout, type_tup)
+            try:
+                if len(type_tup) == 1:
+                    dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] 
* self.nargs
+                elif len(type_tup) == self.nargs + 2:
+                    for i in range(self.nin):
+                        
dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]])
+                    #skip the '->' in the signature
+                    for i in range(self.nout):
+                        j = i + self.nin + 2
+                        
dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]])
+                else:
+                    raise oefmt(space.w_TypeError, "a type-string for %s " \
+                        "requires 1 typecode or %d typecode(s) before and %d" \
+                        " after the -> sign, not '%s'", self.name, self.nin, 
+                        self.nout, type_tup)
+            except KeyError:
+                raise oefmt(space.w_ValueError, "unknown typecode in" \
+                        " call to %s with type-string '%s'", self.name, 
type_tup)
         else:
             # XXX why does the next line not pass translation?
             # dtypes = [i.get_dtype() for i in inargs]
@@ -764,9 +774,13 @@
                 break
         else:
             if len(self.funcs) > 1:
+                dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, 
d.elsize) \
+                                 for d in dtypes])
+                _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, 
d.elsize) \
+                                for d in _dtypes])
                 raise oefmt(space.w_TypeError,
-                            'input dtype did not match any known dtypes',
-                           )
+                     "input dtype [%s] did not match any known dtypes [%s] ", 
+                     dtypesstr,_dtypesstr)
             i = 0
         # Fill in empty dtypes
         for j in range(self.nargs):
@@ -1239,7 +1253,8 @@
      w_identity=None, name='', doc='', stack_inputs=False):
     ''' frompyfunc(func, nin, nout) #cpython numpy compatible
         frompyfunc(func, nin, nout, dtypes=None, signature='',
-                   identity=None, name='', doc='', stack_inputs=False)
+                   identity=None, name='', doc='', 
+                   stack_inputs=False)
 
     Takes an arbitrary Python function and returns a ufunc.
 
@@ -1255,10 +1270,12 @@
     nout : int
         The number of arrays returned by `func`.
     dtypes: None or [dtype, ...] of the input, output args for each function,
-               or 'match' to force output to exactly match input dtype
+         or 'match' to force output to exactly match input dtype
+         Note that 'match' is a pypy-only extension to allow non-object
+         return dtypes      
     signature*: str, default=''
          The mapping of input args to output args, defining the
-         inner-loop indexing
+         inner-loop indexing. If it is empty, the func operates on scalars
     identity*: None (default) or int
          For reduce-type ufuncs, the default value
     name: str, default=''
@@ -1277,7 +1294,7 @@
 
     Notes
     -----
-    If the signature and out_dtype are both missing, the returned ufunc
+    If the signature and dtype are both missing, the returned ufunc
         always returns PyObject arrays (cpython numpy compatability).
     Input arguments marked with a * are pypy-only extensions
 
@@ -1333,16 +1350,15 @@
             'identity must be None or an int')
 
     if len(signature) == 0:
-        # cpython compatability, func is of the form (),()->()
-        signature = ','.join(['()'] * nin) + '->' + ','.join(['()'] * nout)
+        external_loop=False
     else:
-        #stack_inputs = True
-        pass
+        external_loop=True
 
     w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes,
                            signature, match_dtypes=match_dtypes,
-                           stack_inputs=stack_inputs)
-    _parse_signature(space, w_ret, w_ret.signature)
+                           stack_inputs=stack_inputs, 
external_loop=external_loop)
+    if w_ret.external_loop:
+        _parse_signature(space, w_ret, w_ret.signature)
     if doc:
         w_ret.w_doc = space.wrap(doc)
     return w_ret
diff --git a/pypy/module/pypyjit/interp_jit.py 
b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -173,7 +173,7 @@
 exiting (blackhole) steps, but just not from the final assembler.
 
 Note that the return value of the callable is ignored, because
-there is no reasonable way to guess what it sound be in case the
+there is no reasonable way to guess what it should be in case the
 function is not called.
 
 This is meant to be used notably in sys.settrace() for coverage-
diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
--- a/pypy/module/sys/version.py
+++ b/pypy/module/sys/version.py
@@ -10,7 +10,7 @@
 #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py
 CPYTHON_API_VERSION        = 1013   #XXX # sync with include/modsupport.h
 
-PYPY_VERSION               = (2, 5, 0, "alpha", 0)    #XXX # sync patchlevel.h
+PYPY_VERSION               = (2, 6, 0, "alpha", 0)    #XXX # sync patchlevel.h
 
 if platform.name == 'msvc':
     COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600)
diff --git a/pypy/objspace/std/dictmultiobject.py 
b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -1173,8 +1173,8 @@
     def wrapkey(space, key):
         return space.wrap(key)
 
-    # XXX there is no space.newlist_int yet to implement w_keys more
-    # efficiently
+    def w_keys(self, w_dict):
+        return self.space.newlist_int(self.listview_int(w_dict))
 
 create_iterator_classes(IntDictStrategy)
 
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
--- a/pypy/objspace/std/listobject.py
+++ b/pypy/objspace/std/listobject.py
@@ -176,6 +176,12 @@
         storage = strategy.erase(list_u)
         return W_ListObject.from_storage_and_strategy(space, storage, strategy)
 
+    @staticmethod
+    def newlist_int(space, list_i):
+        strategy = space.fromcache(IntegerListStrategy)
+        storage = strategy.erase(list_i)
+        return W_ListObject.from_storage_and_strategy(space, storage, strategy)
+
     def __repr__(self):
         """ representation for debugging purposes """
         return "%s(%s, %s)" % (self.__class__.__name__, self.strategy,
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -288,6 +288,9 @@
     def newlist_unicode(self, list_u):
         return W_ListObject.newlist_unicode(self, list_u)
 
+    def newlist_int(self, list_i):
+        return W_ListObject.newlist_int(self, list_i)
+
     def newdict(self, module=False, instance=False, kwargs=False,
                 strdict=False):
         return W_DictMultiObject.allocate_and_init_instance(
diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py
--- a/rpython/rtyper/rclass.py
+++ b/rpython/rtyper/rclass.py
@@ -711,8 +711,15 @@
                     continue
                 value = self.classdef.classdesc.read_attribute(fldname, None)
                 if value is not None:
-                    cvalue = inputconst(r.lowleveltype,
-                                        r.convert_desc_or_const(value))
+                    ll_value = r.convert_desc_or_const(value)
+                    # don't write NULL GC pointers: we know that the malloc
+                    # done above initialized at least the GC Ptr fields to
+                    # NULL already, and that's true for all our GCs
+                    if (isinstance(r.lowleveltype, Ptr) and
+                            r.lowleveltype.TO._gckind == 'gc' and
+                            not ll_value):
+                        continue
+                    cvalue = inputconst(r.lowleveltype, ll_value)
                     self.setfield(vptr, fldname, cvalue, llops,
                                   flags={'access_directly': True})
         return vptr
diff --git a/rpython/translator/backendopt/test/test_malloc.py b/rpython/translator/backendopt/test/test_malloc.py
--- a/rpython/translator/backendopt/test/test_malloc.py
+++ b/rpython/translator/backendopt/test/test_malloc.py
@@ -340,3 +340,15 @@
             u[0].s.x = x
             return u[0].s.x
         graph = self.check(f, [int], [42], 42)
+
+    def test_two_paths_one_with_constant(self):
+        py.test.skip("XXX implement me?")
+        def fn(n):
+            if n > 100:
+                tup = (0,)
+            else:
+                tup = (n,)
+            (n,)    # <- flowspace
+            return tup[0]
+
+        self.check(fn, [int], [42], 42)
diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py
--- a/rpython/translator/c/test/test_standalone.py
+++ b/rpython/translator/c/test/test_standalone.py
@@ -808,12 +808,7 @@
         t, cbuilder = self.compile(entry_point, shared=True)
         assert cbuilder.shared_library_name is not None
         assert cbuilder.shared_library_name != cbuilder.executable_name
-        if os.name == 'posix':
-            library_path = cbuilder.shared_library_name.dirpath()
-            if sys.platform == 'darwin':
-                monkeypatch.setenv('DYLD_LIBRARY_PATH', library_path)
-            else:
-                monkeypatch.setenv('LD_LIBRARY_PATH', library_path)
+        #Do not set LD_LIBRARY_PATH, make sure $ORIGIN flag is working
         out, err = cbuilder.cmdexec("a b")
         assert out == "3"
 
diff --git a/rpython/translator/platform/freebsd.py b/rpython/translator/platform/freebsd.py
--- a/rpython/translator/platform/freebsd.py
+++ b/rpython/translator/platform/freebsd.py
@@ -12,6 +12,7 @@
     cflags = tuple(
         ['-O3', '-pthread', '-fomit-frame-pointer'] +
         os.environ.get('CFLAGS', '').split())
+    rpath_flags = ['-Wl,-rpath=\'$$ORIGIN/\'',  '-Wl,-z,origin']
 
 class Freebsd_64(Freebsd):
     shared_only = ('-fPIC',)
diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py
--- a/rpython/translator/platform/posix.py
+++ b/rpython/translator/platform/posix.py
@@ -112,9 +112,9 @@
             target_name = exe_name.basename
 
         if shared:
-            cflags = self.cflags + self.get_shared_only_compile_flags()
+            cflags = tuple(self.cflags) + self.get_shared_only_compile_flags()
         else:
-            cflags = self.cflags + self.standalone_only
+            cflags = tuple(self.cflags) + tuple(self.standalone_only)
 
         m = GnuMakefile(path)
         m.exe_name = path.join(exe_name.basename)
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to