Author: Manuel Jacob <[email protected]>
Branch: py3k
Changeset: r77443:d9182d927ee4
Date: 2015-05-21 03:25 +0200
http://bitbucket.org/pypy/pypy/changeset/d9182d927ee4/

Log:    hg merge 8cb5c941efb4

        This is part of a series of commits to merge default into the py3k
        branch. The merge is very large, so it's easier when split into
        smaller pieces.

diff too long, truncating to 2000 out of 8429 lines

diff --git a/.tddium.requirements.txt b/.tddium.requirements.txt
deleted file mode 100644
--- a/.tddium.requirements.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-pytest
diff --git a/lib-python/2.7/test/test_urllib2net.py b/lib-python/2.7/test/test_urllib2net.py
--- a/lib-python/2.7/test/test_urllib2net.py
+++ b/lib-python/2.7/test/test_urllib2net.py
@@ -102,11 +102,8 @@
 
     def test_ftp(self):
         urls = [
-            'ftp://ftp.kernel.org/pub/linux/kernel/README',
-            'ftp://ftp.kernel.org/pub/linux/kernel/non-existent-file',
-            #'ftp://ftp.kernel.org/pub/leenox/kernel/test',
-            'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC'
-                '/research-reports/00README-Legal-Rules-Regs',
+            'ftp://ftp.debian.org/debian/README',
+            'ftp://ftp.debian.org/debian/non-existent-file',
             ]
         self._test_urls(urls, self._extra_handlers())
 
@@ -255,6 +252,7 @@
         with test_support.transient_internet(url, timeout=None):
             u = _urlopen_with_retry(url)
             self.assertIsNone(u.fp._sock.fp._sock.gettimeout())
+            u.close()
 
     def test_http_default_timeout(self):
         self.assertIsNone(socket.getdefaulttimeout())
@@ -266,6 +264,7 @@
             finally:
                 socket.setdefaulttimeout(None)
             self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 60)
+            u.close()
 
     def test_http_no_timeout(self):
         self.assertIsNone(socket.getdefaulttimeout())
@@ -277,20 +276,23 @@
             finally:
                 socket.setdefaulttimeout(None)
             self.assertIsNone(u.fp._sock.fp._sock.gettimeout())
+            u.close()
 
     def test_http_timeout(self):
         url = "http://www.example.com"
         with test_support.transient_internet(url):
             u = _urlopen_with_retry(url, timeout=120)
             self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 120)
+            u.close()
 
-    FTP_HOST = "ftp://ftp.mirror.nl/pub/gnu/"
+    FTP_HOST = 'ftp://ftp.debian.org/debian/'
 
     def test_ftp_basic(self):
         self.assertIsNone(socket.getdefaulttimeout())
         with test_support.transient_internet(self.FTP_HOST, timeout=None):
             u = _urlopen_with_retry(self.FTP_HOST)
             self.assertIsNone(u.fp.fp._sock.gettimeout())
+            u.close()
 
     def test_ftp_default_timeout(self):
         self.assertIsNone(socket.getdefaulttimeout())
@@ -301,6 +303,7 @@
             finally:
                 socket.setdefaulttimeout(None)
             self.assertEqual(u.fp.fp._sock.gettimeout(), 60)
+            u.close()
 
     def test_ftp_no_timeout(self):
         self.assertIsNone(socket.getdefaulttimeout(),)
@@ -311,11 +314,16 @@
             finally:
                 socket.setdefaulttimeout(None)
             self.assertIsNone(u.fp.fp._sock.gettimeout())
+            u.close()
 
     def test_ftp_timeout(self):
         with test_support.transient_internet(self.FTP_HOST):
-            u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
+            try:
+                u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
+            except:
+                raise
             self.assertEqual(u.fp.fp._sock.gettimeout(), 60)
+            u.close()
 
 
 def test_main():
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -454,6 +454,7 @@
         self.__cursors_counter = 0
         self.__statements = []
         self.__statements_counter = 0
+        self.__rawstatements = set()
         self._statement_cache = _StatementCache(self, cached_statements)
 
         self.__func_cache = {}
@@ -483,6 +484,14 @@
 
         self.__do_all_statements(Statement._finalize, True)
 
+        # depending on when this close() is called, the statements' weakrefs
+        # may be already dead, even though Statement.__del__() was not called
+        # yet.  In this case, self.__rawstatements is not empty.
+        if self.__rawstatements is not None:
+            for stmt in list(self.__rawstatements):
+                self._finalize_raw_statement(stmt)
+            self.__rawstatements = None
+
         if self._db:
             ret = _lib.sqlite3_close(self._db)
             if ret != _lib.SQLITE_OK:
@@ -562,6 +571,7 @@
         self.__cursors = [r for r in self.__cursors if r() is not None]
 
     def _remember_statement(self, statement):
+        self.__rawstatements.add(statement._statement)
         self.__statements.append(weakref.ref(statement))
         self.__statements_counter += 1
         if self.__statements_counter < 200:
@@ -569,6 +579,11 @@
         self.__statements_counter = 0
         self.__statements = [r for r in self.__statements if r() is not None]
 
+    def _finalize_raw_statement(self, _statement):
+        if self.__rawstatements is not None:
+            self.__rawstatements.remove(_statement)
+            _lib.sqlite3_finalize(_statement)
+
     def __do_all_statements(self, action, reset_cursors):
         for weakref in self.__statements:
             statement = weakref()
@@ -1211,7 +1226,6 @@
 
     def __init__(self, connection, sql):
         self.__con = connection
-        self.__con._remember_statement(self)
 
         self._in_use = False
 
@@ -1256,17 +1270,19 @@
         if ret != _lib.SQLITE_OK:
             raise self.__con._get_exception(ret)
 
+        self.__con._remember_statement(self)
+
         tail = _ffi.string(next_char[0]).decode('utf-8')
         if _check_remaining_sql(tail):
             raise Warning("You can only execute one statement at a time.")
 
     def __del__(self):
         if self._statement:
-            _lib.sqlite3_finalize(self._statement)
+            self.__con._finalize_raw_statement(self._statement)
 
     def _finalize(self):
         if self._statement:
-            _lib.sqlite3_finalize(self._statement)
+            self.__con._finalize_raw_statement(self._statement)
             self._statement = None
         self._in_use = False
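
A minimal standalone sketch of the ownership pattern introduced above: the
connection keeps the raw C statement handles in a set of its own, so close()
can still finalize them when the Statement wrappers (tracked only through
weakrefs) are already gone.  The names below are simplified stand-ins, not
the actual _sqlite3 internals; finalize() plays the role of
_lib.sqlite3_finalize().

    def finalize(raw_handle):
        pass   # stand-in for _lib.sqlite3_finalize(raw_handle)

    class Connection(object):
        def __init__(self):
            self._rawstatements = set()   # raw handles owned by the connection

        def _remember_statement(self, raw_handle):
            self._rawstatements.add(raw_handle)

        def _finalize_raw_statement(self, raw_handle):
            if self._rawstatements is not None:
                self._rawstatements.discard(raw_handle)
                finalize(raw_handle)

        def close(self):
            # even if all Statement wrappers were collected, the raw handles
            # are still reachable here and can be finalized
            if self._rawstatements is not None:
                for raw_handle in list(self._rawstatements):
                    self._finalize_raw_statement(raw_handle)
                self._rawstatements = None
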
 
diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py
--- a/lib_pypy/_tkinter/tklib.py
+++ b/lib_pypy/_tkinter/tklib.py
@@ -1,7 +1,7 @@
 # C bindings with libtcl and libtk.
 
 from cffi import FFI
-import sys
+import sys, os
 
 tkffi = FFI()
 
@@ -135,9 +135,12 @@
     linklibs = ['tcl', 'tk']
     libdirs = []
 else:
-    incdirs=['/usr/include/tcl']
-    linklibs=['tcl', 'tk']
-    libdirs = []
+    for _ver in ['', '8.6', '8.5', '']:
+        incdirs = ['/usr/include/tcl' + _ver]
+        linklibs = ['tcl' + _ver, 'tk' + _ver]
+        libdirs = []
+        if os.path.isdir(incdirs[0]):
+            break
 
 tklib = tkffi.verify("""
 #include <tcl.h>
diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py
--- a/lib_pypy/pyrepl/simple_interact.py
+++ b/lib_pypy/pyrepl/simple_interact.py
@@ -33,6 +33,15 @@
         return False
     return True
 
+def _strip_final_indent(text):
+    # kill spaces and tabs at the end, but only if they follow '\n'.
+    # meant to remove the auto-indentation only (although it would of
+    # course also remove explicitly-added indentation).
+    short = text.rstrip(' \t')
+    n = len(short)
+    if n > 0 and text[n-1] == '\n':
+        return short
+    return text
 
 def run_multiline_interactive_console(mainmodule=None):
     import code
@@ -43,9 +52,9 @@
     def more_lines(unicodetext):
         # ooh, look at the hack:
         if sys.version_info < (3,):
-            src = "#coding:utf-8\n"+unicodetext.encode('utf-8')
+            src = "#coding:utf-8\n"+_strip_final_indent(unicodetext).encode('utf-8')
         else:
-            src = unicodetext
+            src = _strip_final_indent(unicodetext)
         try:
             code = console.compile(src, '<stdin>', 'single')
         except (OverflowError, SyntaxError, ValueError):
@@ -62,7 +71,7 @@
                                             returns_unicode=True)
             except EOFError:
                 break
-            more = console.push(statement)
+            more = console.push(_strip_final_indent(statement))
             assert not more
         except KeyboardInterrupt:
             console.write("\nKeyboardInterrupt\n")
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -146,6 +146,26 @@
 :doc:`objspace proxies <objspace-proxies>` document.
 
 
+Packaging (preparing for installation)
+--------------------------------------
+
+Packaging is required if you want to install PyPy system-wide, even if
+you only install it on the same machine you built it on.  The reason is
+that packaging prepares a number of extra features that cannot be done
+lazily on a root-installed PyPy, because normal users don't have write
+access.  This concerns mostly libraries that would normally be compiled
+if and when they are imported the first time.
+
+::
+    
+    cd pypy/tool/release
+    ./package.py pypy-VER-PLATFORM
+
+This creates a clean and prepared hierarchy, as well as a ``.tar.bz2``
+with the same content; both are found by default in
+``/tmp/usession-YOURNAME/build/``.  You can then either move the file
+hierarchy or unpack the ``.tar.bz2`` at the correct place.
+
 
 Installation
 ------------
diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst
--- a/pypy/doc/getting-started-dev.rst
+++ b/pypy/doc/getting-started-dev.rst
@@ -207,12 +207,17 @@
 large amount of options that can be used to customize pyinteractive.py).
 As an example of using PyPy from the command line, you could type::
 
-    python pyinteractive.py -c "from test import pystone; pystone.main(10)"
+    python pyinteractive.py --withmod-time -c "from test import pystone; pystone.main(10)"
 
 Alternatively, as with regular Python, you can simply give a
 script name on the command line::
 
-    python pyinteractive.py ../../lib-python/2.7/test/pystone.py 10
+    python pyinteractive.py --withmod-time ../../lib-python/2.7/test/pystone.py 10
+
+The ``--withmod-xxx`` option enables the built-in module ``xxx``.  By
+default almost none of them are enabled, because initializing them takes time.
+If you want to enable all built-in modules anyway, you can use
+``--allworkingmodules``.
 
See our :doc:`configuration sections <config/index>` for details about what all the commandline
 options do.
diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst
--- a/pypy/doc/stm.rst
+++ b/pypy/doc/stm.rst
@@ -29,7 +29,8 @@
 ====================
 
 ``pypy-stm`` is a variant of the regular PyPy interpreter.  (This
-version supports Python 2.7; see below for `Python 3`_.)  With caveats_
+version supports Python 2.7; see below for `Python 3, CPython,
+and others`_.)  With caveats_
 listed below, it should be in theory within 20%-50% slower than a
 regular PyPy, comparing the JIT version in both cases (but see below!).
 It is called
@@ -178,8 +179,8 @@
 
 
 
-Python 3
-========
+Python 3, CPython, and others
+=============================
 
 In this document I describe "pypy-stm", which is based on PyPy's Python
 2.7 interpreter.  Supporting Python 3 should take about half an
@@ -194,6 +195,29 @@
 framework, although the amount of work to put there might vary, because
 the STM framework within RPython is currently targeting the PyPy
 interpreter and other ones might have slightly different needs.
+But in general, all the tedious transformations are done by RPython
+and you're only left with the (hopefully few) hard and interesting bits.
+
+The core of STM works as a library written in C (see `reference to
+implementation details`_ below).  It means that it can be used on
+other interpreters than the ones produced by RPython.  Duhton_ is an
+early example of that.  At this point, you might think about adapting
+this library for CPython.  You're warned, though: as far as I can
+tell, it is a doomed idea.  I had a hard time debugging Duhton, and
+that's infinitely simpler than CPython.  Even ignoring that, you can
+see in the C sources of Duhton that many core design decisions are
+different than in CPython: no refcounting; limited support for
+prebuilt "static" objects; ``stm_read()`` and ``stm_write()`` macro
+calls everywhere (and getting very rare and very obscure bugs if you
+forget one); and so on.  You could imagine some custom special-purpose
+extension of the C language, which you would preprocess to regular C.
+In my opinion that's starting to look a lot like RPython itself, but
+maybe you'd prefer this approach.  Of course you still have to worry
+about each and every C extension module you need, but maybe you'd have
+a way forward.
+
+.. _Duhton: https://bitbucket.org/pypy/duhton
+
 
 
 User Guide
@@ -372,18 +396,49 @@
   and ``y`` that are thread-local: reading or writing them from
   concurrently-running transactions will return independent results.
   (Any other attributes of ``Foo`` instances will be globally visible
-  from all threads, as usual.)  The optional argument to
-  ``threadlocalproperty()`` is the default value factory: in case no
-  value was assigned in the current thread yet, the factory is called
-  and its result becomes the value in that thread (like
-  ``collections.defaultdict``).  If no default value factory is
-  specified, uninitialized reads raise ``AttributeError``.  Note that
-  with ``TransactionQueue`` you get a pool of a fixed number of
-  threads, each running the transactions one after the other; such
-  thread-local properties will have the value last stored in them in
-  the same thread,, which may come from a random previous transaction.
-  This means that ``threadlocalproperty`` is useful mainly to avoid
-  conflicts from cache-like data structures.
+  from all threads, as usual.)  This is useful together with
+  ``TransactionQueue`` for these two cases:
+
+  - For attributes of long-lived objects that change during one
+    transaction, but should always be reset to some initial value
+    around each transaction (for example, initialized to 0 at the start of
+    a transaction; or, if used for a list of pending things to do
+    within this transaction, it will always be empty at the end of one
+    transaction).
+
+  - For general caches across transactions.  With ``TransactionQueue``
+    you get a pool of a fixed number N of threads, each running the
+    transactions serially.  A thread-local property will have the
+    value last stored in it by the same thread, which may come from a
+    random previous transaction.  Basically, you get N copies of the
+    property's value, and each transaction accesses a random copy.  It
+    works fine for caches.
+
+  In more detail, the optional argument to ``threadlocalproperty()``
+  is the default value factory: in case no value was assigned in the
+  current thread yet, the factory is called and its result becomes the
+  value in that thread (like ``collections.defaultdict``).  If no
+  default value factory is specified, uninitialized reads raise
+  ``AttributeError``.
+
+* In addition to all of the above, there are cases where write-write
+  conflicts are caused by writing the same value to an attribute again
+  and again.  See for example ea2e519614ab_: this fixes two such
+  issues where we write an object field without first checking if we
+  already did it.  The ``dont_change_any_more`` field is a flag set to
+  ``True`` in that part of the code, but usually this
+  ``rtyper_makekey()`` method will be called many times for the same
+  object; the code used to repeatedly set the flag to ``True``, but
+  now it first checks and only does the write if it is ``False``.
+  Similarly, in the second half of the checkin, the method
+  ``setup_block_entry()`` used to both assign the ``concretetype``
+  fields and return a list, but its two callers were different: one
+  would really need the ``concretetype`` fields initialized, whereas
+  the other would only need to get its result list --- the
+  ``concretetype`` field in that case might already be set or not, but
+  that would not matter.
+
+.. _ea2e519614ab: https://bitbucket.org/pypy/pypy/commits/ea2e519614ab
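
A rough sketch of how the thread-local properties described in the
``threadlocalproperty()`` bullet above could look in user code; it assumes
``threadlocalproperty`` is importable from the ``transaction`` module
mentioned later in this file (the exact import location is an assumption of
this sketch):

    from transaction import threadlocalproperty   # assumed import location

    class Foo(object):
        # cache-like data: each transaction-runner thread sees its own dict
        # (factory `dict` -> starts out empty in every thread)
        cache = threadlocalproperty(dict)
        # no default factory: reading it before the first assignment in the
        # current thread raises AttributeError
        scratch = threadlocalproperty()
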
 
 Note that Python is a complicated language; there are a number of less
 common cases that may cause conflict (of any kind) where we might not
@@ -509,6 +564,15 @@
 Miscellaneous functions
 -----------------------
 
+* First, note that the ``transaction`` module is found in the file
+  ``lib_pypy/transaction.py``.  This file can be copied around to
+  execute the same programs on CPython or on non-STM PyPy, with
+  fall-back behavior.  (One case where the behavior differs is
+  ``atomic``, which is in this fall-back case just a regular lock; so
+  ``with atomic`` only prevents other threads from entering other
+  ``with atomic`` sections, but won't prevent other threads from
+  running non-atomic code.)
+
 * ``transaction.getsegmentlimit()``: return the number of "segments" in
   this pypy-stm.  This is the limit above which more threads will not be
   able to execute on more cores.  (Right now it is limited to 4 due to
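
The fall-back behaviour of ``atomic`` described above (a plain lock when the
program runs on CPython or on a non-STM PyPy) amounts to something like the
following sketch; this shows only the degraded semantics, not the real
lib_pypy/transaction.py code:

    import threading

    # one global re-entrant lock: `with atomic` excludes other `with atomic`
    # blocks, but does not stop threads that are running non-atomic code
    atomic = threading.RLock()

    counter = 0

    def add_one():
        global counter
        with atomic:
            counter += 1
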
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -3,5 +3,67 @@
 =======================
 
 .. this is a revision shortly after release-2.5.1
-.. startrev: 397b96217b85
+.. startrev: cb01edcb59414d9d93056e54ed060673d24e67c1
 
+issue2005:
+ignore errors on closing random file handles while importing a module (cpython compatibility)
+
+issue2013:
+added constants to _ssl for TLS 1.1 and 1.2
+
+issue2014:
+Add PyLong_FromUnicode to cpyext.
+
+issue2017: 
+On non-Linux-x86 platforms, reduced the memory impact of
+creating a lot of greenlets/tasklets.  Particularly useful on Win32 and
+on ARM, where you used to get a MemoryError after only 2500-5000
+greenlets (the 32-bit address space is exhausted).
+
+Update gdb_pypy for python3 (gdb compatibility)
+
+Merged rstrategies (a library for storage strategies) into rpython
+
+Support unicode strings in numpy.dtype creation i.e. np.dtype(u'int64')
+
+Various rpython cleanups for vmprof support
+
+issue2019:
+Fix isspace as called by rpython unicode.strip()
+
+issue2023:
+In the cpyext 'Concrete Object Layer' API,
+don't call methods on the object (which can be overridden),
+but directly on the concrete base type.
+
+issue2029:
+Hide the default_factory attribute in a dict
+
+issue2027:
+Better document pyinteractive and add --withmod-time
+
+.. branch: gc-incminimark-pinning-improve
+
+branch gc-incminimark-pinning-improve: 
+Object Pinning is now used in `bz2` and `rzlib` (therefore also affects
+Python's `zlib`). If the data to compress/decompress is inside the nursery
+(incminimark), it no longer needs to be copied into a non-moving buffer. This
+saves one `malloc` and a copy of the data.  Additionally a new GC environment variable
+is introduced (`PYPY_GC_MAX_PINNED`) primarily for debugging purposes.
+
+.. branch: refactor-pycall
+
+branch refactor-pycall:
+Make `*`-unpacking in RPython function calls completely equivalent to passing
+the tuple's elements as arguments. In other words, `f(*(a, b))` now behaves 
+exactly like `f(a, b)`.
+
+.. branch: issue2018
+branch issue2018:
+Allow prebuilt rpython dict with function values
+
+.. branch: vmprof
+.. Merged but then backed out, hopefully it will return as vmprof2
+
+.. branch: object-dtype2
+Extend numpy dtypes to allow using objects with associated garbage collection hook
diff --git a/pypy/goal/targetnumpystandalone.py b/pypy/goal/targetnumpystandalone.py
deleted file mode 100644
--- a/pypy/goal/targetnumpystandalone.py
+++ /dev/null
@@ -1,43 +0,0 @@
-
-""" Usage:
-
-./targetnumpystandalone-c <bytecode> array_size
-
-Will execute a give numpy bytecode. Arrays will be ranges (in float) modulo 10,
-constants would be consecutive starting from one.
-
-Bytecode should contain letters 'a' 'l' and 'f' so far and be correct
-"""
-
-import time
-from pypy.module.micronumpy.compile import numpy_compile
-from rpython.jit.codewriter.policy import JitPolicy
-from rpython.rtyper.annlowlevel import hlstr
-
-def entry_point(argv):
-    if len(argv) != 3:
-        print __doc__
-        return 1
-    try:
-        size = int(argv[2])
-    except ValueError:
-        print "INVALID LITERAL FOR INT:", argv[2]
-        print __doc__
-        return 3
-    t0 = time.time()
-    main(argv[0], size)
-    print "bytecode:", argv[0], "size:", size
-    print "took:", time.time() - t0
-    return 0
-
-def main(bc, size):
-    if not isinstance(bc, str):
-        bc = hlstr(bc) # for tests
-    a = numpy_compile(bc, size)
-    a = a.compute()
-
-def target(*args):
-    return entry_point, None
-
-def jitpolicy(driver):
-    return JitPolicy()
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -1,5 +1,6 @@
 """Python control flow graph generation and bytecode assembly."""
 
+import os
 from rpython.rlib import rfloat
 from rpython.rlib.objectmodel import specialize, we_are_translated
 
@@ -9,6 +10,10 @@
 from pypy.tool import stdlib_opcode as ops
 
 
+class StackDepthComputationError(Exception):
+    pass
+
+
 class Instruction(object):
     """Represents a single opcode."""
 
@@ -55,11 +60,13 @@
     reaches the end of the block, it continues to next_block.
     """
 
+    marked = False
+    have_return = False
+    auto_inserted_return = False
+
     def __init__(self):
         self.instructions = []
         self.next_block = None
-        self.marked = False
-        self.have_return = False
 
     def _post_order_see(self, stack, nextblock):
         if nextblock.marked == 0:
@@ -386,7 +393,11 @@
         # look into a block when all the previous blocks have been done.
         self._max_depth = 0
         for block in blocks:
-            self._do_stack_depth_walk(block)
+            depth = self._do_stack_depth_walk(block)
+            if block.auto_inserted_return and depth != 0:
+                os.write(2, "StackDepthComputationError in %s at %s:%s\n" % (
+                    self.compile_info.filename, self.name, self.first_lineno))
+                #raise StackDepthComputationError   # fatal error
         return self._max_depth
 
     def _next_stack_depth_walk(self, nextblock, depth):
@@ -395,20 +406,21 @@
 
     def _do_stack_depth_walk(self, block):
         depth = block.initial_depth
-        done = False
         for instr in block.instructions:
             depth += _opcode_stack_effect(instr.opcode, instr.arg)
             if depth >= self._max_depth:
                 self._max_depth = depth
+            jump_op = instr.opcode
             if instr.has_jump:
                 target_depth = depth
-                jump_op = instr.opcode
                 if jump_op == ops.FOR_ITER:
                     target_depth -= 2
                 elif (jump_op == ops.SETUP_FINALLY or
                       jump_op == ops.SETUP_EXCEPT or
                       jump_op == ops.SETUP_WITH):
-                    target_depth += 3
+                    if jump_op == ops.SETUP_WITH:
+                        target_depth -= 1     # ignore the w_result just pushed
+                    target_depth += 3         # add [exc_type, exc, unroller]
                     if target_depth > self._max_depth:
                         self._max_depth = target_depth
                 elif (jump_op == ops.JUMP_IF_TRUE_OR_POP or
@@ -417,10 +429,14 @@
                 self._next_stack_depth_walk(instr.jump[0], target_depth)
                 if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD:
                     # Nothing more can occur.
-                    done = True
                     break
-        if block.next_block and not done:
-            self._next_stack_depth_walk(block.next_block, depth)
+            elif jump_op == ops.RETURN_VALUE or jump_op == ops.RAISE_VARARGS:
+                # Nothing more can occur.
+                break
+        else:
+            if block.next_block:
+                self._next_stack_depth_walk(block.next_block, depth)
+        return depth
 
     def _build_lnotab(self, blocks):
         """Build the line number table for tracebacks and tracing."""
@@ -473,6 +489,7 @@
             if self.add_none_to_final_return:
                 self.load_const(self.space.w_None)
             self.emit_op(ops.RETURN_VALUE)
+            self.current_block.auto_inserted_return = True
         # Set the first lineno if it is not already explicitly set.
         if self.first_lineno == -1:
             if self.first_block.instructions:
@@ -565,7 +582,7 @@
     ops.INPLACE_OR: -1,
     ops.INPLACE_XOR: -1,
 
-    ops.STORE_SUBSCR: -2,
+    ops.STORE_SUBSCR: -3,
     ops.DELETE_SUBSCR: -2,
 
     ops.GET_ITER: 0,
@@ -581,7 +598,9 @@
     ops.STORE_LOCALS: -1,
     ops.POP_BLOCK: 0,
     ops.POP_EXCEPT: 0,
-    ops.END_FINALLY: -1,
+    ops.END_FINALLY: -3,     # assume always 3: we pretend that SETUP_FINALLY
+                             # pushes 3.  In truth, it would only push 1 and
+                             # the corresponding END_FINALLY only pops 1.
     ops.SETUP_WITH: 1,
     ops.SETUP_FINALLY: 0,
     ops.SETUP_EXCEPT: 4,
@@ -589,7 +608,6 @@
     ops.RETURN_VALUE: -1,
     ops.YIELD_VALUE: 0,
     ops.BUILD_MAP: 1,
-    ops.BUILD_SET: 1,
     ops.COMPARE_OP: -1,
 
     ops.LOOKUP_METHOD: 1,
@@ -646,6 +664,9 @@
 def _compute_BUILD_LIST(arg):
     return 1 - arg
 
+def _compute_BUILD_SET(arg):
+    return 1 - arg
+
 def _compute_MAKE_CLOSURE(arg):
     return -1 - _num_args(arg) - ((arg >> 16) & 0xFFFF)
 
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -791,6 +791,60 @@
         code = compile_with_astcompiler(source, 'exec', self.space)
         assert code.co_stacksize == 2
 
+    def test_stackeffect_bug3(self):
+        source = """if 1:
+        try: pass
+        finally: pass
+        try: pass
+        finally: pass
+        try: pass
+        finally: pass
+        try: pass
+        finally: pass
+        try: pass
+        finally: pass
+        try: pass
+        finally: pass
+        """
+        code = compile_with_astcompiler(source, 'exec', self.space)
+        assert code.co_stacksize == 3
+
+    def test_stackeffect_bug4(self):
+        source = """if 1:
+        with a: pass
+        with a: pass
+        with a: pass
+        with a: pass
+        with a: pass
+        with a: pass
+        """
+        code = compile_with_astcompiler(source, 'exec', self.space)
+        assert code.co_stacksize == 4
+
+    def test_stackeffect_bug5(self):
+        source = """if 1:
+        a[:]; a[:]; a[:]; a[:]; a[:]; a[:]
+        a[1:]; a[1:]; a[1:]; a[1:]; a[1:]; a[1:]
+        a[:2]; a[:2]; a[:2]; a[:2]; a[:2]; a[:2]
+        a[1:2]; a[1:2]; a[1:2]; a[1:2]; a[1:2]; a[1:2]
+        """
+        code = compile_with_astcompiler(source, 'exec', self.space)
+        assert code.co_stacksize == 3
+
+    def test_stackeffect_bug6(self):
+        source = """if 1:
+        {1}; {1}; {1}; {1}; {1}; {1}; {1}
+        """
+        code = compile_with_astcompiler(source, 'exec', self.space)
+        assert code.co_stacksize == 1
+
+    def test_stackeffect_bug7(self):
+        source = '''def f():
+            for i in a:
+                return
+        '''
+        code = compile_with_astcompiler(source, 'exec', self.space)
+
     def test_lambda(self):
         yield self.st, "y = lambda x: x", "y(4)", 4
 
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -395,14 +395,11 @@
         return space.wrap(self.name.decode('utf-8'))
 
     def fset_func_name(self, space, w_name):
-        try:
+        if space.isinstance_w(w_name, space.w_unicode):
             self.name = space.str_w(w_name)
-        except OperationError, e:
-            if e.match(space, space.w_TypeError):
-                raise OperationError(space.w_TypeError,
-                                     space.wrap("func_name must be set "
-                                                "to a string object"))
-            raise
+        else:
+            raise OperationError(space.w_TypeError,
+                space.wrap("__name__ must be set to a string object"))
 
     def fdel_func_doc(self, space):
         self.w_doc = space.w_None
diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py
--- a/pypy/interpreter/test/test_function.py
+++ b/pypy/interpreter/test/test_function.py
@@ -146,6 +146,12 @@
         assert 日本.__name__ == '日本'
         """
 
+    def test_set_name(self):
+        def f(): pass
+        f.__name__ = 'g'
+        assert f.func_name == 'g'
+        raises(TypeError, "f.__name__ = u'g'")
+
 
 class AppTestFunction:
     def test_simple_call(self):
diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py
--- a/pypy/module/_collections/app_defaultdict.py
+++ b/pypy/module/_collections/app_defaultdict.py
@@ -11,6 +11,7 @@
 
 
 class defaultdict(dict):
+    __slots__ = ['default_factory']
 
     def __init__(self, *args, **kwds):
         if len(args) > 0:
@@ -20,17 +21,9 @@
                 raise TypeError("first argument must be callable")
         else:
             default_factory = None
-        self._default_factory = default_factory
+        defaultdict.default_factory.__set__(self, default_factory)
         super(defaultdict, self).__init__(*args, **kwds)
 
-    @property
-    def default_factory(self):
-        return self._default_factory
-
-    @default_factory.setter
-    def default_factory(self, default_factory):
-        self._default_factory = default_factory
-
     def __missing__(self, key):
         pass    # this method is written at interp-level
     __missing__.__code__ = _collections.__missing__.__code__
@@ -41,15 +34,15 @@
             return "defaultdict(...)"
         try:
             recurse.add(id(self))
-            return "defaultdict(%s, %s)" % (repr(self._default_factory), super(defaultdict, self).__repr__())
+            return "defaultdict(%s, %s)" % (repr(self.default_factory),
+                                            super(defaultdict, self).__repr__())
         finally:
             recurse.remove(id(self))
 
     def copy(self):
-        return type(self)(self._default_factory, self)
+        return type(self)(self.default_factory, self)
 
-    def __copy__(self):
-        return self.copy()
+    __copy__ = copy
 
     def __reduce__(self):
         """
@@ -63,5 +56,5 @@
 
            This API is used by pickle.py and copy.py.
         """
-        return (type(self), (self._default_factory,), None, None,
+        return (type(self), (self.default_factory,), None, None,
                 iter(self.items()))
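
The ``defaultdict.default_factory.__set__(self, default_factory)`` line above
relies on a general property of __slots__: each slot is a descriptor stored
on the class, and calling its __set__ directly assigns the value without
going through a subclass's __setattr__.  A minimal standalone sketch of the
trick, independent of PyPy's _collections:

    class Base(object):
        __slots__ = ['default_factory']

        def __init__(self, factory=None):
            # assign via the slot descriptor, bypassing __setattr__ overrides
            Base.default_factory.__set__(self, factory)

    class NoSetAttr(Base):
        def __setattr__(self, name, value):
            raise AssertionError("not reached from Base.__init__")

    obj = NoSetAttr(int)
    assert obj.default_factory is int
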
diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py
--- a/pypy/module/_collections/test/test_defaultdict.py
+++ b/pypy/module/_collections/test/test_defaultdict.py
@@ -57,6 +57,28 @@
         assert d2[2] == 3
         assert d2[3] == 42
 
+    def test_no_dict(self):
+        import _collections
+        assert not hasattr(_collections.defaultdict(), '__dict__')
+
+    def test_no_setattr(self):
+        import _collections
+        class D(_collections.defaultdict):
+            def __setattr__(self, attr, name):
+                raise AssertionError
+        d = D(int)
+        assert d['5'] == 0
+        d['6'] += 3
+        assert d['6'] == 3
+
+    def test_default_factory(self):
+        import _collections
+        f = lambda: 42
+        d = _collections.defaultdict(f)
+        assert d.default_factory is f
+        d.default_factory = lambda: 43
+        assert d['5'] == 43
+
     def test_reduce(self):
         import _collections
         d = _collections.defaultdict(None, {3: 4})
diff --git a/pypy/module/_hashlib/__init__.py b/pypy/module/_hashlib/__init__.py
--- a/pypy/module/_hashlib/__init__.py
+++ b/pypy/module/_hashlib/__init__.py
@@ -1,11 +1,10 @@
 from pypy.interpreter.mixedmodule import MixedModule
-from pypy.module._hashlib.interp_hashlib import algorithms
+from pypy.module._hashlib.interp_hashlib import algorithms, fetch_names
 
 
 class Module(MixedModule):
     interpleveldefs = {
         'new' : 'interp_hashlib.new',
-        'openssl_md_meth_names': 'interp_hashlib.get(space).w_meth_names'
         }
 
     appleveldefs = {
@@ -15,5 +14,5 @@
         interpleveldefs['openssl_' + name] = 'interp_hashlib.new_' + name
 
     def startup(self, space):
-        from rpython.rlib.ropenssl import init_digests
-        init_digests()
+        w_meth_names = fetch_names(space)
+        space.setattr(self, space.wrap('openssl_md_meth_names'), w_meth_names)
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -16,47 +16,40 @@
 algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
 
 def hash_name_mapper_callback(obj_name, userdata):
-    state = global_state[0]
-    assert state is not None
     if not obj_name:
         return
     # Ignore aliased names, they pollute the list and OpenSSL appears
     # to have a its own definition of alias as the resulting list
     # still contains duplicate and alternate names for several
     # algorithms.
-    if obj_name[0].c_alias:
+    if rffi.cast(lltype.Signed, obj_name[0].c_alias):
         return
     try:
-        w_name = state.space.wrap(rffi.charp2str(obj_name[0].c_name))
-        state.space.call_method(state.w_meth_names, "add", w_name)
+        space = global_name_fetcher.space
+        w_name = space.wrap(rffi.charp2str(obj_name[0].c_name))
+        global_name_fetcher.meth_names.append(w_name)
     except OperationError, e:
-        state.w_error = e
+        global_name_fetcher.w_error = e
 
-# XXX make it threadlocal?
-global_state = [None]
+class NameFetcher:
+    def setup(self, space):
+        self.space = space
+        self.meth_names = []
+        self.w_error = None
+    def _cleanup_(self):
+        self.__dict__.clear()
+global_name_fetcher = NameFetcher()
 
-class State:
-    def __init__(self, space):
-        self.space = space
-        self.generate_method_names(space)
-
-    def generate_method_names(self, space):
-        if not we_are_translated():
-            ropenssl.init_digests()
-        self.w_error = None
-        try:
-            global_state[0] = self
-            self.w_meth_names = space.call_function(space.w_set)
-            ropenssl.OBJ_NAME_do_all(
-                ropenssl.OBJ_NAME_TYPE_MD_METH,
-                hash_name_mapper_callback, None)
-        finally:
-            global_state[0] = None
-        if self.w_error:
-            raise self.w_error
-
-def get(space):
-    return space.fromcache(State)
+def fetch_names(space):
+    global_name_fetcher.setup(space)
+    ropenssl.init_digests()
+    ropenssl.OBJ_NAME_do_all(ropenssl.OBJ_NAME_TYPE_MD_METH,
+                             hash_name_mapper_callback, None)
+    if global_name_fetcher.w_error:
+        raise global_name_fetcher.w_error
+    meth_names = global_name_fetcher.meth_names
+    global_name_fetcher.meth_names = None
+    return space.call_function(space.w_frozenset, space.newlist(meth_names))
 
 class W_Hash(W_Root):
     NULL_CTX = lltype.nullptr(ropenssl.EVP_MD_CTX.TO)
@@ -144,10 +137,10 @@
             with self.lock:
                 ropenssl.EVP_MD_CTX_copy(ctx, self.ctx)
             digest_size = self.digest_size
-            with lltype.scoped_alloc(rffi.CCHARP.TO, digest_size) as digest:
-                ropenssl.EVP_DigestFinal(ctx, digest, None)
+            with rffi.scoped_alloc_buffer(digest_size) as buf:
+                ropenssl.EVP_DigestFinal(ctx, buf.raw, None)
                 ropenssl.EVP_MD_CTX_cleanup(ctx)
-                return rffi.charpsize2str(digest, digest_size)
+                return buf.str(digest_size)
 
 
 W_Hash.typedef = TypeDef(
diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py
--- a/pypy/module/_hashlib/test/test_hashlib.py
+++ b/pypy/module/_hashlib/test/test_hashlib.py
@@ -5,7 +5,7 @@
 
     def test_method_names(self):
         import _hashlib
-        assert isinstance(_hashlib.openssl_md_meth_names, set)
+        assert isinstance(_hashlib.openssl_md_meth_names, frozenset)
         assert "md5" in _hashlib.openssl_md_meth_names
 
     def test_simple(self):
diff --git a/pypy/module/_hashlib/test/test_ztranslation.py b/pypy/module/_hashlib/test/test_ztranslation.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_hashlib/test/test_ztranslation.py
@@ -0,0 +1,4 @@
+from pypy.objspace.fake.checkmodule import checkmodule
+
+def test_checkmodule():
+    checkmodule('_hashlib')
diff --git a/pypy/module/_minimal_curses/interp_curses.py b/pypy/module/_minimal_curses/interp_curses.py
--- a/pypy/module/_minimal_curses/interp_curses.py
+++ b/pypy/module/_minimal_curses/interp_curses.py
@@ -13,7 +13,7 @@
     def __init__(self, msg):
         self.msg = msg
 
-from rpython.annotator.classdef import FORCE_ATTRIBUTES_INTO_CLASSES
+from rpython.annotator.description import FORCE_ATTRIBUTES_INTO_CLASSES
 from rpython.annotator.model import SomeString
 
 # this is necessary due to annmixlevel
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -79,7 +79,9 @@
 constants["PROTOCOL_TLSv1"]  = PY_SSL_VERSION_TLS1
 if HAVE_TLSv1_2:
     constants["PROTOCOL_TLSv1_1"] = PY_SSL_VERSION_TLS1_1
+    constants["OP_NO_TLSv1_1"] = SSL_OP_NO_TLSv1_1
     constants["PROTOCOL_TLSv1_2"] = PY_SSL_VERSION_TLS1_2
+    constants["OP_NO_TLSv1_2"] = SSL_OP_NO_TLSv1_2
 
 # protocol options
 constants["OP_ALL"] = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -292,10 +292,7 @@
         in_bufsize = datasize
 
         with OutBuffer(self.bzs) as out:
-            with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf:
-
-                for i in range(datasize):
-                    in_buf[i] = data[i]
+            with rffi.scoped_nonmovingbuffer(data) as in_buf:
 
                 self.bzs.c_next_in = in_buf
                 rffi.setintfield(self.bzs, 'c_avail_in', in_bufsize)
@@ -399,9 +396,7 @@
 
         in_bufsize = len(data)
 
-        with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf:
-            for i in range(in_bufsize):
-                in_buf[i] = data[i]
+        with rffi.scoped_nonmovingbuffer(data) as in_buf:
             self.bzs.c_next_in = in_buf
             rffi.setintfield(self.bzs, 'c_avail_in', in_bufsize)
 
@@ -453,9 +448,7 @@
     with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
         in_bufsize = len(data)
 
-        with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf:
-            for i in range(in_bufsize):
-                in_buf[i] = data[i]
+        with rffi.scoped_nonmovingbuffer(data) as in_buf:
             bzs.c_next_in = in_buf
             rffi.setintfield(bzs, 'c_avail_in', in_bufsize)
 
@@ -495,9 +488,7 @@
         return space.wrapbytes("")
 
     with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
-        with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf:
-            for i in range(in_bufsize):
-                in_buf[i] = data[i]
+        with rffi.scoped_nonmovingbuffer(data) as in_buf:
             bzs.c_next_in = in_buf
             rffi.setintfield(bzs, 'c_avail_in', in_bufsize)
 
diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py
--- a/pypy/module/cpyext/dictobject.py
+++ b/pypy/module/cpyext/dictobject.py
@@ -91,39 +91,39 @@
 @cpython_api([PyObject], lltype.Void)
 def PyDict_Clear(space, w_obj):
     """Empty an existing dictionary of all key-value pairs."""
-    space.call_method(w_obj, "clear")
+    space.call_method(space.w_dict, "clear", w_obj)
 
 @cpython_api([PyObject], PyObject)
 def PyDict_Copy(space, w_obj):
     """Return a new dictionary that contains the same key-value pairs as p.
     """
-    return space.call_method(w_obj, "copy")
+    return space.call_method(space.w_dict, "copy", w_obj)
 
 @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1)
 def PyDict_Update(space, w_obj, w_other):
     """This is the same as PyDict_Merge(a, b, 1) in C, or a.update(b) in
     Python.  Return 0 on success or -1 if an exception was raised.
     """
-    space.call_method(w_obj, "update", w_other)
+    space.call_method(space.w_dict, "update", w_obj, w_other)
     return 0
 
 @cpython_api([PyObject], PyObject)
 def PyDict_Keys(space, w_obj):
     """Return a PyListObject containing all the keys from the dictionary,
     as in the dictionary method dict.keys()."""
-    return space.call_function(space.w_list, space.call_method(w_obj, "keys"))
+    return space.call_function(space.w_list, space.call_method(space.w_dict, "keys", w_obj))
 
 @cpython_api([PyObject], PyObject)
 def PyDict_Values(space, w_obj):
     """Return a PyListObject containing all the values from the
     dictionary p, as in the dictionary method dict.values()."""
-    return space.call_function(space.w_list, space.call_method(w_obj, "values"))
+    return space.call_function(space.w_list, space.call_method(space.w_dict, "values", w_obj))
 
 @cpython_api([PyObject], PyObject)
 def PyDict_Items(space, w_obj):
     """Return a PyListObject containing all the items from the
     dictionary, as in the dictionary method dict.items()."""
-    return space.call_function(space.w_list, space.call_method(w_obj, "items"))
+    return space.call_function(space.w_list, space.call_method(space.w_dict, "items", w_obj))
 
@cpython_api([PyObject, Py_ssize_tP, PyObjectP, PyObjectP], rffi.INT_real, error=CANNOT_FAIL)
 def PyDict_Next(space, w_dict, ppos, pkey, pvalue):
@@ -175,7 +175,7 @@
     # not complete.
 
     try:
-        w_iter = space.iter(space.call_method(w_dict, "items"))
+        w_iter = space.iter(space.call_method(space.w_dict, "items", w_dict))
         pos = ppos[0]
         while pos:
             space.next(w_iter)
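
The intent of these dictobject changes (see also the issue2023 entry in the
whatsnew file above) can be pictured at application level: going through the
concrete base type ignores any override installed by a subclass.  A
hypothetical pure-Python illustration:

    class LoudDict(dict):
        def clear(self):
            raise RuntimeError("the C-level PyDict_Clear must not call this")

    d = LoudDict(a=1, b=2)
    # d.clear() would dispatch to the override above; calling through the
    # concrete base type clears the dict regardless of the override
    dict.clear(d)
    assert len(d) == 0
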
diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py
--- a/pypy/module/cpyext/listobject.py
+++ b/pypy/module/cpyext/listobject.py
@@ -65,7 +65,7 @@
     """Insert the item item into list list in front of index index.  Return
     0 if successful; return -1 and set an exception if unsuccessful.
     Analogous to list.insert(index, item)."""
-    space.call_method(w_list, "insert", space.wrap(index), w_item)
+    space.call_method(space.w_list, "insert", w_list, space.wrap(index), w_item)
     return 0
 
 @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
@@ -98,7 +98,7 @@
     failure.  This is equivalent to list.sort()."""
     if not isinstance(w_list, W_ListObject):
         PyErr_BadInternalCall(space)
-    space.call_method(w_list, "sort")
+    space.call_method(space.w_list, "sort", w_list)
     return 0
 
 @cpython_api([PyObject], rffi.INT_real, error=-1)
@@ -107,7 +107,7 @@
     failure.  This is the equivalent of list.reverse()."""
     if not isinstance(w_list, W_ListObject):
         PyErr_BadInternalCall(space)
-    space.call_method(w_list, "reverse")
+    space.call_method(space.w_list, "reverse", w_list)
     return 0
 
 @cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject)
diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py
--- a/pypy/module/cpyext/longobject.py
+++ b/pypy/module/cpyext/longobject.py
@@ -186,6 +186,17 @@
         pend[0] = rffi.ptradd(str, len(s))
     return space.call_function(space.w_int, w_str, w_base)
 
+@cpython_api([rffi.CWCHARP, Py_ssize_t, rffi.INT_real], PyObject)
+def PyLong_FromUnicode(space, u, length, base):
+    """Convert a sequence of Unicode digits to a Python long integer value.
+    The first parameter, u, points to the first character of the Unicode
+    string, length gives the number of characters, and base is the radix
+    for the conversion.  The radix must be in the range [2, 36]; if it is
+    out of range, ValueError will be raised."""
+    w_value = space.wrap(rffi.wcharpsize2unicode(u, length))
+    w_base = space.wrap(rffi.cast(lltype.Signed, base))
+    return space.call_function(space.w_long, w_value, w_base)
+
 @cpython_api([rffi.VOIDP], PyObject)
 def PyLong_FromVoidPtr(space, p):
     """Create a Python integer or long integer from the pointer p. The pointer 
value
diff --git a/pypy/module/cpyext/setobject.py b/pypy/module/cpyext/setobject.py
--- a/pypy/module/cpyext/setobject.py
+++ b/pypy/module/cpyext/setobject.py
@@ -36,7 +36,7 @@
     values of brand new frozensets before they are exposed to other code."""
     if not PySet_Check(space, w_s):
         PyErr_BadInternalCall(space)
-    space.call_method(w_s, 'add', w_obj)
+    space.call_method(space.w_set, 'add', w_s, w_obj)
     return 0
 
 @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1)
@@ -49,7 +49,7 @@
     instance of set or its subtype."""
     if not PySet_Check(space, w_s):
         PyErr_BadInternalCall(space)
-    space.call_method(w_s, 'discard', w_obj)
+    space.call_method(space.w_set, 'discard', w_s, w_obj)
     return 0
 
 
@@ -59,12 +59,12 @@
     object from the set.  Return NULL on failure.  Raise KeyError if the
     set is empty. Raise a SystemError if set is an not an instance of
     set or its subtype."""
-    return space.call_method(w_set, "pop")
+    return space.call_method(space.w_set, "pop", w_set)
 
 @cpython_api([PyObject], rffi.INT_real, error=-1)
 def PySet_Clear(space, w_set):
     """Empty an existing set of all elements."""
-    space.call_method(w_set, 'clear')
+    space.call_method(space.w_set, 'clear', w_set)
     return 0
 
 @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py
--- a/pypy/module/cpyext/stubs.py
+++ b/pypy/module/cpyext/stubs.py
@@ -1501,15 +1501,8 @@
 def PyCallIter_Check(space, op):
     """Return true if the type of op is PyCallIter_Type."""
     raise NotImplementedError
-    
 
-@cpython_api([rffi.CArrayPtr(Py_UNICODE), Py_ssize_t, rffi.INT_real], PyObject)
-def PyLong_FromUnicode(space, u, length, base):
-    """Convert a sequence of Unicode digits to a Python integer value.  The Unicode
-    string is first encoded to a byte string using PyUnicode_EncodeDecimal()
-    and then converted using PyLong_FromString()."""
-    raise NotImplementedError
-    
+
 
 @cpython_api([PyObject], rffi.SIZE_T, error=-1)
 def PyLong_AsSize_t(space, pylong):
@@ -1519,7 +1512,7 @@
     Raise OverflowError if the value of pylong is out of range for a
     size_t."""
     raise NotImplementedError
-    
+
 
 @cpython_api([PyObject, rffi.CCHARP], rffi.INT_real, error=-1)
 def PyMapping_DelItemString(space, o, key):
diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py
--- a/pypy/module/cpyext/test/test_longobject.py
+++ b/pypy/module/cpyext/test/test_longobject.py
@@ -185,3 +185,16 @@
         assert module.from_bytearray(False, False) == 0xBC9A
         assert module.from_bytearray(False, True) == -0x4365
 
+    def test_fromunicode(self):
+        module = self.import_extension('foo', [
+            ("from_unicode", "METH_O",
+             """
+                 Py_UNICODE* u = PyUnicode_AsUnicode(args);
+                 return Py_BuildValue("NN",
+                     PyLong_FromUnicode(u, 6, 10),
+                     PyLong_FromUnicode(u, 6, 16));
+             """),
+            ])
+        # A string with arabic digits. 'BAD' is after the 6th character.
+        assert module.from_unicode(u'  1\u0662\u0663\u0664BAD') == (1234, 4660)
+
diff --git a/pypy/module/cpyext/test/test_ztranslation.py b/pypy/module/cpyext/test/test_ztranslation.py
--- a/pypy/module/cpyext/test/test_ztranslation.py
+++ b/pypy/module/cpyext/test/test_ztranslation.py
@@ -7,4 +7,4 @@
         # original from_ref, just return w_some_obj
         return space.w_object
     monkeypatch.setattr(pyobject, 'from_ref', from_ref)
-    checkmodule('cpyext', '_rawffi')
+    checkmodule('cpyext', '_rawffi', translate_startup=False)
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -30,6 +30,9 @@
     for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']:
         interpleveldefs[c] = 'space.wrap(constants.%s)' % c
 
+    def startup(self, space):
+        from pypy.module.micronumpy.concrete import _setup
+        _setup()
 
 class UMathModule(MixedModule):
     appleveldefs = {}
diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py
--- a/pypy/module/micronumpy/base.py
+++ b/pypy/module/micronumpy/base.py
@@ -34,11 +34,13 @@
 
     @staticmethod
     def from_shape(space, shape, dtype, order='C', w_instance=None, zero=True):
-        from pypy.module.micronumpy import concrete
+        from pypy.module.micronumpy import concrete, descriptor, boxes
         from pypy.module.micronumpy.strides import calc_strides
         strides, backstrides = calc_strides(shape, dtype.base, order)
         impl = concrete.ConcreteArray(shape, dtype.base, order, strides,
                                       backstrides, zero=zero)
+        if dtype == descriptor.get_dtype_cache(space).w_objectdtype:
+            impl.fill(space, boxes.W_ObjectBox(space.w_None))
         if w_instance:
             return wrap_impl(space, space.type(w_instance), w_instance, impl)
         return W_NDimArray(impl)
@@ -123,7 +125,7 @@
     def get_shape(self):
         return self.implementation.get_shape()
 
-    def get_dtype(self):
+    def get_dtype(self, space=None):
         return self.implementation.dtype
 
     def get_order(self):
diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py
--- a/pypy/module/micronumpy/boxes.py
+++ b/pypy/module/micronumpy/boxes.py
@@ -596,6 +596,19 @@
         #    arr.storage[i] = arg[i]
         return W_UnicodeBox(arr, 0, arr.dtype)
 
+class W_ObjectBox(W_GenericBox):
+    descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.OBJECT)
+
+    def __init__(self, w_obj):
+        self.w_obj = w_obj
+
+    def convert_to(self, space, dtype):
+        if dtype.is_bool():
+            return W_BoolBox(space.bool_w(self.w_obj))
+        return self # XXX
+
+    def descr__getattr__(self, space, w_key):
+        return space.getattr(self.w_obj, w_key)
 
 W_GenericBox.typedef = TypeDef("numpy.generic",
     __new__ = interp2app(W_GenericBox.descr__new__.im_func),
@@ -844,3 +857,9 @@
     __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func),
     __len__ = interp2app(W_UnicodeBox.descr_len),
 )
+
+W_ObjectBox.typedef = TypeDef("numpy.object_", W_ObjectBox.typedef,
+    __new__ = interp2app(W_ObjectBox.descr__new__.im_func),
+    __getattr__ = interp2app(W_ObjectBox.descr__getattr__),
+)
+
diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
--- a/pypy/module/micronumpy/compile.py
+++ b/pypy/module/micronumpy/compile.py
@@ -3,7 +3,7 @@
 """
 import re
 from pypy.interpreter import special
-from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root
+from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root, ObjSpace
 from pypy.interpreter.error import OperationError
 from rpython.rlib.objectmodel import specialize, instantiate
 from rpython.rlib.nonconst import NonConstant
@@ -47,7 +47,7 @@
     def lookup(self, name):
         return self.getdictvalue(self, name)
 
-class FakeSpace(object):
+class FakeSpace(ObjSpace):
     w_ValueError = W_TypeObject("ValueError")
     w_TypeError = W_TypeObject("TypeError")
     w_IndexError = W_TypeObject("IndexError")
@@ -67,6 +67,7 @@
     w_unicode = W_TypeObject("unicode")
     w_complex = W_TypeObject("complex")
     w_dict = W_TypeObject("dict")
+    w_object = W_TypeObject("object")
 
     def __init__(self):
         """NOT_RPYTHON"""
@@ -88,7 +89,8 @@
         return self.wrap(len(w_obj.items))
 
     def getattr(self, w_obj, w_attr):
-        return StringObject(NonConstant('foo'))
+        assert isinstance(w_attr, StringObject)
+        return w_obj.getdictvalue(self, w_attr.v)
 
     def isinstance_w(self, w_obj, w_tp):
         try:
diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -1,11 +1,11 @@
 from pypy.interpreter.error import OperationError, oefmt
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
 from rpython.rlib.buffer import Buffer
-from rpython.rlib.debug import make_sure_not_resized
+from rpython.rlib.debug import make_sure_not_resized, debug_print
 from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \
     raw_storage_getitem, raw_storage_setitem, RAW_STORAGE
-from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.module.micronumpy import support, loop
+from rpython.rtyper.lltypesystem import rffi, lltype, llmemory
+from pypy.module.micronumpy import support, loop, constants as NPY
 from pypy.module.micronumpy.base import convert_to_array, W_NDimArray, \
     ArrayArgumentException
 from pypy.module.micronumpy.iterators import ArrayIter
@@ -13,11 +13,13 @@
     RecordChunk, calc_strides, calc_new_strides, shape_agreement,
     calculate_broadcast_strides, calc_backstrides)
 from rpython.rlib.objectmodel import keepalive_until_here
+from rpython.rtyper.annlowlevel import cast_gcref_to_instance
+from pypy.interpreter.baseobjspace import W_Root
 
 
 class BaseConcreteArray(object):
     _immutable_fields_ = ['dtype?', 'storage', 'start', 'size', 'shape[*]',
-                          'strides[*]', 'backstrides[*]', 'order']
+                          'strides[*]', 'backstrides[*]', 'order', 'gcstruct']
     start = 0
     parent = None
     flags = 0
@@ -333,6 +335,44 @@
         loop.setslice(space, impl.get_shape(), impl, self)
         return impl
 
+OBJECTSTORE = lltype.GcStruct('ObjectStore',
+                              ('length', lltype.Signed),
+                              ('step', lltype.Signed),
+                              ('storage', llmemory.Address),
+                              rtti=True)
+offset_of_storage = llmemory.offsetof(OBJECTSTORE, 'storage')
+offset_of_length = llmemory.offsetof(OBJECTSTORE, 'length')
+offset_of_step = llmemory.offsetof(OBJECTSTORE, 'step')
+
+V_OBJECTSTORE = lltype.nullptr(OBJECTSTORE)
+
+def customtrace(gc, obj, callback, arg):
+    #debug_print('in customtrace w/obj', obj)
+    length = (obj + offset_of_length).signed[0]
+    step = (obj + offset_of_step).signed[0]
+    storage = (obj + offset_of_storage).address[0]
+    #debug_print('tracing', length, 'objects in ndarray.storage')
+    i = 0
+    while i < length:
+        gc._trace_callback(callback, arg, storage)
+        storage += step
+        i += 1
+    
+lambda_customtrace = lambda: customtrace
+
+def _setup():
+    rgc.register_custom_trace_hook(OBJECTSTORE, lambda_customtrace)
+
[email protected]_look_inside
+def _create_objectstore(storage, length, elsize):
+    gcstruct = lltype.malloc(OBJECTSTORE)
+    # JIT does not support cast_ptr_to_adr
+    gcstruct.storage = llmemory.cast_ptr_to_adr(storage)
+    #print 'create gcstruct',gcstruct,'with storage',storage,'as',gcstruct.storage
+    gcstruct.length = length
+    gcstruct.step = elsize
+    return gcstruct
+
 
 class ConcreteArrayNotOwning(BaseConcreteArray):
     def __init__(self, shape, dtype, order, strides, backstrides, storage, start=0):
@@ -347,10 +387,11 @@
         self.backstrides = backstrides
         self.storage = storage
         self.start = start
+        self.gcstruct = V_OBJECTSTORE
 
     def fill(self, space, box):
         self.dtype.itemtype.fill(self.storage, self.dtype.elsize,
-                                 box, 0, self.size, 0)
+                                 box, 0, self.size, 0, self.gcstruct)
 
     def set_shape(self, space, orig_array, new_shape):
         strides, backstrides = calc_strides(new_shape, self.dtype,
@@ -374,17 +415,24 @@
     def base(self):
         return None
 
-
 class ConcreteArray(ConcreteArrayNotOwning):
     def __init__(self, shape, dtype, order, strides, backstrides,
                  storage=lltype.nullptr(RAW_STORAGE), zero=True):
+        gcstruct = V_OBJECTSTORE
         if storage == lltype.nullptr(RAW_STORAGE):
-            storage = dtype.itemtype.malloc(support.product(shape) *
-                                            dtype.elsize, zero=zero)
+            length = support.product(shape) 
+            if dtype.num == NPY.OBJECT:
+                storage = dtype.itemtype.malloc(length * dtype.elsize, zero=True)
+                gcstruct = _create_objectstore(storage, length, dtype.elsize)
+            else:
+                storage = dtype.itemtype.malloc(length * dtype.elsize, zero=zero)
         ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides,
                                         storage)
+        self.gcstruct = gcstruct
 
     def __del__(self):
+        if self.gcstruct:
+            self.gcstruct.length = 0
         free_raw_storage(self.storage, track_allocation=False)
 
 
@@ -423,6 +471,7 @@
             parent = parent.parent # one level only
         self.parent = parent
         self.storage = parent.storage
+        self.gcstruct = parent.gcstruct
         self.order = parent.order
         self.dtype = dtype
         self.size = support.product(shape) * self.dtype.elsize
@@ -480,6 +529,7 @@
 class VoidBoxStorage(BaseConcreteArray):
     def __init__(self, size, dtype):
         self.storage = alloc_raw_storage(size)
+        self.gcstruct = V_OBJECTSTORE
         self.dtype = dtype
         self.size = size
 
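
The concrete.py hunk above teaches the GC about object arrays: OBJECTSTORE records the raw storage address, the element count and the byte step between elements, and the registered custom trace hook walks that buffer so the W_Root references kept in raw memory stay alive. A minimal plain-Python sketch of that walk, with a hypothetical report_ref callback standing in for gc._trace_callback (this is not the RPython code itself):

    def trace_object_storage(storage_addr, length, step, report_ref):
        # Visit `length` slots spaced `step` bytes apart and report each
        # address to the GC, mirroring customtrace() in the hunk above.
        addr = storage_addr
        for _ in range(length):
            report_ref(addr)
            addr += step

Setting gcstruct.length to 0 in ConcreteArray.__del__ makes this walk a no-op once the raw buffer is about to be freed.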
diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py
--- a/pypy/module/micronumpy/ctors.py
+++ b/pypy/module/micronumpy/ctors.py
@@ -38,6 +38,34 @@
         raise oefmt(space.w_ValueError,
                     "object __array__ method not producing an array")
 
+def try_interface_method(space, w_object):
+    try:
+        w_interface = space.getattr(w_object, space.wrap("__array_interface__"))
+    except OperationError, e:
+        if e.match(space, space.w_AttributeError):
+            return None
+        raise
+    if w_interface is None:
+        # happens from compile.py
+        return None
+    version = space.int_w(space.finditem(w_interface, space.wrap("version")))
+    if version < 3:
+        raise oefmt(space.w_NotImplementedError,
+                "__array_interface__ version %d not supported", version)
+    # make a view into the data
+    w_shape = space.finditem(w_interface, space.wrap('shape'))
+    w_dtype = space.finditem(w_interface, space.wrap('typestr'))
+    w_descr = space.finditem(w_interface, space.wrap('descr'))
+    data_w = space.listview(space.finditem(w_interface, space.wrap('data')))
+    w_strides = space.finditem(w_interface, space.wrap('strides'))
+    shape = [space.int_w(i) for i in space.listview(w_shape)]
+    dtype = descriptor.decode_w_dtype(space, w_dtype)
+    rw = space.is_true(data_w[1])
+    #print 'create view from shape',shape,'dtype',dtype,'descr',w_descr,'data',data_w[0],'rw',rw
+    raise oefmt(space.w_NotImplementedError,
+                "creating array from __array_interface__ not supported yet")
+    return 
+    
 
 @unwrap_spec(ndmin=int, copy=bool, subok=bool)
 def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False,
@@ -63,7 +91,11 @@
             # continue with w_array, but do further operations in place
             w_object = w_array
             copy = False
-
+    if not isinstance(w_object, W_NDimArray):
+        w_array = try_interface_method(space, w_object)
+        if w_array is not None:
+            w_object = w_array
+            copy = False
     dtype = descriptor.decode_w_dtype(space, w_dtype)
 
     if space.is_none(w_order):
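
For context on the ctors.py hunk: try_interface_method reads the standard __array_interface__ protocol (version >= 3) from non-ndarray arguments passed to array(), although it still raises NotImplementedError before actually building the view. The dictionary it inspects is produced at app level and looks roughly like the sketch below; the class name and buffer layout are illustrative, not part of the patch:

    import ctypes

    class WithArrayInterface(object):
        # Minimal producer of the __array_interface__ protocol, version 3,
        # exposing a flat, C-contiguous float64 buffer.
        def __init__(self, values):
            self._buf = (ctypes.c_double * len(values))(*values)
            self.__array_interface__ = {
                'version': 3,                                  # must be >= 3
                'shape': (len(values),),
                'typestr': '<f8',                              # little-endian float64
                'descr': [('', '<f8')],
                'data': (ctypes.addressof(self._buf), False),  # (pointer, read-only flag)
                'strides': None,                               # None means C-contiguous
            }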
diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py
--- a/pypy/module/micronumpy/descriptor.py
+++ b/pypy/module/micronumpy/descriptor.py
@@ -6,7 +6,7 @@
 from pypy.interpreter.typedef import (TypeDef, GetSetProperty,
                                       interp_attrproperty, interp_attrproperty_w)
 from rpython.rlib import jit
-from rpython.rlib.objectmodel import specialize, compute_hash
+from rpython.rlib.objectmodel import specialize, compute_hash, we_are_translated
 from rpython.rlib.rarithmetic import r_longlong, r_ulonglong
 from pypy.module.micronumpy import types, boxes, base, support, constants as NPY
 from pypy.module.micronumpy.appbridge import get_appbridge_cache
@@ -56,7 +56,7 @@
         self.char = char
         self.w_box_type = w_box_type
         if byteorder is None:
-            if itemtype.get_element_size() == 1:
+            if itemtype.get_element_size() == 1 or isinstance(itemtype, types.ObjectType):
                 byteorder = NPY.IGNORE
             else:
                 byteorder = NPY.NATIVE
@@ -112,6 +112,9 @@
     def is_str(self):
         return self.num == NPY.STRING
 
+    def is_object(self):
+        return self.num == NPY.OBJECT
+
     def is_str_or_unicode(self):
         return self.num == NPY.STRING or self.num == NPY.UNICODE
 
@@ -429,7 +432,7 @@
 
                 self.names.append(name)
                 self.fields[name] = offset, dtype
-            self.itemtype = types.RecordType()
+            self.itemtype = types.RecordType(space)
 
         if self.is_flexible():
             self.elsize = size
@@ -444,7 +447,7 @@
                 endian = NPY.OPPBYTE if self.is_native() else NPY.NATBYTE
             elif newendian != NPY.IGNORE:
                 endian = newendian
-        itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE))
+        itemtype = self.itemtype.__class__(space, endian in (NPY.NATIVE, NPY.NATBYTE))
         fields = self.fields
         if fields is None:
             fields = {}
@@ -483,7 +486,7 @@
         fields[fldname] = (offset, subdtype)
         offset += subdtype.elsize
         names.append(fldname)
-    return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR,
+    return W_Dtype(types.RecordType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR,
                    space.gettypefor(boxes.W_VoidBox),
                    names=names, fields=fields, elsize=offset)
 
@@ -494,8 +497,17 @@
 
 
 def dtype_from_spec(space, w_spec):
-    w_lst = get_appbridge_cache(space).call_method(space,
-        'numpy.core._internal', '_commastring', Arguments(space, [w_spec]))
+
+    if we_are_translated():
+        w_lst = get_appbridge_cache(space).call_method(space,
+            'numpy.core._internal', '_commastring', Arguments(space, [w_spec]))
+    else:
+        # testing, handle manually
+        if space.eq_w(w_spec, space.wrap('u4,u4,u4')):
+            w_lst = space.newlist([space.wrap('u4')]*3)
+        else:
+            raise oefmt(space.w_RuntimeError,
+                    "cannot parse w_spec")
     if not space.isinstance_w(w_lst, space.w_list) or space.len_w(w_lst) < 1:
         raise oefmt(space.w_RuntimeError,
                     "_commastring is not returning a list with len >= 1")
@@ -542,15 +554,15 @@
         if size == 1:
             return subdtype
         size *= subdtype.elsize
-        return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR,
+        return W_Dtype(types.VoidType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR,
                        space.gettypefor(boxes.W_VoidBox),
                        shape=shape, subdtype=subdtype, elsize=size)
 
     if space.is_none(w_dtype):
         return cache.w_float64dtype
-    elif space.isinstance_w(w_dtype, w_subtype):
+    if space.isinstance_w(w_dtype, w_subtype):
         return w_dtype
-    elif space.isinstance_w(w_dtype, space.w_unicode):
+    if space.isinstance_w(w_dtype, space.w_unicode):
         name = space.str_w(w_dtype)
         if _check_for_commastring(name):
             return dtype_from_spec(space, w_dtype)
@@ -586,8 +598,7 @@
         if w_dtype is dtype.w_box_type:
             return dtype
     if space.isinstance_w(w_dtype, space.w_type):
-        raise oefmt(space.w_NotImplementedError,
-            "cannot create dtype with type '%N'", w_dtype)
+        return cache.w_objectdtype
     raise oefmt(space.w_TypeError, "data type not understood")
 
 
@@ -654,7 +665,7 @@
 
 def new_string_dtype(space, size, char=NPY.STRINGLTR):
     return W_Dtype(
-        types.StringType(),
+        types.StringType(space),
         elsize=size,
         num=NPY.STRING,
         kind=NPY.STRINGLTR,
@@ -664,7 +675,7 @@
 
 
 def new_unicode_dtype(space, size):
-    itemtype = types.UnicodeType()
+    itemtype = types.UnicodeType(space)
     return W_Dtype(
         itemtype,
         elsize=size * itemtype.get_element_size(),
@@ -677,7 +688,7 @@
 
 def new_void_dtype(space, size):
     return W_Dtype(
-        types.VoidType(),
+        types.VoidType(space),
         elsize=size,
         num=NPY.VOID,
         kind=NPY.VOIDLTR,
@@ -689,126 +700,126 @@
 class DtypeCache(object):
     def __init__(self, space):
         self.w_booldtype = W_Dtype(
-            types.Bool(),
+            types.Bool(space),
             num=NPY.BOOL,
             kind=NPY.GENBOOLLTR,
             char=NPY.BOOLLTR,
             w_box_type=space.gettypefor(boxes.W_BoolBox),
         )
         self.w_int8dtype = W_Dtype(
-            types.Int8(),
+            types.Int8(space),
             num=NPY.BYTE,
             kind=NPY.SIGNEDLTR,
             char=NPY.BYTELTR,
             w_box_type=space.gettypefor(boxes.W_Int8Box),
         )
         self.w_uint8dtype = W_Dtype(
-            types.UInt8(),
+            types.UInt8(space),
             num=NPY.UBYTE,
             kind=NPY.UNSIGNEDLTR,
             char=NPY.UBYTELTR,
             w_box_type=space.gettypefor(boxes.W_UInt8Box),
         )
         self.w_int16dtype = W_Dtype(
-            types.Int16(),
+            types.Int16(space),
             num=NPY.SHORT,
             kind=NPY.SIGNEDLTR,
             char=NPY.SHORTLTR,
             w_box_type=space.gettypefor(boxes.W_Int16Box),
         )
         self.w_uint16dtype = W_Dtype(
-            types.UInt16(),
+            types.UInt16(space),
             num=NPY.USHORT,
             kind=NPY.UNSIGNEDLTR,
             char=NPY.USHORTLTR,
             w_box_type=space.gettypefor(boxes.W_UInt16Box),
         )
         self.w_int32dtype = W_Dtype(
-            types.Int32(),
+            types.Int32(space),
             num=NPY.INT,
             kind=NPY.SIGNEDLTR,
             char=NPY.INTLTR,
             w_box_type=space.gettypefor(boxes.W_Int32Box),
         )
         self.w_uint32dtype = W_Dtype(
-            types.UInt32(),
+            types.UInt32(space),
             num=NPY.UINT,
             kind=NPY.UNSIGNEDLTR,
             char=NPY.UINTLTR,
             w_box_type=space.gettypefor(boxes.W_UInt32Box),
         )
         self.w_longdtype = W_Dtype(
-            types.Long(),
+            types.Long(space),
             num=NPY.LONG,
             kind=NPY.SIGNEDLTR,
             char=NPY.LONGLTR,
             w_box_type=space.gettypefor(boxes.W_LongBox),
         )
         self.w_ulongdtype = W_Dtype(
-            types.ULong(),
+            types.ULong(space),
             num=NPY.ULONG,
             kind=NPY.UNSIGNEDLTR,
             char=NPY.ULONGLTR,
             w_box_type=space.gettypefor(boxes.W_ULongBox),
         )
         self.w_int64dtype = W_Dtype(
-            types.Int64(),
+            types.Int64(space),
             num=NPY.LONGLONG,
             kind=NPY.SIGNEDLTR,
             char=NPY.LONGLONGLTR,
             w_box_type=space.gettypefor(boxes.W_Int64Box),
         )
         self.w_uint64dtype = W_Dtype(
-            types.UInt64(),
+            types.UInt64(space),
             num=NPY.ULONGLONG,
             kind=NPY.UNSIGNEDLTR,
             char=NPY.ULONGLONGLTR,
             w_box_type=space.gettypefor(boxes.W_UInt64Box),
         )
         self.w_float32dtype = W_Dtype(
-            types.Float32(),
+            types.Float32(space),
             num=NPY.FLOAT,
             kind=NPY.FLOATINGLTR,
             char=NPY.FLOATLTR,
             w_box_type=space.gettypefor(boxes.W_Float32Box),
         )
         self.w_float64dtype = W_Dtype(
-            types.Float64(),
+            types.Float64(space),
             num=NPY.DOUBLE,
             kind=NPY.FLOATINGLTR,
             char=NPY.DOUBLELTR,
             w_box_type=space.gettypefor(boxes.W_Float64Box),
         )
         self.w_floatlongdtype = W_Dtype(
-            types.FloatLong(),
+            types.FloatLong(space),
             num=NPY.LONGDOUBLE,
             kind=NPY.FLOATINGLTR,
             char=NPY.LONGDOUBLELTR,
             w_box_type=space.gettypefor(boxes.W_FloatLongBox),
         )
         self.w_complex64dtype = W_Dtype(
-            types.Complex64(),
+            types.Complex64(space),
             num=NPY.CFLOAT,
             kind=NPY.COMPLEXLTR,
             char=NPY.CFLOATLTR,
             w_box_type=space.gettypefor(boxes.W_Complex64Box),
         )
         self.w_complex128dtype = W_Dtype(
-            types.Complex128(),
+            types.Complex128(space),
             num=NPY.CDOUBLE,
             kind=NPY.COMPLEXLTR,
             char=NPY.CDOUBLELTR,
             w_box_type=space.gettypefor(boxes.W_Complex128Box),
         )
         self.w_complexlongdtype = W_Dtype(
-            types.ComplexLong(),
+            types.ComplexLong(space),
             num=NPY.CLONGDOUBLE,
             kind=NPY.COMPLEXLTR,
             char=NPY.CLONGDOUBLELTR,
             w_box_type=space.gettypefor(boxes.W_ComplexLongBox),
         )
         self.w_stringdtype = W_Dtype(
-            types.StringType(),
+            types.StringType(space),
             elsize=0,
             num=NPY.STRING,
             kind=NPY.STRINGLTR,
@@ -816,7 +827,7 @@
             w_box_type=space.gettypefor(boxes.W_StringBox),
         )
         self.w_unicodedtype = W_Dtype(
-            types.UnicodeType(),
+            types.UnicodeType(space),
             elsize=0,
             num=NPY.UNICODE,
             kind=NPY.UNICODELTR,
@@ -824,7 +835,7 @@
             w_box_type=space.gettypefor(boxes.W_UnicodeBox),
         )
         self.w_voiddtype = W_Dtype(
-            types.VoidType(),
+            types.VoidType(space),
             elsize=0,
             num=NPY.VOID,
             kind=NPY.VOIDLTR,
@@ -832,26 +843,33 @@
             w_box_type=space.gettypefor(boxes.W_VoidBox),
         )
         self.w_float16dtype = W_Dtype(
-            types.Float16(),
+            types.Float16(space),
             num=NPY.HALF,
             kind=NPY.FLOATINGLTR,
             char=NPY.HALFLTR,
             w_box_type=space.gettypefor(boxes.W_Float16Box),
         )
         self.w_intpdtype = W_Dtype(
-            types.Long(),
+            types.Long(space),
             num=NPY.LONG,
             kind=NPY.SIGNEDLTR,
             char=NPY.INTPLTR,
             w_box_type=space.gettypefor(boxes.W_LongBox),
         )
         self.w_uintpdtype = W_Dtype(
-            types.ULong(),
+            types.ULong(space),
             num=NPY.ULONG,
             kind=NPY.UNSIGNEDLTR,
             char=NPY.UINTPLTR,
             w_box_type=space.gettypefor(boxes.W_ULongBox),
         )
+        self.w_objectdtype = W_Dtype(
+            types.ObjectType(space),
+            num=NPY.OBJECT,
+            kind=NPY.OBJECTLTR,
+            char=NPY.OBJECTLTR,
+            w_box_type=space.gettypefor(boxes.W_ObjectBox),
+        )
         aliases = {
             NPY.BOOL:        ['bool_', 'bool8'],
             NPY.BYTE:        ['byte'],
@@ -870,6 +888,7 @@
             NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'],
             NPY.STRING:      ['string_', 'str'],
             NPY.UNICODE:     ['unicode_'],
+            NPY.OBJECT:      ['object_'],
         }
         self.alternate_constructors = {
             NPY.BOOL:     [space.w_bool],
@@ -888,6 +907,8 @@
             NPY.UNICODE:  [space.w_unicode],
             NPY.VOID:     [space.gettypefor(boxes.W_GenericBox)],
                            #space.w_buffer,  # XXX no buffer in space
+            NPY.OBJECT:   [space.gettypefor(boxes.W_ObjectBox),
+                           space.w_object],
         }
         float_dtypes = [self.w_float16dtype, self.w_float32dtype,
                         self.w_float64dtype, self.w_floatlongdtype]
@@ -907,7 +928,7 @@
             self.w_int64dtype, self.w_uint64dtype,
             ] + float_dtypes + complex_dtypes + [
             self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype,
-            self.w_intpdtype, self.w_uintpdtype,
+            self.w_intpdtype, self.w_uintpdtype, self.w_objectdtype,
         ]
         self.float_dtypes_by_num_bytes = sorted(
             (dtype.elsize, dtype)
@@ -959,6 +980,7 @@
             'USHORT': self.w_uint16dtype,
             'FLOAT': self.w_float32dtype,
             'BOOL': self.w_booldtype,
+            'OBJECT': self.w_objectdtype,
         }
 
         typeinfo_partial = {
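
With the descriptor.py changes, every itemtype is now constructed with the space, and an object dtype (NPY.OBJECT, kind/char 'O') is registered in DtypeCache, including the 'object_' alias and the W_ObjectBox/w_object constructors; a dtype built from an arbitrary app-level type resolves to it instead of raising NotImplementedError. Roughly, at app level (a sketch assuming a numpy-compatible module is importable, not a test from the patch):

    import numpy as np

    dt = np.dtype(object)
    assert dt.kind == 'O'             # NPY.OBJECTLTR
    assert dt.byteorder == '|'        # NPY.IGNORE: byte order is meaningless here
    assert np.dtype('object_') == dt  # 'object_' alias registered in DtypeCache

    # Under this patch, np.dtype(SomeAppLevelClass) also resolves to the
    # object dtype rather than raising NotImplementedError.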
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -202,11 +202,16 @@
             return self
         elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \
                 and w_idx.ndims() > 0:
-            return self.getitem_filter(space, w_idx)
-        try:
-            return self.implementation.descr_getitem(space, self, w_idx)
-        except ArrayArgumentException:
-            return self.getitem_array_int(space, w_idx)
+            w_ret = self.getitem_filter(space, w_idx)
+        else:
+            try:
+                w_ret = self.implementation.descr_getitem(space, self, w_idx)
+            except ArrayArgumentException:
+                w_ret = self.getitem_array_int(space, w_idx)
+        if isinstance(w_ret, boxes.W_ObjectBox):
+            #return the W_Root object, not a scalar
+            w_ret = w_ret.w_obj
+        return w_ret
 
     def getitem(self, space, index_list):
         return self.implementation.getitem_index(space, index_list)
@@ -550,6 +555,7 @@
             else:
                 strides = self.descr_get_strides(space)
             space.setitem_str(w_d, 'strides', strides)
+            space.setitem_str(w_d, 'version', space.wrap(3))
             return w_d
 
     w_pypy_data = None
@@ -837,7 +843,7 @@
                         "new type not compatible with array."))
                 # Strides, shape does not change
                 v = impl.astype(space, dtype)
-                return wrap_impl(space, w_type, self, v) 
+                return wrap_impl(space, w_type, self, v)
             strides = impl.get_strides()
             if dims == 1 or strides[0] <strides[-1]:
                 # Column-major, resize first dimension
@@ -1187,7 +1193,7 @@
                         "improper dtype '%R'", dtype)
         self.implementation = W_NDimArray.from_shape_and_storage(
             space, [space.int_w(i) for i in space.listview(shape)],
-            rffi.str2charp(space.str_w(storage), track_allocation=False), 
+            rffi.str2charp(space.str_w(storage), track_allocation=False),
             dtype, storage_bytes=space.len_w(storage), owning=True).implementation
 
     def descr___array_finalize__(self, space, w_obj):
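
The ndarray.py hunk unwraps W_ObjectBox on item access, so indexing an object array hands back the stored application object itself rather than a scalar box, and the __array_interface__ getter now advertises protocol version 3. An app-level illustration (a sketch, not a test from the patch):

    import numpy as np

    class Thing(object):
        pass

    t = Thing()
    a = np.empty(1, dtype=object)
    a[0] = t
    assert a[0] is t          # the stored object comes back, not a wrapping box

    b = np.zeros((2, 3))
    assert b.__array_interface__['version'] == 3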
diff --git a/pypy/module/micronumpy/test/dummy_module.py b/pypy/module/micronumpy/test/dummy_module.py
--- a/pypy/module/micronumpy/test/dummy_module.py
+++ b/pypy/module/micronumpy/test/dummy_module.py
@@ -20,7 +20,7 @@