Author: Matti Picus <matti.pi...@gmail.com>
Branch: py3.6
Changeset: r93795:d9e0d13cf28e
Date: 2018-02-09 17:28 -0500
http://bitbucket.org/pypy/pypy/changeset/d9e0d13cf28e/

Log:    merge py3.5

diff too long, truncating to 2000 out of 2839 lines

diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -155,9 +155,10 @@
     factory = Connection if not factory else factory
     # an sqlite3 db seems to be around 100 KiB at least (doesn't matter if
     # backed by :memory: or a file)
+    res = factory(database, timeout, detect_types, isolation_level,
+                    check_same_thread, factory, cached_statements, uri)
     add_memory_pressure(100 * 1024)
-    return factory(database, timeout, detect_types, isolation_level,
-                    check_same_thread, factory, cached_statements, uri)
+    return res
 
 
 def _unicode_text_factory(x):
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,11 +1,12 @@
 Metadata-Version: 1.1
 Name: cffi
-Version: 1.11.3
+Version: 1.11.4
 Summary: Foreign Function Interface for Python calling C code.
 Home-page: http://cffi.readthedocs.org
 Author: Armin Rigo, Maciej Fijalkowski
 Author-email: python-c...@googlegroups.com
 License: MIT
+Description-Content-Type: UNKNOWN
 Description: 
         CFFI
         ====
@@ -27,5 +28,7 @@
 Classifier: Programming Language :: Python :: 3.2
 Classifier: Programming Language :: Python :: 3.3
 Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
 from .api import FFI
 from .error import CDefError, FFIError, VerificationError, VerificationMissing
 
-__version__ = "1.11.3"
-__version_info__ = (1, 11, 3)
+__version__ = "1.11.4"
+__version_info__ = (1, 11, 4)
 
 # The verifier module file names are based on the CRC32 of a string that
 # contains the following version number.  It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -8,37 +8,20 @@
    the same works for the other two macros.  Py_DEBUG implies them,
    but not the other way around.
 
-   Issue #350: more mess: on Windows, with _MSC_VER, we have to define
-   Py_LIMITED_API even before including pyconfig.h.  In that case, we
-   guess what pyconfig.h will do to the macros above, and check our
-   guess after the #include.
+   Issue #350 is still open: on Windows, the code here causes it to link
+   with PYTHON36.DLL (for example) instead of PYTHON3.DLL.  A fix was
+   attempted in 164e526a5515 and 14ce6985e1c3, but reverted: virtualenv
+   does not make PYTHON3.DLL available, and so the "correctly" compiled
+   version would not run inside a virtualenv.  We will re-apply the fix
+   after virtualenv has been fixed for some time.  For explanation, see
+   issue #355.  For a workaround if you want PYTHON3.DLL and don't worry
+   about virtualenv, see issue #350.  See also 'py_limited_api' in
+   setuptools_ext.py.
 */
 #if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API)
-#  ifdef _MSC_VER
-#    if !defined(_DEBUG) && !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && 
!defined(Py_REF_DEBUG)
-#      define Py_LIMITED_API
-#    endif
-#    include <pyconfig.h>
-     /* sanity-check: Py_LIMITED_API will cause crashes if any of these
-        are also defined.  Normally, the Python file PC/pyconfig.h does not
-        cause any of these to be defined, with the exception that _DEBUG
-        causes Py_DEBUG.  Double-check that. */
-#    ifdef Py_LIMITED_API
-#      if defined(Py_DEBUG)
-#        error "pyconfig.h unexpectedly defines Py_DEBUG but _DEBUG is not set"
-#      endif
-#      if defined(Py_TRACE_REFS)
-#        error "pyconfig.h unexpectedly defines Py_TRACE_REFS"
-#      endif
-#      if defined(Py_REF_DEBUG)
-#        error "pyconfig.h unexpectedly defines Py_REF_DEBUG"
-#      endif
-#    endif
-#  else
-#    include <pyconfig.h>
-#    if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG)
-#      define Py_LIMITED_API
-#    endif
+#  include <pyconfig.h>
+#  if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG)
+#    define Py_LIMITED_API
 #  endif
 #endif
 
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -247,7 +247,7 @@
 
         if (f != NULL && f != Py_None) {
             PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
-                               "\ncompiled with cffi version: 1.11.3"
+                               "\ncompiled with cffi version: 1.11.4"
                                "\n_cffi_backend module: ", f);
             modules = PyImport_GetModuleDict();
             mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info
--- a/lib_pypy/greenlet.egg-info
+++ b/lib_pypy/greenlet.egg-info
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: greenlet
-Version: 0.4.12
+Version: 0.4.13
 Summary: Lightweight in-process concurrent programming
 Home-page: https://github.com/python-greenlet/greenlet
 Author: Ralf Schmitt (for CPython), PyPy team
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -2,7 +2,7 @@
 import __pypy__
 import _continuation
 
-__version__ = "0.4.12"
+__version__ = "0.4.13"
 
 # ____________________________________________________________
 # Exceptions
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -1,26 +1,41 @@
 Potential Project List
 ======================
 
-Google Summer of Code 2017
---------------------------
+Getting involved
+----------------
 
-PyPy is generally open to new ideas for Google Summer of Code. We are happy to 
accept good ideas around the PyPy ecosystem. If you need more information about 
the ideas we propose for this year please join us on irc, channel #pypy 
(freenode). If you are unsure, but still think that you can make a valuable 
contribution to PyPy, dont hesitate to contact us on #pypy or on our mailing 
list.
-
+We are happy to discuss ideas around the PyPy ecosystem.
+If you are interested in playing with RPython or PyPy, or have a new idea not
+mentioned here please join us on irc, channel #pypy (freenode). If you are 
unsure,
+but still think that you can make a valuable contribution to PyPy, don't
+hesitate to contact us on #pypy or on our mailing list. Here are some ideas
+to get you thinking:
 
 * **Optimize PyPy Memory Usage**:  Sometimes PyPy consumes more memory than 
CPython.
-  Two examples: 1) PyPy seems to allocate and keep alive more strings when 
importing a big Python modules.
-  2) The base interpreter size (cold VM started from a console) of PyPy is 
bigger than the one of CPython.
-  The general procedure of this project is: Run both CPython and PyPy of the 
same Python version and
-  compare the memory usage (using Massif or other tools).
+  Two examples: 1) PyPy seems to allocate and keep alive more strings when
+  importing big Python modules.  2) The base interpreter size (cold VM 
started
+  from a console) of PyPy is bigger than the one of CPython. The general
+  procedure of this project is: Run both CPython and PyPy of the same Python
+  version and compare the memory usage (using Massif or other tools).
   If PyPy consumes a lot more memory then find and resolve the issue.
 
-* **VMProf + memory profiler**: vmprof by now has a memory profiler that can 
be used already. We want extend it with more features and resolve some current 
limitations.
+* **VMProf + memory profiler**: vmprof is a statistical memory profiler. We
+  want to extend it with new features and resolve some current limitations.
 
-* **VMProf visualisations**: vmprof just shows a flame graph of the 
statistical profile and some more information about specific call sites. It 
would be very interesting to experiment with different information (such as 
memory, or even information generated by our jit compiler).
+* **VMProf visualisations**: vmprof shows a flame graph of the statistical
+  profile and some more information about specific call sites. It would be
+  very interesting to experiment with different information (such as memory,
+  or even information generated by our jit compiler).
 
-* **Explicit typing in RPython**: PyPy wants to have better ways to specify 
the signature and class attribute types in RPython. See more information about 
this topic below on this page.
+* **Explicit typing in RPython**: PyPy wants to have better ways to specify
+  the signature and class attribute types in RPython. See more information
+  about this topic below on this page.
 
-* **Virtual Reality (VR) visualisations for vmprof**: This is a very open 
topic with lots of freedom to explore data visualisation for profiles. No VR 
hardware would be needed for this project. Either universities provide such 
hardware or in any other case we potentially can lend the VR hardware setup.
+* **Virtual Reality (VR) visualisations for vmprof**: This is a very open
+  topic with lots of freedom to explore data visualisation for profiles. No
+  VR hardware would be needed for this project. Either universities provide
+  such hardware or in any other case we potentially can lend the VR hardware
+  setup.
 
 Simple tasks for newcomers
 --------------------------
@@ -34,6 +49,11 @@
 * Implement AF_XXX packet types of sockets:
   https://bitbucket.org/pypy/pypy/issue/1942/support-for-af_xxx-sockets
 
+* Help with documentation. One task would be to document rpython configuration
+  options currently listed only on :doc:`this site <configuration>` also on the
+  RPython_ documentation site.
+
+.. _RPython: http://rpython.readthedocs.io
 
 Mid-to-large tasks
 ------------------
@@ -201,7 +221,9 @@
 Introduce new benchmarks
 ------------------------
 
-We're usually happy to introduce new benchmarks. Please consult us
+Our benchmark runner_ is showing its age. We should merge with the `CPython 
site`_
+
+Additionally, we're usually happy to introduce new benchmarks. Please consult 
us
 before, but in general something that's real-world python code
 and is not already represented is welcome. We need at least a standalone
 script that can run without parameters. Example ideas (benchmarks need
@@ -209,6 +231,8 @@
 
 * `hg`
 
+.. _runner: http://speed.pypy.org
+.. _`CPython site`: https://speed.python.org/
 
 ======================================
 Make more python modules pypy-friendly
@@ -238,15 +262,6 @@
 using more pypy-friendly technologies, e.g. cffi. Here is a partial list of
 good work that needs to be finished:
 
-**matplotlib** https://github.com/matplotlib/matplotlib
-
-    Status: using the matplotlib branch of PyPy and the tkagg-cffi branch of
-    matplotlib from https://github.com/mattip/matplotlib/tree/tkagg-cffi, the
-    tkagg backend can function.
-
-    TODO: the matplotlib branch passes numpy arrays by value (copying all the
-    data), this proof-of-concept needs help to become completely compliant
-
 **wxPython** https://bitbucket.org/amauryfa/wxpython-cffi
 
     Status: A project by a PyPy developer to adapt the Phoenix sip build 
system to cffi
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -14,3 +14,29 @@
 .. branch: cpyext-datetime2
 
 Support ``tzinfo`` field on C-API datetime objects, fixes latest pandas HEAD
+
+
+.. branch: mapdict-size-limit
+
+Fix a corner case of mapdict: When an instance is used like a dict (using
+``setattr`` and ``getattr``, or ``.__dict__``) and a lot of attributes are
+added, then the performance using mapdict is linear in the number of
+attributes. This is now fixed (by switching to a regular dict after 80
+attributes).
+
+
+.. branch: cpyext-faster-arg-passing
+
+When using cpyext, improve the speed of passing certain objects from PyPy to C
+code, most notably None, True, False, types, all instances of C-defined types.
+Before, a dict lookup was needed every time such an object crossed over, now it
+is just a field read.
+
+
+.. branch: 2634_datetime_timedelta_performance
+
+Improve datetime + timedelta performance.
+
+.. branch: memory-accounting
+
+Improve the way to describe memory
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py 
b/pypy/interpreter/astcompiler/test/test_astbuilder.py
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -1422,3 +1422,11 @@
         exc = py.test.raises(SyntaxError, self.get_ast, input).value
         assert exc.msg == ("(unicode error) 'unicodeescape' codec can't decode"
                            " bytes in position 0-2: truncated \\xXX escape")
+
+    def test_decode_error_in_string_literal_correct_line(self):
+        input = "u'a' u'b'\\\n u'c' u'\\x'"
+        exc = py.test.raises(SyntaxError, self.get_ast, input).value
+        assert exc.msg == ("(unicode error) 'unicodeescape' codec can't decode"
+                           " bytes in position 0-1: truncated \\xXX escape")
+        assert exc.lineno == 2
+        assert exc.offset == 6
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -207,6 +207,21 @@
     def _set_mapdict_storage_and_map(self, storage, map):
         raise NotImplementedError
 
+
+    # -------------------------------------------------------------------
+    # cpyext support
+    # these functions will only be seen by the annotator if we translate
+    # with the cpyext module
+
+    def _cpyext_as_pyobj(self, space):
+        from pypy.module.cpyext.pyobject import w_root_as_pyobj
+        return w_root_as_pyobj(self, space)
+
+    def _cpyext_attach_pyobj(self, space, py_obj):
+        from pypy.module.cpyext.pyobject import w_root_attach_pyobj
+        return w_root_attach_pyobj(self, space, py_obj)
+
+
     # -------------------------------------------------------------------
 
     def is_w(self, space, w_other):
diff --git a/pypy/module/__pypy__/interp_magic.py 
b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -134,7 +134,7 @@
                   space.newbool(debug))
 
 @unwrap_spec(estimate=int)
-def add_memory_pressure(estimate):
+def add_memory_pressure(space, estimate):
     """ Add memory pressure of estimate bytes. Useful when calling a C function
     that internally allocates a big chunk of memory. This instructs the GC to
     garbage collect sooner than it would otherwise."""
diff --git a/pypy/module/_cffi_backend/__init__.py 
b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
 from rpython.rlib import rdynload, clibffi
 from rpython.rtyper.lltypesystem import rffi
 
-VERSION = "1.11.3"
+VERSION = "1.11.4"
 
 FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
 try:
diff --git a/pypy/module/_cffi_backend/allocator.py 
b/pypy/module/_cffi_backend/allocator.py
--- a/pypy/module/_cffi_backend/allocator.py
+++ b/pypy/module/_cffi_backend/allocator.py
@@ -21,13 +21,13 @@
         if self.w_alloc is None:
             if self.should_clear_after_alloc:
                 ptr = lltype.malloc(rffi.CCHARP.TO, datasize,
-                                    flavor='raw', zero=True,
-                                    add_memory_pressure=True)
+                                    flavor='raw', zero=True)
             else:
                 ptr = lltype.malloc(rffi.CCHARP.TO, datasize,
-                                    flavor='raw', zero=False,
-                                    add_memory_pressure=True)
-            return cdataobj.W_CDataNewStd(space, ptr, ctype, length)
+                                    flavor='raw', zero=False)
+            w_res = cdataobj.W_CDataNewStd(space, ptr, ctype, length)
+            rgc.add_memory_pressure(datasize, w_res)
+            return w_res
         else:
             w_raw_cdata = space.call_function(self.w_alloc,
                                               space.newint(datasize))
@@ -53,7 +53,7 @@
             if self.w_free is not None:
                 res.w_free = self.w_free
                 res.register_finalizer(space)
-            rgc.add_memory_pressure(datasize)
+            rgc.add_memory_pressure(datasize, res)
             return res
 
     @unwrap_spec(w_init=WrappedDefault(None))
diff --git a/pypy/module/_cffi_backend/cdataobj.py 
b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -447,7 +447,10 @@
             with self as ptr:
                 w_res = W_CDataGCP(space, ptr, self.ctype, self, w_destructor)
         if size != 0:
-            rgc.add_memory_pressure(size)
+            if isinstance(w_res, W_CDataGCP):
+                rgc.add_memory_pressure(size, w_res)
+            else:
+                rgc.add_memory_pressure(size, self)
         return w_res
 
     def unpack(self, length):
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py 
b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
 # ____________________________________________________________
 
 import sys
-assert __version__ == "1.11.3", ("This test_c.py file is for testing a version"
+assert __version__ == "1.11.4", ("This test_c.py file is for testing a version"
                                  " of cffi that differs from the one that we"
                                  " get from 'import _cffi_backend'")
 if sys.version_info < (3,):
diff --git a/pypy/module/_io/test/test_interp_textio.py 
b/pypy/module/_io/test/test_interp_textio.py
--- a/pypy/module/_io/test/test_interp_textio.py
+++ b/pypy/module/_io/test/test_interp_textio.py
@@ -1,6 +1,6 @@
 import pytest
 try:
-    from hypothesis import given, strategies as st
+    from hypothesis import given, strategies as st, settings
 except ImportError:
     pytest.skip("hypothesis required")
 import os
@@ -29,6 +29,7 @@
 
 @given(data=st_readline(),
        mode=st.sampled_from(['\r', '\n', '\r\n', '']))
+@settings(deadline=None)
 def test_readline(space, data, mode):
     txt, limits = data
     w_stream = W_BytesIO(space)
diff --git a/pypy/module/cpyext/include/longobject.h 
b/pypy/module/cpyext/include/longobject.h
--- a/pypy/module/cpyext/include/longobject.h
+++ b/pypy/module/cpyext/include/longobject.h
@@ -20,6 +20,9 @@
 
 #define PyLong_AS_LONG(op) PyLong_AsLong(op)
 
+#define _PyLong_AsByteArray(v, bytes, n, little_endian, is_signed)   \
+    _PyLong_AsByteArrayO((PyObject *)(v), bytes, n, little_endian, is_signed)
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py
--- a/pypy/module/cpyext/longobject.py
+++ b/pypy/module/cpyext/longobject.py
@@ -3,8 +3,8 @@
     cpython_api, PyObject, build_type_checkers_flags, Py_ssize_t,
     CONST_STRING, ADDR, CANNOT_FAIL)
 from pypy.objspace.std.longobject import W_LongObject
-from pypy.interpreter.error import OperationError
-from rpython.rlib.rbigint import rbigint
+from pypy.interpreter.error import OperationError, oefmt
+from rpython.rlib.rbigint import rbigint, InvalidSignednessError
 
 PyLong_Check, PyLong_CheckExact = build_type_checkers_flags("Long")
 
@@ -251,3 +251,26 @@
         byteorder = 'big'
     result = rbigint.frombytes(s, byteorder, signed != 0)
     return space.newlong_from_rbigint(result)
+
+@cpython_api([PyObject, rffi.UCHARP, rffi.SIZE_T,
+              rffi.INT_real, rffi.INT_real], rffi.INT_real, error=-1)
+def _PyLong_AsByteArrayO(space, w_v, bytes, n, little_endian, is_signed):
+    n = rffi.cast(lltype.Signed, n)
+    little_endian = rffi.cast(lltype.Signed, little_endian)
+    signed = rffi.cast(lltype.Signed, is_signed) != 0
+    byteorder = 'little' if little_endian else 'big'
+    bigint = space.bigint_w(w_v)
+    try:
+        digits = bigint.tobytes(n, byteorder, signed)
+    except InvalidSignednessError:     # < 0 but not 'signed'
+        # in this case, CPython raises OverflowError even though the C
+        # comments say it should raise TypeError
+        raise oefmt(space.w_OverflowError,
+                    "can't convert negative long to unsigned")
+    except OverflowError:
+        raise oefmt(space.w_OverflowError,
+                    "long too big to convert")
+    assert len(digits) == n
+    for i in range(n):
+        bytes[i] = rffi.cast(rffi.UCHAR, digits[i])
+    return 0
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -10,6 +10,8 @@
     PyVarObject, Py_ssize_t, init_function, cts)
 from pypy.module.cpyext.state import State
 from pypy.objspace.std.typeobject import W_TypeObject
+from pypy.objspace.std.noneobject import W_NoneObject
+from pypy.objspace.std.boolobject import W_BoolObject
 from pypy.objspace.std.objectobject import W_ObjectObject
 from rpython.rlib.objectmodel import specialize, we_are_translated
 from rpython.rlib.objectmodel import keepalive_until_here
@@ -21,6 +23,52 @@
 #________________________________________________________
 # type description
 
+class W_BaseCPyObject(W_ObjectObject):
+    """ A subclass of W_ObjectObject that has one field for directly storing
+    the link from the w_obj to the cpy ref. This is only used for C-defined
+    types. """
+
+
+def check_true(s_arg, bookeeper):
+    assert s_arg.const is True
+
+def w_root_as_pyobj(w_obj, space):
+    from rpython.rlib.debug import check_annotation
+    # make sure that translation crashes if we see this while not translating
+    # with cpyext
+    check_annotation(space.config.objspace.usemodules.cpyext, check_true)
+    # default implementation of _cpyext_as_pyobj
+    return rawrefcount.from_obj(PyObject, w_obj)
+
+def w_root_attach_pyobj(w_obj, space, py_obj):
+    from rpython.rlib.debug import check_annotation
+    check_annotation(space.config.objspace.usemodules.cpyext, check_true)
+    assert space.config.objspace.usemodules.cpyext
+    # default implementation of _cpyext_attach_pyobj
+    rawrefcount.create_link_pypy(w_obj, py_obj)
+
+
+def add_direct_pyobj_storage(cls):
+    """ Add the necessary methods to a class to store a reference to the py_obj
+    on its instances directly. """
+
+    cls._cpy_ref = lltype.nullptr(PyObject.TO)
+
+    def _cpyext_as_pyobj(self, space):
+        return self._cpy_ref
+    cls._cpyext_as_pyobj = _cpyext_as_pyobj
+
+    def _cpyext_attach_pyobj(self, space, py_obj):
+        self._cpy_ref = py_obj
+        rawrefcount.create_link_pyobj(self, py_obj)
+    cls._cpyext_attach_pyobj = _cpyext_attach_pyobj
+
+add_direct_pyobj_storage(W_BaseCPyObject)
+add_direct_pyobj_storage(W_TypeObject)
+add_direct_pyobj_storage(W_NoneObject)
+add_direct_pyobj_storage(W_BoolObject)
+
+
 class BaseCpyTypedescr(object):
     basestruct = PyObject.TO
     W_BaseObject = W_ObjectObject
@@ -66,8 +114,12 @@
 
     def realize(self, space, obj):
         w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
+        assert isinstance(w_type, W_TypeObject)
         try:
-            w_obj = space.allocate_instance(self.W_BaseObject, w_type)
+            if w_type.flag_cpytype:
+                w_obj = space.allocate_instance(W_BaseCPyObject, w_type)
+            else:
+                w_obj = space.allocate_instance(self.W_BaseObject, w_type)
         except OperationError as e:
             if e.match(space, space.w_TypeError):
                 raise oefmt(space.w_SystemError,
@@ -76,6 +128,9 @@
                             w_type)
             raise
         track_reference(space, obj, w_obj)
+        if w_type.flag_cpytype:
+            assert isinstance(w_obj, W_BaseCPyObject)
+            w_obj._cpy_ref = obj
         return w_obj
 
 typedescr_cache = {}
@@ -186,12 +241,12 @@
     Ties together a PyObject and an interpreter object.
     The PyObject's refcnt is increased by REFCNT_FROM_PYPY.
     The reference in 'py_obj' is not stolen!  Remember to decref()
-    it is you need to.
+    it if you need to.
     """
     # XXX looks like a PyObject_GC_TRACK
     assert py_obj.c_ob_refcnt < rawrefcount.REFCNT_FROM_PYPY
     py_obj.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY
-    rawrefcount.create_link_pypy(w_obj, py_obj)
+    w_obj._cpyext_attach_pyobj(space, py_obj)
 
 
 w_marker_deallocating = W_Root()
@@ -237,7 +292,7 @@
 @jit.dont_look_inside
 def as_pyobj(space, w_obj, w_userdata=None, immortal=False):
     """
-    Returns a 'PyObject *' representing the given intepreter object.
+    Returns a 'PyObject *' representing the given interpreter object.
     This doesn't give a new reference, but the returned 'PyObject *'
     is valid at least as long as 'w_obj' is.  **To be safe, you should
     use keepalive_until_here(w_obj) some time later.**  In case of
@@ -245,7 +300,7 @@
     """
     assert not is_pyobj(w_obj)
     if w_obj is not None:
-        py_obj = rawrefcount.from_obj(PyObject, w_obj)
+        py_obj = w_obj._cpyext_as_pyobj(space)
         if not py_obj:
             py_obj = create_ref(space, w_obj, w_userdata, immortal=immortal)
         #
diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py
--- a/pypy/module/cpyext/sequence.py
+++ b/pypy/module/cpyext/sequence.py
@@ -267,7 +267,7 @@
     raise oefmt(space.w_ValueError, "sequence.index(x): x not in sequence")
 
 class CPyListStrategy(ListStrategy):
-    erase, unerase = rerased.new_erasing_pair("empty")
+    erase, unerase = rerased.new_erasing_pair("cpylist")
     erase = staticmethod(erase)
     unerase = staticmethod(unerase)
 
diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py
--- a/pypy/module/cpyext/stubs.py
+++ b/pypy/module/cpyext/stubs.py
@@ -1577,13 +1577,6 @@
     """
     raise NotImplementedError
 
-@cpython_api([PyObject], PyObject)
-def PyUnicode_AsUTF32String(space, unicode):
-    """Return a Python byte string using the UTF-32 encoding in native byte
-    order. The string always starts with a BOM mark.  Error handling is 
"strict".
-    Return NULL if an exception was raised by the codec."""
-    raise NotImplementedError
-
 @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP, rffi.INTP, Py_ssize_t], 
PyObject)
 def PyUnicode_DecodeUTF16Stateful(space, s, size, errors, byteorder, consumed):
     """If consumed is NULL, behave like PyUnicode_DecodeUTF16(). If
@@ -1612,13 +1605,6 @@
     Return NULL if an exception was raised by the codec."""
     raise NotImplementedError
 
-@cpython_api([PyObject], PyObject)
-def PyUnicode_AsUTF16String(space, unicode):
-    """Return a Python byte string using the UTF-16 encoding in native byte
-    order. The string always starts with a BOM mark.  Error handling is 
"strict".
-    Return NULL if an exception was raised by the codec."""
-    raise NotImplementedError
-
 @cpython_api([rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject)
 def PyUnicode_DecodeUTF7(space, s, size, errors):
     """Create a Unicode object by decoding size bytes of the UTF-7 encoded 
string
diff --git a/pypy/module/cpyext/test/test_longobject.py 
b/pypy/module/cpyext/test/test_longobject.py
--- a/pypy/module/cpyext/test/test_longobject.py
+++ b/pypy/module/cpyext/test/test_longobject.py
@@ -259,6 +259,48 @@
         assert module.from_bytearray(False, False) == 0x9ABC41
         assert module.from_bytearray(False, True) == -0x6543BF
 
+    def test_asbytearray(self):
+        module = self.import_extension('foo', [
+            ("as_bytearray", "METH_VARARGS",
+             """
+                 PyObject *result;
+                 PyLongObject *o;
+                 int n, little_endian, is_signed;
+                 unsigned char *bytes;
+                 if (!PyArg_ParseTuple(args, "O!iii", &PyLong_Type, &o, &n,
+                         &little_endian, &is_signed))
+                     return NULL;
+                 bytes = malloc(n);
+                 if (_PyLong_AsByteArray(o, bytes, (size_t)n,
+                                         little_endian, is_signed) != 0)
+                 {
+                     free(bytes);
+                     return NULL;
+                 }
+                 result = PyString_FromStringAndSize((const char *)bytes, n);
+                 free(bytes);
+                 return result;
+             """),
+            ])
+        s = module.as_bytearray(0x41BC9AL, 4, True, False)
+        assert s == "\x9A\xBC\x41\x00"
+        s = module.as_bytearray(0x41BC9AL, 4, False, False)
+        assert s == "\x00\x41\xBC\x9A"
+        s = module.as_bytearray(0x41BC9AL, 3, True, False)
+        assert s == "\x9A\xBC\x41"
+        s = module.as_bytearray(0x41BC9AL, 3, True, True)
+        assert s == "\x9A\xBC\x41"
+        s = module.as_bytearray(0x9876L, 2, True, False)
+        assert s == "\x76\x98"
+        s = module.as_bytearray(0x9876L - 0x10000L, 2, True, True)
+        assert s == "\x76\x98"
+        raises(OverflowError, module.as_bytearray,
+                              0x9876L, 2, False, True)
+        raises(OverflowError, module.as_bytearray,
+                              -1L, 2, True, False)
+        raises(OverflowError, module.as_bytearray,
+                              0x1234567L, 3, True, False)
+
     def test_fromunicode(self):
         module = self.import_extension('foo', [
             ("from_unicode", "METH_O",
diff --git a/pypy/module/cpyext/test/test_object.py 
b/pypy/module/cpyext/test/test_object.py
--- a/pypy/module/cpyext/test/test_object.py
+++ b/pypy/module/cpyext/test/test_object.py
@@ -218,7 +218,7 @@
 
         if not cls.runappdirect:
             cls.total_mem = 0
-            def add_memory_pressure(estimate):
+            def add_memory_pressure(estimate, object=None):
                 assert estimate >= 0
                 cls.total_mem += estimate
             cls.orig_add_memory_pressure = [rgc.add_memory_pressure]
diff --git a/pypy/module/cpyext/test/test_typeobject.py 
b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -3,13 +3,23 @@
 from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
 from pypy.module.cpyext.test.test_api import BaseApiTest
 from pypy.module.cpyext.api import generic_cpy_call
-from pypy.module.cpyext.pyobject import make_ref, from_ref, decref
+from pypy.module.cpyext.pyobject import make_ref, from_ref, decref, as_pyobj
 from pypy.module.cpyext.typeobject import cts, PyTypeObjectPtr
 
 import sys
 import pytest
 
 class AppTestTypeObject(AppTestCpythonExtensionBase):
+
+    def setup_class(cls):
+        AppTestCpythonExtensionBase.setup_class.im_func(cls)
+        def _check_uses_shortcut(w_inst):
+            res = hasattr(w_inst, "_cpy_ref") and w_inst._cpy_ref
+            res = res and as_pyobj(cls.space, w_inst) == w_inst._cpy_ref
+            return cls.space.newbool(res)
+        cls.w__check_uses_shortcut = cls.space.wrap(
+            gateway.interp2app(_check_uses_shortcut))
+
     def test_typeobject(self):
         import sys
         module = self.import_module(name='foo')
@@ -162,6 +172,25 @@
         assert fuu2(u"abc").baz().escape()
         raises(TypeError, module.fooType.object_member.__get__, 1)
 
+    def test_shortcut(self):
+        # test that instances of classes that are defined in C become an
+        # instance of W_BaseCPyObject and thus can be converted faster back to
+        # their pyobj, because they store a pointer to it directly.
+        if self.runappdirect:
+            skip("can't run with -A")
+        module = self.import_module(name='foo')
+        obj = module.fooType()
+        assert self._check_uses_shortcut(obj)
+        # W_TypeObjects use shortcut
+        assert self._check_uses_shortcut(object)
+        assert self._check_uses_shortcut(type)
+        # None, True, False use shortcut
+        assert self._check_uses_shortcut(None)
+        assert self._check_uses_shortcut(True)
+        assert self._check_uses_shortcut(False)
+        assert not self._check_uses_shortcut(1)
+        assert not self._check_uses_shortcut(object())
+
     def test_multiple_inheritance1(self):
         module = self.import_module(name='foo')
         obj = module.UnicodeSubtype(u'xyz')
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -359,6 +359,20 @@
         m = self.import_module('_widechar')
         raises(ValueError, m.test_widechar)
 
+    def test_AsUTFNString(self):
+        module = self.import_extension('foo', [
+            ("asutf8", "METH_O", "return PyUnicode_AsUTF8String(args);"),
+            ("asutf16", "METH_O", "return PyUnicode_AsUTF16String(args);"),
+            ("asutf32", "METH_O", "return PyUnicode_AsUTF32String(args);"),
+            ])
+        u = u'sp\x09m\u1234\U00012345'
+        s = module.asutf8(u)
+        assert s == u.encode('utf-8')
+        s = module.asutf16(u)
+        assert s == u.encode('utf-16')
+        s = module.asutf32(u)
+        assert s == u.encode('utf-32')
+
 
 class TestUnicode(BaseApiTest):
     def test_unicodeobject(self, space):
@@ -448,10 +462,24 @@
         lltype.free(ar, flavor='raw')
 
     def test_AsUTF8String(self, space):
-        w_u = space.wrap(u'sp\x09m')
+        w_u = space.wrap(u'sp\x09m\u1234')
         w_res = PyUnicode_AsUTF8String(space, w_u)
         assert space.type(w_res) is space.w_bytes
-        assert space.unwrap(w_res) == 'sp\tm'
+        assert space.unwrap(w_res) == 'sp\tm\xe1\x88\xb4'
+
+    def test_AsUTF16String(self, space):
+        u = u'sp\x09m\u1234\U00012345'
+        w_u = space.wrap(u)
+        w_res = PyUnicode_AsUTF16String(space, w_u)
+        assert space.type(w_res) is space.w_bytes
+        assert space.unwrap(w_res) == u.encode('utf-16')
+
+    def test_AsUTF32String(self, space):
+        u = u'sp\x09m\u1234\U00012345'
+        w_u = space.wrap(u)
+        w_res = PyUnicode_AsUTF32String(space, w_u)
+        assert space.type(w_res) is space.w_bytes
+        assert space.unwrap(w_res) == u.encode('utf-32')
 
     def test_decode_utf8(self, space):
         u = rffi.str2charp(u'sp\x134m'.encode("utf-8"))
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -341,8 +341,12 @@
         if len(slot_names) == 1:
             func = getattr(pto, slot_names[0])
             if slot_names[0] == 'c_tp_hash':
-                if hash_not_impl == func:
-                    # special case for tp_hash == PyObject_HashNotImplemented
+                # two special cases where __hash__ is explicitly set to None
+                # (which leads to an unhashable type):
+                # 1) tp_hash == PyObject_HashNotImplemented
+                # 2) tp_hash == NULL and tp_richcompare not NULL
+                if hash_not_impl == func or (
+                        not func and pto.c_tp_richcompare):
                     dict_w[method_name] = space.w_None
                     continue
         else:
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -716,7 +716,7 @@
     ref[0] = rffi.cast(PyObject, py_newuni)
     return 0
 
-def make_conversion_functions(suffix, encoding):
+def make_conversion_functions(suffix, encoding, only_for_asstring=False):
     @cpython_api([PyObject], PyObject)
     @func_renamer('PyUnicode_As%sString' % suffix)
     def PyUnicode_AsXXXString(space, w_unicode):
@@ -728,6 +728,9 @@
         return unicodeobject.encode_object(space, w_unicode, encoding, "strict")
     globals()['PyUnicode_As%sString' % suffix] = PyUnicode_AsXXXString
 
+    if only_for_asstring:
+        return
+
     @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING], PyObject)
     @func_renamer('PyUnicode_Decode%s' % suffix)
     def PyUnicode_DecodeXXX(space, s, size, errors):
@@ -758,6 +761,8 @@
     globals()['PyUnicode_Encode%s' % suffix] = PyUnicode_EncodeXXX
 
 make_conversion_functions('UTF8', 'utf-8')
+make_conversion_functions('UTF16', 'utf-16', only_for_asstring=True)
+make_conversion_functions('UTF32', 'utf-32', only_for_asstring=True)
 make_conversion_functions('ASCII', 'ascii')
 make_conversion_functions('Latin1', 'latin-1')
 if sys.platform == 'win32':
diff --git a/pypy/module/gc/__init__.py b/pypy/module/gc/__init__.py
--- a/pypy/module/gc/__init__.py
+++ b/pypy/module/gc/__init__.py
@@ -19,6 +19,7 @@
                 space.config.translation.gctransformer == "framework"):
             self.appleveldefs.update({
                 'dump_rpy_heap': 'app_referents.dump_rpy_heap',
+                'get_stats': 'app_referents.get_stats',
                 })
             self.interpleveldefs.update({
                 'get_rpy_roots': 'referents.get_rpy_roots',
@@ -28,6 +29,7 @@
                 'get_objects': 'referents.get_objects',
                 'get_referents': 'referents.get_referents',
                 'get_referrers': 'referents.get_referrers',
+                '_get_stats': 'referents.get_stats',
                 '_dump_rpy_heap': 'referents._dump_rpy_heap',
                 'get_typeids_z': 'referents.get_typeids_z',
                 'get_typeids_list': 'referents.get_typeids_list',
diff --git a/pypy/module/gc/app_referents.py b/pypy/module/gc/app_referents.py
--- a/pypy/module/gc/app_referents.py
+++ b/pypy/module/gc/app_referents.py
@@ -48,3 +48,66 @@
                 file.flush()
             fd = file.fileno()
         gc._dump_rpy_heap(fd)
+
+class GcStats(object):
+    def __init__(self, s):
+        self._s = s
+        for item in ('total_gc_memory', 'jit_backend_used',
+                     'total_memory_pressure',
+                     'total_allocated_memory', 'jit_backend_allocated',
+                     'peak_memory', 'peak_allocated_memory', 'total_arena_memory',
+                     'total_rawmalloced_memory', 'nursery_size',
+                     'peak_arena_memory', 'peak_rawmalloced_memory'):
+            setattr(self, item, self._format(getattr(self._s, item)))
+        self.memory_used_sum = self._format(self._s.total_gc_memory + self._s.total_memory_pressure +
+                                            self._s.jit_backend_used)
+        self.memory_allocated_sum = self._format(self._s.total_allocated_memory + self._s.total_memory_pressure +
+                                            self._s.jit_backend_allocated)
+
+    def _format(self, v):
+        if v < 1000000:
+            # bit unlikely ;-)
+            return "%.1fkB" % (v / 1024.)
+        return "%.1fMB" % (v / 1024. / 1024.)
+
+    def __repr__(self):
+        if self._s.total_memory_pressure != -1:
+            extra = "\nmemory pressure:    %s" % self.total_memory_pressure
+        else:
+            extra = ""
+        return """Total memory consumed:
+    GC used:            %s (peak: %s)
+       in arenas:            %s
+       rawmalloced:          %s
+       nursery:              %s
+    raw assembler used: %s%s
+    -----------------------------
+    Total:              %s
+
+    Total memory allocated:
+    GC allocated:            %s (peak: %s)
+       in arenas:            %s
+       rawmalloced:          %s
+       nursery:              %s
+    raw assembler allocated: %s%s
+    -----------------------------
+    Total:                   %s
+    """ % (self.total_gc_memory, self.peak_memory,
+              self.total_arena_memory,
+              self.total_rawmalloced_memory,
+              self.nursery_size,
+           self.jit_backend_used,
+           extra,
+           self.memory_used_sum,
+
+           self.total_allocated_memory, self.peak_allocated_memory,
+              self.peak_arena_memory,
+              self.peak_rawmalloced_memory,
+              self.nursery_size,
+           self.jit_backend_allocated,
+           extra,
+           self.memory_allocated_sum)
+
+
+def get_stats():
+    return GcStats(gc._get_stats())
diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py
--- a/pypy/module/gc/referents.py
+++ b/pypy/module/gc/referents.py
@@ -1,7 +1,7 @@
-from rpython.rlib import rgc
+from rpython.rlib import rgc, jit_hooks
 from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.typedef import TypeDef
-from pypy.interpreter.gateway import unwrap_spec
+from pypy.interpreter.typedef import TypeDef, interp_attrproperty
+from pypy.interpreter.gateway import unwrap_spec, interp2app
 from pypy.interpreter.error import oefmt, wrap_oserror
 from rpython.rlib.objectmodel import we_are_translated
 
@@ -170,3 +170,53 @@
     l = rgc.get_typeids_list()
     list_w = [space.newint(l[i]) for i in range(len(l))]
     return space.newlist(list_w)
+
+class W_GcStats(W_Root):
+    def __init__(self, memory_pressure):
+        if memory_pressure:
+            self.total_memory_pressure = rgc.get_stats(rgc.TOTAL_MEMORY_PRESSURE)
+        else:
+            self.total_memory_pressure = -1
+        self.total_gc_memory = rgc.get_stats(rgc.TOTAL_MEMORY)
+        self.total_allocated_memory = rgc.get_stats(rgc.TOTAL_ALLOCATED_MEMORY)
+        self.peak_memory = rgc.get_stats(rgc.PEAK_MEMORY)
+        self.peak_allocated_memory = rgc.get_stats(rgc.PEAK_ALLOCATED_MEMORY)
+        self.jit_backend_allocated = jit_hooks.stats_asmmemmgr_allocated(None)
+        self.jit_backend_used = jit_hooks.stats_asmmemmgr_used(None)
+        self.total_arena_memory = rgc.get_stats(rgc.TOTAL_ARENA_MEMORY)
+        self.total_rawmalloced_memory = rgc.get_stats(
+            rgc.TOTAL_RAWMALLOCED_MEMORY)
+        self.peak_arena_memory = rgc.get_stats(rgc.PEAK_ARENA_MEMORY)
+        self.peak_rawmalloced_memory = rgc.get_stats(rgc.PEAK_RAWMALLOCED_MEMORY)
+        self.nursery_size = rgc.get_stats(rgc.NURSERY_SIZE)
+
+W_GcStats.typedef = TypeDef("GcStats",
+    total_memory_pressure=interp_attrproperty("total_memory_pressure",
+        cls=W_GcStats, wrapfn="newint"),
+    total_gc_memory=interp_attrproperty("total_gc_memory",
+        cls=W_GcStats, wrapfn="newint"),
+    peak_allocated_memory=interp_attrproperty("peak_allocated_memory",
+        cls=W_GcStats, wrapfn="newint"),
+    peak_memory=interp_attrproperty("peak_memory",
+        cls=W_GcStats, wrapfn="newint"),
+    total_allocated_memory=interp_attrproperty("total_allocated_memory",
+        cls=W_GcStats, wrapfn="newint"),
+    jit_backend_allocated=interp_attrproperty("jit_backend_allocated",
+        cls=W_GcStats, wrapfn="newint"),
+    jit_backend_used=interp_attrproperty("jit_backend_used",
+        cls=W_GcStats, wrapfn="newint"),
+    total_arena_memory=interp_attrproperty("total_arena_memory",
+        cls=W_GcStats, wrapfn="newint"),
+    total_rawmalloced_memory=interp_attrproperty("total_rawmalloced_memory",
+        cls=W_GcStats, wrapfn="newint"),
+    peak_arena_memory=interp_attrproperty("peak_arena_memory",
+        cls=W_GcStats, wrapfn="newint"),
+    peak_rawmalloced_memory=interp_attrproperty("peak_rawmalloced_memory",
+        cls=W_GcStats, wrapfn="newint"),
+    nursery_size=interp_attrproperty("nursery_size",
+        cls=W_GcStats, wrapfn="newint"),
+)
+
+@unwrap_spec(memory_pressure=bool)
+def get_stats(space, memory_pressure=False):
+    return W_GcStats(memory_pressure)
diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py
--- a/pypy/module/pyexpat/interp_pyexpat.py
+++ b/pypy/module/pyexpat/interp_pyexpat.py
@@ -843,11 +843,11 @@
     # Currently this is just the size of the pointer and some estimated bytes.
     # The struct isn't actually defined in expat.h - it is in xmlparse.c
     # XXX: find a good estimate of the XML_ParserStruct
-    rgc.add_memory_pressure(XML_Parser_SIZE + 300)
     if not xmlparser:
         raise oefmt(space.w_RuntimeError, "XML_ParserCreate failed")
 
     parser = W_XMLParserType(space, xmlparser, w_intern)
+    rgc.add_memory_pressure(XML_Parser_SIZE + 300, parser)
     XML_SetUnknownEncodingHandler(
         parser.itself, UnknownEncodingHandlerData_callback,
         rffi.cast(rffi.VOIDP, parser.id))
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -2298,3 +2298,11 @@
     else:
         assert lib.__loader__ is None
         assert lib.__spec__ is None
+
+def test_realize_struct_error():
+    ffi = FFI()
+    ffi.cdef("""typedef ... foo_t; struct foo_s { void (*x)(foo_t); };""")
+    lib = verify(ffi, "test_realize_struct_error", """
+        typedef int foo_t; struct foo_s { void (*x)(foo_t); };
+    """)
+    py.test.raises(TypeError, ffi.new, "struct foo_s *")
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -7,7 +7,7 @@
 from pypy.objspace.std.dictmultiobject import (
     W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator,
     BaseValueIterator, BaseItemIterator, _never_equal_to_string,
-    W_DictObject,
+    W_DictObject, BytesDictStrategy, UnicodeDictStrategy
 )
 from pypy.objspace.std.typeobject import MutableCell
 
@@ -25,6 +25,10 @@
 # note: we use "x * NUM_DIGITS_POW2" instead of "x << NUM_DIGITS" because
 # we want to propagate knowledge that the result cannot be negative
 
+# the maximum number of attributes stored in mapdict (afterwards just use a
+# dict)
+LIMIT_MAP_ATTRIBUTES = 80
+
 
 class AbstractAttribute(object):
     _immutable_fields_ = ['terminator']
@@ -253,6 +257,9 @@
     def materialize_r_dict(self, space, obj, dict_w):
         raise NotImplementedError("abstract base class")
 
+    def materialize_str_dict(self, space, obj, str_dict):
+        raise NotImplementedError("abstract base class")
+
     def remove_dict_entries(self, obj):
         raise NotImplementedError("abstract base class")
 
@@ -272,6 +279,13 @@
 
     def _write_terminator(self, obj, name, index, w_value):
         obj._get_mapdict_map().add_attr(obj, name, index, w_value)
+        if index == DICT and obj._get_mapdict_map().length() >= LIMIT_MAP_ATTRIBUTES:
+            space = self.space
+            w_dict = obj.getdict(space)
+            assert isinstance(w_dict, W_DictMultiObject)
+            strategy = w_dict.get_strategy()
+            assert isinstance(strategy, MapDictStrategy)
+            strategy.switch_to_text_strategy(w_dict)
         return True
 
     def copy(self, obj):
@@ -302,6 +316,12 @@
         self.devolved_dict_terminator = DevolvedDictTerminator(space, w_cls)
 
     def materialize_r_dict(self, space, obj, dict_w):
+        return self._make_devolved(space)
+
+    def materialize_str_dict(self, space, obj, dict_w):
+        return self._make_devolved(space)
+
+    def _make_devolved(self, space):
         result = Object()
         result.space = space
         result._mapdict_init_empty(self.devolved_dict_terminator)
@@ -408,6 +428,14 @@
             self._copy_attr(obj, new_obj)
         return new_obj
 
+    def materialize_str_dict(self, space, obj, str_dict):
+        new_obj = self.back.materialize_str_dict(space, obj, str_dict)
+        if self.index == DICT:
+            str_dict[self.name] = obj._mapdict_read_storage(self.storageindex)
+        else:
+            self._copy_attr(obj, new_obj)
+        return new_obj
+
     def remove_dict_entries(self, obj):
         new_obj = self.back.remove_dict_entries(obj)
         if self.index != DICT:
@@ -737,6 +765,15 @@
         assert w_obj.getdict(self.space) is w_dict or w_obj._get_mapdict_map().terminator.w_cls is None
         materialize_r_dict(self.space, w_obj, dict_w)
 
+    def switch_to_text_strategy(self, w_dict):
+        w_obj = self.unerase(w_dict.dstorage)
+        strategy = self.space.fromcache(BytesDictStrategy)
+        str_dict = strategy.unerase(strategy.get_empty_storage())
+        w_dict.set_strategy(strategy)
+        w_dict.dstorage = strategy.erase(str_dict)
+        assert w_obj.getdict(self.space) is w_dict or w_obj._get_mapdict_map().terminator.w_cls is None
+        materialize_str_dict(self.space, w_obj, str_dict)
+
     def getitem(self, w_dict, w_key):
         space = self.space
         w_lookup_type = space.type(w_key)
@@ -832,6 +869,11 @@
     new_obj = map.materialize_r_dict(space, obj, dict_w)
     obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
 
+def materialize_str_dict(space, obj, dict_w):
+    map = obj._get_mapdict_map()
+    new_obj = map.materialize_str_dict(space, obj, dict_w)
+    obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
+
 
 class IteratorMixin(object):
 
diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py
--- a/pypy/objspace/std/test/test_mapdict.py
+++ b/pypy/objspace/std/test/test_mapdict.py
@@ -114,6 +114,33 @@
     assert obj2.getdictvalue(space, "b") == 60
     assert obj2.map is obj.map
 
+def test_add_attribute_limit():
+    for numslots in [0, 10, 100]:
+        cls = Class()
+        obj = cls.instantiate()
+        for i in range(numslots):
+            obj.setslotvalue(i, i) # some extra slots too, sometimes
+        # test that eventually attributes are really just stored in a dictionary
+        for i in range(1000):
+            obj.setdictvalue(space, str(i), i)
+        # moved to dict (which is the remaining non-slot item)
+        assert len(obj.storage) == 1 + numslots
+
+        for i in range(1000):
+            assert obj.getdictvalue(space, str(i)) == i
+        for i in range(numslots):
+            assert obj.getslotvalue(i) == i # check extra slots
+
+    # this doesn't happen with slots
+    cls = Class()
+    obj = cls.instantiate()
+    for i in range(1000):
+        obj.setslotvalue(i, i)
+    assert len(obj.storage) == 1000
+
+    for i in range(1000):
+        assert obj.getslotvalue(i) == i
+
 def test_insert_different_orders():
     cls = Class()
     obj = cls.instantiate()
@@ -797,7 +824,6 @@
         assert d == {}
 
     def test_change_class_slots(self):
-        skip("not supported by pypy yet")
         class A(object):
             __slots__ = ["x", "y"]
 
@@ -815,7 +841,6 @@
         assert isinstance(a, B)
 
     def test_change_class_slots_dict(self):
-        skip("not supported by pypy yet")
         class A(object):
             __slots__ = ["x", "__dict__"]
         class B(object):
@@ -843,7 +868,7 @@
         assert a.y == 2
         d = a.__dict__
         d[1] = 3
-        assert d == {"x": 1, "y": 2, 1:3}
+        assert d == {"y": 2, 1: 3}
         a.__class__ = B
         assert a.x == 1
         assert a.y == 2
diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py
--- a/rpython/annotator/bookkeeper.py
+++ b/rpython/annotator/bookkeeper.py
@@ -71,6 +71,7 @@
 
         self.needs_generic_instantiate = {}
         self.thread_local_fields = set()
+        self.memory_pressure_types = set()
 
         self.register_builtins()
 
diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py
--- a/rpython/jit/codewriter/support.py
+++ b/rpython/jit/codewriter/support.py
@@ -675,6 +675,8 @@
 
     def _ll_1_gc_add_memory_pressure(num):
         llop.gc_add_memory_pressure(lltype.Void, num)
+    def _ll_2_gc_add_memory_pressure(num, obj):
+        llop.gc_add_memory_pressure(lltype.Void, num, obj)
 
 
 def setup_extra_builtin(rtyper, oopspec_name, nb_args, extra=None):
diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py
--- a/rpython/memory/gc/base.py
+++ b/rpython/memory/gc/base.py
@@ -83,7 +83,9 @@
                             has_custom_trace,
                             fast_path_tracing,
                             has_gcptr,
-                            cannot_pin):
+                            cannot_pin,
+                            has_memory_pressure,
+                            get_memory_pressure_ofs):
         self.finalizer_handlers = finalizer_handlers
         self.destructor_or_custom_trace = destructor_or_custom_trace
         self.is_old_style_finalizer = is_old_style_finalizer
@@ -103,6 +105,8 @@
         self.fast_path_tracing = fast_path_tracing
         self.has_gcptr = has_gcptr
         self.cannot_pin = cannot_pin
+        self.has_memory_pressure = has_memory_pressure
+        self.get_memory_pressure_ofs = get_memory_pressure_ofs
 
     def get_member_index(self, type_id):
         return self.member_index(type_id)
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -72,6 +72,7 @@
 from rpython.rlib.rarithmetic import LONG_BIT_SHIFT
 from rpython.rlib.debug import ll_assert, debug_print, debug_start, debug_stop
 from rpython.rlib.objectmodel import specialize
+from rpython.rlib import rgc
 from rpython.memory.gc.minimarkpage import out_of_memory
 
 #
@@ -371,6 +372,7 @@
         self.old_rawmalloced_objects = self.AddressStack()
         self.raw_malloc_might_sweep = self.AddressStack()
         self.rawmalloced_total_size = r_uint(0)
+        self.rawmalloced_peak_size = r_uint(0)
 
         self.gc_state = STATE_SCANNING
         #
@@ -996,6 +998,8 @@
             # Record the newly allocated object and its full malloced size.
             # The object is young or old depending on the argument.
             self.rawmalloced_total_size += r_uint(allocsize)
+            self.rawmalloced_peak_size = max(self.rawmalloced_total_size,
+                                             self.rawmalloced_peak_size)
             if alloc_young:
                 if not self.young_rawmalloced_objects:
                     self.young_rawmalloced_objects = self.AddressDict()
@@ -1023,7 +1027,7 @@
             if self.max_heap_size < self.next_major_collection_threshold:
                 self.next_major_collection_threshold = self.max_heap_size
 
-    def raw_malloc_memory_pressure(self, sizehint):
+    def raw_malloc_memory_pressure(self, sizehint, adr):
         # Decrement by 'sizehint' plus a very little bit extra.  This
         # is needed e.g. for _rawffi, which may allocate a lot of tiny
         # arrays.
@@ -1183,6 +1187,24 @@
         """
         return self.ac.total_memory_used + self.rawmalloced_total_size
 
+    def get_total_memory_alloced(self):
+        """ Return the total memory allocated
+        """
+        return self.ac.total_memory_alloced + self.rawmalloced_total_size
+
+    def get_peak_memory_alloced(self):
+        """ Return the peak memory ever allocated. The peaks
+        can be at different times, but we just don't worry for now
+        """
+        return self.ac.peak_memory_alloced + self.rawmalloced_peak_size
+
+    def get_peak_memory_used(self):
+        """ Return the peak memory GC felt ever responsible for
+        """
+        mem_allocated = max(self.ac.peak_memory_used,
+                            self.ac.total_memory_used)
+        return mem_allocated + self.rawmalloced_peak_size
+
     def threshold_reached(self, extra=0):
         return (self.next_major_collection_threshold -
                 float(self.get_total_memory_used())) < float(extra)
@@ -2155,6 +2177,8 @@
         #
         size_gc_header = self.gcheaderbuilder.size_gc_header
         self.rawmalloced_total_size += r_uint(raw_malloc_usage(totalsize))
+        self.rawmalloced_peak_size = max(self.rawmalloced_total_size,
+                                         self.rawmalloced_peak_size)
         self.old_rawmalloced_objects.append(arena + size_gc_header)
         return arena
 
@@ -2932,6 +2956,32 @@
         self.old_objects_with_weakrefs.delete()
         self.old_objects_with_weakrefs = new_with_weakref
 
+    def get_stats(self, stats_no):
+        from rpython.memory.gc import inspector
+
+        if stats_no == rgc.TOTAL_MEMORY:
+            return intmask(self.get_total_memory_used() + self.nursery_size)
+        elif stats_no == rgc.PEAK_MEMORY:
+            return intmask(self.get_peak_memory_used() + self.nursery_size)
+        elif stats_no == rgc.PEAK_ALLOCATED_MEMORY:
+            return intmask(self.get_peak_memory_alloced() + self.nursery_size)
+        elif stats_no == rgc.TOTAL_ALLOCATED_MEMORY:
+            return intmask(self.get_total_memory_alloced() + self.nursery_size)
+        elif stats_no == rgc.TOTAL_MEMORY_PRESSURE:
+            return inspector.count_memory_pressure(self)
+        elif stats_no == rgc.TOTAL_ARENA_MEMORY:
+            return intmask(self.ac.total_memory_used)
+        elif stats_no == rgc.TOTAL_RAWMALLOCED_MEMORY:
+            return intmask(self.rawmalloced_total_size)
+        elif stats_no == rgc.PEAK_RAWMALLOCED_MEMORY:
+            return intmask(self.rawmalloced_peak_size)
+        elif stats_no == rgc.PEAK_ARENA_MEMORY:
+            return intmask(max(self.ac.peak_memory_used,
+                               self.ac.total_memory_used))
+        elif stats_no == rgc.NURSERY_SIZE:
+            return intmask(self.nursery_size)
+        return 0
+
 
     # ----------
     # RawRefCount
diff --git a/rpython/memory/gc/inspector.py b/rpython/memory/gc/inspector.py
--- a/rpython/memory/gc/inspector.py
+++ b/rpython/memory/gc/inspector.py
@@ -2,6 +2,7 @@
 Utility RPython functions to inspect objects in the GC.
 """
 from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup
+from rpython.rtyper.lltypesystem.lloperation import llop
 from rpython.rlib.objectmodel import free_non_gc_object
 from rpython.rlib import rposix, rgc, jit
 
@@ -92,17 +93,12 @@
 
 AddressStack = get_address_stack()
 
-class HeapDumper(object):
-    _alloc_flavor_ = "raw"
-    BUFSIZE = 8192     # words
+class BaseWalker(object):
+    _alloc_flavor_ = 'raw'
 
-    def __init__(self, gc, fd):
+    def __init__(self, gc):
         self.gc = gc
         self.gcflag = gc.gcflag_extra
-        self.fd = rffi.cast(rffi.INT, fd)
-        self.writebuffer = lltype.malloc(rffi.SIGNEDP.TO, self.BUFSIZE,
-                                         flavor='raw')
-        self.buf_count = 0
         if self.gcflag == 0:
             self.seen = AddressDict()
         self.pending = AddressStack()
@@ -111,8 +107,107 @@
         if self.gcflag == 0:
             self.seen.delete()
         self.pending.delete()
+        free_non_gc_object(self)
+
+    def add_roots(self):
+        self.gc.enumerate_all_roots(_hd_add_root, self)
+        pendingroots = self.pending
+        self.pending = AddressStack()
+        self.walk(pendingroots)
+        pendingroots.delete()
+        self.end_add_roots_marker()
+
+    def end_add_roots_marker(self):
+        pass
+
+    def add(self, obj):
+        if self.gcflag == 0:
+            if not self.seen.contains(obj):
+                self.seen.setitem(obj, obj)
+                self.pending.append(obj)
+        else:
+            hdr = self.gc.header(obj)
+            if (hdr.tid & self.gcflag) == 0:
+                hdr.tid |= self.gcflag
+                self.pending.append(obj)
+
+    def walk(self, pending):
+        while pending.non_empty():
+            self.processobj(pending.pop())
+
+    # ----------
+    # A simplified copy of the above, to make sure we walk again all the
+    # objects to clear the 'gcflag'.
+
+    def unobj(self, obj):
+        gc = self.gc
+        gc.trace(obj, self._unref, None)
+
+    def _unref(self, pointer, _):
+        obj = pointer.address[0]
+        self.unadd(obj)
+
+    def unadd(self, obj):
+        assert self.gcflag != 0
+        hdr = self.gc.header(obj)
+        if (hdr.tid & self.gcflag) != 0:
+            hdr.tid &= ~self.gcflag
+            self.pending.append(obj)
+
+    def clear_gcflag_again(self):
+        self.gc.enumerate_all_roots(_hd_unadd_root, self)
+        pendingroots = self.pending
+        self.pending = AddressStack()
+        self.unwalk(pendingroots)
+        pendingroots.delete()
+
+    def unwalk(self, pending):
+        while pending.non_empty():
+            self.unobj(pending.pop())
+
+    def finish_processing(self):
+        if self.gcflag != 0:
+            self.clear_gcflag_again()
+            self.unwalk(self.pending)
+
+    def process(self):
+        self.add_roots()
+        self.walk(self.pending)
+
+
+class MemoryPressureCounter(BaseWalker):
+
+    def __init__(self, gc):
+        self.count = 0
+        BaseWalker.__init__(self, gc)
+
+    def processobj(self, obj):
+        gc = self.gc
+        typeid = gc.get_type_id(obj)
+        if gc.has_memory_pressure(typeid):
+            ofs = gc.get_memory_pressure_ofs(typeid)
+            val = (obj + ofs).signed[0]
+            self.count += val
+        gc.trace(obj, self._ref, None)
+
+    def _ref(self, pointer, _):
+        obj = pointer.address[0]
+        self.add(obj)
+
+
+class HeapDumper(BaseWalker):
+    BUFSIZE = 8192     # words
+
+    def __init__(self, gc, fd):
+        BaseWalker.__init__(self, gc)
+        self.fd = rffi.cast(rffi.INT, fd)
+        self.writebuffer = lltype.malloc(rffi.SIGNEDP.TO, self.BUFSIZE,
+                                         flavor='raw')
+        self.buf_count = 0
+
+    def delete(self):
         lltype.free(self.writebuffer, flavor='raw')
-        free_non_gc_object(self)
+        BaseWalker.delete(self)
 
     @jit.dont_look_inside
     def flush(self):
@@ -143,6 +238,7 @@
         self.write(0)
         self.write(0)
         self.write(-1)
+    end_add_roots_marker = write_marker
 
     def writeobj(self, obj):
         gc = self.gc
@@ -152,64 +248,13 @@
         self.write(gc.get_size_incl_hash(obj))
         gc.trace(obj, self._writeref, None)
         self.write(-1)
+    processobj = writeobj
 
     def _writeref(self, pointer, _):
         obj = pointer.address[0]
         self.write(llmemory.cast_adr_to_int(obj))
         self.add(obj)
 
-    def add(self, obj):
-        if self.gcflag == 0:
-            if not self.seen.contains(obj):
-                self.seen.setitem(obj, obj)
-                self.pending.append(obj)
-        else:
-            hdr = self.gc.header(obj)
-            if (hdr.tid & self.gcflag) == 0:
-                hdr.tid |= self.gcflag
-                self.pending.append(obj)
-
-    def add_roots(self):
-        self.gc.enumerate_all_roots(_hd_add_root, self)
-        pendingroots = self.pending
-        self.pending = AddressStack()
-        self.walk(pendingroots)
-        pendingroots.delete()
-        self.write_marker()
-
-    def walk(self, pending):
-        while pending.non_empty():
-            self.writeobj(pending.pop())
-
-    # ----------
-    # A simplified copy of the above, to make sure we walk again all the
-    # objects to clear the 'gcflag'.
-
-    def unwriteobj(self, obj):
-        gc = self.gc
-        gc.trace(obj, self._unwriteref, None)
-
-    def _unwriteref(self, pointer, _):
-        obj = pointer.address[0]
-        self.unadd(obj)
-
-    def unadd(self, obj):
-        assert self.gcflag != 0
-        hdr = self.gc.header(obj)
-        if (hdr.tid & self.gcflag) != 0:
-            hdr.tid &= ~self.gcflag
-            self.pending.append(obj)
-
-    def clear_gcflag_again(self):
-        self.gc.enumerate_all_roots(_hd_unadd_root, self)
-        pendingroots = self.pending
-        self.pending = AddressStack()
-        self.unwalk(pendingroots)
-        pendingroots.delete()
-
-    def unwalk(self, pending):
-        while pending.non_empty():
-            self.unwriteobj(pending.pop())
 
 def _hd_add_root(obj, heap_dumper):
     heap_dumper.add(obj)
@@ -219,15 +264,20 @@
 
 def dump_rpy_heap(gc, fd):
     heapdumper = HeapDumper(gc, fd)
-    heapdumper.add_roots()
-    heapdumper.walk(heapdumper.pending)
+    heapdumper.process()
     heapdumper.flush()
-    if heapdumper.gcflag != 0:
-        heapdumper.clear_gcflag_again()
-        heapdumper.unwalk(heapdumper.pending)
+    heapdumper.finish_processing()
     heapdumper.delete()
     return True
 
+def count_memory_pressure(gc):
+    counter = MemoryPressureCounter(gc)
+    counter.process()
+    counter.finish_processing()
+    res = counter.count
+    counter.delete()
+    return res
+
 def get_typeids_z(gc):
     srcaddress = gc.root_walker.gcdata.typeids_z
     return llmemory.cast_adr_to_ptr(srcaddress, lltype.Ptr(rgc.ARRAY_OF_CHAR))
diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py
--- a/rpython/memory/gc/minimark.py
+++ b/rpython/memory/gc/minimark.py
@@ -828,7 +828,7 @@
             if self.max_heap_size < self.next_major_collection_threshold:
                 self.next_major_collection_threshold = self.max_heap_size
 
-    def raw_malloc_memory_pressure(self, sizehint):
+    def raw_malloc_memory_pressure(self, sizehint, adr):
         self.next_major_collection_threshold -= sizehint
         if self.next_major_collection_threshold < 0:
             # cannot trigger a full collection now, but we can ensure
diff --git a/rpython/memory/gc/minimarkpage.py b/rpython/memory/gc/minimarkpage.py
--- a/rpython/memory/gc/minimarkpage.py
+++ b/rpython/memory/gc/minimarkpage.py
@@ -9,7 +9,6 @@
 WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT]
 assert 1 << WORD_POWER_2 == WORD
 
-
 # Terminology: the memory is subdivided into "arenas" containing "pages".
 # A page contains a number of allocated objects, called "blocks".
 
@@ -141,6 +140,9 @@
         # the total memory used, counting every block in use, without
         # the additional bookkeeping stuff.
         self.total_memory_used = r_uint(0)
+        self.peak_memory_used = r_uint(0)
+        self.total_memory_alloced = r_uint(0)
+        self.peak_memory_alloced = r_uint(0)
 
 
     def _new_page_ptr_list(self, length):
@@ -294,6 +296,10 @@
         # 'arena_base' points to the start of malloced memory; it might not
         # be a page-aligned address
         arena_base = llarena.arena_malloc(self.arena_size, False)
+        self.total_memory_alloced += self.arena_size
+        self.peak_memory_alloced = max(self.total_memory_alloced,
+                                       self.peak_memory_alloced)
+
         if not arena_base:
             out_of_memory("out of memory: couldn't allocate the next arena")
         arena_end = arena_base + self.arena_size
@@ -321,6 +327,8 @@
         """Prepare calls to mass_free_incremental(): moves the chained lists
         into 'self.old_xxx'.
         """
+        self.peak_memory_used = max(self.peak_memory_used,
+                                    self.total_memory_used)
         self.total_memory_used = r_uint(0)
         #
         size_class = self.small_request_threshold >> WORD_POWER_2
@@ -399,6 +407,7 @@
                     # The whole arena is empty.  Free it.
                     llarena.arena_reset(arena.base, self.arena_size, 4)
                     llarena.arena_free(arena.base)
+                    self.total_memory_alloced -= self.arena_size
                     lltype.free(arena, flavor='raw', track_allocation=False)
                     self.arenas_count -= 1
                     #
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -392,23 +392,30 @@
                 inline = True)
 
         if getattr(GCClass, 'raw_malloc_memory_pressure', False):
-            def raw_malloc_memory_pressure_varsize(length, itemsize):
+            def raw_malloc_memory_pressure_varsize(length, itemsize, adr):
                 totalmem = length * itemsize
                 if totalmem > 0:
-                    gcdata.gc.raw_malloc_memory_pressure(totalmem)
+                    gcdata.gc.raw_malloc_memory_pressure(totalmem, adr)
                 #else: probably an overflow -- the following rawmalloc
                 #      will fail then
-            def raw_malloc_memory_pressure(sizehint):
-                gcdata.gc.raw_malloc_memory_pressure(sizehint)
+            def raw_malloc_memory_pressure(sizehint, adr):
+                gcdata.gc.raw_malloc_memory_pressure(sizehint, adr)
             self.raw_malloc_memory_pressure_varsize_ptr = getfn(
                 raw_malloc_memory_pressure_varsize,
-                [annmodel.SomeInteger(), annmodel.SomeInteger()],
+                [annmodel.SomeInteger(), annmodel.SomeInteger(),
+                 SomeAddress()],
                 annmodel.s_None, minimal_transform = False)
             self.raw_malloc_memory_pressure_ptr = getfn(
                 raw_malloc_memory_pressure,
-                [annmodel.SomeInteger()],
+                [annmodel.SomeInteger(), SomeAddress()],
                 annmodel.s_None, minimal_transform = False)
 
+        if getattr(GCClass, 'get_stats', False):
+            def get_stats(stats_no):
+                return gcdata.gc.get_stats(stats_no)
+            self.get_stats_ptr = getfn(get_stats, [annmodel.SomeInteger()],
+                annmodel.SomeInteger())
+
 
         self.identityhash_ptr = getfn(GCClass.identityhash.im_func,
                                       [s_gc, s_gcref],
@@ -831,6 +838,39 @@
 
     gct_fv_gc_malloc_varsize = gct_fv_gc_malloc
 
+    def gct_gc_add_memory_pressure(self, hop):
+        def _find_correct_type(TP):
+            T = TP.TO
+            while 'special_memory_pressure' not in T._flds:
+                T = T._flds['super']
+            return T
+
+        if hasattr(self, 'raw_malloc_memory_pressure_ptr'):
+            op = hop.spaceop
+            size = op.args[0]
+            if len(op.args) == 2:
+                v_fld = rmodel.inputconst(lltype.Void, "special_memory_pressure")
+                T = _find_correct_type(op.args[1].concretetype)
+                v_inst = hop.genop("cast_pointer", [op.args[1]],
+                    resulttype=lltype.Ptr(T))
+                hop.genop("bare_setfield", [v_inst, v_fld, size])
+                v_adr = hop.genop("cast_ptr_to_adr", [op.args[1]],
+                    resulttype=llmemory.Address)
+            else:
+                v_adr = rmodel.inputconst(llmemory.Address, llmemory.NULL)
+            hop.genop("direct_call", [self.raw_malloc_memory_pressure_ptr,
+                               size, v_adr])
+
+
+    def gct_gc_get_stats(self, hop):
+        if hasattr(self, 'get_stats_ptr'):
+            return hop.genop("direct_call",
+                [self.get_stats_ptr, hop.spaceop.args[0]],
+                resultvar=hop.spaceop.result)
+        hop.genop("same_as", [rmodel.inputconst(lltype.Signed, 0)],
+            resultvar=hop.spaceop.result)
+
+
     def gct_gc__collect(self, hop):
         op = hop.spaceop
         if len(op.args) == 1:
diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py
--- a/rpython/memory/gctransform/transform.py
+++ b/rpython/memory/gctransform/transform.py
@@ -535,12 +535,7 @@
         return self.varsize_malloc_helper(hop, flags, meth, [])
 
     def gct_gc_add_memory_pressure(self, hop):
-        if hasattr(self, 'raw_malloc_memory_pressure_ptr'):
-            op = hop.spaceop
-            size = op.args[0]
-            return hop.genop("direct_call",
-                          [self.raw_malloc_memory_pressure_ptr,
-                           size])
+        pass
 
     def varsize_malloc_helper(self, hop, flags, meth, extraargs):
         def intconst(c): return rmodel.inputconst(lltype.Signed, c)
@@ -574,9 +569,10 @@
                                                                     
c_offset_to_length):
         if flags.get('add_memory_pressure', False):
             if hasattr(self, 'raw_malloc_memory_pressure_varsize_ptr'):
+                v_adr = rmodel.inputconst(llmemory.Address, llmemory.NULL)
                 hop.genop("direct_call",
                           [self.raw_malloc_memory_pressure_varsize_ptr,
-                           v_length, c_item_size])
+                           v_length, c_item_size, v_adr])
         if c_offset_to_length is None:
             if flags.get('zero'):
                 fnptr = self.raw_malloc_varsize_no_length_zero_ptr
diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py
--- a/rpython/memory/gctypelayout.py
+++ b/rpython/memory/gctypelayout.py
@@ -1,4 +1,4 @@
-from rpython.rtyper.lltypesystem import lltype, llmemory, llarena, llgroup
+from rpython.rtyper.lltypesystem import lltype, llmemory, llarena, llgroup, rffi
 from rpython.rtyper import rclass
 from rpython.rtyper.lltypesystem.lloperation import llop
 from rpython.rlib.debug import ll_assert
@@ -21,13 +21,21 @@
     # A destructor is called when the object is about to be freed.
     # A custom tracer (CT) enumerates the addresses that contain GCREFs.
     # Both are called with the address of the object as only argument.
+    # They're embedded in a struct that has memory_pressure_offset as another
+    # field, which is only valid if T_HAS_MEMORY_PRESSURE is set
     CUSTOM_FUNC = lltype.FuncType([llmemory.Address], lltype.Void)
     CUSTOM_FUNC_PTR = lltype.Ptr(CUSTOM_FUNC)
+    CUSTOM_DATA_STRUCT = lltype.Struct('custom_data',
+        ('customfunc', CUSTOM_FUNC_PTR),
+        ('memory_pressure_offset', lltype.Signed), # offset to where the amount
+                                           # of owned memory pressure is stored
+        )
+    CUSTOM_DATA_STRUCT_PTR = lltype.Ptr(CUSTOM_DATA_STRUCT)
 
     # structure describing the layout of a typeid
     TYPE_INFO = lltype.Struct("type_info",
         ("infobits",       lltype.Signed),    # combination of the T_xxx consts
-        ("customfunc",     CUSTOM_FUNC_PTR),
+        ("customdata",     CUSTOM_DATA_STRUCT_PTR),
         ("fixedsize",      lltype.Signed),
         ("ofstoptrs",      lltype.Ptr(OFFSETS_TO_GC_PTR)),
         hints={'immutable': True},
@@ -81,14 +89,16 @@
     def q_cannot_pin(self, typeid):
         typeinfo = self.get(typeid)
         ANY = (T_HAS_GCPTR | T_IS_WEAKREF)
-        return (typeinfo.infobits & ANY) != 0 or bool(typeinfo.customfunc)
+        return (typeinfo.infobits & ANY) != 0 or bool(typeinfo.customdata)
 
     def q_finalizer_handlers(self):
         adr = self.finalizer_handlers   # set from framework.py or gcwrapper.py
         return llmemory.cast_adr_to_ptr(adr, lltype.Ptr(FIN_HANDLER_ARRAY))
 
     def q_destructor_or_custom_trace(self, typeid):
-        return self.get(typeid).customfunc
+        if not self.get(typeid).customdata:
+            return lltype.nullptr(GCData.CUSTOM_FUNC_PTR.TO)
+        return self.get(typeid).customdata.customfunc
 
     def q_is_old_style_finalizer(self, typeid):
         typeinfo = self.get(typeid)
@@ -139,6 +149,15 @@
         infobits = self.get(typeid).infobits
         return infobits & T_ANY_SLOW_FLAG == 0
 
+    def q_has_memory_pressure(self, typeid):
+        infobits = self.get(typeid).infobits
+        return infobits & T_HAS_MEMORY_PRESSURE != 0
+
+    def q_get_memory_pressure_ofs(self, typeid):
+        infobits = self.get(typeid).infobits
+        assert infobits & T_HAS_MEMORY_PRESSURE != 0
+        return self.get(typeid).customdata.memory_pressure_offset
+
     def set_query_functions(self, gc):
         gc.set_query_functions(
             self.q_is_varsize,
@@ -159,7 +178,9 @@
             self.q_has_custom_trace,
             self.q_fast_path_tracing,
             self.q_has_gcptr,
-            self.q_cannot_pin)
+            self.q_cannot_pin,
+            self.q_has_memory_pressure,
+            self.q_get_memory_pressure_ofs)
 
     def _has_got_custom_trace(self, typeid):
         type_info = self.get(typeid)
@@ -176,8 +197,9 @@
 T_HAS_CUSTOM_TRACE          = 0x200000
 T_HAS_OLDSTYLE_FINALIZER    = 0x400000
 T_HAS_GCPTR                 = 0x1000000
-T_KEY_MASK                  = intmask(0xFE000000) # bug detection only
-T_KEY_VALUE                 = intmask(0x5A000000) # bug detection only
+T_HAS_MEMORY_PRESSURE       = 0x2000000 # first field is memory pressure field
+T_KEY_MASK                  = intmask(0xFC000000) # bug detection only
+T_KEY_VALUE                 = intmask(0x58000000) # bug detection only
 
 def _check_valid_type_info(p):
     ll_assert(p.infobits & T_KEY_MASK == T_KEY_VALUE, "invalid type_id")
@@ -192,6 +214,25 @@
     ll_assert(llop.is_group_member_nonzero(lltype.Bool, typeid),
               "invalid type_id")
 
+def has_special_memory_pressure(TYPE):
+    if TYPE._is_varsize():
+        return False
+    T = TYPE
+    while True:
+        if 'special_memory_pressure' in T._flds:
+            return True
+        if 'super' not in T._flds:
+            return False
+        T = T._flds['super']
+
+def get_memory_pressure_ofs(TYPE):
+    T = TYPE
+    while True:
+        if 'special_memory_pressure' in T._flds:
+            return llmemory.offsetof(T, 'special_memory_pressure')
+        if 'super' not in T._flds:
+            assert False, "get_ and has_memory_pressure disagree"
+        T = T._flds['super']    
 
 def encode_type_shape(builder, info, TYPE, index):
     """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
@@ -202,12 +243,18 @@
         infobits |= T_HAS_GCPTR
     #
     fptrs = builder.special_funcptr_for_type(TYPE)
-    if fptrs:
+    if fptrs or has_special_memory_pressure(TYPE):
+        customdata = lltype.malloc(GCData.CUSTOM_DATA_STRUCT, flavor='raw',
+                                   immortal=True)
+        info.customdata = customdata
         if "destructor" in fptrs:
-            info.customfunc = fptrs["destructor"]
+            customdata.customfunc = fptrs["destructor"]
         if "old_style_finalizer" in fptrs:
-            info.customfunc = fptrs["old_style_finalizer"]
+            customdata.customfunc = fptrs["old_style_finalizer"]
             infobits |= T_HAS_OLDSTYLE_FINALIZER
+        if has_special_memory_pressure(TYPE):
+            infobits |= T_HAS_MEMORY_PRESSURE
+            info.customdata.memory_pressure_offset = get_memory_pressure_ofs(TYPE)
     #
     if not TYPE._is_varsize():
         info.fixedsize = llarena.round_up_for_allocation(
diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py
--- a/rpython/memory/gcwrapper.py
+++ b/rpython/memory/gcwrapper.py
@@ -83,9 +83,9 @@
     def gettypeid(self, obj):
         return self.get_type_id(lltype.typeOf(obj).TO)
 
-    def add_memory_pressure(self, size):
+    def add_memory_pressure(self, size, adr):
         if hasattr(self.gc, 'raw_malloc_memory_pressure'):
-            self.gc.raw_malloc_memory_pressure(size)
+            self.gc.raw_malloc_memory_pressure(size, adr)
 
     def shrink_array(self, p, smallersize):
         if hasattr(self.gc, 'shrink_array'):
diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py
--- a/rpython/rlib/rbigint.py
+++ b/rpython/rlib/rbigint.py
@@ -695,9 +695,7 @@
             return NULLRBIGINT
 
         if asize == 1:
-            if a._digits[0] == NULLDIGIT:
-                return NULLRBIGINT
-            elif a._digits[0] == ONEDIGIT:
+            if a._digits[0] == ONEDIGIT:
                 return rbigint(b._digits[:b.size], a.sign * b.sign, b.size)
             elif bsize == 1:
                 res = b.widedigit(0) * a.widedigit(0)
@@ -784,35 +782,15 @@
 
     @jit.elidable
     def mod(self, other):
+        if other.sign == 0:
+            raise ZeroDivisionError("long division or modulo by zero")
         if self.sign == 0:
             return NULLRBIGINT
 
-        if other.sign != 0 and other.numdigits() == 1:
-            digit = other.digit(0)
-            if digit == 1:
-                return NULLRBIGINT
-            elif digit == 2:
-                modm = self.digit(0) & 1
-                if modm:
-                    return ONENEGATIVERBIGINT if other.sign == -1 else ONERBIGINT
-                return NULLRBIGINT
-            elif digit & (digit - 1) == 0:
-                mod = self.int_and_(digit - 1)
-            else:
-                # Perform
-                size = self.numdigits() - 1
-                if size > 0:
-                    rem = self.widedigit(size)
-                    size -= 1
-                    while size >= 0:
-                        rem = ((rem << SHIFT) + self.widedigit(size)) % digit
-                        size -= 1
-                else:
-                    rem = self.digit(0) % digit
-
-                if rem == 0:
-                    return NULLRBIGINT
-                mod = rbigint([_store_digit(rem)], -1 if self.sign < 0 else 1, 1)
+        if other.numdigits() == 1:
+            otherint = other.digit(0) * other.sign
+            assert int_in_valid_range(otherint)
+            return self.int_mod(otherint)
         else:
             div, mod = _divrem(self, other)
         if mod.sign * other.sign == -1:
@@ -821,6 +799,8 @@
 
     @jit.elidable
     def int_mod(self, other):
+        if other == 0:
+            raise ZeroDivisionError("long division or modulo by zero")
         if self.sign == 0:
             return NULLRBIGINT
 
@@ -828,7 +808,7 @@
             # Fallback to long.
             return self.mod(rbigint.fromint(other))
 
-        elif other != 0:
+        if 1: # preserve indentation to preserve history
             digit = abs(other)
             if digit == 1:
                 return NULLRBIGINT
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to