Author: Carl Friedrich Bolz <cfb...@gmx.de>
Branch: guard-compatible
Changeset: r84382:608b83492e8d
Date: 2016-05-11 18:17 +0200
http://bitbucket.org/pypy/pypy/changeset/608b83492e8d/

Log:    merge default

diff too long, truncating to 2000 out of 54659 lines

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -20,3 +20,5 @@
 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
 bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
+3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -111,23 +111,24 @@
   Simon Burton
   Martin Matusiak
   Konstantin Lopuhin
+  Stefano Rivera
   Wenzhu Man
   John Witulski
   Laurence Tratt
   Ivan Sichmann Freitas
   Greg Price
   Dario Bertini
-  Stefano Rivera
   Mark Pearse
   Simon Cross
+  Edd Barrett
  Andreas Stührk
-  Edd Barrett
   Jean-Philippe St. Pierre
   Guido van Rossum
   Pavel Vinogradov
+  Spenser Bauman
   Jeremy Thurgood
  Paweł Piotr Przeradowski
-  Spenser Bauman
+  Tobias Pape
   Paul deGrandis
   Ilya Osadchiy
   marky1991
@@ -139,7 +140,7 @@
   Georg Brandl
   Bert Freudenberg
   Stian Andreassen
-  Tobias Pape
+  Mark Young
   Wanja Saatkamp
   Gerald Klix
   Mike Blume
@@ -170,9 +171,9 @@
   Yichao Yu
   Rocco Moretti
   Gintautas Miliauskas
+  Devin Jeanpierre
   Michael Twomey
   Lucian Branescu Mihaila
-  Devin Jeanpierre
   Gabriel Lavoie
   Olivier Dormond
   Jared Grubb
@@ -183,6 +184,7 @@
   Victor Stinner
   Andrews Medina
   anatoly techtonik
+  Sergey Matyunin
   Stuart Williams
   Jasper Schulz
   Christian Hudon
@@ -217,7 +219,6 @@
   Arjun Naik
   Valentina Mukhamedzhanova
   Stefano Parmesan
-  Mark Young
   Alexis Daboville
   Jens-Uwe Mager
   Carl Meyer
@@ -225,7 +226,9 @@
   Pieter Zieschang
   Gabriel
   Lukas Vacek
+  Kunal Grover
   Andrew Dalke
+  Florin Papa
   Sylvain Thenault
   Jakub Stasiak
   Nathan Taylor
@@ -240,7 +243,6 @@
   Kristjan Valur Jonsson
   David Lievens
   Neil Blakey-Milner
-  Sergey Matyunin
   Lutz Paelike
   Lucio Torre
   Lars Wassermann
@@ -252,9 +254,11 @@
   Artur Lisiecki
   Sergey Kishchenko
   Ignas Mikalajunas
+  Alecsandru Patrascu
   Christoph Gerum
   Martin Blais
   Lene Wagner
+  Catalin Gabriel Manciu
   Tomo Cocoa
   Kim Jin Su
   Toni Mattis
@@ -291,6 +295,7 @@
   Akira Li
   Gustavo Niemeyer
   Stephan Busemann
+  florinpapa
  Rafał Gałczyński
   Matt Bogosian
   Christian Muirhead
@@ -305,6 +310,7 @@
   Boglarka Vezer
   Chris Pressey
   Buck Golemon
+  Diana Popa
   Konrad Delong
   Dinu Gherman
   Chris Lambacher
diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py
--- a/dotviewer/graphserver.py
+++ b/dotviewer/graphserver.py
@@ -143,6 +143,11 @@
 
 if __name__ == '__main__':
     if len(sys.argv) != 2:
+        if len(sys.argv) == 1:
+            # start locally
+            import sshgraphserver
+            sshgraphserver.ssh_graph_server(['LOCAL'])
+            sys.exit(0)
         print >> sys.stderr, __doc__
         sys.exit(2)
     if sys.argv[1] == '--stdio':
diff --git a/dotviewer/sshgraphserver.py b/dotviewer/sshgraphserver.py
--- a/dotviewer/sshgraphserver.py
+++ b/dotviewer/sshgraphserver.py
@@ -4,11 +4,14 @@
 
 Usage:
     sshgraphserver.py  hostname  [more args for ssh...]
+    sshgraphserver.py  LOCAL
 
 This logs in to 'hostname' by passing the arguments on the command-line
 to ssh.  No further configuration is required: it works for all programs
 using the dotviewer library as long as they run on 'hostname' under the
 same username as the one sshgraphserver logs as.
+
+If 'hostname' is the string 'LOCAL', then it starts locally without ssh.
 """
 
 import graphserver, socket, subprocess, random
@@ -18,12 +21,19 @@
     s1 = socket.socket()
     s1.bind(('127.0.0.1', socket.INADDR_ANY))
     localhost, localport = s1.getsockname()
-    remoteport = random.randrange(10000, 20000)
-    #  ^^^ and just hope there is no conflict
 
-    args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (remoteport, localport)]
-    args = args + sshargs + ['python -u -c "exec input()"']
-    print ' '.join(args[:-1])
+    if sshargs[0] != 'LOCAL':
+        remoteport = random.randrange(10000, 20000)
+        #  ^^^ and just hope there is no conflict
+
+        args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (
+            remoteport, localport)]
+        args = args + sshargs + ['python -u -c "exec input()"']
+    else:
+        remoteport = localport
+        args = ['python', '-u', '-c', 'exec input()']
+
+    print ' '.join(args)
     p = subprocess.Popen(args, bufsize=0,
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE)
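
A quick usage sketch of the new LOCAL mode (illustration only, not part of the
changeset): running graphserver.py with no arguments now falls through to the
same code path.

    # equivalent of "python dotviewer/graphserver.py" with no arguments,
    # or "python dotviewer/sshgraphserver.py LOCAL": start locally, no ssh
    import sshgraphserver
    sshgraphserver.ssh_graph_server(['LOCAL'])
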
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
         src_cmd_obj.ensure_finalized()
         for (src_option, dst_option) in option_pairs:
             if getattr(self, dst_option) is None:
-                setattr(self, dst_option,
-                        getattr(src_cmd_obj, src_option))
+                try:
+                    setattr(self, dst_option,
+                            getattr(src_cmd_obj, src_option))
+                except AttributeError:
+                    # This was added after problems with setuptools 18.4.
+                    # It seems that setuptools 20.9 fixes the problem.
+                    # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+                    # if I say "virtualenv -p pypy venv-pypy" then it
+                    # just installs setuptools 18.4 from some cache...
+                    pass
 
 
     def get_finalized_command(self, command, create=1):
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1735,7 +1735,6 @@
             ("__reversed__", reversed, empty_seq, set(), {}),
             ("__length_hint__", list, zero, set(),
              {"__iter__" : iden, "next" : stop}),
-            ("__sizeof__", sys.getsizeof, zero, set(), {}),
             ("__instancecheck__", do_isinstance, return_true, set(), {}),
             ("__missing__", do_dict_missing, some_number,
              set(("__class__",)), {}),
@@ -1747,6 +1746,8 @@
             ("__format__", format, format_impl, set(), {}),
             ("__dir__", dir, empty_seq, set(), {}),
             ]
+        if test_support.check_impl_detail():
+            specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
 
         class Checker(object):
             def __getattr__(self, attr, test=self):
@@ -1768,10 +1769,6 @@
                 raise MyException
 
         for name, runner, meth_impl, ok, env in specials:
-            if name == '__length_hint__' or name == '__sizeof__':
-                if not test_support.check_impl_detail():
-                    continue
-
             class X(Checker):
                 pass
             for attr, obj in env.iteritems():
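
For reference, check_impl_detail() defaults to "CPython only": it returns True
on CPython and False on PyPy, which is why the __sizeof__ entry above is now
appended conditionally instead of being skipped in the loop. A minimal sketch
(run_cpython_only_checks is a hypothetical helper):

    from test import test_support

    if test_support.check_impl_detail():   # True on CPython, False on PyPy
        run_cpython_only_checks()
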
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
 
     overly detailed
 
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+   or create branch vendor/stdlib-3-*
 2. upgrade the files there
+   2a. remove lib-python/2.7/ or lib-python/3/
+   2b. copy the files from the cpython repo
+   2c. hg add lib-python/2.7/ or lib-python/3/
+   2d. hg remove --after
+   2e. show copied files in cpython repo by running `hg diff --git -r v<old> -r v<new> Lib | grep '^copy \(from\|to\)'`
+   2f. fix copies / renames manually by running `hg copy --after <from> <to>` for each copied file
 3. update stdlib-version.txt with the output of hg -id from the cpython repo
 4. commit
-5. update to default/py3k
+5. update to default / py3k
 6. create a integration branch for the new stdlib
    (just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
 8. commit
 10. fix issues
 11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py
--- a/lib_pypy/_collections.py
+++ b/lib_pypy/_collections.py
@@ -320,8 +320,7 @@
     def __reduce_ex__(self, proto):
         return type(self), (list(self), self.maxlen)
 
-    def __hash__(self):
-        raise TypeError("deque objects are unhashable")
+    __hash__ = None
 
     def __copy__(self):
         return self.__class__(self, self.maxlen)
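
Assigning __hash__ = None is the idiomatic way to make a new-style class
unhashable and matches CPython's deque; unlike a raising __hash__ method it
also makes the class report as non-hashable to collections.Hashable. A tiny
sketch:

    from collections import deque

    d = deque()
    hash(d)     # raises TypeError: unhashable type: 'deque'
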
diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py
--- a/lib_pypy/_ctypes/structure.py
+++ b/lib_pypy/_ctypes/structure.py
@@ -67,7 +67,8 @@
                     subvalue = subfield.ctype
                     fields[subname] = Field(subname,
                                             relpos, subvalue._sizeofinstances(),
-                                            subvalue, i, is_bitfield)
+                                            subvalue, i, is_bitfield,
+                                            inside_anon_field=fields[name])
             else:
                 resnames.append(name)
         names = resnames
@@ -77,13 +78,15 @@
 
 
 class Field(object):
-    def __init__(self, name, offset, size, ctype, num, is_bitfield):
+    def __init__(self, name, offset, size, ctype, num, is_bitfield,
+                 inside_anon_field=None):
         self.__dict__['name'] = name
         self.__dict__['offset'] = offset
         self.__dict__['size'] = size
         self.__dict__['ctype'] = ctype
         self.__dict__['num'] = num
         self.__dict__['is_bitfield'] = is_bitfield
+        self.__dict__['inside_anon_field'] = inside_anon_field
 
     def __setattr__(self, name, value):
         raise AttributeError(name)
@@ -95,6 +98,8 @@
     def __get__(self, obj, cls=None):
         if obj is None:
             return self
+        if self.inside_anon_field is not None:
+            return getattr(self.inside_anon_field.__get__(obj), self.name)
         if self.is_bitfield:
             # bitfield member, use direct access
             return obj._buffer.__getattr__(self.name)
@@ -105,6 +110,9 @@
             return fieldtype._CData_output(suba, obj, offset)
 
     def __set__(self, obj, value):
+        if self.inside_anon_field is not None:
+            setattr(self.inside_anon_field.__get__(obj), self.name, value)
+            return
         fieldtype = self.ctype
         cobj = fieldtype.from_param(value)
         key = keepalive_key(self.num)
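
The new inside_anon_field plumbing routes reads and writes through ctypes'
anonymous (sub)structure fields. A small sketch of the pattern it supports,
using only standard ctypes:

    from ctypes import Structure, Union, c_int

    class _Value(Union):
        _fields_ = [("as_int", c_int)]

    class Outer(Structure):
        _anonymous_ = ("value",)
        _fields_ = [("tag", c_int), ("value", _Value)]

    o = Outer()
    o.as_int = 7                  # routed through the anonymous 'value' field
    assert o.value.as_int == 7
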
diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py
--- a/lib_pypy/_pypy_wait.py
+++ b/lib_pypy/_pypy_wait.py
@@ -1,51 +1,22 @@
-from resource import _struct_rusage, struct_rusage
-from ctypes import CDLL, c_int, POINTER, byref
-from ctypes.util import find_library
+from resource import ffi, lib, _make_struct_rusage
 
 __all__ = ["wait3", "wait4"]
 
-libc = CDLL(find_library("c"))
-c_wait3 = libc.wait3
-c_wait3.argtypes = [POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait3.restype = c_int
-
-c_wait4 = libc.wait4
-c_wait4.argtypes = [c_int, POINTER(c_int), c_int, POINTER(_struct_rusage)]
-c_wait4.restype = c_int
-
-def create_struct_rusage(c_struct):
-    return struct_rusage((
-        float(c_struct.ru_utime),
-        float(c_struct.ru_stime),
-        c_struct.ru_maxrss,
-        c_struct.ru_ixrss,
-        c_struct.ru_idrss,
-        c_struct.ru_isrss,
-        c_struct.ru_minflt,
-        c_struct.ru_majflt,
-        c_struct.ru_nswap,
-        c_struct.ru_inblock,
-        c_struct.ru_oublock,
-        c_struct.ru_msgsnd,
-        c_struct.ru_msgrcv,
-        c_struct.ru_nsignals,
-        c_struct.ru_nvcsw,
-        c_struct.ru_nivcsw))
 
 def wait3(options):
-    status = c_int()
-    _rusage = _struct_rusage()
-    pid = c_wait3(byref(status), c_int(options), byref(_rusage))
+    status = ffi.new("int *")
+    ru = ffi.new("struct rusage *")
+    pid = lib.wait3(status, options, ru)
 
-    rusage = create_struct_rusage(_rusage)
+    rusage = _make_struct_rusage(ru)
 
-    return pid, status.value, rusage
+    return pid, status[0], rusage
 
 def wait4(pid, options):
-    status = c_int()
-    _rusage = _struct_rusage()
-    pid = c_wait4(c_int(pid), byref(status), c_int(options), byref(_rusage))
+    status = ffi.new("int *")
+    ru = ffi.new("struct rusage *")
+    pid = lib.wait4(pid, status, options, ru)
 
-    rusage = create_struct_rusage(_rusage)
+    rusage = _make_struct_rusage(ru)
 
-    return pid, status.value, rusage
+    return pid, status[0], rusage
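
A minimal usage sketch of the rewritten wrappers (illustration only; assumes a
Unix system where fork is available):

    import os
    from _pypy_wait import wait3

    pid = os.fork()
    if pid == 0:
        os._exit(0)                      # child exits immediately
    child, status, rusage = wait3(0)     # options=0: block until a child exits
    assert child == pid
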
diff --git a/lib_pypy/_resource_build.py b/lib_pypy/_resource_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_resource_build.py
@@ -0,0 +1,118 @@
+from cffi import FFI
+
+ffi = FFI()
+
+# Note: we don't directly expose 'struct timeval' or 'struct rlimit'
+
+
+rlimit_consts = '''
+RLIMIT_CPU
+RLIMIT_FSIZE
+RLIMIT_DATA
+RLIMIT_STACK
+RLIMIT_CORE
+RLIMIT_NOFILE
+RLIMIT_OFILE
+RLIMIT_VMEM
+RLIMIT_AS
+RLIMIT_RSS
+RLIMIT_NPROC
+RLIMIT_MEMLOCK
+RLIMIT_SBSIZE
+RLIM_INFINITY
+RUSAGE_SELF
+RUSAGE_CHILDREN
+RUSAGE_BOTH
+'''.split()
+
+rlimit_consts = ['#ifdef %s\n\t{"%s", %s},\n#endif\n' % (s, s, s)
+                 for s in rlimit_consts]
+
+
+ffi.set_source("_resource_cffi", """
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+
+static const struct my_rlimit_def {
+    const char *name;
+    long long value;
+} my_rlimit_consts[] = {
+$RLIMIT_CONSTS
+    { NULL, 0 }
+};
+
+#define doubletime(TV) ((double)(TV).tv_sec + (TV).tv_usec * 0.000001)
+
+static double my_utime(struct rusage *input)
+{
+    return doubletime(input->ru_utime);
+}
+
+static double my_stime(struct rusage *input)
+{
+    return doubletime(input->ru_stime);
+}
+
+static int my_getrlimit(int resource, long long result[2])
+{
+    struct rlimit rl;
+    if (getrlimit(resource, &rl) == -1)
+        return -1;
+    result[0] = rl.rlim_cur;
+    result[1] = rl.rlim_max;
+    return 0;
+}
+
+static int my_setrlimit(int resource, long long cur, long long max)
+{
+    struct rlimit rl;
+    rl.rlim_cur = cur & RLIM_INFINITY;
+    rl.rlim_max = max & RLIM_INFINITY;
+    return setrlimit(resource, &rl);
+}
+
+""".replace('$RLIMIT_CONSTS', ''.join(rlimit_consts)))
+
+
+ffi.cdef("""
+
+#define RLIM_NLIMITS ...
+
+const struct my_rlimit_def {
+    const char *name;
+    long long value;
+} my_rlimit_consts[];
+
+struct rusage {
+    long ru_maxrss;
+    long ru_ixrss;
+    long ru_idrss;
+    long ru_isrss;
+    long ru_minflt;
+    long ru_majflt;
+    long ru_nswap;
+    long ru_inblock;
+    long ru_oublock;
+    long ru_msgsnd;
+    long ru_msgrcv;
+    long ru_nsignals;
+    long ru_nvcsw;
+    long ru_nivcsw;
+    ...;
+};
+
+static double my_utime(struct rusage *);
+static double my_stime(struct rusage *);
+void getrusage(int who, struct rusage *result);
+int my_getrlimit(int resource, long long result[2]);
+int my_setrlimit(int resource, long long cur, long long max);
+
+int wait3(int *status, int options, struct rusage *rusage);
+int wait4(int pid, int *status, int options, struct rusage *rusage);
+""")
+
+
+if __name__ == "__main__":
+    ffi.compile()
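
This script is normally run at build/packaging time; ffi.compile() produces the
_resource_cffi extension module that lib_pypy/resource.py (below) imports. A
sketch of how the exported pieces are consumed, mirroring resource.py's own
_setup():

    from _resource_cffi import ffi, lib

    # walk the NULL-terminated table of RLIMIT_*/RUSAGE_* constants
    consts = {}
    p = lib.my_rlimit_consts
    while p.name:
        consts[ffi.string(p.name)] = int(p.value)
        p += 1

    limits = ffi.new("long long[2]")
    if lib.my_getrlimit(consts['RLIMIT_NOFILE'], limits) == 0:
        soft, hard = limits[0], limits[1]
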
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: cffi
-Version: 1.5.2
+Version: 1.6.0
 Summary: Foreign Function Interface for Python calling C code.
 Home-page: http://cffi.readthedocs.org
 Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
 from .api import FFI, CDefError, FFIError
 from .ffiplatform import VerificationError, VerificationMissing
 
-__version__ = "1.5.2"
-__version_info__ = (1, 5, 2)
+__version__ = "1.6.0"
+__version_info__ = (1, 6, 0)
 
 # The verifier module file names are based on the CRC32 of a string that
 # contains the following version number.  It may be older than __version__
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
         f = PySys_GetObject((char *)"stderr");
         if (f != NULL && f != Py_None) {
             PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
-                               "\ncompiled with cffi version: 1.5.2"
+                               "\ncompiled with cffi version: 1.6.0"
                                "\n_cffi_backend module: ", f);
             modules = PyImport_GetModuleDict();
             mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -299,6 +299,23 @@
         """
         return self._backend.string(cdata, maxlen)
 
+    def unpack(self, cdata, length):
+        """Unpack an array of C data of the given length, 
+        returning a Python string/unicode/list.
+
+        If 'cdata' is a pointer to 'char', returns a byte string.
+        It does not stop at the first null.  This is equivalent to:
+        ffi.buffer(cdata, length)[:]
+
+        If 'cdata' is a pointer to 'wchar_t', returns a unicode string.
+        'length' is measured in wchar_t's; it is not the size in bytes.
+
+        If 'cdata' is a pointer to anything else, returns a list of
+        'length' items.  This is a faster equivalent to:
+        [cdata[i] for i in range(length)]
+        """
+        return self._backend.unpack(cdata, length)
+
     def buffer(self, cdata, size=-1):
         """Return a read-write buffer object that references the raw C data
         pointed to by the given 'cdata'.  The 'cdata' must be a pointer or
@@ -380,20 +397,7 @@
         data.  Later, when this new cdata object is garbage-collected,
         'destructor(old_cdata_object)' will be called.
         """
-        try:
-            gcp = self._backend.gcp
-        except AttributeError:
-            pass
-        else:
-            return gcp(cdata, destructor)
-        #
-        with self._lock:
-            try:
-                gc_weakrefs = self.gc_weakrefs
-            except AttributeError:
-                from .gc_weakref import GcWeakrefs
-                gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
-            return gc_weakrefs.build(cdata, destructor)
+        return self._backend.gcp(cdata, destructor)
 
     def _get_cached_btype(self, type):
         assert self._lock.acquire(False) is False
@@ -721,6 +725,26 @@
         raise ValueError("ffi.def_extern() is only available on API-mode FFI "
                          "objects")
 
+    def list_types(self):
+        """Returns the user type names known to this FFI instance.
+        This returns a tuple containing three lists of names:
+        (typedef_names, names_of_structs, names_of_unions)
+        """
+        typedefs = []
+        structs = []
+        unions = []
+        for key in self._parser._declarations:
+            if key.startswith('typedef '):
+                typedefs.append(key[8:])
+            elif key.startswith('struct '):
+                structs.append(key[7:])
+            elif key.startswith('union '):
+                unions.append(key[6:])
+        typedefs.sort()
+        structs.sort()
+        unions.sort()
+        return (typedefs, structs, unions)
+
 
 def _load_backend_lib(backend, name, flags):
     if name is None:
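
A short usage sketch of the two FFI methods added above, unpack() and
list_types(), assuming a cffi 1.6 installation where the compiled
_cffi_backend provides the actual unpack:

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("typedef int myint_t; struct point { int x, y; };")
    print(ffi.list_types())      # roughly (['myint_t'], ['point'], [])

    buf = ffi.new("int[4]", [1, 2, 3, 4])
    print(ffi.unpack(buf, 3))    # [1, 2, 3], faster than a Python-level loop
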
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
                         return x._value
                     raise TypeError("character expected, got %s" %
                                     type(x).__name__)
+                def __nonzero__(self):
+                    return ord(self._value) != 0
+            else:
+                def __nonzero__(self):
+                    return self._value != 0
 
             if kind == 'float':
                 @staticmethod
@@ -993,6 +998,31 @@
         assert onerror is None   # XXX not implemented
         return BType(source, error)
 
+    def gcp(self, cdata, destructor):
+        BType = self.typeof(cdata)
+
+        if destructor is None:
+            if not (hasattr(BType, '_gcp_type') and
+                    BType._gcp_type is BType):
+                raise TypeError("Can remove destructor only on a object "
+                                "previously returned by ffi.gc()")
+            cdata._destructor = None
+            return None
+
+        try:
+            gcp_type = BType._gcp_type
+        except AttributeError:
+            class CTypesDataGcp(BType):
+                __slots__ = ['_orig', '_destructor']
+                def __del__(self):
+                    if self._destructor is not None:
+                        self._destructor(self._orig)
+            gcp_type = BType._gcp_type = CTypesDataGcp
+        new_cdata = self.cast(gcp_type, cdata)
+        new_cdata._orig = cdata
+        new_cdata._destructor = destructor
+        return new_cdata
+
     typeof = type
 
     def getcname(self, BType, replace_with):
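
The gcp() method gives the ctypes backend the same ffi.gc() behaviour as the C
backend, including destructor=None to detach a destructor registered earlier.
A hedged sketch using the C library's malloc/free (Unix-style dlopen(None)):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("void *malloc(size_t); void free(void *);")
    lib = ffi.dlopen(None)

    p = ffi.gc(lib.malloc(64), lib.free)   # free() runs when p is collected
    ffi.gc(p, None)                        # detach the destructor again
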
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -29,7 +29,8 @@
 _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
 _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
 _r_cdecl = re.compile(r"\b__cdecl\b")
-_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.')
+_r_extern_python = re.compile(r'\bextern\s*"'
+                              r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
 _r_star_const_space = re.compile(       # matches "* const "
     r"[*]\s*((const|volatile|restrict)\b\s*)+")
 
@@ -88,6 +89,12 @@
     #     void __cffi_extern_python_start;
     #     int foo(int);
     #     void __cffi_extern_python_stop;
+    #
+    # input: `extern "Python+C" int foo(int);`
+    # output:
+    #     void __cffi_extern_python_plus_c_start;
+    #     int foo(int);
+    #     void __cffi_extern_python_stop;
     parts = []
     while True:
         match = _r_extern_python.search(csource)
@@ -98,7 +105,10 @@
         #print ''.join(parts)+csource
         #print '=>'
         parts.append(csource[:match.start()])
-        parts.append('void __cffi_extern_python_start; ')
+        if 'C' in match.group(1):
+            parts.append('void __cffi_extern_python_plus_c_start; ')
+        else:
+            parts.append('void __cffi_extern_python_start; ')
         if csource[endpos] == '{':
             # grouping variant
             closing = csource.find('}', endpos)
@@ -302,7 +312,7 @@
                 break
         #
         try:
-            self._inside_extern_python = False
+            self._inside_extern_python = '__cffi_extern_python_stop'
             for decl in iterator:
                 if isinstance(decl, pycparser.c_ast.Decl):
                     self._parse_decl(decl)
@@ -376,8 +386,10 @@
         tp = self._get_type_pointer(tp, quals)
         if self._options.get('dllexport'):
             tag = 'dllexport_python '
-        elif self._inside_extern_python:
+        elif self._inside_extern_python == '__cffi_extern_python_start':
             tag = 'extern_python '
+        elif self._inside_extern_python == '__cffi_extern_python_plus_c_start':
+            tag = 'extern_python_plus_c '
         else:
             tag = 'function '
         self._declare(tag + decl.name, tp)
@@ -421,11 +433,9 @@
                     # hack: `extern "Python"` in the C source is replaced
                     # with "void __cffi_extern_python_start;" and
                     # "void __cffi_extern_python_stop;"
-                    self._inside_extern_python = not self._inside_extern_python
-                    assert self._inside_extern_python == (
-                        decl.name == '__cffi_extern_python_start')
+                    self._inside_extern_python = decl.name
                 else:
-                    if self._inside_extern_python:
+                    if self._inside_extern_python !='__cffi_extern_python_stop':
                         raise api.CDefError(
                             "cannot declare constants or "
                             "variables with 'extern \"Python\"'")
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -1145,11 +1145,11 @@
     def _generate_cpy_extern_python_collecttype(self, tp, name):
         assert isinstance(tp, model.FunctionPtrType)
         self._do_collect_type(tp)
+    _generate_cpy_dllexport_python_collecttype = \
+      _generate_cpy_extern_python_plus_c_collecttype = \
+      _generate_cpy_extern_python_collecttype
 
-    def _generate_cpy_dllexport_python_collecttype(self, tp, name):
-        self._generate_cpy_extern_python_collecttype(tp, name)
-
-    def _generate_cpy_extern_python_decl(self, tp, name, dllexport=False):
+    def _extern_python_decl(self, tp, name, tag_and_space):
         prnt = self._prnt
         if isinstance(tp.result, model.VoidType):
             size_of_result = '0'
@@ -1184,11 +1184,7 @@
             size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % (
                 tp.result.get_c_name(''), size_of_a,
                 tp.result.get_c_name(''), size_of_a)
-        if dllexport:
-            tag = 'CFFI_DLLEXPORT'
-        else:
-            tag = 'static'
-        prnt('%s %s' % (tag, tp.result.get_c_name(name_and_arguments)))
+        prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments)))
         prnt('{')
         prnt('  char a[%s];' % size_of_a)
         prnt('  char *p = a;')
@@ -1206,8 +1202,14 @@
         prnt()
         self._num_externpy += 1
 
+    def _generate_cpy_extern_python_decl(self, tp, name):
+        self._extern_python_decl(tp, name, 'static ')
+
     def _generate_cpy_dllexport_python_decl(self, tp, name):
-        self._generate_cpy_extern_python_decl(tp, name, dllexport=True)
+        self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ')
+
+    def _generate_cpy_extern_python_plus_c_decl(self, tp, name):
+        self._extern_python_decl(tp, name, '')
 
     def _generate_cpy_extern_python_ctx(self, tp, name):
         if self.target_is_python:
@@ -1220,8 +1222,9 @@
         self._lsts["global"].append(
             GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name))
 
-    def _generate_cpy_dllexport_python_ctx(self, tp, name):
-        self._generate_cpy_extern_python_ctx(tp, name)
+    _generate_cpy_dllexport_python_ctx = \
+      _generate_cpy_extern_python_plus_c_ctx = \
+      _generate_cpy_extern_python_ctx
 
     def _string_literal(self, s):
         def _char_repr(c):
@@ -1231,7 +1234,7 @@
             if c == '\n': return '\\n'
             return '\\%03o' % ord(c)
         lines = []
-        for line in s.splitlines(True):
+        for line in s.splitlines(True) or ['']:
             lines.append('"%s"' % ''.join([_char_repr(c) for c in line]))
         return ' \\\n'.join(lines)
 
@@ -1319,7 +1322,9 @@
                 s = s.encode('ascii')
             super(NativeIO, self).write(s)
 
-def _make_c_or_py_source(ffi, module_name, preamble, target_file):
+def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose):
+    if verbose:
+        print("generating %s" % (target_file,))
     recompiler = Recompiler(ffi, module_name,
                             target_is_python=(preamble is None))
     recompiler.collect_type_table()
@@ -1331,6 +1336,8 @@
         with open(target_file, 'r') as f1:
             if f1.read(len(output) + 1) != output:
                 raise IOError
+        if verbose:
+            print("(already up-to-date)")
         return False     # already up-to-date
     except IOError:
         tmp_file = '%s.~%d' % (target_file, os.getpid())
@@ -1343,12 +1350,14 @@
             os.rename(tmp_file, target_file)
         return True
 
-def make_c_source(ffi, module_name, preamble, target_c_file):
+def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False):
     assert preamble is not None
-    return _make_c_or_py_source(ffi, module_name, preamble, target_c_file)
+    return _make_c_or_py_source(ffi, module_name, preamble, target_c_file,
+                                verbose)
 
-def make_py_source(ffi, module_name, target_py_file):
-    return _make_c_or_py_source(ffi, module_name, None, target_py_file)
+def make_py_source(ffi, module_name, target_py_file, verbose=False):
+    return _make_c_or_py_source(ffi, module_name, None, target_py_file,
+                                verbose)
 
 def _modname_to_file(outputdir, modname, extension):
     parts = modname.split('.')
@@ -1438,7 +1447,8 @@
                 target = '*'
         #
         ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds)
-        updated = make_c_source(ffi, module_name, preamble, c_file)
+        updated = make_c_source(ffi, module_name, preamble, c_file,
+                                verbose=compiler_verbose)
         if call_c_compiler:
             patchlist = []
             cwd = os.getcwd()
@@ -1458,7 +1468,8 @@
     else:
         if c_file is None:
             c_file, _ = _modname_to_file(tmpdir, module_name, '.py')
-        updated = make_py_source(ffi, module_name, c_file)
+        updated = make_py_source(ffi, module_name, c_file,
+                                 verbose=compiler_verbose)
         if call_c_compiler:
             return c_file
         else:
@@ -1484,4 +1495,7 @@
     def typeof_disabled(*args, **kwds):
         raise NotImplementedError
     ffi._typeof = typeof_disabled
+    for name in dir(ffi):
+        if not name.startswith('_') and not hasattr(module.ffi, name):
+            setattr(ffi, name, NotImplemented)
     return module.lib
diff --git a/lib_pypy/ctypes_config_cache/.empty b/lib_pypy/ctypes_config_cache/.empty
new file mode 100644
--- /dev/null
+++ b/lib_pypy/ctypes_config_cache/.empty
@@ -0,0 +1,1 @@
+dummy file to allow old buildbot configuration to run
diff --git a/lib_pypy/ctypes_config_cache/__init__.py b/lib_pypy/ctypes_config_cache/__init__.py
deleted file mode 100644
diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/dumpcache.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import sys, os
-from ctypes_configure import dumpcache
-
-def dumpcache2(basename, config):
-    size = 32 if sys.maxint <= 2**32 else 64
-    filename = '_%s_%s_.py' % (basename, size)
-    dumpcache.dumpcache(__file__, filename, config)
-    #
-    filename = os.path.join(os.path.dirname(__file__),
-                            '_%s_cache.py' % (basename,))
-    g = open(filename, 'w')
-    print >> g, '''\
-import sys
-_size = 32 if sys.maxint <= 2**32 else 64
-# XXX relative import, should be removed together with
-# XXX the relative imports done e.g. by lib_pypy/pypy_test/test_hashlib
-_mod = __import__("_%s_%%s_" %% (_size,),
-                  globals(), locals(), ["*"])
-globals().update(_mod.__dict__)\
-''' % (basename,)
-    g.close()
diff --git a/lib_pypy/ctypes_config_cache/locale.ctc.py b/lib_pypy/ctypes_config_cache/locale.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/locale.ctc.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-'ctypes_configure' source for _locale.py.
-Run this to rebuild _locale_cache.py.
-"""
-
-from ctypes_configure.configure import (configure, ExternalCompilationInfo,
-    ConstantInteger, DefinedConstantInteger, SimpleType, check_eci)
-import dumpcache
-
-# ____________________________________________________________
-
-_CONSTANTS = [
-    'LC_CTYPE',
-    'LC_TIME',
-    'LC_COLLATE',
-    'LC_MONETARY',
-    'LC_MESSAGES',
-    'LC_NUMERIC',
-    'LC_ALL',
-    'CHAR_MAX',
-]
-
-class LocaleConfigure:
-    _compilation_info_ = ExternalCompilationInfo(includes=['limits.h',
-                                                           'locale.h'])
-for key in _CONSTANTS:
-    setattr(LocaleConfigure, key, DefinedConstantInteger(key))
-
-config = configure(LocaleConfigure, noerr=True)
-for key, value in config.items():
-    if value is None:
-        del config[key]
-        _CONSTANTS.remove(key)
-
-# ____________________________________________________________
-
-eci = ExternalCompilationInfo(includes=['locale.h', 'langinfo.h'])
-HAS_LANGINFO = check_eci(eci)
-
-if HAS_LANGINFO:
-    # list of all possible names
-    langinfo_names = [
-        "RADIXCHAR", "THOUSEP", "CRNCYSTR",
-        "D_T_FMT", "D_FMT", "T_FMT", "AM_STR", "PM_STR",
-        "CODESET", "T_FMT_AMPM", "ERA", "ERA_D_FMT", "ERA_D_T_FMT",
-        "ERA_T_FMT", "ALT_DIGITS", "YESEXPR", "NOEXPR", "_DATE_FMT",
-        ]
-    for i in range(1, 8):
-        langinfo_names.append("DAY_%d" % i)
-        langinfo_names.append("ABDAY_%d" % i)
-    for i in range(1, 13):
-        langinfo_names.append("MON_%d" % i)
-        langinfo_names.append("ABMON_%d" % i)
-    
-    class LanginfoConfigure:
-        _compilation_info_ = eci
-        nl_item = SimpleType('nl_item')
-    for key in langinfo_names:
-        setattr(LanginfoConfigure, key, DefinedConstantInteger(key))
-
-    langinfo_config = configure(LanginfoConfigure)
-    for key, value in langinfo_config.items():
-        if value is None:
-            del langinfo_config[key]
-            langinfo_names.remove(key)
-    config.update(langinfo_config)
-    _CONSTANTS += langinfo_names
-
-# ____________________________________________________________
-
-config['ALL_CONSTANTS'] = tuple(_CONSTANTS)
-config['HAS_LANGINFO'] = HAS_LANGINFO
-dumpcache.dumpcache2('locale', config)
diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py
deleted file mode 100755
--- a/lib_pypy/ctypes_config_cache/rebuild.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/env python
-# Run this script to rebuild all caches from the *.ctc.py files.
-
-import os, sys
-
-sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..')))
-
-import py
-
-_dirpath = os.path.dirname(__file__) or os.curdir
-
-from rpython.tool.ansi_print import AnsiLogger
-log = AnsiLogger("ctypes_config_cache")
-
-
-def rebuild_one(name):
-    filename = os.path.join(_dirpath, name)
-    d = {'__file__': filename}
-    path = sys.path[:]
-    try:
-        sys.path.insert(0, _dirpath)
-        execfile(filename, d)
-    finally:
-        sys.path[:] = path
-
-def try_rebuild():
-    size = 32 if sys.maxint <= 2**32 else 64
-    # remove the files '_*_size_.py'
-    left = {}
-    for p in os.listdir(_dirpath):
-        if p.startswith('_') and (p.endswith('_%s_.py' % size) or
-                                  p.endswith('_%s_.pyc' % size)):
-            os.unlink(os.path.join(_dirpath, p))
-        elif p.startswith('_') and (p.endswith('_.py') or
-                                    p.endswith('_.pyc')):
-            for i in range(2, len(p)-4):
-                left[p[:i]] = True
-    # remove the files '_*_cache.py' if there is no '_*_*_.py' left around
-    for p in os.listdir(_dirpath):
-        if p.startswith('_') and (p.endswith('_cache.py') or
-                                  p.endswith('_cache.pyc')):
-            if p[:-9] not in left:
-                os.unlink(os.path.join(_dirpath, p))
-    #
-    for p in os.listdir(_dirpath):
-        if p.endswith('.ctc.py'):
-            try:
-                rebuild_one(p)
-            except Exception, e:
-                log.ERROR("Running %s:\n  %s: %s" % (
-                    os.path.join(_dirpath, p),
-                    e.__class__.__name__, e))
-
-
-if __name__ == '__main__':
-    try_rebuild()
diff --git a/lib_pypy/ctypes_config_cache/resource.ctc.py b/lib_pypy/ctypes_config_cache/resource.ctc.py
deleted file mode 100644
--- a/lib_pypy/ctypes_config_cache/resource.ctc.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-'ctypes_configure' source for resource.py.
-Run this to rebuild _resource_cache.py.
-"""
-
-
-from ctypes import sizeof
-import dumpcache
-from ctypes_configure.configure import (configure,
-    ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger,
-    SimpleType)
-
-
-_CONSTANTS = (
-    'RLIM_INFINITY',
-    'RLIM_NLIMITS',
-)
-_OPTIONAL_CONSTANTS = (
-    'RLIMIT_CPU',
-    'RLIMIT_FSIZE',
-    'RLIMIT_DATA',
-    'RLIMIT_STACK',
-    'RLIMIT_CORE',
-    'RLIMIT_RSS',
-    'RLIMIT_NPROC',
-    'RLIMIT_NOFILE',
-    'RLIMIT_OFILE',
-    'RLIMIT_MEMLOCK',
-    'RLIMIT_AS',
-    'RLIMIT_LOCKS',
-    'RLIMIT_SIGPENDING',
-    'RLIMIT_MSGQUEUE',
-    'RLIMIT_NICE',
-    'RLIMIT_RTPRIO',
-    'RLIMIT_VMEM',
-
-    'RUSAGE_BOTH',
-    'RUSAGE_SELF',
-    'RUSAGE_CHILDREN',
-)
-
-# Setup our configure
-class ResourceConfigure:
-    _compilation_info_ = ExternalCompilationInfo(includes=['sys/resource.h'])
-    rlim_t = SimpleType('rlim_t')
-for key in _CONSTANTS:
-    setattr(ResourceConfigure, key, ConstantInteger(key))
-for key in _OPTIONAL_CONSTANTS:
-    setattr(ResourceConfigure, key, DefinedConstantInteger(key))
-
-# Configure constants and types
-config = configure(ResourceConfigure)
-config['rlim_t_max'] = (1<<(sizeof(config['rlim_t']) * 8)) - 1
-optional_constants = []
-for key in _OPTIONAL_CONSTANTS:
-    if config[key] is not None:
-        optional_constants.append(key)
-    else:
-        del config[key]
-
-config['ALL_CONSTANTS'] = _CONSTANTS + tuple(optional_constants)
-dumpcache.dumpcache2('resource', config)
diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py
--- a/lib_pypy/pwd.py
+++ b/lib_pypy/pwd.py
@@ -1,4 +1,4 @@
-# ctypes implementation: Victor Stinner, 2008-05-08
+# indirectly based on ctypes implementation: Victor Stinner, 2008-05-08
 """
 This module provides access to the Unix password database.
 It is available on all Unix versions.
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -1,15 +1,8 @@
-import sys
-if sys.platform == 'win32':
-    raise ImportError('resource module not available for win32')
+"""http://docs.python.org/library/resource""";
 
-# load the platform-specific cache made by running resource.ctc.py
-from ctypes_config_cache._resource_cache import *
-
-from ctypes_support import standard_c_lib as libc
-from ctypes_support import get_errno
-from ctypes import Structure, c_int, c_long, byref, POINTER
+from _resource_cffi import ffi, lib
 from errno import EINVAL, EPERM
-import _structseq
+import _structseq, os
 
 try: from __pypy__ import builtinify
 except ImportError: builtinify = lambda f: f
@@ -18,106 +11,37 @@
 class error(Exception):
     pass
 
+class struct_rusage:
+    """struct_rusage: Result from getrusage.
 
-# Read required libc functions
-_getrusage = libc.getrusage
-_getrlimit = libc.getrlimit
-_setrlimit = libc.setrlimit
-try:
-    _getpagesize = libc.getpagesize
-    _getpagesize.argtypes = ()
-    _getpagesize.restype = c_int
-except AttributeError:
-    from os import sysconf
-    _getpagesize = None
+This object may be accessed either as a tuple of
+    (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt,
+    nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw)
+or via the attributes ru_utime, ru_stime, ru_maxrss, and so on."""
 
-
-class timeval(Structure):
-    _fields_ = (
-        ("tv_sec", c_long),
-        ("tv_usec", c_long),
-    )
-    def __str__(self):
-        return "(%s, %s)" % (self.tv_sec, self.tv_usec)
-
-    def __float__(self):
-        return self.tv_sec + self.tv_usec/1000000.0
-
-class _struct_rusage(Structure):
-    _fields_ = (
-        ("ru_utime", timeval),
-        ("ru_stime", timeval),
-        ("ru_maxrss", c_long),
-        ("ru_ixrss", c_long),
-        ("ru_idrss", c_long),
-        ("ru_isrss", c_long),
-        ("ru_minflt", c_long),
-        ("ru_majflt", c_long),
-        ("ru_nswap", c_long),
-        ("ru_inblock", c_long),
-        ("ru_oublock", c_long),
-        ("ru_msgsnd", c_long),
-        ("ru_msgrcv", c_long),
-        ("ru_nsignals", c_long),
-        ("ru_nvcsw", c_long),
-        ("ru_nivcsw", c_long),
-    )
-
-_getrusage.argtypes = (c_int, POINTER(_struct_rusage))
-_getrusage.restype = c_int
-
-
-class struct_rusage:
     __metaclass__ = _structseq.structseqtype
 
-    ru_utime = _structseq.structseqfield(0)
-    ru_stime = _structseq.structseqfield(1)
-    ru_maxrss = _structseq.structseqfield(2)
-    ru_ixrss = _structseq.structseqfield(3)
-    ru_idrss = _structseq.structseqfield(4)
-    ru_isrss = _structseq.structseqfield(5)
-    ru_minflt = _structseq.structseqfield(6)
-    ru_majflt = _structseq.structseqfield(7)
-    ru_nswap = _structseq.structseqfield(8)
-    ru_inblock = _structseq.structseqfield(9)
-    ru_oublock = _structseq.structseqfield(10)
-    ru_msgsnd = _structseq.structseqfield(11)
-    ru_msgrcv = _structseq.structseqfield(12)
-    ru_nsignals = _structseq.structseqfield(13)
-    ru_nvcsw = _structseq.structseqfield(14)
-    ru_nivcsw = _structseq.structseqfield(15)
+    ru_utime = _structseq.structseqfield(0,    "user time used")
+    ru_stime = _structseq.structseqfield(1,    "system time used")
+    ru_maxrss = _structseq.structseqfield(2,   "max. resident set size")
+    ru_ixrss = _structseq.structseqfield(3,    "shared memory size")
+    ru_idrss = _structseq.structseqfield(4,    "unshared data size")
+    ru_isrss = _structseq.structseqfield(5,    "unshared stack size")
+    ru_minflt = _structseq.structseqfield(6,   "page faults not requiring I/O")
+    ru_majflt = _structseq.structseqfield(7,   "page faults requiring I/O")
+    ru_nswap = _structseq.structseqfield(8,    "number of swap outs")
+    ru_inblock = _structseq.structseqfield(9,  "block input operations")
+    ru_oublock = _structseq.structseqfield(10, "block output operations")
+    ru_msgsnd = _structseq.structseqfield(11,  "IPC messages sent")
+    ru_msgrcv = _structseq.structseqfield(12,  "IPC messages received")
+    ru_nsignals = _structseq.structseqfield(13,"signals received")
+    ru_nvcsw = _structseq.structseqfield(14,   "voluntary context switches")
+    ru_nivcsw = _structseq.structseqfield(15,  "involuntary context switches")
 
-@builtinify
-def rlimit_check_bounds(rlim_cur, rlim_max):
-    if rlim_cur > rlim_t_max:
-        raise ValueError("%d does not fit into rlim_t" % rlim_cur)
-    if rlim_max > rlim_t_max:
-        raise ValueError("%d does not fit into rlim_t" % rlim_max)
-
-class rlimit(Structure):
-    _fields_ = (
-        ("rlim_cur", rlim_t),
-        ("rlim_max", rlim_t),
-    )
-
-_getrlimit.argtypes = (c_int, POINTER(rlimit))
-_getrlimit.restype = c_int
-_setrlimit.argtypes = (c_int, POINTER(rlimit))
-_setrlimit.restype = c_int
-
-
-@builtinify
-def getrusage(who):
-    ru = _struct_rusage()
-    ret = _getrusage(who, byref(ru))
-    if ret == -1:
-        errno = get_errno()
-        if errno == EINVAL:
-            raise ValueError("invalid who parameter")
-        raise error(errno)
+def _make_struct_rusage(ru):
     return struct_rusage((
-        float(ru.ru_utime),
-        float(ru.ru_stime),
+        lib.my_utime(ru),
+        lib.my_stime(ru),
         ru.ru_maxrss,
         ru.ru_ixrss,
         ru.ru_idrss,
@@ -135,48 +59,59 @@
         ))
 
 @builtinify
+def getrusage(who):
+    ru = ffi.new("struct rusage *")
+    if lib.getrusage(who, ru) == -1:
+        if ffi.errno == EINVAL:
+            raise ValueError("invalid who parameter")
+        raise error(ffi.errno)
+    return _make_struct_rusage(ru)
+
+@builtinify
 def getrlimit(resource):
-    if not(0 <= resource < RLIM_NLIMITS):
+    if not (0 <= resource < lib.RLIM_NLIMITS):
         return ValueError("invalid resource specified")
 
-    rlim = rlimit()
-    ret = _getrlimit(resource, byref(rlim))
-    if ret == -1:
-        errno = get_errno()
-        raise error(errno)
-    return (rlim.rlim_cur, rlim.rlim_max)
+    result = ffi.new("long long[2]")
+    if lib.my_getrlimit(resource, result) == -1:
+        raise error(ffi.errno)
+    return (result[0], result[1])
 
 @builtinify
-def setrlimit(resource, rlim):
-    if not(0 <= resource < RLIM_NLIMITS):
+def setrlimit(resource, limits):
+    if not (0 <= resource < lib.RLIM_NLIMITS):
         return ValueError("invalid resource specified")
-    rlimit_check_bounds(*rlim)
-    rlim = rlimit(rlim[0], rlim[1])
 
-    ret = _setrlimit(resource, byref(rlim))
-    if ret == -1:
-        errno = get_errno()
-        if errno == EINVAL:
-            return ValueError("current limit exceeds maximum limit")
-        elif errno == EPERM:
-            return ValueError("not allowed to raise maximum limit")
+    limits = tuple(limits)
+    if len(limits) != 2:
+        raise ValueError("expected a tuple of 2 integers")
+
+    if lib.my_setrlimit(resource, limits[0], limits[1]) == -1:
+        if ffi.errno == EINVAL:
+            raise ValueError("current limit exceeds maximum limit")
+        elif ffi.errno == EPERM:
+            raise ValueError("not allowed to raise maximum limit")
         else:
-            raise error(errno)
+            raise error(ffi.errno)
+
 
 @builtinify
 def getpagesize():
-    if _getpagesize:
-        return _getpagesize()
-    else:
-        try:
-            return sysconf("SC_PAGE_SIZE")
-        except ValueError:
-            # Irix 5.3 has _SC_PAGESIZE, but not _SC_PAGE_SIZE
-            return sysconf("SC_PAGESIZE")
+    return os.sysconf("SC_PAGESIZE")
 
-__all__ = ALL_CONSTANTS + (
-    'error', 'timeval', 'struct_rusage', 'rlimit',
-    'getrusage', 'getrlimit', 'setrlimit', 'getpagesize',
+
+def _setup():
+    all_constants = []
+    p = lib.my_rlimit_consts
+    while p.name:
+        name = ffi.string(p.name)
+        globals()[name] = int(p.value)
+        all_constants.append(name)
+        p += 1
+    return all_constants
+
+__all__ = tuple(_setup()) + (
+    'error', 'getpagesize', 'struct_rusage',
+    'getrusage', 'getrlimit', 'setrlimit',
 )
-
-del ALL_CONSTANTS
+del _setup
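
The module keeps the standard resource API on top of the new cffi layer; a
minimal usage sketch:

    import resource

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))

    ru = resource.getrusage(resource.RUSAGE_SELF)
    print(ru.ru_maxrss)      # max resident set size, also available as ru[2]
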
diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py
--- a/lib_pypy/syslog.py
+++ b/lib_pypy/syslog.py
@@ -51,6 +51,8 @@
     # if log is not opened, open it now
     if not _S_log_open:
         openlog()
+    if isinstance(message, unicode):
+        message = str(message)
     lib.syslog(priority, "%s", message)
 
 @builtinify
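
With this change a unicode message is coerced with str() before reaching the C
function, which only succeeds for ASCII-only text. A small sketch:

    import syslog

    syslog.openlog("myapp")
    syslog.syslog(syslog.LOG_INFO, u"hello")   # ASCII-only unicode is accepted
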
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -46,7 +46,6 @@
 except detect_cpu.ProcessorAutodetectError:
     pass
 
-
 translation_modules = default_modules.copy()
 translation_modules.update([
     "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5",
@@ -205,15 +204,6 @@
         BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
                    default=False),
 
-        BoolOption("withprebuiltchar",
-                   "use prebuilt single-character string objects",
-                   default=False),
-
-        BoolOption("sharesmallstr",
-                   "always reuse the prebuilt string objects "
-                   "(the empty string and potentially single-char strings)",
-                   default=False),
-
         BoolOption("withspecialisedtuple",
                    "use specialised tuples",
                    default=False),
@@ -223,39 +213,14 @@
                    default=False,
                    requires=[("objspace.honor__builtins__", False)]),
 
-        BoolOption("withmapdict",
-                   "make instances really small but slow without the JIT",
-                   default=False,
-                   requires=[("objspace.std.getattributeshortcut", True),
-                             ("objspace.std.withmethodcache", True),
-                       ]),
-
-        BoolOption("withrangelist",
-                   "enable special range list implementation that does not "
-                   "actually create the full list until the resulting "
-                   "list is mutated",
-                   default=False),
         BoolOption("withliststrategies",
                    "enable optimized ways to store lists of primitives ",
                    default=True),
 
-        BoolOption("withtypeversion",
-                   "version type objects when changing them",
-                   cmdline=None,
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
-
-        BoolOption("withmethodcache",
-                   "try to cache method lookups",
-                   default=False,
-                   requires=[("objspace.std.withtypeversion", True),
-                             ("translation.rweakref", True)]),
         BoolOption("withmethodcachecounter",
                    "try to cache methods and provide a counter in __pypy__. "
                    "for testing purposes only.",
-                   default=False,
-                   requires=[("objspace.std.withmethodcache", True)]),
+                   default=False),
         IntOption("methodcachesizeexp",
                   " 2 ** methodcachesizeexp is the size of the of the method 
cache ",
                   default=11),
@@ -266,22 +231,10 @@
         BoolOption("optimized_list_getitem",
                    "special case the 'list[integer]' expressions",
                    default=False),
-        BoolOption("getattributeshortcut",
-                   "track types that override __getattribute__",
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
         BoolOption("newshortcut",
                    "cache and shortcut calling __new__ from builtin types",
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
+                   default=False),
 
-        BoolOption("withidentitydict",
-                   "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
      ]),
 ])
 
@@ -297,15 +250,10 @@
     """
     # all the good optimizations for PyPy should be listed here
     if level in ['2', '3', 'jit']:
-        config.objspace.std.suggest(withrangelist=True)
-        config.objspace.std.suggest(withmethodcache=True)
-        config.objspace.std.suggest(withprebuiltchar=True)
         config.objspace.std.suggest(intshortcut=True)
         config.objspace.std.suggest(optimized_list_getitem=True)
-        config.objspace.std.suggest(getattributeshortcut=True)
         #config.objspace.std.suggest(newshortcut=True)
         config.objspace.std.suggest(withspecialisedtuple=True)
-        config.objspace.std.suggest(withidentitydict=True)
         #if not IS_64_BITS:
         #    config.objspace.std.suggest(withsmalllong=True)
 
@@ -318,16 +266,13 @@
     # memory-saving optimizations
     if level == 'mem':
         config.objspace.std.suggest(withprebuiltint=True)
-        config.objspace.std.suggest(withrangelist=True)
-        config.objspace.std.suggest(withprebuiltchar=True)
-        config.objspace.std.suggest(withmapdict=True)
+        config.objspace.std.suggest(withliststrategies=True)
         if not IS_64_BITS:
             config.objspace.std.suggest(withsmalllong=True)
 
     # extra optimizations with the JIT
     if level == 'jit':
         config.objspace.std.suggest(withcelldict=True)
-        config.objspace.std.suggest(withmapdict=True)
 
 
 def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
 
     assert conf.objspace.usemodules.gc
 
-    conf.objspace.std.withmapdict = True
-    assert conf.objspace.std.withtypeversion
-    conf = get_pypy_config()
-    conf.objspace.std.withtypeversion = False
-    py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
 def test_conflicting_gcrootfinder():
     conf = get_pypy_config()
     conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
 def test_set_pypy_opt_level():
     conf = get_pypy_config()
     set_pypy_opt_level(conf, '2')
-    assert conf.objspace.std.getattributeshortcut
+    assert conf.objspace.std.intshortcut
     conf = get_pypy_config()
     set_pypy_opt_level(conf, '0')
-    assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
-    conf = get_pypy_config()
-    conf.translation.rweakref = False
-    set_pypy_opt_level(conf, '3')
-
-    assert not conf.objspace.std.withtypeversion
-    assert not conf.objspace.std.withmethodcache
+    assert not conf.objspace.std.intshortcut
 
 def test_check_documentation():
     def check_file_exists(fn):
diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst
--- a/pypy/doc/__pypy__-module.rst
+++ b/pypy/doc/__pypy__-module.rst
@@ -18,6 +18,7 @@
  - ``bytebuffer(length)``: return a new read-write buffer of the given length.
    It works like a simplified array of characters (actually, depending on the
    configuration the ``array`` module internally uses this).
+ - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation).
 
 
 Transparent Proxy Functionality
@@ -37,4 +38,3 @@
 --------------------------------------------------------
 
  - ``isfake(obj)``: returns True if ``obj`` is faked.
- - ``interp_pdb()``: start a pdb at interpreter-level.
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -102,15 +102,15 @@
 
     apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
     libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
-    tk-dev
+    tk-dev libgc-dev
 
 For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
 
 On Fedora::
 
-    yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
-    lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
-    (XXX plus the Febora version of libgdbm-dev and tk-dev)
+    dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+    lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+    gdbm-devel
 
 For the optional lzma module on PyPy3 you will also need ``xz-devel``.
 
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
 
 To raise an application-level exception::
 
-    raise OperationError(space.w_XxxError, space.wrap("message"))
+    from pypy.interpreter.error import oefmt
+
+    raise oefmt(space.w_XxxError, "message")
+
+    raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+    raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
 
 To catch a specific application-level exception::
 
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``.  This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withrangelist.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Enable "range list" objects. They are an additional implementation of the 
Python
-``list`` type, indistinguishable for the normal user. Whenever the ``range``
-builtin is called, an range list is returned. As long as this list is not
-mutated (and for example only iterated over), it uses only enough memory to
-store the start, stop and step of the range. This makes using ``range`` as
-efficient as ``xrange``, as long as the result is only used in a ``for``-loop.
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists
-
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -81,13 +81,13 @@
   Simon Burton
   Martin Matusiak
   Konstantin Lopuhin
+  Stefano Rivera
   Wenzhu Man
   John Witulski
   Laurence Tratt
   Ivan Sichmann Freitas
   Greg Price
   Dario Bertini
-  Stefano Rivera
   Mark Pearse
   Simon Cross
   Andreas Stührk
@@ -95,9 +95,10 @@
   Jean-Philippe St. Pierre
   Guido van Rossum
   Pavel Vinogradov
+  Spenser Bauman
   Jeremy Thurgood
   Paweł Piotr Przeradowski
-  Spenser Bauman
+  Tobias Pape
   Paul deGrandis
   Ilya Osadchiy
   marky1991
@@ -109,7 +110,7 @@
   Georg Brandl
   Bert Freudenberg
   Stian Andreassen
-  Tobias Pape
+  Mark Young
   Wanja Saatkamp
   Gerald Klix
   Mike Blume
@@ -140,9 +141,9 @@
   Yichao Yu
   Rocco Moretti
   Gintautas Miliauskas
+  Devin Jeanpierre
   Michael Twomey
   Lucian Branescu Mihaila
-  Devin Jeanpierre
   Gabriel Lavoie
   Olivier Dormond
   Jared Grubb
@@ -153,6 +154,7 @@
   Victor Stinner
   Andrews Medina
   anatoly techtonik
+  Sergey Matyunin
   Stuart Williams
   Jasper Schulz
   Christian Hudon
@@ -187,7 +189,6 @@
   Arjun Naik
   Valentina Mukhamedzhanova
   Stefano Parmesan
-  Mark Young
   Alexis Daboville
   Jens-Uwe Mager
   Carl Meyer
@@ -195,7 +196,9 @@
   Pieter Zieschang
   Gabriel
   Lukas Vacek
+  Kunal Grover
   Andrew Dalke
+  Florin Papa
   Sylvain Thenault
   Jakub Stasiak
   Nathan Taylor
@@ -210,7 +213,6 @@
   Kristjan Valur Jonsson
   David Lievens
   Neil Blakey-Milner
-  Sergey Matyunin
   Lutz Paelike
   Lucio Torre
   Lars Wassermann
@@ -222,9 +224,11 @@
   Artur Lisiecki
   Sergey Kishchenko
   Ignas Mikalajunas
+  Alecsandru Patrascu
   Christoph Gerum
   Martin Blais
   Lene Wagner
+  Catalin Gabriel Manciu
   Tomo Cocoa
   Kim Jin Su
   Toni Mattis
@@ -261,6 +265,7 @@
   Akira Li
   Gustavo Niemeyer
   Stephan Busemann
+  florinpapa
   Rafał Gałczyński
   Matt Bogosian
   Christian Muirhead
@@ -275,6 +280,7 @@
   Boglarka Vezer
   Chris Pressey
   Buck Golemon
+  Diana Popa
   Konrad Delong
   Dinu Gherman
   Chris Lambacher
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
 The work on the cling backend has so far been done only for CPython, but
 bringing it to PyPy is a lot less work than developing it in the first place.
 
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
 .. _llvm: http://llvm.org/
 .. _clang: http://clang.llvm.org/
 
@@ -283,7 +283,8 @@
 core reflection set, but for the moment assume we want to have it in the
 reflection library that we are building for this example.
 
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries")
 which is a simple XML file specifying, either explicitly or by using a
 pattern, which classes, variables, namespaces, etc. to select from the given
 header file.
@@ -305,7 +306,7 @@
         <function name="BaseFactory" />
     </lcgdict>
 
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
 
 Now the reflection info can be generated and compiled::
 
@@ -811,7 +812,7 @@
 immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
 variable.
 
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
 
 There are a couple of minor differences between PyCintex and cppyy, most to do
 with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
   wrappers.  On PyPy we can't tell the difference, so
   ``ismethod([].__add__) == ismethod(list.__add__) == True``.
 
+* in CPython, the built-in types have attributes that can be
+  implemented in various ways.  Depending on the way, if you try to
+  write to (or delete) a read-only (or undeletable) attribute, you get
+  either a ``TypeError`` or an ``AttributeError``.  PyPy tries to
+  strike some middle ground between full consistency and full
+  compatibility here.  This means that a few corner cases don't raise
+  the same exception, like ``del (lambda:None).__closure__``.
+
 * in pure Python, if you write ``class A(object): def f(self): pass``
   and have a subclass ``B`` which doesn't override ``f()``, then
   ``B.f(x)`` still checks that ``x`` is an instance of ``B``.  In
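
A tiny pure-Python illustration of the corner case mentioned in the new bullet
above; which exception type you get is exactly the compatibility detail being
described, so the sketch catches both::

    f = lambda: None
    try:
        del f.__closure__              # read-only / undeletable attribute
    except (TypeError, AttributeError) as e:
        # CPython and PyPy do not necessarily agree on the type raised here.
        print type(e).__name__
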
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
 
 :source:`pypy/doc/discussion/`            drafts of ideas and documentation
 
-:source:`pypy/goal/`                      our :ref:`main PyPy-translation scripts <translate-pypy>`
+:source:`pypy/goal/`                      our main PyPy-translation scripts
                                           live here
 
 :source:`pypy/interpreter/`               :doc:`bytecode interpreter <interpreter>` and related objects
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -1,19 +1,127 @@
-.. XXX armin, what do we do with this?
+Ordering finalizers in the MiniMark GC
+======================================
 
 
-Ordering finalizers in the SemiSpace GC
-=======================================
+RPython interface
+-----------------
 
-Goal
-----
+In RPython programs like PyPy, we need a fine-grained method of
+controlling the RPython- as well as the app-level ``__del__()``.  To
+make it possible, the RPython interface is now the following one (from
+May 2016):
 
-After a collection, the SemiSpace GC should call the finalizers on
+* RPython objects can have ``__del__()``.  These are called
+  immediately by the GC when the last reference to the object goes
+  away, like in CPython.  However, the long-term goal is that all
+  ``__del__()`` methods should only contain simple enough code.  If
+  they do, we call them "destructors".  They can't use operations that
+  would resurrect the object, for example.  Use the decorator
+  ``@rgc.must_be_light_finalizer`` to ensure they are destructors.
+
+* RPython-level ``__del__()`` methods that do not pass the destructor test
+  are supported for backward compatibility, but deprecated.  The rest
+  of this document assumes that all ``__del__()`` methods are destructors.
+
+* For any more advanced usage --- in particular for any app-level
+  object with a __del__ --- we don't use the RPython-level
+  ``__del__()`` method.  Instead we use
+  ``rgc.FinalizerController.register_finalizer()``.  This allows us to
+  attach a finalizer method to the object, giving more control over
+  the ordering than just an RPython ``__del__()``.
+
+We try to consistently call ``__del__()`` a destructor, to distinguish
+it from a finalizer.  A finalizer runs earlier, and in topological
+order; with some care, the object can even still be reachable at that
+point.  A destructor, on the other hand, runs last; nothing can be done
+with the object any more, and the GC frees it immediately.
+
+
+Destructors
+-----------
+
+A destructor is an RPython ``__del__()`` method that is called directly
+by the GC when it is about to free the memory.  Intended for objects
+that just need to free an extra block of raw memory.
+
+There are restrictions on the kind of code you can put in ``__del__()``,
+including all other functions called by it.  These restrictions are
+checked.  In particular you cannot access fields containing GC objects.
+Right now you can't call any external C function either.
+
+Destructors are called precisely when the GC frees the memory of the
+object.  As long as the object exists (even in some finalizer queue or
+anywhere), its destructor is not called.
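
To make the destructor notion concrete, a minimal RPython-style sketch (the
class and the raw-memory detail are illustrative; the points being made above
are the decorator and the "free raw memory only, touch no GC fields" rule)::

    from rpython.rlib import rgc
    from rpython.rtyper.lltypesystem import lltype, rffi

    class RawBlock(object):
        """Illustrative RPython class owning a block of raw (non-GC) memory."""
        def __init__(self, size):
            self.ptr = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')

        @rgc.must_be_light_finalizer
        def __del__(self):
            # A destructor: frees raw memory only, reads no GC fields and
            # calls no external C function.  The GC runs it exactly when
            # it frees the object.
            lltype.free(self.ptr, flavor='raw')
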
+
+
+Register_finalizer
+------------------
+
+The interface for full finalizers is made with PyPy in mind, but should
+be generally useful.
+
+The idea is that you subclass the ``rgc.FinalizerQueue`` class:
+
+* You must give a class-level attribute ``base_class``, which is the
+  base class of all instances with a finalizer.  (If you need
+  finalizers on several unrelated classes, you need several unrelated
+  ``FinalizerQueue`` subclasses.)
+
+* You override the ``finalizer_trigger()`` method; see below.
+
+Then you create one global (or space-specific) instance of this
+subclass; call it ``fin``.  At runtime, you call
+``fin.register_finalizer(obj)`` for every instance ``obj`` that needs
+a finalizer.  Each ``obj`` must be an instance of ``fin.base_class``,
+but not every such instance needs to have a finalizer registered;
+typically we try to register a finalizer on as few objects as possible
+(e.g. only if it is an object which has an app-level ``__del__()``
+method).
+
+After a major collection, the GC finds all objects ``obj`` on which a
+finalizer was registered and which are unreachable, and marks them as
+reachable again, as well as all objects they depend on.  It then picks
+a topological ordering (breaking cycles randomly, if any) and enqueues
+the objects and their registered finalizer functions in that order, in
+a queue specific to the prebuilt ``fin`` instance.  Finally, when the
+major collection is done, it calls ``fin.finalizer_trigger()``.
+
+This method ``finalizer_trigger()`` can either do some work directly,
+or delay it to be done later (e.g. between two bytecodes).  If it does
+work directly, note that it cannot (directly or indirectly) cause the
+GIL to be released.
+
+To find the queued items, call ``fin.next_dead()`` repeatedly.  It
+returns the next queued item, or ``None`` when the queue is empty.
+
+In theory, it would kind of work if you combine several different
+``FinalizerQueue`` instances for objects of the same class, and
+(always in theory) the same ``obj`` could be registered several times
+in the same queue, or in several queues.  This is not tested though.
+For now the untranslated emulation does not support registering the
+same object several times.
+
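
Putting the pieces of this hunk together, a minimal sketch of the
``FinalizerQueue`` protocol as described above (the wrapper class, the
``close_handle()`` helper and the drain-immediately trigger policy are
assumptions for illustration, not part of the patch)::

    from rpython.rlib import rgc

    def close_handle(handle):
        # Hypothetical cleanup; stands in for releasing an OS resource.
        pass

    class W_Wrapper(object):
        """Illustrative base class of every object that may get a finalizer."""
        def __init__(self, handle):
            self.handle = handle

    class WrapperFinalizerQueue(rgc.FinalizerQueue):
        base_class = W_Wrapper             # all registered objects derive from this

        def finalizer_trigger(self):
            # Called by the GC after a major collection; here we drain the
            # queue right away.  A real interpreter might instead defer this
            # work, e.g. to the next bytecode boundary.
            while True:
                w_obj = self.next_dead()
                if w_obj is None:
                    break
                close_handle(w_obj.handle)

    fin = WrapperFinalizerQueue()          # one prebuilt, global instance

    def new_wrapper(handle):
        w_obj = W_Wrapper(handle)
        fin.register_finalizer(w_obj)      # only objects that actually need it
        return w_obj
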