Author: Manuel Jacob <m...@manueljacob.de>
Branch: py3k
Changeset: r80676:23a35d67b4bd
Date: 2015-11-14 18:43 +0100
http://bitbucket.org/pypy/pypy/changeset/23a35d67b4bd/

Log:    hg merge 57c9a47c70f6 (last revision of default branch which got
        into 4.0.0 release).

diff too long, truncating to 2000 out of 91959 lines

diff --git a/.gitignore b/.gitignore
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,14 @@
 .hg
 .svn
 
+# VIM
+*.swp
+*.swo
+
 *.pyc
 *.pyo
 *~
+__pycache__/
 
 bin/pypy-c
 include/*.h
@@ -22,4 +27,6 @@
 pypy/translator/goal/pypy-c
 pypy/translator/goal/target*-c
 release/
+!pypy/tool/release/
 rpython/_cache/
+__pycache__/
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -15,3 +15,4 @@
 e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0
 e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0
 295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0
+f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -168,7 +168,6 @@
   Michael Twomey
   Lucian Branescu Mihaila
   Yichao Yu
-  Anton Gulenko
   Gabriel Lavoie
   Olivier Dormond
   Jared Grubb
@@ -215,6 +214,7 @@
   Carl Meyer
   Karl Ramm
   Pieter Zieschang
+  Anton Gulenko
   Gabriel
   Lukas Vacek
   Andrew Dalke
@@ -247,6 +247,7 @@
   Toni Mattis
   Lucas Stadler
   Julian Berman
+  Markus Holtermann
   roberto@goyle
   Yury V. Zaytsev
   Anna Katrina Dominguez
@@ -429,7 +430,7 @@
 _gdbm module, provided in the file lib_pypy/_gdbm.py, is redistributed
 under the terms of the GPL license as well.
 
-License for 'pypy/module/_vmprof/src'
+License for 'rpython/rlib/rvmprof/src'
 --------------------------------------
 
 The code is based on gperftools. You may see a copy of the License for it at
diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py
--- a/dotviewer/graphclient.py
+++ b/dotviewer/graphclient.py
@@ -127,16 +127,8 @@
         return spawn_graphserver_handler((host, port))
 
 def spawn_local_handler():
-    if hasattr(sys, 'pypy_objspaceclass'):
-        # if 'python' is actually PyPy, e.g. in a virtualenv, then
-        # try hard to find a real CPython
-        try:
-            python = subprocess.check_output(
-                'env -i $SHELL -l -c "which python"', shell=True).strip()
-        except subprocess.CalledProcessError:
-            # did not work, fall back to 'python'
-            python = 'python'
-    else:
+    python = os.getenv('PYPY_PYGAME_PYTHON')
+    if not python:
         python = sys.executable
     args = [python, '-u', GRAPHSERVER, '--stdio']
     p = subprocess.Popen(args,
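For reference, a minimal sketch (not part of the patch) of the interpreter selection that
the dotviewer hunk above introduces: an explicit override through the PYPY_PYGAME_PYTHON
environment variable, falling back to the running interpreter.

    import os
    import sys

    # mirrors the new spawn_local_handler() logic: prefer an explicit override
    python = os.getenv('PYPY_PYGAME_PYTHON')
    if not python:
        python = sys.executable
    print(python)
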
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -310,6 +310,22 @@
         """
         return self._backend.from_buffer(self.BCharA, python_buffer)
 
+    def memmove(self, dest, src, n):
+        """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest.
+
+        Like the C function memmove(), the memory areas may overlap;
+        apart from that it behaves like the C function memcpy().
+
+        'src' can be any cdata ptr or array, or any Python buffer object.
+        'dest' can be any cdata ptr or array, or a writable Python buffer
+        object.  The size to copy, 'n', is always measured in bytes.
+
+        Unlike other methods, this one supports all Python buffers including
+        byte strings and bytearrays---but it still does not support
+        non-contiguous buffers.
+        """
+        return self._backend.memmove(dest, src, n)
+
     def callback(self, cdecl, python_callable=None, error=None, onerror=None):
         """Return a callback object or a decorator making such a
         callback object.  'cdecl' must name a C function pointer type.
@@ -609,7 +625,7 @@
     def make_accessor_locked(name):
         key = 'function ' + name
         if key in ffi._parser._declarations:
-            tp = ffi._parser._declarations[key]
+            tp, _ = ffi._parser._declarations[key]
             BType = ffi._get_cached_btype(tp)
             try:
                 value = backendlib.load_function(BType, name)
@@ -620,7 +636,7 @@
         #
         key = 'variable ' + name
         if key in ffi._parser._declarations:
-            tp = ffi._parser._declarations[key]
+            tp, _ = ffi._parser._declarations[key]
             BType = ffi._get_cached_btype(tp)
             read_variable = backendlib.read_variable
             write_variable = backendlib.write_variable
@@ -631,12 +647,23 @@
         #
         if not copied_enums:
             from . import model
-            for key, tp in ffi._parser._declarations.items():
+            error = None
+            for key, (tp, _) in ffi._parser._declarations.items():
                 if not isinstance(tp, model.EnumType):
                     continue
+                try:
+                    tp.check_not_partial()
+                except Exception as e:
+                    error = e
+                    continue
                 for enumname, enumval in zip(tp.enumerators, tp.enumvalues):
                     if enumname not in library.__dict__:
                         library.__dict__[enumname] = enumval
+            if error is not None:
+                if name in library.__dict__:
+                    return     # ignore error, about a different enum
+                raise error
+
             for key, val in ffi._parser._int_constants.items():
                 if key not in library.__dict__:
                     library.__dict__[key] = val
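A hedged usage sketch of the new ffi.memmove() documented above (assumes a cffi/PyPy
build that already ships this method; the buffer contents are only illustrative):

    import cffi

    ffi = cffi.FFI()
    buf = ffi.new("char[]", b"hello world")
    # like C memmove(), overlapping source and destination are allowed
    ffi.memmove(buf + 6, buf, 5)      # copy the first 5 bytes over "world"
    print(ffi.string(buf))            # -> 'hello hello'
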
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -26,6 +26,9 @@
 _r_words = re.compile(r"\w+|\S")
 _parser_cache = None
 _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE)
+_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
+_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
+_r_cdecl = re.compile(r"\b__cdecl\b")
 
 def _get_parser():
     global _parser_cache
@@ -44,6 +47,14 @@
         macrovalue = macrovalue.replace('\\\n', '').strip()
         macros[macroname] = macrovalue
     csource = _r_define.sub('', csource)
+    # BIG HACK: replace WINAPI or __stdcall with "volatile const".
+    # It doesn't make sense for the return type of a function to be
+    # "volatile volatile const", so we abuse it to detect __stdcall...
+    # Hack number 2 is that "int(volatile *fptr)();" is not valid C
+    # syntax, so we place the "volatile" before the opening parenthesis.
+    csource = _r_stdcall2.sub(' volatile volatile const(', csource)
+    csource = _r_stdcall1.sub(' volatile volatile const ', csource)
+    csource = _r_cdecl.sub(' ', csource)
     # Replace "[...]" with "[__dotdotdotarray__]"
     csource = _r_partial_array.sub('[__dotdotdotarray__]', csource)
     # Replace "...}" with "__dotdotdotNUM__}".  This construction should
@@ -75,9 +86,13 @@
     # but should be fine for all the common types.
     look_for_words = set(COMMON_TYPES)
     look_for_words.add(';')
+    look_for_words.add(',')
+    look_for_words.add('(')
+    look_for_words.add(')')
     look_for_words.add('typedef')
     words_used = set()
     is_typedef = False
+    paren = 0
     previous_word = ''
     for word in _r_words.findall(csource):
         if word in look_for_words:
@@ -88,6 +103,15 @@
                     is_typedef = False
             elif word == 'typedef':
                 is_typedef = True
+                paren = 0
+            elif word == '(':
+                paren += 1
+            elif word == ')':
+                paren -= 1
+            elif word == ',':
+                if is_typedef and paren == 0:
+                    words_used.discard(previous_word)
+                    look_for_words.discard(previous_word)
             else:   # word in COMMON_TYPES
                 words_used.add(word)
         previous_word = word
@@ -192,6 +216,7 @@
                     if not decl.name:
                         raise api.CDefError("typedef does not declare any 
name",
                                             decl)
+                    quals = 0
                     if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType)
                             and decl.type.type.names[-1] == '__dotdotdot__'):
                         realtype = self._get_unknown_type(decl)
@@ -202,8 +227,9 @@
                           decl.type.type.type.names == ['__dotdotdot__']):
                         realtype = model.unknown_ptr_type(decl.name)
                     else:
-                        realtype = self._get_type(decl.type, name=decl.name)
-                    self._declare('typedef ' + decl.name, realtype)
+                        realtype, quals = self._get_type_and_quals(
+                            decl.type, name=decl.name)
+                    self._declare('typedef ' + decl.name, realtype, quals=quals)
                 else:
                     raise api.CDefError("unrecognized construct", decl)
         except api.FFIError as e:
@@ -255,9 +281,9 @@
     def _parse_decl(self, decl):
         node = decl.type
         if isinstance(node, pycparser.c_ast.FuncDecl):
-            tp = self._get_type(node, name=decl.name)
+            tp, quals = self._get_type_and_quals(node, name=decl.name)
             assert isinstance(tp, model.RawFunctionType)
-            tp = self._get_type_pointer(tp)
+            tp = self._get_type_pointer(tp, quals)
             self._declare('function ' + decl.name, tp)
         else:
             if isinstance(node, pycparser.c_ast.Struct):
@@ -271,9 +297,10 @@
                                     decl)
             #
             if decl.name:
-                tp = self._get_type(node, partial_length_ok=True)
+                tp, quals = self._get_type_and_quals(node,
+                                                     partial_length_ok=True)
                 if tp.is_raw_function:
-                    tp = self._get_type_pointer(tp)
+                    tp = self._get_type_pointer(tp, quals)
                     self._declare('function ' + decl.name, tp)
                 elif (tp.is_integer_type() and
                         hasattr(decl, 'init') and
@@ -287,10 +314,10 @@
                         _r_int_literal.match(decl.init.expr.value)):
                     self._add_integer_constant(decl.name,
                                                '-' + decl.init.expr.value)
-                elif self._is_constant_globalvar(node):
-                    self._declare('constant ' + decl.name, tp)
+                elif (quals & model.Q_CONST) and not tp.is_array_type:
+                    self._declare('constant ' + decl.name, tp, quals=quals)
                 else:
-                    self._declare('variable ' + decl.name, tp)
+                    self._declare('variable ' + decl.name, tp, quals=quals)
 
     def parse_type(self, cdecl):
         ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2]
@@ -298,40 +325,51 @@
         exprnode = ast.ext[-1].type.args.params[0]
         if isinstance(exprnode, pycparser.c_ast.ID):
             raise api.CDefError("unknown identifier '%s'" % (exprnode.name,))
-        return self._get_type(exprnode.type)
+        tp, quals = self._get_type_and_quals(exprnode.type)
+        return tp
 
-    def _declare(self, name, obj, included=False):
+    def _declare(self, name, obj, included=False, quals=0):
         if name in self._declarations:
-            if self._declarations[name] is obj:
+            prevobj, prevquals = self._declarations[name]
+            if prevobj is obj and prevquals == quals:
                 return
             if not self._override:
                 raise api.FFIError(
                     "multiple declarations of %s (for interactive usage, "
                     "try cdef(xx, override=True))" % (name,))
         assert '__dotdotdot__' not in name.split()
-        self._declarations[name] = obj
+        self._declarations[name] = (obj, quals)
         if included:
             self._included_declarations.add(obj)
 
-    def _get_type_pointer(self, type, const=False, declname=None):
+    def _extract_quals(self, type):
+        quals = 0
+        if isinstance(type, (pycparser.c_ast.TypeDecl,
+                             pycparser.c_ast.PtrDecl)):
+            if 'const' in type.quals:
+                quals |= model.Q_CONST
+            if 'restrict' in type.quals:
+                quals |= model.Q_RESTRICT
+        return quals
+
+    def _get_type_pointer(self, type, quals, declname=None):
         if isinstance(type, model.RawFunctionType):
             return type.as_function_pointer()
         if (isinstance(type, model.StructOrUnionOrEnum) and
                 type.name.startswith('$') and type.name[1:].isdigit() and
                 type.forcename is None and declname is not None):
-            return model.NamedPointerType(type, declname)
-        if const:
-            return model.ConstPointerType(type)
-        return model.PointerType(type)
+            return model.NamedPointerType(type, declname, quals)
+        return model.PointerType(type, quals)
 
-    def _get_type(self, typenode, name=None, partial_length_ok=False):
+    def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False):
         # first, dereference typedefs, if we have it already parsed, we're good
         if (isinstance(typenode, pycparser.c_ast.TypeDecl) and
             isinstance(typenode.type, pycparser.c_ast.IdentifierType) and
             len(typenode.type.names) == 1 and
             ('typedef ' + typenode.type.names[0]) in self._declarations):
-            type = self._declarations['typedef ' + typenode.type.names[0]]
-            return type
+            tp, quals = self._declarations['typedef ' + typenode.type.names[0]]
+            quals |= self._extract_quals(typenode)
+            return tp, quals
         #
         if isinstance(typenode, pycparser.c_ast.ArrayDecl):
             # array type
@@ -340,18 +378,19 @@
             else:
                 length = self._parse_constant(
                     typenode.dim, partial_length_ok=partial_length_ok)
-            tp = self._get_type(typenode.type,
+            tp, quals = self._get_type_and_quals(typenode.type,
                                 partial_length_ok=partial_length_ok)
-            return model.ArrayType(tp, length)
+            return model.ArrayType(tp, length), quals
         #
         if isinstance(typenode, pycparser.c_ast.PtrDecl):
             # pointer type
-            const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl)
-                     and 'const' in typenode.type.quals)
-            return self._get_type_pointer(self._get_type(typenode.type), const,
-                                          declname=name)
+            itemtype, itemquals = self._get_type_and_quals(typenode.type)
+            tp = self._get_type_pointer(itemtype, itemquals, declname=name)
+            quals = self._extract_quals(typenode)
+            return tp, quals
         #
         if isinstance(typenode, pycparser.c_ast.TypeDecl):
+            quals = self._extract_quals(typenode)
             type = typenode.type
             if isinstance(type, pycparser.c_ast.IdentifierType):
                 # assume a primitive type.  get it from .names, but reduce
@@ -379,35 +418,38 @@
                     names = newnames + names
                 ident = ' '.join(names)
                 if ident == 'void':
-                    return model.void_type
+                    return model.void_type, quals
                 if ident == '__dotdotdot__':
                     raise api.FFIError(':%d: bad usage of "..."' %
                             typenode.coord.line)
-                return resolve_common_type(ident)
+                return resolve_common_type(ident), quals
             #
             if isinstance(type, pycparser.c_ast.Struct):
                 # 'struct foobar'
-                return self._get_struct_union_enum_type('struct', type, name)
+                tp = self._get_struct_union_enum_type('struct', type, name)
+                return tp, quals
             #
             if isinstance(type, pycparser.c_ast.Union):
                 # 'union foobar'
-                return self._get_struct_union_enum_type('union', type, name)
+                tp = self._get_struct_union_enum_type('union', type, name)
+                return tp, quals
             #
             if isinstance(type, pycparser.c_ast.Enum):
                 # 'enum foobar'
-                return self._get_struct_union_enum_type('enum', type, name)
+                tp = self._get_struct_union_enum_type('enum', type, name)
+                return tp, quals
         #
         if isinstance(typenode, pycparser.c_ast.FuncDecl):
             # a function type
-            return self._parse_function_type(typenode, name)
+            return self._parse_function_type(typenode, name), 0
         #
         # nested anonymous structs or unions end up here
         if isinstance(typenode, pycparser.c_ast.Struct):
             return self._get_struct_union_enum_type('struct', typenode, name,
-                                                    nested=True)
+                                                    nested=True), 0
         if isinstance(typenode, pycparser.c_ast.Union):
             return self._get_struct_union_enum_type('union', typenode, name,
-                                                    nested=True)
+                                                    nested=True), 0
         #
         raise api.FFIError(":%d: bad or unsupported type declaration" %
                 typenode.coord.line)
@@ -426,28 +468,28 @@
                 raise api.CDefError(
                     "%s: a function with only '(...)' as argument"
                     " is not correct C" % (funcname or 'in expression'))
-        args = [self._as_func_arg(self._get_type(argdeclnode.type))
+        args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type))
                 for argdeclnode in params]
         if not ellipsis and args == [model.void_type]:
             args = []
-        result = self._get_type(typenode.type)
-        return model.RawFunctionType(tuple(args), result, ellipsis)
+        result, quals = self._get_type_and_quals(typenode.type)
+        # the 'quals' on the result type are ignored.  HACK: we abuse them
+        # to detect __stdcall functions: we textually replace "__stdcall"
+        # with "volatile volatile const" above.
+        abi = None
+        if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway
+            if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']:
+                abi = '__stdcall'
+        return model.RawFunctionType(tuple(args), result, ellipsis, abi)
 
-    def _as_func_arg(self, type):
+    def _as_func_arg(self, type, quals):
         if isinstance(type, model.ArrayType):
-            return model.PointerType(type.item)
+            return model.PointerType(type.item, quals)
         elif isinstance(type, model.RawFunctionType):
             return type.as_function_pointer()
         else:
             return type
 
-    def _is_constant_globalvar(self, typenode):
-        if isinstance(typenode, pycparser.c_ast.PtrDecl):
-            return 'const' in typenode.quals
-        if isinstance(typenode, pycparser.c_ast.TypeDecl):
-            return 'const' in typenode.quals
-        return False
-
     def _get_struct_union_enum_type(self, kind, type, name=None, nested=False):
         # First, a level of caching on the exact 'type' node of the AST.
         # This is obscure, but needed because pycparser "unrolls" declarations
@@ -486,7 +528,7 @@
         else:
             explicit_name = name
             key = '%s %s' % (kind, name)
-            tp = self._declarations.get(key, None)
+            tp, _ = self._declarations.get(key, (None, None))
         #
         if tp is None:
             if kind == 'struct':
@@ -528,6 +570,7 @@
         fldnames = []
         fldtypes = []
         fldbitsize = []
+        fldquals = []
         for decl in type.decls:
             if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and
                     ''.join(decl.type.names) == '__dotdotdot__'):
@@ -541,7 +584,8 @@
             else:
                 bitsize = self._parse_constant(decl.bitsize)
             self._partial_length = False
-            type = self._get_type(decl.type, partial_length_ok=True)
+            type, fqual = self._get_type_and_quals(decl.type,
+                                                   partial_length_ok=True)
             if self._partial_length:
                 self._make_partial(tp, nested)
             if isinstance(type, model.StructType) and type.partial:
@@ -549,9 +593,11 @@
             fldnames.append(decl.name or '')
             fldtypes.append(type)
             fldbitsize.append(bitsize)
+            fldquals.append(fqual)
         tp.fldnames = tuple(fldnames)
         tp.fldtypes = tuple(fldtypes)
         tp.fldbitsize = tuple(fldbitsize)
+        tp.fldquals = tuple(fldquals)
         if fldbitsize != [-1] * len(fldbitsize):
             if isinstance(tp, model.StructType) and tp.partial:
                 raise NotImplementedError("%s: using both bitfields and '...;'"
@@ -632,14 +678,12 @@
         return tp
 
     def include(self, other):
-        for name, tp in other._declarations.items():
+        for name, (tp, quals) in other._declarations.items():
             if name.startswith('anonymous $enum_$'):
                 continue   # fix for test_anonymous_enum_include
             kind = name.split(' ', 1)[0]
-            if kind in ('struct', 'union', 'enum', 'anonymous'):
-                self._declare(name, tp, included=True)
-            elif kind == 'typedef':
-                self._declare(name, tp, included=True)
+            if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'):
+                self._declare(name, tp, included=True, quals=quals)
         for k, v in other._int_constants.items():
             self._add_constants(k, v)
 
diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py
--- a/lib_pypy/cffi/ffiplatform.py
+++ b/lib_pypy/cffi/ffiplatform.py
@@ -14,17 +14,7 @@
 LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs',
                       'extra_objects', 'depends']
 
-def _hack_at_distutils():
-    # Windows-only workaround for some configurations: see
-    # https://bugs.python.org/issue23246 (Python 2.7.9)
-    if sys.platform == "win32":
-        try:
-            import setuptools    # for side-effects, patches distutils
-        except ImportError:
-            pass
-
 def get_extension(srcfilename, modname, sources=(), **kwds):
-    _hack_at_distutils()   # *before* the following import
     from distutils.core import Extension
     allsources = [srcfilename]
     allsources.extend(sources)
@@ -47,7 +37,6 @@
 
 def _build(tmpdir, ext):
     # XXX compact but horrible :-(
-    _hack_at_distutils()
     from distutils.core import Distribution
     import distutils.errors
     #
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -1,14 +1,29 @@
-import types
+import types, sys
 import weakref
 
 from .lock import allocate_lock
 
 
+# type qualifiers
+Q_CONST    = 0x01
+Q_RESTRICT = 0x02
+
+def qualify(quals, replace_with):
+    if quals & Q_CONST:
+        replace_with = ' const ' + replace_with.lstrip()
+    if quals & Q_RESTRICT:
+        # It seems that __restrict is supported by gcc and msvc.
+        # If you hit some different compiler, add a #define in
+        # _cffi_include.h for it (and in its copies, documented there)
+        replace_with = ' __restrict ' + replace_with.lstrip()
+    return replace_with
+
+
 class BaseTypeByIdentity(object):
     is_array_type = False
     is_raw_function = False
 
-    def get_c_name(self, replace_with='', context='a C file'):
+    def get_c_name(self, replace_with='', context='a C file', quals=0):
         result = self.c_name_with_marker
         assert result.count('&') == 1
         # some logic duplication with ffi.getctype()... :-(
@@ -18,6 +33,7 @@
                 replace_with = '(%s)' % replace_with
             elif not replace_with[0] in '[(':
                 replace_with = ' ' + replace_with
+        replace_with = qualify(quals, replace_with)
         result = result.replace('&', replace_with)
         if '$' in result:
             from .ffiplatform import VerificationError
@@ -177,18 +193,21 @@
 
 
 class BaseFunctionType(BaseType):
-    _attrs_ = ('args', 'result', 'ellipsis')
+    _attrs_ = ('args', 'result', 'ellipsis', 'abi')
 
-    def __init__(self, args, result, ellipsis):
+    def __init__(self, args, result, ellipsis, abi=None):
         self.args = args
         self.result = result
         self.ellipsis = ellipsis
+        self.abi = abi
         #
         reprargs = [arg._get_c_name() for arg in self.args]
         if self.ellipsis:
             reprargs.append('...')
         reprargs = reprargs or ['void']
         replace_with = self._base_pattern % (', '.join(reprargs),)
+        if abi is not None:
+            replace_with = replace_with[:1] + abi + ' ' + replace_with[1:]
         self.c_name_with_marker = (
             self.result.c_name_with_marker.replace('&', replace_with))
 
@@ -206,7 +225,7 @@
                             "type, not a pointer-to-function type" % (self,))
 
     def as_function_pointer(self):
-        return FunctionPtrType(self.args, self.result, self.ellipsis)
+        return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi)
 
 
 class FunctionPtrType(BaseFunctionType):
@@ -217,24 +236,29 @@
         args = []
         for tp in self.args:
             args.append(tp.get_cached_btype(ffi, finishlist))
+        abi_args = ()
+        if self.abi == "__stdcall":
+            if not self.ellipsis:    # __stdcall ignored for variadic funcs
+                try:
+                    abi_args = (ffi._backend.FFI_STDCALL,)
+                except AttributeError:
+                    pass
         return global_cache(self, ffi, 'new_function_type',
-                            tuple(args), result, self.ellipsis)
+                            tuple(args), result, self.ellipsis, *abi_args)
 
     def as_raw_function(self):
-        return RawFunctionType(self.args, self.result, self.ellipsis)
+        return RawFunctionType(self.args, self.result, self.ellipsis, self.abi)
 
 
 class PointerType(BaseType):
-    _attrs_ = ('totype',)
-    _base_pattern       = " *&"
-    _base_pattern_array = "(*&)"
+    _attrs_ = ('totype', 'quals')
 
-    def __init__(self, totype):
+    def __init__(self, totype, quals=0):
         self.totype = totype
+        self.quals = quals
+        extra = qualify(quals, " *&")
         if totype.is_array_type:
-            extra = self._base_pattern_array
-        else:
-            extra = self._base_pattern
+            extra = "(%s)" % (extra.lstrip(),)
         self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra)
 
     def build_backend_type(self, ffi, finishlist):
@@ -243,10 +267,8 @@
 
 voidp_type = PointerType(void_type)
 
-
-class ConstPointerType(PointerType):
-    _base_pattern       = " const *&"
-    _base_pattern_array = "(const *&)"
+def ConstPointerType(totype):
+    return PointerType(totype, Q_CONST)
 
 const_voidp_type = ConstPointerType(void_type)
 
@@ -254,8 +276,8 @@
 class NamedPointerType(PointerType):
     _attrs_ = ('totype', 'name')
 
-    def __init__(self, totype, name):
-        PointerType.__init__(self, totype)
+    def __init__(self, totype, name, quals=0):
+        PointerType.__init__(self, totype, quals)
         self.name = name
         self.c_name_with_marker = name + '&'
 
@@ -315,11 +337,12 @@
     partial = False
     packed = False
 
-    def __init__(self, name, fldnames, fldtypes, fldbitsize):
+    def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None):
         self.name = name
         self.fldnames = fldnames
         self.fldtypes = fldtypes
         self.fldbitsize = fldbitsize
+        self.fldquals = fldquals
         self.build_c_name_with_marker()
 
     def has_anonymous_struct_fields(self):
@@ -331,14 +354,17 @@
         return False
 
     def enumfields(self):
-        for name, type, bitsize in zip(self.fldnames, self.fldtypes,
-                                       self.fldbitsize):
+        fldquals = self.fldquals
+        if fldquals is None:
+            fldquals = (0,) * len(self.fldnames)
+        for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes,
+                                              self.fldbitsize, fldquals):
             if name == '' and isinstance(type, StructOrUnion):
                 # nested anonymous struct/union
                 for result in type.enumfields():
                     yield result
             else:
-                yield (name, type, bitsize)
+                yield (name, type, bitsize, quals)
 
     def force_flatten(self):
         # force the struct or union to have a declaration that lists
@@ -347,13 +373,16 @@
         names = []
         types = []
         bitsizes = []
-        for name, type, bitsize in self.enumfields():
+        fldquals = []
+        for name, type, bitsize, quals in self.enumfields():
             names.append(name)
             types.append(type)
             bitsizes.append(bitsize)
+            fldquals.append(quals)
         self.fldnames = tuple(names)
         self.fldtypes = tuple(types)
         self.fldbitsize = tuple(bitsizes)
+        self.fldquals = tuple(fldquals)
 
     def get_cached_btype(self, ffi, finishlist, can_delay=False):
         BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
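The new qualifier handling is a small bit set; a self-contained illustration of the
qualify() helper from the hunk above, applied to the pointer pattern used by PointerType:

    # copied from the model.py hunk above
    Q_CONST    = 0x01
    Q_RESTRICT = 0x02

    def qualify(quals, replace_with):
        if quals & Q_CONST:
            replace_with = ' const ' + replace_with.lstrip()
        if quals & Q_RESTRICT:
            replace_with = ' __restrict ' + replace_with.lstrip()
        return replace_with

    print(qualify(Q_CONST, " *&"))               # ' const *&'
    print(qualify(Q_CONST | Q_RESTRICT, "x"))    # ' __restrict const x'
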
diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h
--- a/lib_pypy/cffi/parse_c_type.h
+++ b/lib_pypy/cffi/parse_c_type.h
@@ -5,7 +5,7 @@
 
 #define _CFFI_OP(opcode, arg)   (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8))
 #define _CFFI_GETOP(cffi_opcode)    ((unsigned char)(uintptr_t)cffi_opcode)
-#define _CFFI_GETARG(cffi_opcode)   (((uintptr_t)cffi_opcode) >> 8)
+#define _CFFI_GETARG(cffi_opcode)   (((intptr_t)cffi_opcode) >> 8)
 
 #define _CFFI_OP_PRIMITIVE       1
 #define _CFFI_OP_POINTER         3
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -195,17 +195,15 @@
             elif isinstance(tp, model.StructOrUnion):
                 if tp.fldtypes is not None and (
                         tp not in self.ffi._parser._included_declarations):
-                    for name1, tp1, _ in tp.enumfields():
+                    for name1, tp1, _, _ in tp.enumfields():
                         self._do_collect_type(self._field_type(tp, name1, tp1))
             else:
                 for _, x in tp._get_items():
                     self._do_collect_type(x)
 
-    def _get_declarations(self):
-        return sorted(self.ffi._parser._declarations.items())
-
     def _generate(self, step_name):
-        for name, tp in self._get_declarations():
+        lst = self.ffi._parser._declarations.items()
+        for name, (tp, quals) in sorted(lst):
             kind, realname = name.split(' ', 1)
             try:
                 method = getattr(self, '_generate_cpy_%s_%s' % (kind,
@@ -214,6 +212,7 @@
                 raise ffiplatform.VerificationError(
                     "not implemented in recompile(): %r" % name)
             try:
+                self._current_quals = quals
                 method(tp, realname)
             except Exception as e:
                 model.attach_exception_info(e, name)
@@ -608,7 +607,11 @@
             call_arguments.append('x%d' % i)
         repr_arguments = ', '.join(arguments)
         repr_arguments = repr_arguments or 'void'
-        name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments)
+        if tp.abi:
+            abi = tp.abi + ' '
+        else:
+            abi = ''
+        name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments)
         prnt('static %s' % (tp.result.get_c_name(name_and_arguments),))
         prnt('{')
         call_arguments = ', '.join(call_arguments)
@@ -711,7 +714,8 @@
         if difference:
             repr_arguments = ', '.join(arguments)
             repr_arguments = repr_arguments or 'void'
-            name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments)
+            name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name,
+                                                       repr_arguments)
             prnt('static %s' % (tp_result.get_c_name(name_and_arguments),))
             prnt('{')
             if result_decl:
@@ -774,7 +778,7 @@
         prnt('{')
         prnt('  /* only to generate compile-time warnings or errors */')
         prnt('  (void)p;')
-        for fname, ftype, fbitsize in tp.enumfields():
+        for fname, ftype, fbitsize, fqual in tp.enumfields():
             try:
                 if ftype.is_integer_type() or fbitsize >= 0:
                     # accept all integers, but complain on float or double
@@ -789,7 +793,8 @@
                     ftype = ftype.item
                     fname = fname + '[0]'
                 prnt('  { %s = &p->%s; (void)tmp; }' % (
-                    ftype.get_c_name('*tmp', 'field %r'%fname), fname))
+                    ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
+                    fname))
             except ffiplatform.VerificationError as e:
                 prnt('  /* %s */' % str(e))   # cannot verify it, ignore
         prnt('}')
@@ -823,7 +828,7 @@
         c_fields = []
         if reason_for_not_expanding is None:
             enumfields = list(tp.enumfields())
-            for fldname, fldtype, fbitsize in enumfields:
+            for fldname, fldtype, fbitsize, fqual in enumfields:
                 fldtype = self._field_type(tp, fldname, fldtype)
                 # cname is None for _add_missing_struct_unions() only
                 op = OP_NOOP
@@ -879,7 +884,9 @@
         # because they don't have any known C name.  Check that they are
         # not partial (we can't complete or verify them!) and emit them
         # anonymously.
-        for tp in list(self._struct_unions):
+        lst = list(self._struct_unions.items())
+        lst.sort(key=lambda tp_order: tp_order[1])
+        for tp, order in lst:
             if tp not in self._seen_struct_unions:
                 if tp.partial:
                     raise NotImplementedError("internal inconsistency: %r is "
@@ -1004,6 +1011,8 @@
     def _enum_ctx(self, tp, cname):
         type_index = self._typesdict[tp]
         type_op = CffiOp(OP_ENUM, -1)
+        if self.target_is_python:
+            tp.check_not_partial()
         for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
             self._lsts["global"].append(
                 GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op,
@@ -1081,7 +1090,8 @@
         # if 'tp' were a function type, but that is not possible here.
         # (If 'tp' is a function _pointer_ type, then casts from "fn_t
         # **" to "void *" are again no-ops, as far as I can tell.)
-        prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,)))
+        decl = '*_cffi_var_%s(void)' % (name,)
+        prnt('static ' + tp.get_c_name(decl, quals=self._current_quals))
         prnt('{')
         prnt('  return %s(%s);' % (ampersand, name))
         prnt('}')
@@ -1130,7 +1140,13 @@
                 else:
                     self.cffi_types[index] = CffiOp(OP_NOOP, realindex)
             index += 1
-        self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis))
+        flags = int(tp.ellipsis)
+        if tp.abi is not None:
+            if tp.abi == '__stdcall':
+                flags |= 2
+            else:
+                raise NotImplementedError("abi=%r" % (tp.abi,))
+        self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags)
 
     def _emit_bytecode_PointerType(self, tp, index):
         self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype])
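A tiny sketch of the OP_FUNCTION_END flag encoding introduced above (bit 0: variadic
'...', bit 1: __stdcall ABI); the helper name here is made up for illustration:

    def function_end_flags(ellipsis, abi):
        # mirrors the flag computation in the recompiler hunk above
        flags = int(ellipsis)
        if abi == '__stdcall':
            flags |= 2
        return flags

    assert function_end_flags(False, None) == 0
    assert function_end_flags(True, None) == 1
    assert function_end_flags(False, '__stdcall') == 2
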
diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py
--- a/lib_pypy/cffi/vengine_cpy.py
+++ b/lib_pypy/cffi/vengine_cpy.py
@@ -197,7 +197,10 @@
         return library
 
     def _get_declarations(self):
-        return sorted(self.ffi._parser._declarations.items())
+        lst = [(key, tp) for (key, (tp, qual)) in
+                                self.ffi._parser._declarations.items()]
+        lst.sort()
+        return lst
 
     def _generate(self, step_name):
         for name, tp in self._get_declarations():
@@ -468,7 +471,7 @@
         prnt('{')
         prnt('  /* only to generate compile-time warnings or errors */')
         prnt('  (void)p;')
-        for fname, ftype, fbitsize in tp.enumfields():
+        for fname, ftype, fbitsize, fqual in tp.enumfields():
             if (isinstance(ftype, model.PrimitiveType)
                 and ftype.is_integer_type()) or fbitsize >= 0:
                 # accept all integers, but complain on float or double
@@ -477,7 +480,8 @@
                 # only accept exactly the type declared.
                 try:
                     prnt('  { %s = &p->%s; (void)tmp; }' % (
-                        ftype.get_c_name('*tmp', 'field %r'%fname), fname))
+                        ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
+                        fname))
                 except ffiplatform.VerificationError as e:
                     prnt('  /* %s */' % str(e))   # cannot verify it, ignore
         prnt('}')
@@ -488,7 +492,7 @@
         prnt('  static Py_ssize_t nums[] = {')
         prnt('    sizeof(%s),' % cname)
         prnt('    offsetof(struct _cffi_aligncheck, y),')
-        for fname, ftype, fbitsize in tp.enumfields():
+        for fname, ftype, fbitsize, fqual in tp.enumfields():
             if fbitsize >= 0:
                 continue      # xxx ignore fbitsize for now
             prnt('    offsetof(%s, %s),' % (cname, fname))
@@ -552,7 +556,7 @@
             check(layout[0], ffi.sizeof(BStruct), "wrong total size")
             check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
             i = 2
-            for fname, ftype, fbitsize in tp.enumfields():
+            for fname, ftype, fbitsize, fqual in tp.enumfields():
                 if fbitsize >= 0:
                     continue        # xxx ignore fbitsize for now
                 check(layout[i], ffi.offsetof(BStruct, fname),
diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py
--- a/lib_pypy/cffi/vengine_gen.py
+++ b/lib_pypy/cffi/vengine_gen.py
@@ -87,7 +87,10 @@
         return library
 
     def _get_declarations(self):
-        return sorted(self.ffi._parser._declarations.items())
+        lst = [(key, tp) for (key, (tp, qual)) in
+                                self.ffi._parser._declarations.items()]
+        lst.sort()
+        return lst
 
     def _generate(self, step_name):
         for name, tp in self._get_declarations():
@@ -156,7 +159,11 @@
         arglist = ', '.join(arglist) or 'void'
         wrappername = '_cffi_f_%s' % name
         self.export_symbols.append(wrappername)
-        funcdecl = ' %s(%s)' % (wrappername, arglist)
+        if tp.abi:
+            abi = tp.abi + ' '
+        else:
+            abi = ''
+        funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist)
         context = 'result of %s' % name
         prnt(tpresult.get_c_name(funcdecl, context))
         prnt('{')
@@ -260,7 +267,7 @@
         prnt('{')
         prnt('  /* only to generate compile-time warnings or errors */')
         prnt('  (void)p;')
-        for fname, ftype, fbitsize in tp.enumfields():
+        for fname, ftype, fbitsize, fqual in tp.enumfields():
             if (isinstance(ftype, model.PrimitiveType)
                 and ftype.is_integer_type()) or fbitsize >= 0:
                 # accept all integers, but complain on float or double
@@ -269,7 +276,8 @@
                 # only accept exactly the type declared.
                 try:
                     prnt('  { %s = &p->%s; (void)tmp; }' % (
-                        ftype.get_c_name('*tmp', 'field %r'%fname), fname))
+                        ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual),
+                        fname))
                 except ffiplatform.VerificationError as e:
                     prnt('  /* %s */' % str(e))   # cannot verify it, ignore
         prnt('}')
@@ -280,7 +288,7 @@
         prnt('  static intptr_t nums[] = {')
         prnt('    sizeof(%s),' % cname)
         prnt('    offsetof(struct _cffi_aligncheck, y),')
-        for fname, ftype, fbitsize in tp.enumfields():
+        for fname, ftype, fbitsize, fqual in tp.enumfields():
             if fbitsize >= 0:
                 continue      # xxx ignore fbitsize for now
             prnt('    offsetof(%s, %s),' % (cname, fname))
@@ -342,7 +350,7 @@
             check(layout[0], ffi.sizeof(BStruct), "wrong total size")
             check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
             i = 2
-            for fname, ftype, fbitsize in tp.enumfields():
+            for fname, ftype, fbitsize, fqual in tp.enumfields():
                 if fbitsize >= 0:
                     continue        # xxx ignore fbitsize for now
                 check(layout[i], ffi.offsetof(BStruct, fname),
diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py
--- a/lib_pypy/cffi/verifier.py
+++ b/lib_pypy/cffi/verifier.py
@@ -22,6 +22,16 @@
                 s = s.encode('ascii')
             super(NativeIO, self).write(s)
 
+def _hack_at_distutils():
+    # Windows-only workaround for some configurations: see
+    # https://bugs.python.org/issue23246 (Python 2.7 with 
+    # a specific MS compiler suite download)
+    if sys.platform == "win32":
+        try:
+            import setuptools    # for side-effects, patches distutils
+        except ImportError:
+            pass
+
 
 class Verifier(object):
 
@@ -112,6 +122,7 @@
         return basename
 
     def get_extension(self):
+        _hack_at_distutils() # backward compatibility hack
         if not self._has_source:
             with self.ffi._lock:
                 if not self._has_source:
diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info
--- a/lib_pypy/greenlet.egg-info
+++ b/lib_pypy/greenlet.egg-info
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: greenlet
-Version: 0.4.7
+Version: 0.4.9
 Summary: Lightweight in-process concurrent programming
 Home-page: https://github.com/python-greenlet/greenlet
 Author: Ralf Schmitt (for CPython), PyPy team
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -2,7 +2,7 @@
 import __pypy__
 import _continuation
 
-__version__ = "0.4.7"
+__version__ = "0.4.9"
 
 # ____________________________________________________________
 # Exceptions
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -40,7 +40,9 @@
     "_csv", "_pypyjson", "_posixsubprocess", # "cppyy", "micronumpy"
 ])
 
-if sys.platform.startswith('linux') and os.uname()[4] == 'x86_64':
+if ((sys.platform.startswith('linux') or sys.platform == 'darwin')
+    and os.uname()[4] == 'x86_64' and sys.maxint > 2**32):
+    # it's not enough that we get x86_64
     working_modules.add('_vmprof')
 
 translation_modules = default_modules.copy()
@@ -76,6 +78,11 @@
     if "cppyy" in working_modules:
         working_modules.remove("cppyy")  # depends on ctypes
 
+if sys.platform.startswith("linux"):
+    _mach = os.popen('uname -m', 'r').read().strip()
+    if _mach.startswith('ppc'):
+        working_modules.remove("_continuation")
+
 
 module_dependencies = {
     '_multiprocessing': [('objspace.usemodules.time', True),
@@ -93,6 +100,8 @@
 if sys.platform == "win32":
     module_suggests["cpyext"].append(("translation.shared", True))
 
+
+# NOTE: this dictionary is not used any more
 module_import_dependencies = {
     # no _rawffi if importing rpython.rlib.clibffi raises ImportError
     # or CompilationError or py.test.skip.Exception
@@ -109,6 +118,7 @@
     }
 
 def get_module_validator(modname):
+    # NOTE: this function is not used any more
     if modname in module_import_dependencies:
         modlist = module_import_dependencies[modname]
         def validator(config):
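The stricter _vmprof guard above reads: the platform must be Linux or OS X, the machine
must report x86_64, and the interpreter itself must be a 64-bit build. A standalone
sketch of the same test (using sys.maxsize so it also runs on Python 3; the source above
uses sys.maxint):

    import os
    import sys

    vmprof_ok = ((sys.platform.startswith('linux') or sys.platform == 'darwin')
                 and os.uname()[4] == 'x86_64'
                 and sys.maxsize > 2**32)   # it's not enough that the kernel is x86_64
    print(vmprof_ok)
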
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -67,7 +67,7 @@
 # The short X.Y version.
 version = '2.6'
 # The full version, including alpha/beta/rc tags.
-release = '2.6.0'
+release = '2.6.1'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -32,6 +32,7 @@
   Lukas Diekmann
   Sven Hager
   Anders Lehmann
+  Richard Plangger
   Aurelien Campeas
   Remi Meier
   Niklaus Haldimann
@@ -57,7 +58,6 @@
   Ludovic Aubry
   Jacob Hallen
   Jason Creighton
-  Richard Plangger
   Alex Martelli
   Michal Bendowski
   stian
@@ -138,7 +138,6 @@
   Michael Twomey
   Lucian Branescu Mihaila
   Yichao Yu
-  Anton Gulenko
   Gabriel Lavoie
   Olivier Dormond
   Jared Grubb
@@ -185,6 +184,7 @@
   Carl Meyer
   Karl Ramm
   Pieter Zieschang
+  Anton Gulenko
   Gabriel
   Lukas Vacek
   Andrew Dalke
@@ -217,6 +217,7 @@
   Toni Mattis
   Lucas Stadler
   Julian Berman
+  Markus Holtermann
   roberto@goyle
   Yury V. Zaytsev
   Anna Katrina Dominguez
@@ -252,6 +253,7 @@
   shoma hosaka
   Daniel Neuhäuser
   Ben Mather
+  Niclas Olofsson
   halgari
   Boglarka Vezer
   Chris Pressey
diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst
--- a/pypy/doc/embedding.rst
+++ b/pypy/doc/embedding.rst
@@ -20,10 +20,6 @@
    It initializes the RPython/PyPy GC and does a bunch of necessary startup
    code. This function cannot fail.
 
-.. function:: void pypy_init_threads(void);
-
-   Initialize threads. Only need to be called if there are any threads involved
-
 .. function:: int pypy_setup_home(char* home, int verbose);
 
    This function searches the PyPy standard library starting from the given
@@ -38,6 +34,11 @@
    Function returns 0 on success or -1 on failure, can be called multiple times
    until the library is found.
 
+.. function:: void pypy_init_threads(void);
+
+   Initialize threads. Only needs to be called if there are any threads involved.
+   *Must be called after pypy_setup_home()*
+
 .. function:: int pypy_execute_source(char* source);
 
    Execute the Python source code given in the ``source`` argument. In case of
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -5,8 +5,8 @@
 with any external library.
 
 Right now, there are the following possibilities of providing
-third-party modules for the PyPy python interpreter (in order of
-usefulness):
+third-party modules for the PyPy python interpreter (in order, from most
+directly useful to most messy to use with PyPy):
 
 * Write them in pure Python and use CFFI_.
 
@@ -83,7 +83,7 @@
 
 
 RPython Mixed Modules
-=====================
+---------------------
 
 This is the internal way to write built-in extension modules in PyPy.
 It cannot be used by any 3rd-party module: the extension modules are
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -67,7 +67,7 @@
 The other commands of ``setup.py`` are available too, like ``build``.
 
 .. _PyPI: https://pypi.python.org/pypi
-.. _`use virtualenv (as documented here)`: getting-started.html#installing-using-virtualenv
+.. _`use virtualenv (as documented here)`: install.html#installing-using-virtualenv
 
 
 Module xyz does not work in the sandboxed PyPy?
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,8 @@
 
 .. toctree::
 
+   release-4.0.0.rst
+   release-2.6.1.rst
    release-2.6.0.rst
    release-2.5.1.rst
    release-2.5.0.rst
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -7,6 +7,7 @@
 .. toctree::
 
    whatsnew-head.rst
+   whatsnew-4.0.0.rst
    whatsnew-2.6.1.rst
    whatsnew-2.6.0.rst
    whatsnew-2.5.1.rst
diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst
--- a/pypy/doc/jit-hooks.rst
+++ b/pypy/doc/jit-hooks.rst
@@ -5,19 +5,8 @@
 understanding what's pypy's JIT doing while running your program. There
 are three functions related to that coming from the ``pypyjit`` module:
 
-.. function:: set_optimize_hook(callable)
 
-    Set a compiling hook that will be called each time a loop is optimized,
-    but before assembler compilation. This allows adding additional
-    optimizations on Python level.
-
-    The callable will be called with the ``pypyjit.JitLoopInfo`` object.
-    Refer to it's documentation for details.
-
-    Result value will be the resulting list of operations, or None
-
-
-.. function:: set_compile_hook(callable)
+.. function:: set_compile_hook(callable, operations=True)
 
     Set a compiling hook that will be called each time a loop is compiled.
 
@@ -28,6 +17,9 @@
     inside the jit hook is itself jitted, it will get compiled, but the
     jit hook won't be called for that.
 
+    If operations=False, no list of operations will be available. Useful
+    if the hook is supposed to be very lightweight.
+
 .. function:: set_abort_hook(hook)
 
     Set a hook (callable) that will be called each time there is tracing
@@ -66,3 +58,25 @@
 
     * ``loop_run_times`` - counters for number of times loops are run, only
       works when ``enable_debug`` is called.
+
+.. class:: JitLoopInfo
+
+   A class containing information about the compiled loop. Usable attributes:
+
+   * ``operations`` - list of operations, if requested
+
+   * ``jitdriver_name`` - the name of jitdriver associated with this loop
+
+   * ``greenkey`` - a key at which the loop got compiled (e.g. code position,
+     is_being_profiled, pycode tuple for python jitdriver)
+
+   * ``loop_no`` - loop cardinal number
+
+   * ``bridge_no`` - id of the fail descr
+
+   * ``type`` - "entry bridge", "loop" or "bridge"
+
+   * ``asmaddr`` - an address in raw memory where assembler resides
+
+   * ``asmlen`` - length of raw memory with assembler associated
+
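A hedged example of wiring up the hook API documented above (PyPy-only; the attribute
names follow the JitLoopInfo description in this hunk):

    import pypyjit

    def on_compile(info):
        # 'info' is a pypyjit.JitLoopInfo instance
        print(info.jitdriver_name, info.type, info.loop_no, info.asmaddr, info.asmlen)

    # operations=False keeps the hook lightweight: no list of operations is built
    pypyjit.set_compile_hook(on_compile, operations=False)
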
diff --git a/pypy/doc/release-2.6.1.rst b/pypy/doc/release-2.6.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-2.6.1.rst
@@ -0,0 +1,129 @@
+==========
+PyPy 2.6.1 
+==========
+
+We're pleased to announce PyPy 2.6.1, an update to PyPy 2.6.0 released June 1.
+We have updated stdlib to 2.7.10, `cffi`_ to version 1.3, extended support for
+the new vmprof_ statistical profiler for multiple threads, and increased
+functionality of numpy.
+
+You can download the PyPy 2.6.1 release here:
+
+    http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project, and our volunteers and contributors.  
+
+.. _`cffi`: https://cffi.readthedocs.org
+
+We would also like to encourage new people to join the project. PyPy has many
+layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation
+improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ with making
+RPython's JIT even better. 
+
+.. _`PyPy`: http://doc.pypy.org 
+.. _`RPython`: https://rpython.readthedocs.org
+.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
+.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+This release supports **x86** machines on most common operating systems
+(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_),
+as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+.. _`pypy and cpython 2.7.x`: http://speed.pypy.org
+.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy
+.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/
+.. _`dynamic languages`: http://pypyjs.org
+
+Highlights 
+===========
+
+* Bug Fixes
+
+  * Revive non-SSE2 support
+
+  * Fixes for detaching _io.Buffer*
+
+  * On Windows, close (and flush) all open sockets on exiting
+
+  * Drop support for ancient macOS v10.4 and before
+
+  * Clear up contention in the garbage collector between trace-me-later and pinning
+
+  * Issues reported with our previous release were resolved_ after reports from users on
+    our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
+    #pypy.
+
+* New features:
+
+  * cffi was updated to version 1.3
+
+  * The python stdlib was updated to 2.7.10 from 2.7.9
+
+  * vmprof now supports multiple threads and OS X
+
+  * The translation process builds cffi import libraries for some stdlib
+    packages, which should prevent confusion when package.py is not used
+
+  * better support for gdb debugging
+
+  * freebsd should be able to translate PyPy "out of the box" with no patches
+
+* Numpy:
+
+  * Better support for record dtypes, including the ``align`` keyword
+
+  * Implement casting and create output arrays accordingly (still missing some corner cases)
+
+  * Support creation of unicode ndarrays
+
+  * Better support ndarray.flags
+
+  * Support ``axis`` argument in more functions
+
+  * Refactor array indexing to support ellipses
+
+  * Allow the docstrings of built-in numpy objects to be set at run-time
+
+  * Support the ``buffered`` nditer creation keyword
+
+* Performance improvements:
+
+  * Delay recursive calls to make them non-recursive
+
+  * Skip loop unrolling if it compiles too much code
+
+  * Tweak the heapcache
+
+  * Add a list strategy for lists that store both floats and 32-bit integers.
+    The latter are encoded as nonstandard NaNs.  Benchmarks show that the speed
+    of such lists is now very close to the speed of purely-int or purely-float
+    lists. 
+
+  * Simplify implementation of ffi.gc() to avoid most weakrefs
+
+  * Massively improve the performance of map() with more than
+    one sequence argument
+
+.. _`vmprof`: https://vmprof.readthedocs.org
+.. _resolved: http://doc.pypy.org/en/latest/whatsnew-2.6.1.html
+
+Please try it out and let us know what you think. We welcome
+success stories, `experiments`_, or `benchmarks`_; we know you are using PyPy, please tell us about it!
+
+Cheers
+
+The PyPy Team
+
+.. _`experiments`: https://morepypy.blogspot.com/2015/02/experiments-in-pyrlang-with-rpython.html
+.. _`benchmarks`: https://mithrandi.net/blog/2015/03/axiom-benchmark-results-on-pypy-2-5-0
diff --git a/pypy/doc/release-4.0.0.rst b/pypy/doc/release-4.0.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-4.0.0.rst
@@ -0,0 +1,210 @@
+============
+PyPy 4.0.0
+============
+
+We're pleased and proud to unleash PyPy 4.0.0, a major update of the PyPy
+python 2.7.10 compatible interpreter with a Just In Time compiler.
+We have improved `warmup time and memory overhead used for tracing`_, added
+`vectorization`_ for numpy and general loops where possible on x86 hardware
+(disabled by default),
+refactored rough edges in rpython, and increased functionality of numpy.
+
+You can download the PyPy 4.0.0 release here:
+
+    http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project.
+
+We would also like to thank our contributors (7 new ones since PyPy 2.6.0) and 
+encourage new people to join the project. PyPy has many
+layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation
+improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ 
+with making RPython's JIT even better. 
+
+New Version Numbering
+=====================
+
+Since the past release, PyPy 2.6.1, we decided to update the PyPy 2.x.x
+versioning directly to PyPy 4.x.x, to avoid confusion with CPython 2.7
+and 3.5. Note that this version of PyPy uses the stdlib and implements the
+syntax of CPython 2.7.10. 
+
+Vectorization
+=============
+
+Richard Plangger began work in March and continued over a Google Summer of Code
+to add a `vectorization` step to the trace optimizer. The step recognizes
+common constructs and emits SIMD code where possible, much as any modern
+compiler does. This vectorization happens while tracing running code, so it is
+actually easier at run-time to determine the availability of possible
+vectorization than it is for ahead-of-time compilers.
+
+Availability of SIMD hardware is detected at run time, without needing to
+precompile various code paths into the executable.
+
+The first version of the vectorization has been merged in this release; since
+it is so new, it is off by default. To enable the vectorization in built-in JIT
+drivers (like numpy ufuncs), add ``--jit vec=1``; to enable all implemented
+vectorization, add ``--jit vec_all=1``.
+
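+A minimal way to try it out (the script name is hypothetical, and this assumes
+the PyPy `numpy`_ fork is installed)::
+
+    # vecdemo.py -- an element-wise loop that the tracer can vectorize
+    import numpy as np
+
+    a = np.arange(1000000, dtype=np.float64)
+    b = np.arange(1000000, dtype=np.float64)
+    print((a + b).sum())
+
+and then run it with vectorization enabled in the built-in JIT drivers::
+
+    pypy --jit vec=1 vecdemo.py
+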
+Benchmarks and a summary of this work appear `here`_.
+
+Internal Refactoring and Warmup Time Improvement
+================================================
+
+Maciej Fijalkowski and Armin Rigo refactored internals of RPython that now
+allow PyPy to more efficiently use `guards`_ in jitted code. They also rewrote
+unrolling, leading to a warmup time improvement of 20% or so.
+
+Numpy
+=====
+
+Our implementation of `numpy`_ continues to improve. ndarray and the numeric
+dtypes are very close to feature-complete; record, string and unicode dtypes
+are mostly supported.  We have reimplemented numpy linalg, random and fft as
+cffi-1.0 modules that call out to the same underlying libraries that upstream
+numpy uses. Please try it out, especially using the new vectorization (via
+``--jit vec=1`` on the command line) and let us know what is missing for your
+code.
+
+CFFI
+====
+
+While not applicable only to PyPy, `cffi`_ is arguably our most significant
+contribution to the Python ecosystem. Armin Rigo continued improving it,
+and PyPy reaps the benefits of `cffi-1.3`_: improved management of object
+lifetimes, __stdcall on Win32, ffi.memmove(), and the ability to percolate the
+``const`` and ``restrict`` keywords from cdef to the generated C code.
+
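+As a quick, hedged illustration of the new ffi.memmove() (this should work on
+any cffi >= 1.3; the variable names are made up)::
+
+    from cffi import FFI
+
+    ffi = FFI()
+    src = ffi.new("char[]", b"hello world")   # a cdata array
+    dst = bytearray(5)                        # any writable Python buffer
+    ffi.memmove(dst, src, 5)                  # copies 5 bytes, memmove-style
+    assert dst == bytearray(b"hello")
+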
+.. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10/pypy-memory-and-warmup-improvements-2.html
+.. _`vectorization`: http://pypyvecopt.blogspot.co.at/
+.. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html
+.. _`PyPy`: http://doc.pypy.org 
+.. _`RPython`: https://rpython.readthedocs.org
+.. _`cffi`: https://cffi.readthedocs.org
+.. _`cffi-1.3`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-3-0
+.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
+.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
+.. _`numpy`: https://bitbucket.org/pypy/numpy
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports **x86** machines on most common operating systems
+(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_),
+as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux.
+
+We also introduce `support for the 64 bit PowerPC`_ hardware, specifically 
+Linux running the big- and little-endian variants of ppc64.
+
+.. _`pypy and cpython 2.7.x`: http://speed.pypy.org
+.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy
+.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/
+.. _`dynamic languages`: http://pypyjs.org
+.. _`support for the 64 bit PowerPC`: http://morepypy.blogspot.com/2015/10/powerpc-backend-for-jit.html
+.. _`here`: http://morepypy.blogspot.com/2015/10/automatic-simd-vectorization-support-in.html
+
+Other Highlights (since 2.6.1 release two months ago)
+=====================================================
+
+* Bug Fixes
+
+  * Applied OpenBSD downstream fixes
+
+  * Fix a crash on non-linux when running more than 20 threads
+
+  * In cffi, ffi.new_handle() is more cpython compliant
+
+  * Accept unicode in functions inside the _curses cffi backend exactly like
+    cpython
+
+  * Fix a segfault in itertools.islice()
+
+  * Use gcrootfinder=shadowstack by default, asmgcc on linux only
+
+  * Fix ndarray.copy() for upstream compatibility when copying non-contiguous
+    arrays
+
+  * Fix assumption that lltype.UniChar is unsigned
+
+  * Fix a subtle bug with stacklets on shadowstack
+
+  * Improve support for the cpython capi in cpyext (our capi compatibility
+    layer). Fixing these issues inspired some thought about cpyext in general;
+    stay tuned for more improvements
+
+  * When loading dynamic libraries, in case of a certain loading error, retry
+    loading the library assuming it is actually a linker script, like on Arch
+    and Gentoo
+
+  * Issues reported with our previous release were resolved_ after reports
+    from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues
+    or on IRC at #pypy
+
+* New features:
+
+  * Add an optimization pass to vectorize loops using x86 SIMD intrinsics.
+
+  * Support __stdcall on Windows in CFFI
+
+  * Improve debug logging when using PYPYLOG=???
+
+  * Deal with platforms with no RAND_egd() in OpenSSL
+
+* Numpy:
+
+  * Add support for ndarray.ctypes
+
+  * Fast path for mixing numpy scalars and floats
+
+  * Add support for creating Fortran-ordered ndarrays
+
+  * Fix casting failures in linalg (by extending ufunc casting)
+
+  * Recognize and disallow (for now) pickling of ndarrays with objects
+    embedded in them
+
+* Performance improvements and refactorings:
+
+  * Reuse hashed keys across dictionaries and sets
+
+  * Refactor JIT internals to improve warmup time by 20% or so at the cost of
+    a minor regression in JIT speed
+
+  * Recognize patterns of common sequences in the JIT backends and optimize
+    them
+
+  * Make the garbage collector more incremental over external_malloc() calls
+
+  * Share guard resume data where possible which reduces memory usage
+
+  * Fast path for zip(list, list)
+
+  * Reduce the number of checks in the JIT for lst[a:]
+
+  * Move the non-optimizable part of callbacks outside the JIT
+
+  * Factor in field immutability when invalidating heap information
+
+  * Unroll itertools.izip_longest() with two sequences
+
+  * Minor optimizations after analyzing output from `vmprof`_ and trace logs
+
+  * Remove many class attributes in rpython classes
+
+  * Handle getfield_gc_pure* and getfield_gc_* uniformly in heap.py
+
+.. _`vmprof`: https://vmprof.readthedocs.org
+.. _resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html
+
+Please try it out and let us know what you think. We welcome feedback;
+we know you are using PyPy, please tell us about it!
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/test/test_whatsnew.py b/pypy/doc/test/test_whatsnew.py
--- a/pypy/doc/test/test_whatsnew.py
+++ b/pypy/doc/test/test_whatsnew.py
@@ -102,3 +102,13 @@
     assert not not_documented
     if branch == 'py3k':
         assert not not_merged
+
+def test_startrev_on_default():
+    doc = ROOT.join('pypy', 'doc')
+    last_whatsnew = doc.join('whatsnew-head.rst').read()
+    startrev, documented = parse_doc(last_whatsnew)
+    errcode, wc_branch = getstatusoutput(
+        "hg log -r %s --template '{branch}'" % startrev)
+    if errcode != 0:
+        py.test.skip('no Mercurial repo')
+    assert wc_branch == 'default'
diff --git a/pypy/doc/whatsnew-4.0.0.rst b/pypy/doc/whatsnew-4.0.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/whatsnew-4.0.0.rst
@@ -0,0 +1,94 @@
+========================
+What's new in PyPy 4.0.0
+========================
+
+.. this is a revision shortly after release-2.6.1
+.. startrev: 3a8f5481dab4
+
+.. branch: keys_with_hash
+Improve the performance of dict.update() and a bunch of methods from
+sets, by reusing the hash value stored in one dict when inspecting
+or changing another dict with that key.
+
+.. branch: optresult-unroll 
+A major refactoring of the ResOperations that kills Box. Also rewrote
+unrolling to enable future enhancements.  Should improve warmup time
+by 20% or so.
+
+.. branch: optimize-cond-call
+Optimize common sequences of operations like
+``int_lt/cond_call`` in the JIT backends
+
+.. branch: missing_openssl_include
+Fix for missing headers in OpenBSD, already applied in downstream ports
+
+.. branch: gc-more-incremental
+Remove a source of non-incremental-ness in the GC: now
+external_malloc() no longer runs gc_step_until() any more. If there
+is a currently-running major collection, we do only so many steps
+before returning. This number of steps depends on the size of the
+allocated object. It is controlled by tracking the general progress
+of these major collection steps and the size of old objects that
+keep adding up between them.
+
+.. branch: remember-tracing-counts
+Reenable jithooks
+
+.. branch: detect_egd2
+
+.. branch: shadowstack-no-move-2
+Issue #2141: fix a crash on Windows and OS/X and ARM when running
+at least 20 threads.
+
+.. branch: numpy-ctypes
+
+Add support for ndarray.ctypes property.
+
+.. branch: share-guard-info
+
+Share guard resume data between consecutive guards that have only
+pure operations and guards in between.
+
+.. branch: issue-2148
+
+Fix performance regression on operations mixing numpy scalars and Python 
+floats, cf. issue #2148.
+
+.. branch: cffi-stdcall
+Win32: support '__stdcall' in CFFI.
+
+.. branch: callfamily
+
+Refactorings of annotation and rtyping of function calls.
+
+.. branch: fortran-order
+
+Allow creation of fortran-ordered ndarrays
+
+.. branch: type_system-cleanup
+
+Remove some remnants of the old ootypesystem vs lltypesystem dichotomy.
+
+.. branch: cffi-handle-lifetime
+
+ffi.new_handle() returns handles that work more like CPython's: they
+remain valid as long as the target exists (unlike the previous
+version, where handles become invalid *before* the __del__ is called).
+
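+A hedged sketch of the new behaviour (plain cffi API; the class and variable
+names are made up)::
+
+    from cffi import FFI
+
+    ffi = FFI()
+
+    class Target(object):
+        pass
+
+    obj = Target()
+    h = ffi.new_handle(obj)            # an opaque 'void *' cdata
+    assert ffi.from_handle(h) is obj   # valid as long as the target exists
+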
+.. branch: ufunc-casting
+
+Allow automatic casting in ufuncs (and frompypyfunc) to cast the
+arguments to the allowed function type declarations; fixes various
+failures in linalg cffi functions
+
+.. branch: vecopt
+.. branch: vecopt-merge
+
+A new optimization pass to emit vectorized loops
+
+.. branch: ppc-updated-backend
+
+The PowerPC JIT backend is merged.
+
+.. branch: osx-libffi
+
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -1,7 +1,10 @@
-=======================
-What's new in PyPy 2.6+
-=======================
+=========================
+What's new in PyPy 4.0.+
+=========================
 
-.. this is a revision shortly after release-2.6.1
-.. startrev: 07769be4057b
+.. this is a revision shortly after release-4.0.0
+.. startrev: 9397d7c6f5aa
 
+.. branch: lazy-fast2locals
+Improve the performance of simple trace functions by lazily calling
+fast2locals and locals2fast only if f_locals is actually accessed.
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1079,6 +1079,14 @@
         args = Arguments.frompacked(self, w_args, w_kwds)
         return self.call_args(w_callable, args)
 
+    def _try_fetch_pycode(self, w_func):
+        from pypy.interpreter.function import Function, Method
+        if isinstance(w_func, Method):
+            w_func = w_func.w_function
+        if isinstance(w_func, Function):
+            return w_func.code
+        return None
+
     def call_function(self, w_func, *args_w):
         nargs = len(args_w) # used for pruning funccall versions
         if not self.config.objspace.disable_call_speedhacks and nargs < 5:
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -322,10 +322,14 @@
                 w_arg = space.newtuple([operr.w_type, w_value,
                                      space.wrap(operr.get_traceback())])
 
-            frame.fast2locals()
+            d = frame.getorcreatedebug()
+            if d.w_locals is not None:
+                # only update the w_locals dict if it exists
+                # if it does not exist yet and the tracer accesses it via
+                # frame.f_locals, it is filled by PyFrame.getdictscope
+                frame.fast2locals()
             self.is_tracing += 1
             try:
-                d = frame.getorcreatedebug()
                 try:
                     w_result = space.call_function(w_callback, space.wrap(frame), space.wrap(event), w_arg)
                     if space.is_w(w_result, space.w_None):
@@ -338,7 +342,8 @@
                     raise
             finally:
                 self.is_tracing -= 1
-                frame.locals2fast()
+                if d.w_locals is not None:
+                    frame.locals2fast()
 
         # Profile cases
         if self.profilefunc is not None:
diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py
--- a/pypy/interpreter/miscutils.py
+++ b/pypy/interpreter/miscutils.py
@@ -9,6 +9,7 @@
     implementation for this feature, and patches 'space.threadlocals' when
     'thread' is initialized.
     """
+    _immutable_fields_ = ['_value?']
     _value = None
 
     def get_ec(self):
diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py
--- a/pypy/interpreter/test/test_pyframe.py
+++ b/pypy/interpreter/test/test_pyframe.py
@@ -1,10 +1,14 @@
 from rpython.tool import udir
 from pypy.conftest import option
+from pypy.interpreter.gateway import interp2app
 
+def check_no_w_locals(space, w_frame):
+    return space.wrap(w_frame.getorcreatedebug().w_locals is None)
 
 class AppTestPyFrame:
 
     def setup_class(cls):
+        space = cls.space
         cls.w_udir = cls.space.wrap(str(udir.udir))
         cls.w_tempfile1 = cls.space.wrap(str(udir.udir.join('tempfile1')))
         if not option.runappdirect:
@@ -17,6 +21,8 @@
             w_call_further.code.hidden_applevel = True       # hack
             cls.w_call_further = w_call_further
 
+            cls.w_check_no_w_locals = space.wrap(interp2app(check_no_w_locals))
+
     # test for the presence of the attributes, not functionality
 
     def test_f_locals(self):
@@ -463,6 +469,25 @@
                      (firstlineno + 3, 'line'),
                      (firstlineno + 3, 'return')]
 
+    def test_fast2locals_called_lazily(self):
+        import sys
+        class FrameHolder:
+            pass
+        fh = FrameHolder()
+        def trace(frame, what, arg):
+            # trivial trace function, does not access f_locals
+            fh.frame = frame
+            return trace
+        def f(x):
+            x += 1
+            return x
+        sys.settrace(trace)
+        res = f(1)
+        sys.settrace(None)
+        assert res == 2
+        if hasattr(self, "check_no_w_locals"): # not appdirect
+            assert self.check_no_w_locals(fh.frame)
+
     def test_set_unset_f_trace(self):
         import sys
         seen = []
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -109,7 +109,6 @@
                                      'interp_magic.mapdict_cache_counter')
         PYC_MAGIC = get_pyc_magic(self.space)
         self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC)
-        #
         try:
             from rpython.jit.backend import detect_cpu
             model = detect_cpu.autodetect()
@@ -119,7 +118,7 @@
                 raise
             else:
                 pass   # ok fine to ignore in this case
-        #
+        
         if self.space.config.translation.jit:
             features = detect_cpu.getcpufeatures(model)
             self.extra_interpdef('jit_backend_features',
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -1,9 +1,16 @@
 import sys
 from pypy.interpreter.mixedmodule import MixedModule
-from rpython.rlib import rdynload
+from rpython.rlib import rdynload, clibffi
 
 VERSION = "1.3.0"
 
+FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
+try:
+    FFI_STDCALL = clibffi.FFI_STDCALL
+    has_stdcall = True
+except AttributeError:
+    has_stdcall = False
+
 
 class Module(MixedModule):
 
@@ -40,12 +47,13 @@
 
         'string': 'func.string',
         'buffer': 'cbuffer.buffer',
+        'memmove': 'func.memmove',
 
         'get_errno': 'cerrno.get_errno',
         'set_errno': 'cerrno.set_errno',
 
-        'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")',
-        'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name
+        'FFI_DEFAULT_ABI': 'space.wrap(%d)' % FFI_DEFAULT_ABI,
+        'FFI_CDECL':       'space.wrap(%d)' % FFI_DEFAULT_ABI,  # win32 name
 
         # CFFI 1.0
         'FFI': 'ffi_obj.W_FFIObject',
@@ -53,6 +61,9 @@
     if sys.platform == 'win32':
         interpleveldefs['getwinerror'] = 'cerrno.getwinerror'
 
+    if has_stdcall:
+        interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL
+
 
 def get_dict_rtld_constants():
     found = {}
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -1,11 +1,11 @@
 """
 Callbacks.
 """
-import sys, os
+import sys, os, py
 
-from rpython.rlib import clibffi, rweakref, jit, jit_libffi
-from rpython.rlib.objectmodel import compute_unique_id, keepalive_until_here
-from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rlib import clibffi, jit, jit_libffi, rgc, objectmodel
+from rpython.rlib.objectmodel import keepalive_until_here
+from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
 
 from pypy.interpreter.error import OperationError, oefmt
 from pypy.module._cffi_backend import cerrno, misc
@@ -19,19 +19,52 @@
 # ____________________________________________________________
 
 
+@jit.dont_look_inside
+def make_callback(space, ctype, w_callable, w_error, w_onerror):
+    # Allocate a callback as a nonmovable W_CDataCallback instance, which
+    # we can cast to a plain VOIDP.  As long as the object is not freed,
+    # we can cast the VOIDP back to a W_CDataCallback in reveal_callback().
+    cdata = objectmodel.instantiate(W_CDataCallback, nonmovable=True)
+    gcref = rgc.cast_instance_to_gcref(cdata)
+    raw_cdata = rgc.hide_nonmovable_gcref(gcref)
+    cdata.__init__(space, ctype, w_callable, w_error, w_onerror, raw_cdata)
+    return cdata
+
+def reveal_callback(raw_ptr):
+    addr = rffi.cast(llmemory.Address, raw_ptr)
+    gcref = rgc.reveal_gcref(addr)
+    return rgc.try_cast_gcref_to_instance(W_CDataCallback, gcref)
+
+
+class Closure(object):
+    """This small class is here to have a __del__ outside any cycle."""
+
+    ll_error = lltype.nullptr(rffi.CCHARP.TO)     # set manually
+
+    def __init__(self, ptr):