Author: Matti Picus <matti.pi...@gmail.com>
Branch: winoverlapped
Changeset: r96353:c2dba4448ebf
Date: 2019-03-26 21:23 +0200
http://bitbucket.org/pypy/pypy/changeset/c2dba4448ebf/

Log:    merge py3.6 into branch

diff too long, truncating to 2000 out of 2299 lines

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -67,3 +67,11 @@
 928a4f70d3de7d17449456946154c5da6e600162 release-pypy3.5-v7.0.0
 dab365a465140aa79a5f3ba4db784c4af4d5c195 release-pypy3.6-v7.0.0
 fb40f7a5524c77b80e6c468e087d621610137261 release-pypy3.6-v7.0.0
+990cef41fe11e5d46b019a46aa956ff46ea1a234 release-pypy2.7-v7.1.0
+bb0d05b190b9c579f0c889a368636e14f6205bab release-pypy3.6-v7.1.0
+bb0d05b190b9c579f0c889a368636e14f6205bab release-pypy3.6-v7.1.0
+6fd188f8f903b7555920adf7d5e7fe21db1bd593 release-pypy3.6-v7.1.0
+6fd188f8f903b7555920adf7d5e7fe21db1bd593 release-pypy3.6-v7.1.0
+7a2e437acfceafe2665b23b1394dc6c66add3b89 release-pypy3.6-v7.1.0
+7a2e437acfceafe2665b23b1394dc6c66add3b89 release-pypy3.6-v7.1.0
+de061d87e39c7df4e436974096d7982c676a859d release-pypy3.6-v7.1.0
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -123,7 +123,9 @@
   Wenzhu Man
   Konstantin Lopuhin
   John Witulski
+  Stefan Beyer
   Jeremy Thurgood
+  Andrew Lawrence
   Greg Price
   Ivan Sichmann Freitas
   Dario Bertini
@@ -134,7 +136,6 @@
   Jean-Philippe St. Pierre
   Guido van Rossum
   Pavel Vinogradov
-  Stefan Beyer
   William Leslie
  Paweł Piotr Przeradowski
   marky1991
@@ -152,6 +153,7 @@
   Wanja Saatkamp
   Mike Blume
   Gerald Klix
+  Julian Berman
   Oscar Nierstrasz
   Rami Chowdhury
   Stefan H. Muller
@@ -174,6 +176,7 @@
   Anton Gulenko
   Sergey Matyunin
   Andrew Chambers
+  Łukasz Langa
   Nicolas Chauvat
   Andrew Durdin
   Ben Young
@@ -296,7 +299,6 @@
   Bobby Impollonia
   Roberto De Ioris
   Jeong YunWon
-  andrewjlawrence
   Christopher Armstrong
   Aaron Tubbs
   Vasantha Ganesh K
@@ -328,7 +330,6 @@
   Ben Darnell
   Juan Francisco Cantero Hurtado
   Godefroid Chappelle
-  Julian Berman
   Stephan Busemann
   Dan Colish
   timo
diff --git a/extra_tests/ctypes_tests/test_structures.py b/extra_tests/ctypes_tests/test_structures.py
--- a/extra_tests/ctypes_tests/test_structures.py
+++ b/extra_tests/ctypes_tests/test_structures.py
@@ -119,12 +119,15 @@
             ms.n = 0xff00
             return repr(ba[:])
 
+        nstruct = dostruct(Native)
         if sys.byteorder == 'little':
-            assert dostruct(Native) == dostruct(Little)
-            assert dostruct(Native) != dostruct(Big)
+            assert nstruct == dostruct(Little)
+            assert nstruct != dostruct(Big)
+            assert Big._fields_[0][1] is not i
         else:
-            assert dostruct(Native) == dostruct(Big)
-            assert dostruct(Native) != dostruct(Little)
+            assert nstruct == dostruct(Big)
+            assert nstruct != dostruct(Little)
+            assert Little._fields_[0][1] is not i
 
 def test_from_buffer_copy():
     from array import array
@@ -185,3 +188,20 @@
     assert sizeof(s) == 3 * sizeof(c_int)
     assert s.a == 4     # 256 + 4
     assert s.b == -123
+
+def test_memoryview():
+    class S(Structure):
+        _fields_ = [('a', c_int16),
+                    ('b', c_int16),
+                   ]
+
+    S3 = S * 3
+    c_array = (2 * S3)(
+        S3(S(a=0, b=1), S(a=2, b=3), S(a=4,  b=5)),
+        S3(S(a=6, b=7), S(a=8, b=9), S(a=10, b=11)),
+        )
+
+    mv = memoryview(c_array)
+    assert mv.format == 'T{<h:a:<h:b:}'
+    assert mv.shape == (2, 3)
+    assert mv.itemsize == 4
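For context, a minimal sketch (not part of the diff) of what the new ctypes buffer support makes possible; the exact format string assumes a little-endian machine and an interpreter with this change::

    from ctypes import Structure, c_int16

    class Point(Structure):
        _fields_ = [('x', c_int16), ('y', c_int16)]

    pts = (Point * 3)(Point(1, 2), Point(3, 4), Point(5, 6))
    mv = memoryview(pts)
    # The view now carries the struct layout: format 'T{<h:x:<h:y:}',
    # shape (3,) and itemsize 4 (two 16-bit fields per element).
    print(mv.format, mv.shape, mv.itemsize)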
diff --git a/extra_tests/requirements.txt b/extra_tests/requirements.txt
--- a/extra_tests/requirements.txt
+++ b/extra_tests/requirements.txt
@@ -1,3 +1,3 @@
-pytest
+pytest<=4.0
 hypothesis
 vmprof
diff --git a/lib_pypy/_blake2/_blake2_build.py b/lib_pypy/_blake2/_blake2_build.py
--- a/lib_pypy/_blake2/_blake2_build.py
+++ b/lib_pypy/_blake2/_blake2_build.py
@@ -4,17 +4,18 @@
 
 from cffi import FFI
 
-IS_ARM = platform.machine().startswith('arm')
 IS_WIN = sys.platform == 'win32'
-if IS_ARM:
-    # XXX Choose neon accelaration
-    define_macros = []
-    extra_compile_args = []
-elif IS_WIN:
+if IS_WIN:
+    BLAKE2_USE_SSE = True
     extra_compile_args = []
     define_macros = [('__SSE2__', '1')]
+elif platform.machine().startswith('x86'):
+    BLAKE2_USE_SSE = True
+    extra_compile_args = ['-msse2']
+    define_macros = []
 else:
-    extra_compile_args = ['-msse2']
+    BLAKE2_USE_SSE = False
+    extra_compile_args = []
     define_macros = []
     
     
@@ -80,13 +81,18 @@
 
 
 _libdir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'impl'))
+if BLAKE2_USE_SSE:
+    sourcesB=[os.path.join(_libdir, 'blake2b.c'), ]
+    sourcesS=[os.path.join(_libdir, 'blake2s.c'), ]
+else:    
+    sourcesB=[os.path.join(_libdir, 'blake2b-ref.c'), ]
+    sourcesS=[os.path.join(_libdir, 'blake2s-ref.c'), ]
 
 blake2b_ffi = FFI()
 blake2b_ffi.cdef(blake_cdef)
 blake2b_ffi.set_source(
     '_blake2b_cffi', blake2b_source,
-    sources=[os.path.join(_libdir, 'blake2b.c'),
-            ],
+    sources=sourcesB,
     include_dirs=[_libdir],
     extra_compile_args=extra_compile_args,
     define_macros=define_macros,
@@ -102,8 +108,7 @@
 blake2s_ffi.cdef(blake_cdef)
 blake2s_ffi.set_source(
     '_blake2s_cffi', _replace_b2s(blake2b_source),
-    sources=[os.path.join(_libdir, 'blake2s.c'),
-            ],
+    sources=sourcesS,
     include_dirs=[_libdir],
     extra_compile_args=extra_compile_args,
     define_macros=define_macros,
diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py
--- a/lib_pypy/_ctypes/array.py
+++ b/lib_pypy/_ctypes/array.py
@@ -4,6 +4,7 @@
 from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof
 from _ctypes.basics import keepalive_key, store_reference, ensure_objects
 from _ctypes.basics import CArgObject, as_ffi_pointer
+import sys, __pypy__, struct
 
 class ArrayMeta(_CDataMeta):
     def __new__(self, name, cls, typedict):
@@ -247,6 +248,24 @@
     def _as_ffi_pointer_(self, ffitype):
         return as_ffi_pointer(self, ffitype)
 
+    def __buffer__(self, flags):
+        shape = []
+        obj = self
+        while 1:
+            shape.append(obj._length_)
+            try:
+                obj[0]._length_
+            except (AttributeError, IndexError):
+                break
+            obj = obj[0]
+
+        fmt = get_format_str(obj._type_)
+        try:
+            itemsize = struct.calcsize(fmt[1:])
+        except:
+            itemsize = sizeof(obj[0])
+        return __pypy__.newmemoryview(memoryview(self._buffer), itemsize, fmt, shape)
+
 ARRAY_CACHE = {}
 
 def create_array_type(base, length):
@@ -266,3 +285,31 @@
         cls = ArrayMeta(name, (Array,), tpdict)
         ARRAY_CACHE[key] = cls
         return cls
+
+byteorder = {'little': '<', 'big': '>'}
+swappedorder = {'little': '>', 'big': '<'}
+
+def get_format_str(typ):
+    if hasattr(typ, '_fields_'):
+        if hasattr(typ, '_swappedbytes_'):
+            bo = swappedorder[sys.byteorder]
+        else:
+            bo = byteorder[sys.byteorder]
+        flds = []
+        for name, obj in typ._fields_:
+            # Trim off the leading '<' or '>'
+            ch = get_format_str(obj)[1:]
+            if (ch) == 'B':
+                flds.append(byteorder[sys.byteorder])
+            else:
+                flds.append(bo)
+            flds.append(ch)
+            flds.append(':')
+            flds.append(name)
+            flds.append(':')
+        return 'T{' + ''.join(flds) + '}'
+    elif hasattr(typ, '_type_'):
+        ch = typ._type_
+        return byteorder[sys.byteorder] + ch
+    else:
+        raise ValueError('cannot get format string for %r' % typ)
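As an illustration (not part of the diff), the new ``get_format_str`` helper maps ctypes types to struct-module format strings; a sketch assuming a little-endian machine and a PyPy build where the pure-python ``_ctypes`` package above is importable::

    from ctypes import Structure, c_int16, c_uint8
    from _ctypes.array import get_format_str   # PyPy's pure-python _ctypes

    class Hdr(Structure):
        _fields_ = [('tag', c_uint8), ('size', c_int16)]

    # Simple types become a byte-order prefix plus the type code.
    print(get_format_str(c_int16))   # expected: '<h'
    # Structures use the struct-module 'T{...}' syntax built field by field.
    print(get_format_str(Hdr))       # expected: 'T{<B:tag:<h:size:}'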
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -2,8 +2,15 @@
 from _rawffi import alt as _ffi
 import sys
 
-try: from __pypy__ import builtinify
-except ImportError: builtinify = lambda f: f
+try:
+    from __pypy__ import builtinify
+except ImportError:
+    builtinify = lambda f: f
+
+try:
+    from __pypy__.bufferable import bufferable
+except ImportError:
+    bufferable = object
 
 keepalive_key = str # XXX fix this when provided with test
 
@@ -64,7 +71,7 @@
         'resbuffer' is a _rawffi array of length 1 containing the value,
         and this returns a general Python object that corresponds.
         """
-        res = object.__new__(self)
+        res = bufferable.__new__(self)
         res.__class__ = self
         res.__dict__['_buffer'] = resbuffer
         if base is not None:
@@ -148,7 +155,7 @@
     def __ne__(self, other):
         return self._obj != other
 
-class _CData(object, metaclass=_CDataMeta):
+class _CData(bufferable, metaclass=_CDataMeta):
     """ The most basic object for all ctypes types
     """
     _objects = None
diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py
--- a/lib_pypy/_ctypes/pointer.py
+++ b/lib_pypy/_ctypes/pointer.py
@@ -7,8 +7,7 @@
 from _ctypes.array import Array, array_get_slice_params, array_slice_getitem,\
      array_slice_setitem
 
-try: from __pypy__ import builtinify
-except ImportError: builtinify = lambda f: f
+from __pypy__ import builtinify, newmemoryview
 
 # This cache maps types to pointers to them.
 _pointer_type_cache = {}
@@ -134,6 +133,9 @@
     def _as_ffi_pointer_(self, ffitype):
         return as_ffi_pointer(self, ffitype)
 
+    def __buffer__(self, flags):
+        mv = memoryview(self.getcontents())
+        return newmemoryview(mv, mv.itemsize, '&' + mv.format, mv.shape)
 
 def _cast_addr(obj, _, tp):
     if not (isinstance(tp, _CDataMeta) and tp._is_pointer_like()):
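A small sketch (not part of the diff) of the pointer case: the view of the pointed-to object is reused and its format is prefixed with ``&``; assumes an interpreter with this change and a little-endian machine::

    from ctypes import Structure, c_int16, pointer

    class Point(Structure):
        _fields_ = [('x', c_int16), ('y', c_int16)]

    p = pointer(Point(1, 2))
    mv = memoryview(p)
    # expected: '&T{<h:x:<h:y:}' -- the target's layout behind a '&' marker
    print(mv.format)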
diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py
--- a/lib_pypy/_ctypes/structure.py
+++ b/lib_pypy/_ctypes/structure.py
@@ -2,9 +2,9 @@
 import _rawffi
 from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\
      store_reference, ensure_objects, CArgObject
-from _ctypes.array import Array
+from _ctypes.array import Array, get_format_str
 from _ctypes.pointer import _Pointer
-import inspect
+import inspect, __pypy__
 
 
 def names_and_fields(self, _fields_, superclass, anonymous_fields=None):
@@ -176,6 +176,11 @@
 class StructOrUnionMeta(_CDataMeta):
     def __new__(self, name, cls, typedict):
         res = type.__new__(self, name, cls, typedict)
+        if hasattr(res, '_swappedbytes_') and '_fields_' in typedict:
+            # Activate the stdlib ctypes._swapped_meta.__setattr__ to convert fields
+            tmp = res._fields_
+            delattr(res, '_fields_')
+            setattr(res, '_fields_', tmp)
         if "_abstract_" in typedict:
             return res
         cls = cls or (object,)
@@ -253,17 +258,7 @@
                                          or cls is union.Union):
             raise TypeError("abstract class")
         if hasattr(cls, '_swappedbytes_'):
-            fields = [None] * len(cls._fields_)
-            for i in range(len(cls._fields_)):
-                if cls._fields_[i][1] == cls._fields_[i][1].__dict__.get('__ctype_be__', None):
-                    swapped = cls._fields_[i][1].__dict__.get('__ctype_le__', cls._fields_[i][1])
-                else:
-                    swapped = cls._fields_[i][1].__dict__.get('__ctype_be__', cls._fields_[i][1])
-                if len(cls._fields_[i]) < 3:
-                    fields[i] = (cls._fields_[i][0], swapped)
-                else:
-                    fields[i] = (cls._fields_[i][0], swapped, cls._fields_[i][2])
-            names_and_fields(cls, fields, _CData, cls.__dict__.get('_anonymous_', None))
+            names_and_fields(cls, cls._fields_, _CData, cls.__dict__.get('_anonymous_', None))
         self = super(_CData, cls).__new__(cls)
         if hasattr(cls, '_ffistruct_'):
             self.__dict__['_buffer'] = self._ffistruct_(autofree=True)
@@ -303,6 +298,10 @@
     def _to_ffi_param(self):
         return self._buffer
 
+    def __buffer__(self, flags):
+        fmt = get_format_str(self)
+        itemsize = type(self)._sizeofinstances() 
+        return __pypy__.newmemoryview(memoryview(self._buffer), itemsize, fmt)
 
 class StructureMeta(StructOrUnionMeta):
     _is_union = False
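The ``_swappedbytes_`` change above defers the field conversion to the stdlib ``ctypes._endian._swapped_meta``, matching CPython; a sketch (not part of the diff) of the behaviour the updated test checks::

    import sys
    from ctypes import BigEndianStructure, c_int16

    class Big(BigEndianStructure):
        _fields_ = [('n', c_int16)]

    # On a little-endian machine the stored field type is the byte-swapped
    # variant of c_int16, not c_int16 itself.
    if sys.byteorder == 'little':
        print(Big._fields_[0][1] is c_int16)   # expected: False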
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: cffi
-Version: 1.12.1
+Version: 1.12.2
 Summary: Foreign Function Interface for Python calling C code.
 Home-page: http://cffi.readthedocs.org
 Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -5,8 +5,8 @@
 from .error import CDefError, FFIError, VerificationError, VerificationMissing
 from .error import PkgConfigError
 
-__version__ = "1.12.1"
-__version_info__ = (1, 12, 1)
+__version__ = "1.12.2"
+__version_info__ = (1, 12, 2)
 
 # The verifier module file names are based on the CRC32 of a string that
 # contains the following version number.  It may be older than __version__
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -221,7 +221,7 @@
 
         if (f != NULL && f != Py_None) {
             PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
-                               "\ncompiled with cffi version: 1.12.1"
+                               "\ncompiled with cffi version: 1.12.2"
                                "\n_cffi_backend module: ", f);
             modules = PyImport_GetModuleDict();
             mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst
--- a/pypy/doc/__pypy__-module.rst
+++ b/pypy/doc/__pypy__-module.rst
@@ -1,26 +1,186 @@
-.. comment: this document is very incomplete, should we generate
-            it automatically?
+.. comment: this document may get out of synch with the code, but to generate
+    it automatically we would need to use pypy to run sphinx-build
 
 The ``__pypy__`` module
 =======================
 
 The ``__pypy__`` module is the main entry point to special features provided
-by PyPy's standard interpreter. Its content depends on :doc:`configuration options <config/index>`
-which may add new functionality and functions whose existence or non-existence
-indicates the presence of such features.
-
+by PyPy's standard interpreter. Its content depends on :doc:`configuration
+options <config/index>` which may add new functionality and functions whose
+existence or non-existence indicates the presence of such features. These are
+generally used for compatibility when writing pure python modules that in
+CPython are written in C. Not available in CPython, and so must be used inside an
+``if platform.python_implementation() == 'PyPy'`` block or otherwise hidden from
+the CPython interpreter.
 
 Generally available functionality
 ---------------------------------
 
- - ``internal_repr(obj)``: return the interpreter-level representation of an
-   object.
- - ``bytebuffer(length)``: return a new read-write buffer of the given length.
-   It works like a simplified array of characters (actually, depending on the
-   configuration the ``array`` module internally uses this).
- - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation).
+  - ``internal_repr(obj)``: return the interpreter-level representation of an
+    object.
+  - ``bytebuffer(length)``: return a new read-write buffer of the given length.
+    It works like a simplified array of characters (actually, depending on the
+    configuration the ``array`` module internally uses this).
 
+  - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation).
 
+ - ``newmemoryview(buffer, itemsize, format, shape=None, strides=None)``:
+   create a `memoryview` instance with the data from ``buffer`` and the
+   specified itemsize, format, and optional shape and strides.
+
+ - ``bufferable``: a base class that provides a ``__buffer__(self, flags)``
+   method for subclasses to override. This method should return a memoryview
+   instance of the class instance. It is called by the C-API's ``tp_as_buffer.
+   bf_getbuffer``.
+
+  - ``builtinify(func)``: To implement at app-level modules that are, in CPython,
+    implemented in C: this decorator protects a function from being ever bound
+    like a method.  Useful because some tests do things like put a "built-in"
+    function on a class and access it via the instance.
+
+  - ``hidden_applevel(func)``: Decorator that hides a function's frame from
+    app-level
+
+  - ``get_hidden_tb()``: Return the traceback of the current exception being
+    handled by a frame hidden from applevel.
+
+  - ``lookup_special(obj, meth)``: Look up a special method on an object.
+  - ``do_what_I_mean``
+
+  - ``resizelist_hint(...)``: Reallocate the underlying storage of the argument
+    list to sizehint
+
+  - ``newlist_hint(...)``: Create a new empty list that has an underlying
+    storage of length sizehint
+
+  - ``add_memory_pressure(bytes)``: Add an estimated ``bytes`` of memory pressure.
+    Useful when calling a C function that internally allocates a big chunk of
+    memory. This instructs the GC to garbage collect sooner than it would
+    otherwise.
+
+  - ``newdict(type)``: Create a normal dict with a special implementation
+    strategy. ``type`` is a string and can be:
+
+    * ``"module"`` - equivalent to ``some_module.__dict__``
+
+    * ``"instance"`` - equivalent to an instance dict with a not-changing-much
+      set of keys
+
+    * ``"kwargs"`` - keyword args dict equivalent of what you get from
+      ``**kwargs`` in a function, optimized for passing around
+
+    * ``"strdict"`` - string-key only dict. This one should be chosen
+      automatically
+
+  - ``reversed_dict``: Enumerate the keys in a dictionary object in reversed
+    order.  This is a ``__pypy__`` function instead of being simply done by
+    calling reversed(), for CPython compatibility: dictionaries are ordered in
+    PyPy but not in CPython 2.7.  You should use the collections.OrderedDict
+    class for cases where ordering is important. That class implements
+    ``__reversed__`` by calling __pypy__.reversed_dict()
+
+  - ``dict_popitem_first``: Interp-level implementation of
+    ``OrderedDict.popitem(last=False)``.
+
+  - ``delitem_if_value_is`` Atomic equivalent to: ``if dict.get(key) is value:
+    del dict[key]``.
+
+    SPECIAL USE CASES ONLY!  Avoid using on dicts which are specialized,
+    e.g. to ``int`` or ``str`` keys, because it switches to the object
+    strategy. Also, the ``is`` operation is really pointer equality, so avoid
+    using it if ``value`` is an immutable object like ``int`` or ``str``.
+
+  - ``move_to_end``: Move the key in a dictionary object into the first or last
+    position. This is used in Python 3.x to implement ``OrderedDict.move_to_end()``.
+
+  - ``strategy(dict or list or set)``: Return the underlying strategy currently
+    used by the object
+
+  - ``specialized_zip_2_lists``
+  - ``locals_to_fast``
+  - ``set_code_callback``
+  - ``save_module_content_for_future_reload``
+  - ``decode_long``
+  - ``side_effects_ok``: For use with the reverse-debugger: this function
+    normally returns True, but will return False if we are evaluating a
+    debugging command like a watchpoint.  You are responsible for not doing any
+    side effect at all (including no caching) when evaluating watchpoints. This
+    function is meant to help a bit---you can write::
+
+        if not __pypy__.side_effects_ok():
+            skip the caching logic
+
+    inside getter methods or properties, to make them usable from
+    watchpoints.  Note that you need to re-run ``REVDB=.. pypy``
+    after changing the Python code.
+
+  - ``stack_almost_full``: Return True if the stack is more than 15/16th full.
+  - ``pyos_inputhook``: Call PyOS_InputHook() from the CPython C API
+  - ``os.real_getenv(...)`` gets OS environment variables skipping python code
+  - ``_pypydatetime`` provides base classes with correct C API interactions for
+    the pure-python ``datetime`` stdlib module
+
+Fast String Concatenation
+-------------------------
+Rather than in-place concatenation ``+=``, use these to enable fast, minimal
+copy, string building.
+
+  - ``builders.StringBuilder``
+  - ``builders.UnicodeBuilder``
+
+Interacting with the PyPy debug log
+------------------------------------
+
+The following functions can be used to write your own content to the
+:ref:`PYPYLOG <pypylog>`.
+
+  - ``debug_start(category, timestamp=False)``: open a new section; if
+    ``timestamp`` is ``True``, also return the timestamp which was written to
+    the log.
+
+  - ``debug_stop(category, timestamp=False)``: close a section opened by
+    ``debug_start``.
+
+  - ``debug_print(...)``: print arbitrary text to the log.
+
+  - ``debug_print_once(category, ...)``: equivalent to ``debug_start`` +
+    ``debug_print`` + ``debug_stop``.
+
+  - ``debug_flush``: flush the log.
+
+  - ``debug_read_timestamp()``: read the timestamp from the same timer used by
+    the log.
+
+  - ``debug_get_timestamp_unit()``: get the unit of the value returned by
+    ``debug_read_timestamp()``.
+
+
+Depending on the architecture and operating system, PyPy uses different ways
+to read timestamps, so the timestamps used in the log file are expressed in
+varying units. It is possible to know which by calling
+``debug_get_timestamp_unit()``, which can be one of the following values:
+
+``tsc``
+    The default on ``x86`` machines: timestamps are expressed in CPU ticks, as
+    read by the `Time Stamp Counter`_.
+
+``ns``
+    Timestamps are expressed in nanoseconds.
+
+``QueryPerformanceCounter``
+    On Windows, in case for some reason ``tsc`` is not available: timestamps
+    are read using the win API ``QueryPerformanceCounter()``.
+
+
+Unfortunately, there does not seem to be a reliable standard way for
+converting ``tsc`` ticks into nanoseconds, although in practice on modern CPUs
+it is enough to divide the ticks by the maximum nominal frequency of the CPU.
+For this reason, PyPy gives the raw value, and leaves the job of doing the
+conversion to external libraries.
+
+.. _`Time Stamp Counter`: https://en.wikipedia.org/wiki/Time_Stamp_Counter    
+    
+   
 Transparent Proxy Functionality
 -------------------------------
 
@@ -34,6 +194,30 @@
    its controller. Otherwise return None.
 
 
+Additional Clocks for Timing
+----------------------------
+The ``time`` submodule exposes the platform-dependent clock types such as
+``CLOCK_BOOTTIME``, ``CLOCK_MONOTONIC``, ``CLOCK_MONOTONIC_COARSE``,
+``CLOCK_MONOTONIC_RAW`` and two functions:
+
+  - ``clock_gettime(m)`` which returns the clock type time in seconds and
+  - ``clock_getres(m)`` which returns the clock resolution in seconds.
+
+Extended Signal Handling
+------------------------
+``thread.signals_enabled`` is a context manager to use in non-main threads.
+    enables receiving signals in a "with" statement.  More precisely, if a
+    signal is received by the process, then the signal handler might be
+    called either in the main thread (as usual) or within another thread
+    that is within a "with signals_enabled:".  This other thread should be
+    ready to handle unexpected exceptions that the signal handler might
+    raise --- notably KeyboardInterrupt.
+
+Integer Operations with Overflow
+--------------------------------
+  - ``intop`` provides a module with integer operations that have
+    two-complement overflow behaviour instead of overflowing to longs
+
 Functionality available on py.py (not after translation)
 --------------------------------------------------------
 
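A minimal usage sketch (not part of the diff) for the debug-log helpers documented above, assuming a PyPy interpreter started with a ``PYPYLOG`` section enabled, e.g. ``PYPYLOG=my-section:log.txt pypy app.py``::

    import __pypy__

    __pypy__.debug_start('my-section')
    __pypy__.debug_print('processed', 42, 'items')
    __pypy__.debug_stop('my-section')
    __pypy__.debug_flush()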
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -71,9 +71,9 @@
 #    module/cpyext/include/patchlevel.h
 #
 # The short X.Y version.
-version = '7.1'
+version = '7.2'
 # The full version, including alpha/beta/rc tags.
-release = '7.1.0'
+release = '7.2.0'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -90,7 +90,9 @@
   Wenzhu Man
   Konstantin Lopuhin
   John Witulski
+  Stefan Beyer
   Jeremy Thurgood
+  Andrew Lawrence
   Greg Price
   Ivan Sichmann Freitas
   Dario Bertini
@@ -101,7 +103,6 @@
   Jean-Philippe St. Pierre
   Guido van Rossum
   Pavel Vinogradov
-  Stefan Beyer
   William Leslie
  Paweł Piotr Przeradowski
   marky1991
@@ -119,6 +120,7 @@
   Wanja Saatkamp
   Mike Blume
   Gerald Klix
+  Julian Berman
   Oscar Nierstrasz
   Rami Chowdhury
   Stefan H. Muller
@@ -141,6 +143,7 @@
   Anton Gulenko
   Sergey Matyunin
   Andrew Chambers
+  Łukasz Langa
   Nicolas Chauvat
   Andrew Durdin
   Ben Young
@@ -263,7 +266,6 @@
   Bobby Impollonia
   Roberto De Ioris
   Jeong YunWon
-  andrewjlawrence
   Christopher Armstrong
   Aaron Tubbs
   Vasantha Ganesh K
@@ -295,7 +297,6 @@
   Ben Darnell
   Juan Francisco Cantero Hurtado
   Godefroid Chappelle
-  Julian Berman
   Stephan Busemann
   Dan Colish
   timo
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -495,6 +495,9 @@
 * SyntaxError_ s try harder to give details about the cause of the failure, so
   the error messages are not the same as in CPython
 
+* Dictionaries and sets are ordered on PyPy.  On CPython < 3.6 they are not;
+  on CPython >= 3.6 dictionaries (but not sets) are ordered.
+
 
 .. _extension-modules:
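A short illustration (not part of the diff) of the ordering difference described above::

    d = {}
    s = set()
    for k in 'cab':
        d[k] = None
        s.add(k)
    print(list(d))   # ['c', 'a', 'b'] on PyPy and on CPython >= 3.6
    print(list(s))   # ['c', 'a', 'b'] on PyPy; arbitrary order on CPython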
 
diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -203,7 +203,7 @@
 
 ``duration``
     The total time spent inside minor collections since the last hook
-    call. See below for more information on the unit.
+    call, in seconds.
 
 ``duration_min``
     The duration of the fastest minor collection since the last hook call.
@@ -265,30 +265,6 @@
 ``gc-collect-done`` is used only to give additional stats, but doesn't do any
 actual work.
 
-A note about the ``duration`` field: depending on the architecture and
-operating system, PyPy uses different ways to read timestamps, so ``duration``
-is expressed in varying units. It is possible to know which by calling
-``__pypy__.debug_get_timestamp_unit()``, which can be one of the following
-values:
-
-``tsc``
-    The default on ``x86`` machines: timestamps are expressed in CPU ticks, as
-    read by the `Time Stamp Counter`_.
-
-``ns``
-    Timestamps are expressed in nanoseconds.
-
-``QueryPerformanceCounter``
-    On Windows, in case for some reason ``tsc`` is not available: timestamps
-    are read using the win API ``QueryPerformanceCounter()``.
-
-
-Unfortunately, there does not seem to be a reliable standard way for
-converting ``tsc`` ticks into nanoseconds, although in practice on modern CPUs
-it is enough to divide the ticks by the maximum nominal frequency of the CPU.
-For this reason, PyPy gives the raw value, and leaves the job of doing the
-conversion to external libraries.
-
 Here is an example of GC hooks in use::
 
     import sys
@@ -321,8 +297,6 @@
         lst = [lst, 1, 2, 3]
 
 
-.. _`Time Stamp Counter`: https://en.wikipedia.org/wiki/Time_Stamp_Counter    
-    
 .. _minimark-environment-variables:
 
 Environment variables
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -40,11 +40,11 @@
   $ hg up -r default
   $ # edit the version to e.g. 7.0.0-final
   $ hg ci
-  $ hg branch release-pypy2.7-7.x && hg ci
+  $ hg branch release-pypy2.7-v7.x && hg ci
   $ hg up -r default
   $ # edit the version to 7.1.0-alpha0
   $ hg ci
-  $ hg up -r release-pypy2.7-7.x
+  $ hg up -r release-pypy2.7-v7.x
   $ hg merge default
   $ # edit the version to AGAIN 7.0.0-final
   $ hg ci
@@ -53,11 +53,11 @@
 
   $ hg up -r py3.5
   $ hg merge default # this brings the version fo 7.1.0-alpha0
-  $ hg branch release-pypy3.5-7.x
+  $ hg branch release-pypy3.5-v7.x
   $ # edit the version to 7.0.0-final
   $ hg ci
   $ hg up -r py3.5
-  $ hg merge release-pypy3.5-7.x
+  $ hg merge release-pypy3.5-v7.x
   $ # edit the version to 7.1.0-alpha0
   $ hg ci
 
@@ -109,9 +109,11 @@
   * add a tag on the pypy/jitviewer repo that corresponds to pypy release, so
     that the source tarball can be produced in the next steps
 
-  * download the builds, repackage binaries. Tag the release version
-    and download and repackage source from bitbucket. You may find it
-    convenient to use the ``repackage.sh`` script in pypy/tool/release to do this.
+  * download the builds, repackage binaries. Tag the release-candidate version
+    (it is important to mark this as a candidate since usually at least two
+    tries are needed to complete the process) and download and repackage source
+    from bitbucket. You may find it convenient to use the ``repackage.sh``
+    script in pypy/tool/release to do this. 
 
     Otherwise repackage and upload source "-src.tar.bz2" to bitbucket
     and to cobra, as some packagers prefer a clearly labeled source package
@@ -135,3 +137,5 @@
 
   * add a tag on the codespeed web site that corresponds to pypy release
   * revise versioning at https://readthedocs.org/projects/pypy
+  * tag the final release(s) with appropriate tags
+
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -1,11 +1,12 @@
 Historical release notes
 ========================
 
-CPython 2.7 compatible versions
--------------------------------
+Combined releases
+-----------------
 
 .. toctree::
 
+   release-v7.1.0.rst
    release-v7.0.0.rst
    release-v6.0.0.rst
    release-v5.10.1.rst
@@ -14,6 +15,12 @@
    release-v5.8.0.rst
    release-v5.7.1.rst
    release-v5.7.0.rst
+
+CPython 2.7 compatible versions
+-------------------------------
+
+.. toctree::
+
    release-pypy2.7-v5.6.0.rst
    release-pypy2.7-v5.4.1.rst
    release-pypy2.7-v5.4.0.rst
@@ -61,15 +68,6 @@
    release-0.7.0.rst
    release-0.6
 
-CPython 3.5 compatible versions
--------------------------------
-
-.. toctree::
-
-   release-v5.8.0.rst
-   release-v5.7.1.rst
-   release-v5.7.0.rst
-
 CPython 3.3 compatible versions
 -------------------------------
 
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -99,6 +99,8 @@
     If set, equivalent to the ``-W`` option (warning control).
     The value should be a comma-separated list of ``-W`` parameters.
 
+.. _pypylog:
+
 ``PYPYLOG``
     If set to a non-empty value, enable logging, the format is:
 
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -5,7 +5,7 @@
 ----------------
 
 We are happy to discuss ideas around the PyPy ecosystem.
-If you are interested in palying with RPython or PyPy, or have a new idea not
+If you are interested in playing with RPython or PyPy, or have a new idea not
 mentioned here please join us on irc, channel #pypy (freenode). If you are unsure,
 but still think that you can make a valuable contribution to PyPy, dont
 hesitate to contact us on #pypy or on our mailing list. Here are some ideas
diff --git a/pypy/doc/release-v7.1.0.rst b/pypy/doc/release-v7.1.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-v7.1.0.rst
@@ -0,0 +1,121 @@
+=========================================
+PyPy v7.1.0: release of 2.7, and 3.6-beta
+=========================================
+
+The PyPy team is proud to release the version 7.1.0 of PyPy, which includes
+two different interpreters:
+
+  - PyPy2.7, which is an interpreter supporting the syntax and the features of
+    Python 2.7
+
+  - PyPy3.6-beta: this is the second official release of PyPy to support 3.6
+    features, although it is still considered beta quality.
+    
+The interpreters are based on much the same codebase, thus the double
+release.
+
+This release, coming fast on the heels of 7.0 in February, finally merges the
+internal refactoring of unicode representation as UTF-8. Removing the
+conversions from strings to unicode internally led to a nice speed bump.
+
+We also improved the ability to use the buffer protocol with ctypes structures
+and arrays.
+
+Until we can work with downstream providers to distribute builds with PyPy, we
+have made packages for some common packages `available as wheels`_.
+
+The `CFFI`_ backend has been updated to version 1.12.2. We recommend using CFFI
+rather than c-extensions to interact with C, and `cppyy`_ for interacting with
+C++ code.
+
+As always, this release is 100% compatible with the previous one and fixes
+several issues and bugs raised by the growing community of PyPy users.
+We strongly recommend updating.
+
+The PyPy3.6 release is still not production quality so your mileage may vary.
+There are open issues with incomplete compatibility and c-extension support.
+
+You can download the v7.1 releases here:
+
+    http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project. If PyPy is not quite good enough for your needs, we are available for
+direct consulting work.
+
+We would also like to thank our contributors and encourage new people to join
+the project. PyPy has many layers and we need help with all of them: `PyPy`_
+and `RPython`_ documentation improvements, tweaking popular modules to run
+on pypy, or general `help`_ with making RPython's JIT even better.
+
+.. _`PyPy`: index.html
+.. _`RPython`: https://rpython.readthedocs.org
+.. _`help`: project-ideas.html
+.. _`CFFI`: http://cffi.readthedocs.io
+.. _`cppyy`: https://cppyy.readthedocs.io
+.. _`available as wheels`: https://github.com/antocuni/pypy-wheels
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7, 3.6. It's fast (`PyPy and CPython 2.7.x`_ performance
+comparison) due to its integrated tracing JIT compiler.
+
+We also welcome developers of other `dynamic languages`_ to see what RPython
+can do for them.
+
+This PyPy release supports:
+
+  * **x86** machines on most common operating systems
+    (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD)
+
+  * big- and little-endian variants of **PPC64** running Linux,
+
+  * **s390x** running Linux
+
+Unfortunately at the moment of writing our ARM buildbots are out of service,
+so for now we are **not** releasing any binary for the ARM architecture,
+although PyPy does support ARM 32 bit processors.
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://rpython.readthedocs.io/en/latest/examples.html
+
+
+Changelog
+=========
+
+Changes shared across versions
+
+* Use utf8 internally to represent unicode, with the goal of never using
+  rpython-level unicode
+* Update ``cffi`` to 1.12.2
+* Improve performance of ``long`` operations where one of the operands fits
+  into an ``int``
+* Since _ctypes is implemented in pure python over libffi, add interfaces and
+  methods to support the buffer interface from python. Specifically, add a
+  ``__pypy__.newmemoryview`` function to create a memoryview and extend the use
+  of the PyPy-specific ``__buffer__`` class method. This enables better
+  buffer sharing between ctypes and NumPy.
+* Add copying to zlib
+* Improve register allocation in the JIT by using better heuristics
+* Include ``<sys/sysmacros.h>`` on Gnu/Hurd
+* Mostly for completeness' sake: support for ``rlib.jit.promote_unicode``, which
+  behaves like ``promote_string``, but for rpython unicode objects
+* Correctly initialize the ``d_type`` and ``d_name`` members of builtin
+  descriptors to fix a segfault related to classmethods in Cython
+* Expand documentation of the ``__pypy__`` module
+
+C-API (cpyext) improvements shared across versions
+
+* Move PyTuple_Type.tp_new to C
+* Call internal methods from ``PyDict_XXXItem()`` instead of going through
+  dunder methods (CPython cpyext compatibility)
+
+Python 3.6 only
+
+* Support for os.PathLike in the posix module
+* Update ``idlelib`` for 3.6.1
+* Make ``BUILD_CONST_KEY_MAP`` JIT-friendly
+* Adapt code that optimizes ``sys.exc_info()`` to wordcode
+* Fix annotation bug found by ``attrs``
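To illustrate the improved ctypes buffer support mentioned in the changelog, a sketch (not part of the release notes) of zero-copy sharing with NumPy; assumes NumPy is installed::

    import numpy as np
    from ctypes import Structure, c_int16

    class Sample(Structure):
        _fields_ = [('a', c_int16), ('b', c_int16)]

    samples = (Sample * 4)()
    # frombuffer wraps the ctypes array without copying, so writes through
    # the NumPy view are visible from ctypes and vice versa.
    view = np.frombuffer(samples, dtype=np.int16).reshape(4, 2)
    view[0, 0] = 7
    print(samples[0].a)   # 7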
diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py
--- a/pypy/doc/tool/makecontributor.py
+++ b/pypy/doc/tool/makecontributor.py
@@ -1,4 +1,5 @@
 # NOTE: run this script with LANG=en_US.UTF-8
+# works with pip install mercurial==3.0
 
 import py
 import sys
@@ -89,6 +90,7 @@
     'Laurence Tratt': ['ltratt'],
     'Pieter Zieschang': ['pzieschang', 'p_ziesch...@yahoo.de'],
     'John Witulski': ['witulski'],
+    'Andrew Lawrence': ['andrew.lawre...@siemens.com', 'andrewjlawrence'],
     }
 
 alias_map = {}
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -1,34 +1,11 @@
 ==========================
-What's new in PyPy2.7 7.0+
+What's new in PyPy2.7 7.1+
 ==========================
 
-.. this is a revision shortly after release-pypy-7.0.0
-.. startrev: 481c69f7d81f
+.. this is a revision shortly after release-pypy-7.1.0
+.. startrev: d3aefbf6dae7
 
-.. branch: zlib-copying-third-time-a-charm
+.. branch: Twirrim/minor-typo-fix-1553456951526
 
-Make sure zlib decompressobjs have their streams deallocated immediately
-on flush.
+Fix typo
 
-.. branch: zlib-copying-redux
-
-Fix calling copy on already-flushed compressobjs.
-
-
-
-.. branch: math-improvements
-
-Improve performance of long operations where one of the operands fits into
-an int.
-
-.. branch: regalloc-playground
-
-Improve register allocation in the JIT.
-
-.. branch: promote-unicode
-
-Implement rlib.jit.promote_unicode to complement promote_string
-
-.. branch: unicode-utf8
-
-Use utf8 internally to represent unicode, with the goal of never using rpython-level unicode
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-pypy2-7.1.0.rst
copy from pypy/doc/whatsnew-head.rst
copy to pypy/doc/whatsnew-pypy2-7.1.0.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-pypy2-7.1.0.rst
@@ -32,3 +32,10 @@
 .. branch: unicode-utf8
 
 Use utf8 internally to represent unicode, with the goal of never using rpython-level unicode
+
+.. branch: newmemoryview-app-level
+
+Since _ctypes is implemented in pure python over libffi, add interfaces and
+methods to support the buffer interface from python. Specifically, add a
+``__pypy__.newmemoryview`` function to create a memoryview and extend the use
+of the PyPy-specific ``__buffer__`` class method.
\ No newline at end of file
diff --git a/pypy/doc/whatsnew-pypy3-head.rst b/pypy/doc/whatsnew-pypy3-7.1.0.rst
copy from pypy/doc/whatsnew-pypy3-head.rst
copy to pypy/doc/whatsnew-pypy3-7.1.0.rst
diff --git a/pypy/doc/whatsnew-pypy3-head.rst b/pypy/doc/whatsnew-pypy3-head.rst
--- a/pypy/doc/whatsnew-pypy3-head.rst
+++ b/pypy/doc/whatsnew-pypy3-head.rst
@@ -1,11 +1,10 @@
 ========================
-What's new in PyPy3 7.0+
+What's new in PyPy3 7.1+
 ========================
 
-.. this is the revision after release-pypy3.6-v7.0
-.. startrev: 33fe3b2cf186
+.. this is the revision after release-pypy3.6-v7.1
+.. startrev: d642a3c217cb
 
-.. branch: py3.5
+.. branch: zlib-make-py3-go-boom
 
-Merge in py.35 and use this branch as the primary pypy3 one
-
+Complain if you try to copy a flushed zlib decompress on py3
diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py
--- a/pypy/interpreter/unicodehelper.py
+++ b/pypy/interpreter/unicodehelper.py
@@ -362,6 +362,8 @@
     valid so we're trying to either raise or pack stuff with error handler.
     The key difference is that this is call_may_force
     """
+    if errors is None:
+        errors = 'strict'
     slen = len(s)
     res = StringBuilder(slen)
     pos = 0
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -62,11 +62,18 @@
 class PyPyDateTime(MixedModule):
     appleveldefs = {}
     interpleveldefs = {
-        'dateinterop': 'interp_pypydatetime.W_DateTime_Date',
-        'timeinterop'    : 'interp_pypydatetime.W_DateTime_Time',
-        'deltainterop'   : 'interp_pypydatetime.W_DateTime_Delta',
+        'dateinterop'  : 'interp_pypydatetime.W_DateTime_Date',
+        'timeinterop'  : 'interp_pypydatetime.W_DateTime_Time',
+        'deltainterop' : 'interp_pypydatetime.W_DateTime_Delta',
     }
 
+class PyPyBufferable(MixedModule):
+    appleveldefs = {}
+    interpleveldefs = {
+        'bufferable': 'interp_buffer.W_Bufferable',
+    }
+        
+
 class Module(MixedModule):
     """ PyPy specific "magic" functions. A lot of them are experimental and
     subject to change, many are internal. """
@@ -111,6 +118,7 @@
         'fsencode'                  : 'interp_magic.fsencode',
         'fsdecode'                  : 'interp_magic.fsdecode',
         'pyos_inputhook'            : 'interp_magic.pyos_inputhook',
+        'newmemoryview'             : 'interp_buffer.newmemoryview',
     }
 
     submodules = {
@@ -120,6 +128,7 @@
         "intop": IntOpModule,
         "os": OsModule,
         '_pypydatetime': PyPyDateTime,
+        'bufferable': PyPyBufferable,
     }
 
     def setup_after_space_initialization(self):
diff --git a/pypy/module/__pypy__/interp_buffer.py b/pypy/module/__pypy__/interp_buffer.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/__pypy__/interp_buffer.py
@@ -0,0 +1,100 @@
+#
+# An app-level interface to tp_as_buffer->bf_getbuffer.
+#
+
+from pypy.interpreter.error import oefmt
+from pypy.interpreter.gateway import unwrap_spec, interp2app
+from pypy.objspace.std.memoryobject import BufferViewND
+from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.typedef import TypeDef, generic_new_descr
+
+class W_Bufferable(W_Root):
+    def __init__(self, space):
+        pass
+
+    def descr_buffer(self, space, w_flags):
+        if type(self) is W_Bufferable:
+            raise oefmt(space.w_ValueError, "override __buffer__ in a subclass")
+        return space.call_method(self, '__buffer__', w_flags)
+
+    def readbuf_w(self, space):
+        mv = space.call_method(self, '__buffer__', space.newint(0))
+        return mv.buffer_w(space, 0).as_readbuf()
+
+W_Bufferable.typedef = TypeDef("Bufferable", None, None, 'read-write',
+    __doc__ = """a helper class for an app-level class (like _ctypes.Array)
+that wants to support tp_as_buffer.bf_getbuffer via a __buffer__ method""",
+    __new__ = generic_new_descr(W_Bufferable),
+    __buffer__ = interp2app(W_Bufferable.descr_buffer),
+)
+
+@unwrap_spec(itemsize=int, format='text')
+def newmemoryview(space, w_obj, itemsize, format, w_shape=None, w_strides=None):
+    '''
+    newmemoryview(buf, itemsize, format, shape=None, strides=None)
+    '''
+    if not space.isinstance_w(w_obj, space.w_memoryview):
+        raise oefmt(space.w_ValueError, "memoryview expected")
+    # minimal error checking
+    lgt = space.len_w(w_obj)
+    old_size = w_obj.getitemsize()
+    nbytes = lgt * old_size
+    if w_shape:
+        tot = 1
+        shape = []
+        for w_v in space.listview(w_shape):
+            v = space.int_w(w_v)
+            shape.append(v)
+            tot *= v
+        if tot * itemsize != nbytes:
+            raise oefmt(space.w_ValueError,
+                  "shape/itemsize %s/%d does not match obj len/itemsize %d/%d",
+                  str(shape), itemsize, lgt, old_size)
+    else:
+        if nbytes % itemsize != 0:
+            raise oefmt(space.w_ValueError,
+                  "itemsize %d does not match obj len/itemsize %d/%d",
+                  itemsize, lgt, old_size)
+        shape = [nbytes / itemsize,]
+    ndim = len(shape)
+    if w_strides:
+        strides = [] 
+        for w_v in space.listview(w_strides):
+            v = space.int_w(w_v)
+            strides.append(v)
+        if not w_shape and len(strides) != 1:
+            raise oefmt(space.w_ValueError,
+                  "strides must have one value if shape not provided")
+        if len(strides) != ndim:
+            raise oefmt(space.w_ValueError,
+                  "shape %s does not match strides %s",
+                  str(shape), str(strides))
+    else:
+        # start from the right, c-order layout
+        strides = [itemsize] * ndim
+        for v in range(ndim - 2, -1, -1):
+            strides[v] = strides[v + 1] * shape[v + 1]
+    # check that the strides are not too big
+    for i in range(ndim):
+        if strides[i] * shape[i] > nbytes:
+            raise oefmt(space.w_ValueError,
+                  "shape %s and strides %s exceed object size %d",
+                  shape, strides, nbytes)
+    view = space.buffer_w(w_obj, 0)
+    return space.newmemoryview(FormatBufferViewND(view, itemsize, format, ndim,
+                                                  shape, strides))
+
+class FormatBufferViewND(BufferViewND):
+    _immutable_ = True
+    _attrs_ = ['readonly', 'parent', 'ndim', 'shape', 'strides',
+               'format', 'itemsize']
+    def __init__(self, parent, itemsize, format, ndim, shape, strides):
+        BufferViewND.__init__(self, parent, ndim, shape, strides)
+        self.format = format
+        self.itemsize = itemsize
+
+    def getformat(self):
+        return self.format
+
+    def getitemsize(self):
+        return self.itemsize
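When no strides are passed, ``newmemoryview`` derives C-contiguous strides from the shape and itemsize; a pure-python sketch (not part of the diff) of that computation::

    def c_order_strides(shape, itemsize):
        # Rightmost dimension is contiguous; each stride to the left is the
        # product of the sizes of everything to its right.
        strides = [itemsize] * len(shape)
        for i in range(len(shape) - 2, -1, -1):
            strides[i] = strides[i + 1] * shape[i + 1]
        return tuple(strides)

    print(c_order_strides((2, 3), 2))   # (6, 2), as in the app-level test below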
diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py
--- a/pypy/module/__pypy__/interp_builders.py
+++ b/pypy/module/__pypy__/interp_builders.py
@@ -64,9 +64,12 @@
         return W_UnicodeBuilder(space, 3 * size)
 
     def descr_append(self, space, w_s):
-        w_unicode = W_UnicodeObject.convert_arg_to_w_unicode(space, w_s)
-        s = space.utf8_w(w_unicode)
-        self.builder.append(s)
+        if isinstance(w_s, W_UnicodeObject):
+            self.builder.append_utf8(w_s._utf8, w_s._len())
+        else:
+            w_unicode = W_UnicodeObject.convert_arg_to_w_unicode(space, w_s)
+            s = space.utf8_w(w_unicode)
+            self.builder.append(s)
 
     @unwrap_spec(start=int, end=int)
     def descr_append_slice(self, space, w_s, start, end):
diff --git a/pypy/module/__pypy__/test/test_builders.py b/pypy/module/__pypy__/test/test_builders.py
--- a/pypy/module/__pypy__/test/test_builders.py
+++ b/pypy/module/__pypy__/test/test_builders.py
@@ -1,14 +1,16 @@
+# -*- encoding: utf-8 -*-
+
 class AppTestBuilders(object):
     spaceconfig = dict(usemodules=['__pypy__'])
 
     def test_simple(self):
         from __pypy__.builders import StringBuilder
         b = StringBuilder()
-        b.append(u"abc")
+        b.append(u"abc&#228;")
         b.append(u"123")
         b.append(u"1")
         s = b.build()
-        assert s == u"abc1231"
+        assert s == u"abc&#228;1231"
         assert b.build() == s
         b.append(u"123")
         assert b.build() == s + u"123"
diff --git a/pypy/module/__pypy__/test/test_newmemoryview.py b/pypy/module/__pypy__/test/test_newmemoryview.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/__pypy__/test/test_newmemoryview.py
@@ -0,0 +1,32 @@
+
+
+class AppTestMinimal:
+    spaceconfig = dict(usemodules=['__pypy__'])
+
+    def test_newmemoryview(self):
+        from __pypy__ import newmemoryview
+        b = bytearray(12)
+        # The format can be anything, we only verify shape, strides, and itemsize
+        m = newmemoryview(memoryview(b), 2, 'T{<h:a}', shape=(2, 3))
+        assert m.strides == (6, 2)
+        m = newmemoryview(memoryview(b), 2, 'T{<h:a}', shape=(2, 3),
+                          strides=(6, 2))
+        assert m.strides == (6, 2)
+        assert m.format == 'T{<h:a}'
+        assert m.itemsize == 2
+
+    def test_bufferable(self):
+        from __pypy__ import bufferable, newmemoryview
+        class B(bufferable.bufferable):
+            def __init__(self):
+                self.data = bytearray(b'abc')
+
+            def __buffer__(self, flags):
+                return newmemoryview(memoryview(self.data), 1, 'B')
+
+
+        obj = B()
+        buf = memoryview(obj)
+        v = obj.data[2]
+        assert buf[2] == v
+
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
 from rpython.rlib import rdynload, clibffi
 from rpython.rtyper.lltypesystem import rffi
 
-VERSION = "1.12.1"
+VERSION = "1.12.2"
 
 FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
 try:
diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py
--- a/pypy/module/_cffi_backend/ctypestruct.py
+++ b/pypy/module/_cffi_backend/ctypestruct.py
@@ -238,26 +238,32 @@
         else:
             self.ctype.convert_from_object(cdata, w_ob)
 
+    def add_varsize_length(self, space, itemsize, varsizelength, optvarsize):
+        # returns an updated 'optvarsize' to account for an array of
+        # 'varsizelength' elements, each of size 'itemsize', that starts
+        # at 'self.offset'.
+        try:
+            varsize = ovfcheck(itemsize * varsizelength)
+            size = ovfcheck(self.offset + varsize)
+        except OverflowError:
+            raise oefmt(space.w_OverflowError,
+                        "array size would overflow a ssize_t")
+        assert size >= 0
+        return max(size, optvarsize)
+
     def write_v(self, cdata, w_ob, optvarsize):
         # a special case for var-sized C99 arrays
         from pypy.module._cffi_backend import ctypearray
         ct = self.ctype
+        space = ct.space
         if isinstance(ct, ctypearray.W_CTypeArray) and ct.length < 0:
-            space = ct.space
             w_ob, varsizelength = ct.get_new_array_length(w_ob)
             if optvarsize != -1:
                 # in this mode, the only purpose of this function is to compute
                 # the real size of the structure from a var-sized C99 array
                 assert cdata == lltype.nullptr(rffi.CCHARP.TO)
-                itemsize = ct.ctitem.size
-                try:
-                    varsize = ovfcheck(itemsize * varsizelength)
-                    size = ovfcheck(self.offset + varsize)
-                except OverflowError:
-                    raise oefmt(space.w_OverflowError,
-                                "array size would overflow a ssize_t")
-                assert size >= 0
-                return max(size, optvarsize)
+                return self.add_varsize_length(space, ct.ctitem.size,
+                    varsizelength, optvarsize)
             # if 'value' was only an integer, get_new_array_length() returns
             # w_ob = space.w_None.  Detect if this was the case,
             # and if so, stop here, leaving the content uninitialized
@@ -267,6 +273,12 @@
         #
         if optvarsize == -1:
             self.write(cdata, w_ob)
+        elif (isinstance(ct, W_CTypeStructOrUnion) and ct._with_var_array and
+              not isinstance(w_ob, cdataobj.W_CData)):
+            subsize = ct.size
+            subsize = ct.convert_struct_from_object(
+                lltype.nullptr(rffi.CCHARP.TO), w_ob, subsize)
+            optvarsize = self.add_varsize_length(space, 1, subsize, optvarsize)
         return optvarsize
 
     def convert_bitfield_to_object(self, cdata):
diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
--- a/pypy/module/_cffi_backend/newtype.py
+++ b/pypy/module/_cffi_backend/newtype.py
@@ -368,6 +368,16 @@
                 raise oefmt(space.w_TypeError,
                             "field '%s.%s' has ctype '%s' of unknown size",
                             w_ctype.name, fname, ftype.name)
+        elif isinstance(ftype, ctypestruct.W_CTypeStructOrUnion):
+            ftype.force_lazy_struct()
+            # GCC (or maybe C99) accepts var-sized struct fields that are not
+            # the last field of a larger struct.  That's why there is no
+            # check here for "last field": we propagate the flag
+            # '_with_var_array' to any struct that contains either an open-
+            # ended array or another struct that recursively contains an
+            # open-ended array.
+            if ftype._with_var_array:
+                with_var_array = True
         #
         if is_union:
             boffset = 0         # reset each field at offset 0
@@ -419,7 +429,6 @@
                 # a nested anonymous struct or union
                 # note: it seems we only get here with ffi.verify()
                 srcfield2names = {}
-                ftype.force_lazy_struct()
                 for name, srcfld in ftype._fields_dict.items():
                     srcfield2names[srcfld] = name
                 for srcfld in ftype._fields_list:
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
 # ____________________________________________________________
 
 import sys
-assert __version__ == "1.12.1", ("This test_c.py file is for testing a version"
+assert __version__ == "1.12.2", ("This test_c.py file is for testing a version"
                                  " of cffi that differs from the one that we"
                                  " get from 'import _cffi_backend'")
 if sys.version_info < (3,):
@@ -3441,6 +3441,15 @@
     assert p.a[1] == 20
     assert p.a[2] == 30
     assert p.a[3] == 0
+    #
+    # struct of struct of varsized array
+    BStruct2 = new_struct_type("bar")
+    complete_struct_or_union(BStruct2, [('head', BInt),
+                                        ('tail', BStruct)])
+    for i in range(2):   # try to detect heap overwrites
+        p = newp(new_pointer_type(BStruct2), [100, [200, list(range(50))]])
+        assert p.tail.y[49] == 49
+
 
 def test_struct_array_no_length_explicit_position():
     BInt = new_primitive_type("int")
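The same behaviour, written against the high-level cffi API instead of the backend calls in the test above; a sketch that assumes a cffi/_cffi_backend new enough to contain this change (1.12.2+)::

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("""
        struct tail  { int x; int y[]; };          /* open-ended C99 array */
        struct outer { int head; struct tail t; };
    """)
    # The var-sized tail sits inside a larger struct; the allocation size is
    # derived from the initializer given for the open-ended array.
    p = ffi.new("struct outer *", [100, [200, [1, 2, 3]]])
    print(p.t.y[2])   # 3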
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -304,7 +304,7 @@
         while pos < end:
             oc = ord(obj[pos])
             raw_unicode_escape_helper(builder, oc)
-            pos += 1 
+            pos += 1
         return space.newtuple([space.newtext(builder.build()), w_end])
     else:
         raise oefmt(space.w_TypeError,
@@ -526,7 +526,10 @@
 
 def _call_codec(space, w_coder, w_obj, action, encoding, errors):
     try:
-        w_res = space.call_function(w_coder, w_obj, space.newtext(errors))
+        if errors:
+            w_res = space.call_function(w_coder, w_obj, space.newtext(errors))
+        else:
+            w_res = space.call_function(w_coder, w_obj)
     except OperationError as operr:
         raise _wrap_codec_error(space, operr, action, encoding)
     if (not space.isinstance_w(w_res, space.w_tuple) or space.len_w(w_res) != 2):
@@ -558,8 +561,8 @@
     return w_err_handler
 
 
-@unwrap_spec(errors='text')
-def encode(space, w_obj, w_encoding=None, errors='strict'):
+@unwrap_spec(encoding='text_or_none', errors='text_or_none')
+def encode(space, w_obj, encoding=None, errors=None):
     """encode(obj, [encoding[,errors]]) -> object
 
     Encodes obj using the codec registered for encoding. encoding defaults
@@ -569,20 +572,26 @@
     'xmlcharrefreplace' as well as any other name registered with
     codecs.register_error that can handle ValueErrors.
     """
-    if w_encoding is None:
+    if encoding is None:
         encoding = space.sys.defaultencoding
-    else:
-        encoding = space.text_w(w_encoding)
     w_encoder = space.getitem(lookup_codec(space, encoding), space.newint(0))
-    return _call_codec(space, w_encoder, w_obj, "encoding", encoding, errors)
+    w_retval =  _call_codec(space, w_encoder, w_obj, "encoding", encoding, errors)
+    if not space.isinstance_w(w_retval, space.w_bytes):
+        raise oefmt(space.w_TypeError,
+                    "'%s' encoder returned '%T' instead of 'bytes'; "
+                    "use codecs.encode() to encode to arbitrary types",
+                    encoding,
+                    w_retval)
+    return w_retval
 
 @unwrap_spec(errors='text_or_none')
 def readbuffer_encode(space, w_data, errors='strict'):
     s = space.getarg_w('s#', w_data)
     return space.newtuple([space.newbytes(s), space.newint(len(s))])
 
-@unwrap_spec(errors='text')
-def decode(space, w_obj, w_encoding=None, errors='strict'):
+@unwrap_spec(encoding='text_or_none', errors='text_or_none')
+def decode(space, w_obj, encoding=None, errors=None):
+    from pypy.objspace.std.unicodeobject import W_UnicodeObject
     """decode(obj, [encoding[,errors]]) -> object
 
     Decodes obj using the codec registered for encoding. encoding defaults
@@ -592,12 +601,17 @@
     as well as any other name registered with codecs.register_error that is
     able to handle ValueErrors.
     """
-    if w_encoding is None:
+    if encoding is None:
         encoding = space.sys.defaultencoding
-    else:
-        encoding = space.text_w(w_encoding)
     w_decoder = space.getitem(lookup_codec(space, encoding), space.newint(1))
-    return _call_codec(space, w_decoder, w_obj, "decoding", encoding, errors)
+    w_retval = _call_codec(space, w_decoder, w_obj, "decoding", encoding, errors)
+    if not isinstance(w_retval, W_UnicodeObject):
+        raise oefmt(space.w_TypeError,
+                    "'%s' decoder returned '%T' instead of 'str'; "
+                    "use codecs.decode() to decode to arbitrary types",
+                    encoding,
+                    w_retval)
+    return w_retval
 
 @unwrap_spec(errors='text')
 def register_error(space, errors, w_handler):
@@ -633,20 +647,6 @@
                     "use %s to handle arbitrary codecs", encoding, action)
     return codec_info
 
-def encode_text(space, w_obj, encoding, errors):
-    if errors is None:
-        errors = 'strict'
-    w_encoder = space.getitem(
-        lookup_text_codec(space, "codecs.encode()", encoding), space.newint(0))
-    return _call_codec(space, w_encoder, w_obj, "encoding", encoding, errors)
-
-def decode_text(space, w_obj, encoding, errors):
-    if errors is None:
-        errors = 'strict'
-    w_decoder = space.getitem(
-        lookup_text_codec(space, "codecs.decode()", encoding), space.newint(1))
-    return _call_codec(space, w_decoder, w_obj, "decoding", encoding, errors)
-
 # ____________________________________________________________
 
 def _find_implementation(impl_name):
@@ -736,7 +736,7 @@
         result = unicodehelper.utf8_encode_utf_8(utf8, errors,
                      state.encode_error_handler, allow_surrogates=False)
     except unicodehelper.ErrorHandlerError as e:
-        raise oefmt(space.w_IndexError, 
+        raise oefmt(space.w_IndexError,
                    "position %d from error handler invalid, already encoded 
%d",
                     e.new,e.old)
 
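Besides letting _call_codec() invoke one-argument codec functions when no errors argument is given (exercised by the new app-level test below), encode() and decode() now verify that the codec actually returned bytes or str. A small sketch of that check, not taken from this diff; the codec name is made up:

    import _codecs

    def search_function(encoding):
        if encoding != 'badreturn':
            return None
        def encode_bad(u, errors='strict'):
            return (bytearray(b'foo'), len(u))   # a buffer, but not bytes
        def decode_bad(b, errors='strict'):
            return (b'foo', len(b))              # bytes, but not str
        return (encode_bad, decode_bad, None, None)

    _codecs.register(search_function)
    try:
        _codecs.encode(u"hello", "badreturn")
    except TypeError:
        # "'badreturn' encoder returned 'bytearray' instead of 'bytes'; ..."
        pass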
diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py
--- a/pypy/module/_codecs/test/test_codecs.py
+++ b/pypy/module/_codecs/test/test_codecs.py
@@ -696,6 +696,22 @@
         exc = raises(RuntimeError, u"hello".encode, "test.failingenc")
         assert exc.value == to_raise
 
+    def test_one_arg_encoder(self):
+        import _codecs
+        def search_function(encoding):
+            def encode_one(u):
+                return (b'foo', len(u))
+            def decode_one(u):
+                return (u'foo', len(u))
+            if encoding == 'onearg':
+                return (encode_one, decode_one, None, None)
+            return None
+        _codecs.register(search_function)
+        assert u"hello".encode("onearg") == b'foo'
+        assert b"hello".decode("onearg") == u'foo'
+        assert _codecs.encode(u"hello", "onearg") == b'foo'
+        assert _codecs.decode(b"hello", "onearg") == u'foo'
+
     def test_cpytest_decode(self):
         import codecs
         assert codecs.decode(b'\xe4\xf6\xfc', 'latin-1') == '\xe4\xf6\xfc'
diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py
--- a/pypy/module/_multiprocessing/test/test_semaphore.py
+++ b/pypy/module/_multiprocessing/test/test_semaphore.py
@@ -11,7 +11,7 @@
                                    'binascii', 'struct', '_posixsubprocess'))
 
     if sys.platform == 'win32':
-        spaceconfig['usemodules'] += ('_rawffi',)
+        spaceconfig['usemodules'] += ('_rawffi', '_cffi_backend')
     else:
         spaceconfig['usemodules'] += ('fcntl',)
 
diff --git a/pypy/module/_multiprocessing/test/test_win32.py b/pypy/module/_multiprocessing/test/test_win32.py
--- a/pypy/module/_multiprocessing/test/test_win32.py
+++ b/pypy/module/_multiprocessing/test/test_win32.py
@@ -2,7 +2,7 @@
 import sys
 
 class AppTestWin32:
-    spaceconfig = dict(usemodules=('_multiprocessing',
+    spaceconfig = dict(usemodules=('_multiprocessing', '_cffi_backend',
                                    'signal', '_rawffi', 'binascii'))
 
     def setup_class(cls):
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -30,6 +30,7 @@
 from pypy.objspace.std.unicodeobject import encode_object
 from pypy.module.__builtin__.descriptor import W_Property
 #from pypy.module.micronumpy.base import W_NDimArray
+from pypy.module.__pypy__.interp_buffer import W_Bufferable
 from rpython.rlib.entrypoint import entrypoint_lowlevel
 from rpython.rlib.rposix import FdValidator
 from rpython.rlib.unroll import unrolling_iterable
@@ -731,6 +732,7 @@
         'PyMethodDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCMethodObject.typedef)',
         'PyWrapperDescr_Type': 'space.gettypeobject(cpyext.methodobject.W_PyCWrapperObject.typedef)',
         'PyInstanceMethod_Type': 'space.gettypeobject(cpyext.classobject.InstanceMethod.typedef)',
+        'PyBufferable_Type': 'space.gettypeobject(W_Bufferable.typedef)',
         }.items():
         register_global(cpyname, 'PyTypeObject*', pypyexpr, header=pypy_decl)
 
@@ -1189,7 +1191,9 @@
     state.C.get_pyos_inputhook = rffi.llexternal(
         '_PyPy_get_PyOS_InputHook', [], FUNCPTR,
         compilation_info=eci, _nowrapper=True)
-
+    state.C.tuple_new = rffi.llexternal(
+        'tuple_new', [PyTypeObjectPtr, PyObject, PyObject], PyObject,
+        compilation_info=eci, _nowrapper=True)
 
 def init_function(func):
     INIT_FUNCTIONS.append(func)
diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -32,8 +32,8 @@
  *     module/sys/version.py
  *     doc/conf.py
  */
-#define PYPY_VERSION "7.1.0-alpha0"
-#define PYPY_VERSION_NUM  0x07010000
+#define PYPY_VERSION "7.2.0-alpha0"
+#define PYPY_VERSION_NUM  0x07020000
 /* Defined to mean a PyPy where cpyext holds more regular references
    to PyObjects, e.g. staying alive as long as the internal PyPy object
    stays alive. */
diff --git a/pypy/module/cpyext/include/tupleobject.h b/pypy/module/cpyext/include/tupleobject.h
--- a/pypy/module/cpyext/include/tupleobject.h
+++ b/pypy/module/cpyext/include/tupleobject.h
@@ -18,6 +18,7 @@
 
 PyAPI_FUNC(PyObject *) PyTuple_New(Py_ssize_t size);
 PyAPI_FUNC(void) _PyPy_tuple_dealloc(PyObject *);
+PyAPI_FUNC(PyObject *) tuple_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
 
 /* defined in varargswrapper.c */
 PyAPI_FUNC(PyObject *) PyTuple_Pack(Py_ssize_t, ...);
diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py
--- a/pypy/module/cpyext/memoryobject.py
+++ b/pypy/module/cpyext/memoryobject.py
@@ -43,7 +43,9 @@
     fill_Py_buffer(space, w_obj.view, view)
     try:
         view.c_buf = rffi.cast(rffi.VOIDP, w_obj.view.get_raw_address())
-        view.c_obj = make_ref(space, w_userdata)
+        # not used in PyPy to keep something alive,
+        # but some c-extensions check the type without checking for NULL
+        view.c_obj = make_ref(space, space.w_None)
         rffi.setintfield(view, 'c_readonly', w_obj.view.readonly)
     except ValueError:
         w_s = w_obj.descr_tobytes(space)
diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -46,15 +46,15 @@
     _dealloc(space, py_obj)
 
 def w_kwargs_from_args(space, __args__):
-    w_kwargs = None
-    if __args__.keywords:
-        # CCC: we should probably have a @jit.look_inside_iff if the
-        # keyword count is constant, as we do in Arguments.unpack
-        w_kwargs = space.newdict()
-        for i in range(len(__args__.keywords)):
-            key = __args__.keywords[i]
-            w_obj = __args__.keywords_w[i]
-            space.setitem(w_kwargs, space.newtext(key), w_obj)
+    if __args__.keywords is None:
+        return None
+    # CCC: we should probably have a @jit.look_inside_iff if the
+    # keyword count is constant, as we do in Arguments.unpack
+    w_kwargs = space.newdict()
+    for i in range(len(__args__.keywords)):
+        key = __args__.keywords[i]
+        w_obj = __args__.keywords_w[i]
+        space.setitem(w_kwargs, space.newtext(key), w_obj)
     return w_kwargs
 
 def undotted_name(name):
diff --git a/pypy/module/cpyext/parse/cpyext_memoryobject.h b/pypy/module/cpyext/parse/cpyext_memoryobject.h
--- a/pypy/module/cpyext/parse/cpyext_memoryobject.h
+++ b/pypy/module/cpyext/parse/cpyext_memoryobject.h
@@ -1,6 +1,12 @@
 /* The struct is declared here but it shouldn't
    be considered public. Don't access those fields directly,
    use the functions instead! */
+
+
+/* this is wrong, PyMemoryViewObject should use PyObject_VAR_HEAD, and use
+   ob_data[1] to hold the shapes, strides, and offsets for the view. Then
+   we should use specialized allocators (that break the cpyext model) to
+   allocate ob_data = malloc(sizeof(Py_ssize_t) * view.ndims * 3) */
 typedef struct {
     PyObject_HEAD
     Py_buffer view;
diff --git a/pypy/module/cpyext/parse/cpyext_object.h b/pypy/module/cpyext/parse/cpyext_object.h
--- a/pypy/module/cpyext/parse/cpyext_object.h
+++ b/pypy/module/cpyext/parse/cpyext_object.h
@@ -52,7 +52,8 @@
 
 
 /* Py3k buffer interface, adapted for PyPy */
-#define Py_MAX_NDIMS 32
+/* XXX remove this constant, use a PyObject_VAR_HEAD instead */
+#define Py_MAX_NDIMS 36
 #define Py_MAX_FMT 128
 typedef struct bufferinfo {
     void *buf;
diff --git a/pypy/module/cpyext/src/tupleobject.c b/pypy/module/cpyext/src/tupleobject.c
--- a/pypy/module/cpyext/src/tupleobject.c
+++ b/pypy/module/cpyext/src/tupleobject.c
@@ -89,3 +89,48 @@
 done:
     Py_TRASHCAN_SAFE_END(op)
 }
+
+static PyObject *
+tuple_subtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
+
+PyObject *
+tuple_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+    PyObject *arg = NULL;
+    static char *kwlist[] = {"sequence", 0};
+
+    if (type != &PyTuple_Type)
+        return tuple_subtype_new(type, args, kwds);
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O:tuple", kwlist, &arg))
+        return NULL;
+
+    if (arg == NULL)
+        return PyTuple_New(0);
+    else
+        return PySequence_Tuple(arg);
+}
+
+static PyObject *
+tuple_subtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+    PyObject *tmp, *newobj, *item;
+    Py_ssize_t i, n;
+
+    assert(PyType_IsSubtype(type, &PyTuple_Type));
+    tmp = tuple_new(&PyTuple_Type, args, kwds);
+    if (tmp == NULL)
+        return NULL;
+    assert(PyTuple_Check(tmp));
+    newobj = type->tp_alloc(type, n = PyTuple_GET_SIZE(tmp));
+    if (newobj == NULL)
+        return NULL;
+    for (i = 0; i < n; i++) {
+        item = PyTuple_GET_ITEM(tmp, i);
+        Py_INCREF(item);
+        PyTuple_SET_ITEM(newobj, i, item);
+    }
+    Py_DECREF(tmp);
+    return newobj;
+}
+
+
diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py
--- a/pypy/module/cpyext/test/test_memoryobject.py
+++ b/pypy/module/cpyext/test/test_memoryobject.py
@@ -36,6 +36,23 @@
         decref(space, ref)
         decref(space, c_memoryview)
 
+    def test_class_with___buffer__(self, space, api):
+        w_obj = space.appexec([], """():
+            from __pypy__.bufferable import bufferable
+            class B(bufferable):
+                def __init__(self):
+                    self.buf = bytearray(10)
+
+                def __buffer__(self, flags):
+                    return memoryview(self.buf)
+            return B()""")
+        py_obj = make_ref(space, w_obj)
+        assert py_obj.c_ob_type.c_tp_as_buffer
+        assert py_obj.c_ob_type.c_tp_as_buffer.c_bf_getbuffer
+        assert py_obj.c_ob_type.c_tp_as_buffer.c_bf_getreadbuffer
+        assert py_obj.c_ob_type.c_tp_as_buffer.c_bf_getwritebuffer
+         
+
 class AppTestPyBuffer_FillInfo(AppTestCpythonExtensionBase):
     def test_fillWithObject(self):
         module = self.import_extension('foo', [
diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py
--- a/pypy/module/cpyext/test/test_methodobject.py
+++ b/pypy/module/cpyext/test/test_methodobject.py
@@ -87,6 +87,7 @@
         assert mod.getarg_KW(a=3, b=4) == ((), {'a': 3, 'b': 4})
         assert mod.getarg_KW(1, 2, a=3, b=4) == ((1, 2), {'a': 3, 'b': 4})
         assert mod.getarg_KW.__name__ == "getarg_KW"
+        assert mod.getarg_KW(*(), **{}) == ((), {})
 
 
     def test_func_attributes(self):
diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py
--- a/pypy/module/cpyext/test/test_tupleobject.py
+++ b/pypy/module/cpyext/test/test_tupleobject.py
@@ -226,3 +226,44 @@
             raises(SystemError, module.set_after_use, s)
         else:
             module.set_after_use(s)
+
+    def test_mp_length(self):
+        # issue 2968: creating a subclass of tuple in C led to recursion
+        # since the default tp_new needs to build a w_obj, but that needs
+        # to call space.len_w, which needs to call tp_new.
+        module = self.import_extension('foo', [
+            ("get_size", "METH_NOARGS",
+             """
+                return (PyObject*)&THPSizeType;
+             """),
+            ], prologue='''
+                #include "Python.h"
+
+                struct THPSize {
+                  PyTupleObject tuple;
+                } THPSize;
+
+                static PyMappingMethods THPSize_as_mapping = {
+                    0, //PyTuple_Type.tp_as_mapping->mp_length,
+                    0,
+                    0
+                };
+
+                PyTypeObject THPSizeType = {
+                  PyVarObject_HEAD_INIT(0, 0)
+                  "torch.Size",                          /* tp_name */
+                  sizeof(THPSize),                       /* tp_basicsize */
+                };
+            ''' , more_init = '''
+                THPSize_as_mapping.mp_length = PyTuple_Type.tp_as_mapping->mp_length;
+                THPSizeType.tp_base = &PyTuple_Type;
+                THPSizeType.tp_flags = Py_TPFLAGS_DEFAULT;
+                THPSizeType.tp_as_mapping = &THPSize_as_mapping;
+                THPSizeType.tp_new = PyTuple_Type.tp_new;
+                if (PyType_Ready(&THPSizeType) < 0) INITERROR;
+            ''')
+        SZ = module.get_size()
+        s = SZ((1, 2, 3))
+        assert len(s) == 3
+        assert len(s) == 3
+
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -686,6 +686,11 @@
         update_all_slots(space, w_type, pto)
     else:
         update_all_slots_builtin(space, w_type, pto)
+
+    # XXX generalize this pattern for various slot functions implemented in C
+    if space.is_w(w_type, space.w_tuple):
+        pto.c_tp_new = state.C.tuple_new
+
     if not pto.c_tp_new:
         base_object_pyo = make_ref(space, space.w_object)
         base_object_pto = rffi.cast(PyTypeObjectPtr, base_object_pyo)
diff --git a/pypy/module/errno/interp_errno.py b/pypy/module/errno/interp_errno.py
--- a/pypy/module/errno/interp_errno.py
+++ b/pypy/module/errno/interp_errno.py
@@ -43,6 +43,18 @@
     "WSAGETASYNCBUFLE", "WSAEDESTADDRREQ", "WSAECONNREFUSED", "WSAENETRESET",
     "WSAN", "WSAEDQUOT"]
 
+# The following constants were added to errno.h in VS2010 but have
+# preferred WSA equivalents, so errno.EADDRINUSE == errno.WSAEADDRINUSE.
+win_errors_override = [
+    "WSAEADDRINUSE", "WSAEADDRNOTAVAI", "WSAEAFNOSUPPORT", "WSAEALREADY",
+    "WSAECONNABORTED", "WSAECONNREFUSED", "WSAECONNRESET", "WSAEDESTADDRREQ",
+    "WSAEHOSTUNREACH", "WSAEINPROGRESS", "WSAEISCONN", "WSAELOOP",
+    "WSAEMSGSIZE", "WSAENETDOWN", "WSAENETRESET", "WSAENETUNREACH",
+    "WSAENOBUFS", "WSAENOPROTOOPT", "WSAENOTCONN", "WSAENOTSOCK",
+    "WSAEOPNOTSUPP", "WSAEPROTONOSUPPORT", "WSAEPROTOTYPE", "WSAETIMEDOUT",
+    "WSAEWOULDBLOCK",
+    ]
+
 more_errors = [
     "ENOMEDIUM", "EMEDIUMTYPE", "ECANCELED", "ENOKEY", "EKEYEXPIRED",
     "EKEYREVOKED", "EKEYREJECTED", "EOWNERDEAD", "ENOTRECOVERABLE", "ERFKILL",
@@ -80,7 +92,8 @@
     assert name.startswith('WSA')
     code = config[name]
     if code is not None:
-        if name[3:] in errors and name[3:] not in name2code:
+        if name[3:] in errors and (name in win_errors_override or 
+                                   name[3:] not in name2code):
             # errno.EFOO = <WSAEFOO>
             name2code[name[3:]] = code
         # errno.WSABAR = <WSABAR>
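The effect of the override list, per the comment above, is that the plain errno names end up aliasing their WSA counterparts even though VS2010's errno.h defines its own values for them. A minimal check, not part of this diff, assuming a Windows interpreter where the WSA constants exist:

    import sys
    import errno

    if sys.platform == 'win32':
        # With the override, the POSIX-style name reports the WSA value.
        assert errno.EADDRINUSE == errno.WSAEADDRINUSE
        assert errno.ECONNREFUSED == errno.WSAECONNREFUSED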
diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
--- a/pypy/module/sys/version.py
+++ b/pypy/module/sys/version.py
@@ -13,7 +13,7 @@
 # make sure to keep PYPY_VERSION in sync with:
 #    module/cpyext/include/patchlevel.h
 #    doc/conf.py
-PYPY_VERSION               = (7, 1, 0, "alpha", 0)
+PYPY_VERSION               = (7, 2, 0, "alpha", 0)
 
 
 import pypy
diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py
--- a/pypy/module/zlib/interp_zlib.py
+++ b/pypy/module/zlib/interp_zlib.py
@@ -313,6 +313,11 @@
         try:
             self.lock()
             try:
+                if not self.stream:
+                    raise oefmt(
+                        space.w_ValueError,
+                        "Decompressor was already flushed",
+                    )
                 copied = rzlib.inflateCopy(self.stream)
             finally:
                 self.unlock()
@@ -338,6 +343,9 @@
             if length <= 0:
                 raise oefmt(space.w_ValueError,
                             "length must be greater than zero")
+        if not self.stream:
+            raise zlib_error(space,
+                             "compressor object already flushed")
         data = self.unconsumed_tail
         try:
             self.lock()
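Both new guards cover the case where the underlying z_stream has already been released: copy() now raises ValueError ("Decompressor was already flushed") instead of calling inflateCopy on a dead stream, and the second hunk adds a matching zlib.error check before the stream is used again. A rough app-level illustration of the first guard (not from this diff; it assumes the earlier flush() really did release the stream):

    import zlib

    d = zlib.decompressobj()
    d.decompress(zlib.compress(b'data'))
    d.flush()        # may release the underlying z_stream
    try:
        d.copy()     # with the guard above: ValueError("Decompressor was already flushed")
    except ValueError:
        pass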