Author: mattip <matti.pi...@gmail.com>
Branch: cpyext-ext
Changeset: r83317:51207eafb0bd
Date: 2016-03-24 00:35 +0200
http://bitbucket.org/pypy/pypy/changeset/51207eafb0bd/

Log:    merge default into branch

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -19,3 +19,4 @@
 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0
 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
+bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
diff --git a/pypy/doc/config/translation.gc.txt b/pypy/doc/config/translation.gc.txt
--- a/pypy/doc/config/translation.gc.txt
+++ b/pypy/doc/config/translation.gc.txt
@@ -1,24 +1,26 @@
 Choose the Garbage Collector used by the translated program.
-The good performing collectors are "hybrid" and "minimark".
-The default is "minimark".
+The recommended default is "incminimark".
 
   - "ref": reference counting. Takes very long to translate and the result is
-    slow.
+    slow.  Used only for tests.  Don't use it for real RPython programs.
 
-  - "marksweep": naive mark & sweep.
+  - "none": no GC.  Leaks everything.  Don't use it for real RPython
+    programs: the rate of leaking is immense.
 
   - "semispace": a copying semi-space GC.
 
   - "generation": a generational GC using the semi-space GC for the
     older generation.
 
-  - "boehm": use the Boehm conservative GC.
-
   - "hybrid": a hybrid collector of "generation" together with a
     mark-n-sweep old space
 
-  - "markcompact": a slow, but memory-efficient collector,
-    influenced e.g. by Smalltalk systems.
+  - "boehm": use the Boehm conservative GC.
 
   - "minimark": a generational mark-n-sweep collector with good
     performance.  Includes page marking for large arrays.
+
+  - "incminimark": like minimark, but adds incremental major
+    collections.  Seems to come with no performance drawback over
+    "minimark", so it is the default.  A few recent features of PyPy
+    (like cpyext) are only working with this GC.
diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst
--- a/pypy/doc/extradoc.rst
+++ b/pypy/doc/extradoc.rst
@@ -80,7 +80,7 @@
 .. _How to *not* write Virtual Machines for Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf
 .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf
 .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf
-.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf
+.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://stups.hhu.de/mediawiki/images/b/b9/Master_bolz.pdf
 .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07
 .. _EU Reports: index-report.html
 .. _Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution: http://sabi.net/nriley/pubs/dls6-riley.pdf
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
 
 .. toctree::
 
+   release-5.0.1.rst
    release-5.0.0.rst
    release-4.0.1.rst
    release-4.0.0.rst
diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.0.1.rst
@@ -0,0 +1,40 @@
+==========
+PyPy 5.0.1
+==========
+
+We have released a bugfix for PyPy 5.0, after reports that the newly released
+`lxml 3.6.0`_, which now supports PyPy 5.0+, can `crash on large files`_.
+Thanks to those who reported the crash. Please update; downloads are available
+at pypy.org/download.html
+
+.. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0
+.. _`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260
+
+The changes between PyPy 5.0 and 5.0.1 are only two bug fixes: one in
+cpyext, which fixes notably (but not only) lxml; and another for a
+corner case of the JIT.
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports **x86** machines on most common operating systems
+(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the
+big- and little-endian variants of **PPC64** running Linux.
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -23,3 +23,7 @@
 
 Implement yet another strange numpy indexing compatibility; indexing by a scalar
 returns a scalar
+
+.. branch: fix_transpose_for_list_v3
+
+Allow arguments to transpose to be sequences
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -327,7 +327,7 @@
             # XXX possibly adapt options using modules
             failures = create_cffi_import_libraries(exename, options, basedir)
             # if failures, they were already printed
-            print  >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored'
+            print  >> sys.stderr, str(exename),'successfully built (errors, if any, while building the above modules are ignored)'
         driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver)
         driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, [compile_goal]
         driver.default_goal = 'build_cffi_imports'
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -417,7 +417,10 @@
         self.wait_for_thread_shutdown()
         w_exitfunc = self.sys.getdictvalue(self, 'exitfunc')
         if w_exitfunc is not None:
-            self.call_function(w_exitfunc)
+            try:
+                self.call_function(w_exitfunc)
+            except OperationError as e:
+                e.write_unraisable(self, 'sys.exitfunc == ', w_exitfunc)
         from pypy.interpreter.module import Module
         for w_mod in self.builtin_modules.values():
             if isinstance(w_mod, Module) and w_mod.startup_called:
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -416,3 +416,14 @@
             i -= 1
             assert i >= 0
             gc.collect()
+
+    def test_exitfunc_catches_exceptions(self):
+        from pypy.tool.pytest.objspace import maketestobjspace
+        space = maketestobjspace()
+        space.appexec([], """():
+            import sys
+            sys.exitfunc = lambda: this_is_an_unknown_name
+        """)
+        space.finish()
+        # assert that we reach this point without getting interrupted
+        # by the OperationError(NameError)
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
--- a/pypy/module/_cffi_backend/ctypeptr.py
+++ b/pypy/module/_cffi_backend/ctypeptr.py
@@ -124,7 +124,7 @@
                         s = rffi.charp2str(ptr)
                     else:
                         s = rffi.charp2strn(ptr, length)
-                    return space.wrap(s)
+                    return space.wrapbytes(s)
                 #
                 # pointer to a wchar_t: builds and returns a unicode
                 if self.is_unichar_ptr_or_array():
@@ -372,15 +372,15 @@
         rffi_fclose(self.llf)
 
 
-def prepare_file_argument(space, fileobj):
-    fileobj.direct_flush()
-    if fileobj.cffi_fileobj is None:
-        fd = fileobj.direct_fileno()
+def prepare_file_argument(space, w_fileobj):
+    w_fileobj.direct_flush()
+    if w_fileobj.cffi_fileobj is None:
+        fd = w_fileobj.direct_fileno()
         if fd < 0:
             raise OperationError(space.w_ValueError,
                                  space.wrap("file has no OS file descriptor"))
         try:
-            fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode)
+            w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode)
         except OSError, e:
             raise wrap_oserror(space, e)
-    return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf)
+    return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf)
diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py
--- a/pypy/module/_vmprof/test/test__vmprof.py
+++ b/pypy/module/_vmprof/test/test__vmprof.py
@@ -72,9 +72,9 @@
 
     def test_enable_ovf(self):
         import _vmprof
-        raises(_vmprof.VMProfError, _vmprof.enable, 999, 0)
-        raises(_vmprof.VMProfError, _vmprof.enable, 999, -2.5)
-        raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300)
-        raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300 * 1e300)
+        raises(_vmprof.VMProfError, _vmprof.enable, 2, 0)
+        raises(_vmprof.VMProfError, _vmprof.enable, 2, -2.5)
+        raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300)
+        raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300 * 1e300)
         NaN = (1e300*1e300) / (1e300*1e300)
-        raises(_vmprof.VMProfError, _vmprof.enable, 999, NaN)
+        raises(_vmprof.VMProfError, _vmprof.enable, 2, NaN)
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -937,14 +937,14 @@
     modulename = py.path.local(eci.libraries[-1])
 
     def dealloc_trigger():
-        from pypy.module.cpyext.pyobject import _Py_Dealloc
+        from pypy.module.cpyext.pyobject import decref
         print 'dealloc_trigger...'
         while True:
             ob = rawrefcount.next_dead(PyObject)
             if not ob:
                 break
             print 'deallocating PyObject', ob
-            _Py_Dealloc(space, ob)
+            decref(space, ob)
         print 'dealloc_trigger DONE'
         return "RETRY"
     rawrefcount.init(dealloc_trigger)
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -80,7 +80,8 @@
     buflen = length + 1
     py_str.c_ob_size = length
     py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen,
-                                    flavor='raw', zero=True)
+                                    flavor='raw', zero=True,
+                                    add_memory_pressure=True)
     py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED
     return py_str
 
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -18,7 +18,8 @@
 def PyObject_Malloc(space, size):
     # returns non-zero-initialized memory, like CPython
     return lltype.malloc(rffi.VOIDP.TO, size,
-                         flavor='raw')
+                         flavor='raw',
+                         add_memory_pressure=True)
 
 @cpython_api([rffi.VOIDP], lltype.Void)
 def PyObject_Free(space, ptr):
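Several hunks in this changeset add add_memory_pressure=True to raw
lltype.malloc() calls, so that the GC takes memory allocated outside its
managed heap into account when scheduling major collections.  A minimal
standalone sketch of the pattern, with illustrative function names (only the
malloc/free keywords are taken from the hunks above):

    from rpython.rtyper.lltypesystem import lltype, rffi

    def alloc_raw_buffer(size):
        # raw, non-GC-managed memory; add_memory_pressure reports its size
        # to the GC so major collections are triggered early enough
        return lltype.malloc(rffi.CCHARP.TO, size, flavor='raw', zero=True,
                             add_memory_pressure=True)

    def free_raw_buffer(buf):
        # raw allocations must be freed explicitly
        lltype.free(buf, flavor='raw')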
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -50,7 +50,8 @@
             size += itemcount * pytype.c_tp_itemsize
         assert size >= rffi.sizeof(PyObject.TO)
         buf = lltype.malloc(rffi.VOIDP.TO, size,
-                            flavor='raw', zero=True)
+                            flavor='raw', zero=True,
+                            add_memory_pressure=True)
         pyobj = rffi.cast(PyObject, buf)
         pyobj.c_ob_refcnt = 1
         #pyobj.c_ob_pypy_link should get assigned very quickly
diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py
--- a/pypy/module/cpyext/state.py
+++ b/pypy/module/cpyext/state.py
@@ -147,10 +147,10 @@
     """
 
     def perform(self, executioncontext, frame):
-        from pypy.module.cpyext.pyobject import PyObject, _Py_Dealloc
+        from pypy.module.cpyext.pyobject import PyObject, decref
 
         while True:
             py_obj = rawrefcount.next_dead(PyObject)
             if not py_obj:
                 break
-            _Py_Dealloc(self.space, py_obj)
+            decref(self.space, py_obj)
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -75,7 +75,6 @@
         assert len(s) == 4
         assert s == u'a\xe9\x00c'
 
-
     def test_hash(self):
         module = self.import_extension('foo', [
             ("test_hash", "METH_VARARGS",
diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
--- a/pypy/module/cpyext/tupleobject.py
+++ b/pypy/module/cpyext/tupleobject.py
@@ -59,7 +59,8 @@
     py_tup = rffi.cast(PyTupleObject, py_obj)
 
     py_tup.c_ob_item = lltype.malloc(ObjectItems, length,
-                                     flavor='raw', zero=True)
+                                     flavor='raw', zero=True,
+                                     add_memory_pressure=True)
     py_tup.c_ob_size = length
     return py_tup
 
@@ -70,7 +71,8 @@
     """
     items_w = space.fixedview(w_obj)
     l = len(items_w)
-    p = lltype.malloc(ObjectItems, l, flavor='raw')
+    p = lltype.malloc(ObjectItems, l, flavor='raw',
+                      add_memory_pressure=True)
     i = 0
     try:
         while i < l:
@@ -177,7 +179,8 @@
     ref = rffi.cast(PyTupleObject, ref)
     oldsize = ref.c_ob_size
     oldp = ref.c_ob_item
-    newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw')
+    newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw',
+                         add_memory_pressure=True)
     try:
         if oldsize < newsize:
             to_cp = oldsize
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -502,7 +502,8 @@
             Py_DecRef(space, w_metatype)
 
     heaptype = lltype.malloc(PyHeapTypeObject.TO,
-                             flavor='raw', zero=True)
+                             flavor='raw', zero=True,
+                             add_memory_pressure=True)
     pto = heaptype.c_ht_type
     pto.c_ob_refcnt = 1
     pto.c_ob_pypy_link = 0
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -22,7 +22,7 @@
 PyUnicodeObjectStruct = lltype.ForwardReference()
 PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct)
 PyUnicodeObjectFields = (PyObjectFields +
-    (("str", rffi.CWCHARP), ("length", Py_ssize_t),
+    (("str", rffi.CWCHARP), ("size", Py_ssize_t),
      ("hash", rffi.LONG), ("defenc", PyObject)))
 cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct)
 
@@ -43,31 +43,6 @@
 
 Py_UNICODE = lltype.UniChar
 
-def unicode_alloc(space, w_type, length):
-    '''
-    see comments with string_alloc in stringobject.py
-    '''
-    XXX
-    from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr
-    pytype = as_pyobj(space, w_type)
-    pytype = rffi.cast(PyTypeObjectPtr, pytype)
-    assert pytype
-    size = pytype.c_tp_basicsize
-    buf = lltype.malloc(rffi.VOIDP.TO, size,
-                        flavor='raw', zero=True)
-    py_uni = rffi.cast(PyUnicodeObject, buf)
-    py_uni.c_ob_refcnt = 1
-    py_uni.c_ob_type = pytype
-    if length > 0:
-        py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, length+1,
-                                        flavor='raw', zero=True)
-        py_uni.c_length = length
-        s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_length)
-        w_obj = space.wrap(s)
-        py_uni.c_hash = space.hash_w(w_obj)
-        track_reference(space, rffi.cast(PyObject, py_uni), w_obj)
-    return rffi.cast(PyObject, py_uni)
-
 def new_empty_unicode(space, length):
     """
     Allocate a PyUnicodeObject and its buffer, but without a corresponding
@@ -79,9 +54,10 @@
     py_uni = rffi.cast(PyUnicodeObject, py_obj)
 
     buflen = length + 1
-    py_uni.c_length = length
+    py_uni.c_size = length
     py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, buflen,
-                                    flavor='raw', zero=True)
+                                 flavor='raw', zero=True,
+                                 add_memory_pressure=True)
     py_uni.c_hash = -1
     py_uni.c_defenc = lltype.nullptr(PyObject.TO)
     return py_uni
@@ -89,7 +65,7 @@
 def unicode_attach(space, py_obj, w_obj):
     "Fills a newly allocated PyUnicodeObject with a unicode string"
     py_unicode = rffi.cast(PyUnicodeObject, py_obj)
-    py_unicode.c_length = len(space.unicode_w(w_obj))
+    py_unicode.c_size = len(space.unicode_w(w_obj))
     py_unicode.c_str = lltype.nullptr(rffi.CWCHARP.TO)
     py_unicode.c_hash = space.hash_w(w_obj)
     py_unicode.c_defenc = lltype.nullptr(PyObject.TO)
@@ -100,7 +76,7 @@
     be modified after this call.
     """
     py_uni = rffi.cast(PyUnicodeObject, py_obj)
-    s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_length)
+    s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_size)
     w_obj = space.wrap(s)
     py_uni.c_hash = space.hash_w(w_obj)
     track_reference(space, py_obj, w_obj)
@@ -259,7 +235,7 @@
 def PyUnicode_GetSize(space, ref):
     if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_unicode:
         ref = rffi.cast(PyUnicodeObject, ref)
-        return ref.c_length
+        return ref.c_size
     else:
         w_obj = from_ref(space, ref)
         return space.len_w(w_obj)
@@ -274,11 +250,11 @@
     to make sure that the wchar_t string is 0-terminated in case this is
     required by the application."""
     c_str = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref))
-    c_length = ref.c_length
+    c_size = ref.c_size
 
     # If possible, try to copy the 0-termination as well
-    if size > c_length:
-        size = c_length + 1
+    if size > c_size:
+        size = c_size + 1
 
 
     i = 0
@@ -286,8 +262,8 @@
         buf[i] = c_str[i]
         i += 1
 
-    if size > c_length:
-        return c_length
+    if size > c_size:
+        return c_size
     else:
         return size
 
@@ -493,7 +469,7 @@
         ref[0] = lltype.nullptr(PyObject.TO)
         raise
     to_cp = newsize
-    oldsize = py_uni.c_length
+    oldsize = py_uni.c_size
     if oldsize < newsize:
         to_cp = oldsize
     for i in range(to_cp):
diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
--- a/pypy/module/imp/test/test_import.py
+++ b/pypy/module/imp/test/test_import.py
@@ -109,7 +109,7 @@
         import marshal, stat, struct, os, imp
         code = py.code.Source(p.join("x.py").read()).compile()
         s3 = marshal.dumps(code)
-        s2 = struct.pack("i", os.stat(str(p.join("x.py")))[stat.ST_MTIME])
+        s2 = struct.pack("<i", os.stat(str(p.join("x.py")))[stat.ST_MTIME])
         p.join("x.pyc").write(imp.get_magic() + s2 + s3, mode='wb')
     else:
         w = space.wrap
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -502,29 +502,34 @@
         return W_NDimArray(self.implementation.transpose(self, axes))
 
     def descr_transpose(self, space, args_w):
-        if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple):
-            args_w = space.fixedview(args_w[0])
-        if (len(args_w) == 0 or
-                len(args_w) == 1 and space.is_none(args_w[0])):
+        if len(args_w) == 0 or len(args_w) == 1 and space.is_none(args_w[0]):
             return self.descr_get_transpose(space)
         else:
-            if len(args_w) != self.ndims():
-                raise oefmt(space.w_ValueError, "axes don't match array")
-            axes = []
-            axes_seen = [False] * self.ndims()
-            for w_arg in args_w:
-                try:
-                    axis = support.index_w(space, w_arg)
-                except OperationError:
-                    raise oefmt(space.w_TypeError, "an integer is required")
-                if axis < 0 or axis >= self.ndims():
-                    raise oefmt(space.w_ValueError, "invalid axis for this 
array")
-                if axes_seen[axis] is True:
-                    raise oefmt(space.w_ValueError, "repeated axis in 
transpose")
-                axes.append(axis)
-                axes_seen[axis] = True
-            return self.descr_get_transpose(space, axes)
+            if len(args_w) > 1:
+                axes = args_w
+            else:  # Iterable in the only argument (len(args_w) == 1 and args_w[0] is not None)
+                axes = space.fixedview(args_w[0])
 
+        axes = self._checked_axes(axes, space)
+        return self.descr_get_transpose(space, axes)
+
+    def _checked_axes(self, axes_raw, space):
+        if len(axes_raw) != self.ndims():
+            raise oefmt(space.w_ValueError, "axes don't match array")
+        axes = []
+        axes_seen = [False] * self.ndims()
+        for elem in axes_raw:
+            try:
+                axis = support.index_w(space, elem)
+            except OperationError:
+                raise oefmt(space.w_TypeError, "an integer is required")
+            if axis < 0 or axis >= self.ndims():
+                raise oefmt(space.w_ValueError, "invalid axis for this array")
+            if axes_seen[axis] is True:
+                raise oefmt(space.w_ValueError, "repeated axis in transpose")
+            axes.append(axis)
+            axes_seen[axis] = True
+        return axes
 
     @unwrap_spec(axis1=int, axis2=int)
     def descr_swapaxes(self, space, axis1, axis2):
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -2960,6 +2960,36 @@
         assert (a.transpose() == b).all()
         assert (a.transpose(None) == b).all()
 
+    def test_transpose_arg_tuple(self):
+        import numpy as np
+        a = np.arange(24).reshape(2, 3, 4)
+        transpose_args = a.transpose(1, 2, 0)
+
+        transpose_test = a.transpose((1, 2, 0))
+
+        assert transpose_test.shape == (3, 4, 2)
+        assert (transpose_args == transpose_test).all()
+
+    def test_transpose_arg_list(self):
+        import numpy as np
+        a = np.arange(24).reshape(2, 3, 4)
+        transpose_args = a.transpose(1, 2, 0)
+
+        transpose_test = a.transpose([1, 2, 0])
+
+        assert transpose_test.shape == (3, 4, 2)
+        assert (transpose_args == transpose_test).all()
+
+    def test_transpose_arg_array(self):
+        import numpy as np
+        a = np.arange(24).reshape(2, 3, 4)
+        transpose_args = a.transpose(1, 2, 0)
+
+        transpose_test = a.transpose(np.array([1, 2, 0]))
+
+        assert transpose_test.shape == (3, 4, 2)
+        assert (transpose_args == transpose_test).all()
+
     def test_transpose_error(self):
         import numpy as np
         a = np.arange(24).reshape(2, 3, 4)
@@ -2968,6 +2998,11 @@
         raises(ValueError, a.transpose, 1, 0, 1)
         raises(TypeError, a.transpose, 1, 0, '2')
 
+    def test_transpose_unexpected_argument(self):
+        import numpy as np
+        a = np.array([[1, 2], [3, 4], [5, 6]])
+        raises(TypeError, 'a.transpose(axes=(1,2,0))')
+
     def test_flatiter(self):
         from numpy import array, flatiter, arange, zeros
         a = array([[10, 30], [40, 60]])
@@ -3439,7 +3474,7 @@
 
     def test_index_int(self):
         import numpy as np
-        a = np.array([10, 20, 30])
+        a = np.array([10, 20, 30], dtype='int64')
         res = a[np.int64(1)]
         assert isinstance(res, np.int64)
         assert res == 20
diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py
--- a/pypy/module/select/test/test_select.py
+++ b/pypy/module/select/test/test_select.py
@@ -287,8 +287,7 @@
             t = thread.start_new_thread(pollster.poll, ())
             try:
                 time.sleep(0.3)
-                # TODO restore print '', if this is not the reason
-                for i in range(5): print 'release gil select'  # to release GIL untranslated
+                for i in range(5): print '',  # to release GIL untranslated
                 # trigger ufds array reallocation
                 for fd in rfds:
                     pollster.unregister(fd)
diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py
--- a/pypy/module/thread/test/test_lock.py
+++ b/pypy/module/thread/test/test_lock.py
@@ -3,7 +3,7 @@
 import sys, os
 from pypy.module.thread.test.support import GenericTestThread
 from rpython.translator.c.test.test_genc import compile
-import platform
+from platform import machine
 
 
 class AppTestLock(GenericTestThread):
@@ -64,8 +64,7 @@
         else:
             assert self.runappdirect, "missing lock._py3k_acquire()"
 
-    @py.test.mark.xfail(platform.machine() == 's390x',
-                        reason='may fail this test under heavy load')
+    @py.test.mark.xfail(machine()=='s390x', reason='may fail under heavy load')
     def test_ping_pong(self):
         # The purpose of this test is that doing a large number of ping-pongs
         # between two threads, using locks, should complete in a reasonable
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -144,14 +144,6 @@
             cache[name, index] = attr
         return attr
 
-    @jit.elidable
-    def _get_cache_attr(self, name, index):
-        key = name, index
-        # this method is not actually elidable, but it's fine anyway
-        if self.cache_attrs is not None:
-            return self.cache_attrs.get(key, None)
-        return None
-
     def add_attr(self, obj, name, index, w_value):
         self._reorder_and_add(obj, name, index, w_value)
         if not jit.we_are_jitted():
diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
--- a/pypy/tool/release/repackage.sh
+++ b/pypy/tool/release/repackage.sh
@@ -1,9 +1,9 @@
 # Edit these appropriately before running this script
 maj=5
 min=0
-rev=0
+rev=1
 branchname=release-$maj.x  # ==OR== release-$maj.$min.x
-tagname=release-$maj.$min  # ==OR== release-$maj.$min.$rev
+tagname=release-$maj.$min.$rev
 # This script will download latest builds from the buildmaster, rename the top
 # level directory, and repackage ready to be uploaded to bitbucket. It will also
 # download source, assuming a tag for the release already exists, and repackage them.
diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py
--- a/rpython/jit/backend/detect_cpu.py
+++ b/rpython/jit/backend/detect_cpu.py
@@ -66,6 +66,7 @@
             'x86_64': MODEL_X86,
             'amd64': MODEL_X86,    # freebsd
             'AMD64': MODEL_X86,    # win64
+            'armv8l': MODEL_ARM,   # 32-bit ARMv8
             'armv7l': MODEL_ARM,
             'armv6l': MODEL_ARM,
             'arm': MODEL_ARM,      # freebsd
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -688,6 +688,10 @@
         ARRAY = op.args[0].concretetype.TO
         if self._array_of_voids(ARRAY):
             return []
+        if isinstance(ARRAY, lltype.FixedSizeArray):
+            raise NotImplementedError(
+                "%r uses %r, which is not supported by the JIT codewriter"
+                % (self.graph, ARRAY))
         if op.args[0] in self.vable_array_vars:     # for virtualizables
             vars = self.vable_array_vars[op.args[0]]
             (v_base, arrayfielddescr, arraydescr) = vars
@@ -718,6 +722,10 @@
         ARRAY = op.args[0].concretetype.TO
         if self._array_of_voids(ARRAY):
             return []
+        if isinstance(ARRAY, lltype.FixedSizeArray):
+            raise NotImplementedError(
+                "%r uses %r, which is not supported by the JIT codewriter"
+                % (self.graph, ARRAY))
         if op.args[0] in self.vable_array_vars:     # for virtualizables
             vars = self.vable_array_vars[op.args[0]]
             (v_base, arrayfielddescr, arraydescr) = vars
@@ -784,11 +792,13 @@
             return []
         # check for _immutable_fields_ hints
         immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value)
+        need_live = False
         if immut:
             if (self.callcontrol is not None and
                 self.callcontrol.could_be_green_field(v_inst.concretetype.TO,
                                                       c_fieldname.value)):
                 pure = '_greenfield'
+                need_live = True
             else:
                 pure = '_pure'
         else:
@@ -815,10 +825,12 @@
             descr1 = self.cpu.fielddescrof(
                 v_inst.concretetype.TO,
                 quasiimmut.get_mutate_field_name(c_fieldname.value))
-            op1 = [SpaceOperation('-live-', [], None),
+            return [SpaceOperation('-live-', [], None),
                    SpaceOperation('record_quasiimmut_field',
                                   [v_inst, descr, descr1], None),
                    op1]
+        if need_live:
+            return [SpaceOperation('-live-', [], None), op1]
         return op1
 
     def rewrite_op_setfield(self, op, override_type=None):
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -1012,7 +1012,8 @@
     v1 = varoftype(lltype.Ptr(S))
     v2 = varoftype(lltype.Char)
     op = SpaceOperation('getfield', [v1, Constant('x', lltype.Void)], v2)
-    op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op)
+    op0, op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op)
+    assert op0.opname == '-live-'
     assert op1.opname == 'getfield_gc_i_greenfield'
     assert op1.args == [v1, ('fielddescr', S, 'x')]
     assert op1.result == v2
@@ -1315,6 +1316,21 @@
     tr = Transformer(None, None)
     py.test.raises(NotImplementedError, tr.rewrite_operation, op)
 
+def test_no_fixedsizearray():
+    A = lltype.FixedSizeArray(lltype.Signed, 5)
+    v_x = varoftype(lltype.Ptr(A))
+    op = SpaceOperation('getarrayitem', [v_x, Constant(0, lltype.Signed)],
+                        varoftype(lltype.Signed))
+    tr = Transformer(None, None)
+    tr.graph = 'demo'
+    py.test.raises(NotImplementedError, tr.rewrite_operation, op)
+    op = SpaceOperation('setarrayitem', [v_x, Constant(0, lltype.Signed),
+                                              Constant(42, lltype.Signed)],
+                        varoftype(lltype.Void))
+    e = py.test.raises(NotImplementedError, tr.rewrite_operation, op)
+    assert str(e.value) == (
+        "'demo' uses %r, which is not supported by the JIT codewriter" % (A,))
+
 def _test_threadlocalref_get(loop_inv):
     from rpython.rlib.rthread import ThreadLocalField
     tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_',
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -2929,10 +2929,19 @@
             ll_assert(rc < int(REFCNT_FROM_PYPY_LIGHT * 0.99),
                       "refcount underflow from REFCNT_FROM_PYPY_LIGHT?")
             rc -= REFCNT_FROM_PYPY
-            self._pyobj(pyobject).ob_refcnt = rc
             self._pyobj(pyobject).ob_pypy_link = 0
             if rc == 0:
                 self.rrc_dealloc_pending.append(pyobject)
+                # an object with refcnt == 0 cannot stay around waiting
+                # for its deallocator to be called.  Some code (lxml)
+                # expects that tp_dealloc is called immediately when
+                # the refcnt drops to 0.  If it isn't, we get some
+                # uncleared raw pointer that can still be used to access
+                # the object; but (PyObject *)raw_pointer is then bogus
+                # because after a Py_INCREF()/Py_DECREF() on it, its
+                # tp_dealloc is also called!
+                rc = 1
+            self._pyobj(pyobject).ob_refcnt = rc
     _rrc_free._always_inline_ = True
 
     def rrc_major_collection_trace(self):
diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py
--- a/rpython/memory/gc/test/test_rawrefcount.py
+++ b/rpython/memory/gc/test/test_rawrefcount.py
@@ -174,7 +174,7 @@
         p1 = check_alive(0)
         self._collect(major=True, expected_trigger=1)
         py.test.raises(RuntimeError, "p1.x")            # dead
-        assert r1.ob_refcnt == 0
+        assert r1.ob_refcnt == 1       # in the pending list
         assert r1.ob_pypy_link == 0
         assert self.gc.rawrefcount_next_dead() == r1addr
         assert self.gc.rawrefcount_next_dead() == llmemory.NULL
@@ -197,7 +197,7 @@
         assert p1.x == 42
         self._collect(major=True, expected_trigger=1)
         py.test.raises(RuntimeError, "p1.x")            # dead
-        assert r1.ob_refcnt == 0
+        assert r1.ob_refcnt == 1
         assert r1.ob_pypy_link == 0
         assert self.gc.rawrefcount_next_dead() == r1addr
         self.gc.check_no_more_rawrefcount_state()
@@ -214,7 +214,7 @@
         else:
             self._collect(major=False, expected_trigger=1)
         py.test.raises(RuntimeError, "p1.x")            # dead
-        assert r1.ob_refcnt == 0
+        assert r1.ob_refcnt == 1
         assert r1.ob_pypy_link == 0
         assert self.gc.rawrefcount_next_dead() == r1addr
         self.gc.check_no_more_rawrefcount_state()
@@ -252,7 +252,7 @@
             self._collect(major=True, expected_trigger=1)
         else:
             self._collect(major=False, expected_trigger=1)
-        assert r1.ob_refcnt == 0     # refcnt dropped to 0
+        assert r1.ob_refcnt == 1     # refcnt 1, in the pending list
         assert r1.ob_pypy_link == 0  # detached
         assert self.gc.rawrefcount_next_dead() == r1addr
         self.gc.check_no_more_rawrefcount_state()
@@ -277,7 +277,7 @@
         assert self.trigger == []
         self._collect(major=True, expected_trigger=1)
         py.test.raises(RuntimeError, "p1.x")            # dead
-        assert r1.ob_refcnt == 0
+        assert r1.ob_refcnt == 1
         assert r1.ob_pypy_link == 0
         assert self.gc.rawrefcount_next_dead() == r1addr
         self.gc.check_no_more_rawrefcount_state()
diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
--- a/rpython/rlib/jit.py
+++ b/rpython/rlib/jit.py
@@ -1059,6 +1059,14 @@
     of JIT running like JIT loops compiled, aborts etc.
     An instance of this class will be available as policy.jithookiface.
     """
+    # WARNING: You should make a single prebuilt instance of a subclass
+    # of this class.  You can, before translation, initialize some
+    # attributes on this instance, and then read or change these
+    # attributes inside the methods of the subclass.  But this prebuilt
+    # instance *must not* be seen during the normal annotation/rtyping
+    # of the program!  A line like ``pypy_hooks.foo = ...`` must not
+    # appear inside your interpreter's RPython code.
+
     def on_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops, operations):
         """ A hook called each time a loop is aborted with jitdriver and
         greenkey where it started, reason is a string why it got aborted
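A rough sketch of the usage pattern described by the warning above; the class
name and the 'verbose' attribute are hypothetical, only the on_abort()
signature comes from the code in this hunk:

    from rpython.rlib.jit import JitHookInterface

    class MyJitHooks(JitHookInterface):
        def on_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops,
                     operations):
            if self.verbose:    # reads an attribute set on the prebuilt instance
                print 'JIT loop aborted:', reason

    # exactly one prebuilt instance, configured before translation; a line
    # like 'my_hooks.verbose = ...' must not appear in RPython code itself
    my_hooks = MyJitHooks()
    my_hooks.verbose = True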
diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py
--- a/rpython/rlib/rawrefcount.py
+++ b/rpython/rlib/rawrefcount.py
@@ -77,6 +77,12 @@
     return p
 
 def next_dead(OB_PTR_TYPE):
+    """NOT_RPYTHON.  When the GC runs, it finds some pyobjs to be dead
+    but cannot immediately dispose of them (it doesn't know how to call
+    e.g. tp_dealloc(), and anyway calling it immediately would cause all
+    sorts of bugs).  So instead, it stores them in an internal list,
+    initially with refcnt == 1.  This pops the next item off this list.
+    """
     if len(_d_list) == 0:
         return lltype.nullptr(OB_PTR_TYPE.TO)
     ob = _d_list.pop()
@@ -141,6 +147,7 @@
                 ob.c_ob_refcnt -= REFCNT_FROM_PYPY
                 ob.c_ob_pypy_link = 0
                 if ob.c_ob_refcnt == 0:
+                    ob.c_ob_refcnt = 1
                     _d_list.append(ob)
             return None
 
diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
--- a/rpython/rlib/rposix.py
+++ b/rpython/rlib/rposix.py
@@ -22,6 +22,22 @@
     from rpython.rlib import rwin32
     from rpython.rlib.rwin32file import make_win32_traits
 
+class CConfig:
+    _compilation_info_ = ExternalCompilationInfo(
+        includes=['sys/stat.h',
+                  'unistd.h',
+                  'fcntl.h'],
+    )
+    for _name in """fchdir fchmod fchmodat fchown fchownat fexecve fdopendir
+                    fpathconf fstat fstatat fstatvfs ftruncate futimens futimes
+                    futimesat linkat lchflags lchmod lchown lstat lutimes
+                    mkdirat mkfifoat mknodat openat readlinkat renameat
+                    symlinkat unlinkat utimensat""".split():
+        locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name)
+cConfig = rffi_platform.configure(CConfig)
+globals().update(cConfig)
+
+
 class CConstantErrno(CConstant):
     # these accessors are used when calling get_errno() or set_errno()
     # on top of CPython
@@ -1024,6 +1040,13 @@
         if not win32traits.MoveFile(path1, path2):
             raise rwin32.lastSavedWindowsError()
 
+@specialize.argtype(0, 1)
+def replace(path1, path2):
+    if os.name == 'nt':
+        raise NotImplementedError(
+            'On windows, os.replace() should overwrite the destination')
+    return rename(path1, path2)
+
 #___________________________________________________________________
 
 c_mkfifo = external('mkfifo', [rffi.CCHARP, rffi.MODE_T], rffi.INT,
diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py
--- a/rpython/rlib/rtime.py
+++ b/rpython/rlib/rtime.py
@@ -9,7 +9,6 @@
 from rpython.rtyper.tool import rffi_platform
 from rpython.rtyper.lltypesystem import rffi, lltype
 from rpython.rlib.objectmodel import register_replacement_for
-from rpython.rlib import jit
 from rpython.rlib.rarithmetic import intmask, UINT_MAX
 from rpython.rlib import rposix
 
@@ -149,13 +148,12 @@
 
 if _WIN32:
     # hacking to avoid LARGE_INTEGER which is a union...
-    A = lltype.FixedSizeArray(lltype.SignedLongLong, 1)
     QueryPerformanceCounter = external(
-        'QueryPerformanceCounter', [lltype.Ptr(A)], lltype.Void,
-        releasegil=False)
+        'QueryPerformanceCounter', [rffi.CArrayPtr(lltype.SignedLongLong)],
+         lltype.Void, releasegil=False)
     QueryPerformanceFrequency = external(
-        'QueryPerformanceFrequency', [lltype.Ptr(A)], rffi.INT,
-        releasegil=False)
+        'QueryPerformanceFrequency', [rffi.CArrayPtr(lltype.SignedLongLong)], 
+        rffi.INT, releasegil=False)
     class State(object):
         divisor = 0.0
         counter_start = 0
@@ -170,19 +168,16 @@
                                [lltype.Signed, lltype.Ptr(TIMESPEC)],
                                rffi.INT, releasegil=False,
                                compilation_info=eci_with_lrt)
-else:
+if need_rusage:
     RUSAGE = RUSAGE
     RUSAGE_SELF = RUSAGE_SELF or 0
     c_getrusage = external('getrusage',
                            [rffi.INT, lltype.Ptr(RUSAGE)],
-                           lltype.Void,
+                           rffi.INT,
                            releasegil=False)
 
-@replace_time_function('clock')
-@jit.dont_look_inside  # the JIT doesn't like FixedSizeArray
-def clock():
-    if _WIN32:
-        a = lltype.malloc(A, flavor='raw')
+def win_perf_counter():
+    with lltype.scoped_alloc(rffi.CArray(rffi.lltype.SignedLongLong), 1) as a:
         if state.divisor == 0.0:
             QueryPerformanceCounter(a)
             state.counter_start = a[0]
@@ -190,8 +185,12 @@
             state.divisor = float(a[0])
         QueryPerformanceCounter(a)
         diff = a[0] - state.counter_start
-        lltype.free(a, flavor='raw')
-        return float(diff) / state.divisor
+    return float(diff) / state.divisor
+
+@replace_time_function('clock')
+def clock():
+    if _WIN32:
+        return win_perf_counter()
     elif CLOCK_PROCESS_CPUTIME_ID is not None:
         with lltype.scoped_alloc(TIMESPEC) as a:
             c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a)
diff --git a/rpython/rlib/rvmprof/src/vmprof_common.h b/rpython/rlib/rvmprof/src/vmprof_common.h
--- a/rpython/rlib/rvmprof/src/vmprof_common.h
+++ b/rpython/rlib/rvmprof/src/vmprof_common.h
@@ -24,14 +24,14 @@
     char padding[sizeof(long) - 1];
     char marker;
     long count, depth;
-    void *stack[];
+    intptr_t stack[];
 } prof_stacktrace_s;
 
 
 RPY_EXTERN
 char *vmprof_init(int fd, double interval, char *interp_name)
 {
-    if (interval < 1e-6 || interval >= 1.0)
+    if (!(interval >= 1e-6 && interval < 1.0))   /* also if it is NaN */
         return "bad value for 'interval'";
     prepare_interval_usec = (int)(interval * 1000000.0);
 
diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h
--- a/rpython/rlib/rvmprof/src/vmprof_config.h
+++ b/rpython/rlib/rvmprof/src/vmprof_config.h
@@ -1,6 +1,10 @@
 #define HAVE_SYS_UCONTEXT_H
-#if defined(__FreeBSD__)
-#define PC_FROM_UCONTEXT uc_mcontext.mc_rip
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+  #ifdef __i386__
+    #define PC_FROM_UCONTEXT uc_mcontext.mc_eip
+  #else
+    #define PC_FROM_UCONTEXT uc_mcontext.mc_rip
+  #endif
 #elif defined( __APPLE__)
   #if ((ULONG_MAX) == (UINT_MAX))
     #define PC_FROM_UCONTEXT uc_mcontext->__ss.__eip
@@ -8,10 +12,10 @@
     #define PC_FROM_UCONTEXT uc_mcontext->__ss.__rip
   #endif
 #elif defined(__arm__)
-#define PC_FROM_UCONTEXT uc_mcontext.arm_ip
+  #define PC_FROM_UCONTEXT uc_mcontext.arm_ip
 #elif defined(__linux) && defined(__i386) && defined(__GNUC__)
-#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP]
+  #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP]
 #else
-/* linux, gnuc */
-#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP]
+  /* linux, gnuc */
+  #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP]
 #endif
diff --git a/rpython/rlib/rvmprof/src/vmprof_main_win32.h b/rpython/rlib/rvmprof/src/vmprof_main_win32.h
--- a/rpython/rlib/rvmprof/src/vmprof_main_win32.h
+++ b/rpython/rlib/rvmprof/src/vmprof_main_win32.h
@@ -101,7 +101,7 @@
     depth = get_stack_trace(p->vmprof_tl_stack,
                      stack->stack, MAX_STACK_DEPTH-2, ctx.Eip);
     stack->depth = depth;
-    stack->stack[depth++] = (void*)p->thread_ident;
+    stack->stack[depth++] = p->thread_ident;
     stack->count = 1;
     stack->marker = MARKER_STACKTRACE;
     ResumeThread(hThread);
diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py
--- a/rpython/rlib/test/test_rawrefcount.py
+++ b/rpython/rlib/test/test_rawrefcount.py
@@ -116,7 +116,7 @@
         assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS)
         assert rawrefcount._o_list == []
         assert wr_p() is None
-        assert ob.c_ob_refcnt == 0
+        assert ob.c_ob_refcnt == 1       # from the pending list
         assert ob.c_ob_pypy_link == 0
         lltype.free(ob, flavor='raw')
 
@@ -173,7 +173,7 @@
         assert rawrefcount._d_list == [ob]
         assert rawrefcount._p_list == []
         assert wr_p() is None
-        assert ob.c_ob_refcnt == 0
+        assert ob.c_ob_refcnt == 1       # from _d_list
         assert ob.c_ob_pypy_link == 0
         lltype.free(ob, flavor='raw')
 
diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py
--- a/rpython/tool/runsubprocess.py
+++ b/rpython/tool/runsubprocess.py
@@ -20,6 +20,8 @@
 def _run(executable, args, env, cwd):
     # note that this function can be *overridden* below
     # in some cases!
+    if sys.platform == 'win32':
+        executable = executable.replace('/','\\')
     if isinstance(args, str):
         args = str(executable) + ' ' + args
         shell = True
diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h
--- a/rpython/translator/c/src/thread.h
+++ b/rpython/translator/c/src/thread.h
@@ -42,13 +42,13 @@
 RPY_EXTERN long rpy_fastgil;
 
 static inline void _RPyGilAcquire(void) {
-    long old_fastgil = lock_test_and_set(&rpy_fastgil, 1);
+    long old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1);
     if (old_fastgil != 0)
         RPyGilAcquireSlowPath(old_fastgil);
 }
 static inline void _RPyGilRelease(void) {
     assert(RPY_FASTGIL_LOCKED(rpy_fastgil));
-    lock_release(&rpy_fastgil);
+    pypy_lock_release(&rpy_fastgil);
 }
 static inline long *_RPyFetchFastGil(void) {
     return &rpy_fastgil;
diff --git a/rpython/translator/c/src/thread_gil.c b/rpython/translator/c/src/thread_gil.c
--- a/rpython/translator/c/src/thread_gil.c
+++ b/rpython/translator/c/src/thread_gil.c
@@ -70,7 +70,7 @@
 {
     /* Acquires the GIL.  This assumes that we already did:
 
-          old_fastgil = lock_test_and_set(&rpy_fastgil, 1);
+          old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1);
      */
     if (!RPY_FASTGIL_LOCKED(old_fastgil)) {
         /* The fastgil was not previously locked: success.
@@ -122,7 +122,7 @@
                released.
             */
             if (!RPY_FASTGIL_LOCKED(rpy_fastgil)) {
-                old_fastgil = lock_test_and_set(&rpy_fastgil, 1);
+                old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1);
                 if (!RPY_FASTGIL_LOCKED(old_fastgil))
                     /* yes, got a non-held value!  Now we hold it. */
                     break;
diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c
--- a/rpython/translator/c/src/thread_nt.c
+++ b/rpython/translator/c/src/thread_nt.c
@@ -255,7 +255,7 @@
     LeaveCriticalSection(mutex);
 }
 
-//#define lock_test_and_set(ptr, value)  see thread_nt.h
+//#define pypy_lock_test_and_set(ptr, value)  see thread_nt.h
 #define atomic_increment(ptr)          InterlockedIncrement(ptr)
 #define atomic_decrement(ptr)          InterlockedDecrement(ptr)
 
diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h
--- a/rpython/translator/c/src/thread_nt.h
+++ b/rpython/translator/c/src/thread_nt.h
@@ -36,8 +36,8 @@
 
 #ifdef _M_IA64
 /* On Itanium, use 'acquire' memory ordering semantics */
-#define lock_test_and_set(ptr, value)  InterlockedExchangeAcquire(ptr, value)
+#define pypy_lock_test_and_set(ptr, value) InterlockedExchangeAcquire(ptr, value)
 #else
-#define lock_test_and_set(ptr, value)  InterlockedExchange(ptr, value)
+#define pypy_lock_test_and_set(ptr, value) InterlockedExchange(ptr, value)
 #endif
-#define lock_release(ptr)              (*((volatile long *)ptr) = 0)
+#define pypy_lock_release(ptr)             (*((volatile long *)ptr) = 0)
diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c
--- a/rpython/translator/c/src/thread_pthread.c
+++ b/rpython/translator/c/src/thread_pthread.c
@@ -37,7 +37,7 @@
 #  define THREAD_STACK_SIZE   0   /* use default stack size */
 # endif
 
-# if (defined(__APPLE__) || defined(__FreeBSD__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
+# if (defined(__APPLE__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
    /* The default stack size for new threads on OSX is small enough that
     * we'll get hard crashes instead of 'maximum recursion depth exceeded'
     * exceptions.
@@ -85,7 +85,7 @@
        if (tss != 0)
                pthread_attr_setstacksize(&attrs, tss);
 #endif
-#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !defined(__FreeBSD__)
+#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !(defined(__FreeBSD__) || defined(__FreeBSD_kernel__))
         pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
 #endif
 
@@ -551,7 +551,7 @@
     return result;
 }
 
-//#define lock_test_and_set(ptr, value)  see thread_pthread.h
+//#define pypy_lock_test_and_set(ptr, value)  see thread_pthread.h
 #define atomic_increment(ptr)          __sync_fetch_and_add(ptr, 1)
 #define atomic_decrement(ptr)          __sync_fetch_and_sub(ptr, 1)
 #define HAVE_PTHREAD_ATFORK            1
diff --git a/rpython/translator/c/src/thread_pthread.h b/rpython/translator/c/src/thread_pthread.h
--- a/rpython/translator/c/src/thread_pthread.h
+++ b/rpython/translator/c/src/thread_pthread.h
@@ -82,5 +82,5 @@
 void RPyThreadAfterFork(void);
 
 
-#define lock_test_and_set(ptr, value)  __sync_lock_test_and_set(ptr, value)
-#define lock_release(ptr)              __sync_lock_release(ptr)
+#define pypy_lock_test_and_set(ptr, value)  __sync_lock_test_and_set(ptr, value)
+#define pypy_lock_release(ptr)              __sync_lock_release(ptr)
diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c
--- a/rpython/translator/c/src/threadlocal.c
+++ b/rpython/translator/c/src/threadlocal.c
@@ -15,14 +15,14 @@
 static int check_valid(void);
 
 void _RPython_ThreadLocals_Acquire(void) {
-    while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) {
+    while (!pypy_lock_test_and_set(&pypy_threadlocal_lock, 1)) {
         /* busy loop */
     }
     assert(check_valid());
 }
 void _RPython_ThreadLocals_Release(void) {
     assert(check_valid());
-    lock_release(&pypy_threadlocal_lock);
+    pypy_lock_release(&pypy_threadlocal_lock);
 }
 
 
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit
