Author: Manuel Jacob <[email protected]>
Branch: py3k
Changeset: r82825:c8ebbe78a212
Date: 2016-03-06 20:30 +0100
http://bitbucket.org/pypy/pypy/changeset/c8ebbe78a212/
Log: hg merge b68cfadb2cb8
This is the last changeset which was merged into release-5.x.
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -11,29 +11,29 @@
Amaury Forgeot d'Arc
Antonio Cuni
Samuele Pedroni
+ Matti Picus
Alex Gaynor
Brian Kearns
- Matti Picus
Philip Jenvey
Michael Hudson
+ Ronan Lamy
David Schneider
+ Manuel Jacob
Holger Krekel
Christian Tismer
Hakan Ardo
- Manuel Jacob
- Ronan Lamy
Benjamin Peterson
+ Richard Plangger
Anders Chrigstrom
Eric van Riet Paap
Wim Lavrijsen
- Richard Plangger
Richard Emslie
Alexander Schremmer
Dan Villiom Podlaski Christiansen
+ Remi Meier
Lukas Diekmann
Sven Hager
Anders Lehmann
- Remi Meier
Aurelien Campeas
Niklaus Haldimann
Camillo Bruni
@@ -42,8 +42,8 @@
Romain Guillebert
Leonardo Santagada
Seo Sanghyeon
+ Ronny Pfannschmidt
Justin Peel
- Ronny Pfannschmidt
David Edelsohn
Anders Hammarquist
Jakub Gustak
@@ -65,6 +65,7 @@
Tyler Wade
Michael Foord
Stephan Diehl
+ Vincent Legoll
Stefan Schwarzer
Valentino Volonghi
Tomek Meka
@@ -75,9 +76,9 @@
Jean-Paul Calderone
Timo Paulssen
Squeaky
+ Marius Gedminas
Alexandre Fayolle
Simon Burton
- Marius Gedminas
Martin Matusiak
Konstantin Lopuhin
Wenzhu Man
@@ -86,16 +87,20 @@
Ivan Sichmann Freitas
Greg Price
Dario Bertini
+ Stefano Rivera
Mark Pearse
Simon Cross
Andreas Stührk
- Stefano Rivera
+ Edd Barrett
Jean-Philippe St. Pierre
Guido van Rossum
Pavel Vinogradov
+ Jeremy Thurgood
Paweł Piotr Przeradowski
+ Spenser Bauman
Paul deGrandis
Ilya Osadchiy
+ marky1991
Tobias Oberstein
Adrian Kuhn
Boris Feigin
@@ -104,14 +109,12 @@
Georg Brandl
Bert Freudenberg
Stian Andreassen
- Edd Barrett
+ Tobias Pape
Wanja Saatkamp
Gerald Klix
Mike Blume
- Tobias Pape
Oscar Nierstrasz
Stefan H. Muller
- Jeremy Thurgood
Rami Chowdhury
Eugene Oden
Henry Mason
@@ -123,6 +126,8 @@
Lukas Renggli
Guenter Jantzen
Ned Batchelder
+ Tim Felgentreff
+ Anton Gulenko
Amit Regmi
Ben Young
Nicolas Chauvat
@@ -132,12 +137,12 @@
Nicholas Riley
Jason Chu
Igor Trindade Oliveira
- Tim Felgentreff
+ Yichao Yu
Rocco Moretti
Gintautas Miliauskas
Michael Twomey
Lucian Branescu Mihaila
- Yichao Yu
+ Devin Jeanpierre
Gabriel Lavoie
Olivier Dormond
Jared Grubb
@@ -161,33 +166,33 @@
Stanislaw Halik
Mikael Schönenberg
Berkin Ilbeyi
- Elmo M?ntynen
+ Elmo Mäntynen
+ Faye Zhao
Jonathan David Riehl
Anders Qvist
Corbin Simpson
Chirag Jadwani
Beatrice During
Alex Perry
- Vincent Legoll
+ Vaibhav Sood
Alan McIntyre
- Spenser Bauman
+ William Leslie
Alexander Sedov
Attila Gobi
+ Jasper.Schulz
Christopher Pope
- Devin Jeanpierre
- Vaibhav Sood
Christian Tismer
Marc Abramowitz
Dan Stromberg
Arjun Naik
Valentina Mukhamedzhanova
Stefano Parmesan
+ Mark Young
Alexis Daboville
Jens-Uwe Mager
Carl Meyer
Karl Ramm
Pieter Zieschang
- Anton Gulenko
Gabriel
Lukas Vacek
Andrew Dalke
@@ -195,6 +200,7 @@
Jakub Stasiak
Nathan Taylor
Vladimir Kryachko
+ Omer Katz
Jacek Generowicz
Alejandro J. Cura
Jacob Oscarson
@@ -209,6 +215,7 @@
Lars Wassermann
Philipp Rustemeuer
Henrik Vendelbo
+ Richard Lancaster
Dan Buch
Miguel de Val Borro
Artur Lisiecki
@@ -220,18 +227,18 @@
Tomo Cocoa
Kim Jin Su
Toni Mattis
+ Amber Brown
Lucas Stadler
Julian Berman
Markus Holtermann
roberto@goyle
Yury V. Zaytsev
Anna Katrina Dominguez
- William Leslie
Bobby Impollonia
- Faye Zhao
[email protected]
Andrew Thompson
Yusei Tahara
+ Aaron Tubbs
Ben Darnell
Roberto De Ioris
Juan Francisco Cantero Hurtado
@@ -243,6 +250,7 @@
Christopher Armstrong
Michael Hudson-Doyle
Anders Sigfridsson
+ Nikolay Zinov
Yasir Suhail
Jason Michalski
[email protected]
@@ -252,6 +260,7 @@
Gustavo Niemeyer
Stephan Busemann
Rafał Gałczyński
+ Matt Bogosian
Christian Muirhead
Berker Peksag
James Lan
@@ -286,9 +295,9 @@
Stefan Marr
jiaaro
Mads Kiilerich
- Richard Lancaster
opassembler.py
Antony Lee
+ Jason Madden
Yaroslav Fedevych
Jim Hunziker
Markus Unterwaditzer
@@ -297,6 +306,7 @@
squeaky
Zearin
soareschen
+ Jonas Pfannschmidt
Kurt Griffiths
Mike Bayer
Matthew Miller
@@ -311,4 +321,3 @@
Julien Phalip
Roman Podoliaka
Dan Loewenherz
-
diff --git a/pypy/doc/index-of-release-notes.rst
b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-5.0.0.rst
release-4.0.1.rst
release-4.0.0.rst
release-2.6.1.rst
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -7,6 +7,7 @@
.. toctree::
whatsnew-head.rst
+ whatsnew-5.0.0.rst
whatsnew-4.0.1.rst
whatsnew-4.0.0.rst
whatsnew-2.6.1.rst
diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.0.0.rst
@@ -0,0 +1,100 @@
+==========
+PyPy 5.0.0
+==========
+
+We have released PyPy 5.0.0, about three months after PyPy 4.0.0.
+We encourage all users of PyPy to update to this version. There are
+bug fixes and a major upgrade to our C-API layer (cpyext).
+
+You can download the PyPy 5.0.0 release here:
+
+ http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project.
+
+We would also like to thank our contributors and
+encourage new people to join the project. PyPy has many
+layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation
+improvements, tweaking popular `modules`_ to run on pypy, or general `help`_
+with making RPython's JIT even better.
+
+CFFI
+====
+
+While not exclusive to PyPy, `cffi`_ is arguably our most significant
+contribution to the Python ecosystem. PyPy 5.0.0 ships with
+`cffi-1.5.2`_, which now allows embedding PyPy (or CPython) in a C program.
+
+.. _`PyPy`: http://doc.pypy.org
+.. _`RPython`: https://rpython.readthedocs.org
+.. _`cffi`: https://cffi.readthedocs.org
+.. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2
+.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
+.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
+.. _`numpy`: https://bitbucket.org/pypy/numpy
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports **x86** machines on most common operating systems
+(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the
+big- and little-endian variants of **ppc64** running Linux.
+
+.. _`pypy and cpython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Other Highlights (since 4.0.1 released in November 2015)
+=========================================================
+
+* Bug Fixes
+
+ *
+
+ *
+
+ *
+
+ * Issues reported with our previous release were resolved_ after reports
+   from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues
+   or on IRC at #pypy
+
+* New features:
+
+ *
+
+ *
+
+ *
+
+* Numpy:
+
+ *
+
+ *
+
+
+* Performance improvements and refactorings:
+
+ *
+
+ *
+
+ *
+
+.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
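
[Editor's illustration] The CFFI section above mentions that cffi 1.5.2 allows embedding PyPy (or CPython) in a C program. Below is a minimal sketch of that embedding workflow with the cffi >= 1.5 API; the module name "_embedding_demo" and the function "add_one" are invented for the example and are not part of this changeset:

    # build_plugin.py -- illustrative sketch of the cffi >= 1.5 embedding API.
    # The module name "_embedding_demo" and function "add_one" are made up.
    import cffi

    ffi = cffi.FFI()

    # Functions that the host C program is allowed to call.
    ffi.embedding_api("""
        int add_one(int x);
    """)

    # Python code run when the embedded interpreter initializes; it provides
    # the implementations of the declared functions.
    ffi.embedding_init_code("""
        from _embedding_demo import ffi

        @ffi.def_extern()
        def add_one(x):
            return x + 1
    """)

    # No extra C source is needed for a pure-Python plugin.
    ffi.set_source("_embedding_demo", "")

    if __name__ == "__main__":
        # Builds a shared library that a C program can link against and
        # call add_one() from, under CPython or PyPy.
        ffi.compile(verbose=True)
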
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-5.0.0.rst
copy from pypy/doc/whatsnew-head.rst
copy to pypy/doc/whatsnew-5.0.0.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-5.0.0.rst
@@ -1,6 +1,6 @@
-=========================
-What's new in PyPy 4.1.+
-=========================
+========================
+What's new in PyPy 5.0.0
+========================
.. this is a revision shortly after release-4.0.1
.. startrev: 4b5c840d0da2
@@ -183,4 +183,11 @@
.. branch: vlen-resume
-Compress resume data, saving 10-20% of memory consumed by the JIT
\ No newline at end of file
+Compress resume data, saving 10-20% of memory consumed by the JIT
+
+.. branch: issue-2248
+
+.. branch: ndarray-setitem-filtered
+
+Fix boolean-array indexing in micronumpy
+
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -1,186 +1,8 @@
=========================
-What's new in PyPy 4.1.+
+What's new in PyPy 5.0.+
=========================
-.. this is a revision shortly after release-4.0.1
-.. startrev: 4b5c840d0da2
+.. this is a revision shortly after release-5.0.0
+.. startrev: 6d13e55b962a
-Fixed ``_PyLong_FromByteArray()``, which was buggy.
-Fixed a crash with stacklets (or greenlets) on non-Linux machines
-which showed up if you forget stacklets without resuming them.
-
-.. branch: numpy-1.10
-
-Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy
-which is now 1.10.2
-
-.. branch: osx-flat-namespace
-
-Fix the cpyext tests on OSX by linking with -flat_namespace
-
-.. branch: anntype
-
-Refactor and improve exception analysis in the annotator.
-
-.. branch: posita/2193-datetime-timedelta-integrals
-
-Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(...,
numbers.Integral)``
-to allow for alternate ``int``-like implementations (e.g.,
``future.types.newint``)
-
-.. branch: faster-rstruct
-
-Improve the performace of struct.unpack, which now directly reads inside the
-string buffer and directly casts the bytes to the appropriate type, when
-allowed. Unpacking of floats and doubles is about 15 times faster now, while
-for integer types it's up to ~50% faster for 64bit integers.
-
-.. branch: wrap-specialisation
-
-Remove unnecessary special handling of space.wrap().
-
-.. branch: compress-numbering
-
-Improve the memory signature of numbering instances in the JIT. This should
massively
-decrease the amount of memory consumed by the JIT, which is significant for
most programs.
-
-.. branch: fix-trace-too-long-heuristic
-
-Improve the heuristic when disable trace-too-long
-
-.. branch: fix-setslice-can-resize
-
-Make rlist's ll_listsetslice() able to resize the target list to help
-simplify objspace/std/listobject.py. Was issue #2196.
-
-.. branch: anntype2
-
-A somewhat random bunch of changes and fixes following up on branch 'anntype'.
Highlights:
-
-- Implement @doubledispatch decorator and use it for intersection() and
difference().
-
-- Turn isinstance into a SpaceOperation
-
-- Create a few direct tests of the fundamental annotation invariant in
test_model.py
-
-- Remove bookkeeper attribute from DictDef and ListDef.
-
-.. branch: cffi-static-callback
-
-.. branch: vecopt-absvalue
-
-- Enhancement. Removed vector fields from AbstractValue.
-
-.. branch: memop-simplify2
-
-Simplification. Backends implement too many loading instructions, only having
a slightly different interface.
-Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed)
replace all the
-commonly known loading operations
-
-.. branch: more-rposix
-
-Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and
-turn them into regular RPython functions. Most RPython-compatible `os.*`
-functions are now directly accessible as `rpython.rposix.*`.
-
-.. branch: always-enable-gil
-
-Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205.
-
-.. branch: flowspace-cleanups
-
-Trivial cleanups in flowspace.operation : fix comment & duplicated method
-
-.. branch: test-AF_NETLINK
-
-Add a test for pre-existing AF_NETLINK support. Was part of issue #1942.
-
-.. branch: small-cleanups-misc
-
-Trivial misc cleanups: typo, whitespace, obsolete comments
-
-.. branch: cpyext-slotdefs
-.. branch: fix-missing-canraise
-.. branch: whatsnew
-
-.. branch: fix-2211
-
-Fix the cryptic exception message when attempting to use extended slicing
-in rpython. Was issue #2211.
-
-.. branch: ec-keepalive
-
-Optimize the case where, in a new C-created thread, we keep invoking
-short-running Python callbacks. (CFFI on CPython has a hack to achieve
-the same result.) This can also be seen as a bug fix: previously,
-thread-local objects would be reset between two such calls.
-
-.. branch: globals-quasiimmut
-
-Optimize global lookups.
-
-.. branch: cffi-static-callback-embedding
-
-Updated to CFFI 1.5, which supports a new way to do embedding.
-Deprecates http://pypy.readthedocs.org/en/latest/embedding.html.
-
-.. branch: fix-cpython-ssl-tests-2.7
-
-Fix SSL tests by importing cpython's patch
-
-
-.. branch: remove-getfield-pure
-
-Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant
-optimizations instead consult the field descriptor to determine the purity of
-the operation. Additionally, pure ``getfield`` operations are now handled
-entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than
-`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better
codegen
-for traces containing a large number of pure getfield operations.
-
-.. branch: exctrans
-
-Try to ensure that no new functions get annotated during the 'source_c' phase.
-Refactor sandboxing to operate at a higher level.
-
-.. branch: cpyext-bootstrap
-
-.. branch: vmprof-newstack
-
-Refactor vmprof to work cross-operating-system.
-
-.. branch: seperate-strucmember_h
-
-Seperate structmember.h from Python.h Also enhance creating api functions
-to specify which header file they appear in (previously only pypy_decl.h)
-
-.. branch: llimpl
-
-Refactor register_external(), remove running_on_llinterp mechanism and
-apply sandbox transform on externals at the end of annotation.
-
-.. branch: cffi-embedding-win32
-
-.. branch: windows-vmprof-support
-
-vmprof should work on Windows.
-
-
-.. branch: reorder-map-attributes
-
-When creating instances and adding attributes in several different orders
-depending on some condition, the JIT would create too much code. This is now
-fixed.
-
-.. branch: cpyext-gc-support-2
-
-Improve CPython C API support, which means lxml now runs unmodified
-(after removing pypy hacks, pending pull request)
-
-.. branch: look-inside-tuple-hash
-
-Look inside tuple hash, improving mdp benchmark
-
-.. branch: vlen-resume
-
-Compress resume data, saving 10-20% of memory consumed by the JIT
\ No newline at end of file
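
[Editor's illustration] The faster-rstruct entry removed above describes struct.unpack reading directly out of the string buffer. The snippet below only shows the calls that fast path covers; the speedups quoted in the entry are not reproduced here:

    import struct

    packed = struct.pack("<dq", 2.5, 42)          # one double, one 64-bit int
    value, count = struct.unpack("<dq", packed)
    assert value == 2.5 and count == 42

    # unpack_from reads at an offset without slicing the buffer first, the
    # pattern that benefits most from casting bytes in place.
    value2, = struct.unpack_from("<d", packed, 0)
    assert value2 == 2.5
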
diff --git a/pypy/interpreter/test/test_app_main.py
b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -143,7 +143,7 @@
self.check(['-S', '-O', '--info'], {}, output_contains='translation')
self.check(['-S', '-O', '--version'], {}, output_contains='Python')
self.check(['-S', '-OV'], {}, output_contains='Python')
- self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''],
+ self.check(['--jit', 'off', '-S'], {}, sys_argv=[''],
run_stdin=True, no_site=1)
self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass')
self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass')
diff --git a/pypy/module/cpyext/include/patchlevel.h
b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -29,8 +29,8 @@
#define PY_VERSION "3.2.5"
/* PyPy version as a string */
-#define PYPY_VERSION "4.1.0-alpha0"
-#define PYPY_VERSION_NUM 0x04010000
+#define PYPY_VERSION "5.1.0-alpha0"
+#define PYPY_VERSION_NUM 0x05010000
/* Defined to mean a PyPy where cpyext holds more regular references
to PyObjects, e.g. staying alive as long as the internal PyPy object
diff --git a/pypy/module/micronumpy/concrete.py
b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -298,7 +298,14 @@
except IndexError:
# not a single result
chunks = self._prepare_slice_args(space, w_index)
- return new_view(space, orig_arr, chunks)
+ copy = False
+ if isinstance(chunks[0], BooleanChunk):
+ # numpy compatibility
+ copy = True
+ w_ret = new_view(space, orig_arr, chunks)
+ if copy:
+ w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order()))
+ return w_ret
def descr_setitem(self, space, orig_arr, w_index, w_value):
try:
diff --git a/pypy/module/micronumpy/ndarray.py
b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -22,7 +22,8 @@
from pypy.module.micronumpy.flagsobj import W_FlagsObject
from pypy.module.micronumpy.strides import (
get_shape_from_iterable, shape_agreement, shape_agreement_multiple,
- is_c_contiguous, is_f_contiguous, calc_strides, new_view)
+ is_c_contiguous, is_f_contiguous, calc_strides, new_view, BooleanChunk,
+ SliceChunk)
from pypy.module.micronumpy.casting import can_cast_array
from pypy.module.micronumpy.descriptor import get_dtype_cache
@@ -204,7 +205,13 @@
if iter_shape is None:
# w_index is a list of slices, return a view
chunks = self.implementation._prepare_slice_args(space, w_index)
- return new_view(space, self, chunks)
+ copy = False
+ if isinstance(chunks[0], BooleanChunk):
+ copy = True
+ w_ret = new_view(space, self, chunks)
+ if copy:
+ w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order()))
+ return w_ret
shape = res_shape + self.get_shape()[len(indexes):]
w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(),
self.get_order(), w_instance=self)
@@ -220,8 +227,24 @@
if iter_shape is None:
# w_index is a list of slices
chunks = self.implementation._prepare_slice_args(space, w_index)
- view = new_view(space, self, chunks)
- view.implementation.setslice(space, val_arr)
+ dim = -1
+ view = self
+ for i, c in enumerate(chunks):
+ if isinstance(c, BooleanChunk):
+ dim = i
+ idx = c.w_idx
+ chunks.pop(i)
+ chunks.insert(0, SliceChunk(space.newslice(space.wrap(0),
+ space.w_None, space.w_None)))
+ break
+ if dim > 0:
+ view = self.implementation.swapaxes(space, self, 0, dim)
+ if dim >= 0:
+ view = new_view(space, self, chunks)
+ view.setitem_filter(space, idx, val_arr)
+ else:
+ view = new_view(space, self, chunks)
+ view.implementation.setslice(space, val_arr)
return
if support.product(iter_shape) == 0:
return
diff --git a/pypy/module/micronumpy/strides.py
b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -97,22 +97,19 @@
# filter by axis dim
filtr = chunks[dim]
assert isinstance(filtr, BooleanChunk)
+ # XXX this creates a new array, and fails in setitem
w_arr = w_arr.getitem_filter(space, filtr.w_idx, axis=dim)
arr = w_arr.implementation
chunks[dim] = SliceChunk(space.newslice(space.wrap(0),
- space.wrap(-1), space.w_None))
+ space.w_None, space.w_None))
r = calculate_slice_strides(space, arr.shape, arr.start,
arr.get_strides(), arr.get_backstrides(), chunks)
else:
r = calculate_slice_strides(space, arr.shape, arr.start,
arr.get_strides(), arr.get_backstrides(), chunks)
shape, start, strides, backstrides = r
- w_ret = W_NDimArray.new_slice(space, start, strides[:], backstrides[:],
+ return W_NDimArray.new_slice(space, start, strides[:], backstrides[:],
shape[:], arr, w_arr)
- if dim == 0:
- # Do not return a view
- return w_ret.descr_copy(space, space.wrap(w_ret.get_order()))
- return w_ret
@jit.unroll_safe
def _extend_shape(old_shape, chunks):
diff --git a/pypy/module/micronumpy/test/test_ndarray.py
b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -2541,8 +2541,10 @@
assert b.base is None
b = a[:, np.array([True, False, True])]
assert b.base is not None
+ a[np.array([True, False]), 0] = 100
b = a[np.array([True, False]), 0]
- assert (b ==[0]).all()
+ assert b.shape == (1,)
+ assert (b ==[100]).all()
def test_scalar_indexing(self):
import numpy as np
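
[Editor's illustration] For reference, this is the behaviour (shown here with upstream NumPy) that the boolean-indexing changes above are aiming for: slicing yields a view, boolean-array indexing yields a copy, and boolean indexing on the left-hand side of an assignment still writes through to the original array:

    import numpy as np

    a = np.arange(6).reshape(2, 3)

    view = a[:, 1:]            # basic slicing: a view that shares memory
    view[0, 0] = 50
    assert a[0, 1] == 50       # writing through the view changed a

    mask = np.array([True, False])
    picked = a[mask, 0]        # boolean-array indexing: an independent copy
    picked[0] = 999
    assert a[0, 0] == 0        # the original is untouched

    a[mask, 0] = 100           # ...but masked assignment does write through
    assert a[0, 0] == 100
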
diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
--- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
@@ -68,9 +68,12 @@
pipe.returncode,))
if stderr.startswith('SKIP:'):
py.test.skip(stderr)
- if stderr.startswith('debug_alloc.h:'): # lldebug builds
- stderr = ''
+ #if stderr.startswith('debug_alloc.h:'): # lldebug builds
+ # stderr = ''
#assert not stderr
+ if stderr:
+ print '*** stderr of the subprocess: ***'
+ print stderr
#
if discard_stdout_before_last_line:
stdout = stdout.splitlines(True)[-1]
diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
--- a/pypy/module/sys/version.py
+++ b/pypy/module/sys/version.py
@@ -10,7 +10,7 @@
#XXX # sync CPYTHON_VERSION with patchlevel.h, package.py
CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h
-PYPY_VERSION = (4, 1, 0, "alpha", 0) #XXX # sync patchlevel.h
+PYPY_VERSION = (5, 1, 0, "alpha", 0) #XXX # sync patchlevel.h
import pypy
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py
b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py
@@ -12,7 +12,9 @@
def create_venv(name):
tmpdir = udir.join(name)
try:
- subprocess.check_call(['virtualenv', '--distribute',
+ subprocess.check_call(['virtualenv',
+ #'--never-download', <= could be added, but causes failures
+ # in random cases on random machines
'-p', os.path.abspath(sys.executable),
str(tmpdir)])
except OSError as e:
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c
b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c
@@ -1,3 +1,4 @@
+/* Generated by pypy/tool/import_cffi.py */
#include <stdio.h>
extern int add1(int, int);
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c
b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c
@@ -1,3 +1,4 @@
+/* Generated by pypy/tool/import_cffi.py */
#include <stdio.h>
extern int add1(int, int);
diff --git
a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c
b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c
@@ -1,3 +1,4 @@
+/* Generated by pypy/tool/import_cffi.py */
#include <stdio.h>
#ifdef _MSC_VER
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c
b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c
@@ -1,10 +1,12 @@
+/* Generated by pypy/tool/import_cffi.py */
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#ifdef PTEST_USE_THREAD
# include <pthread.h>
-# include <semaphore.h>
-static sem_t done;
+static pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t cond1 = PTHREAD_COND_INITIALIZER;
+static int remaining;
#endif
@@ -54,8 +56,11 @@
printf("time per call: %.3g\n", t);
#ifdef PTEST_USE_THREAD
- int status = sem_post(&done);
- assert(status == 0);
+ pthread_mutex_lock(&mutex1);
+ remaining -= 1;
+ if (!remaining)
+ pthread_cond_signal(&cond1);
+ pthread_mutex_unlock(&mutex1);
#endif
return arg;
@@ -68,19 +73,19 @@
start_routine(0);
#else
pthread_t th;
- int i, status = sem_init(&done, 0, 0);
- assert(status == 0);
+ int i, status;
add1(0, 0); /* this is the main thread */
+ remaining = PTEST_USE_THREAD;
for (i = 0; i < PTEST_USE_THREAD; i++) {
status = pthread_create(&th, NULL, start_routine, NULL);
assert(status == 0);
}
- for (i = 0; i < PTEST_USE_THREAD; i++) {
- status = sem_wait(&done);
- assert(status == 0);
- }
+ pthread_mutex_lock(&mutex1);
+ while (remaining)
+ pthread_cond_wait(&cond1, &mutex1);
+ pthread_mutex_unlock(&mutex1);
#endif
return 0;
}
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py
@@ -33,8 +33,12 @@
pythonpath.insert(0, cffi_base)
return os.pathsep.join(pythonpath)
-def setup_module(mod):
- mod.org_env = os.environ.copy()
+def copy_away_env():
+ global org_env
+ try:
+ org_env
+ except NameError:
+ org_env = os.environ.copy()
class EmbeddingTests:
@@ -122,6 +126,7 @@
os.chdir(curdir)
def patch_environment(self):
+ copy_away_env()
path = self.get_path()
# for libpypy-c.dll or Python27.dll
path = os.path.split(sys.executable)[0] + os.path.pathsep + path
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h
b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h
@@ -1,10 +1,45 @@
+/* Generated by pypy/tool/import_cffi.py */
/************************************************************/
#ifndef _MSC_VER
/************************************************************/
#include <pthread.h>
-#include <semaphore.h>
+
+/* don't include <semaphore.h>, it is not available on OS/X */
+
+typedef struct {
+ pthread_mutex_t mutex1;
+ pthread_cond_t cond1;
+ unsigned int value;
+} sem_t;
+
+static int sem_init(sem_t *sem, int pshared, unsigned int value)
+{
+ assert(pshared == 0);
+ sem->value = value;
+ return (pthread_mutex_init(&sem->mutex1, NULL) ||
+ pthread_cond_init(&sem->cond1, NULL));
+}
+
+static int sem_post(sem_t *sem)
+{
+ pthread_mutex_lock(&sem->mutex1);
+ sem->value += 1;
+ pthread_cond_signal(&sem->cond1);
+ pthread_mutex_unlock(&sem->mutex1);
+ return 0;
+}
+
+static int sem_wait(sem_t *sem)
+{
+ pthread_mutex_lock(&sem->mutex1);
+ while (sem->value == 0)
+ pthread_cond_wait(&sem->cond1, &sem->mutex1);
+ sem->value -= 1;
+ pthread_mutex_unlock(&sem->mutex1);
+ return 0;
+}
/************************************************************/
@@ -22,7 +57,7 @@
typedef HANDLE sem_t;
typedef HANDLE pthread_t;
-int sem_init(sem_t *sem, int pshared, unsigned int value)
+static int sem_init(sem_t *sem, int pshared, unsigned int value)
{
assert(pshared == 0);
assert(value == 0);
@@ -30,26 +65,26 @@
return *sem ? 0 : -1;
}
-int sem_post(sem_t *sem)
+static int sem_post(sem_t *sem)
{
return ReleaseSemaphore(*sem, 1, NULL) ? 0 : -1;
}
-int sem_wait(sem_t *sem)
+static int sem_wait(sem_t *sem)
{
WaitForSingleObject(*sem, INFINITE);
return 0;
}
-DWORD WINAPI myThreadProc(LPVOID lpParameter)
+static DWORD WINAPI myThreadProc(LPVOID lpParameter)
{
void *(* start_routine)(void *) = (void *(*)(void *))lpParameter;
start_routine(NULL);
return 0;
}
-int pthread_create(pthread_t *thread, void *attr,
- void *start_routine(void *), void *arg)
+static int pthread_create(pthread_t *thread, void *attr,
+ void *start_routine(void *), void *arg)
{
assert(arg == NULL);
*thread = CreateThread(NULL, 0, myThreadProc, start_routine, 0, NULL);
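
[Editor's illustration] perf-test.c and thread-test.h above replace <semaphore.h> (not available on OS X) with a counting semaphore built from a mutex and a condition variable. The sketch below shows the same pattern at the Python level; threading.Semaphore already implements it, so the class exists only to spell the idea out:

    import threading

    class CondSemaphore(object):
        def __init__(self, value=0):
            self._cond = threading.Condition(threading.Lock())
            self._value = value

        def post(self):                    # sem_post: bump counter, wake a waiter
            with self._cond:
                self._value += 1
                self._cond.notify()

        def wait(self):                    # sem_wait: block until counter > 0
            with self._cond:
                while self._value == 0:
                    self._cond.wait()
                self._value -= 1

    # Usage mirroring perf-test.c: the main thread waits until every worker
    # has posted once before exiting.
    sem = CondSemaphore()
    threads = [threading.Thread(target=sem.post) for _ in range(4)]
    for t in threads:
        t.start()
    for _ in threads:
        sem.wait()
    for t in threads:
        t.join()
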
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c
b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c
@@ -1,3 +1,4 @@
+/* Generated by pypy/tool/import_cffi.py */
#include <stdio.h>
#include <assert.h>
#include "thread-test.h"
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c
b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c
@@ -1,3 +1,4 @@
+/* Generated by pypy/tool/import_cffi.py */
#include <stdio.h>
#include <assert.h>
#include "thread-test.h"
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c
b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c
@@ -1,3 +1,4 @@
+/* Generated by pypy/tool/import_cffi.py */
#include <stdio.h>
#include <assert.h>
#include "thread-test.h"
diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c
b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c
--- a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c
+++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c
@@ -1,3 +1,4 @@
+/* Generated by pypy/tool/import_cffi.py */
#include <stdio.h>
#include <assert.h>
#include "thread-test.h"
diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py
--- a/pypy/objspace/std/celldict.py
+++ b/pypy/objspace/std/celldict.py
@@ -64,6 +64,9 @@
def setitem_str(self, w_dict, key, w_value):
cell = self.getdictvalue_no_unwrapping(w_dict, key)
+ return self._setitem_str_cell_known(cell, w_dict, key, w_value)
+
+ def _setitem_str_cell_known(self, cell, w_dict, key, w_value):
w_value = write_cell(self.space, cell, w_value)
if w_value is None:
return
@@ -74,10 +77,11 @@
space = self.space
if space.is_w(space.type(w_key), space.w_unicode):
key = space.str_w(w_key)
- w_result = self.getitem_str(w_dict, key)
+ cell = self.getdictvalue_no_unwrapping(w_dict, key)
+ w_result = unwrap_cell(self.space, cell)
if w_result is not None:
return w_result
- self.setitem_str(w_dict, key, w_default)
+ self._setitem_str_cell_known(cell, w_dict, key, w_default)
return w_default
else:
self.switch_to_object_strategy(w_dict)
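
[Editor's illustration] The celldict.py change above makes setdefault() locate the key's cell once and reuse it for the write, instead of going through getitem_str() and then setitem_str(). A purely illustrative pure-Python sketch of that idea, with Cell and CellDict as made-up stand-ins for the interpreter-level classes:

    class Cell(object):
        def __init__(self, value):
            self.value = value

    class CellDict(object):
        def __init__(self):
            self._cells = {}                   # key -> Cell

        def setdefault(self, key, default):
            cell = self._cells.get(key)        # one lookup...
            if cell is not None:
                return cell.value              # ...reused on the hit path, so
                                               # the key is not hashed again
            self._cells[key] = Cell(default)   # only a miss pays for an insert
            return default

    d = CellDict()
    assert d.setdefault("x", 1) == 1
    assert d.setdefault("x", 2) == 1           # second call hits the cached cell
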
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py
--- a/pypy/objspace/std/floatobject.py
+++ b/pypy/objspace/std/floatobject.py
@@ -170,15 +170,11 @@
return self.floatval
def int(self, space):
+ # this is a speed-up only, for space.int(w_float).
if (type(self) is not W_FloatObject and
space.is_overloaded(self, space.w_float, '__int__')):
return W_Root.int(self, space)
- try:
- value = ovfcheck_float_to_int(self.floatval)
- except OverflowError:
- return newlong_from_float(space, self.floatval)
- else:
- return space.newint(value)
+ return self.descr_trunc(space)
def is_w(self, space, w_other):
from rpython.rlib.longlong2float import float2longlong
@@ -417,11 +413,10 @@
return W_FloatObject(a)
def descr_trunc(self, space):
- whole = math.modf(self.floatval)[1]
try:
- value = ovfcheck_float_to_int(whole)
+ value = ovfcheck_float_to_int(self.floatval)
except OverflowError:
- return newlong_from_float(space, whole)
+ return newlong_from_float(space, self.floatval)
else:
return space.newint(value)
@@ -656,7 +651,7 @@
__hash__ = interp2app(W_FloatObject.descr_hash),
__format__ = interp2app(W_FloatObject.descr_format),
__bool__ = interp2app(W_FloatObject.descr_bool),
- __int__ = interp2app(W_FloatObject.int),
+ __int__ = interp2app(W_FloatObject.descr_trunc),
__float__ = interp2app(W_FloatObject.descr_float),
__trunc__ = interp2app(W_FloatObject.descr_trunc),
__neg__ = interp2app(W_FloatObject.descr_neg),
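
[Editor's illustration] At the app level, the floatobject.py change above keeps __int__ and __trunc__ agreeing: both truncate toward zero, and values that do not fit a machine word fall back to an arbitrary-precision integer instead of raising OverflowError. A short illustration:

    import math

    assert int(3.9) == 3 and int(-3.9) == -3   # truncation toward zero
    assert math.trunc(3.9) == int(3.9)         # __trunc__ and __int__ agree

    big = int(1e30)                            # far beyond a 64-bit word
    assert big > 10 ** 29
    assert float(big) == 1e30                  # converted exactly, no overflow
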
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py
--- a/pypy/objspace/std/setobject.py
+++ b/pypy/objspace/std/setobject.py
@@ -1056,7 +1056,7 @@
if self is w_other.strategy:
strategy = self
if w_set.length() > w_other.length():
- # swap operants
+ # swap operands
storage = self._intersect_unwrapped(w_other, w_set)
else:
storage = self._intersect_unwrapped(w_set, w_other)
@@ -1066,7 +1066,7 @@
else:
strategy = self.space.fromcache(ObjectSetStrategy)
if w_set.length() > w_other.length():
- # swap operants
+ # swap operands
storage = w_other.strategy._intersect_wrapped(w_other, w_set)
else:
storage = self._intersect_wrapped(w_set, w_other)
diff --git a/pypy/objspace/std/test/test_celldict.py
b/pypy/objspace/std/test/test_celldict.py
--- a/pypy/objspace/std/test/test_celldict.py
+++ b/pypy/objspace/std/test/test_celldict.py
@@ -114,22 +114,11 @@
class TestModuleDictImplementation(BaseTestRDictImplementation):
StrategyClass = ModuleDictStrategy
-
-class
TestModuleDictImplementationWithBuiltinNames(BaseTestRDictImplementation):
- StrategyClass = ModuleDictStrategy
-
- string = "int"
- string2 = "isinstance"
-
+ setdefault_hash_count = 2
class TestDevolvedModuleDictImplementation(BaseTestDevolvedDictImplementation):
StrategyClass = ModuleDictStrategy
-
-class
TestDevolvedModuleDictImplementationWithBuiltinNames(BaseTestDevolvedDictImplementation):
- StrategyClass = ModuleDictStrategy
-
- string = "int"
- string2 = "isinstance"
+ setdefault_hash_count = 2
class AppTestCellDict(object):
diff --git a/pypy/objspace/std/test/test_dictmultiobject.py
b/pypy/objspace/std/test/test_dictmultiobject.py
--- a/pypy/objspace/std/test/test_dictmultiobject.py
+++ b/pypy/objspace/std/test/test_dictmultiobject.py
@@ -1305,6 +1305,9 @@
impl.setitem(x, x)
assert type(impl.get_strategy()) is ObjectDictStrategy
+
+ setdefault_hash_count = 1
+
def test_setdefault_fast(self):
on_pypy = "__pypy__" in sys.builtin_module_names
impl = self.impl
@@ -1312,11 +1315,11 @@
x = impl.setdefault(key, 1)
assert x == 1
if on_pypy and self.FakeString is FakeString:
- assert key.hash_count == 1
+ assert key.hash_count == self.setdefault_hash_count
x = impl.setdefault(key, 2)
assert x == 1
if on_pypy and self.FakeString is FakeString:
- assert key.hash_count == 2
+ assert key.hash_count == self.setdefault_hash_count + 1
def test_fallback_evil_key(self):
class F(object):
diff --git a/pypy/objspace/std/test/test_kwargsdict.py
b/pypy/objspace/std/test/test_kwargsdict.py
--- a/pypy/objspace/std/test/test_kwargsdict.py
+++ b/pypy/objspace/std/test/test_kwargsdict.py
@@ -119,10 +119,16 @@
def test_delitem(self):
pass # delitem devolves for now
+ def test_setdefault_fast(self):
+ pass # not based on hashing at all
+
class TestDevolvedKwargsDictImplementation(BaseTestDevolvedDictImplementation):
get_impl = get_impl
StrategyClass = KwargsDictStrategy
+ def test_setdefault_fast(self):
+ pass # not based on hashing at all
+
class AppTestKwargsDictStrategy(object):
def setup_class(cls):
diff --git a/pypy/tool/import_cffi.py b/pypy/tool/import_cffi.py
--- a/pypy/tool/import_cffi.py
+++ b/pypy/tool/import_cffi.py
@@ -7,11 +7,18 @@
import sys, py
-def mangle(lines):
- yield "# Generated by pypy/tool/import_cffi.py\n"
- for line in lines:
- line = line.replace('from testing', 'from
pypy.module.test_lib_pypy.cffi_tests')
- yield line
+def mangle(lines, ext):
+ if ext == '.py':
+ yield "# Generated by pypy/tool/import_cffi.py\n"
+ for line in lines:
+ line = line.replace('from testing', 'from
pypy.module.test_lib_pypy.cffi_tests')
+ yield line
+ elif ext in ('.c', '.h'):
+ yield "/* Generated by pypy/tool/import_cffi.py */\n"
+ for line in lines:
+ yield line
+ else:
+ raise AssertionError(ext)
def main(cffi_dir):
cffi_dir = py.path.local(cffi_dir)
@@ -23,10 +30,12 @@
for p in (list(cffi_dir.join('cffi').visit(fil='*.py')) +
list(cffi_dir.join('cffi').visit(fil='*.h'))):
cffi_dest.join('..', p.relto(cffi_dir)).write(p.read())
- for p in cffi_dir.join('testing').visit(fil='*.py'):
+ for p in (list(cffi_dir.join('testing').visit(fil='*.py')) +
+ list(cffi_dir.join('testing').visit(fil='*.h')) +
+ list(cffi_dir.join('testing').visit(fil='*.c'))):
path = test_dest.join(p.relto(cffi_dir.join('testing')))
path.join('..').ensure(dir=1)
- path.write(''.join(mangle(p.readlines())))
+ path.write(''.join(mangle(p.readlines(), p.ext)))
if __name__ == '__main__':
if len(sys.argv) != 2:
diff --git a/rpython/jit/backend/test/runner_test.py
b/rpython/jit/backend/test/runner_test.py
--- a/rpython/jit/backend/test/runner_test.py
+++ b/rpython/jit/backend/test/runner_test.py
@@ -548,7 +548,9 @@
if cpu.supports_floats:
def func(f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9):
+ seen.append((f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9))
return f0 + f1 + f2 + f3 + f4 + f5 + f6 + float(i0 + i1) + f7
+ f8 + f9
+ seen = []
F = lltype.Float
I = lltype.Signed
FUNC = self.FuncType([F] * 7 + [I] + [F] + [I] + [F]* 2, F)
@@ -557,13 +559,15 @@
calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
EffectInfo.MOST_GENERAL)
funcbox = self.get_funcbox(cpu, func_ptr)
- args = ([boxfloat(.1) for i in range(7)] +
- [InputArgInt(1), boxfloat(.2), InputArgInt(2),
boxfloat(.3),
- boxfloat(.4)])
+ args = ([boxfloat(.0), boxfloat(.1), boxfloat(.2), boxfloat(.3),
+ boxfloat(.4), boxfloat(.5), boxfloat(.6),
+ InputArgInt(1), boxfloat(.7), InputArgInt(2),
boxfloat(.8),
+ boxfloat(.9)])
res = self.execute_operation(rop.CALL_F,
[funcbox] + args,
'float', descr=calldescr)
- assert abs(longlong.getrealfloat(res) - 4.6) < 0.0001
+ assert seen == [(.0, .1, .2, .3, .4, .5, .6, 1, .7, 2, .8, .9)]
+ assert abs(longlong.getrealfloat(res) - 7.5) < 0.0001
def test_call_many_arguments(self):
# Test calling a function with a large number of arguments (more than
diff --git a/rpython/memory/gctransform/boehm.py
b/rpython/memory/gctransform/boehm.py
--- a/rpython/memory/gctransform/boehm.py
+++ b/rpython/memory/gctransform/boehm.py
@@ -156,9 +156,9 @@
resulttype = lltype.Signed)
hop.genop('int_invert', [v_int], resultvar=hop.spaceop.result)
- def gcheader_initdata(self, defnode):
+ def gcheader_initdata(self, obj):
hdr = lltype.malloc(self.HDR, immortal=True)
- hdr.hash = lltype.identityhash_nocache(defnode.obj._as_ptr())
+ hdr.hash = lltype.identityhash_nocache(obj._as_ptr())
return hdr._obj
diff --git a/rpython/memory/gctransform/framework.py
b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -1479,8 +1479,8 @@
resulttype=llmemory.Address)
llops.genop('raw_memclear', [v_adr, v_totalsize])
- def gcheader_initdata(self, defnode):
- o = lltype.top_container(defnode.obj)
+ def gcheader_initdata(self, obj):
+ o = lltype.top_container(obj)
needs_hash = self.get_prebuilt_hash(o) is not None
hdr = self.gc_header_for(o, needs_hash)
return hdr._obj
diff --git a/rpython/memory/gctransform/refcounting.py
b/rpython/memory/gctransform/refcounting.py
--- a/rpython/memory/gctransform/refcounting.py
+++ b/rpython/memory/gctransform/refcounting.py
@@ -286,6 +286,6 @@
hop.genop("direct_call", [self.identityhash_ptr, v_adr],
resultvar=hop.spaceop.result)
- def gcheader_initdata(self, defnode):
- top = lltype.top_container(defnode.obj)
+ def gcheader_initdata(self, obj):
+ top = lltype.top_container(obj)
return self.gcheaderbuilder.header_of_object(top)._obj
diff --git a/rpython/rtyper/lltypesystem/rstr.py
b/rpython/rtyper/lltypesystem/rstr.py
--- a/rpython/rtyper/lltypesystem/rstr.py
+++ b/rpython/rtyper/lltypesystem/rstr.py
@@ -717,10 +717,7 @@
return cls.ll_count_char(s1, s2.chars[0], start, end)
res = cls.ll_search(s1, s2, start, end, FAST_COUNT)
- # For a few cases ll_search can return -1 to indicate an "impossible"
- # condition for a string match, count just returns 0 in these cases.
- if res < 0:
- res = 0
+ assert res >= 0
return res
@staticmethod
@@ -741,6 +738,8 @@
w = n - m
if w < 0:
+ if mode == FAST_COUNT:
+ return 0
return -1
mlast = m - 1
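
[Editor's illustration] The rstr.py change above makes the FAST_COUNT search mode return 0 directly when the needle is longer than the haystack, so ll_count no longer needs to clamp a -1 result. The app-level contract it preserves:

    assert "ab".count("abc") == 0      # count of an impossible match is 0
    assert "ab".find("abc") == -1      # ...while find still signals -1
    assert "abcabc".count("abc") == 2
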
diff --git a/rpython/rtyper/test/test_rdict.py
b/rpython/rtyper/test/test_rdict.py
--- a/rpython/rtyper/test/test_rdict.py
+++ b/rpython/rtyper/test/test_rdict.py
@@ -1,25 +1,62 @@
+import sys
+from contextlib import contextmanager
+import signal
+
from rpython.translator.translator import TranslationContext
+from rpython.annotator.model import (
+ SomeInteger, SomeString, SomeChar, SomeUnicodeString, SomeUnicodeCodePoint)
+from rpython.annotator.dictdef import DictKey, DictValue
from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.rtyper import rint
-from rpython.rtyper.lltypesystem import rdict, rstr
+from rpython.rtyper.lltypesystem import rdict
from rpython.rtyper.test.tool import BaseRtypingTest
from rpython.rlib.objectmodel import r_dict
from rpython.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong
import py
-py.log.setconsumer("rtyper", py.log.STDOUT)
+from hypothesis import settings
+from hypothesis.strategies import (
+ builds, sampled_from, binary, just, integers, text, characters, tuples)
+from hypothesis.stateful import GenericStateMachine, run_state_machine_as_test
-def not_really_random():
- """A random-ish generator, which also generates nice patterns from time to
time.
- Could be useful to detect problems associated with specific usage
patterns."""
- import random
- x = random.random()
- print 'random seed: %r' % (x,)
- for i in range(12000):
- r = 3.4 + i/20000.0
- x = r*x - x*x
- assert 0 <= x < 4
- yield x
+def ann2strategy(s_value):
+ if isinstance(s_value, SomeChar):
+ return builds(chr, integers(min_value=0, max_value=255))
+ elif isinstance(s_value, SomeString):
+ if s_value.can_be_None:
+ return binary() | just(None)
+ else:
+ return binary()
+ elif isinstance(s_value, SomeUnicodeCodePoint):
+ return characters()
+ elif isinstance(s_value, SomeUnicodeString):
+ if s_value.can_be_None:
+ return text() | just(None)
+ else:
+ return text()
+ elif isinstance(s_value, SomeInteger):
+ return integers(min_value=~sys.maxint, max_value=sys.maxint)
+ else:
+ raise TypeError("Cannot convert annotation %s to a strategy" % s_value)
+
+
+if hasattr(signal, 'alarm'):
+ @contextmanager
+ def signal_timeout(n):
+ """A flaky context manager that throws an exception if the body of the
+ `with` block runs for longer than `n` seconds.
+ """
+ def handler(signum, frame):
+ raise RuntimeError('timeout')
+ signal.signal(signal.SIGALRM, handler)
+ signal.alarm(n)
+ try:
+ yield
+ finally:
+ signal.alarm(0)
+else:
+ @contextmanager
+ def signal_timeout(n):
+ yield
class BaseTestRDict(BaseRtypingTest):
@@ -199,9 +236,8 @@
def test_dict_copy(self):
def func():
- # XXX this does not work if we use chars, only!
dic = self.newdict()
- dic['ab'] = 1
+ dic['a'] = 1
dic['b'] = 2
d2 = dic.copy()
ok = 1
@@ -999,33 +1035,11 @@
s_BA_dic = s.items[1]
r_AB_dic = rtyper.getrepr(s_AB_dic)
- r_BA_dic = rtyper.getrepr(s_AB_dic)
+ r_BA_dic = rtyper.getrepr(s_BA_dic)
assert r_AB_dic.lowleveltype == r_BA_dic.lowleveltype
- def test_dict_resize(self):
- py.test.skip("test written for non-ordered dicts, update or kill")
- # XXX we no longer automatically resize on 'del'. We need to
- # hack a bit in this test to trigger a resize by continuing to
- # fill the dict's table while keeping the actual size very low
- # in order to force a resize to shrink the table back
- def func(want_empty):
- d = self.newdict()
- for i in range(rdict.DICT_INITSIZE << 1):
- d[chr(ord('a') + i)] = i
- if want_empty:
- for i in range(rdict.DICT_INITSIZE << 1):
- del d[chr(ord('a') + i)]
- for i in range(rdict.DICT_INITSIZE << 3):
- d[chr(ord('A') - i)] = i
- del d[chr(ord('A') - i)]
- return d
- res = self.interpret(func, [0])
- assert len(res.entries) > rdict.DICT_INITSIZE
- res = self.interpret(func, [1])
- assert len(res.entries) == rdict.DICT_INITSIZE
-
def test_opt_dummykeymarker(self):
def f():
d = {"hello": None}
@@ -1117,183 +1131,131 @@
DICT = lltype.typeOf(llres.item1)
assert sorted(DICT.TO.entries.TO.OF._flds) == ['f_hash', 'key',
'value']
- def test_deleted_entry_reusage_with_colliding_hashes(self):
- py.test.skip("test written for non-ordered dicts, update or kill")
- def lowlevelhash(value):
- p = rstr.mallocstr(len(value))
- for i in range(len(value)):
- p.chars[i] = value[i]
- return rstr.LLHelpers.ll_strhash(p)
- def func(c1, c2):
- c1 = chr(c1)
- c2 = chr(c2)
- d = self.newdict()
- d[c1] = 1
- d[c2] = 2
- del d[c1]
- return d[c2]
+class Action(object):
+ def __init__(self, method, args):
+ self.method = method
+ self.args = args
- char_by_hash = {}
- base = rdict.DICT_INITSIZE
- for y in range(0, 256):
- y = chr(y)
- y_hash = lowlevelhash(y) % base
- char_by_hash.setdefault(y_hash, []).append(y)
+ def execute(self, space):
+ getattr(space, self.method)(*self.args)
- x, y = char_by_hash[0][:2] # find a collision
+ def __repr__(self):
+ return "space.%s(%s)" % (self.method, ', '.join(map(repr, self.args)))
- res = self.interpret(func, [ord(x), ord(y)])
- assert res == 2
+class PseudoRTyper:
+ cache_dummy_values = {}
- def func2(c1, c2):
- c1 = chr(c1)
- c2 = chr(c2)
- d = self.newdict()
- d[c1] = 1
- d[c2] = 2
- del d[c1]
- d[c1] = 3
- return d
+# XXX: None keys crash the test, but translation sort-of allows it
+keytypes_s = [
+ SomeString(), SomeInteger(), SomeChar(),
+ SomeUnicodeString(), SomeUnicodeCodePoint()]
+st_keys = sampled_from(keytypes_s)
+st_values = sampled_from(keytypes_s + [SomeString(can_be_None=True)])
- res = self.interpret(func2, [ord(x), ord(y)])
- for i in range(len(res.entries)):
- assert not (res.entries.everused(i) and not res.entries.valid(i))
+class MappingSpace(object):
+ def __init__(self, s_key, s_value):
+ self.s_key = s_key
+ self.s_value = s_value
+ rtyper = PseudoRTyper()
+ r_key = s_key.rtyper_makerepr(rtyper)
+ r_value = s_value.rtyper_makerepr(rtyper)
+ dictrepr = self.MappingRepr(rtyper, r_key, r_value,
+ DictKey(None, s_key),
+ DictValue(None, s_value))
+ dictrepr.setup()
+ self.l_dict = self.newdict(dictrepr)
+ self.reference = self.new_reference()
+ self.ll_key = r_key.convert_const
+ self.ll_value = r_value.convert_const
- def func3(c0, c1, c2, c3, c4, c5, c6, c7):
- d = self.newdict()
- c0 = chr(c0) ; d[c0] = 1; del d[c0]
- c1 = chr(c1) ; d[c1] = 1; del d[c1]
- c2 = chr(c2) ; d[c2] = 1; del d[c2]
- c3 = chr(c3) ; d[c3] = 1; del d[c3]
- c4 = chr(c4) ; d[c4] = 1; del d[c4]
- c5 = chr(c5) ; d[c5] = 1; del d[c5]
- c6 = chr(c6) ; d[c6] = 1; del d[c6]
- c7 = chr(c7) ; d[c7] = 1; del d[c7]
- return d
+ def setitem(self, key, value):
+ ll_key = self.ll_key(key)
+ ll_value = self.ll_value(value)
+ self.ll_setitem(self.l_dict, ll_key, ll_value)
+ self.reference[key] = value
+ assert self.ll_contains(self.l_dict, ll_key)
- if rdict.DICT_INITSIZE != 8:
- py.test.skip("make dict tests more indepdent from initsize")
- res = self.interpret(func3, [ord(char_by_hash[i][0])
- for i in range(rdict.DICT_INITSIZE)])
- count_frees = 0
- for i in range(len(res.entries)):
- if not res.entries.everused(i):
- count_frees += 1
- assert count_frees >= 3
+ def delitem(self, key):
+ ll_key = self.ll_key(key)
+ self.ll_delitem(self.l_dict, ll_key)
+ del self.reference[key]
+ assert not self.ll_contains(self.l_dict, ll_key)
-class TestStress:
+ def copydict(self):
+ self.l_dict = self.ll_copy(self.l_dict)
+ assert self.ll_len(self.l_dict) == len(self.reference)
- def test_stress(self):
- from rpython.annotator.dictdef import DictKey, DictValue
- from rpython.annotator import model as annmodel
- dictrepr = rdict.DictRepr(None, rint.signed_repr, rint.signed_repr,
- DictKey(None, annmodel.SomeInteger()),
- DictValue(None, annmodel.SomeInteger()))
- dictrepr.setup()
- l_dict = rdict.ll_newdict(dictrepr.DICT)
- referencetable = [None] * 400
- referencelength = 0
- value = 0
+ def cleardict(self):
+ self.ll_clear(self.l_dict)
+ self.reference.clear()
+ assert self.ll_len(self.l_dict) == 0
- def complete_check():
- for n, refvalue in zip(range(len(referencetable)), referencetable):
- try:
- gotvalue = rdict.ll_dict_getitem(l_dict, n)
- except KeyError:
- assert refvalue is None
- else:
- assert gotvalue == refvalue
+ def fullcheck(self):
+ assert self.ll_len(self.l_dict) == len(self.reference)
+ for key, value in self.reference.iteritems():
+ assert (self.ll_getitem(self.l_dict, self.ll_key(key)) ==
+ self.ll_value(value))
- for x in not_really_random():
- n = int(x*100.0) # 0 <= x < 400
- op = repr(x)[-1]
- if op <= '2' and referencetable[n] is not None:
- rdict.ll_dict_delitem(l_dict, n)
- referencetable[n] = None
- referencelength -= 1
- elif op <= '6':
- rdict.ll_dict_setitem(l_dict, n, value)
- if referencetable[n] is None:
- referencelength += 1
- referencetable[n] = value
- value += 1
- else:
- try:
- gotvalue = rdict.ll_dict_getitem(l_dict, n)
- except KeyError:
- assert referencetable[n] is None
- else:
- assert gotvalue == referencetable[n]
- if 1.38 <= x <= 1.39:
- complete_check()
- print 'current dict length:', referencelength
- assert l_dict.num_items == referencelength
- complete_check()
+class MappingSM(GenericStateMachine):
+ def __init__(self):
+ self.space = None
- def test_stress_2(self):
- yield self.stress_combination, True, False
- yield self.stress_combination, False, True
- yield self.stress_combination, False, False
- yield self.stress_combination, True, True
+ def st_setitem(self):
+ return builds(Action,
+ just('setitem'), tuples(self.st_keys, self.st_values))
- def stress_combination(self, key_can_be_none, value_can_be_none):
- from rpython.rtyper.lltypesystem.rstr import string_repr
- from rpython.annotator.dictdef import DictKey, DictValue
- from rpython.annotator import model as annmodel
+ def st_updateitem(self):
+ return builds(Action,
+ just('setitem'),
+ tuples(sampled_from(self.space.reference), self.st_values))
- print
- print "Testing combination with can_be_None: keys %s, values %s" % (
- key_can_be_none, value_can_be_none)
+ def st_delitem(self):
+ return builds(Action,
+ just('delitem'), tuples(sampled_from(self.space.reference)))
- class PseudoRTyper:
- cache_dummy_values = {}
- dictrepr = rdict.DictRepr(PseudoRTyper(), string_repr, string_repr,
- DictKey(None, annmodel.SomeString(key_can_be_none)),
- DictValue(None, annmodel.SomeString(value_can_be_none)))
- dictrepr.setup()
- print dictrepr.lowleveltype
- for key, value in dictrepr.DICTENTRY._adtmeths.items():
- print ' %s = %s' % (key, value)
- l_dict = rdict.ll_newdict(dictrepr.DICT)
- referencetable = [None] * 400
- referencelength = 0
- values = not_really_random()
- keytable = [string_repr.convert_const("foo%d" % n)
- for n in range(len(referencetable))]
+ def steps(self):
+ if not self.space:
+ return builds(Action, just('setup'), tuples(st_keys, st_values))
+ global_actions = [Action('copydict', ()), Action('cleardict', ())]
+ if self.space.reference:
+ return (
+ self.st_setitem() | sampled_from(global_actions) |
+ self.st_updateitem() | self.st_delitem())
+ else:
+ return (self.st_setitem() | sampled_from(global_actions))
- def complete_check():
- for n, refvalue in zip(range(len(referencetable)), referencetable):
- try:
- gotvalue = rdict.ll_dict_getitem(l_dict, keytable[n])
- except KeyError:
- assert refvalue is None
- else:
- assert gotvalue == refvalue
+ def execute_step(self, action):
+ if action.method == 'setup':
+ self.space = self.Space(*action.args)
+ self.st_keys = ann2strategy(self.space.s_key)
+ self.st_values = ann2strategy(self.space.s_value)
+ return
+ with signal_timeout(1): # catches infinite loops
+ action.execute(self.space)
- for x in not_really_random():
- n = int(x*100.0) # 0 <= x < 400
- op = repr(x)[-1]
- if op <= '2' and referencetable[n] is not None:
- rdict.ll_dict_delitem(l_dict, keytable[n])
- referencetable[n] = None
- referencelength -= 1
- elif op <= '6':
- ll_value = string_repr.convert_const(str(values.next()))
- rdict.ll_dict_setitem(l_dict, keytable[n], ll_value)
- if referencetable[n] is None:
- referencelength += 1
- referencetable[n] = ll_value
- else:
- try:
- gotvalue = rdict.ll_dict_getitem(l_dict, keytable[n])
- except KeyError:
- assert referencetable[n] is None
- else:
- assert gotvalue == referencetable[n]
- if 1.38 <= x <= 1.39:
- complete_check()
- print 'current dict length:', referencelength
- assert l_dict.num_items == referencelength
- complete_check()
+ def teardown(self):
+ if self.space:
+ self.space.fullcheck()
+
+class DictSpace(MappingSpace):
+ MappingRepr = rdict.DictRepr
+ new_reference = dict
+ ll_getitem = staticmethod(rdict.ll_dict_getitem)
+ ll_setitem = staticmethod(rdict.ll_dict_setitem)
+ ll_delitem = staticmethod(rdict.ll_dict_delitem)
+ ll_len = staticmethod(rdict.ll_dict_len)
+ ll_contains = staticmethod(rdict.ll_contains)
+ ll_copy = staticmethod(rdict.ll_copy)
+ ll_clear = staticmethod(rdict.ll_clear)
+
+ def newdict(self, repr):
+ return rdict.ll_newdict(repr.DICT)
+
+class DictSM(MappingSM):
+ Space = DictSpace
+
+def test_hypothesis():
+ run_state_machine_as_test(
+ DictSM, settings(max_examples=500, stateful_step_count=100))
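
[Editor's illustration] The rewritten test above drives the RPython dict and a plain Python dict with the same generated operations and checks that they agree. A much smaller sketch of that style, using Hypothesis' rule-based API instead of GenericStateMachine, with an ordinary dict standing in as the "implementation" purely to show the shape of such a test:

    from hypothesis import settings
    from hypothesis import strategies as st
    from hypothesis.stateful import (
        RuleBasedStateMachine, rule, run_state_machine_as_test)

    class DictComparison(RuleBasedStateMachine):
        def __init__(self):
            super(DictComparison, self).__init__()
            self.under_test = {}      # stand-in for the dict being tested
            self.reference = {}       # trusted model

        @rule(key=st.text(), value=st.integers())
        def set_item(self, key, value):
            self.under_test[key] = value
            self.reference[key] = value
            assert self.under_test[key] == self.reference[key]

        @rule(key=st.text())
        def del_item(self, key):
            if key in self.reference:
                del self.under_test[key]
                del self.reference[key]
            assert (key in self.under_test) == (key in self.reference)

    def test_dict_comparison():
        run_state_machine_as_test(
            DictComparison,
            settings=settings(max_examples=50, stateful_step_count=20))
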
diff --git a/rpython/rtyper/test/test_rordereddict.py
b/rpython/rtyper/test/test_rordereddict.py
--- a/rpython/rtyper/test/test_rordereddict.py
+++ b/rpython/rtyper/test/test_rordereddict.py
@@ -1,14 +1,18 @@
-
import py
from collections import OrderedDict
+from hypothesis import settings
+from hypothesis.stateful import run_state_machine_as_test
+
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem import rordereddict, rstr
from rpython.rlib.rarithmetic import intmask
from rpython.rtyper.annlowlevel import llstr, hlstr
-from rpython.rtyper.test.test_rdict import BaseTestRDict
+from rpython.rtyper.test.test_rdict import (
+ BaseTestRDict, MappingSpace, MappingSM)
from rpython.rlib import objectmodel
+rodct = rordereddict
def get_indexes(ll_d):
return ll_d.indexes._obj.container._as_ptr()
@@ -330,124 +334,48 @@
assert res == 6
-class TestStress:
+class ODictSpace(MappingSpace):
+ MappingRepr = rodct.OrderedDictRepr
+ new_reference = OrderedDict
+ ll_getitem = staticmethod(rodct.ll_dict_getitem)
+ ll_setitem = staticmethod(rodct.ll_dict_setitem)
+ ll_delitem = staticmethod(rodct.ll_dict_delitem)
+ ll_len = staticmethod(rodct.ll_dict_len)
+ ll_contains = staticmethod(rodct.ll_dict_contains)
+ ll_copy = staticmethod(rodct.ll_dict_copy)
+ ll_clear = staticmethod(rodct.ll_dict_clear)
- def test_stress(self):
- from rpython.annotator.dictdef import DictKey, DictValue
- from rpython.annotator import model as annmodel
- from rpython.rtyper import rint
- from rpython.rtyper.test.test_rdict import not_really_random
- rodct = rordereddict
- dictrepr = rodct.OrderedDictRepr(
- None, rint.signed_repr, rint.signed_repr,
- DictKey(None, annmodel.SomeInteger()),
- DictValue(None, annmodel.SomeInteger()))
- dictrepr.setup()
- l_dict = rodct.ll_newdict(dictrepr.DICT)
- referencetable = [None] * 400
- referencelength = 0
- value = 0
+ def newdict(self, repr):
+ return rodct.ll_newdict(repr.DICT)
- def complete_check():
- for n, refvalue in zip(range(len(referencetable)), referencetable):
- try:
- gotvalue = rodct.ll_dict_getitem(l_dict, n)
- except KeyError:
- assert refvalue is None
- else:
- assert gotvalue == refvalue
+ def get_keys(self):
+ DICT = lltype.typeOf(self.l_dict).TO
+ ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
+ ll_iter = rordereddict.ll_dictiter(ITER, self.l_dict)
+ ll_dictnext = rordereddict._ll_dictnext
+ keys_ll = []
+ while True:
+ try:
+ num = ll_dictnext(ll_iter)
+ keys_ll.append(self.l_dict.entries[num].key)
+ except StopIteration:
+ break
+ return keys_ll
- for x in not_really_random():
- n = int(x*100.0) # 0 <= x < 400
- op = repr(x)[-1]
- if op <= '2' and referencetable[n] is not None:
- rodct.ll_dict_delitem(l_dict, n)
- referencetable[n] = None
- referencelength -= 1
- elif op <= '6':
- rodct.ll_dict_setitem(l_dict, n, value)
- if referencetable[n] is None:
- referencelength += 1
- referencetable[n] = value
- value += 1
- else:
- try:
- gotvalue = rodct.ll_dict_getitem(l_dict, n)
- except KeyError:
- assert referencetable[n] is None
- else:
- assert gotvalue == referencetable[n]
- if 1.38 <= x <= 1.39:
- complete_check()
- print 'current dict length:', referencelength
- assert l_dict.num_live_items == referencelength
- complete_check()
+ def fullcheck(self):
+ # overridden to also check key order
+ assert self.ll_len(self.l_dict) == len(self.reference)
+ keys_ll = self.get_keys()
+ assert len(keys_ll) == len(self.reference)
+ for key, ll_key in zip(self.reference, keys_ll):
+ assert self.ll_key(key) == ll_key
+ assert (self.ll_getitem(self.l_dict, self.ll_key(key)) ==
+ self.ll_value(self.reference[key]))
- def test_stress_2(self):
- yield self.stress_combination, True, False
- yield self.stress_combination, False, True
- yield self.stress_combination, False, False
- yield self.stress_combination, True, True
- def stress_combination(self, key_can_be_none, value_can_be_none):
- from rpython.rtyper.lltypesystem.rstr import string_repr
- from rpython.annotator.dictdef import DictKey, DictValue
- from rpython.annotator import model as annmodel
- from rpython.rtyper.test.test_rdict import not_really_random
- rodct = rordereddict
+class ODictSM(MappingSM):
+ Space = ODictSpace
- print
- print "Testing combination with can_be_None: keys %s, values %s" % (
- key_can_be_none, value_can_be_none)
-
- class PseudoRTyper:
- cache_dummy_values = {}
- dictrepr = rodct.OrderedDictRepr(
- PseudoRTyper(), string_repr, string_repr,
- DictKey(None, annmodel.SomeString(key_can_be_none)),
- DictValue(None, annmodel.SomeString(value_can_be_none)))
- dictrepr.setup()
- print dictrepr.lowleveltype
- #for key, value in dictrepr.DICTENTRY._adtmeths.items():
- # print ' %s = %s' % (key, value)
- l_dict = rodct.ll_newdict(dictrepr.DICT)
- referencetable = [None] * 400
- referencelength = 0
- values = not_really_random()
- keytable = [string_repr.convert_const("foo%d" % n)
- for n in range(len(referencetable))]
-
- def complete_check():
- for n, refvalue in zip(range(len(referencetable)), referencetable):
- try:
- gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n])
- except KeyError:
- assert refvalue is None
- else:
- assert gotvalue == refvalue
-
- for x in not_really_random():
- n = int(x*100.0) # 0 <= x < 400
- op = repr(x)[-1]
- if op <= '2' and referencetable[n] is not None:
- rodct.ll_dict_delitem(l_dict, keytable[n])
- referencetable[n] = None
- referencelength -= 1
- elif op <= '6':
- ll_value = string_repr.convert_const(str(values.next()))
- rodct.ll_dict_setitem(l_dict, keytable[n], ll_value)
- if referencetable[n] is None:
- referencelength += 1
- referencetable[n] = ll_value
- else:
- try:
- gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n])
- except KeyError:
- assert referencetable[n] is None
- else:
- assert gotvalue == referencetable[n]
- if 1.38 <= x <= 1.39:
- complete_check()
- print 'current dict length:', referencelength
- assert l_dict.num_live_items == referencelength
- complete_check()
+def test_hypothesis():
+ run_state_machine_as_test(
+ ODictSM, settings(max_examples=500, stateful_step_count=100))
diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py
--- a/rpython/translator/c/node.py
+++ b/rpython/translator/c/node.py
@@ -546,7 +546,7 @@
if needs_gcheader(T):
gct = self.db.gctransformer
if gct is not None:
- self.gc_init = gct.gcheader_initdata(self)
+ self.gc_init = gct.gcheader_initdata(self.obj)
db.getcontainernode(self.gc_init)
else:
self.gc_init = None
@@ -677,7 +677,7 @@
if needs_gcheader(T):
gct = self.db.gctransformer
if gct is not None:
- self.gc_init = gct.gcheader_initdata(self)
+ self.gc_init = gct.gcheader_initdata(self.obj)
db.getcontainernode(self.gc_init)
else:
self.gc_init = None