Author: Ronan Lamy <[email protected]>
Branch: py3k-update
Changeset: r83997:c1ed6e5f8a6d
Date: 2016-04-28 01:34 +0100
http://bitbucket.org/pypy/pypy/changeset/c1ed6e5f8a6d/
Log: hg merge default (before cpyext-for-merge merge)
diff too long, truncating to 2000 out of 4376 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -20,3 +20,4 @@
5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
+3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
overly detailed
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+ or create branch vendor/stdlib-3-*
2. upgrade the files there
+ 2a. remove lib-python/2.7/ or lib-python/3/
+ 2b. copy the files from the cpython repo
+ 2c. hg add lib-python/2.7/ or lib-python/3/
+ 2d. hg remove --after
+ 2e. show copied files in cpython repo by running `hg diff --git -r v<old> -r v<new> Lib | grep '^copy \(from\|to\)'`
+ 2f. fix copies / renames manually by running `hg copy --after <from> <to>` for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
-5. update to default/py3k
+5. update to default / py3k
6. create an integration branch for the new stdlib
(just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
8. commit
10. fix issues
11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -205,15 +205,6 @@
BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
default=False),
- BoolOption("withprebuiltchar",
- "use prebuilt single-character string objects",
- default=False),
-
- BoolOption("sharesmallstr",
- "always reuse the prebuilt string objects "
- "(the empty string and potentially single-char strings)",
- default=False),
-
BoolOption("withspecialisedtuple",
"use specialised tuples",
default=False),
@@ -223,34 +214,14 @@
default=False,
requires=[("objspace.honor__builtins__", False)]),
- BoolOption("withmapdict",
- "make instances really small but slow without the JIT",
- default=False,
- requires=[("objspace.std.getattributeshortcut", True),
- ("objspace.std.withtypeversion", True),
- ]),
-
BoolOption("withliststrategies",
"enable optimized ways to store lists of primitives ",
default=True),
- BoolOption("withtypeversion",
- "version type objects when changing them",
- cmdline=None,
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
-
- BoolOption("withmethodcache",
- "try to cache method lookups",
- default=False,
- requires=[("objspace.std.withtypeversion", True),
- ("translation.rweakref", True)]),
BoolOption("withmethodcachecounter",
"try to cache methods and provide a counter in __pypy__. "
"for testing purposes only.",
- default=False,
- requires=[("objspace.std.withmethodcache", True)]),
+ default=False),
IntOption("methodcachesizeexp",
" 2 ** methodcachesizeexp is the size of the of the method
cache ",
default=11),
@@ -261,22 +232,10 @@
BoolOption("optimized_list_getitem",
"special case the 'list[integer]' expressions",
default=False),
- BoolOption("getattributeshortcut",
- "track types that override __getattribute__",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
BoolOption("newshortcut",
"cache and shortcut calling __new__ from builtin types",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
+ default=False),
- BoolOption("withidentitydict",
- "track types that override __hash__, __eq__ or __cmp__ and
use a special dict strategy for those which do not",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
]),
])
@@ -292,14 +251,10 @@
"""
# all the good optimizations for PyPy should be listed here
if level in ['2', '3', 'jit']:
- config.objspace.std.suggest(withmethodcache=True)
- config.objspace.std.suggest(withprebuiltchar=True)
config.objspace.std.suggest(intshortcut=True)
config.objspace.std.suggest(optimized_list_getitem=True)
- config.objspace.std.suggest(getattributeshortcut=True)
#config.objspace.std.suggest(newshortcut=True)
config.objspace.std.suggest(withspecialisedtuple=True)
- config.objspace.std.suggest(withidentitydict=True)
#if not IS_64_BITS:
# config.objspace.std.suggest(withsmalllong=True)
@@ -312,15 +267,13 @@
# memory-saving optimizations
if level == 'mem':
config.objspace.std.suggest(withprebuiltint=True)
- config.objspace.std.suggest(withprebuiltchar=True)
- config.objspace.std.suggest(withmapdict=True)
+ config.objspace.std.suggest(withliststrategies=True)
if not IS_64_BITS:
config.objspace.std.suggest(withsmalllong=True)
# extra optimizations with the JIT
if level == 'jit':
config.objspace.std.suggest(withcelldict=True)
- config.objspace.std.suggest(withmapdict=True)
def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py
b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
assert conf.objspace.usemodules.gc
- conf.objspace.std.withmapdict = True
- assert conf.objspace.std.withtypeversion
- conf = get_pypy_config()
- conf.objspace.std.withtypeversion = False
- py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
def test_conflicting_gcrootfinder():
conf = get_pypy_config()
conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
def test_set_pypy_opt_level():
conf = get_pypy_config()
set_pypy_opt_level(conf, '2')
- assert conf.objspace.std.getattributeshortcut
+ assert conf.objspace.std.intshortcut
conf = get_pypy_config()
set_pypy_opt_level(conf, '0')
- assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
- conf = get_pypy_config()
- conf.translation.rweakref = False
- set_pypy_opt_level(conf, '3')
-
- assert not conf.objspace.std.withtypeversion
- assert not conf.objspace.std.withmethodcache
+ assert not conf.objspace.std.intshortcut
def test_check_documentation():
def check_file_exists(fn):
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -102,15 +102,15 @@
apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
- tk-dev
+ tk-dev libgc-dev
For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
On Fedora::
- yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
- lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
- (XXX plus the Febora version of libgdbm-dev and tk-dev)
+ dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+ gdbm-devel
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt
b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt
b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``. This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt
b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt
b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt
b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt
b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
wrappers. On PyPy we can't tell the difference, so
``ismethod([].__add__) == ismethod(list.__add__) == True``.
+* in CPython, the built-in types have attributes that can be
+ implemented in various ways. Depending on the way, if you try to
+ write to (or delete) a read-only (or undeletable) attribute, you get
+ either a ``TypeError`` or an ``AttributeError``. PyPy tries to
+ strike some middle ground between full consistency and full
+ compatibility here. This means that a few corner cases don't raise
+ the same exception, like ``del (lambda:None).__closure__``.
+
* in pure Python, if you write ``class A(object): def f(self): pass``
and have a subclass ``B`` which doesn't override ``f()``, then
``B.f(x)`` still checks that ``x`` is an instance of ``B``. In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
:source:`pypy/doc/discussion/` drafts of ideas and documentation
-:source:`pypy/goal/` our :ref:`main PyPy-translation scripts <translate-pypy>`
+:source:`pypy/goal/` our main PyPy-translation scripts live here
:source:`pypy/interpreter/` :doc:`bytecode interpreter
<interpreter>` and related objects
diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
--- a/pypy/doc/discussions.rst
+++ b/pypy/doc/discussions.rst
@@ -13,3 +13,4 @@
discussion/improve-rpython
discussion/ctypes-implementation
discussion/jit-profiler
+ discussion/rawrefcount
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -106,8 +106,12 @@
For information on which third party extensions work (or do not work)
with PyPy see the `compatibility wiki`_.
+For more information about how we manage refcounting semantics see
+rawrefcount_
+
.. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home
.. _cffi: http://cffi.readthedocs.org/
+.. _rawrefcount: discussion/rawrefcount.html
On which platforms does PyPy run?
diff --git a/pypy/doc/interpreter-optimizations.rst
b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -62,29 +62,37 @@
Dictionary Optimizations
~~~~~~~~~~~~~~~~~~~~~~~~
-Multi-Dicts
-+++++++++++
+Dict Strategies
+++++++++++++++++
-Multi-dicts are a special implementation of dictionaries. It became clear that
-it is very useful to *change* the internal representation of an object during
-its lifetime. Multi-dicts are a general way to do that for dictionaries: they
-provide generic support for the switching of internal representations for
-dicts.
+Dict strategies are an implementation approach for dictionaries (and lists)
+that make it possible to use a specialized representation of the dictionary's
+data, while still being able to switch back to a general representation should
+that become necessary later.
-If you just enable multi-dicts, special representations for empty dictionaries,
-for string-keyed dictionaries. In addition there are more specialized dictionary
-implementations for various purposes (see below).
+Dict strategies are always enabled; by default there are special strategies for
+dicts with just string keys, just unicode keys and just integer keys. If one of
+those specialized strategies is used, then dict lookup can use much faster
+hashing and comparison for the dict keys. There is of course also a strategy
+for general keys.
-This is now the default implementation of dictionaries in the Python interpreter.
+Identity Dicts
++++++++++++++++
-Sharing Dicts
+We also have a strategy specialized for keys that are instances of classes
+which compares "by identity", which is the default unless you override
+``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with
+new-style classes.
+
+
+Map Dicts
+++++++++++++
-Sharing dictionaries are a special representation used together with multidicts.
-This dict representation is used only for instance dictionaries and tries to
-make instance dictionaries use less memory (in fact, in the ideal case the
-memory behaviour should be mostly like that of using __slots__).
+Map dictionaries are a special representation used together with dict strategies.
+This dict strategy is used only for instance dictionaries and tries to
+make instance dictionaries use less memory (in fact, usually memory behaviour
+should be mostly like that of using ``__slots__``).
The idea is the following: Most instances of the same class have very similar
attributes, and are even adding these keys to the dictionary in the same order
@@ -95,8 +103,6 @@
dicts:
the representation of the instance dict contains only a list of values.
-A more advanced version of sharing dicts, called *map dicts,* is available
-with the :config:`objspace.std.withmapdict` option.
User Class Optimizations
@@ -114,8 +120,7 @@
base classes is changed). On subsequent lookups the cached version can be used,
as long as the instance did not shadow any of its classes attributes.
-You can enable this feature with the :config:`objspace.std.withmethodcache`
-option.
+This feature is enabled by default.
Interpreter Optimizations
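
As a rough sketch of the dict-strategy idea described above (plain Python with invented names, not PyPy's RPython classes), a container can start on a representation specialised for one key type and switch to a generic one as soon as a key of another type shows up:

    class IntStrategy(object):
        # keys are known to be ints, so cheap hashing/comparison is enough
        def accepts(self, key):
            return type(key) is int

    class ObjectStrategy(object):
        # generic fallback: arbitrary keys, generic hashing and equality
        def accepts(self, key):
            return True

    class StrategyDict(object):
        def __init__(self):
            self.strategy = IntStrategy()
            self.storage = {}

        def __setitem__(self, key, value):
            if not self.strategy.accepts(key):
                # switch to the generic representation, keeping the contents
                self.strategy = ObjectStrategy()
                self.storage = dict(self.storage)
            self.storage[key] = value

        def __getitem__(self, key):
            return self.storage[key]

    d = StrategyDict()
    d[1] = "one"      # stays on the int-specialised strategy
    d["two"] = 2      # a non-int key forces the switch
    assert d[1] == "one" and d["two"] == 2

The real strategies additionally keep the values in a type-specialised, unwrapped form; the sketch only shows the switching mechanism.
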
diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst
--- a/pypy/doc/introduction.rst
+++ b/pypy/doc/introduction.rst
@@ -1,16 +1,22 @@
What is PyPy?
=============
-In common parlance, PyPy has been used to mean two things. The first is the
-:ref:`RPython translation toolchain <rpython:index>`, which is a framework for generating
-dynamic programming language implementations. And the second is one
-particular implementation that is so generated --
-an implementation of the Python_ programming language written in
-Python itself. It is designed to be flexible and easy to experiment with.
+Historically, PyPy has been used to mean two things. The first is the
+:ref:`RPython translation toolchain <rpython:index>` for generating
+interpreters for dynamic programming languages. And the second is one
+particular implementation of Python_ produced with it. Because RPython
+uses the same syntax as Python, this generated version became known as
+a Python interpreter written in Python. It is designed to be flexible and
+easy to experiment with.
-This double usage has proven to be confusing, and we are trying to move
-away from using the word PyPy to mean both things. From now on we will
-try to use PyPy to only mean the Python implementation, and say the
+To make it more clear, we start with source code written in RPython,
+apply the RPython translation toolchain, and end up with PyPy as a
+binary executable. This executable is the Python interpreter.
+
+This double usage has proven to be confusing, so we've moved away from using
+the word PyPy to mean both toolchain and generated interpreter. Now we
+use the word PyPy to refer to the Python implementation, and explicitly
+mention the
:ref:`RPython translation toolchain <rpython:index>` when we mean the
framework.
Some older documents, presentations, papers and videos will still have the old
diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst
--- a/pypy/doc/release-5.1.0.rst
+++ b/pypy/doc/release-5.1.0.rst
@@ -3,10 +3,17 @@
========
We have released PyPy 5.1, about a month after PyPy 5.0.
-We encourage all users of PyPy to update to this version. Apart from the usual
-bug fixes, there is an ongoing effort to improve the warmup time and memory
-usage of JIT-related metadata, and we now fully support the IBM s390x
-architecture.
+
+This release includes more improvements to warmup time and memory
+requirements. We have seen about a 20% memory requirement reduction and up to
+30% warmup time improvement; more details are in the `blog post`_.
+
+We also now have `full support for the IBM s390x`_. Since this support is in
+`RPython`_, any dynamic language written using RPython, like PyPy, will
+automagically be supported on that architecture.
+
+We updated cffi_ to 1.6, and continue to improve support for the wider
+python ecosystem using the PyPy interpreter.
You can download the PyPy 5.1 release here:
@@ -26,6 +33,9 @@
.. _`modules`:
http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
.. _`numpy`: https://bitbucket.org/pypy/numpy
+.. _cffi: https://cffi.readthedocs.org
+.. _`full support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html
+.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html
What is PyPy?
=============
@@ -46,7 +56,7 @@
* big- and little-endian variants of **PPC64** running Linux,
- * **s960x** running Linux
+ * **s390x** running Linux
.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
.. _`dynamic languages`: http://pypyjs.org
@@ -74,6 +84,8 @@
* Fix a corner case in the JIT
* Fix edge cases in the cpyext refcounting-compatible semantics
+ (more work on cpyext compatibility is coming in the ``cpyext-ext``
+ branch, but isn't ready yet)
* Try harder to not emit NEON instructions on ARM processors without NEON
support
@@ -92,11 +104,17 @@
* Fix sandbox startup (a regression in 5.0)
+ * Fix possible segfault for classes with mangled mro or __metaclass__
+
+ * Fix isinstance(deque(), Hashable) on the pure python deque
+
+ * Fix an issue with forkpty()
+
* Issues reported with our previous release were resolved_ after reports
from users on
our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
#pypy
-* Numpy:
+* Numpy_:
* Implemented numpy.where for a single argument
@@ -108,6 +126,8 @@
functions exported from libpypy.so are declared in pypy_numpy.h, which is
included only when building our fork of numpy
+ * Add broadcast
+
* Performance improvements:
* Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting
@@ -119,14 +139,18 @@
* Remove the forced minor collection that occurs when rewriting the
assembler at the start of the JIT backend
+ * Port the resource module to cffi
+
* Internal refactorings:
* Use a simpler logger to speed up translation
* Drop vestiges of Python 2.5 support in testing
+ * Update rpython functions with ones needed for py3k
+
.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html
-.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html
+.. _Numpy: https://bitbucket.org/pypy/numpy
Please update, and continue to help us make PyPy better.
diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst
--- a/pypy/doc/whatsnew-5.1.0.rst
+++ b/pypy/doc/whatsnew-5.1.0.rst
@@ -60,3 +60,13 @@
Remove old uneeded numpy headers, what is left is only for testing. Also
generate pypy_numpy.h which exposes functions to directly use micronumpy
ndarray and ufuncs
+
+.. branch: rposix-for-3
+
+Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
+This updates the underlying rpython functions with the ones needed for the
+py3k branch
+
+.. branch: numpy_broadcast
+
+Add broadcast to micronumpy
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -3,14 +3,22 @@
=========================
.. this is a revision shortly after release-5.1
-.. startrev: 2180e1eaf6f6
+.. startrev: aa60332382a1
-.. branch: rposix-for-3
+.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046
-Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
-This updates the underlying rpython functions with the ones needed for the
-py3k branch
-
-.. branch: numpy_broadcast
+.. branch: gcheader-decl
-Add broadcast to micronumpy
+Reduce the size of generated C sources.
+
+
+.. branch: remove-objspace-options
+
+Remove a number of options from the build process that were never tested and
+never set. Fix a performance bug in the method cache.
+
+.. branch: bitstring
+
+JIT: use bitstrings to compress the lists of read or written descrs
+that we attach to EffectInfo. Fixes a problem we had in
+remove-objspace-options.
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -138,6 +138,7 @@
e.write_unraisable(self.space, "new_code_hook()")
def _initialize(self):
+ from pypy.objspace.std.mapdict import init_mapdict_cache
if self.co_cellvars:
argcount = self.co_argcount
argcount += self.co_kwonlyargcount
@@ -174,9 +175,7 @@
self._compute_flatcall()
- if self.space.config.objspace.std.withmapdict:
- from pypy.objspace.std.mapdict import init_mapdict_cache
- init_mapdict_cache(self)
+ init_mapdict_cache(self)
def _init_ready(self):
"This is a hook for the vmprof module, which overrides this method."
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -157,7 +157,6 @@
ec.bytecode_trace(self)
next_instr = r_uint(self.last_instr)
opcode = ord(co_code[next_instr])
- #print 'executing', self.last_instr,
bytecode_spec.method_names[opcode]
next_instr += 1
if opcode >= HAVE_ARGUMENT:
@@ -905,8 +904,7 @@
def LOAD_ATTR(self, nameindex, next_instr):
"obj.attributename"
w_obj = self.popvalue()
- if (self.space.config.objspace.std.withmapdict
- and not jit.we_are_jitted()):
+ if not jit.we_are_jitted():
from pypy.objspace.std.mapdict import LOAD_ATTR_caching
w_value = LOAD_ATTR_caching(self.getcode(), w_obj, nameindex)
else:
@@ -1537,7 +1535,6 @@
return r_uint(self.handlerposition) # jump to the handler
-
class WithBlock(FinallyBlock):
_immutable_ = True
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -98,175 +98,51 @@
# reason is that it is missing a place to store the __dict__, the slots,
# the weakref lifeline, and it typically has no interp-level __del__.
# So we create a few interp-level subclasses of W_XxxObject, which add
-# some combination of features.
-#
-# We don't build 2**4 == 16 subclasses for all combinations of requested
-# features, but limit ourselves to 6, chosen a bit arbitrarily based on
-# typical usage (case 1 is the most common kind of app-level subclasses;
-# case 2 is the memory-saving kind defined with __slots__).
-#
-# +----------------------------------------------------------------+
-# | NOTE: if withmapdict is enabled, the following doesn't apply! |
-# | Map dicts can flexibly allow any slots/__dict__/__weakref__ to |
-# | show up only when needed. In particular there is no way with |
-# | mapdict to prevent some objects from being weakrefable. |
-# +----------------------------------------------------------------+
-#
-# dict slots del weakrefable
-#
-# 1. Y N N Y UserDictWeakref
-# 2. N Y N N UserSlots
-# 3. Y Y N Y UserDictWeakrefSlots
-# 4. N Y N Y UserSlotsWeakref
-# 5. Y Y Y Y UserDictWeakrefSlotsDel
-# 6. N Y Y Y UserSlotsWeakrefDel
-#
-# Note that if the app-level explicitly requests no dict, we should not
-# provide one, otherwise storing random attributes on the app-level
-# instance would unexpectedly work. We don't care too much, though, if
-# an object is weakrefable when it shouldn't really be. It's important
-# that it has a __del__ only if absolutely needed, as this kills the
-# performance of the GCs.
-#
-# Interp-level inheritance is like this:
-#
-# W_XxxObject base
-# / \
-# 1 2
-# / \
-# 3 4
-# / \
-# 5 6
+# some combination of features. This is done using mapdict.
-def get_unique_interplevel_subclass(config, cls, hasdict, wants_slots,
- needsdel=False, weakrefable=False):
+# we need two subclasses of the app-level type, one to add mapdict, and then one
+# to add del to not slow down the GC.
+
+def get_unique_interplevel_subclass(config, cls, needsdel=False):
"NOT_RPYTHON: initialization-time only"
if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False):
needsdel = False
assert cls.typedef.acceptable_as_base_class
- key = config, cls, hasdict, wants_slots, needsdel, weakrefable
+ key = config, cls, needsdel
try:
return _subclass_cache[key]
except KeyError:
- subcls = _getusercls(config, cls, hasdict, wants_slots, needsdel,
- weakrefable)
+ # XXX can save a class if cls already has a __del__
+ if needsdel:
+ cls = get_unique_interplevel_subclass(config, cls, False)
+ subcls = _getusercls(config, cls, needsdel)
assert key not in _subclass_cache
_subclass_cache[key] = subcls
return subcls
get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo"
_subclass_cache = {}
-def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable):
+def _getusercls(config, cls, wants_del, reallywantdict=False):
+ from rpython.rlib import objectmodel
+ from pypy.objspace.std.mapdict import (BaseUserClassMapdict,
+ MapdictDictSupport, MapdictWeakrefSupport,
+ _make_storage_mixin_size_n)
typedef = cls.typedef
- if wants_dict and typedef.hasdict:
- wants_dict = False
- if config.objspace.std.withmapdict and not typedef.hasdict:
- # mapdict only works if the type does not already have a dict
- if wants_del:
- parentcls = get_unique_interplevel_subclass(config, cls, True,
True,
- False, True)
- return _usersubclswithfeature(config, parentcls, "del")
- return _usersubclswithfeature(config, cls, "user", "dict", "weakref",
"slots")
- # Forest of if's - see the comment above.
+ name = cls.__name__ + "User"
+
+ mixins_needed = [BaseUserClassMapdict, _make_storage_mixin_size_n()]
+ if reallywantdict or not typedef.hasdict:
+ # the type has no dict, mapdict to provide the dict
+ mixins_needed.append(MapdictDictSupport)
+ name += "Dict"
+ if not typedef.weakrefable:
+ # the type does not support weakrefs yet, mapdict to provide weakref
+ # support
+ mixins_needed.append(MapdictWeakrefSupport)
+ name += "Weakrefable"
if wants_del:
- if wants_dict:
- # case 5. Parent class is 3.
- parentcls = get_unique_interplevel_subclass(config, cls, True,
True,
- False, True)
- else:
- # case 6. Parent class is 4.
- parentcls = get_unique_interplevel_subclass(config, cls, False,
True,
- False, True)
- return _usersubclswithfeature(config, parentcls, "del")
- elif wants_dict:
- if wants_slots:
- # case 3. Parent class is 1.
- parentcls = get_unique_interplevel_subclass(config, cls, True,
False,
- False, True)
- return _usersubclswithfeature(config, parentcls, "slots")
- else:
- # case 1 (we need to add weakrefable unless it's already in 'cls')
- if not typedef.weakrefable:
- return _usersubclswithfeature(config, cls, "user", "dict",
"weakref")
- else:
- return _usersubclswithfeature(config, cls, "user", "dict")
- else:
- if weakrefable and not typedef.weakrefable:
- # case 4. Parent class is 2.
- parentcls = get_unique_interplevel_subclass(config, cls, False,
True,
- False, False)
- return _usersubclswithfeature(config, parentcls, "weakref")
- else:
- # case 2 (if the base is already weakrefable, case 2 == case 4)
- return _usersubclswithfeature(config, cls, "user", "slots")
-
-def _usersubclswithfeature(config, parentcls, *features):
- key = config, parentcls, features
- try:
- return _usersubclswithfeature_cache[key]
- except KeyError:
- subcls = _builduserclswithfeature(config, parentcls, *features)
- _usersubclswithfeature_cache[key] = subcls
- return subcls
-_usersubclswithfeature_cache = {}
-_allusersubcls_cache = {}
-
-def _builduserclswithfeature(config, supercls, *features):
- "NOT_RPYTHON: initialization-time only"
- name = supercls.__name__
- name += ''.join([name.capitalize() for name in features])
- body = {}
- #print '..........', name, '(', supercls.__name__, ')'
-
- def add(Proto):
- for key, value in Proto.__dict__.items():
- if (not key.startswith('__') and not key.startswith('_mixin_')
- or key == '__del__'):
- if hasattr(value, "func_name"):
- value = func_with_new_name(value, value.func_name)
- body[key] = value
-
- if (config.objspace.std.withmapdict and "dict" in features):
- from pypy.objspace.std.mapdict import BaseMapdictObject, ObjectMixin
- add(BaseMapdictObject)
- add(ObjectMixin)
- body["user_overridden_class"] = True
- features = ()
-
- if "user" in features: # generic feature needed by all subcls
-
- class Proto(object):
- user_overridden_class = True
-
- def getclass(self, space):
- return promote(self.w__class__)
-
- def setclass(self, space, w_subtype):
- # only used by descr_set___class__
- self.w__class__ = w_subtype
-
- def user_setup(self, space, w_subtype):
- self.space = space
- self.w__class__ = w_subtype
- self.user_setup_slots(w_subtype.layout.nslots)
-
- def user_setup_slots(self, nslots):
- assert nslots == 0
- add(Proto)
-
- if "weakref" in features:
- class Proto(object):
- _lifeline_ = None
- def getweakref(self):
- return self._lifeline_
- def setweakref(self, space, weakreflifeline):
- self._lifeline_ = weakreflifeline
- def delweakref(self):
- self._lifeline_ = None
- add(Proto)
-
- if "del" in features:
- parent_destructor = getattr(supercls, '__del__', None)
+ name += "Del"
+ parent_destructor = getattr(cls, '__del__', None)
def call_parent_del(self):
assert isinstance(self, subcls)
parent_destructor(self)
@@ -281,57 +157,16 @@
if parent_destructor is not None:
self.enqueue_for_destruction(self.space, call_parent_del,
'internal destructor of ')
- add(Proto)
+ mixins_needed.append(Proto)
- if "slots" in features:
- class Proto(object):
- slots_w = []
- def user_setup_slots(self, nslots):
- if nslots > 0:
- self.slots_w = [None] * nslots
- def setslotvalue(self, index, w_value):
- self.slots_w[index] = w_value
- def delslotvalue(self, index):
- if self.slots_w[index] is None:
- return False
- self.slots_w[index] = None
- return True
- def getslotvalue(self, index):
- return self.slots_w[index]
- add(Proto)
-
- if "dict" in features:
- base_user_setup = supercls.user_setup.im_func
- if "user_setup" in body:
- base_user_setup = body["user_setup"]
- class Proto(object):
- def getdict(self, space):
- return self.w__dict__
-
- def setdict(self, space, w_dict):
- self.w__dict__ = check_new_dictionary(space, w_dict)
-
- def user_setup(self, space, w_subtype):
- self.w__dict__ = space.newdict(
- instance=True)
- base_user_setup(self, space, w_subtype)
-
- add(Proto)
-
- subcls = type(name, (supercls,), body)
- _allusersubcls_cache[subcls] = True
+ class subcls(cls):
+ user_overridden_class = True
+ for base in mixins_needed:
+ objectmodel.import_from_mixin(base)
+ del subcls.base
+ subcls.__name__ = name
return subcls
-# a couple of helpers for the Proto classes above, factored out to reduce
-# the translated code size
-def check_new_dictionary(space, w_dict):
- if not space.isinstance_w(w_dict, space.w_dict):
- raise OperationError(space.w_TypeError,
- space.wrap("setting dictionary to a non-dict"))
- from pypy.objspace.std import dictmultiobject
- assert isinstance(w_dict, dictmultiobject.W_DictMultiObject)
- return w_dict
-check_new_dictionary._dont_inline_ = True
# ____________________________________________________________
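
The new `_getusercls`/`get_unique_interplevel_subclass` above build user subclasses out of mapdict mixins and memoise them per key. A rough sketch of that memoised factory pattern (plain Python with invented mixin names; the real code composes the mapdict mixins via rpython.rlib.objectmodel.import_from_mixin):

    _subclass_cache = {}

    class WeakrefMixin(object):
        _lifeline_ = None          # placeholder for weakref support

    class DelMixin(object):
        def __del__(self):
            pass                   # placeholder for "enqueue for destruction"

    def get_user_subclass(base, needs_del=False, weakrefable=False):
        key = (base, needs_del, weakrefable)
        try:
            return _subclass_cache[key]
        except KeyError:
            mixins = []
            if weakrefable:
                mixins.append(WeakrefMixin)
            if needs_del:
                mixins.append(DelMixin)
            name = base.__name__ + "User" + "".join(m.__name__ for m in mixins)
            subcls = type(name, tuple(mixins) + (base,),
                          {"user_overridden_class": True})
            _subclass_cache[key] = subcls
            return subcls

    class W_ObjectObject(object):
        pass

    cls1 = get_user_subclass(W_ObjectObject, needs_del=True)
    cls2 = get_user_subclass(W_ObjectObject, needs_del=True)
    assert cls1 is cls2            # built once per key, then reused
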
diff --git a/pypy/module/__builtin__/test/test_builtin.py
b/pypy/module/__builtin__/test/test_builtin.py
--- a/pypy/module/__builtin__/test/test_builtin.py
+++ b/pypy/module/__builtin__/test/test_builtin.py
@@ -865,6 +865,3 @@
a.__eq__ = 42
assert a.__eq__ == 42
-
-class AppTestGetattrWithGetAttributeShortcut(AppTestGetattr):
- spaceconfig = {"objspace.std.getattributeshortcut": True}
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -108,9 +108,8 @@
'interp_magic.method_cache_counter')
self.extra_interpdef('reset_method_cache_counter',
'interp_magic.reset_method_cache_counter')
- if self.space.config.objspace.std.withmapdict:
- self.extra_interpdef('mapdict_cache_counter',
- 'interp_magic.mapdict_cache_counter')
+ self.extra_interpdef('mapdict_cache_counter',
+ 'interp_magic.mapdict_cache_counter')
PYC_MAGIC = get_pyc_magic(self.space)
self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC)
try:
diff --git a/pypy/module/__pypy__/interp_magic.py
b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -37,17 +37,15 @@
cache = space.fromcache(MethodCache)
cache.misses = {}
cache.hits = {}
- if space.config.objspace.std.withmapdict:
- cache = space.fromcache(MapAttrCache)
- cache.misses = {}
- cache.hits = {}
+ cache = space.fromcache(MapAttrCache)
+ cache.misses = {}
+ cache.hits = {}
@unwrap_spec(name=str)
def mapdict_cache_counter(space, name):
"""Return a tuple (index_cache_hits, index_cache_misses) for lookups
in the mapdict cache with the given attribute name."""
assert space.config.objspace.std.withmethodcachecounter
- assert space.config.objspace.std.withmapdict
cache = space.fromcache(MapAttrCache)
return space.newtuple([space.newint(cache.hits.get(name, 0)),
space.newint(cache.misses.get(name, 0))])
diff --git a/pypy/module/_cffi_backend/__init__.py
b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -46,6 +46,7 @@
'_get_types': 'func._get_types',
'_get_common_types': 'func._get_common_types',
'from_buffer': 'func.from_buffer',
+ 'gcp': 'func.gcp',
'string': 'func.string',
'unpack': 'func.unpack',
diff --git a/pypy/module/_cffi_backend/lib_obj.py
b/pypy/module/_cffi_backend/lib_obj.py
--- a/pypy/module/_cffi_backend/lib_obj.py
+++ b/pypy/module/_cffi_backend/lib_obj.py
@@ -64,7 +64,8 @@
#
ptr = rffi.cast(rffi.CCHARP, g.c_address)
assert ptr
- return W_FunctionWrapper(self.space, ptr, g.c_size_or_direct_fn,
+ return W_FunctionWrapper(self.space, self.ffi,
+ ptr, g.c_size_or_direct_fn,
rawfunctype, fnname, self.libname)
@jit.elidable_promote()
diff --git a/pypy/module/_cffi_backend/realize_c_type.py
b/pypy/module/_cffi_backend/realize_c_type.py
--- a/pypy/module/_cffi_backend/realize_c_type.py
+++ b/pypy/module/_cffi_backend/realize_c_type.py
@@ -238,7 +238,7 @@
self.nostruct_nargs = len(ctfuncptr.fargs) - (locs is not None and
locs[0] == 'R')
- def unexpected_fn_type(self, ffi):
+ def repr_fn_type(self, ffi, repl=""):
fargs, fret, ellipsis, abi = self._unpack(ffi)
argnames = [farg.name for farg in fargs]
if ellipsis:
@@ -246,9 +246,14 @@
sargs = ', '.join(argnames)
sret1 = fret.name[:fret.name_position]
sret2 = fret.name[fret.name_position:]
+ if len(repl) > 0 and not sret1.endswith('*'):
+ repl = " " + repl
+ return '%s%s(%s)%s' % (sret1, repl, sargs, sret2)
+
+ def unexpected_fn_type(self, ffi):
raise oefmt(ffi.w_FFIError,
- "the type '%s(%s)%s' is a function type, not a "
- "pointer-to-function type", sret1, sargs, sret2)
+ "the type '%s' is a function type, not a "
+ "pointer-to-function type", self.repr_fn_type(ffi))
def realize_c_type(ffi, opcodes, index):
diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py
b/pypy/module/_cffi_backend/test/test_recompiler.py
--- a/pypy/module/_cffi_backend/test/test_recompiler.py
+++ b/pypy/module/_cffi_backend/test/test_recompiler.py
@@ -420,9 +420,11 @@
def test_math_sin_type(self):
ffi, lib = self.prepare(
- "double sin(double);",
+ "double sin(double); void *xxtestfunc();",
'test_math_sin_type',
- '#include <math.h>')
+ """#include <math.h>
+ void *xxtestfunc(void) { return 0; }
+ """)
# 'lib.sin' is typed as a <built-in method> object on lib
assert ffi.typeof(lib.sin).cname == "double(*)(double)"
# 'x' is another <built-in method> object on lib, made very indirectly
@@ -432,7 +434,16 @@
# present on built-in functions on CPython; must be emulated on PyPy:
assert lib.sin.__name__ == 'sin'
assert lib.sin.__module__ == '_CFFI_test_math_sin_type'
- assert lib.sin.__doc__=='direct call to the C function of the same
name'
+ assert lib.sin.__doc__ == (
+ "double sin(double);\n"
+ "\n"
+ "CFFI C function from _CFFI_test_math_sin_type.lib")
+
+ assert ffi.typeof(lib.xxtestfunc).cname == "void *(*)()"
+ assert lib.xxtestfunc.__doc__ == (
+ "void *xxtestfunc();\n"
+ "\n"
+ "CFFI C function from _CFFI_test_math_sin_type.lib")
def test_verify_anonymous_struct_with_typedef(self):
ffi, lib = self.prepare(
@@ -1762,14 +1773,14 @@
def test_introspect_order(self):
ffi, lib = self.prepare("""
- union aaa { int a; }; typedef struct ccc { int a; } b;
- union g { int a; }; typedef struct cc { int a; } bbb;
- union aa { int a; }; typedef struct a { int a; } bb;
+ union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;
+ union CFFIg { int a; }; typedef struct CFFIcc { int a; }
CFFIbbb;
+ union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;
""", "test_introspect_order", """
- union aaa { int a; }; typedef struct ccc { int a; } b;
- union g { int a; }; typedef struct cc { int a; } bbb;
- union aa { int a; }; typedef struct a { int a; } bb;
+ union CFFIaaa { int a; }; typedef struct CFFIccc { int a; } CFFIb;
+ union CFFIg { int a; }; typedef struct CFFIcc { int a; }
CFFIbbb;
+ union CFFIaa { int a; }; typedef struct CFFIa { int a; } CFFIbb;
""")
- assert ffi.list_types() == (['b', 'bb', 'bbb'],
- ['a', 'cc', 'ccc'],
- ['aa', 'aaa', 'g'])
+ assert ffi.list_types() == (['CFFIb', 'CFFIbb', 'CFFIbbb'],
+ ['CFFIa', 'CFFIcc', 'CFFIccc'],
+ ['CFFIaa', 'CFFIaaa', 'CFFIg'])
diff --git a/pypy/module/_cffi_backend/wrapper.py
b/pypy/module/_cffi_backend/wrapper.py
--- a/pypy/module/_cffi_backend/wrapper.py
+++ b/pypy/module/_cffi_backend/wrapper.py
@@ -1,6 +1,7 @@
from pypy.interpreter.error import oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, interp_attrproperty
+from pypy.interpreter.typedef import GetSetProperty
from pypy.interpreter.gateway import interp2app
from rpython.rlib import jit
@@ -24,9 +25,8 @@
This class cannot be used for variadic functions.
"""
_immutable_ = True
- common_doc_str = 'direct call to the C function of the same name'
- def __init__(self, space, fnptr, directfnptr,
+ def __init__(self, space, ffi, fnptr, directfnptr,
rawfunctype, fnname, modulename):
# everything related to the type of the function is accessed
# as immutable attributes of the 'rawfunctype' object, which
@@ -39,6 +39,7 @@
assert locs is None or len(ctype.fargs) == len(locs)
#
self.space = space
+ self.ffi = ffi
self.fnptr = fnptr
self.directfnptr = directfnptr
self.rawfunctype = rawfunctype
@@ -91,7 +92,13 @@
return ctype._call(self.fnptr, args_w)
def descr_repr(self, space):
- return space.wrap("<FFIFunctionWrapper for %s()>" % (self.fnname,))
+ doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname)
+ return space.wrap("<FFIFunctionWrapper '%s'>" % (doc,))
+
+ def descr_get_doc(self, space):
+ doc = self.rawfunctype.repr_fn_type(self.ffi, self.fnname)
+ doc = '%s;\n\nCFFI C function from %s.lib' % (doc, self.modulename)
+ return space.wrap(doc)
@jit.unroll_safe
@@ -128,6 +135,6 @@
__call__ = interp2app(W_FunctionWrapper.descr_call),
__name__ = interp_attrproperty('fnname', cls=W_FunctionWrapper),
__module__ = interp_attrproperty('modulename', cls=W_FunctionWrapper),
- __doc__ = interp_attrproperty('common_doc_str', cls=W_FunctionWrapper),
+ __doc__ = GetSetProperty(W_FunctionWrapper.descr_get_doc),
)
W_FunctionWrapper.typedef.acceptable_as_base_class = False
diff --git a/pypy/module/cpyext/include/listobject.h
b/pypy/module/cpyext/include/listobject.h
--- a/pypy/module/cpyext/include/listobject.h
+++ b/pypy/module/cpyext/include/listobject.h
@@ -1,2 +1,1 @@
#define PyList_GET_ITEM PyList_GetItem
-#define PyList_SET_ITEM PyList_SetItem
diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py
--- a/pypy/module/cpyext/listobject.py
+++ b/pypy/module/cpyext/listobject.py
@@ -3,7 +3,7 @@
from pypy.module.cpyext.api import (cpython_api, CANNOT_FAIL, Py_ssize_t,
build_type_checkers)
from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
-from pypy.module.cpyext.pyobject import Py_DecRef, PyObject
+from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, make_ref
from pypy.objspace.std.listobject import W_ListObject
from pypy.interpreter.error import OperationError
@@ -21,6 +21,25 @@
"""
return space.newlist([None] * len)
+@cpython_api([PyObject, Py_ssize_t, PyObject], PyObject, error=CANNOT_FAIL,
+ result_borrowed=True)
+def PyList_SET_ITEM(space, w_list, index, w_item):
+ """Macro form of PyList_SetItem() without error checking. This is normally
+ only used to fill in new lists where there is no previous content.
+
+ This function "steals" a reference to item, and, unlike PyList_SetItem(),
+ does not discard a reference to any item that it being replaced; any
+ reference in list at position i will be leaked.
+ """
+ assert isinstance(w_list, W_ListObject)
+ assert 0 <= index < w_list.length()
+ # Deliberately leak, so that it can be safely decref'd.
+ make_ref(space, w_list.getitem(index))
+ Py_DecRef(space, w_item)
+ w_list.setitem(index, w_item)
+ return w_item
+
+
@cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1)
def PyList_SetItem(space, w_list, index, w_item):
"""Set the item at index index in list to item. Return 0 on success
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -52,6 +52,9 @@
@cpython_api([PyObject], lltype.Void)
def PyObject_dealloc(space, obj):
+ # This frees an object after its refcount dropped to zero, so we
+ # assert that it is really zero here.
+ assert obj.c_ob_refcnt == 0
pto = obj.c_ob_type
obj_voidp = rffi.cast(rffi.VOIDP, obj)
generic_cpy_call(space, pto.c_tp_free, obj_voidp)
diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py
--- a/pypy/module/cpyext/pystate.py
+++ b/pypy/module/cpyext/pystate.py
@@ -52,7 +52,8 @@
def PyEval_ThreadsInitialized(space):
if not space.config.translation.thread:
return 0
- return 1
+ from pypy.module.thread import os_thread
+ return int(os_thread.threads_initialized(space))
# XXX: might be generally useful
def encapsulator(T, flavor='raw', dealloc=None):
diff --git a/pypy/module/cpyext/test/test_cpyext.py
b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -106,7 +106,6 @@
"""Base class for all cpyext tests."""
spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', 'array',
'itertools', 'time', 'binascii',
'micronumpy'])
- spaceconfig['std.withmethodcache'] = True
enable_leak_checking = True
diff --git a/pypy/module/cpyext/test/test_listobject.py
b/pypy/module/cpyext/test/test_listobject.py
--- a/pypy/module/cpyext/test/test_listobject.py
+++ b/pypy/module/cpyext/test/test_listobject.py
@@ -136,3 +136,45 @@
l = [1, 2, 3]
module.setlistitem(l,0)
assert l == [None, 2, 3]
+
+ def test_get_item_macro(self):
+ module = self.import_extension('foo', [
+ ("test_get_item", "METH_NOARGS",
+ """
+ PyObject* o = PyList_New(1);
+
+ PyObject* o2 = PyInt_FromLong(0);
+ PyList_SET_ITEM(o, 0, o2);
+ o2 = NULL;
+
+ PyObject* o3 = PyList_GET_ITEM(o, 0);
+ Py_INCREF(o3);
+ Py_CLEAR(o);
+ return o3;
+ """)])
+ assert module.test_get_item() == 0
+
+ def test_set_item_macro(self):
+ """PyList_SET_ITEM leaks a reference to the target."""
+ module = self.import_extension('foo', [
+ ("test_refcount_diff_after_setitem", "METH_NOARGS",
+ """
+ PyObject* o = PyList_New(0);
+ PyObject* o2 = PyList_New(0);
+
+ PyList_Append(o, o2); // does not steal o2
+
+ Py_ssize_t refcount = Py_REFCNT(o2);
+
+ // Steal a reference to o2, but leak the old reference to o2.
+ // The net result should be no change in refcount.
+ PyList_SET_ITEM(o, 0, o2);
+
+ Py_ssize_t new_refcount = Py_REFCNT(o2);
+
+ Py_CLEAR(o);
+ Py_DECREF(o2); // append incref'd.
+ // Py_CLEAR(o2); // naive implementation would fail here.
+ return PyLong_FromSsize_t(new_refcount - refcount);
+ """)])
+ assert module.test_refcount_diff_after_setitem() == 0
diff --git a/pypy/module/cpyext/test/test_pystate.py
b/pypy/module/cpyext/test/test_pystate.py
--- a/pypy/module/cpyext/test/test_pystate.py
+++ b/pypy/module/cpyext/test/test_pystate.py
@@ -104,7 +104,19 @@
return PyLong_FromLong(3);
"""),
])
+ res = module.bounce()
+ assert res == 3
+ def test_threadsinitialized(self):
+ module = self.import_extension('foo', [
+ ("test", "METH_NOARGS",
+ """
+ return PyInt_FromLong(PyEval_ThreadsInitialized());
+ """),
+ ])
+ res = module.test()
+ print "got", res
+ assert res in (0, 1)
class TestInterpreterState(BaseApiTest):
diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py
--- a/pypy/module/gc/interp_gc.py
+++ b/pypy/module/gc/interp_gc.py
@@ -6,15 +6,14 @@
@unwrap_spec(generation=int)
def collect(space, generation=0):
"Run a full collection. The optional argument is ignored."
- # First clear the method cache. See test_gc for an example of why.
- if space.config.objspace.std.withmethodcache:
- from pypy.objspace.std.typeobject import MethodCache
- cache = space.fromcache(MethodCache)
- cache.clear()
- if space.config.objspace.std.withmapdict:
- from pypy.objspace.std.mapdict import MapAttrCache
- cache = space.fromcache(MapAttrCache)
- cache.clear()
+ # First clear the method and the map cache.
+ # See test_gc for an example of why.
+ from pypy.objspace.std.typeobject import MethodCache
+ from pypy.objspace.std.mapdict import MapAttrCache
+ cache = space.fromcache(MethodCache)
+ cache.clear()
+ cache = space.fromcache(MapAttrCache)
+ cache.clear()
rgc.collect()
return space.wrap(0)
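
The new collect() above unconditionally clears the method cache and the map-attribute cache before collecting. A minimal sketch of why that matters (plain CPython with an invented cache, not the RPython caches themselves): a cache holding strong references would otherwise keep otherwise-dead classes alive across the collection.

    import gc, weakref

    _method_cache = {}              # (cls, name) -> attribute, strong refs

    def cached_lookup(cls, name):
        key = (cls, name)
        try:
            return _method_cache[key]
        except KeyError:
            value = getattr(cls, name)
            _method_cache[key] = value
            return value

    def collect():
        _method_cache.clear()       # drop the cached strong references first
        gc.collect()

    class Temp(object):
        def meth(self):
            return 42

    ref = weakref.ref(Temp)
    cached_lookup(Temp, "meth")
    del Temp
    collect()
    assert ref() is None   # without the clear(), the cache would keep Temp alive
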
diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py
--- a/pypy/module/gc/test/test_gc.py
+++ b/pypy/module/gc/test/test_gc.py
@@ -106,7 +106,6 @@
class AppTestGcMethodCache(object):
- spaceconfig = {"objspace.std.withmethodcache": True}
def test_clear_method_cache(self):
import gc, weakref
@@ -127,10 +126,6 @@
assert r() is None
-class AppTestGcMapDictIndexCache(AppTestGcMethodCache):
- spaceconfig = {"objspace.std.withmethodcache": True,
- "objspace.std.withmapdict": True}
-
def test_clear_index_cache(self):
import gc, weakref
rlist = []
diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py
b/pypy/module/pypyjit/test_pypy_c/test_weakref.py
--- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py
@@ -25,9 +25,9 @@
i61 = int_add(i58, 1)
setfield_gc(p18, i61, descr=<FieldS
pypy.module.__builtin__.functional.W_XRangeIterator.inst_current 8>)
guard_not_invalidated(descr=...)
- p65 = getfield_gc_r(p14, descr=<FieldP
pypy.objspace.std.mapdict.W_ObjectObjectSize5.inst_map \d+>)
+ p65 = getfield_gc_r(p14, descr=<FieldP .+inst_map \d+>)
guard_value(p65, ConstPtr(ptr45), descr=...)
- p66 = getfield_gc_r(p14, descr=<FieldP
pypy.objspace.std.mapdict.W_ObjectObjectSize5.inst__value0 \d+>)
+ p66 = getfield_gc_r(p14, descr=<FieldP .+inst__value0 \d+>)
guard_nonnull_class(p66, ..., descr=...)
p67 = force_token()
setfield_gc(p0, p67, descr=<FieldP
pypy.interpreter.pyframe.PyFrame.vable_token \d+>)
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -417,8 +417,11 @@
def test_math_sin_type():
ffi = FFI()
- ffi.cdef("double sin(double);")
- lib = verify(ffi, 'test_math_sin_type', '#include <math.h>')
+ ffi.cdef("double sin(double); void *xxtestfunc();")
+ lib = verify(ffi, 'test_math_sin_type', """
+ #include <math.h>
+ void *xxtestfunc(void) { return 0; }
+ """)
# 'lib.sin' is typed as a <built-in method> object on lib
assert ffi.typeof(lib.sin).cname == "double(*)(double)"
# 'x' is another <built-in method> object on lib, made very indirectly
@@ -428,7 +431,16 @@
# present on built-in functions on CPython; must be emulated on PyPy:
assert lib.sin.__name__ == 'sin'
assert lib.sin.__module__ == '_CFFI_test_math_sin_type'
- assert lib.sin.__doc__ == 'direct call to the C function of the same name'
+ assert lib.sin.__doc__ == (
+ "double sin(double);\n"
+ "\n"
+ "CFFI C function from _CFFI_test_math_sin_type.lib")
+
+ assert ffi.typeof(lib.xxtestfunc).cname == "void *(*)()"
+ assert lib.xxtestfunc.__doc__ == (
+ "void *xxtestfunc();\n"
+ "\n"
+ "CFFI C function from _CFFI_test_math_sin_type.lib")
def test_verify_anonymous_struct_with_typedef():
ffi = FFI()
diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py
--- a/pypy/module/thread/gil.py
+++ b/pypy/module/thread/gil.py
@@ -34,6 +34,9 @@
result = False # already set up
return result
+ def threads_initialized(self):
+ return self.gil_ready
+
## def reinit_threads(self, space):
## "Called in the child process after a fork()"
## OSThreadLocals.reinit_threads(self, space)
diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py
--- a/pypy/module/thread/os_thread.py
+++ b/pypy/module/thread/os_thread.py
@@ -148,6 +148,9 @@
space.threadlocals.setup_threads(space)
bootstrapper.setup(space)
+def threads_initialized(space):
+ return space.threadlocals.threads_initialized()
+
def reinit_threads(space):
"Called in the child process after a fork()"
diff --git a/pypy/module/thread/test/test_gil.py
b/pypy/module/thread/test/test_gil.py
--- a/pypy/module/thread/test/test_gil.py
+++ b/pypy/module/thread/test/test_gil.py
@@ -1,5 +1,7 @@
import time
from pypy.module.thread import gil
+from rpython.rtyper.lltypesystem.lloperation import llop
+from rpython.rtyper.lltypesystem import lltype
from rpython.rlib import rgil
from rpython.rlib.test import test_rthread
from rpython.rlib import rthread as thread
@@ -81,10 +83,13 @@
while len(state.data) < 2*N:
debug_print(len(state.data))
if not still_waiting:
+ llop.debug_print(lltype.Void, "timeout. progress: "
+ "%d of 2*N (= %f%%)" % \
+ (len(state.data), 2*N,
100*len(state.data)/(2.0*N)))
raise ValueError("time out")
still_waiting -= 1
if not we_are_translated(): rgil.release()
- time.sleep(0.01)
+ time.sleep(0.1)
if not we_are_translated(): rgil.acquire()
debug_print("leaving!")
i1 = i2 = 0
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py
--- a/pypy/objspace/fake/objspace.py
+++ b/pypy/objspace/fake/objspace.py
@@ -121,6 +121,8 @@
'set', 'frozenset', 'bytearray', 'memoryview']
class FakeObjSpace(ObjSpace):
+ is_fake_objspace = True
+
def __init__(self, config=None):
self._seen_extras = []
ObjSpace.__init__(self, config=config)
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -640,34 +640,12 @@
return [ord(s) for s in value]
W_BytesObject.EMPTY = W_BytesObject('')
-W_BytesObject.PREBUILT = [W_BytesObject(chr(i)) for i in range(256)]
-del i
def wrapstr(space, s):
- if space.config.objspace.std.sharesmallstr:
- if space.config.objspace.std.withprebuiltchar:
- # share characters and empty string
- if len(s) <= 1:
- if len(s) == 0:
- return W_BytesObject.EMPTY
- else:
- s = s[0] # annotator hint: a single char
- return wrapchar(space, s)
- else:
- # only share the empty string
- if len(s) == 0:
- return W_BytesObject.EMPTY
return W_BytesObject(s)
-def wrapchar(space, c):
- if space.config.objspace.std.withprebuiltchar and not we_are_jitted():
- return W_BytesObject.PREBUILT[ord(c)]
- else:
- return W_BytesObject(c)
-
-
def getbytevalue(space, w_value):
value = space.getindex_w(w_value, None)
if not 0 <= value < 256:
diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py
--- a/pypy/objspace/std/callmethod.py
+++ b/pypy/objspace/std/callmethod.py
@@ -23,6 +23,7 @@
def LOOKUP_METHOD(f, nameindex, *ignored):
+ from pypy.objspace.std.typeobject import MutableCell
# stack before after
# -------------- --fast-method----fallback-case------------
#
@@ -33,7 +34,7 @@
space = f.space
w_obj = f.popvalue()
- if space.config.objspace.std.withmapdict and not jit.we_are_jitted():
+ if not jit.we_are_jitted():
# mapdict has an extra-fast version of this function
if LOOKUP_METHOD_mapdict(f, nameindex, w_obj):
return
@@ -44,7 +45,18 @@
w_type = space.type(w_obj)
if w_type.has_object_getattribute():
name = space.str_w(w_name)
- w_descr = w_type.lookup(name)
+ # bit of a mess to use these internal functions, but it allows the
+ # mapdict caching below to work without an additional lookup
+ version_tag = w_type.version_tag()
+ if version_tag is None:
+ _, w_descr = w_type._lookup_where(name)
+ w_descr_cell = None
+ else:
+ _, w_descr_cell = w_type._pure_lookup_where_with_method_cache(
+ name, version_tag)
+ w_descr = w_descr_cell
+ if isinstance(w_descr, MutableCell):
+ w_descr = w_descr.unwrap_cell(space)
if w_descr is None:
# this handles directly the common case
# module.function(args..)
@@ -59,11 +71,11 @@
# nothing in the instance
f.pushvalue(w_descr)
f.pushvalue(w_obj)
- if (space.config.objspace.std.withmapdict and
- not jit.we_are_jitted()):
+ if not jit.we_are_jitted():
# let mapdict cache stuff
LOOKUP_METHOD_mapdict_fill_cache_method(
- space, f.getcode(), name, nameindex, w_obj, w_type)
+ space, f.getcode(), name, nameindex, w_obj, w_type,
+ w_descr_cell)
return
if w_value is None:
w_value = space.getattr(w_obj, w_name)
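
The LOOKUP_METHOD change above looks the name up through the type's version_tag so the result can be cached for the mapdict fast path. A rough sketch of the version-tag idea (plain Python, invented names; the real lookup is the elidable _pure_lookup_where_with_method_cache): the type carries an opaque version object that is replaced on every mutation, so a cached result stays valid exactly as long as the version object is unchanged.

    class VersionTag(object):
        pass

    class TypeWithVersion(object):
        def __init__(self, name, attrs):
            self.name = name
            self.attrs = dict(attrs)
            self.version_tag = VersionTag()

        def setattr(self, name, value):
            self.attrs[name] = value
            self.version_tag = VersionTag()   # invalidate all cached lookups

    _lookup_cache = {}   # (version_tag, name) -> attribute

    def cached_lookup(w_type, name):
        key = (w_type.version_tag, name)
        try:
            return _lookup_cache[key]
        except KeyError:
            result = w_type.attrs.get(name)
            _lookup_cache[key] = result
            return result

    t = TypeWithVersion("Foo", {"meth": lambda self: 42})
    m1 = cached_lookup(t, "meth")
    m2 = cached_lookup(t, "meth")       # served from the cache
    assert m1 is m2
    t.setattr("meth", lambda self: 43)  # bumps the version, old entries go stale
    assert cached_lookup(t, "meth") is not m1
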
diff --git a/pypy/objspace/std/dictmultiobject.py
b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -66,10 +66,10 @@
w_obj = space.allocate_instance(W_ModuleDictObject, space.w_dict)
W_ModuleDictObject.__init__(w_obj, space, strategy, storage)
return w_obj
- elif space.config.objspace.std.withmapdict and instance:
+ elif instance:
from pypy.objspace.std.mapdict import MapDictStrategy
strategy = space.fromcache(MapDictStrategy)
- elif instance or strdict or module:
+ elif strdict or module:
assert w_type is None
strategy = space.fromcache(UnicodeDictStrategy)
elif kwargs:
@@ -528,7 +528,6 @@
def switch_to_correct_strategy(self, w_dict, w_key):
from pypy.objspace.std.intobject import W_IntObject
- withidentitydict = self.space.config.objspace.std.withidentitydict
if type(w_key) is self.space.StringObjectCls:
self.switch_to_bytes_strategy(w_dict)
return
@@ -539,7 +538,7 @@
self.switch_to_int_strategy(w_dict)
return
w_type = self.space.type(w_key)
- if withidentitydict and w_type.compares_by_identity():
+ if w_type.compares_by_identity():
self.switch_to_identity_strategy(w_dict)
else:
self.switch_to_object_strategy(w_dict)
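
As a rough illustration of the first-key dispatch above (illustrative names only, not the real strategy classes): the identity-strategy branch is now taken whenever the key's type compares by identity, with no config option in between. "Compares by identity" is approximated here as "defines no __eq__/__hash__/__cmp__ of its own".

    def compares_by_identity(cls):
        # crude approximation of W_TypeObject.compares_by_identity()
        for klass in cls.__mro__:
            if klass is object:
                break
            if ('__eq__' in vars(klass) or '__hash__' in vars(klass)
                    or '__cmp__' in vars(klass)):
                return False
        return True

    def pick_strategy(key):
        # simplified analogue of switch_to_correct_strategy
        if type(key) is str:
            return 'bytes'      # StringObjectCls keys
        if type(key) is int:
            return 'int'
        if compares_by_identity(type(key)):
            return 'identity'
        return 'object'

    class Plain(object):
        pass

    class Weird(object):
        def __eq__(self, other):
            return True

    assert pick_strategy('a') == 'bytes'
    assert pick_strategy(3) == 'int'
    assert pick_strategy(Plain()) == 'identity'
    assert pick_strategy(Weird()) == 'object'
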
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -67,12 +67,7 @@
@jit.elidable
def find_map_attr(self, name, index):
- if (self.space.config.objspace.std.withmethodcache):
- return self._find_map_attr_cache(name, index)
- return self._find_map_attr(name, index)
-
- @jit.dont_look_inside
- def _find_map_attr_cache(self, name, index):
+ # attr cache
space = self.space
cache = space.fromcache(MapAttrCache)
SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp
@@ -429,7 +424,6 @@
class MapAttrCache(object):
def __init__(self, space):
- assert space.config.objspace.std.withmethodcache
SIZE = 1 << space.config.objspace.std.methodcachesizeexp
self.attrs = [None] * SIZE
self.names = [None] * SIZE
@@ -456,12 +450,19 @@
INVALID = 2
SLOTS_STARTING_FROM = 3
+# a little bit of a mess of mixin classes that implement various pieces of
+# objspace user object functionality in terms of mapdict
-class BaseMapdictObject:
- _mixin_ = True
+class BaseUserClassMapdict:
+ # everything that's needed to use mapdict for a user subclass at all.
+ # This immediately makes slots possible.
- def _init_empty(self, map):
- raise NotImplementedError("abstract base class")
+ # assumes presence of _init_empty, _mapdict_read_storage,
+ # _mapdict_write_storage, _mapdict_storage_length,
+ # _set_mapdict_storage_and_map
+
+ # _____________________________________________
+ # methods needed for mapdict
def _become(self, new_obj):
self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
@@ -470,49 +471,11 @@
return jit.promote(self.map)
def _set_mapdict_map(self, map):
self.map = map
+
# _____________________________________________
# objspace interface
- def getdictvalue(self, space, attrname):
- return self._get_mapdict_map().read(self, attrname, DICT)
-
- def setdictvalue(self, space, attrname, w_value):
- return self._get_mapdict_map().write(self, attrname, DICT, w_value)
-
- def deldictvalue(self, space, attrname):
- new_obj = self._get_mapdict_map().delete(self, attrname, DICT)
- if new_obj is None:
- return False
- self._become(new_obj)
- return True
-
- def getdict(self, space):
- w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL)
- if w_dict is not None:
- assert isinstance(w_dict, W_DictMultiObject)
- return w_dict
-
- strategy = space.fromcache(MapDictStrategy)
- storage = strategy.erase(self)
- w_dict = W_DictObject(space, strategy, storage)
- flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict)
- assert flag
- return w_dict
-
- def setdict(self, space, w_dict):
- from pypy.interpreter.typedef import check_new_dictionary
- w_dict = check_new_dictionary(space, w_dict)
- w_olddict = self.getdict(space)
- assert isinstance(w_dict, W_DictMultiObject)
- # The old dict has got 'self' as dstorage, but we are about to
- # change self's ("dict", SPECIAL) attribute to point to the
- # new dict. If the old dict was using the MapDictStrategy, we
- # have to force it now: otherwise it would remain an empty
- # shell that continues to delegate to 'self'.
- if type(w_olddict.get_strategy()) is MapDictStrategy:
- w_olddict.get_strategy().switch_to_object_strategy(w_olddict)
- flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict)
- assert flag
+ # class access
def getclass(self, space):
return self._get_mapdict_map().terminator.w_cls
@@ -523,9 +486,13 @@
def user_setup(self, space, w_subtype):
self.space = space
- assert not self.typedef.hasdict
+ assert (not self.typedef.hasdict or
+ isinstance(w_subtype.terminator, NoDictTerminator))
self._init_empty(w_subtype.terminator)
+
+ # methods needed for slots
+
def getslotvalue(self, slotindex):
index = SLOTS_STARTING_FROM + slotindex
return self._get_mapdict_map().read(self, "slot", index)
@@ -542,7 +509,9 @@
self._become(new_obj)
return True
- # used by _weakref implemenation
+
+class MapdictWeakrefSupport(object):
+ # stuff used by the _weakref implementation
def getweakref(self):
from pypy.module._weakref.interp__weakref import WeakrefLifeline
@@ -563,8 +532,71 @@
self._get_mapdict_map().write(self, "weakref", SPECIAL, None)
delweakref._cannot_really_call_random_things_ = True
-class ObjectMixin(object):
- _mixin_ = True
+
+class MapdictDictSupport(object):
+
+ # objspace interface for dictionary operations
+
+ def getdictvalue(self, space, attrname):
+ return self._get_mapdict_map().read(self, attrname, DICT)
+
+ def setdictvalue(self, space, attrname, w_value):
+ return self._get_mapdict_map().write(self, attrname, DICT, w_value)
+
+ def deldictvalue(self, space, attrname):
+ new_obj = self._get_mapdict_map().delete(self, attrname, DICT)
+ if new_obj is None:
+ return False
+ self._become(new_obj)
+ return True
+
+ def getdict(self, space):
+ return _obj_getdict(self, space)
+
+ def setdict(self, space, w_dict):
+ _obj_setdict(self, space, w_dict)
+
+# a couple of helpers for the classes above, factored out to reduce
+# the translated code size
+
[email protected]_inline
+def _obj_getdict(self, space):
+ terminator = self._get_mapdict_map().terminator
+    assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator)
+ w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL)
+ if w_dict is not None:
+ assert isinstance(w_dict, W_DictMultiObject)
+ return w_dict
+
+ strategy = space.fromcache(MapDictStrategy)
+ storage = strategy.erase(self)
+ w_dict = W_DictObject(space, strategy, storage)
+ flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict)
+ assert flag
+ return w_dict
+
[email protected]_inline
+def _obj_setdict(self, space, w_dict):
+ from pypy.interpreter.error import OperationError
+ terminator = self._get_mapdict_map().terminator
+    assert isinstance(terminator, DictTerminator) or isinstance(terminator, DevolvedDictTerminator)
+ if not space.isinstance_w(w_dict, space.w_dict):
+ raise OperationError(space.w_TypeError,
+ space.wrap("setting dictionary to a non-dict"))
+ assert isinstance(w_dict, W_DictMultiObject)
+ w_olddict = self.getdict(space)
+ assert isinstance(w_olddict, W_DictMultiObject)
+ # The old dict has got 'self' as dstorage, but we are about to
+ # change self's ("dict", SPECIAL) attribute to point to the
+ # new dict. If the old dict was using the MapDictStrategy, we
+ # have to force it now: otherwise it would remain an empty
+ # shell that continues to delegate to 'self'.
+ if type(w_olddict.get_strategy()) is MapDictStrategy:
+ w_olddict.get_strategy().switch_to_object_strategy(w_olddict)
+ flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict)
+ assert flag
+
+class MapdictStorageMixin(object):
def _init_empty(self, map):
from rpython.rlib.debug import make_sure_not_resized
self.map = map
@@ -583,51 +615,32 @@
self.storage = storage
self.map = map
-class Object(ObjectMixin, BaseMapdictObject, W_Root):
- pass # mainly for tests
+class ObjectWithoutDict(W_Root):
+ # mainly for tests
+ objectmodel.import_from_mixin(MapdictStorageMixin)
-def get_subclass_of_correct_size(space, cls, w_type):
- assert space.config.objspace.std.withmapdict
- map = w_type.terminator
- classes = memo_get_subclass_of_correct_size(space, cls)
- if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS:
- return classes[0]
- size = map.size_estimate()
- debug.check_nonneg(size)
- if size < len(classes):
- return classes[size]
- else:
- return classes[len(classes)-1]
-get_subclass_of_correct_size._annspecialcase_ = "specialize:arg(1)"
+ objectmodel.import_from_mixin(BaseUserClassMapdict)
+ objectmodel.import_from_mixin(MapdictWeakrefSupport)
-SUBCLASSES_MIN_FIELDS = 5 # XXX tweak these numbers
-SUBCLASSES_MAX_FIELDS = 5
-def memo_get_subclass_of_correct_size(space, supercls):
- key = space, supercls
- try:
- return _subclass_cache[key]
- except KeyError:
- assert not hasattr(supercls, "__del__")
- result = []
- for i in range(SUBCLASSES_MIN_FIELDS, SUBCLASSES_MAX_FIELDS+1):
- result.append(_make_subclass_size_n(supercls, i))
- for i in range(SUBCLASSES_MIN_FIELDS):
- result.insert(0, result[0])
- if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS:
- assert len(set(result)) == 1
- _subclass_cache[key] = result
- return result
-memo_get_subclass_of_correct_size._annspecialcase_ = "specialize:memo"
-_subclass_cache = {}
+class Object(W_Root):
+ # mainly for tests
+ objectmodel.import_from_mixin(MapdictStorageMixin)
-def _make_subclass_size_n(supercls, n):
+ objectmodel.import_from_mixin(BaseUserClassMapdict)
+ objectmodel.import_from_mixin(MapdictWeakrefSupport)
+ objectmodel.import_from_mixin(MapdictDictSupport)
+
+
+SUBCLASSES_NUM_FIELDS = 5
+
+def _make_storage_mixin_size_n(n=SUBCLASSES_NUM_FIELDS):
from rpython.rlib import unroll
rangen = unroll.unrolling_iterable(range(n))
nmin1 = n - 1
rangenmin1 = unroll.unrolling_iterable(range(nmin1))
valnmin1 = "_value%s" % nmin1
- class subcls(BaseMapdictObject, supercls):
+ class subcls(object):
def _init_empty(self, map):
for i in rangenmin1:
setattr(self, "_value%s" % i, None)
@@ -695,7 +708,7 @@
erased = erase_list(storage_list)
setattr(self, "_value%s" % nmin1, erased)
- subcls.__name__ = supercls.__name__ + "Size%s" % n
+ subcls.__name__ = "Size%s" % n
return subcls
# ____________________________________________________________
@@ -962,7 +975,7 @@
name = space.str_w(w_name)
# We need to care for obscure cases in which the w_descr is
# a MutableCell, which may change without changing the version_tag
- _, w_descr = w_type._pure_lookup_where_possibly_with_method_cache(
+ _, w_descr = w_type._pure_lookup_where_with_method_cache(
name, version_tag)
#
attrname, index = ("", INVALID)
@@ -1009,22 +1022,15 @@
return False
def LOOKUP_METHOD_mapdict_fill_cache_method(space, pycode, name, nameindex,
- w_obj, w_type):
+ w_obj, w_type, w_method):
+ if w_method is None or isinstance(w_method, MutableCell):
+ # don't cache the MutableCell XXX could be fixed
+ return
version_tag = w_type.version_tag()
- if version_tag is None:
- return
+ assert version_tag is not None
map = w_obj._get_mapdict_map()
if map is None or isinstance(map.terminator, DevolvedDictTerminator):
return
- # We know here that w_obj.getdictvalue(space, name) just returned None,
- # so the 'name' is not in the instance. We repeat the lookup to find it
- # in the class, this time taking care of the result: it can be either a
- # quasi-constant class attribute, or actually a MutableCell --- which we
- # must not cache. (It should not be None here, but you never know...)
- _, w_method = w_type._pure_lookup_where_possibly_with_method_cache(
- name, version_tag)
- if w_method is None or isinstance(w_method, MutableCell):
- return
_fill_cache(pycode, nameindex, map, version_tag, -1, w_method)
# XXX fix me: if a function contains a loop with both LOAD_ATTR and
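
The class layout above replaces the old BaseMapdictObject multiple-inheritance scheme with small mixins copied into each concrete class via rpython.rlib.objectmodel.import_from_mixin. Below is a rough plain-Python sketch of that composition style; the helper and mixin bodies here are trivial stand-ins (the real helper copies more carefully and guards against name clashes).

    def import_from_mixin_sketch(mixin, target):
        # copy the mixin's methods and attributes into the target class dict
        for name, value in vars(mixin).items():
            if name.startswith('__') and name.endswith('__'):
                continue
            setattr(target, name, value)

    class StorageMixin(object):
        def _init_empty(self, map):
            self.map = map
            self.storage = []

    class WeakrefSupport(object):
        def getweakref(self):
            return getattr(self, '_lifeline', None)

    class ObjectWithoutDict(object):
        pass

    for mixin in (StorageMixin, WeakrefSupport):
        import_from_mixin_sketch(mixin, ObjectWithoutDict)

    obj = ObjectWithoutDict()
    obj._init_empty(None)
    assert obj.map is None and obj.getweakref() is None
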
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -372,15 +372,8 @@
if cls.typedef.applevel_subclasses_base is not None:
cls = cls.typedef.applevel_subclasses_base
#
- if (self.config.objspace.std.withmapdict and cls is W_ObjectObject
- and not w_subtype.needsdel):
-            from pypy.objspace.std.mapdict import get_subclass_of_correct_size
- subcls = get_subclass_of_correct_size(self, cls, w_subtype)
- else:
- subcls = get_unique_interplevel_subclass(
- self.config, cls, w_subtype.hasdict,
- w_subtype.layout.nslots != 0,
- w_subtype.needsdel, w_subtype.weakrefable)
+ subcls = get_unique_interplevel_subclass(
+ self.config, cls, w_subtype.needsdel)
instance = instantiate(subcls)
assert isinstance(instance, cls)
instance.user_setup(self, w_subtype)
@@ -543,7 +536,6 @@
return self.int_w(l_w[0]), self.int_w(l_w[1]), self.int_w(l_w[2])
_DescrOperation_is_true = is_true
- _DescrOperation_getattr = getattr
def is_true(self, w_obj):
# a shortcut for performance
@@ -552,8 +544,6 @@
return self._DescrOperation_is_true(w_obj)
def getattr(self, w_obj, w_name):
- if not self.config.objspace.std.getattributeshortcut:
- return self._DescrOperation_getattr(w_obj, w_name)
# an optional shortcut for performance
w_type = self.type(w_obj)
diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py
--- a/pypy/objspace/std/specialisedtupleobject.py
+++ b/pypy/objspace/std/specialisedtupleobject.py
@@ -186,10 +186,9 @@
def specialized_zip_2_lists(space, w_list1, w_list2):
from pypy.objspace.std.listobject import W_ListObject
- if (not isinstance(w_list1, W_ListObject) or
- not isinstance(w_list2, W_ListObject)):
+ if type(w_list1) is not W_ListObject or type(w_list2) is not W_ListObject:
raise OperationError(space.w_TypeError,
- space.wrap("expected two lists"))
+ space.wrap("expected two exact lists"))
if space.config.objspace.std.withspecialisedtuple:
intlist1 = w_list1.getitems_int()
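
The tightened check in specialized_zip_2_lists (exact type instead of isinstance) matters because a list subclass can override item access, and a specialised fast path that reads the underlying storage directly would bypass that override; rejecting subclasses lets callers fall back to the general implementation. A small plain-Python illustration with a hypothetical helper, not the PyPy API:

    class LoudList(list):
        def __getitem__(self, i):
            return 'overridden'

    def zip_exact(l1, l2):
        # fast path only for exact lists, mirroring the new type() check
        if type(l1) is not list or type(l2) is not list:
            raise TypeError("expected two exact lists")
        return list(zip(l1, l2))

    assert zip_exact([1, 2], [3, 4]) == [(1, 3), (2, 4)]
    try:
        zip_exact(LoudList([1, 2]), [3, 4])
    except TypeError:
        pass  # subclasses are handled by the general implementation instead
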
diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py
--- a/pypy/objspace/std/test/test_bytesobject.py
+++ b/pypy/objspace/std/test/test_bytesobject.py
@@ -864,14 +864,3 @@
def __int__(self):
return 42
raises(TypeError, bytes, A())
-
-
-class AppTestPrebuilt(AppTestBytesObject):
- spaceconfig = {"objspace.std.withprebuiltchar": True}
-
-class AppTestShare(AppTestBytesObject):
- spaceconfig = {"objspace.std.sharesmallstr": True}
-
-class AppTestPrebuiltShare(AppTestBytesObject):
- spaceconfig = {"objspace.std.withprebuiltchar": True,
- "objspace.std.sharesmallstr": True}
diff --git a/pypy/objspace/std/test/test_callmethod.py b/pypy/objspace/std/test/test_callmethod.py
--- a/pypy/objspace/std/test/test_callmethod.py
+++ b/pypy/objspace/std/test/test_callmethod.py
@@ -108,10 +108,6 @@
""")
-class AppTestCallMethodWithGetattributeShortcut(AppTestCallMethod):
- spaceconfig = {"objspace.std.getattributeshortcut": True}
-
-
class TestCallMethod:
def test_space_call_method(self):
space = self.space
diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py
--- a/pypy/objspace/std/test/test_dictmultiobject.py
+++ b/pypy/objspace/std/test/test_dictmultiobject.py
@@ -1183,11 +1183,9 @@
class Config:
class objspace:
class std:
- withsmalldicts = False
withcelldict = False
- withmethodcache = False
- withidentitydict = False
- withmapdict = False
+ methodcachesizeexp = 11
+ withmethodcachecounter = False
FakeSpace.config = Config()