Author: Armin Rigo <[email protected]>
Branch: remove-raisingops
Changeset: r84364:734a91c841ee
Date: 2016-05-11 08:24 +0200
http://bitbucket.org/pypy/pypy/changeset/734a91c841ee/
Log: hg merge default
diff too long, truncating to 2000 out of 37072 lines
diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -21,3 +21,4 @@
246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py
--- a/dotviewer/graphserver.py
+++ b/dotviewer/graphserver.py
@@ -143,6 +143,11 @@
if __name__ == '__main__':
if len(sys.argv) != 2:
+ if len(sys.argv) == 1:
+ # start locally
+ import sshgraphserver
+ sshgraphserver.ssh_graph_server(['LOCAL'])
+ sys.exit(0)
print >> sys.stderr, __doc__
sys.exit(2)
if sys.argv[1] == '--stdio':
diff --git a/dotviewer/sshgraphserver.py b/dotviewer/sshgraphserver.py
--- a/dotviewer/sshgraphserver.py
+++ b/dotviewer/sshgraphserver.py
@@ -4,11 +4,14 @@
Usage:
sshgraphserver.py hostname [more args for ssh...]
+ sshgraphserver.py LOCAL
This logs in to 'hostname' by passing the arguments on the command-line
to ssh. No further configuration is required: it works for all programs
using the dotviewer library as long as they run on 'hostname' under the
same username as the one sshgraphserver logs in as.
+
+If 'hostname' is the string 'LOCAL', then it starts locally without ssh.
"""
import graphserver, socket, subprocess, random
@@ -18,12 +21,19 @@
s1 = socket.socket()
s1.bind(('127.0.0.1', socket.INADDR_ANY))
localhost, localport = s1.getsockname()
- remoteport = random.randrange(10000, 20000)
- # ^^^ and just hope there is no conflict
- args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (remoteport, localport)]
- args = args + sshargs + ['python -u -c "exec input()"']
- print ' '.join(args[:-1])
+ if sshargs[0] != 'LOCAL':
+ remoteport = random.randrange(10000, 20000)
+ # ^^^ and just hope there is no conflict
+
+ args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (
+ remoteport, localport)]
+ args = args + sshargs + ['python -u -c "exec input()"']
+ else:
+ remoteport = localport
+ args = ['python', '-u', '-c', 'exec input()']
+
+ print ' '.join(args)
p = subprocess.Popen(args, bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
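A side note on the port-picking trick above: binding to port ``socket.INADDR_ANY`` (which is 0) makes the OS choose any free local port, which ``getsockname()`` then reports. A minimal standalone sketch of the same idea, using only the standard library::

    import socket

    s1 = socket.socket()
    s1.bind(('127.0.0.1', socket.INADDR_ANY))  # INADDR_ANY == 0: the OS picks a free port
    localhost, localport = s1.getsockname()
    print localhost, localport                 # e.g. 127.0.0.1 54321

In the LOCAL case the spawned ``python -u -c "exec input()"`` child connects back to this port directly; over ssh, the ``-R%d:127.0.0.1:%d`` option forwards a randomly chosen remote port to it instead.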
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
+ try:
+ setattr(self, dst_option,
+ getattr(src_cmd_obj, src_option))
+ except AttributeError:
+ # This was added after problems with setuptools 18.4.
+ # It seems that setuptools 20.9 fixes the problem.
+ # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+ # if I say "virtualenv -p pypy venv-pypy" then it
+ # just installs setuptools 18.4 from some cache...
+ pass
def get_finalized_command(self, command, create=1):
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1735,7 +1735,6 @@
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "next" : stop}),
- ("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), {}),
@@ -1747,6 +1746,8 @@
("__format__", format, format_impl, set(), {}),
("__dir__", dir, empty_seq, set(), {}),
]
+ if test_support.check_impl_detail():
+ specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
class Checker(object):
def __getattr__(self, attr, test=self):
@@ -1768,10 +1769,6 @@
raise MyException
for name, runner, meth_impl, ok, env in specials:
- if name == '__length_hint__' or name == '__sizeof__':
- if not test_support.check_impl_detail():
- continue
-
class X(Checker):
pass
for attr, obj in env.iteritems():
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
overly detailed
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+ or create branch vendor/stdlib-3-*
2. upgrade the files there
+ 2a. remove lib-python/2.7/ or lib-python/3/
+ 2b. copy the files from the cpython repo
+ 2c. hg add lib-python/2.7/ or lib-python/3/
+ 2d. hg remove --after
2e. show copied files in cpython repo by running `hg diff --git -r v<old> -r v<new> Lib | grep '^copy \(from\|to\)'`
+ 2f. fix copies / renames manually by running `hg copy --after <from> <to>`
for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
-5. update to default/py3k
+5. update to default / py3k
6. create an integration branch for the new stdlib
(just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
8. commit
10. fix issues
11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -397,20 +397,7 @@
data. Later, when this new cdata object is garbage-collected,
'destructor(old_cdata_object)' will be called.
"""
- try:
- gcp = self._backend.gcp
- except AttributeError:
- pass
- else:
- return gcp(cdata, destructor)
- #
- with self._lock:
- try:
- gc_weakrefs = self.gc_weakrefs
- except AttributeError:
- from .gc_weakref import GcWeakrefs
- gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
- return gc_weakrefs.build(cdata, destructor)
+ return self._backend.gcp(cdata, destructor)
def _get_cached_btype(self, type):
assert self._lock.acquire(False) is False
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
return x._value
raise TypeError("character expected, got %s" %
type(x).__name__)
+ def __nonzero__(self):
+ return ord(self._value) != 0
+ else:
+ def __nonzero__(self):
+ return self._value != 0
if kind == 'float':
@staticmethod
@@ -993,6 +998,31 @@
assert onerror is None # XXX not implemented
return BType(source, error)
+ def gcp(self, cdata, destructor):
+ BType = self.typeof(cdata)
+
+ if destructor is None:
+ if not (hasattr(BType, '_gcp_type') and
+ BType._gcp_type is BType):
+ raise TypeError("Can remove destructor only on a object "
+ "previously returned by ffi.gc()")
+ cdata._destructor = None
+ return None
+
+ try:
+ gcp_type = BType._gcp_type
+ except AttributeError:
+ class CTypesDataGcp(BType):
+ __slots__ = ['_orig', '_destructor']
+ def __del__(self):
+ if self._destructor is not None:
+ self._destructor(self._orig)
+ gcp_type = BType._gcp_type = CTypesDataGcp
+ new_cdata = self.cast(gcp_type, cdata)
+ new_cdata._orig = cdata
+ new_cdata._destructor = destructor
+ return new_cdata
+
typeof = type
def getcname(self, BType, replace_with):
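For context, ``gcp()`` is what backs ``ffi.gc()`` in this ctypes backend: the cast to a one-shot ``CTypesDataGcp`` subclass gives the returned cdata a ``__del__`` that runs the destructor, and a ``None`` destructor detaches a previously attached one (and returns ``None``). A hedged usage sketch; the callback is illustrative and not part of this changeset::

    from cffi import FFI

    ffi = FFI()

    def on_free(cdata):
        print 'destructor ran for', cdata   # hypothetical cleanup action

    raw = ffi.new("char[]", "some data")
    guarded = ffi.gc(raw, on_free)   # on_free(raw) runs when 'guarded' is collected
    ffi.gc(guarded, None)            # allowed only on ffi.gc() results; detaches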
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -204,15 +204,6 @@
BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
default=False),
- BoolOption("withprebuiltchar",
- "use prebuilt single-character string objects",
- default=False),
-
- BoolOption("sharesmallstr",
- "always reuse the prebuilt string objects "
- "(the empty string and potentially single-char strings)",
- default=False),
-
BoolOption("withspecialisedtuple",
"use specialised tuples",
default=False),
@@ -222,39 +213,14 @@
default=False,
requires=[("objspace.honor__builtins__", False)]),
- BoolOption("withmapdict",
- "make instances really small but slow without the JIT",
- default=False,
- requires=[("objspace.std.getattributeshortcut", True),
- ("objspace.std.withtypeversion", True),
- ]),
-
- BoolOption("withrangelist",
- "enable special range list implementation that does not "
- "actually create the full list until the resulting "
- "list is mutated",
- default=False),
BoolOption("withliststrategies",
"enable optimized ways to store lists of primitives ",
default=True),
- BoolOption("withtypeversion",
- "version type objects when changing them",
- cmdline=None,
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
-
- BoolOption("withmethodcache",
- "try to cache method lookups",
- default=False,
- requires=[("objspace.std.withtypeversion", True),
- ("translation.rweakref", True)]),
BoolOption("withmethodcachecounter",
"try to cache methods and provide a counter in __pypy__. "
"for testing purposes only.",
- default=False,
- requires=[("objspace.std.withmethodcache", True)]),
+ default=False),
IntOption("methodcachesizeexp",
" 2 ** methodcachesizeexp is the size of the of the method
cache ",
default=11),
@@ -265,22 +231,10 @@
BoolOption("optimized_list_getitem",
"special case the 'list[integer]' expressions",
default=False),
- BoolOption("getattributeshortcut",
- "track types that override __getattribute__",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
BoolOption("newshortcut",
"cache and shortcut calling __new__ from builtin types",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
+ default=False),
- BoolOption("withidentitydict",
- "track types that override __hash__, __eq__ or __cmp__ and
use a special dict strategy for those which do not",
- default=False,
- # weakrefs needed, because of get_subclasses()
- requires=[("translation.rweakref", True)]),
]),
])
@@ -296,15 +250,10 @@
"""
# all the good optimizations for PyPy should be listed here
if level in ['2', '3', 'jit']:
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withmethodcache=True)
- config.objspace.std.suggest(withprebuiltchar=True)
config.objspace.std.suggest(intshortcut=True)
config.objspace.std.suggest(optimized_list_getitem=True)
- config.objspace.std.suggest(getattributeshortcut=True)
#config.objspace.std.suggest(newshortcut=True)
config.objspace.std.suggest(withspecialisedtuple=True)
- config.objspace.std.suggest(withidentitydict=True)
#if not IS_64_BITS:
# config.objspace.std.suggest(withsmalllong=True)
@@ -317,16 +266,13 @@
# memory-saving optimizations
if level == 'mem':
config.objspace.std.suggest(withprebuiltint=True)
- config.objspace.std.suggest(withrangelist=True)
- config.objspace.std.suggest(withprebuiltchar=True)
- config.objspace.std.suggest(withmapdict=True)
+ config.objspace.std.suggest(withliststrategies=True)
if not IS_64_BITS:
config.objspace.std.suggest(withsmalllong=True)
# extra optimizations with the JIT
if level == 'jit':
config.objspace.std.suggest(withcelldict=True)
- config.objspace.std.suggest(withmapdict=True)
def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
assert conf.objspace.usemodules.gc
- conf.objspace.std.withmapdict = True
- assert conf.objspace.std.withtypeversion
- conf = get_pypy_config()
- conf.objspace.std.withtypeversion = False
- py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
def test_conflicting_gcrootfinder():
conf = get_pypy_config()
conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
def test_set_pypy_opt_level():
conf = get_pypy_config()
set_pypy_opt_level(conf, '2')
- assert conf.objspace.std.getattributeshortcut
+ assert conf.objspace.std.intshortcut
conf = get_pypy_config()
set_pypy_opt_level(conf, '0')
- assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
- conf = get_pypy_config()
- conf.translation.rweakref = False
- set_pypy_opt_level(conf, '3')
-
- assert not conf.objspace.std.withtypeversion
- assert not conf.objspace.std.withmethodcache
+ assert not conf.objspace.std.intshortcut
def test_check_documentation():
def check_file_exists(fn):
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -108,9 +108,9 @@
On Fedora::
- yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
- lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
- (XXX plus the Febora version of libgdbm-dev and tk-dev)
+ dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+ gdbm-devel
For the optional lzma module on PyPy3 you will also need ``xz-devel``.
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
To raise an application-level exception::
- raise OperationError(space.w_XxxError, space.wrap("message"))
+ from pypy.interpreter.error import oefmt
+
+ raise oefmt(space.w_XxxError, "message")
+
+ raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+ raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
To catch a specific application-level exception::
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``. This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withrangelist.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Enable "range list" objects. They are an additional implementation of the
Python
-``list`` type, indistinguishable for the normal user. Whenever the ``range``
-builtin is called, an range list is returned. As long as this list is not
-mutated (and for example only iterated over), it uses only enough memory to
-store the start, stop and step of the range. This makes using ``range`` as
-efficient as ``xrange``, as long as the result is only used in a ``for``-loop.
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists
-
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
The work on the cling backend has so far been done only for CPython, but
bringing it to PyPy is a lot less work than developing it in the first place.
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
.. _llvm: http://llvm.org/
.. _clang: http://clang.llvm.org/
@@ -283,7 +283,8 @@
core reflection set, but for the moment assume we want to have it in the
reflection library that we are building for this example.
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries")
which is a simple XML file specifying, either explicitly or by using a
pattern, which classes, variables, namespaces, etc. to select from the given
header file.
@@ -305,7 +306,7 @@
<function name="BaseFactory" />
</lcgdict>
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
Now the reflection info can be generated and compiled::
@@ -811,7 +812,7 @@
immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
variable.
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
There are a couple of minor differences between PyCintex and cppyy, most to do
with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
wrappers. On PyPy we can't tell the difference, so
``ismethod([].__add__) == ismethod(list.__add__) == True``.
+* in CPython, the built-in types have attributes that can be
+ implemented in various ways. Depending on the way, if you try to
+ write to (or delete) a read-only (or undeletable) attribute, you get
+ either a ``TypeError`` or an ``AttributeError``. PyPy tries to
+ strike some middle ground between full consistency and full
+ compatibility here. This means that a few corner cases don't raise
+ the same exception, like ``del (lambda:None).__closure__``.
+
* in pure Python, if you write ``class A(object): def f(self): pass``
and have a subclass ``B`` which doesn't override ``f()``, then
``B.f(x)`` still checks that ``x`` is an instance of ``B``. In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
:source:`pypy/doc/discussion/` drafts of ideas and documentation
-:source:`pypy/goal/` our :ref:`main PyPy-translation scripts <translate-pypy>`
+:source:`pypy/goal/` our main PyPy-translation scripts live here
:source:`pypy/interpreter/` :doc:`bytecode interpreter <interpreter>` and related objects
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -1,19 +1,127 @@
-.. XXX armin, what do we do with this?
+Ordering finalizers in the MiniMark GC
+======================================
-Ordering finalizers in the SemiSpace GC
-=======================================
+RPython interface
+-----------------
-Goal
-----
+In RPython programs like PyPy, we need a fine-grained method of
+controlling the RPython- as well as the app-level ``__del__()``. To
+make it possible, the RPython interface is now the following one (from
+May 2016):
-After a collection, the SemiSpace GC should call the finalizers on
+* RPython objects can have ``__del__()``. These are called
+ immediately by the GC when the last reference to the object goes
+ away, like in CPython. However, the long-term goal is that all
+ ``__del__()`` methods should only contain simple enough code. If
+ they do, we call them "destructors". They can't use operations that
+ would resurrect the object, for example. Use the decorator
+ ``@rgc.must_be_light_finalizer`` to ensure they are destructors.
+
+* RPython-level ``__del__()`` that are not passing the destructor test
+ are supported for backward compatibility, but deprecated. The rest
+ of this document assumes that ``__del__()`` are all destructors.
+
+* For any more advanced usage --- in particular for any app-level
+ object with a __del__ --- we don't use the RPython-level
+ ``__del__()`` method. Instead we use
+ ``rgc.FinalizerController.register_finalizer()``. This allows us to
+ attach a finalizer method to the object, giving more control over
+ the ordering than just an RPython ``__del__()``.
+
+We try to consistently call ``__del__()`` a destructor, to distinguish
+it from a finalizer. A finalizer runs earlier, and in topological
+order; with enough care, the object may still be usable at the point
+where its finalizer runs. A destructor on the other hand runs
+last; nothing can be done with the object any more, and the GC frees it
+immediately.
+
+
+Destructors
+-----------
+
+A destructor is an RPython ``__del__()`` method that is called directly
+by the GC when it is about to free the memory. Intended for objects
+that just need to free an extra block of raw memory.
+
+There are restrictions on the kind of code you can put in ``__del__()``,
+including all other functions called by it. These restrictions are
+checked. In particular you cannot access fields containing GC objects.
+Right now you can't call any external C function either.
+
+Destructors are called precisely when the GC frees the memory of the
+object. As long as the object exists (even in some finalizer queue or
+anywhere), its destructor is not called.
+
+
+Register_finalizer
+------------------
+
+The interface for full finalizers is made with PyPy in mind, but should
+be generally useful.
+
+The idea is that you subclass the ``rgc.FinalizerQueue`` class:
+
+* You must give a class-level attribute ``base_class``, which is the
+ base class of all instances with a finalizer. (If you need
+ finalizers on several unrelated classes, you need several unrelated
+ ``FinalizerQueue`` subclasses.)
+
+* You override the ``finalizer_trigger()`` method; see below.
+
+Then you create one global (or space-specific) instance of this
+subclass; call it ``fin``. At runtime, you call
+``fin.register_finalizer(obj)`` for every instance ``obj`` that needs
+a finalizer. Each ``obj`` must be an instance of ``fin.base_class``,
+but not every such instance needs to have a finalizer registered;
+typically we try to register a finalizer on as few objects as possible
+(e.g. only if it is an object which has an app-level ``__del__()``
+method).
+
+After a major collection, the GC finds all objects ``obj`` on which a
+finalizer was registered and which are unreachable, and marks them as
+reachable again, as well as all objects they depend on. It then picks
+a topological ordering (breaking cycles randomly, if any) and enqueues
+the objects and their registered finalizer functions in that order, in
+a queue specific to the prebuilt ``fin`` instance. Finally, when the
+major collection is done, it calls ``fin.finalizer_trigger()``.
+
+This method ``finalizer_trigger()`` can either do some work directly,
+or delay it to be done later (e.g. between two bytecodes). If it does
+work directly, note that it cannot (directly or indirectly) cause the
+GIL to be released.
+
+To find the queued items, call ``fin.next_dead()`` repeatedly. It
+returns the next queued item, or ``None`` when the queue is empty.
+
+In theory, it would kind of work if you combine several different
+``FinalizerQueue`` instances for objects of the same class, and
+(always in theory) the same ``obj`` could be registered several times
+in the same queue, or in several queues. This is not tested though.
+For now the untranslated emulation does not support registering the
+same object several times.
+
+Note that the Boehm garbage collector, used in ``rpython -O0``,
+completely ignores ``register_finalizer()``.
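Putting the interface together, here is a minimal sketch of a ``FinalizerQueue`` subclass; ``W_MyObject`` and the per-object cleanup call are placeholders, not code from this changeset::

    from rpython.rlib import rgc

    class MyFinalizerQueue(rgc.FinalizerQueue):
        base_class = W_MyObject      # hypothetical base class of finalizable objects

        def finalizer_trigger(self):
            # called by the GC after a major collection: drain the queue
            while True:
                obj = self.next_dead()
                if obj is None:
                    break
                obj._finalize_()     # hypothetical per-object cleanup

    fin = MyFinalizerQueue()
    # at runtime, for each instance that really needs finalization:
    #     fin.register_finalizer(obj)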
+
+
+Ordering of finalizers
+----------------------
+
+After a collection, the MiniMark GC should call the finalizers on
*some* of the objects that have one and that have become unreachable.
Basically, if there is a reference chain from an object a to an object b
then it should not call the finalizer for b immediately, but just keep b
alive and try again to call its finalizer after the next collection.
-This basic idea fails when there are cycles. It's not a good idea to
+(Note that this creates rare but annoying issues as soon as the program
+creates chains of objects with finalizers more quickly than the rate at
+which major collections occur (which is very slow). In August 2013 we tried
+instead to call all finalizers of all objects found unreachable at a major
+collection. That branch, ``gc-del``, was never merged. It is still
+unclear what the real consequences would be on programs in the wild.)
+
+The basic idea fails in the presence of cycles. It's not a good idea to
keep the objects alive forever or to never call any of the finalizers.
The model we came up with is that in this case, we could just call the
finalizer of one of the objects in the cycle -- but only, of course, if
@@ -33,6 +141,7 @@
detach the finalizer (so that it's not called more than once)
call the finalizer
+
Algorithm
---------
@@ -136,28 +245,8 @@
that doesn't change the state of an object, we don't follow its children
recursively.
-In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode
-the 4 states with a single extra bit in the header:
-
- ===== ============= ======== ====================
- state is_forwarded? bit set? bit set in the copy?
- ===== ============= ======== ====================
- 0 no no n/a
- 1 no yes n/a
- 2 yes yes yes
- 3 yes whatever no
- ===== ============= ======== ====================
-
-So the loop above that does the transition from state 1 to state 2 is
-really just a copy(x) followed by scan_copied(). We must also clear the
-bit in the copy at the end, to clean up before the next collection
-(which means recursively bumping the state from 2 to 3 in the final
-loop).
-
-In the MiniMark GC, the objects don't move (apart from when they are
-copied out of the nursery), but we use the flag GCFLAG_VISITED to mark
-objects that survive, so we can also have a single extra bit for
-finalizers:
+In practice, in the MiniMark GC, we can encode
+the 4 states with a combination of two bits in the header:
===== ============== ============================
state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING
@@ -167,3 +256,8 @@
2 yes yes
3 yes no
===== ============== ============================
+
+So the loop above that does the transition from state 1 to state 2 is
+really just a recursive visit. We must also clear the
+FINALIZATION_ORDERING bit at the end (state 2 to state 3) to clean up
+before the next collection.
diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
--- a/pypy/doc/discussions.rst
+++ b/pypy/doc/discussions.rst
@@ -13,3 +13,4 @@
discussion/improve-rpython
discussion/ctypes-implementation
discussion/jit-profiler
+ discussion/rawrefcount
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -79,7 +79,7 @@
:doc:`Full details <cppyy>` are `available here <cppyy>`.
.. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
RPython Mixed Modules
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -106,20 +106,33 @@
For information on which third party extensions work (or do not work)
with PyPy see the `compatibility wiki`_.
+For more information about how we manage refcounting semantics see
+rawrefcount_.
+
.. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home
.. _cffi: http://cffi.readthedocs.org/
+.. _rawrefcount: discussion/rawrefcount.html
On which platforms does PyPy run?
---------------------------------
-PyPy is regularly and extensively tested on Linux machines. It mostly
+PyPy currently supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+PyPy is regularly and extensively tested on Linux machines. It
works on Mac and Windows: it is tested there, but most of us are running
-Linux so fixes may depend on 3rd-party contributions. PyPy's JIT
-works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7).
-Support for POWER (64-bit) is stalled at the moment.
+Linux so fixes may depend on 3rd-party contributions.
-To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or
+To bootstrap from sources, PyPy can use either CPython 2.7 or
another (e.g. older) PyPy. Cross-translation is not really supported:
e.g. to build a 32-bit PyPy, you need to have a 32-bit environment.
Cross-translation is only explicitly supported between a 32-bit Intel
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
.. toctree::
+ release-5.1.1.rst
release-5.1.0.rst
release-5.0.1.rst
release-5.0.0.rst
diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -62,29 +62,37 @@
Dictionary Optimizations
~~~~~~~~~~~~~~~~~~~~~~~~
-Multi-Dicts
-+++++++++++
+Dict Strategies
+++++++++++++++++
-Multi-dicts are a special implementation of dictionaries. It became clear that
-it is very useful to *change* the internal representation of an object during
-its lifetime. Multi-dicts are a general way to do that for dictionaries: they
-provide generic support for the switching of internal representations for
-dicts.
+Dict strategies are an implementation approach for dictionaries (and lists)
+that make it possible to use a specialized representation of the dictionary's
+data, while still being able to switch back to a general representation should
+that become necessary later.
-If you just enable multi-dicts, special representations for empty dictionaries,
-for string-keyed dictionaries. In addition there are more specialized dictionary
-implementations for various purposes (see below).
+Dict strategies are always enabled, by default there are special strategies for
+dicts with just string keys, just unicode keys and just integer keys. If one of
+those specialized strategies is used, then dict lookup can use much faster
+hashing and comparison for the dict keys. There is of course also a strategy
+for general keys.
-This is now the default implementation of dictionaries in the Python interpreter.
+Identity Dicts
++++++++++++++++
-Sharing Dicts
+We also have a strategy specialized for keys that are instances of classes
+which compares "by identity", which is the default unless you override
+``__hash__``, ``__eq__`` or ``__cmp__``. This strategy will be used only with
+new-style classes.
+
+
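On PyPy the strategy in use can be observed at app-level with the ``__pypy__`` module; a small illustration (the exact strategy names here are an assumption, not part of this changeset)::

    >>>> from __pypy__ import strategy
    >>>> strategy({})
    'EmptyDictStrategy'
    >>>> strategy({1: 'a', 2: 'b'})
    'IntDictStrategy'
    >>>> strategy({1: 'a', 'x': 'b'})   # mixed key types fall back to a general strategy
    'ObjectDictStrategy'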
+Map Dicts
+++++++++++++
-Sharing dictionaries are a special representation used together with multidicts.
-This dict representation is used only for instance dictionaries and tries to
-make instance dictionaries use less memory (in fact, in the ideal case the
-memory behaviour should be mostly like that of using __slots__).
+Map dictionaries are a special representation used together with dict strategies.
+This dict strategy is used only for instance dictionaries and tries to
+make instance dictionaries use less memory (in fact, usually memory behaviour
+should be mostly like that of using ``__slots__``).
The idea is the following: Most instances of the same class have very similar
attributes, and are even adding these keys to the dictionary in the same order
@@ -95,8 +103,6 @@
dicts:
the representation of the instance dict contains only a list of values.
-A more advanced version of sharing dicts, called *map dicts,* is available
-with the :config:`objspace.std.withmapdict` option.
List Optimizations
@@ -114,8 +120,8 @@
created. This gives the memory and speed behaviour of ``xrange`` and the
generality
of use of ``range``, and makes ``xrange`` essentially useless.
-You can enable this feature with the :config:`objspace.std.withrangelist`
-option.
+This feature is enabled by default as part of the
+:config:`objspace.std.withliststrategies` option.
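The effect is observable with the same ``__pypy__.strategy()`` helper shown earlier (again, the exact strategy names are an assumption)::

    >>>> from __pypy__ import strategy
    >>>> l = range(10000)      # no 10000-element array is materialized yet
    >>>> strategy(l)
    'SimpleRangeListStrategy'
    >>>> l.append(-1)          # the first mutation forces a general representation
    >>>> strategy(l)
    'IntegerListStrategy'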
User Class Optimizations
@@ -133,8 +139,7 @@
base classes is changed). On subsequent lookups the cached version can be used,
as long as the instance did not shadow any of its class's attributes.
-You can enable this feature with the :config:`objspace.std.withmethodcache`
-option.
+This feature is enabled by default.
Interpreter Optimizations
diff --git a/pypy/doc/release-5.1.1.rst b/pypy/doc/release-5.1.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.1.rst
@@ -0,0 +1,45 @@
+==========
+PyPy 5.1.1
+==========
+
+We have released a bugfix for PyPy 5.1, due to a regression_ in
+installing third-party packages dependant on numpy (using our numpy fork
+available at https://bitbucket.org/pypy/numpy ).
+
+Thanks to those who reported the issue. We also fixed a regression in
+translating PyPy which increased the memory required to translate. Improvement
+will be noticed by downstream packagers and those who translate rather than
+download pre-built binaries.
+
+.. _regression: https://bitbucket.org/pypy/pypy/issues/2282
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+ * **x86** machines on most common operating systems
+ (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+ * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+ * big- and little-endian variants of **PPC64** running Linux,
+
+ * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py
--- a/pypy/doc/tool/mydot.py
+++ b/pypy/doc/tool/mydot.py
@@ -68,7 +68,7 @@
help="output format")
options, args = parser.parse_args()
if len(args) != 1:
- raise ValueError, "need exactly one argument"
+ raise ValueError("need exactly one argument")
epsfile = process_dot(py.path.local(args[0]))
if options.format == "ps" or options.format == "eps":
print epsfile.read()
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -10,3 +10,82 @@
.. branch: gcheader-decl
Reduce the size of generated C sources.
+
+
+.. branch: remove-objspace-options
+
+Remove a number of options from the build process that were never tested and
+never set. Fix a performance bug in the method cache.
+
+.. branch: bitstring
+
+JIT: use bitstrings to compress the lists of read or written descrs
+that we attach to EffectInfo. Fixes a problem we had in
+remove-objspace-options.
+
+.. branch: cpyext-for-merge
+
+Update cpyext C-API support. After this branch, we are almost able to support
+upstream numpy via cpyext, so we created (yet another) fork of numpy at
+github.com/pypy/numpy with the needed changes. Among the significant changes
+to cpyext:
+ - allow c-snippet tests to be run with -A so we can verify we are compatible
+ - fix many edge cases exposed by fixing tests to run with -A
+ - issequence() logic matches cpython
+ - make PyStringObject and PyUnicodeObject field names compatible with cpython
+ - add preliminary support for PyDateTime_*
+ - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy,
+ PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile,
+ - PyAnySet_CheckExact, PyUnicode_Concat
+ - improve support for PyGILState_Ensure, PyGILState_Release, and thread
+ primitives, also find a case where CPython will allow thread creation
+ before PyEval_InitThreads is run, disallow on PyPy
+ - create a PyObject-specific list strategy
+ - rewrite slot assignment for typeobjects
+ - improve tracking of PyObject to rpython object mapping
+ - support tp_as_{number, sequence, mapping, buffer} slots
+
+(makes the pypy-c bigger; this was fixed subsequently by the
+share-cpyext-cpython-api branch)
+
+.. branch: share-mapdict-methods-2
+
+Reduce generated code for subclasses by using the same function objects in all
+generated subclasses.
+
+.. branch: share-cpyext-cpython-api
+
+.. branch: cpyext-auto-gil
+
+CPyExt tweak: instead of "GIL not held when a CPython C extension module
+calls PyXxx", we now silently acquire/release the GIL. Helps with
+CPython C extension modules that call some PyXxx() functions without
+holding the GIL (arguably, they are theoretically buggy).
+
+.. branch: cpyext-test-A
+
+Get the cpyext tests to pass with "-A" (i.e. when tested directly with
+CPython).
+
+.. branch: oefmt
+
+.. branch: cpyext-werror
+
+Compile c snippets with -Werror in cpyext
+
+.. branch: gc-del-3
+
+Add rgc.FinalizerQueue, documented in pypy/doc/discussion/finalizer-order.rst.
+It is a more flexible way to make RPython finalizers.
+
+.. branch: unpacking-cpython-shortcut
+
+.. branch: cleanups
+
+.. branch: cpyext-more-slots
+
+.. branch: use-gc-del-3
+
+Use the new rgc.FinalizerQueue mechanism to clean up the handling of
+``__del__`` methods. Fixes notably issue #2287. (All RPython
+subclasses of W_Root need to use FinalizerQueue now.)
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -63,7 +63,7 @@
## from pypy.interpreter import main, interactive, error
## con = interactive.PyPyConsole(space)
## con.interact()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " +
space.str_w(space.str(e.get_w_value(space))))
@@ -71,7 +71,7 @@
finally:
try:
space.finish()
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " +
space.str_w(space.str(e.get_w_value(space))))
@@ -115,7 +115,7 @@
space.wrap('__import__'))
space.call_function(import_, space.wrap('site'))
return rffi.cast(rffi.INT, 0)
- except OperationError, e:
+ except OperationError as e:
if verbose:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
@@ -167,7 +167,7 @@
sys._pypy_execute_source.append(glob)
exec stmt in glob
""")
- except OperationError, e:
+ except OperationError as e:
debug("OperationError:")
debug(" operror-type: " + e.w_type.getname(space))
debug(" operror-value: " +
space.str_w(space.str(e.get_w_value(space))))
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -78,7 +78,11 @@
"""
try:
# run it
- f(*fargs, **fkwds)
+ try:
+ f(*fargs, **fkwds)
+ finally:
+ sys.settrace(None)
+ sys.setprofile(None)
# we arrive here if no exception is raised. stdout cosmetics...
try:
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -84,7 +84,7 @@
space = self.space
try:
args_w = space.fixedview(w_stararg)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
raise oefmt(space.w_TypeError,
"argument after * must be a sequence, not %T",
@@ -111,7 +111,7 @@
else:
try:
w_keys = space.call_method(w_starstararg, "keys")
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_AttributeError):
raise oefmt(space.w_TypeError,
"argument after ** must be a mapping, not %T",
@@ -134,11 +134,11 @@
"""The simplest argument parsing: get the 'argcount' arguments,
or raise a real ValueError if the length is wrong."""
if self.keywords:
- raise ValueError, "no keyword arguments expected"
+ raise ValueError("no keyword arguments expected")
if len(self.arguments_w) > argcount:
- raise ValueError, "too many arguments (%d expected)" % argcount
+ raise ValueError("too many arguments (%d expected)" % argcount)
elif len(self.arguments_w) < argcount:
- raise ValueError, "not enough arguments (%d expected)" % argcount
+ raise ValueError("not enough arguments (%d expected)" % argcount)
return self.arguments_w
def firstarg(self):
@@ -279,7 +279,7 @@
try:
self._match_signature(w_firstarg,
scope_w, signature, defaults_w, 0)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
return signature.scope_length()
@@ -301,7 +301,7 @@
"""
try:
return self._parse(w_firstarg, signature, defaults_w, blindargs)
- except ArgErr, e:
+ except ArgErr as e:
raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg())
@staticmethod
@@ -352,11 +352,9 @@
for w_key in keys_w:
try:
key = space.str_w(w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError):
- raise OperationError(
- space.w_TypeError,
- space.wrap("keywords must be strings"))
+ raise oefmt(space.w_TypeError, "keywords must be strings")
if e.match(space, space.w_UnicodeEncodeError):
# Allow this to pass through
key = None
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -16,8 +16,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -115,16 +115,16 @@
def check_forbidden_name(self, name, node):
try:
misc.check_forbidden_name(name)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error("cannot assign to %s" % (e.name,), node)
def set_context(self, expr, ctx):
"""Set the context of an expression to Store or Del if possible."""
try:
expr.set_context(ctx)
- except ast.UnacceptableExpressionContext, e:
+ except ast.UnacceptableExpressionContext as e:
self.error_ast(e.msg, e.node)
- except misc.ForbiddenNameAssignment, e:
+ except misc.ForbiddenNameAssignment as e:
self.error_ast("cannot assign to %s" % (e.name,), e.node)
def handle_print_stmt(self, print_node):
@@ -1080,7 +1080,7 @@
return self.space.call_function(tp, w_num_str)
try:
return self.space.call_function(self.space.w_int, w_num_str, w_base)
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(self.space, self.space.w_ValueError):
raise
return self.space.call_function(self.space.w_float, w_num_str)
@@ -1100,7 +1100,7 @@
sub_strings_w = [parsestring.parsestr(space, encoding, atom_node.get_child(i).get_value(), unicode_literals)
for i in range(atom_node.num_children())]
- except error.OperationError, e:
+ except error.OperationError as e:
if not e.match(space, space.w_UnicodeError):
raise
# UnicodeError in literal: turn into SyntaxError
diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py
--- a/pypy/interpreter/astcompiler/symtable.py
+++ b/pypy/interpreter/astcompiler/symtable.py
@@ -325,7 +325,7 @@
try:
module.walkabout(self)
top.finalize(None, {}, {})
- except SyntaxError, e:
+ except SyntaxError as e:
e.filename = compile_info.filename
raise
self.pop_scope()
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -705,7 +705,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'unexpected indent'
else:
raise Exception("DID NOT RAISE")
@@ -717,7 +717,7 @@
""")
try:
self.simple_test(source, None, None)
- except IndentationError, e:
+ except IndentationError as e:
assert e.msg == 'expected an indented block'
else:
raise Exception("DID NOT RAISE")
@@ -969,7 +969,7 @@
def test_assert_with_tuple_arg(self):
try:
assert False, (3,)
- except AssertionError, e:
+ except AssertionError as e:
assert str(e) == "(3,)"
# BUILD_LIST_FROM_ARG is PyPy specific
diff --git a/pypy/interpreter/astcompiler/tools/asdl.py b/pypy/interpreter/astcompiler/tools/asdl.py
--- a/pypy/interpreter/astcompiler/tools/asdl.py
+++ b/pypy/interpreter/astcompiler/tools/asdl.py
@@ -96,7 +96,7 @@
def t_default(self, s):
r" . +"
- raise ValueError, "unmatched input: %s" % `s`
+ raise ValueError("unmatched input: %s" % `s`)
class ASDLParser(spark.GenericParser, object):
def __init__(self):
@@ -377,7 +377,7 @@
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
- except ASDLSyntaxError, err:
+ except ASDLSyntaxError as err:
print err
lines = buf.split("\n")
print lines[err.lineno - 1] # lines starts at 0, files at 1
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -399,8 +399,8 @@
def check_string(space, w_obj):
if not (space.isinstance_w(w_obj, space.w_str) or
space.isinstance_w(w_obj, space.w_unicode)):
- raise OperationError(space.w_TypeError, space.wrap(
- 'AST string must be of type str or unicode'))
+ raise oefmt(space.w_TypeError,
+ "AST string must be of type str or unicode")
return w_obj
def get_field(space, w_node, name, optional):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -11,7 +11,7 @@
INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX
from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag,
- UserDelAction)
+ make_finalizer_queue)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
@@ -28,6 +28,7 @@
"""This is the abstract root class of all wrapped objects that live
in a 'normal' object space like StdObjSpace."""
__slots__ = ('__weakref__',)
+ _must_be_light_finalizer_ = True
user_overridden_class = False
def getdict(self, space):
@@ -52,7 +53,7 @@
try:
space.delitem(w_dict, space.wrap(attr))
return True
- except OperationError, ex:
+ except OperationError as ex:
if not ex.match(space, space.w_KeyError):
raise
return False
@@ -67,8 +68,8 @@
return space.gettypeobject(self.typedef)
def setclass(self, space, w_subtype):
- raise OperationError(space.w_TypeError,
- space.wrap("__class__ assignment: only for heap
types"))
+ raise oefmt(space.w_TypeError,
+ "__class__ assignment: only for heap types")
def user_setup(self, space, w_subtype):
raise NotImplementedError("only for interp-level user subclasses "
@@ -77,7 +78,7 @@
def getname(self, space):
try:
return space.str_w(space.getattr(self, space.wrap('__name__')))
- except OperationError, e:
+ except OperationError as e:
if e.match(space, space.w_TypeError) or e.match(space, space.w_AttributeError):
return '?'
raise
@@ -136,9 +137,8 @@
pass
def clear_all_weakrefs(self):
- """Call this at the beginning of interp-level __del__() methods
- in subclasses. It ensures that weakrefs (if any) are cleared
- before the object is further destroyed.
+ """Ensures that weakrefs (if any) are cleared now. This is
+ called by UserDelAction before the object is finalized further.
"""
lifeline = self.getweakref()
if lifeline is not None:
@@ -151,25 +151,37 @@
self.delweakref()
lifeline.clear_all_weakrefs()
- __already_enqueued_for_destruction = ()
+ def _finalize_(self):
+ """The RPython-level finalizer.
- def enqueue_for_destruction(self, space, callback, descrname):
- """Put the object in the destructor queue of the space.
- At a later, safe point in time, UserDelAction will call
- callback(self). If that raises OperationError, prints it
- to stderr with the descrname string.
+ By default, it is *not called*. See self.register_finalizer().
+ Be ready to handle the case where the object is only half
+ initialized. Also, in some cases the object might still be
+ visible to app-level after _finalize_() is called (e.g. if
+ there is a __del__ that resurrects).
+ """
- Note that 'callback' will usually need to start with:
- assert isinstance(self, W_SpecificClass)
+ def register_finalizer(self, space):
+ """Register a finalizer for this object, so that
+ self._finalize_() will be called. You must call this method at
+ most once. Be ready to handle in _finalize_() the case where
+ the object is half-initialized, even if you only call
+ self.register_finalizer() at the end of the initialization.
+ This is because there are cases where the finalizer is already
+ registered before: if the user makes an app-level subclass with
+ a __del__. (In that case only, self.register_finalizer() does
+ nothing, because the finalizer is already registered in
+ allocate_instance().)
"""
- # this function always resurect the object, so when
- # running on top of CPython we must manually ensure that
- # we enqueue it only once
- if not we_are_translated():
- if callback in self.__already_enqueued_for_destruction:
- return
- self.__already_enqueued_for_destruction += (callback,)
- space.user_del_action.register_callback(self, callback, descrname)
+ if self.user_overridden_class and self.getclass(space).hasuserdel:
+ # already registered by space.allocate_instance()
+ if not we_are_translated():
+ assert space.finalizer_queue._already_registered(self)
+ else:
+ if not we_are_translated():
+ # does not make sense if _finalize_ is not overridden
+ assert self._finalize_.im_func is not W_Root._finalize_.im_func
+ space.finalizer_queue.register_finalizer(self)
# hooks that the mapdict implementations needs:
def _get_mapdict_map(self):
@@ -318,7 +330,7 @@
space = self.space
try:
return space.next(self.w_iter)
- except OperationError, e:
+ except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise
raise StopIteration
@@ -389,9 +401,9 @@
self.interned_strings = make_weak_value_dictionary(self, str, W_Root)
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
- self.user_del_action = UserDelAction(self)
+ make_finalizer_queue(W_Root, self)
self._code_of_sys_exc_info = None
-
+
# can be overridden to a subclass
self.initialize()
@@ -406,7 +418,7 @@
self.sys.get('builtin_module_names')):
try:
w_mod = self.getitem(w_modules, w_modname)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
continue
raise
@@ -440,7 +452,7 @@
try:
self.call_method(w_mod, "_shutdown")
- except OperationError, e:
+ except OperationError as e:
e.write_unraisable(self, "threading._shutdown()")
def __repr__(self):
@@ -476,7 +488,7 @@
assert reuse
try:
return self.getitem(w_modules, w_name)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_KeyError):
raise
@@ -706,8 +718,7 @@
try:
return rthread.allocate_lock()
except rthread.error:
- raise OperationError(self.w_RuntimeError,
- self.wrap("out of resources"))
+ raise oefmt(self.w_RuntimeError, "out of resources")
# Following is a friendly interface to common object space operations
# that can be defined in term of more primitive ones. Subclasses
@@ -764,7 +775,7 @@
def finditem(self, w_obj, w_key):
try:
return self.getitem(w_obj, w_key)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_KeyError):
return None
raise
@@ -772,7 +783,7 @@
def findattr(self, w_object, w_name):
try:
return self.getattr(w_object, w_name)
- except OperationError, e:
+ except OperationError as e:
# a PyPy extension: let SystemExit and KeyboardInterrupt go through
if e.async(self):
raise
@@ -872,7 +883,7 @@
items=items)
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
@@ -896,13 +907,12 @@
while True:
try:
w_item = self.next(w_iterator)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
break # done
if idx == expected_length:
- raise OperationError(self.w_ValueError,
- self.wrap("too many values to unpack"))
+ raise oefmt(self.w_ValueError, "too many values to unpack")
items[idx] = w_item
idx += 1
if idx < expected_length:
@@ -942,7 +952,7 @@
"""
try:
return self.len_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -952,7 +962,7 @@
return default
try:
w_hint = self.get_and_call_function(w_descr, w_obj)
- except OperationError, e:
+ except OperationError as e:
if not (e.match(self, self.w_TypeError) or
e.match(self, self.w_AttributeError)):
raise
@@ -962,8 +972,8 @@
hint = self.int_w(w_hint)
if hint < 0:
- raise OperationError(self.w_ValueError, self.wrap(
- "__length_hint__() should return >= 0"))
+ raise oefmt(self.w_ValueError,
+ "__length_hint__() should return >= 0")
return hint
def fixedview(self, w_iterable, expected_length=-1):
@@ -1049,7 +1059,7 @@
else:
return False
return self.exception_issubclass_w(w_exc_type, w_check_class)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_TypeError): # string exceptions maybe
return False
raise
@@ -1167,7 +1177,7 @@
try:
self.getattr(w_obj, self.wrap("__call__"))
return self.w_True
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_AttributeError):
raise
return self.w_False
@@ -1176,7 +1186,27 @@
return self.w_False
def issequence_w(self, w_obj):
- return (self.findattr(w_obj, self.wrap("__getitem__")) is not None)
+ if self.is_oldstyle_instance(w_obj):
+ return (self.findattr(w_obj, self.wrap('__getitem__')) is not None)
+ flag = self.type(w_obj).flag_map_or_seq
+ if flag == 'M':
+ return False
+ elif flag == 'S':
+ return True
+ else:
+ return (self.lookup(w_obj, '__getitem__') is not None)
+
+ def ismapping_w(self, w_obj):
+ if self.is_oldstyle_instance(w_obj):
+ return (self.findattr(w_obj, self.wrap('__getitem__')) is not None)
+ flag = self.type(w_obj).flag_map_or_seq
+ if flag == 'M':
+ return True
+ elif flag == 'S':
+ return False
+ else:
+ return (self.lookup(w_obj, '__getitem__') is not None and
+ self.lookup(w_obj, '__getslice__') is None)
# The code below only works
# for the simple case (new-style instance).
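[Editor's note: the new issequence_w()/ismapping_w() pair consults a per-type classification flag ('M' for mapping, 'S' for sequence) before falling back to attribute probing. A rough plain-Python model of that dispatch; FakeType and its attributes are stand-ins, not the actual RPython API:

    class FakeType(object):
        # flag_map_or_seq: 'M' = known mapping, 'S' = known sequence,
        # anything else = undecided, probe __getitem__/__getslice__
        def __init__(self, flag, has_getitem=True, has_getslice=False):
            self.flag_map_or_seq = flag
            self.has_getitem = has_getitem
            self.has_getslice = has_getslice

    def issequence(typ):
        if typ.flag_map_or_seq == 'M':
            return False
        if typ.flag_map_or_seq == 'S':
            return True
        return typ.has_getitem

    def ismapping(typ):
        if typ.flag_map_or_seq == 'M':
            return True
        if typ.flag_map_or_seq == 'S':
            return False
        # undecided: __getitem__ without __getslice__ looks map-like
        return typ.has_getitem and not typ.has_getslice

    assert issequence(FakeType('S')) and not ismapping(FakeType('S'))
    assert ismapping(FakeType('M')) and not issequence(FakeType('M'))
]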
@@ -1267,7 +1297,7 @@
def _next_or_none(self, w_it):
try:
return self.next(w_it)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_StopIteration):
raise
return None
@@ -1310,8 +1340,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
return start, stop, step
@@ -1331,8 +1360,7 @@
if start < 0:
start += seqlength
if not (0 <= start < seqlength):
- raise OperationError(self.w_IndexError,
- self.wrap("index out of range"))
+ raise oefmt(self.w_IndexError, "index out of range")
stop = 0
step = 0
length = 1
@@ -1345,7 +1373,7 @@
"""
try:
w_index = self.index(w_obj)
- except OperationError, err:
+ except OperationError as err:
if objdescr is None or not err.match(self, self.w_TypeError):
raise
raise oefmt(self.w_TypeError, "%s must be an integer, not %T",
@@ -1355,7 +1383,7 @@
# return type of __index__ is already checked by space.index(),
# but there is no reason to allow conversions anyway
index = self.int_w(w_index, allow_conversion=False)
- except OperationError, err:
+ except OperationError as err:
if not err.match(self, self.w_OverflowError):
raise
if not w_exception:
@@ -1376,20 +1404,17 @@
try:
return bigint.tolonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
def r_ulonglong_w(self, w_obj, allow_conversion=True):
bigint = self.bigint_w(w_obj, allow_conversion)
try:
return bigint.toulonglong()
except OverflowError:
- raise OperationError(self.w_OverflowError,
- self.wrap('integer too large'))
+ raise oefmt(self.w_OverflowError, "integer too large")
except ValueError:
- raise OperationError(self.w_ValueError,
- self.wrap('cannot convert negative integer '
- 'to unsigned int'))
+ raise oefmt(self.w_ValueError,
+ "cannot convert negative integer to unsigned int")
BUF_SIMPLE = 0x0000
BUF_WRITABLE = 0x0001
@@ -1506,7 +1531,7 @@
# the unicode buffer.)
try:
return self.str_w(w_obj)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_TypeError):
raise
try:
@@ -1535,8 +1560,8 @@
from rpython.rlib import rstring
result = w_obj.str_w(self)
if '\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a string without NUL characters")
return rstring.assert_str0(result)
def int_w(self, w_obj, allow_conversion=True):
@@ -1576,8 +1601,7 @@
def realstr_w(self, w_obj):
# Like str_w, but only works if w_obj is really of type 'str'.
if not self.isinstance_w(w_obj, self.w_str):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a string'))
+ raise oefmt(self.w_TypeError, "argument must be a string")
return self.str_w(w_obj)
def unicode_w(self, w_obj):
@@ -1588,16 +1612,16 @@
from rpython.rlib import rstring
result = w_obj.unicode_w(self)
if u'\x00' in result:
- raise OperationError(self.w_TypeError, self.wrap(
- 'argument must be a unicode string without NUL characters'))
+ raise oefmt(self.w_TypeError,
+ "argument must be a unicode string without NUL "
+ "characters")
return rstring.assert_str0(result)
def realunicode_w(self, w_obj):
# Like unicode_w, but only works if w_obj is really of type
# 'unicode'.
if not self.isinstance_w(w_obj, self.w_unicode):
- raise OperationError(self.w_TypeError,
- self.wrap('argument must be a unicode'))
+ raise oefmt(self.w_TypeError, "argument must be a unicode")
return self.unicode_w(w_obj)
def bool_w(self, w_obj):
@@ -1616,8 +1640,8 @@
def gateway_r_uint_w(self, w_obj):
if self.isinstance_w(w_obj, self.w_float):
- raise OperationError(self.w_TypeError,
- self.wrap("integer argument expected, got float"))
+ raise oefmt(self.w_TypeError,
+ "integer argument expected, got float")
return self.uint_w(self.int(w_obj))
def gateway_nonnegint_w(self, w_obj):
@@ -1625,8 +1649,7 @@
# the integer is negative. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
return value
def c_int_w(self, w_obj):
@@ -1634,8 +1657,7 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.gateway_int_w(w_obj)
if value < INT_MIN or value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_uint_w(self, w_obj):
@@ -1643,8 +1665,8 @@
# the integer does not fit in 32 bits. Here for gateway.py.
value = self.uint_w(w_obj)
if value > UINT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected an unsigned 32-bit integer"))
+ raise oefmt(self.w_OverflowError,
+ "expected an unsigned 32-bit integer")
return value
def c_nonnegint_w(self, w_obj):
@@ -1653,11 +1675,9 @@
# for gateway.py.
value = self.int_w(w_obj)
if value < 0:
- raise OperationError(self.w_ValueError,
- self.wrap("expected a non-negative integer"))
+ raise oefmt(self.w_ValueError, "expected a non-negative integer")
if value > INT_MAX:
- raise OperationError(self.w_OverflowError,
- self.wrap("expected a 32-bit integer"))
+ raise oefmt(self.w_OverflowError, "expected a 32-bit integer")
return value
def c_short_w(self, w_obj):
@@ -1685,7 +1705,7 @@
# instead of raising OverflowError. For obscure cases only.
try:
return self.int_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import intmask
@@ -1696,7 +1716,7 @@
# instead of raising OverflowError.
try:
return self.r_longlong_w(w_obj, allow_conversion)
- except OperationError, e:
+ except OperationError as e:
if not e.match(self, self.w_OverflowError):
raise
from rpython.rlib.rarithmetic import longlongmask
@@ -1711,22 +1731,20 @@
not self.isinstance_w(w_fd, self.w_long)):
try:
w_fileno = self.getattr(w_fd, self.wrap("fileno"))
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_AttributeError):
- raise OperationError(self.w_TypeError,
- self.wrap("argument must be an int, or have a fileno() "
- "method.")
- )
+ raise oefmt(self.w_TypeError,
+ "argument must be an int, or have a fileno() "
+ "method.")
raise
w_fd = self.call_function(w_fileno)
if (not self.isinstance_w(w_fd, self.w_int) and
not self.isinstance_w(w_fd, self.w_long)):
- raise OperationError(self.w_TypeError,
- self.wrap("fileno() returned a non-integer")
- )
+ raise oefmt(self.w_TypeError,
+ "fileno() returned a non-integer")
try:
fd = self.c_int_w(w_fd)
- except OperationError, e:
+ except OperationError as e:
if e.match(self, self.w_OverflowError):
fd = -1
else:
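[Editor's note: this hunk implements the usual file-descriptor coercion: accept an int (or long) as-is, otherwise call the object's fileno() method and check that it returned an integer. The equivalent logic as a self-contained Python 2 sketch; the error messages are copied from the hunk, the rest is illustrative:

    def as_fd(obj):
        # Accept real integers directly.
        if isinstance(obj, (int, long)):
            return obj
        # Otherwise defer to a fileno() method, if there is one.
        try:
            fileno = obj.fileno
        except AttributeError:
            raise TypeError("argument must be an int, or have a fileno() "
                            "method.")
        fd = fileno()
        if not isinstance(fd, (int, long)):
            raise TypeError("fileno() returned a non-integer")
        return fd
]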
@@ -1838,7 +1856,6 @@
('get', 'get', 3, ['__get__']),
('set', 'set', 3, ['__set__']),
('delete', 'delete', 2, ['__delete__']),
- ('userdel', 'del', 1, ['__del__']),
]
ObjSpace.BuiltinModuleTable = [
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -214,9 +214,8 @@
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
- raise OperationError(space.w_TypeError,
- space.wrap("instance exception may not "
- "have a separate value"))
+ raise oefmt(space.w_TypeError,
+ "instance exception may not have a separate value")
w_value = w_inst
w_type = w_instclass
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -2,7 +2,7 @@
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import specialize
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
TICK_COUNTER_STEP = 100
@@ -141,6 +141,12 @@
actionflag.action_dispatcher(self, frame) # slow path
bytecode_trace._always_inline_ = True
+ def _run_finalizers_now(self):
+ # Tests only: run the actions now, to ensure that the
+ # finalizable objects are really finalized. Used notably by
+ # pypy.tool.pytest.apptest.
+ self.space.actionflag.action_dispatcher(self, None)
+
def bytecode_only_trace(self, frame):
"""
Like bytecode_trace() but doesn't invoke any other events besides the
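[Editor's note: the _run_finalizers_now() helper added above gives tests a way to flush queued finalizers deterministically instead of waiting for the next tick. A hypothetical shape of an apptest helper built on it; _run_finalizers_now, rgc.collect and getexecutioncontext are real, the wrapper itself is made up:

    def run_app_finalizers(space):
        # Force a GC so finalizable objects land in the queue, then
        # drain the queue synchronously instead of at the next tick.
        from rpython.rlib import rgc
        rgc.collect()
        space.getexecutioncontext()._run_finalizers_now()
]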
@@ -214,6 +220,7 @@
self._trace(frame, 'exception', None, operationerr)
#operationerr.print_detailed_traceback(self.space)
+ @jit.dont_look_inside
@specialize.arg(1)
def sys_exc_info(self, for_hidden=False):
"""Implements sys.exc_info().
@@ -225,15 +232,7 @@
# NOTE: the result is not the wrapped sys.exc_info() !!!
"""
- frame = self.gettopframe()
- while frame:
- if frame.last_exception is not None:
- if ((for_hidden or not frame.hide()) or
- frame.last_exception is
- get_cleared_operation_error(self.space)):
- return frame.last_exception
- frame = frame.f_backref()
- return None
+ return self.gettopframe()._exc_info_unroll(self.space, for_hidden)
def set_sys_exc_info(self, operror):
frame = self.gettopframe_nohidden()
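[Editor's note: sys_exc_info() used to walk the frame chain inline; that loop now lives in _exc_info_unroll() on the frame. For reference, the removed loop above is equivalent to roughly this plain-Python rendering, assuming the same frame helpers and the get_cleared_operation_error import at the top of the module:

    def exc_info_unroll(top_frame, space, for_hidden=False):
        # Walk the frame chain and return the first still-set
        # last_exception, skipping hidden frames unless asked to
        # look at them (or unless the exception was only cleared).
        frame = top_frame
        while frame is not None:
            if frame.last_exception is not None:
                if (for_hidden or not frame.hide() or
                        frame.last_exception is
                        get_cleared_operation_error(space)):
                    return frame.last_exception
            frame = frame.f_backref()
        return None
]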
@@ -467,6 +466,13 @@
list = self.fired_actions
if list is not None:
self.fired_actions = None
+ # NB. in case there are several actions, we reset each
+ # 'action._fired' to false only when we're about to call
+ # 'action.perform()'. This means that if
+ # 'action.fire()' happens to be called any time before
+ # the corresponding perform(), the fire() has no
+ # effect---which is the effect we want, because
+ # perform() will be called anyway.
for action in list:
action._fired = False
action.perform(ec, frame)
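[Editor's note: the NB comment documents the fire()/perform() contract: _fired deduplicates repeated fire() calls, and is re-armed only just before perform() runs, so a fire() arriving in between is a harmless no-op. A toy model of that contract, illustrative rather than the actual AsyncAction machinery:

    class ToyAction(object):
        _fired = False
        def perform(self, ec, frame):
            pass

    def fire(action, fired_actions):
        # A second fire() before perform() is a no-op, as intended.
        if not action._fired:
            action._fired = True
            fired_actions.append(action)

    def run_fired(fired_actions, ec=None, frame=None):
        for action in fired_actions:
            action._fired = False      # re-arm just before perform()
            action.perform(ec, frame)  # a fire() from here on re-queues
]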
@@ -522,75 +528,98 @@
"""
-class UserDelCallback(object):
- def __init__(self, w_obj, callback, descrname):
- self.w_obj = w_obj
- self.callback = callback
- self.descrname = descrname
- self.next = None
-
class UserDelAction(AsyncAction):
"""An action that invokes all pending app-level __del__() method.
This is done as an action instead of immediately when the
- interp-level __del__() is invoked, because the latter can occur more
+ WRootFinalizerQueue is triggered, because the latter can occur more
or less anywhere in the middle of code that might not be happy with
random app-level code mutating data structures under its feet.
"""
def __init__(self, space):
AsyncAction.__init__(self, space)
- self.dying_objects = None
- self.dying_objects_last = None
- self.finalizers_lock_count = 0
- self.enabled_at_app_level = True
-
- def register_callback(self, w_obj, callback, descrname):
- cb = UserDelCallback(w_obj, callback, descrname)
- if self.dying_objects_last is None:
- self.dying_objects = cb
- else:
- self.dying_objects_last.next = cb
- self.dying_objects_last = cb
- self.fire()
+ self.finalizers_lock_count = 0 # see pypy/module/gc
+ self.enabled_at_app_level = True # see pypy/module/gc
+ self.pending_with_disabled_del = None
def perform(self, executioncontext, frame):
- if self.finalizers_lock_count > 0:
- return
self._run_finalizers()
+ @jit.dont_look_inside
def _run_finalizers(self):
- # Each call to perform() first grabs the self.dying_objects
- # and replaces it with an empty list. We do this to try to
- # avoid too deep recursions of the kind of __del__ being called
- # while in the middle of another __del__ call.
- pending = self.dying_objects
- self.dying_objects = None
- self.dying_objects_last = None
+ while True:
+ w_obj = self.space.finalizer_queue.next_dead()
+ if w_obj is None:
+ break
+ self._call_finalizer(w_obj)
+
+ def gc_disabled(self, w_obj):
+ # If we're running in 'gc.disable()' mode, record w_obj in the
+ # "call me later" list and return True. In normal mode, return
+ # False. Use this function from some _finalize_() methods:
+ # if a _finalize_() method would call some user-defined
+ # app-level function, like a weakref callback, then first do
+ # 'if gc.disabled(self): return'. Another attempt at
+ # calling _finalize_() will be made after 'gc.enable()'.
+ # (The exact rule for when to use gc_disabled() or not is a bit
+ # vague, but most importantly this includes all user-level
+ # __del__().)
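[Editor's note: the body of gc_disabled() is cut off by the diff truncation. Going by the comment and the pending_with_disabled_del attribute initialized in __init__ above, it plausibly looks something like the following; this is a guess, not the actual changeset text:

    def gc_disabled(self, w_obj):
        if self.finalizers_lock_count > 0:        # i.e. gc.disable() mode
            pending = self.pending_with_disabled_del
            if pending is None:
                pending = []
                self.pending_with_disabled_del = pending
            pending.append(w_obj)                 # "call me later" list
            return True
        return False                              # normal mode
]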