Author: Armin Rigo <[email protected]>
Branch: use-gc-del-3
Changeset: r84215:3c2a2910cc82
Date: 2016-05-05 15:18 +0200
http://bitbucket.org/pypy/pypy/changeset/3c2a2910cc82/
Log: in-progress
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -11,7 +11,7 @@
INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX
from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag,
- UserDelAction)
+ make_finalizer_queue)
from pypy.interpreter.error import OperationError, new_exception_class, oefmt
from pypy.interpreter.argument import Arguments
from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary
@@ -28,6 +28,7 @@
"""This is the abstract root class of all wrapped objects that live
in a 'normal' object space like StdObjSpace."""
__slots__ = ('__weakref__',)
+ _must_be_light_finalizer_ = True
user_overridden_class = False
def getdict(self, space):
@@ -136,9 +137,8 @@
pass
def clear_all_weakrefs(self):
- """Call this at the beginning of interp-level __del__() methods
- in subclasses. It ensures that weakrefs (if any) are cleared
- before the object is further destroyed.
+ """Ensures that weakrefs (if any) are cleared now. This is
+ called by UserDelAction before the object is finalized further.
"""
lifeline = self.getweakref()
if lifeline is not None:
@@ -151,25 +151,10 @@
self.delweakref()
lifeline.clear_all_weakrefs()
- __already_enqueued_for_destruction = ()
+ def _finalize_(self):
+ """The RPython-level finalizer.
- def enqueue_for_destruction(self, space, callback, descrname):
- """Put the object in the destructor queue of the space.
- At a later, safe point in time, UserDelAction will call
- callback(self). If that raises OperationError, prints it
- to stderr with the descrname string.
-
- Note that 'callback' will usually need to start with:
- assert isinstance(self, W_SpecificClass)
- """
- # this function always resurect the object, so when
- # running on top of CPython we must manually ensure that
- # we enqueue it only once
- if not we_are_translated():
- if callback in self.__already_enqueued_for_destruction:
- return
- self.__already_enqueued_for_destruction += (callback,)
- space.user_del_action.register_callback(self, callback, descrname)
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"""
# hooks that the mapdict implementations needs:
def _get_mapdict_map(self):
@@ -389,9 +374,9 @@
self.interned_strings = make_weak_value_dictionary(self, str, W_Root)
self.actionflag = ActionFlag() # changed by the signal module
self.check_signal_action = None # changed by the signal module
- self.user_del_action = UserDelAction(self)
+ make_finalizer_queue(W_Root, self)
self._code_of_sys_exc_info = None
-
+
# can be overridden to a subclass
self.initialize()
diff --git a/pypy/interpreter/executioncontext.py
b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -2,7 +2,7 @@
from pypy.interpreter.error import OperationError, get_cleared_operation_error
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import specialize
-from rpython.rlib import jit
+from rpython.rlib import jit, rgc
TICK_COUNTER_STEP = 100
@@ -515,75 +515,70 @@
"""
-class UserDelCallback(object):
- def __init__(self, w_obj, callback, descrname):
- self.w_obj = w_obj
- self.callback = callback
- self.descrname = descrname
- self.next = None
-
class UserDelAction(AsyncAction):
"""An action that invokes all pending app-level __del__() method.
This is done as an action instead of immediately when the
- interp-level __del__() is invoked, because the latter can occur more
+ WRootFinalizerQueue is triggered, because the latter can occur more
or less anywhere in the middle of code that might not be happy with
random app-level code mutating data structures under its feet.
"""
def __init__(self, space):
AsyncAction.__init__(self, space)
- self.dying_objects = None
- self.dying_objects_last = None
- self.finalizers_lock_count = 0
- self.enabled_at_app_level = True
-
- def register_callback(self, w_obj, callback, descrname):
- cb = UserDelCallback(w_obj, callback, descrname)
- if self.dying_objects_last is None:
- self.dying_objects = cb
- else:
- self.dying_objects_last.next = cb
- self.dying_objects_last = cb
- self.fire()
+ self.finalizers_lock_count = 0 # see pypy/module/gc
+ self.enabled_at_app_level = True # see pypy/module/gc
def perform(self, executioncontext, frame):
if self.finalizers_lock_count > 0:
return
self._run_finalizers()
+ def _report_error(self, e, where, w_obj):
+ space = self.space
+ if isinstance(e, OperationError):
+ e.write_unraisable(space, where, w_obj)
+ e.clear(space) # break up reference cycles
+ else:
+ addrstring = w_obj.getaddrstring(space)
+ msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % (
+ str(e), where, space.type(w_obj).name, addrstring))
+ space.call_method(space.sys.get('stderr'), 'write',
+ space.wrap(msg))
+
def _run_finalizers(self):
- # Each call to perform() first grabs the self.dying_objects
- # and replaces it with an empty list. We do this to try to
- # avoid too deep recursions of the kind of __del__ being called
- # while in the middle of another __del__ call.
- pending = self.dying_objects
- self.dying_objects = None
- self.dying_objects_last = None
- space = self.space
- while pending is not None:
+ while True:
+ w_obj = self.space.finalizer_queue.next_dead()
+ if w_obj is None:
+ break
+
+ # Before calling the finalizers, clear the weakrefs, if any.
+ w_obj.clear_all_weakrefs()
+
+ # Look up and call the app-level __del__, if any.
try:
- pending.callback(pending.w_obj)
- except OperationError as e:
- e.write_unraisable(space, pending.descrname, pending.w_obj)
- e.clear(space) # break up reference cycles
- pending = pending.next
- #
- # Note: 'dying_objects' used to be just a regular list instead
- # of a chained list. This was the cause of "leaks" if we have a
- # program that constantly creates new objects with finalizers.
- # Here is why: say 'dying_objects' is a long list, and there
- # are n instances in it. Then we spend some time in this
- # function, possibly triggering more GCs, but keeping the list
- # of length n alive. Then the list is suddenly freed at the
- # end, and we return to the user program. At this point the
- # GC limit is still very high, because just before, there was
- # a list of length n alive. Assume that the program continues
- # to allocate a lot of instances with finalizers. The high GC
- # limit means that it could allocate a lot of instances before
- # reaching it --- possibly more than n. So the whole procedure
- # repeats with higher and higher values of n.
- #
- # This does not occur in the current implementation because
- # there is no list of length n: if n is large, then the GC
- # will run several times while walking the list, but it will
- # see lower and lower memory usage, with no lower bound of n.
+ self.space.userdel(w_obj)
+ except Exception as e:
+ self._report_error(e, "method __del__ of ", w_obj)
+
+ # Call the RPython-level _finalize_() method.
+ try:
+ w_obj._finalize_()
+ except Exception as e:
+ self._report_error(e, "internal finalizer of ", w_obj)
+
+
+def make_finalizer_queue(W_Root, space):
+ """Make a FinalizerQueue subclass which responds to GC finalizer
+ events by 'firing' the UserDelAction class above. It does not
+ directly fetch the objects to finalize at all; they stay in the
+ GC-managed queue, and will only be fetched by UserDelAction
+ (between bytecodes)."""
+
+ class WRootFinalizerQueue(rgc.FinalizerQueue):
+ Class = W_Root
+
+ def finalizer_trigger(self):
+ space.user_del_action.fire()
+
+ space.user_del_action = UserDelAction(space)
+ space.finalizer_queue = WRootFinalizerQueue()
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit