https://github.com/python/cpython/commit/44e4c479fbf2c28605bd39303b1ce484753f6177
commit: 44e4c479fbf2c28605bd39303b1ce484753f6177
branch: main
author: Mark Shannon <m...@hotpy.org>
committer: markshannon <m...@hotpy.org>
date: 2025-04-30T11:37:53+01:00
summary:
GH-124715: Move trashcan mechanism into `Py_Dealloc` (GH-132280)

files:
A Misc/NEWS.d/next/Core_and_Builtins/2025-04-08-17-48-11.gh-issue-124715.xxzQoD.rst
M Include/cpython/object.h
M Include/internal/pycore_ceval.h
M Include/internal/pycore_pystate.h
M Include/pythonrun.h
M Modules/_elementtree.c
M Objects/descrobject.c
M Objects/dictobject.c
M Objects/exceptions.c
M Objects/frameobject.c
M Objects/listobject.c
M Objects/methodobject.c
M Objects/object.c
M Objects/odictobject.c
M Objects/setobject.c
M Objects/tupleobject.c
M Objects/typeobject.c
M Python/bltinmodule.c
M Python/ceval.c
M Python/gc.c
M Python/gc_free_threading.c
M Python/hamt.c
M Python/instruction_sequence.c
M Python/pylifecycle.c
M Python/pystate.c
M Python/traceback.c

diff --git a/Include/cpython/object.h b/Include/cpython/object.h
index e2300aee7a207a..b6c508e6e29649 100644
--- a/Include/cpython/object.h
+++ b/Include/cpython/object.h
@@ -429,81 +429,14 @@ PyAPI_FUNC(void) _Py_NO_RETURN _PyObject_AssertFailed(
     const char *function);
 
-/* Trashcan mechanism, thanks to Christian Tismer.
-
-When deallocating a container object, it's possible to trigger an unbounded
-chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
-next" object in the chain to 0. This can easily lead to stack overflows,
-especially in threads (which typically have less stack space to work with).
-
-A container object can avoid this by bracketing the body of its tp_dealloc
-function with a pair of macros:
-
-static void
-mytype_dealloc(mytype *p)
-{
-    ... declarations go here ...
-
-    PyObject_GC_UnTrack(p);  // must untrack first
-    Py_TRASHCAN_BEGIN(p, mytype_dealloc)
-    ... The body of the deallocator goes here, including all calls ...
-    ... to Py_DECREF on contained objects.                         ...
-    Py_TRASHCAN_END  // there should be no code after this
-}
-
-CAUTION: Never return from the middle of the body! If the body needs to
-"get out early", put a label immediately before the Py_TRASHCAN_END
-call, and goto it. Else the call-depth counter (see below) will stay
-above 0 forever, and the trashcan will never get emptied.
-
-How it works: The BEGIN macro increments a call-depth counter. So long
-as this counter is small, the body of the deallocator is run directly without
-further ado. But if the counter gets large, it instead adds p to a list of
-objects to be deallocated later, skips the body of the deallocator, and
-resumes execution after the END macro. The tp_dealloc routine then returns
-without deallocating anything (and so unbounded call-stack depth is avoided).
-
-When the call stack finishes unwinding again, code generated by the END macro
-notices this, and calls another routine to deallocate all the objects that
-may have been added to the list of deferred deallocations. In effect, a
-chain of N deallocations is broken into (N-1)/(Py_TRASHCAN_HEADROOM-1) pieces,
-with the call stack never exceeding a depth of Py_TRASHCAN_HEADROOM.
-
-Since the tp_dealloc of a subclass typically calls the tp_dealloc of the base
-class, we need to ensure that the trashcan is only triggered on the tp_dealloc
-of the actual class being deallocated. Otherwise we might end up with a
-partially-deallocated object. To check this, the tp_dealloc function must be
-passed as second argument to Py_TRASHCAN_BEGIN().
-*/
-
 PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyThreadState *tstate, PyObject *op);
 PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(PyThreadState *tstate);
 
-/* Python 3.10 private API, invoked by the Py_TRASHCAN_BEGIN(). */
-
-/* To avoid raising recursion errors during dealloc trigger trashcan before we reach
- * recursion limit. To avoid trashing, we don't attempt to empty the trashcan until
- * we have headroom above the trigger limit */
-#define Py_TRASHCAN_HEADROOM 50
-
-/* Helper function for Py_TRASHCAN_BEGIN */
 PyAPI_FUNC(int) _Py_ReachedRecursionLimitWithMargin(PyThreadState *tstate, int margin_count);
 
-#define Py_TRASHCAN_BEGIN(op, dealloc) \
-do { \
-    PyThreadState *tstate = PyThreadState_Get(); \
-    if (_Py_ReachedRecursionLimitWithMargin(tstate, 2) && Py_TYPE(op)->tp_dealloc == (destructor)dealloc) { \
-        _PyTrash_thread_deposit_object(tstate, (PyObject *)op); \
-        break; \
-    }
-        /* The body of the deallocator is here. */
-#define Py_TRASHCAN_END \
-        if (tstate->delete_later && !_Py_ReachedRecursionLimitWithMargin(tstate, 4)) { \
-            _PyTrash_thread_destroy_chain(tstate); \
-        } \
-} while (0);
+/* For backwards compatibility with the old trashcan mechanism */
+#define Py_TRASHCAN_BEGIN(op, dealloc)
+#define Py_TRASHCAN_END
 
 PyAPI_FUNC(void *) PyObject_GetItemData(PyObject *obj);

diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h
index 115dd40b8bdfce..3d8247df31ce32 100644
--- a/Include/internal/pycore_ceval.h
+++ b/Include/internal/pycore_ceval.h
@@ -196,25 +196,6 @@ extern void _PyEval_DeactivateOpCache(void);
 
 /* --- _Py_EnterRecursiveCall() ----------------------------------------- */
 
-#if !_Py__has_builtin(__builtin_frame_address) && !defined(_MSC_VER)
-static uintptr_t return_pointer_as_int(char* p) {
-    return (uintptr_t)p;
-}
-#endif
-
-static inline uintptr_t
-_Py_get_machine_stack_pointer(void) {
-#if _Py__has_builtin(__builtin_frame_address)
-    return (uintptr_t)__builtin_frame_address(0);
-#elif defined(_MSC_VER)
-    return (uintptr_t)_AddressOfReturnAddress();
-#else
-    char here;
-    /* Avoid compiler warning about returning stack address */
-    return return_pointer_as_int(&here);
-#endif
-}
-
 static inline int _Py_MakeRecCheck(PyThreadState *tstate)  {
     uintptr_t here_addr = _Py_get_machine_stack_pointer();
     _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
@@ -249,12 +230,7 @@ PyAPI_FUNC(void) _Py_InitializeRecursionLimits(PyThreadState *tstate);
 static inline int _Py_ReachedRecursionLimit(PyThreadState *tstate)  {
     uintptr_t here_addr = _Py_get_machine_stack_pointer();
     _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
-    if (here_addr > _tstate->c_stack_soft_limit) {
-        return 0;
-    }
-    if (_tstate->c_stack_hard_limit == 0) {
-        _Py_InitializeRecursionLimits(tstate);
-    }
+    assert(_tstate->c_stack_hard_limit != 0);
     return here_addr <= _tstate->c_stack_soft_limit;
 }

diff --git a/Include/internal/pycore_pystate.h b/Include/internal/pycore_pystate.h
index 601b9790001a03..e8b630f5441b3a 100644
--- a/Include/internal/pycore_pystate.h
+++ b/Include/internal/pycore_pystate.h
@@ -9,6 +9,7 @@ extern "C" {
 #endif
 
 #include "pycore_typedefs.h"      // _PyRuntimeState
+#include "pycore_tstate.h"
 
 // Values for PyThreadState.state. A thread must be in the "attached" state
@@ -299,6 +300,34 @@ _Py_AssertHoldsTstateFunc(const char *func)
 #define _Py_AssertHoldsTstate()
 #endif
 
+#if !_Py__has_builtin(__builtin_frame_address) && !defined(_MSC_VER)
+static uintptr_t return_pointer_as_int(char* p) {
+    return (uintptr_t)p;
+}
+#endif
+
+static inline uintptr_t
+_Py_get_machine_stack_pointer(void) {
+#if _Py__has_builtin(__builtin_frame_address)
+    return (uintptr_t)__builtin_frame_address(0);
+#elif defined(_MSC_VER)
+    return (uintptr_t)_AddressOfReturnAddress();
+#else
+    char here;
+    /* Avoid compiler warning about returning stack address */
+    return return_pointer_as_int(&here);
+#endif
+}
+
+static inline intptr_t
+_Py_RecursionLimit_GetMargin(PyThreadState *tstate)
+{
+    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+    assert(_tstate->c_stack_hard_limit != 0);
+    intptr_t here_addr = _Py_get_machine_stack_pointer();
+    return Py_ARITHMETIC_RIGHT_SHIFT(intptr_t, here_addr - (intptr_t)_tstate->c_stack_soft_limit, PYOS_STACK_MARGIN_SHIFT);
+}
+
 #ifdef __cplusplus
 }
 #endif

diff --git a/Include/pythonrun.h b/Include/pythonrun.h
index 4d459cb92e36ea..fad2b3c77476e4 100644
--- a/Include/pythonrun.h
+++ b/Include/pythonrun.h
@@ -26,17 +26,25 @@ PyAPI_DATA(int) (*PyOS_InputHook)(void);
  * apart. In practice, that means it must be larger than the C
  * stack consumption of PyEval_EvalDefault */
 #if defined(_Py_ADDRESS_SANITIZER) || defined(_Py_THREAD_SANITIZER)
-#  define PYOS_STACK_MARGIN 4096
+#  define PYOS_LOG2_STACK_MARGIN 12
 #elif defined(Py_DEBUG) && defined(WIN32)
-#  define PYOS_STACK_MARGIN 4096
+#  define PYOS_LOG2_STACK_MARGIN 12
 #elif defined(__wasi__)
    /* Web assembly has two stacks, so this isn't really a size */
-#  define PYOS_STACK_MARGIN 500
+#  define PYOS_LOG2_STACK_MARGIN 9
 #else
-#  define PYOS_STACK_MARGIN 2048
+#  define PYOS_LOG2_STACK_MARGIN 11
 #endif
+#define PYOS_STACK_MARGIN (1 << PYOS_LOG2_STACK_MARGIN)
 #define PYOS_STACK_MARGIN_BYTES (PYOS_STACK_MARGIN * sizeof(void *))
 
+#if SIZEOF_VOID_P == 8
+#define PYOS_STACK_MARGIN_SHIFT (PYOS_LOG2_STACK_MARGIN + 3)
+#else
+#define PYOS_STACK_MARGIN_SHIFT (PYOS_LOG2_STACK_MARGIN + 2)
+#endif
+
+
 #if defined(WIN32)
 #define USE_STACKCHECK
 #endif

diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-04-08-17-48-11.gh-issue-124715.xxzQoD.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-04-08-17-48-11.gh-issue-124715.xxzQoD.rst
new file mode 100644
index 00000000000000..f0e3318560902a
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-04-08-17-48-11.gh-issue-124715.xxzQoD.rst
@@ -0,0 +1,3 @@
+Protects against stack overflows when calling :c:func:`Py_DECREF`. Third-party
+extension objects no longer need to use the "trashcan" mechanism, as
+protection is now built into the :c:func:`Py_DECREF` macro.
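For extension authors, the upshot is that a tp_dealloc written against the old
API keeps compiling unchanged. The sketch below shows a hypothetical
third-party container type (the MyList names and layout are illustrative, not
taken from this commit); after this change the two bracketing macros expand to
nothing, and the recursion protection happens inside Py_DECREF itself:

#include <Python.h>

/* Hypothetical third-party type; "MyList"/"mylist_dealloc" are
 * illustrative names, not part of CPython. */
typedef struct {
    PyObject_HEAD
    PyObject *item;  /* may point at another MyList, forming a long chain */
} MyListObject;

static void
mylist_dealloc(PyObject *op)
{
    MyListObject *self = (MyListObject *)op;
    PyObject_GC_UnTrack(op);                 /* still untrack before any callbacks */
    Py_TRASHCAN_BEGIN(self, mylist_dealloc)  /* now expands to nothing */
    Py_XDECREF(self->item);                  /* Py_DECREF itself bounds the depth */
    Py_TYPE(op)->tp_free(op);
    Py_TRASHCAN_END                          /* now expands to nothing */
}

New code can simply omit both macro lines; they are kept only so that existing
sources build unchanged.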
diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c
index 24b567b6caa260..8c3efa36353e24 100644
--- a/Modules/_elementtree.c
+++ b/Modules/_elementtree.c
@@ -689,7 +689,6 @@ element_dealloc(PyObject *op)
 
     /* bpo-31095: UnTrack is needed before calling any callbacks */
     PyObject_GC_UnTrack(self);
-    Py_TRASHCAN_BEGIN(self, element_dealloc)
 
     if (self->weakreflist != NULL)
         PyObject_ClearWeakRefs(op);
@@ -700,7 +699,6 @@ element_dealloc(PyObject *op)
 
     tp->tp_free(self);
     Py_DECREF(tp);
-    Py_TRASHCAN_END
 }
 
 /* -------------------------------------------------------------------- */

diff --git a/Objects/descrobject.c b/Objects/descrobject.c
index 5ff36edd3ddb72..10c465b95ac192 100644
--- a/Objects/descrobject.c
+++ b/Objects/descrobject.c
@@ -1311,11 +1311,9 @@ wrapper_dealloc(PyObject *self)
 {
     wrapperobject *wp = (wrapperobject *)self;
     PyObject_GC_UnTrack(wp);
-    Py_TRASHCAN_BEGIN(wp, wrapper_dealloc)
     Py_XDECREF(wp->descr);
     Py_XDECREF(wp->self);
     PyObject_GC_Del(wp);
-    Py_TRASHCAN_END
 }
 
 static PyObject *

diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index b6f623b8ce9df6..00658a8ac35bcf 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -3285,7 +3285,6 @@ dict_dealloc(PyObject *self)
 
     /* bpo-31095: UnTrack is needed before calling any callbacks */
     PyObject_GC_UnTrack(mp);
-    Py_TRASHCAN_BEGIN(mp, dict_dealloc)
     if (values != NULL) {
         if (values->embedded == 0) {
             for (i = 0, n = values->capacity; i < n; i++) {
@@ -3305,7 +3304,6 @@ dict_dealloc(PyObject *self)
     else {
         Py_TYPE(mp)->tp_free((PyObject *)mp);
     }
-    Py_TRASHCAN_END
 }

diff --git a/Objects/exceptions.c b/Objects/exceptions.c
index d642130eaae625..b17cac83551670 100644
--- a/Objects/exceptions.c
+++ b/Objects/exceptions.c
@@ -150,10 +150,8 @@ BaseException_dealloc(PyObject *op)
     // bpo-44348: The trashcan mechanism prevents stack overflow when deleting
     // long chains of exceptions. For example, exceptions can be chained
     // through the __context__ attributes or the __traceback__ attribute.
-    Py_TRASHCAN_BEGIN(self, BaseException_dealloc)
     (void)BaseException_clear(op);
     Py_TYPE(self)->tp_free(self);
-    Py_TRASHCAN_END
 }
 
 static int

diff --git a/Objects/frameobject.c b/Objects/frameobject.c
index 7a62219c139ee5..76b52efccf804f 100644
--- a/Objects/frameobject.c
+++ b/Objects/frameobject.c
@@ -1917,7 +1917,6 @@ frame_dealloc(PyObject *op)
         _PyObject_GC_UNTRACK(f);
     }
 
-    Py_TRASHCAN_BEGIN(f, frame_dealloc);
     /* GH-106092: If f->f_frame was on the stack and we reached the maximum
      * nesting depth for deallocations, the trashcan may have delayed this
      * deallocation until after f->f_frame is freed. Avoid dereferencing
@@ -1942,7 +1941,6 @@ frame_dealloc(PyObject *op)
     Py_CLEAR(f->f_locals_cache);
     Py_CLEAR(f->f_overwritten_fast_locals);
     PyObject_GC_Del(f);
-    Py_TRASHCAN_END;
 }
 
 static int

diff --git a/Objects/listobject.c b/Objects/listobject.c
index 7648c1dfe9f0a8..ad132cf6b59cad 100644
--- a/Objects/listobject.c
+++ b/Objects/listobject.c
@@ -550,7 +550,6 @@ list_dealloc(PyObject *self)
     PyListObject *op = (PyListObject *)self;
     Py_ssize_t i;
     PyObject_GC_UnTrack(op);
-    Py_TRASHCAN_BEGIN(op, list_dealloc)
     if (op->ob_item != NULL) {
         /* Do it backwards, for Christian Tismer.
            There's a simple test case where somehow this reduces
@@ -569,7 +568,6 @@ list_dealloc(PyObject *self)
     else {
         PyObject_GC_Del(op);
     }
-    Py_TRASHCAN_END
 }
 
 static PyObject *

diff --git a/Objects/methodobject.c b/Objects/methodobject.c
index 8b28662631b227..c3dcd09ad1cdb6 100644
--- a/Objects/methodobject.c
+++ b/Objects/methodobject.c
@@ -166,10 +166,7 @@ static void
 meth_dealloc(PyObject *self)
 {
     PyCFunctionObject *m = _PyCFunctionObject_CAST(self);
-    // The Py_TRASHCAN mechanism requires that we be able to
-    // call PyObject_GC_UnTrack twice on an object.
     PyObject_GC_UnTrack(m);
-    Py_TRASHCAN_BEGIN(m, meth_dealloc);
     if (m->m_weakreflist != NULL) {
         PyObject_ClearWeakRefs((PyObject*) m);
     }
@@ -190,7 +187,6 @@ meth_dealloc(PyObject *self)
         assert(Py_IS_TYPE(self, &PyCFunction_Type));
         _Py_FREELIST_FREE(pycfunctionobject, m, PyObject_GC_Del);
     }
-    Py_TRASHCAN_END;
 }
 
 static PyObject *

diff --git a/Objects/object.c b/Objects/object.c
index a33a4267d62d65..70d10b071d2d98 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -2913,13 +2913,15 @@ Py_ReprLeave(PyObject *obj)
 void
 _PyTrash_thread_deposit_object(PyThreadState *tstate, PyObject *op)
 {
-    _PyObject_ASSERT(op, _PyObject_IS_GC(op));
-    _PyObject_ASSERT(op, !_PyObject_GC_IS_TRACKED(op));
     _PyObject_ASSERT(op, Py_REFCNT(op) == 0);
 #ifdef Py_GIL_DISABLED
     op->ob_tid = (uintptr_t)tstate->delete_later;
 #else
-    _PyGCHead_SET_PREV(_Py_AS_GC(op), (PyGC_Head*)tstate->delete_later);
+    /* Store the delete_later pointer in the refcnt field.
+     * As this object may still be tracked by the GC,
+     * it is important that we never store 0 (NULL). */
+    uintptr_t refcnt = (uintptr_t)tstate->delete_later;
+    *((uintptr_t*)op) = refcnt+1;
 #endif
     tstate->delete_later = op;
 }
@@ -2938,7 +2940,11 @@ _PyTrash_thread_destroy_chain(PyThreadState *tstate)
         op->ob_tid = 0;
         _Py_atomic_store_ssize_relaxed(&op->ob_ref_shared, _Py_REF_MERGED);
 #else
-        tstate->delete_later = (PyObject*) _PyGCHead_PREV(_Py_AS_GC(op));
+        /* Get the delete_later pointer from the refcnt field.
+         * See _PyTrash_thread_deposit_object(). */
+        uintptr_t refcnt = *((uintptr_t*)op);
+        tstate->delete_later = (PyObject *)(refcnt - 1);
+        op->ob_refcnt = 0;
 #endif
 
         /* Call the deallocator directly. This used to try to
@@ -3003,13 +3009,25 @@ _PyObject_AssertFailed(PyObject *obj, const char *expr, const char *msg,
 }
 
 
+/*
+When deallocating a container object, it's possible to trigger an unbounded
+chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
+next" object in the chain to 0. This can easily lead to stack overflows.
+To avoid that, if the C stack is nearing its limit, instead of calling
+dealloc on the object, it is added to a queue to be freed later when the
+stack is shallower */
 void
 _Py_Dealloc(PyObject *op)
 {
     PyTypeObject *type = Py_TYPE(op);
     destructor dealloc = type->tp_dealloc;
-#ifdef Py_DEBUG
     PyThreadState *tstate = _PyThreadState_GET();
+    intptr_t margin = _Py_RecursionLimit_GetMargin(tstate);
+    if (margin < 2) {
+        _PyTrash_thread_deposit_object(tstate, (PyObject *)op);
+        return;
+    }
+#ifdef Py_DEBUG
 #if !defined(Py_GIL_DISABLED) && !defined(Py_STACKREF_DEBUG)
     /* This assertion doesn't hold for the free-threading build, as
      * PyStackRef_CLOSE_SPECIALIZED is not implemented */
@@ -3051,6 +3069,9 @@ _Py_Dealloc(PyObject *op)
     Py_XDECREF(old_exc);
     Py_DECREF(type);
 #endif
+    if (tstate->delete_later && margin >= 4) {
+        _PyTrash_thread_destroy_chain(tstate);
+    }
 }

diff --git a/Objects/odictobject.c b/Objects/odictobject.c
index 1412acb50ac5ff..891f6197401503 100644
--- a/Objects/odictobject.c
+++ b/Objects/odictobject.c
@@ -1389,7 +1389,6 @@ odict_dealloc(PyObject *op)
 {
     PyODictObject *self = _PyODictObject_CAST(op);
     PyObject_GC_UnTrack(self);
-    Py_TRASHCAN_BEGIN(self, odict_dealloc)
 
     Py_XDECREF(self->od_inst_dict);
     if (self->od_weakreflist != NULL)
@@ -1397,8 +1396,6 @@ odict_dealloc(PyObject *op)
 
     _odict_clear_nodes(self);
     PyDict_Type.tp_dealloc((PyObject *)self);
-
-    Py_TRASHCAN_END
 }
 
 /* tp_repr */

diff --git a/Objects/setobject.c b/Objects/setobject.c
index 90e7e6ae14affe..8aa6b0d180907b 100644
--- a/Objects/setobject.c
+++ b/Objects/setobject.c
@@ -536,7 +536,6 @@ set_dealloc(PyObject *self)
 
     /* bpo-31095: UnTrack is needed before calling any callbacks */
     PyObject_GC_UnTrack(so);
-    Py_TRASHCAN_BEGIN(so, set_dealloc)
     if (so->weakreflist != NULL)
         PyObject_ClearWeakRefs((PyObject *) so);
 
@@ -549,7 +548,6 @@ set_dealloc(PyObject *self)
     if (so->table != so->smalltable)
         PyMem_Free(so->table);
     Py_TYPE(so)->tp_free(so);
-    Py_TRASHCAN_END
 }
 
 static PyObject *

diff --git a/Objects/tupleobject.c b/Objects/tupleobject.c
index 737c4e6d97794a..9b31758485ca5e 100644
--- a/Objects/tupleobject.c
+++ b/Objects/tupleobject.c
@@ -207,7 +207,6 @@ tuple_dealloc(PyObject *self)
     }
 
     PyObject_GC_UnTrack(op);
-    Py_TRASHCAN_BEGIN(op, tuple_dealloc)
 
     Py_ssize_t i = Py_SIZE(op);
     while (--i >= 0) {
@@ -217,8 +216,6 @@ tuple_dealloc(PyObject *self)
     if (!maybe_freelist_push(op)) {
         Py_TYPE(op)->tp_free((PyObject *)op);
     }
-
-    Py_TRASHCAN_END
 }
 
 static PyObject *

diff --git a/Objects/typeobject.c b/Objects/typeobject.c
index 4e614daaa6955b..ff68311281ce6f 100644
--- a/Objects/typeobject.c
+++ b/Objects/typeobject.c
@@ -2575,7 +2575,6 @@ subtype_dealloc(PyObject *self)
     /* UnTrack and re-Track around the trashcan macro, alas */
     /* See explanation at end of function for full disclosure */
     PyObject_GC_UnTrack(self);
-    Py_TRASHCAN_BEGIN(self, subtype_dealloc);
 
     /* Find the nearest base with a different tp_dealloc */
     base = type;
@@ -2590,7 +2589,7 @@ subtype_dealloc(PyObject *self)
         _PyObject_GC_TRACK(self);
         if (PyObject_CallFinalizerFromDealloc(self) < 0) {
             /* Resurrected */
-            goto endlabel;
+            return;
         }
         _PyObject_GC_UNTRACK(self);
     }
@@ -2612,7 +2611,7 @@ subtype_dealloc(PyObject *self)
         type->tp_del(self);
         if (Py_REFCNT(self) > 0) {
             /* Resurrected */
-            goto endlabel;
+            return;
         }
         _PyObject_GC_UNTRACK(self);
     }
@@ -2675,46 +2674,6 @@ subtype_dealloc(PyObject *self)
     if (type_needs_decref) {
         _Py_DECREF_TYPE(type);
     }
-
-  endlabel:
-    Py_TRASHCAN_END
-
-    /* Explanation of the weirdness around the trashcan macros:
-
-       Q. What do the trashcan macros do?
-
-       A. Read the comment titled "Trashcan mechanism" in object.h.
-          For one, this explains why there must be a call to GC-untrack
-          before the trashcan begin macro. Without understanding the
-          trashcan code, the answers to the following questions don't make
-          sense.
-
-       Q. Why do we GC-untrack before the trashcan and then immediately
-          GC-track again afterward?
-
-       A. In the case that the base class is GC-aware, the base class
-          probably GC-untracks the object. If it does that using the
-          UNTRACK macro, this will crash when the object is already
-          untracked. Because we don't know what the base class does, the
-          only safe thing is to make sure the object is tracked when we
-          call the base class dealloc. But... The trashcan begin macro
-          requires that the object is *untracked* before it is called. So
-          the dance becomes:
-
-          GC untrack
-          trashcan begin
-          GC track
-
-       Q. Why did the last question say "immediately GC-track again"?
-          It's nowhere near immediately.
-
-       A. Because the code *used* to re-track immediately. Bad Idea.
-          self has a refcount of 0, and if gc ever gets its hands on it
-          (which can happen if any weakref callback gets invoked), it
-          looks like trash to gc too, and gc also tries to delete self
-          then. But we're already deleting self. Double deallocation is
-          a subtle disaster.
-    */
 }
 
 static PyTypeObject *solid_base(PyTypeObject *type);

diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c
index 8ed0a96270b4ee..3221d5acf96f71 100644
--- a/Python/bltinmodule.c
+++ b/Python/bltinmodule.c
@@ -566,11 +566,9 @@ filter_dealloc(PyObject *self)
 {
     filterobject *lz = _filterobject_CAST(self);
     PyObject_GC_UnTrack(lz);
-    Py_TRASHCAN_BEGIN(lz, filter_dealloc)
     Py_XDECREF(lz->func);
     Py_XDECREF(lz->it);
     Py_TYPE(lz)->tp_free(lz);
-    Py_TRASHCAN_END
 }
 
 static int

diff --git a/Python/ceval.c b/Python/ceval.c
index c3a7a27f2917ca..4a75b60c9f0063 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -482,12 +482,6 @@ _Py_CheckRecursiveCall(PyThreadState *tstate, const char *where)
     _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
     uintptr_t here_addr = _Py_get_machine_stack_pointer();
     assert(_tstate->c_stack_soft_limit != 0);
-    if (_tstate->c_stack_hard_limit == 0) {
-        _Py_InitializeRecursionLimits(tstate);
-    }
-    if (here_addr >= _tstate->c_stack_soft_limit) {
-        return 0;
-    }
     assert(_tstate->c_stack_hard_limit != 0);
     if (here_addr < _tstate->c_stack_hard_limit) {
         /* Overflowing while handling an overflow. Give up. */

diff --git a/Python/gc.c b/Python/gc.c
index 58224acff2cdd9..b7b48c8af39c4a 100644
--- a/Python/gc.c
+++ b/Python/gc.c
@@ -2207,9 +2207,8 @@ void
 PyObject_GC_UnTrack(void *op_raw)
 {
     PyObject *op = _PyObject_CAST(op_raw);
-    /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
-     * call PyObject_GC_UnTrack twice on an object.
-     */
+    /* The code for some objects, such as tuples, is a bit
+     * sloppy about when the object is tracked and untracked. */
     if (_PyObject_GC_IS_TRACKED(op)) {
         _PyObject_GC_UNTRACK(op);
     }

diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c
index d22307ae4ff74e..2db75e0fd416f9 100644
--- a/Python/gc_free_threading.c
+++ b/Python/gc_free_threading.c
@@ -2511,9 +2511,8 @@ void
 PyObject_GC_UnTrack(void *op_raw)
 {
     PyObject *op = _PyObject_CAST(op_raw);
-    /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
-     * call PyObject_GC_UnTrack twice on an object.
-     */
+    /* The code for some objects, such as tuples, is a bit
+     * sloppy about when the object is tracked and untracked. */
     if (_PyObject_GC_IS_TRACKED(op)) {
         _PyObject_GC_UNTRACK(op);
     }

diff --git a/Python/hamt.c b/Python/hamt.c
index e4d1e1663dd573..f9bbf63961d8de 100644
--- a/Python/hamt.c
+++ b/Python/hamt.c
@@ -1118,7 +1118,6 @@ hamt_node_bitmap_dealloc(PyObject *self)
     }
 
     PyObject_GC_UnTrack(self);
-    Py_TRASHCAN_BEGIN(self, hamt_node_bitmap_dealloc)
 
     if (len > 0) {
         i = len;
@@ -1128,7 +1127,6 @@ hamt_node_bitmap_dealloc(PyObject *self)
     }
 
     Py_TYPE(self)->tp_free(self);
-    Py_TRASHCAN_END
 }
 
 #ifdef Py_DEBUG
@@ -1508,7 +1506,6 @@ hamt_node_collision_dealloc(PyObject *self)
     /* Collision's tp_dealloc */
     Py_ssize_t len = Py_SIZE(self);
 
     PyObject_GC_UnTrack(self);
-    Py_TRASHCAN_BEGIN(self, hamt_node_collision_dealloc)
 
     if (len > 0) {
         PyHamtNode_Collision *node = _PyHamtNode_Collision_CAST(self);
         while (--len >= 0) {
@@ -1516,7 +1513,6 @@ hamt_node_collision_dealloc(PyObject *self)
         }
     }
 
     Py_TYPE(self)->tp_free(self);
-    Py_TRASHCAN_END
 }
 
 #ifdef Py_DEBUG
@@ -1878,13 +1874,11 @@ hamt_node_array_dealloc(PyObject *self)
 {
     /* Array's tp_dealloc */
     PyObject_GC_UnTrack(self);
-    Py_TRASHCAN_BEGIN(self, hamt_node_array_dealloc)
 
     PyHamtNode_Array *obj = _PyHamtNode_Array_CAST(self);
     for (Py_ssize_t i = 0; i < HAMT_ARRAY_NODE_SIZE; i++) {
         Py_XDECREF(obj->a_array[i]);
     }
 
     Py_TYPE(self)->tp_free(self);
-    Py_TRASHCAN_END
 }
 
 #ifdef Py_DEBUG

diff --git a/Python/instruction_sequence.c b/Python/instruction_sequence.c
index b068e4fa3dbf43..e2db46b48930d6 100644
--- a/Python/instruction_sequence.c
+++ b/Python/instruction_sequence.c
@@ -419,10 +419,8 @@ inst_seq_dealloc(PyObject *op)
 {
     _PyInstructionSequence *seq = (_PyInstructionSequence *)op;
     PyObject_GC_UnTrack(seq);
-    Py_TRASHCAN_BEGIN(seq, inst_seq_dealloc)
     PyInstructionSequence_Fini(seq);
     PyObject_GC_Del(seq);
-    Py_TRASHCAN_END
 }
 
 static int

diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c
index 9eaf493a652aed..c4c1d9fd9e1380 100644
--- a/Python/pylifecycle.c
+++ b/Python/pylifecycle.c
@@ -854,6 +854,10 @@ pycore_init_builtins(PyThreadState *tstate)
 static PyStatus
 pycore_interp_init(PyThreadState *tstate)
 {
+    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+    if (_tstate->c_stack_hard_limit == 0) {
+        _Py_InitializeRecursionLimits(tstate);
+    }
     PyInterpreterState *interp = tstate->interp;
     PyStatus status;
     PyObject *sysmod = NULL;

diff --git a/Python/pystate.c b/Python/pystate.c
index b0c79ba9d3e645..5685957b160dba 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -2168,7 +2168,10 @@ _PyThreadState_Attach(PyThreadState *tstate)
     if (current_fast_get() != NULL) {
         Py_FatalError("non-NULL old thread state");
     }
-
+    _PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
+    if (_tstate->c_stack_hard_limit == 0) {
+        _Py_InitializeRecursionLimits(tstate);
+    }
     while (1) {
         _PyEval_AcquireLock(tstate);

diff --git a/Python/traceback.c b/Python/traceback.c
index 0ac0b28201c483..c06cb1a59089e2 100644
--- a/Python/traceback.c
+++ b/Python/traceback.c
@@ -236,11 +236,9 @@ tb_dealloc(PyObject *op)
 {
     PyTracebackObject *tb = _PyTracebackObject_CAST(op);
     PyObject_GC_UnTrack(tb);
-    Py_TRASHCAN_BEGIN(tb, tb_dealloc)
     Py_XDECREF(tb->tb_next);
     Py_XDECREF(tb->tb_frame);
     PyObject_GC_Del(tb);
-    Py_TRASHCAN_END
 }
 
 static int
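The heart of the new scheme is the _Py_Dealloc hunk above: when
_Py_RecursionLimit_GetMargin() reports less than 2 margin units, the object is
pushed onto the thread's delete_later list (its refcount word is reused as the
link, biased by one so it is never zero while the object may still look
tracked to the GC), and the list is drained only once the margin has recovered
to at least 4 units. The standalone sketch below models that control flow with
a plain linked list and a depth counter standing in for the stack-margin
probe; it is a simplified illustration of the technique, not the CPython code:

#include <stdio.h>
#include <stdlib.h>

typedef struct Node {
    struct Node *next;          /* payload: the chain being torn down */
    struct Node *delete_later;  /* intrusive link for the deferred-free list */
} Node;

static Node *delete_later_list = NULL;  /* models tstate->delete_later */
static int depth = 0;                   /* inverse stand-in for the stack margin */
static long freed = 0;

#define MAX_DEPTH 16  /* models "margin < 2": beyond this, defer instead of recurse */

static void deferred_dealloc(Node *n);

static void
node_free(Node *n)              /* models the object's tp_dealloc body */
{
    Node *next = n->next;
    free(n);
    freed++;
    deferred_dealloc(next);     /* models the recursive Py_DECREF */
}

static void
deferred_dealloc(Node *n)       /* models _Py_Dealloc */
{
    if (n == NULL) {
        return;
    }
    if (depth >= MAX_DEPTH) {   /* stack too deep: queue the object instead */
        n->delete_later = delete_later_list;
        delete_later_list = n;
        return;
    }
    depth++;
    node_free(n);
    depth--;
    if (depth == 0) {           /* headroom restored: drain the queue */
        while (delete_later_list != NULL) {
            Node *op = delete_later_list;
            delete_later_list = op->delete_later;
            depth++;
            node_free(op);
            depth--;
        }
    }
}

int
main(void)
{
    /* Build a chain long enough to overflow the C stack if freed naively. */
    Node *head = NULL;
    for (long i = 0; i < 1000000; i++) {
        Node *n = malloc(sizeof(Node));
        if (n == NULL) {
            return 1;
        }
        n->next = head;
        n->delete_later = NULL;
        head = n;
    }
    deferred_dealloc(head);
    printf("freed %ld nodes, recursion depth capped at %d\n", freed, MAX_DEPTH);
    return 0;
}

The shape is the same as the old trashcan, an intrusive "delete later" list
drained only when there is headroom, but it now lives in one place inside
Py_Dealloc rather than in every container's tp_dealloc.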