https://github.com/python/cpython/commit/07522755ae6094b86c0d79249f6d0d81ef990f39
commit: 07522755ae6094b86c0d79249f6d0d81ef990f39
branch: 3.13
author: Sam Gross <[email protected]>
committer: colesbury <[email protected]>
date: 2025-03-04T23:35:22Z
summary:
[3.13] gh-130794: Process interpreter QSBR queue in _PyMem_AbandonDelayed. (gh-130808) (#130857)
This avoids a case where the interpreter's queue of memory to be freed
could grow rapidly if there are many short lived threads.
(cherry picked from commit 2f6e0e9f7001769be746ee96356656d3ebdc7f96)
files:
A Misc/NEWS.d/next/Core_and_Builtins/2025-03-03-20-33-44.gh-issue-130794.LwtGQc.rst
M Objects/obmalloc.c
M Python/qsbr.c
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-03-03-20-33-44.gh-issue-130794.LwtGQc.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-03-03-20-33-44.gh-issue-130794.LwtGQc.rst
new file mode 100644
index 00000000000000..2dfb53f92d232f
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-03-03-20-33-44.gh-issue-130794.LwtGQc.rst
@@ -0,0 +1,2 @@
+Fix memory leak in the :term:`free threaded <free threading>` build when
+resizing a shared list or dictionary from multiple short-lived threads.
diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c
index a9decc5dc1b1dd..4b830f5bb63c69 100644
--- a/Objects/obmalloc.c
+++ b/Objects/obmalloc.c
@@ -1208,6 +1208,17 @@ process_queue(struct llist_node *head, struct _qsbr_thread_state *qsbr,
static void
process_interp_queue(struct _Py_mem_interp_free_queue *queue,
struct _qsbr_thread_state *qsbr)
+{
+ assert(PyMutex_IsLocked(&queue->mutex));
+ process_queue(&queue->head, qsbr, false);
+
+ int more_work = !llist_empty(&queue->head);
+ _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
+}
+
+static void
+maybe_process_interp_queue(struct _Py_mem_interp_free_queue *queue,
+ struct _qsbr_thread_state *qsbr)
{
if (!_Py_atomic_load_int_relaxed(&queue->has_work)) {
return;
@@ -1215,11 +1226,7 @@ process_interp_queue(struct _Py_mem_interp_free_queue *queue,
// Try to acquire the lock, but don't block if it's already held.
if (_PyMutex_LockTimed(&queue->mutex, 0, 0) == PY_LOCK_ACQUIRED) {
- process_queue(&queue->head, qsbr, false);
-
- int more_work = !llist_empty(&queue->head);
- _Py_atomic_store_int_relaxed(&queue->has_work, more_work);
-
+ process_interp_queue(queue, qsbr);
PyMutex_Unlock(&queue->mutex);
}
}
@@ -1234,7 +1241,7 @@ _PyMem_ProcessDelayed(PyThreadState *tstate)
process_queue(&tstate_impl->mem_free_queue, tstate_impl->qsbr, true);
// Process shared interpreter work
- process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr);
+ maybe_process_interp_queue(&interp->mem_free_queue, tstate_impl->qsbr);
}
void
@@ -1256,10 +1263,15 @@ _PyMem_AbandonDelayed(PyThreadState *tstate)
return;
}
- // Merge the thread's work queue into the interpreter's work queue.
PyMutex_Lock(&interp->mem_free_queue.mutex);
+
+ // Merge the thread's work queue into the interpreter's work queue.
llist_concat(&interp->mem_free_queue.head, queue);
- _Py_atomic_store_int_relaxed(&interp->mem_free_queue.has_work, 1);
+
+ // Process the merged queue now (see gh-130794).
+    _PyThreadStateImpl *this_tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
+ process_interp_queue(&interp->mem_free_queue, this_tstate->qsbr);
+
PyMutex_Unlock(&interp->mem_free_queue.mutex);
assert(llist_empty(queue)); // the thread's queue is now empty
diff --git a/Python/qsbr.c b/Python/qsbr.c
index 0df1285cc8e063..386a8451dc40c0 100644
--- a/Python/qsbr.c
+++ b/Python/qsbr.c
@@ -161,6 +161,7 @@ bool
_Py_qsbr_poll(struct _qsbr_thread_state *qsbr, uint64_t goal)
{
     assert(_Py_atomic_load_int_relaxed(&_PyThreadState_GET()->state) == _Py_THREAD_ATTACHED);
+ assert(((_PyThreadStateImpl *)_PyThreadState_GET())->qsbr == qsbr);
if (_Py_qbsr_goal_reached(qsbr, goal)) {
return true;
_______________________________________________
Python-checkins mailing list -- [email protected]
To unsubscribe send an email to [email protected]
https://mail.python.org/mailman3/lists/python-checkins.python.org/
Member address: [email protected]