https://github.com/python/cpython/commit/77fc2f5a5e4ca92c609b87425a4080b71c5fa188
commit: 77fc2f5a5e4ca92c609b87425a4080b71c5fa188
branch: main
author: Pablo Galindo Salgado <[email protected]>
committer: pablogsal <[email protected]>
date: 2026-04-05T16:29:38+01:00
summary:
gh-144319: Fix huge page leak in datastack chunk allocator (#147963)
Fix huge page leak in datastack chunk allocator
The original fix rounded datastack chunk allocations in pystate.c so that
_PyObject_VirtualFree() would receive the full huge page mapping size.
Change direction and move that logic into _PyObject_VirtualAlloc() and
_PyObject_VirtualFree() instead. The key invariant is that munmap() must see
the full mapped size, so alloc and free now apply the same platform-specific
rounding in the allocator layer.
This keeps _PyStackChunk bookkeeping in requested-size units, avoids a
hardcoded 2 MB assumption, and also covers other small virtual-memory users
such as the JIT tracer state allocation in optimizer.c.
files:
A
Misc/NEWS.d/next/Core_and_Builtins/2026-04-01-12-52-31.gh-issue-144319.iZk4hs.rst
M Include/internal/pycore_obmalloc.h
M Objects/obmalloc.c
diff --git a/Include/internal/pycore_obmalloc.h b/Include/internal/pycore_obmalloc.h
index 0b23bb48dd5c1b..d4dbe541e6da51 100644
--- a/Include/internal/pycore_obmalloc.h
+++ b/Include/internal/pycore_obmalloc.h
@@ -691,7 +691,11 @@ struct _obmalloc_state {
/* Allocate memory directly from the O/S virtual memory system,
- * where supported. Otherwise fallback on malloc */
+ * where supported. Otherwise fallback on malloc.
+ *
+ * Large-page and huge-page backends may round the mapped size up
+ * internally, so pass the original requested size back to
+ * _PyObject_VirtualFree(). */
void *_PyObject_VirtualAlloc(size_t size);
void _PyObject_VirtualFree(void *, size_t size);
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2026-04-01-12-52-31.gh-issue-144319.iZk4hs.rst b/Misc/NEWS.d/next/Core_and_Builtins/2026-04-01-12-52-31.gh-issue-144319.iZk4hs.rst
new file mode 100644
index 00000000000000..f3f07ab35dbb01
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2026-04-01-12-52-31.gh-issue-144319.iZk4hs.rst
@@ -0,0 +1,3 @@
+Fix a bug that could cause applications with specific allocation patterns to
+leak memory via Huge Pages if compiled with Huge Page support. Patch by
+Pablo Galindo.
diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c
index 983bdddbf026a8..e2d5b012955c3e 100644
--- a/Objects/obmalloc.c
+++ b/Objects/obmalloc.c
@@ -14,6 +14,7 @@
#include <stdlib.h> // malloc()
#include <stdbool.h>
#include <stdio.h> // fopen(), fgets(), sscanf()
+#include <errno.h> // errno
#ifdef WITH_MIMALLOC
// Forward declarations of functions used in our mimalloc modifications
static void _PyMem_mi_page_clear_qsbr(mi_page_t *page);
@@ -572,6 +573,49 @@ _pymalloc_system_hugepage_size(void)
}
#endif
+#if (defined(MS_WINDOWS) && defined(PYMALLOC_USE_HUGEPAGES)) || \
+    (defined(PYMALLOC_USE_HUGEPAGES) && defined(ARENAS_USE_MMAP) && defined(MAP_HUGETLB))
+static size_t
+_pymalloc_round_up_to_multiple(size_t size, size_t multiple)
+{
+ if (multiple == 0 || size == 0) {
+ return size;
+ }
+
+ size_t remainder = size % multiple;
+ if (remainder == 0) {
+ return size;
+ }
+
+ size_t padding = multiple - remainder;
+ if (size > SIZE_MAX - padding) {
+ return 0;
+ }
+ return size + padding;
+}
+#endif
+
+static size_t
+_pymalloc_virtual_alloc_size(size_t size)
+{
+#if defined(MS_WINDOWS) && defined(PYMALLOC_USE_HUGEPAGES)
+ if (_PyRuntime.allocators.use_hugepages) {
+ SIZE_T large_page_size = GetLargePageMinimum();
+ if (large_page_size > 0) {
+            return _pymalloc_round_up_to_multiple(size, (size_t)large_page_size);
+ }
+ }
+#elif defined(PYMALLOC_USE_HUGEPAGES) && defined(ARENAS_USE_MMAP) && defined(MAP_HUGETLB)
+ if (_PyRuntime.allocators.use_hugepages) {
+ size_t hp_size = _pymalloc_system_hugepage_size();
+ if (hp_size > 0) {
+ return _pymalloc_round_up_to_multiple(size, hp_size);
+ }
+ }
+#endif
+ return size;
+}
+
void *
_PyMem_ArenaAlloc(void *Py_UNUSED(ctx), size_t size)
{
@@ -648,7 +692,11 @@ _PyMem_ArenaFree(void *Py_UNUSED(ctx), void *ptr,
if (ptr == NULL) {
return;
}
- munmap(ptr, size);
+ if (munmap(ptr, size) < 0) {
+ _Py_FatalErrorFormat(__func__,
+ "munmap(%p, %zu) failed with errno %d",
+ ptr, size, errno);
+ }
#else
free(ptr);
#endif
@@ -1128,13 +1176,19 @@ PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
void *
_PyObject_VirtualAlloc(size_t size)
{
- return _PyObject_Arena.alloc(_PyObject_Arena.ctx, size);
+ size_t alloc_size = _pymalloc_virtual_alloc_size(size);
+ if (alloc_size == 0 && size != 0) {
+ return NULL;
+ }
+ return _PyObject_Arena.alloc(_PyObject_Arena.ctx, alloc_size);
}
void
_PyObject_VirtualFree(void *obj, size_t size)
{
- _PyObject_Arena.free(_PyObject_Arena.ctx, obj, size);
+ size_t alloc_size = _pymalloc_virtual_alloc_size(size);
+ assert(alloc_size != 0 || size == 0);
+ _PyObject_Arena.free(_PyObject_Arena.ctx, obj, alloc_size);
}
_______________________________________________
Python-checkins mailing list -- [email protected]
To unsubscribe send an email to [email protected]
https://mail.python.org/mailman3//lists/python-checkins.python.org
Member address: [email protected]