https://github.com/python/cpython/commit/eecafc33800c84ecb67f5d3ed819fbed057677ab
commit: eecafc33800c84ecb67f5d3ed819fbed057677ab
branch: main
author: Neil Schemenauer <[email protected]>
committer: nascheme <[email protected]>
date: 2025-04-28T23:38:29-07:00
summary:
Revert "gh-127266: avoid data races when updating type slots (gh-131174)" (gh-133129)
The reverted change was triggering deadlocks in test_opcache. See GH-133130 for a stack trace.
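For context: the revert drops the stop-the-world scheme and returns to guarding type mutation with the per-interpreter TYPE_LOCK plus relaxed atomic accesses to tp_flags. A minimal C11 sketch of the atomics half of that scheme follows; the demo_* names are stand-ins, not CPython's actual FT_ATOMIC_* macros:

    /* Minimal sketch, not CPython's real FT_ATOMIC_* macros: the pattern
     * this revert restores pairs a relaxed atomic store in the flag
     * setter with a relaxed atomic load in every reader, so lock-free
     * readers never observe a torn tp_flags value. */
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long demo_tp_flags;  /* stand-in for tp_flags */

    static void demo_set_flags(unsigned long flags)  /* cf. type_set_flags() */
    {
        atomic_store_explicit(&demo_tp_flags, flags, memory_order_relaxed);
    }

    static int demo_has_feature(unsigned long feature)  /* cf. PyType_HasFeature() */
    {
        unsigned long flags = atomic_load_explicit(&demo_tp_flags,
                                                   memory_order_relaxed);
        return (flags & feature) != 0;
    }

    int main(void)
    {
        demo_set_flags(1UL << 18);  /* e.g. Py_TPFLAGS_HAVE_VERSION_TAG */
        printf("%d\n", demo_has_feature(1UL << 18));  /* prints 1 */
        return 0;
    }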
files:
D Misc/NEWS.d/next/Core_and_Builtins/2025-03-14-13-08-20.gh-issue-127266._tyfBp.rst
M Include/internal/pycore_interp_structs.h
M Include/internal/pycore_object.h
M Include/internal/pycore_typeobject.h
M Include/object.h
M Include/refcount.h
M Lib/test/test_opcache.py
M Objects/typeobject.c
M Python/ceval.c
M Tools/tsan/suppressions_free_threading.txt
diff --git a/Include/internal/pycore_interp_structs.h b/Include/internal/pycore_interp_structs.h
index f193fed1153f14..af6ee3ab48939f 100644
--- a/Include/internal/pycore_interp_structs.h
+++ b/Include/internal/pycore_interp_structs.h
@@ -667,11 +667,8 @@ struct _Py_interp_cached_objects {
/* object.__reduce__ */
PyObject *objreduce;
-#ifndef Py_GIL_DISABLED
- /* resolve_slotdups() */
PyObject *type_slots_pname;
pytype_slotdef *type_slots_ptrs[MAX_EQUIV];
-#endif
/* TypeVar and related types */
PyTypeObject *generic_type;
diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h
index 986bcc9fd08b1c..b7e162c8abcabf 100644
--- a/Include/internal/pycore_object.h
+++ b/Include/internal/pycore_object.h
@@ -313,7 +313,7 @@ extern int _PyDict_CheckConsistency(PyObject *mp, int check_content);
// Fast inlined version of PyType_HasFeature()
static inline int
_PyType_HasFeature(PyTypeObject *type, unsigned long feature) {
- return ((type->tp_flags) & feature) != 0;
+ return ((FT_ATOMIC_LOAD_ULONG_RELAXED(type->tp_flags) & feature) != 0);
}
extern void _PyType_InitCache(PyInterpreterState *interp);
diff --git a/Include/internal/pycore_typeobject.h b/Include/internal/pycore_typeobject.h
index 0ee7d555c56cdd..1a4f89fd2449a0 100644
--- a/Include/internal/pycore_typeobject.h
+++ b/Include/internal/pycore_typeobject.h
@@ -134,6 +134,7 @@ extern int _PyType_AddMethod(PyTypeObject *, PyMethodDef *);
extern void _PyType_SetFlagsRecursive(PyTypeObject *self, unsigned long mask,
unsigned long flags);
+extern unsigned int _PyType_GetVersionForCurrentState(PyTypeObject *tp);
PyAPI_FUNC(void) _PyType_SetVersion(PyTypeObject *tp, unsigned int version);
PyTypeObject *_PyType_LookupByVersion(unsigned int version);
diff --git a/Include/object.h b/Include/object.h
index 994cac1ad17501..8cc83abb8574e3 100644
--- a/Include/object.h
+++ b/Include/object.h
@@ -620,12 +620,6 @@ given type object has a specified feature.
#define Py_TPFLAGS_HAVE_FINALIZE (1UL << 0)
#define Py_TPFLAGS_HAVE_VERSION_TAG (1UL << 18)
-// Flag values for ob_flags (16 bits available, if SIZEOF_VOID_P > 4).
-#define _Py_IMMORTAL_FLAGS (1 << 0)
-#define _Py_STATICALLY_ALLOCATED_FLAG (1 << 2)
-#if defined(Py_GIL_DISABLED) && defined(Py_DEBUG)
-#define _Py_TYPE_REVEALED_FLAG (1 << 3)
-#endif
#define Py_CONSTANT_NONE 0
#define Py_CONSTANT_FALSE 1
@@ -782,7 +776,11 @@ PyType_HasFeature(PyTypeObject *type, unsigned long feature)
// PyTypeObject is opaque in the limited C API
flags = PyType_GetFlags(type);
#else
- flags = type->tp_flags;
+# ifdef Py_GIL_DISABLED
+ flags = _Py_atomic_load_ulong_relaxed(&type->tp_flags);
+# else
+ flags = type->tp_flags;
+# endif
#endif
return ((flags & feature) != 0);
}
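The object.h hunk above makes PyType_HasFeature() read tp_flags with a relaxed atomic load on free-threaded builds. For reference, a typical call site looks like the sketch below; PyType_HasFeature and Py_TPFLAGS_BASETYPE are real public API, while the helper name is invented:

    #include <Python.h>

    /* Invented helper: report whether obj's type may be subclassed.
     * Needs no lock because PyType_HasFeature() reads tp_flags with a
     * relaxed atomic load on free-threaded builds after this change. */
    static int
    can_be_base_class(PyObject *obj)
    {
        return PyType_HasFeature(Py_TYPE(obj), Py_TPFLAGS_BASETYPE);
    }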
diff --git a/Include/refcount.h b/Include/refcount.h
index ebd1dba6d15e1a..177bbdaf0c5977 100644
--- a/Include/refcount.h
+++ b/Include/refcount.h
@@ -19,6 +19,9 @@ immortal. The latter should be the only instances that require
cleanup during runtime finalization.
*/
+#define _Py_STATICALLY_ALLOCATED_FLAG 4
+#define _Py_IMMORTAL_FLAGS 1
+
#if SIZEOF_VOID_P > 4
/*
In 64+ bit systems, any object whose 32 bit reference count is >= 2**31
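The two literals restored to refcount.h encode the same bits as the shifted forms removed from object.h above (1 == 1 << 0 and 4 == 1 << 2). A compile-time sketch of that equivalence, using C11 static_assert:

    /* Consistency sketch: the literals restored in refcount.h match the
     * bit-shift forms removed from object.h. C11 static_assert. */
    #include <assert.h>

    static_assert(1 == (1 << 0), "_Py_IMMORTAL_FLAGS bit");
    static_assert(4 == (1 << 2), "_Py_STATICALLY_ALLOCATED_FLAG bit");

    int main(void) { return 0; }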
diff --git a/Lib/test/test_opcache.py b/Lib/test/test_opcache.py
index 40349339df54fd..21d7e62833c061 100644
--- a/Lib/test/test_opcache.py
+++ b/Lib/test/test_opcache.py
@@ -576,7 +576,6 @@ class TestRacesDoNotCrash(TestBase):
# Careful with these. Bigger numbers have a higher chance of catching bugs,
# but you can also burn through a *ton* of type/dict/function versions:
ITEMS = 1000
- SMALL_ITEMS = 100
LOOPS = 4
WRITERS = 2
@@ -620,7 +619,7 @@ class C:
__getitem__ = lambda self, item: None
items = []
- for _ in range(self.SMALL_ITEMS):
+ for _ in range(self.ITEMS):
item = C()
items.append(item)
return items
@@ -791,7 +790,7 @@ class C:
__getattribute__ = lambda self, name: None
items = []
- for _ in range(self.SMALL_ITEMS):
+ for _ in range(self.ITEMS):
item = C()
items.append(item)
return items
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-03-14-13-08-20.gh-issue-127266._tyfBp.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-03-14-13-08-20.gh-issue-127266._tyfBp.rst
deleted file mode 100644
index b26977628de136..00000000000000
--- a/Misc/NEWS.d/next/Core_and_Builtins/2025-03-14-13-08-20.gh-issue-127266._tyfBp.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-In the free-threaded build, avoid data races caused by updating type slots
-or type flags after the type was initially created. For those (typically
-rare) cases, use the stop-the-world mechanism. Remove the use of atomics
-when reading or writing type flags. The use of atomics is not sufficient to
-avoid races (since flags are sometimes read without a lock and without
-atomics) and is no longer required.
diff --git a/Objects/typeobject.c b/Objects/typeobject.c
index 22628018cc2a8f..4e614daaa6955b 100644
--- a/Objects/typeobject.c
+++ b/Objects/typeobject.c
@@ -48,7 +48,7 @@ class object "PyObject *" "&PyBaseObject_Type"
& ((1 << MCACHE_SIZE_EXP) - 1))
#define MCACHE_HASH_METHOD(type, name) \
- MCACHE_HASH(FT_ATOMIC_LOAD_UINT_RELAXED((type)->tp_version_tag), \
+ MCACHE_HASH(FT_ATOMIC_LOAD_UINT32_RELAXED((type)->tp_version_tag), \
((Py_ssize_t)(name)) >> 3)
#define MCACHE_CACHEABLE_NAME(name) \
PyUnicode_CheckExact(name) && \
@@ -60,19 +60,11 @@ class object "PyObject *" "&PyBaseObject_Type"
#ifdef Py_GIL_DISABLED
-// There's a global lock for types that ensures that tp_version_tag and
-// _spec_cache are correctly updated if the type is modified. It also protects
-// tp_mro, tp_bases, and tp_base. This avoids having to take additional locks
-// while doing various subclass processing which may result in odd behaviors
-// w.r.t. running with the GIL as the outer type lock could be released and
-// reacquired during a subclass update if there's contention on the subclass
-// lock.
-//
-// Note that this lock does not protect updates of other type slots or the
-// tp_flags member. Instead, we either ensure those updates are done before
-// the type has been revealed to other threads or we only do those updates
-// while the stop-the-world mechanism is active. The slots and flags are read
-// in many places without holding a lock and without atomics.
+// There's a global lock for mutation of types. This avoids having to take
+// additional locks while doing various subclass processing which may result
+// in odd behaviors w.r.t. running with the GIL as the outer type lock could
+// be released and reacquired during a subclass update if there's contention
+// on the subclass lock.
#define TYPE_LOCK &PyInterpreterState_Get()->types.mutex
#define BEGIN_TYPE_LOCK() Py_BEGIN_CRITICAL_SECTION_MUT(TYPE_LOCK)
#define END_TYPE_LOCK() Py_END_CRITICAL_SECTION()
@@ -82,59 +74,8 @@ class object "PyObject *" "&PyBaseObject_Type"
#define END_TYPE_DICT_LOCK() Py_END_CRITICAL_SECTION2()
-#ifdef Py_DEBUG
-// Return true if the world is currently stopped.
-static bool
-types_world_is_stopped(void)
-{
- PyInterpreterState *interp = _PyInterpreterState_GET();
- return interp->stoptheworld.world_stopped;
-}
-#endif
-
-// Checks that the type has not yet been revealed (exposed) to other
-// threads. The _Py_TYPE_REVEALED_FLAG flag is set by type_new() and
-// PyType_FromMetaclass() to indicate that a newly initialized type might be
-// revealed. We only have ob_flags on 64-bit platforms.
-#if SIZEOF_VOID_P > 4
-#define TYPE_IS_REVEALED(tp) ((((PyObject *)(tp))->ob_flags & _Py_TYPE_REVEALED_FLAG) != 0)
-#else
-#define TYPE_IS_REVEALED(tp) 0
-#endif
-
-#ifdef Py_DEBUG
#define ASSERT_TYPE_LOCK_HELD() \
- if (!types_world_is_stopped()) { _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(TYPE_LOCK); }
-
-// Checks if we can safely update type slots or tp_flags.
-#define ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp) \
- assert(!TYPE_IS_REVEALED(tp) || types_world_is_stopped())
-
-#define ASSERT_NEW_TYPE_OR_LOCKED(tp) \
- if (TYPE_IS_REVEALED(tp)) { ASSERT_TYPE_LOCK_HELD(); }
-#else
-#define ASSERT_TYPE_LOCK_HELD()
-#define ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp)
-#define ASSERT_NEW_TYPE_OR_LOCKED(tp)
-#endif
-
-static void
-types_stop_world(void)
-{
- assert(!types_world_is_stopped());
- PyInterpreterState *interp = _PyInterpreterState_GET();
- _PyEval_StopTheWorld(interp);
- assert(types_world_is_stopped());
-}
-
-static void
-types_start_world(void)
-{
- assert(types_world_is_stopped());
- PyInterpreterState *interp = _PyInterpreterState_GET();
- _PyEval_StartTheWorld(interp);
- assert(!types_world_is_stopped());
-}
+ _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(TYPE_LOCK)
#else
@@ -143,12 +84,6 @@ types_start_world(void)
#define BEGIN_TYPE_DICT_LOCK(d)
#define END_TYPE_DICT_LOCK()
#define ASSERT_TYPE_LOCK_HELD()
-#define TYPE_IS_REVEALED(tp) 0
-#define ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp)
-#define ASSERT_NEW_TYPE_OR_LOCKED(tp)
-#define types_world_is_stopped() 1
-#define types_stop_world()
-#define types_start_world()
#endif
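With the stop-the-world helpers gone, ASSERT_TYPE_LOCK_HELD() is again an unconditional mutex assertion, and typeobject.c goes back to its usual shape: a public entry point that takes TYPE_LOCK and delegates to an *_unlocked worker. A schematic sketch using the macro names defined above (do_thing/do_thing_unlocked are invented, and the snippet assumes typeobject.c's surrounding definitions):

    /* Schematic sketch of the locked-wrapper idiom used throughout
     * typeobject.c; do_thing/do_thing_unlocked are invented names. */
    static int
    do_thing_unlocked(PyTypeObject *type)
    {
        ASSERT_TYPE_LOCK_HELD();  /* debug-only assertion, never blocks */
        /* ... read or mutate state guarded by TYPE_LOCK ... */
        return 0;
    }

    static int
    do_thing(PyTypeObject *type)
    {
        int res;
        BEGIN_TYPE_LOCK();
        res = do_thing_unlocked(type);
        END_TYPE_LOCK();
        return res;
    }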
@@ -411,14 +346,21 @@ _PyStaticType_GetBuiltins(void)
static void
type_set_flags(PyTypeObject *tp, unsigned long flags)
{
- ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp);
- tp->tp_flags = flags;
+ if (tp->tp_flags & Py_TPFLAGS_READY) {
+ // It's possible the type object has been exposed to other threads
+ // if it's been marked ready. In that case, the type lock should be
+ // held when flags are modified.
+ ASSERT_TYPE_LOCK_HELD();
+ }
+ // Since PyType_HasFeature() reads the flags without holding the type
+ // lock, we need an atomic store here.
+ FT_ATOMIC_STORE_ULONG_RELAXED(tp->tp_flags, flags);
}
static void
type_set_flags_with_mask(PyTypeObject *tp, unsigned long mask, unsigned long flags)
{
- ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp);
+ ASSERT_TYPE_LOCK_HELD();
unsigned long new_flags = (tp->tp_flags & ~mask) | flags;
type_set_flags(tp, new_flags);
}
@@ -556,7 +498,6 @@ static inline void
set_tp_bases(PyTypeObject *self, PyObject *bases, int initial)
{
assert(PyTuple_Check(bases));
- ASSERT_NEW_TYPE_OR_LOCKED(self);
if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) {
// XXX tp_bases can probably be statically allocated for each
// static builtin type.
@@ -601,7 +542,7 @@ clear_tp_bases(PyTypeObject *self, int final)
static inline PyObject *
lookup_tp_mro(PyTypeObject *self)
{
- ASSERT_NEW_TYPE_OR_LOCKED(self);
+ ASSERT_TYPE_LOCK_HELD();
return self->tp_mro;
}
@@ -1086,6 +1027,7 @@ PyType_Unwatch(int watcher_id, PyObject* obj)
static void
set_version_unlocked(PyTypeObject *tp, unsigned int version)
{
+ ASSERT_TYPE_LOCK_HELD();
assert(version == 0 || (tp->tp_versions_used != _Py_ATTR_CACHE_UNUSED));
#ifndef Py_GIL_DISABLED
PyInterpreterState *interp = _PyInterpreterState_GET();
@@ -1133,7 +1075,7 @@ type_modified_unlocked(PyTypeObject *type)
We don't assign new version tags eagerly, but only as
needed.
*/
- ASSERT_NEW_TYPE_OR_LOCKED(type);
+ ASSERT_TYPE_LOCK_HELD();
if (type->tp_version_tag == 0) {
return;
}
@@ -1164,8 +1106,6 @@ type_modified_unlocked(PyTypeObject *type)
while (bits) {
assert(i < TYPE_MAX_WATCHERS);
if (bits & 1) {
- // Note that PyErr_FormatUnraisable is potentially re-entrant
- // and the watcher callback might be too.
PyType_WatchCallback cb = interp->type_watchers[i];
if (cb && (cb(type) < 0)) {
PyErr_FormatUnraisable(
@@ -1305,6 +1245,14 @@ _PyType_LookupByVersion(unsigned int version)
#endif
}
+unsigned int
+_PyType_GetVersionForCurrentState(PyTypeObject *tp)
+{
+ return tp->tp_version_tag;
+}
+
+
+
#define MAX_VERSIONS_PER_CLASS 1000
#if _Py_ATTR_CACHE_UNUSED < MAX_VERSIONS_PER_CLASS
#error "_Py_ATTR_CACHE_UNUSED must be bigger than max"
@@ -1638,13 +1586,10 @@ type_set_abstractmethods(PyObject *tp, PyObject *value, void *Py_UNUSED(closure)
BEGIN_TYPE_LOCK();
type_modified_unlocked(type);
- types_stop_world();
if (abstract)
type_add_flags(type, Py_TPFLAGS_IS_ABSTRACT);
else
type_clear_flags(type, Py_TPFLAGS_IS_ABSTRACT);
- types_start_world();
- ASSERT_TYPE_LOCK_HELD();
END_TYPE_LOCK();
return 0;
@@ -1679,8 +1624,8 @@ type_get_mro(PyObject *tp, void *Py_UNUSED(closure))
return mro;
}
-static PyTypeObject *find_best_base(PyObject *);
-static int mro_internal(PyTypeObject *, int, PyObject **);
+static PyTypeObject *best_base(PyObject *);
+static int mro_internal(PyTypeObject *, PyObject **);
static int type_is_subtype_base_chain(PyTypeObject *, PyTypeObject *);
static int compatible_for_assignment(PyTypeObject *, PyTypeObject *, const char *);
static int add_subclass(PyTypeObject*, PyTypeObject*);
@@ -1695,15 +1640,13 @@ static int update_subclasses(PyTypeObject *type, PyObject *attr_name,
static int recurse_down_subclasses(PyTypeObject *type, PyObject *name,
update_callback callback, void *data);
-// Compute tp_mro for this type and all of its subclasses. This
-// is called after __bases__ is assigned to an existing type.
static int
mro_hierarchy(PyTypeObject *type, PyObject *temp)
{
ASSERT_TYPE_LOCK_HELD();
PyObject *old_mro;
- int res = mro_internal(type, 0, &old_mro);
+ int res = mro_internal(type, &old_mro);
if (res <= 0) {
/* error / reentrance */
return res;
@@ -1765,9 +1708,9 @@ mro_hierarchy(PyTypeObject *type, PyObject *temp)
}
static int
-type_check_new_bases(PyTypeObject *type, PyObject *new_bases, PyTypeObject **best_base)
+type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases)
{
- // Check arguments, this is re-entrant due to the PySys_Audit() call
+ // Check arguments
if (!check_set_special_type_attr(type, new_bases, "__bases__")) {
return -1;
}
@@ -1816,29 +1759,20 @@ type_check_new_bases(PyTypeObject *type, PyObject *new_bases, PyTypeObject **bes
}
// Compute the new MRO and the new base class
- *best_base = find_best_base(new_bases);
- if (*best_base == NULL)
+ PyTypeObject *new_base = best_base(new_bases);
+ if (new_base == NULL)
return -1;
- if (!compatible_for_assignment(type->tp_base, *best_base, "__bases__")) {
+ if (!compatible_for_assignment(type->tp_base, new_base, "__bases__")) {
return -1;
}
- return 0;
-}
-
-static int
-type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases, PyTypeObject *best_base)
-{
- ASSERT_TYPE_LOCK_HELD();
-
- Py_ssize_t n;
PyObject *old_bases = lookup_tp_bases(type);
assert(old_bases != NULL);
PyTypeObject *old_base = type->tp_base;
set_tp_bases(type, Py_NewRef(new_bases), 0);
- type->tp_base = (PyTypeObject *)Py_NewRef(best_base);
+ type->tp_base = (PyTypeObject *)Py_NewRef(new_base);
PyObject *temp = PyList_New(0);
if (temp == NULL) {
@@ -1862,10 +1796,7 @@ type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases, PyTypeObject *b
add to all new_bases */
remove_all_subclasses(type, old_bases);
res = add_all_subclasses(type, new_bases);
- types_stop_world();
update_all_slots(type);
- types_start_world();
- ASSERT_TYPE_LOCK_HELD();
}
else {
res = 0;
@@ -1896,13 +1827,13 @@ type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases, PyTypeObject *b
bail:
if (lookup_tp_bases(type) == new_bases) {
- assert(type->tp_base == best_base);
+ assert(type->tp_base == new_base);
set_tp_bases(type, old_bases, 0);
type->tp_base = old_base;
Py_DECREF(new_bases);
- Py_DECREF(best_base);
+ Py_DECREF(new_base);
}
else {
Py_DECREF(old_bases);
@@ -1917,13 +1848,9 @@ static int
type_set_bases(PyObject *tp, PyObject *new_bases, void *Py_UNUSED(closure))
{
PyTypeObject *type = PyTypeObject_CAST(tp);
- PyTypeObject *best_base;
int res;
BEGIN_TYPE_LOCK();
- res = type_check_new_bases(type, new_bases, &best_base);
- if (res == 0) {
- res = type_set_bases_unlocked(type, new_bases, best_base);
- }
+ res = type_set_bases_unlocked(type, new_bases);
END_TYPE_LOCK();
return res;
}
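After this hunk, the __bases__ argument checks and the actual rebasing run in one function under a single TYPE_LOCK critical section instead of two cooperating halves. From C, the operation is reached through ordinary attribute assignment; a sketch using the real PyObject_SetAttrString API (the helper name is invented):

    #include <Python.h>

    /* Invented helper: the C equivalent of `type.__bases__ = new_bases`,
     * which routes through type_set_bases() shown above. Returns 0 on
     * success, -1 with an exception set on failure. */
    static int
    reassign_bases(PyTypeObject *type, PyObject *new_bases)
    {
        return PyObject_SetAttrString((PyObject *)type, "__bases__",
                                      new_bases);
    }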
@@ -3165,7 +3092,6 @@ static PyObject *
class_name(PyObject *cls)
{
PyObject *name;
- // Note that this is potentially re-entrant.
if (PyObject_GetOptionalAttr(cls, &_Py_ID(__name__), &name) == 0) {
name = PyObject_Repr(cls);
}
@@ -3502,13 +3428,9 @@ mro_invoke(PyTypeObject *type)
const int custom = !Py_IS_TYPE(type, &PyType_Type);
if (custom) {
- // Custom mro() method on metaclass. This is potentially re-entrant.
- // We are called either from type_ready() or from type_set_bases().
mro_result = call_method_noarg((PyObject *)type, &_Py_ID(mro));
}
else {
- // In this case, the mro() method on the type object is being used and
- // we know that these calls are not re-entrant.
mro_result = mro_implementation_unlocked(type);
}
if (mro_result == NULL)
@@ -3556,7 +3478,7 @@ mro_invoke(PyTypeObject *type)
- Returns -1 in case of an error.
*/
static int
-mro_internal(PyTypeObject *type, int initial, PyObject **p_old_mro)
+mro_internal_unlocked(PyTypeObject *type, int initial, PyObject **p_old_mro)
{
ASSERT_TYPE_LOCK_HELD();
@@ -3604,11 +3526,21 @@ mro_internal(PyTypeObject *type, int initial, PyObject **p_old_mro)
return 1;
}
+static int
+mro_internal(PyTypeObject *type, PyObject **p_old_mro)
+{
+ int res;
+ BEGIN_TYPE_LOCK();
+ res = mro_internal_unlocked(type, 0, p_old_mro);
+ END_TYPE_LOCK();
+ return res;
+}
+
/* Calculate the best base amongst multiple base classes.
This is the first one that's on the path to the "solid base". */
static PyTypeObject *
-find_best_base(PyObject *bases)
+best_base(PyObject *bases)
{
Py_ssize_t i, n;
PyTypeObject *base, *winner, *candidate;
@@ -3697,7 +3629,6 @@ static int update_slot(PyTypeObject *, PyObject *);
static void fixup_slot_dispatchers(PyTypeObject *);
static int type_new_set_names(PyTypeObject *);
static int type_new_init_subclass(PyTypeObject *, PyObject *);
-static bool has_slotdef(PyObject *);
/*
* Helpers for __dict__ descriptor. We don't want to expose the dicts
@@ -3895,7 +3826,7 @@ type_init(PyObject *cls, PyObject *args, PyObject *kwds)
unsigned long
PyType_GetFlags(PyTypeObject *type)
{
- return type->tp_flags;
+ return FT_ATOMIC_LOAD_ULONG_RELAXED(type->tp_flags);
}
@@ -4673,10 +4604,6 @@ type_new_impl(type_new_ctx *ctx)
}
assert(_PyType_CheckConsistency(type));
-#if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4
- // After this point, other threads can potentially use this type.
- ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG;
-#endif
return (PyObject *)type;
@@ -4739,7 +4666,7 @@ type_new_get_bases(type_new_ctx *ctx, PyObject **type)
}
/* Calculate best base, and check that all bases are type objects */
- PyTypeObject *base = find_best_base(ctx->bases);
+ PyTypeObject *base = best_base(ctx->bases);
if (base == NULL) {
return -1;
}
@@ -5154,12 +5081,12 @@ PyType_FromMetaclass(
}
/* Calculate best base, and check that all bases are type objects */
- PyTypeObject *base = find_best_base(bases); // borrowed ref
+ PyTypeObject *base = best_base(bases); // borrowed ref
if (base == NULL) {
goto finally;
}
- // find_best_base() should check Py_TPFLAGS_BASETYPE & raise a proper
- // exception, here we just check its work
+ // best_base should check Py_TPFLAGS_BASETYPE & raise a proper exception,
+ // here we just check its work
assert(_PyType_HasFeature(base, Py_TPFLAGS_BASETYPE));
/* Calculate sizes */
@@ -5390,10 +5317,6 @@ PyType_FromMetaclass(
}
assert(_PyType_CheckConsistency(type));
-#if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4
- // After this point, other threads can potentially use this type.
- ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG;
-#endif
finally:
if (PyErr_Occurred()) {
@@ -5687,6 +5610,8 @@ PyObject_GetItemData(PyObject *obj)
static PyObject *
find_name_in_mro(PyTypeObject *type, PyObject *name, int *error)
{
+ ASSERT_TYPE_LOCK_HELD();
+
Py_hash_t hash = _PyObject_HashFast(name);
if (hash == -1) {
*error = -1;
@@ -5995,13 +5920,9 @@ _PyType_CacheGetItemForSpecialization(PyHeapTypeObject *ht, PyObject *descriptor
void
_PyType_SetFlags(PyTypeObject *self, unsigned long mask, unsigned long flags)
{
- unsigned long new_flags = (self->tp_flags & ~mask) | flags;
- if (new_flags != self->tp_flags) {
- types_stop_world();
- // can't use new_flags here since they could be out-of-date
- self->tp_flags = (self->tp_flags & ~mask) | flags;
- types_start_world();
- }
+ BEGIN_TYPE_LOCK();
+ type_set_flags_with_mask(self, mask, flags);
+ END_TYPE_LOCK();
}
int
@@ -6048,9 +5969,9 @@ set_flags_recursive(PyTypeObject *self, unsigned long mask, unsigned long flags)
void
_PyType_SetFlagsRecursive(PyTypeObject *self, unsigned long mask, unsigned long flags)
{
- types_stop_world();
+ BEGIN_TYPE_LOCK();
set_flags_recursive(self, mask, flags);
- types_start_world();
+ END_TYPE_LOCK();
}
/* This is similar to PyObject_GenericGetAttr(),
@@ -6164,8 +6085,6 @@ _Py_type_getattro(PyObject *tp, PyObject *name)
return _Py_type_getattro_impl(type, name, NULL);
}
-// Called by type_setattro(). Updates both the type dict and
-// the type versions.
static int
type_update_dict(PyTypeObject *type, PyDictObject *dict, PyObject *name,
PyObject *value, PyObject **old_value)
@@ -6195,6 +6114,10 @@ type_update_dict(PyTypeObject *type, PyDictObject *dict, PyObject *name,
return -1;
}
+ if (is_dunder_name(name)) {
+ return update_slot(type, name);
+ }
+
return 0;
}
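The restored lines make a dunder-name store call update_slot() directly while the dict lock is held, instead of stopping the world afterwards. The is_dunder_name() guard used here is just a shape test; a standalone sketch of that test on a plain C string (an assumption: the real check operates on a PyUnicode object):

    #include <stdbool.h>
    #include <string.h>

    /* Assumed shape of the is_dunder_name() test, shown on a plain C
     * string rather than the PyUnicode object the real code inspects. */
    static bool
    is_dunder_name_demo(const char *name)
    {
        size_t n = strlen(name);
        return n > 4
            && name[0] == '_' && name[1] == '_'
            && name[n - 2] == '_' && name[n - 1] == '_';
    }
    /* is_dunder_name_demo("__getitem__") -> true: the store above goes
     * on to update_slot(); is_dunder_name_demo("getitem") -> false. */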
@@ -6252,9 +6175,7 @@ type_setattro(PyObject *self, PyObject *name, PyObject *value)
PyObject *dict = type->tp_dict;
if (dict == NULL) {
- // This is an unlikely case. PyType_Ready has not yet been done and
- // we need to initialize tp_dict. We don't just do PyType_Ready
- // because we could already be readying.
+ // We don't just do PyType_Ready because we could already be readying
BEGIN_TYPE_LOCK();
dict = type->tp_dict;
if (dict == NULL) {
@@ -6270,15 +6191,6 @@ type_setattro(PyObject *self, PyObject *name, PyObject *value)
BEGIN_TYPE_DICT_LOCK(dict);
res = type_update_dict(type, (PyDictObject *)dict, name, value,
&old_value);
assert(_PyType_CheckConsistency(type));
- if (res == 0) {
- if (is_dunder_name(name) && has_slotdef(name)) {
- // The name corresponds to a type slot.
- types_stop_world();
- res = update_slot(type, name);
- types_start_world();
- ASSERT_TYPE_LOCK_HELD();
- }
- }
END_TYPE_DICT_LOCK();
done:
@@ -7208,10 +7120,15 @@ object_set_class(PyObject *self, PyObject *value, void *closure)
return -1;
}
- types_stop_world();
+#ifdef Py_GIL_DISABLED
+ PyInterpreterState *interp = _PyInterpreterState_GET();
+ _PyEval_StopTheWorld(interp);
+#endif
PyTypeObject *oldto = Py_TYPE(self);
int res = object_set_class_world_stopped(self, newto);
- types_start_world();
+#ifdef Py_GIL_DISABLED
+ _PyEval_StartTheWorld(interp);
+#endif
if (res == 0) {
if (oldto->tp_flags & Py_TPFLAGS_HEAPTYPE) {
Py_DECREF(oldto);
@@ -8619,7 +8536,7 @@ type_ready_mro(PyTypeObject *type, int initial)
}
/* Calculate method resolution order */
- if (mro_internal(type, initial, NULL) < 0) {
+ if (mro_internal_unlocked(type, initial, NULL) < 0) {
return -1;
}
PyObject *mro = lookup_tp_mro(type);
@@ -11142,21 +11059,12 @@ resolve_slotdups(PyTypeObject *type, PyObject *name)
{
/* XXX Maybe this could be optimized more -- but is it worth it? */
-#ifdef Py_GIL_DISABLED
- pytype_slotdef *ptrs[MAX_EQUIV];
- pytype_slotdef **pp = ptrs;
- /* Collect all slotdefs that match name into ptrs. */
- for (pytype_slotdef *p = slotdefs; p->name_strobj; p++) {
- if (p->name_strobj == name)
- *pp++ = p;
- }
- *pp = NULL;
-#else
/* pname and ptrs act as a little cache */
PyInterpreterState *interp = _PyInterpreterState_GET();
#define pname _Py_INTERP_CACHED_OBJECT(interp, type_slots_pname)
#define ptrs _Py_INTERP_CACHED_OBJECT(interp, type_slots_ptrs)
pytype_slotdef *p, **pp;
+ void **res, **ptr;
if (pname != name) {
/* Collect all slotdefs that match name into ptrs. */
@@ -11168,12 +11076,10 @@ resolve_slotdups(PyTypeObject *type, PyObject *name)
}
*pp = NULL;
}
-#endif
/* Look in all slots of the type matching the name. If exactly one of these
has a filled-in slot, return a pointer to that slot.
Otherwise, return NULL. */
- void **res, **ptr;
res = NULL;
for (pp = ptrs; *pp; pp++) {
ptr = slotptr(type, (*pp)->offset);
@@ -11183,25 +11089,11 @@ resolve_slotdups(PyTypeObject *type, PyObject *name)
return NULL;
res = ptr;
}
-#ifndef Py_GIL_DISABLED
+ return res;
#undef pname
#undef ptrs
-#endif
- return res;
}
-// Return true if "name" corresponds to at least one slot definition. This is
-// a more accurate but more expensive test compared to is_dunder_name().
-static bool
-has_slotdef(PyObject *name)
-{
- for (pytype_slotdef *p = slotdefs; p->name_strobj; p++) {
- if (p->name_strobj == name) {
- return true;
- }
- }
- return false;
-}
/* Common code for update_slots_callback() and fixup_slot_dispatchers().
*
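This hunk also restores the per-interpreter pname/ptrs pair in resolve_slotdups(): a one-entry memo cache, so that resolving the same interned name repeatedly skips the slotdef scan. A generic standalone sketch of that cache shape (invented names; like the original, it is only sound when callers are serialized, which is why the free-threaded build had bypassed it):

    /* Generic sketch of a one-entry memo cache in the shape of
     * type_slots_pname/type_slots_ptrs: remember the last key and its
     * result, recompute only when the key changes. Invented names. */
    struct memo {
        const void *last_key;
        int         last_value;
    };

    static int
    lookup_with_memo(struct memo *m, const void *key,
                     int (*compute)(const void *))
    {
        if (m->last_key != key) {
            m->last_value = compute(key);
            m->last_key = key;  /* cache key and value together */
        }
        return m->last_value;
    }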
@@ -11260,7 +11152,7 @@ has_slotdef(PyObject *name)
static pytype_slotdef *
update_one_slot(PyTypeObject *type, pytype_slotdef *p)
{
- ASSERT_WORLD_STOPPED_OR_NEW_TYPE(type);
+ ASSERT_TYPE_LOCK_HELD();
PyObject *descr;
PyWrapperDescrObject *d;
@@ -11383,7 +11275,7 @@ update_one_slot(PyTypeObject *type, pytype_slotdef *p)
static int
update_slots_callback(PyTypeObject *type, void *data)
{
- ASSERT_WORLD_STOPPED_OR_NEW_TYPE(type);
+ ASSERT_TYPE_LOCK_HELD();
pytype_slotdef **pp = (pytype_slotdef **)data;
for (; *pp; pp++) {
@@ -11401,7 +11293,7 @@ update_slot(PyTypeObject *type, PyObject *name)
pytype_slotdef **pp;
int offset;
- assert(types_world_is_stopped());
+ ASSERT_TYPE_LOCK_HELD();
assert(PyUnicode_CheckExact(name));
assert(PyUnicode_CHECK_INTERNED(name));
@@ -11435,27 +11327,33 @@ update_slot(PyTypeObject *type, PyObject *name)
static void
fixup_slot_dispatchers(PyTypeObject *type)
{
+ // This lock isn't strictly necessary because the type has not been
+ // exposed to anyone else yet, but update_one_slot calls find_name_in_mro
+ // where we'd like to assert that the type is locked.
+ BEGIN_TYPE_LOCK();
+
assert(!PyErr_Occurred());
for (pytype_slotdef *p = slotdefs; p->name; ) {
p = update_one_slot(type, p);
}
+
+ END_TYPE_LOCK();
}
-// Called when __bases__ is re-assigned.
static void
update_all_slots(PyTypeObject* type)
{
pytype_slotdef *p;
- assert(types_world_is_stopped());
+ ASSERT_TYPE_LOCK_HELD();
+
+ /* Clear the VALID_VERSION flag of 'type' and all its subclasses. */
+ type_modified_unlocked(type);
for (p = slotdefs; p->name; p++) {
/* update_slot returns int but can't actually fail */
update_slot(type, p->name_strobj);
}
-
- /* Clear the VALID_VERSION flag of 'type' and all its subclasses. */
- type_modified_unlocked(type);
}
@@ -11727,10 +11625,7 @@ PyType_Freeze(PyTypeObject *type)
}
BEGIN_TYPE_LOCK();
- types_stop_world();
type_add_flags(type, Py_TPFLAGS_IMMUTABLETYPE);
- types_start_world();
- ASSERT_TYPE_LOCK_HELD();
type_modified_unlocked(type);
END_TYPE_LOCK();
diff --git a/Python/ceval.c b/Python/ceval.c
index b7b7f9cc7db76b..fb72fd49811e2d 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -138,19 +138,6 @@
#endif
-static void
-check_invalid_reentrancy(void)
-{
-#if defined(Py_DEBUG) && defined(Py_GIL_DISABLED)
- // In the free-threaded build, the interpreter must not be re-entered if
- // the world-is-stopped. If so, that's a bug somewhere (quite likely in
- // the painfully complex typeobject code).
- PyInterpreterState *interp = _PyInterpreterState_GET();
- assert(!interp->stoptheworld.world_stopped);
-#endif
-}
-
-
#ifdef Py_DEBUG
static void
dump_item(_PyStackRef item)
@@ -1008,7 +995,6 @@ PyObject* _Py_HOT_FUNCTION DONT_SLP_VECTORIZE
_PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame,
int throwflag)
{
_Py_EnsureTstateNotNULL(tstate);
- check_invalid_reentrancy();
CALL_STAT_INC(pyeval_calls);
#if USE_COMPUTED_GOTOS && !Py_TAIL_CALL_INTERP
diff --git a/Tools/tsan/suppressions_free_threading.txt b/Tools/tsan/suppressions_free_threading.txt
index 404c30157362aa..21224e490b8160 100644
--- a/Tools/tsan/suppressions_free_threading.txt
+++ b/Tools/tsan/suppressions_free_threading.txt
@@ -12,12 +12,15 @@
# These warnings trigger directly in a CPython function.
+race_top:assign_version_tag
+race_top:_Py_slot_tp_getattr_hook
race_top:dump_traceback
race_top:fatal_error
race_top:_PyFrame_GetCode
race_top:_PyFrame_Initialize
race_top:_PyObject_TryGetInstanceAttribute
race_top:PyUnstable_InterpreterFrame_GetLine
+race_top:type_modified_unlocked
race_top:write_thread_id
# gh-129068: race on shared range iterators (test_free_threading.test_zip.ZipThreading.test_threading)
@@ -26,6 +29,9 @@ race_top:rangeiter_next
# gh-129748: test.test_free_threading.test_slots.TestSlots.test_object
race_top:mi_block_set_nextx
+# gh-127266: type slot updates are not thread-safe (test_opcache.test_load_attr_method_lazy_dict)
+race_top:update_one_slot
+
# https://gist.github.com/mpage/6962e8870606cfc960e159b407a0cb40
thread:pthread_create
@@ -40,4 +46,4 @@ race:list_inplace_repeat_lock_held
# PyObject_Realloc internally does memcpy which isn't atomic so can race
# with non-locking reads. See #132070
-race:PyObject_Realloc
+race:PyObject_Realloc
\ No newline at end of file