https://github.com/python/cpython/commit/e414a2d81c3e15553516979e146d9f258fb47b2e
commit: e414a2d81c3e15553516979e146d9f258fb47b2e
branch: main
author: Neil Schemenauer <nas-git...@arctrix.com>
committer: nascheme <nas-git...@arctrix.com>
date: 2025-04-28T20:28:44Z
summary:
gh-127266: avoid data races when updating type slots (gh-131174) In the free-threaded build, avoid data races caused by updating type slots or type flags after the type was initially created. For those (typically rare) cases, use the stop-the-world mechanism. Remove the use of atomics when reading or writing type flags. The use of atomics is not sufficient to avoid races (since flags are sometimes read without a lock and without atomics) and are no longer required. files: A Misc/NEWS.d/next/Core_and_Builtins/2025-03-14-13-08-20.gh-issue-127266._tyfBp.rst M Include/internal/pycore_interp_structs.h M Include/internal/pycore_object.h M Include/internal/pycore_typeobject.h M Include/object.h M Include/refcount.h M Lib/test/test_opcache.py M Objects/typeobject.c M Python/ceval.c M Tools/tsan/suppressions_free_threading.txt diff --git a/Include/internal/pycore_interp_structs.h b/Include/internal/pycore_interp_structs.h index af6ee3ab48939f..f193fed1153f14 100644 --- a/Include/internal/pycore_interp_structs.h +++ b/Include/internal/pycore_interp_structs.h @@ -667,8 +667,11 @@ struct _Py_interp_cached_objects { /* object.__reduce__ */ PyObject *objreduce; +#ifndef Py_GIL_DISABLED + /* resolve_slotdups() */ PyObject *type_slots_pname; pytype_slotdef *type_slots_ptrs[MAX_EQUIV]; +#endif /* TypeVar and related types */ PyTypeObject *generic_type; diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h index b7e162c8abcabf..986bcc9fd08b1c 100644 --- a/Include/internal/pycore_object.h +++ b/Include/internal/pycore_object.h @@ -313,7 +313,7 @@ extern int _PyDict_CheckConsistency(PyObject *mp, int check_content); // Fast inlined version of PyType_HasFeature() static inline int _PyType_HasFeature(PyTypeObject *type, unsigned long feature) { - return ((FT_ATOMIC_LOAD_ULONG_RELAXED(type->tp_flags) & feature) != 0); + return ((type->tp_flags) & feature) != 0; } extern void _PyType_InitCache(PyInterpreterState *interp); diff --git a/Include/internal/pycore_typeobject.h b/Include/internal/pycore_typeobject.h index 1a4f89fd2449a0..0ee7d555c56cdd 100644 --- a/Include/internal/pycore_typeobject.h +++ b/Include/internal/pycore_typeobject.h @@ -134,7 +134,6 @@ extern int _PyType_AddMethod(PyTypeObject *, PyMethodDef *); extern void _PyType_SetFlagsRecursive(PyTypeObject *self, unsigned long mask, unsigned long flags); -extern unsigned int _PyType_GetVersionForCurrentState(PyTypeObject *tp); PyAPI_FUNC(void) _PyType_SetVersion(PyTypeObject *tp, unsigned int version); PyTypeObject *_PyType_LookupByVersion(unsigned int version); diff --git a/Include/object.h b/Include/object.h index 8cc83abb8574e3..994cac1ad17501 100644 --- a/Include/object.h +++ b/Include/object.h @@ -620,6 +620,12 @@ given type object has a specified feature. #define Py_TPFLAGS_HAVE_FINALIZE (1UL << 0) #define Py_TPFLAGS_HAVE_VERSION_TAG (1UL << 18) +// Flag values for ob_flags (16 bits available, if SIZEOF_VOID_P > 4). 
+#define _Py_IMMORTAL_FLAGS (1 << 0) +#define _Py_STATICALLY_ALLOCATED_FLAG (1 << 2) +#if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) +#define _Py_TYPE_REVEALED_FLAG (1 << 3) +#endif #define Py_CONSTANT_NONE 0 #define Py_CONSTANT_FALSE 1 @@ -776,11 +782,7 @@ PyType_HasFeature(PyTypeObject *type, unsigned long feature) // PyTypeObject is opaque in the limited C API flags = PyType_GetFlags(type); #else -# ifdef Py_GIL_DISABLED - flags = _Py_atomic_load_ulong_relaxed(&type->tp_flags); -# else - flags = type->tp_flags; -# endif + flags = type->tp_flags; #endif return ((flags & feature) != 0); } diff --git a/Include/refcount.h b/Include/refcount.h index 177bbdaf0c5977..ebd1dba6d15e1a 100644 --- a/Include/refcount.h +++ b/Include/refcount.h @@ -19,9 +19,6 @@ immortal. The latter should be the only instances that require cleanup during runtime finalization. */ -#define _Py_STATICALLY_ALLOCATED_FLAG 4 -#define _Py_IMMORTAL_FLAGS 1 - #if SIZEOF_VOID_P > 4 /* In 64+ bit systems, any object whose 32 bit reference count is >= 2**31 diff --git a/Lib/test/test_opcache.py b/Lib/test/test_opcache.py index 21d7e62833c061..40349339df54fd 100644 --- a/Lib/test/test_opcache.py +++ b/Lib/test/test_opcache.py @@ -576,6 +576,7 @@ class TestRacesDoNotCrash(TestBase): # Careful with these. Bigger numbers have a higher chance of catching bugs, # but you can also burn through a *ton* of type/dict/function versions: ITEMS = 1000 + SMALL_ITEMS = 100 LOOPS = 4 WRITERS = 2 @@ -619,7 +620,7 @@ class C: __getitem__ = lambda self, item: None items = [] - for _ in range(self.ITEMS): + for _ in range(self.SMALL_ITEMS): item = C() items.append(item) return items @@ -790,7 +791,7 @@ class C: __getattribute__ = lambda self, name: None items = [] - for _ in range(self.ITEMS): + for _ in range(self.SMALL_ITEMS): item = C() items.append(item) return items diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-03-14-13-08-20.gh-issue-127266._tyfBp.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-03-14-13-08-20.gh-issue-127266._tyfBp.rst new file mode 100644 index 00000000000000..b26977628de136 --- /dev/null +++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-03-14-13-08-20.gh-issue-127266._tyfBp.rst @@ -0,0 +1,6 @@ +In the free-threaded build, avoid data races caused by updating type slots +or type flags after the type was initially created. For those (typically +rare) cases, use the stop-the-world mechanism. Remove the use of atomics +when reading or writing type flags. The use of atomics is not sufficient to +avoid races (since flags are sometimes read without a lock and without +atomics) and are no longer required. diff --git a/Objects/typeobject.c b/Objects/typeobject.c index 4e614daaa6955b..22628018cc2a8f 100644 --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -48,7 +48,7 @@ class object "PyObject *" "&PyBaseObject_Type" & ((1 << MCACHE_SIZE_EXP) - 1)) #define MCACHE_HASH_METHOD(type, name) \ - MCACHE_HASH(FT_ATOMIC_LOAD_UINT32_RELAXED((type)->tp_version_tag), \ + MCACHE_HASH(FT_ATOMIC_LOAD_UINT_RELAXED((type)->tp_version_tag), \ ((Py_ssize_t)(name)) >> 3) #define MCACHE_CACHEABLE_NAME(name) \ PyUnicode_CheckExact(name) && \ @@ -60,11 +60,19 @@ class object "PyObject *" "&PyBaseObject_Type" #ifdef Py_GIL_DISABLED -// There's a global lock for mutation of types. This avoids having to take -// additional locks while doing various subclass processing which may result -// in odd behaviors w.r.t. 
running with the GIL as the outer type lock could -// be released and reacquired during a subclass update if there's contention -// on the subclass lock. +// There's a global lock for types that ensures that tp_version_tag and +// _spec_cache are correctly updated if the type is modified. It also protects +// tp_mro, tp_bases, and tp_base. This avoids having to take additional locks +// while doing various subclass processing which may result in odd behaviors +// w.r.t. running with the GIL as the outer type lock could be released and +// reacquired during a subclass update if there's contention on the subclass +// lock. +// +// Note that this lock does not protect updates of other type slots or the +// tp_flags member. Instead, we either ensure those updates are done before +// the type has been revealed to other threads or we only do those updates +// while the stop-the-world mechanism is active. The slots and flags are read +// in many places without holding a lock and without atomics. #define TYPE_LOCK &PyInterpreterState_Get()->types.mutex #define BEGIN_TYPE_LOCK() Py_BEGIN_CRITICAL_SECTION_MUT(TYPE_LOCK) #define END_TYPE_LOCK() Py_END_CRITICAL_SECTION() @@ -74,8 +82,59 @@ class object "PyObject *" "&PyBaseObject_Type" #define END_TYPE_DICT_LOCK() Py_END_CRITICAL_SECTION2() +#ifdef Py_DEBUG +// Return true if the world is currently stopped. +static bool +types_world_is_stopped(void) +{ + PyInterpreterState *interp = _PyInterpreterState_GET(); + return interp->stoptheworld.world_stopped; +} +#endif + +// Checks that the type has not yet been revealed (exposed) to other +// threads. The _Py_TYPE_REVEALED_FLAG flag is set by type_new() and +// PyType_FromMetaclass() to indicate that a newly initialized type might be +// revealed. We only have ob_flags on 64-bit platforms. +#if SIZEOF_VOID_P > 4 +#define TYPE_IS_REVEALED(tp) ((((PyObject *)(tp))->ob_flags & _Py_TYPE_REVEALED_FLAG) != 0) +#else +#define TYPE_IS_REVEALED(tp) 0 +#endif + +#ifdef Py_DEBUG #define ASSERT_TYPE_LOCK_HELD() \ - _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(TYPE_LOCK) + if (!types_world_is_stopped()) { _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(TYPE_LOCK); } + +// Checks if we can safely update type slots or tp_flags. 
+#define ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp) \ + assert(!TYPE_IS_REVEALED(tp) || types_world_is_stopped()) + +#define ASSERT_NEW_TYPE_OR_LOCKED(tp) \ + if (TYPE_IS_REVEALED(tp)) { ASSERT_TYPE_LOCK_HELD(); } +#else +#define ASSERT_TYPE_LOCK_HELD() +#define ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp) +#define ASSERT_NEW_TYPE_OR_LOCKED(tp) +#endif + +static void +types_stop_world(void) +{ + assert(!types_world_is_stopped()); + PyInterpreterState *interp = _PyInterpreterState_GET(); + _PyEval_StopTheWorld(interp); + assert(types_world_is_stopped()); +} + +static void +types_start_world(void) +{ + assert(types_world_is_stopped()); + PyInterpreterState *interp = _PyInterpreterState_GET(); + _PyEval_StartTheWorld(interp); + assert(!types_world_is_stopped()); +} #else @@ -84,6 +143,12 @@ class object "PyObject *" "&PyBaseObject_Type" #define BEGIN_TYPE_DICT_LOCK(d) #define END_TYPE_DICT_LOCK() #define ASSERT_TYPE_LOCK_HELD() +#define TYPE_IS_REVEALED(tp) 0 +#define ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp) +#define ASSERT_NEW_TYPE_OR_LOCKED(tp) +#define types_world_is_stopped() 1 +#define types_stop_world() +#define types_start_world() #endif @@ -346,21 +411,14 @@ _PyStaticType_GetBuiltins(void) static void type_set_flags(PyTypeObject *tp, unsigned long flags) { - if (tp->tp_flags & Py_TPFLAGS_READY) { - // It's possible the type object has been exposed to other threads - // if it's been marked ready. In that case, the type lock should be - // held when flags are modified. - ASSERT_TYPE_LOCK_HELD(); - } - // Since PyType_HasFeature() reads the flags without holding the type - // lock, we need an atomic store here. - FT_ATOMIC_STORE_ULONG_RELAXED(tp->tp_flags, flags); + ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp); + tp->tp_flags = flags; } static void type_set_flags_with_mask(PyTypeObject *tp, unsigned long mask, unsigned long flags) { - ASSERT_TYPE_LOCK_HELD(); + ASSERT_WORLD_STOPPED_OR_NEW_TYPE(tp); unsigned long new_flags = (tp->tp_flags & ~mask) | flags; type_set_flags(tp, new_flags); } @@ -498,6 +556,7 @@ static inline void set_tp_bases(PyTypeObject *self, PyObject *bases, int initial) { assert(PyTuple_Check(bases)); + ASSERT_NEW_TYPE_OR_LOCKED(self); if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { // XXX tp_bases can probably be statically allocated for each // static builtin type. @@ -542,7 +601,7 @@ clear_tp_bases(PyTypeObject *self, int final) static inline PyObject * lookup_tp_mro(PyTypeObject *self) { - ASSERT_TYPE_LOCK_HELD(); + ASSERT_NEW_TYPE_OR_LOCKED(self); return self->tp_mro; } @@ -1027,7 +1086,6 @@ PyType_Unwatch(int watcher_id, PyObject* obj) static void set_version_unlocked(PyTypeObject *tp, unsigned int version) { - ASSERT_TYPE_LOCK_HELD(); assert(version == 0 || (tp->tp_versions_used != _Py_ATTR_CACHE_UNUSED)); #ifndef Py_GIL_DISABLED PyInterpreterState *interp = _PyInterpreterState_GET(); @@ -1075,7 +1133,7 @@ type_modified_unlocked(PyTypeObject *type) We don't assign new version tags eagerly, but only as needed. */ - ASSERT_TYPE_LOCK_HELD(); + ASSERT_NEW_TYPE_OR_LOCKED(type); if (type->tp_version_tag == 0) { return; } @@ -1106,6 +1164,8 @@ type_modified_unlocked(PyTypeObject *type) while (bits) { assert(i < TYPE_MAX_WATCHERS); if (bits & 1) { + // Note that PyErr_FormatUnraisable is potentially re-entrant + // and the watcher callback might be too. 
PyType_WatchCallback cb = interp->type_watchers[i]; if (cb && (cb(type) < 0)) { PyErr_FormatUnraisable( @@ -1245,14 +1305,6 @@ _PyType_LookupByVersion(unsigned int version) #endif } -unsigned int -_PyType_GetVersionForCurrentState(PyTypeObject *tp) -{ - return tp->tp_version_tag; -} - - - #define MAX_VERSIONS_PER_CLASS 1000 #if _Py_ATTR_CACHE_UNUSED < MAX_VERSIONS_PER_CLASS #error "_Py_ATTR_CACHE_UNUSED must be bigger than max" @@ -1586,10 +1638,13 @@ type_set_abstractmethods(PyObject *tp, PyObject *value, void *Py_UNUSED(closure) BEGIN_TYPE_LOCK(); type_modified_unlocked(type); + types_stop_world(); if (abstract) type_add_flags(type, Py_TPFLAGS_IS_ABSTRACT); else type_clear_flags(type, Py_TPFLAGS_IS_ABSTRACT); + types_start_world(); + ASSERT_TYPE_LOCK_HELD(); END_TYPE_LOCK(); return 0; @@ -1624,8 +1679,8 @@ type_get_mro(PyObject *tp, void *Py_UNUSED(closure)) return mro; } -static PyTypeObject *best_base(PyObject *); -static int mro_internal(PyTypeObject *, PyObject **); +static PyTypeObject *find_best_base(PyObject *); +static int mro_internal(PyTypeObject *, int, PyObject **); static int type_is_subtype_base_chain(PyTypeObject *, PyTypeObject *); static int compatible_for_assignment(PyTypeObject *, PyTypeObject *, const char *); static int add_subclass(PyTypeObject*, PyTypeObject*); @@ -1640,13 +1695,15 @@ static int update_subclasses(PyTypeObject *type, PyObject *attr_name, static int recurse_down_subclasses(PyTypeObject *type, PyObject *name, update_callback callback, void *data); +// Compute tp_mro for this type and all of its subclasses. This +// is called after __bases__ is assigned to an existing type. static int mro_hierarchy(PyTypeObject *type, PyObject *temp) { ASSERT_TYPE_LOCK_HELD(); PyObject *old_mro; - int res = mro_internal(type, &old_mro); + int res = mro_internal(type, 0, &old_mro); if (res <= 0) { /* error / reentrance */ return res; @@ -1708,9 +1765,9 @@ mro_hierarchy(PyTypeObject *type, PyObject *temp) } static int -type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases) +type_check_new_bases(PyTypeObject *type, PyObject *new_bases, PyTypeObject **best_base) { - // Check arguments + // Check arguments, this is re-entrant due to the PySys_Audit() call if (!check_set_special_type_attr(type, new_bases, "__bases__")) { return -1; } @@ -1759,20 +1816,29 @@ type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases) } // Compute the new MRO and the new base class - PyTypeObject *new_base = best_base(new_bases); - if (new_base == NULL) + *best_base = find_best_base(new_bases); + if (*best_base == NULL) return -1; - if (!compatible_for_assignment(type->tp_base, new_base, "__bases__")) { + if (!compatible_for_assignment(type->tp_base, *best_base, "__bases__")) { return -1; } + return 0; +} + +static int +type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases, PyTypeObject *best_base) +{ + ASSERT_TYPE_LOCK_HELD(); + + Py_ssize_t n; PyObject *old_bases = lookup_tp_bases(type); assert(old_bases != NULL); PyTypeObject *old_base = type->tp_base; set_tp_bases(type, Py_NewRef(new_bases), 0); - type->tp_base = (PyTypeObject *)Py_NewRef(new_base); + type->tp_base = (PyTypeObject *)Py_NewRef(best_base); PyObject *temp = PyList_New(0); if (temp == NULL) { @@ -1796,7 +1862,10 @@ type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases) add to all new_bases */ remove_all_subclasses(type, old_bases); res = add_all_subclasses(type, new_bases); + types_stop_world(); update_all_slots(type); + types_start_world(); + ASSERT_TYPE_LOCK_HELD(); } else { res = 0; @@ 
-1827,13 +1896,13 @@ type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases) bail: if (lookup_tp_bases(type) == new_bases) { - assert(type->tp_base == new_base); + assert(type->tp_base == best_base); set_tp_bases(type, old_bases, 0); type->tp_base = old_base; Py_DECREF(new_bases); - Py_DECREF(new_base); + Py_DECREF(best_base); } else { Py_DECREF(old_bases); @@ -1848,9 +1917,13 @@ static int type_set_bases(PyObject *tp, PyObject *new_bases, void *Py_UNUSED(closure)) { PyTypeObject *type = PyTypeObject_CAST(tp); + PyTypeObject *best_base; int res; BEGIN_TYPE_LOCK(); - res = type_set_bases_unlocked(type, new_bases); + res = type_check_new_bases(type, new_bases, &best_base); + if (res == 0) { + res = type_set_bases_unlocked(type, new_bases, best_base); + } END_TYPE_LOCK(); return res; } @@ -3092,6 +3165,7 @@ static PyObject * class_name(PyObject *cls) { PyObject *name; + // Note that this is potentially re-entrant. if (PyObject_GetOptionalAttr(cls, &_Py_ID(__name__), &name) == 0) { name = PyObject_Repr(cls); } @@ -3428,9 +3502,13 @@ mro_invoke(PyTypeObject *type) const int custom = !Py_IS_TYPE(type, &PyType_Type); if (custom) { + // Custom mro() method on metaclass. This is potentially re-entrant. + // We are called either from type_ready() or from type_set_bases(). mro_result = call_method_noarg((PyObject *)type, &_Py_ID(mro)); } else { + // In this case, the mro() method on the type object is being used and + // we know that these calls are not re-entrant. mro_result = mro_implementation_unlocked(type); } if (mro_result == NULL) @@ -3478,7 +3556,7 @@ mro_invoke(PyTypeObject *type) - Returns -1 in case of an error. */ static int -mro_internal_unlocked(PyTypeObject *type, int initial, PyObject **p_old_mro) +mro_internal(PyTypeObject *type, int initial, PyObject **p_old_mro) { ASSERT_TYPE_LOCK_HELD(); @@ -3526,21 +3604,11 @@ mro_internal_unlocked(PyTypeObject *type, int initial, PyObject **p_old_mro) return 1; } -static int -mro_internal(PyTypeObject *type, PyObject **p_old_mro) -{ - int res; - BEGIN_TYPE_LOCK(); - res = mro_internal_unlocked(type, 0, p_old_mro); - END_TYPE_LOCK(); - return res; -} - /* Calculate the best base amongst multiple base classes. This is the first one that's on the path to the "solid base". */ static PyTypeObject * -best_base(PyObject *bases) +find_best_base(PyObject *bases) { Py_ssize_t i, n; PyTypeObject *base, *winner, *candidate; @@ -3629,6 +3697,7 @@ static int update_slot(PyTypeObject *, PyObject *); static void fixup_slot_dispatchers(PyTypeObject *); static int type_new_set_names(PyTypeObject *); static int type_new_init_subclass(PyTypeObject *, PyObject *); +static bool has_slotdef(PyObject *); /* * Helpers for __dict__ descriptor. We don't want to expose the dicts @@ -3826,7 +3895,7 @@ type_init(PyObject *cls, PyObject *args, PyObject *kwds) unsigned long PyType_GetFlags(PyTypeObject *type) { - return FT_ATOMIC_LOAD_ULONG_RELAXED(type->tp_flags); + return type->tp_flags; } @@ -4604,6 +4673,10 @@ type_new_impl(type_new_ctx *ctx) } assert(_PyType_CheckConsistency(type)); +#if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4 + // After this point, other threads can potentally use this type. 
+ ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG; +#endif return (PyObject *)type; @@ -4666,7 +4739,7 @@ type_new_get_bases(type_new_ctx *ctx, PyObject **type) } /* Calculate best base, and check that all bases are type objects */ - PyTypeObject *base = best_base(ctx->bases); + PyTypeObject *base = find_best_base(ctx->bases); if (base == NULL) { return -1; } @@ -5081,12 +5154,12 @@ PyType_FromMetaclass( } /* Calculate best base, and check that all bases are type objects */ - PyTypeObject *base = best_base(bases); // borrowed ref + PyTypeObject *base = find_best_base(bases); // borrowed ref if (base == NULL) { goto finally; } - // best_base should check Py_TPFLAGS_BASETYPE & raise a proper exception, - // here we just check its work + // find_best_base() should check Py_TPFLAGS_BASETYPE & raise a proper + // exception, here we just check its work assert(_PyType_HasFeature(base, Py_TPFLAGS_BASETYPE)); /* Calculate sizes */ @@ -5317,6 +5390,10 @@ PyType_FromMetaclass( } assert(_PyType_CheckConsistency(type)); +#if defined(Py_GIL_DISABLED) && defined(Py_DEBUG) && SIZEOF_VOID_P > 4 + // After this point, other threads can potentally use this type. + ((PyObject*)type)->ob_flags |= _Py_TYPE_REVEALED_FLAG; +#endif finally: if (PyErr_Occurred()) { @@ -5610,8 +5687,6 @@ PyObject_GetItemData(PyObject *obj) static PyObject * find_name_in_mro(PyTypeObject *type, PyObject *name, int *error) { - ASSERT_TYPE_LOCK_HELD(); - Py_hash_t hash = _PyObject_HashFast(name); if (hash == -1) { *error = -1; @@ -5920,9 +5995,13 @@ _PyType_CacheGetItemForSpecialization(PyHeapTypeObject *ht, PyObject *descriptor void _PyType_SetFlags(PyTypeObject *self, unsigned long mask, unsigned long flags) { - BEGIN_TYPE_LOCK(); - type_set_flags_with_mask(self, mask, flags); - END_TYPE_LOCK(); + unsigned long new_flags = (self->tp_flags & ~mask) | flags; + if (new_flags != self->tp_flags) { + types_stop_world(); + // can't use new_flags here since they could be out-of-date + self->tp_flags = (self->tp_flags & ~mask) | flags; + types_start_world(); + } } int @@ -5969,9 +6048,9 @@ set_flags_recursive(PyTypeObject *self, unsigned long mask, unsigned long flags) void _PyType_SetFlagsRecursive(PyTypeObject *self, unsigned long mask, unsigned long flags) { - BEGIN_TYPE_LOCK(); + types_stop_world(); set_flags_recursive(self, mask, flags); - END_TYPE_LOCK(); + types_start_world(); } /* This is similar to PyObject_GenericGetAttr(), @@ -6085,6 +6164,8 @@ _Py_type_getattro(PyObject *tp, PyObject *name) return _Py_type_getattro_impl(type, name, NULL); } +// Called by type_setattro(). Updates both the type dict and +// the type versions. static int type_update_dict(PyTypeObject *type, PyDictObject *dict, PyObject *name, PyObject *value, PyObject **old_value) @@ -6114,10 +6195,6 @@ type_update_dict(PyTypeObject *type, PyDictObject *dict, PyObject *name, return -1; } - if (is_dunder_name(name)) { - return update_slot(type, name); - } - return 0; } @@ -6175,7 +6252,9 @@ type_setattro(PyObject *self, PyObject *name, PyObject *value) PyObject *dict = type->tp_dict; if (dict == NULL) { - // We don't just do PyType_Ready because we could already be readying + // This is an unlikely case. PyType_Ready has not yet been done and + // we need to initialize tp_dict. We don't just do PyType_Ready + // because we could already be readying. 
BEGIN_TYPE_LOCK(); dict = type->tp_dict; if (dict == NULL) { @@ -6191,6 +6270,15 @@ type_setattro(PyObject *self, PyObject *name, PyObject *value) BEGIN_TYPE_DICT_LOCK(dict); res = type_update_dict(type, (PyDictObject *)dict, name, value, &old_value); assert(_PyType_CheckConsistency(type)); + if (res == 0) { + if (is_dunder_name(name) && has_slotdef(name)) { + // The name corresponds to a type slot. + types_stop_world(); + res = update_slot(type, name); + types_start_world(); + ASSERT_TYPE_LOCK_HELD(); + } + } END_TYPE_DICT_LOCK(); done: @@ -7120,15 +7208,10 @@ object_set_class(PyObject *self, PyObject *value, void *closure) return -1; } -#ifdef Py_GIL_DISABLED - PyInterpreterState *interp = _PyInterpreterState_GET(); - _PyEval_StopTheWorld(interp); -#endif + types_stop_world(); PyTypeObject *oldto = Py_TYPE(self); int res = object_set_class_world_stopped(self, newto); -#ifdef Py_GIL_DISABLED - _PyEval_StartTheWorld(interp); -#endif + types_start_world(); if (res == 0) { if (oldto->tp_flags & Py_TPFLAGS_HEAPTYPE) { Py_DECREF(oldto); @@ -8536,7 +8619,7 @@ type_ready_mro(PyTypeObject *type, int initial) } /* Calculate method resolution order */ - if (mro_internal_unlocked(type, initial, NULL) < 0) { + if (mro_internal(type, initial, NULL) < 0) { return -1; } PyObject *mro = lookup_tp_mro(type); @@ -11059,12 +11142,21 @@ resolve_slotdups(PyTypeObject *type, PyObject *name) { /* XXX Maybe this could be optimized more -- but is it worth it? */ +#ifdef Py_GIL_DISABLED + pytype_slotdef *ptrs[MAX_EQUIV]; + pytype_slotdef **pp = ptrs; + /* Collect all slotdefs that match name into ptrs. */ + for (pytype_slotdef *p = slotdefs; p->name_strobj; p++) { + if (p->name_strobj == name) + *pp++ = p; + } + *pp = NULL; +#else /* pname and ptrs act as a little cache */ PyInterpreterState *interp = _PyInterpreterState_GET(); #define pname _Py_INTERP_CACHED_OBJECT(interp, type_slots_pname) #define ptrs _Py_INTERP_CACHED_OBJECT(interp, type_slots_ptrs) pytype_slotdef *p, **pp; - void **res, **ptr; if (pname != name) { /* Collect all slotdefs that match name into ptrs. */ @@ -11076,10 +11168,12 @@ resolve_slotdups(PyTypeObject *type, PyObject *name) } *pp = NULL; } +#endif /* Look in all slots of the type matching the name. If exactly one of these has a filled-in slot, return a pointer to that slot. Otherwise, return NULL. */ + void **res, **ptr; res = NULL; for (pp = ptrs; *pp; pp++) { ptr = slotptr(type, (*pp)->offset); @@ -11089,11 +11183,25 @@ resolve_slotdups(PyTypeObject *type, PyObject *name) return NULL; res = ptr; } - return res; +#ifndef Py_GIL_DISABLED #undef pname #undef ptrs +#endif + return res; } +// Return true if "name" corresponds to at least one slot definition. This is +// a more accurate but more expensive test compared to is_dunder_name(). +static bool +has_slotdef(PyObject *name) +{ + for (pytype_slotdef *p = slotdefs; p->name_strobj; p++) { + if (p->name_strobj == name) { + return true; + } + } + return false; +} /* Common code for update_slots_callback() and fixup_slot_dispatchers(). 
* @@ -11152,7 +11260,7 @@ resolve_slotdups(PyTypeObject *type, PyObject *name) static pytype_slotdef * update_one_slot(PyTypeObject *type, pytype_slotdef *p) { - ASSERT_TYPE_LOCK_HELD(); + ASSERT_WORLD_STOPPED_OR_NEW_TYPE(type); PyObject *descr; PyWrapperDescrObject *d; @@ -11275,7 +11383,7 @@ update_one_slot(PyTypeObject *type, pytype_slotdef *p) static int update_slots_callback(PyTypeObject *type, void *data) { - ASSERT_TYPE_LOCK_HELD(); + ASSERT_WORLD_STOPPED_OR_NEW_TYPE(type); pytype_slotdef **pp = (pytype_slotdef **)data; for (; *pp; pp++) { @@ -11293,7 +11401,7 @@ update_slot(PyTypeObject *type, PyObject *name) pytype_slotdef **pp; int offset; - ASSERT_TYPE_LOCK_HELD(); + assert(types_world_is_stopped()); assert(PyUnicode_CheckExact(name)); assert(PyUnicode_CHECK_INTERNED(name)); @@ -11327,33 +11435,27 @@ update_slot(PyTypeObject *type, PyObject *name) static void fixup_slot_dispatchers(PyTypeObject *type) { - // This lock isn't strictly necessary because the type has not been - // exposed to anyone else yet, but update_ont_slot calls find_name_in_mro - // where we'd like to assert that the type is locked. - BEGIN_TYPE_LOCK(); - assert(!PyErr_Occurred()); for (pytype_slotdef *p = slotdefs; p->name; ) { p = update_one_slot(type, p); } - - END_TYPE_LOCK(); } +// Called when __bases__ is re-assigned. static void update_all_slots(PyTypeObject* type) { pytype_slotdef *p; - ASSERT_TYPE_LOCK_HELD(); - - /* Clear the VALID_VERSION flag of 'type' and all its subclasses. */ - type_modified_unlocked(type); + assert(types_world_is_stopped()); for (p = slotdefs; p->name; p++) { /* update_slot returns int but can't actually fail */ update_slot(type, p->name_strobj); } + + /* Clear the VALID_VERSION flag of 'type' and all its subclasses. */ + type_modified_unlocked(type); } @@ -11625,7 +11727,10 @@ PyType_Freeze(PyTypeObject *type) } BEGIN_TYPE_LOCK(); + types_stop_world(); type_add_flags(type, Py_TPFLAGS_IMMUTABLETYPE); + types_start_world(); + ASSERT_TYPE_LOCK_HELD(); type_modified_unlocked(type); END_TYPE_LOCK(); diff --git a/Python/ceval.c b/Python/ceval.c index fb72fd49811e2d..b7b7f9cc7db76b 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -138,6 +138,19 @@ #endif +static void +check_invalid_reentrancy(void) +{ +#if defined(Py_DEBUG) && defined(Py_GIL_DISABLED) + // In the free-threaded build, the interpreter must not be re-entered if + // the world-is-stopped. If so, that's a bug somewhere (quite likely in + // the painfully complex typeobject code). + PyInterpreterState *interp = _PyInterpreterState_GET(); + assert(!interp->stoptheworld.world_stopped); +#endif +} + + #ifdef Py_DEBUG static void dump_item(_PyStackRef item) @@ -995,6 +1008,7 @@ PyObject* _Py_HOT_FUNCTION DONT_SLP_VECTORIZE _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int throwflag) { _Py_EnsureTstateNotNULL(tstate); + check_invalid_reentrancy(); CALL_STAT_INC(pyeval_calls); #if USE_COMPUTED_GOTOS && !Py_TAIL_CALL_INTERP diff --git a/Tools/tsan/suppressions_free_threading.txt b/Tools/tsan/suppressions_free_threading.txt index 21224e490b8160..404c30157362aa 100644 --- a/Tools/tsan/suppressions_free_threading.txt +++ b/Tools/tsan/suppressions_free_threading.txt @@ -12,15 +12,12 @@ # These warnings trigger directly in a CPython function. 
-race_top:assign_version_tag -race_top:_Py_slot_tp_getattr_hook race_top:dump_traceback race_top:fatal_error race_top:_PyFrame_GetCode race_top:_PyFrame_Initialize race_top:_PyObject_TryGetInstanceAttribute race_top:PyUnstable_InterpreterFrame_GetLine -race_top:type_modified_unlocked race_top:write_thread_id # gh-129068: race on shared range iterators (test_free_threading.test_zip.ZipThreading.test_threading) @@ -29,9 +26,6 @@ race_top:rangeiter_next # gh-129748: test.test_free_threading.test_slots.TestSlots.test_object race_top:mi_block_set_nextx -# gh-127266: type slot updates are not thread-safe (test_opcache.test_load_attr_method_lazy_dict) -race_top:update_one_slot - # https://gist.github.com/mpage/6962e8870606cfc960e159b407a0cb40 thread:pthread_create @@ -46,4 +40,4 @@ race:list_inplace_repeat_lock_held # PyObject_Realloc internally does memcpy which isn't atomic so can race # with non-locking reads. See #132070 -race:PyObject_Realloc \ No newline at end of file +race:PyObject_Realloc
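
The pattern this commit is about can be seen from pure Python. The following is only an illustrative sketch, not part of the commit; it is loosely modeled on the TestRacesDoNotCrash tests in Lib/test/test_opcache.py touched above, and the class name, loop counts, and thread counts are arbitrary. One thread repeatedly re-assigns a dunder method, which makes CPython update the corresponding type slot, while other threads keep using instances of the class. With this change, such slot updates on the free-threaded build run under the stop-the-world mechanism, so readers can access tp_* slots and tp_flags without locks or atomics.

import threading

class C:
    def __getitem__(self, item):
        return None

def writer(n):
    for _ in range(n):
        # Re-assigning a dunder on the class makes CPython update the
        # matching type slot (the __getitem__ slot in this case).
        C.__getitem__ = lambda self, item: None

def reader(n):
    obj = C()
    for _ in range(n):
        obj[0]  # keeps reading the slot that the writer keeps replacing

threads = [threading.Thread(target=writer, args=(10_000,))]
threads += [threading.Thread(target=reader, args=(10_000,)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()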
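
A second small illustration (again only a sketch, not from the commit): assigning to an instance's __class__ goes through object_set_class, which the diff above now routes through the shared types_stop_world()/types_start_world() helpers instead of open-coding _PyEval_StopTheWorld()/_PyEval_StartTheWorld().

class A:
    pass

class B(A):
    pass

obj = A()
obj.__class__ = B   # object_set_class: performed with the world stopped
                    # in the free-threaded build
assert type(obj) is B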