| /* Type object implementation */ |
| |
| #include "Python.h" |
| #include "pycore_abstract.h" // _PySequence_IterSearch() |
| #include "pycore_call.h" // _PyObject_VectorcallTstate() |
| #include "pycore_code.h" // CO_FAST_FREE |
| #include "pycore_dict.h" // _PyDict_KeysSize() |
| #include "pycore_frame.h" // _PyInterpreterFrame |
| #include "pycore_lock.h" // _PySeqLock_* |
| #include "pycore_long.h" // _PyLong_IsNegative(), _PyLong_GetOne() |
| #include "pycore_memoryobject.h" // _PyMemoryView_FromBufferProc() |
| #include "pycore_modsupport.h" // _PyArg_NoKwnames() |
| #include "pycore_moduleobject.h" // _PyModule_GetDef() |
| #include "pycore_object.h" // _PyType_HasFeature() |
| #include "pycore_object_alloc.h" // _PyObject_MallocWithType() |
| #include "pycore_pyerrors.h" // _PyErr_Occurred() |
| #include "pycore_pystate.h" // _PyThreadState_GET() |
| #include "pycore_symtable.h" // _Py_Mangle() |
| #include "pycore_typeobject.h" // struct type_cache |
| #include "pycore_unionobject.h" // _Py_union_type_or |
| #include "pycore_weakref.h" // _PyWeakref_GET_REF() |
| #include "opcode.h" // MAKE_CELL |
| |
| #include <stddef.h> // ptrdiff_t |
| |
| /*[clinic input] |
| class type "PyTypeObject *" "&PyType_Type" |
| class object "PyObject *" "&PyBaseObject_Type" |
| [clinic start generated code]*/ |
| /*[clinic end generated code: output=da39a3ee5e6b4b0d input=4b94608d231c434b]*/ |
| |
| #include "clinic/typeobject.c.h" |
| |
| /* Support type attribute lookup cache */ |
| |
| /* The cache can keep references to the names alive for longer than |
| they normally would. This is why the maximum size is limited to |
| MCACHE_MAX_ATTR_SIZE, since it might be a problem if very large |
| strings are used as attribute names. */ |
| #define MCACHE_MAX_ATTR_SIZE 100 |
| #define MCACHE_HASH(version, name_hash) \ |
| (((unsigned int)(version) ^ (unsigned int)(name_hash)) \ |
| & ((1 << MCACHE_SIZE_EXP) - 1)) |
| |
| #define MCACHE_HASH_METHOD(type, name) \ |
| MCACHE_HASH(FT_ATOMIC_LOAD_UINT32_RELAXED((type)->tp_version_tag), \ |
| ((Py_ssize_t)(name)) >> 3) |
| #define MCACHE_CACHEABLE_NAME(name) \ |
| PyUnicode_CheckExact(name) && \ |
| PyUnicode_IS_READY(name) && \ |
| (PyUnicode_GET_LENGTH(name) <= MCACHE_MAX_ATTR_SIZE) |
| |
| #define NEXT_GLOBAL_VERSION_TAG _PyRuntime.types.next_version_tag |
| #define NEXT_VERSION_TAG(interp) \ |
| (interp)->types.next_version_tag |
| |
| #ifdef Py_GIL_DISABLED |
| |
| // There's a global lock for mutation of types. This avoids having to take |
| // additional locks while doing various subclass processing which may result |
| // in odd behaviors w.r.t. running with the GIL as the outer type lock could |
| // be released and reacquired during a subclass update if there's contention |
| // on the subclass lock. |
| #define TYPE_LOCK &PyInterpreterState_Get()->types.mutex |
| #define BEGIN_TYPE_LOCK() Py_BEGIN_CRITICAL_SECTION_MUT(TYPE_LOCK) |
| #define END_TYPE_LOCK() Py_END_CRITICAL_SECTION() |
| |
| #define BEGIN_TYPE_DICT_LOCK(d) \ |
| Py_BEGIN_CRITICAL_SECTION2_MUT(TYPE_LOCK, &_PyObject_CAST(d)->ob_mutex) |
| |
| #define END_TYPE_DICT_LOCK() Py_END_CRITICAL_SECTION2() |
| |
| #define ASSERT_TYPE_LOCK_HELD() \ |
| _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(TYPE_LOCK) |
| |
| #else |
| |
| #define BEGIN_TYPE_LOCK() |
| #define END_TYPE_LOCK() |
| #define BEGIN_TYPE_DICT_LOCK(d) |
| #define END_TYPE_DICT_LOCK() |
| #define ASSERT_TYPE_LOCK_HELD() |
| |
| #endif |
| |
| |
/* A pair of byte offsets used when resolving type slots.  Presumably
   slot_offset locates a slot (or sub-struct pointer) inside the type and
   subslot_offset locates the entry inside that sub-struct — confirm
   against the slotdef machinery later in this file. */
typedef struct PySlot_Offset {
    short subslot_offset;
    short slot_offset;
} PySlot_Offset;
| |
| static void |
| slot_bf_releasebuffer(PyObject *self, Py_buffer *buffer); |
| |
| static void |
| releasebuffer_call_python(PyObject *self, Py_buffer *buffer); |
| |
| static PyObject * |
| slot_tp_new(PyTypeObject *type, PyObject *args, PyObject *kwds); |
| |
| static PyObject * |
| lookup_maybe_method(PyObject *self, PyObject *attr, int *unbound); |
| |
| static int |
| slot_tp_setattro(PyObject *self, PyObject *name, PyObject *value); |
| |
| |
| static inline PyTypeObject * |
| type_from_ref(PyObject *ref) |
| { |
| PyObject *obj = _PyWeakref_GET_REF(ref); |
| if (obj == NULL) { |
| return NULL; |
| } |
| return _PyType_CAST(obj); |
| } |
| |
| |
| /* helpers for for static builtin types */ |
| |
#ifndef NDEBUG
/* Report whether the type's per-runtime index has been assigned.
   The index is stashed in the tp_subclasses slot (1-based), so a NULL
   tp_subclasses means "not initialized".  Debug builds only. */
static inline int
managed_static_type_index_is_set(PyTypeObject *self)
{
    return self->tp_subclasses != NULL;
}
#endif
| |
/* Return the type's 0-based per-runtime index.
   Must only be called after managed_static_type_index_set(). */
static inline size_t
managed_static_type_index_get(PyTypeObject *self)
{
    assert(managed_static_type_index_is_set(self));
    /* We store a 1-based index so 0 can mean "not initialized". */
    return (size_t)self->tp_subclasses - 1;
}
| |
/* Record the type's per-runtime index by smuggling it through the
   tp_subclasses slot. */
static inline void
managed_static_type_index_set(PyTypeObject *self, size_t index)
{
    assert(index < _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES);
    /* We store a 1-based index so 0 can mean "not initialized". */
    self->tp_subclasses = (PyObject *)(index + 1);
}
| |
/* Forget the type's per-runtime index (reset tp_subclasses to NULL,
   i.e. back to the "not initialized" state). */
static inline void
managed_static_type_index_clear(PyTypeObject *self)
{
    self->tp_subclasses = NULL;
}
| |
/* Look up a managed static *extension* type by its extension index.
   Always stores the runtime-wide interpreter count for that slot into
   *p_interp_count.  Returns the type, or NULL if this interpreter has
   not initialized that slot. */
static PyTypeObject *
static_ext_type_lookup(PyInterpreterState *interp, size_t index,
                       int64_t *p_interp_count)
{
    assert(interp->runtime == &_PyRuntime);
    assert(index < _Py_MAX_MANAGED_STATIC_EXT_TYPES);

    /* Extension types live after the builtin slots in the global table. */
    size_t full_index = index + _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES;
    int64_t interp_count =
            _PyRuntime.types.managed_static.types[full_index].interp_count;
    assert((interp_count == 0) ==
            (_PyRuntime.types.managed_static.types[full_index].type == NULL));
    *p_interp_count = interp_count;

    PyTypeObject *type = interp->types.for_extensions.initialized[index].type;
    if (type == NULL) {
        return NULL;
    }
    assert(!interp->types.for_extensions.initialized[index].isbuiltin);
    assert(type == _PyRuntime.types.managed_static.types[full_index].type);
    assert(managed_static_type_index_is_set(type));
    return type;
}
| |
| static managed_static_type_state * |
| managed_static_type_state_get(PyInterpreterState *interp, PyTypeObject *self) |
| { |
| // It's probably a builtin type. |
| size_t index = managed_static_type_index_get(self); |
| managed_static_type_state *state = |
| &(interp->types.builtins.initialized[index]); |
| if (state->type == self) { |
| return state; |
| } |
| if (index > _Py_MAX_MANAGED_STATIC_EXT_TYPES) { |
| return state; |
| } |
| return &(interp->types.for_extensions.initialized[index]); |
| } |
| |
| /* For static types we store some state in an array on each interpreter. */ |
| managed_static_type_state * |
| _PyStaticType_GetState(PyInterpreterState *interp, PyTypeObject *self) |
| { |
| assert(self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN); |
| return managed_static_type_state_get(interp, self); |
| } |
| |
| /* Set the type's per-interpreter state. */ |
| static void |
| managed_static_type_state_init(PyInterpreterState *interp, PyTypeObject *self, |
| int isbuiltin, int initial) |
| { |
| assert(interp->runtime == &_PyRuntime); |
| |
| size_t index; |
| if (initial) { |
| assert(!managed_static_type_index_is_set(self)); |
| if (isbuiltin) { |
| index = interp->types.builtins.num_initialized; |
| assert(index < _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES); |
| } |
| else { |
| PyMutex_Lock(&interp->types.mutex); |
| index = interp->types.for_extensions.next_index; |
| interp->types.for_extensions.next_index++; |
| PyMutex_Unlock(&interp->types.mutex); |
| assert(index < _Py_MAX_MANAGED_STATIC_EXT_TYPES); |
| } |
| managed_static_type_index_set(self, index); |
| } |
| else { |
| index = managed_static_type_index_get(self); |
| if (isbuiltin) { |
| assert(index == interp->types.builtins.num_initialized); |
| assert(index < _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES); |
| } |
| else { |
| assert(index < _Py_MAX_MANAGED_STATIC_EXT_TYPES); |
| } |
| } |
| size_t full_index = isbuiltin |
| ? index |
| : index + _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES; |
| |
| assert((initial == 1) == |
| (_PyRuntime.types.managed_static.types[full_index].interp_count == 0)); |
| (void)_Py_atomic_add_int64( |
| &_PyRuntime.types.managed_static.types[full_index].interp_count, 1); |
| |
| if (initial) { |
| assert(_PyRuntime.types.managed_static.types[full_index].type == NULL); |
| _PyRuntime.types.managed_static.types[full_index].type = self; |
| } |
| else { |
| assert(_PyRuntime.types.managed_static.types[full_index].type == self); |
| } |
| |
| managed_static_type_state *state = isbuiltin |
| ? &(interp->types.builtins.initialized[index]) |
| : &(interp->types.for_extensions.initialized[index]); |
| |
| /* It should only be called once for each builtin type per interpreter. */ |
| assert(state->type == NULL); |
| state->type = self; |
| state->isbuiltin = isbuiltin; |
| |
| /* state->tp_subclasses is left NULL until init_subclasses() sets it. */ |
| /* state->tp_weaklist is left NULL until insert_head() or insert_after() |
| (in weakrefobject.c) sets it. */ |
| |
| if (isbuiltin) { |
| interp->types.builtins.num_initialized++; |
| } |
| else { |
| interp->types.for_extensions.num_initialized++; |
| } |
| } |
| |
| /* Reset the type's per-interpreter state. |
| This basically undoes what managed_static_type_state_init() did. */ |
| static void |
| managed_static_type_state_clear(PyInterpreterState *interp, PyTypeObject *self, |
| int isbuiltin, int final) |
| { |
| size_t index = managed_static_type_index_get(self); |
| size_t full_index = isbuiltin |
| ? index |
| : index + _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES; |
| |
| managed_static_type_state *state = isbuiltin |
| ? &(interp->types.builtins.initialized[index]) |
| : &(interp->types.for_extensions.initialized[index]); |
| assert(state != NULL); |
| |
| assert(_PyRuntime.types.managed_static.types[full_index].interp_count > 0); |
| assert(_PyRuntime.types.managed_static.types[full_index].type == state->type); |
| |
| assert(state->type != NULL); |
| state->type = NULL; |
| assert(state->tp_weaklist == NULL); // It was already cleared out. |
| |
| (void)_Py_atomic_add_int64( |
| &_PyRuntime.types.managed_static.types[full_index].interp_count, -1); |
| if (final) { |
| assert(!_PyRuntime.types.managed_static.types[full_index].interp_count); |
| _PyRuntime.types.managed_static.types[full_index].type = NULL; |
| |
| managed_static_type_index_clear(self); |
| } |
| |
| if (isbuiltin) { |
| assert(interp->types.builtins.num_initialized > 0); |
| interp->types.builtins.num_initialized--; |
| } |
| else { |
| PyMutex_Lock(&interp->types.mutex); |
| assert(interp->types.for_extensions.num_initialized > 0); |
| interp->types.for_extensions.num_initialized--; |
| if (interp->types.for_extensions.num_initialized == 0) { |
| interp->types.for_extensions.next_index = 0; |
| } |
| PyMutex_Unlock(&interp->types.mutex); |
| } |
| } |
| |
| |
| |
| PyObject * |
| _PyStaticType_GetBuiltins(void) |
| { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| Py_ssize_t count = (Py_ssize_t)interp->types.builtins.num_initialized; |
| assert(count <= _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES); |
| |
| PyObject *results = PyList_New(count); |
| if (results == NULL) { |
| return NULL; |
| } |
| for (Py_ssize_t i = 0; i < count; i++) { |
| PyTypeObject *cls = interp->types.builtins.initialized[i].type; |
| assert(cls != NULL); |
| assert(interp->types.builtins.initialized[i].isbuiltin); |
| PyList_SET_ITEM(results, i, Py_NewRef((PyObject *)cls)); |
| } |
| |
| return results; |
| } |
| |
| |
| // Also see _PyStaticType_InitBuiltin() and _PyStaticType_FiniBuiltin(). |
| |
| /* end static builtin helpers */ |
| |
| |
| static inline void |
| start_readying(PyTypeObject *type) |
| { |
| if (type->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| managed_static_type_state *state = managed_static_type_state_get(interp, type); |
| assert(state != NULL); |
| assert(!state->readying); |
| state->readying = 1; |
| return; |
| } |
| assert((type->tp_flags & Py_TPFLAGS_READYING) == 0); |
| type->tp_flags |= Py_TPFLAGS_READYING; |
| } |
| |
| static inline void |
| stop_readying(PyTypeObject *type) |
| { |
| if (type->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| managed_static_type_state *state = managed_static_type_state_get(interp, type); |
| assert(state != NULL); |
| assert(state->readying); |
| state->readying = 0; |
| return; |
| } |
| assert(type->tp_flags & Py_TPFLAGS_READYING); |
| type->tp_flags &= ~Py_TPFLAGS_READYING; |
| } |
| |
| static inline int |
| is_readying(PyTypeObject *type) |
| { |
| if (type->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| managed_static_type_state *state = managed_static_type_state_get(interp, type); |
| assert(state != NULL); |
| return state->readying; |
| } |
| return (type->tp_flags & Py_TPFLAGS_READYING) != 0; |
| } |
| |
| |
| /* accessors for objects stored on PyTypeObject */ |
| |
| static inline PyObject * |
| lookup_tp_dict(PyTypeObject *self) |
| { |
| if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| managed_static_type_state *state = _PyStaticType_GetState(interp, self); |
| assert(state != NULL); |
| return state->tp_dict; |
| } |
| return self->tp_dict; |
| } |
| |
/* Internal accessor for the type's dict (may be NULL). */
PyObject *
_PyType_GetDict(PyTypeObject *self)
{
    /* It returns a borrowed reference. */
    return lookup_tp_dict(self);
}
| |
| PyObject * |
| PyType_GetDict(PyTypeObject *self) |
| { |
| PyObject *dict = lookup_tp_dict(self); |
| return _Py_XNewRef(dict); |
| } |
| |
| static inline void |
| set_tp_dict(PyTypeObject *self, PyObject *dict) |
| { |
| if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| managed_static_type_state *state = _PyStaticType_GetState(interp, self); |
| assert(state != NULL); |
| state->tp_dict = dict; |
| return; |
| } |
| self->tp_dict = dict; |
| } |
| |
| static inline void |
| clear_tp_dict(PyTypeObject *self) |
| { |
| if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| managed_static_type_state *state = _PyStaticType_GetState(interp, self); |
| assert(state != NULL); |
| Py_CLEAR(state->tp_dict); |
| return; |
| } |
| Py_CLEAR(self->tp_dict); |
| } |
| |
| |
/* Return a borrowed reference to the type's tp_bases tuple. */
static inline PyObject *
lookup_tp_bases(PyTypeObject *self)
{
    return self->tp_bases;
}
| |
/* Return a new reference to the type's tp_bases tuple.  The type lock
   is held across the read+incref so the tuple cannot be replaced out
   from under us (relevant on free-threaded builds; the macros are
   no-ops with the GIL). */
PyObject *
_PyType_GetBases(PyTypeObject *self)
{
    PyObject *res;

    BEGIN_TYPE_LOCK();
    res = lookup_tp_bases(self);
    Py_INCREF(res);
    END_TYPE_LOCK();

    return res;
}
| |
/* Install `bases` as the type's tp_bases (reference stolen by the slot).
   For managed static types this may only happen at first initialization
   and the tuple is made immortal, since it is shared across
   interpreters for the lifetime of the runtime. */
static inline void
set_tp_bases(PyTypeObject *self, PyObject *bases, int initial)
{
    assert(PyTuple_CheckExact(bases));
    if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) {
        // XXX tp_bases can probably be statically allocated for each
        // static builtin type.
        assert(initial);
        assert(self->tp_bases == NULL);
        if (PyTuple_GET_SIZE(bases) == 0) {
            /* Only object itself has no bases. */
            assert(self->tp_base == NULL);
        }
        else {
            /* A static type has exactly one base, which must itself be
               a static (immortal) type. */
            assert(PyTuple_GET_SIZE(bases) == 1);
            assert(PyTuple_GET_ITEM(bases, 0) == (PyObject *)self->tp_base);
            assert(self->tp_base->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN);
            assert(_Py_IsImmortal(self->tp_base));
        }
        _Py_SetImmortal(bases);
    }
    self->tp_bases = bases;
}
| |
/* Drop the type's tp_bases.  For managed static types the (immortal)
   tuple is only torn down during the final finalization of the type,
   when the runtime is shutting the type down for good. */
static inline void
clear_tp_bases(PyTypeObject *self, int final)
{
    if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) {
        if (final) {
            if (self->tp_bases != NULL) {
                if (PyTuple_GET_SIZE(self->tp_bases) == 0) {
                    /* The empty tuple was never made immortal here. */
                    Py_CLEAR(self->tp_bases);
                }
                else {
                    assert(_Py_IsImmortal(self->tp_bases));
                    _Py_ClearImmortal(self->tp_bases);
                }
            }
        }
        return;
    }
    Py_CLEAR(self->tp_bases);
}
| |
| |
/* Return a borrowed reference to tp_mro.  Callers must hold the type
   lock (asserted) because the MRO tuple can be replaced during class
   updates. */
static inline PyObject *
lookup_tp_mro(PyTypeObject *self)
{
    ASSERT_TYPE_LOCK_HELD();
    return self->tp_mro;
}
| |
/* Return a new reference to the type's MRO tuple, or NULL if not set.
   On free-threaded builds, first try a lock-free read with a
   conditional incref; fall back to taking the type lock if the tuple
   was concurrently replaced. */
PyObject *
_PyType_GetMRO(PyTypeObject *self)
{
#ifdef Py_GIL_DISABLED
    PyObject *mro = _Py_atomic_load_ptr_relaxed(&self->tp_mro);
    if (mro == NULL) {
        return NULL;
    }
    if (_Py_TryIncrefCompare(&self->tp_mro, mro)) {
        /* Fast path: the tuple is still current and now safely owned. */
        return mro;
    }

    /* Slow path: re-read under the type lock. */
    BEGIN_TYPE_LOCK();
    mro = lookup_tp_mro(self);
    Py_XINCREF(mro);
    END_TYPE_LOCK();
    return mro;
#else
    return Py_XNewRef(lookup_tp_mro(self));
#endif
}
| |
/* Install `mro` as the type's tp_mro (reference stolen by the slot).
   For managed static types this may only happen at first
   initialization and the tuple is made immortal (it is shared across
   interpreters). */
static inline void
set_tp_mro(PyTypeObject *self, PyObject *mro, int initial)
{
    assert(PyTuple_CheckExact(mro));
    if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) {
        // XXX tp_mro can probably be statically allocated for each
        // static builtin type.
        assert(initial);
        assert(self->tp_mro == NULL);
        /* Other checks are done via set_tp_bases. */
        _Py_SetImmortal(mro);
    }
    self->tp_mro = mro;
}
| |
/* Drop the type's tp_mro.  Mirrors clear_tp_bases(): for managed
   static types the immortal tuple is only torn down at final
   finalization. */
static inline void
clear_tp_mro(PyTypeObject *self, int final)
{
    if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) {
        if (final) {
            if (self->tp_mro != NULL) {
                if (PyTuple_GET_SIZE(self->tp_mro) == 0) {
                    /* An empty MRO tuple was never made immortal here. */
                    Py_CLEAR(self->tp_mro);
                }
                else {
                    assert(_Py_IsImmortal(self->tp_mro));
                    _Py_ClearImmortal(self->tp_mro);
                }
            }
        }
        return;
    }
    Py_CLEAR(self->tp_mro);
}
| |
| |
| static PyObject * |
| init_tp_subclasses(PyTypeObject *self) |
| { |
| PyObject *subclasses = PyDict_New(); |
| if (subclasses == NULL) { |
| return NULL; |
| } |
| if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| managed_static_type_state *state = _PyStaticType_GetState(interp, self); |
| state->tp_subclasses = subclasses; |
| return subclasses; |
| } |
| self->tp_subclasses = (void *)subclasses; |
| return subclasses; |
| } |
| |
| static void |
| clear_tp_subclasses(PyTypeObject *self) |
| { |
| /* Delete the dictionary to save memory. _PyStaticType_Dealloc() |
| callers also test if tp_subclasses is NULL to check if a static type |
| has no subclass. */ |
| if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| managed_static_type_state *state = _PyStaticType_GetState(interp, self); |
| Py_CLEAR(state->tp_subclasses); |
| return; |
| } |
| Py_CLEAR(self->tp_subclasses); |
| } |
| |
| static inline PyObject * |
| lookup_tp_subclasses(PyTypeObject *self) |
| { |
| if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| managed_static_type_state *state = _PyStaticType_GetState(interp, self); |
| assert(state != NULL); |
| return state->tp_subclasses; |
| } |
| return (PyObject *)self->tp_subclasses; |
| } |
| |
| int |
| _PyType_HasSubclasses(PyTypeObject *self) |
| { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| if (self->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN |
| // XXX _PyStaticType_GetState() should never return NULL. |
| && _PyStaticType_GetState(interp, self) == NULL) |
| { |
| return 0; |
| } |
| if (lookup_tp_subclasses(self) == NULL) { |
| return 0; |
| } |
| return 1; |
| } |
| |
/* Return a new list of strong references to the type's live subclasses
   (dead weakrefs are skipped).  Returns NULL with an exception set on
   failure. */
PyObject*
_PyType_GetSubclasses(PyTypeObject *self)
{
    PyObject *list = PyList_New(0);
    if (list == NULL) {
        return NULL;
    }

    PyObject *subclasses = lookup_tp_subclasses(self);  // borrowed ref
    if (subclasses == NULL) {
        /* No subclasses dict: return the empty list. */
        return list;
    }
    assert(PyDict_CheckExact(subclasses));
    // The loop cannot modify tp_subclasses, there is no need
    // to hold a strong reference (use a borrowed reference).

    Py_ssize_t i = 0;
    PyObject *ref;  // borrowed ref
    while (PyDict_Next(subclasses, &i, NULL, &ref)) {
        /* type_from_ref() returns a strong reference, or NULL if the
           weakly-referenced subclass is already gone. */
        PyTypeObject *subclass = type_from_ref(ref);
        if (subclass == NULL) {
            continue;
        }

        if (PyList_Append(list, _PyObject_CAST(subclass)) < 0) {
            Py_DECREF(list);
            Py_DECREF(subclass);
            return NULL;
        }
        Py_DECREF(subclass);
    }
    return list;
}
| |
| /* end accessors for objects stored on PyTypeObject */ |
| |
| |
| /* |
| * finds the beginning of the docstring's introspection signature. |
| * if present, returns a pointer pointing to the first '('. |
| * otherwise returns NULL. |
| * |
| * doesn't guarantee that the signature is valid, only that it |
| * has a valid prefix. (the signature must also pass skip_signature.) |
| */ |
| static const char * |
| find_signature(const char *name, const char *doc) |
| { |
| const char *dot; |
| size_t length; |
| |
| if (!doc) |
| return NULL; |
| |
| assert(name != NULL); |
| |
| /* for dotted names like classes, only use the last component */ |
| dot = strrchr(name, '.'); |
| if (dot) |
| name = dot + 1; |
| |
| length = strlen(name); |
| if (strncmp(doc, name, length)) |
| return NULL; |
| doc += length; |
| if (*doc != '(') |
| return NULL; |
| return doc; |
| } |
| |
#define SIGNATURE_END_MARKER ")\n--\n\n"
#define SIGNATURE_END_MARKER_LENGTH 6
/*
 * skips past the end of the docstring's introspection signature.
 * (assumes doc starts with a valid signature prefix.)
 */
static const char *
skip_signature(const char *doc)
{
    for (const char *p = doc; *p != '\0'; p++) {
        if (*p == *SIGNATURE_END_MARKER
            && strncmp(p, SIGNATURE_END_MARKER,
                       SIGNATURE_END_MARKER_LENGTH) == 0) {
            /* found the ")\n--\n\n" terminator: skip past it */
            return p + SIGNATURE_END_MARKER_LENGTH;
        }
        if (p[0] == '\n' && p[1] == '\n') {
            /* blank line before the end marker: not a valid signature */
            return NULL;
        }
    }
    return NULL;
}
| |
/* Debug helper: verify basic invariants of a (ready) type object.
   Each failed CHECK aborts with a diagnostic; always returns 1 so it
   can be used inside assert(). */
int
_PyType_CheckConsistency(PyTypeObject *type)
{
#define CHECK(expr) \
    do { if (!(expr)) { _PyObject_ASSERT_FAILED_MSG((PyObject *)type, Py_STRINGIFY(expr)); } } while (0)

    CHECK(!_PyObject_IsFreed((PyObject *)type));

    if (!(type->tp_flags & Py_TPFLAGS_READY)) {
        /* don't check static types before PyType_Ready() */
        return 1;
    }

    CHECK(Py_REFCNT(type) >= 1);
    CHECK(PyType_Check(type));

    CHECK(!is_readying(type));
    CHECK(lookup_tp_dict(type) != NULL);

    if (type->tp_flags & Py_TPFLAGS_HAVE_GC) {
        // bpo-44263: tp_traverse is required if Py_TPFLAGS_HAVE_GC is set.
        // Note: tp_clear is optional.
        CHECK(type->tp_traverse != NULL);
    }

    if (type->tp_flags & Py_TPFLAGS_DISALLOW_INSTANTIATION) {
        /* Uninstantiable types must expose no way to create instances. */
        CHECK(type->tp_new == NULL);
        CHECK(PyDict_Contains(lookup_tp_dict(type), &_Py_ID(__new__)) == 0);
    }

    return 1;
#undef CHECK
}
| |
| static const char * |
| _PyType_DocWithoutSignature(const char *name, const char *internal_doc) |
| { |
| const char *doc = find_signature(name, internal_doc); |
| |
| if (doc) { |
| doc = skip_signature(doc); |
| if (doc) |
| return doc; |
| } |
| return internal_doc; |
| } |
| |
| PyObject * |
| _PyType_GetDocFromInternalDoc(const char *name, const char *internal_doc) |
| { |
| const char *doc = _PyType_DocWithoutSignature(name, internal_doc); |
| |
| if (!doc || *doc == '\0') { |
| Py_RETURN_NONE; |
| } |
| |
| return PyUnicode_FromString(doc); |
| } |
| |
| static const char * |
| signature_from_flags(int flags) |
| { |
| switch (flags & ~METH_COEXIST) { |
| case METH_NOARGS: |
| return "($self, /)"; |
| case METH_NOARGS|METH_CLASS: |
| return "($type, /)"; |
| case METH_NOARGS|METH_STATIC: |
| return "()"; |
| case METH_O: |
| return "($self, object, /)"; |
| case METH_O|METH_CLASS: |
| return "($type, object, /)"; |
| case METH_O|METH_STATIC: |
| return "(object, /)"; |
| default: |
| return NULL; |
| } |
| } |
| |
/* Return the text signature embedded in an internal doc string as a
   Python str (without the trailing "\n--\n\n" marker), a canned
   signature derived from the method flags, or None.  NULL only on
   string-allocation failure. */
PyObject *
_PyType_GetTextSignatureFromInternalDoc(const char *name, const char *internal_doc, int flags)
{
    const char *start = find_signature(name, internal_doc);
    const char *end;

    if (start)
        end = skip_signature(start);
    else
        end = NULL;
    if (!end) {
        /* No embedded signature: fall back to one implied by the
           calling-convention flags, if any. */
        start = signature_from_flags(flags);
        if (start) {
            return PyUnicode_FromString(start);
        }
        Py_RETURN_NONE;
    }

    /* back "end" up until it points just past the final ')' */
    end -= SIGNATURE_END_MARKER_LENGTH - 1;
    assert((end - start) >= 2); /* should be "()" at least */
    assert(end[-1] == ')');
    assert(end[0] == '\n');
    return PyUnicode_FromStringAndSize(start, end - start);
}
| |
| |
| static struct type_cache* |
| get_type_cache(void) |
| { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| return &interp->types.type_cache; |
| } |
| |
| |
/* Reset every entry of the method cache.  Each cached name is replaced
   with a new reference to `value` (Py_None or NULL); versions and
   cached values are always cleared.  On free-threaded builds each
   entry's seqlock is write-held so lock-free readers retry rather than
   observing a torn entry. */
static void
type_cache_clear(struct type_cache *cache, PyObject *value)
{
    for (Py_ssize_t i = 0; i < (1 << MCACHE_SIZE_EXP); i++) {
        struct type_cache_entry *entry = &cache->hashtable[i];
#ifdef Py_GIL_DISABLED
        _PySeqLock_LockWrite(&entry->sequence);
#endif
        entry->version = 0;
        Py_XSETREF(entry->name, _Py_XNewRef(value));
        entry->value = NULL;
#ifdef Py_GIL_DISABLED
        _PySeqLock_UnlockWrite(&entry->sequence);
#endif
    }
}
| |
| |
| void |
| _PyType_InitCache(PyInterpreterState *interp) |
| { |
| struct type_cache *cache = &interp->types.type_cache; |
| for (Py_ssize_t i = 0; i < (1 << MCACHE_SIZE_EXP); i++) { |
| struct type_cache_entry *entry = &cache->hashtable[i]; |
| assert(entry->name == NULL); |
| |
| entry->version = 0; |
| // Set to None so _PyType_LookupRef() can use Py_SETREF(), |
| // rather than using slower Py_XSETREF(). |
| entry->name = Py_None; |
| entry->value = NULL; |
| } |
| } |
| |
| |
| static unsigned int |
| _PyType_ClearCache(PyInterpreterState *interp) |
| { |
| struct type_cache *cache = &interp->types.type_cache; |
| // Set to None, rather than NULL, so _PyType_LookupRef() can |
| // use Py_SETREF() rather than using slower Py_XSETREF(). |
| type_cache_clear(cache, Py_None); |
| |
| return NEXT_VERSION_TAG(interp) - 1; |
| } |
| |
| |
/* Public entry point: clear the running interpreter's method cache. */
unsigned int
PyType_ClearCache(void)
{
    return _PyType_ClearCache(_PyInterpreterState_GET());
}
| |
| |
/* Interpreter-shutdown teardown for this module's per-interpreter
   state.  The method cache is released (names set to NULL, not None,
   since no further lookups will happen). */
void
_PyTypes_Fini(PyInterpreterState *interp)
{
    struct type_cache *cache = &interp->types.type_cache;
    type_cache_clear(cache, NULL);

    // All the managed static types should have been finalized already.
    assert(interp->types.for_extensions.num_initialized == 0);
    for (size_t i = 0; i < _Py_MAX_MANAGED_STATIC_EXT_TYPES; i++) {
        assert(interp->types.for_extensions.initialized[i].type == NULL);
    }
    assert(interp->types.builtins.num_initialized == 0);
    for (size_t i = 0; i < _Py_MAX_MANAGED_STATIC_BUILTIN_TYPES; i++) {
        assert(interp->types.builtins.initialized[i].type == NULL);
    }
}
| |
| |
| int |
| PyType_AddWatcher(PyType_WatchCallback callback) |
| { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| |
| // start at 1, 0 is reserved for cpython optimizer |
| for (int i = 1; i < TYPE_MAX_WATCHERS; i++) { |
| if (!interp->type_watchers[i]) { |
| interp->type_watchers[i] = callback; |
| return i; |
| } |
| } |
| |
| PyErr_SetString(PyExc_RuntimeError, "no more type watcher IDs available"); |
| return -1; |
| } |
| |
| static inline int |
| validate_watcher_id(PyInterpreterState *interp, int watcher_id) |
| { |
| if (watcher_id < 0 || watcher_id >= TYPE_MAX_WATCHERS) { |
| PyErr_Format(PyExc_ValueError, "Invalid type watcher ID %d", watcher_id); |
| return -1; |
| } |
| if (!interp->type_watchers[watcher_id]) { |
| PyErr_Format(PyExc_ValueError, "No type watcher set for ID %d", watcher_id); |
| return -1; |
| } |
| return 0; |
| } |
| |
| int |
| PyType_ClearWatcher(int watcher_id) |
| { |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| if (validate_watcher_id(interp, watcher_id) < 0) { |
| return -1; |
| } |
| interp->type_watchers[watcher_id] = NULL; |
| return 0; |
| } |
| |
| static int assign_version_tag(PyInterpreterState *interp, PyTypeObject *type); |
| |
/* Mark `obj` (a type) as watched by `watcher_id`, so the watcher is
   notified on future modifications.  Returns 0 on success, -1 with an
   exception set on error. */
int
PyType_Watch(int watcher_id, PyObject* obj)
{
    if (!PyType_Check(obj)) {
        PyErr_SetString(PyExc_ValueError, "Cannot watch non-type");
        return -1;
    }
    PyTypeObject *type = (PyTypeObject *)obj;
    PyInterpreterState *interp = _PyInterpreterState_GET();
    if (validate_watcher_id(interp, watcher_id) < 0) {
        return -1;
    }
    // ensure we will get a callback on the next modification
    BEGIN_TYPE_LOCK();
    assign_version_tag(interp, type);
    type->tp_watched |= (1 << watcher_id);
    END_TYPE_LOCK();
    return 0;
}
| |
| int |
| PyType_Unwatch(int watcher_id, PyObject* obj) |
| { |
| if (!PyType_Check(obj)) { |
| PyErr_SetString(PyExc_ValueError, "Cannot watch non-type"); |
| return -1; |
| } |
| PyTypeObject *type = (PyTypeObject *)obj; |
| PyInterpreterState *interp = _PyInterpreterState_GET(); |
| if (validate_watcher_id(interp, watcher_id)) { |
| return -1; |
| } |
| type->tp_watched &= ~(1 << watcher_id); |
| return 0; |
| } |
| |
/* Assign (or, when version == 0, clear) a type's version tag.  Callers
   must hold the type lock.  With the GIL, this also maintains the
   interpreter's version->type cache; the free-threaded build only
   counts how many tags the type has consumed. */
static void
set_version_unlocked(PyTypeObject *tp, unsigned int version)
{
    ASSERT_TYPE_LOCK_HELD();
#ifndef Py_GIL_DISABLED
    PyInterpreterState *interp = _PyInterpreterState_GET();
    // lookup the old version and set to null
    if (tp->tp_version_tag != 0) {
        PyTypeObject **slot =
            interp->types.type_version_cache
            + (tp->tp_version_tag % TYPE_VERSION_CACHE_SIZE);
        *slot = NULL;
    }
    if (version) {
        tp->tp_versions_used++;
    }
#else
    if (version) {
        _Py_atomic_add_uint16(&tp->tp_versions_used, 1);
    }
#endif
    FT_ATOMIC_STORE_UINT32_RELAXED(tp->tp_version_tag, version);
#ifndef Py_GIL_DISABLED
    if (version != 0) {
        /* Record the new tag in the version->type cache so the
           optimizer can map a version back to its type. */
        PyTypeObject **slot =
            interp->types.type_version_cache
            + (version % TYPE_VERSION_CACHE_SIZE);
        *slot = tp;
    }
#endif
}
| |
/* Invalidate cached data for `type` and (recursively) all of its
   subclasses, then notify any registered type watchers.  Callers must
   hold the type lock. */
static void
type_modified_unlocked(PyTypeObject *type)
{
    /* Invalidate any cached data for the specified type and all
       subclasses.  This function is called after the base
       classes, mro, or attributes of the type are altered.

       Invariants:

       - before tp_version_tag can be set on a type,
         it must first be set on all super types.

       This function clears the tp_version_tag of a
       type (so it must first clear it on all subclasses).  The
       tp_version_tag value is meaningless when equal to zero.
       We don't assign new version tags eagerly, but only as
       needed.
     */
    if (type->tp_version_tag == 0) {
        /* Already invalidated (and so are all subclasses). */
        return;
    }
    // Cannot modify static builtin types.
    assert((type->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) == 0);

    PyObject *subclasses = lookup_tp_subclasses(type);
    if (subclasses != NULL) {
        assert(PyDict_CheckExact(subclasses));

        Py_ssize_t i = 0;
        PyObject *ref;
        while (PyDict_Next(subclasses, &i, NULL, &ref)) {
            /* type_from_ref() returns a strong reference, or NULL for
               an already-dead subclass. */
            PyTypeObject *subclass = type_from_ref(ref);
            if (subclass == NULL) {
                continue;
            }
            type_modified_unlocked(subclass);
            Py_DECREF(subclass);
        }
    }

    // Notify registered type watchers, if any
    if (type->tp_watched) {
        PyInterpreterState *interp = _PyInterpreterState_GET();
        int bits = type->tp_watched;
        int i = 0;
        while (bits) {
            assert(i < TYPE_MAX_WATCHERS);
            if (bits & 1) {
                /* A failing watcher must not abort invalidation: report
                   the error as unraisable and keep going. */
                PyType_WatchCallback cb = interp->type_watchers[i];
                if (cb && (cb(type) < 0)) {
                    PyErr_FormatUnraisable(
                        "Exception ignored in type watcher callback #%d for %R",
                        i, type);
                }
            }
            i++;
            bits >>= 1;
        }
    }

    set_version_unlocked(type, 0); /* 0 is not a valid version tag */
    if (PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE)) {
        // This field *must* be invalidated if the type is modified (see the
        // comment on struct _specialization_cache):
        ((PyHeapTypeObject *)type)->_spec_cache.getitem = NULL;
    }
}
| |
/* Public entry point for invalidating a type's cached data after it
   has been modified. */
void
PyType_Modified(PyTypeObject *type)
{
    // Quick check without the lock held
    /* (A racy read is fine: a zero tag means invalidation already
       happened, and any concurrent writer re-checks under the lock.) */
    if (type->tp_version_tag == 0) {
        return;
    }

    BEGIN_TYPE_LOCK();
    type_modified_unlocked(type);
    END_TYPE_LOCK();
}
| |
| static int |
| is_subtype_with_mro(PyObject *a_mro, PyTypeObject *a, PyTypeObject *b); |
| |
/* Clear the type's version tag if its new MRO/bases make attribute
   caching unsafe.  Callers must hold the type lock. */
static void
type_mro_modified(PyTypeObject *type, PyObject *bases) {
    /*
       Check that all base classes or elements of the MRO of type are
       able to be cached.  This function is called after the base
       classes or mro of the type are altered.

       Unset HAVE_VERSION_TAG and VALID_VERSION_TAG if the type
       has a custom MRO that includes a type which is not officially
       super type, or if the type implements its own mro() method.

       Called from mro_internal, which will subsequently be called on
       each subclass when their mro is recursively updated.
     */
    Py_ssize_t i, n;
    int custom = !Py_IS_TYPE(type, &PyType_Type);
    int unbound;

    ASSERT_TYPE_LOCK_HELD();
    if (custom) {
        /* A metaclass may override mro(); caching is unsafe if it
           differs from type.mro. */
        PyObject *mro_meth, *type_mro_meth;
        mro_meth = lookup_maybe_method(
            (PyObject *)type, &_Py_ID(mro), &unbound);
        if (mro_meth == NULL) {
            goto clear;
        }
        type_mro_meth = lookup_maybe_method(
            (PyObject *)&PyType_Type, &_Py_ID(mro), &unbound);
        if (type_mro_meth == NULL) {
            Py_DECREF(mro_meth);
            goto clear;
        }
        int custom_mro = (mro_meth != type_mro_meth);
        Py_DECREF(mro_meth);
        Py_DECREF(type_mro_meth);
        if (custom_mro) {
            goto clear;
        }
    }
    /* Every base must be a true supertype per the (new) MRO. */
    n = PyTuple_GET_SIZE(bases);
    for (i = 0; i < n; i++) {
        PyObject *b = PyTuple_GET_ITEM(bases, i);
        PyTypeObject *cls = _PyType_CAST(b);

        if (!is_subtype_with_mro(lookup_tp_mro(type), type, cls)) {
            goto clear;
        }
    }
    return;

 clear:
    assert(!(type->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN));
    set_version_unlocked(type, 0); /* 0 is not a valid version tag */
    if (PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE)) {
        // This field *must* be invalidated if the type is modified (see the
        // comment on struct _specialization_cache):
        ((PyHeapTypeObject *)type)->_spec_cache.getitem = NULL;
    }
}
| |
| /* |
| The Tier 2 interpreter requires looking up the type object by the type version, so it can install |
| watchers to understand when they change. |
| |
| So we add a global cache from type version to borrowed references of type objects. |
| |
| This is similar to func_version_cache. |
| */ |
| |
void
_PyType_SetVersion(PyTypeObject *tp, unsigned int version)
{
    /* Assign an explicit version tag to 'tp', holding the type lock. */

    BEGIN_TYPE_LOCK();
    set_version_unlocked(tp, version);
    END_TYPE_LOCK();
}
| |
PyTypeObject *
_PyType_LookupByVersion(unsigned int version)
{
    /* Return a borrowed reference to the type whose tp_version_tag is
       'version', or NULL when it is not in the per-interpreter cache.
       Always NULL in the free-threaded build. */
#ifdef Py_GIL_DISABLED
    return NULL;
#else
    PyInterpreterState *interp = _PyInterpreterState_GET();
    PyTypeObject **slot =
        interp->types.type_version_cache
        + (version % TYPE_VERSION_CACHE_SIZE);
    // Direct-mapped cache: verify the slot really holds this version.
    if (*slot && (*slot)->tp_version_tag == version) {
        return *slot;
    }
    return NULL;
#endif
}
| |
unsigned int
_PyType_GetVersionForCurrentState(PyTypeObject *tp)
{
    /* Plain read of the current version tag; 0 means "no valid tag". */
    return tp->tp_version_tag;
}
| |
| |
| |
| #define MAX_VERSIONS_PER_CLASS 1000 |
| |
static int
assign_version_tag(PyInterpreterState *interp, PyTypeObject *type)
{
    ASSERT_TYPE_LOCK_HELD();

    /* Ensure that the tp_version_tag is valid.
     * To respect the invariant, this must first be done on all super classes.
     * Return 0 if this cannot be done, 1 if tp_version_tag is set.
     */
    if (type->tp_version_tag != 0) {
        return 1;
    }
    if (!_PyType_HasFeature(type, Py_TPFLAGS_READY)) {
        return 0;
    }
    // Refuse once this class has used up its tag budget.
    if (type->tp_versions_used >= MAX_VERSIONS_PER_CLASS) {
        return 0;
    }

    // Invariant: all bases must hold a valid tag before this type does.
    PyObject *bases = lookup_tp_bases(type);
    Py_ssize_t n = PyTuple_GET_SIZE(bases);
    for (Py_ssize_t i = 0; i < n; i++) {
        PyObject *b = PyTuple_GET_ITEM(bases, i);
        if (!assign_version_tag(interp, _PyType_CAST(b))) {
            return 0;
        }
    }
    if (type->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) {
        /* static types: draw from the global (cross-interpreter) range */
        if (NEXT_GLOBAL_VERSION_TAG > _Py_MAX_GLOBAL_TYPE_VERSION_TAG) {
            /* We have run out of version numbers */
            return 0;
        }
        set_version_unlocked(type, NEXT_GLOBAL_VERSION_TAG++);
        assert (type->tp_version_tag <= _Py_MAX_GLOBAL_TYPE_VERSION_TAG);
    }
    else {
        /* heap types: draw from the per-interpreter counter */
        if (NEXT_VERSION_TAG(interp) == 0) {
            /* We have run out of version numbers */
            return 0;
        }
        set_version_unlocked(type, NEXT_VERSION_TAG(interp)++);
        assert (type->tp_version_tag != 0);
    }
    return 1;
}
| |
/* Public (unstable) wrapper: assign a version tag under the type lock.
   Returns 1 when the type has a valid tag afterwards, 0 otherwise. */
int PyUnstable_Type_AssignVersionTag(PyTypeObject *type)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    int assigned;
    BEGIN_TYPE_LOCK();
    assigned = assign_version_tag(interp, type);
    END_TYPE_LOCK();
    return assigned;
}
| |
| |
/* Read-only attributes exposed on type objects (e.g. SomeClass.__flags__);
   each offset indexes directly into the PyTypeObject struct. */
static PyMemberDef type_members[] = {
    {"__basicsize__", Py_T_PYSSIZET, offsetof(PyTypeObject,tp_basicsize),Py_READONLY},
    {"__itemsize__", Py_T_PYSSIZET, offsetof(PyTypeObject, tp_itemsize), Py_READONLY},
    {"__flags__", Py_T_ULONG, offsetof(PyTypeObject, tp_flags), Py_READONLY},
    /* Note that this value is misleading for static builtin types,
       since the memory at this offset will always be NULL. */
    {"__weakrefoffset__", Py_T_PYSSIZET,
     offsetof(PyTypeObject, tp_weaklistoffset), Py_READONLY},
    {"__base__", _Py_T_OBJECT, offsetof(PyTypeObject, tp_base), Py_READONLY},
    {"__dictoffset__", Py_T_PYSSIZET,
     offsetof(PyTypeObject, tp_dictoffset), Py_READONLY},
    {0}
};
| |
| static int |
| check_set_special_type_attr(PyTypeObject *type, PyObject *value, const char *name) |
| { |
| if (_PyType_HasFeature(type, Py_TPFLAGS_IMMUTABLETYPE)) { |
| PyErr_Format(PyExc_TypeError, |
| "cannot set '%s' attribute of immutable type '%s'", |
| name, type->tp_name); |
| return 0; |
| } |
| if (!value) { |
| PyErr_Format(PyExc_TypeError, |
| "cannot delete '%s' attribute of immutable type '%s'", |
| name, type->tp_name); |
| return 0; |
| } |
| |
| if (PySys_Audit("object.__setattr__", "OsO", |
| type, name, value) < 0) { |
| return 0; |
| } |
| |
| return 1; |
| } |
| |
| const char * |
| _PyType_Name(PyTypeObject *type) |
| { |
| assert(type->tp_name != NULL); |
| const char *s = strrchr(type->tp_name, '.'); |
| if (s == NULL) { |
| s = type->tp_name; |
| } |
| else { |
| s++; |
| } |
| return s; |
| } |
| |
| static PyObject * |
| type_name(PyTypeObject *type, void *context) |
| { |
| if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) { |
| PyHeapTypeObject* et = (PyHeapTypeObject*)type; |
| |
| return Py_NewRef(et->ht_name); |
| } |
| else { |
| return PyUnicode_FromString(_PyType_Name(type)); |
| } |
| } |
| |
| static PyObject * |
| type_qualname(PyTypeObject *type, void *context) |
| { |
| if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) { |
| PyHeapTypeObject* et = (PyHeapTypeObject*)type; |
| return Py_NewRef(et->ht_qualname); |
| } |
| else { |
| return PyUnicode_FromString(_PyType_Name(type)); |
| } |
| } |
| |
static int
type_set_name(PyTypeObject *type, PyObject *value, void *context)
{
    /* Setter for type.__name__ (check_set_special_type_attr rejects
       immutable types and deletion). */
    const char *tp_name;
    Py_ssize_t name_size;

    if (!check_set_special_type_attr(type, value, "__name__"))
        return -1;
    if (!PyUnicode_Check(value)) {
        PyErr_Format(PyExc_TypeError,
                     "can only assign string to %s.__name__, not '%s'",
                     type->tp_name, Py_TYPE(value)->tp_name);
        return -1;
    }

    tp_name = PyUnicode_AsUTF8AndSize(value, &name_size);
    if (tp_name == NULL)
        return -1;
    /* An embedded NUL would silently truncate the C string form. */
    if (strlen(tp_name) != (size_t)name_size) {
        PyErr_SetString(PyExc_ValueError,
                        "type name must not contain null characters");
        return -1;
    }

    /* tp_name borrows the UTF-8 buffer owned by 'value'; swapping
       ht_name to the new object keeps that buffer alive. */
    type->tp_name = tp_name;
    Py_SETREF(((PyHeapTypeObject*)type)->ht_name, Py_NewRef(value));

    return 0;
}
| |
| static int |
| type_set_qualname(PyTypeObject *type, PyObject *value, void *context) |
| { |
| PyHeapTypeObject* et; |
| |
| if (!check_set_special_type_attr(type, value, "__qualname__")) |
| return -1; |
| if (!PyUnicode_Check(value)) { |
| PyErr_Format(PyExc_TypeError, |
| "can only assign string to %s.__qualname__, not '%s'", |
| type->tp_name, Py_TYPE(value)->tp_name); |
| return -1; |
| } |
| |
| et = (PyHeapTypeObject*)type; |
| Py_SETREF(et->ht_qualname, Py_NewRef(value)); |
| return 0; |
| } |
| |
static PyObject *
type_module(PyTypeObject *type)
{
    /* Return __module__ for 'type' as a new reference.  Heap types read
       it from the type dict; static types derive it from the dotted
       prefix of tp_name, defaulting to 'builtins'. */
    PyObject *mod;
    if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) {
        PyObject *dict = lookup_tp_dict(type);
        if (PyDict_GetItemRef(dict, &_Py_ID(__module__), &mod) == 0) {
            // Key absent (mod left NULL): raise AttributeError.
            PyErr_Format(PyExc_AttributeError, "__module__");
        }
    }
    else {
        const char *s = strrchr(type->tp_name, '.');
        if (s != NULL) {
            mod = PyUnicode_FromStringAndSize(
                type->tp_name, (Py_ssize_t)(s - type->tp_name));
            if (mod != NULL) {
                PyInterpreterState *interp = _PyInterpreterState_GET();
                _PyUnicode_InternMortal(interp, &mod);
            }
        }
        else {
            // _Py_ID objects are immortal, so no incref is needed.
            mod = &_Py_ID(builtins);
        }
    }
    return mod;
}
| |
static PyObject *
type_get_module(PyTypeObject *type, void *context)
{
    /* Getter wrapper for type.__module__ (see type_module above). */
    return type_module(type);
}
| |
static int
type_set_module(PyTypeObject *type, PyObject *value, void *context)
{
    /* Setter for type.__module__.  Also drops any cached
       __firstlineno__ entry, which was tied to the previous module
       (presumably source-location metadata — see the dict pop below). */
    if (!check_set_special_type_attr(type, value, "__module__"))
        return -1;

    PyType_Modified(type);

    PyObject *dict = lookup_tp_dict(type);
    if (PyDict_Pop(dict, &_Py_ID(__firstlineno__), NULL) < 0) {
        return -1;
    }
    return PyDict_SetItem(dict, &_Py_ID(__module__), value);
}
| |
| |
PyObject *
_PyType_GetFullyQualifiedName(PyTypeObject *type, char sep)
{
    /* Return "<module><sep><qualname>" for heap types, omitting the
       module part when it is 'builtins' or '__main__' (or not a str).
       Static types simply return tp_name. */
    if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
        return PyUnicode_FromString(type->tp_name);
    }

    PyObject *qualname = type_qualname(type, NULL);
    if (qualname == NULL) {
        return NULL;
    }

    PyObject *module = type_module(type);
    if (module == NULL) {
        Py_DECREF(qualname);
        return NULL;
    }

    PyObject *result;
    if (PyUnicode_Check(module)
        && !_PyUnicode_Equal(module, &_Py_ID(builtins))
        && !_PyUnicode_Equal(module, &_Py_ID(__main__)))
    {
        result = PyUnicode_FromFormat("%U%c%U", module, sep, qualname);
    }
    else {
        result = Py_NewRef(qualname);
    }
    Py_DECREF(module);
    Py_DECREF(qualname);
    return result;
}
| |
PyObject *
PyType_GetFullyQualifiedName(PyTypeObject *type)
{
    /* Public API: fully qualified name with '.' as the separator. */
    return _PyType_GetFullyQualifiedName(type, '.');
}
| |
| |
| static PyObject * |
| type_abstractmethods(PyTypeObject *type, void *context) |
| { |
| PyObject *mod = NULL; |
| /* type itself has an __abstractmethods__ descriptor (this). Don't return |
| that. */ |
| if (type == &PyType_Type) { |
| PyErr_SetObject(PyExc_AttributeError, &_Py_ID(__abstractmethods__)); |
| } |
| else { |
| PyObject *dict = lookup_tp_dict(type); |
| if (PyDict_GetItemRef(dict, &_Py_ID(__abstractmethods__), &mod) == 0) { |
| PyErr_SetObject(PyExc_AttributeError, &_Py_ID(__abstractmethods__)); |
| } |
| } |
| return mod; |
| } |
| |
static int
type_set_abstractmethods(PyTypeObject *type, PyObject *value, void *context)
{
    /* __abstractmethods__ should only be set once on a type, in
       abc.ABCMeta.__new__, so this function doesn't do anything
       special to update subclasses.
    */
    int abstract, res;
    PyObject *dict = lookup_tp_dict(type);
    if (value != NULL) {
        // Set: any truthy value marks the type abstract.
        abstract = PyObject_IsTrue(value);
        if (abstract < 0)
            return -1;
        res = PyDict_SetItem(dict, &_Py_ID(__abstractmethods__), value);
    }
    else {
        // Delete: pop the key; res == 0 means it was never set.
        abstract = 0;
        res = PyDict_Pop(dict, &_Py_ID(__abstractmethods__), NULL);
        if (res == 0) {
            PyErr_SetObject(PyExc_AttributeError, &_Py_ID(__abstractmethods__));
            return -1;
        }
    }
    if (res < 0) {
        return -1;
    }

    // Invalidate caches, then mirror the state into tp_flags.
    PyType_Modified(type);
    if (abstract)
        type->tp_flags |= Py_TPFLAGS_IS_ABSTRACT;
    else
        type->tp_flags &= ~Py_TPFLAGS_IS_ABSTRACT;
    return 0;
}
| |
| static PyObject * |
| type_get_bases(PyTypeObject *type, void *context) |
| { |
| PyObject *bases = _PyType_GetBases(type); |
| if (bases == NULL) { |
| Py_RETURN_NONE; |
| } |
| return bases; |
| } |
| |
static PyObject *
type_get_mro(PyTypeObject *type, void *context)
{
    /* Getter for __mro__: a new reference to the MRO tuple, or None
       when it has not been computed; read under the type lock. */
    PyObject *mro;

    BEGIN_TYPE_LOCK();
    mro = lookup_tp_mro(type);
    if (mro == NULL) {
        mro = Py_None;
    } else {
        Py_INCREF(mro);
    }

    END_TYPE_LOCK();
    return mro;
}
| |
| static PyTypeObject *best_base(PyObject *); |
| static int mro_internal(PyTypeObject *, PyObject **); |
| static int type_is_subtype_base_chain(PyTypeObject *, PyTypeObject *); |
| static int compatible_for_assignment(PyTypeObject *, PyTypeObject *, const char *); |
| static int add_subclass(PyTypeObject*, PyTypeObject*); |
| static int add_all_subclasses(PyTypeObject *type, PyObject *bases); |
| static void remove_subclass(PyTypeObject *, PyTypeObject *); |
| static void remove_all_subclasses(PyTypeObject *type, PyObject *bases); |
| static void update_all_slots(PyTypeObject *); |
| |
| typedef int (*update_callback)(PyTypeObject *, void *); |
| static int update_subclasses(PyTypeObject *type, PyObject *attr_name, |
| update_callback callback, void *data); |
| static int recurse_down_subclasses(PyTypeObject *type, PyObject *name, |
| update_callback callback, void *data); |
| |
static int
mro_hierarchy(PyTypeObject *type, PyObject *temp)
{
    /* Recompute the MRO of 'type' and, recursively, of all of its
       subclasses.  For each type processed, append a tuple
       (type, new_mro[, old_mro]) to 'temp' so that the caller can roll
       the changes back on failure.  Returns <0 on error, 0 on
       reentrant no-op, >0 on success (see mro_internal). */
    ASSERT_TYPE_LOCK_HELD();

    PyObject *old_mro;
    int res = mro_internal(type, &old_mro);
    if (res <= 0) {
        /* error / reentrance */
        return res;
    }
    PyObject *new_mro = lookup_tp_mro(type);

    PyObject *tuple;
    if (old_mro != NULL) {
        tuple = PyTuple_Pack(3, type, new_mro, old_mro);
    }
    else {
        tuple = PyTuple_Pack(2, type, new_mro);
    }

    if (tuple != NULL) {
        res = PyList_Append(temp, tuple);
    }
    else {
        res = -1;
    }
    Py_XDECREF(tuple);

    if (res < 0) {
        // Could not record the change; restore the previous MRO.
        set_tp_mro(type, old_mro, 0);
        Py_DECREF(new_mro);
        return -1;
    }
    Py_XDECREF(old_mro);

    // Avoid creating an empty list if there is no subclass
    if (_PyType_HasSubclasses(type)) {
        /* Obtain a copy of subclasses list to iterate over.

           Otherwise type->tp_subclasses might be altered
           in the middle of the loop, for example, through a custom mro(),
           by invoking type_set_bases on some subclass of the type
           which in turn calls remove_subclass/add_subclass on this type.

           Finally, this makes things simple avoiding the need to deal
           with dictionary iterators and weak references.
        */
        PyObject *subclasses = _PyType_GetSubclasses(type);
        if (subclasses == NULL) {
            return -1;
        }

        Py_ssize_t n = PyList_GET_SIZE(subclasses);
        for (Py_ssize_t i = 0; i < n; i++) {
            PyTypeObject *subclass = _PyType_CAST(PyList_GET_ITEM(subclasses, i));
            res = mro_hierarchy(subclass, temp);
            if (res < 0) {
                break;
            }
        }
        Py_DECREF(subclasses);
    }

    return res;
}
| |
static int
type_set_bases_unlocked(PyTypeObject *type, PyObject *new_bases, void *context)
{
    /* Setter for type.__bases__ (type lock held).  Validates the new
       bases, recomputes the MRO of 'type' and of every subclass, and
       updates subclass bookkeeping; on failure everything is rolled
       back via the 'temp' list of (cls, new_mro[, old_mro]) records. */
    // Check arguments
    if (!check_set_special_type_attr(type, new_bases, "__bases__")) {
        return -1;
    }
    assert(new_bases != NULL);

    if (!PyTuple_Check(new_bases)) {
        PyErr_Format(PyExc_TypeError,
                     "can only assign tuple to %s.__bases__, not %s",
                     type->tp_name, Py_TYPE(new_bases)->tp_name);
        return -1;
    }
    if (PyTuple_GET_SIZE(new_bases) == 0) {
        PyErr_Format(PyExc_TypeError,
                     "can only assign non-empty tuple to %s.__bases__, not ()",
                     type->tp_name);
        return -1;
    }
    Py_ssize_t n = PyTuple_GET_SIZE(new_bases);
    for (Py_ssize_t i = 0; i < n; i++) {
        PyObject *ob = PyTuple_GET_ITEM(new_bases, i);
        if (!PyType_Check(ob)) {
            PyErr_Format(PyExc_TypeError,
                         "%s.__bases__ must be tuple of classes, not '%s'",
                         type->tp_name, Py_TYPE(ob)->tp_name);
            return -1;
        }
        PyTypeObject *base = (PyTypeObject*)ob;

        // Reject any base that would create an inheritance cycle.
        if (is_subtype_with_mro(lookup_tp_mro(base), base, type) ||
            /* In case of reentering here again through a custom mro()
               the above check is not enough since it relies on
               base->tp_mro which would gonna be updated inside
               mro_internal only upon returning from the mro().

               However, base->tp_base has already been assigned (see
               below), which in turn may cause an inheritance cycle
               through tp_base chain.  And this is definitely
               not what you want to ever happen.  */
            (lookup_tp_mro(base) != NULL
             && type_is_subtype_base_chain(base, type)))
        {
            PyErr_SetString(PyExc_TypeError,
                            "a __bases__ item causes an inheritance cycle");
            return -1;
        }
    }

    // Compute the new MRO and the new base class
    PyTypeObject *new_base = best_base(new_bases);
    if (new_base == NULL)
        return -1;

    if (!compatible_for_assignment(type->tp_base, new_base, "__bases__")) {
        return -1;
    }

    PyObject *old_bases = lookup_tp_bases(type);
    assert(old_bases != NULL);
    PyTypeObject *old_base = type->tp_base;

    set_tp_bases(type, Py_NewRef(new_bases), 0);
    type->tp_base = (PyTypeObject *)Py_NewRef(new_base);

    PyObject *temp = PyList_New(0);
    if (temp == NULL) {
        goto bail;
    }
    if (mro_hierarchy(type, temp) < 0) {
        goto undo;
    }
    Py_DECREF(temp);

    /* Take no action in case if type->tp_bases has been replaced
       through reentrance.  */
    int res;
    if (lookup_tp_bases(type) == new_bases) {
        /* any base that was in __bases__ but now isn't, we
           need to remove |type| from its tp_subclasses.
           conversely, any class now in __bases__ that wasn't
           needs to have |type| added to its subclasses. */

        /* for now, sod that: just remove from all old_bases,
           add to all new_bases */
        remove_all_subclasses(type, old_bases);
        res = add_all_subclasses(type, new_bases);
        update_all_slots(type);
    }
    else {
        res = 0;
    }

    RARE_EVENT_INC(set_bases);
    Py_DECREF(old_bases);
    Py_DECREF(old_base);

    assert(_PyType_CheckConsistency(type));
    return res;

  undo:
    // Walk the recorded MRO changes in reverse and restore old MROs.
    n = PyList_GET_SIZE(temp);
    for (Py_ssize_t i = n - 1; i >= 0; i--) {
        PyTypeObject *cls;
        PyObject *new_mro, *old_mro = NULL;

        PyArg_UnpackTuple(PyList_GET_ITEM(temp, i),
                          "", 2, 3, &cls, &new_mro, &old_mro);
        /* Do not rollback if cls has a newer version of MRO.  */
        if (lookup_tp_mro(cls) == new_mro) {
            set_tp_mro(cls, Py_XNewRef(old_mro), 0);
            Py_DECREF(new_mro);
        }
    }
    Py_DECREF(temp);

  bail:
    // Restore tp_bases/tp_base unless they were replaced by reentrance.
    if (lookup_tp_bases(type) == new_bases) {
        assert(type->tp_base == new_base);

        set_tp_bases(type, old_bases, 0);
        type->tp_base = old_base;

        Py_DECREF(new_bases);
        Py_DECREF(new_base);
    }
    else {
        Py_DECREF(old_bases);
        Py_DECREF(old_base);
    }

    assert(_PyType_CheckConsistency(type));
    return -1;
}
| |
static int
type_set_bases(PyTypeObject *type, PyObject *new_bases, void *context)
{
    /* Setter for type.__bases__: takes the type lock and delegates. */
    int res;
    BEGIN_TYPE_LOCK();
    res = type_set_bases_unlocked(type, new_bases, context);
    END_TYPE_LOCK();
    return res;
}
| |
| static PyObject * |
| type_dict(PyTypeObject *type, void *context) |
| { |
| PyObject *dict = lookup_tp_dict(type); |
| if (dict == NULL) { |
| Py_RETURN_NONE; |
| } |
| return PyDictProxy_New(dict); |
| } |
| |
static PyObject *
type_get_doc(PyTypeObject *type, void *context)
{
    /* Getter for type.__doc__.  Static types use the C-level tp_doc
       string; otherwise read the type dict, invoking the descriptor
       protocol on the stored value; None when absent. */
    PyObject *result;
    if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE) && type->tp_doc != NULL) {
        return _PyType_GetDocFromInternalDoc(type->tp_name, type->tp_doc);
    }
    PyObject *dict = lookup_tp_dict(type);
    if (PyDict_GetItemRef(dict, &_Py_ID(__doc__), &result) == 0) {
        result = Py_NewRef(Py_None);
    }
    else if (result) {
        descrgetfunc descr_get = Py_TYPE(result)->tp_descr_get;
        if (descr_get) {
            Py_SETREF(result, descr_get(result, NULL, (PyObject *)type));
        }
    }
    return result;
}
| |
static PyObject *
type_get_text_signature(PyTypeObject *type, void *context)
{
    /* Getter for __text_signature__, extracted from the tp_doc blob. */
    return _PyType_GetTextSignatureFromInternalDoc(type->tp_name, type->tp_doc, 0);
}
| |
static int
type_set_doc(PyTypeObject *type, PyObject *value, void *context)
{
    /* Setter for type.__doc__ (mutable types only). */
    if (!check_set_special_type_attr(type, value, "__doc__"))
        return -1;
    PyType_Modified(type);
    PyObject *dict = lookup_tp_dict(type);
    return PyDict_SetItem(dict, &_Py_ID(__doc__), value);
}
| |
static PyObject *
type_get_annotate(PyTypeObject *type, void *Py_UNUSED(ignored))
{
    /* Getter for type.__annotate__ (heap types only).  Invokes the
       descriptor protocol on a stored value; when absent, caches and
       returns None. */
    if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
        PyErr_Format(PyExc_AttributeError, "type object '%s' has no attribute '__annotate__'", type->tp_name);
        return NULL;
    }

    PyObject *annotate;
    PyObject *dict = PyType_GetDict(type);
    if (PyDict_GetItemRef(dict, &_Py_ID(__annotate__), &annotate) < 0) {
        Py_DECREF(dict);
        return NULL;
    }
    if (annotate) {
        descrgetfunc get = Py_TYPE(annotate)->tp_descr_get;
        if (get) {
            Py_SETREF(annotate, get(annotate, NULL, (PyObject *)type));
        }
    }
    else {
        // Not set yet: store None so later lookups hit the dict.
        annotate = Py_None;
        int result = PyDict_SetItem(dict, &_Py_ID(__annotate__), annotate);
        if (result < 0) {
            Py_DECREF(dict);
            return NULL;
        }
    }
    Py_DECREF(dict);
    return annotate;
}
| |
static int
type_set_annotate(PyTypeObject *type, PyObject *value, void *Py_UNUSED(ignored))
{
    /* Setter for type.__annotate__: must be a callable or None, and
       cannot be deleted.  Setting a real annotate function discards the
       cached __annotations__ so the new function will be consulted. */
    if (value == NULL) {
        PyErr_SetString(PyExc_TypeError, "cannot delete __annotate__ attribute");
        return -1;
    }
    if (_PyType_HasFeature(type, Py_TPFLAGS_IMMUTABLETYPE)) {
        PyErr_Format(PyExc_TypeError,
                     "cannot set '__annotate__' attribute of immutable type '%s'",
                     type->tp_name);
        return -1;
    }

    if (!Py_IsNone(value) && !PyCallable_Check(value)) {
        PyErr_SetString(PyExc_TypeError, "__annotate__ must be callable or None");
        return -1;
    }

    PyObject *dict = PyType_GetDict(type);
    assert(PyDict_Check(dict));
    int result = PyDict_SetItem(dict, &_Py_ID(__annotate__), value);
    if (result < 0) {
        Py_DECREF(dict);
        return -1;
    }
    if (!Py_IsNone(value)) {
        // Drop stale cached annotations.
        if (PyDict_Pop(dict, &_Py_ID(__annotations__), NULL) == -1) {
            Py_DECREF(dict);
            PyType_Modified(type);
            return -1;
        }
    }
    Py_DECREF(dict);
    PyType_Modified(type);
    return 0;
}
| |
static PyObject *
type_get_annotations(PyTypeObject *type, void *context)
{
    /* Getter for type.__annotations__ (heap types only).  Uses the
       cached dict entry when present; otherwise materializes it by
       calling __annotate__(1) (or an empty dict when there is no
       callable annotate), and caches the result in the type dict. */
    if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
        PyErr_Format(PyExc_AttributeError, "type object '%s' has no attribute '__annotations__'", type->tp_name);
        return NULL;
    }

    PyObject *annotations;
    PyObject *dict = PyType_GetDict(type);
    if (PyDict_GetItemRef(dict, &_Py_ID(__annotations__), &annotations) < 0) {
        Py_DECREF(dict);
        return NULL;
    }
    if (annotations) {
        descrgetfunc get = Py_TYPE(annotations)->tp_descr_get;
        if (get) {
            Py_SETREF(annotations, get(annotations, NULL, (PyObject *)type));
        }
    }
    else {
        PyObject *annotate = type_get_annotate(type, NULL);
        if (annotate == NULL) {
            Py_DECREF(dict);
            return NULL;
        }
        if (PyCallable_Check(annotate)) {
            // Call __annotate__(1); the result must be a dict.
            PyObject *one = _PyLong_GetOne();
            annotations = _PyObject_CallOneArg(annotate, one);
            if (annotations == NULL) {
                Py_DECREF(dict);
                Py_DECREF(annotate);
                return NULL;
            }
            if (!PyDict_Check(annotations)) {
                PyErr_Format(PyExc_TypeError, "__annotate__ returned non-dict of type '%.100s'",
                             Py_TYPE(annotations)->tp_name);
                Py_DECREF(annotations);
                Py_DECREF(annotate);
                Py_DECREF(dict);
                return NULL;
            }
        }
        else {
            annotations = PyDict_New();
        }
        Py_DECREF(annotate);
        if (annotations) {
            // Cache the computed annotations in the type dict.
            int result = PyDict_SetItem(
                    dict, &_Py_ID(__annotations__), annotations);
            if (result) {
                Py_CLEAR(annotations);
            } else {
                PyType_Modified(type);
            }
        }
    }
    Py_DECREF(dict);
    return annotations;
}
| |
static int
type_set_annotations(PyTypeObject *type, PyObject *value, void *context)
{
    /* Setter for type.__annotations__.  Deleting also removes the
       cached __annotate__ entry so stale lazy annotations cannot be
       re-materialized. */
    if (_PyType_HasFeature(type, Py_TPFLAGS_IMMUTABLETYPE)) {
        PyErr_Format(PyExc_TypeError,
                     "cannot set '__annotations__' attribute of immutable type '%s'",
                     type->tp_name);
        return -1;
    }

    int result;
    PyObject *dict = PyType_GetDict(type);
    if (value != NULL) {
        /* set */
        result = PyDict_SetItem(dict, &_Py_ID(__annotations__), value);
    } else {
        /* delete */
        result = PyDict_Pop(dict, &_Py_ID(__annotations__), NULL);
        if (result == 0) {
            // Nothing was stored; deletion fails with AttributeError.
            PyErr_SetString(PyExc_AttributeError, "__annotations__");
            Py_DECREF(dict);
            return -1;
        }
    }
    if (result < 0) {
        Py_DECREF(dict);
        return -1;
    }
    else if (result == 0) {
        if (PyDict_Pop(dict, &_Py_ID(__annotate__), NULL) < 0) {
            PyType_Modified(type);
            Py_DECREF(dict);
            return -1;
        }
    }
    PyType_Modified(type);
    Py_DECREF(dict);
    return 0;
}
| |
| static PyObject * |
| type_get_type_params(PyTypeObject *type, void *context) |
| { |
| if (type == &PyType_Type) { |
| return PyTuple_New(0); |
| } |
| |
| PyObject *params; |
| if (PyDict_GetItemRef(lookup_tp_dict(type), &_Py_ID(__type_params__), ¶ms) == 0) { |
| return PyTuple_New(0); |
| } |
| return params; |
| } |
| |
| static int |
| type_set_type_params(PyTypeObject *type, PyObject *value, void *context) |
| { |
| if (!check_set_special_type_attr(type, value, "__type_params__")) { |
| return -1; |
| } |
| |
| PyObject *dict = lookup_tp_dict(type); |
| int result = PyDict_SetItem(dict, &_Py_ID(__type_params__), value); |
| |
| if (result == 0) { |
| PyType_Modified(type); |
| } |
| return result; |
| } |
| |
| |
| /*[clinic input] |
| type.__instancecheck__ -> bool |
| |
| instance: object |
| / |
| |
| Check if an object is an instance. |
| [clinic start generated code]*/ |
| |
static int
type___instancecheck___impl(PyTypeObject *self, PyObject *instance)
/*[clinic end generated code: output=08b6bf5f591c3618 input=cdbfeaee82c01a0f]*/
{
    /* Implements type.__instancecheck__ via the built-in check. */
    return _PyObject_RealIsInstance(instance, (PyObject *)self);
}
| |
| /*[clinic input] |
| type.__subclasscheck__ -> bool |
| |
| subclass: object |
| / |
| |
| Check if a class is a subclass. |
| [clinic start generated code]*/ |
| |
static int
type___subclasscheck___impl(PyTypeObject *self, PyObject *subclass)
/*[clinic end generated code: output=97a4e51694500941 input=071b2ca9e03355f4]*/
{
    /* Implements type.__subclasscheck__ via the built-in check. */
    return _PyObject_RealIsSubclass(subclass, (PyObject *)self);
}
| |
| |
/* Computed attributes of type objects; entries without a setter are
   read-only.  See the individual type_get_*/type_set_* functions. */
static PyGetSetDef type_getsets[] = {
    {"__name__", (getter)type_name, (setter)type_set_name, NULL},
    {"__qualname__", (getter)type_qualname, (setter)type_set_qualname, NULL},
    {"__bases__", (getter)type_get_bases, (setter)type_set_bases, NULL},
    {"__mro__", (getter)type_get_mro, NULL, NULL},
    {"__module__", (getter)type_get_module, (setter)type_set_module, NULL},
    {"__abstractmethods__", (getter)type_abstractmethods,
     (setter)type_set_abstractmethods, NULL},
    {"__dict__",  (getter)type_dict,  NULL, NULL},
    {"__doc__", (getter)type_get_doc, (setter)type_set_doc, NULL},
    {"__text_signature__", (getter)type_get_text_signature, NULL, NULL},
    {"__annotations__", (getter)type_get_annotations, (setter)type_set_annotations, NULL},
    {"__annotate__", (getter)type_get_annotate, (setter)type_set_annotate, NULL},
    {"__type_params__", (getter)type_get_type_params, (setter)type_set_type_params, NULL},
    {0}
};
| |
static PyObject *
type_repr(PyObject *self)
{
    /* repr() of a type: "<class 'module.qualname'>", dropping the
       module part for builtins or when it is unavailable. */
    PyTypeObject *type = (PyTypeObject *)self;
    if (type->tp_name == NULL) {
        // type_repr() called before the type is fully initialized
        // by PyType_Ready().
        return PyUnicode_FromFormat("<class at %p>", type);
    }

    PyObject *mod = type_module(type);
    if (mod == NULL) {
        // A missing __module__ is not fatal for repr().
        PyErr_Clear();
    }
    else if (!PyUnicode_Check(mod)) {
        Py_CLEAR(mod);
    }

    PyObject *name = type_qualname(type, NULL);
    if (name == NULL) {
        Py_XDECREF(mod);
        return NULL;
    }

    PyObject *result;
    if (mod != NULL && !_PyUnicode_Equal(mod, &_Py_ID(builtins))) {
        result = PyUnicode_FromFormat("<class '%U.%U'>", mod, name);
    }
    else {
        result = PyUnicode_FromFormat("<class '%s'>", type->tp_name);
    }
    Py_XDECREF(mod);
    Py_DECREF(name);

    return result;
}
| |
static PyObject *
type_call(PyObject *self, PyObject *args, PyObject *kwds)
{
    /* Calling a type: run tp_new, then tp_init on the result when the
       result is actually an instance of the type. */
    PyTypeObject *type = (PyTypeObject *)self;
    PyObject *obj;
    PyThreadState *tstate = _PyThreadState_GET();

#ifdef Py_DEBUG
    /* type_call() must not be called with an exception set,
       because it can clear it (directly or indirectly) and so the
       caller loses its exception */
    assert(!_PyErr_Occurred(tstate));
#endif

    /* Special case: type(x) should return Py_TYPE(x) */
    /* We only want type itself to accept the one-argument form (#27157) */
    if (type == &PyType_Type) {
        assert(args != NULL && PyTuple_Check(args));
        assert(kwds == NULL || PyDict_Check(kwds));
        Py_ssize_t nargs = PyTuple_GET_SIZE(args);

        if (nargs == 1 && (kwds == NULL || !PyDict_GET_SIZE(kwds))) {
            obj = (PyObject *) Py_TYPE(PyTuple_GET_ITEM(args, 0));
            return Py_NewRef(obj);
        }

        /* SF bug 475327 -- if that didn't trigger, we need 3
           arguments.  But PyArg_ParseTuple in type_new may give
           a msg saying type() needs exactly 3. */
        if (nargs != 3) {
            PyErr_SetString(PyExc_TypeError,
                            "type() takes 1 or 3 arguments");
            return NULL;
        }
    }

    if (type->tp_new == NULL) {
        _PyErr_Format(tstate, PyExc_TypeError,
                      "cannot create '%s' instances", type->tp_name);
        return NULL;
    }

    obj = type->tp_new(type, args, kwds);
    obj = _Py_CheckFunctionResult(tstate, (PyObject*)type, obj, NULL);
    if (obj == NULL)
        return NULL;

    /* If the returned object is not an instance of type,
       it won't be initialized. */
    if (!PyObject_TypeCheck(obj, type))
        return obj;

    // Use the actual type of the new object, which may be a subclass.
    type = Py_TYPE(obj);
    if (type->tp_init != NULL) {
        int res = type->tp_init(obj, args, kwds);
        if (res < 0) {
            assert(_PyErr_Occurred(tstate));
            // Initialization failed: drop the half-built object.
            Py_SETREF(obj, NULL);
        }
        else {
            assert(!_PyErr_Occurred(tstate));
        }
    }
    return obj;
}
| |
PyObject *
_PyType_NewManagedObject(PyTypeObject *type)
{
    /* Fast-path allocator for GC heap types with inline values and
       default tp_new/tp_alloc; the asserts pin those preconditions. */
    assert(type->tp_flags & Py_TPFLAGS_INLINE_VALUES);
    assert(_PyType_IS_GC(type));
    assert(type->tp_new == PyBaseObject_Type.tp_new);
    assert(type->tp_alloc == PyType_GenericAlloc);
    assert(type->tp_itemsize == 0);
    PyObject *obj = PyType_GenericAlloc(type, 0);
    if (obj == NULL) {
        return PyErr_NoMemory();
    }
    return obj;
}
| |
PyObject *
_PyType_AllocNoTrack(PyTypeObject *type, Py_ssize_t nitems)
{
    /* Allocate and zero-initialize an instance of 'type' with 'nitems'
       variable items, WITHOUT starting GC tracking (the caller, e.g.
       PyType_GenericAlloc, does that). */
    PyObject *obj;
    /* The +1 on nitems is needed for most types but not all.  We could save a
     * bit of space by allocating one less item in certain cases, depending on
     * the type.  However, given the extra complexity (e.g. an additional type
     * flag to indicate when that is safe) it does not seem worth the memory
     * savings.  An example type that doesn't need the +1 is a subclass of
     * tuple.  See GH-100659 and GH-81381. */
    size_t size = _PyObject_VAR_SIZE(type, nitems+1);

    const size_t presize = _PyType_PreHeaderSize(type);
    if (type->tp_flags & Py_TPFLAGS_INLINE_VALUES) {
        assert(type->tp_itemsize == 0);
        size += _PyInlineValuesSize(type);
    }
    char *alloc = _PyObject_MallocWithType(type, size + presize);
    if (alloc == NULL) {
        return PyErr_NoMemory();
    }
    // The object proper starts after the pre-header.
    obj = (PyObject *)(alloc + presize);
    if (presize) {
        // Clear the two pointer-sized slots of the pre-header.
        ((PyObject **)alloc)[0] = NULL;
        ((PyObject **)alloc)[1] = NULL;
    }
    if (PyType_IS_GC(type)) {
        _PyObject_GC_Link(obj);
    }
    memset(obj, '\0', size);

    if (type->tp_itemsize == 0) {
        _PyObject_Init(obj, type);
    }
    else {
        _PyObject_InitVar((PyVarObject *)obj, type, nitems);
    }
    if (type->tp_flags & Py_TPFLAGS_INLINE_VALUES) {
        _PyObject_InitInlineValues(obj, type);
    }
    return obj;
}
| |
| PyObject * |
| PyType_GenericAlloc(PyTypeObject *type, Py_ssize_t nitems) |
| { |
| PyObject *obj = _PyType_AllocNoTrack(type, nitems); |
| if (obj == NULL) { |
| return NULL; |
| } |
| |
| if (_PyType_IS_GC(type)) { |
| _PyObject_GC_TRACK(obj); |
| } |
| return obj; |
| } |
| |
PyObject *
PyType_GenericNew(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    /* Default tp_new: ignores 'args' and 'kwds' and simply allocates a
       new, empty instance via the type's tp_alloc slot. */
    return type->tp_alloc(type, 0);
}
| |
| /* Helpers for subtyping */ |
| |
/* Return the PyMemberDef array (slot descriptors) stored in the heap
   type's item data; its length is Py_SIZE(type). */
static inline PyMemberDef *
_PyHeapType_GET_MEMBERS(PyHeapTypeObject* type)
{
    return PyObject_GetItemData((PyObject *)type);
}
| |
| static int |
| traverse_slots(PyTypeObject *type, PyObject *self, visitproc visit, void *arg) |
| { |
| Py_ssize_t i, n; |
| PyMemberDef *mp; |
| |
| n = Py_SIZE(type); |
| mp = _PyHeapType_GET_MEMBERS((PyHeapTypeObject *)type); |
| for (i = 0; i < n; i++, mp++) { |
| if (mp->type == Py_T_OBJECT_EX) { |
| char *addr = (char *)self + mp->offset; |
| PyObject *obj = *(PyObject **)addr; |
| if (obj != NULL) { |
| int err = visit(obj, arg); |
| if (err) |
| return err; |
| } |
| } |
| } |
| return 0; |
| } |
| |
/* Generic tp_traverse for instances of heap subtypes: visits object-valued
   __slots__, the instance dict (managed or at tp_dictoffset), the heap
   type itself, and finally delegates to the nearest base whose
   tp_traverse differs. */
static int
subtype_traverse(PyObject *self, visitproc visit, void *arg)
{
    PyTypeObject *type, *base;
    traverseproc basetraverse;

    /* Find the nearest base with a different tp_traverse,
       and traverse slots while we're at it */
    type = Py_TYPE(self);
    base = type;
    while ((basetraverse = base->tp_traverse) == subtype_traverse) {
        if (Py_SIZE(base)) {
            int err = traverse_slots(base, self, visit, arg);
            if (err)
                return err;
        }
        base = base->tp_base;
        assert(base);
    }

    /* This type added an instance dict that 'base' does not have. */
    if (type->tp_dictoffset != base->tp_dictoffset) {
        assert(base->tp_dictoffset == 0);
        if (type->tp_flags & Py_TPFLAGS_MANAGED_DICT) {
            assert(type->tp_dictoffset == -1);
            int err = PyObject_VisitManagedDict(self, visit, arg);
            if (err) {
                return err;
            }
        }
        else {
            PyObject **dictptr = _PyObject_ComputedDictPointer(self);
            if (dictptr && *dictptr) {
                Py_VISIT(*dictptr);
            }
        }
    }

    if (type->tp_flags & Py_TPFLAGS_HEAPTYPE
        && (!basetraverse || !(base->tp_flags & Py_TPFLAGS_HEAPTYPE))) {
        /* For a heaptype, the instances count as references
           to the type. Traverse the type so the collector
           can find cycles involving this link.
           Skip this visit if basetraverse belongs to a heap type: in that
           case, basetraverse will visit the type when we call it later.
           */
        Py_VISIT(type);
    }

    if (basetraverse)
        return basetraverse(self, visit, arg);
    return 0;
}
| |
| static void |
| clear_slots(PyTypeObject *type, PyObject *self) |
| { |
| Py_ssize_t i, n; |
| PyMemberDef *mp; |
| |
| n = Py_SIZE(type); |
| mp = _PyHeapType_GET_MEMBERS((PyHeapTypeObject *)type); |
| for (i = 0; i < n; i++, mp++) { |
| if (mp->type == Py_T_OBJECT_EX && !(mp->flags & Py_READONLY)) { |
| char *addr = (char *)self + mp->offset; |
| PyObject *obj = *(PyObject **)addr; |
| if (obj != NULL) { |
| *(PyObject **)addr = NULL; |
| Py_DECREF(obj); |
| } |
| } |
| } |
| } |
| |
/* Generic tp_clear for instances of heap subtypes: clears object-valued
   __slots__ and the instance dict added by this type, then delegates to
   the nearest base whose tp_clear differs. */
static int
subtype_clear(PyObject *self)
{
    PyTypeObject *type, *base;
    inquiry baseclear;

    /* Find the nearest base with a different tp_clear
       and clear slots while we're at it */
    type = Py_TYPE(self);
    base = type;
    while ((baseclear = base->tp_clear) == subtype_clear) {
        if (Py_SIZE(base))
            clear_slots(base, self);
        base = base->tp_base;
        assert(base);
    }

    /* Clear the instance dict (if any), to break cycles involving only
       __dict__ slots (as in the case 'self.__dict__ is self'). */
    if (type->tp_flags & Py_TPFLAGS_MANAGED_DICT) {
        /* Only clear the dict here if the base does not manage it too;
           otherwise baseclear (called below) is responsible for it. */
        if ((base->tp_flags & Py_TPFLAGS_MANAGED_DICT) == 0) {
            PyObject_ClearManagedDict(self);
        }
        else {
            assert((base->tp_flags & Py_TPFLAGS_INLINE_VALUES) ==
                   (type->tp_flags & Py_TPFLAGS_INLINE_VALUES));
        }
    }
    else if (type->tp_dictoffset != base->tp_dictoffset) {
        PyObject **dictptr = _PyObject_ComputedDictPointer(self);
        if (dictptr && *dictptr)
            Py_CLEAR(*dictptr);
    }

    if (baseclear)
        return baseclear(self);
    return 0;
}
| |
/* Generic tp_dealloc for instances of heap subtypes.  Runs the finalizer
   (tp_finalize) and legacy tp_del -- either of which may resurrect the
   object -- clears weakrefs, __slots__ and the instance dict, and then
   delegates the actual memory release to the nearest base whose
   tp_dealloc differs.  The GC-untrack/trashcan/retrack choreography is
   explained in the long comment at the end of the function. */
static void
subtype_dealloc(PyObject *self)
{
    PyTypeObject *type, *base;
    destructor basedealloc;
    int has_finalizer;

    /* Extract the type; we expect it to be a heap type */
    type = Py_TYPE(self);
    _PyObject_ASSERT((PyObject *)type, type->tp_flags & Py_TPFLAGS_HEAPTYPE);

    /* Test whether the type has GC exactly once */

    if (!_PyType_IS_GC(type)) {
        /* A non GC dynamic type allows certain simplifications:
           there's no need to call clear_slots(), or DECREF the dict,
           or clear weakrefs. */

        /* Maybe call finalizer; exit early if resurrected */
        if (type->tp_finalize) {
            if (PyObject_CallFinalizerFromDealloc(self) < 0)
                return;
        }
        if (type->tp_del) {
            type->tp_del(self);
            if (Py_REFCNT(self) > 0) {
                /* Resurrected by tp_del; abandon deallocation. */
                return;
            }
        }

        /* Find the nearest base with a different tp_dealloc */
        base = type;
        while ((basedealloc = base->tp_dealloc) == subtype_dealloc) {
            base = base->tp_base;
            assert(base);
        }

        /* Extract the type again; tp_del may have changed it */
        type = Py_TYPE(self);

        // Don't read type memory after calling basedealloc() since basedealloc()
        // can deallocate the type and free its memory.
        int type_needs_decref = (type->tp_flags & Py_TPFLAGS_HEAPTYPE
                                 && !(base->tp_flags & Py_TPFLAGS_HEAPTYPE));

        assert((type->tp_flags & Py_TPFLAGS_MANAGED_DICT) == 0);

        /* Call the base tp_dealloc() */
        assert(basedealloc);
        basedealloc(self);

        /* Can't reference self beyond this point. It's possible tp_del switched
           our type from a HEAPTYPE to a non-HEAPTYPE, so be careful about
           reference counting. Only decref if the base type is not already a heap
           allocated type. Otherwise, basedealloc should have decref'd it already */
        if (type_needs_decref) {
            _Py_DECREF_TYPE(type);
        }

        /* Done */
        return;
    }

    /* We get here only if the type has GC */

    /* UnTrack and re-Track around the trashcan macro, alas */
    /* See explanation at end of function for full disclosure */
    PyObject_GC_UnTrack(self);
    Py_TRASHCAN_BEGIN(self, subtype_dealloc);

    /* Find the nearest base with a different tp_dealloc */
    base = type;
    while ((/*basedealloc =*/ base->tp_dealloc) == subtype_dealloc) {
        base = base->tp_base;
        assert(base);
    }

    has_finalizer = type->tp_finalize || type->tp_del;

    if (type->tp_finalize) {
        /* tp_finalize expects a tracked object; retrack around the call. */
        _PyObject_GC_TRACK(self);
        if (PyObject_CallFinalizerFromDealloc(self) < 0) {
            /* Resurrected */
            goto endlabel;
        }
        _PyObject_GC_UNTRACK(self);
    }
    /*
      If we added a weaklist, we clear it. Do this *before* calling tp_del,
      clearing slots, or clearing the instance dict.

      GC tracking must be off at this point. weakref callbacks (if any, and
      whether directly here or indirectly in something we call) may trigger GC,
      and if self is tracked at that point, it will look like trash to GC and GC
      will try to delete self again.
    */
    if (type->tp_weaklistoffset && !base->tp_weaklistoffset) {
        PyObject_ClearWeakRefs(self);
    }

    if (type->tp_del) {
        _PyObject_GC_TRACK(self);
        type->tp_del(self);
        if (Py_REFCNT(self) > 0) {
            /* Resurrected */
            goto endlabel;
        }
        _PyObject_GC_UNTRACK(self);
    }
    if (has_finalizer) {
        /* New weakrefs could be created during the finalizer call.
           If this occurs, clear them out without calling their
           finalizers since they might rely on part of the object
           being finalized that has already been destroyed. */
        if (type->tp_weaklistoffset && !base->tp_weaklistoffset) {
            _PyWeakref_ClearWeakRefsNoCallbacks(self);
        }
    }

    /* Clear slots up to the nearest base with a different tp_dealloc */
    base = type;
    while ((basedealloc = base->tp_dealloc) == subtype_dealloc) {
        if (Py_SIZE(base))
            clear_slots(base, self);
        base = base->tp_base;
        assert(base);
    }

    /* If we added a dict, DECREF it, or free inline values. */
    if (type->tp_flags & Py_TPFLAGS_MANAGED_DICT) {
        PyObject_ClearManagedDict(self);
    }
    else if (type->tp_dictoffset && !base->tp_dictoffset) {
        PyObject **dictptr = _PyObject_ComputedDictPointer(self);
        if (dictptr != NULL) {
            PyObject *dict = *dictptr;
            if (dict != NULL) {
                Py_DECREF(dict);
                *dictptr = NULL;
            }
        }
    }

    /* Extract the type again; tp_del may have changed it */
    type = Py_TYPE(self);

    /* Call the base tp_dealloc(); first retrack self if
     * basedealloc knows about gc.
     */
    if (_PyType_IS_GC(base)) {
        _PyObject_GC_TRACK(self);
    }

    // Don't read type memory after calling basedealloc() since basedealloc()
    // can deallocate the type and free its memory.
    int type_needs_decref = (type->tp_flags & Py_TPFLAGS_HEAPTYPE
                             && !(base->tp_flags & Py_TPFLAGS_HEAPTYPE));

    assert(basedealloc);
    basedealloc(self);

    /* Can't reference self beyond this point. It's possible tp_del switched
       our type from a HEAPTYPE to a non-HEAPTYPE, so be careful about
       reference counting. Only decref if the base type is not already a heap
       allocated type. Otherwise, basedealloc should have decref'd it already */
    if (type_needs_decref) {
        _Py_DECREF_TYPE(type);
    }

  endlabel:
    Py_TRASHCAN_END

    /* Explanation of the weirdness around the trashcan macros:

       Q. What do the trashcan macros do?

       A. Read the comment titled "Trashcan mechanism" in object.h.
          For one, this explains why there must be a call to GC-untrack
          before the trashcan begin macro.      Without understanding the
          trashcan code, the answers to the following questions don't make
          sense.

       Q. Why do we GC-untrack before the trashcan and then immediately
          GC-track again afterward?

       A. In the case that the base class is GC-aware, the base class
          probably GC-untracks the object.      If it does that using the
          UNTRACK macro, this will crash when the object is already
          untracked.  Because we don't know what the base class does, the
          only safe thing is to make sure the object is tracked when we
          call the base class dealloc.  But...  The trashcan begin macro
          requires that the object is *untracked* before it is called.  So
          the dance becomes:

         GC untrack
         trashcan begin
         GC track

       Q. Why did the last question say "immediately GC-track again"?
          It's nowhere near immediately.

       A. Because the code *used* to re-track immediately.      Bad Idea.
          self has a refcount of 0, and if gc ever gets its hands on it
          (which can happen if any weakref callback gets invoked), it
          looks like trash to gc too, and gc also tries to delete self
          then.  But we're already deleting self.  Double deallocation is
          a subtle disaster.
    */
}
| |
| static PyTypeObject *solid_base(PyTypeObject *type); |
| |
| /* type test with subclassing support */ |
| |
| static int |
| type_is_subtype_base_chain(PyTypeObject *a, PyTypeObject *b) |
| { |
| do { |
| if (a == b) |
| return 1; |
| a = a->tp_base; |
| } while (a != NULL); |
| |
| return (b == &PyBaseObject_Type); |
| } |
| |
| static int |
| is_subtype_with_mro(PyObject *a_mro, PyTypeObject *a, PyTypeObject *b) |
| { |
| int res; |
| if (a_mro != NULL) { |
| /* Deal with multiple inheritance without recursion |
| by walking the MRO tuple */ |
| Py_ssize_t i, n; |
| assert(PyTuple_Check(a_mro)); |
| n = PyTuple_GET_SIZE(a_mro); |
| res = 0; |
| for (i = 0; i < n; i++) { |
| if (PyTuple_GET_ITEM(a_mro, i) == (PyObject *)b) { |
| res = 1; |
| break; |
| } |
| } |
| } |
| else { |
| /* a is not completely initialized yet; follow tp_base */ |
| res = type_is_subtype_base_chain(a, b); |
| } |
| return res; |
| } |
| |
int
PyType_IsSubtype(PyTypeObject *a, PyTypeObject *b)
{
    /* True iff 'b' appears in the MRO of 'a' (or in its tp_base chain
       when the MRO has not been initialized yet). */
    return is_subtype_with_mro(a->tp_mro, a, b);
}
| |
| /* Routines to do a method lookup in the type without looking in the |
| instance dictionary (so we can't use PyObject_GetAttr) but still |
| binding it to the instance. |
| |
| Variants: |
| |
| - _PyObject_LookupSpecial() returns NULL without raising an exception |
| when the _PyType_LookupRef() call fails; |
| |
| - lookup_maybe_method() and lookup_method() are internal routines similar |
     to _PyObject_LookupSpecial(), but can return an unbound PyFunction
     to avoid a temporary method object. Pass self as the first argument
     when unbound == 1.
| */ |
| |
| PyObject * |
| _PyObject_LookupSpecial(PyObject *self, PyObject *attr) |
| { |
| PyObject *res; |
| |
| res = _PyType_LookupRef(Py_TYPE(self), attr); |
| if (res != NULL) { |
| descrgetfunc f; |
| if ((f = Py_TYPE(res)->tp_descr_get) != NULL) { |
| Py_SETREF(res, f(res, self, (PyObject *)(Py_TYPE(self)))); |
| } |
| } |
| return res; |
| } |
| |
/* Steals a reference to self */
/* Look 'attr' up on the type of 'self' and prepare it for a call.
   On success, if the result is a method descriptor, it is returned
   unbound and the stolen 'self' reference is handed back through
   *self_or_null; otherwise the descriptor is bound, *self_or_null is
   set to NULL and the 'self' reference is released.  On lookup failure,
   releases 'self', sets *self_or_null to NULL and returns NULL. */
PyObject *
_PyObject_LookupSpecialMethod(PyObject *self, PyObject *attr, PyObject **self_or_null)
{
    PyObject *res;

    res = _PyType_LookupRef(Py_TYPE(self), attr);
    if (res == NULL) {
        Py_DECREF(self);
        *self_or_null = NULL;
        return NULL;
    }

    if (_PyType_HasFeature(Py_TYPE(res), Py_TPFLAGS_METHOD_DESCRIPTOR)) {
        /* Avoid temporary PyMethodObject */
        *self_or_null = self;
    }
    else {
        descrgetfunc f = Py_TYPE(res)->tp_descr_get;
        if (f != NULL) {
            Py_SETREF(res, f(res, self, (PyObject *)(Py_TYPE(self))));
        }
        *self_or_null = NULL;
        Py_DECREF(self);
    }
    return res;
}
| |
| static PyObject * |
| lookup_maybe_method(PyObject *self, PyObject *attr, int *unbound) |
| { |
| PyObject *res = _PyType_LookupRef(Py_TYPE(self), attr); |
| if (res == NULL) { |
| return NULL; |
| } |
| |
| if (_PyType_HasFeature(Py_TYPE(res), Py_TPFLAGS_METHOD_DESCRIPTOR)) { |
| /* Avoid temporary PyMethodObject */ |
| *unbound = 1; |
| } |
| else { |
| *unbound = 0; |
| descrgetfunc f = Py_TYPE(res)->tp_descr_get; |
| if (f != NULL) { |
| Py_SETREF(res, f(res, self, (PyObject *)(Py_TYPE(self)))); |
| } |
| } |
| return res; |
| } |
| |
| static PyObject * |
| lookup_method(PyObject *self, PyObject *attr, int *unbound) |
| { |
| PyObject *res = lookup_maybe_method(self, attr, unbound); |
| if (res == NULL && !PyErr_Occurred()) { |
| PyErr_SetObject(PyExc_AttributeError, attr); |
| } |
| return res; |
| } |
| |
| |
| static inline PyObject* |
| vectorcall_unbound(PyThreadState *tstate, int unbound, PyObject *func, |
| PyObject *const *args, Py_ssize_t nargs) |
| { |
| size_t nargsf = nargs; |
| if (!unbound) { |
| /* Skip self argument, freeing up args[0] to use for |
| * PY_VECTORCALL_ARGUMENTS_OFFSET */ |
| args++; |
| nargsf = nargsf - 1 + PY_VECTORCALL_ARGUMENTS_OFFSET; |
| } |
| EVAL_CALL_STAT_INC_IF_FUNCTION(EVAL_CALL_SLOT, func); |
| return _PyObject_VectorcallTstate(tstate, func, args, nargsf, NULL); |
| } |
| |
| static PyObject* |
| call_unbound_noarg(int unbound, PyObject *func, PyObject *self) |
| { |
| if (unbound) { |
| return PyObject_CallOneArg(func, self); |
| } |
| else { |
| return _PyObject_CallNoArgs(func); |
| } |
| } |
| |
| /* A variation of PyObject_CallMethod* that uses lookup_method() |
| instead of PyObject_GetAttrString(). |
| |
| args is an argument vector of length nargs. The first element in this |
| vector is the special object "self" which is used for the method lookup */ |
| static PyObject * |
| vectorcall_method(PyObject *name, PyObject *const *args, Py_ssize_t nargs) |
| { |
| assert(nargs >= 1); |
| |
| PyThreadState *tstate = _PyThreadState_GET(); |
| int unbound; |
| PyObject *self = args[0]; |
| PyObject *func = lookup_method(self, name, &unbound); |
| if (func == NULL) { |
| return NULL; |
| } |
| PyObject *retval = vectorcall_unbound(tstate, unbound, func, args, nargs); |
| Py_DECREF(func); |
| return retval; |
| } |
| |
| /* Clone of vectorcall_method() that returns NotImplemented |
| * when the lookup fails. */ |
| static PyObject * |
| vectorcall_maybe(PyThreadState *tstate, PyObject *name, |
| PyObject *const *args, Py_ssize_t nargs) |
| { |
| assert(nargs >= 1); |
| |
| int unbound; |
| PyObject *self = args[0]; |
| PyObject *func = lookup_maybe_method(self, name, &unbound); |
| if (func == NULL) { |
| if (!PyErr_Occurred()) |
| Py_RETURN_NOTIMPLEMENTED; |
| return NULL; |
| } |
| PyObject *retval = vectorcall_unbound(tstate, unbound, func, args, nargs); |
| Py_DECREF(func); |
| return retval; |
| } |
| |
| /* |
| Method resolution order algorithm C3 described in |
| "A Monotonic Superclass Linearization for Dylan", |
| by Kim Barrett, Bob Cassel, Paul Haahr, |
| David A. Moon, Keith Playford, and P. Tucker Withington. |
| (OOPSLA 1996) |
| |
| Some notes about the rules implied by C3: |
| |
| No duplicate bases. |
| It isn't legal to repeat a class in a list of base classes. |
| |
| The next three properties are the 3 constraints in "C3". |
| |
| Local precedence order. |
| If A precedes B in C's MRO, then A will precede B in the MRO of all |
| subclasses of C. |
| |
| Monotonicity. |
| The MRO of a class must be an extension without reordering of the |
| MRO of each of its superclasses. |
| |
| Extended Precedence Graph (EPG). |
| Linearization is consistent if there is a path in the EPG from |
| each class to all its successors in the linearization. See |
| the paper for definition of EPG. |
| */ |
| |
| static int |
| tail_contains(PyObject *tuple, int whence, PyObject *o) |
| { |
| Py_ssize_t j, size; |
| size = PyTuple_GET_SIZE(tuple); |
| |
| for (j = whence+1; j < size; j++) { |
| if (PyTuple_GET_ITEM(tuple, j) == o) |
| return 1; |
| } |
| return 0; |
| } |
| |
/* Best-effort name of 'cls' for error messages: its __name__ attribute
   if present, otherwise its repr.  May return NULL with an exception
   set on failure. */
static PyObject *
class_name(PyObject *cls)
{
    PyObject *name;
    if (PyObject_GetOptionalAttr(cls, &_Py_ID(__name__), &name) == 0) {
        name = PyObject_Repr(cls);
    }
    return name;
}
| |
| static int |
| check_duplicates(PyObject *tuple) |
| { |
| Py_ssize_t i, j, n; |
| /* Let's use a quadratic time algorithm, |
| assuming that the bases tuples is short. |
| */ |
| n = PyTuple_GET_SIZE(tuple); |
| for (i = 0; i < n; i++) { |
| PyObject *o = PyTuple_GET_ITEM(tuple, i); |
| for (j = i + 1; j < n; j++) { |
| if (PyTuple_GET_ITEM(tuple, j) == o) { |
| o = class_name(o); |
| if (o != NULL) { |
| if (PyUnicode_Check(o)) { |
| PyErr_Format(PyExc_TypeError, |
| "duplicate base class %U", o); |
| } |
| else { |
| PyErr_SetString(PyExc_TypeError, |
| "duplicate base class"); |
| } |
| Py_DECREF(o); |
| } |
| return -1; |
| } |
| } |
| } |
| return 0; |
| } |
| |
| /* Raise a TypeError for an MRO order disagreement. |
| |
| It's hard to produce a good error message. In the absence of better |
| insight into error reporting, report the classes that were candidates |
| to be put next into the MRO. There is some conflict between the |
| order in which they should be put in the MRO, but it's hard to |
| diagnose what constraint can't be satisfied. |
| */ |
| |
static void
set_mro_error(PyObject **to_merge, Py_ssize_t to_merge_size, int *remain)
{
    Py_ssize_t i, n, off;
    char buf[1000];
    PyObject *k, *v;
    /* 'set' is a dict used as an insertion-ordered set of the candidate
       classes (values are all Py_None and ignored). */
    PyObject *set = PyDict_New();
    if (!set) return;

    /* Collect the head class of every not-yet-exhausted linearization. */
    for (i = 0; i < to_merge_size; i++) {
        PyObject *L = to_merge[i];
        if (remain[i] < PyTuple_GET_SIZE(L)) {
            PyObject *c = PyTuple_GET_ITEM(L, remain[i]);
            if (PyDict_SetItem(set, c, Py_None) < 0) {
                Py_DECREF(set);
                return;
            }
        }
    }
    n = PyDict_GET_SIZE(set);

    off = PyOS_snprintf(buf, sizeof(buf), "Cannot create a \
consistent method resolution order (MRO) for bases");
    /* Append each candidate's name, comma-separated, while it still
       fits in 'buf'. */
    i = 0;
    while (PyDict_Next(set, &i, &k, &v) && (size_t)off < sizeof(buf)) {
        PyObject *name = class_name(k);
        const char *name_str = NULL;
        if (name != NULL) {
            if (PyUnicode_Check(name)) {
                name_str = PyUnicode_AsUTF8(name);
            }
            else {
                name_str = "?";
            }
        }
        if (name_str == NULL) {
            Py_XDECREF(name);
            Py_DECREF(set);
            return;
        }
        off += PyOS_snprintf(buf + off, sizeof(buf) - off, " %s", name_str);
        Py_XDECREF(name);
        if (--n && (size_t)(off+1) < sizeof(buf)) {
            buf[off++] = ',';
            buf[off] = '\0';
        }
    }
    PyErr_SetString(PyExc_TypeError, buf);
    Py_DECREF(set);
}
| |
| static int |
| pmerge(PyObject *acc, PyObject **to_merge, Py_ssize_t to_merge_size) |
| { |
| int res = 0; |
| Py_ssize_t i, j, empty_cnt; |
| int *remain; |
| |
| /* remain stores an index into each sublist of to_merge. |
| remain[i] is the index of the next base in to_merge[i] |
| that is not included in acc. |
| */ |
| remain = PyMem_New(int, to_merge_size); |
| if (remain == NULL) { |
| PyErr_NoMemory(); |
| return -1; |
| } |
| for (i = 0; i < to_merge_size; i++) |
| remain[i] = 0; |
|