Key Data Structures
/// Sharded storage for weak-reference bookkeeping: StripedMap<T> hashes an
/// object address to one of StripeCount cache-line-padded T slots, so
/// unrelated objects contend on different locks.
template<typename T>
class StripedMap {
    enum { CacheLineSize = 64 };

#if TARGET_OS_EMBEDDED
    enum { StripeCount = 8 };
#else
    enum { StripeCount = 64 };
#endif

    struct PaddedT {
        T value alignas(CacheLineSize);
    };

    PaddedT array[StripeCount];

    static unsigned int indexForPointer(const void *p) {
        uintptr_t addr = reinterpret_cast<uintptr_t>(p);
        return ((addr >> 4) ^ (addr >> 9)) % StripeCount;
    }

 public:
    T& operator[] (const void *p) {
        return array[indexForPointer(p)].value;
    }
    const T& operator[] (const void *p) const {
        return const_cast<StripedMap<T>&>(*this)[p];
    }

#if DEBUG
    StripedMap() {
        // Verify alignment expectations.
        uintptr_t base = (uintptr_t)&array[0].value;
        uintptr_t delta = (uintptr_t)&array[1].value - base;
        assert(delta % CacheLineSize == 0);
        assert(base % CacheLineSize == 0);
    }
#endif
};
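To see the striping in action, here is a minimal standalone sketch (not objc4 code; the constants and hash mirror the non-embedded branch above) that hashes a run of simulated 16-byte-aligned object addresses onto stripes:

#include <cstdint>
#include <cstdio>

enum { StripeCount = 64 };  // matches the non-embedded branch above

// Same hash as StripedMap::indexForPointer: mixing two shifted copies of the
// address keeps allocations that share low bits (e.g. 16-byte-aligned heap
// objects) from all landing on one stripe.
static unsigned int indexForPointer(const void *p) {
    uintptr_t addr = reinterpret_cast<uintptr_t>(p);
    return ((addr >> 4) ^ (addr >> 9)) % StripeCount;
}

int main() {
    // Simulate a run of 16-byte-aligned heap addresses.
    for (uintptr_t addr = 0x10000; addr < 0x10000 + 16 * 8; addr += 16) {
        const void *p = reinterpret_cast<const void *>(addr);
        printf("%p -> stripe %u\n", p, indexForPointer(p));
    }
    return 0;
}

Adjacent addresses spread across different stripes, which is exactly what lets two threads touching different objects take different SideTable locks.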
struct SideTable {
    spinlock_t slock;
    RefcountMap refcnts;
    weak_table_t weak_table;

    SideTable() {
        memset(&weak_table, 0, sizeof(weak_table));
    }

    ~SideTable() {
        _objc_fatal("Do not delete SideTable.");
    }

    void lock() { slock.lock(); }
    void unlock() { slock.unlock(); }
    bool trylock() { return slock.trylock(); }

    // Address-ordered lock discipline for a pair of side tables.
    template<bool HaveOld, bool HaveNew>
    static void lockTwo(SideTable *lock1, SideTable *lock2);
    template<bool HaveOld, bool HaveNew>
    static void unlockTwo(SideTable *lock1, SideTable *lock2);
};
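The lockTwo/unlockTwo bodies live elsewhere in the runtime; a sketch of the address-ordered discipline they implement (an illustration consistent with the comment above, not the verbatim objc4 source) looks like this:

#include <utility>  // std::swap

template <bool HaveOld, bool HaveNew>
void SideTable::lockTwo(SideTable *lock1, SideTable *lock2) {
    if (HaveOld && HaveNew) {
        if (lock1 == lock2) { lock1->lock(); return; }  // same stripe: one lock
        if (lock1 > lock2) std::swap(lock1, lock2);     // always ascending address
        lock1->lock();
        lock2->lock();
    } else if (HaveOld) {
        lock1->lock();   // lock1 is the old object's table
    } else {
        lock2->lock();   // lock2 is the new object's table
    }
}

template <bool HaveOld, bool HaveNew>
void SideTable::unlockTwo(SideTable *lock1, SideTable *lock2) {
    if (HaveOld && HaveNew) {
        lock1->unlock();
        if (lock1 != lock2) lock2->unlock();
    } else if (HaveOld) {
        lock1->unlock();
    } else {
        lock2->unlock();
    }
}

Because every thread acquires a pair in ascending address order, two threads storing between the same two objects can never each hold one lock while waiting for the other.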
/**
* The global weak references table. Stores object ids as keys,
* and weak_entry_t structs as their values.
*/
struct weak_table_t {
    weak_entry_t *weak_entries;       // hash array of entries, one per weakly referenced object
    size_t num_entries;               // number of entries currently in use
    uintptr_t mask;                   // capacity - 1; capacity is a power of two
    uintptr_t max_hash_displacement;  // longest linear-probe run any insert ever needed
};
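mask and max_hash_displacement drive an open-addressing lookup. This sketch of the probe loop (patterned on objc4's weak_entry_for_referent; hash_pointer is the runtime's pointer hash) shows how the two fields interact:

static weak_entry_t *
weak_entry_for_referent(weak_table_t *weak_table, objc_object *referent)
{
    weak_entry_t *entries = weak_table->weak_entries;
    if (!entries) return nil;

    // mask == capacity - 1 for a power-of-two table, so & wraps the index.
    size_t index = hash_pointer(referent) & weak_table->mask;
    size_t hash_displacement = 0;
    while (entries[index].referent != referent) {
        index = (index + 1) & weak_table->mask;  // linear probing
        // No insert ever probed farther than max_hash_displacement,
        // so a longer walk means the entry is not present.
        if (++hash_displacement > weak_table->max_hash_displacement) {
            return nil;
        }
    }
    return &entries[index];
}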
Core Code
id objc_storeWeak(id *location, id newObj)
{
    return storeWeak<true/*old*/, true/*new*/, true/*crash*/>
        (location, (objc_object *)newObj);
}

template <bool HaveOld, bool HaveNew, bool CrashIfDeallocating>
static id
storeWeak(id *location, objc_object *newObj)
{
    assert(HaveOld || HaveNew);
    if (!HaveNew) assert(newObj == nil);

    Class previouslyInitializedClass = nil;
    id oldObj;
    SideTable *oldTable;
    SideTable *newTable;

    // Acquire locks for old and new values.
    // Order by lock address to prevent lock ordering problems.
    // Retry if the old value changes underneath us.
 retry:
    if (HaveOld) {
        oldObj = *location;
        oldTable = &SideTables()[oldObj];
    } else {
        oldTable = nil;
    }
    if (HaveNew) {
        newTable = &SideTables()[newObj];
    } else {
        newTable = nil;
    }

    SideTable::lockTwo<HaveOld, HaveNew>(oldTable, newTable);

    if (HaveOld && *location != oldObj) {
        SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
        goto retry;
    }

    // Prevent a deadlock between the weak reference machinery
    // and the +initialize machinery by ensuring that no
    // weakly-referenced object has an un-+initialized isa.
    if (HaveNew && newObj) {
        Class cls = newObj->getIsa();
        if (cls != previouslyInitializedClass &&
            !((objc_class *)cls)->isInitialized())
        {
            SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);
            _class_initialize(_class_getNonMetaClass(cls, (id)newObj));

            // If this class is finished with +initialize then we're good.
            // If this class is still running +initialize on this thread
            // (i.e. +initialize called storeWeak on an instance of itself)
            // then we may proceed but it will appear initializing and
            // not yet initialized to the check above.
            // Instead set previouslyInitializedClass to recognize it on retry.
            previouslyInitializedClass = cls;

            goto retry;
        }
    }

    // Clean up old value, if any.
    if (HaveOld) {
        weak_unregister_no_lock(&oldTable->weak_table, oldObj, location);
    }

    // Assign new value, if any.
    if (HaveNew) {
        newObj = (objc_object *)weak_register_no_lock(&newTable->weak_table,
                                                      (id)newObj, location,
                                                      CrashIfDeallocating);
        // weak_register_no_lock returns nil if weak store should be rejected

        // Set is-weakly-referenced bit in refcount table.
        if (newObj && !newObj->isTaggedPointer()) {
            newObj->setWeaklyReferenced_nolock();
        }

        // Do not set *location anywhere else. That would introduce a race.
        *location = (id)newObj;
    }
    else {
        // No new value. The storage is not changed.
    }

    SideTable::unlockTwo<HaveOld, HaveNew>(oldTable, newTable);

    return (id)newObj;
}
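objc_storeWeak is only one instantiation of the template. For contrast, the other compiler-visible entry points in a __weak variable's lifetime instantiate it with different HaveOld/HaveNew flags; in the objc4 vintage this code comes from they look essentially like this:

// First store into uninitialized weak storage: no old value to unregister.
id objc_initWeak(id *location, id newObj)
{
    if (!newObj) {
        *location = nil;
        return nil;
    }
    return storeWeak<false/*old*/, true/*new*/, true/*crash*/>
        (location, (objc_object *)newObj);
}

// Weak storage going out of scope: unregister the old value, store nothing new.
void objc_destroyWeak(id *location)
{
    (void)storeWeak<true/*old*/, false/*new*/, false/*crash*/>
        (location, nil);
}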
Weak Reference Unregistration
/**
* Unregister an already-registered weak reference.
* This is used when referrer's storage is about to go away, but referent
* isn't dead yet. (Otherwise, zeroing referrer later would be a
* bad memory access.)
* Does nothing if referent/referrer is not a currently active weak reference.
* Does not zero referrer.
*
* FIXME currently requires old referent value to be passed in (lame)
* FIXME unregistration should be automatic if referrer is collected
*
* @param weak_table The global weak table.
* @param referent The object.
* @param referrer The weak reference.
*/
void
weak_unregister_no_lock(weak_table_t *weak_table, id referent_id,
                        id *referrer_id)
{
    objc_object *referent = (objc_object *)referent_id;
    objc_object **referrer = (objc_object **)referrer_id;

    weak_entry_t *entry;

    if (!referent) return;

    if ((entry = weak_entry_for_referent(weak_table, referent))) {
        remove_referrer(entry, referrer);
        bool empty = true;
        if (entry->out_of_line && entry->num_refs != 0) {
            empty = false;
        }
        else {
            for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
                if (entry->inline_referrers[i]) {
                    empty = false;
                    break;
                }
            }
        }

        if (empty) {
            weak_entry_remove(weak_table, entry);
        }
    }

    // Do not set *referrer = nil. objc_storeWeak() requires that the
    // value not change.
}
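remove_referrer, called above, erases one slot from the entry. A condensed sketch of its two cases (inline scan versus out-of-line probe; patterned on objc4, with its corrupt-table error reporting omitted, and w_hash_pointer being the runtime's hash for referrer addresses):

static void remove_referrer(weak_entry_t *entry, objc_object **old_referrer)
{
    if (!entry->out_of_line) {
        // Inline case: scan the fixed slots for the matching address.
        for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
            if (entry->inline_referrers[i] == old_referrer) {
                entry->inline_referrers[i] = nil;
                return;
            }
        }
        return;  // objc4 reports a corrupt weak table here
    }

    // Out-of-line case: probe the entry's own hash array, using the same
    // mask/max_hash_displacement discipline as the outer weak table.
    size_t index = w_hash_pointer(old_referrer) & (entry->mask);
    size_t hash_displacement = 0;
    while (entry->referrers[index] != old_referrer) {
        index = (index + 1) & entry->mask;
        if (++hash_displacement > entry->max_hash_displacement) {
            return;  // objc4 reports a corrupt weak table here
        }
    }
    entry->referrers[index] = nil;
    entry->num_refs--;
}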
Weak Reference Registration
/**
* Registers a new (object, weak pointer) pair. Creates a new weak
* object entry if it does not exist.
*
* @param weak_table The global weak table.
* @param referent The object pointed to by the weak reference.
* @param referrer The weak pointer address.
*/
id
weak_register_no_lock(weak_table_t *weak_table, id referent_id,
                      id *referrer_id, bool crashIfDeallocating)
{
    objc_object *referent = (objc_object *)referent_id;
    objc_object **referrer = (objc_object **)referrer_id;

    if (!referent || referent->isTaggedPointer()) return referent_id;

    // ensure that the referenced object is viable
    bool deallocating;
    if (!referent->ISA()->hasCustomRR()) {
        deallocating = referent->rootIsDeallocating();
    }
    else {
        BOOL (*allowsWeakReference)(objc_object *, SEL) =
            (BOOL(*)(objc_object *, SEL))
            object_getMethodImplementation((id)referent,
                                           SEL_allowsWeakReference);
        if ((IMP)allowsWeakReference == _objc_msgForward) {
            return nil;
        }
        deallocating =
            ! (*allowsWeakReference)(referent, SEL_allowsWeakReference);
    }

    if (deallocating) {
        if (crashIfDeallocating) {
            _objc_fatal("Cannot form weak reference to instance (%p) of "
                        "class %s. It is possible that this object was "
                        "over-released, or is in the process of deallocation.",
                        (void*)referent, object_getClassName((id)referent));
        } else {
            return nil;
        }
    }

    // now remember it and where it is being stored
    weak_entry_t *entry;
    if ((entry = weak_entry_for_referent(weak_table, referent))) {
        append_referrer(entry, referrer);
    }
    else {
        weak_entry_t new_entry;
        new_entry.referent = referent;
        new_entry.out_of_line = 0;
        new_entry.inline_referrers[0] = referrer;
        for (size_t i = 1; i < WEAK_INLINE_COUNT; i++) {
            new_entry.inline_referrers[i] = nil;
        }
        weak_grow_maybe(weak_table);
        weak_entry_insert(weak_table, &new_entry);
    }

    // Do not set *referrer. objc_storeWeak() requires that the
    // value not change.

    return referent_id;
}
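The counterpart append_referrer shows the inline-to-out-of-line transition described in the addendum below. A condensed sketch (patterned on objc4; TABLE_SIZE, w_hash_pointer, and grow_refs_and_insert are the runtime's own helpers):

static void append_referrer(weak_entry_t *entry, objc_object **new_referrer)
{
    if (!entry->out_of_line) {
        // Fast path: drop into an empty inline slot.
        for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
            if (entry->inline_referrers[i] == nil) {
                entry->inline_referrers[i] = new_referrer;
                return;
            }
        }
        // All inline slots full: spill into an out-of-line hash array.
        weak_referrer_t *new_referrers = (weak_referrer_t *)
            calloc(WEAK_INLINE_COUNT, sizeof(weak_referrer_t));
        for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) {
            new_referrers[i] = entry->inline_referrers[i];
        }
        entry->referrers = new_referrers;
        entry->num_refs = WEAK_INLINE_COUNT;
        entry->out_of_line = 1;
        entry->mask = WEAK_INLINE_COUNT - 1;
        entry->max_hash_displacement = 0;
    }

    // Out-of-line: grow at 3/4 load, then linear-probe for a free slot.
    if (entry->num_refs >= TABLE_SIZE(entry) * 3 / 4) {
        return grow_refs_and_insert(entry, new_referrer);
    }
    size_t index = w_hash_pointer(new_referrer) & (entry->mask);
    size_t hash_displacement = 0;
    while (entry->referrers[index] != nil) {
        index = (index + 1) & entry->mask;
        hash_displacement++;
    }
    if (hash_displacement > entry->max_hash_displacement) {
        entry->max_hash_displacement = hash_displacement;
    }
    entry->referrers[index] = new_referrer;
    entry->num_refs++;
}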
Summary
The runtime keeps a global hash table whose keys are the addresses of weakly referenced objects and whose values are tables of the addresses that weakly reference them.

In storeWeak(id *location, objc_object *newObj):

- Read the old object's address from *location.
- Use the old address to find the old object's weak table.
- weak_unregister_no_lock removes the reference address (location) from that weak table. Note that it deliberately does not set the value at location to nil; storeWeak itself writes *location later.
- Use newObj to find the new object's weak table.
- weak_register_no_lock adds the reference address (location) to that weak table, after which storeWeak stores newObj into *location. A self-contained toy model of this flow follows.
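The sketch below is a toy model of that flow in plain C++ (illustration only: store_weak and clear_weak_refs are hypothetical names, and locking, striping, tagged pointers, and the deallocating check are all omitted). clear_weak_refs plays the role of weak_clear_no_lock, which the dealloc path uses to zero every registered slot:

#include <cstdio>
#include <unordered_map>
#include <unordered_set>

// The global weak table: object address -> set of weak-reference slots.
static std::unordered_map<void *, std::unordered_set<void **>> g_weak_table;

static void store_weak(void **location, void *new_obj) {
    // Unregister the old pairing, mirroring weak_unregister_no_lock.
    if (void *old_obj = *location) {
        auto it = g_weak_table.find(old_obj);
        if (it != g_weak_table.end()) {
            it->second.erase(location);
            if (it->second.empty()) g_weak_table.erase(it);
        }
    }
    // Register the new pairing, mirroring weak_register_no_lock.
    if (new_obj) g_weak_table[new_obj].insert(location);
    *location = new_obj;
}

// Called from the object's deallocation path: zero every registered slot.
static void clear_weak_refs(void *obj) {
    auto it = g_weak_table.find(obj);
    if (it == g_weak_table.end()) return;
    for (void **location : it->second) *location = nullptr;
    g_weak_table.erase(it);
}

int main() {
    int object = 42;                  // stand-in for a heap object
    void *w1 = nullptr, *w2 = nullptr;
    store_weak(&w1, &object);
    store_weak(&w2, &object);
    clear_weak_refs(&object);         // "dealloc": both slots are zeroed
    printf("w1=%p w2=%p\n", w1, w2);  // both print as null
    return 0;
}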
Addendum
The weak table stores weak_entry_t structs, and each weak_entry_t stores the array of referrer addresses for a single object. Up to WEAK_INLINE_COUNT (4) referrers live inline in the entry itself; once those fill up, the entry spills into a separately allocated out-of-line hash array, whose num_refs counter is a PTR_MINUS_1-bit field (so capacity is bounded by that array, not by PTR_MINUS_1 referrers). Carving the table into small per-object chunks like this keeps allocations small and easy to grow.
#if __LP64__
#define PTR_MINUS_1 63
#else
#define PTR_MINUS_1 31
#endif

#define WEAK_INLINE_COUNT 4

struct weak_entry_t {
    DisguisedPtr<objc_object> referent;
    union {
        struct {
            weak_referrer_t *referrers;
            uintptr_t        out_of_line : 1;
            uintptr_t        num_refs : PTR_MINUS_1;
            uintptr_t        mask;
            uintptr_t        max_hash_displacement;
        };
        struct {
            // out_of_line=0 is LSB of one of these (don't care which)
            weak_referrer_t  inline_referrers[WEAK_INLINE_COUNT];
        };
    };
};
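As a sanity check of this layout (hypothetical asserts, not in the objc4 source, assuming a pointer-sized DisguisedPtr as in objc4): both arms of the union span four pointer-sized words, and because referrer slots hold pointer-aligned addresses their low bit is zero while stored inline, which is why that aliased bit can double as the out_of_line flag.

static_assert(sizeof(weak_entry_t) == 5 * sizeof(void *),
              "referent plus a four-word union");
static_assert(alignof(objc_object *) >= 2,
              "inline referrer addresses have a zero low bit, so the "
              "aliased out_of_line bit reads 0 while the entry is inline");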