类的结构@2x.png
The memory layout of an object
(lldb) x/4gx person
0x101025660: 0x001d8001000033dd 0x0000000100002080
0x101025670: 0x0000000100002060 0x0000000000000000
(lldb) po 0x001d8001000033dd
8303516107944925
(lldb) po 0x0000000100002080
fish
(lldb) po 0x0000000100002060
cloud
(lldb) po 0x001d8001000033dd & 0x00007ffffffffff8ULL
DCPerson
(lldb)
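The last command masks the isa with ISA_MASK to recover the class. A minimal sketch of the same computation in code, assuming the x86_64 value ISA_MASK = 0x00007ffffffffff8ULL used above (the arm64 mask differs) and the DCPerson demo class from this article:

#import <objc/runtime.h>

DCPerson *person = [DCPerson alloc];
// The first 8 bytes of the object are the (non-pointer) isa.
uintptr_t rawIsa = *(uintptr_t *)(__bridge void *)person;
// Masking with ISA_MASK strips the tag bits and leaves the class pointer.
Class cls = (__bridge Class)(void *)(rawIsa & 0x00007ffffffffff8ULL);
NSLog(@"%@ - %@", cls, object_getClass(person));   // both print DCPerson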
The cache_t structure
struct cache_t {
#if CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_OUTLINED
    explicit_atomic<struct bucket_t *> _buckets;
    explicit_atomic<mask_t> _mask;
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_HIGH_16
    explicit_atomic<uintptr_t> _maskAndBuckets;
    mask_t _mask_unused;

    // How much the mask is shifted by.
    static constexpr uintptr_t maskShift = 48;

    // Additional bits after the mask which must be zero. msgSend
    // takes advantage of these additional bits to construct the value
    // `mask << 4` from `_maskAndBuckets` in a single instruction.
    static constexpr uintptr_t maskZeroBits = 4;

    // The largest mask value we can store.
    static constexpr uintptr_t maxMask = ((uintptr_t)1 << (64 - maskShift)) - 1;

    // The mask applied to `_maskAndBuckets` to retrieve the buckets pointer.
    static constexpr uintptr_t bucketsMask = ((uintptr_t)1 << (maskShift - maskZeroBits)) - 1;

    // Ensure we have enough bits for the buckets pointer.
    static_assert(bucketsMask >= MACH_VM_MAX_ADDRESS, "Bucket field doesn't have enough bits for arbitrary pointers.");
#elif CACHE_MASK_STORAGE == CACHE_MASK_STORAGE_LOW_4
    // _maskAndBuckets stores the mask shift in the low 4 bits, and
    // the buckets pointer in the remainder of the value. The mask
    // shift is the value where (0xffff >> shift) produces the correct
    // mask. This is equal to 16 - log2(cache_size).
    explicit_atomic<uintptr_t> _maskAndBuckets;
    mask_t _mask_unused;

    static constexpr uintptr_t maskBits = 4;
    static constexpr uintptr_t maskMask = (1 << maskBits) - 1;
    static constexpr uintptr_t bucketsMask = ~maskMask;
#else
#error Unknown cache mask storage type.
#endif

#if __LP64__
    uint16_t _flags;
#endif
    uint16_t _occupied;

public:
    static bucket_t *emptyBuckets();

    struct bucket_t *buckets();
    mask_t mask();
    mask_t occupied();
    void incrementOccupied();
    void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask);
    void initializeToEmpty();

    unsigned capacity();
    bool isConstantEmptyCache();
    bool canBeFreed();

#if __LP64__
    bool getBit(uint16_t flags) const {
        return _flags & flags;
    }
    void setBit(uint16_t set) {
        __c11_atomic_fetch_or((_Atomic(uint16_t) *)&_flags, set, __ATOMIC_RELAXED);
    }
    void clearBit(uint16_t clear) {
        __c11_atomic_fetch_and((_Atomic(uint16_t) *)&_flags, ~clear, __ATOMIC_RELAXED);
    }
#endif

#if FAST_CACHE_ALLOC_MASK
    bool hasFastInstanceSize(size_t extra) const
    {
        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        }
        return _flags & FAST_CACHE_ALLOC_MASK;
    }

    size_t fastInstanceSize(size_t extra) const
    {
        ASSERT(hasFastInstanceSize(extra));

        if (__builtin_constant_p(extra) && extra == 0) {
            return _flags & FAST_CACHE_ALLOC_MASK16;
        } else {
            size_t size = _flags & FAST_CACHE_ALLOC_MASK;
            // remove the FAST_CACHE_ALLOC_DELTA16 that was added
            // by setFastInstanceSize
            return align16(size + extra - FAST_CACHE_ALLOC_DELTA16);
        }
    }

    void setFastInstanceSize(size_t newSize)
    {
        // Set during realization or construction only. No locking needed.
        uint16_t newBits = _flags & ~FAST_CACHE_ALLOC_MASK;
        uint16_t sizeBits;

        // Adding FAST_CACHE_ALLOC_DELTA16 allows for FAST_CACHE_ALLOC_MASK16
        // to yield the proper 16byte aligned allocation size with a single mask
        sizeBits = word_align(newSize) + FAST_CACHE_ALLOC_DELTA16;
        sizeBits &= FAST_CACHE_ALLOC_MASK;
        if (newSize <= sizeBits) {
            newBits |= sizeBits;
        }
        _flags = newBits;
    }
#else
    bool hasFastInstanceSize(size_t extra) const {
        return false;
    }
    size_t fastInstanceSize(size_t extra) const {
        abort();
    }
    void setFastInstanceSize(size_t extra) {
        // nothing
    }
#endif

    static size_t bytesForCapacity(uint32_t cap);
    static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap);

    void reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld);
    void insert(Class cls, SEL sel, IMP imp, id receiver);

    static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn, cold));
};
cache_t@2x.png
- 1. Before a method is run for the first time, the cache_t cache contains 0 methods; afterwards, the number of cached methods equals the number of methods that have already been executed. This can be seen from the class's struct layout.
cache_t偏移16字节@2x.png
It is easy to see that after a method has been executed, the values of _sel, _imp, _mask and _occupied inside cache_t change noticeably.
编译方法前后cache_t的变化@2x.png
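The LLDB session below casts the class address plus 0x10 to cache_t *. That 16-byte offset comes from the layout of objc_class: isa (8 bytes) and superclass (8 bytes) come first, so cache_t starts at offset 0x10. A small sketch of the arithmetic, using the addresses from this run:

Class cls = [DCPerson class];                     // 0x0000000100003428 in this session
// objc_class layout: isa (8) + superclass (8) + cache_t + bits
uintptr_t cacheAddress = (uintptr_t)cls + 0x10;   // -> 0x0000000100003438
// This is exactly the address inspected with `p (cache_t *)0x0000000100003438`.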
(lldb) p/x DCPerson.class
(Class) $0 = 0x0000000100003428 DCPerson
(lldb) p (cache_t *)0x0000000100003438
(cache_t *) $1 = 0x0000000100003438
(lldb) p *$1
(cache_t) $2 = {
_buckets = {
std::__1::atomic<bucket_t *> = 0x00000001003323d0 {
_sel = {
std::__1::atomic<objc_selector *> = (null)
}
_imp = {
std::__1::atomic<unsigned long> = 0
}
}
}
_mask = {
std::__1::atomic<unsigned int> = 0
}
_flags = 32820
_occupied = 0
}
2020-10-07 17:12:52.237343+0800 DCPerson[10411:5356955] sayHello
(lldb) p *$1
(cache_t) $3 = {
_buckets = {
std::__1::atomic<bucket_t *> = 0x00000001018a0000 {
_sel = {
std::__1::atomic<objc_selector *> = ""
}
_imp = {
std::__1::atomic<unsigned long> = 11944
}
}
}
_mask = {
std::__1::atomic<unsigned int> = 3
}
_flags = 32820
_occupied = 1
}
21.gif
- 2. The dump above does not directly show the sayHello method we are looking for. The cache_t struct declares struct bucket_t *buckets(); calling buckets() returns a bucket_t, and bucket_t in turn exposes sel() and inline IMP imp(Class cls). With those, sayHello can be located as follows:
(lldb) p/x DCPerson.class
(Class) $0 = 0x0000000100003428 DCPerson
(lldb) p (cache_t *)0x0000000100003438
(cache_t *) $1 = 0x0000000100003438
(lldb) p *$1
(cache_t) $2 = {
_buckets = {
std::__1::atomic<bucket_t *> = 0x00000001003323d0 {
_sel = {
std::__1::atomic<objc_selector *> = (null)
}
_imp = {
std::__1::atomic<unsigned long> = 0
}
}
}
_mask = {
std::__1::atomic<unsigned int> = 0
}
_flags = 32820
_occupied = 0
}
2020-10-07 17:12:52.237343+0800 DCPerson[10411:5356955] sayHello
(lldb) p *$1
(cache_t) $3 = {
_buckets = {
std::__1::atomic<bucket_t *> = 0x00000001018a0000 {
_sel = {
std::__1::atomic<objc_selector *> = ""
}
_imp = {
std::__1::atomic<unsigned long> = 11944
}
}
}
_mask = {
std::__1::atomic<unsigned int> = 3
}
_flags = 32820
_occupied = 1
}
(lldb) p $3.buckets()
(bucket_t *) $4 = 0x00000001018a0000
(lldb) p *$4
(bucket_t) $5 = {
_sel = {
std::__1::atomic<objc_selector *> = ""
}
_imp = {
std::__1::atomic<unsigned long> = 11944
}
}
(lldb) p $5.sel()
(SEL) $6 = "sayHello"
(lldb) p $5.imp(DCPerson.class)
(IMP) $7 = 0x0000000100001a80 (DCPerson`-[DCPerson sayHello])
22.gif
- 3. Use MachOView to verify that the final address of -[DCPerson sayHello] is correct
sayHello方法地址MachOView@2x.png
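A note on the raw _imp value in these dumps: 11944 is 0x2EA8, which is clearly not a callable address. On this non-ptrauth x86_64 build the cached value appears to be the IMP XOR-ed with the class, and bucket_t::imp(Class cls) undoes that encoding — which is why the session passes DCPerson.class to imp(). The arithmetic checks out:

// 0x0000000100001a80 (-[DCPerson sayHello]) ^ 0x0000000100003428 (DCPerson class) = 0x2EA8 = 11944

The same encoding explains the small values such as 0x2970 printed by the out-of-source demo later in this article.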
- 4. Inspect the second cached method; it can be retrieved in two ways, pointer offset and array subscript (a short comparison sketch follows the two transcripts below)
- I. Pointer offset
-(void)sayHello;
-(void)sayCode;
2020-10-07 19:24:19.931422+0800 DCPerson[84824:5536090] sayHello
DCPerson was compiled with optimization - stepping may behave oddly; variables may not be available.
2020-10-07 19:24:27.426872+0800 DCPerson[84824:5536090] -[DCPerson sayCode]
(lldb) p/x DCPerson.class
(Class) $0 = 0x0000000100003428 DCPerson
(lldb) p (cache_t *)0x0000000100003438
(cache_t *) $1 = 0x0000000100003438
(lldb) p *$1
(cache_t) $2 = {
_buckets = {
std::__1::atomic<bucket_t *> = 0x0000000100723b90 {
_sel = {
std::__1::atomic<objc_selector *> = ""
}
_imp = {
std::__1::atomic<unsigned long> = 11944
}
}
}
_mask = {
std::__1::atomic<unsigned int> = 3
}
_flags = 32820
_occupied = 2
}
(lldb) p $2.buckets()
(bucket_t *) $3 = 0x0000000100723b90
(lldb) p *$3
(bucket_t) $4 = {
_sel = {
std::__1::atomic<objc_selector *> = ""
}
_imp = {
std::__1::atomic<unsigned long> = 11944
}
}
(lldb) p *($3+1)
(bucket_t) $5 = {
_sel = {
std::__1::atomic<objc_selector *> = ""
}
_imp = {
std::__1::atomic<unsigned long> = 11928
}
}
(lldb) p $5.sel()
(SEL) $6 = "sayCode"
(lldb) p $5.imp(DCPerson.class)
(IMP) $7 = 0x0000000100001ab0 (DCPerson`-[DCPerson sayCode])
- II. Array subscript
2020-10-07 19:24:19.931422+0800 DCPerson[84824:5536090] sayHello
DCPerson was compiled with optimization - stepping may behave oddly; variables may not be available.
2020-10-07 19:24:27.426872+0800 DCPerson[84824:5536090] -[DCPerson sayCode]
(lldb) p/x DCPerson.class
(Class) $0 = 0x0000000100003428 DCPerson
(lldb) p (cache_t *)0x0000000100003438
(cache_t *) $1 = 0x0000000100003438
(lldb) p *$1
(cache_t) $2 = {
_buckets = {
std::__1::atomic<bucket_t *> = 0x0000000100723b90 {
_sel = {
std::__1::atomic<objc_selector *> = ""
}
_imp = {
std::__1::atomic<unsigned long> = 11944
}
}
}
_mask = {
std::__1::atomic<unsigned int> = 3
}
_flags = 32820
_occupied = 2
}
(lldb) p $2.buckets()
(bucket_t *) $3 = 0x0000000100723b90
(lldb) p *$3
(bucket_t) $4 = {
_sel = {
std::__1::atomic<objc_selector *> = ""
}
_imp = {
std::__1::atomic<unsigned long> = 11944
}
}
(lldb) p $2.buckets()[1]
(bucket_t) $8 = {
_sel = {
std::__1::atomic<objc_selector *> = ""
}
_imp = {
std::__1::atomic<unsigned long> = 11928
}
}
(lldb) p $8.sel()
(SEL) $9 = "sayCode"
(lldb) p $8.imp(DCPerson.class)
(IMP) $10 = 0x0000000100001ab0 (DCPerson`-[DCPerson sayCode])
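Both retrieval styles above boil down to the same pointer arithmetic; a tiny sketch, assuming b is the bucket_t * returned by buckets() (0x0000000100723b90 in this session):

bucket_t *b = cache->buckets();      // assuming `cache` is the cache_t pointer
bucket_t viaOffset    = *(b + 1);    // I.  pointer offset
bucket_t viaSubscript = b[1];        // II. array subscript; b[1] is defined as *(b + 1)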
Debugging buckets() outside the objc source environment
struct lg_bucket_t {
    // IMP-first is better for arm64e ptrauth and no worse for arm64.
    SEL _sel;
    IMP _imp;
};

typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bits

struct lg_cache_t {
    struct lg_bucket_t * _buckets;
    mask_t _mask;
    uint16_t _flags;
    uint16_t _occupied;
};

struct lg_class_data_bits_t {
    uintptr_t bits;
};

struct lg_objc_class {
    Class ISA;
    Class superclass;
    struct lg_cache_t cache;           // formerly cache pointer and vtable
    struct lg_class_data_bits_t bits;  // class_rw_t * plus custom rr/alloc flags
};
int main(int argc, const char * argv[]) {
    @autoreleasepool {
        DCPerson *person = [DCPerson alloc];
        Class pClass = [DCPerson class];    // objc_class
        // person.name = @"cloud";
        // person.nickName = @"fish";
        [person say1];
        [person say2];
        // [person say3];
        // [person say4];

        struct lg_objc_class *lg_pClass = (__bridge struct lg_objc_class *)(pClass);
        NSLog(@"%hu - %u", lg_pClass->cache._occupied, lg_pClass->cache._mask);
        for (mask_t i = 0; i < lg_pClass->cache._mask; i++) {
            // Print the bucket we retrieved
            struct lg_bucket_t bucket = lg_pClass->cache._buckets[i];
            NSLog(@"%@ - %p", NSStringFromSelector(bucket._sel), bucket._imp);
        }
        NSLog(@"%@,%@", person, pClass);
    }
    return 0;
}
// Output
2020-10-07 20:44:00.694375+0800 iOS-脱离源码环境调试buckets()[34019:5667773] -[DCPerson say1]
2020-10-07 20:44:00.694750+0800 iOS-脱离源码环境调试buckets()[34019:5667773] -[DCPerson say2]
2020-10-07 20:44:00.694928+0800 iOS-脱离源码环境调试buckets()[34019:5667773] 2 - 3
2020-10-07 20:44:00.695075+0800 iOS-脱离源码环境调试buckets()[34019:5667773] say1 - 0x2970
2020-10-07 20:44:00.695144+0800 iOS-脱离源码环境调试buckets()[34019:5667773] say2 - 0x2ea0
2020-10-07 20:44:00.695188+0800 iOS-脱离源码环境调试buckets()[34019:5667773] (null) - 0x0
2020-10-07 20:44:00.695400+0800 iOS-脱离源码环境调试buckets()[34019:5667773] <DCPerson: 0x10052d320>,DCPerson
Call two more methods and inspect the bucket values again
[person say1];
[person say2];
[person say3];
[person say4];
// Output
2020-10-07 20:46:46.509218+0800 iOS-脱离源码环境调试buckets()[35630:5672751] -[DCPerson say1]
2020-10-07 20:46:46.509623+0800 iOS-脱离源码环境调试buckets()[35630:5672751] -[DCPerson say2]
2020-10-07 20:46:46.509829+0800 iOS-脱离源码环境调试buckets()[35630:5672751] -[DCPerson say3]
2020-10-07 20:46:46.509870+0800 iOS-脱离源码环境调试buckets()[35630:5672751] -[DCPerson say4]
2020-10-07 20:46:46.509906+0800 iOS-脱离源码环境调试buckets()[35630:5672751] 2 - 7
2020-10-07 20:46:46.510002+0800 iOS-脱离源码环境调试buckets()[35630:5672751] say4 - 0x2e10
2020-10-07 20:46:46.510045+0800 iOS-脱离源码环境调试buckets()[35630:5672751] (null) - 0x0
2020-10-07 20:46:46.510102+0800 iOS-脱离源码环境调试buckets()[35630:5672751] say3 - 0x2ec0
2020-10-07 20:46:46.510139+0800 iOS-脱离源码环境调试buckets()[35630:5672751] (null) - 0x0
2020-10-07 20:46:46.510172+0800 iOS-脱离源码环境调试buckets()[35630:5672751] (null) - 0x0
2020-10-07 20:46:46.510204+0800 iOS-脱离源码环境调试buckets()[35630:5672751] (null) - 0x0
2020-10-07 20:46:46.510235+0800 iOS-脱离源码环境调试buckets()[35630:5672751] (null) - 0x0
2020-10-07 20:46:46.510436+0800 iOS-脱离源码环境调试buckets()[35630:5672751] <DCPerson: 0x100496cc0>,DCPerson
occupied&mask@2x.png
- 1. What are _occupied and _mask?
_occupied is the number of methods currently stored in the cache; _mask is the hash mask applied to selector indices and equals capacity - 1.
- 2. Why do the printed values change from 2 - 3 to 2 - 7?
The cache capacity doubles from 4 to 8 when it grows, and _mask = capacity - 1: 4 - 1 = 3, 8 - 1 = 7.
- 3. Why do buckets get lost — say3 and say4 are printed, but say1 and say2 are not?
Once the cache would pass the 3/4-full threshold it is rebuilt: larger storage is allocated and the old buckets are discarded rather than copied over, so the earlier entries disappear.
- 4. Why is the order odd — say4 is printed before say3?
Entries are stored by hashing the selector, so the storage order is not the call order.
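The hashing and probing responsible for this behaviour sit next to insert() in the runtime's cache code; roughly as in objc4-781 (treat the exact form as a sketch — it varies slightly between releases):

static inline mask_t cache_hash(SEL sel, mask_t mask)
{
    // The start index is just the selector address masked into the table.
    return (mask_t)(uintptr_t)sel & mask;
}

#if __arm64__
// arm64: the scan decrements and wraps; no end marker is needed.
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return i ? i-1 : mask;
}
#else
// x86_64 / arm / i386: the scan increments and wraps at the mask.
static inline mask_t cache_next(mask_t i, mask_t mask) {
    return (i+1) & mask;
}
#endif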
How cache_t works under the hood (the member variables describe what is stored; the member functions describe how it changes)
Source call chain: lookUpImpOrForward --> log_and_fill_cache --> cache_fill --> cache_t::insert
IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior)
{
    const IMP forward_imp = (IMP)_objc_msgForward_impcache;
    IMP imp = nil;
    Class curClass;

    runtimeLock.assertUnlocked();

    // Optimistic cache lookup
    if (fastpath(behavior & LOOKUP_CACHE)) {
        imp = cache_getImp(cls, sel);
        if (imp) goto done_nolock;
    }

    // runtimeLock is held during isRealized and isInitialized checking
    // to prevent races against concurrent realization.

    // runtimeLock is held during method search to make
    // method-lookup + cache-fill atomic with respect to method addition.
    // Otherwise, a category could be added but ignored indefinitely because
    // the cache was re-filled with the old value after the cache flush on
    // behalf of the category.

    runtimeLock.lock();

    // We don't want people to be able to craft a binary blob that looks like
    // a class but really isn't one and do a CFI attack.
    //
    // To make these harder we want to make sure this is a class that was
    // either built into the binary or legitimately registered through
    // objc_duplicateClass, objc_initializeClassPair or objc_allocateClassPair.
    checkIsKnownClass(cls);

    if (slowpath(!cls->isRealized())) {
        cls = realizeClassMaybeSwiftAndLeaveLocked(cls, runtimeLock);
        // runtimeLock may have been dropped but is now locked again
    }

    if (slowpath((behavior & LOOKUP_INITIALIZE) && !cls->isInitialized())) {
        cls = initializeAndLeaveLocked(cls, inst, runtimeLock);
        // runtimeLock may have been dropped but is now locked again

        // If sel == initialize, class_initialize will send +initialize and
        // then the messenger will send +initialize again after this
        // procedure finishes. Of course, if this is not being called
        // from the messenger then it won't happen. 2778172
    }

    runtimeLock.assertLocked();
    curClass = cls;

    // The code used to lookup the class's cache again right after
    // we take the lock but for the vast majority of the cases
    // evidence shows this is a miss most of the time, hence a time loss.
    //
    // The only codepath calling into this without having performed some
    // kind of cache lookup is class_getInstanceMethod().

    for (unsigned attempts = unreasonableClassCount();;) {
        // curClass method list.
        Method meth = getMethodNoSuper_nolock(curClass, sel);
        if (meth) {
            imp = meth->imp(false);
            goto done;
        }

        if (slowpath((curClass = curClass->superclass) == nil)) {
            // No implementation found, and method resolver didn't help.
            // Use forwarding.
            imp = forward_imp;
            break;
        }

        // Halt if there is a cycle in the superclass chain.
        if (slowpath(--attempts == 0)) {
            _objc_fatal("Memory corruption in class list.");
        }

        // Superclass cache.
        imp = cache_getImp(curClass, sel);
        if (slowpath(imp == forward_imp)) {
            // Found a forward:: entry in a superclass.
            // Stop searching, but don't cache yet; call method
            // resolver for this class first.
            break;
        }
        if (fastpath(imp)) {
            // Found the method in a superclass. Cache it in this class.
            goto done;
        }
    }

    // No implementation found. Try method resolver once.
    if (slowpath(behavior & LOOKUP_RESOLVER)) {
        behavior ^= LOOKUP_RESOLVER;
        return resolveMethod_locked(inst, sel, cls, behavior);
    }

 done:
    log_and_fill_cache(cls, imp, sel, inst, curClass);
    runtimeLock.unlock();
 done_nolock:
    if (slowpath((behavior & LOOKUP_NIL) && imp == forward_imp)) {
        return nil;
    }
    return imp;
}
static void
log_and_fill_cache(Class cls, IMP imp, SEL sel, id receiver, Class implementer)
{
#if SUPPORT_MESSAGE_LOGGING
    if (slowpath(objcMsgLogEnabled && implementer)) {
        bool cacheIt = logMessageSend(implementer->isMetaClass(),
                                      cls->nameForLogging(),
                                      implementer->nameForLogging(),
                                      sel);
        if (!cacheIt) return;
    }
#endif
    cache_fill(cls, sel, imp, receiver);
}
void cache_fill(Class cls, SEL sel, IMP imp, id receiver)
{
    runtimeLock.assertLocked();

#if !DEBUG_TASK_THREADS
    // Never cache before +initialize is done
    if (cls->isInitialized()) {
        cache_t *cache = getCache(cls);
#if CONFIG_USE_CACHE_LOCK
        mutex_locker_t lock(cacheUpdateLock);
#endif
        cache->insert(cls, sel, imp, receiver);
    }
#else
    _collecting_in_critical();
#endif
}
ALWAYS_INLINE
void cache_t::insert(Class cls, SEL sel, IMP imp, id receiver)
{
#if CONFIG_USE_CACHE_LOCK
    cacheUpdateLock.assertLocked();
#else
    runtimeLock.assertLocked();
#endif

    ASSERT(sel != 0 && cls->isInitialized());

    // Use the cache as-is if it is less than 3/4 full
    mask_t newOccupied = occupied() + 1;
    unsigned oldCapacity = capacity(), capacity = oldCapacity;
    if (slowpath(isConstantEmptyCache())) {
        // Cache is read-only. Replace it.
        if (!capacity) capacity = INIT_CACHE_SIZE;
        reallocate(oldCapacity, capacity, /* freeOld */false);
    }
    else if (fastpath(newOccupied + CACHE_END_MARKER <= capacity / 4 * 3)) {
        // Cache is less than 3/4 full. Use it as-is.
    }
    else {
        capacity = capacity ? capacity * 2 : INIT_CACHE_SIZE;
        if (capacity > MAX_CACHE_SIZE) {
            capacity = MAX_CACHE_SIZE;
        }
        reallocate(oldCapacity, capacity, true);
    }

    bucket_t *b = buckets();
    mask_t m = capacity - 1;
    mask_t begin = cache_hash(sel, m);
    mask_t i = begin;

    // Scan for the first unused slot and insert there.
    // There is guaranteed to be an empty slot because the
    // minimum size is 4 and we resized at 3/4 full.
    do {
        if (fastpath(b[i].sel() == 0)) {
            incrementOccupied();
            b[i].set<Atomic, Encoded>(sel, imp, cls);
            return;
        }
        if (b[i].sel() == sel) {
            // The entry was added to the cache by some other thread
            // before we grabbed the cacheUpdateLock.
            return;
        }
    } while (fastpath((i = cache_next(i, m)) != begin));

    cache_t::bad_cache(receiver, (SEL)sel, cls);
}
ALWAYS_INLINE
void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity, bool freeOld)
{
    bucket_t *oldBuckets = buckets();
    bucket_t *newBuckets = allocateBuckets(newCapacity);

    // Cache's old contents are not propagated.
    // This is thought to save cache memory at the cost of extra cache fills.
    // fixme re-measure this

    ASSERT(newCapacity > 0);
    ASSERT((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1);

    setBucketsAndMask(newBuckets, newCapacity - 1);

    if (freeOld) {
        cache_collect_free(oldBuckets, oldCapacity);
    }
}
bucket_t *allocateBuckets(mask_t newCapacity)
{
    // Allocate one extra bucket to mark the end of the list.
    // This can't overflow mask_t because newCapacity is a power of 2.
    bucket_t *newBuckets = (bucket_t *)
        calloc(cache_t::bytesForCapacity(newCapacity), 1);

    bucket_t *end = cache_t::endMarker(newBuckets, newCapacity);

#if __arm__
    // End marker's sel is 1 and imp points BEFORE the first bucket.
    // This saves an instruction in objc_msgSend.
    end->set<NotAtomic, Raw>((SEL)(uintptr_t)1, (IMP)(newBuckets - 1), nil);
#else
    // End marker's sel is 1 and imp points to the first bucket.
    end->set<NotAtomic, Raw>((SEL)(uintptr_t)1, (IMP)newBuckets, nil);
#endif

    if (PrintCaches) recordNewCache(newCapacity);

    return newBuckets;
}
static void cache_collect_free(bucket_t *data, mask_t capacity)
{
#if CONFIG_USE_CACHE_LOCK
    cacheUpdateLock.assertLocked();
#else
    runtimeLock.assertLocked();
#endif

    if (PrintCaches) recordDeadCache(capacity);

    _garbage_make_room ();
    garbage_byte_size += cache_t::bytesForCapacity(capacity);
    garbage_refs[garbage_count++] = data;
    cache_collect(false);
}
Searching for callers of void incrementOccupied(); shows that it is called inside cache_t::insert.
incrementOccupied@2x.png
cache_t_insert@2x.png
The overall flow is analyzed in the figure below: each sel/imp pair is stored at an index computed by hashing the selector, and on a collision the index is re-probed (hashed again). Because storage is hash-ordered rather than insertion-ordered, say4 is printed before say3.
Cooci 关于Cache_t原理分析图.png