24. iOS Internals Study: GCD Source Code Analysis (Part 1)

Author: 牛牛大王奥利给 | Published 2022-01-10 16:11

    Outline of this article:
    1. Additions to the previous article
    2. Source analysis of queue creation with dispatch_queue_create
    3. GCD's underlying inheritance chain
    4. Source analysis of the synchronous function dispatch_sync
    5. Source analysis of the asynchronous function dispatch_async
    6. Source analysis of the singleton dispatch_once_t

    I. Supplementary Notes

    In the previous article we took a quick look at part of the source for the main queue, dispatch_get_main_queue, and I left myself a few open questions. This article starts with a short follow-up on two of them:
    1. Which field in the underlying source distinguishes a serial queue from a concurrent queue?
    2. What is dq_serialnum used for?

    Following these two questions, let's dig deeper into the source.

    Source of dispatch_get_main_queue:

    dispatch_queue_main_t
    dispatch_get_main_queue(void)
    {
        return DISPATCH_GLOBAL_OBJECT(dispatch_queue_main_t, _dispatch_main_q);
    }
    
    dispatch_queue_main_t

    From the source we can see that the return type is dispatch_queue_main_t, and dispatch_queue_main_t is defined in terms of the dispatch_queue_static_s struct. Following the definitions layer by layer eventually leads to _OS_OBJECT_HEADER, along this path:
    dispatch_queue_main_t -> dispatch_queue_static_s -> dispatch_lane_s -> DISPATCH_LANE_CLASS_HEADER -> _DISPATCH_QUEUE_CLASS_HEADER -> DISPATCH_OBJECT_HEADER -> OS_OBJECT_STRUCT_HEADER -> _OS_OBJECT_HEADER

    _OS_OBJECT_HEADER is a macro, defined as follows:

    #define _OS_OBJECT_HEADER(isa, ref_cnt, xref_cnt) \
            isa; /* must be pointer-sized and use __ptrauth_objc_isa_pointer */ \
            int volatile ref_cnt; \
            int volatile xref_cnt
    

    After that long detour, it turns out that at the very bottom dispatch_queue_main_t is layer upon layer of wrapping around an isa 😂😂😂, with fields such as dq_serialnum and dq_label layered on top of it.

    DISPATCH_GLOBAL_OBJECT
    #define DISPATCH_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object))
    

    Now that we understand the return type, let's look at the implementation, DISPATCH_GLOBAL_OBJECT, which turns out to be yet another macro.
    The first parameter, type, corresponds to dispatch_queue_main_t.
    The second parameter, object, corresponds to _dispatch_main_q.
    Note that the & here is not a bitwise AND: DISPATCH_GLOBAL_OBJECT takes the address of the global object (&_dispatch_main_q) and bridge-casts it (OS_OBJECT_BRIDGE) to the requested type. In other words, it simply hands out a pointer to a global singleton struct as the public queue type.
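
    To make the pattern concrete, here is a tiny plain-C analogue of that macro (all names here are invented for illustration; they are not libdispatch symbols): a global struct plus a macro that hands out its address cast to a "public" handle type.

    #include <stdio.h>

    /* Toy analogue of DISPATCH_GLOBAL_OBJECT: the macro is just
     * "address-of plus cast", no arithmetic involved. */
    struct my_queue_s { const char *label; };
    typedef struct my_queue_s *my_queue_t;

    static struct my_queue_s _my_main_q = { .label = "com.example.main" };

    #define MY_GLOBAL_OBJECT(type, object) ((type)&(object))

    int main(void) {
        my_queue_t q = MY_GLOBAL_OBJECT(my_queue_t, _my_main_q); /* &_my_main_q cast to my_queue_t */
        printf("%s\n", q->label); /* prints the label of the global singleton */
        return 0;
    }
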

    _dispatch_main_q
    struct dispatch_queue_static_s _dispatch_main_q = {
        DISPATCH_GLOBAL_OBJECT_HEADER(queue_main), // analyzed above: ultimately the isa wrapping
    #if !DISPATCH_USE_RESOLVERS // not defined here, so this branch is compiled
        .do_targetq = _dispatch_get_default_queue(true),
    #endif
        .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) |
                DISPATCH_QUEUE_ROLE_BASE_ANON,
        .dq_label = "com.apple.main-thread",
        .dq_atomic_flags = DQF_THREAD_BOUND | DQF_WIDTH(1),
        .dq_serialnum = 1,
    };
    

    Let's analyze this initializer line by line:
    1. #if !DISPATCH_USE_RESOLVERS

    #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707
        _dispatch_main_q.do_targetq = _dispatch_get_default_queue(true);
    
    #define _dispatch_get_default_queue(overcommit) \
            _dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS + \
                    !!(overcommit)]._as_dq
    
    

    Definition of _dispatch_root_queues:

    struct dispatch_queue_global_s _dispatch_root_queues[] = {
    #define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \
            ((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \
            DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \
            DISPATCH_ROOT_QUEUE_IDX_##n##_QOS)
    #define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \
        [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \
            DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), \
            .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \
            .do_ctxt = _dispatch_root_queue_ctxt(_DISPATCH_ROOT_QUEUE_IDX(n, flags)), \
            .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \
            .dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? \
                    _dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \
                    _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \
            __VA_ARGS__ \
        }
        _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0,
            .dq_label = "com.apple.root.maintenance-qos",
            .dq_serialnum = 4,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
            .dq_label = "com.apple.root.maintenance-qos.overcommit",
            .dq_serialnum = 5,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0,
            .dq_label = "com.apple.root.background-qos",
            .dq_serialnum = 6,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
            .dq_label = "com.apple.root.background-qos.overcommit",
            .dq_serialnum = 7,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0,
            .dq_label = "com.apple.root.utility-qos",
            .dq_serialnum = 8,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
            .dq_label = "com.apple.root.utility-qos.overcommit",
            .dq_serialnum = 9,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK,
            .dq_label = "com.apple.root.default-qos",
            .dq_serialnum = 10,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT,
                DISPATCH_PRIORITY_FLAG_FALLBACK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
            .dq_label = "com.apple.root.default-qos.overcommit",
            .dq_serialnum = 11,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0,
            .dq_label = "com.apple.root.user-initiated-qos",
            .dq_serialnum = 12,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
            .dq_label = "com.apple.root.user-initiated-qos.overcommit",
            .dq_serialnum = 13,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0,
            .dq_label = "com.apple.root.user-interactive-qos",
            .dq_serialnum = 14,
        ),
        _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
            .dq_label = "com.apple.root.user-interactive-qos.overcommit",
            .dq_serialnum = 15,
        ),
    };
    

    DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS is an enum value:

    [Screenshot: the DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS enum]
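
    For reference, the enum in question lives in libdispatch's queue_internal.h. Reproduced here from memory (so verify it against the copy of the source you are reading), it indexes the root-queue array in QoS order, with each overcommit variant immediately after its QoS:

    enum {
        DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
        DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT,
        DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS,
        DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT,
        DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS,
        DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT,
        DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS,            // == 6
        DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT, // == 7
        DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS,
        DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT,
        DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS,
        DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT,
        _DISPATCH_ROOT_QUEUE_IDX_COUNT,
    };
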

    With all of that in mind, go back to the call site _dispatch_get_default_queue(true): the overcommit argument passed in is true. So in
    _dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS + !!(overcommit)]._as_dq
    the index DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS + !!(overcommit) equals 6 + !!(true) = 6 + 1 = 7.

    So the entry being fetched is the one at index 7, i.e. _dispatch_root_queues[7], which is:

    _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT,
                DISPATCH_PRIORITY_FLAG_FALLBACK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
            .dq_label = "com.apple.root.default-qos.overcommit",
            .dq_serialnum = 11,
        ),
    

    That is the com.apple.root.default-qos.overcommit queue. _DISPATCH_ROOT_QUEUE_ENTRY itself is the macro defined right before _dispatch_root_queues, and it fills in quite a bit of information:

        [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \
            DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), \
            .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \
            .do_ctxt = _dispatch_root_queue_ctxt(_DISPATCH_ROOT_QUEUE_IDX(n, flags)), \
            .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \
            .dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? \
                    _dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \
                    _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \
            __VA_ARGS__ \
        }
    

    From this part of the definition we can see:
    dq_atomic_flags is set to DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), i.e. the queue's atomic flag word encodes its width via DQF_WIDTH (the pool width for root queues).
    The trailing ._as_dq in _dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS + !!(overcommit)]._as_dq is a member declared inside the DISPATCH_LANE_CLASS_HEADER macro:

    #define DISPATCH_LANE_CLASS_HEADER(x) \
        struct dispatch_queue_s _as_dq[0]; \
        DISPATCH_QUEUE_CLASS_HEADER(x, \
                struct dispatch_object_s *volatile dq_items_tail); \
        dispatch_unfair_lock_s dq_sidelock; \
        struct dispatch_object_s *volatile dq_items_head; \
        uint32_t dq_side_suspend_cnt
    

    Back to the preprocessor check on the first line, #if !DISPATCH_USE_RESOLVERS: it means that only when DISPATCH_USE_RESOLVERS is not defined does the static initializer include the line

        .do_targetq = _dispatch_get_default_queue(true),
    

    i.e. only then does .do_targetq get its value here at static-initialization time (otherwise it is assigned later at runtime, as the earlier snippet shows).

    2. .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) | DISPATCH_QUEUE_ROLE_BASE_ANON,
    On to this line. DISPATCH_QUEUE_STATE_INIT_VALUE is defined as follows:

    #define DISPATCH_QUEUE_STATE_INIT_VALUE(width) \
            ((DISPATCH_QUEUE_WIDTH_FULL - (width)) << DISPATCH_QUEUE_WIDTH_SHIFT)
    
    #define DISPATCH_QUEUE_WIDTH_FULL           0x1000ull
    
    #define DISPATCH_QUEUE_ROLE_BASE_ANON       0x0000001000000000ull
    
    #define DISPATCH_QUEUE_WIDTH_SHIFT          41
    
    

    Macros again. Substituting the values from the definitions above, the line can be rewritten as:
    .dq_state = ((0x1000ull - (1)) << 41) | 0x0000001000000000ull,
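
    As a quick sanity check of that arithmetic, a few lines of throwaway C (my own snippet, not libdispatch code) reproduce the resulting constant:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t width_full = 0x1000ull;             // DISPATCH_QUEUE_WIDTH_FULL
        uint64_t role_anon  = 0x0000001000000000ull; // DISPATCH_QUEUE_ROLE_BASE_ANON
        uint64_t dq_state   = ((width_full - 1) << 41) | role_anon;
        printf("0x%016llx\n", (unsigned long long)dq_state); // prints 0x001ffe1000000000
        return 0;
    }
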
    Even in that form it is still hard to tell what .dq_state is for, so I searched for the macros and assignments related to dq_state, hoping to find a comment near one of them, and did find a rough explanation:

    [Screenshot: comment near a dq_state assignment]
    From that comment we can roughly tell that dq_state describes the queue's state; among its bits are QUEUE_FULL and IN_BARRIER, which are set inside dispatch_barrier_sync() and dispatch_sync() to block other work on the queue.

    dq_label: this is the queue's label, i.e. the queue name, which is self-explanatory.
    dq_atomic_flags: searching for this field directly does not turn up much, so again I started from its value. Looking up DQF_THREAD_BOUND led me to this:

    DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t,
        DQF_NONE                = 0x00000000,
        DQF_AUTORELEASE_ALWAYS  = 0x00010000,
        DQF_AUTORELEASE_NEVER   = 0x00020000,
    #define _DQF_AUTORELEASE_MASK 0x00030000
        DQF_THREAD_BOUND        = 0x00040000, // queue is bound to a thread
        DQF_BARRIER_BIT         = 0x00080000, // queue is a barrier on its target
        DQF_TARGETED            = 0x00100000, // queue is targeted by another object
        DQF_LABEL_NEEDS_FREE    = 0x00200000, // queue label was strdup()ed
        DQF_MUTABLE             = 0x00400000,
        DQF_RELEASED            = 0x00800000, // xref_cnt == -1
    
        //
        // Only applies to sources
        //
        // @const DSF_STRICT
        // Semantics of the source are strict (implies DQF_MUTABLE being unset):
        // - handlers can't be changed past activation
        // - EV_VANISHED causes a hard failure
        // - source can't change WLH
        //
        // @const DSF_WLH_CHANGED
        // The wlh for the source changed (due to retarget past activation).
        // Only used for debugging and diagnostics purposes.
        //
        // @const DSF_CANCELED
        // Explicit cancelation has been requested.
        //
        // @const DSF_CANCEL_WAITER
        // At least one caller of dispatch_source_cancel_and_wait() is waiting on
        // the cancelation to finish. DSF_CANCELED must be set if this bit is set.
        //
        // @const DSF_NEEDS_EVENT
        // The source has started to delete its unotes due to cancelation, but
        // couldn't finish its unregistration and is waiting for some asynchronous
        // events to fire to be able to.
        //
        // This flag prevents spurious wakeups when the source state machine
        // requires specific events to make progress. Events that are likely
        // to unblock a source state machine pass DISPATCH_WAKEUP_EVENT
        // which neuters the effect of DSF_NEEDS_EVENT.
        //
        // @const DSF_DELETED
        // The source can now only be used as a queue and is not allowed to register
        // any new unote anymore. All the previously registered unotes are inactive
        // and their knote is gone. However, these previously registered unotes may
        // still be in the process of delivering their last event.
        //
        // Sources have an internal refcount taken always while they use eventing
        // subsystems which is consumed when this bit is set.
        //
        DSF_STRICT              = 0x04000000,
        DSF_WLH_CHANGED         = 0x08000000,
        DSF_CANCELED            = 0x10000000,
        DSF_CANCEL_WAITER       = 0x20000000,
        DSF_NEEDS_EVENT         = 0x40000000,
        DSF_DELETED             = 0x80000000,
    
    #define DQF_FLAGS_MASK        ((dispatch_queue_flags_t)0xffff0000)
    #define DQF_WIDTH_MASK        ((dispatch_queue_flags_t)0x0000ffff)
    #define DQF_WIDTH(n)          ((dispatch_queue_flags_t)(uint16_t)(n))
    );
    

    So for the main queue, dq_atomic_flags = DQF_THREAD_BOUND | DQF_WIDTH(1) = 0x00040001: the high 16 bits carry the flags (here DQF_THREAD_BOUND, meaning the queue is bound to a thread) and the low 16 bits carry the width (here 1). From the comments we can see that this field describes properties of the queue such as whether it is thread-bound, acts as a barrier, is targeted by another object, and so on.
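
    A small snippet of my own, using the constants quoted above, makes the bit layout explicit:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint32_t DQF_THREAD_BOUND = 0x00040000; // from the DISPATCH_OPTIONS list above
        uint32_t dqf_width_1      = 0x00000001; // DQF_WIDTH(1): width lives in the low 16 bits
        uint32_t dq_atomic_flags  = DQF_THREAD_BOUND | dqf_width_1;
        printf("flags=0x%04x width=%u\n",
               (unsigned)(dq_atomic_flags >> 16),       // 0x0004 -> DQF_THREAD_BOUND
               (unsigned)(dq_atomic_flags & 0x0000ffffu)); // 1 -> the DQF_WIDTH_MASK part
        return 0;
    }
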

    dq_serialnum: again, we explore this one by searching for its value. The result is below:

    [Screenshot: comment next to _dispatch_queue_serial_numbers]
    The comment explains that 0 is never used, 1 is the main queue, 4 through 15 are the global (root) queues, and custom queues are numbered from 16 onward.
    So dq_serialnum identifies which kind of queue this is, distinguishing the main queue, the global queues, and custom queues.
    II. Source Analysis of Queue Creation with dispatch_queue_create

    dispatch_queue_create source:

    dispatch_queue_t
    dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
    {
        return _dispatch_lane_create_with_target(label, attr,
                DISPATCH_TARGET_QUEUE_DEFAULT, true);
    }
    

    From this we land in _dispatch_lane_create_with_target, which is fairly long. Its source contains some helpful comments:

    static dispatch_queue_t
    _dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
            dispatch_queue_t tq, bool legacy)
    {
        dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa);
    
        //
        // Step 1: Normalize arguments (qos, overcommit, tq)
        //
    
        dispatch_qos_t qos = dqai.dqai_qos;
    // ... many lines of code omitted here ...
        //
        // Step 2: Initialize the queue
        //
    
        if (legacy) {
            // if any of these attributes is specified, use non legacy classes
            if (dqai.dqai_inactive || dqai.dqai_autorelease_frequency) {
                legacy = false;
            }
        }
    
        const void *vtable;
        dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0;
        if (dqai.dqai_concurrent) {
            vtable = DISPATCH_VTABLE(queue_concurrent);
        } else {
            vtable = DISPATCH_VTABLE(queue_serial);
        }
        switch (dqai.dqai_autorelease_frequency) {
        case DISPATCH_AUTORELEASE_FREQUENCY_NEVER:
            dqf |= DQF_AUTORELEASE_NEVER;
            break;
        case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM:
            dqf |= DQF_AUTORELEASE_ALWAYS;
            break;
        }
        if (label) {
            const char *tmp = _dispatch_strdup_if_mutable(label);
            if (tmp != label) {
                dqf |= DQF_LABEL_NEEDS_FREE;
                label = tmp;
            }
        }
    
        dispatch_lane_t dq = _dispatch_object_alloc(vtable,
                sizeof(struct dispatch_lane_s));
        _dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ?
                DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
                (dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0));
    
        dq->dq_label = label;
        dq->dq_priority = _dispatch_priority_make((dispatch_qos_t)dqai.dqai_qos,
                dqai.dqai_relpri);
        if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
            dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
        }
        if (!dqai.dqai_inactive) {
            _dispatch_queue_priority_inherit_from_target(dq, tq);
            _dispatch_lane_inherit_wlh_from_target(dq, tq);
        }
        _dispatch_retain(tq);
        dq->do_targetq = tq;
        _dispatch_object_debug(dq, "%s", __func__);
        return _dispatch_trace_queue_create(dq)._dq;
    }
    

    Step 1: Normalize arguments (qos, overcommit, tq), i.e. normalize the parameters.
    Step 2: Initialize the queue; this second step is where the queue is actually created.
    From the source, the key call for creating the queue is:
    dispatch_lane_t dq = _dispatch_object_alloc(vtable, sizeof(struct dispatch_lane_s));
    Stepping into _dispatch_object_alloc:

    void *
    _dispatch_object_alloc(const void *vtable, size_t size)
    {
    #if OS_OBJECT_HAVE_OBJC1
        const struct dispatch_object_vtable_s *_vtable = vtable;
        dispatch_object_t dou;
        dou._os_obj = _os_object_alloc_realized(_vtable->_os_obj_objc_isa, size);
        dou._do->do_vtable = vtable;
        return dou._do;
    #else
        return _os_object_alloc_realized(vtable, size);
    #endif
    }
    

    This method allocates the memory. Going one level deeper we reach _os_object_alloc_realized, which allocates size bytes and binds the class (cls) to obj->os_obj_isa.

    The subsequent _dispatch_queue_init is implemented as follows:

    static inline dispatch_queue_class_t
    _dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf,
            uint16_t width, uint64_t initial_state_bits)
    {
        uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);
        dispatch_queue_t dq = dqu._dq;
    
        dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK |
                DISPATCH_QUEUE_INACTIVE)) == 0);
    
        if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) {
            dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_lane_resume
            if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) {
                dq->do_ref_cnt++; // released when DSF_DELETED is set
            }
        }
    
        dq_state |= initial_state_bits;
        dq->do_next = DISPATCH_OBJECT_LISTLESS;
        dqf |= DQF_WIDTH(width);
        os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
        dq->dq_state = dq_state;
        dq->dq_serialnum =
                os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
        return dqu;
    }
    

    This method takes four parameters; the third is width. Going back to the call site, we can see what gets passed in:
    _dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ? DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER | (dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0));
    The third argument is dqai.dqai_concurrent ? DISPATCH_QUEUE_WIDTH_MAX : 1. As for dqai.dqai_concurrent, looking back at _dispatch_lane_create_with_target, there is this passage:

    const void *vtable;
        dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0;
        if (dqai.dqai_concurrent) {
            vtable = DISPATCH_VTABLE(queue_concurrent);
        } else {
            vtable = DISPATCH_VTABLE(queue_serial);
        }
    

    Clearly dqai_concurrent indicates whether the queue is concurrent or serial, so dqai.dqai_concurrent ? DISPATCH_QUEUE_WIDTH_MAX : 1 yields DISPATCH_QUEUE_WIDTH_MAX for a concurrent queue and 1 for a serial queue, which is exactly the DQF_WIDTH(width) we saw earlier, e.g. DQF_WIDTH(1). So this value is what distinguishes serial from concurrent.
    Furthermore, DISPATCH_QUEUE_WIDTH_MAX = 0x1000ull - 2 = 0xFFE, while a serial queue gets width 1. The width presumably describes how many tasks may run at once, which fits: a serial queue allows one task at a time, while a concurrent queue is effectively unlimited.

    Reading on, we see assignments for dq_label, priority and so on, and finally tq is retained and stored as the target queue. What is tq? Looking further up in the code, tq is obtained here:

        if (!tq) {
            tq = _dispatch_get_root_queue(
                    qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
                    overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq;
            if (unlikely(!tq)) {
                DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
            }
        }
    

    So we are back to _dispatch_get_root_queue from the very beginning: the target is taken from the root queues (the table of serial-numbered queues we looked at earlier).
    The rough flow of dispatch_queue_create is: _dispatch_lane_create_with_target first normalizes the parameters; then, based on tq, it fetches the matching root queue and its attributes; it allocates memory and produces an _os_object_t-based object; it initializes dq_label, the serial/concurrent width, the priority and other fields; and finally it assigns the attributes derived from the requested queue type to the newly created queue. A small usage sketch follows below.
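
    To connect this back to user code, here is a minimal usage sketch (the labels are made up) for the API this create path serves; dispatch_queue_get_label reads back the dq_label that the create path just stored:

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void) {
        // Serial queue: width 1 in _dispatch_queue_init.
        dispatch_queue_t serial = dispatch_queue_create("com.example.serial", DISPATCH_QUEUE_SERIAL);
        // Concurrent queue: width DISPATCH_QUEUE_WIDTH_MAX.
        dispatch_queue_t conc = dispatch_queue_create("com.example.concurrent", DISPATCH_QUEUE_CONCURRENT);

        printf("%s\n", dispatch_queue_get_label(serial));                    // com.example.serial
        printf("%s\n", dispatch_queue_get_label(conc));                      // com.example.concurrent
        printf("%s\n", dispatch_queue_get_label(dispatch_get_main_queue())); // com.apple.main-thread

        dispatch_release(serial); // only needed when building without ARC; omit under ARC
        dispatch_release(conc);
        return 0;
    }
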

    III. GCD's Underlying Inheritance Chain

    When we created a queue above, the return type was dispatch_queue_t. Let's look at its definition in the source:

    typedef struct dispatch_queue_s *dispatch_queue_t;
    
    struct dispatch_queue_s {
        DISPATCH_QUEUE_CLASS_HEADER(queue, void *__dq_opaque1);
        /* 32bit hole on LP64 */
    } DISPATCH_ATOMIC64_ALIGN;
    
    #define _DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__) \
        DISPATCH_OBJECT_HEADER(x); \
        DISPATCH_UNION_LE(uint64_t volatile dq_state, \
                dispatch_lock dq_state_lock, \
                uint32_t dq_state_bits \
        ); \
    
    #define _DISPATCH_OBJECT_HEADER(x) \
        struct _os_object_s _as_os_obj[0]; \
        OS_OBJECT_STRUCT_HEADER(dispatch_##x); \
        struct dispatch_##x##_s *volatile do_next; \
        struct dispatch_queue_s *do_targetq; \
        void *do_ctxt; \
        union { \
            dispatch_function_t DISPATCH_FUNCTION_POINTER do_finalizer; \
            void *do_introspection_ctxt; \
        }
    

    dispatch_queue_t points to dispatch_queue_s -> DISPATCH_QUEUE_CLASS_HEADER -> DISPATCH_OBJECT_HEADER and so on, which we already traced in part one: at the bottom sits an isa. So a queue is, at its core, also built around an isa; the layer wrapped around the isa is _OS_OBJECT_HEADER, i.e. the struct _os_object_s, and typedef struct _os_object_s *_os_object_t;.
    This mirrors the object inheritance chain we studied before, Class -> objc_class -> objc_object:
    dispatch_queue_t -> dispatch_queue_s -> _os_object_s is the analogous chain.
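
    In plain C this kind of "inheritance" is nothing more than layout compatibility: the more specific struct begins with the fields of the more general one, so a pointer to it can be reinterpreted as the base type. A toy sketch with invented names (not the real libdispatch structs):

    #include <stdio.h>

    // Base "object" layout, analogous to _os_object_s (_OS_OBJECT_HEADER).
    struct base_object_s { void *isa; int ref_cnt; int xref_cnt; };
    // A "queue" begins with the base layout, analogous to dispatch_queue_s.
    struct queue_like_s { struct base_object_s as_os_obj; const char *dq_label; };

    int main(void) {
        struct queue_like_s q = { .dq_label = "demo" };
        // Because the base layout sits first, this cast is well defined.
        struct base_object_s *obj = (struct base_object_s *)&q;
        obj->ref_cnt = 1;
        printf("%s ref_cnt=%d\n", q.dq_label, q.as_os_obj.ref_cnt); // demo ref_cnt=1
        return 0;
    }
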

    IV. Source Analysis of the Synchronous Function dispatch_sync

    dispatch_sync

    void
    dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
    {
        uintptr_t dc_flags = DC_FLAG_BLOCK;
        if (unlikely(_dispatch_block_has_private_data(work))) {
            return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
        }
        _dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
    }
    
    • The work parameter passed in is the task block; tracing work downward leads us to the code that eventually invokes it.
      There is a check here: depending on the result of the unlikely() condition, one of two paths is taken, either _dispatch_sync_block_with_privdata or _dispatch_sync_f. Let's look at _dispatch_sync_f first.

    • _dispatch_sync_f

    static void
    _dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
            uintptr_t dc_flags)
    {
        _dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
    }
    
    static inline void
    _dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
            dispatch_function_t func, uintptr_t dc_flags)
    {
        if (likely(dq->dq_width == 1)) {
            return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
        }
    
        if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
            DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
        }
    
        dispatch_lane_t dl = upcast(dq)._dl;
        // Global concurrent queues and queues bound to non-dispatch threads
        // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
        if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
            return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
        }
    
        if (unlikely(dq->do_targetq->do_targetq)) {
            return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
        }
        _dispatch_introspection_sync_begin(dl);
        _dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
                _dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
    }
    

    _dispatch_sync_f_inline contains quite a few branches; which one is actually taken can be determined with symbolic breakpoints:

    [Screenshot: symbolic breakpoints]
    Stepping in, we arrive at _dispatch_sync_f_slow:
    DISPATCH_NOINLINE
    static void
    _dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
            dispatch_function_t func, uintptr_t top_dc_flags,
            dispatch_queue_class_t dqu, uintptr_t dc_flags)
    {
        dispatch_queue_t top_dq = top_dqu._dq;
        dispatch_queue_t dq = dqu._dq;
        if (unlikely(!dq->do_targetq)) {
            return _dispatch_sync_function_invoke(dq, ctxt, func);
        }
    
        pthread_priority_t pp = _dispatch_get_priority();
        struct dispatch_sync_context_s dsc = {
            .dc_flags    = DC_FLAG_SYNC_WAITER | dc_flags,
            .dc_func     = _dispatch_async_and_wait_invoke,
            .dc_ctxt     = &dsc,
            .dc_other    = top_dq,
            .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
            .dc_voucher  = _voucher_get(),
            .dsc_func    = func,
            .dsc_ctxt    = ctxt,
            .dsc_waiter  = _dispatch_tid_self(),
        };
    
        _dispatch_trace_item_push(top_dq, &dsc);
        __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);
    
        if (dsc.dsc_func == NULL) {
            // dsc_func being cleared means that the block ran on another thread ie.
            // case (2) as listed in _dispatch_async_and_wait_f_slow.
            dispatch_queue_t stop_dq = dsc.dc_other;
            return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
        }
    
        _dispatch_introspection_sync_begin(top_dq);
        _dispatch_trace_item_pop(top_dq, &dsc);
        _dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags
                DISPATCH_TRACE_ARG(&dsc));
    }
    

    Setting more symbolic breakpoints to see which branch is taken, we end up in _dispatch_sync_function_invoke; following it further:

    static void
    _dispatch_sync_function_invoke(dispatch_queue_class_t dq, void *ctxt,
            dispatch_function_t func)
    {
        _dispatch_sync_function_invoke_inline(dq, ctxt, func);
    }
    
    static inline void
    _dispatch_sync_function_invoke_inline(dispatch_queue_class_t dq, void *ctxt,
            dispatch_function_t func)
    {
        dispatch_thread_frame_s dtf;
        _dispatch_thread_frame_push(&dtf, dq);
        _dispatch_client_callout(ctxt, func);
        _dispatch_perfmon_workitem_inc();
        _dispatch_thread_frame_pop(&dtf);
    }
    

    I tried dispatch_sync on both a serial queue and a concurrent queue: the serial queue goes through _dispatch_sync_f_slow, while the concurrent queue goes through _dispatch_sync_invoke_and_complete, and both ultimately reach _dispatch_sync_function_invoke_inline.
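
    This is easy to reproduce yourself. A minimal sketch (queue labels invented; compile as Objective-C or with -fblocks): with symbolic breakpoints on the two functions named above, the serial call stops in _dispatch_sync_f_slow and the concurrent call in _dispatch_sync_invoke_and_complete.

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void) {
        dispatch_queue_t serial = dispatch_queue_create("com.example.sync.serial", DISPATCH_QUEUE_SERIAL);
        dispatch_queue_t conc   = dispatch_queue_create("com.example.sync.conc", DISPATCH_QUEUE_CONCURRENT);

        // dq_width == 1, so _dispatch_sync_f_inline takes the barrier path
        // (observed above to end up in _dispatch_sync_f_slow).
        dispatch_sync(serial, ^{ printf("sync on serial queue\n"); });

        // dq_width > 1 and the width reservation succeeds, so the
        // _dispatch_sync_invoke_and_complete path is taken.
        dispatch_sync(conc, ^{ printf("sync on concurrent queue\n"); });
        return 0;
    }
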

    • _dispatch_thread_frame_push: task enqueue (push the thread frame)
    • _dispatch_client_callout: task execution
    • _dispatch_thread_frame_pop: task dequeue (pop the thread frame)
      _dispatch_client_callout
    void
    _dispatch_client_callout(void *ctxt, dispatch_function_t f)
    {
        _dispatch_get_tsd_base();
        void *u = _dispatch_get_unwind_tsd();
        if (likely(!u)) return f(ctxt);
        _dispatch_set_unwind_tsd(NULL);
        f(ctxt);
        _dispatch_free_unwind_tsd();
        _dispatch_set_unwind_tsd(u);
    }
    

    The function f executed here is the block callback passed in from the outside.

    • Deadlock
      When a deadlock occurs, the crash is reported inside _dispatch_sync_f_slow, which in turn has called __DISPATCH_WAIT_FOR_QUEUE__.
      [Screenshot: crash located in _dispatch_sync_f_slow]

    __DISPATCH_WAIT_FOR_QUEUE__

    static void
    __DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
    {
        uint64_t dq_state = _dispatch_wait_prepare(dq);
        if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
            DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
                    "dispatch_sync called on queue "
                    "already owned by current thread");
        }
    
        // Blocks submitted to the main thread MUST run on the main thread, and
        // dispatch_async_and_wait also executes on the remote context rather than
        // the current thread.
        //
        // For both these cases we need to save the frame linkage for the sake of
        // _dispatch_async_and_wait_invoke
        _dispatch_thread_frame_save_state(&dsc->dsc_dtf);
    
        if (_dq_state_is_suspended(dq_state) ||
                _dq_state_is_base_anon(dq_state)) {
            dsc->dc_data = DISPATCH_WLH_ANON;
        } else if (_dq_state_is_base_wlh(dq_state)) {
            dsc->dc_data = (dispatch_wlh_t)dq;
        } else {
            _dispatch_wait_compute_wlh(upcast(dq)._dl, dsc);
        }
    
        if (dsc->dc_data == DISPATCH_WLH_ANON) {
            dsc->dsc_override_qos_floor = dsc->dsc_override_qos =
                    (uint8_t)_dispatch_get_basepri_override_qos_floor();
            _dispatch_thread_event_init(&dsc->dsc_event);
        }
        dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority));
        _dispatch_trace_runtime_event(sync_wait, dq, 0);
        if (dsc->dc_data == DISPATCH_WLH_ANON) {
            _dispatch_thread_event_wait(&dsc->dsc_event); // acquire
        } else if (!dsc->dsc_wlh_self_wakeup) {
            _dispatch_event_loop_wait_for_ownership(dsc);
        }
        if (dsc->dc_data == DISPATCH_WLH_ANON) {
            _dispatch_thread_event_destroy(&dsc->dsc_event);
            // If _dispatch_sync_waiter_wake() gave this thread an override,
            // ensure that the root queue sees it.
            if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) {
                _dispatch_set_basepri_override_qos(dsc->dsc_override_qos);
            }
        }
    }
    

    We can see the crash message for the deadlock case:

    "dispatch_sync called on queue "
    "already owned by current thread"
    i.e. a synchronous task was dispatched to a queue that is already owned (being drained) by the current thread.
    The check that triggers it, unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter)), looks like this:

    static inline bool
    _dq_state_drain_locked_by(uint64_t dq_state, dispatch_tid tid)
    {
        return _dispatch_lock_is_locked_by((dispatch_lock)dq_state, tid);
    }
    
    static inline bool
    _dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
    {
        // equivalent to _dispatch_lock_owner(lock_value) == tid
        return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
    }
    
    #define DLOCK_OWNER_MASK            ((dispatch_lock)0xfffffffc)  //c = 1100
    

    Putting this source together, the check works like this:
    lock_value -> comes from dq_state, the queue's state (its drain lock, which records the owning thread);
    tid -> is the thread id of the waiter.
    The lock value is XORed with the thread id and then ANDed with DLOCK_OWNER_MASK (0xfffffffc, i.e. everything except the low two bits). XOR yields 0 where bits are equal, so a result of 0 means the owner bits of the lock match the waiter's tid.
    In other words, the check asks whether the thread that currently owns (is draining) the queue is the very thread that is about to wait on it. If so, we have a contradiction: the thread would be waiting for a queue that can only make progress on that same thread, so libdispatch deliberately crashes with the message above instead of hanging forever.
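
    The classic way to hit this path is a nested dispatch_sync onto the same serial queue. A minimal sketch (label invented; compile with blocks support):

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void) {
        dispatch_queue_t q = dispatch_queue_create("com.example.deadlock", DISPATCH_QUEUE_SERIAL);
        dispatch_sync(q, ^{
            printf("outer block: this thread now owns the queue's drain lock\n");
            // The inner dispatch_sync finds the drain lock owned by the current
            // thread, so __DISPATCH_WAIT_FOR_QUEUE__ crashes with
            // "dispatch_sync called on queue already owned by current thread".
            dispatch_sync(q, ^{ printf("never reached\n"); });
        });
        return 0;
    }
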

    V. Source Analysis of the Asynchronous Function dispatch_async
    void
    dispatch_async(dispatch_queue_t dq, dispatch_block_t work)
    {
        dispatch_continuation_t dc = _dispatch_continuation_alloc();
        uintptr_t dc_flags = DC_FLAG_CONSUME;
        dispatch_qos_t qos;
    
        qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
        _dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
    }
    

    The task is wrapped into a dispatch_continuation_t and its qos is computed by _dispatch_continuation_init; it is then handed off via _dispatch_continuation_async.

    _dispatch_continuation_async dispatches the task, i.e. performs the push:

    static inline void
    _dispatch_continuation_async(dispatch_queue_class_t dqu,
            dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
    {
    #if DISPATCH_INTROSPECTION
        if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
            _dispatch_trace_item_push(dqu, dc);
        }
    #else
        (void)dc_flags;
    #endif
        return dx_push(dqu._dq, dc, qos);
    }
    
    //dx_push
    #define dx_push(x, y, z) dx_vtable(x)->dq_push(x, y, z)
    

    dx_push essentially calls the dq_push entry of the object's dx_vtable. Looking further, there are quite a few dq_push implementations:

    [Screenshot: the various dq_push implementations]
    There are versions for serial, concurrent, global queues and more. Taking the concurrent one as an example, we look at _dispatch_lane_concurrent_push:
    void
    _dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou,
            dispatch_qos_t qos)
    {
        // <rdar://problem/24738102&24743140> reserving non barrier width
        // doesn't fail if only the ENQUEUED bit is set (unlike its barrier
        // width equivalent), so we have to check that this thread hasn't
        // enqueued anything ahead of this call or we can break ordering
        if (dq->dq_items_tail == NULL &&
                !_dispatch_object_is_waiter(dou) &&
                !_dispatch_object_is_barrier(dou) &&
                _dispatch_queue_try_acquire_async(dq)) {
            return _dispatch_continuation_redirect_push(dq, dou, qos);
        }
    
        _dispatch_lane_push(dq, dou, qos);
    }
    

    Debugging a concurrent queue with symbolic breakpoints, we first enter _dispatch_continuation_redirect_push:

    static void
    _dispatch_continuation_redirect_push(dispatch_lane_t dl,
            dispatch_object_t dou, dispatch_qos_t qos)
    {
        if (likely(!_dispatch_object_is_redirection(dou))) {
            dou._dc = _dispatch_async_redirect_wrap(dl, dou);
        } else if (!dou._dc->dc_ctxt) {
            // find first queue in descending target queue order that has
            // an autorelease frequency set, and use that as the frequency for
            // this continuation.
            dou._dc->dc_ctxt = (void *)
            (uintptr_t)_dispatch_queue_autorelease_frequency(dl);
        }
    
        dispatch_queue_t dq = dl->do_targetq;
        if (!qos) qos = _dispatch_priority_qos(dq->dq_priority);
        dx_push(dq, dou, qos);
    }
    

    This method ends with another dx_push, and at this point the queue type observed in the debugger is queue_pthread_root, so the corresponding dq_push entry is _dispatch_root_queue_push. Let's step into its implementation:

    void
    _dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou,
            dispatch_qos_t qos)
    {
    #if DISPATCH_USE_KEVENT_WORKQUEUE
        dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
        if (unlikely(ddi && ddi->ddi_can_stash)) {
            dispatch_object_t old_dou = ddi->ddi_stashed_dou;
            dispatch_priority_t rq_overcommit;
            rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
    
            if (likely(!old_dou._do || rq_overcommit)) {
                dispatch_queue_global_t old_rq = ddi->ddi_stashed_rq;
                dispatch_qos_t old_qos = ddi->ddi_stashed_qos;
                ddi->ddi_stashed_rq = rq;
                ddi->ddi_stashed_dou = dou;
                ddi->ddi_stashed_qos = qos;
                _dispatch_debug("deferring item %p, rq %p, qos %d",
                        dou._do, rq, qos);
                if (rq_overcommit) {
                    ddi->ddi_can_stash = false;
                }
                if (likely(!old_dou._do)) {
                    return;
                }
                // push the previously stashed item
                qos = old_qos;
                rq = old_rq;
                dou = old_dou;
            }
        }
    #endif
    #if HAVE_PTHREAD_WORKQUEUE_QOS
        if (_dispatch_root_queue_push_needs_override(rq, qos)) {
            return _dispatch_root_queue_push_override(rq, dou, qos);
        }
    #else
        (void)qos;
    #endif
        _dispatch_root_queue_push_inline(rq, dou, dou, 1);
    }
    

    Continuing with symbolic breakpoints, we reach _dispatch_root_queue_push_override, and then go further: _dispatch_root_queue_push_inline -> _dispatch_root_queue_poke -> _dispatch_root_queue_poke_slow.
    That last method is again quite long; inside it, worker threads are created via pthread_create.

    _dispatch_continuation_init wraps the task:

    static inline dispatch_qos_t
    _dispatch_continuation_init(dispatch_continuation_t dc,
            dispatch_queue_class_t dqu, dispatch_block_t work,
            dispatch_block_flags_t flags, uintptr_t dc_flags)
    {
        void *ctxt = _dispatch_Block_copy(work);
    
        dc_flags |= DC_FLAG_BLOCK | DC_FLAG_ALLOCATED;
        if (unlikely(_dispatch_block_has_private_data(work))) {
            dc->dc_flags = dc_flags;
            dc->dc_ctxt = ctxt;
            // will initialize all fields but requires dc_flags & dc_ctxt to be set
            return _dispatch_continuation_init_slow(dc, dqu, flags);
        }
    
        dispatch_function_t func = _dispatch_Block_invoke(work);
        if (dc_flags & DC_FLAG_CONSUME) {
            func = _dispatch_call_block_and_release;
        }
        return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags);
    }
    

    It initializes the continuation from the block (copying it and choosing the invoke function).

    A quick summary of the flow (see the usage sketch below):
    1. _dispatch_continuation_init wraps the task and its priority into a continuation;
    2. dx_push is called, which resolves to the queue's own dq_push function; for a concurrent queue that is _dispatch_lane_concurrent_push, which ultimately calls _dispatch_root_queue_push;
    3. The calls continue down to _dispatch_root_queue_poke_slow, where threads are created with pthread_create.
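
    That whole chain is what a single dispatch_async call sets in motion. A minimal sketch to exercise it (compile with blocks support):

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void) {
        // dispatch_async on a global (root) queue: the block is wrapped into a
        // continuation, pushed via dx_push / _dispatch_root_queue_push, and if no
        // idle worker is available _dispatch_root_queue_poke_slow brings one up.
        dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
        dispatch_semaphore_t done = dispatch_semaphore_create(0);

        dispatch_async(q, ^{
            printf("running asynchronously on a root-queue worker thread\n");
            dispatch_semaphore_signal(done);
        });

        dispatch_semaphore_wait(done, DISPATCH_TIME_FOREVER); // keep main alive until the block runs
        return 0;
    }
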

    VI. Source Analysis of the Singleton dispatch_once_t

    The source:

    void
    dispatch_once(dispatch_once_t *val, dispatch_block_t block)
    {
        dispatch_once_f(val, block, _dispatch_Block_invoke(block));
    }
    
    void
    dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
    {
        // here val is cast to dispatch_once_gate_t
        dispatch_once_gate_t l = (dispatch_once_gate_t)val;
    
    #if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER
        uintptr_t v = os_atomic_load(&l->dgo_once, acquire);
        if (likely(v == DLOCK_ONCE_DONE)) {
            return;
        }
    #if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
        if (likely(DISPATCH_ONCE_IS_GEN(v))) {
            return _dispatch_once_mark_done_if_quiesced(l, v);
        }
    #endif
    #endif
        if (_dispatch_once_gate_tryenter(l)) {
            return _dispatch_once_callout(l, ctxt, func);
        }
        return _dispatch_once_wait(l);
    }
    

    There are three cases here:
    1. If the once token is already marked done (DLOCK_ONCE_DONE), return immediately.
    2. If this is the first call (the gate is acquired successfully), run _dispatch_once_callout.
    3. If another thread is currently executing the block, wait in _dispatch_once_wait.

    Stepping into _dispatch_once_gate_tryenter:

    static inline bool
    _dispatch_once_gate_tryenter(dispatch_once_gate_t l)
    {
        return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
                (uintptr_t)_dispatch_lock_value_for_self(), relaxed);
    }
    
    #define os_atomic_cmpxchg(p, e, v, m) \
            ({ _os_atomic_basetypeof(p) _r = (e); \
            atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \
            &_r, v, memory_order_##m, memory_order_relaxed); })
    

    It atomically checks whether the gate is still DLOCK_ONCE_UNLOCKED: if so, it swaps in the current thread's lock value and the caller proceeds to the callout; if the gate is already taken, the caller has to go down the wait path.
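
    The compare-and-swap that os_atomic_cmpxchg expands to can be sketched with standard C11 atomics. This is a toy gate of my own, not the libdispatch types:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic(unsigned long) gate = 0;   // 0 plays the role of DLOCK_ONCE_UNLOCKED

    // Try to move the gate from "unlocked" to a value identifying this caller.
    // Exactly one caller can win this race; everyone else gets false.
    static bool try_enter(unsigned long self_value) {
        unsigned long expected = 0;
        return atomic_compare_exchange_strong(&gate, &expected, self_value);
    }

    int main(void) {
        printf("%d\n", try_enter(0x1001)); // 1: gate acquired, run the once block
        printf("%d\n", try_enter(0x2002)); // 0: gate already taken, would have to wait
        return 0;
    }
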

    The implementation of _dispatch_once_callout:

    static void
    _dispatch_once_callout(dispatch_once_gate_t l, void *ctxt,
            dispatch_function_t func)
    {
        _dispatch_client_callout(ctxt, func);
        _dispatch_once_gate_broadcast(l);
    }
    

    _dispatch_once_gate_broadcast:

    static inline void
    _dispatch_once_gate_broadcast(dispatch_once_gate_t l)
    {
        dispatch_lock value_self = _dispatch_lock_value_for_self();
        uintptr_t v;
    #if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
        v = _dispatch_once_mark_quiescing(l);
    #else
        v = _dispatch_once_mark_done(l);
    #endif
        if (likely((dispatch_lock)v == value_self)) return;
        _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)v);
    }
    

    It handles the lock and marks the once gate as done, waking up any waiters.

    A short summary of how the singleton works (a usage sketch follows below):
    dispatch_once_f casts the onceToken to dispatch_once_gate_t, which acts as a lock (gate). The gate l is read with os_atomic_load: if it is still DLOCK_ONCE_UNLOCKED, this caller may proceed, performs the callout, and then broadcasts and marks the gate as done.
    If it is already DLOCK_ONCE_DONE, the call returns immediately.
    If it is in the locked state, another thread is currently executing the block, so the caller waits until that work completes and the gate is released.
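
    The familiar singleton pattern exercises exactly this gate logic. A minimal sketch (names invented; compile with blocks support):

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    static dispatch_once_t onceToken;   // starts at 0, i.e. DLOCK_ONCE_UNLOCKED
    static int *shared;

    static int *shared_instance(void) {
        dispatch_once(&onceToken, ^{
            // Runs exactly once, even with concurrent callers; later callers
            // either wait for it or see DLOCK_ONCE_DONE and return immediately.
            static int value = 42;
            shared = &value;
        });
        return shared;
    }

    int main(void) {
        printf("%d %d\n", *shared_instance(), *shared_instance()); // 42 42
        return 0;
    }
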

    PS: this code is genuinely hard, and I'm still a bit fuzzy on parts of it myself. For now this is a rough pass that leans on quite a few other blog posts; I'll revisit the source and update this once I understand it better. (。・_・。)ノ
