1. Introduction
In the previous post we looked at GCD's core structures; in this one we focus on how queues are built. Queues are the part of GCD we touch most often in day-to-day use.
2. Key Concepts
2.1 The Main Queue and the Main Thread
We hear these two terms all the time, and it is easy to treat them as the same thing. The main queue and the main thread are related, but they are different concepts. Simply put, the main queue is a serial queue whose tasks run on the main thread, created for us automatically by the system. The main thread, in turn, can also execute tasks from queues other than the main queue.
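A small sketch of the distinction (not from the article; the queue label is made up). When run from the main thread, dispatch_sync typically executes the block right on the calling thread, so the block below usually runs on the main thread even though the current queue is a custom serial queue rather than the main queue:
dispatch_queue_t queue = dispatch_queue_create("com.example.serial", DISPATCH_QUEUE_SERIAL); // hypothetical label
dispatch_sync(queue, ^{
    // usually YES: dispatch_sync tends to run the block on the calling (main) thread,
    // yet the queue being drained is "com.example.serial", not the main queue
    NSLog(@"on main thread? %d", [NSThread isMainThread]);
});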
2.2 Queues and Threads
A figure from Concurrent Programming: APIs and Challenges gives a very intuitive picture of the relationship between GCD queues and threads:
(Figure: GCDandThread@2x.png) Several queues, serial or concurrent, can have their work executed on the same thread, and tasks are submitted to them either synchronously or asynchronously.
3. Definition of a Queue
dispatch_queue_s is the structure that represents a queue, and it is arguably the structure we deal with most in GCD.
struct dispatch_queue_vtable_s {
DISPATCH_VTABLE_HEADER(dispatch_queue_s);
};
#define DISPATCH_QUEUE_MIN_LABEL_SIZE 64
#ifdef __LP64__
#define DISPATCH_QUEUE_CACHELINE_PAD 32
#else
#define DISPATCH_QUEUE_CACHELINE_PAD 8
#endif
#define DISPATCH_QUEUE_HEADER \
uint32_t volatile dq_running; \
uint32_t dq_width; \
struct dispatch_object_s *volatile dq_items_tail; \
struct dispatch_object_s *volatile dq_items_head; \
unsigned long dq_serialnum; \
dispatch_queue_t dq_specific_q;
struct dispatch_queue_s {
DISPATCH_STRUCT_HEADER(dispatch_queue_s, dispatch_queue_vtable_s);
DISPATCH_QUEUE_HEADER;
char dq_label[DISPATCH_QUEUE_MIN_LABEL_SIZE]; // must be last
char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]; // for static queues only
};
GCD makes heavy use of macros, which makes the code harder to read, so let's expand the macros into the fields they define:
struct dispatch_queue_s {
//Part 1: DISPATCH_STRUCT_HEADER(dispatch_queue_s, dispatch_queue_vtable_s)
const struct dispatch_queue_vtable_s *do_vtable; //operation table for dispatch_queue_s: a dispatch_queue_vtable_s structure
struct dispatch_queue_s *volatile do_next; //next pointer of the linked list
unsigned int do_ref_cnt; //reference count
unsigned int do_xref_cnt; //external reference count
unsigned int do_suspend_cnt; //suspend count; e.g. for delayed work, the timer handler clears it when the deadline arrives and then wakes the queue
struct dispatch_queue_s *do_targetq; //target queue: GCD lets one queue have its tasks executed through another queue
void *do_ctxt; //context, used to store thread-pool related data such as the semaphore for parking/waking threads and the pool size
void *do_finalizer;
//Part 2: DISPATCH_QUEUE_HEADER
uint32_t volatile dq_running; //whether the queue is running
uint32_t dq_width; //maximum concurrency: 1 for the main queue and serial queues
struct dispatch_object_s *volatile dq_items_tail; //tail of the item list
struct dispatch_object_s *volatile dq_items_head; //head of the item list
unsigned long dq_serialnum; //serial number of the queue
dispatch_queue_t dq_specific_q; //queue used by the specific API
//Others:
char dq_label[DISPATCH_QUEUE_MIN_LABEL_SIZE]; // must be last; the queue label must be shorter than 64 characters
char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]; // for static queues only
};
4. Types of Queues
Queues can be divided into four types: the main queue, the manager queue, custom queues, and the global queues.
4.1 The Main Queue
During development we get the main queue with dispatch_get_main_queue. Its definition:
#define dispatch_get_main_queue() (&_dispatch_main_q)
struct dispatch_queue_s _dispatch_main_q = {
#if !DISPATCH_USE_RESOLVERS
.do_vtable = &_dispatch_queue_vtable,
.do_targetq = &_dispatch_root_queues[
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY],
#endif
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.dq_label = "com.apple.main-thread",
.dq_running = 1,
.dq_width = 1, //width of 1 shows that the main queue is a serial queue
.dq_serialnum = 1,
};
Next, let's look at its main fields:
1.do_vtable
const struct dispatch_queue_vtable_s _dispatch_queue_vtable = {
.do_type = DISPATCH_QUEUE_TYPE,
.do_kind = "queue",
.do_dispose = _dispatch_queue_dispose,
.do_invoke = NULL,
.do_probe = (void *)dummy_function_r0,
.do_debug = dispatch_queue_debug,
};
2.do_targetq
The main queue's target queue is the global queue "com.apple.root.default-overcommit-priority". To summarize ahead of time: every non-global queue (the main queue, plus the manager queue and custom queues discussed later) has its work pushed onto a global queue for processing, which is why do_targetq needs to be set.
[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = {
.do_vtable = &_dispatch_queue_root_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_ctxt = &_dispatch_root_queue_contexts[
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY],
.dq_label = "com.apple.root.default-overcommit-priority",
.dq_running = 2,
.dq_width = UINT32_MAX,
.dq_serialnum = 7,
},
3.do_ref_cnt and do_xref_cnt
As mentioned above, do_ref_cnt and do_xref_cnt are reference counts, and for the main queue both are DISPATCH_OBJECT_GLOBAL_REFCNT. Since they are reference counts they must be tied to GCD's memory management, so let's find the relevant code:
void
dispatch_retain(dispatch_object_t dou)
{
if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
return; // global object
}
...
}
void
_dispatch_retain(dispatch_object_t dou)
{
if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
return; // global object
}
...
}
void
dispatch_release(dispatch_object_t dou)
{
if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
return;
}
...
}
void
_dispatch_release(dispatch_object_t dou)
{
if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
return; // global object
}
...
}
From these functions we can see that the main queue's lifetime matches the application's; retain and release have no effect on it.
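In practice this means code like the following is harmless (a sketch that assumes a build where dispatch_retain/dispatch_release can be called directly, i.e. dispatch objects are not being managed by ARC via OS_OBJECT_USE_OBJC):
dispatch_queue_t mainQueue = dispatch_get_main_queue();
dispatch_retain(mainQueue);   // returns immediately: do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT
dispatch_release(mainQueue);  // likewise a no-op; the main queue is never deallocated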
4.2 The Manager Queue
_dispatch_mgr_q, the manager queue, is an internal GCD queue that is not exposed publicly. As the name suggests, it plays a management role; GCD timers, for example, run through it.
struct dispatch_queue_s _dispatch_mgr_q = {
.do_vtable = &_dispatch_queue_mgr_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_targetq = &_dispatch_root_queues[
DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY],
.dq_label = "com.apple.libdispatch-manager",
.dq_width = 1,
.dq_serialnum = 2,
};
1.do_vtable
static const struct dispatch_queue_vtable_s _dispatch_queue_mgr_vtable = {
.do_type = DISPATCH_QUEUE_MGR_TYPE,
.do_kind = "mgr-queue",
.do_invoke = _dispatch_mgr_thread,
.do_debug = dispatch_queue_debug,
.do_probe = _dispatch_mgr_wakeup,
};
2.do_targetq
The manager queue's target queue is the global queue "com.apple.root.high-overcommit-priority".
[DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = {
.do_vtable = &_dispatch_queue_root_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_ctxt = &_dispatch_root_queue_contexts[
DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY],
.dq_label = "com.apple.root.high-overcommit-priority",
.dq_running = 2,
.dq_width = UINT32_MAX,
.dq_serialnum = 9,
},
3.do_ref_cnt and do_xref_cnt
For the manager queue both values are also DISPATCH_OBJECT_GLOBAL_REFCNT, so its lifetime should match the main queue's.
4.3 Custom Queues
During development we create a custom queue with dispatch_queue_create(const char *label, dispatch_queue_attr_t attr). Its source:
dispatch_queue_t
dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
{
dispatch_queue_t dq;
size_t label_len;
if (!label) {
label = "";
}
label_len = strlen(label);
if (label_len < (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1)) {
label_len = (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1);
}
// XXX switch to malloc()
dq = calloc(1ul, sizeof(struct dispatch_queue_s) -
DISPATCH_QUEUE_MIN_LABEL_SIZE - DISPATCH_QUEUE_CACHELINE_PAD +
label_len + 1);
if (slowpath(!dq)) {
return dq;
}
//initialize the queue's default fields
_dispatch_queue_init(dq);
strcpy(dq->dq_label, label);
if (fastpath(!attr)) {
return dq;
}
if (fastpath(attr == DISPATCH_QUEUE_CONCURRENT)) {
dq->dq_width = UINT32_MAX;
dq->do_targetq = _dispatch_get_root_queue(0, false);
} else {
dispatch_debug_assert(!attr, "Invalid attribute");
}
return dq;
}
1.slowpath(x) and fastpath(x)
The two macros are defined as follows:
#define fastpath(x) ((typeof(x))__builtin_expect((long)(x), ~0l))
#define slowpath(x) ((typeof(x))__builtin_expect((long)(x), 0l))
fastpath(x) tells the compiler that x is usually non-zero, and slowpath(x) that x is usually zero, so the compiler can lay out the likely branch more efficiently.
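To see how the hints read at a call site, here is a made-up snippet (not from libdispatch) written in the same style:
#include <stdlib.h>

// Hypothetical helper: the failure branch is marked unlikely with slowpath,
// so the compiler keeps the success path as straight-line code.
static void *alloc_or_null(size_t size)
{
    void *buf = malloc(size);
    if (slowpath(!buf)) {
        return NULL;
    }
    return buf;
}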
2._dispatch_queue_init
static inline void
_dispatch_queue_init(dispatch_queue_t dq)
{
dq->do_vtable = &_dispatch_queue_vtable;
dq->do_next = DISPATCH_OBJECT_LISTLESS;
dq->do_ref_cnt = 1;
dq->do_xref_cnt = 1;
// Default target queue is overcommit!
dq->do_targetq = _dispatch_get_root_queue(0, true);
dq->dq_running = 0;
dq->dq_width = 1;
dq->dq_serialnum = dispatch_atomic_inc(&_dispatch_queue_serial_numbers) - 1;
}
_dispatch_queue_init sets a newly created queue up as a serial queue by default, with _dispatch_get_root_queue(0, true) as its target queue.
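For reference, here is how the two creation attributes map onto the fields set above (the queue labels are made up):
// Serial (attr == NULL): dq_width stays 1 and the target queue is the overcommit default-priority root queue.
dispatch_queue_t serialQueue = dispatch_queue_create("com.example.serial", NULL);
// Concurrent: dq_width becomes UINT32_MAX and the target queue switches to the non-overcommit root queue.
dispatch_queue_t concurrentQueue = dispatch_queue_create("com.example.concurrent", DISPATCH_QUEUE_CONCURRENT);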
3.do_targetq
The earlier explanation of this field was a bit thin: do_targetq is the target queue. Concurrent Programming: APIs and Challenges puts it like this:
While custom queues are a powerful abstraction, all blocks you schedule on them will ultimately trickle down to one of the system’s global queues and its thread pool(s).
So a custom queue looks more like a proxy in front of a global queue. When a custom queue is created, its target queue defaults to _dispatch_get_root_queue(0, true), where 0 stands for the priority DISPATCH_QUEUE_PRIORITY_DEFAULT and true stands for overcommit. An overcommit queue always starts a new thread when it executes a block, no matter how busy the system is, so that no single thread gets overloaded. For a custom concurrent queue, do_targetq is set to _dispatch_get_root_queue(0, false) instead.
Note that the main queue's target queue is also a global queue, and underneath a global queue is just an ordinary thread pool (covered in the global queue section).
4.dq_serialnum
dq_serialnum is obtained by atomically incrementing _dispatch_queue_serial_numbers, so custom queues are numbered starting from 12. Serial numbers 1 through 11 are reserved, as the following comment explains:
// skip zero
// 1 - main_q
// 2 - mgr_q
// 3 - _unused_
// 4,5,6,7,8,9,10,11 - global queues
// we use 'xadd' on Intel, so the initial value == next assigned
Number 1 is the main queue, 2 is the manager queue, 3 is currently unused, and 4 through 11 belong to the global queues.
4.4 Global Queues
We have mentioned global queues a lot; now let's see how they are defined.
dispatch_queue_t
dispatch_get_global_queue(long priority, unsigned long flags)
{
if (flags & ~DISPATCH_QUEUE_OVERCOMMIT) {
return NULL;
}
return _dispatch_get_root_queue(priority,
flags & DISPATCH_QUEUE_OVERCOMMIT);
}
static inline dispatch_queue_t
_dispatch_get_root_queue(long priority, bool overcommit)
{
if (overcommit) switch (priority) {
case DISPATCH_QUEUE_PRIORITY_LOW:
return &_dispatch_root_queues[
DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY];
case DISPATCH_QUEUE_PRIORITY_DEFAULT:
return &_dispatch_root_queues[
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY];
case DISPATCH_QUEUE_PRIORITY_HIGH:
return &_dispatch_root_queues[
DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY];
case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
return &_dispatch_root_queues[
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY];
}
switch (priority) {
case DISPATCH_QUEUE_PRIORITY_LOW:
return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY];
case DISPATCH_QUEUE_PRIORITY_DEFAULT:
return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY];
case DISPATCH_QUEUE_PRIORITY_HIGH:
return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY];
case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
return &_dispatch_root_queues[
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY];
default:
return NULL;
}
}
DISPATCH_CACHELINE_ALIGN
struct dispatch_queue_s _dispatch_root_queues[] = {
[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = {
.do_vtable = &_dispatch_queue_root_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_ctxt = &_dispatch_root_queue_contexts[
DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY],
.dq_label = "com.apple.root.low-priority",
.dq_running = 2,
.dq_width = UINT32_MAX,
.dq_serialnum = 4,
},
[DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = {
.do_vtable = &_dispatch_queue_root_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_ctxt = &_dispatch_root_queue_contexts[
DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY],
.dq_label = "com.apple.root.low-overcommit-priority",
.dq_running = 2,
.dq_width = UINT32_MAX,
.dq_serialnum = 5,
},
[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = {
.do_vtable = &_dispatch_queue_root_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_ctxt = &_dispatch_root_queue_contexts[
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY],
.dq_label = "com.apple.root.default-priority",
.dq_running = 2,
.dq_width = UINT32_MAX,
.dq_serialnum = 6,
},
[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = {
.do_vtable = &_dispatch_queue_root_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_ctxt = &_dispatch_root_queue_contexts[
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY],
.dq_label = "com.apple.root.default-overcommit-priority",
.dq_running = 2,
.dq_width = UINT32_MAX,
.dq_serialnum = 7,
},
[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = {
.do_vtable = &_dispatch_queue_root_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_ctxt = &_dispatch_root_queue_contexts[
DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY],
.dq_label = "com.apple.root.high-priority",
.dq_running = 2,
.dq_width = UINT32_MAX,
.dq_serialnum = 8,
},
[DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = {
.do_vtable = &_dispatch_queue_root_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_ctxt = &_dispatch_root_queue_contexts[
DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY],
.dq_label = "com.apple.root.high-overcommit-priority",
.dq_running = 2,
.dq_width = UINT32_MAX,
.dq_serialnum = 9,
},
[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = {
.do_vtable = &_dispatch_queue_root_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_ctxt = &_dispatch_root_queue_contexts[
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY],
.dq_label = "com.apple.root.background-priority",
.dq_running = 2,
.dq_width = UINT32_MAX,
.dq_serialnum = 10,
},
[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = {
.do_vtable = &_dispatch_queue_root_vtable,
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
.do_ctxt = &_dispatch_root_queue_contexts[
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY],
.dq_label = "com.apple.root.background-overcommit-priority",
.dq_running = 2,
.dq_width = UINT32_MAX,
.dq_serialnum = 11,
},
};
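Before looking at the individual fields, a quick usage sketch of how the public call maps onto this table (per the dispatch_get_global_queue implementation shown above):
// DEFAULT priority, no flags: returns _dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY]
dispatch_queue_t defaultQueue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
// Any flag bit other than DISPATCH_QUEUE_OVERCOMMIT makes the call return NULL (per the check in this version)
dispatch_queue_t invalid = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 42);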
1.do_vtable
The global queues' do_vtable:
static const struct dispatch_queue_vtable_s _dispatch_queue_root_vtable = {
.do_type = DISPATCH_QUEUE_GLOBAL_TYPE,
.do_kind = "global-queue",
.do_debug = dispatch_queue_debug,
.do_probe = _dispatch_queue_wakeup_global,
};
2.do_ctxt
A global queue carries a context field that stores thread-pool related data, such as the semaphore used to park and wake threads and the pool size. It is defined as follows:
static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = {
[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = {
#if DISPATCH_ENABLE_THREAD_POOL
.dgq_thread_mediator = &_dispatch_thread_mediator[
DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY],
.dgq_thread_pool_size = MAX_THREAD_COUNT,
#endif
},
[DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = {
#if DISPATCH_ENABLE_THREAD_POOL
.dgq_thread_mediator = &_dispatch_thread_mediator[
DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY],
.dgq_thread_pool_size = MAX_THREAD_COUNT,
#endif
},
[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = {
#if DISPATCH_ENABLE_THREAD_POOL
.dgq_thread_mediator = &_dispatch_thread_mediator[
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY],
.dgq_thread_pool_size = MAX_THREAD_COUNT,
#endif
},
[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = {
#if DISPATCH_ENABLE_THREAD_POOL
.dgq_thread_mediator = &_dispatch_thread_mediator[
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY],
.dgq_thread_pool_size = MAX_THREAD_COUNT,
#endif
},
[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = {
#if DISPATCH_ENABLE_THREAD_POOL
.dgq_thread_mediator = &_dispatch_thread_mediator[
DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY],
.dgq_thread_pool_size = MAX_THREAD_COUNT,
#endif
},
[DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = {
#if DISPATCH_ENABLE_THREAD_POOL
.dgq_thread_mediator = &_dispatch_thread_mediator[
DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY],
.dgq_thread_pool_size = MAX_THREAD_COUNT,
#endif
},
[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = {
#if DISPATCH_ENABLE_THREAD_POOL
.dgq_thread_mediator = &_dispatch_thread_mediator[
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY],
.dgq_thread_pool_size = MAX_THREAD_COUNT,
#endif
},
[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = {
#if DISPATCH_ENABLE_THREAD_POOL
.dgq_thread_mediator = &_dispatch_thread_mediator[
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY],
.dgq_thread_pool_size = MAX_THREAD_COUNT,
#endif
},
};
5. Synchronous Dispatch
5.1 dispatch_sync
The source of dispatch_sync:
void
dispatch_sync(dispatch_queue_t dq, void (^work)(void))
{
#if DISPATCH_COCOA_COMPAT
if (slowpath(dq == &_dispatch_main_q)) {
return _dispatch_sync_slow(dq, work);
}
#endif
struct Block_basic *bb = (void *)work;
dispatch_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke);
}
If the queue is the main queue, _dispatch_sync_slow is called; otherwise dispatch_sync_f is. Stepping into _dispatch_sync_slow shows that it eventually calls dispatch_sync_f as well. The conversion from block to function is done through _dispatch_Block_copy or Block_basic, so at the bottom, executing a block is still just calling a function.
5.2 dispatch_sync_f
The source of dispatch_sync_f:
void
dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
{
//serial queue
if (fastpath(dq->dq_width == 1)) {
return dispatch_barrier_sync_f(dq, ctxt, func);
}
//global queue
if (slowpath(!dq->do_targetq)) {
// the global root queues do not need strict ordering
(void)dispatch_atomic_add2o(dq, dq_running, 2);
return _dispatch_sync_f_invoke(dq, ctxt, func);
}
//concurrent queue
_dispatch_sync_f2(dq, ctxt, func);
}
There are three cases here:
1. A serial queue: call dispatch_barrier_sync_f.
2. A global queue: call _dispatch_sync_f_invoke.
3. A concurrent queue: call _dispatch_sync_f2.
5.2.1 dispatch_barrier_sync_f
When a synchronous task is pushed onto a serial queue, it has to wait for the tasks already ahead of it to finish before it can run. dispatch_barrier_sync_f is what implements that behavior.
void
dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func)
{
// 1) ensure that this thread hasn't enqueued anything ahead of this call
// 2) the queue is not suspended
//Step 1: if the serial queue already holds other items or has been suspended, go straight into _dispatch_barrier_sync_f_slow, wait (via a semaphore) for the earlier items to finish, then run this task.
if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){
return _dispatch_barrier_sync_f_slow(dq, ctxt, func);
}
//Step 2: check dq_running; if the queue is already running, fall into _dispatch_barrier_sync_f_slow and wait to be woken.
if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1))) {
// global queues and main queue bound to main thread always falls into
// the slow case
return _dispatch_barrier_sync_f_slow(dq, ctxt, func);
}
//Step 3: nested target queues; recurse to find the real target queue, which eventually comes back through dispatch_sync_f.
if (slowpath(dq->do_targetq->do_targetq)) {
return _dispatch_barrier_sync_f_recurse(dq, ctxt, func);
}
//Step 4: run the task, then check whether the queue has other items; if so, signal the semaphore taken earlier (the signaling happens in _dispatch_barrier_sync_f2).
_dispatch_barrier_sync_f_invoke(dq, ctxt, func);
}
With those annotations in mind, let's think about the deadlock caused by synchronous dispatch to a serial queue. How does the deadlock arise? First an example:
#import "DeadLock.h"
@implementation DeadLock
- (instancetype)init {
if (self = [super init]) {
// [self _mainQueueDeadLock];
[self _serialQueueDeadLock];
}
return self;
}
#pragma mark - Private
- (void)_mainQueueDeadLock {
dispatch_sync(dispatch_get_main_queue(), ^(void){
NSLog(@"Deadlocked here");
});
}
- (void)_serialQueueDeadLock {
dispatch_queue_t queue1 = dispatch_queue_create("1serialQueue", DISPATCH_QUEUE_SERIAL);
dispatch_queue_t queue2 = dispatch_queue_create("2serialQueue", DISPATCH_QUEUE_SERIAL);
dispatch_sync(queue1, ^{
NSLog(@"11111");
dispatch_sync(queue1, ^{ //using queue2 here would not deadlock; using queue1 does
NSLog(@"22222");
});
});
}
@end
Take _serialQueueDeadLock as the example. The first dispatch_sync on the serial queue falls through to step 4 and starts executing its task right away. When the second dispatch_sync runs inside that task, it hits step 1 (the queue is busy), pushes a semaphore wait onto the same queue, and then waits on it; but the only task that could ever signal it is the one that is now blocked, so we deadlock. With the main queue, the same thing happens at step 2 instead.
5.2.2 _dispatch_sync_f_invoke
static void
_dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func)
{
_dispatch_function_invoke(dq, ctxt, func);
if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) {
_dispatch_wakeup(dq);
}
}
If the current queue is a global queue, _dispatch_sync_f_invoke is called. It executes the submitted task, then uses dq_running to check whether the queue still needs waking; if the count drops to zero it calls the wake-up function. The wake-up function _dispatch_wakeup(dq) is covered in the section on asynchronous dispatch.
5.2.3 _dispatch_sync_f2
_dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
{
// 1) ensure that this thread hasn't enqueued anything ahead of this call
// 2) the queue is not suspended
//Step 1: the concurrent queue already holds other items or has been suspended; push a semaphore and wait for another thread to release it.
if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){
return _dispatch_sync_f_slow(dq, ctxt, func);
}
//Step 2: the concurrent queue is not active yet; _dispatch_sync_f_slow2 ends up doing the same as _dispatch_sync_f_slow, just with an extra wake-up call.
if (slowpath(dispatch_atomic_add2o(dq, dq_running, 2) & 1)) {
return _dispatch_sync_f_slow2(dq, ctxt, func);
}
//Step 3: nested target queues; find the real target queue.
if (slowpath(dq->do_targetq->do_targetq)) {
return _dispatch_sync_f_recurse(dq, ctxt, func);
}
//Step 4: the concurrent queue has no other items; invoke the task and wake the queue.
_dispatch_sync_f_invoke(dq, ctxt, func);
}
As the annotations show, tasks dispatched synchronously to a concurrent queue still execute one after another. That ordering has nothing to do with the queue being concurrent; it happens because every call is synchronous, so each task is waited on until it completes before the next one is submitted.
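A tiny illustration of that point (the queue label is made up): even on a concurrent queue the output below is always 1, 2, 3, because each dispatch_sync returns only after its block has finished.
dispatch_queue_t queue = dispatch_queue_create("com.example.concurrent", DISPATCH_QUEUE_CONCURRENT);
dispatch_sync(queue, ^{ NSLog(@"1"); });
dispatch_sync(queue, ^{ NSLog(@"2"); }); // cannot start before "1" finishes: the caller was blocked until then
dispatch_sync(queue, ^{ NSLog(@"3"); });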
Now let's put the synchronous dispatch flow together in one diagram:
(Figure: queue_synchronize.png)
6. Asynchronous Dispatch
With synchronous dispatch covered, let's look at the asynchronous side. We dispatch work to a queue asynchronously with dispatch_async.
6.1 dispatch_async
The source of dispatch_async:
void
dispatch_async(dispatch_queue_t dq, void (^work)(void))
{
dispatch_async_f(dq, _dispatch_Block_copy(work),
_dispatch_call_block_and_release);
}
dispatch_async mainly copies the block from the stack to the heap (or retains it) so that it cannot be destroyed before it runs; _dispatch_call_block_and_release is the function that later releases the block. It then calls dispatch_async_f.
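Why the copy matters, in a small sketch (the method name and values are made up): the enclosing stack frame is usually gone long before the block runs, so the block and the variables it captures have to live on the heap.
- (void)kickOffWorkOn:(dispatch_queue_t)queue {
    int localValue = 42;                  // lives in this stack frame
    dispatch_async(queue, ^{
        // runs after this method has returned; safe only because
        // _dispatch_Block_copy moved the block, and its captured copy
        // of localValue, to the heap before it was enqueued
        NSLog(@"%d", localValue);
    });
}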
6.2 dispatch_async_f
void
dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
{
dispatch_continuation_t dc;
// No fastpath/slowpath hint because we simply don't know
//serial queue: go through dispatch_barrier_async_f, which in the end still just enqueues the task
if (dq->dq_width == 1) {
return dispatch_barrier_async_f(dq, ctxt, func);
}
//grab a dispatch_continuation_t structure from the thread's private cache
dc = fastpath(_dispatch_continuation_alloc_cacheonly());
if (!dc) {
return _dispatch_async_f_slow(dq, ctxt, func);
}
dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
dc->dc_func = func;
dc->dc_ctxt = ctxt;
// No fastpath/slowpath hint because we simply don't know
//there is a target queue: forward through _dispatch_async_f2
if (dq->do_targetq) {
return _dispatch_async_f2(dq, dc);
}
//global queue: push directly onto the queue
_dispatch_queue_push(dq, dc);
}
From the source we can see that dispatch_async_f roughly splits into three cases:
- A serial queue: call dispatch_barrier_async_f.
- Any other queue that has a target queue: call _dispatch_async_f2.
- A global queue: push directly onto the queue.
Although there are three cases, they all end up calling _dispatch_queue_push to enqueue the work.
One detail worth noting is how do_vtable of the dispatch_continuation_t is assigned:
//serial queue: async + barrier
dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
//not barrier
dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
libdispatch has four such flag bits in total:
#define DISPATCH_OBJ_ASYNC_BIT 0x1 //asynchronous
#define DISPATCH_OBJ_BARRIER_BIT 0x2 //barrier
#define DISPATCH_OBJ_GROUP_BIT 0x4 //group
#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x8 //slow synchronous path
From this we can tell that when work is dispatched asynchronously to a serial queue, it is the DISPATCH_OBJ_BARRIER_BIT flag that enforces the blocking, one-at-a-time behavior.
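The same barrier bit backs the public barrier API. A minimal usage sketch (the queue label is made up; note that barriers only take effect on custom concurrent queues, not on global queues): the barrier block runs alone, after every block submitted before it and before every block submitted after it.
dispatch_queue_t queue = dispatch_queue_create("com.example.readwrite", DISPATCH_QUEUE_CONCURRENT);
dispatch_async(queue, ^{ /* reader 1: may run concurrently with reader 2 */ });
dispatch_async(queue, ^{ /* reader 2 */ });
dispatch_barrier_async(queue, ^{ /* writer: runs exclusively, after both readers */ });
dispatch_async(queue, ^{ /* reader 3: starts only after the writer finishes */ });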
Next, let's look at the two functions dispatch_barrier_async_f and _dispatch_async_f2.
6.2.1 dispatch_barrier_async_f
The source of dispatch_barrier_async_f:
void
dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func)
{
dispatch_continuation_t dc;
//grab a dispatch_continuation_t from the thread's private cache; the continuation wraps the asynchronous task
dc = fastpath(_dispatch_continuation_alloc_cacheonly());
if (!dc) {
//return _dispatch_barrier_async_f_slow(dq, ctxt, func);
//what follows is the body of _dispatch_barrier_async_f_slow, inlined here for clarity
//nothing cached: allocate a dispatch_continuation_t from the heap
dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap();
//do_vtable doubles as the flag word that marks the continuation's type
dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
//func is the function to invoke (for the block-based API this is _dispatch_call_block_and_release)
dc->dc_func = func;
//ctxt is the context (for the block-based API, the copied block)
dc->dc_ctxt = ctxt;
//enqueue
_dispatch_queue_push(dq, dc);
}
dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
dc->dc_func = func;
dc->dc_ctxt = ctxt;
_dispatch_queue_push(dq, dc);
}
6.3 _dispatch_queue_push
_dispatch_queue_push is a macro that ultimately expands into a call to the _dispatch_queue_push_list function.
#define _dispatch_queue_push(x, y) _dispatch_queue_push_list((x), (y), (y))
#define _dispatch_queue_push_list _dispatch_trace_queue_push_list
static inline void
_dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
dispatch_object_t _tail)
{
if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
struct dispatch_object_s *dou = _head._do;
do {
//records the dispatch_continuation_s being pushed so it can be traced later (DTrace probe)
_dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH);
} while (dou != _tail._do && (dou = dou->do_next));
}
_dispatch_queue_push_list(dq, _head, _tail);
}
static inline void
_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
dispatch_object_t _tail)
{
struct dispatch_object_s *prev, *head = _head._do, *tail = _tail._do;
tail->do_next = NULL;
dispatch_atomic_store_barrier();
//dispatch_atomic_xchg2o boils down to ((typeof(*(p)))__sync_swap((p), (n))): set *p to n and return the value *p held before the swap.
//So dispatch_atomic_xchg2o(dq, dq_items_tail, tail) is equivalent to dq->dq_items_tail = tail, i.e. it resets the queue's tail pointer.
prev = fastpath(dispatch_atomic_xchg2o(dq, dq_items_tail, tail));
if (prev) {
// if we crash here with a value less than 0x1000, then we are at a
// known bug in client code for example, see _dispatch_queue_dispose
// or _dispatch_atfork_child
//prev is the old tail; the queue already contained items, so link the pushed object onto the end.
prev->do_next = head;
} else {
//the queue was empty
_dispatch_queue_push_list_slow(dq, head);
}
}
_dispatch_queue_push_list_slow
If the queue is empty, the _dispatch_queue_push_list_slow function is called.
_dispatch_queue_push_list_slow(dispatch_queue_t dq,
struct dispatch_object_s *obj)
{
//set dq->dq_items_head to the pushed object, then wake the queue: it was empty, nothing was executing, so it is dormant and needs waking
_dispatch_retain(dq);
dq->dq_items_head = obj;
_dispatch_wakeup(dq);
_dispatch_release(dq);
}
6.4 _dispatch_wakeup
Both the synchronous and asynchronous paths end up calling _dispatch_wakeup, whose job is to wake the given queue. Its source:
dispatch_queue_t
_dispatch_wakeup(dispatch_object_t dou)
{
dispatch_queue_t tq;
if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
return NULL;
}
//this is a bit subtle: for a global queue, dx_probe is what runs the global wake-up logic in _dispatch_queue_wakeup_global; if the probe returns false and the tail pointer is empty, return NULL
if (!dx_probe(dou._do) && !dou._dq->dq_items_tail) {
return NULL;
}
// _dispatch_source_invoke() relies on this testing the whole suspend count
// word, not just the lock bit. In other words, no point taking the lock
// if the source is suspended or canceled.
if (!dispatch_atomic_cmpxchg2o(dou._do, do_suspend_cnt, 0,
DISPATCH_OBJECT_SUSPEND_LOCK)) {
#if DISPATCH_COCOA_COMPAT
if (dou._dq == &_dispatch_main_q) {
//for the main queue, go into _dispatch_queue_wakeup_main()
_dispatch_queue_wakeup_main();
}
#endif
return NULL;
}
_dispatch_retain(dou._do);
//there is a target queue: push this queue onto its target queue
tq = dou._do->do_targetq;
_dispatch_queue_push(tq, dou._do);
return tq; // libdispatch does not need this, but the Instrument DTrace
// probe does
}
From the code above, _dispatch_wakeup splits into three cases:
1. The main queue calls _dispatch_queue_wakeup_main().
2. A global queue calls _dispatch_queue_wakeup_global.
3. Any other queue is pushed onto its target queue, i.e. enqueued again.
6.4.1 _dispatch_queue_wakeup_main
void
_dispatch_queue_wakeup_main(void)
{
kern_return_t kr;
dispatch_once_f(&_dispatch_main_q_port_pred, NULL,
_dispatch_main_q_port_init);
//wake the main thread; we cannot step into this call, but the wake-up relies on a mach port and the corresponding source1 registered with the main run loop
kr = _dispatch_send_wakeup_main_thread(main_q_port, 0);
switch (kr) {
case MACH_SEND_TIMEOUT:
case MACH_SEND_TIMED_OUT:
case MACH_SEND_INVALID_DEST:
break;
default:
(void)dispatch_assume_zero(kr);
break;
}
_dispatch_safe_fork = false;
}
6.4.2 _dispatch_queue_wakeup_global
As mentioned above, dx_probe(dou._do) is where a global queue gets woken. Recall the global queue's do_vtable:
static const struct dispatch_queue_vtable_s _dispatch_queue_root_vtable = {
.do_type = DISPATCH_QUEUE_GLOBAL_TYPE,
.do_kind = "global-queue",
.do_debug = dispatch_queue_debug,
.do_probe = _dispatch_queue_wakeup_global,
};
The source of _dispatch_queue_wakeup_global:
static bool
_dispatch_queue_wakeup_global(dispatch_queue_t dq)
{
static dispatch_once_t pred;
struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
int r;
if (!dq->dq_items_tail) {
return false;
}
_dispatch_safe_fork = false;
dispatch_debug_queue(dq, __PRETTY_FUNCTION__);
dispatch_once_f(&pred, NULL, _dispatch_root_queues_init);
#if HAVE_PTHREAD_WORKQUEUES
#if DISPATCH_ENABLE_THREAD_POOL
//If the queue context has a dgq_kworkqueue, call pthread_workqueue_additem_np, which uses the workq_kernreturn system call to tell the workqueue that there is a new item to execute. Based on that notification the XNU kernel decides, from overall system state, whether to spawn a thread; for an overcommit-priority queue the workqueue always spawns one. The spawned thread then runs _dispatch_worker_thread2.
//A workqueue is an interface for creating kernel-managed threads; the threads created through it execute the work that other modules enqueue. Dispatch queues of different priorities map to workqueues of matching priorities, and GCD creates them with pthread_workqueue_create_np during initialization.
if (qc->dgq_kworkqueue)
#endif
{
if (dispatch_atomic_cmpxchg2o(qc, dgq_pending, 0, 1)) {
pthread_workitem_handle_t wh;
unsigned int gen_cnt;
_dispatch_debug("requesting new worker thread");
r = pthread_workqueue_additem_np(qc->dgq_kworkqueue,
_dispatch_worker_thread2, dq, &wh, &gen_cnt);
(void)dispatch_assume_zero(r);
} else {
_dispatch_debug("work thread request still pending on global "
"queue: %p", dq);
}
goto out;
}
#endif // HAVE_PTHREAD_WORKQUEUES
#if DISPATCH_ENABLE_THREAD_POOL
//signal the mediator semaphore to wake an idle thread in the pool, if one is waiting
if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) {
goto out;
}
pthread_t pthr;
int t_count;
do {
t_count = qc->dgq_thread_pool_size;
if (!t_count) {
_dispatch_debug("The thread pool is full: %p", dq);
goto out;
}
} while (!dispatch_atomic_cmpxchg2o(qc, dgq_thread_pool_size, t_count,
t_count - 1));//decrement the pool size if a slot is still available
//getting here means the pool had no idle thread to wake, so create one with pthread_create; it runs _dispatch_worker_thread, which eventually calls through to _dispatch_worker_thread2
while ((r = pthread_create(&pthr, NULL, _dispatch_worker_thread, dq))) {
if (r != EAGAIN) {
(void)dispatch_assume_zero(r);
}
sleep(1);
}
//detach pthr so its resources are reclaimed automatically
r = pthread_detach(pthr);
(void)dispatch_assume_zero(r);
#endif // DISPATCH_ENABLE_THREAD_POOL
out:
return false;
}
6.4.3 _dispatch_worker_thread2
The code of _dispatch_worker_thread2:
_dispatch_worker_thread2(void *context)
{
struct dispatch_object_s *item;
dispatch_queue_t dq = context;
struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
if (_dispatch_thread_getspecific(dispatch_queue_key)) {
DISPATCH_CRASH("Premature thread recycling");
}
//store dq as the newly started thread's thread-specific data
_dispatch_thread_setspecific(dispatch_queue_key, dq);
qc->dgq_pending = 0;
#if DISPATCH_COCOA_COMPAT
(void)dispatch_atomic_inc(&_dispatch_worker_threads);
// ensure that high-level memory management techniques do not leak/crash
if (dispatch_begin_thread_4GC) {
dispatch_begin_thread_4GC();
}
void *pool = _dispatch_begin_NSAutoReleasePool();
#endif
#if DISPATCH_PERF_MON
uint64_t start = _dispatch_absolute_time();
#endif
//_dispatch_queue_concurrent_drain_one pops one item off the queue
while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) {
// process the popped item (if it is a task, execute it)
_dispatch_continuation_pop(item);
}
#if DISPATCH_PERF_MON
_dispatch_queue_merge_stats(start);
#endif
#if DISPATCH_COCOA_COMPAT
_dispatch_end_NSAutoReleasePool(pool);
dispatch_end_thread_4GC();
if (!dispatch_atomic_dec(&_dispatch_worker_threads) &&
dispatch_no_worker_threads_4GC) {
dispatch_no_worker_threads_4GC();
}
#endif
_dispatch_thread_setspecific(dispatch_queue_key, NULL);
_dispatch_force_cache_cleanup();
}
Two methods matter here:
1._dispatch_queue_concurrent_drain_one pops one item off the queue.
2._dispatch_continuation_pop processes the popped item.
_dispatch_queue_concurrent_drain_one
struct dispatch_object_s *
_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq)
{
struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul;
// The mediator value acts both as a "lock" and a signal
head = dispatch_atomic_xchg(&dq->dq_items_head, mediator);
if (slowpath(head == NULL)) {
//the queue is empty
dispatch_atomic_cmpxchg(&dq->dq_items_head, mediator, NULL);
_dispatch_debug("no work on global work queue");
return NULL;
}
if (slowpath(head == mediator)) {
// This thread lost the race for ownership of the queue, which means libdispatch is running inefficiently:
// there are too many threads in the pool. Request a pending wake-up and let this thread exit;
// the kernel will create a new thread once the load eases.
_dispatch_queue_wakeup_global(dq);
return NULL;
}
// save head->do_next before returning; if next is NULL, this item is the last one
next = fastpath(head->do_next);
if (slowpath(!next)) {
dq->dq_items_head = NULL;
if (dispatch_atomic_cmpxchg(&dq->dq_items_tail, head, NULL)) {
// both head and tail pointers are now empty
goto out;
}
// an item must be in flight, so this thread won't wait long
while (!(next = head->do_next)) {
_dispatch_hardware_pause();
}
}
// keep the dispatch going
dq->dq_items_head = next;
_dispatch_queue_wakeup_global(dq);
out:
// return the queue's head item
return head;
}
_dispatch_continuation_pop
static inline void
_dispatch_continuation_pop(dispatch_object_t dou)
{
dispatch_continuation_t dc = dou._dc;
dispatch_group_t dg;
_dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou);
//check whether the popped object is itself a queue; if so, hand it to _dispatch_queue_invoke
if (DISPATCH_OBJ_IS_VTABLE(dou._do)) {
return _dispatch_queue_invoke(dou._dq);
}
// Add the item back to the cache before calling the function. This
// allows the 'hot' continuation to be used for a quick callback.
//
// The ccache version is per-thread.
// Therefore, the object has not been reused yet.
// This generates better assembly.
if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) {
_dispatch_continuation_free(dc);
}
//is this continuation part of a dispatch group?
if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) {
dg = dc->dc_group;
} else {
dg = NULL;
}
//a dispatch_continuation_t wrapping a task: execute it directly.
//this is the point at which the submitted block is finally invoked
_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
if (dg) {
//for a group, call dispatch_group_leave and release the group
dispatch_group_leave(dg);
_dispatch_release(dg);
}
}
From this function we can see that what gets pushed onto a queue is not only tasks but can also be queues themselves. If the item is a queue, _dispatch_queue_invoke is executed; otherwise dc->dc_func(dc->dc_ctxt) is called.
_dispatch_queue_invoke
void
_dispatch_queue_invoke(dispatch_queue_t dq)
{
if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) &&
fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1))) {
dispatch_atomic_acquire_barrier();
dispatch_queue_t otq = dq->do_targetq, tq = NULL;
_dispatch_queue_drain(dq);
if (dq->do_vtable->do_invoke) {
// Assume that object invoke checks it is executing on correct queue
tq = dx_invoke(dq);
} else if (slowpath(otq != dq->do_targetq)) {
// An item on the queue changed the target queue
tq = dq->do_targetq;
}
// We do not need to check the result.
// When the suspend-count lock is dropped, then the check will happen.
dispatch_atomic_release_barrier();
//decrement dq_running: the work was either executed directly or pushed onto the target queue
(void)dispatch_atomic_dec2o(dq, dq_running);
if (tq) {
return _dispatch_queue_push(tq, dq);
}
}
dq->do_next = DISPATCH_OBJECT_LISTLESS;
if (!dispatch_atomic_sub2o(dq, do_suspend_cnt,
DISPATCH_OBJECT_SUSPEND_LOCK)) {
//the queue is idle and needs waking
if (dq->dq_running == 0) {
_dispatch_wakeup(dq); // verify that the queue is idle
}
}
//release the queue
_dispatch_release(dq); // added when the queue is put on the list
}
Now let's put the asynchronous dispatch flow together in one diagram:
(Figure: dispatch_async.png)
7. Summary
1. dispatch_sync generally runs its block on the calling thread, and it uses a semaphore bound to that thread to enforce serialization.
2. dispatch_async does not put the block onto the queue directly; it first builds a dispatch_continuation structure that wraps the block and some context. The queue links these continuations into its list, then wakes itself via the function pointer in its vtable, which leads to _dispatch_wakeup. In _dispatch_wakeup a worker thread is taken from the thread pool (or created if none is available), and that worker thread pops the corresponding block and executes it.
3. Work dispatched asynchronously to the main queue is handled by the run loop, while work dispatched to other queues is handled by the thread pool.
4. GCD deadlocks are caused by queues rather than threads: _dispatch_barrier_sync_f_slow waits on the semaphore associated with the thread, and it is that wait that ends up blocking the thread.