This is probably the hardest post of the series. The GCD internals use macros everywhere and keep bouncing from one function to another, so honestly it was rough going and I can only do my best; feel free to poke holes in it. This post also only walks through the two most commonly used APIs, create and async, because it is already far too long.
Source download: https://libdispatch.macosforge.org or https://github.com/apple
Common macros: likely / unlikely / slowpath / fastpath
#define likely(x) __builtin_expect(!!(x), 1) // the condition is most likely true
#define unlikely(x) __builtin_expect(!!(x), 0) // the condition is most likely false
#define fastpath(x) ((typeof(x))__builtin_expect((long)(x), ~0l)) // the condition is most likely true
#define slowpath(x) ((typeof(x))__builtin_expect((long)(x), 0l)) // the condition is most likely false
Different versions of the source use slightly different names, but you constantly run into unlikely or slowpath. These are branch-prediction hints for the compiler: they say which side of an if is expected to be taken, so the likely instructions can be laid out and fetched first, avoiding a refetch when the condition turns out the other way. They have no effect on program behavior, so you can safely ignore them while reading; they only tell the machine whether the if branch or the else branch is more probable, so it can prefetch accordingly.
For example, given:
if (i == 1) {
    // go drink water
} else {
    // go eat
}
If i is very unlikely to be 1 and the CPU first fetches the if (i == 1) path (drink water), it has to go back and fetch the else path (eat) once it discovers i != 1, effectively fetching twice.
So if we tell the compiler up front that i is probably not 1, it can lay the else path out first and reduce the chance of refetching.
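To make branch hints concrete, here is a tiny standalone sketch in the same spirit as the macros above (the parse_value function and its input are made up for illustration); the hints only influence code layout, never the result:

#include <stdio.h>

// Same idea as libdispatch's macros: tell the compiler which branch to expect.
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

// Hypothetical parser: a NULL input is assumed to be rare, so that branch is
// marked unlikely and the common path can be laid out without extra jumps.
static int parse_value(const char *s, long *out) {
    if (unlikely(s == NULL)) {                    // rare error path
        return -1;
    }
    *out = 0;
    while (likely(*s >= '0' && *s <= '9')) {      // common path: digits
        *out = *out * 10 + (*s - '0');
        s++;
    }
    return 0;
}

int main(void) {
    long v;
    if (parse_value("12345", &v) == 0) {
        printf("parsed %ld\n", v);                // prints: parsed 12345
    }
    return 0;
}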
dispatch_queue_create
dispatch_queue_t
dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
{
return _dispatch_lane_create_with_target(label, attr,
DISPATCH_TARGET_QUEUE_DEFAULT, true);
}
So, how is a queue created? The first step simply forwards everything to the rather long-winded _dispatch_lane_create_with_target.
Note that DISPATCH_TARGET_QUEUE_DEFAULT is just a macro for NULL.
static dispatch_queue_t
_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
dispatch_queue_t tq, bool legacy)
{
//essentially unpacks attr into a small struct of attribute fields; dqai holds things like qos, overcommit, inactive and concurrent
dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa);
// Step 1: Normalize arguments (qos, overcommit, tq)
dispatch_qos_t qos = dqai.dqai_qos;
…… // set up the QOS
if (tq && dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
…… // tq is NULL, so this branch is not taken
} else if (tq && !tq->do_targetq) {
…… // tq is NULL, so this branch is not taken
} else {
if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
// serial queues default to overcommit, concurrent queues default to non-overcommit
overcommit = dqai.dqai_concurrent ?
_dispatch_queue_attr_overcommit_disabled :
_dispatch_queue_attr_overcommit_enabled;
}
}
if (!tq) {
tq = _dispatch_get_root_queue(
qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq;
if (unlikely(!tq)) {
DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
}
}
// Step 2: Initialize the queue
//if dqai.dqai_inactive or dqai.dqai_autorelease_frequency is set, legacy is forced to false
……
const void *vtable;
dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0;
if (dqai.dqai_concurrent) {
vtable = DISPATCH_VTABLE(queue_concurrent);
} else {
vtable = DISPATCH_VTABLE(queue_serial);
}
//set autorelease_frequency; also, if the label is mutable, copy it to an immutable string
……
dispatch_lane_t dq = _dispatch_object_alloc(vtable,
sizeof(struct dispatch_lane_s));
_dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ?
DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
(dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0));
// set the label, priority and target queue
dq->dq_label = label;
dq->dq_priority = _dispatch_priority_make((dispatch_qos_t)dqai.dqai_qos,
dqai.dqai_relpri);
if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
}
if (!dqai.dqai_inactive) {
_dispatch_queue_priority_inherit_from_target(dq, tq);
_dispatch_lane_inherit_wlh_from_target(dq, tq);
}
_dispatch_retain(tq);
dq->do_targetq = tq;
_dispatch_object_debug(dq, "%s", __func__);
return _dispatch_trace_queue_create(dq)._dq;
}
Two terms first:
QOS: roughly a priority level; it's what the first argument (priority / QOS) of dispatch_get_global_queue maps to
overcommit: if the queue has no idle thread available for its work, a new thread is started
The important steps in creating a new queue are:
· Fill the various attributes into dqai
· Pick a target queue with _dispatch_get_root_queue
· Allocate memory with _dispatch_object_alloc and initialize the queue with _dispatch_queue_init
· Assign the label, QOS and other fields to the queue
· Return the queue via _dispatch_trace_queue_create
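A small aside on the tq parameter above: there is also a public API, dispatch_queue_create_with_target (available since iOS 10 / macOS 10.12), that lets the caller pass the target queue explicitly instead of DISPATCH_TARGET_QUEUE_DEFAULT, and it funnels into essentially the same creation path. A minimal usage sketch (the label "com.example.worker" and the UTILITY QOS are just made-up choices for illustration):

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
    // Explicitly target a custom serial queue at a chosen global root queue,
    // instead of letting libdispatch pick the default one.
    dispatch_queue_t target = dispatch_get_global_queue(QOS_CLASS_UTILITY, 0);
    dispatch_queue_t q = dispatch_queue_create_with_target(
            "com.example.worker", DISPATCH_QUEUE_SERIAL, target);

    dispatch_async(q, ^{
        // Prints the label of the queue the block is running on.
        printf("running on %s\n",
               dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL));
    });

    dispatch_main(); // keep the process alive so the async block can run
}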
① dqai
#define DISPATCH_QUEUE_SERIAL NULL
For a serial queue, dqa is NULL.
dispatch_queue_attr_info_t
_dispatch_queue_attr_to_info(dispatch_queue_attr_t dqa)
{
dispatch_queue_attr_info_t dqai = { };
// if dqa is NULL, return the empty dqai
if (!dqa) return dqai;
#if DISPATCH_VARIANT_STATIC
if (dqa == &_dispatch_queue_attr_concurrent) {
dqai.dqai_concurrent = true;
return dqai;
}
#endif
if (dqa < _dispatch_queue_attrs ||
dqa >= &_dispatch_queue_attrs[DISPATCH_QUEUE_ATTR_COUNT]) {
DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
}
size_t idx = (size_t)(dqa - _dispatch_queue_attrs);
dqai.dqai_inactive = (idx % DISPATCH_QUEUE_ATTR_INACTIVE_COUNT);
idx /= DISPATCH_QUEUE_ATTR_INACTIVE_COUNT;
dqai.dqai_concurrent = !(idx % DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT);
idx /= DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT;
// the remaining fields are decoded the same way, with repeated modulo and division
……
return dqai;
}
So for a serial queue an empty dqai is returned right away;
for a concurrent queue, the fields below are decoded from the attribute's index in the _dispatch_queue_attrs table using repeated modulo and division.
typedef struct dispatch_queue_attr_info_s {
dispatch_qos_t dqai_qos : 8;
int dqai_relpri : 8;
uint16_t dqai_overcommit:2;
uint16_t dqai_autorelease_frequency:2;
uint16_t dqai_concurrent:1;
uint16_t dqai_inactive:1;
} dispatch_queue_attr_info_t;
The two kinds of queues are distinguished by dqai_concurrent.
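The decoding above is just mixed-radix arithmetic: every attribute dimension gets its own "digit", and repeated % and / peel the digits off one by one. A toy sketch of the same idea (the dimension sizes and field order here are illustrative, not the exact internal DISPATCH_QUEUE_ATTR_*_COUNT values):

#include <stdio.h>
#include <stdbool.h>

#define INACTIVE_COUNT    2   // active / inactive
#define CONCURRENCY_COUNT 2   // concurrent / serial
// (the real table has more dimensions: qos, relpri, overcommit, autorelease...)

struct attr_info {
    bool inactive;
    bool concurrent;
    int  qos;
};

// Pack the fields into one index, like an index into _dispatch_queue_attrs.
static size_t attr_encode(struct attr_info a) {
    size_t idx = (size_t)a.qos;
    idx = idx * CONCURRENCY_COUNT + (a.concurrent ? 0 : 1); // 0 means concurrent
    idx = idx * INACTIVE_COUNT + (a.inactive ? 1 : 0);
    return idx;
}

// Unpack with repeated modulo/divide, mirroring _dispatch_queue_attr_to_info.
static struct attr_info attr_decode(size_t idx) {
    struct attr_info a;
    a.inactive   = (idx % INACTIVE_COUNT);     idx /= INACTIVE_COUNT;
    a.concurrent = !(idx % CONCURRENCY_COUNT); idx /= CONCURRENCY_COUNT;
    a.qos        = (int)idx;
    return a;
}

int main(void) {
    struct attr_info in  = { .inactive = true, .concurrent = true, .qos = 4 };
    struct attr_info out = attr_decode(attr_encode(in));
    printf("inactive=%d concurrent=%d qos=%d\n", out.inactive, out.concurrent, out.qos);
    return 0; // prints: inactive=1 concurrent=1 qos=4
}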
② _dispatch_get_root_queue: picking the target queue
static inline dispatch_queue_global_t
_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
{
if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) {
DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
}
return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
}
This is really just return &_dispatch_root_queues[2 * (qos - 1) + overcommit]:
the target queue is the entry at index (2 * (qos - 1) + overcommit) of the _dispatch_root_queues array. Since there are 6 QOS levels, each with an overcommit and a non-overcommit variant, _dispatch_root_queues needs at least 12 queues (a few worked index computations follow the QOS constants below).
Remember that serial queues default to overcommit and concurrent queues do not.
// qos
#define DISPATCH_QOS_UNSPECIFIED ((dispatch_qos_t)0)
#define DISPATCH_QOS_MAINTENANCE ((dispatch_qos_t)1)
#define DISPATCH_QOS_BACKGROUND ((dispatch_qos_t)2)
#define DISPATCH_QOS_UTILITY ((dispatch_qos_t)3)
#define DISPATCH_QOS_DEFAULT ((dispatch_qos_t)4)
#define DISPATCH_QOS_USER_INITIATED ((dispatch_qos_t)5)
#define DISPATCH_QOS_USER_INTERACTIVE ((dispatch_qos_t)6)
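So for any (qos, overcommit) pair, the root-queue index is simply 2 * (qos - 1) + overcommit. A few worked cases (a quick sketch; the labels in the comments come from the _dispatch_root_queues array shown further below):

#include <stdio.h>

// The index formula used by _dispatch_get_root_queue.
static int root_queue_idx(int qos, int overcommit) {
    return 2 * (qos - 1) + overcommit;
}

int main(void) {
    // DISPATCH_QOS_DEFAULT == 4
    printf("%d\n", root_queue_idx(4, 1)); // 7: com.apple.root.default-qos.overcommit (serial queue default)
    printf("%d\n", root_queue_idx(4, 0)); // 6: com.apple.root.default-qos (concurrent queue default)
    // DISPATCH_QOS_BACKGROUND == 2
    printf("%d\n", root_queue_idx(2, 1)); // 3: com.apple.root.background-qos.overcommit
    return 0;
}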
③ Initializing the queue
The code is a bit far above, so here it is again for easier reading:
const void *vtable;
dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0;
if (dqai.dqai_concurrent) {
vtable = DISPATCH_VTABLE(queue_concurrent);
} else {
vtable = DISPATCH_VTABLE(queue_serial);
}
//set autorelease_frequency; also, if the label is mutable, copy it to an immutable string
……
dispatch_lane_t dq = _dispatch_object_alloc(vtable,
sizeof(struct dispatch_lane_s));
_dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ?
DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
(dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0));
(1) First, the vtable is chosen based on dqai.dqai_concurrent
#define DISPATCH_VTABLE(name) DISPATCH_OBJC_CLASS(name)
#define DISPATCH_OBJC_CLASS(name) (&DISPATCH_CLASS_SYMBOL(name))
#define DISPATCH_CLASS_SYMBOL(name) OS_dispatch_##name##_class
Expanding the macros, the vtable of a concurrent queue is OS_dispatch_queue_concurrent_class and that of a serial queue is OS_dispatch_queue_serial_class, i.e. the classes of the two kinds of queues (OS_dispatch_queue_concurrent and OS_dispatch_queue_serial).
(2) _dispatch_object_alloc then allocates an instance of that vtable class, with the size of struct dispatch_lane_s
typedef struct dispatch_lane_s *dispatch_queue_serial_t;
typedef struct dispatch_lane_s *dispatch_queue_concurrent_t;
(3) _dispatch_queue_init initializes the queue
This part mainly sets up the state, width and related fields
static inline dispatch_queue_class_t
_dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf,
uint16_t width, uint64_t initial_state_bits)
{
uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);
dispatch_queue_t dq = dqu._dq;
dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK |
DISPATCH_QUEUE_INACTIVE)) == 0);
if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) {
dq_state |= DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION;
dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_lane_resume
if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) {
dq->do_ref_cnt++; // released when DSF_DELETED is set
}
}
dq_state |= (initial_state_bits & DISPATCH_QUEUE_ROLE_MASK);
dq->do_next = DISPATCH_OBJECT_LISTLESS;
dqf |= DQF_WIDTH(width);
os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
dq->dq_state = dq_state;
dq->dq_serialnum =
os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
return dqu;
}
Two small concepts:
· width: the maximum number of work items the queue may run concurrently
#define DISPATCH_QUEUE_WIDTH_MAX (DISPATCH_QUEUE_WIDTH_FULL - 2) //4094
#define DISPATCH_QUEUE_WIDTH_FULL 0x1000ull
· initial_state: whether the queue starts out active; if not, it must be activated manually with dispatch_activate before it runs anything (see the short sketch below)
// for an inactive queue, initial_state_bits contains the flag below; for an initially active queue it is 0
#define DISPATCH_QUEUE_INACTIVE 0x0100000000000000ull
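From the public API side, the inactive state looks like this (dispatch_queue_attr_make_initially_inactive and dispatch_activate are public API since iOS 10 / macOS 10.12; the label below is made up). This is a usage sketch, not libdispatch internals:

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
    // Create the queue inactive: DISPATCH_QUEUE_INACTIVE ends up in dq_state,
    // and nothing submitted to the queue runs yet.
    dispatch_queue_attr_t attr =
            dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_SERIAL);
    dispatch_queue_t q = dispatch_queue_create("com.example.inactive", attr);

    dispatch_async(q, ^{
        printf("runs only after dispatch_activate\n");
    });

    // The queue's configuration may still be changed while inactive;
    // activating it lets it start draining its items.
    dispatch_activate(q);

    dispatch_main(); // keep the process alive so the block can run
}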
(4) Set priority, label and target queue
(5) Return _dispatch_trace_queue_create(queue)
This records the new queue for introspection: it is inserted into the introspection queue list and the list's head/tail pointers are updated, etc.
_dispatch_trace_queue_create
|___dispatch_introspection_queue_create
Now let's create two queues and look at their properties:
dispatch_queue_t serialQueue = dispatch_queue_create("serial_queue", DISPATCH_QUEUE_SERIAL);
Printing description of serialQueue:
<OS_dispatch_queue_serial: serial_queue[0x6000024caa00] = { xref = 1, ref = 1, sref = 1, target = com.apple.root.default-qos.overcommit[0x10d875f00], width = 0x1, state = 0x001ffe2000000000, in-flight = 0}>
dispatch_queue_t concurrentQueue = dispatch_queue_create("concurrent_queue", DISPATCH_QUEUE_CONCURRENT);
Printing description of concurrentQueue:
<OS_dispatch_queue_concurrent: concurrent_queue[0x6000024ffb80] = { xref = 1, ref = 1, sref = 1, target = com.apple.root.default-qos[0x10d875e80], width = 0xffe, state = 0x0000041000000000, in-flight = 0}>
As you can see, their class, width and target queue all match what we read in the source.
_dispatch_root_queues
struct dispatch_queue_global_s _dispatch_root_queues[] = {
#define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \
((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \
DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \
DISPATCH_ROOT_QUEUE_IDX_##n##_QOS)
#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \
[_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \
DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), \
.dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \
.do_ctxt = _dispatch_root_queue_ctxt(_DISPATCH_ROOT_QUEUE_IDX(n, flags)), \
.dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \
.dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? \
_dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \
_dispatch_priority_make(DISPATCH_QOS_##n, 0)), \
__VA_ARGS__ \
}
_DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0,
.dq_label = "com.apple.root.maintenance-qos",
.dq_serialnum = 4,
),
_DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.maintenance-qos.overcommit",
.dq_serialnum = 5,
),
_DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0,
.dq_label = "com.apple.root.background-qos",
.dq_serialnum = 6,
),
_DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.background-qos.overcommit",
.dq_serialnum = 7,
),
_DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0,
.dq_label = "com.apple.root.utility-qos",
.dq_serialnum = 8,
),
_DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.utility-qos.overcommit",
.dq_serialnum = 9,
),
_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK,
.dq_label = "com.apple.root.default-qos",
.dq_serialnum = 10,
),
_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT,
DISPATCH_PRIORITY_FLAG_FALLBACK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.default-qos.overcommit",
.dq_serialnum = 11,
),
_DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0,
.dq_label = "com.apple.root.user-initiated-qos",
.dq_serialnum = 12,
),
_DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.user-initiated-qos.overcommit",
.dq_serialnum = 13,
),
_DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0,
.dq_label = "com.apple.root.user-interactive-qos",
.dq_serialnum = 14,
),
_DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.user-interactive-qos.overcommit",
.dq_serialnum = 15,
),
};
When we created the serial queue, the index used was 2 * (qos - 1) + overcommit = 2 * (DISPATCH_QOS_DEFAULT - 1) + 1 = 2 * (4 - 1) + 1 = 7, i.e. "com.apple.root.default-qos.overcommit", which matches the target queue of the serial queue in the experiment above.
Note that the root queue list really does contain 12 queues, but their dq_serialnum values start at 4 (serial number 1, for instance, belongs to the main queue, as seen in _dispatch_main_q below).
dispatch_get_main_queue
Next, let's look at how the main queue is obtained:
dispatch_queue_main_t
dispatch_get_main_queue(void)
{
return DISPATCH_GLOBAL_OBJECT(dispatch_queue_main_t, _dispatch_main_q);
}
#define DISPATCH_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object))
---------------------------------------
struct dispatch_queue_static_s _dispatch_main_q = {
DISPATCH_GLOBAL_OBJECT_HEADER(queue_main),
#if !DISPATCH_USE_RESOLVERS
.do_targetq = _dispatch_get_default_queue(true),
#endif
.dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) |
DISPATCH_QUEUE_ROLE_BASE_ANON,
.dq_label = "com.apple.main-thread",
.dq_atomic_flags = DQF_THREAD_BOUND | DQF_WIDTH(1),
.dq_serialnum = 1,
};
---------------target queue-------------
#define _dispatch_get_default_queue(overcommit) \
_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS + \
!!(overcommit)]._as_dq
enum {
DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS,
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS,
DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS,
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS,
DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS,
DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT,
_DISPATCH_ROOT_QUEUE_IDX_COUNT,
};
Since DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS = 6 and overcommit is true, the main queue's target is _dispatch_root_queues[7], i.e. "com.apple.root.default-qos.overcommit", the same target queue a serial queue gets.
_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT,
DISPATCH_PRIORITY_FLAG_FALLBACK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.default-qos.overcommit",
.dq_serialnum = 11,
),
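This can be double-checked from user code with the public dispatch_queue_get_label (a quick sketch): the printed label matches the .dq_label of _dispatch_main_q above.

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
    // Prints "com.apple.main-thread", matching _dispatch_main_q.dq_label.
    printf("%s\n", dispatch_queue_get_label(dispatch_get_main_queue()));
    return 0;
}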
dispatch_get_global_queue
dispatch_queue_global_t
dispatch_get_global_queue(long priority, unsigned long flags)
{
dispatch_assert(countof(_dispatch_root_queues) ==
DISPATCH_ROOT_QUEUE_COUNT);
if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) {
return DISPATCH_BAD_INPUT;
}
dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority);
#if !HAVE_PTHREAD_WORKQUEUE_QOS
if (qos == QOS_CLASS_MAINTENANCE) {
qos = DISPATCH_QOS_BACKGROUND;
} else if (qos == QOS_CLASS_USER_INTERACTIVE) {
qos = DISPATCH_QOS_USER_INITIATED;
}
#endif
if (qos == DISPATCH_QOS_UNSPECIFIED) {
return DISPATCH_BAD_INPUT;
}
return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT);
}
Getting a global queue also ends up calling _dispatch_get_root_queue, which returns _dispatch_root_queues[2 * (qos - 1) + overcommit]. In this scenario the QOS is DISPATCH_QOS_DEFAULT = 4 (corresponding to priority 0) and overcommit defaults to 0, so we get index 6, i.e. "com.apple.root.default-qos".
_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK,
.dq_label = "com.apple.root.default-qos",
.dq_serialnum = 10,
),
#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \
[_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \
DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), \
.dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \
.do_ctxt = _dispatch_root_queue_ctxt(_DISPATCH_ROOT_QUEUE_IDX(n, flags)), \
.dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \
.dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? \
_dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \
_dispatch_priority_make(DISPATCH_QOS_##n, 0)), \
__VA_ARGS__ \
}
#define DISPATCH_QUEUE_WIDTH_POOL (DISPATCH_QUEUE_WIDTH_FULL - 1) //4095
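The same quick check works for the default global queue (a sketch using public API only):

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
    // priority 0 == DISPATCH_QUEUE_PRIORITY_DEFAULT, no overcommit flag:
    // prints "com.apple.root.default-qos".
    dispatch_queue_t gq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    printf("%s\n", dispatch_queue_get_label(gq));
    return 0;
}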
To sum up how the various queues are obtained:
- A queue you create yourself is alloc'ed and init'ed, and then one of the root queues is taken as its target queue
- The main queue and global queues need no alloc/init: a global queue is simply the corresponding entry of the root queue list, and the main queue is the static _dispatch_main_q whose target queue is one of the root queues
dispatch_async
void
dispatch_async(dispatch_queue_t dq, dispatch_block_t work)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
uintptr_t dc_flags = DC_FLAG_CONSUME;
dispatch_qos_t qos;
qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
_dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}
First, what is dispatch_continuation_t? Chasing the macros around, it is basically a struct that wraps the block (along with its priority, voucher, flags and so on).
typedef struct dispatch_continuation_s *dispatch_continuation_t;
typedef struct dispatch_continuation_s {
DISPATCH_CONTINUATION_HEADER(continuation);
} *dispatch_continuation_t;
#define DISPATCH_CONTINUATION_HEADER(x) \
dispatch_function_t dc_func; \
union { \
pthread_priority_t dc_priority; \
int dc_cache_cnt; \
uintptr_t dc_pad; \
}; \
struct voucher_s *dc_voucher; \
union { \
const void *do_vtable; \
uintptr_t dc_flags; \
}; \
struct dispatch_##x##_s *volatile do_next; \
void *dc_ctxt; \
void *dc_data; \
void *dc_other
_dispatch_continuation_init fills in the various fields of this dc struct.
_dispatch_continuation_init
|___dispatch_continuation_init_f
|____dispatch_continuation_priority_set
static inline dispatch_qos_t
_dispatch_continuation_init(dispatch_continuation_t dc,
dispatch_queue_class_t dqu, dispatch_block_t work,
dispatch_block_flags_t flags, uintptr_t dc_flags)
{
void *ctxt = _dispatch_Block_copy(work);
dc_flags |= DC_FLAG_BLOCK | DC_FLAG_ALLOCATED;
if (unlikely(_dispatch_block_has_private_data(work))) {
dc->dc_flags = dc_flags;
dc->dc_ctxt = ctxt;
// will initialize all fields but requires dc_flags & dc_ctxt to be set
return _dispatch_continuation_init_slow(dc, dqu, flags);
}
dispatch_function_t func = _dispatch_Block_invoke(work);
if (dc_flags & DC_FLAG_CONSUME) {
func = _dispatch_call_block_and_release;
}
return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags);
}
void
_dispatch_call_block_and_release(void *block)
{
void (^b)(void) = block;
b();
Block_release(b);
}
Note that when the DC_FLAG_CONSUME flag is set, the function stored in the continuation becomes _dispatch_call_block_and_release: when the continuation is invoked later, it calls the block and then releases it. The block is not executed right here inside dispatch_async.
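To see the copy / invoke-and-release pattern in isolation, here is a simplified sketch built on the public Block runtime (<Block.h>); it mimics the idea of _dispatch_call_block_and_release but is not the libdispatch implementation itself:

#include <Block.h>
#include <stdio.h>

// Mirrors _dispatch_call_block_and_release: the stored context is a
// heap-copied block; call it once, then release the copy.
static void call_block_and_release(void *context) {
    void (^block)(void) = context;
    block();
    Block_release(block);
}

int main(void) {
    int value = 42;
    // Like _dispatch_continuation_init: copy the block off the stack so it can
    // outlive this frame (dispatch_async returns before the block runs).
    void *ctxt = Block_copy(^{ printf("value = %d\n", value); });

    // ... later, when the continuation is popped and invoked:
    call_block_and_release(ctxt);
    return 0;
}

With the continuation filled in, dispatch_async hands it to _dispatch_continuation_async: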
static inline void
_dispatch_continuation_async(dispatch_queue_class_t dqu,
dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
#if DISPATCH_INTROSPECTION
if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
_dispatch_trace_item_push(dqu, dc);
}
#else
(void)dc_flags;
#endif
return dx_push(dqu._dq, dc, qos);
}
#define dx_push(x, y, z) dx_vtable(x)->dq_push(x, y, z)
So the continuation is handed to dx_push, which resolves to the dq_push entry of the queue's vtable. So what is dq_push for each queue class?
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, lane,
.do_type = DISPATCH_QUEUE_SERIAL_TYPE,
.do_dispose = _dispatch_lane_dispose,
.do_debug = _dispatch_queue_debug,
.do_invoke = _dispatch_lane_invoke,
.dq_activate = _dispatch_lane_activate,
.dq_wakeup = _dispatch_lane_wakeup,
.dq_push = _dispatch_lane_push,
);
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
.do_type = DISPATCH_QUEUE_CONCURRENT_TYPE,
.do_dispose = _dispatch_lane_dispose,
.do_debug = _dispatch_queue_debug,
.do_invoke = _dispatch_lane_invoke,
.dq_activate = _dispatch_lane_activate,
.dq_wakeup = _dispatch_lane_wakeup,
.dq_push = _dispatch_lane_concurrent_push,
);
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane,
.do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
.do_dispose = _dispatch_object_no_dispose,
.do_debug = _dispatch_queue_debug,
.do_invoke = _dispatch_object_no_invoke,
.dq_activate = _dispatch_queue_no_activate,
.dq_wakeup = _dispatch_root_queue_wakeup,
.dq_push = _dispatch_root_queue_push,
);
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, lane,
.do_type = DISPATCH_QUEUE_MAIN_TYPE,
.do_dispose = _dispatch_lane_dispose,
.do_debug = _dispatch_queue_debug,
.do_invoke = _dispatch_lane_invoke,
.dq_activate = _dispatch_queue_no_activate,
.dq_wakeup = _dispatch_main_queue_wakeup,
.dq_push = _dispatch_main_queue_push,
);
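In other words dx_push is hand-rolled virtual dispatch: every queue class carries a struct of function pointers, and dx_push just looks up dq_push on the object's vtable. A stripped-down sketch of the pattern (the type and function names are illustrative, not the real libdispatch ones):

#include <stdio.h>

// Toy "vtable": one slot per operation a queue class supports.
struct queue_vtable {
    const char *name;
    void (*dq_push)(void *queue, void *item);
};

// Toy queue object: it points at its class's vtable.
struct queue {
    const struct queue_vtable *do_vtable;
};

#define dx_vtable(q)     ((q)->do_vtable)
#define dx_push(q, item) dx_vtable(q)->dq_push((q), (item))

static void lane_push(void *q, void *item)       { printf("lane push %p\n", item); }
static void root_queue_push(void *q, void *item) { printf("root queue push %p\n", item); }

static const struct queue_vtable serial_vtable = { "queue_serial", lane_push };
static const struct queue_vtable root_vtable   = { "queue_global", root_queue_push };

int main(void) {
    struct queue serial = { &serial_vtable };
    struct queue root   = { &root_vtable };
    int item = 0;
    dx_push(&serial, &item); // resolves to lane_push
    dx_push(&root,   &item); // resolves to root_queue_push
    return 0;
}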
Let's take the concurrent queue's dq_push, _dispatch_lane_concurrent_push, and see what it does:
void
_dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou,
dispatch_qos_t qos)
{
if (dq->dq_items_tail == NULL &&
!_dispatch_object_is_waiter(dou) &&
!_dispatch_object_is_barrier(dou) &&
_dispatch_queue_try_acquire_async(dq)) {
return _dispatch_continuation_redirect_push(dq, dou, qos);
}
_dispatch_lane_push(dq, dou, qos);
}
static void
_dispatch_continuation_redirect_push(dispatch_lane_t dl,
dispatch_object_t dou, dispatch_qos_t qos)
{
if (likely(!_dispatch_object_is_redirection(dou))) {
dou._dc = _dispatch_async_redirect_wrap(dl, dou);
} else if (!dou._dc->dc_ctxt) {
// find first queue in descending target queue order that has
// an autorelease frequency set, and use that as the frequency for
// this continuation.
dou._dc->dc_ctxt = (void *)
(uintptr_t)_dispatch_queue_autorelease_frequency(dl);
}
dispatch_queue_t dq = dl->do_targetq;
if (!qos) qos = _dispatch_priority_qos(dq->dq_priority);
dx_push(dq, dou, qos);
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_continuation_t
_dispatch_async_redirect_wrap(dispatch_lane_t dq, dispatch_object_t dou)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
dou._do->do_next = NULL;
dc->do_vtable = DC_VTABLE(ASYNC_REDIRECT);
dc->dc_func = NULL;
dc->dc_ctxt = (void *)(uintptr_t)_dispatch_queue_autorelease_frequency(dq);
dc->dc_data = dq;
dc->dc_other = dou._do;
dc->dc_voucher = DISPATCH_NO_VOUCHER;
dc->dc_priority = DISPATCH_NO_PRIORITY;
_dispatch_retain_2(dq); // released in _dispatch_async_redirect_invoke
return dc;
}
void
_dispatch_lane_push(dispatch_lane_t dq, dispatch_object_t dou,
dispatch_qos_t qos)
{
dispatch_wakeup_flags_t flags = 0;
struct dispatch_object_s *prev;
if (unlikely(_dispatch_object_is_waiter(dou))) {
return _dispatch_lane_push_waiter(dq, dou._dsc, qos);
}
dispatch_assert(!_dispatch_object_is_global(dq));
qos = _dispatch_queue_push_qos(dq, qos);
prev = os_mpsc_push_update_tail(os_mpsc(dq, dq_items), dou._do, do_next);
if (unlikely(os_mpsc_push_was_empty(prev))) {
_dispatch_retain_2_unsafe(dq);
flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY;
} else if (unlikely(_dispatch_queue_need_override(dq, qos))) {
_dispatch_retain_2_unsafe(dq);
flags = DISPATCH_WAKEUP_CONSUME_2;
}
os_mpsc_push_update_prev(os_mpsc(dq, dq_items), prev, dou._do, do_next);
if (flags) {
return dx_wakeup(dq, qos, flags);
}
}
Roughly: if the queue's tail is NULL (the queue is empty), the item is neither a waiter nor a barrier, and the queue's async slot can be acquired, the work is redirected straight to the target queue (usually one of the root queues picked by the earlier formula). Otherwise _dispatch_lane_push appends it: the new item becomes the tail, and the previous tail's next pointer is made to point at it (os_mpsc_push_update_tail / os_mpsc_push_update_prev), just like appending to a linked list.
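The dq_items tail/next dance is a classic MPSC (multi-producer, single-consumer) intrusive linked-list push: atomically swap in the new tail, then link the previous tail's next pointer to the new item. A simplified sketch with C11 atomics (this shows the general technique, not the real os_mpsc macros, which handle the head pointer and priority overrides differently):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct item {
    struct item *_Atomic next;
};

struct mpsc_queue {
    struct item *_Atomic tail;   // like dq_items_tail
    struct item *_Atomic head;   // like dq_items_head
};

// Returns 1 when the queue was empty, i.e. the pusher must wake the drainer
// (the os_mpsc_push_was_empty(prev) check in _dispatch_lane_push).
static int mpsc_push(struct mpsc_queue *q, struct item *it) {
    atomic_store_explicit(&it->next, NULL, memory_order_relaxed);
    // Step 1: publish the new tail (os_mpsc_push_update_tail).
    struct item *prev = atomic_exchange_explicit(&q->tail, it, memory_order_release);
    if (prev == NULL) {
        // Queue was empty: the new item is also the head.
        atomic_store_explicit(&q->head, it, memory_order_relaxed);
        return 1;
    }
    // Step 2: link the old tail to the new item (os_mpsc_push_update_prev).
    atomic_store_explicit(&prev->next, it, memory_order_relaxed);
    return 0;
}

int main(void) {
    struct mpsc_queue q = { NULL, NULL };
    struct item a, b;
    printf("first push, was_empty=%d\n",  mpsc_push(&q, &a)); // 1: wakeup needed
    printf("second push, was_empty=%d\n", mpsc_push(&q, &b)); // 0
    return 0;
}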
So in most cases the work submitted to a queue we created is ultimately forwarded to its target queue, and that target queue is a root queue. Let's look at the root queue's dq_push, _dispatch_root_queue_push:
_dispatch_root_queue_push
|____dispatch_root_queue_push_inline
|____dispatch_root_queue_poke (only called when the first item arrives, to wake the queue up)
|____dispatch_root_queue_poke_slow
static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
int remaining = n;
int r = ENOSYS;
_dispatch_root_queues_init();
_dispatch_debug_root_queue(dq, __func__);
_dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n);
#if !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)
#endif
{
_dispatch_root_queue_debug("requesting new worker thread for global "
"queue: %p", dq);
r = _pthread_workqueue_addthreads(remaining,
_dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));
(void)dispatch_assume_zero(r);
return;
}
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_POOL
……
}
So on the first activation, poke_slow requests additional worker threads via _pthread_workqueue_addthreads (or the pthread-pool fallback).
Now let's print the call stack of a task running inside dispatch_async:
dispatch_queue_t concurrentQueue = dispatch_queue_create("concurrent_queue", DISPATCH_QUEUE_CONCURRENT);
dispatch_async(concurrentQueue, ^{
NSLog(@"%@",[NSThread callStackSymbols]);
});
Output:
(
0 Example1 0x000000010410380a __16-[TestGCD test1]_block_invoke_9 + 74
1 libdispatch.dylib 0x0000000106cafd7f _dispatch_call_block_and_release + 12
2 libdispatch.dylib 0x0000000106cb0db5 _dispatch_client_callout + 8
3 libdispatch.dylib 0x0000000106cb3c95 _dispatch_continuation_pop + 552
4 libdispatch.dylib 0x0000000106cb308f _dispatch_async_redirect_invoke + 849
5 libdispatch.dylib 0x0000000106cc1632 _dispatch_root_queue_drain + 351
6 libdispatch.dylib 0x0000000106cc1fca _dispatch_worker_thread2 + 130
7 libsystem_pthread.dylib 0x00000001070996b3 _pthread_wqthread + 583
8 libsystem_pthread.dylib 0x00000001070993fd start_wqthread + 13
)
You can see that the call stack actually starts at start_wqthread, but that part is assembly and there is nothing readable there, so we trace upward from _dispatch_worker_thread2 to see how this worker gets initialized.
_dispatch_root_queue_poke_slow
|__ _dispatch_root_queues_init
|__ _dispatch_root_queues_init_once
|__ _pthread_workqueue_init_with_kevent(_dispatch_worker_thread2……)
To sum up dispatch_async: the first time a root queue is poked it calls _dispatch_root_queue_poke_slow, which initializes the workqueue workers; subsequent tasks are then scheduled and executed by those workers.
Reference:
A really great source-code analysis: https://bestswifter.com/deep-gcd/
Source analysis: https://www.jianshu.com/p/7702c06cda4c
https://www.jianshu.com/p/bd629d25dc2e