Preface:
iOS gives us several threading technologies for handling work; the two we use most often are NSOperation and GCD. This article focuses on how GCD works internally.
I. GCD Overview:
1. Basic concepts:
GCD stands for Grand Central Dispatch. It is a pure-C API that packages threading into a set of powerful functions, giving iOS a complete, rigorous way to manage how tasks are executed. In code terms: queues schedule tasks, and the dispatch functions then execute them. How should we read that sentence?
①. Tasks:
This one is easy to understand: any action your code performs can be treated as a task. In GCD, a task is wrapped in a block.
②. Queues:

- A queue schedules tasks; I think of it as the first layer of control over task execution.
- A serial queue schedules task 1 and task 2 with an explicit ordering dependency. The queue dequeues tasks FIFO (first in, first out), so task 2 only runs after task 1 has finished.
- A concurrent queue is clearly different: broadly speaking, it can dispatch multiple tasks at the same time, and those tasks have no scheduling dependency on one another.
③. Dispatch functions:
Is queue scheduling alone enough to control how tasks run? Clearly not: by itself it cannot exploit the CPU's multiple cores, whereas the dispatch functions can, improving efficiency (see the sketch after this list):

- Synchronous function: dispatch_sync must wait for the task inside it to finish before execution continues, and it does not open a new thread.
- Asynchronous function: dispatch_async does not wait for the task inside it to finish and will open a thread to handle the block task.
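As a minimal sketch of the difference (assuming this runs inside some view controller; the queue label is arbitrary):
dispatch_queue_t queue = dispatch_queue_create("demo.sync.async", DISPATCH_QUEUE_CONCURRENT);
dispatch_sync(queue, ^{
    // Runs on the caller's thread; the caller is blocked until this block returns.
    NSLog(@"sync task on %@", [NSThread currentThread]);
});
dispatch_async(queue, ^{
    // dispatch_async returns immediately; this block usually runs later on a worker thread.
    NSLog(@"async task on %@", [NSThread currentThread]);
});
NSLog(@"after both dispatch calls");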
2. Why GCD:
①. GCD is the multithreading approach Apple designed and recommends; it takes full advantage of the device's multicore CPU and improves task throughput.
②. GCD manages thread lifecycles automatically, including thread creation, task scheduling, and thread destruction, so you never write thread-management code yourself.
③. The task's code is wrapped in a block that takes no parameters and returns no value.
3. Pairing queues with dispatch functions:

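The original post summarizes the pairings in a figure here. Roughly, the behavior can be sketched in code as follows (the queue labels are chosen arbitrarily for this example):
dispatch_queue_t serialQ = dispatch_queue_create("demo.serial", DISPATCH_QUEUE_SERIAL);
dispatch_queue_t concurrentQ = dispatch_queue_create("demo.concurrent", DISPATCH_QUEUE_CONCURRENT);
// serial + sync: runs inline on the caller's thread, strictly in order
dispatch_sync(serialQ, ^{ NSLog(@"serial + sync"); });
// serial + async: at most one extra thread, tasks still execute one at a time, in order
dispatch_async(serialQ, ^{ NSLog(@"serial + async"); });
// concurrent + sync: no new thread, the caller blocks until the block returns
dispatch_sync(concurrentQ, ^{ NSLog(@"concurrent + sync"); });
// concurrent + async: may open multiple threads; completion order is not guaranteed
dispatch_async(concurrentQ, ^{ NSLog(@"concurrent + async"); });
// main + sync called from the main thread deadlocks (see section IV below);
// main + async simply enqueues the block to run later on the main RunLoop.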
II. GCD queue source code analysis:
1. The main queue and the global queue:
①. The main queue:
- Concept: a serial queue dedicated to scheduling tasks onto the main thread, obtained with dispatch_get_main_queue();
- Characteristics: it does not open a thread, and if the main thread currently has a task executing, nothing added to the main queue will be scheduled until that task finishes;
- Exploring the main-queue source:
dispatch_queue_t serial = dispatch_queue_create("A", DISPATCH_QUEUE_SERIAL);
dispatch_queue_t conque = dispatch_queue_create("B", DISPATCH_QUEUE_CONCURRENT);
dispatch_queue_t mainQueue = dispatch_get_main_queue();
dispatch_queue_t globQueue = dispatch_get_global_queue(0, 0);
NSLog(@"%@-%@-%@-%@",serial,conque,mainQueue,globQueue);
The code above creates two queues labeled "A" and "B", then fetches the main queue and the global queue. The log output is:
<OS_dispatch_queue_serial: A>-<OS_dispatch_queue_concurrent: B>-<OS_dispatch_queue_main: com.apple.main-thread>-<OS_dispatch_queue_global: com.apple.root.default-qos>
Notice that the strings A and B sit just before the closing > in each description, as if a label string had been assigned, so working backwards the main queue's label should be com.apple.main-thread. Next, set a breakpoint inside this call, as shown in the screenshot below:

Typing the bt command in lldb prints the following call stack:
bt
* thread #1, queue = 'com.apple.main-thread', stop reason = breakpoint 7.1
* frame #0: 0x0000000106b9dcc7 Test`__29-[ViewController viewDidLoad]_block_invoke_2(.block_descriptor=0x0000000106ba1108) at ViewController.m:30:9
frame #1: 0x0000000106e0f7ec libdispatch.dylib`_dispatch_call_block_and_release + 12
frame #2: 0x0000000106e109c8 libdispatch.dylib`_dispatch_client_callout + 8
frame #3: 0x0000000106e1ee75 libdispatch.dylib`_dispatch_main_queue_callback_4CF + 1152
The relevant functions clearly live in libdispatch.dylib. Jumping into that library and searching for the string com.apple.main-thread leads to this definition:
struct dispatch_queue_static_s _dispatch_main_q = {
DISPATCH_GLOBAL_OBJECT_HEADER(queue_main),
#if !DISPATCH_USE_RESOLVERS
.do_targetq = _dispatch_get_default_queue(true),
#endif
.dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) |
DISPATCH_QUEUE_ROLE_BASE_ANON,
.dq_label = "com.apple.main-thread",
.dq_atomic_flags = DQF_THREAD_BOUND | DQF_WIDTH(1),
.dq_serialnum = 1,
};
②. The global queue
- Concept and characteristics: for convenience, Apple provides the global queue dispatch_get_global_queue(0, 0). The global queue is a concurrent queue; when doing multithreaded work, if you have no special requirements for the queue, you can run asynchronous tasks directly on it.
- Exploring the global-queue source:
Searching the same way, we arrive at this definition:
struct dispatch_queue_global_s _dispatch_root_queues[] = {
#define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \
((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \
DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \
DISPATCH_ROOT_QUEUE_IDX_##n##_QOS)
#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \
[_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \
DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), \
.dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \
.do_ctxt = _dispatch_root_queue_ctxt(_DISPATCH_ROOT_QUEUE_IDX(n, flags)), \
.dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \
.dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? \
_dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \
_dispatch_priority_make(DISPATCH_QOS_##n, 0)), \
__VA_ARGS__ \
}
_DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0,
.dq_label = "com.apple.root.maintenance-qos",
.dq_serialnum = 4,
),
...
_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK,
.dq_label = "com.apple.root.default-qos",
.dq_serialnum = 10,
),
...
_DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
.dq_label = "com.apple.root.user-interactive-qos.overcommit",
.dq_serialnum = 15,
),
};
It gets a bit complicated here, but the dq_serialnum lines stand out: the main queue has 1, and the global queues have 4-15. What does that mean? Can we simply say that dq_serialnum == 1 means "main queue"? That alone is not rigorous, because there is also a more fundamental difference: one is a serial queue and the other is a concurrent queue.
So search the library for dq_serialnum; it turns out there is a function that assigns it:

The enclosing function looks like a queue initializer, which is a key clue. The dq_serialnum assignment itself calls os_atomic_inc_orig on an &-referenced counter; clicking into that counter reveals a key piece of information:
// skip zero
// 1 - main_q
// 2 - mgr_q
// 3 - mgr_root_q
// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues
// 17 - workloop_fallback_q
// we use 'xadd' on Intel, so the initial value == next assigned
#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 17
The comments // 1 - main_q and // 4,5,6,7,8,9,10,11,12,13,14,15 - global queues make it clear: dq_serialnum == 1 is the main queue, and 4-15 are the global queues.
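You can line these root-queue entries up with what app code sees by asking for different QoS classes (the QOS_CLASS_* constants are the public counterparts of the DISPATCH_QOS_* names in the table above); on the SDKs I have tried, the printed labels should match the dq_label values shown there:
NSLog(@"%@", dispatch_get_global_queue(QOS_CLASS_USER_INTERACTIVE, 0)); // com.apple.root.user-interactive-qos
NSLog(@"%@", dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0));          // com.apple.root.default-qos
NSLog(@"%@", dispatch_get_global_queue(QOS_CLASS_BACKGROUND, 0));       // com.apple.root.background-qos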
That is still not enough, so look at the queue creation method dispatch_queue_create, whose return value is a dispatch_queue_t. Following the call chain step by step, dispatch_queue_create -> _dispatch_lane_create_with_target, we arrive at a very long but crucial piece of code:
static dispatch_queue_t
_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
dispatch_queue_t tq, bool legacy)
{
dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa); // unpack the serial/concurrent attribute macro into dqai
//
// Step 1: Normalize arguments (qos, overcommit, tq): take the dqai from above and normalize it
//
dispatch_qos_t qos = dqai.dqai_qos;
#if !HAVE_PTHREAD_WORKQUEUE_QOS
if (qos == DISPATCH_QOS_USER_INTERACTIVE) {
dqai.dqai_qos = qos = DISPATCH_QOS_USER_INITIATED;
}
if (qos == DISPATCH_QOS_MAINTENANCE) {
dqai.dqai_qos = qos = DISPATCH_QOS_BACKGROUND;
}
#endif // !HAVE_PTHREAD_WORKQUEUE_QOS
_dispatch_queue_attr_overcommit_t overcommit = dqai.dqai_overcommit;
if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) {
if (tq->do_targetq) {
DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and "
"a non-global target queue");
}
}
if (tq && dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
// Handle discrepancies between attr and target queue, attributes win
if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
overcommit = _dispatch_queue_attr_overcommit_enabled;
} else {
overcommit = _dispatch_queue_attr_overcommit_disabled;
}
}
if (qos == DISPATCH_QOS_UNSPECIFIED) {
qos = _dispatch_priority_qos(tq->dq_priority);
}
tq = NULL;
} else if (tq && !tq->do_targetq) {
// target is a pthread or runloop root queue, setting QoS or overcommit
// is disallowed
if (overcommit != _dispatch_queue_attr_overcommit_unspecified) {
DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute "
"and use this kind of target queue");
}
} else {
if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
// Serial queues default to overcommit!
overcommit = dqai.dqai_concurrent ?
_dispatch_queue_attr_overcommit_disabled :
_dispatch_queue_attr_overcommit_enabled;
}
}
if (!tq) {
tq = _dispatch_get_root_queue(
qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq;
if (unlikely(!tq)) {
DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
}
}
//
// Step 2: Initialize the queue
//
if (legacy) {
// if any of these attributes is specified, use non legacy classes
if (dqai.dqai_inactive || dqai.dqai_autorelease_frequency) {
legacy = false;
}
}
const void *vtable;
dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0;
if (dqai.dqai_concurrent) {
vtable = DISPATCH_VTABLE(queue_concurrent);
} else {
vtable = DISPATCH_VTABLE(queue_serial);
}
switch (dqai.dqai_autorelease_frequency) {
case DISPATCH_AUTORELEASE_FREQUENCY_NEVER:
dqf |= DQF_AUTORELEASE_NEVER;
break;
case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM:
dqf |= DQF_AUTORELEASE_ALWAYS;
break;
}
if (label) {
const char *tmp = _dispatch_strdup_if_mutable(label);
if (tmp != label) {
dqf |= DQF_LABEL_NEEDS_FREE;
label = tmp;
}
}
dispatch_lane_t dq = _dispatch_object_alloc(vtable,
sizeof(struct dispatch_lane_s));
_dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ?
DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
(dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0));
dq->dq_label = label;
dq->dq_priority = _dispatch_priority_make((dispatch_qos_t)dqai.dqai_qos,
dqai.dqai_relpri);
if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
}
if (!dqai.dqai_inactive) {
_dispatch_queue_priority_inherit_from_target(dq, tq);
_dispatch_lane_inherit_wlh_from_target(dq, tq);
}
_dispatch_retain(tq);
dq->do_targetq = tq;
_dispatch_object_debug(dq, "%s", __func__);
return _dispatch_trace_queue_create(dq)._dq;
}
What is all this? Start with the return: the trace in _dispatch_trace_queue_create suggests tracing, much like the analytics tracking we write ourselves, and it is too involved to follow here. The important part is dq (the same dq that showed up earlier while exploring the main queue); these lines look like the code that allocates memory and instantiates the queue, i.e. the alloc plus init:
dispatch_lane_t dq = _dispatch_object_alloc(vtable,
sizeof(struct dispatch_lane_s));
_dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ?
DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
(dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0));
This _dispatch_queue_init seems to have appeared earlier while exploring the global queue. Clicking into it we find dqf |= DQF_WIDTH(width); and given the argument passed in, dqai.dqai_concurrent ? DISPATCH_QUEUE_WIDTH_MAX : 1, we can be sure that a serial queue gets DQF_WIDTH(1). That width, not the serialnum, is what really marks a queue as serial.
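A side note on where dqai.dqai_concurrent comes from: the attribute you pass to dispatch_queue_create is itself just a macro in the public dispatch/queue.h header (quoted from memory here, so treat the exact formatting as approximate), and that is what _dispatch_queue_attr_to_info unpacks at the top of _dispatch_lane_create_with_target:
#define DISPATCH_QUEUE_SERIAL NULL
#define DISPATCH_QUEUE_CONCURRENT \
        DISPATCH_GLOBAL_OBJECT(dispatch_queue_attr_t, _dispatch_queue_attr_concurrent)
So a serial queue is literally created with a NULL attribute; only the concurrent attribute object sets dqai_concurrent, which in turn selects DISPATCH_QUEUE_WIDTH_MAX instead of a width of 1.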
III. More on GCD queues:
1. Recap:
So far we have really only studied how queues are created, identifying the main queue, the global queue, serial queues and concurrent queues from the source. But something odd stands out: outside the create function we expect a dispatch_queue_t back, yet the _dispatch_queue_init constructor and the _dispatch_trace_queue_create in the return statement both work with dispatch_queue_class_t, and if you click all the way into _dispatch_object_alloc it ultimately returns an _os_object_t. That feels a little messy, so what exactly is a queue?
2. The dispatch_queue_t inheritance chain:
dispatch_queue_t serial = dispatch_queue_create("A", DISPATCH_QUEUE_SERIAL);
Click into dispatch_queue_t and keep following the definitions until you find this macro:
DISPATCH_DECL(dispatch_queue);
#define DISPATCH_DECL(name) OS_OBJECT_DECL_SUBCLASS(name, dispatch_object)
OS_OBJECT_DECL_SUBCLASS cannot be traced any further from here, but searching the libdispatch source turns up the following chain of definitions:
#define OS_OBJECT_DECL_SUBCLASS(name, super) \
OS_OBJECT_DECL_IMPL(name, NSObject, <OS_OBJECT_CLASS(super)>)
#define OS_OBJECT_CLASS(name) OS_##name
#define OS_OBJECT_DECL_IMPL(name, adhere, ...) \
OS_OBJECT_DECL_PROTOCOL(name, __VA_ARGS__) \
typedef adhere<OS_OBJECT_CLASS(name)> \
* OS_OBJC_INDEPENDENT_CLASS name##_t
#define OS_OBJECT_DECL_PROTOCOL(name, ...) \
@protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \
@end
#define OS_OBJC_INDEPENDENT_CLASS __attribute__((objc_independent_class))
Expanding it all the way gives:
@protocol OS_dispatch_queue <OS_dispatch_object>
@end
typedef NSObject<OS_dispatch_queue> * dispatch_queue_t
Now find the protocol OS_dispatch_queue in the libdispatch source:
#ifdef __OBJC__
@protocol OS_dispatch_queue;
#endif
// Lane cluster class: type for all the queues that have a single head/tail pair
typedef union {
struct dispatch_lane_s *_dl;
struct dispatch_queue_static_s *_dsq;
struct dispatch_queue_global_s *_dgq;
struct dispatch_queue_pthread_root_s *_dpq;
struct dispatch_source_s *_ds;
struct dispatch_channel_s *_dch;
struct dispatch_mach_s *_dm;
#ifdef __OBJC__
id<OS_dispatch_queue> _objc_dq; // unsafe cast for the sake of object.m
#endif
} dispatch_lane_class_t DISPATCH_TRANSPARENT_UNION;
// Dispatch queue cluster class: type for any dispatch_queue_t
typedef union {
struct dispatch_queue_s *_dq;
struct dispatch_workloop_s *_dwl;
struct dispatch_lane_s *_dl;
struct dispatch_queue_static_s *_dsq;
struct dispatch_queue_global_s *_dgq;
struct dispatch_queue_pthread_root_s *_dpq;
struct dispatch_source_s *_ds;
struct dispatch_channel_s *_dch;
struct dispatch_mach_s *_dm;
dispatch_lane_class_t _dlu;
#ifdef __OBJC__
id<OS_dispatch_queue> _objc_dq;
#endif
} dispatch_queue_class_t DISPATCH_TRANSPARENT_UNION;
#ifndef __OBJC__
typedef union {
struct _os_object_s *_os_obj;
struct dispatch_object_s *_do;
struct dispatch_queue_s *_dq;
struct dispatch_queue_attr_s *_dqa;
struct dispatch_group_s *_dg;
struct dispatch_source_s *_ds;
struct dispatch_channel_s *_dch;
struct dispatch_mach_s *_dm;
struct dispatch_mach_msg_s *_dmsg;
struct dispatch_semaphore_s *_dsema;
struct dispatch_data_s *_ddata;
struct dispatch_io_s *_dchannel;
struct dispatch_continuation_s *_dc;
struct dispatch_sync_context_s *_dsc;
struct dispatch_operation_s *_doperation;
struct dispatch_disk_s *_ddisk;
struct dispatch_workloop_s *_dwl;
struct dispatch_lane_s *_dl;
struct dispatch_queue_static_s *_dsq;
struct dispatch_queue_global_s *_dgq;
struct dispatch_queue_pthread_root_s *_dpq;
dispatch_queue_class_t _dqu;
dispatch_lane_class_t _dlu;
uintptr_t _do_value;
} dispatch_object_t DISPATCH_TRANSPARENT_UNION;
I could not make much sense of this, so let's leave it for later and try another angle: a global search shows the same macro defined a second way:
#define DISPATCH_DECL(name) \
typedef struct name##_s : public dispatch_object_s {} *name##_t
Substituting dispatch_queue gives:
typedef struct dispatch_queue_s : public dispatch_object_s {} *dispatch_queue_t
Put plainly, dispatch_queue_t is a pointer to a dispatch_queue_s struct, which publicly inherits from dispatch_object_s. Now look at this code:
typedef struct dispatch_object_s {
private:
dispatch_object_s();
~dispatch_object_s();
dispatch_object_s(const dispatch_object_s &);
void operator=(const dispatch_object_s &);
} *dispatch_object_t;
#define DISPATCH_DECL(name) \
typedef struct name##_s : public dispatch_object_s {} *name##_t
#define DISPATCH_DECL_SUBCLASS(name, base) \
typedef struct name##_s : public base##_s {} *name##_t
#define DISPATCH_GLOBAL_OBJECT(type, object) (static_cast<type>(&(object)))
#define DISPATCH_RETURNS_RETAINED
#else /* Plain C */
#ifndef __DISPATCH_BUILDING_DISPATCH__
typedef union {
struct _os_object_s *_os_obj;
struct dispatch_object_s *_do;
struct dispatch_queue_s *_dq;
struct dispatch_queue_attr_s *_dqa;
struct dispatch_group_s *_dg;
struct dispatch_source_s *_ds;
struct dispatch_channel_s *_dch;
struct dispatch_mach_s *_dm;
struct dispatch_mach_msg_s *_dmsg;
struct dispatch_semaphore_s *_dsema;
struct dispatch_data_s *_ddata;
struct dispatch_io_s *_dchannel;
} dispatch_object_t DISPATCH_TRANSPARENT_UNION;
So ultimately everything traces back to dispatch_object_t.
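Under ARC you can also poke at this from the Objective-C side. The sketch below (it needs #import <objc/runtime.h>) walks the runtime superclass chain of a freshly created queue so you can see where dispatch_queue_t sits as an object; the exact class names printed vary by SDK version, so treat the expected output as approximate:
dispatch_queue_t q = dispatch_queue_create("A", DISPATCH_QUEUE_SERIAL);
Class cls = object_getClass(q);              // requires #import <objc/runtime.h>
while (cls) {
    NSLog(@"%@", NSStringFromClass(cls));    // e.g. OS_dispatch_queue_serial, then its superclasses
    cls = class_getSuperclass(cls);
}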
IV. GCD dispatch function source exploration:
1. The synchronous function:
Go straight to the libdispatch source and look it up:
void
dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
uintptr_t dc_flags = DC_FLAG_BLOCK;
if (unlikely(_dispatch_block_has_private_data(work))) {
return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
}
_dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}
Here unlikely literally means "not likely to happen", so skip that branch. Look at _dispatch_sync_f: it receives dq of type dispatch_queue_t and work of type dispatch_block_t, i.e. the queue and the task that queue schedules, plus a wrapper function built around work, which looks important but can wait. Click into _dispatch_sync_f and keep following the calls; the chain looks like this:
static void
_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
uintptr_t dc_flags)
{
_dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
}
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func, uintptr_t dc_flags)
{
#mark serial queue
if (likely(dq->dq_width == 1)) {
#mark a barrier function shows up
return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
}
#mark type mismatch, crash
if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
}
#mark we saw dispatch_lane_t earlier, in the queue create path's alloc
dispatch_lane_t dl = upcast(dq)._dl;
// Global concurrent queues and queues bound to non-dispatch threads
// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
#mark the deadlock path
return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
}
if (unlikely(dq->do_targetq->do_targetq)) {
#mark
return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
}
_dispatch_introspection_sync_begin(dl);
_dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
_dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}
static void
_dispatch_sync_invoke_and_complete(dispatch_lane_t dq, void *ctxt,
dispatch_function_t func DISPATCH_TRACE_ARG(void *dc))
{
_dispatch_sync_function_invoke_inline(dq, ctxt, func);
#mark looks like tracing/instrumentation, skip it
_dispatch_trace_item_complete(dc);
_dispatch_lane_non_barrier_complete(dq, 0);
}
static inline void
_dispatch_sync_function_invoke_inline(dispatch_queue_class_t dq, void *ctxt,
dispatch_function_t func)
{
dispatch_thread_frame_s dtf;
_dispatch_thread_frame_push(&dtf, dq);
#mark this function looks familiar; it showed up in the call stack at the start of the article, take a look
_dispatch_client_callout(ctxt, func);
_dispatch_perfmon_workitem_inc();
_dispatch_thread_frame_pop(&dtf);
}
void
_dispatch_client_callout(void *ctxt, dispatch_function_t f)
{
_dispatch_get_tsd_base();
void *u = _dispatch_get_unwind_tsd();
if (likely(!u)) return f(ctxt);
_dispatch_set_unwind_tsd(NULL);
#mark here f is called with ctxt (the block) as its argument
f(ctxt);
_dispatch_free_unwind_tsd();
_dispatch_set_unwind_tsd(u);
}
Finally we arrive at f(ctxt): the task work gets executed right there. Now let's look at the deadlock case, starting from a bit of main-queue sync code:
dispatch_sync(dispatch_get_main_queue(), ^{
NSLog(@"test");
});
Running this crashes; it is the classic deadlock. So what do the deadlock's call chain and logic look like? After several rounds of symbolic breakpoints, it turns out execution reaches the function _dispatch_sync_f_slow, shown below:
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
dispatch_function_t func, uintptr_t top_dc_flags,
dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
dispatch_queue_t top_dq = top_dqu._dq;
dispatch_queue_t dq = dqu._dq;
if (unlikely(!dq->do_targetq)) {
return _dispatch_sync_function_invoke(dq, ctxt, func);
}
pthread_priority_t pp = _dispatch_get_priority();
struct dispatch_sync_context_s dsc = {
.dc_flags = DC_FLAG_SYNC_WAITER | dc_flags,
.dc_func = _dispatch_async_and_wait_invoke,
.dc_ctxt = &dsc,
.dc_other = top_dq,
.dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
.dc_voucher = _voucher_get(),
.dsc_func = func,
.dsc_ctxt = ctxt,
.dsc_waiter = _dispatch_tid_self(),
};
_dispatch_trace_item_push(top_dq, &dsc);
__DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);
if (dsc.dsc_func == NULL) {
// dsc_func being cleared means that the block ran on another thread ie.
// case (2) as listed in _dispatch_async_and_wait_f_slow.
dispatch_queue_t stop_dq = dsc.dc_other;
return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
}
_dispatch_introspection_sync_begin(top_dq);
_dispatch_trace_item_pop(top_dq, &dsc);
_dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags
DISPATCH_TRACE_ARG(&dsc));
}
// these macros fetch the thread id (tid) of the current thread
#define _dispatch_tid_self() ((dispatch_tid)_dispatch_thread_port())
#define _dispatch_thread_port() pthread_mach_thread_np(_dispatch_thread_self())
#define _dispatch_thread_self() ((uintptr_t)pthread_self())
Remove the breakpoints and run again: it crashes. Printing the call stack with the lldb bt command gives:

Execution lands inside _dispatch_sync_f_slow, on the call __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq). Click into it:
static void
__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
{
uint64_t dq_state = _dispatch_wait_prepare(dq);
if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
"dispatch_sync called on queue "
"already owned by current thread");
#mark the sync call targets a queue already owned by the current thread
}
...
}
Look at the condition being checked:
_dq_state_drain_locked_by(uint64_t dq_state, dispatch_tid tid)
{
return _dispatch_lock_is_locked_by((dispatch_lock)dq_state, tid);
}
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
{
// equivalent to _dispatch_lock_owner(lock_value) == tid
return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}
In the check ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0, DLOCK_OWNER_MASK is a large non-zero mask, so the whole expression can only be true when (lock_value ^ tid) == 0, i.e. lock_value equals tid. From the call chain, lock_value derives from the state of the dq passed in at the outermost level, while tid is the current thread's id; when the two match, the check fires and we get the deadlock crash.
This is a bit abstract, so how should we picture the deadlock? The thread that is now waiting (to be scheduled) and the thread already waiting on (executing) the current queue are one and the same thread, so each waits on the other and we deadlock.
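The same check fires without the main queue at all. A minimal sketch that reproduces it on a custom serial queue (the label is arbitrary):
dispatch_queue_t serialQ = dispatch_queue_create("deadlock.demo", DISPATCH_QUEUE_SERIAL);
dispatch_async(serialQ, ^{
    // This block is currently draining serialQ, so the current thread owns the queue...
    dispatch_sync(serialQ, ^{
        // ...and this sync wait targets that same queue: _dq_state_drain_locked_by()
        // matches the tid and libdispatch crashes with
        // "dispatch_sync called on queue already owned by current thread".
        NSLog(@"never reached");
    });
});
Replacing the inner dispatch_sync with dispatch_async (or targeting a different queue) breaks the mutual wait and the crash disappears.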
2. The asynchronous function
Look it up in the libdispatch source:
void
dispatch_async(dispatch_queue_t dq, dispatch_block_t work)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
uintptr_t dc_flags = DC_FLAG_CONSUME;
dispatch_qos_t qos;
qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
_dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}
Start with the line qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags); and click into _dispatch_continuation_init, following the call chain downward:
static inline dispatch_qos_t
_dispatch_continuation_init(dispatch_continuation_t dc,
dispatch_queue_class_t dqu, dispatch_block_t work,
dispatch_block_flags_t flags, uintptr_t dc_flags)
{
void *ctxt = _dispatch_Block_copy(work);
dc_flags |= DC_FLAG_BLOCK | DC_FLAG_ALLOCATED;
if (unlikely(_dispatch_block_has_private_data(work))) {
dc->dc_flags = dc_flags;
dc->dc_ctxt = ctxt;
// will initialize all fields but requires dc_flags & dc_ctxt to be set
return _dispatch_continuation_init_slow(dc, dqu, flags);
}
dispatch_function_t func = _dispatch_Block_invoke(work);
if (dc_flags & DC_FLAG_CONSUME) {
func = _dispatch_call_block_and_release;
}
return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags);
}
static inline dispatch_qos_t
_dispatch_continuation_init_f(dispatch_continuation_t dc,
dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t f,
dispatch_block_flags_t flags, uintptr_t dc_flags)
{
pthread_priority_t pp = 0;
dc->dc_flags = dc_flags | DC_FLAG_ALLOCATED;
#mark here the task ctxt and f are both stored into dc
dc->dc_func = f;
dc->dc_ctxt = ctxt;
// in this context DISPATCH_BLOCK_HAS_PRIORITY means that the priority
// should not be propagated, only taken from the handler if it has one
if (!(flags & DISPATCH_BLOCK_HAS_PRIORITY)) {
pp = _dispatch_priority_propagate();
}
_dispatch_continuation_voucher_set(dc, flags);
#mark from here on only dc is needed
return _dispatch_continuation_priority_set(dc, dqu, pp, flags);
}
static inline dispatch_qos_t
#mark roughly the priority setup (async opens threads, so priorities have to be distinguished)
_dispatch_continuation_priority_set(dispatch_continuation_t dc,
dispatch_queue_class_t dqu,
pthread_priority_t pp, dispatch_block_flags_t flags)
{
dispatch_qos_t qos = DISPATCH_QOS_UNSPECIFIED;
#if HAVE_PTHREAD_WORKQUEUE_QOS
dispatch_queue_t dq = dqu._dq;
if (likely(pp)) {
bool enforce = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS);
bool is_floor = (dq->dq_priority & DISPATCH_PRIORITY_FLAG_FLOOR);
bool dq_has_qos = (dq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK);
if (enforce) {
pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
qos = _dispatch_qos_from_pp_unsafe(pp);
} else if (!is_floor && dq_has_qos) {
pp = 0;
} else {
qos = _dispatch_qos_from_pp_unsafe(pp);
}
}
dc->dc_priority = pp;
#else
(void)dc; (void)dqu; (void)pp; (void)flags;
#endif
return qos;
}
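Incidentally, the _dispatch_block_has_private_data and DISPATCH_BLOCK_HAS_PRIORITY branches above are what you hit when the block itself carries QoS information, which app code can attach with the public dispatch_block_create_with_qos_class API (a sketch; the flag and QoS values here are chosen arbitrarily):
dispatch_block_t work = dispatch_block_create_with_qos_class(
        DISPATCH_BLOCK_ENFORCE_QOS_CLASS, QOS_CLASS_UTILITY, 0, ^{
    NSLog(@"block carrying its own QoS");
});
dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), work);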
To recap, _dispatch_continuation_init is the function that handles the task's priority, returning the result into qos. Next look at _dispatch_continuation_async and click through its call chain:
static inline void
_dispatch_continuation_async(dispatch_queue_class_t dqu,
dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
#if DISPATCH_INTROSPECTION
if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
_dispatch_trace_item_push(dqu, dc);
}
#else
(void)dc_flags;
#endif
return dx_push(dqu._dq, dc, qos);
}
dx_push is a macro; clicking through step by step:
#define dx_push(x, y, z) dx_vtable(x)->dq_push(x, y, z)
Look straight at dq_push and search the library globally for it. To keep the focus on multithreaded handling, pick this vtable instance:
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
.do_type = DISPATCH_QUEUE_CONCURRENT_TYPE,
.do_dispose = _dispatch_lane_dispose,
.do_debug = _dispatch_queue_debug,
.do_invoke = _dispatch_lane_invoke,
.dq_activate = _dispatch_lane_activate,
.dq_wakeup = _dispatch_lane_wakeup,
.dq_push = _dispatch_lane_concurrent_push,
);
For a concurrent queue, _dispatch_lane_concurrent_push is what dx_push resolves to, so return dx_push(dqu._dq, dc, qos); translates to _dispatch_lane_concurrent_push(dqu._dq, dc, qos). Searching the library turns up this function:
void
_dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou,
dispatch_qos_t qos)
{
// <rdar://problem/24738102&24743140> reserving non barrier width
// doesn't fail if only the ENQUEUED bit is set (unlike its barrier
// width equivalent), so we have to check that this thread hasn't
// enqueued anything ahead of this call or we can break ordering
if (dq->dq_items_tail == NULL &&
!_dispatch_object_is_waiter(dou) &&
!_dispatch_object_is_barrier(dou) &&
_dispatch_queue_try_acquire_async(dq)) {
return _dispatch_continuation_redirect_push(dq, dou, qos);
}
#mark same as the serial queue's dx_push; _dispatch_object_is_barrier(dou) == true also ends up here, revisit later
_dispatch_lane_push(dq, dou, qos);
}
Debugging with a symbolic breakpoint on _dispatch_continuation_redirect_push shows this is the branch taken, so click into it:
static void
_dispatch_continuation_redirect_push(dispatch_lane_t dl,
dispatch_object_t dou, dispatch_qos_t qos)
{
if (likely(!_dispatch_object_is_redirection(dou))) {
dou._dc = _dispatch_async_redirect_wrap(dl, dou);
} else if (!dou._dc->dc_ctxt) {
// find first queue in descending target queue order that has
// an autorelease frequency set, and use that as the frequency for
// this continuation.
dou._dc->dc_ctxt = (void *)
(uintptr_t)_dispatch_queue_autorelease_frequency(dl);
}
#mark what gets fetched is dl->do_targetq
dispatch_queue_t dq = dl->do_targetq;
if (!qos) qos = _dispatch_priority_qos(dq->dq_priority);
#mark is this dq the one handed down from the caller?
dx_push(dq, dou, qos);
}
The dq passed to this dx_push is dl->do_targetq, which we already saw in the queue create function; note this code:
_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
dispatch_queue_t tq, bool legacy)
{
if (!tq) {
tq = _dispatch_get_root_queue(
qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq;
if (unlikely(!tq)) {
DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
}
}
....
dq->do_targetq = tq;
....
}
Now search the whole library for _dispatch_get_root_queue:
#mark note that what comes back here is a global (root) queue
static inline dispatch_queue_global_t
_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
{
if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) {
DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
}
return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
}
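A quick sanity check of that indexing, using the enum values from the version of the sources being read here (where DISPATCH_QOS_DEFAULT is 4):
// qos = DISPATCH_QOS_DEFAULT (4), overcommit = false
//   -> &_dispatch_root_queues[2 * (4 - 1) + 0] = &_dispatch_root_queues[6]
//   -> the "com.apple.root.default-qos" entry, whose dq_serialnum of 10 matches the
//      _dispatch_root_queues table shown back in section II.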
Perfect: the dq fed into dx_push here is actually a global root queue, which brings us to this vtable instance:
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane,
.do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
.do_dispose = _dispatch_object_no_dispose,
.do_debug = _dispatch_queue_debug,
.do_invoke = _dispatch_object_no_invoke,
.dq_activate = _dispatch_queue_no_activate,
.dq_wakeup = _dispatch_root_queue_wakeup,
.dq_push = _dispatch_root_queue_push,
);
Take dq_push, i.e. _dispatch_root_queue_push, and search for that function:
void
_dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou,
dispatch_qos_t qos)
{
...
_dispatch_root_queue_push_inline(rq, dou, dou, 1);
}
static inline void
_dispatch_root_queue_push_inline(dispatch_queue_global_t dq,
dispatch_object_t _head, dispatch_object_t _tail, int n)
{
struct dispatch_object_s *hd = _head._do, *tl = _tail._do;
if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) {
return _dispatch_root_queue_poke(dq, n, 0);
}
}
void
_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor)
{
...
return _dispatch_root_queue_poke_slow(dq, n, floor);
}
static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
...
#if !defined(_WIN32)
pthread_attr_t *attr = &pqc->dpq_thread_attr;
pthread_t tid, *pthr = &tid;
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
if (unlikely(dq == &_dispatch_mgr_root_queue)) {
pthr = _dispatch_mgr_root_queue_init();
}
#endif
do {
_dispatch_retain(dq); // released in _dispatch_worker_thread
while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) {
if (r != EAGAIN) {
(void)dispatch_assume_zero(r);
}
_dispatch_temporary_resource_shortage();
}
} while (--remaining);
#else // defined(_WIN32)
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
if (unlikely(dq == &_dispatch_mgr_root_queue)) {
_dispatch_mgr_root_queue_init();
}
#endif
do {
_dispatch_retain(dq); // released in _dispatch_worker_thread
#if DISPATCH_DEBUG
unsigned dwStackSize = 0;
#else
unsigned dwStackSize = 64 * 1024;
#endif
uintptr_t hThread = 0;
while (!(hThread = _beginthreadex(NULL, dwStackSize, _dispatch_worker_thread_thunk, dq, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) {
if (errno != EAGAIN) {
(void)dispatch_assume(hThread);
}
_dispatch_temporary_resource_shortage();
}
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) {
(void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread, _dispatch_mgr_sched.prio) == TRUE);
}
#endif
CloseHandle((HANDLE)hThread);
} while (--remaining);
#endif // defined(_WIN32)
#else
(void)floor;
#endif // DISPATCH_USE_PTHREAD_POOL
}
At last we see thread-related code such as pthread_create and hThread = _beginthreadex: this is where the asynchronous function ultimately creates its threads.
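This matches what we can observe from app code: a block dispatched asynchronously onto the global queue normally ends up on one of those worker threads rather than the main thread (a minimal check; the exact thread shown will vary):
dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^{
    // Typically prints a worker thread (number != 1), i.e. not the main thread.
    NSLog(@"async block on %@", [NSThread currentThread]);
});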
V. No summary this time.
Next up: a look at GCD through some hands-on exercises.