Binder Driver Scenario Analysis
Paths:
/framework/base/core/java/android/os/
- IInterface.java
- IBinder.java
- Parcel.java
- IServiceManager.java
- ServiceManager.java
- ServiceManagerNative.java
- Binder.java
/framework/base/core/jni/
- android_os_Parcel.cpp
- AndroidRuntime.cpp
- android_util_Binder.cpp (core class)
/framework/native/libs/binder
- IServiceManager.cpp
- BpBinder.cpp
- Binder.cpp
- IPCThreadState.cpp (core class)
- ProcessState.cpp (core class)
/framework/native/include/binder/
- IServiceManager.h
- IInterface.h
/framework/native/cmds/servicemanager/
- service_manager.c
- binder.c
/kernel/drivers/staging/android/
- binder.c
- uapi/binder.h
1. Starting the service manager
The source code lives in:
framework/native/cmds/servicemanager/
- service_manager.c
- binder.c
kernel/drivers/ (the exact path varies between Linux branches)
- staging/android/binder.c
- android/binder.c
The resulting executable is named: servicemanager
1.1 Overview
a. Open the driver: open("/dev/binder"), ioctl(BINDER_VERSION), mmap
(in the kernel, the first call to binder_ioctl() creates a binder_thread)
b. Call ioctl to tell the driver that this process is the service manager: ioctl(BINDER_SET_CONTEXT_MGR)
c. Call ioctl to issue a write operation, handled by binder_thread_write: ioctl(BC_ENTER_LOOPER)
d. Call ioctl to issue a read operation, handled by binder_thread_read (at the start of the read, binder_thread_read in the kernel writes a cmd = BR_NOOP into the data);
when there is no data, the service manager process sleeps, waiting for other programs to send it messages.
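For orientation, steps a-d can be condensed into a minimal user-space sketch built from raw system calls only (a simplified illustration that assumes the kernel UAPI header is available as <linux/android/binder.h>; error handling and the command parser are omitted). The real implementation in service_manager.c and binder.c is analyzed in 1.2 below.
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android/binder.h> /* UAPI header; the path can differ between kernel trees */

int main(void)
{
    /* a. open the driver and map 128KB for receiving transaction data */
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        return -1;
    if (mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0) == MAP_FAILED)
        return -1;

    /* b. tell the driver that this process is the context manager */
    if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
        return -1;

    /* c. a write: BC_ENTER_LOOPER marks this thread as a looper */
    uint32_t enter = BC_ENTER_LOOPER;
    struct binder_write_read bwr = {0};
    bwr.write_size = sizeof(enter);
    bwr.write_buffer = (uintptr_t)&enter;
    ioctl(fd, BINDER_WRITE_READ, &bwr);

    /* d. reads: the first cmd that comes back is BR_NOOP; with nothing queued,
     * the thread then sleeps inside the ioctl until another process sends work */
    uint32_t readbuf[32];
    for (;;) {
        struct binder_write_read r = {0};
        r.read_size = sizeof(readbuf);
        r.read_buffer = (uintptr_t)readbuf;
        if (ioctl(fd, BINDER_WRITE_READ, &r) < 0)
            break;
        /* parse readbuf[0 .. r.read_consumed) here (BR_NOOP, BR_TRANSACTION, ...) */
    }
    return 0;
}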
1.2 Source analysis
The service manager's entry point is in service_manager.c.
struct binder_state
{
int fd; // file descriptor for /dev/binder
void *mapped; // start address of the mmap'ed region
size_t mapsize; // size of the mapping, 128KB by default
};
int main(int argc, char** argv)
{
struct binder_state *bs;
union selinux_callback cb;
char *driver;
if (argc > 1) {
driver = argv[1];
} else {
driver = "/dev/binder";
}
bs = binder_open(driver, 128*1024); //open the binder driver, mmap, etc.; request a 128KB mapping
......
//call ioctl to tell the driver that this process is the service manager
//return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
...
//fall into the loop that reads incoming data
binder_loop(bs, svcmgr_handler);
return 0;
}
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
struct binder_state *bs;
struct binder_version vers;
// void *malloc(size_t size): allocates the requested memory and returns a pointer to it.
bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return NULL;
}
//calls into the driver's binder_open()
//in kernel space this creates a binder_proc object, stores it in filp->private_data, and adds it to the global binder_procs list
bs->fd = open(driver, O_RDWR | O_CLOEXEC);
if (bs->fd < 0) {
fprintf(stderr,"binder: cannot open %s (%s)\n",
driver, strerror(errno));
goto fail_open;
}
//send an ioctl to check that the kernel and user-space protocol versions match
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
fprintf(stderr,
"binder: kernel driver version (%d) differs from user space version (%d)\n",
vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
goto fail_open;
}
bs->mapsize = mapsize;
//in the driver this creates a binder_buffer object and puts it on the proc->buffers list of the current binder_proc.
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}
return bs;
fail_map:
close(bs->fd);
fail_open:
free(bs);
return NULL;
}
kmalloc, kzalloc, vmalloc
kmalloc: allocates memory in kernel space without zeroing it; the memory obtained is physically contiguous.
kzalloc: kmalloc plus zero-initialization.
vmalloc: allocates kernel memory in the virtual range between vmalloc_start and vmalloc_end; what you get back is a virtual address that need not be physically contiguous.
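A small hypothetical kernel-module fragment (not from the Binder source) to make the difference concrete:
#include <linux/slab.h>    /* kmalloc, kzalloc, kfree */
#include <linux/vmalloc.h> /* vmalloc, vfree */

static void alloc_demo(void)
{
    char *a = kmalloc(4096, GFP_KERNEL);  /* physically contiguous, contents uninitialized */
    char *b = kzalloc(4096, GFP_KERNEL);  /* physically contiguous, zero-filled */
    char *c = vmalloc(1 << 20);           /* only virtually contiguous; suited to large buffers
                                           * that do not need physical contiguity (not for DMA) */
    kfree(a);   /* kfree/vfree accept NULL, so no separate checks are needed here */
    kfree(b);
    vfree(c);
}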
Call: ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
Kernel side: binder_ioctl
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd); //cmd is the command sent from user space
//(void __user *)arg means arg is a user-space address; it cannot be accessed directly and must go through copy_from_user/copy_to_user etc. (the default is kernel space, because this is a driver running in kernel space)
void __user *ubuf = (void __user *)arg;
...
thread = binder_get_thread(proc); //on the first call this creates a new binder_thread and adds it to binder_proc->threads.rb_node
...
switch (cmd) {
...
case BINDER_SET_CONTEXT_MGR: //the only cmd we care about here
//creates a binder_node object and hangs it on this binder_proc's nodes red-black tree,
//initializes the binder_node's fields,
//and initializes the node's async_todo list and its binder_work entry.
ret = binder_ioctl_set_ctx_mgr(filp); //create a binder_node
if (ret)
goto err;
break;
...
return ret; //return to user space when done
}
User side:
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER; //cmd
//a write of BC_ENTER_LOOPER; the kernel sets a looper flag for this thread
//1. res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
binder_write(bs, readbuf, sizeof(uint32_t));
//enter the binder_loop for(;;) loop
for (;;) {
bwr.read_size = sizeof(readbuf);//non-zero, so this is a read operation
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); //2. call ioctl to trap into the kernel; a read operation
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
//process the data once it comes back from the kernel
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}
Kernel side:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
...
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
...
return ret; //return to user space when done
}
binder_ioctl_write_read
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
...
//copy the user-space data ubuf into the kernel-space bwr
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
...
//if bwr.write_size > 0, the write is handled first
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&proc->todo))//check whether this process's todo list is non-empty
binder_wakeup_proc_ilocked(proc);//if it is non-empty, wake up a waiting thread
binder_inner_proc_unlock(proc);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
...
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { //copy the kernel bwr back into the user-space ubuf
ret = -EFAULT;
goto out;
}
out:
return ret;
}
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
//thread->return_error.cmd was initialized to BR_OK when the binder_thread was created
while (ptr < end && thread->return_error.cmd == BR_OK) {
int ret;
if (get_user(cmd, (uint32_t __user *)ptr)) //fetch the cmd
return -EFAULT;
ptr += sizeof(uint32_t);
...
case BC_ENTER_LOOPER:
...
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
}
thread->looper |= BINDER_LOOPER_STATE_ENTERED; //set this thread's looper state
break;
...
*consumed = ptr - buffer;
}
return 0;
}
binder_loop() then enters its for(;;) loop and calls ioctl again, this time as a read operation.
Kernel side:
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
if (*consumed == 0) {
//on the first read, the incoming bwr.read_consumed == 0, so the kernel writes a BR_NOOP (4 bytes) into ptr (bwr.read_buffer)
//**for every read operation, the data starts with BR_NOOP**
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t); //advance ptr by four bytes
}
retry:
binder_inner_proc_lock(proc);
wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
binder_inner_proc_unlock(proc);
thread->looper |= BINDER_LOOPER_STATE_WAITING;
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
!binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
proc->pid, thread->pid, thread->looper);
wait_event_interruptible(binder_user_error_wait,
binder_stop_on_user_error < 2);
}
binder_restore_priority(current, proc->default_priority);
}
if (non_block) {
if (!binder_has_work(thread, wait_for_proc_work))
ret = -EAGAIN;
} else {
//no data yet, so the service manager sleeps here, waiting to be woken up
ret = binder_wait_for_work(thread, wait_for_proc_work);
}
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
return ret;
//enter the loop
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
binder_inner_proc_lock(proc);
//check whether the thread->todo list is empty
if (!binder_worklist_empty_ilocked(&thread->todo))
list = &thread->todo;
//check whether the proc->todo list is empty
else if (!binder_worklist_empty_ilocked(&proc->todo) &&
wait_for_proc_work)
list = &proc->todo;
else {
binder_inner_proc_unlock(proc);
/* no data added */
//on the first read there is no data, only the four-byte BR_NOOP, so ptr - buffer == 4
if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4) {
binder_inner_proc_unlock(proc);
break;
}
w = binder_dequeue_work_head_ilocked(list);
if (binder_worklist_empty_ilocked(&thread->todo))
thread->process_todo = false;
switch (w->type) {
...
return 0;
}
2. Binder system services implemented in C
binder.c contains the helper functions used below.
Path of binder.c:
frameworks\native\cmds\servicemanager\binder.c
test_server.c
#define CMD_SAY_HELLO 0
#define CMD_SAY_HELLO_TO 1
void sayhello(void){
static int cnt = 0;
fprintf(stderr, "sayhello :%d\n", cnt++);
}
int sayhello_to(char* name) {
static int cnt = 0;
fprintf(stderr, "sayhello_to %s :%d\n", name, cnt++);
return cnt++;
}
int hello_handler(struct binder_state *bs,
struct binder_transaction_data *txn, //txn->code tells us which function to call
struct binder_io *msg,
struct binder_io *reply) //the result is put into reply
{
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
char name[512];
strict_policy = bio_get_uint32(msg);
switch(txn->code) {
case CMD_SAY_HELLO:
sayhello();
break;
case CMD_SAY_HELLO_TO: {
uint32_t i;
size_t j;
//extract the UTF-16 string from msg and convert it to a plain C string
s = bio_get_string16(msg, &len);
if (s == NULL)
return -1;
for (j = 0; j < len && j < sizeof(name) - 1; j++)
name[j] = (char)s[j];
name[j] = '\0';
i = sayhello_to(name);
bio_put_uint32(reply, i); //put the result into reply
break;
}
default:
ALOGE("unknown code %d\n", txn->code);
return -1;
}
return 0;
}
int svcmgr_publish(struct binder_state *bs, uint32_t target, const char *name, void *ptr)
{
int status;
unsigned iodata[512/4];
struct binder_io msg, reply;
bio_init(&msg, iodata, sizeof(iodata), 4);
bio_put_uint32(&msg, 0); // strict mode header
bio_put_string16_x(&msg, SVC_MGR_NAME);
bio_put_string16_x(&msg, name);
bio_put_obj(&msg, ptr);
if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))
return -1;
status = bio_get_uint32(&reply);
binder_done(bs, &msg, &reply);
return status;
}
int main(int argc, char **argv)
{
struct binder_state *bs;
uint32_t svcmgr = BINDER_SERVICE_MANAGER; //svcmgr == 0
uint32_t handle;
bs = binder_open("/dev/binder", 128*1024);
if (!bs) {
fprintf(stderr, "failed to open binder driver\n");
return -1;
}
argc--;
argv++;
//add service
svcmgr_publish(bs, svcmgr, "hello", (void*)123);
//after registering the service, enter the loop: read data - parse - handle - reply
binder_loop(bs, hello_handler); //hello_handler is the handler function
return 0;
}
test_client.c
#define CMD_SAY_HELLO 0
#define CMD_SAY_HELLO_TO 1
struct binder_state *g_bs;
uint32_t g_handle;
void sayhello(void) {
//build a binder_io
unsigned iodata[512/4];
struct binder_io msg, reply;
bio_init(&msg, iodata, sizeof(iodata), 4);
bio_put_uint32(&msg, 0);
if (binder_call(g_bs, &msg, &reply, g_handle, CMD_SAY_HELLO)){
return;
}
binder_done(g_bs, &msg, &reply);
}
int sayhello_to(char* name) {
unsigned iodata[512/4];
struct binder_io msg, reply;
int ret;
bio_init(&msg, iodata, sizeof(iodata), 4);
bio_put_uint32(&msg, 0);
bio_put_string16_x(&msg, name);
if (binder_call(g_bs, &msg, &reply, g_handle, CMD_SAY_HELLO_TO)){
return -1;
}
ret = bio_get_uint32(&reply);
binder_done(g_bs, &msg, &reply);
return ret;
}
int main(int argc, char **argv)
{
struct binder_state *bs;
uint32_t svcmgr = BINDER_SERVICE_MANAGER; //svcmgr == 0
uint32_t handle;
int ret;
bs = binder_open("/dev/binder", 128*1024);
if (!bs) {
fprintf(stderr, "failed to open binder driver\n");
return -1;
}
g_bs = bs;
//get service
handle = svcmgr_lookup(bs, svcmgr, "hello");
g_handle = handle;
//send data to the server
if (argc == 2) {
sayhello();
} else if (argc == 3) {
ret = sayhello_to(argv[2]);
}
binder_release(bs, handle);
return 0;
}
3. Service registration process
3.1 Server side
a. Open the driver: open("/dev/binder"), ioctl(BINDER_VERSION), mmap
b. Build the data and call ioctl to issue a write (cmd = BC_TRANSACTION)
c. The data written into the kernel eventually ends up on the service manager's todo list, after which the service manager is woken up
3.1.1 The server builds and sends the data (user space)
- Build a binder_io (which contains the important flat_binder_object structure)
- Then call binder_call to send the data to the driver:
1. The binder_io is first converted into a binder_transaction_data, which is placed in a binder_write_read structure
2. Once the data is built, ioctl is called to send it
Data structure: (figure omitted; see the sketch below)
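Since the figure is not available, the same layout can be sketched in code: the payload built with bio_put_* is referenced by a binder_transaction_data, which together with the BC_TRANSACTION cmd is referenced by a binder_write_read. The fill_bwr() helper below is hypothetical (not a function from the sources), and the SVC_MGR_ADD_SERVICE code is assumed to come from the servicemanager's own binder.h:
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h> /* binder_transaction_data, binder_write_read, BC_TRANSACTION */
#include "binder.h"               /* servicemanager header: SVC_MGR_* codes (assumed) */

/* one write packet: { cmd, binder_transaction_data }, exactly what binder_call builds below */
struct write_packet {
    uint32_t cmd;                       /* BC_TRANSACTION */
    struct binder_transaction_data txn; /* target.handle = 0, code = SVC_MGR_ADD_SERVICE */
} __attribute__((packed));

static void fill_bwr(struct binder_write_read *bwr, struct write_packet *pkt,
                     void *payload, size_t payload_size,
                     void *offsets, size_t offsets_size)
{
    memset(pkt, 0, sizeof(*pkt));
    pkt->cmd = BC_TRANSACTION;
    pkt->txn.target.handle = 0;               /* 0 = the service manager */
    pkt->txn.code = SVC_MGR_ADD_SERVICE;      /* registration request */
    pkt->txn.data_size = payload_size;        /* bytes built with bio_put_uint32/string16_x/obj */
    pkt->txn.offsets_size = offsets_size;     /* positions of the flat_binder_object entries */
    pkt->txn.data.ptr.buffer  = (uintptr_t)payload;
    pkt->txn.data.ptr.offsets = (uintptr_t)offsets;

    memset(bwr, 0, sizeof(*bwr));
    bwr->write_size   = sizeof(*pkt);
    bwr->write_buffer = (uintptr_t)pkt;
    /* read_size/read_buffer are filled in right before the ioctl, as binder_call does */
}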

Building the data
/*
* target = 0: send to the service manager
* name: the service name
* ptr: points to a function (used here as the service's identifier)
*/
int svcmgr_publish(struct binder_state *bs, uint32_t target, const char *name, void *ptr)
{
int status;
unsigned iodata[512/4];
struct binder_io msg, reply;
bio_init(&msg, iodata, sizeof(iodata), 4);//the first 16 bytes are reserved
bio_put_uint32(&msg, 0); // strict mode header: four bytes of 0 go in first
bio_put_string16_x(&msg, SVC_MGR_NAME); //two bytes per character; four bytes of string length are stored first, then the string itself
bio_put_string16_x(&msg, name); //name is the service name, two bytes per character
bio_put_obj(&msg, ptr); //builds a flat_binder_object; the kernel creates a binder_node from it
//call binder_call to send the data into the kernel: write first, then read
if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))
return -1;
status = bio_get_uint32(&reply);
binder_done(bs, &msg, &reply);
return status;
}
Key point: building a flat_binder_object
/**
* struct flat_binder_object
* .type: whether an entity or a reference is being passed; only the service provider (the server) can pass an entity (the service manager and clients can only pass references)
* flags
* binder/handle: when type carries an entity this field holds the binder pointer; when it carries a reference it holds the handle
* cookie = 0
*/
void bio_put_obj(struct binder_io *bio, void *ptr)
{
struct flat_binder_object *obj; //this structure is ultimately passed to the kernel, which creates a binder_node from it
obj = bio_alloc_obj(bio);
if (!obj)
return;
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
obj->hdr.type = BINDER_TYPE_BINDER; //type
obj->binder = (uintptr_t)ptr; //ptr points to the value we passed in
obj->cookie = 0; //unused for now
}
binder_call builds the data and sends it: write first, then read
int binder_call(struct binder_state *bs,
struct binder_io *msg, struct binder_io *reply,
uint32_t target, uint32_t code)
{
int res;
struct binder_write_read bwr;
struct {
uint32_t cmd; //takes four bytes
struct binder_transaction_data txn;
} __attribute__((packed)) writebuf;
unsigned readbuf[32];
if (msg->flags & BIO_F_OVERFLOW) {
fprintf(stderr,"binder: txn buffer overflow\n");
goto fail;
}
writebuf.cmd = BC_TRANSACTION;
//build the binder_transaction_data
writebuf.txn.target.handle = target; //who to send to; handle 0 means the service manager
writebuf.txn.code = code; //when registering a service, code is SVC_MGR_ADD_SERVICE
writebuf.txn.flags = 0;
writebuf.txn.data_size = msg->data - msg->data0;
writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0); //marks where the flat_binder_object entries are
writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;
writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;
//build the binder_write_read
bwr.write_size = sizeof(writebuf);
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) &writebuf;
for (;;) {//call ioctl to send the data and wait for the driver to return data
bwr.read_size = sizeof(readbuf); //sizeof(readbuf) == 128
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); //call ioctl to send the data: write first, then read
if (res < 0) {
fprintf(stderr,"binder: ioctl failed (%s)\n", strerror(errno));
goto fail;
}
//parse the data returned from the kernel
res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
if (res == 0) return 0; //res becomes 0 once a cmd = BR_REPLY has been received
if (res < 0) goto fail;
}
3.1.2 The server side handles BC_TRANSACTION (kernel space)
3. Kernel side: binder_ioctl (its job is to put the data on the todo list of the target process's binder_proc and then wake the target process up)
a. Use the handle to find the target process; here handle = 0, i.e. the service manager
b. copy_from_user() the data into the kernel space mmap'ed by the service manager
c. Process the offsets data, i.e. the flat_binder_object:
1. Build a binder_node for the current (server) process
2. Build a binder_ref for the target process, the service manager; the binder_ref points to the binder_node (the server's node)
3. Increase the reference count **(this also sends some information back to the current process)**
d. Wake up the target process, the service manager
Source:
binder_thread_write
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread, //the current thread, created on the first ioctl call
binder_uintptr_t binder_buffer, //bwr.write_buffer
size_t size, //bwr.write_size
binder_size_t *consumed) //&bwr.write_consumed
{
uint32_t cmd;
struct binder_context *context = proc->context;
//__user marks a user-space address; the data must be copied from user space into the kernel
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
//take the cmds out one by one; ptr points to the start of the data, end to its end
while (ptr < end && thread->return_error.cmd == BR_OK) {
int ret;
//fetch the cmd (four bytes) from user space
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
//advance four bytes, now pointing at the payload
ptr += sizeof(uint32_t);
...
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
//copy the binder_transaction_data from user space into the kernel (only the descriptor is copied here; the buffer addresses it contains still point into user space)
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
//advance ptr to the next cmd (if there is one)
ptr += sizeof(tr);
//the key processing step
binder_transaction(proc, thread, &tr, cmd == BC_REPLY, 0);
break;
}
...
}
return 0;
}
1. Since handle == 0, the special target_node is found, and from it the target_proc.
2. Create and initialize binder_transaction *t and binder_work *tcomplete.
3. Copy the data from user space into the kernel space mapped by the target process.
4. Process the flat_binder_object (the key step):
a. create a binder_node for the current process
b. create a binder_ref for the target process, the service manager:
binder_ref.node = the current process's binder_node (here, the server's binder_node)
binder_ref.desc: the reference count is incremented
c. change fp->hdr.type to BINDER_TYPE_HANDLE
5. Put tcomplete on the current thread's todo list and t on the target's todo list, then wake up the target process, the service manager.
static void binder_transaction(struct binder_proc *proc, //the current process
struct binder_thread *thread, //the current thread
struct binder_transaction_data *tr, //points to the actual data to process
int reply,
binder_size_t extra_buffers_size) //extra_buffers_size == 0
{
int ret;
//the two important structures below
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
struct binder_proc *target_proc = NULL; //target process
struct binder_thread *target_thread = NULL; //target thread
struct binder_node *target_node = NULL; //target node
struct binder_transaction *in_reply_to = NULL;
...
struct binder_context *context = proc->context;
...
if (reply) {
//BC_REPLY takes this branch
} else {
//the code below locates the target process
if (tr->target.handle) { //non-zero handle case; here handle is 0, so the else branch below is taken
...
} else {
mutex_lock(&context->context_mgr_node_lock);
//with handle = 0, target_node is the special context-manager node;
//it was created back when the service manager told the driver that it is the service manager
target_node = context->binder_context_mgr_node;
if (target_node)
//get target_proc (the service manager process) from target_node
target_node = binder_get_node_refs_for_txn(target_node, &target_proc, &return_error);
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
...
}
...
}
...
//struct binder_transaction *t: allocate kernel memory for it
t = kzalloc(sizeof(*t), GFP_KERNEL);
binder_stats_created(BINDER_STAT_TRANSACTION);
//binder_work *tcomplete
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
...
//when adding a service, tr->flags == 0
if (!reply && !(tr->flags & TF_ONE_WAY))
//a reply is expected, so record the current thread in the binder_transaction
t->from = thread; //whether a reply is needed decides whether the sending thread is recorded
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc; //record the target process
t->to_thread = target_thread; //record the target thread
t->code = tr->code; //for this call, code = ADD_SERVICE_TRANSACTION
t->flags = tr->flags; //flags == 0
...
//allocate t->buffer from the space mapped by the target process
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
...
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
//copy ptr.buffer and ptr.offsets of the user-space binder_transaction_data into kernel space (the space mapped by the target process)
//the destination is t->buffer->data
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size)) {
......
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)tr->data.ptr.offsets, tr->offsets_size)) {
......
}
......
off_end = (void *)off_start + tr->offsets_size;
......
//walk the flat_binder_object entries one by one
for (; offp < off_end; offp++) {
struct binder_object_header *hdr;
......
hdr = (struct binder_object_header *)(t->buffer->data + *offp);
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr);
//this is the key part; see the analysis below
ret = binder_translate_binder(fp, t, thread);
...
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE:
...
}
...
}
//BINDER_WORK_TRANSACTION_COMPLETE: the current process handles this cmd and returns cmd = BR_TRANSACTION_COMPLETE to user space
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
//BINDER_WORK_TRANSACTION is added to the target queue; for this call the target queue is target_proc->todo
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
//BC_REPLY takes this branch
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
//list_add_tail(&tcomplete->entry, &thread->todo); puts tcomplete on the current thread's todo list
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
//a push onto the current thread's transaction stack; thread->transaction_stack is still NULL here because nothing has been pushed before this first transaction
t->from_parent = thread->transaction_stack; //t->from_parent records the parent entry of the sending thread's transaction stack
//the current thread's transaction stack now points at t
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
//put t on the target process's todo list and wake the target process up
if (!binder_proc_transaction(t, target_proc, target_thread)) {
binder_inner_proc_lock(proc);
binder_pop_transaction_ilocked(thread, t);
binder_inner_proc_unlock(proc);
goto err_dead_proc_or_thread;
}
} else {
...
}
...
return;
At this point the server thread's transaction stack holds:
transaction_stack | |
---|---|
from | server's thread |
to_proc | ServiceManager |
to_thread | ServiceManager's thread |
from_parent | NULL |
<font color="red">Analysis of binder_translate_binder</font>
What this code does:
1. Creates a binder_node for the current process
2. Creates a binder_ref for the target process, the service manager:
binder_ref.node = the current process's binder_node (here, the server's binder_node)
binder_ref.desc: the reference count is incremented
3. Changes fp->hdr.type to BINDER_TYPE_HANDLE
static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread) //a thread of the server process
{
struct binder_node *node;
struct binder_proc *proc = thread->proc; //the current process
struct binder_proc *target_proc = t->to_proc; //the target process
struct binder_ref_data rdata;
int ret = 0;
//look up the binder_node in the current process
node = binder_get_node(proc, fp->binder);
if (!node) {
//create and initialize a binder_node for the current process:
//node->proc = proc;
//node->ptr = fp->binder;
//node->cookie = fp->cookie;
//node->work.type = BINDER_WORK_NODE;
node = binder_new_node(proc, fp);
}
...
//create a binder_ref for target_proc: binder_ref.proc = target_proc, binder_ref.node points to the current process's node,
//and increment the reference count, which causes BR_INCREFS and BR_ACQUIRE to be reported;
//&node->work is also put on the current thread's todo list via binder_enqueue_work_ilocked(&node->work, target_list);
//ref->data.strong++;
ret = binder_inc_ref_for_node(target_proc, node, fp->hdr.type == BINDER_TYPE_BINDER, &thread->todo, &rdata);
if (ret)
goto done;
if (fp->hdr.type == BINDER_TYPE_BINDER)
fp->hdr.type = BINDER_TYPE_HANDLE; //turn the entity into a reference
else
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
fp->handle = rdata.desc; //the handle value; the reference count has already been incremented
fp->cookie = 0;
...
done:
binder_put_node(node);
return ret;
}
binder_thread_read
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE; //this cmd is sent to user space to say the write part of the transaction is done
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, cmd);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
//when the current process created the binder_node, node->work.type was set to BINDER_WORK_NODE
//the block below does the node's strong/weak reference bookkeeping and reports BR_INCREFS/BR_ACQUIRE (or BR_DECREFS/BR_RELEASE) to user space
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
int strong, weak;
binder_uintptr_t node_ptr = node->ptr;
binder_uintptr_t node_cookie = node->cookie;
int node_debug_id = node->debug_id;
int has_weak_ref;
int has_strong_ref;
void __user *orig_ptr = ptr;
BUG_ON(proc != node->proc);
strong = node->internal_strong_refs || node->local_strong_refs;
weak = !hlist_empty(&node->refs) || node->local_weak_refs || node->tmp_refs || strong;
has_strong_ref = node->has_strong_ref;
has_weak_ref = node->has_weak_ref;
if (weak && !has_weak_ref) {
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
}
if (strong && !has_strong_ref) {
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
}
if (!strong && has_strong_ref)
node->has_strong_ref = 0;
if (!weak && has_weak_ref)
node->has_weak_ref = 0;
if (!weak && !strong) {
rb_erase(&node->rb_node, &proc->nodes);
binder_inner_proc_unlock(proc);
binder_node_lock(node);
binder_node_unlock(node);
binder_free_node(node);
} else
binder_inner_proc_unlock(proc);
if (weak && !has_weak_ref)
ret = binder_put_node_cmd(
proc, thread, &ptr, node_ptr,
node_cookie, node_debug_id,
BR_INCREFS, "BR_INCREFS");
if (!ret && strong && !has_strong_ref)
ret = binder_put_node_cmd(
proc, thread, &ptr, node_ptr,
node_cookie, node_debug_id,
BR_ACQUIRE, "BR_ACQUIRE");
if (!ret && !strong && has_strong_ref)
ret = binder_put_node_cmd(
proc, thread, &ptr, node_ptr,
node_cookie, node_debug_id,
BR_RELEASE, "BR_RELEASE");
if (!ret && !weak && has_weak_ref)
ret = binder_put_node_cmd(
proc, thread, &ptr, node_ptr,
node_cookie, node_debug_id,
BR_DECREFS, "BR_DECREFS");
if (ret)
return ret;
} break;
So the read buffer going back to user space contains:
BR_NOOP
BINDER_WORK_NODE (adds the cmds BR_INCREFS and BR_ACQUIRE)
BINDER_WORK_TRANSACTION_COMPLETE (adds the cmd BR_TRANSACTION_COMPLETE)
After this data has been processed, control returns to user space.
User space calls binder_parse() to handle it and loops again: it first reads BR_NOOP, and since there is eventually nothing left to read, the current process goes to sleep.
3.1.3 The server is woken up, cmd = BR_REPLY (kernel space)
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&thread->todo))
list = &thread->todo;
else if (!binder_worklist_empty_ilocked(&proc->todo) &&
wait_for_proc_work)
list = &proc->todo;
else {
binder_inner_proc_unlock(proc);
/* no data added */
if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4) {
binder_inner_proc_unlock(proc);
break;
}
w = binder_dequeue_work_head_ilocked(list);
if (binder_worklist_empty_ilocked(&thread->todo))
thread->process_todo = false;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
binder_inner_proc_unlock(proc);
t = container_of(w, struct binder_transaction, work);
} break;
BUG_ON(t->buffer == NULL);
//for a reply, t->buffer->target_node is NULL, so the else branch is taken here
if (t->buffer->target_node) {
...
} else {
tr.target.ptr = 0;
tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
t_from = binder_get_txn_from(t);
if (t_from) {
struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
}
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)
((uintptr_t)t->buffer->data +
binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
binder_cleanup_transaction(t, "put_user failed",
BR_FAILED_REPLY);
return -EFAULT;
}
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (t_from)
binder_thread_dec_tmpref(t_from);
binder_cleanup_transaction(t, "copy_to_user failed",
BR_FAILED_REPLY);
return -EFAULT;
}
ptr += sizeof(tr);
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
if (t_from)
binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
...
} else {
binder_free_transaction(t);
}
break;
}
3.1.4 The server handles BR_REPLY (user space)
case BR_REPLY: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: reply too small!\n");
return -1;
}
binder_dump_txn(txn);
if (bio) {
bio_init_from_txn(bio, txn);
bio = 0;
} else {
/* todo FREE BUFFER */
}
ptr += sizeof(*txn);
r = 0;
break;
}
...
return r;
After binder_call returns:
int svcmgr_publish(struct binder_state *bs, uint32_t target, const char *name, void *ptr)
{
...
if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))
return -1;
status = bio_get_uint32(&reply);
binder_done(bs, &msg, &reply);
return status;
}
void binder_done(struct binder_state *bs,
__unused struct binder_io *msg,
struct binder_io *reply)
{
struct {
uint32_t cmd;
uintptr_t buffer;
} __attribute__((packed)) data;
if (reply->flags & BIO_F_SHARED) {
data.cmd = BC_FREE_BUFFER;
data.buffer = (uintptr_t) reply->data0;
binder_write(bs, &data, sizeof(data));//issue one write to free the kernel buffer that was occupied
reply->flags = 0;
}
}
The server then calls binder_loop() and enters its loop, waiting for client processes to send messages.
3.2 service manager
3.2.1 After the service manager is woken up (kernel space)
1. Check whether the thread->todo or proc->todo list has work
2. Take the binder_work off the list
3. Build a binder_transaction_data and change the command to BR_TRANSACTION
4. Copy the data to user space and return to user space
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
binder_inner_proc_lock(proc);
//check whether the thread->todo list has work
if (!binder_worklist_empty_ilocked(&thread->todo))
list = &thread->todo;
//check whether the proc->todo list has work
else if (!binder_worklist_empty_ilocked(&proc->todo) && wait_for_proc_work)
list = &proc->todo;
else {
binder_inner_proc_unlock(proc);
/* no data added */
if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4) {
binder_inner_proc_unlock(proc);
break;
}
//take the binder_work off the list (the server process put it there)
w = binder_dequeue_work_head_ilocked(list);
if (binder_worklist_empty_ilocked(&thread->todo))
thread->process_todo = false;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
binder_inner_proc_unlock(proc);
//recover the binder_transaction pointer t from its embedded work member
t = container_of(w, struct binder_transaction, work);
} break;
...
}
...
BUG_ON(t->buffer == NULL);
//this target_node is in fact the service manager's binder_node
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
struct binder_priority node_prio;
tr.target.ptr = target_node->ptr; //flat_binder_object.binder
tr.cookie = target_node->cookie;
......
cmd = BR_TRANSACTION; //change the command to BR_TRANSACTION
} else {
...
}
tr.code = t->code;
tr.flags = t->flags;
...
t_from = binder_get_txn_from(t); //t->from
if (t_from) {
struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
}
//build a binder_transaction_data from the binder_transaction
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)((uintptr_t)t->buffer->data +
binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
//write BR_TRANSACTION into user space
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
binder_cleanup_transaction(t, "put_user failed", BR_FAILED_REPLY);
return -EFAULT;
}
//advance four bytes
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (t_from)
binder_thread_dec_tmpref(t_from);
binder_cleanup_transaction(t, "copy_to_user failed", BR_FAILED_REPLY);
return -EFAULT;
}
ptr += sizeof(tr);
...
binder_stat_br(proc, thread, cmd);
...
if (t_from)
binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
binder_inner_proc_lock(thread->proc);
//this t came off the pending-work list, i.e. it is the transaction the server put on the ServiceManager's todo list earlier
//push it onto the current thread's transaction stack
t->to_parent = thread->transaction_stack;
//record the target thread, i.e. the service manager itself
t->to_thread = thread;
//the current thread's transaction stack now records this transaction
thread->transaction_stack = t;
binder_inner_proc_unlock(thread->proc);
} else {
binder_free_transaction(t);
}
break;
}
...
return 0; //then return to user space
The ServiceManager thread's transaction stack now holds:
transaction_stack | |
---|---|
from | server's thread |
to_proc | ServiceManager |
to_thread | ServiceManager's thread |
from_parent | NULL |
to_parent | NULL |
3.2.2 The service manager handles BR_TRANSACTION (user space)
Processing the data:
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
#if TRACE
fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
switch(cmd) {
case BR_NOOP:
break;
...
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
//handle the data
res = func(bs, txn, &msg, &reply);
if (txn->flags & TF_ONE_WAY) { //txn->flags == 0
binder_free_buffer(bs, txn->data.ptr.buffer);
} else {
//after handling the data, send the reply; here reply holds four bytes of 0
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
}
ptr += sizeof(*txn);
break;
}
...
}
return r;
}
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
uint32_t dumpsys_priority;
if (txn->target.ptr != BINDER_SERVICE_MANAGER)
return -1;
if (txn->code == PING_TRANSACTION)
return 0;
strict_policy = bio_get_uint32(msg); //the four bytes of 0
s = bio_get_string16(msg, &len); //get the string (SVC_MGR_NAME)
if (s == NULL) {
return -1;
}
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s, len));
return -1;
}
...
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
...
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len); //get the string: the service name
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg); //the handle is now 1
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
dumpsys_priority = bio_get_uint32(msg);
//add a service entry to the list
if (do_add_service(bs, s, len, handle, txn->sender_euid, allow_isolated, dumpsys_priority,
txn->sender_pid))
return -1;
break;
...
default:
ALOGE("unknown code %d\n", txn->code);
return -1;
}
bio_put_uint32(reply, 0); //the data was handled successfully, so put four bytes of 0 into reply
return 0;
}
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER; //free the kernel buffer that held the request data
data.buffer = buffer_to_free; //the buffer to free
data.cmd_reply = BC_REPLY; //two cmds are sent in a single write
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offsets_size = 0;
data.txn.data.ptr.buffer = (uintptr_t)&status;
data.txn.data.ptr.offsets = 0;
} else {
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
//res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); this issues one write operation
binder_write(bs, &data, sizeof(data));
}
3.2.3 The service manager handles BC_REPLY (kernel space)
<font color="green">1. Use the from recorded in thread->transaction_stack to find the target thread and target process</font>
<font color="green">2. Put &t->work on the target thread's todo list and wake the target thread up</font>
case BC_FREE_BUFFER: {
//frees the buffer
...
}
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY, 0);
break;
}
binder_transaction:
//locate the target process
if (reply) {
//use a temporary variable to record the current thread's transaction-stack entry
in_reply_to = thread->transaction_stack;
...
//check that the receiving thread of that transaction is this thread; otherwise it is an error
if (in_reply_to->to_thread != thread) {
....
}
//a pop operation; thread->transaction_stack becomes NULL here
thread->transaction_stack = in_reply_to->to_parent;
//get the target thread: in_reply_to->from records the sending (server) thread
target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
...
if (target_thread->transaction_stack != in_reply_to) {
...
}
//the target process
target_proc = target_thread->proc;
...
} else {
...
}
...
t = kzalloc(sizeof(*t), GFP_KERNEL);
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
...
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL; //no reply to this reply is expected
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc; //the target process, i.e. the server process
t->to_thread = target_thread; //the target thread
t->code = tr->code;
t->flags = tr->flags;
...
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
......
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
//copy from user space into the target process's kernel space
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
}
...
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
binder_enqueue_thread_work(thread, tcomplete); //put tcomplete on the current thread's todo list
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
binder_inner_proc_unlock(target_proc);
goto err_dead_proc_or_thread;
}
BUG_ON(t->buffer->async_transaction != 0);
//a pop operation on the target thread's transaction stack
binder_pop_transaction_ilocked(target_thread, in_reply_to);
binder_enqueue_thread_work_ilocked(target_thread, &t->work);//put t->work on the target thread's todo list
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait); //wake up the target thread
binder_restore_priority(current, in_reply_to->saved_priority);
binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
......
} else {
......
}
The target (server) process is then woken up.
The service manager itself handles BR_TRANSACTION_COMPLETE and goes back to sleep.
4. Service lookup process
4.1 client
4.1.1 The client builds and sends the data (user space)
Data structure: (figure omitted)
/**
* target: 0, send to the service manager
* name: "hello", the name of the service
*/
uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
uint32_t handle;
unsigned iodata[512/4];
struct binder_io msg, reply;
bio_init(&msg, iodata, sizeof(iodata), 4); //the first 16 bytes are reserved
bio_put_uint32(&msg, 0); //put in four bytes of 0
bio_put_string16_x(&msg, SVC_MGR_NAME); //put in SVC_MGR_NAME
bio_put_string16_x(&msg, name); //put in the service name "hello"
//call binder_call to send the data (layout as described above)
if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
return 0;
//take the handle out of the reply returned from the kernel
handle = bio_get_ref(&reply);
if (handle)
binder_acquire(bs, handle);
binder_done(bs, &msg, &reply);
return handle;
}
4.1.2 The client handles BC_TRANSACTION (kernel space)
1. From handle = 0, find the corresponding process: the service manager
2. Put the data on the target process's todo list and wake up the target process, the service manager
4.1.3 The client is woken up, cmd = BR_REPLY (kernel space)
4.1.4 The client handles BR_REPLY (user space)
From the flat_binder_object inside the binder_io *reply returned by the kernel, the client obtains handle = 1.
//after binder_call gets data back, parse it
res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
...
case BR_REPLY: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: reply too small!\n");
return -1;
}
binder_dump_txn(txn);
if (bio) {
bio_init_from_txn(bio, txn); //extract the binder_io data
bio = 0;
} else {
/* todo FREE BUFFER */
}
ptr += sizeof(*txn);
r = 0; //r = 0 here, which ends the loop in binder_call
break;
}
default:
ALOGE("parse: OOPS %d\n", cmd);
return -1;
}
}
return r;
}
//after binder_call finishes, extract the handle
// handle = bio_get_ref(&reply);
uint32_t bio_get_ref(struct binder_io *bio)
{
struct flat_binder_object *obj;
obj = _bio_get_obj(bio);
if (!obj)
return 0;
if (obj->hdr.type == BINDER_TYPE_HANDLE)
return obj->handle;
return 0;
}
4.2 service manager
4.2.1 After the service manager is woken up (kernel space)
It builds the data and returns to user space.
4.2.2 The service manager handles BR_TRANSACTION (user space)
1. Extract the data and obtain "hello"
2. Look "hello" up in the service list and find handle = 1
3. Use ioctl to send handle = 1 back to the driver
Data structure: (figure omitted)
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
//find handle = 1 from the service name
handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
//build a flat_binder_object: the key step!!!
bio_put_ref(reply, handle);
return 0;
Building the flat_binder_object
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
struct flat_binder_object *obj;
if (handle)
obj = bio_alloc_obj(bio);
else
obj = bio_alloc(bio, sizeof(*obj));
if (!obj)
return;
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
obj->hdr.type = BINDER_TYPE_HANDLE;
obj->handle = handle;
obj->cookie = 0;
}
After the data has been handled, the reply is sent:
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offsets_size = 0;
data.txn.data.ptr.buffer = (uintptr_t)&status;
data.txn.data.ptr.offsets = 0;
} else {
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
binder_write(bs, &data, sizeof(data));
}
4.2.3 The service manager handles BC_REPLY (kernel space)
4. In the service manager's binder_proc.refs_by_desc tree, handle = 1 leads to the binder_ref and from there to the "hello" service's binder_node
5. A binder_ref is created for the client; binder_ref.node points to the "hello" service's binder_node, desc = 1
6. The data is then put on the client's todo list and the client is woken up
for (; offp < off_end; offp++) {
struct binder_object_header *hdr;
size_t object_size = binder_validate_object(t->buffer, *offp);
...
hdr = (struct binder_object_header *)(t->buffer->data + *offp);
off_min = *offp + object_size;
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
...
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr);
ret = binder_translate_handle(fp, t, thread);
} break;
...
}
static int binder_translate_handle(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
{
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
struct binder_node *node;
struct binder_ref_data src_rdata;
int ret = 0;
//handle = 1 leads through binder_proc.refs_by_desc to the binder_ref, and from there to the "hello" service's binder_node
node = binder_get_node_from_ref(proc, fp->handle,
fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
...
if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
ret = -EPERM;
goto done;
}
binder_node_lock(node);
//the target process is the client; binder_node->proc is the server
if (node->proc == target_proc) {
...
} else {
struct binder_ref_data dest_rdata;
binder_node_unlock(node);
//create a binder_ref for the target process, with binder_ref.node = the server's binder_node
ret = binder_inc_ref_for_node(target_proc, node,
fp->hdr.type == BINDER_TYPE_HANDLE,
NULL, &dest_rdata);
if (ret)
goto done;
fp->binder = 0;
fp->handle = dest_rdata.desc;//the new handle; its reference count has been incremented
fp->cookie = 0;
trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
&dest_rdata);
}
done:
binder_put_node(node);
return ret;
}
5. Service use process
1. User space builds the data (which contains the handle) and sends it
2. From the handle, the driver finds the corresponding binder_ref, from it the server's binder_node, and from that the server process
3. The driver puts the built data on the server process's todo list and wakes the server process up
4. The server process takes the data and uses ptr (and the code) to decide which service function to call
The detailed source is omitted here, since it repeats the analysis above; a condensed sketch of the client side follows.
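The sketch below is a hedged recap of the client side of this process, reusing only the helpers already shown (binder_state, bio_*, binder_call, binder_done from the servicemanager's binder.h, and the CMD_SAY_HELLO_TO constant from the test programs). The function name call_hello_to is made up for the example.
#include <stdint.h>
#include "binder.h" /* servicemanager helpers: binder_state, binder_io, bio_*, binder_call, binder_done */

#define CMD_SAY_HELLO_TO 1 /* same value as in test_server.c / test_client.c above */

/* invoke the "hello" service once its handle has been obtained via svcmgr_lookup() */
int call_hello_to(struct binder_state *bs, uint32_t handle, const char *who)
{
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4); /* reserve room for 4 offset entries */
    bio_put_uint32(&msg, 0);                   /* strict mode header */
    bio_put_string16_x(&msg, who);             /* argument for the server */

    /* 1. user space sends { handle, code, payload };
     * 2. the driver maps handle -> binder_ref -> binder_node -> server process;
     * 3. the transaction lands on the server's todo list and the server is woken up;
     * 4. the server's handler dispatches on code and puts its result into the reply */
    if (binder_call(bs, &msg, &reply, handle, CMD_SAY_HELLO_TO))
        return -1;

    int ret = bio_get_uint32(&reply);          /* the value the server put into reply */
    binder_done(bs, &msg, &reply);             /* BC_FREE_BUFFER for the reply buffer */
    return ret;
}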
6. Multi-thread support in a binder server
Several clients may be sending data to a single server process.
When the server cannot keep up, it creates more threads.
Question 1: who decides that the server cannot keep up?
The driver looks at the binder_proc's wait queue (proc->waiting_threads): if no thread is left waiting there, the server cannot keep up.
1. The driver decides that the server is overloaded
2. The driver asks the application layer to create a new thread
3. The application layer creates the new thread
In binder_thread_read:
the conditions under which the driver sends the "spawn a new thread" request to the application are:
//proc->requested_threads: spawn requests not yet acknowledged
//proc->requested_threads_started: number of already started threads, which must stay below the maximum thread count
if (proc->requested_threads == 0 && list_empty(&thread->proc->waiting_threads) &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
proc->requested_threads++;
binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_THREADS, "%d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
} else
binder_inner_proc_unlock(proc);
Question 2: what does the application have to do? (see the sketch after this list)
1. User space sets max_threads via ioctl
2. When user space receives a BR_SPAWN_LOOPER request:
a. it creates a new thread with pthread_create()
b. the new thread issues an ioctl (BC_REGISTER_LOOPER, see step 3)
c. and enters a loop
3. The new thread must issue ioctl BC_REGISTER_LOOPER to tell the driver that it has been created and has entered its loop.
4. Like the main thread, it then runs a loop: read from the driver, then process the data.
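A hedged user-space sketch of those steps, assuming the binder_state definition shown in 1.2 is visible and that binder_write is the servicemanager helper shown earlier; the parsing inside the loop is only indicated by a comment, and handle_spawn()/binder_thread_fn() are names made up for the example:
#include <pthread.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "binder.h" /* servicemanager helpers: binder_state, binder_write; pulls in the BC_/BR_/BINDER_ constants */

static struct binder_state *g_bs;

static void *binder_thread_fn(void *arg)
{
    (void)arg;
    /* 3. the spawned thread registers itself with the driver */
    uint32_t cmd = BC_REGISTER_LOOPER;
    binder_write(g_bs, &cmd, sizeof(cmd));

    /* 4. then it loops like the main thread: read from the driver, process the data */
    for (;;) {
        uint32_t readbuf[32];
        struct binder_write_read bwr = {0};
        bwr.read_size = sizeof(readbuf);
        bwr.read_buffer = (uintptr_t)readbuf;
        if (ioctl(g_bs->fd, BINDER_WRITE_READ, &bwr) < 0)
            break;
        /* parse readbuf here, as binder_parse() does; when BR_SPAWN_LOOPER
         * shows up (step 2), call handle_spawn() again to start another thread */
    }
    return NULL;
}

static void handle_spawn(void) /* 2a. called when BR_SPAWN_LOOPER is read */
{
    pthread_t t;
    if (pthread_create(&t, NULL, binder_thread_fn, NULL) == 0)
        pthread_detach(t);
}

static void setup_threads(struct binder_state *bs, uint32_t max_threads)
{
    g_bs = bs;
    /* 1. tell the driver how many extra looper threads it may ask for */
    ioctl(bs->fd, BINDER_SET_MAX_THREADS, &max_threads);
}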
1. Setting max_threads
In binder_ioctl:
case BINDER_SET_MAX_THREADS: {
int max_threads;
if (copy_from_user(&max_threads, ubuf, sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
binder_inner_proc_lock(proc);
proc->max_threads = max_threads;
binder_inner_proc_unlock(proc);
break;
}
7. Summary: the data transfer process
Every binder_open creates a very important structure, the binder_proc.
1. In the kernel, the server creates a binder_node for each of its services; binder_node.proc identifies the server process.
2. In the kernel, the service manager gets a binder_ref that references that binder_node,
with binder_ref.desc = 1, 2, 3...
In user space the service manager keeps a service list (svclist) whose entries contain .name and .handle.
3. A client asks the service manager for a service by passing its name.
4. The service manager returns the handle to the driver.
5. The driver looks the handle up in the service manager's binder_ref red-black tree to find the binder_ref,
follows it to the binder_node,
and finally creates a new binder_ref for the client whose .node points to the server's binder_node;
its desc starts from 1.
The driver returns that desc to the client; this is the client's handle.
So the service manager holds a set of binder_refs whose handle values are determined by the order in which services were registered,
and each client holds its own set of binder_refs whose handle values are determined by the order in which it looked services up.
6. Having obtained the handle, the client sends data to that handle; the driver maps the handle to the binder_ref,
the binder_ref to the binder_node, and the binder_node to the server process,
and the data can then be delivered to it.
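That chain can be compressed into a commented illustration (simplified field names only, not the real driver structs):
/* illustration only: how a client handle resolves to the server process */
struct proc;                                       /* stands in for binder_proc */
struct node { struct proc *owner; };               /* stands in for binder_node: one per service */
struct ref  { uint32_t desc; struct node *node; }; /* stands in for binder_ref: desc is the handle */

/* when a client sends to handle h, the driver conceptually does:
 *   ref    = lookup(client_proc->refs_by_desc, h);  // red-black tree keyed by desc
 *   node   = ref->node;                             // e.g. the "hello" service's node
 *   target = node->owner;                           // the server's binder_proc
 *   put the transaction on target->todo and wake the server up
 */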