framework study notes 8: binder_ioctl and adding a service


Author: 加个标志位 | Published 2020-12-10 09:04
    1. Several places that end up in binder_ioctl() (the user-space calls live in frameworks\native\cmds\servicemanager\binder.c):
      (1) In the ServiceManager process: checking that the binder driver version matches user space (the ioctl command and struct involved are sketched right after this snippet):
      if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
          (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
          fprintf(stderr, "binder: driver version differs from user space\n");
          goto fail_open;
      }
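
      For reference, the ioctl command codes and the version struct used in the check above come from the binder UAPI header; roughly (abridged, and the exact numbers should be verified against your kernel's uapi/linux/android/binder.h):

      struct binder_version {
          __s32 protocol_version;   /* filled in by the driver */
      };

      #define BINDER_WRITE_READ       _IOWR('b', 1, struct binder_write_read)
      #define BINDER_SET_CONTEXT_MGR  _IOW('b', 7, __s32)
      #define BINDER_THREAD_EXIT      _IOW('b', 8, __s32)
      #define BINDER_VERSION          _IOWR('b', 9, struct binder_version)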

      (2) In the ServiceManager process: ServiceManager becomes the context manager:

    static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    {
        int ret;
        // the binder_proc that was created in binder_open()
        struct binder_proc *proc = filp->private_data;
        struct binder_thread *thread;
        unsigned int size = _IOC_SIZE(cmd);
        void __user *ubuf = (void __user *)arg;

        // look up (or create) the binder_thread for the calling thread, see note 1
        thread = binder_get_thread(proc);
        if (thread == NULL) {
            ret = -ENOMEM;
            goto err;
        }

        switch (cmd) {
        case BINDER_SET_CONTEXT_MGR:
            ret = binder_ioctl_set_ctx_mgr(filp);  // see note 2
            if (ret)
                goto err;
            break;
        case BINDER_VERSION: {
            struct binder_version __user *ver = ubuf;

            // copy the driver's protocol version back into &vers in the ServiceManager process
            if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                     &ver->protocol_version)) {
                ret = -EINVAL;
                goto err;
            }
            break;
        }
        }
        ret = 0;
    err:
        return ret;
    }
    

    Note 1:

    static struct binder_thread *binder_get_thread(struct binder_proc *proc)
    {
        struct binder_thread *thread = NULL;
        struct rb_node *parent = NULL;
        struct rb_node **p = &proc->threads.rb_node;
    
        while (*p) {
            parent = *p;
            // walk proc->threads (a red-black tree keyed by pid) to find this thread's binder_thread
            thread = rb_entry(parent, struct binder_thread, rb_node);
    
            if (current->pid < thread->pid)
                p = &(*p)->rb_left;
            else if (current->pid > thread->pid)
                p = &(*p)->rb_right;
            else
                break;
        }
        if (*p == NULL) {
            // first call from this pid: allocate a new binder_thread
            thread = kzalloc(sizeof(*thread), GFP_KERNEL);
            if (thread == NULL)
                return NULL;
            binder_stats_created(BINDER_STAT_THREAD);
            thread->proc = proc;
            // record the calling thread's pid (current is the calling task)
            thread->pid = current->pid;
            // initialize the wait queue and the todo (work) list
            init_waitqueue_head(&thread->wait);
            INIT_LIST_HEAD(&thread->todo);
            // link the new node into this process's thread red-black tree
            rb_link_node(&thread->rb_node, parent, p);
            // rebalance/recolor the tree after insertion
            rb_insert_color(&thread->rb_node, &proc->threads);
            thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
        }
        return thread;
    }
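
    For quick reference, the struct binder_proc and struct binder_thread fields that these notes rely on look roughly like this (abridged from the kernel's binder.c; the omitted members and exact types vary between kernel versions):

    struct binder_proc {                    /* one per process that opened /dev/binder */
        struct rb_root threads;             /* this process's binder_threads, keyed by pid */
        struct list_head todo;              /* process-wide work queue */
        wait_queue_head_t wait;             /* threads sleep here waiting for work */
        int pid;
        int ready_threads;
        long default_priority;
        ptrdiff_t user_buffer_offset;       /* user address = kernel buffer address + this offset */
        /* ... */
    };

    struct binder_thread {                  /* one per thread that talks to the driver */
        struct binder_proc *proc;
        struct rb_node rb_node;             /* linked into proc->threads */
        int pid;
        int looper;                         /* BINDER_LOOPER_STATE_* flags */
        struct binder_transaction *transaction_stack;
        struct list_head todo;              /* per-thread work queue */
        uint32_t return_error;
        wait_queue_head_t wait;
        /* ... */
    };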
    

    Note 2:

    static int binder_ioctl_set_ctx_mgr(struct file *filp)
    {
        int ret = 0;
        struct binder_proc *proc = filp->private_data;
        kuid_t curr_euid = current_euid();
        // the static binder_context_mgr_node may only be set once: only one process can become the context manager
        if (binder_context_mgr_node != NULL) {
            pr_err("BINDER_SET_CONTEXT_MGR already set\n");
            ret = -EBUSY;
            goto out;
        }
        // ... (permission checks omitted)
        // create the binder_node for the context manager (ptr = 0, cookie = 0)
        binder_context_mgr_node = binder_new_node(proc, 0, 0);
        if (binder_context_mgr_node == NULL) {
            ret = -ENOMEM;
            goto out;
        }
        binder_context_mgr_node->local_weak_refs++;
        binder_context_mgr_node->local_strong_refs++;
        binder_context_mgr_node->has_strong_ref = 1;
        binder_context_mgr_node->has_weak_ref = 1;
    out:
        return ret;
    }
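
    On the user-space side, ServiceManager's main() does binder_open() -> binder_become_context_manager() -> binder_loop(), and becoming the context manager is a single ioctl. From servicemanager's binder.c (quoted from memory, so verify against your source tree):

    int binder_become_context_manager(struct binder_state *bs)
    {
        // handle 0 is reserved for the context manager, so no extra argument is needed
        return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    }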
    
    2. How ServiceManager enters its wait:
      (2.1) In the ServiceManager process: tell the driver that this thread is about to enter the loop (it is not actually waiting yet). Inside binder_loop(bs, svcmgr_handler) this is done by the two lines
      readbuf[0] = BC_ENTER_LOOPER;
      binder_write(bs, readbuf, sizeof(uint32_t));
      (2.2) In the ServiceManager process: enter the loop and keep reading from (and writing to) the binder driver, i.e. the body of the for (;;) loop:
    void binder_loop(struct binder_state *bs, binder_handler func)
    {
        int res;
        struct binder_write_read bwr;
        uint32_t readbuf[32];
    
        bwr.write_size = 0;
        bwr.write_consumed = 0;
        bwr.write_buffer = 0;
    
        readbuf[0] = BC_ENTER_LOOPER;
        binder_write(bs, readbuf, sizeof(uint32_t));  // see note 3
    
        for (;;) {
            bwr.read_size = sizeof(readbuf);
            bwr.read_consumed = 0;
            bwr.read_buffer = (uintptr_t) readbuf;
    
            res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);  // see note 4
    
            if (res < 0) {
                ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
                break;
            }
    
            res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
            if (res == 0) {
                ALOGE("binder_loop: unexpected reply?!\n");
                break;
            }
            if (res < 0) {
                ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
                break;
            }
        }
    }
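
    The bwr variable exchanged with the driver above is a struct binder_write_read; abridged from the binder UAPI header (types may differ slightly between kernel versions):

    struct binder_write_read {
        binder_size_t    write_size;        /* bytes available in write_buffer */
        binder_size_t    write_consumed;    /* bytes the driver consumed */
        binder_uintptr_t write_buffer;      /* user-space address of the BC_* command stream */
        binder_size_t    read_size;         /* bytes available in read_buffer */
        binder_size_t    read_consumed;     /* bytes the driver filled in */
        binder_uintptr_t read_buffer;       /* user-space address that receives BR_* commands */
    };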
    

    Note 3: in int binder_write(struct binder_state *bs, void *data, size_t len) we can see that what is ultimately issued is again
    ioctl(bs->fd, BINDER_WRITE_READ, &bwr), which lands in the driver's binder_ioctl();

    int binder_write(struct binder_state *bs, void *data, size_t len){
        struct binder_write_read bwr;
        int res;
    
        bwr.write_size = len;
        bwr.write_consumed = 0;
        bwr.write_buffer = (uintptr_t) data;
        bwr.read_size = 0;
        bwr.read_consumed = 0;
        bwr.read_buffer = 0;
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0) {
            fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                    strerror(errno));
        }
        return res;
    }
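
    For this particular write, the driver's binder_thread_write() just records that the thread has entered the loop; abridged from binder.c (this is why BINDER_LOOPER_STATE_ENTERED is already set by the time the read in note 5 runs):

    case BC_ENTER_LOOPER:
        if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
            // BC_ENTER_LOOPER must not follow BC_REGISTER_LOOPER on the same thread
            thread->looper |= BINDER_LOOPER_STATE_INVALID;
            binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
                      proc->pid, thread->pid);
        }
        thread->looper |= BINDER_LOOPER_STATE_ENTERED;
        break;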
    
    /*** In the driver, binder_ioctl() handles the BINDER_WRITE_READ command by calling binder_ioctl_write_read():
    case BINDER_WRITE_READ:
            ret = binder_ioctl_write_read(filp, cmd, arg, thread);  // see note 4
            if (ret)
                goto err;
            break;
    ***/
    

    Note 4:

    static int binder_ioctl_write_read(struct file *filp,
                    unsigned int cmd, unsigned long arg,
                    struct binder_thread *thread)
    {
        int ret = 0;
        struct binder_proc *proc = filp->private_data;
        unsigned int size = _IOC_SIZE(cmd);
        void __user *ubuf = (void __user *)arg;
        struct binder_write_read bwr;
    
        // copy the user-space struct binder_write_read (ubuf) into the kernel-side bwr
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto out;
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
                 proc->pid, thread->pid,
                 (u64)bwr.write_size, (u64)bwr.write_buffer,
                 (u64)bwr.read_size, (u64)bwr.read_buffer);
    
        if (bwr.write_size > 0) {
            // not taken on this call: binder_loop() set write_size to 0
        }
        if (bwr.read_size > 0) {
            // the read is handled in binder_thread_read(), see note 5
            ret = binder_thread_read(proc, thread, bwr.read_buffer,
                         bwr.read_size,
                         &bwr.read_consumed,
                         filp->f_flags & O_NONBLOCK);
            trace_binder_read_done(ret);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))  // even on error, copy bwr back so user space sees the consumed counts
                    ret = -EFAULT;
                goto out;
            }
        }
        
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
            ret = -EFAULT;
            goto out;
        }
    out:
        return ret;
    }
    

    Note 5:

    static int binder_thread_read(struct binder_proc *proc,
                      struct binder_thread *thread,
                      binder_uintptr_t binder_buffer, size_t size,
                      binder_size_t *consumed, int non_block)
    {
        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;
    
        int ret = 0;
        int wait_for_proc_work;
    
    retry:
        // true on the first pass through binder_loop()'s for(;;): no transaction in flight and the thread's todo list is empty
        wait_for_proc_work = thread->transaction_stack == NULL &&
                    list_empty(&thread->todo);
            // ...
        // mark this thread as waiting
        thread->looper |= BINDER_LOOPER_STATE_WAITING;
        if (wait_for_proc_work)
            proc->ready_threads++;
    
        binder_unlock(__func__);
    
        trace_binder_wait_for_work(wait_for_proc_work,
                       !!thread->transaction_stack,
                       !list_empty(&thread->todo));
        if (wait_for_proc_work) {
        // BINDER_LOOPER_STATE_ENTERED was already set by BC_ENTER_LOOPER, so this error branch is not taken
            if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |BINDER_LOOPER_STATE_ENTERED))) {
                          //...
            }
            binder_set_nice(proc->default_priority);
        if (non_block) {  // opened with O_NONBLOCK
                if (!binder_has_proc_work(proc, thread))
                    ret = -EAGAIN;
            } else
            // key line: ServiceManager goes to sleep here; it is woken when something is put on the todo queue (binder_has_proc_work() becomes true, see the sketch after this function)
                ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
        } else {
            // ...
        }
            // ...
        return 0;
    }
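
    The wait condition above, binder_has_proc_work(), is essentially the following (quoted from memory, may differ slightly between kernel versions):

    static int binder_has_proc_work(struct binder_proc *proc,
                                    struct binder_thread *thread)
    {
        // there is work if the process-wide todo list is non-empty,
        // or if the thread has been asked to return to user space
        return !list_empty(&proc->todo) ||
               (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
    }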
    
    3. Adding a service through binder_ioctl(): adding the media service (the data flow is covered in detail in study notes 5) and waking up the target process
      do {
          // this ioctl lands in the driver's binder_ioctl()
          if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
              err = NO_ERROR;
          else
              err = -errno;
      } while (err == -EINTR);
      ==> driver binder.c: binder_ioctl();
      ==> driver binder.c: binder_ioctl_write_read();
      ==> driver binder.c: the write path, binder_thread_write();  // note 6
      ==> driver binder.c: binder_transaction(proc, thread, &tr, cmd == BC_REPLY);  // note 7

    Note 6:

    static int binder_thread_write(struct binder_proc *proc,
                struct binder_thread *thread,
                binder_uintptr_t binder_buffer, size_t size,
                binder_size_t *consumed){
        uint32_t cmd;
        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;
    
        while (ptr < end && thread->return_error == BR_OK) {
            if (get_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            
            switch (cmd) {
            //...
        // this is the case taken for our call
            case BC_TRANSACTION:
            case BC_REPLY: {
                struct binder_transaction_data tr;
            // copy the binder_transaction_data at ptr from user space into tr
                if (copy_from_user(&tr, ptr, sizeof(tr)))
                    return -EFAULT;
                ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY); // see note 7
                break;
            }
    
            // ... (other commands omitted)
            }
        }
        return 0;
    }
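
    Before walking through binder_transaction(), it helps to recall the two user-space-visible structures it consumes. Abridged from the binder UAPI header (not the full definitions):

    struct binder_transaction_data {
        union {
            __u32            handle;        /* target reference; 0 means ServiceManager */
            binder_uintptr_t ptr;           /* target binder_node address */
        } target;
        binder_uintptr_t cookie;
        __u32            code;              /* requested method code, e.g. SVC_MGR_ADD_SERVICE */
        __u32            flags;             /* e.g. TF_ONE_WAY */
        pid_t            sender_pid;
        uid_t            sender_euid;
        binder_size_t    data_size;         /* size of the payload */
        binder_size_t    offsets_size;      /* size of the offsets array */
        union {
            struct {
                binder_uintptr_t buffer;    /* the payload itself */
                binder_uintptr_t offsets;   /* offsets of flat_binder_objects inside the payload */
            } ptr;
            __u8 buf[8];
        } data;
    };

    struct flat_binder_object {
        __u32 type;                         /* BINDER_TYPE_BINDER, BINDER_TYPE_HANDLE, ... */
        __u32 flags;
        union {
            binder_uintptr_t binder;        /* local object address (sender side) */
            __u32            handle;        /* reference number (receiver side) */
        };
        binder_uintptr_t cookie;
    };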
    

    Note 7 (simplified code):

    static void binder_transaction(struct binder_proc *proc,
                       struct binder_thread *thread,
                       struct binder_transaction_data *tr, int reply){
        struct binder_transaction *t;
        struct binder_work *tcomplete;
        binder_size_t *offp, *off_end;
        // the target process's binder_proc
    struct binder_proc *target_proc;
        // the target's binder_thread
    struct binder_thread *target_thread = NULL;
        // the target's binder_node
    struct binder_node *target_node = NULL;
        // the target's todo list
    struct list_head *target_list;
        // the target's wait queue
    wait_queue_head_t *target_wait;
        struct binder_transaction *in_reply_to = NULL;
    struct binder_transaction_log_entry *e;

    // in the full code e is obtained from the transaction log:
    // e = binder_transaction_log_add(&binder_transaction_log);
    e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
        e->from_proc = proc->pid;
        e->from_thread = thread->pid;
        // handle is 0: the target is ServiceManager
        e->target_handle = tr->target.handle;
        e->data_size = tr->data_size;
        e->offsets_size = tr->offsets_size;
    
        if (reply) {
            //...
        } else {
            if (tr->target.handle) {
                //...           
            } else {
                        // handle == 0: target_node is the static node created when ServiceManager became the context manager
                target_node = binder_context_mgr_node;
            }
    
        // this gives us ServiceManager's binder_proc
            target_proc = target_node->proc;
        }
        if (target_thread) {
            e->to_thread = target_thread->pid;
            target_list = &target_thread->todo;
            target_wait = &target_thread->wait;
        } else {
            target_list = &target_proc->todo;
            target_wait = &target_proc->wait;
        }
    
            /* TODO: reuse incoming transaction for reply */
        // allocate the transaction and the tcomplete work item
        t = kzalloc(sizeof(*t), GFP_KERNEL);
        tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    
        // TF_ONE_WAY means the caller does not wait for a reply
        // the driver records where the transaction came from so the reply can be routed back
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread; // this branch is taken here: a reply is expected
        else
            t->from = NULL;
        // ...
    
        trace_binder_transaction(reply, t, target_node);
        // allocate a buffer from the target process's mmap'ed area on demand; mmap() initially mapped only one physical page, so here the driver allocates as much as the incoming data needs
        t->buffer = binder_alloc_buf(target_proc, tr->data_size,tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
        if (t->buffer == NULL) {
            return_error = BR_FAILED_REPLY;
            goto err_binder_alloc_buf_failed;
        }
        t->buffer->allow_user_free = 0;
        t->buffer->debug_id = t->debug_id;
        t->buffer->transaction = t;
        t->buffer->target_node = target_node;
        trace_binder_transaction_alloc_buf(t->buffer);
            //...        
        offp = (binder_size_t *)(t->buffer->data +ALIGN(tr->data_size, sizeof(void *)));
        // copy the payload into the target process: t->buffer->data is the newly allocated buffer in the target, tr->data.ptr.buffer is the client's data
        if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size)) {
            binder_user_error("%d:%d got transaction with invalid data ptr\n",proc->pid, thread->pid);
            return_error = BR_FAILED_REPLY;
            goto err_copy_data_failed;
        }
        if (copy_from_user(offp, (const void __user *)(uintptr_t)
                   tr->data.ptr.offsets, tr->offsets_size)) {
            binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
                    proc->pid, thread->pid);
            return_error = BR_FAILED_REPLY;
            goto err_copy_data_failed;
        }
            
        // walk the flat_binder_object entries passed by the client
        off_end = (void *)offp + tr->offsets_size;
        for (; offp < off_end; offp++) {
            struct flat_binder_object *fp;
    
            fp = (struct flat_binder_object *)(t->buffer->data + *offp);
    
            switch (fp->type) {
            case BINDER_TYPE_BINDER:
            case BINDER_TYPE_WEAK_BINDER: {
                struct binder_ref *ref;
                        // see the data-flow diagram in study notes 5: fp describes the service object; use the client-side binder address to look up a binder_node in the sender's own process
                struct binder_node *node = binder_get_node(proc, fp->binder);
    
                if (node == NULL) {
                                // NULL the first time this service is passed: create a binder_node in the sender's own process
                    node = binder_new_node(proc, fp->binder, fp->cookie);
                    if (node == NULL) {
                        return_error = BR_FAILED_REPLY;
                        goto err_binder_new_node_failed;
                    }
                    node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
                    node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
                }
                if (fp->cookie != node->cookie) {
                    binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
                        proc->pid, thread->pid,
                        (u64)fp->binder, node->debug_id,
                        (u64)fp->cookie, (u64)node->cookie);
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_get_ref_for_node_failed;
                }
                ref = binder_get_ref_for_node(target_proc, node);
            // rewrite the type: BINDER_TYPE_BINDER ==> BINDER_TYPE_HANDLE
                if (fp->type == BINDER_TYPE_BINDER)
                    fp->type = BINDER_TYPE_HANDLE;
                else
                fp->type = BINDER_TYPE_WEAK_HANDLE; // not taken in our case
                         // the handle value is assigned in binder_get_ref_for_node(): it starts at 1 and increments (see the sketch after this function)
                fp->handle = ref->desc;
    
                binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                           &thread->todo);
    
                trace_binder_transaction_node_to_ref(t, node, ref);
            } break;
        }
    }
        if (reply) {
            //...
        } else if (!(t->flags & TF_ONE_WAY)) {
                // not TF_ONE_WAY: the caller expects a reply
        BUG_ON(t->buffer->async_transaction != 0);
                // mark that a reply is needed
        t->need_reply = 1;
                // remember the previous top of the sender's transaction stack
        t->from_parent = thread->transaction_stack;
                // push this transaction onto the sender's transaction stack
        thread->transaction_stack = t;
    } else {
        // one-way (async) transaction
        if (target_node->has_async_transaction) {
            target_list = &target_node->async_todo;
            target_wait = NULL;
        } else
            target_node->has_async_transaction = 1;
    }
        // queue the transaction on the target's todo list (target_list points into ServiceManager's binder_proc)
        t->work.type = BINDER_WORK_TRANSACTION;
        list_add_tail(&t->work.entry, target_list);
    
        // queue tcomplete on the sender's own thread; the client will receive a BR_TRANSACTION_COMPLETE for it
        tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
        list_add_tail(&tcomplete->entry, &thread->todo);
        // wake up the target's wait queue: this is what wakes ServiceManager out of its wait
        if (target_wait)
            wake_up_interruptible(target_wait);
        return;
    }
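
    The handle numbering mentioned above happens in binder_get_ref_for_node(); a heavily abridged sketch of how desc is assigned (quoted from memory, so treat it as illustrative rather than authoritative):

    static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
                                                      struct binder_node *node)
    {
        struct binder_ref *new_ref, *ref;
        struct rb_node *n;

        // ... first look the node up in proc->refs_by_node; if a ref already exists, return it ...

        new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        new_ref->proc = proc;
        new_ref->node = node;

        // handle 0 is reserved for the context manager; everything else starts at 1
        new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
        // walk refs_by_desc (sorted by desc) and take the first free value
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
            ref = rb_entry(n, struct binder_ref, rb_node_desc);
            if (ref->desc > new_ref->desc)
                break;
            new_ref->desc = ref->desc + 1;
        }
        // ... then insert new_ref into both proc->refs_by_node and proc->refs_by_desc ...
        return new_ref;
    }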
    
    4. The target process is woken up and handles the data:
    static int binder_thread_read(struct binder_proc *proc,
                      struct binder_thread *thread,
                      binder_uintptr_t binder_buffer, size_t size,
                      binder_size_t *consumed, int non_block)
    {
        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;
    
    int ret = 0;
    int wait_for_proc_work;

        // ... same setup as in note 5 (looper state, ready_threads, etc.) ...
        if (wait_for_proc_work) {
            // ServiceManager has been sleeping here; it is woken once work lands on its todo queue
            ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
        } else {
            // ...
        }
    
        if (ret)
            return ret;
    
        while (1) {
            uint32_t cmd;
            struct binder_transaction_data tr;
            struct binder_work *w;
            struct binder_transaction *t = NULL;
    
        if (!list_empty(&thread->todo)) {  // woken up: take the first entry off the thread's todo list
                w = list_first_entry(&thread->todo, struct binder_work, entry);
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            // in our scenario the transaction was queued on the process-wide todo list
            w = list_first_entry(&proc->todo, struct binder_work, entry);
            } else {
                /* no data added */
            }
    
            if (end - ptr < sizeof(tr) + 4)
                break;
    
            switch (w->type) {
        case BINDER_WORK_TRANSACTION:
            // this is the entry that binder_transaction() queued on the target's todo list
            t = container_of(w, struct binder_transaction, work);
            break;
        }

            if (!t)
                continue;
    
            BUG_ON(t->buffer == NULL);
            if (t->buffer->target_node) {
                struct binder_node *target_node = t->buffer->target_node;
    
            tr.target.ptr = target_node->ptr;  // 0: ServiceManager's node was created with ptr = 0
            tr.cookie =  target_node->cookie;  // 0, for the same reason
                t->saved_priority = task_nice(current);
    
                if (t->priority < target_node->min_priority &&!(t->flags & TF_ONE_WAY))
                    binder_set_nice(t->priority);
                else if (!(t->flags & TF_ONE_WAY) ||t->saved_priority > target_node->min_priority)
                    binder_set_nice(target_node->min_priority);
                cmd = BR_TRANSACTION;
            } else {
                tr.target.ptr = 0;
                tr.cookie = 0;
                cmd = BR_REPLY;
            }
            tr.code = t->code;
    
                    //...                
            tr.data_size = t->buffer->data_size;
            tr.offsets_size = t->buffer->offsets_size;
            tr.data.ptr.buffer = (binder_uintptr_t)(
                        (uintptr_t)t->buffer->data +
                        proc->user_buffer_offset);
            tr.data.ptr.offsets = tr.data.ptr.buffer +
                        ALIGN(t->buffer->data_size,
                            sizeof(void *));
    
                // 1. first write the command (cmd = BR_TRANSACTION) to ptr, i.e. into the user-space readbuf
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
                // 2. then copy the binder_transaction_data itself into readbuf
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
    
            trace_binder_transaction_received(t);
            binder_stat_br(proc, thread, cmd);  
    
            list_del(&t->work.entry);
            t->buffer->allow_user_free = 1;
    
            if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
                t->to_parent = thread->transaction_stack;
                t->to_thread = thread;
                thread->transaction_stack = t;
            } else {
                t->buffer->transaction = NULL;
                kfree(t);
                binder_stats_deleted(BINDER_STAT_TRANSACTION);
            }
            break;
        }
        return 0;
    }
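
    Back in user space, binder_loop() hands the filled readbuf to binder_parse(), which dispatches BR_TRANSACTION to svcmgr_handler and sends the reply back. Abridged from servicemanager's binder.c (quoted from memory, other BR_* cases omitted):

    int binder_parse(struct binder_state *bs, struct binder_io *bio,
                     uintptr_t ptr, size_t size, binder_handler func)
    {
        uintptr_t end = ptr + (uintptr_t) size;

        while (ptr < end) {
            uint32_t cmd = *(uint32_t *) ptr;
            ptr += sizeof(uint32_t);
            switch (cmd) {
            case BR_TRANSACTION: {
                struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
                if (func) {
                    unsigned rdata[256/4];
                    struct binder_io msg, reply;
                    int res;

                    bio_init(&reply, rdata, sizeof(rdata), 4);
                    bio_init_from_txn(&msg, txn);          // wrap the buffer the driver filled in
                    res = func(bs, txn, &msg, &reply);     // func == svcmgr_handler: ADD/GET/CHECK_SERVICE
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
                ptr += sizeof(*txn);
                break;
            }
            // ... BR_NOOP, BR_TRANSACTION_COMPLETE, BR_REPLY, BR_DEAD_BINDER, etc. ...
            }
        }
        return 1;  /* simplified; the real function tracks a result variable */
    }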
    
    [Figure: the service-add flow (服务的添加过程.png)]

    Summary:
    a. How does the binder driver find the ServiceManager process?
    By handle == 0: the driver keeps the static variable binder_context_mgr_node that was created when ServiceManager became the context manager.

    b. How does ServiceManager enter its wait, and how is it woken up?
    Waiting: when ServiceManager runs binder_thread_read() from binder_loop(), it checks whether the todo queue is empty and, if so, sleeps on proc->wait.
    Waking: after the client finds target_proc, it copies the data into the target process and queues a BINDER_WORK_TRANSACTION on the target's todo list, queues a BINDER_WORK_TRANSACTION_COMPLETE on its own thread (so the client itself then waits for the reply), and finally wakes the target's wait queue; at that point the service process wakes up and handles the entry on its todo queue.

    c. How is the data passed, and how are handle and type computed and managed?
    The driver looks at the flat_binder_object type: it creates (or reuses) a binder_node in the sender's own process, and creates a binder_ref in the target process that is linked into two red-black trees (one keyed by the handle/desc value, one keyed by the node address); the type is rewritten from BINDER_TYPE_BINDER to BINDER_TYPE_HANDLE, and the handle value is assigned incrementally starting from 1.

    d. How many memory copies are there in total?
    Not just one: the server copies its data into the kernel and back out again (2) + the client copies its data into the kernel and back out again (2) + the client's payload is copied once into the memory mapped by the server (1) + the server's mapped memory is copied into the buffer created at the driver layer (1) = 6 copies.
