
Android Binder Driver Startup

Author: zzq_nene | Published 2020-12-19 19:00

    In Linux, everything is a file.
    A misc device is backed by no real hardware; it is essentially just a block of memory. Binder is registered as a misc device mainly because registration is simple.
    Java layer --> native layer --> driver layer --> service layer
    The driver source discussed here is from Android 6.0.

    I. Binder driver-layer source code

    1.kernel\drivers\staging\android\binder.c#binder_init

    The initialization of the binder driver. This function mainly does the following:

    • Allocate memory
    • Initialize the binder device(s)
    • Add each device to the binder_devices list
    static int __init binder_init(void)
    {
        // return value
        int ret;
        char *device_name, *device_names;
        struct binder_device *device;
        struct hlist_node *tmp;
    
        // Create a single-threaded workqueue for deferred binder work
        binder_deferred_workqueue = create_singlethread_workqueue("binder");
        if (!binder_deferred_workqueue)
            return -ENOMEM;
    
        binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
        if (binder_debugfs_dir_entry_root)
            binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
                             binder_debugfs_dir_entry_root);
    
        if (binder_debugfs_dir_entry_root) {
            debugfs_create_file("state",
                        S_IRUGO,
                        binder_debugfs_dir_entry_root,
                        NULL,
                        &binder_state_fops);
            debugfs_create_file("stats",
                        S_IRUGO,
                        binder_debugfs_dir_entry_root,
                        NULL,
                        &binder_stats_fops);
            debugfs_create_file("transactions",
                        S_IRUGO,
                        binder_debugfs_dir_entry_root,
                        NULL,
                        &binder_transactions_fops);
            debugfs_create_file("transaction_log",
                        S_IRUGO,
                        binder_debugfs_dir_entry_root,
                        &binder_transaction_log,
                        &binder_transaction_log_fops);
            debugfs_create_file("failed_transaction_log",
                        S_IRUGO,
                        binder_debugfs_dir_entry_root,
                        &binder_transaction_log_failed,
                        &binder_transaction_log_fops);
        }
    
        /*
         * Allocate memory for the device name list
         */
        device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
        if (!device_names) {
            ret = -ENOMEM;
            goto err_alloc_device_names_failed;
        }
        // Copy binder_devices_param into device_names.
        // binder_devices_param comes from the kernel config: the default value of
        // ANDROID_BINDER_DEVICES in the Kconfig file, which is simply "binder".
        strcpy(device_names, binder_devices_param);
    
        while ((device_name = strsep(&device_names, ","))) {
            // Initialize a binder device for each comma-separated name
            ret = init_binder_device(device_name);
            if (ret)
                goto err_init_binder_device_failed;
        }
    
        return ret;
    
    err_init_binder_device_failed:
        hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
            misc_deregister(&device->miscdev);
            hlist_del(&device->hlist);
            kfree(device);
        }
    err_alloc_device_names_failed:
        debugfs_remove_recursive(binder_debugfs_dir_entry_root);
    
        destroy_workqueue(binder_deferred_workqueue);
    
        return ret;
    }
    
    Allocate memory for a binder device and initialize it:
    /**
     * Initialize a binder device
     */
    static int __init init_binder_device(const char *name)
    {
        int ret;
        struct binder_device *binder_device;
    
        // Allocate (virtual) memory for the binder device
        binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
        if (!binder_device)
            return -ENOMEM;
    
        // Initialize the misc device fields
        binder_device->miscdev.fops = &binder_fops;
        binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
        // name here is simply "binder"
        binder_device->miscdev.name = name;
    
        // The context manager uid starts out invalid; it is set later to the uid
        // of the process that registers as the context manager (service manager)
        binder_device->context.binder_context_mgr_uid = INVALID_UID;
        binder_device->context.name = name;
    
        // Register the misc device
        ret = misc_register(&binder_device->miscdev);
        if (ret < 0) {
            kfree(binder_device);
            return ret;
        }
    
        // Add the binder device to the device list,
        // i.e. insert &binder_device->hlist into &binder_devices
        hlist_add_head(&binder_device->hlist, &binder_devices);
    
        return ret;
    }
    
    The binder_fops structure:
    /**
     * syscall dispatch table.
     * Much like a JNI table maps Java-layer calls to native methods, binder_fops
     * binds the file operations that the native layer invokes on the binder fd
     * to the corresponding driver functions. For example, when the native layer
     * calls mmap() on the fd, the kernel dispatches to the driver's binder_mmap().
     */
    static const struct file_operations binder_fops = {
        .owner = THIS_MODULE,
        .poll = binder_poll,
        .unlocked_ioctl = binder_ioctl,
        .compat_ioctl = binder_ioctl,
        .mmap = binder_mmap,
        .open = binder_open,
        .flush = binder_flush,
        .release = binder_release,
    };
    

    2.kernel\drivers\staging\android\binder.c#binder_open

    Opening the binder driver:

    • Create a binder_proc object
    • Save the current process's information into proc
    • filp->private_data = proc; store proc in filp->private_data so the driver can later retrieve proc from filp whenever it is used
    • Add proc's proc_node to the binder_procs list
    /**
     * Opened by either a client or a server process.
     * 1. Create a binder_proc object
     * 2. Save the current process's information into the binder_proc object
     * 3. Store proc in filp->private_data
     * 4. Add it to the binder_procs list
     */
    static int binder_open(struct inode *nodp, struct file *filp)
    {
        struct binder_proc *proc;
        struct binder_device *binder_dev;
    
        binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
                 current->group_leader->pid, current->pid);
    
        // Initialize the structure that holds this process's information.
        // When a client "opens" a server it is really talking to binder: the client
        // obtains the service through binder, and binder hands the service back.
        // Allocate (virtual) memory for the proc structure.
        // proc represents the calling process (for example the client Activity's process)
        // inside the driver; it holds all of the process's binder nodes, binder references,
        // binder threads, and so on, and all procs are linked into the binder_procs list.
        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (proc == NULL)
            return -ENOMEM;
        get_task_struct(current);
        // Save the current task (task_struct) in proc->tsk.
        // The lines below initialize the rest of the proc structure.
        proc->tsk = current;
        // todo is the list of pending work targeted at this process
        INIT_LIST_HEAD(&proc->todo);
        // wait is the wait queue this process's threads block on
        init_waitqueue_head(&proc->wait);
        // Use the current task's nice value as the default priority
        proc->default_priority = task_nice(current);
        binder_dev = container_of(filp->private_data, struct binder_device,
                      miscdev);
        proc->context = &binder_dev->context;
        // Take the global binder lock
        binder_lock(__func__);
        // Increment the BINDER_STAT_PROC counter
        binder_stats_created(BINDER_STAT_PROC);
        // Add proc->proc_node to the binder_procs list
        hlist_add_head(&proc->proc_node, &binder_procs);
        // Save the current process's pid (the group leader's pid) into proc
        proc->pid = current->group_leader->pid;
        INIT_LIST_HEAD(&proc->delivered_death);
        // Store proc in filp->private_data so that later driver entry points
        // can retrieve proc from filp.
        filp->private_data = proc;
        // Release the global binder lock
        binder_unlock(__func__);
    
        if (binder_debugfs_dir_entry_proc) {
            char strbuf[11];
    
            snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
            /*
             * proc debug entries are shared between contexts, so
             * this will fail if the process tries to open the driver
             * again with a different context. The printing code will
             * anyway print all contexts that a given PID has, so this
             * is not a problem.
             */
            proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
                binder_debugfs_dir_entry_proc,
                (void *)(unsigned long)proc->pid,
                &binder_proc_fops);
        }
    
        return 0;
    }
    

    3.kernel\drivers\staging\android\binder.c#binder_mmap

    • Based on the size of the user-space virtual memory area, reserve a kernel virtual memory area of the same size
    • Allocate a single page (4 KB) of physical memory; only one page is allocated up front because no transaction has happened yet, and more is allocated on demand
    • Map that physical page into both the kernel virtual area and the user-space virtual area (see the address-offset sketch below)
      Only 4 KB is allocated here: IPC payloads can be larger than 4 KB, but since no communication is in flight yet, one page is allocated first and further pages are allocated as needed
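
    The mapping in step 3 means a byte's kernel address and its user-space address differ only by a fixed offset. A toy sketch with hypothetical addresses (illustration only, not real values), mirroring the user_buffer_offset arithmetic that binder_mmap sets up below:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical toy addresses, for illustration only */
        uintptr_t buffer   = 0x1000;  /* proc->buffer: start of the kernel virtual area  */
        uintptr_t vm_start = 0x8000;  /* vma->vm_start: start of the user-space mapping  */

        /* proc->user_buffer_offset = vma->vm_start - proc->buffer */
        uintptr_t user_buffer_offset = vm_start - buffer;

        /* For any byte in the shared buffer:
         * user-space address = kernel virtual address + user_buffer_offset.
         * This is why data copied once into the kernel buffer is immediately
         * visible at the corresponding user-space address. */
        uintptr_t kernel_addr = buffer + 64;
        uintptr_t user_addr   = kernel_addr + user_buffer_offset;

        printf("kernel %#lx -> user %#lx\n",
               (unsigned long)kernel_addr, (unsigned long)user_addr);
        return 0;
    }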

    binder_mmap checks whether the requested mapping is larger than 4 MB and, if it is, clamps it to 4 MB; the driver never maps more than that. On top of this, the framework limits transaction data (for example data carried in an Intent, which travels over Binder) to 1 MB - 8 KB, and asynchronous transactions to half of that.
    When a process first obtains the service_manager service, it constructs a ProcessState object: ProcessState::self() creates the process-wide object, and ProcessState::getContextObject() then returns the corresponding BpBinder, which communicates with the server-side BBinder. It is ProcessState that decides how large the binder mapping is, i.e. the native-layer limit on binder buffer size, which is 1 MB - 8 KB, namely 1*1024*1024 - (4096*2):
    #define BINDER_VM_SIZE ((1 * 1024 * 1024) - (4096 * 2))
    Constructing the ProcessState object does two things:
    Open the driver -- call open_driver(), which also sets the maximum binder thread count to 15
    mmap -- map BINDER_VM_SIZE bytes and establish the memory mapping with the binder driver (see the sketch below)
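
    As a rough illustration of that native-layer side, here is a minimal user-space sketch (not the actual ProcessState.cpp code; the constants follow Android 6.0's ProcessState.cpp, and the UAPI header path may differ between kernel trees):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/android/binder.h>   /* binder UAPI: BINDER_SET_MAX_THREADS, ... */

    #define BINDER_VM_SIZE ((1 * 1024 * 1024) - (4096 * 2))  /* 1 MB - 8 KB */
    #define DEFAULT_MAX_BINDER_THREADS 15

    int main(void)
    {
        /* open_driver(): open /dev/binder and configure the binder thread pool size */
        int fd = open("/dev/binder", O_RDWR);
        if (fd < 0) {
            perror("open /dev/binder");
            return 1;
        }
        uint32_t max_threads = DEFAULT_MAX_BINDER_THREADS;
        ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);

        /* This mmap is what reaches binder_mmap() in the driver */
        void *vm_start = mmap(NULL, BINDER_VM_SIZE, PROT_READ,
                              MAP_PRIVATE | MAP_NORESERVE, fd, 0);
        if (vm_start == MAP_FAILED) {
            perror("mmap binder");
            close(fd);
            return 1;
        }

        /* ... the process would now go on to talk to servicemanager ... */
        munmap(vm_start, BINDER_VM_SIZE);
        close(fd);
        return 0;
    }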

    /**
     * vma is the process's (user-space) virtual memory area.
     * 1. Based on the size of the user-space VMA, reserve a kernel virtual memory area
     * 2. Allocate one page (4 KB) of physical memory; more is allocated later on demand
     * 3. Map that physical page into both the kernel and the user-space virtual areas
     * buffer below points to the kernel virtual memory.
     */
    static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
    {
        int ret;
        // the kernel virtual memory area
        struct vm_struct *area;
        // Retrieve this process's proc from filp->private_data
        struct binder_proc *proc = filp->private_data;
        const char *failure_string;
        struct binder_buffer *buffer;
    
        if (proc->tsk != current)
            return -EINVAL;
    
        // The driver caps the mapping at 4 MB.
        // The application layer caps it further at 1 MB - 8 KB; since Intents carry
        // their data over Binder, an Intent payload cannot exceed 1 MB - 8 KB
        // (and asynchronous transactions may only use half of that).
        if ((vma->vm_end - vma->vm_start) > SZ_4M)
            vma->vm_end = vma->vm_start + SZ_4M;
    
        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
                 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
                 proc->pid, vma->vm_start, vma->vm_end,
                 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
                 (unsigned long)pgprot_val(vma->vm_page_prot));
    
        if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
            ret = -EPERM;
            failure_string = "bad vm_flags";
            goto err_bad_arg;
        }
        vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
    
        // Take the mmap mutex
        mutex_lock(&binder_mmap_lock);
        // Check whether this process has already mapped the driver
        if (proc->buffer) {
            ret = -EBUSY;
            // Already mapped: jump to err_already_mapped
            failure_string = "already mapped";
            goto err_already_mapped;
        }
    
        // Reserve a kernel virtual address range the same size as the process's VMA
        // (no physical memory is allocated here yet)
        area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
        if (area == NULL) {
            ret = -ENOMEM;
            failure_string = "get_vm_area";
            goto err_get_vm_area_failed;
        }
        // Point proc->buffer at the start of the kernel virtual area
        proc->buffer = area->addr;
        // Offset: the user-space start address minus the kernel virtual start address.
        // For any data placed in the buffer:
        // user-space address = kernel virtual address + user_buffer_offset
        proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
        mutex_unlock(&binder_mmap_lock);
    
    #ifdef CONFIG_CPU_CACHE_VIPT
        if (cache_is_vipt_aliasing()) {
            while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
                pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
                vma->vm_start += PAGE_SIZE;
            }
        }
    #endif
        // Allocate the pages array: one struct page pointer per page, i.e. (VMA size / PAGE_SIZE) entries
        proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
        if (proc->pages == NULL) {
            ret = -ENOMEM;
            failure_string = "alloc page array";
            goto err_alloc_pages_failed;
        }
        // buffer_size is the size of the user-space VMA
        proc->buffer_size = vma->vm_end - vma->vm_start;
    
        vma->vm_ops = &binder_vm_ops;
        vma->vm_private_data = proc;
        // Allocate physical memory: 1 allocates pages, 0 frees them. Only the first page is allocated here.
        if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
            ret = -ENOMEM;
            failure_string = "alloc small buf";
            goto err_alloc_small_buf_failed;
        }
        // buffer now points at the physically backed start of proc->buffer
        buffer = proc->buffer;
        INIT_LIST_HEAD(&proc->buffers);
        // Add buffer to proc->buffers
        list_add(&buffer->entry, &proc->buffers);
        // Mark this buffer as free (available for use)
        buffer->free = 1;
        // Insert buffer into the free_buffers rb-tree
        // (buffer refers to the kernel virtual memory)
        binder_insert_free_buffer(proc, buffer);
        // Asynchronous transactions may only use half of the mapped buffer:
        // free_async_space = buffer_size / 2
        proc->free_async_space = proc->buffer_size / 2;
        barrier();
        proc->files = get_files_struct(current);
        proc->vma = vma;
        proc->vma_vm_mm = vma->vm_mm;
    
        /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
             proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
        return 0;
    
    err_alloc_small_buf_failed:
        kfree(proc->pages);
        proc->pages = NULL;
    err_alloc_pages_failed:
        mutex_lock(&binder_mmap_lock);
        vfree(proc->buffer);
        proc->buffer = NULL;
    err_get_vm_area_failed:
    err_already_mapped:
        mutex_unlock(&binder_mmap_lock);
    err_bad_arg:
        pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
               proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
        return ret;
    }
    
    Allocate or free physical memory: passing 0 frees the range, passing 1 allocates it. binder_mmap calls it with 1, so memory is allocated here.
    /**
     * Map physical pages into both the kernel virtual area and the user-space virtual area (this is the real mmap work)
     */
    static int binder_update_page_range(struct binder_proc *proc, int allocate,
                        void *start, void *end,
                        struct vm_area_struct *vma)
    {
        void *page_addr;
        unsigned long user_page_addr;
        struct page **page;
        struct mm_struct *mm;
    
        binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
                 "%d: %s pages %p-%p\n", proc->pid,
                 allocate ? "allocate" : "free", start, end);
    
        if (end <= start)
            return 0;
    
        trace_binder_update_page_range(proc, allocate, start, end);
    
        if (vma)
            mm = NULL;
        else
            mm = get_task_mm(proc->tsk);
    
        if (mm) {
            down_write(&mm->mmap_sem);
            vma = proc->vma;
            if (vma && mm != proc->vma_vm_mm) {
                pr_err("%d: vma mm and task mm mismatch\n",
                    proc->pid);
                vma = NULL;
            }
        }
        // allocate == 0: free the range
        // allocate == 1: allocate the range
        if (allocate == 0)
            goto free_range;
    
        if (vma == NULL) {
            pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                proc->pid);
            goto err_no_vma;
        }
    
        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
            int ret;
    
            page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
    
            BUG_ON(*page);
            // Allocate one physical page (a page is 4 KB)
            *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
            if (*page == NULL) {
                pr_err("%d: binder_alloc_buf failed for page at %p\n",
                    proc->pid, page_addr);
                goto err_alloc_page_failed;
            }
            // Map the physical page into the kernel virtual address range
            ret = map_kernel_range_noflush((unsigned long)page_addr,
                        PAGE_SIZE, PAGE_KERNEL, page);
            flush_cache_vmap((unsigned long)page_addr,
                    (unsigned long)page_addr + PAGE_SIZE);
            if (ret != 1) {
                pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
                       proc->pid, page_addr);
                goto err_map_kernel_failed;
            }
            user_page_addr =
                (uintptr_t)page_addr + proc->user_buffer_offset;
            // Map the same physical page into the user-space virtual address range
            ret = vm_insert_page(vma, user_page_addr, page[0]);
            if (ret) {
                pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                       proc->pid, user_page_addr);
                goto err_vm_insert_page_failed;
            }
            /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
            up_write(&mm->mmap_sem);
            mmput(mm);
        }
        return 0;
    
    free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
            page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
            if (vma)
                zap_page_range(vma, (uintptr_t)page_addr +
                    proc->user_buffer_offset, PAGE_SIZE, NULL);
    err_vm_insert_page_failed:
            unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
    err_map_kernel_failed:
            __free_page(*page);
            *page = NULL;
    err_alloc_page_failed:
            ;
        }
    err_no_vma:
        if (mm) {
            up_write(&mm->mmap_sem);
            mmput(mm);
        }
        return -ENOMEM;
    }
    
    Insert the buffer into free_buffers:
    static void binder_insert_free_buffer(struct binder_proc *proc,
                          struct binder_buffer *new_buffer)
    {
        struct rb_node **p = &proc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;
    
        BUG_ON(!new_buffer->free);
    
        new_buffer_size = binder_buffer_size(proc, new_buffer);
    
        binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
                 "%d: add free buffer, size %zd, at %p\n",
                  proc->pid, new_buffer_size, new_buffer);
    
        while (*p) {
            parent = *p;
            buffer = rb_entry(parent, struct binder_buffer, rb_node);
            BUG_ON(!buffer->free);
    
            // Compute the usable size of this buffer
            buffer_size = binder_buffer_size(proc, buffer);
    
            if (new_buffer_size < buffer_size)
                p = &parent->rb_left;
            else
                p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        // Link the new node into the free_buffers rb-tree
        rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
    }
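
    Keeping free buffers ordered by usable size is what makes a best-fit allocation cheap later. The allocator side (binder_alloc_buf, not shown in this article) walks this same tree looking for the smallest free buffer that still fits the requested size, roughly like this (a simplified sketch, not a verbatim excerpt):

    struct rb_node *n = proc->free_buffers.rb_node;
    struct rb_node *best_fit = NULL;
    struct binder_buffer *buffer;
    size_t buffer_size;

    while (n) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);
        buffer_size = binder_buffer_size(proc, buffer);

        if (size < buffer_size) {
            /* Big enough: remember it, then look for a tighter fit on the left */
            best_fit = n;
            n = n->rb_left;
        } else if (size > buffer_size) {
            /* Too small: only larger buffers live in the right subtree */
            n = n->rb_right;
        } else {
            /* Exact fit */
            best_fit = n;
            break;
        }
    }
    /* If best_fit is still NULL here, no free buffer is large enough */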
    

    4.kernel\drivers\staging\android\binder.c#binder_ioctl

    Read/write operations. The application layer calls into the native layer, the native layer calls into the driver, and that triggers the driver's BINDER_WRITE_READ command (a user-space sketch of the call follows below).
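
    A hedged user-space sketch of how a caller drives this command (struct binder_write_read and BINDER_WRITE_READ come from the binder UAPI header; the buffer contents are left opaque here, and the header path may differ between kernel trees):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>   /* struct binder_write_read, BINDER_WRITE_READ */

    /* One BINDER_WRITE_READ round trip on an already opened /dev/binder fd.
     * write_buf holds outgoing commands (e.g. BC_TRANSACTION);
     * read_buf receives returned work (e.g. BR_TRANSACTION_COMPLETE, BR_REPLY). */
    static int binder_write_read_once(int fd,
                                      void *write_buf, size_t write_size,
                                      void *read_buf, size_t read_size)
    {
        struct binder_write_read bwr;

        memset(&bwr, 0, sizeof(bwr));
        bwr.write_buffer = (uintptr_t)write_buf;   /* the copied header points at user data */
        bwr.write_size   = write_size;
        bwr.read_buffer  = (uintptr_t)read_buf;
        bwr.read_size    = read_size;

        /* The driver copies this header in with copy_from_user(), runs
         * binder_thread_write()/binder_thread_read(), then copies the header
         * (with updated write_consumed/read_consumed) back with copy_to_user(). */
        if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
            return -1;

        return (int)bwr.read_consumed;
    }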

    /**
     * Main read/write entry point
     */
    static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    {
        int ret;
        struct binder_proc *proc = filp->private_data;
        struct binder_thread *thread;
        unsigned int size = _IOC_SIZE(cmd);
        void __user *ubuf = (void __user *)arg;
    
        /*pr_info("binder_ioctl: %d:%d %x %lx\n",
                proc->pid, current->pid, cmd, arg);*/
    
        trace_binder_ioctl(cmd, arg);
    
        // Interruptible wait; under normal conditions (binder_stop_on_user_error < 2) it does not block
        ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
        if (ret)
            goto err_unlocked;
    
        binder_lock(__func__);
        thread = binder_get_thread(proc);
        if (thread == NULL) {
            ret = -ENOMEM;
            goto err;
        }
    
        switch (cmd) {
        case BINDER_WRITE_READ:
            // Read/write command, reached when user space calls ioctl(fd, BINDER_WRITE_READ, ...)
            ret = binder_ioctl_write_read(filp, cmd, arg, thread);
            if (ret)
                goto err;
            break;
        case BINDER_SET_MAX_THREADS:
            if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
                ret = -EINVAL;
                goto err;
            }
            break;
        case BINDER_SET_CONTEXT_MGR:
            ret = binder_ioctl_set_ctx_mgr(filp);
            if (ret)
                goto err;
            break;
        case BINDER_THREAD_EXIT:
            binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
                     proc->pid, thread->pid);
            binder_free_thread(proc, thread);
            thread = NULL;
            break;
        case BINDER_VERSION: {
            struct binder_version __user *ver = ubuf;
    
            if (size != sizeof(struct binder_version)) {
                ret = -EINVAL;
                goto err;
            }
            if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                     &ver->protocol_version)) {
                ret = -EINVAL;
                goto err;
            }
            break;
        }
        default:
            ret = -EINVAL;
            goto err;
        }
        ret = 0;
    err:
        if (thread)
            thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
        binder_unlock(__func__);
        wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
        if (ret && ret != -ERESTARTSYS)
            pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
    err_unlocked:
        trace_binder_ioctl_done(ret);
        return ret;
    }
    
    /**
     * Called from binder_ioctl to handle BINDER_WRITE_READ
     */
    static int binder_ioctl_write_read(struct file *filp,
                    unsigned int cmd, unsigned long arg,
                    struct binder_thread *thread)
    {
        int ret = 0;
        struct binder_proc *proc = filp->private_data;
        unsigned int size = _IOC_SIZE(cmd);
        void __user *ubuf = (void __user *)arg;
        struct binder_write_read bwr;
    
        if (size != sizeof(struct binder_write_read)) {
            ret = -EINVAL;
            goto out;
        }
        // Only the binder_write_read header is copied here, not the payload;
        // it is still a copy from user space into kernel space
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto out;
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
                 proc->pid, thread->pid,
                 (u64)bwr.write_size, (u64)bwr.write_buffer,
                 (u64)bwr.read_size, (u64)bwr.read_buffer);
        // Is there data to write?
        if (bwr.write_size > 0) {
            ret = binder_thread_write(proc, thread,
                          bwr.write_buffer,
                          bwr.write_size,
                          &bwr.write_consumed);
            trace_binder_write_done(ret);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto out;
            }
        }
        // Is there data to read?
        if (bwr.read_size > 0) {
            ret = binder_thread_read(proc, thread, bwr.read_buffer,
                         bwr.read_size,
                         &bwr.read_consumed,
                         filp->f_flags & O_NONBLOCK);
            trace_binder_read_done(ret);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto out;
            }
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,
                 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
                 proc->pid, thread->pid,
                 (u64)bwr.write_consumed, (u64)bwr.write_size,
                 (u64)bwr.read_consumed, (u64)bwr.read_size);
        // Copy the (updated) header back from kernel space to user space
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
            ret = -EFAULT;
            goto out;
        }
    out:
        return ret;
    }
    

Source: https://www.haomeiwen.com/subject/dheeuktx.html