Binder Driver

Author: lbtrace | Published 2018-03-18 22:22

    Data Structures

    binder_proc

    // Describes a process that uses binder IPC
    struct binder_proc {
            struct hlist_node proc_node; // node in the global binder_procs hash list
            struct rb_root threads; // red-black tree of binder threads
            struct rb_root nodes; // red-black tree of binder entity objects (binder_node)
            struct rb_root refs_by_desc; // red-black tree of binder references, keyed by descriptor
            struct rb_root refs_by_node; // red-black tree of binder references, keyed by binder_node address
            int pid; // pid of the process
            struct vm_area_struct *vma; // user address space area mapped for the binder buffer
            struct mm_struct *vma_vm_mm; // address space (mm_struct) that owns the vma
            struct task_struct *tsk; // the process's task_struct
            struct files_struct *files;
            struct hlist_node deferred_work_node;
            int deferred_work;
            void *buffer; // kernel virtual address of the binder buffer
            ptrdiff_t user_buffer_offset; // offset between the user-space and kernel-space mappings of the binder buffer
    
            struct list_head buffers; // list of all binder buffers
            struct rb_root free_buffers; // red-black tree of free binder buffers
            struct rb_root allocated_buffers; // red-black tree of in-use binder buffers
            size_t free_async_space;
    
            struct page **pages; // physical pages backing the binder buffer
            size_t buffer_size; // total size of the binder buffer
            uint32_t buffer_free; // amount of free binder buffer space
            struct list_head todo; // list of binder_work items pending for this process
            wait_queue_head_t wait; // wait queue head
            struct binder_stats stats; // binder statistics
            struct list_head delivered_death;
            int max_threads; // maximum number of binder threads, set from user space
            int requested_threads; // non-zero while the driver has an outstanding request to spawn a binder thread
            int requested_threads_started; // number of binder threads started at the driver's request
            int ready_threads; // number of idle binder threads
            long default_priority;
            struct dentry *debugfs_entry;
    };
    

    binder_thread

    // Describes a thread that takes part in binder IPC
    struct binder_thread {
            struct binder_proc *proc; // binder_proc of the process this thread belongs to
            struct rb_node rb_node; // node in the process's binder thread red-black tree
            int pid;
            int looper;
            struct binder_transaction *transaction_stack; // this thread's transaction stack
            struct list_head todo; // list of binder_work items pending for this thread
            uint32_t return_error; /* Write failed, return error code in read buf */
            uint32_t return_error2; /* Write failed, return error code in read */
                    /* buffer. Used when sending a reply to a dead process that */
                    /* we are also waiting on */
            wait_queue_head_t wait;
            struct binder_stats stats;
    };
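
    The threads tree in binder_proc is keyed by the calling thread's pid. Below is a minimal lookup sketch modeled on the driver's binder_get_thread(); the real function also allocates and inserts a new binder_thread on a miss, and the sketch assumes the kernel rbtree helpers from <linux/rbtree.h>:

    static struct binder_thread *binder_lookup_thread(struct binder_proc *proc,
                                                      int pid)
    {
            struct rb_node *n = proc->threads.rb_node;

            while (n) {
                    struct binder_thread *thread =
                            rb_entry(n, struct binder_thread, rb_node);

                    if (pid < thread->pid)
                            n = n->rb_left;
                    else if (pid > thread->pid)
                            n = n->rb_right;
                    else
                            return thread; // found the calling thread
            }
            return NULL; // the driver would create and insert a new binder_thread here
    }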
    

    binder_transaction

    // Describes one binder IPC transaction
    struct binder_transaction {
            int debug_id; // for debugging
            struct binder_work work; // the associated binder_work item
            struct binder_thread *from; // sending binder_thread
            struct binder_transaction *from_parent;
            struct binder_proc *to_proc; // receiving binder_proc
            struct binder_thread *to_thread; // receiving binder_thread
            struct binder_transaction *to_parent;
            unsigned need_reply:1;
            /* unsigned is_dead:1; */       /* not used at the moment */
    
            struct binder_buffer *buffer; // binder buffer used by this transaction
            unsigned int    code; // binder IPC command code
            unsigned int    flags; // binder IPC flags, e.g. TF_ONE_WAY
            long    priority;
            long    saved_priority;
            kuid_t  sender_euid;
    };
    

    binder_buffer

    // Buffer used on the receiving side to store the binder IPC payload
    struct binder_buffer {
            struct list_head entry; /* free and allocated entries by address */
            struct rb_node rb_node; /* free entry by size or allocated entry */
                                    /* by address */
            unsigned free:1;
            unsigned allow_user_free:1;
            unsigned async_transaction:1;
            unsigned debug_id:29;
    
            struct binder_transaction *transaction;
    
            struct binder_node *target_node; // receiving binder_node, or NULL for a reply
            size_t data_size;
            size_t offsets_size;
            uint8_t data[0]; // start of the payload data
    };
    
    • binder_buffer memory layout
    ![binder_buffer.png](https://img.haomeiwen.com/i1945694/a28b4712fba2afab.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)

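    A binder_buffer does not record its usable size directly; the driver derives it from the distance to the next buffer in the proc->buffers list. A sketch along the lines of the driver's binder_buffer_size() helper (exact code varies across kernel versions):

    static size_t binder_buffer_size(struct binder_proc *proc,
                                     struct binder_buffer *buffer)
    {
            // the last buffer in the list extends to the end of the mapped region
            if (list_is_last(&buffer->entry, &proc->buffers))
                    return proc->buffer + proc->buffer_size - (void *)buffer->data;
            // otherwise the next buffer's header marks the end of this one's data
            return (size_t)list_entry(buffer->entry.next,
                              struct binder_buffer, entry) - (size_t)buffer->data;
    }
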
    flat_binder_object

    // Used to pass binder objects between processes; the binder driver rewrites members of this struct in flight
    struct flat_binder_object {
            /* 8 bytes for large_flat_header. */
            __u32           type; // type of the binder object
            __u32           flags;
    
            // which of the following fields is meaningful depends on the object type
            /* 8 bytes of data. */
            union {
                    binder_uintptr_t        binder; /* local object */ 
                    __u32                   handle; /* remote object */
            };
    
            /* extra data associated with local object */
            binder_uintptr_t        cookie;
    };
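
    For illustration, a minimal user-space sketch of flattening a local object, similar to what a service registration path does; svc is a hypothetical object pointer, and the UAPI header path may differ by platform. On delivery the driver rewrites type to BINDER_TYPE_HANDLE and fills in a descriptor for the receiver:

    #include <stdint.h>
    #include <linux/android/binder.h>

    struct flat_binder_object make_local_object(void *svc)
    {
            struct flat_binder_object obj = {
                    .type   = BINDER_TYPE_BINDER,                  // local (entity) object
                    .flags  = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS, // min priority | accept fds
                    .binder = (binder_uintptr_t)(uintptr_t)svc,    // local object pointer
                    .cookie = (binder_uintptr_t)(uintptr_t)svc,    // extra data for the local object
            };
            return obj;
    }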
    

    binder_transaction_data

    // Describes the binder IPC data exchanged between user space and the driver
    struct binder_transaction_data {
            /* The first two are only used for bcTRANSACTION and brTRANSACTION,
             * identifying the target and contents of the transaction.
             */
            union {
                    /* target descriptor of command transaction */
                    __u32   handle;
                    /* target descriptor of return transaction */
                    binder_uintptr_t ptr;
            } target;
            binder_uintptr_t        cookie; /* target object cookie */
            __u32           code;           /* transaction command */
    
            /* General information about the transaction. */
            __u32           flags;
            pid_t           sender_pid;
            uid_t           sender_euid;
            binder_size_t   data_size;      /* number of bytes of data */
            binder_size_t   offsets_size;   /* number of bytes of offsets */
    
            /* If this transaction is inline, the data immediately
             * follows here; otherwise, it ends with a pointer to
             * the data buffer.
             */
            union {
                    struct {
                            /* transaction data */
                            binder_uintptr_t        buffer;
                            /* offsets from buffer to flat_binder_object structs */
                            binder_uintptr_t        offsets;
                    } ptr;
                    __u8    buf[8];
            } data;
    };
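
    To make the layout concrete, here is a minimal user-space sketch that packages one synchronous BC_TRANSACTION into a BINDER_WRITE_READ ioctl. The handle, code and payload are placeholders; real clients go through libbinder, and the UAPI header path may differ by platform:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>

    int send_transaction(int binder_fd, uint32_t handle, uint32_t code,
                         void *data, size_t len)
    {
            struct {
                    uint32_t cmd;
                    struct binder_transaction_data txn;
            } __attribute__((packed)) writebuf;
            struct binder_write_read bwr;

            memset(&writebuf, 0, sizeof(writebuf));
            writebuf.cmd = BC_TRANSACTION;
            writebuf.txn.target.handle = handle;  // descriptor of the remote object
            writebuf.txn.code = code;             // transaction command code
            writebuf.txn.flags = 0;               // 0 = synchronous; TF_ONE_WAY = async
            writebuf.txn.data_size = len;
            writebuf.txn.offsets_size = 0;        // no flat_binder_object offsets
            writebuf.txn.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data;
            writebuf.txn.data.ptr.offsets = 0;

            memset(&bwr, 0, sizeof(bwr));
            bwr.write_size = sizeof(writebuf);
            bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&writebuf;

            // the reply (BR_TRANSACTION_COMPLETE, then BR_REPLY) is picked up
            // by a later BINDER_WRITE_READ call with a non-zero read_size
            return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
    }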
    

    binder_node

    // Describes a binder entity object (node)
    struct binder_node {
            int debug_id;
            struct binder_work work;
            union {
                    struct rb_node rb_node;
                    struct hlist_node dead_node;
            };
            struct binder_proc *proc;
            struct hlist_head refs; // hash list of the binder references that point at this node
            int internal_strong_refs;
            int local_weak_refs;
            int local_strong_refs;
            binder_uintptr_t ptr; // user-space weak reference to the service backing this node
            binder_uintptr_t cookie; // user-space address of the service backing this node
            unsigned has_strong_ref:1;
            unsigned pending_strong_ref:1;
            unsigned has_weak_ref:1;
            unsigned pending_weak_ref:1;
            unsigned has_async_transaction:1;
            unsigned accept_fds:1;
            unsigned min_priority:8;
            struct list_head async_todo;
    };
    

    binder_ref

    // Describes a binder reference object
    struct binder_ref {
            /* Lookups needed: */
            /*   node + proc => ref (transaction) */
            /*   desc + proc => ref (transaction, inc/dec ref) */
            /*   node => refs + procs (proc exit) */
            int debug_id;
            struct rb_node rb_node_desc;
            struct rb_node rb_node_node;
            struct hlist_node node_entry;
            struct binder_proc *proc; // binder_proc of the owning process
            struct binder_node *node; // the binder node this reference points to
            uint32_t desc; // descriptor (the handle seen by user space)
            int strong;
            int weak;
            struct binder_ref_death *death;
    };
    

    binder_work

    // Describes a binder work item
    struct binder_work {
            struct list_head entry;
            enum {
                    BINDER_WORK_TRANSACTION = 1,
                    BINDER_WORK_TRANSACTION_COMPLETE,
                    BINDER_WORK_NODE,
                    BINDER_WORK_DEAD_BINDER,
                    BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                    BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
            } type; // type of the work item
    };
    

    Common binder_driver_return_protocol commands (returned by the binder driver to user space)

    Command                  Meaning
    BR_ERROR                 an error occurred during binder communication
    BR_OK                    binder communication is OK
    BR_TRANSACTION           the driver delivers transaction data to the server side
    BR_REPLY                 the driver delivers reply data to the client side
    BR_DEAD_REPLY            communication failed (the target process/thread is dead, or the target binder node is NULL)
    BR_TRANSACTION_COMPLETE  the driver tells the client (or server) that its write was accepted
    BR_NOOP                  no operation
    BR_SPAWN_LOOPER          the driver asks the process to spawn another binder thread
    BR_DEAD_BINDER           death notification for a service
    BR_FAILED_REPLY          communication failed

    Common binder_driver_command_protocol commands (sent from user space to the binder driver)

    Command                        Meaning
    BC_TRANSACTION                 the client sends transaction data to the driver
    BC_REPLY                       the server sends reply data to the driver
    BC_FREE_BUFFER                 the client (or server) asks the driver to free a binder buffer
    BC_REGISTER_LOOPER             a binder thread spawned at the driver's request starts looping
    BC_ENTER_LOOPER                the main binder thread starts looping
    BC_EXIT_LOOPER                 a binder thread leaves its loop
    BC_REQUEST_DEATH_NOTIFICATION  register a death notification for a service
    BC_CLEAR_DEATH_NOTIFICATION    unregister a death notification
    BC_DEAD_BINDER_DONE            the death notification has been handled
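
    Putting the two command sets together: a minimal user-space sketch in which a thread announces itself with BC_ENTER_LOOPER and then blocks in BINDER_WRITE_READ, looking at whatever BR_ command the driver returns first. This mirrors the servicemanager-style loop; payload parsing and error handling are omitted:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>

    void binder_loop_once(int binder_fd)
    {
            uint32_t enter_cmd = BC_ENTER_LOOPER;
            uint32_t readbuf[128];
            struct binder_write_read bwr;

            // tell the driver this thread is the main looper thread
            memset(&bwr, 0, sizeof(bwr));
            bwr.write_size = sizeof(enter_cmd);
            bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&enter_cmd;
            ioctl(binder_fd, BINDER_WRITE_READ, &bwr);

            // block until the driver has work, then inspect the first returned command
            memset(&bwr, 0, sizeof(bwr));
            bwr.read_size = sizeof(readbuf);
            bwr.read_buffer = (binder_uintptr_t)(uintptr_t)readbuf;
            ioctl(binder_fd, BINDER_WRITE_READ, &bwr);

            switch (readbuf[0]) {
            case BR_NOOP:                 // nothing to do
                    break;
            case BR_TRANSACTION_COMPLETE: // our last BC_TRANSACTION was accepted
                    break;
            case BR_SPAWN_LOOPER:         // driver asks us to spawn another binder thread
                    break;
            default:                      // BR_TRANSACTION / BR_REPLY carry a binder_transaction_data payload
                    break;
            }
    }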

    binder_init

    static int __init binder_init(void)
    {
            int ret;
    
            // create a single-threaded workqueue for deferred binder work
            binder_deferred_workqueue = create_singlethread_workqueue("binder");
            if (!binder_deferred_workqueue)
                    return -ENOMEM;
            // create the binder debugfs directory under /sys/kernel/debug
            binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
            if (binder_debugfs_dir_entry_root)
                    binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
                                                     binder_debugfs_dir_entry_root);
            // register the binder misc device, which creates /dev/binder
            ret = misc_register(&binder_miscdev);
            if (binder_debugfs_dir_entry_root) {
                    // create /sys/kernel/debug/binder/state
                    debugfs_create_file("state",
                                        S_IRUGO,
                                        binder_debugfs_dir_entry_root,
                                        NULL,
                                        &binder_state_fops);
                    // create /sys/kernel/debug/binder/stats
                    debugfs_create_file("stats",
                                        S_IRUGO,
                                        binder_debugfs_dir_entry_root,
                                        NULL,
                                        &binder_stats_fops);
                    // create /sys/kernel/debug/binder/transactions
                    debugfs_create_file("transactions",
                                        S_IRUGO,
                                        binder_debugfs_dir_entry_root,
                                        NULL,
                                        &binder_transactions_fops);
                    // create /sys/kernel/debug/binder/transaction_log
                    debugfs_create_file("transaction_log",
                                        S_IRUGO,
                                        binder_debugfs_dir_entry_root,
                                        &binder_transaction_log,
                                        &binder_transaction_log_fops);
                    // create /sys/kernel/debug/binder/failed_transaction_log
                    debugfs_create_file("failed_transaction_log",
                                        S_IRUGO,
                                        binder_debugfs_dir_entry_root,
                                        &binder_transaction_log_failed,
                                        &binder_transaction_log_fops);
            }
            return ret;
    }
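
    misc_register() is what makes /dev/binder appear. A sketch of the miscdevice and file_operations it registers; this is abbreviated, and the real driver's table also wires up poll, compat_ioctl, flush and release:

    static const struct file_operations binder_fops = {
            .owner          = THIS_MODULE,
            .unlocked_ioctl = binder_ioctl,
            .mmap           = binder_mmap,
            .open           = binder_open,
    };

    static struct miscdevice binder_miscdev = {
            .minor = MISC_DYNAMIC_MINOR, // let the kernel pick a minor number
            .name  = "binder",           // device node name: /dev/binder
            .fops  = &binder_fops,
    };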
    

    binder_open

    static int binder_open(struct inode *nodp, struct file *filp)
    {
            struct binder_proc *proc;
    
            binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
                         current->group_leader->pid, current->pid);
            // allocate the binder_proc object
            proc = kzalloc(sizeof(*proc), GFP_KERNEL);
            if (proc == NULL)
                    return -ENOMEM;
            // take a reference on the current process's (group leader's) task_struct
            get_task_struct(current->group_leader);
            proc->tsk = current->group_leader;
            // initialize the list of pending work items
            INIT_LIST_HEAD(&proc->todo);
            // initialize the wait queue head
            init_waitqueue_head(&proc->wait);
            proc->default_priority = task_nice(current);
    
            binder_lock(__func__);
    
            binder_stats_created(BINDER_STAT_PROC);
            // add the binder_proc to the global binder_procs hash list
            hlist_add_head(&proc->proc_node, &binder_procs);
            proc->pid = current->group_leader->pid;
            INIT_LIST_HEAD(&proc->delivered_death);
            // stash the binder_proc in the file's private data
            filp->private_data = proc;
    
            binder_unlock(__func__);
    
            if (binder_debugfs_dir_entry_proc) {
                    char strbuf[11];
    
                    snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
                    // create a debugfs node for this binder process under /sys/kernel/debug/binder/proc
                    proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
                            binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
            }
    
            return 0;
    }
    

    binder_ioctl

    User space normally reaches the binder driver through the ioctl system call, which eventually lands in binder_ioctl.

    • ioctl commands
    Command                 Meaning
    BINDER_WRITE_READ       read from / write to the binder driver (the most frequently used command)
    BINDER_SET_MAX_THREADS  set the maximum number of binder threads
    BINDER_SET_CONTEXT_MGR  ServiceManager registers itself as the context manager
    BINDER_THREAD_EXIT      a binder thread exits
    BINDER_VERSION          query the binder driver protocol version

    We will come back to the implementation of binder_ioctl later, in the context of concrete scenarios; a minimal user-space setup sketch is shown below.
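
    As a concrete reference, here is roughly what libbinder's ProcessState does when a process starts using binder. The 1M - 8K mapping size and the limit of 15 threads are just the customary defaults, not requirements, and the UAPI header path may differ by platform:

    #include <stdint.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/android/binder.h>

    #define BINDER_VM_SIZE ((1024 * 1024) - 4096 * 2) // the usual 1M - 8K mapping

    int binder_setup(void **map_out)
    {
            struct binder_version vers;
            uint32_t max_threads = 15;
            void *map;

            int fd = open("/dev/binder", O_RDWR | O_CLOEXEC); // ends up in binder_open()
            if (fd < 0)
                    return -1;

            // BINDER_VERSION: check that user space and the driver agree on the protocol
            if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
                vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
                    return -1;

            // BINDER_SET_MAX_THREADS: cap how many threads the driver may ask us to spawn
            ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);

            // mmap on the fd ends up in binder_mmap() below and sets up the receive buffer
            map = mmap(NULL, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
            if (map == MAP_FAILED)
                    return -1;

            *map_out = map;
            return fd;
    }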

    binder_mmap

    static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
    {
            int ret;
            struct vm_struct *area;
            struct binder_proc *proc = filp->private_data;
            const char *failure_string;
            struct binder_buffer *buffer;
    
            if (proc->tsk != current->group_leader)
                    return -EINVAL;
            // vma is the virtual memory area allocated by user space (typically 1M - 8K); it must not exceed 4M
            if ((vma->vm_end - vma->vm_start) > SZ_4M)
                    vma->vm_end = vma->vm_start + SZ_4M;
    
            binder_debug(BINDER_DEBUG_OPEN_CLOSE,
                         "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
                         proc->pid, vma->vm_start, vma->vm_end,
                         (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
                         (unsigned long)pgprot_val(vma->vm_page_prot));
    
            if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
                    ret = -EPERM;
                    failure_string = "bad vm_flags";
                    goto err_bad_arg;
            }
            vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
    
            mutex_lock(&binder_mmap_lock);
            if (proc->buffer) {
                    ret = -EBUSY;
                    failure_string = "already mapped";
                    goto err_already_mapped;
            }
    
            // reserve a contiguous region in the kernel address space
            area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
            if (area == NULL) {
                    ret = -ENOMEM;
                    failure_string = "get_vm_area";
                    goto err_get_vm_area_failed;
            }
            // kernel-space base address from which binder_buffers are allocated
            proc->buffer = area->addr;
            // difference between the user-space and kernel-space virtual addresses
            proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
            mutex_unlock(&binder_mmap_lock);
            ......
            proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
            if (proc->pages == NULL) {
                    ret = -ENOMEM;
                    failure_string = "alloc page array";
                    goto err_alloc_pages_failed;
            }
            proc->buffer_size = vma->vm_end - vma->vm_start;
    
            vma->vm_ops = &binder_vm_ops;
            vma->vm_private_data = proc;
    
            // allocate physical pages; the user and kernel virtual ranges are mapped to the same physical pages
            if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
                    ret = -ENOMEM;
                    failure_string = "alloc small buf";
                    goto err_alloc_small_buf_failed;
            }
            // set up the first binder_buffer
            buffer = proc->buffer;
            INIT_LIST_HEAD(&proc->buffers);
            list_add(&buffer->entry, &proc->buffers);
            buffer->free = 1;
            binder_insert_free_buffer(proc, buffer);
            proc->free_async_space = proc->buffer_size / 2;
            barrier();
            proc->files = get_files_struct(current);
            proc->vma = vma;
            proc->vma_vm_mm = vma->vm_mm;
    
            /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
                     proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
            return 0;
            ......
    }
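
    Because the user and kernel mappings share the same physical pages, the single user_buffer_offset recorded above is enough to translate any kernel address inside the mapping to its user-space counterpart. A one-line sketch of that relationship (the helper name is made up; the driver applies the offset inline):

    static void __user *binder_kaddr_to_uaddr(struct binder_proc *proc, void *kaddr)
    {
            // assumes kaddr lies inside [proc->buffer, proc->buffer + proc->buffer_size)
            return (void __user *)((uintptr_t)kaddr + proc->user_buffer_offset);
    }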
    

    Next, let's look at binder_update_page_range:

    static int binder_update_page_range(struct binder_proc *proc, int allocate,
                                        void *start, void *end,
                                        struct vm_area_struct *vma)
    {
            // here allocate is 1 and end - start == PAGE_SIZE, so only one physical page is allocated for now; later pages are allocated and freed on demand
            void *page_addr;
            unsigned long user_page_addr;
            struct page **page;
            struct mm_struct *mm;
    
            binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
                         "%d: %s pages %p-%p\n", proc->pid,
                         allocate ? "allocate" : "free", start, end);
    
            if (end <= start)
                    return 0;
    
            trace_binder_update_page_range(proc, allocate, start, end);
            ......
            for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                    int ret;
    
                    page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
    
                    BUG_ON(*page);
                    // allocate a physical page for the binder buffer
                    *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
                    if (*page == NULL) {
                            pr_err("%d: binder_alloc_buf failed for page at %p\n",
                                    proc->pid, page_addr);
                            goto err_alloc_page_failed;
                    }
                    // map the physical page into the kernel address space
                    ret = map_kernel_range_noflush((unsigned long)page_addr,
                                            PAGE_SIZE, PAGE_KERNEL, page);
                    flush_cache_vmap((unsigned long)page_addr,
                                    (unsigned long)page_addr + PAGE_SIZE);
                    if (ret != 1) {
                            pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
                                   proc->pid, page_addr);
                            goto err_map_kernel_failed;
                    }
                    
                    user_page_addr =
                            (uintptr_t)page_addr + proc->user_buffer_offset;
                    // map the same physical page into the user address space
                    ret = vm_insert_page(vma, user_page_addr, page[0]);
                    ......
            }
            ......
    }
    
    • Memory layout after executing binder_mmap


    binder_flush

    binder_release
