Android Binder Driver - Starting the ServiceManager Process

Author: 红橙Darren | Published 2019-09-17 10:11

    Related articles:

    1. Android Framework - Learning the Boot Process
    2. Android Binder Driver - Adding the Media Service
    3. Android Binder Driver - Starting the ServiceManager Process
    4. Android Binder Driver - Kernel Driver Source Code Analysis
    5. Android Binder Driver - Analyzing Service Registration from the Driver Layer

    Related source files:

    /system/core/rootdir/init.rc
    /frameworks/native/cmds/servicemanager/service_manager.c
    /frameworks/native/cmds/servicemanager/binder.c
    

    The ServiceManager process is created by the init process when it parses the init.rc file.

    service servicemanager /system/bin/servicemanager
        class core
        user system
        group system
        critical
        onrestart restart healthd
        onrestart restart zygote
        onrestart restart media
        onrestart restart surfaceflinger
        onrestart restart drm
    

    The corresponding entry point is the main function in /frameworks/native/cmds/servicemanager/service_manager.c:

    int main(int argc, char **argv) {
        struct binder_state *bs;
        // Open the binder driver and request a 128 KB mapping
        bs = binder_open(128*1024);
        ...
    
        // Become the context manager
        if (binder_become_context_manager(bs)) {
            return -1;
        }
        // Check whether SELinux is enabled
        selinux_enabled = is_selinux_enabled(); 
        sehandle = selinux_android_service_context_handle();
        selinux_status_open(true);
    
        if (selinux_enabled > 0) {
            if (sehandle == NULL) {  
                // Failed to get the SELinux handle
                abort(); 
            }
            if (getcon(&service_manager_context) != 0) {
                // Failed to get the service_manager security context
                abort(); 
            }
        }
        ...
    
        // Enter the loop and handle incoming client requests
        binder_loop(bs, svcmgr_handler);
        return 0;
    }
    

    The ServiceManager startup thus consists of three stages:

    • Open the binder driver: binder_open;
    • Register as the context manager for all binder services: binder_become_context_manager;
    • Enter an infinite loop and handle client requests: binder_loop;

    1. Opening the binder driver

    struct binder_state *binder_open(size_t mapsize)
    {
        struct binder_state *bs;
        struct binder_version vers;
    
        bs = malloc(sizeof(*bs));
        if (!bs) {
            errno = ENOMEM;
            return NULL;
        }
        // Open the binder device node
        bs->fd = open("/dev/binder", O_RDWR);
        if (bs->fd < 0) {
            fprintf(stderr,"binder: cannot open device (%s)\n",
                    strerror(errno));
            goto fail_open;
        }
        // Query the driver version and check that it matches the user-space protocol version
        if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
            (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
            fprintf(stderr,
                    "binder: kernel driver version (%d) differs from user space version (%d)\n",
                    vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
            goto fail_open;
        }
        // Record the requested mapping size (128 KB here)
        bs->mapsize = mapsize;
        // Memory-map the binder buffer (handled by binder_mmap in the driver)
        bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
        if (bs->mapped == MAP_FAILED) {
            fprintf(stderr,"binder: cannot map device (%s)\n",
                    strerror(errno));
            goto fail_map;
        }
    
        return bs;
    // Error paths: release whatever was allocated
    fail_map:
        close(bs->fd);
    fail_open:
        free(bs);
        return NULL;
    }
    
    struct binder_state
    {
        // File descriptor of /dev/binder
        int fd; 
        // Start address of the mmap'ed region
        void *mapped; 
        // Size of the mapping, 128 KB by default
        size_t mapsize; 
    };
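
    As a quick sanity check of what binder_open does, here is a small standalone sketch (an illustration only, not part of the original source; the header path <linux/android/binder.h> and sufficient privileges to open /dev/binder on the device are assumptions) that opens the driver directly, queries the protocol version and maps 128 KB, mirroring the steps above:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/android/binder.h>   // provides BINDER_VERSION, BINDER_CURRENT_PROTOCOL_VERSION

    int main(void)
    {
        // Step 1: open the binder device node
        int fd = open("/dev/binder", O_RDWR);
        if (fd < 0) {
            perror("open /dev/binder");
            return 1;
        }

        // Step 2: query the kernel protocol version, like binder_open does
        struct binder_version vers;
        if (ioctl(fd, BINDER_VERSION, &vers) == -1) {
            perror("BINDER_VERSION");
            close(fd);
            return 1;
        }
        printf("kernel protocol version: %d, user-space version: %d\n",
               vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);

        // Step 3: the same read-only, private 128 KB mapping requested above
        void *mapped = mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
        if (mapped == MAP_FAILED) {
            perror("mmap");
            close(fd);
            return 1;
        }

        munmap(mapped, 128 * 1024);
        close(fd);
        return 0;
    }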
    

    2. Registering as the binder context manager

    int binder_become_context_manager(struct binder_state *bs)
    {
        return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    }
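
    Only one process in the system can hold the context-manager role: if another process has already claimed it, or the caller lacks the required privileges, the driver rejects the ioctl and sets errno (typically EBUSY or EPERM). A minimal sketch of how a caller might surface that, assuming the binder_state and binder_become_context_manager above (the wrapper name is hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    // Hypothetical wrapper that reports why the registration failed.
    static int try_become_context_manager(struct binder_state *bs)
    {
        if (binder_become_context_manager(bs) != 0) {
            // Fails if a context manager already exists or the caller is not allowed
            fprintf(stderr, "cannot become context manager (%s)\n", strerror(errno));
            return -1;
        }
        return 0;
    }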
    

    The ioctl call ends up in the driver-level binder_ioctl function; we will analyze its implementation in a later article.

    3. Entering the loop to handle client requests

    void binder_loop(struct binder_state *bs, binder_handler func)
    {
        int res;
        struct binder_write_read bwr;
        uint32_t readbuf[32];
    
        bwr.write_size = 0;
        bwr.write_consumed = 0;
        bwr.write_buffer = 0;
        // Write BC_ENTER_LOOPER to the driver to announce that this thread is entering the loop
        readbuf[0] = BC_ENTER_LOOPER;
        binder_write(bs, readbuf, sizeof(uint32_t));
    
        for (;;) {
            bwr.read_size = sizeof(readbuf);
            bwr.read_consumed = 0;
            bwr.read_buffer = (uintptr_t) readbuf;
            // Block and wait for data from the binder driver
            res  = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    
            if (res < 0) {
                ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
                break;
            }
            // Parse the commands the driver returned on behalf of remote processes
            res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
            if (res == 0) {
                ALOGE("binder_loop: unexpected reply?!\n");
                break;
            }
            if (res < 0) {
                ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
                break;
            }
        }
    }
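
    Both binder_loop and binder_write (below) talk to the driver through a single binder_write_read structure passed to the BINDER_WRITE_READ ioctl. For reference, its definition in the kernel's binder UAPI header is roughly the following (comments added):

    struct binder_write_read {
        binder_size_t    write_size;      // bytes of commands to send to the driver
        binder_size_t    write_consumed;  // bytes actually consumed by the driver
        binder_uintptr_t write_buffer;    // user-space address of the command buffer
        binder_size_t    read_size;       // capacity of the read buffer
        binder_size_t    read_consumed;   // bytes the driver wrote back
        binder_uintptr_t read_buffer;     // user-space address of the read buffer
    };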
    
    int binder_write(struct binder_state *bs, void *data, size_t len)
    {
        struct binder_write_read bwr;
        int res;
        // Number of bytes to write (len)
        bwr.write_size = len;
        bwr.write_consumed = 0;
        // Buffer holding the command to write (BC_ENTER_LOOPER here)
        bwr.write_buffer = (uintptr_t) data;
        // read_size = 0 means nothing is read back
        bwr.read_size = 0;
        bwr.read_consumed = 0;
        bwr.read_buffer = 0;
        // Hand the binder_write_read struct to the binder driver
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0) {
            fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                    strerror(errno));
        }
        return res;
    }
    
    // ptr is the address of the data to read, i.e. bwr.read_buffer
    int binder_parse(struct binder_state *bs, struct binder_io *bio, uintptr_t ptr, size_t size, binder_handler func)
    {
        int r = 1;
        uintptr_t end = ptr + (uintptr_t) size;
    
        while (ptr < end) {
            uint32_t cmd = *(uint32_t *) ptr;
            ptr += sizeof(uint32_t);
            switch(cmd) {
            // No-op, nothing to process
            case BR_NOOP:  
                break;
            case BR_TRANSACTION_COMPLETE:
                break;
            case BR_INCREFS:
            case BR_ACQUIRE:
            case BR_RELEASE:
            case BR_DECREFS:
                ptr += sizeof(struct binder_ptr_cookie);
                break;
            case BR_TRANSACTION: {
                struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
                ...
                binder_dump_txn(txn);
                if (func) {
                    unsigned rdata[256/4];
                    struct binder_io msg; 
                    struct binder_io reply;
                    int res;
                    // Prepare the reply buffer
                    bio_init(&reply, rdata, sizeof(rdata), 4);
                    // Build a binder_io view over the transaction data
                    bio_init_from_txn(&msg, txn);
                    // Invoke the handler callback (svcmgr_handler)
                    res = func(bs, txn, &msg, &reply);
                    // Send the reply back to the binder driver
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
                ptr += sizeof(*txn);
                break;
            }
            case BR_REPLY: {
                struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
                ...
                binder_dump_txn(txn);
                if (bio) {
                    bio_init_from_txn(bio, txn);
                    bio = 0;
                }
                ptr += sizeof(*txn);
                r = 0;
                break;
            }
            case BR_DEAD_BINDER: {
                struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
                ptr += sizeof(binder_uintptr_t);
                // Binder death notification: invoke the registered callback
                death->func(bs, death->ptr);
                break;
            }
            case BR_FAILED_REPLY:
                r = -1;
                break;
            case BR_DEAD_REPLY:
                r = -1;
                break;
            default:
                return -1;
            }
        }
        return r;
    }
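
    The payload that follows BR_TRANSACTION and BR_REPLY is a binder_transaction_data. Its UAPI definition is roughly the following (reproduced from the kernel header; minor details may vary across versions), which explains the txn->target.ptr, txn->code, txn->sender_euid and txn->data.ptr.* accesses used here and in svcmgr_handler below:

    struct binder_transaction_data {
        union {
            __u32            handle;  // target handle, used when sending to the driver
            binder_uintptr_t ptr;     // target binder object, used when receiving
        } target;
        binder_uintptr_t cookie;      // cookie of the target object
        __u32            code;        // transaction code, e.g. SVC_MGR_ADD_SERVICE

        __u32            flags;
        pid_t            sender_pid;
        uid_t            sender_euid;
        binder_size_t    data_size;    // size of the data buffer in bytes
        binder_size_t    offsets_size; // size of the object-offsets array in bytes

        union {
            struct {
                binder_uintptr_t buffer;  // transaction data
                binder_uintptr_t offsets; // offsets of binder objects inside the data
            } ptr;
            __u8 buf[8];
        } data;
    };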
    
    void bio_init(struct binder_io *bio, void *data, size_t maxdata, size_t maxoffs) {
        // Reserve room for maxoffs offset entries at the start of the buffer
        size_t n = maxoffs * sizeof(size_t);
        if (n > maxdata) {
            ...
        }
    
        // The data area starts right after the offsets array
        bio->data = bio->data0 = (char *) data + n;
        bio->offs = bio->offs0 = data;
        bio->data_avail = maxdata - n;
        bio->offs_avail = maxoffs;
        bio->flags = 0;
    }
    
    void bio_init_from_txn(struct binder_io *bio, struct binder_transaction_data *txn)
    {
        // Point the binder_io directly at the kernel-delivered transaction buffer
        bio->data = bio->data0 = (char *)(intptr_t)txn->data.ptr.buffer;
        bio->offs = bio->offs0 = (binder_size_t *)(intptr_t)txn->data.ptr.offsets;
        bio->data_avail = txn->data_size;
        bio->offs_avail = txn->offsets_size / sizeof(size_t);
        bio->flags = BIO_F_SHARED;
    }
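
    For context, the binder_io structure these two helpers initialize is declared in the servicemanager's own binder.h roughly as follows (comments added; layout recalled from AOSP and may differ slightly by version). It is essentially a cursor over a data area plus an array of object offsets:

    struct binder_io
    {
        char *data;            // current write/read position in the data area
        binder_size_t *offs;   // current position in the offsets array
        size_t data_avail;     // bytes still available in the data area
        size_t offs_avail;     // entries still available in the offsets array

        char *data0;           // start of the data area
        binder_size_t *offs0;  // start of the offsets array
        uint32_t flags;        // e.g. BIO_F_SHARED when backed by a kernel buffer
        uint32_t unused;
    };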
    
    int svcmgr_handler(struct binder_state *bs,
                       struct binder_transaction_data *txn,
                       struct binder_io *msg,
                       struct binder_io *reply)
    {
        struct svcinfo *si;
        uint16_t *s;
        size_t len;
        uint32_t handle;
        uint32_t strict_policy;
        int allow_isolated;
    
        //ALOGI("target=%p code=%d pid=%d uid=%d\n",
        //      (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);
        // Check that the transaction is really targeted at the service manager
        if (txn->target.ptr != BINDER_SERVICE_MANAGER)
            return -1;
        // PING_TRANSACTION is a simple liveness check
        if (txn->code == PING_TRANSACTION)
            return 0;
        
        // Dispatch on the transaction code
        switch(txn->code) {
        // Look up / check an existing service
        case SVC_MGR_GET_SERVICE:
        case SVC_MGR_CHECK_SERVICE:
            // Name of the service being queried
            s = bio_get_string16(msg, &len);
            if (s == NULL) {
                return -1;
            }
            // Look up the handle in the service list
            handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
            if (!handle)
                break;
            // Write the handle into the reply
            bio_put_ref(reply, handle);
            return 0;
        // Register a new service
        case SVC_MGR_ADD_SERVICE:
            // Name of the service being registered
            s = bio_get_string16(msg, &len);
            if (s == NULL) {
                return -1;
            }
            // Handle (binder reference) of the service
            handle = bio_get_ref(msg);
            // Add the service to the list
            if (do_add_service(bs, s, len, handle, txn->sender_euid,
                allow_isolated, txn->sender_pid))
                return -1;
            break;
        default:
            ALOGE("unknown code %d\n", txn->code);
            return -1;
        }
    
        bio_put_uint32(reply, 0);
        return 0;
    }
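
    For completeness, this is roughly what the other end of a SVC_MGR_CHECK_SERVICE transaction looks like. The sketch below is modeled on the client helpers that ship alongside binder.c (bio_init, bio_put_uint32, bio_put_string16_x, binder_call, bio_get_ref, binder_acquire, binder_done); the lookup_service wrapper itself is illustrative rather than a verbatim copy of the AOSP helper:

    // Illustrative client-side lookup against the service manager (handle 0).
    uint32_t lookup_service(struct binder_state *bs, const char *name)
    {
        unsigned iodata[512/4];
        struct binder_io msg, reply;
        uint32_t handle;

        bio_init(&msg, iodata, sizeof(iodata), 4);
        bio_put_uint32(&msg, 0);                 // strict-mode policy header
        bio_put_string16_x(&msg, SVC_MGR_NAME);  // interface token "android.os.IServiceManager"
        bio_put_string16_x(&msg, name);          // service name being looked up

        // Target handle 0 is the service manager; the call lands in svcmgr_handler above
        if (binder_call(bs, &msg, &reply, 0, SVC_MGR_CHECK_SERVICE))
            return 0;

        handle = bio_get_ref(&reply);            // the handle written by bio_put_ref above
        if (handle)
            binder_acquire(bs, handle);          // take a strong reference on it

        binder_done(bs, &msg, &reply);
        return handle;
    }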
    
    // Look up a service's handle in the service list
    uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
    {
        // Find the svcinfo entry by name
        struct svcinfo *si = find_svc(s, len);
        // Service not found (or not yet registered)
        if (!si || !si->handle) {
            return 0;
        }
        ...
        // Return the service's handle
        return si->handle;
    }
    
    struct svcinfo *find_svc(const uint16_t *s16, size_t len)
    {
        struct svcinfo *si;
    
        for (si = svclist; si; si = si->next) {
            // Return the entry whose length and name match exactly
            if ((len == si->len) &&
                !memcmp(s16, si->name, len * sizeof(uint16_t))) {
                return si;
            }
        }
        return NULL;
    }
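
    The svclist traversed here is a singly linked list of svcinfo nodes. Its declaration in service_manager.c looks roughly like this (comments added; exact layout may vary by Android version):

    struct svcinfo
    {
        struct svcinfo *next;       // next registered service in the list
        uint32_t handle;            // binder handle of the service
        struct binder_death death;  // death-notification callback and cookie
        int allow_isolated;         // whether isolated processes may access it
        size_t len;                 // name length in UTF-16 code units
        uint16_t name[0];           // UTF-16 service name (flexible array member)
    };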
    
    int do_add_service(struct binder_state *bs,
                       const uint16_t *s, size_t len,
                       uint32_t handle, uid_t uid, int allow_isolated,
                       pid_t spid)
    {
        struct svcinfo *si;
    
        if (!handle || (len == 0) || (len > 127))
            return -1;
    
        // Permission check: is this caller allowed to register the service?
        if (!svc_can_register(s, len, spid)) {
            return -1;
        }
    
        // Look for an existing entry with the same name
        si = find_svc(s, len);
        if (si) {
            if (si->handle) {
                // Already registered: release the previously registered binder
                svcinfo_death(bs, si); 
            }
            si->handle = handle;
        } else {
            si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
            // Out of memory
            if (!si) {  
                return -1;
            }
            // Record the handle
            si->handle = handle;
            si->len = len;
            // Copy in the service name
            memcpy(si->name, s, (len + 1) * sizeof(uint16_t)); 
            si->name[len] = '\0';
            si->death.func = (void*) svcinfo_death;
            si->death.ptr = si;
            si->allow_isolated = allow_isolated;
            // svclist holds all registered services; insert at the head
            si->next = svclist; 
            svclist = si;
        }
    
        // Send a BC_ACQUIRE command for this handle to the binder driver via ioctl
        binder_acquire(bs, handle);
        // Send BC_REQUEST_DEATH_NOTIFICATION to the binder driver via ioctl so cleanup can happen when the service dies
        binder_link_to_death(bs, handle, &si->death);
        return 0;
    }
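
    The two calls at the end are thin wrappers in binder.c that each push a single command into the driver through binder_write. Their implementations, and the binder_death struct they rely on, look roughly like this (recalled from AOSP; details may differ slightly by version):

    struct binder_death {
        void (*func)(struct binder_state *bs, void *ptr);  // invoked on BR_DEAD_BINDER
        void *ptr;                                         // cookie, here the svcinfo pointer
    };

    void binder_acquire(struct binder_state *bs, uint32_t target)
    {
        uint32_t cmd[2];
        cmd[0] = BC_ACQUIRE;   // take a strong reference on the handle
        cmd[1] = target;
        binder_write(bs, cmd, sizeof(cmd));
    }

    void binder_link_to_death(struct binder_state *bs, uint32_t target, struct binder_death *death)
    {
        struct {
            uint32_t cmd;
            struct binder_handle_cookie payload;
        } __attribute__((packed)) data;

        data.cmd = BC_REQUEST_DEATH_NOTIFICATION;  // ask to be notified when the target dies
        data.payload.handle = target;
        data.payload.cookie = (uintptr_t) death;   // handed back with BR_DEAD_BINDER
        binder_write(bs, &data, sizeof(data));
    }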
    
    void binder_send_reply(struct binder_state *bs, struct binder_io *reply, binder_uintptr_t buffer_to_free, int status) {
        struct {
            uint32_t cmd_free;
            binder_uintptr_t buffer;
            uint32_t cmd_reply;
            struct binder_transaction_data txn;
        } __attribute__((packed)) data;
    
        data.cmd_free = BC_FREE_BUFFER; 
        data.buffer = buffer_to_free;
        // Reply command
        data.cmd_reply = BC_REPLY; 
        data.txn.target.ptr = 0;
        data.txn.cookie = 0;
        data.txn.code = 0;
        if (status) {
            data.txn.flags = TF_STATUS_CODE;
            data.txn.data_size = sizeof(int);
            data.txn.offsets_size = 0;
            data.txn.data.ptr.buffer = (uintptr_t)&status;
            data.txn.data.ptr.offsets = 0;
        } else {
            data.txn.flags = 0;
            data.txn.data_size = reply->data - reply->data0;
            data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
            data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
            data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
        }
        // Send both commands (BC_FREE_BUFFER + BC_REPLY) to the binder driver in one write
        binder_write(bs, &data, sizeof(data));
    }
    

    Finally, recall the addService call from the Media service registration article: it ultimately crosses process boundaries through the binder driver and ends up executing do_add_service in ServiceManager.

    (Figure: the addService communication flow)

    Video: https://pan.baidu.com/s/1j_wgzITcgABVbThvO0VBPA
    Video password: jj4b
