The Startup Process of ServiceManager

Author: ColdWave | Published 2018-07-09 16:00
    • ServiceManager is one of the core components of Binder inter-process communication; it plays the role of the context manager (Context Manager) of the Binder IPC mechanism.
    • It is responsible for managing the Service components in the system, and it offers Client components the service of obtaining proxy objects for those Services (a client-side lookup sketch follows this list).
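
    For a sense of what that looks like from a Client's point of view, here is a lookup sketch modeled on the small native test client that ships next to servicemanager (bctest.c); helpers such as binder_call, bio_init and bio_put_string16_x come from that code and are assumed here rather than defined in this article:

    // Ask ServiceManager (handle 0) for the handle of a named service.
    uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
    {
        uint32_t handle;
        unsigned iodata[512/4];
        struct binder_io msg, reply;

        bio_init(&msg, iodata, sizeof(iodata), 4);
        bio_put_uint32(&msg, 0);                // strict-mode policy header
        bio_put_string16_x(&msg, SVC_MGR_NAME); // "android.os.IServiceManager"
        bio_put_string16_x(&msg, name);         // the service being looked up

        if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
            return 0;

        handle = bio_get_ref(&reply);           // the returned binder reference
        if (handle)
            binder_acquire(bs, handle);

        binder_done(bs, &msg, &reply);
        return handle;
    }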

    servicemanager.rc

    service servicemanager /system/bin/servicemanager
        class core animation
        user system
        group system readproc
        critical
        onrestart restart healthd
        onrestart restart zygote
        onrestart restart audioserver
        onrestart restart media
        onrestart restart surfaceflinger
        onrestart restart inputflinger
        onrestart restart drm
        onrestart restart cameraserver
        writepid /dev/cpuset/system-background/tasks
        shutdown critical
    
    • servicemanager: the process name.
    • /system/bin/servicemanager: the program file.
    • user system: the service runs as the system user.
    • critical: marks this as a critical system service. During boot a critical service must not exit; once it exits, the system reboots. If a critical service exits more than 4 times within 4 minutes, the device reboots into recovery mode.
    • onrestart restart xxx: whenever ServiceManager restarts, the xxx process is restarted as well.

    ServiceManager Startup Process

    • Open the Binder device.
    • Register ServiceManager as the Context Manager.
    • Loop, waiting for and handling Clients' communication requests.
    int main(int argc, char** argv)
    {
        struct binder_state *bs;
    
        bs = binder_open("/dev/binder", 128*1024);
    
        binder_become_context_manager(bs);
    
        binder_loop(bs, svcmgr_handler);
    
        return 0;
    }
    

    Open and map the Binder device file.

    struct binder_state
    {
        int fd;
        void *mapped;
        size_t mapsize;
    };
    
    struct binder_state *binder_open(const char* driver, size_t mapsize)
    {
        struct binder_state *bs;
        struct binder_version vers;
    
        bs = malloc(sizeof(*bs));
        if (!bs) {
            errno = ENOMEM;
            return NULL;
        }
    
        bs->fd = open(driver, O_RDWR | O_CLOEXEC);
    
        if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
            (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
            // Error: driver missing or protocol version mismatch
        }
    
        bs->mapsize = mapsize;  // 128 * 1024
        bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    
        return bs;
    }
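
    ServiceManager itself never tears this mapping down, but for symmetry the same binder.c also has a close path; a minimal sketch of it:

    void binder_close(struct binder_state *bs)
    {
        munmap(bs->mapped, bs->mapsize);  // undo the mapping created in binder_open
        close(bs->fd);                    // release /dev/binder
        free(bs);
    }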
    

    Register as the Binder Context Manager.

    int binder_become_context_manager(struct binder_state *bs)
    {
        return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    }
    
    static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    {
        int ret;
        struct binder_proc *proc = filp->private_data;
        struct binder_thread *thread;
        unsigned int size = _IOC_SIZE(cmd);
        void __user *ubuf = (void __user *)arg;
    
        thread = binder_get_thread(proc);  // Get the binder_thread for this binder_proc, i.e. ServiceManager's main thread (sketched right after this function).
    
        switch (cmd) {
        case BINDER_SET_CONTEXT_MGR:
            binder_ioctl_set_ctx_mgr(filp);
            break;
        }
    }
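
    /*
     * binder_get_thread() above is sketched here (abridged from a 4.x binder
     * driver, so take the details as indicative): each calling thread gets its
     * own binder_thread, looked up in proc->threads and created on first use.
     * That is how ServiceManager's main thread obtains its binder_thread.
     */
    static struct binder_thread *binder_get_thread(struct binder_proc *proc)
    {
        struct binder_thread *thread;
        struct binder_thread *new_thread;

        binder_inner_proc_lock(proc);
        thread = binder_get_thread_ilocked(proc, NULL);  // look up current->pid in proc->threads
        binder_inner_proc_unlock(proc);
        if (!thread) {
            new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
            if (new_thread == NULL)
                return NULL;
            binder_inner_proc_lock(proc);
            thread = binder_get_thread_ilocked(proc, new_thread);  // insert the new entry
            binder_inner_proc_unlock(proc);
            if (thread != new_thread)  // lost the race: someone else inserted one first
                kfree(new_thread);
        }
        return thread;
    }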
    
    static int binder_ioctl_set_ctx_mgr(struct file *filp)
    {
        int ret = 0;
        struct binder_proc *proc = filp->private_data;
        struct binder_context *context = proc->context;
        struct binder_node *new_node;
        kuid_t curr_euid = current_euid();
    
        mutex_lock(&context->context_mgr_node_lock);
        if (context->binder_context_mgr_node) {   // A Context Manager has already been set; bail out.
            pr_err("BINDER_SET_CONTEXT_MGR already set\n");
            ret = -EBUSY;
            goto out;
        }
        ret = security_binder_set_context_mgr(proc->tsk);
        if (ret < 0)
            goto out;
        if (uid_valid(context->binder_context_mgr_uid)) {
            if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
                pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                       from_kuid(&init_user_ns, curr_euid),
                       from_kuid(&init_user_ns,
                         context->binder_context_mgr_uid));
                ret = -EPERM;
                goto out;
            }
        } else {
            context->binder_context_mgr_uid = curr_euid;  // Record the caller's effective UID as the Context Manager UID.
        }
        new_node = binder_new_node(proc, NULL); // Create a binder_node; ServiceManager is, in essence, also a BBinder.
        binder_node_lock(new_node);
        // Bump the new node's reference counts so that it cannot be destroyed.
        new_node->local_weak_refs++;
        new_node->local_strong_refs++;
        new_node->has_strong_ref = 1;
        new_node->has_weak_ref = 1;
        context->binder_context_mgr_node = new_node;
        binder_node_unlock(new_node);
        binder_put_node(new_node);
    out:
        mutex_unlock(&context->context_mgr_node_lock);
        return ret;
    }
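
    Being the Context Manager matters because handle 0 in every process refers to this node: when a Client sends a transaction to handle 0, the driver routes it to binder_context_mgr_node. An abridged excerpt from binder_transaction() (same driver; details vary across kernel versions):

    // Inside binder_transaction(), target selection (abridged):
    if (tr->target.handle) {
        // Non-zero handle: resolved through a binder_ref in the sender's proc (omitted here).
    } else {
        // Handle 0 always means the Context Manager, i.e. ServiceManager.
        mutex_lock(&context->context_mgr_node_lock);
        target_node = context->binder_context_mgr_node;
        if (target_node)
            target_node = binder_get_node_refs_for_txn(
                    target_node, &target_proc, &return_error);
        else
            return_error = BR_DEAD_REPLY;
        mutex_unlock(&context->context_mgr_node_lock);
    }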
    

    Loop and wait for Client requests

    void binder_loop(struct binder_state *bs, binder_handler func)
    {
        int res;
        struct binder_write_read bwr;
        uint32_t readbuf[32];
    
        bwr.write_size = 0;
        bwr.write_consumed = 0;
        bwr.write_buffer = 0;
    
        readbuf[0] = BC_ENTER_LOOPER;
        binder_write(bs, readbuf, sizeof(uint32_t)); // Tell the driver that this thread is ready to handle IPC requests.
    
        for (;;) {
            bwr.read_size = sizeof(readbuf);
            bwr.read_consumed = 0;
            bwr.read_buffer = (uintptr_t) readbuf;
    
            ioctl(bs->fd, BINDER_WRITE_READ, &bwr);  // Read Clients' communication requests in a loop.
    
            binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func); // Handle the Clients' communication requests.
        }
    }
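
    binder_write is a thin wrapper around the same BINDER_WRITE_READ ioctl with an empty read buffer, so it only pushes commands (here BC_ENTER_LOOPER) down to the driver. Slightly abridged from the same binder.c:

    int binder_write(struct binder_state *bs, void *data, size_t len)
    {
        struct binder_write_read bwr;
        int res;

        bwr.write_size = len;
        bwr.write_consumed = 0;
        bwr.write_buffer = (uintptr_t) data;  // here: the single BC_ENTER_LOOPER command
        bwr.read_size = 0;                    // write-only: read nothing back
        bwr.read_consumed = 0;
        bwr.read_buffer = 0;
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        return res;
    }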
    

    BC_ENTER_LOOPER

    binder_write -> binder_ioctl -> binder_ioctl_write_read -> binder_thread_write
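
    Of that chain, only binder_thread_write is listed below. binder_ioctl_write_read mainly copies the binder_write_read struct between user and kernel space and dispatches its two halves; an abridged sketch (error handling and wakeups omitted):

    static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
    {
        struct binder_proc *proc = filp->private_data;
        void __user *ubuf = (void __user *)arg;
        struct binder_write_read bwr;

        if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
            return -EFAULT;

        if (bwr.write_size > 0)      // consume the BC_* commands written by user space
            binder_thread_write(proc, thread, bwr.write_buffer,
                                bwr.write_size, &bwr.write_consumed);

        if (bwr.read_size > 0)       // fill the read buffer with BR_* returns
            binder_thread_read(proc, thread, bwr.read_buffer,
                               bwr.read_size, &bwr.read_consumed,
                               filp->f_flags & O_NONBLOCK);

        if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
            return -EFAULT;
        return 0;
    }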

    static int binder_thread_write(struct binder_proc *proc,
                struct binder_thread *thread,
                binder_uintptr_t binder_buffer, size_t size,
                binder_size_t *consumed)
    {
        uint32_t cmd;
        struct binder_context *context = proc->context;
        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;
    
        while (ptr < end && thread->return_error.cmd == BR_OK) {
            int ret;
    
            if (get_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            switch (cmd) {
            case BC_ENTER_LOOPER:
                if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
                    thread->looper |= BINDER_LOOPER_STATE_INVALID;
                }
                thread->looper |= BINDER_LOOPER_STATE_ENTERED;
                break;
            }
        }
    }
    

    As mentioned earlier, a Service's handler threads come from two sources:

    1. Threads the Service registers with the driver on its own. They announce themselves with BC_ENTER_LOOPER, and the driver sets their state to BINDER_LOOPER_STATE_ENTERED.
    2. Threads the Service registers at the driver's request. They announce themselves with BC_REGISTER_LOOPER, and the driver sets their state to BINDER_LOOPER_STATE_REGISTERED (see the kernel-side sketch right after this list).
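
    For comparison, the BC_REGISTER_LOOPER branch of binder_thread_write (abridged from the same driver; details vary across kernel versions) only accepts a registration that the driver actually asked for with BR_SPAWN_LOOPER, and counts it against proc->requested_threads:

    case BC_REGISTER_LOOPER:
        binder_inner_proc_lock(proc);
        if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
            // One thread must not use both BC_ENTER_LOOPER and BC_REGISTER_LOOPER.
            thread->looper |= BINDER_LOOPER_STATE_INVALID;
        } else if (proc->requested_threads == 0) {
            // The driver never requested a new thread (no BR_SPAWN_LOOPER was sent).
            thread->looper |= BINDER_LOOPER_STATE_INVALID;
        } else {
            proc->requested_threads--;
            proc->requested_threads_started++;
        }
        thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
        binder_inner_proc_unlock(proc);
        break;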

    binder_thread_read

    Once the Binder thread has been registered in binder_loop, it keeps reading communication requests from the driver in a loop and passes them to binder_parse for handling.
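
    binder_parse itself is not listed in this article; the sketch below, abridged from the servicemanager-side binder.c (so treat the details as indicative), shows how it walks the read buffer, dispatches BR_TRANSACTION to the handler func (here svcmgr_handler), and sends the reply back:

    int binder_parse(struct binder_state *bs, struct binder_io *bio,
                     uintptr_t ptr, size_t size, binder_handler func)
    {
        uintptr_t end = ptr + (uintptr_t) size;

        while (ptr < end) {
            uint32_t cmd = *(uint32_t *) ptr;
            ptr += sizeof(uint32_t);
            switch (cmd) {
            case BR_NOOP:
                break;
            case BR_TRANSACTION: {
                struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
                if (func) {
                    unsigned rdata[256/4];
                    struct binder_io msg;
                    struct binder_io reply;
                    int res;

                    bio_init(&reply, rdata, sizeof(rdata), 4);
                    bio_init_from_txn(&msg, txn);        // wrap the received transaction data
                    res = func(bs, txn, &msg, &reply);   // svcmgr_handler
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
                ptr += sizeof(*txn);
                break;
            }
            // BR_REPLY, BR_DEAD_BINDER, ... omitted
            default:
                return -1;
            }
        }
        return 0;
    }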

    /*
     * Decide whether a binder_thread may take on work.
    */
    /*
     * 1. If a thread's transaction_stack is not NULL, it is waiting for another
     *    thread to complete some other transaction.
     * 2. If a thread's todo list is not empty, it still has pending work items of its own.
     * Only when transaction_stack == NULL and the todo list is empty may the thread
     * handle pending work items from its owning process's todo list.
    */
    static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
    {
        return !thread->transaction_stack &&
            binder_worklist_empty_ilocked(&thread->todo) &&
            (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                       BINDER_LOOPER_STATE_REGISTERED));
    }
    
    static int binder_wait_for_work(struct binder_thread *thread,
                    bool do_proc_work)
    {
        DEFINE_WAIT(wait);
        struct binder_proc *proc = thread->proc;
        int ret = 0;
    
        freezer_do_not_count();
        binder_inner_proc_lock(proc);
        for (;;) {
            prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
            if (binder_has_work_ilocked(thread, do_proc_work))
                break;
            if (do_proc_work)
                list_add(&thread->waiting_thread_node,
                     &proc->waiting_threads);
            binder_inner_proc_unlock(proc);
            schedule();
            binder_inner_proc_lock(proc);
            list_del_init(&thread->waiting_thread_node);
            if (signal_pending(current)) {
                ret = -ERESTARTSYS;
                break;
            }
        }
        finish_wait(&thread->wait, &wait);
        binder_inner_proc_unlock(proc);
        freezer_count();
    
        return ret;
    }
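
    /*
     * binder_wait_for_work() above polls binder_has_work_ilocked(); a sketch of
     * it (assumed from a 4.x binder driver, field names may differ across
     * versions): a thread has work if its own todo list is pending, if the
     * driver needs it to return to user space, or if it may take work from the
     * process-wide todo list.
     */
    static bool binder_has_work_ilocked(struct binder_thread *thread,
                                        bool do_proc_work)
    {
        return thread->process_todo ||
            thread->looper_need_return ||
            (do_proc_work &&
             !binder_worklist_empty_ilocked(&thread->proc->todo));
    }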
    
    static int binder_thread_read(struct binder_proc *proc,
                      struct binder_thread *thread,
                      binder_uintptr_t binder_buffer, size_t size,
                      binder_size_t *consumed, int non_block)
    {
        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;
    
        int ret = 0;
        int wait_for_proc_work;
    
        if (*consumed == 0) {
            if (put_user(BR_NOOP, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
        }
    
    retry:
        binder_inner_proc_lock(proc);
        wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
        binder_inner_proc_unlock(proc);
    
        // If wait_for_proc_work is 1, this binder_thread is idle and may take
        // process-level work.

        thread->looper |= BINDER_LOOPER_STATE_WAITING;  // Mark the thread as waiting (idle).
    
        if (wait_for_proc_work) {
            if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                        BINDER_LOOPER_STATE_ENTERED))) {
                // The thread is not yet in the looper, i.e. neither BC_REGISTER_LOOPER nor BC_ENTER_LOOPER has been issued.
                binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
                    proc->pid, thread->pid, thread->looper);
                wait_event_interruptible(binder_user_error_wait,
                             binder_stop_on_user_error < 2);
            }
            binder_restore_priority(current, proc->default_priority);
        }
    
        if (non_block) { // Non-blocking call: if nothing can be processed, return right away.
            if (!binder_has_work(thread, wait_for_proc_work)) // Is there pending work? If not, return immediately.
                ret = -EAGAIN;
        } else { // Blocking call.
            ret = binder_wait_for_work(thread, wait_for_proc_work); // Sleep until new work arrives; return at once if work is already pending.
        }

        thread->looper &= ~BINDER_LOOPER_STATE_WAITING;  // There is (or may be) work to process now, so clear the idle flag.
    
        if (ret)
            return ret;
    
        while (1) {
            uint32_t cmd;
            struct binder_transaction_data tr;
            struct binder_work *w = NULL;
            struct list_head *list = NULL;
            struct binder_transaction *t = NULL;
            struct binder_thread *t_from;
    
            binder_inner_proc_lock(proc);
            if (!binder_worklist_empty_ilocked(&thread->todo)) // First check the thread's own todo list; it is handled with priority.
                list = &thread->todo;
            else if (!binder_worklist_empty_ilocked(&proc->todo) &&  // Then check whether the proc's todo list has pending work.
                   wait_for_proc_work)
                list = &proc->todo;
            else { // No pending work found: go back (retry) and keep waiting.
                binder_inner_proc_unlock(proc);
    
                /* no data added */
                if (ptr - buffer == 4 && !thread->looper_need_return)
                    goto retry;
                break;
            }
    
            if (end - ptr < sizeof(tr) + 4) {  // Not enough room left in the user-space read buffer; stop here.
                binder_inner_proc_unlock(proc);
                break;
            }
            w = binder_dequeue_work_head_ilocked(list);  // list is the work list chosen above; dequeue one work item from it to process.
    
            switch (w->type) {
            case BINDER_WORK_TRANSACTION: {
                binder_inner_proc_unlock(proc);
                t = container_of(w, struct binder_transaction, work);
            } break;
            case BINDER_WORK_RETURN_ERROR: {
                struct binder_error *e = container_of(
                        w, struct binder_error, work);
    
                WARN_ON(e->cmd == BR_OK);
                binder_inner_proc_unlock(proc);
                if (put_user(e->cmd, (uint32_t __user *)ptr))
                    return -EFAULT;
                e->cmd = BR_OK;
                ptr += sizeof(uint32_t);
    
                binder_stat_br(proc, thread, cmd);
            } break;
            case BINDER_WORK_TRANSACTION_COMPLETE: {
                binder_inner_proc_unlock(proc);
                cmd = BR_TRANSACTION_COMPLETE;
                if (put_user(cmd, (uint32_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(uint32_t);
    
                binder_stat_br(proc, thread, cmd);
                binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
                         "%d:%d BR_TRANSACTION_COMPLETE\n",
                         proc->pid, thread->pid);
                kfree(w);
                binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
            } break;
            }
    
            if (!t)
                continue;
    
            if (t->buffer->target_node) {
                struct binder_node *target_node = t->buffer->target_node;
                struct binder_priority node_prio;
    
                tr.target.ptr = target_node->ptr;
                tr.cookie =  target_node->cookie;
                node_prio.sched_policy = target_node->sched_policy;
                node_prio.prio = target_node->min_priority;
                binder_transaction_priority(current, t, node_prio,
                                target_node->inherit_rt);
                cmd = BR_TRANSACTION;
            } else {
                tr.target.ptr = 0;
                tr.cookie = 0;
                cmd = BR_REPLY;
            }
            tr.code = t->code;
            tr.flags = t->flags;
            tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
    
            t_from = binder_get_txn_from(t);
            if (t_from) {
                struct task_struct *sender = t_from->proc->tsk;
    
                tr.sender_pid = task_tgid_nr_ns(sender,
                                task_active_pid_ns(current));
            } else {
                tr.sender_pid = 0;
            }
    
            tr.data_size = t->buffer->data_size;
            tr.offsets_size = t->buffer->offsets_size;
            tr.data.ptr.buffer = (binder_uintptr_t)
                ((uintptr_t)t->buffer->data +
                binder_alloc_get_user_buffer_offset(&proc->alloc));
            tr.data.ptr.offsets = tr.data.ptr.buffer +
                        ALIGN(t->buffer->data_size,
                            sizeof(void *));
    
            put_user(cmd, (uint32_t __user *)ptr);
            ptr += sizeof(uint32_t);
            
            copy_to_user(ptr, &tr, sizeof(tr));
            ptr += sizeof(tr);
    
            t->buffer->allow_user_free = 1;
            if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
                binder_inner_proc_lock(thread->proc);
                t->to_parent = thread->transaction_stack;
                t->to_thread = thread;
                thread->transaction_stack = t;
                binder_inner_proc_unlock(thread->proc);
            } else {
                binder_free_transaction(t);
            }
            break;
        }
    
    done:
    
        *consumed = ptr - buffer;
        binder_inner_proc_lock(proc);
        if (proc->requested_threads == 0 &&
            list_empty(&thread->proc->waiting_threads) &&
            proc->requested_threads_started < proc->max_threads &&
            (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
             BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
             /*spawn a new thread if we leave this out */) {
            // The driver asks user space to spawn and register one more thread.
            proc->requested_threads++;
            binder_inner_proc_unlock(proc);
            if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
                return -EFAULT;
            binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
        } else
            binder_inner_proc_unlock(proc);
        return 0;
    }
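
    Back in user space, the data read here ends up in binder_parse, which dispatches it to svcmgr_handler, the handler registered in main() via binder_loop. A heavily abridged sketch of it, modeled on service_manager.c (helper names and signatures are indicative only):

    int svcmgr_handler(struct binder_state *bs,
                       struct binder_transaction_data *txn,
                       struct binder_io *msg,
                       struct binder_io *reply)
    {
        uint16_t *s;
        size_t len;
        uint32_t handle;

        // Checks on txn->target and on the "android.os.IServiceManager"
        // interface token are omitted here.

        switch (txn->code) {
        case SVC_MGR_GET_SERVICE:
        case SVC_MGR_CHECK_SERVICE:
            // Look up a registered service by name and return a reference to it.
            s = bio_get_string16(msg, &len);
            handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
            if (!handle)
                break;
            bio_put_ref(reply, handle);
            return 0;
        case SVC_MGR_ADD_SERVICE:
            // Register a new service: its name plus the binder reference sent by the server.
            s = bio_get_string16(msg, &len);
            handle = bio_get_ref(msg);
            if (do_add_service(bs, s, len, handle, txn->sender_euid,
                               0 /* allow_isolated */, txn->sender_pid))
                return -1;
            break;
        default:
            return -1;
        }

        bio_put_uint32(reply, 0);
        return 0;
    }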
    
    
