
Android Binder Mechanism: The ServiceManager Source

Author: Bfmall | Published 2023-02-15 18:03

    1. The servicemanager entry point

    servicemanager is started by the init process. The rc file that launches the servicemanager process is
    /frameworks/native/cmds/servicemanager/servicemanager.rc

    service servicemanager /system/bin/servicemanager
        class core animation
        user system
        group system readproc
        critical
        onrestart restart healthd
        onrestart restart zygote
        onrestart restart audioserver
        onrestart restart media
        onrestart restart surfaceflinger
        onrestart restart inputflinger
        onrestart restart drm
        onrestart restart cameraserver
        onrestart restart keystore
        onrestart restart gatekeeperd
        onrestart restart thermalservice
        writepid /dev/cpuset/system-background/tasks
        shutdown critical
    

    The servicemanager source lives in /frameworks/native/cmds/servicemanager/service_manager.c; let's start from its entry point.

    int main(int argc, char** argv)
    {
        struct binder_state *bs;
        union selinux_callback cb;
        char *driver;
    
        if (argc > 1) {
            driver = argv[1];
        } else {
            //started without arguments by default, so this branch is taken
            driver = "/dev/binder";
        }
        //open the binder driver and mmap a 128KB buffer
        bs = binder_open(driver, 128*1024);
        if (!bs) {
    #ifdef VENDORSERVICEMANAGER
            ALOGW("failed to open binder driver %s\n", driver);
            while (true) {
                sleep(UINT_MAX);
            }
    #else
            ALOGE("failed to open binder driver %s\n", driver);
    #endif
            return -1;
        }
    
        if (binder_become_context_manager(bs)) {
            ALOGE("cannot become context manager (%s)\n", strerror(errno));
            return -1;
        }
    
        cb.func_audit = audit_callback;
        selinux_set_callback(SELINUX_CB_AUDIT, cb);
    #ifdef VENDORSERVICEMANAGER
        cb.func_log = selinux_vendor_log_callback;
    #else
        cb.func_log = selinux_log_callback;
    #endif
        selinux_set_callback(SELINUX_CB_LOG, cb);
    
    #ifdef VENDORSERVICEMANAGER
        sehandle = selinux_android_vendor_service_context_handle();
    #else
        sehandle = selinux_android_service_context_handle();
    #endif
        selinux_status_open(true);
    
        if (sehandle == NULL) {
            ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
            abort();
        }
    
        if (getcon(&service_manager_context) != 0) {
            ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
            abort();
        }
    
        /**
         * binder_loop receives messages and hands each parsed message to svcmgr_handler.
         * binder_loop wraps the following steps:
         * 1. read data
         * 2. parse data, and process it
         * 3. reply
         */
        binder_loop(bs, svcmgr_handler);
    
        return 0;
    }
    

    From main() we can see that it does three main things:

    Open the /dev/binder device and map 128KB of memory for it.
    Tell the binder driver to make this process the context_manager; all other processes reach ServiceManager through handle 0 (a sketch of binder_become_context_manager follows below).
    Enter a loop that keeps reading the binder device for service requests and dispatches each one to the svcmgr_handler callback.
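
    For the second step, binder_become_context_manager is just a thin wrapper around an ioctl on the binder fd. In the binder.c that matches this version (the one that understands BR_TRANSACTION_SEC_CTX), it looks roughly like the sketch below: it first tries the extended BINDER_SET_CONTEXT_MGR_EXT ioctl, which also asks the driver to attach the sender's SELinux context to incoming transactions, and falls back to the legacy BINDER_SET_CONTEXT_MGR if the kernel is too old. Treat this as a sketch rather than the exact source.

    int binder_become_context_manager(struct binder_state *bs)
    {
        struct flat_binder_object obj;
        memset(&obj, 0, sizeof(obj));
        obj.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;

        //ask the driver to make us the context manager and to deliver the
        //caller's SELinux context with each incoming transaction
        int result = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR_EXT, &obj);

        if (result != 0) {
            //older kernels do not know BINDER_SET_CONTEXT_MGR_EXT;
            //fall back to the legacy ioctl without the security-context feature
            result = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
        }
        return result;
    }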

    Code notes:
    BINDER_SERVICE_MANAGER is defined (in binder.h) as follows:

    #define BINDER_SERVICE_MANAGER ((void*) 0)
    

    What does this value mean?
    Every service must register with servicemanager and gets a handle; services are told apart by these handles. servicemanager itself is also a service, and its handle is 0, i.e. BINDER_SERVICE_MANAGER. Every service lookup starts from servicemanager.
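
    To make handle 0 concrete, here is a hedged client-side lookup sketch, modeled on the old svcmgr_lookup test helper and the bio_*/binder_call helpers from the same binder.c; the function name lookup_service and the extra worksource word are assumptions added to match the handler shown in section 3, not code from this article.

    //sketch: look up a service by name by sending SVC_MGR_CHECK_SERVICE to handle 0
    uint32_t lookup_service(struct binder_state *bs, const char *name)
    {
        uint32_t handle;
        unsigned iodata[512/4];
        struct binder_io msg, reply;

        bio_init(&msg, iodata, sizeof(iodata), 4);
        bio_put_uint32(&msg, 0);                          //strict mode header
        bio_put_uint32(&msg, 0);                          //worksource header (the handler ignores it)
        bio_put_string16_x(&msg, "android.os.IServiceManager");
        bio_put_string16_x(&msg, name);

        //target 0 == BINDER_SERVICE_MANAGER
        if (binder_call(bs, &msg, &reply, 0, SVC_MGR_CHECK_SERVICE))
            return 0;

        handle = bio_get_ref(&reply);                     //the handle packed by bio_put_ref
        if (handle)
            binder_acquire(bs, handle);                   //take a reference on it

        binder_done(bs, &msg, &reply);
        return handle;
    }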

    2. servicemanager opens the binder driver and enters the loop

    As we know, ServiceManager is built on the binder mechanism. Let's step into binder.c and look at binder_open and binder_loop; binder then hands each parsed message back to svcmgr_handler.
    File location: frameworks\native\cmds\servicemanager\binder.c

    2.1 The binder_open function:

    struct binder_state *binder_open(const char* driver, size_t mapsize)
    {
        struct binder_state *bs;
        struct binder_version vers;
    
        bs = malloc(sizeof(*bs));
        if (!bs) {
            errno = ENOMEM;
            return NULL;
        }
    
        bs->fd = open(driver, O_RDWR | O_CLOEXEC);
        if (bs->fd < 0) {
            fprintf(stderr,"binder: cannot open %s (%s)\n",
                    driver, strerror(errno));
            goto fail_open;
        }
    
        if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
            (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
            fprintf(stderr,
                    "binder: kernel driver version (%d) differs from user space version (%d)\n",
                    vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
            goto fail_open;
        }
    
        bs->mapsize = mapsize;
        bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
        if (bs->mapped == MAP_FAILED) {
            fprintf(stderr,"binder: cannot map device (%s)\n",
                    strerror(errno));
            goto fail_map;
        }
    
        return bs;
    
    fail_map:
        close(bs->fd);
    fail_open:
        free(bs);
        return NULL;
    }
    

    2.2 The binder_loop function:

    void binder_loop(struct binder_state *bs, binder_handler func)
    {
        int res;
        struct binder_write_read bwr;
        uint32_t readbuf[32];
    
        bwr.write_size = 0;
        bwr.write_consumed = 0;
        bwr.write_buffer = 0;
    
        readbuf[0] = BC_ENTER_LOOPER;
        binder_write(bs, readbuf, sizeof(uint32_t));
    
        for (;;) {
            bwr.read_size = sizeof(readbuf);
            bwr.read_consumed = 0;
            bwr.read_buffer = (uintptr_t) readbuf;
    
            res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    
            if (res < 0) {
                ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
                break;
            }
    
            res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
            if (res == 0) {
                ALOGE("binder_loop: unexpected reply?!\n");
                break;
            }
            if (res < 0) {
                ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
                break;
            }
        }
    }
    
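    Each pass of the loop drives one BINDER_WRITE_READ ioctl. Its argument is the driver's binder_write_read struct (from the binder UAPI header), which carries an optional write buffer and an optional read buffer in a single call:

    struct binder_write_read {
        binder_size_t    write_size;      //bytes of commands to send to the driver
        binder_size_t    write_consumed;  //bytes the driver actually consumed
        binder_uintptr_t write_buffer;    //user-space address of the command buffer
        binder_size_t    read_size;       //capacity of the return buffer
        binder_size_t    read_consumed;   //bytes the driver filled in
        binder_uintptr_t read_buffer;     //user-space address of the return buffer
    };

    In binder_loop only the read half is used (write_size stays 0), so the ioctl blocks until the driver has work for servicemanager.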

    The binder_write and binder_parse functions that binder_loop calls:

    //the binder_write function
    int binder_write(struct binder_state *bs, void *data, size_t len)
    {
        struct binder_write_read bwr;
        int res;
    
        bwr.write_size = len;
        bwr.write_consumed = 0;
        bwr.write_buffer = (uintptr_t) data;
        bwr.read_size = 0;
        bwr.read_consumed = 0;
        bwr.read_buffer = 0;
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0) {
            fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                    strerror(errno));
        }
        return res;
    }
    
    
    //the binder_parse function (parses received messages)
    int binder_parse(struct binder_state *bs, struct binder_io *bio,
                     uintptr_t ptr, size_t size, binder_handler func)
    {
        int r = 1;
        uintptr_t end = ptr + (uintptr_t) size;
    
        while (ptr < end) {
            uint32_t cmd = *(uint32_t *) ptr;
            ptr += sizeof(uint32_t);
    #if TRACE
            fprintf(stderr,"%s:\n", cmd_name(cmd));
    #endif
            switch(cmd) {
            case BR_NOOP:
                break;
            case BR_TRANSACTION_COMPLETE:
                break;
            case BR_INCREFS:
            case BR_ACQUIRE:
            case BR_RELEASE:
            case BR_DECREFS:
    #if TRACE
                fprintf(stderr,"  %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
    #endif
                ptr += sizeof(struct binder_ptr_cookie);
                break;
            case BR_TRANSACTION_SEC_CTX:
            case BR_TRANSACTION: {
                struct binder_transaction_data_secctx txn;
                if (cmd == BR_TRANSACTION_SEC_CTX) {
                    if ((end - ptr) < sizeof(struct binder_transaction_data_secctx)) {
                        ALOGE("parse: txn too small (binder_transaction_data_secctx)!\n");
                        return -1;
                    }
                    memcpy(&txn, (void*) ptr, sizeof(struct binder_transaction_data_secctx));
                    ptr += sizeof(struct binder_transaction_data_secctx);
                } else /* BR_TRANSACTION */ {
                    if ((end - ptr) < sizeof(struct binder_transaction_data)) {
                        ALOGE("parse: txn too small (binder_transaction_data)!\n");
                        return -1;
                    }
                    memcpy(&txn.transaction_data, (void*) ptr, sizeof(struct binder_transaction_data));
                    ptr += sizeof(struct binder_transaction_data);
    
                    txn.secctx = 0;
                }
    
                binder_dump_txn(&txn.transaction_data);
                if (func) {
                    unsigned rdata[256/4];
                    struct binder_io msg;
                    struct binder_io reply;
                    int res;
    
                    bio_init(&reply, rdata, sizeof(rdata), 4);
                    bio_init_from_txn(&msg, &txn.transaction_data);
                    res = func(bs, &txn, &msg, &reply);
                    if (txn.transaction_data.flags & TF_ONE_WAY) {
                        binder_free_buffer(bs, txn.transaction_data.data.ptr.buffer);
                    } else {
                        binder_send_reply(bs, &reply, txn.transaction_data.data.ptr.buffer, res);
                    }
                }
                break;
            }
            case BR_REPLY: {
                struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
                if ((end - ptr) < sizeof(*txn)) {
                    ALOGE("parse: reply too small!\n");
                    return -1;
                }
                binder_dump_txn(txn);
                if (bio) {
                    bio_init_from_txn(bio, txn);
                    bio = 0;
                } else {
                    /* todo FREE BUFFER */
                }
                ptr += sizeof(*txn);
                r = 0;
                break;
            }
            case BR_DEAD_BINDER: {
                struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
                ptr += sizeof(binder_uintptr_t);
                death->func(bs, death->ptr);
                break;
            }
            case BR_FAILED_REPLY:
                r = -1;
                break;
            case BR_DEAD_REPLY:
                r = -1;
                break;
            default:
                ALOGE("parse: OOPS %d\n", cmd);
                return -1;
            }
        }
    
        return r;
    }
    

    The binder_send_reply function (once a transaction has been received, parsed and handled, the reply is packaged and written back to the binder driver, together with a BC_FREE_BUFFER command for the transaction buffer):

    void binder_send_reply(struct binder_state *bs,
                           struct binder_io *reply,
                           binder_uintptr_t buffer_to_free,
                           int status)
    {
        struct {
            uint32_t cmd_free;
            binder_uintptr_t buffer;
            uint32_t cmd_reply;
            struct binder_transaction_data txn;
        } __attribute__((packed)) data;
    
        data.cmd_free = BC_FREE_BUFFER;
        data.buffer = buffer_to_free;
        data.cmd_reply = BC_REPLY;
        data.txn.target.ptr = 0;
        data.txn.cookie = 0;
        data.txn.code = 0;
        if (status) {
            data.txn.flags = TF_STATUS_CODE;
            data.txn.data_size = sizeof(int);
            data.txn.offsets_size = 0;
            data.txn.data.ptr.buffer = (uintptr_t)&status;
            data.txn.data.ptr.offsets = 0;
        } else {
            data.txn.flags = 0;
            data.txn.data_size = reply->data - reply->data0;
            data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
            data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
            data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
        }
        binder_write(bs, &data, sizeof(data));
    }
    
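    The data/data0 and offs/offs0 pairs used above come from binder_io, the small cursor structure (declared in the servicemanager's binder.h) that bio_init and the bio_put_*/bio_get_* helpers operate on, roughly:

    struct binder_io
    {
        char *data;            //current read/write position in the data buffer
        binder_size_t *offs;   //current position in the offsets array
        size_t data_avail;     //bytes still available in the data buffer
        size_t offs_avail;     //entries still available in the offsets array

        char *data0;           //start of the data buffer
        binder_size_t *offs0;  //start of the offsets array
        uint32_t flags;
        uint32_t unused;
    };

    So reply->data - reply->data0 in binder_send_reply is simply how many payload bytes the handler wrote, and the offs pointers give the size of the offsets array in the same way.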

    3. Now let's look at the implementation of the svcmgr_handler function:

    int svcmgr_handler(struct binder_state *bs,
                       struct binder_transaction_data_secctx *txn_secctx,
                       struct binder_io *msg,
                       struct binder_io *reply)
    {
        struct svcinfo *si;
        uint16_t *s;
        size_t len;
        uint32_t handle;
        uint32_t strict_policy;
        int allow_isolated;
        uint32_t dumpsys_priority;
        //binder data-transfer structure
        struct binder_transaction_data *txn = &txn_secctx->transaction_data;
    
        //ALOGI("target=%p code=%d pid=%d uid=%d\n",
        //      (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);
    
        if (txn->target.ptr != BINDER_SERVICE_MANAGER)
            return -1;
    
        if (txn->code == PING_TRANSACTION)
            return 0;
    
        // Equivalent to Parcel::enforceInterface(), reading the RPC
        // header with the strict mode policy mask and the interface name.
        // Note that we ignore the strict_policy and don't propagate it
        // further (since we do no outbound RPCs anyway).
        strict_policy = bio_get_uint32(msg);
        bio_get_uint32(msg);  // Ignore worksource header.
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
    
        if ((len != (sizeof(svcmgr_id) / 2)) ||
            memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
            fprintf(stderr,"invalid id %s\n", str8(s, len));
            return -1;
        }
    
        if (sehandle && selinux_status_updated() > 0) {
    #ifdef VENDORSERVICEMANAGER
            struct selabel_handle *tmp_sehandle = selinux_android_vendor_service_context_handle();
    #else
            struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
    #endif
            if (tmp_sehandle) {
                selabel_close(sehandle);
                sehandle = tmp_sehandle;
            }
        }
    
        switch(txn->code) {
        //get a service / check a service
        case SVC_MGR_GET_SERVICE:
        case SVC_MGR_CHECK_SERVICE:
            s = bio_get_string16(msg, &len);
            if (s == NULL) {
                return -1;
            }
            //function that looks up the service
            handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid,
                                     (const char*) txn_secctx->secctx);
            if (!handle)
                break;
            bio_put_ref(reply, handle);
            return 0;
    
        //add a service
        case SVC_MGR_ADD_SERVICE:
            s = bio_get_string16(msg, &len);
            if (s == NULL) {
                return -1;
            }
            handle = bio_get_ref(msg);
            allow_isolated = bio_get_uint32(msg) ? 1 : 0;
            dumpsys_priority = bio_get_uint32(msg);
            //function that registers the service
            if (do_add_service(bs, s, len, handle, txn->sender_euid, allow_isolated, dumpsys_priority,
                               txn->sender_pid, (const char*) txn_secctx->secctx))
                return -1;
            break;
    
        //list the registered services
        case SVC_MGR_LIST_SERVICES: {
            uint32_t n = bio_get_uint32(msg);
            uint32_t req_dumpsys_priority = bio_get_uint32(msg);
            //permission check: is the caller allowed to list services?
            if (!svc_can_list(txn->sender_pid, (const char*) txn_secctx->secctx, txn->sender_euid)) {
                ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                        txn->sender_euid);
                return -1;
            }
            si = svclist;
            // walk through the list of services n times skipping services that
            // do not support the requested priority
            while (si) {
                if (si->dumpsys_priority & req_dumpsys_priority) {
                    if (n == 0) break;
                    n--;
                }
                si = si->next;
            }
            if (si) {
                bio_put_string16(reply, si->name);
                return 0;
            }
            return -1;
        }
        default:
            ALOGE("unknown code %d\n", txn->code);
            return -1;
        }
    
        bio_put_uint32(reply, 0);
        return 0;
    }
    

    Let's first get to know binder's data transfer carrier, binder_transaction_data:

    struct binder_transaction_data {
      union {
        /* When binder_transaction_data is sent from a user-space process to the
           binder driver, handle identifies the target of the transaction inside
           the driver, i.e. the transaction is delivered to that handle; its value
           is the target's binder reference in the driver. */
        __u32 handle;
        /* When binder_transaction_data is delivered by the binder driver to a
           user-space process, ptr identifies the target in user space, i.e. the
           transaction is handed to the service behind ptr; ptr is that service's
           local binder object in user space. */
        binder_uintptr_t ptr;
      } target;  // the target of this transaction (this data packet is meant for that target)
      // cookie is only meaningful when the transaction is delivered from the driver to user space;
      // its value is the local binder object in the server's native layer that handles the transaction
      binder_uintptr_t cookie;
      // transaction code: identifies the requested operation, agreed between sender
      // and receiver (for servicemanager, e.g. SVC_MGR_ADD_SERVICE)
      __u32 code;
      /* General information about the transaction. */
      __u32 flags;
      //pid and euid of the transaction initiator
      pid_t sender_pid;
      uid_t sender_euid;
      // size of the transaction data, in bytes
      binder_size_t data_size;
      // size of the offsets array, in bytes
      binder_size_t offsets_size;
      // data is a union: when the payload is small it can live directly in buf[8];
      // when it is larger, the buffer pointer describes an allocated data buffer.
      union {
        struct {
           /* transaction data */
          binder_uintptr_t buffer;
          binder_uintptr_t offsets;
        } ptr;
        __u8 buf[8];
      } data;
    };
    

    As you can see, svcmgr_handler switches on the transaction code in the binder data and handles three kinds of requests: SVC_MGR_GET_SERVICE (and SVC_MGR_CHECK_SERVICE), SVC_MGR_ADD_SERVICE, and SVC_MGR_LIST_SERVICES.
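
    For the GET/CHECK_SERVICE reply, the handle is not returned as a plain integer: bio_put_ref (in binder.c) wraps it in a flat_binder_object of type BINDER_TYPE_HANDLE so that the driver can translate it into a reference that is valid in the requesting process. A sketch of that helper (field names per the current UAPI header; older versions write obj->type instead of obj->hdr.type):

    void bio_put_ref(struct binder_io *bio, uint32_t handle)
    {
        struct flat_binder_object *obj;

        if (handle)
            obj = bio_alloc_obj(bio);           //also records an entry in the offsets array
        else
            obj = bio_alloc(bio, sizeof(*obj));

        if (!obj)
            return;

        obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
        obj->hdr.type = BINDER_TYPE_HANDLE;     //a reference, not a local binder object
        obj->handle = handle;
        obj->cookie = 0;
    }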

    3.1 The service lookup function do_find_service:

    uint32_t do_find_service(const uint16_t *s, size_t len, uid_t uid, pid_t spid, const char* sid)
    {
        struct svcinfo *si = find_svc(s, len);
    
        if (!si || !si->handle) {
            return 0;
        }
    
        if (!si->allow_isolated) {
            // If this service doesn't allow access from isolated processes,
            // then check the uid to see if it is isolated.
            uid_t appid = uid % AID_USER;
            if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
                return 0;
            }
        }
    
        if (!svc_can_find(s, len, spid, sid, uid)) {
            return 0;
        }
    
        return si->handle;
    }
    

    The find_svc helper it uses to search for the service:

    struct svcinfo *find_svc(const uint16_t *s16, size_t len)
    {
        struct svcinfo *si;
    
        for (si = svclist; si; si = si->next) {
            if ((len == si->len) &&
                !memcmp(s16, si->name, len * sizeof(uint16_t))) {
                return si;
            }
        }
        return NULL;
    }
    

    svclist is a singly linked list that stores the information of every service registered with servicemanager. find_svc walks the svclist, using the service name as the key, and returns the matching entry (the node layout is sketched below).
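
    For reference, each node of svclist is an svcinfo, declared in service_manager.c roughly as follows:

    struct svcinfo
    {
        struct svcinfo *next;       //next entry in the singly linked svclist
        uint32_t handle;            //binder handle registered for this service
        struct binder_death death;  //death-notification hook (svcinfo_death)
        int allow_isolated;         //may isolated processes access it?
        uint32_t dumpsys_priority;  //priority mask used by SVC_MGR_LIST_SERVICES
        size_t len;                 //length of the name, in 16-bit units
        uint16_t name[0];           //UTF-16 service name, stored inline
    };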

    3.2 The service registration function do_add_service:

    int do_add_service(struct binder_state *bs, const uint16_t *s, size_t len, uint32_t handle,
                       uid_t uid, int allow_isolated, uint32_t dumpsys_priority, pid_t spid, const char* sid) {
        struct svcinfo *si;
    
        //ALOGI("add_service('%s',%x,%s) uid=%d\n", str8(s, len), handle,
        //        allow_isolated ? "allow_isolated" : "!allow_isolated", uid);
    
        if (!handle || (len == 0) || (len > 127))
            return -1;
    
        //check whether the caller may register this service
        if (!svc_can_register(s, len, spid, sid, uid)) {
            ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
                 str8(s, len), handle, uid);
            return -1;
        }
        //check whether the service is already registered
        si = find_svc(s, len);
        if (si) {
            if (si->handle) {
                ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                     str8(s, len), handle, uid);
                svcinfo_death(bs, si);
            }
            si->handle = handle;
        } else {
            //the service is not registered yet: allocate a node, fill it in, and link it into svclist
            si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
            if (!si) {
                ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                     str8(s, len), handle, uid);
                return -1;
            }
            si->handle = handle;
            si->len = len;
            memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
            si->name[len] = '\0';
            si->death.func = (void*) svcinfo_death;
            si->death.ptr = si;
            si->allow_isolated = allow_isolated;
            si->dumpsys_priority = dumpsys_priority;
            si->next = svclist;
            svclist = si;
        }
    
        binder_acquire(bs, handle);
        binder_link_to_death(bs, handle, &si->death);
        return 0;
    }
    

    This function first calls svc_can_register to check whether the caller has permission to register the service; if not, it returns immediately and the registration fails. It then calls find_svc to see whether the service is already registered: if an entry with a live handle already exists, the old reference is dropped via svcinfo_death and the entry's handle is overwritten with the new one; otherwise a new svcinfo node is allocated and linked into the svclist. Finally it takes a reference on the handle and registers a death notification for it.
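
    The path above is what a client-side registration triggers. Below is a hedged sketch of that client side, modeled on the old svcmgr_publish test helper and the bio_*/binder_call helpers from binder.c; the name publish_service, the worksource word and the priority value are assumptions added to match the fields this version of svcmgr_handler reads, not code from this article.

    //sketch: register the local binder object 'ptr' under 'name' with servicemanager
    int publish_service(struct binder_state *bs, const char *name, void *ptr)
    {
        int status;
        unsigned iodata[512/4];
        struct binder_io msg, reply;

        bio_init(&msg, iodata, sizeof(iodata), 4);
        bio_put_uint32(&msg, 0);                      //strict mode header
        bio_put_uint32(&msg, 0);                      //worksource header (the handler ignores it)
        bio_put_string16_x(&msg, "android.os.IServiceManager");
        bio_put_string16_x(&msg, name);
        bio_put_obj(&msg, ptr);                       //arrives at servicemanager as the handle
        bio_put_uint32(&msg, 0);                      //allow_isolated = false
        bio_put_uint32(&msg, 1 << 3);                 //dumpsys priority mask (DUMP_FLAG_PRIORITY_DEFAULT
                                                      //in the C++ headers; the value here is an assumption)

        if (binder_call(bs, &msg, &reply, 0 /* BINDER_SERVICE_MANAGER */, SVC_MGR_ADD_SERVICE))
            return -1;

        status = bio_get_uint32(&reply);              //0 on success (see the end of svcmgr_handler)
        binder_done(bs, &msg, &reply);
        return status;
    }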

    4. How ServiceManager manages services

    From the source analysis above we know that ServiceManager manages a set of services on top of the binder communication mechanism.
    The execution path of the code above can be summarized in the following diagram:

    [Figure: execution path from binder_loop through binder_parse to svcmgr_handler and do_find_service / do_add_service]

    Abstracting the code walkthrough into a logical view:

    [Figure: logical view of ServiceManager registering and looking up services]

    In summary, servicemanager is responsible for registering and looking up the other system services; it is the manager of the system services.

    Reference: https://www.cnblogs.com/bugzone/p/ServiceManager.html
