An Introduction to ServiceManager

Author: ObadiObada | Published 2018-01-12 19:18

Overview

In the Binder inter-process communication mechanism, Service Manager plays the role of the context manager: it manages the Service components in the system and provides Client components with the means to obtain proxy objects for those Services. At the same time, it is itself a special Service component. The ServiceManager code lives at the following path:

frameworks/native/cmds/servicemanager

The Android.bp file in that directory looks like this:

cc_binary {
    name: "servicemanager",
    defaults: ["servicemanager_flags"],
    srcs: [
        "service_manager.c",
        "binder.c",
    ],
    shared_libs: ["libcutils", "libselinux"],
    init_rc: ["servicemanager.rc"],
}

cc_binary {
    name: "vndservicemanager",
    defaults: ["servicemanager_flags"],
    vendor: true,
    srcs: [
        "service_manager.c",
        "binder.c",
    ],
    cflags: [
        "-DVENDORSERVICEMANAGER=1",
    ],
    shared_libs: ["libcutils", "libselinux_vendor"],
    init_rc: ["vndservicemanager.rc"],
}

As the build file shows, the same sources are compiled into two executables, one installed under system and one under vendor; the differences are the value of the VENDORSERVICEMANAGER macro and the SELinux library they link against (libselinux vs. libselinux_vendor).
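
As a rough illustration (not verbatim AOSP code), service_manager.c uses this macro mainly to select the SELinux service-context backend; the exact code varies between Android versions:

// Sketch: how service_manager.c consumes -DVENDORSERVICEMANAGER=1
#ifdef VENDORSERVICEMANAGER
    // vndservicemanager: resolve service contexts from the vendor policy
    sehandle = selinux_android_vendor_service_context_handle();
#else
    // servicemanager: use the regular system service contexts
    sehandle = selinux_android_service_context_handle();
#endif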

ServiceManager is started by the init process; its rc files are defined as follows:

#frameworks/native/cmds/servicemanager/servicemanager.rc
service servicemanager /system/bin/servicemanager
    class core animation
    user system
    group system readproc
    critical
#...
#frameworks/native/cmds/servicemanager/vndservicemanager.rc
service vndservicemanager /vendor/bin/vndservicemanager /dev/vndbinder
    class core
    user system
    group system readproc

ServiceManager Startup Analysis

The main logic of ServiceManager's main function is as follows:

//frameworks/native/cmds/servicemanager/service_manager.c
int main(int argc, char** argv)
{
    struct binder_state *bs;
    char *driver;

    if (argc > 1) {
        driver = argv[1]; // e.g. /dev/vndbinder
    } else {
        driver = "/dev/binder";
    }

    //1. Binder Open
    bs = binder_open(driver, 128*1024);

    //2. Become the context manager
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    //3. Enter the loop and handle requests
    binder_loop(bs, svcmgr_handler);

    return 0;
}

The main function has three major steps:

  1. Open the Binder driver
  2. Register as the context manager
  3. Enter a loop and wait for requests

The following sections cover these three steps in turn.

Opening the Binder Driver

binder_open opens the Binder driver and maps 128 KB of memory. The file descriptor, the start address of the mapped region, and related information are stored in a binder_state structure. Recall from the kernel driver chapter that this mapped user-space address and a kernel address are both backed by the same set of physical pages (initially just one page here).

struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;
    bs = malloc(sizeof(*bs));

    bs->fd = open(driver, O_RDWR | O_CLOEXEC);

    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);

    return bs;
}
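
For reference, binder_state is a small bookkeeping struct defined in the same binder.c; it holds exactly the three pieces of state mentioned above (reproduced here for convenience):

// frameworks/native/cmds/servicemanager/binder.c
struct binder_state
{
    int fd;         // file descriptor returned by open() on the binder device
    void *mapped;   // start address of the mmap'ed region
    size_t mapsize; // size of the mapping (128*1024 here)
};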

Registering as the context_manager

To become the special service that manages every other service in the system, ServiceManager must first register itself with the Binder driver. The relevant code is:

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

This function is very simple: it just sends a BINDER_SET_CONTEXT_MGR command to the Binder driver. Let's continue with the implementation inside the driver:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    //...
    switch (cmd) {
    //...
    case BINDER_SET_CONTEXT_MGR:
        ret = binder_ioctl_set_ctx_mgr(filp);
        if (ret)
            goto err;
        break;
    }
    //...
    ret = 0;
    //...
    return ret;
}

The BINDER_SET_CONTEXT_MGR command is handled in binder_ioctl_set_ctx_mgr:

static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    struct binder_context *context = proc->context;
    struct binder_node *new_node;
    kuid_t curr_euid = current_euid();

    mutex_lock(&context->context_mgr_node_lock);
    /*1*/
    if (uid_valid(context->binder_context_mgr_uid)) {
        if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
            pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                   from_kuid(&init_user_ns, curr_euid),
                   from_kuid(&init_user_ns,
                     context->binder_context_mgr_uid));
            ret = -EPERM;
            goto out;
        }
    } else {
        context->binder_context_mgr_uid = curr_euid;
    }
    /*2*/
    new_node = binder_new_node(proc, NULL);
    //...
    binder_node_lock(new_node);
    new_node->local_weak_refs++;
    new_node->local_strong_refs++;
    new_node->has_strong_ref = 1;
    new_node->has_weak_ref = 1;

    context->binder_context_mgr_node = new_node;
    binder_node_unlock(new_node);
    binder_put_node(new_node);
out:
    mutex_unlock(&context->context_mgr_node_lock);
    return ret;
}

The code above boils down to two steps:

  1. Check the UID and assign curr_euid to context->binder_context_mgr_uid
  2. Allocate and initialize a binder_node and assign it to context->binder_context_mgr_node

As we saw in an earlier chapter, binder_context is created when the device is initialized and assigned to proc->context, with its binder_context_mgr_uid field set to INVALID_UID. Because ServiceManager is unique system-wide for a given Binder device, if binder_context_mgr_uid already holds a valid value it must equal curr_euid.
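
For reference, binder_context on the 4.9+-era kernels used throughout this article looks roughly like the sketch below; the fields touched by binder_ioctl_set_ctx_mgr are binder_context_mgr_uid, binder_context_mgr_node and the lock protecting them (the exact field set may vary by kernel version):

// drivers/android/binder.c (approximate)
struct binder_context {
    struct binder_node *binder_context_mgr_node; // node of the context manager
    struct mutex context_mgr_node_lock;          // protects the two mgr fields
    kuid_t binder_context_mgr_uid;               // euid allowed to be the manager
    const char *name;                            // device name, e.g. "binder" or "vndbinder"
};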

Next, let's take a closer look at the binder_new_node function:

static struct binder_node *binder_new_node(struct binder_proc *proc,
                       struct flat_binder_object *fp)
{
    struct binder_node *node;
    struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

    node = binder_init_node_ilocked(proc, new_node, fp);

    return node;
}

static struct binder_node *binder_init_node_ilocked(
                        struct binder_proc *proc,
                        struct binder_node *new_node, 
                        struct flat_binder_object *fp)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;
    binder_uintptr_t ptr = fp ? fp->binder : 0;
    binder_uintptr_t cookie = fp ? fp->cookie : 0;
    __u32 flags = fp ? fp->flags : 0;
    s8 priority;

    while (*p) {
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);

        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else {
            binder_inc_node_tmpref_ilocked(node);
            return node;
        }
    }
    node = new_node;
    binder_stats_created(BINDER_STAT_NODE);
    node->tmp_refs++;
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->debug_id = atomic_inc_return(&binder_last_id);
    node->proc = proc;
    node->ptr = ptr;
    node->cookie = cookie;
    node->work.type = BINDER_WORK_NODE;
    priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
    node->sched_policy = (flags & FLAT_BINDER_FLAG_PRIORITY_MASK) >>
        FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
    node->min_priority = to_kernel_prio(node->sched_policy, priority);
    node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
    node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
    spin_lock_init(&node->lock);
    INIT_LIST_HEAD(&node->work.entry);
    INIT_LIST_HEAD(&node->async_todo);

    return node;
}

For the BINDER_SET_CONTEXT_MGR command, the flat_binder_object argument to binder_init_node_ilocked is NULL. The flow above can therefore be summarized as:

  1. Search the proc->nodes red-black tree for a node with the same ptr; if none is found, insert the newly allocated binder_node into the tree
  2. Initialize the binder_node: point its proc at the calling process, initialize the work and async_todo lists, and set fields such as ptr to 0

Serving Client Processes

After ServiceManager has registered itself with the Binder driver as the context manager, it calls binder_loop to wait indefinitely for, and handle, requests from client processes.

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
    }
}

Before entering the infinite loop, ServiceManager first calls binder_write, which sends a BINDER_WRITE_READ command to the Binder driver. The code is:

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    return res;
}

The data attached to this command is a pointer to a binder_write_read structure, which describes one read/write operation through the Binder driver. It is defined as follows:

struct binder_write_read {
    binder_size_t       write_size; /* bytes to write */
    binder_size_t       write_consumed; /* bytes consumed by driver */
    binder_uintptr_t    write_buffer;
    binder_size_t       read_size;  /* bytes to read */
    binder_size_t       read_consumed;  /* bytes consumed by driver */
    binder_uintptr_t    read_buffer;
};

Here write_buffer contains BC_ENTER_LOOPER and read_buffer carries no data. Continuing with the driver code:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{

    switch (cmd) {
    case BINDER_WRITE_READ:
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        if (ret)
            goto err;
        break;
    }
    ret = 0;
    return ret;
}

In the driver, the BINDER_WRITE_READ command is handled by binder_ioctl_write_read, whose main code is:

static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;
     
    /*1*/
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }

    /*2*/
    if (bwr.write_size > 0) {
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);

    }

    /*3*/
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                     bwr.read_size,
                     &bwr.read_consumed,
                     filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        binder_inner_proc_lock(proc);
        if (!binder_worklist_empty_ilocked(&proc->todo))
            binder_wakeup_proc_ilocked(proc);
        binder_inner_proc_unlock(proc);
    }
    /*4*/
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}

The flow can be summarized as:

  1. Copy the binder_write_read structure from user space
  2. If write_size is greater than 0, call binder_thread_write; if read_size is greater than 0, call binder_thread_read
  3. Copy the modified binder_write_read back to user space

For this particular command the read buffer is empty, so only binder_thread_write runs. Continuing the analysis:

static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    struct binder_context *context = proc->context;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error.cmd == BR_OK) {
        int ret;

        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        //...
        switch (cmd) {
        case BC_ENTER_LOOPER:
            //...
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;
            break;
        }
        *consumed = ptr - buffer;
    }
}

The basic logic of binder_thread_write is to read data from user space, parse the command word, handle each command accordingly, and update the binder_write_read structure when done. For the BC_ENTER_LOOPER command it simply adds the BINDER_LOOPER_STATE_ENTERED flag to the current thread's looper field. Back in user space, binder_loop then enters its infinite loop, which calls two functions on each iteration:

  1. ioctl(bs->fd, BINDER_WRITE_READ, &bwr), which waits for client requests
  2. binder_parse, which handles those requests

These two parts are analyzed separately below.
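
For reference, here is a condensed sketch of the user-space half: binder_parse (in servicemanager's binder.c) walks the commands returned by the driver and, for each BR_TRANSACTION, hands the request to the binder_handler callback (svcmgr_handler) and sends the reply back. Error handling and the other BR_* cases are omitted here:

// Condensed from frameworks/native/cmds/servicemanager/binder.c (simplified)
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
        switch (cmd) {
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg, reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4); // reply buffer
                bio_init_from_txn(&msg, txn);              // wrap the incoming request
                res = func(bs, txn, &msg, &reply);         // svcmgr_handler
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        // BR_NOOP, BR_TRANSACTION_COMPLETE, BR_REPLY, BR_DEAD_BINDER, ... omitted
        default:
            return -1;
        }
    }
    return 1;
}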

Waiting for Client Requests

Continuing with the driver code: in each iteration of binder_loop's loop, read_size is set to sizeof(readbuf), read_consumed is reset to 0, and read_buffer points at readbuf, while write_size stays 0. The binder_write_read passed by the ioctl inside the loop therefore has a non-zero read_size and a zero write_size, so the driver only executes binder_thread_read:

static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
{
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    int wait_for_proc_work;

retry:
    binder_inner_proc_lock(proc);
    wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
    binder_inner_proc_unlock(proc);

    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    //...

    if (non_block) {
        if (!binder_has_work(thread, wait_for_proc_work))
            ret = -EAGAIN;
    } else {
        ret = binder_wait_for_work(thread, wait_for_proc_work);
    }

    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
}
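
For context, wait_for_proc_work comes from binder_available_for_proc_work_ilocked, which on these kernels is roughly:

// drivers/android/binder.c (approximate)
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
    // A thread may service process-wide work only if it has no pending
    // transaction stack, an empty thread-local todo list, and has declared
    // itself a looper thread (ENTERED or REGISTERED).
    return !thread->transaction_stack &&
        binder_worklist_empty_ilocked(&thread->todo) &&
        (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                           BINDER_LOOPER_STATE_REGISTERED));
}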

Since ServiceManager's main thread has just issued BC_ENTER_LOOPER and has nothing else pending, wait_for_proc_work evaluates to true, and non_block is false, so execution reaches binder_wait_for_work:

static int binder_wait_for_work(struct binder_thread *thread,
                bool do_proc_work)
{
    DEFINE_WAIT(wait);
    struct binder_proc *proc = thread->proc;
    int ret = 0;

    freezer_do_not_count();
    binder_inner_proc_lock(proc);
    for (;;) {
        prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
        if (binder_has_work_ilocked(thread, do_proc_work))
            break;
        if (do_proc_work)
            list_add(&thread->waiting_thread_node,
                 &proc->waiting_threads);
        binder_inner_proc_unlock(proc);
        schedule();
        binder_inner_proc_lock(proc);
        list_del_init(&thread->waiting_thread_node);
    }
    finish_wait(&thread->wait, &wait);
    binder_inner_proc_unlock(proc);
    freezer_count();
    return ret;
}

This function first creates a wait entry, then calls prepare_to_wait to add itself to the thread's wait queue, and finally calls schedule to block. When the thread is woken up, binder_has_work_ilocked checks whether there is work to process: if so, it breaks out of the loop; otherwise it blocks again. At this point the ServiceManager process is blocked until a client process sends a request.
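
For completeness, binder_has_work_ilocked is a small predicate along these lines (field names differ slightly between kernel versions):

// drivers/android/binder.c (approximate)
static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
    // Wake up if this thread has its own work queued, if it must return to
    // user space, or if it may take process-wide work and the proc's todo
    // list is non-empty.
    return thread->process_todo ||
        thread->looper_need_return ||
        (do_proc_work &&
         !binder_worklist_empty_ilocked(&thread->proc->todo));
}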

Responding to Client Requests

ServiceManager mainly offers two services to clients: addService and getService. The following sections walk through how a client request is made and answered for each.
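
On the ServiceManager side, both kinds of request ultimately land in svcmgr_handler, the callback passed to binder_loop. Below is a trimmed-down sketch of its dispatch logic keeping only the two cases we care about; interface-token parsing, SELinux checks and the other cases are omitted:

// Condensed from frameworks/native/cmds/servicemanager/service_manager.c (simplified)
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg, struct binder_io *reply)
{
    uint16_t *s;
    size_t len;
    uint32_t handle;

    switch (txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);   // service name
        handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);        // hand a reference back to the caller
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);   // service name
        handle = bio_get_ref(msg);         // handle of the flattened Binder object
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
                           bio_get_uint32(msg) /* allow_isolated */,
                           txn->sender_pid))
            return -1;
        break;

    default:
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}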

addService

A process typically asks ServiceManager to register it as a service with code like this:

defaultServiceManager()->addService(String16("RandomFetcherService"), new BnRandomFetcher());  

The first argument is a String16 and the second is a BnInterface (or one of its subclasses). Let's look at how this call is implemented.

  1. defaultServiceManager()

sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
    //...
    gDefaultServiceManager = interface_cast<IServiceManager>(
        ProcessState::self()->getContextObject(NULL));
    //...
    return gDefaultServiceManager;
}

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    return getStrongProxyForHandle(0);
}

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;
    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            //...
            b = new BpBinder(handle); 
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            //...
        }
    }
    return result;
}

defaultServiceManager() therefore returns an IServiceManager proxy (a BpServiceManager) that wraps a BpBinder whose handle is 0. Next, let's look at addService. On the client side, IServiceManager is implemented by BpServiceManager:

class BpServiceManager : public BpInterface<IServiceManager>
{   //...
    virtual status_t addService(const String16& name, const sp<IBinder>& service,
            bool allowIsolated)
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        data.writeStrongBinder(service);
        data.writeInt32(allowIsolated ? 1 : 0);
        status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
        return err == NO_ERROR ? reply.readExceptionCode() : err;
    }
}

This method writes the name and the BnInterface object into a Parcel and then calls transact. Here remote() returns the BpBinder obtained via defaultServiceManager. Following the call chain:

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        //...
        return status;
    }
}

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();

    flags |= TF_ACCEPT_FDS;
    //...
    if (err == NO_ERROR) {
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    //...
    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            err = waitForResponse(reply);
        } else {
            //...
        }
    } else {
        //...
    }

    return err;
}

Focus on IPCThreadState::transact: it does two things, (1) writeTransactionData packages the request, and (2) waitForResponse drives the actual exchange with the driver. First, writeTransactionData:

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } 
    //...

    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}

This method creates a binder_transaction_data structure, fills in its fields, and writes the command plus the structure into the mOut Parcel.

  2. waitForResponse

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        //..
        cmd = (uint32_t)mIn.readInt32();
        switch (cmd) {
            //..
        case BR_REPLY:
            {
                //..
            }
            goto finish;
            //...
        }
    }

finish:
    return err;
}

waitForResponse itself consists of two parts:

  1. talkWithDriver

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;

    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;

        if (mProcess->mDriverFD <= 0) {
            err = -EBADF;
        }

    } while (err == -EINTR);
    //..
    return err;
}

talkWithDriver essentially issues a BINDER_WRITE_READ ioctl to the Binder driver. In the binder_write_read it builds, write_buffer points at mOut's data and write_size is the amount of data currently in mOut, while read_buffer points at mIn's buffer and read_size is mIn's remaining capacity. Let's continue with the driver's handling, starting again with binder_thread_write:

static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error.cmd == BR_OK) {
        int ret;

        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        switch (cmd) {
        //..
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr,
                       cmd == BC_REPLY, 0);
            break;
        }
        //...
        *consumed = ptr - buffer;
    }
    return 0;
}

The cmd passed in here is BC_TRANSACTION, so let's continue into binder_transaction:

static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply,
                   binder_size_t extra_buffers_size)
{
    int ret;
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    binder_size_t *offp, *off_end, *off_start;
    binder_size_t off_min;
    u8 *sg_bufp, *sg_buf_end;
    struct binder_proc *target_proc = NULL;
    struct binder_thread *target_thread = NULL;
    struct binder_node *target_node = NULL;
    struct binder_transaction *in_reply_to = NULL;
    struct binder_transaction_log_entry *e;
    uint32_t return_error = 0;
    uint32_t return_error_param = 0;
    uint32_t return_error_line = 0;
    struct binder_buffer_object *last_fixup_obj = NULL;
    binder_size_t last_fixup_min_off = 0;
    struct binder_context *context = proc->context;
    int t_debug_id = atomic_inc_return(&binder_last_id);

    e = binder_transaction_log_add(&binder_transaction_log);
    e->debug_id = t_debug_id;
    e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
    e->from_proc = proc->pid;
    e->from_thread = thread->pid;
    e->target_handle = tr->target.handle;
    e->data_size = tr->data_size;
    e->offsets_size = tr->offsets_size;
    e->context_name = proc->context->name;

    if (reply) {
        //...
    } else {
        if (tr->target.handle) {
            //...
        } else {
            mutex_lock(&context->context_mgr_node_lock);
            target_node = context->binder_context_mgr_node;
            //...
            binder_inc_node(target_node, 1, 0, NULL);
            mutex_unlock(&context->context_mgr_node_lock);
        }
        e->to_node = target_node->debug_id;
        binder_node_lock(target_node);
        target_proc = target_node->proc;
        //...
        binder_inner_proc_lock(target_proc);
        target_proc->tmp_ref++;
        binder_inner_proc_unlock(target_proc);
        binder_node_unlock(target_node);

        binder_inner_proc_lock(proc);
        if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
            //...
        }
        binder_inner_proc_unlock(proc);
    }
    if (target_thread)
        e->to_thread = target_thread->pid;
    e->to_proc = target_proc->pid;

    /* TODO: reuse incoming transaction for reply */
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    binder_stats_created(BINDER_STAT_TRANSACTION);
    spin_lock_init(&t->lock);

    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

    t->debug_id = t_debug_id;

    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    t->sender_euid = task_euid(proc->tsk);
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;
    t->flags = tr->flags;
    if (!(t->flags & TF_ONE_WAY) /* && ... */) {
        //..
    } else {
        /* Otherwise, fall back to the default priority */
        t->priority = target_proc->default_priority;
    }

    trace_binder_transaction(reply, t, target_node);

    t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
        tr->offsets_size, extra_buffers_size,
        !reply && (t->flags & TF_ONE_WAY));
    //..
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
    trace_binder_transaction_alloc_buf(t->buffer);
    off_start = (binder_size_t *)(t->buffer->data +
                      ALIGN(tr->data_size, sizeof(void *)));
    offp = off_start;

    if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
               tr->data.ptr.buffer, tr->data_size)) {
        binder_user_error("%d:%d got transaction with invalid data ptr\n",
                proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        return_error_param = -EFAULT;
        return_error_line = __LINE__;
        goto err_copy_data_failed;
    }
    if (copy_from_user(offp, (const void __user *)(uintptr_t)
               tr->data.ptr.offsets, tr->offsets_size)) {
        binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
                proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        return_error_param = -EFAULT;
        return_error_line = __LINE__;
        goto err_copy_data_failed;
    }

    off_end = (void *)off_start + tr->offsets_size;
    sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
    sg_buf_end = sg_bufp + extra_buffers_size;
    off_min = 0;
    //...
    if (reply) {
        //..
    } else if (!(t->flags & TF_ONE_WAY)) {
        BUG_ON(t->buffer->async_transaction != 0);
        binder_inner_proc_lock(proc);
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
        binder_inner_proc_unlock(proc);
    } 
    if (target_thread)
        binder_thread_dec_tmpref(target_thread);
    binder_proc_dec_tmpref(target_proc);
    /*
     * write barrier to synchronize with initialization
     * of log entry
     */
    smp_wmb();
    WRITE_ONCE(e->debug_id_done, t_debug_id);
    return;
}

  2. Read the returned data from mIn and handle it (the BR_REPLY branch in waitForResponse shown above).

getService
