The Zygote process forks a child process and starts SystemServer.
//path: /frameworks/base/services/java/com/android/server/SystemServer.java
/**
* The main entry point from zygote.
*/
public static void main(String[] args) {
new SystemServer().run();
}
private void run() {
//Create the SystemServiceManager
mSystemServiceManager = new SystemServiceManager(mSystemContext);
//Start the bootstrap services
startBootstrapServices();
}
private void startBootstrapServices() {
//Create the AMS object via reflection
mActivityManagerService = mSystemServiceManager.startService(
ActivityManagerService.Lifecycle.class).getService();
//Register AMS with ServiceManager (SM)
mActivityManagerService.setSystemProcess();
}
ActivityManagerService.setSystemProcess
//path: /frameworks/base/services/core/java/com/android/server/am/ActivityManagerService.java
public void setSystemProcess() {
//Call addService to register AMS with SM (the this argument is AMS)
ServiceManager.addService(Context.ACTIVITY_SERVICE, this, true);
}
ServiceManager.addService
//path: /frameworks/base/core/java/android/os/ServiceManager.java
public static void addService(String name, IBinder service, boolean allowIsolated) {
try {
//getIServiceManager() => new ServiceManagerProxy(new BinderProxy())
getIServiceManager().addService(name, service, allowIsolated);
} catch (RemoteException e) {
Log.e(TAG, "error in addService", e);
}
}
private static IServiceManager getIServiceManager() {
if (sServiceManager != null) {
//Return the cached singleton
return sServiceManager;
}
// Find the service manager
sServiceManager = ServiceManagerNative.asInterface(BinderInternal.getContextObject());
return sServiceManager;
}
BinderInternal.getContextObject() returns a BinderProxy object
//path: /frameworks/base/core/java/com/android/internal/os/BinderInternal.java
public static final native IBinder getContextObject();
getContextObject is a native method that calls down through JNI (for which JNI function it maps to, see the companion article: Binder - registering Binder's JNI methods)
//path:/frameworks/base/core/jni/android_util_Binder.cpp
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
//Open the binder driver and create a BpBinder(handle) object
sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
return javaObjectForIBinder(env, b);
}
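The mapping that routes the Java-level getContextObject() to the native function above is set up during JNI registration through a JNINativeMethod table, roughly as follows (a simplified sketch; the full registration flow is covered in the companion article on Binder JNI method registration):
//path: /frameworks/base/core/jni/android_util_Binder.cpp (simplified sketch)
static const JNINativeMethod gBinderInternalMethods[] = {
    //Java BinderInternal.getContextObject() -> native android_os_BinderInternal_getContextObject()
    { "getContextObject", "()Landroid/os/IBinder;",
      (void*)android_os_BinderInternal_getContextObject },
    //... other BinderInternal native methods omitted
};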
ProcessState::self()->getContextObject(NULL) follows the same flow as defaultServiceManager: both end up creating a BpBinder(handle) object (for details, see the companion article: Binder - obtaining the ServiceManager service).
Next, look at javaObjectForIBinder, where the BinderProxy and the BpBinder are bound to each other.
//path: /frameworks/base/core/jni/android_util_Binder.cpp
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
//Look up a BinderProxy already attached to this BpBinder; it is null the first time
//The gBinderProxyOffsets struct is initialized in int_register_android_os_BinderProxy(), which runs during JNI registration
jobject object = (jobject)val->findObject(&gBinderProxyOffsets);
//Create the BinderProxy object
object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
//Record the BpBinder in BinderProxy's mObject field
env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
//Attach the BinderProxy information to the BpBinder so it can be found again later
val->attachObject(&gBinderProxyOffsets, refObject,
jnienv_to_javavm(env), proxy_cleanup);
sp<DeathRecipientList> drl = new DeathRecipientList;
drl->incStrong((void*)javaObjectForIBinder);
//BinderProxy's mOrgue field records the death-notification list
env->SetLongField(object, gBinderProxyOffsets.mOrgue, reinterpret_cast<jlong>(drl.get()));
//Finally, return the BinderProxy object
return object;
}
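For reference, the gBinderProxyOffsets used above is a cached set of JNI ids for android.os.BinderProxy, roughly as follows (a simplified sketch; the exact field set varies across Android versions):
//path: /frameworks/base/core/jni/android_util_Binder.cpp (simplified sketch)
static struct binderproxy_offsets_t
{
    //Class and constructor of android.os.BinderProxy, cached in int_register_android_os_BinderProxy()
    jclass mClass;
    jmethodID mConstructor;
    //Field ids used above: mObject stores the BpBinder pointer, mOrgue the DeathRecipientList
    jfieldID mObject;
    jfieldID mOrgue;
} gBinderProxyOffsets;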
ServiceManagerNative.asInterface
//path: /frameworks/base/core/java/android/os/ServiceManagerNative.java
static public IServiceManager asInterface(IBinder obj)
{
//obj is a BinderProxy, so queryLocalInterface returns null by default
IServiceManager in =
(IServiceManager)obj.queryLocalInterface(descriptor);
if (in != null) {
return in;
}
//Create a ServiceManagerProxy object
return new ServiceManagerProxy(obj);
}
ServiceManagerProxy
//path: /frameworks/base/core/java/android/os/ServiceManagerNative.java
class ServiceManagerProxy implements IServiceManager {
//mRemote is the BinderProxy object
public ServiceManagerProxy(IBinder remote) {
mRemote = remote;
}
//getIServiceManager().addService above ends up here
public void addService(String name, IBinder service, boolean allowIsolated)
throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
//As in AOSP, the interface token and the service name are written before the binder itself
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
//① service here is AMS; write AMS into data
data.writeStrongBinder(service);
//allowIsolated flag, read later by svcmgr_handler
data.writeInt(allowIsolated ? 1 : 0);
//② mRemote is the BinderProxy object
mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
}
}
①Parcel.writeStrongBinder
//path: /frameworks/base/core/java/android/os/Parcel.java
public final void writeStrongBinder(IBinder val) {
nativeWriteStrongBinder(mNativePtr, val);
}
private static native void nativeWriteStrongBinder(long nativePtr, IBinder val);
nativeWriteStrongBinder maps to android_os_Parcel_writeStrongBinder
//path: /frameworks/base/core/jni/android_os_Parcel.cpp
static void android_os_Parcel_writeStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr, jobject object)
{
//Convert the Java-layer Parcel to the native-layer Parcel
Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
if (parcel != NULL) {
const status_t err = parcel->writeStrongBinder(ibinderForJavaObject(env, object));
}
}
ibinderForJavaObject creates a JavaBBinder object
//path: /frameworks/base/core/jni/android_util_Binder.cpp
sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj)
{
//Is obj a Java-layer Binder object? obj here is AMS, so we enter the if
if (env->IsInstanceOf(obj, gBinderOffsets.mClass)) {
JavaBBinderHolder* jbh = (JavaBBinderHolder*)
env->GetLongField(obj, gBinderOffsets.mObject);
//Return the JavaBBinder object
return jbh != NULL ? jbh->get(env, obj) : NULL;
}
}
class JavaBBinderHolder : public RefBase
{
public:
sp<JavaBBinder> get(JNIEnv* env, jobject obj)
{
//Promote the weak pointer to a strong one; it is empty the first time through
sp<JavaBBinder> b = mBinder.promote();
if (b == NULL) {
//Create the JavaBBinder object
b = new JavaBBinder(env, obj);
}
return b;
}
}
parcel->writeStrongBinder
//path: /frameworks/native/libs/binder/Parcel.cpp
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
const sp<IBinder>& binder, Parcel* out)
{
flat_binder_object obj;
//binder (the AMS binder) is not null
if (binder != NULL) {
//binder here is the JavaBBinder wrapping AMS; it derives from BBinder, so local is not NULL
IBinder *local = binder->localBinder();
if (!local) {
}else{
//Save the AMS (local binder) information in the flat_binder_object
obj.type = BINDER_TYPE_BINDER;
obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
obj.cookie = reinterpret_cast<uintptr_t>(local);
}
}
return finish_flatten_binder(binder, obj, out);
}
inline static status_t finish_flatten_binder(
const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
//Write the flat_binder_object into out (out => data, flat_binder_object => AMS)
return out->writeObject(flat, false);
}
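flat_binder_object is the on-the-wire form of a Binder object inside a Parcel, roughly as follows (a sketch based on the binder UAPI header; field names may differ slightly between kernel versions):
//path: binder UAPI header (simplified sketch)
struct flat_binder_object {
    __u32 type;                   //BINDER_TYPE_BINDER for a local object (the AMS case above), BINDER_TYPE_HANDLE for a remote reference
    __u32 flags;
    union {
        binder_uintptr_t binder;  //local object: address of its weak refs (set above)
        __u32 handle;             //remote object: the handle value
    };
    binder_uintptr_t cookie;      //local object: address of the BBinder (the JavaBBinder here)
};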
②BinderProxy.transact
//path: /frameworks/base/core/java/android/os/Binder.java
final class BinderProxy implements IBinder {
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
return transactNative(code, data, reply, flags);
}
//The native method maps to android_os_BinderProxy_transact
public native boolean transactNative(int code, Parcel data, Parcel reply,
int flags) throws RemoteException;
}
//path: /frameworks/base/core/jni/android_util_Binder.cpp
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
//Get the native data Parcel
Parcel* data = parcelForJavaObject(env, dataObj);
//Get the native reply Parcel
Parcel* reply = parcelForJavaObject(env, replyObj);
//Corresponds to SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get()) above: BinderProxy's mObject field holds the BpBinder
//Get the BpBinder object
IBinder* target = (IBinder*)
env->GetLongField(obj, gBinderProxyOffsets.mObject);
//target is the BpBinder
status_t err = target->transact(code, *data, reply, flags);
}
//path: /frameworks/native/libs/binder/BpBinder.cpp
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags);
}
//path: /frameworks/native/libs/binder/IPCThreadState.cpp
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
//Check the data for errors
status_t err = data.errorCheck();
//TF_ACCEPT_FDS: allow the reply to contain file descriptors
flags |= TF_ACCEPT_FDS;
//Package the transaction data and store the result in mOut
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
//TF_ONE_WAY means the call is asynchronous and does not wait for a result
//This call is not one-way, so we enter the if
if ((flags & TF_ONE_WAY) == 0) {
if (reply) {
//Wait for the response
err = waitForResponse(reply);
}
}
}
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
//handle is 0 here (the ServiceManager)
tr.target.handle = handle;
//code is ADD_SERVICE_TRANSACTION here
tr.code = code;
//flags is 0 here
tr.flags = binderFlags;
//... the remaining fields of tr are filled in from data
//Write the BC_TRANSACTION command into mOut
mOut.writeInt32(cmd);
//Write the binder_transaction_data
mOut.write(&tr, sizeof(tr));
}
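For reference, the binder_transaction_data tr filled in above looks roughly like this (a sketch of the UAPI struct):
//path: binder UAPI header (simplified sketch)
struct binder_transaction_data {
    union {
        __u32 handle;                  //target handle; 0 means the ServiceManager
        binder_uintptr_t ptr;
    } target;
    binder_uintptr_t cookie;
    __u32 code;                        //ADD_SERVICE_TRANSACTION here
    __u32 flags;                       //0 here (synchronous)
    pid_t sender_pid;                  //filled in by the driver
    uid_t sender_euid;
    binder_size_t data_size;           //size of the Parcel payload
    binder_size_t offsets_size;        //size of the object-offset array
    union {
        struct {
            binder_uintptr_t buffer;   //user-space address of the Parcel data
            binder_uintptr_t offsets;  //user-space address of the offsets array
        } ptr;
        __u8 buf[8];
    } data;
};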
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
while (1) {
//Loop, waiting for the result
if ((err=talkWithDriver()) < NO_ERROR) break;
}
}
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
//Has the read buffer been fully consumed? mIn is a member of IPCThreadState and is still empty here, so needRead is true
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
//Only write mOut when we are not in the middle of consuming a previous read
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
//Number of bytes for bwr to write
bwr.write_size = outAvail;
//The data for bwr to write (mOut's buffer)
bwr.write_buffer = (uintptr_t)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
//doReceive and needRead are both true here, so the read buffer is set up as well
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
//The while condition (err == -EINTR) normally does not hold, so the body runs only once
do {
//The BINDER_WRITE_READ ioctl carries the BC_TRANSACTION command into the driver
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
}while (err == -EINTR);
}
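bwr is a binder_write_read, the single argument of the BINDER_WRITE_READ ioctl, which is why one ioctl can both send commands and receive results (a sketch of the UAPI struct):
//path: binder UAPI header (simplified sketch)
struct binder_write_read {
    binder_size_t write_size;        //bytes to write (from mOut)
    binder_size_t write_consumed;    //bytes the driver actually consumed
    binder_uintptr_t write_buffer;   //user-space address of mOut's data
    binder_size_t read_size;         //room available for the driver to fill (mIn)
    binder_size_t read_consumed;     //bytes the driver actually wrote back
    binder_uintptr_t read_buffer;    //user-space address of mIn's data
};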
For the analysis of ioctl in the driver, see the companion article: Binder - a look at the kernel driver source.
//path: /drivers/staging/android/binder.c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
break;
}
}
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
}
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
switch (cmd) {
case BC_TRANSACTION:
//cmd is BC_TRANSACTION here
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
}
binder_transaction
//path: /drivers/staging/android/binder.c
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
//reply is false here (cmd is BC_TRANSACTION)
if (reply) {}
else{
//The target is SM: handle is 0, so take the else branch
if (tr->target.handle) {}
else{
//① Get the target SM's target_node, using the global binder_context_mgr_node directly
target_node = binder_context_mgr_node;
}
//② Get the SM process
target_proc = target_node->proc;
}
//③ Find SM's todo and wait queues
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
//Allocate a binder_transaction t describing this transaction; it is added to the target's todo queue, so that when the target wakes up it can take the pending work from that queue
t = kzalloc(sizeof(*t), GFP_KERNEL);
//Allocate a binder_work tcomplete indicating that the calling thread has an outstanding transaction; it is added to the calling thread's own todo queue
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (!reply && !(tr->flags & TF_ONE_WAY))
//Non-oneway call: record the calling thread in the transaction's from field
t->from = thread;
else
t->from = NULL;
//④ The following fills t with the data of this transaction (target: SM)
t->sender_euid = task_euid(proc->tsk);
//target_proc, the target process of this communication, is the SM process
t->to_proc = target_proc;
t->to_thread = target_thread;
//The code of this communication is ADD_SERVICE_TRANSACTION
t->code = tr->code;
//The flags of this communication are 0
t->flags = tr->flags;
//Priority of the current task
t->priority = task_nice(current);
//Allocate a buffer from the SM process (memory is taken from the mmap-ed area)
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
/*****⑤ The actual one-copy of the payload *****/
//Copy the user-space ptr.buffer into the kernel-mapped target buffer
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size))
//Copy the user-space ptr.offsets into the kernel
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size))
//SM and the client are two different processes
//⑥ Record this transaction on the calling thread's stack so it is known who made the call (and SM can find the client when replying)
thread->transaction_stack = t;
//⑦ Set t's type to BINDER_WORK_TRANSACTION (the command destined for SM: work to do)
t->work.type = BINDER_WORK_TRANSACTION;
//Add t to the target's todo queue
list_add_tail(&t->work.entry, target_list);
//⑧ Set the binder_work's type to BINDER_WORK_TRANSACTION_COMPLETE (the command destined for the client: an acknowledgement while it waits)
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
//⑨ Wake up SM
wake_up_interruptible(target_wait);
}
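The binder_transaction t allocated above carries roughly the following fields (a simplified sketch of the struct in this era's binder.c):
//path: /drivers/staging/android/binder.c (simplified sketch)
struct binder_transaction {
    struct binder_work work;          //queued on the target's todo list with type BINDER_WORK_TRANSACTION
    struct binder_thread *from;       //the calling thread, so SM knows whom to reply to
    struct binder_proc *to_proc;      //target process: the SM process
    struct binder_thread *to_thread;  //target thread, if one was picked
    unsigned need_reply:1;            //set for synchronous (non-oneway) calls
    struct binder_buffer *buffer;     //buffer allocated from the target's mmap-ed area
    unsigned int code;                //ADD_SERVICE_TRANSACTION
    unsigned int flags;
    long priority;
    kuid_t sender_euid;
};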
ServiceManager itself loops in binder_loop listening for client messages, with its thread blocked in the binder driver's binder_thread_read function (for details, see the companion article: Binder - starting the ServiceManager service).
Next, let's look at what happens after the wake-up, starting on the caller's side.
binder_thread_read - the calling thread picks up BINDER_WORK_TRANSACTION_COMPLETE
//path: /drivers/staging/android/binder.c
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
if (*consumed == 0) {
//Set the command to BR_NOOP
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
while (1) {
struct binder_work *w;
//In binder_transaction above, a binder_work of type BINDER_WORK_TRANSACTION_COMPLETE was added to this (the calling) thread's todo queue
if (!list_empty(&thread->todo)) {
//w->type is BINDER_WORK_TRANSACTION_COMPLETE
w = list_first_entry(&thread->todo, struct binder_work,entry);
}
switch (w->type) {
case BINDER_WORK_TRANSACTION_COMPLETE:
cmd = BR_TRANSACTION_COMPLETE;
//Write the BR_TRANSACTION_COMPLETE command
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
break;
}
}
}
Back at the loop in IPCThreadState::waitForResponse above, which is waiting for the result:
//path: /frameworks/native/libs/binder/IPCThreadState.cpp
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
while (1) {
//Loop, waiting for the result
if ((err=talkWithDriver()) < NO_ERROR) break;
switch (cmd) {
//Handle the BR_TRANSACTION_COMPLETE command
case BR_TRANSACTION_COMPLETE:
// Synchronous call: !reply is false, so the if is not entered; keep looping
if (!reply && !acquireResult) goto finish;
break;
}
}
}
binder_thread_read - ServiceManager starts processing the data
//path: /drivers/staging/android/binder.c
static int binder_thread_read(...){
//Similarly, on the ServiceManager side:
case BINDER_WORK_TRANSACTION: {
//Recover the client's request (the binder_transaction) from the binder_work and adjust the queues
t = container_of(w, struct binder_transaction, work);
//...
//Set the command to BR_TRANSACTION
cmd = BR_TRANSACTION;
}
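Items on the todo queues are binder_work nodes; a binder_transaction embeds one as its work member, which is why container_of above can recover the whole transaction from the queued work (a simplified sketch):
//path: /drivers/staging/android/binder.c (simplified sketch)
struct binder_work {
    struct list_head entry;                  //links the work into a thread's or process's todo list
    enum {
        BINDER_WORK_TRANSACTION = 1,         //real work for the target (SM here)
        BINDER_WORK_TRANSACTION_COMPLETE,    //acknowledgement queued for the caller
        //... other work types omitted
    } type;
};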
When SM starts, it calls binder_loop, which keeps listening for and handling requests from clients.
//path: /frameworks/native/cmds/servicemanager/binder.c
void binder_loop(struct binder_state *bs, binder_handler func)
{
for (;;) {
//Parse the data read from the driver
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
}
}
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
switch(cmd) {
case BR_TRANSACTION: {
if (func) {
int res;
//① Initialize reply
bio_init(&reply, rdata, sizeof(rdata), 4);
//② Let svcmgr_handler handle the request (func is the svcmgr_handler passed in as an argument)
res = func(bs, txn, &msg, &reply);
//③ Send the reply back to the binder driver
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
}
}
}
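msg and reply are binder_io cursors over the raw transaction buffers; bio_init above simply points reply at the local rdata array (a simplified sketch of the struct in servicemanager's binder.h):
//path: /frameworks/native/cmds/servicemanager/binder.h (simplified sketch)
struct binder_io
{
    char *data;            //current read/write position in the data buffer
    binder_size_t *offs;   //current position in the object-offset array
    size_t data_avail;     //bytes still available in the data buffer
    size_t offs_avail;     //entries still available in the offset array
    char *data0;           //start of the data buffer
    binder_size_t *offs0;  //start of the offset array
    uint32_t flags;
    uint32_t unused;
};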
svcmgr_handler
//path: /frameworks/native/cmds/servicemanager/service_manager.c
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
//addService
case SVC_MGR_ADD_SERVICE:
//Register the specified service
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
}
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,
uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
{
//SM manages many services, so first check whether this one is already registered
si = find_svc(s, len);
if (si) {
if (si->handle) {
//Release the old handle first
svcinfo_death(bs, si);
}
//Then re-register with the new handle
si->handle = handle;
}else{
//Allocate memory for the new entry
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
//Fill in the service info
si->handle = handle;
si->len = len;
//Link the service into svclist; addService is complete
si->next = svclist;
//svclist holds all registered services
svclist = si;
}
}
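Each registered service is kept as an svcinfo node on the svclist linked list (a simplified sketch of the struct in service_manager.c):
//path: /frameworks/native/cmds/servicemanager/service_manager.c (simplified sketch)
struct svcinfo
{
    struct svcinfo *next;        //next node in svclist
    uint32_t handle;             //the handle SM later hands back from getService
    struct binder_death death;   //death-notification bookkeeping
    int allow_isolated;          //whether isolated processes may access the service
    size_t len;                  //length of the service name
    uint16_t name[0];            //UTF-16 service name, e.g. "activity"
};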
binder_send_reply
//path: /frameworks/native/cmds/servicemanager/binder.c
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
//The BC_REPLY command
data.cmd_reply = BC_REPLY;
//binder_write eventually reaches the binder driver's binder_thread_write
binder_write(bs, &data, sizeof(data));
}
//path: /drivers/staging/android/binder.c
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
case BC_REPLY:
//Same flow as the binder_transaction above, except that reply is true this time: the result is queued on the caller's (SystemServer's) todo list and the caller is woken up; back in waitForResponse the client then receives BR_REPLY and addService() returns
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
}
Summary:
(Flow diagram: addService.png)