I. Starting from the init.rc file
1. The SurfaceFlinger process is created by the init process and runs as a standalone process. init reads init.rc and starts SurfaceFlinger from there.
Path: /system/core/rootdir/init.rc
When init.rc is parsed, it starts all services in the core class:
on boot
......
class_start core
**Take a look at surfaceflinger.rc.** Path: frameworks/native/services/surfaceflinger/surfaceflinger.rc
service surfaceflinger /system/bin/surfaceflinger
    class core animation
    user system
    group graphics drmrpc readproc
    capabilities SYS_NICE
    onrestart restart zygote
    ....
Because surfaceflinger.rc declares `class core animation`, the `class_start core` directive in init.rc starts the surfaceflinger service. Its entry point is main_surfaceflinger.cpp.
2. main_surfaceflinger.cpp sets up the binder thread pools, the process and thread priorities, and creates the SurfaceFlinger object:
int main(int, char**) {
    signal(SIGPIPE, SIG_IGN); // ignore SIGPIPE; its default disposition would terminate the process
    // When the sf service starts, configure the hwbinder thread pool: at most
    // maxThreads threads in this process will serve hwbinder transactions.
    hardware::configureRpcThreadpool(1 /* maxThreads */, false /* callerWillJoin */);
    // Start the graphics allocator service.
    startGraphicsAllocatorService();
    // Limit this process to 4 regular binder threads.
    ProcessState::self()->setThreadPoolMaxThreadCount(4);
    // Start the thread pool.
    sp<ProcessState> ps(ProcessState::self());
    ps->startThreadPool();
    // Instantiate SurfaceFlinger.
    sp<SurfaceFlinger> flinger = surfaceflinger::createSurfaceFlinger();
    // PRIO_PROCESS with pid 0 targets the current process; PRIORITY_URGENT_DISPLAY is the priority level.
    setpriority(PRIO_PROCESS, 0, PRIORITY_URGENT_DISPLAY);
    set_sched_policy(0, SP_FOREGROUND); // scheduling policy / priority
    // Put most SurfaceFlinger threads in the system-background cpuset;
    // keeps us from unnecessarily using big cores.
    // Do this after the binder thread pool init.
    if (cpusets_enabled()) set_cpuset_policy(0, SP_SYSTEM); // cpuset policy: keep SF off the big cores
    // Initialize before clients can connect.
    flinger->init();
    // Publish SurfaceFlinger.
    sp<IServiceManager> sm(defaultServiceManager());
    // Add the surfaceflinger service to the ServiceManager.
    sm->addService(String16(SurfaceFlinger::getServiceName()), flinger, false,
                   IServiceManager::DUMP_FLAG_PRIORITY_CRITICAL | IServiceManager::DUMP_FLAG_PROTO);
    // DisplayService depends on SF being registered above.
    startDisplayService();
    if (SurfaceFlinger::setSchedFifo(true) != NO_ERROR) {
        ALOGW("Couldn't set to SCHED_FIFO: %s", strerror(errno));
    }
    // Run SurfaceFlinger in this thread.
    flinger->run();
    return 0;
}
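setSchedFifo moves the main thread into the real-time SCHED_FIFO scheduling class; this is why surfaceflinger.rc requests `capabilities SYS_NICE` above. A minimal sketch of the idea, assuming it wraps sched_setscheduler (the priority value 2 is an illustrative assumption, not necessarily AOSP's exact constant):

#include <sched.h>
#include <cerrno>
#include <cstdio>
#include <cstring>

// Sketch: put the calling thread into the SCHED_FIFO real-time class,
// in the spirit of SurfaceFlinger::setSchedFifo(true).
static int setSchedFifoSketch(bool enabled) {
    sched_param param = {};
    int policy = enabled ? SCHED_FIFO : SCHED_OTHER;
    param.sched_priority = enabled ? 2 : 0; // SCHED_FIFO needs priority >= 1 (2 is an assumed value)
    if (sched_setscheduler(0 /* calling thread */, policy, &param) != 0) {
        return -errno; // typically -EPERM without CAP_SYS_NICE
    }
    return 0;
}

int main() {
    if (setSchedFifoSketch(true) != 0) {
        std::fprintf(stderr, "Couldn't set SCHED_FIFO: %s\n", std::strerror(errno));
    }
    return 0;
}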
(I) A closer look at hardware::configureRpcThreadpool(1, false)
Sequence diagram: (image omitted)
1. configureRpcThreadpool sets the maximum number of threads this process will use for hwbinder communication.
Path: system/libhidl/transport/HidlTransportSupport.cpp
void configureRpcThreadpool(size_t maxThreads, bool callerWillJoin) {
    configureBinderRpcThreadpool(maxThreads, callerWillJoin);
}
Path: system/libhidl/transport/HidlBinderSupport.cpp
void configureBinderRpcThreadpool(size_t maxThreads, bool callerWillJoin) {
    status_t ret = ProcessState::self()->setThreadPoolConfiguration(maxThreads, callerWillJoin);
    LOG_ALWAYS_FATAL_IF(ret != OK, "Could not setThreadPoolConfiguration: %d", ret);
    gThreadPoolConfigured = true;
}
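For contrast, a typical HIDL service's main() passes callerWillJoin = true, because its main thread joins the pool itself after registering. A hedged sketch (the HAL class names are hypothetical placeholders):

#include <hidl/HidlTransportSupport.h>

using android::hardware::configureRpcThreadpool;
using android::hardware::joinRpcThreadpool;

int main() {
    // One hwbinder thread total; the caller (this main thread) will be it.
    configureRpcThreadpool(1 /* maxThreads */, true /* callerWillJoin */);
    // sp<IMyHal> service = new MyHal();   // hypothetical HAL implementation
    // service->registerAsService();
    joinRpcThreadpool(); // does not return; the main thread now serves transactions
    return 1; // only reached if joinRpcThreadpool ever returns
}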
2. ProcessState::self() follows the same flow as the binder framework, initializing hwbinder in the current process.
(1) setThreadPoolConfiguration sets the number of threads the current process uses for hwbinder communication.
Google implemented a dedicated ProcessState, IPCThreadState, and BpHwBinder for HIDL. HIDL is the unified interface language used to specify the interface between a HAL and its users.
The HIDL ProcessState and IPCThreadState that initialize the hwbinder framework in this process live at:
system/libhwbinder/ProcessState.cpp
system/libhwbinder/IPCThreadState.cpp
(2) self() creates the ProcessState object
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex); // lock: lazy singleton
    if (gProcess != NULL) { // the classic C++ singleton pattern
        return gProcess;
    }
    gProcess = new ProcessState(kDefaultDriver); // create the ProcessState object
    return gProcess;
}
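Side note: since C++11, function-local statics initialize thread-safely, so the same lazy-singleton idea can be written without an explicit mutex. A generic sketch for comparison only (hypothetical Driver class; the real ProcessState keeps the explicit lock and sp<> refcounting):

#include <string>

class Driver {
public:
    static Driver& self() {
        static Driver instance("/dev/hwbinder"); // constructed once, thread-safe since C++11
        return instance;
    }
private:
    explicit Driver(std::string path) : mPath(std::move(path)) {}
    std::string mPath; // stands in for the fd/state ProcessState would hold
};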
In the ProcessState constructor, open_driver opens the /dev/hwbinder node, which has the kernel initialize this process's hwbinder data structures. The fd of the opened node is saved in mDriverFD, and all later communication with the kernel driver goes through that fd.
(3) The constructor
ProcessState::ProcessState(const char *driver)
    : mDriverName(String8(driver))
    , mDriverFD(open_driver(driver))
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mStarvationStartTimeMs(0)
    , mBinderContextCheckFunc(nullptr)
    , mBinderContextUserData(nullptr)
    , mThreadPoolStarted(false)
    , mSpawnThreadOnStart(true)
    , mThreadPoolSeq(1)
    , mCallRestriction(CallRestriction::NONE)
{
    if (mDriverFD >= 0) {
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        // PROT_READ suffices: the driver copies transaction data into this region; user space only reads it.
        mVMStart = mmap(0, mMmapSize, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            ALOGE("Using /dev/hwbinder failed: unable to mmap transaction memory.\n");
            close(mDriverFD);
            mDriverFD = -1;
            mDriverName.clear();
        }
    }
}
(4) open_driver opens the /dev/hwbinder node directly.
This tells the hwbinder driver to initialize this process's hwbinder data structures in the kernel.
static int open_driver(const char *driver)
{
    int fd = open(driver, O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        int vers = 0;
        // Report this process's binder protocol version so it can be compared against the kernel's.
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        }
        if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            ALOGE("Binder driver protocol(%d) does not match user space protocol(%d)!", vers, BINDER_CURRENT_PROTOCOL_VERSION);
            close(fd);
            fd = -1;
        }
        size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
        // Set the maximum number of threads this process may use for hwbinder communication.
        result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
        if (result == -1) {
            ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        }
    } else {
        ALOGW("Opening '/dev/hwbinder' failed: %s\n", strerror(errno));
    }
    return fd;
}
BINDER_VERSION passes this process's binder protocol version to the kernel via ioctl so the two versions can be compared.
BINDER_SET_MAX_THREADS tells the kernel the maximum number of threads this process will dedicate to hwbinder communication, here DEFAULT_MAX_BINDER_THREADS.
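Both commands are defined in the binder UAPI header. The excerpt below is quoted from linux/android/binder.h, lightly abridged, so treat the exact command numbers as indicative:

struct binder_version {
    __s32 protocol_version; // driver version, compared against BINDER_CURRENT_PROTOCOL_VERSION
};

#define BINDER_VERSION          _IOWR('b', 9, struct binder_version)
#define BINDER_SET_MAX_THREADS  _IOW('b', 5, __u32)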
(5) With ProcessState::self() done, look at setThreadPoolConfiguration(maxThreads, callerWillJoin).
setThreadPoolConfiguration revises the communication thread count that open_driver set:
status_t ProcessState::setThreadPoolConfiguration(size_t maxThreads, bool callerJoinsPool) {
    // The thread pool must always contain at least one thread.
    LOG_ALWAYS_FATAL_IF(maxThreads < 1, "Binder threadpool must have a minimum of one thread.");
    size_t threadsToAllocate = maxThreads;
    // If the caller is going to join the pool, it will contribute one thread. This is part of the API contract.
    if (callerJoinsPool) threadsToAllocate--;
    // If we can, spawn one thread from user space when the pool starts. This ensures
    // that there is always a thread available to start more threads as the pool spins up.
    bool spawnThreadOnStart = threadsToAllocate > 0;
    if (spawnThreadOnStart) threadsToAllocate--;
    // The BINDER_SET_MAX_THREADS ioctl really tells the kernel how many threads
    // it's allowed to spawn, *in addition* to any threads we may have already spawned locally.
    size_t kernelMaxThreads = threadsToAllocate;
    AutoMutex _l(mLock);
    if (ioctl(mDriverFD, BINDER_SET_MAX_THREADS, &kernelMaxThreads) == -1) {
        ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        return -errno;
    }
    mMaxThreads = maxThreads;
    mSpawnThreadOnStart = spawnThreadOnStart;
    return NO_ERROR;
}
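To make the accounting concrete, here is a standalone re-statement of the rules above (not the real ProcessState code), checked against the two call patterns we have seen:

#include <cassert>
#include <cstddef>

struct PoolPlan { std::size_t kernelMaxThreads; bool spawnThreadOnStart; };

// Re-statement of setThreadPoolConfiguration's thread accounting.
static PoolPlan planThreadPool(std::size_t maxThreads, bool callerJoinsPool) {
    std::size_t threadsToAllocate = maxThreads;
    if (callerJoinsPool) threadsToAllocate--;        // the caller contributes one thread
    bool spawnThreadOnStart = threadsToAllocate > 0; // pre-spawn one from user space if possible
    if (spawnThreadOnStart) threadsToAllocate--;
    return {threadsToAllocate, spawnThreadOnStart};  // remainder is the kernel's budget
}

int main() {
    // SurfaceFlinger: configureRpcThreadpool(1, false)
    // -> the kernel may request 0 extra threads; one thread is spawned locally at startup.
    PoolPlan sf = planThreadPool(1, false);
    assert(sf.kernelMaxThreads == 0 && sf.spawnThreadOnStart);

    // Typical HAL: configureRpcThreadpool(1, true)
    // -> the joining main thread is the whole pool; nothing else is spawned.
    PoolPlan hal = planThreadPool(1, true);
    assert(hal.kernelMaxThreads == 0 && !hal.spawnThreadOnStart);
    return 0;
}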
(II) Initializing SurfaceFlinger
1. sp<SurfaceFlinger> flinger = surfaceflinger::createSurfaceFlinger();
Path: frameworks/native/services/surfaceflinger/SurfaceFlingerFactory.cpp
sp<SurfaceFlinger> createSurfaceFlinger() {
    static DefaultFactory factory;
    return new SurfaceFlinger(factory);
}
So SurfaceFlingerFactory.cpp creates the SurfaceFlinger object, injecting a DefaultFactory that SurfaceFlinger uses to construct its internal components.
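The point of routing construction through a factory is dependency injection: SurfaceFlinger asks the factory for its collaborators instead of newing them directly, so tests can substitute fakes. A generic illustration with hypothetical names (not the real surfaceflinger::Factory interface):

#include <memory>

struct VsyncSource { virtual ~VsyncSource() = default; };
struct RealVsyncSource : VsyncSource {};
struct FakeVsyncSource : VsyncSource {}; // what a test would inject

struct Factory {
    virtual ~Factory() = default;
    virtual std::unique_ptr<VsyncSource> createVsyncSource() = 0;
};

struct DefaultFactoryExample : Factory {
    std::unique_ptr<VsyncSource> createVsyncSource() override {
        return std::make_unique<RealVsyncSource>();
    }
};

struct FlingerExample {
    explicit FlingerExample(Factory& factory)
          : mVsync(factory.createVsyncSource()) {} // collaborator comes from the factory
    std::unique_ptr<VsyncSource> mVsync;
};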
2. ProcessState::self()->setThreadPoolMaxThreadCount(4)
Path: frameworks/native/libs/binder/ProcessState.cpp
Note that this is libbinder's ProcessState (/dev/binder), not the hwbinder one configured above.
sp<ProcessState> ProcessState::self() {
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != nullptr) {
        return gProcess;
    }
    gProcess = new ProcessState(kDefaultDriver);
    return gProcess;
}

status_t ProcessState::setThreadPoolMaxThreadCount(size_t maxThreads) {
    status_t result = NO_ERROR;
    if (ioctl(mDriverFD, BINDER_SET_MAX_THREADS, &maxThreads) != -1) {
        mMaxThreads = maxThreads;
    } else {
        result = -errno;
        ALOGE("Binder ioctl to set max threads failed: %s", strerror(-result));
    }
    return result;
}
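Once main() has published the service with addService, any client process can fetch SurfaceFlinger from servicemanager over this same /dev/binder channel. A hedged client-side sketch (SurfaceFlinger::getServiceName() returns "SurfaceFlinger"):

#include <binder/IServiceManager.h>
#include <gui/ISurfaceComposer.h>

using namespace android;

// Look up SurfaceFlinger by name and cast the raw binder to its interface.
sp<ISurfaceComposer> getComposerService() {
    sp<IBinder> binder =
            defaultServiceManager()->getService(String16("SurfaceFlinger"));
    return interface_cast<ISurfaceComposer>(binder);
}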
The creation, initialization, and run loop of SurfaceFlinger are left for the next chapter.