Binder学习笔记1—Service示例分析
一、示例一:SystemSuspend对binder使用
1. 示例代码
//vnd/system/hardware/interfaces/suspend/1.0/default/main.cpp #include <binder/IPCThreadState.h> #include <binder/IServiceManager.h> #include <binder/ProcessState.h> #include <hidl/HidlTransportSupport.h> #include <hwbinder/ProcessState.h> using android::hardware::configureRpcThreadpool; using android::hardware::joinRpcThreadpool; int main() ( ... configureRpcThreadpool(1, true /* callerWillJoin */); //操作 /dev/hwbinder sp<SuspendControlService> suspendControl = new SuspendControlService(); auto controlStatus = android::defaultServiceManager()->addService(android::String16("suspend_control"), suspendControl); sp<SuspendControlServiceInternal> suspendControlInternal = new SuspendControlServiceInternal(); controlStatus = android::defaultServiceManager()->addService(android::String16("suspend_control_internal"), suspendControlInternal); //Create non-HW binder threadpool for SuspendControlService. sp<android::ProcessState> ps{android::ProcessState::self()}; ps->startThreadPool(); sp<SystemSuspend> suspend = new SystemSuspend( td::move(wakeupCountFd), std::move(stateFd), std::move(suspendStatsFd), kStatsCapacity, std::move(kernelWakelockStatsFd), std::move(wakeupReasonsFd), std::move(suspendTimeFd), sleepTimeConfig, suspendControl, suspendControlInternal, true /* mUseSuspendCounter*/); status_t status = suspend->registerAsService(); joinRpcThreadpool(); //操作 /dev/hwbinder }
这里通过 addService 注册的服务可以通过 dumpsys 命令查询到:
# dumpsys | grep suspend_control
suspend_control
suspend_control_internal
2. configureRpcThreadpool()
调用路径:
configureRpcThreadpool(1, true /* callerWillJoin */); //vnd/system/hardware/interfaces/suspend/1.0/default/main.cpp configureBinderRpcThreadpool(maxThreads, callerWillJoin); //vnd/system/libhidl/transport/HidlTransportSupport.cpp ProcessState::self()->setThreadPoolConfiguration(maxThreads, callerWillJoin /*callerJoinsPool*/); //vnd/system/libhwbinder/ProcessState.cpp ioctl(mDriverFD, BINDER_SET_MAX_THREADS, &kernelMaxThreads)
setThreadPoolConfiguration 函数:
// Configures the size of this process's binder thread pool. maxThreads counts
// the calling thread when callerJoinsPool is true; after also reserving one
// thread that libbinder spawns itself at pool start, only the remainder is
// handed to the kernel via the BINDER_SET_MAX_THREADS ioctl.
status_t ProcessState::setThreadPoolConfiguration(size_t maxThreads, bool callerJoinsPool) {
    ...
    size_t threadsToAllocate = maxThreads;
    /* maxThreads here includes the current thread; if callerJoinsPool is true
       the current thread counts as one of them, so subtract 1. */
    if (callerJoinsPool) threadsToAllocate--;
    /* If possible, spawn one thread from user space when the thread pool
       starts. This ensures that once the pool is started there is always a
       thread available to start more threads. */
    bool spawnThreadOnStart = threadsToAllocate > 0;
    if (spawnThreadOnStart) threadsToAllocate--;
    size_t kernelMaxThreads = threadsToAllocate; // 0 in this example, i.e. the kernel need not create any more binder threads
    AutoMutex _l(mLock);
    if (ioctl(mDriverFD, BINDER_SET_MAX_THREADS, &kernelMaxThreads) == -1) {
        ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
        return -errno;
    }
    ...
}
可见,这个函数操作的是 HIDL 使用的 /dev/hwbinder 节点,参数1表示只需要1个binder线程,而参数2为true表示当前线程(主线程)将会成为一个binder线程,因为不需要再额外创建任何binder线程了。
3. addService()
TODO:
4. ProcessState::self()
// Returns the process-wide ProcessState singleton for the default binder
// driver node, creating it (and thereby opening the driver) on first use.
sp<ProcessState> ProcessState::self() //ProcessState.cpp
{
    return init(kDefaultDriver, false /*requireDefault*/);
}

sp<ProcessState> ProcessState::init(const char *driver, bool requireDefault)
{
    // Function-local static: constructed exactly once, invoking the
    // ProcessState constructor below on first call.
    static sp<ProcessState> gProcess = sp<ProcessState>::make(driver);
    return gProcess;
}

// Constructor: opens the binder driver node and mmaps the receive buffer
// shared with the driver.
ProcessState::ProcessState(const char *driver)
    : mDriverName(String8(driver)),
      mDriverFD(open_driver(driver))
{
    mVMStart = mmap(nullptr, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0); // BINDER_VM_SIZE = 2M - 8K
}
就是调用构造函数,打开设备驱动节点,进行mmap映射。
5. ps->startThreadPool()
// Starts the binder thread pool: spawns the first pooled thread (marked as
// the "main" pool thread). Idempotent — guarded by mThreadPoolStarted.
void ProcessState::startThreadPool() //ProcessState.cpp
{
    AutoMutex _l(mLock);
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);
    }
}

void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) { // true at this point
        String8 name = makeBinderThreadName();
        ALOGV("Spawning new pooled thread, name=%s\n", name.string());
        sp<Thread> t = sp<PoolThread>::make(isMain); // PoolThread ctor only stores mIsMain = isMain
        /* A brand-new thread is created here; it is usually the first binder
           thread of the process, named "Binder:<pid>_1". */
        t->run(name.string());
    }
}

// PoolThread::threadLoop — runs on the newly spawned thread and immediately
// joins the binder thread pool; joinThreadPool() loops handling commands and
// only returns on error.
virtual bool threadLoop()
{
    IPCThreadState::self()->joinThreadPool(mIsMain);
    return false;
}

void IPCThreadState::joinThreadPool(bool isMain) //IPCThreadState.cpp
{
    ...
    /* Main pool thread registers via BC_ENTER_LOOPER; others via BC_REGISTER_LOOPER. */
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
    /* Endless loop: wait for the next command and execute it. */
    do {
        processPendingDerefs();
        // now get the next command to be processed, waiting if necessary
        result = getAndExecuteCommand();
        if(result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);
    /* Abnormal exit path: tell the driver this looper thread is leaving. */
    mOut.writeInt32(BC_EXIT_LOOPER);
    mIsLooper = false;
    talkWithDriver(false);
}
startThreadPool() 的执行路径中就会创建一个binder线程,通常是第一个binder线程,名为"Binder:<pid>_1"。spawnPooledThread 函数传参为true表示是主线程,准确来说应该是首个binder线程,因为其是新创建的然后添加的,并不是当前正在执行的主线程。然后告诉binder驱动此binder线程就绪。
6. registerAsService()
TODO:
7. joinRpcThreadpool()
执行路径:
joinRpcThreadpool() //vnd/system/libhidl/transport/HidlTransportSupport.cpp joinBinderRpcThreadpool() //vnd/system/libhidl/transport/HidlBinderSupport.cpp IPCThreadState::self()->joinThreadPool() //vnd/frameworks/native/libs/binder/IPCThreadState.cpp
joinThreadPool 函数定义为 void IPCThreadState::joinThreadPool(bool isMain = true), isMain 是带默认值的参数(默认参数),不传参时即为true,表示是主线程。标志服务的主线程已经就绪。注意,这里才是真正的主线程,没有创建新线程,而 startThreadPool() 函数中是新创建的线程作为binder线程。
二、示例二:SurfaceFlinger对binder使用
1. 示例代码
//sys/frameworks/native/services/surfaceflinger/main_surfaceflinger.cpp int main(int, char**) { ... char value[PROPERTY_VALUE_MAX]; property_get("debug.sf.binder_test", value, "0"); bool binder_test = atoi(value); ... /* 参数传false, 表示当前线程不会成为binder线程,因此后续不用调用 joinRpcThreadpool() */ hardware::configureRpcThreadpool(1 /* maxThreads */, false /* callerWillJoin */); //limit the number of binder threads to 4. ProcessState::self()->setThreadPoolMaxThreadCount(4); sp<ProcessState> ps(ProcessState::self()); ps->startThreadPool(); sp<IServiceManager> sm(defaultServiceManager()); sm->addService(String16(SurfaceFlinger::getServiceName()), flinger, false, IServiceManager::DUMP_FLAG_PRIORITY_CRITICAL | IServiceManager::DUMP_FLAG_PROTO); ... }
三、补充
1. startThreadPool 和 joinThreadPool 传的 isMain 参数都为true。isMain 并不是表示主线程,而是表示不会主动退出的线程。
posted on 2022-07-10 21:31 Hello-World3 阅读(1186) 评论(0) 编辑 收藏 举报