Understanding Binder in Depth
The Binder Model
Advantages of the Binder Mechanism
1. Binder implements a client-server architecture well.
2. Binder has high transfer efficiency and good operability: a transaction's payload is copied only once, from the sender into a kernel buffer that is memory-mapped by the receiver.
3. The Binder mechanism is highly secure: the driver stamps every transaction with the sender's UID/PID in the kernel, so a caller's identity cannot be forged.
The Binder Communication Model
The ServiceManager Daemon
ServiceManager is a user-space daemon that runs continuously in the background. Its job is to manage the Servers in the Binder world:
when a Server starts, it registers its name together with its Binder object information in ServiceManager;
when a Client needs a Server's entry point, it looks the Server up in ServiceManager by that name.
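To make the two halves of this contract concrete, here is a minimal native C++ sketch (MyService and the name "my.demo.service" are hypothetical placeholders; error handling is omitted):

// A minimal sketch of both halves of the ServiceManager contract.
// MyService and "my.demo.service" are hypothetical placeholders.
#include <binder/Binder.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>

using namespace android;

class MyService : public BBinder {};   // a trivial local binder object

// Server process: register under a well-known name, then serve requests.
void publishMyService() {
    defaultServiceManager()->addService(String16("my.demo.service"),
                                        new MyService());
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();   // block, handling transactions
}

// Client process: resolve the same name to a BpBinder proxy.
sp<IBinder> resolveMyService() {
    return defaultServiceManager()->getService(String16("my.demo.service"));
}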
ServiceManager flowchart
Source of ServiceManager's main() function
int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER;

    // Open /dev/binder and mmap a 128KB buffer for incoming transactions.
    bs = binder_open(128*1024);

    // Tell the driver this process is the context manager (handle 0).
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    svcmgr_handle = svcmgr;

    // Enter the dispatch loop; svcmgr_handler serves every request.
    binder_loop(bs, svcmgr_handler);
    return 0;
}
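main() never returns from binder_loop(). For context, here is binder_loop() as it appears (abridged) in servicemanager's binder.c of this era: it marks the thread as a looper, then blocks in the BINDER_WRITE_READ ioctl and feeds each incoming command to svcmgr_handler through binder_parse():

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;          // announce this thread as a looper
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // Block in the driver until a command (e.g. an addService
        // transaction) arrives for this process.
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            break;

        // Decode the commands and hand each transaction to func
        // (svcmgr_handler in ServiceManager's case).
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res <= 0)
            break;
    }
}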
Dissecting MediaServer
MediaServer is home to many of the system's important services, such as MediaPlayerService and CameraService.
MediaServer's entry function
The implementation of defaultServiceManager()
Class diagram of the classes related to defaultServiceManager
defaultServiceManager()
sp<IServiceManager> defaultServiceManager()
{
    // Fast path: the proxy is a process-wide singleton.
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == NULL) {
            // Handle 0 always refers to ServiceManager; wrap the
            // resulting BpBinder in a BpServiceManager.
            gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
            // Early in boot, ServiceManager may not be up yet; retry.
            if (gDefaultServiceManager == NULL)
                sleep(1);
        }
    }

    return gDefaultServiceManager;
}
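interface_cast<IServiceManager>() is nothing more than a thin template defined in IInterface.h; it simply forwards to the interface's asInterface() factory, which we examine below:

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}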
ProcessState::getContextObject()
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
    // Handle 0 is reserved for the context manager, i.e. ServiceManager.
    return getStrongProxyForHandle(0);
}
ProcessState::getStrongProxyForHandle()
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        // We need to create a new BpBinder if there isn't currently one, OR we
        // are unable to acquire a weak reference on this current one. See comment
        // in getWeakProxyForHandle() for more info about this.
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
                // Special case for context manager...
                // The context manager is the only object for which we create
                // a BpBinder proxy without already holding a reference.
                // Perform a dummy transaction to ensure the context manager
                // is registered before we create the first local reference
                // to it (which will occur when creating the BpBinder).
                // If a local reference is created for the BpBinder when the
                // context manager is not present, the driver will fail to
                // provide a reference to the context manager, but the
                // driver API does not return status.
                //
                // Note that this is not race-free if the context manager
                // dies while this code runs.
                //
                // TODO: add a driver API to wait for context manager, or
                // stop special casing handle 0 for context manager and add
                // a driver API to get a handle to the context manager with
                // proper reference counting.

                Parcel data;
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT)
                    return NULL;
            }

            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            // This little bit of nastyness is to allow us to add a primary
            // reference to the remote proxy when this team doesn't have one
            // but another team is sending the handle to us.
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }

    return result;
}
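For reference, lookupHandleLocked() (from ProcessState.cpp) grows the per-process handle table on demand, so the first query for a handle returns an empty entry rather than failing:

ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)
{
    const size_t N = mHandleToObject.size();
    if (N <= (size_t)handle) {
        // First time this handle is seen: extend the vector with
        // empty entries up to and including it.
        handle_entry e;
        e.binder = NULL;
        e.refs = NULL;
        status_t err = mHandleToObject.insertAt(e, N, handle + 1 - N);
        if (err < NO_ERROR) return NULL;
    }
    return &mHandleToObject.editItemAt(handle);
}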
IServiceManager::asInterface()
android::sp<IServiceManager> IServiceManager::asInterface(const android::sp<android::IBinder>& obj)
{
    android::sp<IServiceManager> intr;
    if (obj != NULL) {
        // In-process case: a BBinder answers queryLocalInterface with itself.
        intr = static_cast<IServiceManager*>(
            obj->queryLocalInterface(
                IServiceManager::descriptor).get());
        if (intr == NULL) {
            // Cross-process case: obj is a BpBinder, so wrap it in a proxy.
            intr = new BpServiceManager(obj);
        }
    }
    return intr;
}
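Note that this asInterface() body is not written by hand: it is expanded from the DECLARE_META_INTERFACE / IMPLEMENT_META_INTERFACE macro pair in IInterface.h. For IServiceManager the expansion is driven by a single line:

// In the class declaration (IServiceManager.h):
//     DECLARE_META_INTERFACE(ServiceManager);
// In IServiceManager.cpp; expands into asInterface(), descriptor, etc.:
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");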
Registering MediaPlayerService
At last we come to the Client-Server interaction itself.
MediaPlayerService registers itself with ServiceManager by sending an addService request.
Note the role reversal: for this addService request, MediaPlayerService is the Client and ServiceManager is the Server. The request goes through BpServiceManager::addService() (shown in the Parcel section below), whose remote()->transact(ADD_SERVICE_TRANSACTION, ...) lands in BpBinder::transact().
Sequence diagram of the addService flow
BpBinder::transact()
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // mAlive is initialized to 1; it drops to 0 once the remote dies.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}
IPCThreadState::transact()
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    //++[Debug][mark_chen][2014/07/11][Binder] Binder transaction bad command issue
    if (!mCanTransact) {
#if (HTC_SECURITY_DEBUG_FLAG == 1)
        TextOutput::Bundle _b(alog);
        alog << "Invalid BC_TRANSACTION " << (void*)pthread_self() << " / hand "
            << handle << " / code " << TypeCode(code) << ": "
            << indent << data << dedent << endl;
#endif
    }
    //--[Debug][mark_chen][2014/07/11][Binder] Binder transaction bad command issue

    status_t err = data.errorCheck();

    flags |= TF_ACCEPT_FDS;

    IF_LOG_TRANSACTIONS() {
        TextOutput::Bundle _b(alog);
        alog << "BC_TRANSACTION thr " << (void*)pthread_self() << " / hand "
            << handle << " / code " << TypeCode(code) << ": "
            << indent << data << dedent << endl;
    }

    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        // Queue the transaction into mOut; nothing reaches the driver yet.
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }

    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }

    if ((flags & TF_ONE_WAY) == 0) {
#if 0
        if (code == 4) { // relayout
            ALOGI(">>>>>> CALLING transaction 4");
        } else {
            ALOGI(">>>>>> CALLING transaction %d", code);
        }
#endif
        // Synchronous call: block until the reply (or an error) arrives.
        if (reply) {
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
#if 0
        if (code == 4) { // relayout
            ALOGI("<<<<<< RETURNING transaction 4");
        } else {
            ALOGI("<<<<<< RETURNING transaction %d", code);
        }
#endif
        IF_LOG_TRANSACTIONS() {
            TextOutput::Bundle _b(alog);
            alog << "BR_REPLY thr " << (void*)pthread_self() << " / hand "
                << handle << ": ";
            if (reply) alog << indent << *reply << dedent << endl;
            else alog << "(none requested)" << endl;
        }
    } else {
        // One-way call: wait only for BR_TRANSACTION_COMPLETE.
        err = waitForResponse(NULL, NULL);
    }

    return err;
}
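writeTransactionData() still does not touch the driver; it just packages the call into a binder_transaction_data and appends it to mOut (abridged from the IPCThreadState.cpp of this era; the TF_STATUS_CODE path for statusBuffer is trimmed):

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0;
    tr.target.handle = handle;   // which remote object to call
    tr.code = code;              // which method (e.g. ADD_SERVICE_TRANSACTION)
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        // The Parcel's payload and its object-offset table are passed
        // to the driver by pointer; no copy is made here.
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount() * sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else {
        // (TF_STATUS_CODE path using statusBuffer trimmed)
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);        // BC_TRANSACTION
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}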
IPCThreadState::waitForResponse()
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;

        cmd = (uint32_t)mIn.readInt32();

        IF_LOG_COMMANDS() {
            alog << "Processing waitForResponse Command: "
                << getReturnString(cmd) << endl;
        }

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;

        case BR_DEAD_REPLY:
            err = DEAD_OBJECT;
            goto finish;

        case BR_FAILED_REPLY:
            err = FAILED_TRANSACTION;
            goto finish;

        case BR_ACQUIRE_RESULT:
            {
                ALOG_ASSERT(acquireResult != NULL, "Unexpected brACQUIRE_RESULT");
                const int32_t result = mIn.readInt32();
                if (!acquireResult) continue;
                *acquireResult = result ? NO_ERROR : INVALID_OPERATION;
            }
            goto finish;

        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }

finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }

    return err;
}
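The actual crossing into the kernel happens in talkWithDriver(): it bundles whatever is queued in mOut (to write) and the capacity of mIn (to read) into a single binder_write_read and issues one BINDER_WRITE_READ ioctl. The sketch below is heavily abridged; the real function also handles partial reads/writes, EINTR retries, and command logging:

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;

    // Send everything queued in mOut...
    bwr.write_size = mOut.dataSize();
    bwr.write_buffer = (uintptr_t)mOut.data();

    // ...and, if requested, read replies/commands back into mIn.
    if (doReceive) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;

    // One syscall covers both directions; the thread blocks here
    // until the driver has something for it.
    status_t err;
    if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
        err = NO_ERROR;
    else
        err = -errno;

    // (Bookkeeping that advances mOut/mIn by the consumed sizes omitted.)
    return err;
}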
MediaPlayerService's message loop
void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        // Spawn the first pool thread; isMain=true means it registers
        // with BC_ENTER_LOOPER and never exits on a timeout.
        spawnPooledThread(true);
    }
}

void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        String8 name = makeBinderThreadName();
        ALOGV("Spawning new pooled thread, name=%s\n", name.string());
        sp<Thread> t = new PoolThread(isMain);
        t->run(name.string());
    }
}
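PoolThread (defined in ProcessState.cpp) is a trivial Thread subclass whose only job is to call joinThreadPool(), which is how each spawned thread ends up in the loop shown next:

class PoolThread : public Thread
{
public:
    PoolThread(bool isMain)
        : mIsMain(isMain)
    {
    }

protected:
    virtual bool threadLoop()
    {
        // Park this thread in the binder thread pool.
        IPCThreadState::self()->joinThreadPool(mIsMain);
        return false;   // run once; joinThreadPool only returns on exit
    }

    const bool mIsMain;
};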
IPCThreadState::joinThreadPool()
void IPCThreadState::joinThreadPool(bool isMain)
{
    LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());

    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    // This thread may have been spawned by a thread that was in the background
    // scheduling group, so first we will make sure it is in the foreground
    // one to avoid performing an initial transaction in the background.
    set_sched_policy(mMyThreadId, SP_FOREGROUND);

    status_t result;
    do {
        //++[Debug][mark_chen][2014/07/11][Binder] Binder transaction bad command issue
        mCanTransact = false;
        processPendingDerefs();
        mCanTransact = true;
        //--[Debug][mark_chen][2014/07/11][Binder] Binder transaction bad command issue

        // now get the next command to be processed, waiting if necessary
        result = getAndExecuteCommand();

        if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
            ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
                  mProcess->mDriverFD, result);
            abort();
        }

        // Let this thread exit the thread pool if it is no longer
        // needed and it is not the main process thread.
        if (result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);

    //++[Debug][mark_chen][2015/05/27][Binder] Add log for debug
    //LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%p\n",
    //    (void*)pthread_self(), getpid(), (void*)result);
    ALOGD("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%p, fd: %d\n",
          (void*)pthread_self(), getpid(), (void*)(intptr_t)result, mProcess->mDriverFD);
    //--[Debug][mark_chen][2015/05/27][Binder] Add log for debug

    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}
IPCThreadState::getAndExecuteCommand()
status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;

    // Exchange data with the Binder driver.
    result = talkWithDriver();
    if (result >= NO_ERROR) {
        ...
        // Read the next command from mIn.
        cmd = mIn.readInt32();
        ...
        // Have executeCommand() process it.
        result = executeCommand(cmd);
        ...
    }
    return result;
}
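Inside executeCommand(), the case that matters for a server is BR_TRANSACTION: the driver hands back the cookie it recorded for the target binder node, which is exactly the BBinder pointer, and the call is dispatched to it. An abridged sketch of that case (logging and cleanup trimmed):

// Abridged from IPCThreadState::executeCommand()
case BR_TRANSACTION:
    {
        binder_transaction_data tr;
        result = mIn.read(&tr, sizeof(tr));
        if (result != NO_ERROR) break;

        // Wrap the driver-owned buffer in a Parcel without copying.
        Parcel buffer;
        buffer.ipcSetDataReference(
            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
            tr.data_size,
            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
            tr.offsets_size / sizeof(binder_size_t), freeBuffer, this);

        Parcel reply;
        status_t error;
        if (tr.target.ptr) {
            // tr.cookie is the BBinder the driver stored for this node;
            // transact() ends up in the BnINTERFACE's onTransact().
            sp<BBinder> b((BBinder*)tr.cookie);
            error = b->transact(tr.code, buffer, &reply, tr.flags);
        } else {
            error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
        }

        if ((tr.flags & TF_ONE_WAY) == 0) {
            sendReply(reply, 0);   // synchronous call: send the reply back
        }
    }
    break;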
Sending the getService request
Above, with MediaPlayerService as the example, we saw how a Server registers itself with ServiceManager via an addService request.
Next, with MediaPlayer's lookup of MediaPlayerService as the example, we look at how a Client obtains a Server's entry point from ServiceManager via a getService request.
Sequence diagram of getService
MediaPlayer's getService entry point
sp<IMediaPlayerService> IMediaDeathNotifier::sMediaPlayerService;
...
const sp<IMediaPlayerService>& IMediaDeathNotifier::getMediaPlayerService()
{
    ...
    if (sMediaPlayerService == 0) {
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        do {
            binder = sm->getService(String16("media.player"));
            ...
            usleep(500000); // 0.5 s
        } while (true);
        ...
    }
    return sMediaPlayerService;
}
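On the proxy side, sm->getService() is BpServiceManager::getService() (from IServiceManager.cpp): it retries checkService() a few times, and checkService() is a textbook Binder call, write the name, transact, read a strong binder back:

virtual sp<IBinder> getService(const String16& name) const
{
    unsigned n;
    for (n = 0; n < 5; n++) {
        sp<IBinder> svc = checkService(name);
        if (svc != NULL) return svc;
        ALOGI("Waiting for service %s...\n", String8(name).string());
        sleep(1);
    }
    return NULL;
}

virtual sp<IBinder> checkService(const String16& name) const
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
    return reply.readStrongBinder();
}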
Binder IPC at the Java layer (AIDL)
1. Create your .aidl file
package com.cao.android.demos.binder.aidl;
import com.cao.android.demos.binder.aidl.AIDLActivity;

interface AIDLService {
    void registerTestCall(AIDLActivity cb);
    void invokCallBack();
}
2. Implement the server side
private final AIDLService.Stub mBinder = new AIDLService.Stub() {
    @Override
    public void invokCallBack() throws RemoteException {
        Log("AIDLService.invokCallBack");
        Rect1 rect = new Rect1();
        rect.bottom = -1;
        rect.left = -1;
        rect.right = 1;
        rect.top = 1;
        // callback is the AIDLActivity proxy previously saved by registerTestCall().
        callback.performAction(rect);
    }
};
3. Implement the client (proxy) side
AIDLService mService;

private ServiceConnection mConnection = new ServiceConnection() {
    public void onServiceConnected(ComponentName className, IBinder service) {
        Log("connect service");
        // Convert the raw IBinder into the AIDL interface (returns the
        // local object in-process, a generated proxy across processes).
        mService = AIDLService.Stub.asInterface(service);
        try {
            mService.registerTestCall(mCallback);
        } catch (RemoteException e) {
        }
    }

    public void onServiceDisconnected(ComponentName className) {
        Log("disconnect service");
    }
};
Key base classes
Base class IInterface
Provides the interface for the server side; its subclasses declare all the methods that the service can perform.
Base class IBinder
Both BBinder and BpBinder are subclasses of IBinder, so IBinder can be seen as defining the Binder IPC protocol; BBinder and BpBinder do the receiving and sending within that protocol framework, and together they form the basic Binder IPC machinery.
Base class BpRefBase
After the client queries the ServiceManager (SM) and obtains the BpBinder it needs, BpRefBase is what manages that BpBinder instance (it holds it in its mRemote member).
Two interface classes
1.BpINTERFACE
If a client wants to communicate over Binder IPC, it first queries SM for the BpBinder of the server-side service; on the client side this object acts as the server's remote proxy. To let the client call a remote server as if it were a local object, the server exposes an interface, and the client builds a BpINTERFACE on top of that interface. Through this object, client code can invoke the server's methods just like local calls, without caring about the underlying Binder IPC implementation.
Here is the prototype of BpINTERFACE:
class BpINTERFACE : public BpInterface<IINTERFACE>
Following the inheritance chain upward:
template<typename INTERFACE>
class BpInterface : public INTERFACE, public BpRefBase
So BpINTERFACE inherits from both INTERFACE and BpRefBase.
2.BnINTERFACE
When defining an Android native service, every service inherits from BnINTERFACE (where INTERFACE is the service name). BnINTERFACE defines an onTransact function, which is responsible for unpacking the received Parcel and executing the method that the client requested.
Following BnINTERFACE's inheritance chain upward:
class BnINTERFACE: public BnInterface<IINTERFACE>
IINTERFACE is the interface class shared by the client-side proxy BpINTERFACE and the server-side BnINTERFACE; its purpose is to keep the service's methods consistent between the client and server ends.
One level further up:
class BnInterface : public INTERFACE, public BBinder
Here we also run into the BBinder type. What is it for? Since every service can be regarded as a binder, the real server-side binder operations and state maintenance are implemented by inheriting from BBinder; BBinder is what makes a service a binder. For reference, here is (part of) the IBinder declaration:
class IBinder : public virtual RefBase
{
public:
    ...
    virtual sp<IInterface> queryLocalInterface(const String16& descriptor); // returns an IInterface object
    ...
    virtual const String16& getInterfaceDescriptor() const = 0;
    virtual bool isBinderAlive() const = 0;
    virtual status_t pingBinder() = 0;
    virtual status_t dump(int fd, const Vector<String16>& args) = 0;

    virtual status_t transact(uint32_t code,
                              const Parcel& data,
                              Parcel* reply,
                              uint32_t flags = 0) = 0;

    virtual status_t linkToDeath(const sp<DeathRecipient>& recipient,
                                 void* cookie = NULL,
                                 uint32_t flags = 0) = 0;

    virtual status_t unlinkToDeath(const wp<DeathRecipient>& recipient,
                                   void* cookie = NULL,
                                   uint32_t flags = 0,
                                   wp<DeathRecipient>* outRecipient = NULL) = 0;
    ...
    virtual BBinder* localBinder();   // returns a BBinder object (non-NULL on the server side)
    virtual BpBinder* remoteBinder(); // returns a BpBinder object (non-NULL on the proxy side)
};
So what, then, is the difference between BBinder and BpBinder?
It is actually simple: BpBinder is the proxy the client creates for sending messages, while BBinder is the channel through which the server receives them. If you read the two classes, you will see that although both have a transact method, the two do different jobs: BpBinder::transact sends a message to the IPCThreadState instance, telling it there is a message to deliver to the Binder driver (BD); BBinder::transact is invoked when the IPCThreadState instance receives a message from the driver, and it hands the call to the onTransact function of its subclass BnSERVICE, which carries out the server-side operation.
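The receive path is visible in BBinder::transact (from Binder.cpp, lightly abridged): aside from a couple of special codes, everything is funneled into the subclass's onTransact():

status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    data.setDataPosition(0);

    status_t err = NO_ERROR;
    switch (code) {
        case PING_TRANSACTION:
            reply->writeInt32(pingBinder());
            break;
        default:
            // The generated BnINTERFACE::onTransact unpacks the Parcel
            // and calls the concrete service method.
            err = onTransact(code, data, reply, flags);
            break;
    }

    if (reply != NULL) {
        reply->setDataPosition(0);
    }

    return err;
}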
A brief introduction to Parcel
Parcel is the basic unit of communication in Binder IPC: it stores the parameters of the function calls made between client and server. Parcel can only store primitive data types;
complex types must be broken down into primitives before being stored.
Plain Parcel reads and writes need no further introduction; below we focus on two functions:
1.writeStrongBinder
When the client needs to send a binder to the server, it calls this function. For example:
virtual status_t addService(const String16& name, const sp<IBinder>& service)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}
Here is the body of writeStrongBinder:
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}
Drilling further into flatten_binder:
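The body below is an abridged sketch of flatten_binder as it appears in the Parcel.cpp of this era (the NULL-binder branch and error logging are trimmed); the key point is the branch on localBinder():

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            // A proxy: send the remote handle.
            BpBinder *proxy = binder->remoteBinder();
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0;
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // A local BBinder (e.g. a BnINTERFACE): send the object itself.
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    }
    // (NULL-binder case omitted)
    return finish_flatten_binder(binder, obj, out);
}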
Take addService as the example again: its service parameter is a pointer to a BnINTERFACE type, and BnINTERFACE inherits from BBinder,
BBinder* BBinder::localBinder()
{
    return this;
}
so the binder type written into the Parcel is BINDER_TYPE_BINDER. Yet when reading SM's code you will notice that SM refuses to add a service to svclist unless the binder type it receives is BINDER_TYPE_HANDLE. Obviously every service does get added successfully, so although addService sets out with type BINDER_TYPE_BINDER, SM receives BINDER_TYPE_HANDLE. What happens in between?
It took me a long time to get to the bottom of this. The answer lies in the Binder driver (BD) itself (drivers/staging/android/binder.c):
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply)
{
    ...
    /* A local binder object crossing a process boundary is rewritten
     * into a handle that is valid in the receiving process. */
    if (fp->type == BINDER_TYPE_BINDER)
        fp->type = BINDER_TYPE_HANDLE;
    else
        fp->type = BINDER_TYPE_WEAK_HANDLE;
    fp->handle = ref->desc;
    ...
}
Having read through the addService code, you will see that SM stores only the handle of the service's binder together with the service's name. So when a client wants to talk to a service, how does it get the service's binder? That is the job of the next function.
2.readStrongBinder
When the server receives a client's request and needs to return a binder, it sends that binder back through the Binder driver; when the IPCThreadState instance receives the returned Parcel, the client can use this function to read out the binder that the server returned.
sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}
Looking inside unflatten_binder:
status_t unflatten_binder(const sp<ProcessState>& proc,
                          const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->type) {
            case BINDER_TYPE_BINDER:
                // The binder lives in this process: the cookie is the
                // BBinder pointer itself.
                *out = static_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(NULL, *flat, in);
            case BINDER_TYPE_HANDLE:
                // Remote binder: build (or reuse) a BpBinder for the handle.
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
So if the binder returned by the server is of type BINDER_TYPE_BINDER, that is, it refers to a binder object living in the reading process itself, the binder is obtained directly from the flat object; if the type is BINDER_TYPE_HANDLE, that is, the server returned only the binder's handle, then a BpBinder must be created (or reused) for that handle and returned to the client.
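Putting it all together, a typical native client only ever touches the top of this stack. A minimal sketch (assuming the standard IServiceManager and IMediaPlayerService headers):

#include <binder/IServiceManager.h>
#include <media/IMediaPlayerService.h>

using namespace android;

sp<IMediaPlayerService> getMediaPlayer() {
    // getService() returns the BpBinder that unflatten_binder built
    // via getStrongProxyForHandle() for the handle SM handed back.
    sp<IBinder> binder =
            defaultServiceManager()->getService(String16("media.player"));
    // interface_cast wraps it in the BpMediaPlayerService proxy.
    return interface_cast<IMediaPlayerService>(binder);
}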