音频framework与中间层分析
1.如何追踪问题
我这里遇到的是在通话中开免提,对方无法听到我的声音
(1).在ADT的tool目录下找到hierarchyviewer,用这个工具找到我们的界面上的图标,得到id(audioButton), 然后就可以去相应的APK里面去找那个id了
(2).跟踪代码,根据流程电话界面开免提的过程,去找问题
2.分析APK和framework中的调用流程,这里的APK是通话界面的APK
CallButtonFragment.java (packages\apps\incallui\src\com\android\incallui)
onCreateView
mAudioButton = (CompoundButton) parent.findViewById(R.id.audioButton); //找到调用的地方
当按键按下时的操作
onClick(View view)
case R.id.audioButton:
onAudioButtonClicked();
getPresenter().toggleSpeakerphone(); //如果不是蓝牙状态
toggleSpeakerphone
//搜索到这个函数在CallButtonPresenter.java (packages\apps\incallui\src\com\android\incallui)
toggleSpeakerphone
int newMode = AudioState.ROUTE_SPEAKER;
setAudioMode(newMode);
TelecomAdapter.getInstance().setAudioRoute(mode); //得到TelecomAdapter类,并调用它的setAudioRoute
// TelecomAdapter.java (packages\apps\incallui\src\com\android\incallui)
mPhone.setAudioRoute(route);
//调用Phone.java (frameworks\base\telecomm\java\android\telecom)的函数
setAudioRoute
mInCallAdapter.setAudioRoute(route);
//调用InCallAdapter.java (frameworks\base\telecomm\java\android\telecom)
mAdapter.setAudioRoute(route);
//调用InCallAdapter.java (packages\services\telecomm\src\com\android\server\telecom)
mHandler.obtainMessage(MSG_SET_AUDIO_ROUTE, route, 0).sendToTarget();
case MSG_SET_AUDIO_ROUTE: //发送消息在这里处理
mCallsManager.setAudioRoute(msg.arg1);
//CallsManager.java (packages\services\telecomm\src\com\android\server\telecom)
mCallAudioManager.setAudioRoute(route);
//CallAudioManager.java (packages\services\telecomm\src\com\android\server\telecom)
setSystemAudioState(mAudioState.isMuted(), newRoute,mAudioState.getSupportedRouteMask());
setSystemAudioState(false /* force */, isMuted, route, supportedRouteMask);
接上面
setSystemAudioState(false /* force */, isMuted, route, supportedRouteMask);
turnOnSpeaker(true);
mAudioManager.setSpeakerphoneOn(on);
//AudioManager.java (frameworks\base\media\java\android\media)
service.setSpeakerphoneOn(on);
IAudioService service = getService(); //得到service,通过binder
if (sService != null) {
return sService;
}
IBinder b = ServiceManager.getService(Context.AUDIO_SERVICE); //得到binder服务的引用对象
sService = IAudioService.Stub.asInterface(b); //把引用对象包装为代理对象
return sService;
service.setSpeakerphoneOn(on); //调用service的函数
通过binder调用AudioService.java (frameworks\base\media\java\android\media)的函数
setSpeakerphoneOn
mForcedUseForComm = AudioSystem.FORCE_SPEAKER;
sendMsg(mAudioHandler, MSG_SET_FORCE_USE, SENDMSG_QUEUE, AudioSystem.FOR_COMMUNICATION, mForcedUseForComm, null, 0); //发送消息MSG_SET_FORCE_USE
case MSG_SET_FORCE_USE:
case MSG_SET_FORCE_BT_A2DP_USE:
setForceUse(msg.arg1, msg.arg2); //调用这个函数
AudioSystem.setForceUse(usage, config); //调用这里,这里就开始调用到jni
AudioSystem.java (frameworks\base\media\java\android\media)
public static native int setForceUse(int usage, int config); //这个在jni中
3.中间层调用流程
android_media_AudioSystem.cpp (frameworks\base\core\jni)
{"setForceUse", "(II)I", (void *)android_media_AudioSystem_setForceUse},
//这里执行AudioSystem::setForceUse函数
check_AudioSystem_Command(AudioSystem::setForceUse(static_cast <audio_policy_force_use_t>(usage), static_cast <audio_policy_forced_cfg_t>(config)));
sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); //得到policy_service
sp<IServiceManager> sm = defaultServiceManager();
binder = sm->getService(String16("media.audio_policy")); //这两句得到media.audio_policy的引用对象
gAudioPolicyServiceClient = new AudioPolicyServiceClient(); //如果没有AudioPolicyServiceClient对象,就new一个
//调用这个IBinder的linkToDeath函数进行注册。可以注册一个IBinder.DeathRecipient类型的对象。其中IBinder.DeathRecipient是IBinder类中定义的一个嵌入类
//当这个IBinder所对应的Service进程被异常的退出时,比如被kill掉,这时系统会调用这个IBinder之前通过linkToDeath注册的DeathRecipient类对象的binderDied函数。
//一般实现中,Bp端会注册linkToDeath,目的是为了监听绑定的Service的异常退出,一般的binderDied函数的实现是用来释放一些相关的资源。
binder->linkToDeath(gAudioPolicyServiceClient);
//新创建一个AudioPolicyService对象并返回,且在创建BpAudioPolicyService时把binder做为其参数,结果是把binder对象赋值给其基类BpRefBase中的mRemote来保存。
//展开后最终是生成调用new BpAudioPolicyService(new BpBinder(handle)),这里的handle是一个句柄;这样我们最终得到了AudioPolicyService的代理BpAudioPolicyService,通过它就可以和AudioPolicyService的本地接口BnAudioPolicyService通讯了。
gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
apc = gAudioPolicyServiceClient;
ap = gAudioPolicyService;
ap->registerClient(apc); //注册gAudioPolicyServiceClient
aps->getForceUse(usage); //下面单独讲解
AudioPolicyManager::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
AudioPolicyManager.cpp (frameworks\av\services\audiopolicy)
setForceUse
checkA2dpSuspend(); //A2DP全名是Advanced Audio Distribution Profile 蓝牙音频传输模型协定
checkOutputForAllStrategies //校验输出策略
checkOutputForStrategy(STRATEGY_SONIFICATION) //这里只分析STRATEGY_SONIFICATION,其他情况基本一样
audio_devices_t oldDevice = getDeviceForStrategy(strategy, true /*fromCache*/); //得到原来的策略
return mDeviceForStrategy[strategy]; //直接返回mDeviceForStrategy[STRATEGY_SONIFICATION];
audio_devices_t newDevice = getDeviceForStrategy(strategy, false /*fromCache*/); //getDeviceForStrategy.得到AUDIO_DEVICE_OUT_SPEAKER
audio_devices_t availableOutputDeviceTypes = mAvailableOutputDevices.types(); //得到可用的output设备
case STRATEGY_SONIFICATION:
if (isInCall()) { //打电话中
device = getDeviceForStrategy(STRATEGY_PHONE, false /*fromCache*/); //应该返回device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
case STRATEGY_PHONE:
getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION); //得到输入源和混音器,这里是mic
getDeviceForInputSource(inputSource); //得到输入源
case AUDIO_SOURCE_VOICE_COMMUNICATION: // audio_source_t类型的一些判断
case AUDIO_POLICY_FORCE_SPEAKER:
device = AUDIO_DEVICE_IN_BACK_MIC; //返回mic
updateDevicesAndOutputs();
mDeviceForStrategy[i] = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/); //更新设备策略
mPreviousOutputs = mOutputs; //记录现在的输出设备
if (mPhoneState == AUDIO_MODE_IN_CALL) { //如果在电话中
//可见他们都是PlaybackThread的子类,将该thread添加到mPlaybackThreads中,mPlaybackThreads是一个vector,它以id作为索引,将该线程保存起来,并返回给调用者,后续播放声音时候通过传进该id(也就是audio_io_handle_t),从该vector取就可以了。
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/); //这里mPrimaryOutput就是audio_io_handle_t句柄,调用相应的播放线程,在audioflinger.cpp中一些常用的函数,播放声音时候首先创建播放线程
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); //得到输出描述符
device = getDeviceForStrategy(STRATEGY_PHONE, fromCache); //得到输出设备
updateCallRouting(newDevice);
txDevice = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION); //得到输入源和混音器,这里是mic
outputs = getOutputsForDevice(rxDevice, mOutputs);
setOutputDevice(mPrimaryOutput, rxDevice, true, delayMs);
//mpClientInterface->setParameters(mHardwareOutput, param.toString(), delayMs); //改变 route ,最终会调到 ALSAControl 中的 set 函数来设置 codec 的 switch 或者 widget 。
outputDesc->toAudioPortConfig(&patch.sources[0]); //5.0新引入的audio patch机制,单独有一章分析
AudioPolicyManager::AudioOutputDescriptor::toAudioPortConfig
AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig); //config port
deviceList.itemAt(i)->toAudioPortConfig(&patch.sinks[i]);
AudioPolicyManager::DeviceDescriptor::toAudioPortConfig //config port
mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs); //创建AudioPatch,单独分析
applyStreamVolumes(output, device, delayMs);
audio_io_handle_t activeInput = getActiveInput(); //得到输入
setInputDevice(activeInput, getNewInputDevice(activeInput)); //设置输入
特别说明:
// 调用的其实是函数 AudioPolicyService::setParameters
// 会通过函数 AudioPolicyService::AudioCommandThread::parametersCommand 向 AudioCommandThread 的 command list
// 添加一个 command
// AudioPolicyService::AudioCommandThread::threadLoop 函数中会处理 command list 中的 command
// 对于 SET_PARAMETERS command ,最终调用了函数 AudioSystem::setParameters
// 调用了 AudioFlinger::setParameters 函数
// 调用了 AudioFlinger::ThreadBase::setParameters 函数添加成员到 mNewParameters
// 函数 AudioFlinger::MixerThread::checkForNewParameters_l 中会处理 mNewParameters 中的参数
// 函数 AudioFlinger::MixerThread::threadLoop 会调用函数 AudioFlinger::MixerThread::checkForNewParameters_l
mpClientInterface->setParameters(mHardwareOutput, param.toString(), delayMs); //改变 route ,最终会调到 ALSAControl 中的 set 函数来设置 codec 的 switch 或者 widget 。
// update stream volumes according to new device
// 设置 device 上各 stream 对应的音量
// 其中的实现是遍历各 stream ,调用函数 checkAndSetVolume 将 AudioOutputDescriptor 保存的各 stream 的音量进行设置
// checkAndSetVolume 函数的实现在后面有看
applyStreamVolumes(output, device, delayMs);
PatchPanel.cpp (frameworks\av\services\audioflinger)
/* Connect a patch between several source and sink ports */
AudioFlinger::createAudioPatch
AudioFlinger::PatchPanel::createAudioPatch
delete removedPatch; //删除patch
Patch *newPatch = new Patch(patch); //创建一个patch
audio_module_handle_t srcModule = patch->sources[0].ext.mix.hw_module; //得到audio_module_handle_t ,应该是hw层的
ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule); //得到在mAudioHwDevs里面的index
AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index); //得到HW
sp<ThreadBase> thread = audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle); //
//这个函数的意思是根据output值,从一堆线程中找到对应的那个线程:AudioFlinger.cpp (frameworks\av\services\audioflinger)
AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(audio_io_handle_t output) const
status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
//Threads.cpp (frameworks\av\services\audioflinger)
status_t AudioFlinger::ThreadBase::sendCreateAudioPatchConfigEvent
sp<ConfigEvent> configEvent = (ConfigEvent *)new CreateAudioPatchConfigEvent(*patch, *handle);
mData = new CreateAudioPatchConfigEventData(patch, handle); //new
const struct audio_patch mPatch; //patch结构体
audio_patch_handle_t mHandle; //audio_patch_handle_t 结构体
status_t status = sendConfigEvent_l(configEvent); //
status_t AudioFlinger::ThreadBase::sendConfigEvent_l(sp<ConfigEvent>& event)
mConfigEvents.add(event); //加入到events中
mWaitWorkCV.signal(); //通过mWaitWorkCV.signal()唤醒void AudioFlinger::ThreadBase::processConfigEvents_l()
void AudioFlinger::ThreadBase::processConfigEvents_l()
event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
status_t AudioFlinger::PlaybackThread::createAudioPatch_l(const struct audio_patch *patch,
audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
status = hwDevice->create_audio_patch(hwDevice, patch->num_sources, patch->sources, patch->num_sinks, patch->sinks, handle); //调用HW的模块, 单独分析
if (event->mCond.waitRelative(event->mLock, kConfigEventTimeoutNs) != NO_ERROR) //线程B和C的超时等待,B和C可以指定等待时间,当超过这个时间,条件却还不满足,则退出等待。
status = event->mStatus; //返回结果
CreateAudioPatchConfigEventData *data = (CreateAudioPatchConfigEventData *)configEvent->mData.get(); //得到CreateAudioPatchConfigEventData数据
*handle = data->mHandle; //返回audio_patch_handle_t
重要结构体:
// Base class for config events posted to an AudioFlinger thread's mConfigEvents
// queue via sendConfigEvent_l() (see L146-L149 above): the sender adds the event,
// signals mWaitWorkCV, then blocks on mCond until processConfigEvents_l() fills
// in mStatus on the thread side.
class ConfigEvent: public RefBase {
public:
virtual ~ConfigEvent() {}
// Forwards to the payload's dump. NOTE(review): mData starts as NULL (see the
// protected ctor) — presumably subclasses always set it before dump() can be
// called, otherwise this dereferences NULL; confirm against the subclasses.
void dump(char *buffer, size_t size) { mData->dump(buffer, size); }
const int mType; // event type e.g. CFG_EVENT_IO; immutable after construction
Mutex mLock; // mutex associated with mCond
Condition mCond; // condition for status return (sender waits, processing thread signals)
status_t mStatus; // status communicated to sender
bool mWaitStatus; // true if sender is waiting for status
sp<ConfigEventData> mData; // event specific parameter data (e.g. CreateAudioPatchConfigEventData)
protected:
// Protected: only concrete subclasses (e.g. CreateAudioPatchConfigEvent)
// construct this; they are expected to assign mData themselves.
ConfigEvent(int type) : mType(type), mStatus(NO_ERROR), mWaitStatus(false), mData(NULL) {}
};
四.调用HA层
Audio_hw_hal.cpp (vendor\mediatek\proprietary\platform\mt6735\hardware\audio\common\hardware\audio\aud_drv)
hwDevice->create_audio_patch(hwDevice, patch->num_sources, patch->sources, patch->num_sinks, patch->sinks, handle);
static int adev_create_audio_patch //在Audio_hw_hal.cpp
ladev->hwif->createAudioPatch(num_sources,sources,num_sinks,sinks,handle);
//AudioALSAHardware.cpp (vendor\mediatek\proprietary\platform\mt6735\hardware\audio\common\hardware\audio\v3\aud_drv)
AudioALSAHardware::createAudioPatch
if (sources[0].type == AUDIO_PORT_TYPE_MIX)
eOutDeviceList |= sinks[dDeviceIndex].ext.device.type; //得到输出type
param.addInt(String8(AudioParameter::keyRouting), (int)eOutDeviceList); //把eOutDeviceList 放入keyRouting这个key
status = mStreamManager->setParameters(param.toString(), sources[0].ext.mix.handle);
//AudioALSAStreamManager.cpp (vendor\mediatek\proprietary\platform\mt6735\hardware\audio\aud_drv)
AudioALSAStreamManager::setParameters
index = mStreamOutVector.indexOfKey(IOport); //stream out的handle
AudioALSAStreamOut *pAudioALSAStreamOut = mStreamOutVector.valueAt(index); //得到AudioALSAStreamOut结构体
status = pAudioALSAStreamOut->setParameters(keyValuePairs);
//AudioALSAStreamOut.cpp (vendor\mediatek\proprietary\platform\mt6735\hardware\audio\aud_drv)
AudioALSAStreamOut::setParameters
status = mStreamManager->routingOutputDevice(mStreamAttributeSource.output_devices, static_cast<audio_devices_t>(value));
AudioALSAStreamManager::routingOutputDevice
// update the output device info for voice wakeup (even when "routing=0"), 更新信息
mAudioALSAVoiceWakeUpController->updateDeviceInfoForVoiceWakeUp();
bool bIsUseHeadsetMic = AudioMTKHeadSetMessager::getInstance()->isHeadsetPlugged(); //是否有耳机插入
setVoiceWakeUpEnable(true); //enable
mixer_ctl_set_enum_by_string(mixer_get_ctl_by_name(mMixer, "Audio_Vow_MIC_Type_Select"), "HeadsetMIC") //调用external/tinyalsa/,后面分析
// update if headset change
mHeadsetChange = CheckHeadsetChange(current_output_devices, output_devices); //看看headset是否变化
mAudioALSAVolumeController->setVoiceVolume(mAudioALSAVolumeController->getVoiceVolume(), mAudioMode , output_devices); //设置音量
AudioALSAStreamManager::setVoiceVolume
setAMPGain(ampgain, AMP_CONTROL_POINT,device); //设置
mSpeechPhoneCallController->routing( output_devices, mSpeechPhoneCallController->getInputDeviceForPhoneCall(output_devices));
//关闭原来的设备
// Set PMIC digital/analog part - uplink has pop, open first
mHardwareResourceManager->startInputDevice(input_device); //设置输入device
// Set PMIC digital/analog part - DL need trim code.
mHardwareResourceManager->startOutputDevice(output_device, sample_rate); //设置输出device,后面还有很多其他操作
//AudioALSAHardwareResourceManager.cpp (vendor\mediatek\proprietary\platform\mt6735\hardware\audio\aud_drv)
AudioALSAHardwareResourceManager::startOutputDevice
OpenSpeakerPath(SampleRate);
mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_HEADPHONE);
mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_EXT_SPEAKER);
mixer_ctl_set_enum_by_string(mixer_get_ctl_by_name(mMixer, cltname.string()), cltvalue.string()) //调用external/tinyalsa
五.调用tinyalsa
分析mixer_ctl_set_enum_by_string
external/tinyalsa/mixer.c
mixer_ctl_set_enum_by_string
ret = ioctl(ctl->mixer->fd, SNDRV_CTL_IOCTL_ELEM_WRITE, &ev); //直接调用ioctl与底层交互