Using A2DP and HFP
2015-09-04 10:51 fingertouch
Over the past couple of days I have been reading up on the A2DP and HFP profiles in the CSR libraries, mainly looking at how to use these two profiles on the CSR8670.
1. The A2DP Profile
A2DP (Advanced Audio Distribution Profile) is the Bluetooth profile for high-quality audio distribution; it is mainly used to stream audio over a Bluetooth link. Below is a brief note on how to use A2DP on the CSR8670:
- Call ConnectionWriteScanEnable(hci_scan_enable_page) to enable page scanning, so that a remote device can page (connect to) the Bluetooth device;
- Call A2dpInit() to initialise the A2DP library;
- Write a handleA2dpMessage() function to handle the A2DP library's messages (a minimal sketch of these steps follows the list).
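To make the sequence concrete, here is a minimal bring-up sketch (not complete application code: it assumes theSink.task dispatches A2DP messages to handleA2dpMessage() and that codecList[] is the application's SEP/codec table, exactly as in the handlers shown later):

/* Minimal bring-up sketch for the steps above (assumptions noted in the lead-in). */
static void sinkA2dpStartup(void)
{
    static sep_data_type seps[1];

    /* step 1: enable page scanning so a remote A2DP source can connect to us */
    ConnectionWriteScanEnable(hci_scan_enable_page);

    /* step 2: register one sink stream endpoint and initialise the A2DP library;
       the library replies with A2DP_INIT_CFM, which is handled in handleA2dpMessage() */
    seps[0].sep_config = codecList[0].config;
    seps[0].in_use     = FALSE;
    A2dpInit(&theSink.task, A2DP_INIT_ROLE_SINK, NULL, 1, seps, 60);
}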
Note 1: AudioConnect() works by loading and running a Kalimba DSP application, so the project directory must contain the corresponding Kalimba program, and the rules to build/copy that program have to be added to the main application's .mak file. For example:
######################################################################################################
##### A2DP DECODER VERSIONS
######################################################################################################

# copy in sbc decoder
image/sbc_decoder/sbc_decoder.kap :
	$(mkdir) image/sbc_decoder
	$(copyfile) ..\..\kalimba\apps\a2dp_sink\image\sbc_decoder\sbc_decoder.kap $@

image.fs : image/sbc_decoder/sbc_decoder.kap

# copy in a2dp_low_latency_1mic decoder
image/a2dp_low_latency_1mic/a2dp_low_latency_1mic.kap :
	$(mkdir) image/a2dp_low_latency_1mic
	$(copyfile) ..\..\kalimba\apps\a2dp_low_latency_1mic\image\a2dp_low_latency_1mic\a2dp_low_latency_1mic.kap $@

image.fs : image/a2dp_low_latency_1mic/a2dp_low_latency_1mic.kap
Note 2: AudioLibraryInit() must be called to initialise the Audio library before AudioConnect() is used.
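For example, a one-line sketch of where that call goes (typically in the application's start-up code, alongside the other library initialisation):

/* initialise the Audio library once at start-up, before the first AudioConnect() call */
AudioLibraryInit();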
Part of the message-handling code:
static void handleA2dpMessage(Task task, MessageId id, Message message)
{
    Sink sink;
    AUDIO_PLUGIN_SET_VOLUME_A2DP_MSG_T volumeInitAudio;
    a2dp_codec_settings *codec_settings;
    AUDIO_MODE_T mode = AUDIO_MODE_CONNECTED;
    A2dpPluginConnectParams a2dp_audio_connect_params;
    AudioPluginFeatures PluginFeatures;

    MAIN_DEBUG(("A2dpMessage Received: [%x]\n", id));

    switch(id)
    {
        case A2DP_INIT_CFM:
            MAIN_DEBUG(("A2DP_INIT_CFM : \n"));
            if( ((A2DP_INIT_CFM_T*)message)->status == a2dp_success )
            {
                MAIN_DEBUG(("A2DP Init success : \n"));
            }
            break;

        case A2DP_SIGNALLING_CONNECT_IND:
            /* accept the incoming signalling connection */
            MAIN_DEBUG(("A2DP_SIGNALLING_CONNECT_IND : \n"));
            A2dpSignallingConnectResponse(((A2DP_SIGNALLING_CONNECT_IND_T *)message)->device_id, TRUE);
            break;

        case A2DP_SIGNALLING_CONNECT_CFM:
            /* signalling channel is up, open a media channel */
            MAIN_DEBUG(("A2DP_SIGNALLING_CONNECT_CFM : \n"));
            A2dpMediaOpenRequest(((A2DP_SIGNALLING_CONNECT_CFM_T*)message)->device_id, 0, NULL);
            break;

        case A2DP_MEDIA_OPEN_IND:
            /* accept the media channel opened by the remote device */
            MAIN_DEBUG(("A2DP_MEDIA_OPEN_IND : \n"));
            A2dpMediaOpenResponse(((A2DP_MEDIA_OPEN_IND_T*)message)->device_id, TRUE);
            break;

        case A2DP_MEDIA_OPEN_CFM:
            MAIN_DEBUG(("A2DP_MEDIA_OPEN_CFM : \n"));
            if( ((A2DP_MEDIA_OPEN_CFM_T*)message)->status == a2dp_success )
            {
                MAIN_DEBUG(("A2DP_MEDIA_OPEN_CFM SUCCESS: \n"));
            }
            else
            {
                MAIN_DEBUG(("A2DP_MEDIA_OPEN_CFM FAIL: \n"));
            }
            break;

        case A2DP_MEDIA_START_IND:
            /* the remote device wants to start streaming */
            MAIN_DEBUG(("A2DP_MEDIA_START_IND : \n"));
            A2dpMediaStartResponse(((A2DP_MEDIA_START_IND_T*)message)->device_id,
                                   ((A2DP_MEDIA_START_IND_T*)message)->stream_id, TRUE);
            break;

        case A2DP_MEDIA_START_CFM:
            MAIN_DEBUG(("A2DP_MEDIA_START_CFM : \n"));
            if( ((A2DP_MEDIA_START_CFM_T*)message)->status == a2dp_success )
            {
                MAIN_DEBUG(("A2DP_MEDIA_START_CFM SUCCESS: \n"));
                sink = A2dpMediaGetSink(((A2DP_MEDIA_START_CFM_T*)message)->device_id,
                                        ((A2DP_MEDIA_START_CFM_T*)message)->stream_id);
                if(sink)
                {
                    codec_settings = A2dpCodecGetSettings(((A2DP_MEDIA_START_CFM_T*)message)->device_id,
                                                          ((A2DP_MEDIA_START_CFM_T*)message)->stream_id);
                    if(codec_settings)
                    {
                        a2dp_audio_connect_params.packet_size          = codec_settings->codecData.packet_size;        /* packet size retrieved from a2dp library */
                        a2dp_audio_connect_params.content_protection   = codec_settings->codecData.content_protection; /* content protection retrieved from a2dp library */
                        a2dp_audio_connect_params.clock_mismatch       = 0xFF59;  /* clock mismatch rate for this device */
                        a2dp_audio_connect_params.currentEQ            = 0x0000;
                        a2dp_audio_connect_params.enhancements         = 0x0000;
                        a2dp_audio_connect_params.silence_threshold    = 0x0000;
                        a2dp_audio_connect_params.silence_trigger_time = 0x0000;
                        a2dp_audio_connect_params.speaker_pio          = 0x0000;

                        theSink.a2dp_audio_mode_params.music_mode_processing   = A2DP_MUSIC_PROCESSING_FULL_SET_EQ_BANK0;
                        theSink.a2dp_audio_mode_params.external_mic_settings   = EXTERNAL_MIC_NOT_FITTED;
                        theSink.a2dp_audio_mode_params.mic_mute                = SEND_PATH_UNMUTE;
                        theSink.a2dp_audio_mode_params.external_volume_enabled = 0;
                        theSink.a2dp_audio_mode_params.master_routing_mode     = 1;
                        theSink.a2dp_audio_mode_params.slave_routing_mode      = 2;
                        theSink.a2dp_audio_mode_params.unused                  = 0;
                        theSink.a2dp_audio_mode_params.music_mode_enhancements = 0x0040;

                        /* we need to set A2DP volume info as the audio is in mute state after connection */
                        volumeInitAudio.volume_type     = DIGITAL_ONLY;
                        volumeInitAudio.codec_task      = theSink.codecTask;
                        volumeInitAudio.system_gain     = 15;
                        volumeInitAudio.trim_gain_left  = 0;
                        volumeInitAudio.trim_gain_right = 0;
                        volumeInitAudio.mute_active     = FALSE;

                        PluginFeatures.audio_output_type = OUTPUT_INTERFACE_TYPE_NONE;

                        /* connect the audio via the audio plugin
                           (note: volumeInitAudio.tones_gain is passed below but never set above) */
                        AudioConnect((TaskData *)&csr_sbc_decoder_plugin,
                                     sink,
                                     AUDIO_SINK_AV,
                                     theSink.codecTask,
                                     volumeInitAudio.tones_gain,
                                     codec_settings->rate,
                                     PluginFeatures,
                                     mode,
                                     AUDIO_ROUTE_INTERNAL,
                                     (AUDIO_POWER_T)POWER_BATT_LEVEL3,
                                     &a2dp_audio_connect_params,
                                     &theSink.task);

                        AudioSetVolumeA2DP(&volumeInitAudio);
                        AudioSetMode(mode, &theSink.a2dp_audio_mode_params);
                    }
                }
            }
            break;

        default:
            MAIN_DEBUG(("Unrecognised A2DP Message: [%x]\n", id));
            break;
    }
}
Note: the parameter values above are just placeholders; in real development they should be configured in CSR PS keys and read at run time.
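As a sketch of that approach, a value such as clock_mismatch could be read from a user PS key with PsRetrieve() instead of being hard-coded. The key number (42) and the a2dp_user_config layout below are made up for illustration; only PsRetrieve() itself and the clock_mismatch field of A2dpPluginConnectParams come from the code above.

/* Hypothetical layout of a user PS key holding A2DP tuning values */
typedef struct
{
    uint16 clock_mismatch;   /* clock mismatch rate for the device */
} a2dp_user_config;

static void loadA2dpUserConfig(A2dpPluginConnectParams *params)
{
    a2dp_user_config cfg;

    /* PsRetrieve() returns the number of words read, 0 if the key is not present */
    if (PsRetrieve(42, &cfg, sizeof(cfg)))
        params->clock_mismatch = cfg.clock_mismatch;
    else
        params->clock_mismatch = 0xFF59;   /* fall back to the value used above */
}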
2. The HFP Profile
HFP (Hands-Free Profile) handles the Bluetooth voice connection and is typically used to implement answering, hanging up, rejecting calls, voice dialling and similar functions. Using it follows the same pattern as A2DP, so let's go straight to the code.
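Before the message handler, a quick sketch of the call-control side mentioned above. It assumes the request functions exposed by the CSR HFP library to the Sink application (check hfp.h in your ADK version for the exact prototypes); the results come back as HFP messages to handleHFPMessage().

/* Call control sketch, assuming the CSR HFP library request API;
   hfp_primary_link selects the first connected audio gateway. */
static void answerCall(void) { HfpCallAnswerRequest(hfp_primary_link, TRUE);  }  /* answer incoming call */
static void rejectCall(void) { HfpCallAnswerRequest(hfp_primary_link, FALSE); }  /* reject incoming call */
static void hangUpCall(void) { HfpCallTerminateRequest(hfp_primary_link); }      /* hang up active call  */
static void voiceDial(void)  { HfpVoiceRecognitionEnableRequest(hfp_primary_link, TRUE); }  /* voice dialling */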
Part of the message-handling code:
static void handleHFPMessage(Task task, MessageId id, Message message)
{
    AudioPluginFeatures features;
    sep_data_type seps[1];
    typed_bdaddr ag_addr;
    Sink sink;
    sync_pkt_type packet_types;
    hfp_audio_params audio_params;
    bool disable_wbs_override = FALSE;
    pio_config_type *pio;
    uint16 ps_ret_len = 0;

    MAIN_DEBUG(("HFPMessage Received: [%x]\n", id));

    switch(id)
    {
        case HFP_INIT_CFM:
            /* HFP library is up: register the SBC sink SEP, initialise the A2DP library
               and set the class of device so we appear as an AV headset */
            MAIN_DEBUG(("HFP_INIT_CFM\n"));
            seps[0].sep_config = codecList[0].config;
            seps[0].in_use = FALSE;
            A2dpInit(&theSink.task, A2DP_INIT_ROLE_SINK, NULL, 1, seps, 60);
            ConnectionWriteClassOfDevice(AUDIO_MAJOR_SERV_CLASS | AV_MAJOR_DEVICE_CLASS | AV_MINOR_HEADSET);
            if ( ((HFP_INIT_CFM_T*)message)->status == hfp_success )
            {
                MAIN_DEBUG(("hfp_success\n"));
            }
            else
                Panic();
            break;

        case HFP_SLC_CONNECT_IND:
            MAIN_DEBUG(("HFP_SLC_CONNECT_IND [%x]\n", ((HFP_SLC_CONNECT_IND_T *)message)->accepted));
            break;

        case HFP_SLC_CONNECT_CFM:
            MAIN_DEBUG(("HFP_SLC_CONNECT_CFM [%x]\n", ((HFP_SLC_CONNECT_CFM_T *)message)->status));
            break;

        case HFP_RING_IND:
            /* incoming call indication - play a ring tone */
            MAIN_DEBUG(("HFP_RING_IND\n"));
            features.audio_output_type = OUTPUT_INTERFACE_TYPE_NONE;
            AudioPlayTone(good_tone, TRUE, theSink.codecTask, 0x15, features);
            break;

        case HFP_SLC_LINK_LOSS_IND:
            MAIN_DEBUG(("HFP_SLC_LINK_LOSS_IND\n"));
            if( ((HFP_SLC_LINK_LOSS_IND_T*)message)->status == hfp_link_loss_recovery )
            {
                MAIN_DEBUG(("hfp_link_loss_recovery \n"));
            }
            else if( ((HFP_SLC_LINK_LOSS_IND_T*)message)->status == hfp_link_loss_none )
            {
                MAIN_DEBUG(("hfp_link_loss_none \n"));
            }
            /* reconnect A2DP signalling to the same AG address */
            HfpLinkGetSlcSink(((HFP_SLC_LINK_LOSS_IND_T*)message)->priority, &sink);
            SinkGetBdAddr(sink, &ag_addr);
            A2dpSignallingConnectRequest((bdaddr *)&ag_addr.addr);
            break;

        case HFP_SERVICE_IND:
            MAIN_DEBUG(("HFP_SERVICE_IND [%x]\n", ((HFP_SERVICE_IND_T*)message)->service));
            break;

        case HFP_SIGNAL_IND:
            MAIN_DEBUG(("HS: HFP_SIGNAL_IND [%d]\n", ((HFP_SIGNAL_IND_T*)message)->signal));
            break;

        case HFP_ROAM_IND:
            MAIN_DEBUG(("HS: HFP_ROAM_IND [%d]\n", ((HFP_ROAM_IND_T*)message)->roam));
            break;

        case HFP_BATTCHG_IND:
            MAIN_DEBUG(("HS: HFP_BATTCHG_IND [%d]\n", ((HFP_BATTCHG_IND_T*)message)->battchg));
            break;

        case HFP_AUDIO_CONNECT_IND:
            /* accept the incoming SCO/eSCO audio connection */
            MAIN_DEBUG(("HFP_AUDIO_CONNECT_IND\n"));
            packet_types = sync_all_sco;
            disable_wbs_override = TRUE;
            audio_params.bandwidth      = 0x1f40;
            audio_params.max_latency    = 0xc;
            audio_params.voice_settings = 0;
            audio_params.retx_effort    = sync_retx_link_quality;
            HfpAudioConnectResponse(((HFP_AUDIO_CONNECT_IND_T *)message)->priority,
                                    TRUE, packet_types, &audio_params, TRUE);
            break;

        case HFP_AUDIO_CONNECT_CFM:
            MAIN_DEBUG(("HFP_AUDIO_CONNECT_CFM\n"));
            if( ((HFP_AUDIO_CONNECT_CFM_T *)message)->status == hfp_success )
            {
                HfpLinkGetSlcSink(((HFP_AUDIO_CONNECT_CFM_T*)message)->priority, &sink);
                features.audio_output_type = OUTPUT_INTERFACE_TYPE_NONE;

                /* read the PIO configuration from user PS key 16 */
                pio = malloc(sizeof(pio_config_type));
                memset(pio, 0, sizeof(pio_config_type));
                ps_ret_len = PsRetrieve(16, pio, sizeof(pio_config_type));
                MAIN_DEBUG(("ps return len:[%x]", ps_ret_len));

                /* microphone / line-in configuration passed to the CVC plugin */
                theSink.digital = malloc(sizeof(common_mic_params));
                (theSink.digital)->mic_a.digital   = 0;
                (theSink.digital)->mic_a.pre_amp   = 1;
                (theSink.digital)->mic_a.drive_pio = 1;
                (theSink.digital)->mic_a.pio       = 0;
                (theSink.digital)->mic_a.bias      = 1;
                (theSink.digital)->mic_a.unused    = 0;
                (theSink.digital)->mic_a.gain      = 5;
                (theSink.digital)->mic_b.digital   = 0;
                (theSink.digital)->mic_b.pre_amp   = 1;
                (theSink.digital)->mic_b.drive_pio = 1;
                (theSink.digital)->mic_b.pio       = 1;
                (theSink.digital)->mic_b.bias      = 1;
                (theSink.digital)->mic_b.unused    = 0;
                (theSink.digital)->mic_b.gain      = 5;
                (theSink.digital)->line_a.digital   = 0;
                (theSink.digital)->line_a.pre_amp   = 0;
                (theSink.digital)->line_a.drive_pio = 0;
                (theSink.digital)->line_a.pio       = 0;
                (theSink.digital)->line_a.bias      = 0;
                (theSink.digital)->line_a.unused    = 0;
                (theSink.digital)->line_a.gain      = 15;
                (theSink.digital)->line_b.digital   = 0;
                (theSink.digital)->line_b.pre_amp   = 0;
                (theSink.digital)->line_b.drive_pio = 0;
                (theSink.digital)->line_b.pio       = 0;
                (theSink.digital)->line_b.bias      = 0;
                (theSink.digital)->line_b.unused    = 0;
                (theSink.digital)->line_b.gain      = 15;
                theSink.plugin_params.digital = theSink.digital;

                /* persistent block used by the CVC DSP application */
                theSink.dsp_data.key.key     = 0x0063;
                theSink.dsp_data.key.len     = 48;
                theSink.dsp_data.key.cur_len = 0;
                theSink.dsp_data.key.cache   = theSink.dsp_data.cache;
                PblockInit(&theSink.dsp_data.key);

                /* connect audio using the audio plugin selected above */
                AudioConnect(CVCHS1MICWBS,      /* CVCHS1MIC */
                             sink,
                             sync_link_esco,    /* sync_link_esco */
                             theSink.codecTask,
                             20,
                             0x00001F40,
                             features,
                             AUDIO_MODE_CONNECTED,
                             AUDIO_ROUTE_INTERNAL,
                             (AUDIO_POWER_T)POWER_BATT_LEVEL3,
                             &theSink.plugin_params,
                             NULL);
            }

            AudioSetVolume(20, 20, theSink.codecTask);

            theSink.a2dp_audio_mode_params.music_mode_processing   = A2DP_MUSIC_PROCESSING_FULL_SET_EQ_BANK0;
            theSink.a2dp_audio_mode_params.external_mic_settings   = 1;
            theSink.a2dp_audio_mode_params.mic_mute                = 1;
            theSink.a2dp_audio_mode_params.external_volume_enabled = 0;
            theSink.a2dp_audio_mode_params.master_routing_mode     = 1;
            theSink.a2dp_audio_mode_params.slave_routing_mode      = 2;
            theSink.a2dp_audio_mode_params.unused                  = 0;
            theSink.a2dp_audio_mode_params.music_mode_enhancements = 0;

            /* mute control */
            AudioSetMode(AUDIO_MODE_CONNECTED, &theSink.a2dp_audio_mode_params);
            break;

        default:
            MAIN_DEBUG(("Unrecognised HFP Message: [%x]\n", id));
            break;
    }
}
Note: again, these parameter values are placeholders; in real development they should be configured in CSR PS keys.