海康摄像头音频方案(播放音频文件+语音对讲+语音转发)-支持window/Linux-java版本
应用场景:
1. 有告警出现时,海康摄像头能自动播报(如:禁止游泳,请快速离开);
2. 在web页面点击“开始对讲”,能将PC上的麦克风声音传输到海康摄像头进行对讲;点击“停止对讲”,海康摄像头停止对讲;
技术实现:
使用海康的SDK实现,nettyserver框架,提供webapi接口及websocket接口,兼容window、linux
软件功能:
1. 集成海康SDK
2. WebAPI接口(PlayMedia,StartTalk,StopTalk),采集的是本地音频数据
3. WebSocket接口,将web用户的音频数据转发到Server程序,通过SDK发送至海康摄像机
对讲初始化
H5->Server 发送指令 StartTalk::{ "ip": "192.168.3.2", "port": 8000, "name": "admin", "password": "yswy123456" }
Server->H5 返回指令 StartTalk::ACK_OK
Server->H5 返回指令 StartTalk::ACK_ERROR
开始对讲
H5->Server 发送指令 Base64音频字符串
结束对讲
H5->Server 发送指令 StopTalk::
Server->H5 返回指令 StopTalk::ACK_OK
Server->H5 返回指令 StopTalk::ACK_ERROR
4. 链路检查超时退出SDK
测试报告
音频播放
开始对讲
结束对讲
方案内容
相关配置
1. 海康相机音频配置
2. 将dll/so文件拷贝至系统目录下
Windows: C:\Windows\System32 及 C:\Windows\SysWOW64
linux: /usr/lib
HCNetSDK INSTANCE = System.getProperties().getProperty("os.name").equals("Linux") ? (HCNetSDK) Native.loadLibrary("hcnetsdk", HCNetSDK.class) : (HCNetSDK) Native.loadLibrary("HCNetSDK", HCNetSDK.class);这是原来的
变更
HCNetSDK INSTANCE = System.getProperties().getProperty("os.name").equals("Linux") ? (HCNetSDK) Native.loadLibrary("hcnetsdk", HCNetSDK.class) : (HCNetSDK) Native.loadLibrary(System.getProperty("user.dir") + "\\lib\\HCNetSDK.dll", HCNetSDK.class);
3. pcm制作
1. 安装ekho-5.8.exe
2. 安装girl_xiaokun.exe
3. 运行ttsapp.exe
4. 使用UltraEdit以十六进制方式编辑,选中并删除文件头的44个字节(即WAV文件头),另存为pcm文件
核心代码
// 开始音频文件 static void StartMedia(Camera entity, String sfilePath) { lockAudio.lock(); HCNetSDK.NET_DVR_COMPRESSION_AUDIO lpCompressAudio = new HCNetSDK.NET_DVR_COMPRESSION_AUDIO(); boolean net_DVR_GetCurrentAudioCompress = hCNetSDK.NET_DVR_GetCurrentAudioCompress(entity.UserID, lpCompressAudio); if (!net_DVR_GetCurrentAudioCompress) return; byte byAudioEncType = lpCompressAudio.byAudioEncType; byte byAudioSamplingRate = lpCompressAudio.byAudioSamplingRate; byte byAudioBitRate = lpCompressAudio.byAudioBitRate; byte bySupport = lpCompressAudio.bySupport; System.out.println("音频编码类型=" + byAudioEncType + " 音频采样率=" + byAudioSamplingRate + " 音频码率=" + byAudioBitRate + " bySupport=" + bySupport); NativeLong mr = hCNetSDK.NET_DVR_StartVoiceCom_MR_V30(entity.UserID, 1, null, null); File file = new File(sfilePath); FileInputStream inputStream = null; try { inputStream = new FileInputStream(file); Memory pInBuff = new Memory(file.length()); pInBuff.clear(); if (pInBuff != Memory.NULL) { int buffLen = 320; long currFileLen = 0; int readLen; byte[] buffer = new byte[buffLen]; Memory pIB = new Memory(buffLen); while (currFileLen < file.length()) { entity.LastTime = System.currentTimeMillis(); readLen = inputStream.read(buffer); pIB.write(0, buffer, 0, readLen); currFileLen += readLen; Memory pOutBuffer = new Memory(buffLen); HCNetSDK.NET_DVR_AUDIOENC_INFO enc_info = new HCNetSDK.NET_DVR_AUDIOENC_INFO(); enc_info.in_frame_size = buffLen; Pointer encoder = hCNetSDK.NET_DVR_InitG711Encoder(enc_info); // HCNetSDK.NET_DVR_AUDIOENC_PROCESS_PARAM param = new HCNetSDK.NET_DVR_AUDIOENC_PROCESS_PARAM(); param.in_buf = pIB; param.out_buf = pOutBuffer; param.out_frame_size = 160; param.g711_type = 0; boolean frame = hCNetSDK.NET_DVR_EncodeG711Frame(encoder, param); if (!frame) { int iErr = hCNetSDK.NET_DVR_GetLastError(); System.out.println("G711音频编码失败!iErr = " + iErr); break; } frame = hCNetSDK.NET_DVR_ReleaseG711Encoder(encoder); if (!frame) { int iErr = hCNetSDK.NET_DVR_GetLastError(); 
System.out.println("G711音频编码失败!iErr = " + iErr); break; } boolean sendData = hCNetSDK.NET_DVR_VoiceComSendData(mr, pOutBuffer, 160); if (!sendData) { int iErr = hCNetSDK.NET_DVR_GetLastError(); System.out.println("转发语音数据!iErr = " + iErr); break; } Thread.sleep(20); } } } catch (Exception ex) { DataHelper.AddExceptionStackTrace("[Error] StartMedia", ex); } finally { if (null != inputStream) { try { inputStream.close(); } catch(Exception e) {} } lockAudio.unlock(); } hCNetSDK.NET_DVR_StopVoiceCom(mr); System.out.println("读取完成"); }