Android audio recorder (draft)

References:

http://androidcommunity.com/forums/f4/recording-the-audio-from-mic-in-pcm-format-into-a-buffer-using-the-native-libraries-26690/

http://www.devdaily.com/java/jwarehouse/android/media/java/android/media/AudioRecord.java.shtml

http://www.wuzhaopai.com/html/mobile/android/2011101015289.html

http://blog.csdn.net/innost/article/details/6125779

http://www.cnblogs.com/innost/archive/2011/01/15/1936425.html

At the top layer, an Android Java application calls the AudioRecord read function:

{
    ...
    // sampleRate, channelConfig and audioFormat are defined elsewhere;
    // minBufferSize is normally obtained from AudioRecord.getMinBufferSize(...)
    int minBufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
    AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
            sampleRate, channelConfig, audioFormat, minBufferSize);
    short[] buffer = new short[minBufferSize];

    int res = audioRecord.read(buffer, 0, minBufferSize);
    ...
}



First, let's take a look at the prototype of this read:

public int read(short[] audioData, int offsetInShorts, int sizeInShorts) {
    if (mState != STATE_INITIALIZED) {
        return ERROR_INVALID_OPERATION;
    }

    if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
            || (offsetInShorts + sizeInShorts > audioData.length)) {
        return ERROR_BAD_VALUE;
    }

    return native_read_in_short_array(audioData, offsetInShorts, sizeInShorts); // this is where the native C++ read is called
}


The native C++ read goes by a different name on the native side. Here is how the mapping is defined:

static JNINativeMethod gMethods[] = {
    // name,                        signature, funcPtr
    {"native_read_in_byte_array",   "([BII)I", (void *)android_media_AudioRecord_readInByteArray},
    {"native_read_in_short_array",  "([SII)I", (void *)android_media_AudioRecord_readInShortArray} // see: the Java native method names are bound to differently named C++ functions
};

//----------------------------------------------------------------------------------------
static jint android_media_AudioRecord_readInShortArray(JNIEnv *env,  jobject thiz,
                                                        jshortArray javaAudioData,
                                                        jint offsetInShorts, jint sizeInShorts) {

    return (android_media_AudioRecord_readInByteArray(env, thiz,
                                                      (jbyteArray) javaAudioData,
                                                      offsetInShorts*2, sizeInShorts*2)
            / 2); // both variants delegate to readInByteArray; a short is two bytes, hence the *2 and /2
}
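For context, a table like gMethods only takes effect once it has been registered with the VM. The sketch below shows that step using plain JNI RegisterNatives; in AOSP the registration goes through AndroidRuntime::registerNativeMethods, so the helper function name here is illustrative only.

// Illustrative sketch (not copied from AOSP): bind the gMethods table above to
// android.media.AudioRecord, so that native_read_in_short_array resolves to
// android_media_AudioRecord_readInShortArray.
static int register_android_media_AudioRecord(JNIEnv *env) {
    jclass clazz = env->FindClass("android/media/AudioRecord");
    if (clazz == NULL) {
        return -1;  // class not found
    }
    // Each entry maps {Java name, JNI signature, C++ function pointer}.
    return env->RegisterNatives(clazz, gMethods,
                                sizeof(gMethods) / sizeof(gMethods[0]));
}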




Curious? Let's take a look at what readInByteArray actually does:

static jint android_media_AudioRecord_readInByteArray(JNIEnv *env,  jobject thiz,
                                                       jbyteArray javaAudioData,
                                                       jint offsetInBytes, jint sizeInBytes) {
    jbyte* recordBuff = NULL;
    AudioRecord *lpRecorder = NULL;

    lpRecorder =
        (AudioRecord *)env->GetIntField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
    if (lpRecorder == NULL) {
        LOGE("Unable to retrieve AudioRecord object, can't record");
        return 0;
    }

    if (!javaAudioData) {
        LOGE("Invalid Java array to store recorded audio, can't record");
        return 0;
    }

    recordBuff = (jbyte *)env->GetByteArrayElements(javaAudioData, NULL); // pin the jbyteArray and obtain a jbyte* that native code can write into

    if (recordBuff == NULL) {
        LOGE("Error retrieving destination for recorded audio data, can't record");
        return 0;
    }

    ssize_t recorderBuffSize = lpRecorder->frameCount()*lpRecorder->frameSize();
    ssize_t readSize = lpRecorder->read(recordBuff + offsetInBytes,
                                        sizeInBytes > (jint)recorderBuffSize ?
                                            (jint)recorderBuffSize : sizeInBytes ); // the heart of it: hand off to the C++ AudioRecord::read
    env->ReleaseByteArrayElements(javaAudioData, recordBuff, 0);

    return (jint) readSize;
}
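One detail worth pausing on: GetIntField recovers the native AudioRecord pointer from an int field of the Java object, through a jfieldID cached earlier in javaAudioRecordFields. Below is a minimal sketch of that caching step; the Java field name "mNativeRecorderInJavaObj" is an assumption used for illustration.

// Sketch only: cache the field ID that GetIntField uses above. The field name
// "mNativeRecorderInJavaObj" is assumed here, not taken from this post.
static jfieldID nativeRecorderFieldId;

static bool cacheAudioRecordFieldIds(JNIEnv *env) {
    jclass clazz = env->FindClass("android/media/AudioRecord");
    if (clazz == NULL) {
        return false;
    }
    // The Java object stores the native AudioRecord pointer in this int field;
    // readInByteArray later recovers the pointer with GetIntField.
    nativeRecorderFieldId = env->GetFieldID(clazz, "mNativeRecorderInJavaObj", "I");
    return nativeRecorderFieldId != NULL;
}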



The heart of it is the lpRecorder->read(...) call above. Compared with the Java version, the C++ read involves a lot more ceremony. Take a look:
ssize_t AudioRecord::read(void* buffer, size_t userSize)
{
    ssize_t read = 0;
    Buffer audioBuffer;
    int8_t *dst = static_cast<int8_t*>(buffer);

    if (ssize_t(userSize) < 0) {
        // sanity-check. user is most-likely passing an error code.
        LOGE("AudioRecord::read(buffer=%p, size=%u (%d)",
                buffer, userSize, userSize);
        return BAD_VALUE;
    }

    mLock.lock();
    // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
    // while we are accessing the cblk
    sp <IAudioRecord> audioRecord = mAudioRecord;
    sp <IMemory> iMem = mCblkMemory;
    mLock.unlock();

    do {

        audioBuffer.frameCount = userSize/frameSize();

        // By using a wait count corresponding to twice the timeout period in
        // obtainBuffer() we give a chance to recover once for a read timeout
        // (if media_server crashed for instance) before returning a length of
        // 0 bytes read to the client
        status_t err = obtainBuffer(&audioBuffer, ((2 * MAX_RUN_TIMEOUT_MS) / WAIT_PERIOD_MS));
        if (err < 0) {
            // out of buffers, return #bytes written
            if (err == status_t(NO_MORE_BUFFERS))
                break;
            if (err == status_t(TIMED_OUT))
                err = 0;
            return ssize_t(err);
        }

        size_t bytesRead = audioBuffer.size;
        memcpy(dst, audioBuffer.i8, bytesRead);

        dst += bytesRead;
        userSize -= bytesRead;
        read += bytesRead;

        releaseBuffer(&audioBuffer);
    } while (userSize);

    return read;
}
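Note that read() returns the number of bytes actually copied, which can be less than userSize (for example when obtainBuffer hits NO_MORE_BUFFERS and the loop breaks early). A hypothetical native caller therefore has to tolerate short reads; the helper below is only a sketch of that idea, with names of my own choosing, not code from AOSP.

// Hypothetical caller-side loop (names assumed): keep calling AudioRecord::read
// until the requested amount has arrived or the recorder stops producing data.
static size_t drainRecorder(android::AudioRecord* recorder, int8_t* dst, size_t total) {
    size_t done = 0;
    while (done < total) {
        ssize_t n = recorder->read(dst + done, total - done);
        if (n <= 0) {
            break;  // error, timeout, or no more buffers
        }
        done += static_cast<size_t>(n);
    }
    return done;
}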

As a side note, here is how the latency query works:
status_t AudioSystem::getOutputLatency(uint32_t* latency, int streamType)
{
    OutputDescriptor *outputDesc;
    audio_io_handle_t output;

    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }

    output = getOutput((audio_stream_type_t)streamType);
    if (output == 0) {
        return PERMISSION_DENIED;
    }

    gLock.lock();
    outputDesc = AudioSystem::gOutputs.valueFor(output);
    if (outputDesc == 0) {
        gLock.unlock();
        // no cached output descriptor: ask AudioFlinger directly
        const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
        if (af == 0) return PERMISSION_DENIED;
        *latency = af->latency(output);
    } else {
        // use the latency cached in the output descriptor
        *latency = outputDesc->latency;
        gLock.unlock();
    }

    return NO_ERROR;
}
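For completeness, a minimal usage sketch of this query (context assumed, inside the android namespace):

// Sketch: ask AudioSystem for the output latency of the music stream.
uint32_t latencyMs = 0;
if (AudioSystem::getOutputLatency(&latencyMs, AUDIO_STREAM_MUSIC) == NO_ERROR) {
    // latencyMs now holds the output path latency in milliseconds
}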


