Implementing Audio Recording with AudioUnit on iOS

AudioUnit is one of iOS's low-level audio frameworks. Compared with the higher-level, wrapped alternatives, it takes more work to set up but is considerably more flexible.

On to the code.

  1. Define the AudioUnit
@interface ViewController ()
{
  AudioUnit audioUnit;
}
@property (nonatomic,assign) BOOL isRecording;
@end
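Before the RemoteIO unit can capture anything, the app needs microphone access. The snippet below is a minimal sketch of my own (not part of the original post): it requests record permission through AVAudioSession and assumes Info.plist already contains an NSMicrophoneUsageDescription entry; the helper name is hypothetical.

#import <AVFoundation/AVFoundation.h>

// Hypothetical helper: ask for microphone access before starting the unit.
// Assumes Info.plist contains an NSMicrophoneUsageDescription string.
- (void)requestMicrophonePermission {
    [[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
        dispatch_async(dispatch_get_main_queue(), ^{
            if (granted) {
                [self startRecordPCM];
            } else {
                NSLog(@"Microphone permission denied");
            }
        });
    }];
}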
  2. Initialize the unit and start recording
#define kSampleRate  44100
#define kBits 16
#define kChannels 1

static void CheckError(OSStatus error, const char *operation)
{
    if (error == noErr) return;
    char errorString[20];
    // See if it appears to be a 4-char-code
    *(UInt32 *)(errorString + 1) = CFSwapInt32HostToBig(error);
    if (isprint(errorString[1]) && isprint(errorString[2]) &&
        isprint(errorString[3]) && isprint(errorString[4])) {
        errorString[0] = errorString[5] = '\'';
        errorString[6] = '\0';
    } else
        // No, format it as an integer
        sprintf(errorString, "%d", (int)error);
    fprintf(stderr, "Error: %s (%s)\n", operation, errorString);
    exit(1);
}

- (void)startRecordPCM {
    self.isRecording = YES;
    [self initInputAudioUnitWithRate:kSampleRate bit:kBits channel:kChannels];
    CheckError(AudioOutputUnitStart(audioUnit), "AudioOutputUnitStart error");
}

- (void)initInputAudioUnitWithRate:(UInt32)rate
                               bit:(UInt32)bit
                           channel:(UInt32)channel{
    // Configure the AVAudioSession for simultaneous playback and recording
    NSError *error = nil;
    AVAudioSession *session = [AVAudioSession sharedInstance];
    [session setCategory:AVAudioSessionCategoryPlayAndRecord withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker error:&error];
    [session setActive:YES error:nil];
    
    // Describe and create the RemoteIO audio unit (kAudioUnitSubType_RemoteIO)
    AudioComponentDescription inputDesc;
    inputDesc.componentType = kAudioUnitType_Output;
    inputDesc.componentSubType = kAudioUnitSubType_RemoteIO;
    inputDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
    inputDesc.componentFlags = 0;
    inputDesc.componentFlagsMask = 0;
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &inputDesc);
    CheckError(AudioComponentInstanceNew(inputComponent, &audioUnit),
               "AudioComponentInstanceNew error");
    
    // Describe the capture format: linear PCM
    int mFramesPerPacket = 1;
    AudioStreamBasicDescription inputStreamDesc;
    memset(&inputStreamDesc, 0, sizeof(inputStreamDesc));
    inputStreamDesc.mSampleRate       = rate;
    inputStreamDesc.mFormatID         = kAudioFormatLinearPCM;
    inputStreamDesc.mFormatFlags      = (kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsPacked);
    inputStreamDesc.mFramesPerPacket  = mFramesPerPacket;
    inputStreamDesc.mChannelsPerFrame = (UInt32)channel;
    inputStreamDesc.mBitsPerChannel   = (UInt32)bit;                                // sample depth in bits
    inputStreamDesc.mBytesPerFrame    = (UInt32)(bit * channel / 8);                // bytes per frame: 16 * 1 / 8 = 2
    inputStreamDesc.mBytesPerPacket   = (UInt32)(bit * channel / 8 * mFramesPerPacket);
    
    // Apply the format on the output scope of bus 1 (the input element),
    // which is where captured audio leaves the unit
    OSStatus status = AudioUnitSetProperty(audioUnit,
                                           kAudioUnitProperty_StreamFormat,
                                           kAudioUnitScope_Output,
                                           1,
                                           &inputStreamDesc,
                                           sizeof(inputStreamDesc));
    CheckError(status, "setProperty StreamFormat error");
    
    // Enable recording on the input scope of bus 1 (1 = enabled)
    UInt32 inputEnable = 1;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  1,
                                  &inputEnable,
                                  sizeof(inputEnable));
    CheckError(status, "setProperty EnableIO error");
    
    // Register the input callback that will receive the captured audio
    AURenderCallbackStruct inputCallBackStruct;
    inputCallBackStruct.inputProc = inputCallBackFun;
    inputCallBackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
    
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Output,
                                  1,
                                  &inputCallBackStruct,
                                  sizeof(inputCallBackStruct));
    CheckError(status, "setProperty InputCallback error");
    
    // The unit must be initialized before AudioOutputUnitStart can succeed
    CheckError(AudioUnitInitialize(audioUnit), "AudioUnitInitialize error");
}
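How many frames arrive per callback is governed by the session's I/O buffer duration, which the code above leaves at its default. As an optional aside of my own (not in the original post), you could ask AVAudioSession for specific values; the method name below is hypothetical and the numbers are only illustrative, since the system treats them as preferences rather than guarantees.

// Optional sketch: request a smaller I/O buffer so the input callback fires
// more often. Call this alongside the AVAudioSession setup above.
- (void)configurePreferredSessionParameters {
    NSError *sessionError = nil;
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    [audioSession setPreferredSampleRate:kSampleRate error:&sessionError];
    // ~20 ms per callback, roughly 882 frames at 44.1 kHz
    [audioSession setPreferredIOBufferDuration:0.02 error:&sessionError];
    if (sessionError) {
        NSLog(@"Failed to set preferred session parameters: %@", sessionError);
    }
}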

  3. Stop recording
- (void)stopRecordPCM {
    self.isRecording = NO;
    CheckError(AudioOutputUnitStop(audioUnit),
               "AudioOutputUnitStop failed");
    CheckError(AudioUnitUninitialize(audioUnit),
               "AudioUnitUninitialize failed");
    CheckError(AudioComponentInstanceDispose(audioUnit),
               "AudioComponentInstanceDispose failed");
}
  4. The callback, and writing the PCM data to a local file
static OSStatus inputCallBackFun(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList * __nullable ioData) {
    
    ViewController *recorder = (__bridge ViewController *)(inRefCon);
    
    // Pass NULL / 0 so AudioUnitRender supplies its own buffer
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mNumberChannels = kChannels;
    bufferList.mBuffers[0].mData = NULL;
    bufferList.mBuffers[0].mDataByteSize = 0;
    
    // Pull the captured samples from bus 1 (the input element)
    OSStatus status = AudioUnitRender(recorder->audioUnit,
                                      ioActionFlags,
                                      inTimeStamp,
                                      1,
                                      inNumberFrames,
                                      &bufferList);
    if (status != noErr) {
        return status;
    }
    
    AudioBuffer buffer = bufferList.mBuffers[0];
    int len = buffer.mDataByteSize;
    if (recorder.isRecording) {
        [recorder writePCMData:buffer.mData size:len];
    }
    return noErr;
}
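Because the output is raw PCM with no header, nothing in the file records its format. As a quick sanity check of my own (not from the original post), the duration can be derived from the byte count using the constants defined earlier:

// Illustrative helper: duration of a raw PCM capture in seconds.
// bytesPerSecond = sampleRate * channels * (bits / 8) = 44100 * 1 * 2 = 88200
static double PCMDurationForByteCount(long long byteCount) {
    const double bytesPerSecond = kSampleRate * kChannels * (kBits / 8.0);
    return (double)byteCount / bytesPerSecond;
}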

- (void)writePCMData:(Byte *)buffer size:(int)size {
    static FILE *file = NULL;
    NSString *path = [NSHomeDirectory() stringByAppendingString:@"/Documents/record.pcm"];
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        NSLog(@"path = %@", path);
    });
    
    // Open the file once in binary mode. The output is headerless PCM, so the
    // sample rate, bit depth and channel count must be remembered externally.
    if (!file) {
        file = fopen(path.UTF8String, "wb");
    }
    fwrite(buffer, size, 1, file);
}
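One gap worth noting: the FILE handle above is never closed, so the tail of the recording may not be flushed to disk. A minimal sketch of my own (it assumes the static FILE * is moved out of writePCMData to file scope so both methods can see it; closePCMFile is a hypothetical helper) would close it when recording stops:

// Assumed change: declare the handle at file scope instead of inside writePCMData.
static FILE *pcmFile = NULL;

// Hypothetical helper, called at the end of stopRecordPCM.
- (void)closePCMFile {
    if (pcmFile) {
        fflush(pcmFile);
        fclose(pcmFile);
        pcmFile = NULL;
    }
}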