- 开启AudioSession
- 创建AudioUnitIO,连接麦克风
- 设置采集格式
- 设置采集回调

开启AudioSession
如果是录制,只需要开启AVAudioSessionCategoryRecord
// Recording only: the Record category is enough (no playback route needed).
[[RGAudioSession shareInstance] setAudioSessionCategory:AVAudioSessionCategoryRecord];
// Ask for ~50 ms I/O buffers — a latency/CPU trade-off. NOTE(review):
// this is a *preferred* value; the system may grant a different duration.
[[RGAudioSession shareInstance] setPreferredIOBufferDuration:0.05];
// Activate the session so the category/buffer settings take effect.
[[RGAudioSession shareInstance] setActive:YES];
创建AudioUnitIO,连接麦克风
kInputElement(输入总线)的值为 1;连接麦克风时,传给属性的数据 flag 需要设置为 1(UInt32 flag = 1)。
注意是将 kAudioUnitScope_Input 与 kInputElement 连接(输入范围与输入总线相连接)。
// Describe the unit we want: a Remote I/O unit, which talks directly to
// the hardware microphone/speaker.
AudioComponentDescription acd;
acd.componentType = kAudioUnitType_Output;
acd.componentSubType = kAudioUnitSubType_RemoteIO; // kAudioUnitSubType_VoiceProcessingIO would add echo cancellation
acd.componentManufacturer = kAudioUnitManufacturer_Apple;
acd.componentFlags = 0;
acd.componentFlagsMask = 0;

OSStatus status;
AudioComponent ioUnitRef = AudioComponentFindNext(NULL, &acd);
status = AudioComponentInstanceNew(ioUnitRef, &_ioUnit);
CheckStatus(status, @"创建unit失败", YES);

// Enable input (the microphone) on the I/O unit's input element.
// kAudioOutputUnitProperty_EnableIO takes a UInt32. The original code
// passed an NSInteger, which is 8 bytes on 64-bit, so sizeof(flag) gave
// the wrong property data size; it also dropped the return status.
UInt32 flag = 1;
status = AudioUnitSetProperty(_ioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputElement, &flag, sizeof(flag));
CheckStatus(status, @"开启麦克风失败", YES);
设置采集格式
mFormatFlags:如果选择了 kLinearPCMFormatFlagIsPacked,则 mBytesPerFrame 必须等于 mChannelsPerFrame * mBitsPerChannel / 8;如果没有设置,则可以设置为任意 n * mChannelsPerFrame * mBitsPerChannel / 8(即一个完整声道字节数的整数倍,n 为 >= 1 的整数)。
从图中可以看出,设置采集数据格式时,必须是 kAudioUnitScope_Output 与 kInputElement 相连接(输出范围与输入总线连接)。
// Capture format: 44.1 kHz, mono, 16-bit signed integer, packed linear PCM.
AudioStreamBasicDescription asbd;
bzero(&asbd, sizeof(asbd));
asbd.mSampleRate = 44100;
asbd.mFormatID = kAudioFormatLinearPCM; // encoding
// kAudioFormatFlagIsPacked is required for this layout: with packed PCM,
// mBytesPerFrame must equal mChannelsPerFrame * mBitsPerChannel / 8
// (1 * 16 / 8 = 2), which is exactly what the fields below declare.
// The original set only IsSignedInteger, leaving the description
// inconsistent with its own byte counts.
asbd.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
asbd.mBytesPerPacket = 2;
asbd.mFramesPerPacket = 1;
asbd.mBytesPerFrame = 2;
asbd.mChannelsPerFrame = 1;
asbd.mBitsPerChannel = 2 * 8;
// The capture format lives on the OUTPUT scope of the INPUT element:
// data flows out of the mic element into our render callback.
status = AudioUnitSetProperty(_ioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputElement, &asbd, sizeof(asbd));
CheckStatus(status, @"设置采集格式失败", YES);
设置采集回调
AudioUnitPropertyID 为 kAudioOutputUnitProperty_SetInputCallback。在回调中使用 AudioUnitRender 函数,将 recorder->_ioUnit 采集到的数据写入 recorder->_buffList。注意,首先要给 recorder->_buffList 分配内存。
// Allocate the AudioBufferList that AudioUnitRender fills in the input
// callback: a single mono buffer of CONST_BUFFER_SIZE bytes.
// NOTE(review): neither malloc result is checked, and ownership/freeing
// is handled elsewhere (not visible here) — confirm teardown frees both.
_buffList = malloc(sizeof(AudioBufferList));
_buffList->mNumberBuffers = 1;
_buffList->mBuffers[0].mNumberChannels = 1;
_buffList->mBuffers[0].mDataByteSize = CONST_BUFFER_SIZE;
_buffList->mBuffers[0].mData = malloc(CONST_BUFFER_SIZE);
// Register the input callback: the unit invokes RecorderCallBack each
// time a buffer of mic samples is ready. inputProcRefCon carries self
// (unretained __bridge) back into the C callback, so self must outlive
// the audio unit.
AURenderCallbackStruct callBack;
callBack.inputProc = RecorderCallBack;
callBack.inputProcRefCon = (__bridge void *)self;
status = AudioUnitSetProperty(_ioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Output, kInputElement, &callBack, sizeof(callBack));
CheckStatus(status, @"设置采集回调失败", YES);
//采集回调
// Input render callback: invoked by the I/O unit whenever a buffer of
// microphone samples is ready. Pulls the samples with AudioUnitRender
// into recorder->_buffList, then hands them to writePCMData:size:.
// Runs on the real-time audio thread.
OSStatus RecorderCallBack( void * inRefCon,
                          AudioUnitRenderActionFlags * ioActionFlags,
                          const AudioTimeStamp * inTimeStamp,
                          UInt32 inBusNumber,
                          UInt32 inNumberFrames,
                          AudioBufferList * __nullable ioData) {
    RGAudioBaseRecorder *recorder = (__bridge RGAudioBaseRecorder *)inRefCon;
    // AudioUnitRender overwrites mDataByteSize with the number of bytes it
    // actually produced, so the full capacity must be restored before every
    // render — otherwise the buffer "shrinks" after the first callback.
    recorder->_buffList->mBuffers[0].mDataByteSize = CONST_BUFFER_SIZE;
    OSStatus status = AudioUnitRender(recorder->_ioUnit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, recorder->_buffList);
    if (status != noErr) {
        // Render failed: skip the write instead of persisting garbage.
        return status;
    }
    // mDataByteSize is UInt32 — log with %u, not %d.
    NSLog(@"size = %u", recorder->_buffList->mBuffers[0].mDataByteSize);
    [recorder writePCMData:recorder->_buffList->mBuffers[0].mData size:recorder->_buffList->mBuffers[0].mDataByteSize];
    return noErr;
}
写入文件,下次可以直接读取播放
// Appends raw PCM bytes to <tmp>/record.pcm so the capture can be read
// back and played later.
// @param buffer pointer to the PCM bytes to persist
// @param size   number of bytes to write
- (void)writePCMData:(Byte *)buffer size:(int)size {
    static FILE *file = NULL;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        // Build the path once, not on every audio callback.
        // stringByAppendingPathComponent avoids the double slash the
        // original produced (NSTemporaryDirectory() already ends in "/").
        NSString *path = [NSTemporaryDirectory() stringByAppendingPathComponent:@"record.pcm"];
        NSLog(@"path = %@", path);
        // "wb": PCM is raw binary data, not text.
        file = fopen(path.UTF8String, "wb");
    });
    // Guard against a failed fopen (the original fwrite would crash on
    // a NULL FILE*), and skip empty writes.
    if (!file || !buffer || size <= 0) {
        return;
    }
    fwrite(buffer, size, 1, file);
}
网友评论