主要部分:
- 播放pcm文件流
- 实时录音和播放
AudioQueue播放pcm文件流
导入头文件定义所需变量
#import <AudioToolbox/AudioToolbox.h>
#define QUEUE_BUFFER_SIZE 5 // number of buffers in the playback queue
#define EVERY_READ_LENGTH 1000 // bytes read from the PCM file per read
#define MIN_SIZE_PER_FRAME 2000 // byte capacity of each queue buffer; must be >= the largest single write (EVERY_READ_LENGTH)
@interface ViewController ()
{
AudioStreamBasicDescription audioDescription; // PCM output format (sample rate, channels, bit depth)
AudioQueueRef audioQueue; // playback audio queue
AudioQueueBufferRef audioQueueBuffers[QUEUE_BUFFER_SIZE]; // buffers owned by the queue
NSLock *synlock ; // serializes file reads + enqueues between startPlay and the AQ callback thread
Byte *pcmDataBuffer; // scratch buffer for bytes read from the PCM file
NSInputStream *inputSteam; // stream over the PCM file (NOTE(review): name is a typo for "inputStream"; referenced elsewhere, so left as-is)
}
@end
读pcm文件
// Opens the bundled abc.pcm file for streaming and allocates the read
// scratch buffer and the lock used to serialize reads/enqueues.
- (void)initFile
{
    NSString *filepath = [[[NSBundle mainBundle] bundlePath] stringByAppendingPathComponent:@"abc.pcm"];
    inputSteam = [[NSInputStream alloc] initWithFileAtPath:filepath];
    [inputSteam open];
    // fix: the original ignored open failures, which later surface only as
    // silent zero-byte reads; report the problem where it happens
    if (inputSteam == nil || inputSteam.streamStatus == NSStreamStatusError) {
        NSLog(@"open pcm file failed: %@", filepath);
    }
    pcmDataBuffer = malloc(EVERY_READ_LENGTH);
    synlock = [[NSLock alloc] init];
}
设置音频参数
// Configures the 16-bit signed, mono, 44.1 kHz linear-PCM output format,
// creates the playback queue, and allocates its buffers.
// The format must match the data in abc.pcm (this sample is 44100 Hz).
-(void)initAudio
{
    audioDescription.mSampleRate = 44100;   // sample rate of abc.pcm
    audioDescription.mFormatID = kAudioFormatLinearPCM;
    audioDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioDescription.mChannelsPerFrame = 1; // mono
    audioDescription.mFramesPerPacket = 1;  // one frame per packet (uncompressed PCM)
    audioDescription.mBitsPerChannel = 16;  // 16-bit samples
    audioDescription.mBytesPerFrame = (audioDescription.mBitsPerChannel / 8) * audioDescription.mChannelsPerFrame;
    audioDescription.mBytesPerPacket = audioDescription.mBytesPerFrame;
    // NULL run loop / mode => the queue invokes the callback on its own
    // internal thread (pass CFRunLoopGetCurrent() to use the current thread)
    OSStatus status = AudioQueueNewOutput(&audioDescription, AudioPlayerAQInputCallback, (__bridge void * _Nullable)(self), NULL, NULL, 0, &audioQueue);
    if (status != noErr) {
        // fix: the original ignored the status and went on to allocate
        // buffers on a possibly-NULL queue
        NSLog(@"AudioQueueNewOutput failed: %d", (int)status);
        return;
    }
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
        // MIN_SIZE_PER_FRAME must be at least as large as the biggest
        // single write into the buffer (EVERY_READ_LENGTH)
        OSStatus result = AudioQueueAllocateBuffer(audioQueue, MIN_SIZE_PER_FRAME, &audioQueueBuffers[i]);
        NSLog(@"AudioQueueAllocateBuffer i = %d,result = %d", i, (int)result);
    }
}
注意:此处设置的音频参数必须和 pcm 文件的数据格式相匹配。本文用到的 abc.pcm 采样率是 44100(16 bit、单声道),否则播放会出现变速或噪音。
读入数据到缓冲区等待播放
// Reads the next chunk of the PCM file into outQB and enqueues it for
// playback. Called once per buffer from startPlay to prime the queue, then
// from the AQ callback each time a buffer finishes playing.
// outQB->mAudioData is the payload area; mAudioDataByteSize tells the queue
// how many of those bytes are valid.
-(void)readPCMAndPlay:(AudioQueueRef)outQ buffer:(AudioQueueBufferRef)outQB
{
    [synlock lock];
    // fix: -read:maxLength: returns NSInteger (-1 on error); the original
    // stored it in size_t, so -1 wrapped to a huge length, dodged the == 0
    // check, and crashed the memcpy below
    NSInteger readLength = [inputSteam read:pcmDataBuffer maxLength:EVERY_READ_LENGTH];
    NSLog(@"read raw data size = %ld", (long)readLength);
    if (readLength <= 0) { // 0 = EOF, < 0 = stream error
        // fix: the original returned while still holding synlock,
        // deadlocking every later call
        [synlock unlock];
        dispatch_async(dispatch_get_main_queue(), ^{
            NSLog(@"文件读取完成");
        });
        return;
    }
    outQB->mAudioDataByteSize = (UInt32)readLength;
    memcpy((Byte *)outQB->mAudioData, pcmDataBuffer, readLength);
    // Hand the filled buffer back to the queue; the queue owns it until the
    // playback callback fires for it again
    AudioQueueEnqueueBuffer(outQ, outQB, 0, NULL);
    [synlock unlock];
}
开始播放
// Starts playback: opens the file, configures the queue, primes every
// buffer with file data, then starts the queue.
// The queue is callback-driven: each time a buffer finishes playing,
// AudioPlayerAQInputCallback refills and re-enqueues it, so several buffers
// keep the hardware fed (a single buffer was observed to stutter).
-(void)startPlay
{
    [self initFile];
    [self initAudio];
    // fix: prime the buffers BEFORE AudioQueueStart — starting an empty
    // queue can cause an initial underrun/glitch
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
        [self readPCMAndPlay:audioQueue buffer:audioQueueBuffers[i]];
    }
    AudioQueueStart(audioQueue, NULL);
}
回调函数
// Output-queue callback. Runs on the queue's internal thread whenever a
// buffer has finished playing: identifies the spent buffer (for logging),
// then refills and re-enqueues it with the next slice of the file.
void AudioPlayerAQInputCallback(void *input, AudioQueueRef outQ, AudioQueueBufferRef outQB)
{
    NSLog(@"AudioPlayerAQInputCallback");
    ViewController *controller = (__bridge ViewController *)input;
    [controller checkUsedQueueBuffer:outQB];
    [controller readPCMAndPlay:outQ buffer:outQB];
}
检测当前回调的是哪个缓冲区
// Logs which of the queue's buffers just completed (debug aid).
// fix: the original spelled out five copy-pasted if-blocks; a loop over
// QUEUE_BUFFER_SIZE produces the identical log line and tracks the
// buffer count automatically.
-(void)checkUsedQueueBuffer:(AudioQueueBufferRef) qbuf
{
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
        if (qbuf == audioQueueBuffers[i]) {
            NSLog(@"AudioPlayerAQInputCallback,bufferindex = %d", i);
            return;
        }
    }
}
注意:播放 pcm 文件时用到了 NSInputStream 的部分知识,对这块有疑问的可以参考 Apple 官方的 NSInputStream / Stream Programming Guide 文档。
AudioQueue实时录音
AudioQueueRecorder头文件定义
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
@class AudioQueueRecorder;
// Delegate that receives the live PCM stream captured by the recorder.
@protocol AudioQueueRecorderDelegate < NSObject>
@optional
// Delivers one captured PCM chunk (zero-padded to the capture buffer size).
-(void)AudioQueueRecorder:(AudioQueueRecorder *)recorder pcmData:(NSData *)pcmData;
@end
// Records from the microphone via an AudioQueue input and streams raw PCM
// chunks to its delegate in real time.
// NOTE(review): "deledate" is a typo for "delegate"; it is public API with
// existing callers, so it is documented rather than renamed here.
@interface AudioQueueRecorder : NSObject
@property (nonatomic, weak) id<AudioQueueRecorderDelegate> deledate;
-(void)startRecording;
-(void)stopRecording;
@end
定义变量
#define QUEUE_BUFFER_SIZE 3 // number of capture buffers in the input queue
#define kDefaultBufferDurationSeconds 0.03// tuned so each capture buffer is ~960 bytes; actual size may be <= 960, and short buffers must be handled (they are zero-padded downstream)
#define kDefaultSampleRate 16000 // capture sample rate, Hz
// NOTE(review): file-static, so this flag is shared by ALL recorder
// instances rather than being per-instance state — confirm single-instance use.
static BOOL isRecording = NO;
@interface AudioQueueRecorder (){
AudioQueueRef _audioQueue; // input (capture) audio queue
AudioStreamBasicDescription _recordFormat; // capture stream format
AudioQueueBufferRef _audioBuffers[QUEUE_BUFFER_SIZE]; // capture buffers owned by the queue
UInt32 bufferByteSize; // derived byte size of each capture buffer
}
@end
设置参数以及初始化缓冲器
// Designated initializer: configures a 16-bit signed, mono, 16 kHz
// linear-PCM capture format, creates the input queue, then allocates and
// pre-enqueues its capture buffers so recording can start immediately.
- (instancetype)init
{
    self = [super init];
    if (self) {
        memset(&_recordFormat, 0, sizeof(_recordFormat));
        _recordFormat.mSampleRate = kDefaultSampleRate;
        _recordFormat.mChannelsPerFrame = 1; // mono
        _recordFormat.mFormatID = kAudioFormatLinearPCM;
        _recordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        _recordFormat.mBitsPerChannel = 16;  // 16-bit samples
        _recordFormat.mBytesPerPacket = _recordFormat.mBytesPerFrame = (_recordFormat.mBitsPerChannel / 8) * _recordFormat.mChannelsPerFrame;
        _recordFormat.mFramesPerPacket = 1;  // one frame per packet (uncompressed PCM)
        OSStatus status = AudioQueueNewInput(&_recordFormat, inputBufferHandler, (__bridge void *)(self), NULL, NULL, 0, &_audioQueue);
        if (status != noErr) {
            // fix: the original ignored the status and would have sized and
            // allocated buffers against a NULL queue
            NSLog(@"AudioQueueNewInput failed: %d", (int)status);
            return self;
        }
        // Estimate the per-buffer size needed for kDefaultBufferDurationSeconds
        DeriveBufferSize(_audioQueue, _recordFormat, kDefaultBufferDurationSeconds, &bufferByteSize);
        NSLog(@"缓存区大小%u", (unsigned int)bufferByteSize); // fix: %u for UInt32 (was %d)
        for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
            AudioQueueAllocateBuffer(_audioQueue, bufferByteSize, &_audioBuffers[i]);
            AudioQueueEnqueueBuffer(_audioQueue, _audioBuffers[i], 0, NULL);
        }
    }
    return self;
}
开始录音
// Starts capture on the input queue.
-(void)startRecording
{
    OSStatus status = AudioQueueStart(_audioQueue, NULL);
    if (status == noErr) {
        isRecording = YES;
    } else {
        // fix: the original set isRecording = YES unconditionally, so the
        // callback would keep re-enqueuing buffers on a queue that never started
        NSLog(@"AudioQueueStart failed: %d", (int)status);
    }
}
停止录音
// Stops capture and tears down the queue. Safe to call when not recording.
-(void)stopRecording
{
    if (isRecording)
    {
        isRecording = NO;
        // true = stop synchronously/immediately, dropping any buffered audio
        // (false would let queued buffers drain first)
        AudioQueueStop(_audioQueue, true);
        // Dispose releases the queue AND every buffer allocated from it
        AudioQueueDispose(_audioQueue, true);
        // fix: clear the handle so a stray start/stop after teardown messages
        // a disposed queue through a dangling pointer
        _audioQueue = NULL;
    }
    NSLog(@"停止录音");
}
录音回调函数
// Input-queue callback. Fires on the queue's internal thread each time a
// capture buffer fills: forwards the audio to the recorder for delivery,
// then — while recording is still active — hands the buffer back to the
// queue so it can be reused.
void inputBufferHandler(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer, const AudioTimeStamp *inStartTime,UInt32 inNumPackets, const AudioStreamPacketDescription *inPacketDesc)
{
    AudioQueueRecorder *recorder = (__bridge AudioQueueRecorder *)inUserData;
    if (inNumPackets > 0) {
        [recorder processAudioBuffer:inBuffer withQueue:inAQ];
    }
    if (isRecording) {
        AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
    }
}
处理数据,通过代理返回pcm实时数据流
// Packages one captured buffer as NSData, zero-pads it up to bufferByteSize
// so every delivered chunk has a uniform length, and hands it to the delegate.
- (void)processAudioBuffer:(AudioQueueBufferRef )audioQueueBufferRef withQueue:(AudioQueueRef )audioQueueRef
{
    NSMutableData * dataM = [NSMutableData dataWithBytes:audioQueueBufferRef->mAudioData length:audioQueueBufferRef->mAudioDataByteSize];
    if (dataM.length < bufferByteSize) {
        // fix: pad in one shot — -increaseLengthBy: zero-fills the added
        // bytes, replacing the original per-byte loop that allocated a
        // fresh one-byte NSData on every iteration
        [dataM increaseLengthBy:(bufferByteSize - dataM.length)];
    }
    // Optional delegate method, so check before messaging
    if (self.deledate && [self.deledate respondsToSelector:@selector(AudioQueueRecorder:pcmData:)]) {
        [self.deledate AudioQueueRecorder:self pcmData:dataM];
    }
}
计算估算的缓存区的大小
// Estimates the buffer size (bytes) needed to hold `seconds` of audio in the
// given format, capped at maxBufferSize. For VBR formats (mBytesPerPacket ==
// 0) the queue is asked for its maximum packet size instead.
// Adapted from Apple's Audio Queue Services Programming Guide.
void DeriveBufferSize (AudioQueueRef audioQueue,
AudioStreamBasicDescription ASBDescription,
Float64 seconds,
UInt32 *outBufferSize)
{
    static const UInt32 maxBufferSize = 0x50000; // hard cap: 320 KiB
    // fix: declared as UInt32 — the original used int, so &maxPacketSize
    // below was an int* passed where AudioQueueGetProperty expects the
    // property's UInt32 storage (pointer type mismatch)
    UInt32 maxPacketSize = ASBDescription.mBytesPerPacket;
    if (maxPacketSize == 0) { // VBR format: packet size varies, query the queue
        UInt32 propertySize = sizeof(maxPacketSize);
        AudioQueueGetProperty(audioQueue,
                              kAudioQueueProperty_MaximumOutputPacketSize,
                              &maxPacketSize,
                              &propertySize);
    }
    Float64 numBytesForTime = ASBDescription.mSampleRate * maxPacketSize * seconds;
    *outBufferSize = (UInt32)(numBytesForTime < maxBufferSize ?
                              numBytesForTime : maxBufferSize);
}
AudioQueue实时播放
AudioQueuePlay头文件定义
#import <Foundation/Foundation.h>
// Plays a live stream of raw PCM chunks through an AudioQueue output.
@interface AudioQueuePlay : NSObject
// Enqueue one PCM chunk for playback; blocks until a queue buffer is free.
- (void)playWithData:(NSData *)data;
// Reset the player when playback misbehaves.
- (void)resetPlay;
// Stop playback.
- (void)stop;
@end
定义变量
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
#define MIN_SIZE_PER_FRAME 1920 // byte capacity of each playback buffer (the original target device required 960-byte packets; see format setup below)
#define QUEUE_BUFFER_SIZE 3 // number of playback buffers
#define SAMPLE_RATE 16000 // playback sample rate, Hz
@interface AudioQueuePlay(){
AudioQueueRef audioQueue; // output (playback) audio queue
AudioStreamBasicDescription _audioDescription; // playback stream format
AudioQueueBufferRef audioQueueBuffers[QUEUE_BUFFER_SIZE]; // playback buffers owned by the queue
BOOL audioQueueBufferUsed[QUEUE_BUFFER_SIZE]; // YES while the matching buffer is enqueued/playing
NSLock *sysnLock; // serializes playWithData (NOTE(review): the AQ callback clears audioQueueBufferUsed WITHOUT taking this lock — unsynchronized cross-thread access; confirm)
NSMutableData *tempData; // most recent chunk handed to playWithData
OSStatus osState; // last AudioQueue API result
Byte *pcmDataBuffer; // scratch copy of the incoming PCM chunk
}
@end
@end
initialize方法
#pragma mark - Configure AVAudioSession for simultaneous playback and recording
// One-time audio-session setup before the class is first used.
+ (void)initialize
{
    // fix: +initialize also runs for subclasses and can be triggered via
    // categories; guard so the session is configured exactly once
    if (self != [AudioQueuePlay class]) {
        return;
    }
    NSError *error = nil;
    // Playback only: AVAudioSessionCategoryPlayback
    // Recording only: AVAudioSessionCategoryRecord
    // Simultaneous play + record here: MultiRoute (per the original author,
    // PlayAndRecord did not work in this setup)
    BOOL ret = [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryMultiRoute error:&error];
    if (!ret) {
        NSLog(@"设置声音环境失败");
        return;
    }
    // Activate the session
    ret = [[AVAudioSession sharedInstance] setActive:YES error:&error];
    if (!ret) {
        NSLog(@"启动失败");
        return;
    }
}
设置参数以及初始化缓冲器
// Designated initializer: configures a 16-bit signed, mono, 16 kHz
// linear-PCM output format, creates the playback queue, allocates its
// buffers, and starts the queue so it is ready for playWithData:.
- (instancetype)init
{
    self = [super init];
    if (self) {
        sysnLock = [[NSLock alloc] init];
        pcmDataBuffer = malloc(MIN_SIZE_PER_FRAME);
        // Output format — must match what the remote sender/recorder produces
        _audioDescription.mSampleRate = SAMPLE_RATE;
        _audioDescription.mFormatID = kAudioFormatLinearPCM;
        _audioDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        _audioDescription.mChannelsPerFrame = 1; // mono
        _audioDescription.mFramesPerPacket = 1;  // one frame per packet (uncompressed PCM)
        _audioDescription.mBitsPerChannel = 16;  // 16-bit samples
        _audioDescription.mBytesPerFrame = (_audioDescription.mBitsPerChannel / 8) * _audioDescription.mChannelsPerFrame;
        _audioDescription.mBytesPerPacket = _audioDescription.mBytesPerFrame * _audioDescription.mFramesPerPacket;
        // NULL run loop => the callback runs on the queue's internal thread
        osState = AudioQueueNewOutput(&_audioDescription, AudioPlayerAQInputCallback, (__bridge void * _Nullable)(self), NULL, NULL, 0, &audioQueue);
        if (osState != noErr) {
            // fix: the original never checked this result and proceeded to
            // configure and start a possibly-NULL queue
            NSLog(@"AudioQueueNewOutput Error %d", (int)osState);
            return self;
        }
        // Full volume
        AudioQueueSetParameter(audioQueue, kAudioQueueParam_Volume, 1.0);
        for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
            audioQueueBufferUsed[i] = false;
            osState = AudioQueueAllocateBuffer(audioQueue, MIN_SIZE_PER_FRAME, &audioQueueBuffers[i]);
        }
        osState = AudioQueueStart(audioQueue, NULL);
        if (osState != noErr) {
            NSLog(@"AudioQueueStart Error");
        }
    }
    return self;
}
得到空闲的缓冲区
// Claims the first free playback buffer (marking it in-use) or returns NULL
// when all QUEUE_BUFFER_SIZE buffers are still enqueued.
// NOTE(review): audioQueueBufferUsed is also cleared from the AQ callback
// thread without sysnLock — confirm whether this unsynchronized access
// matters on the target platform.
- (AudioQueueBufferRef)getNotUsedBuffer
{
    int index = 0;
    while (index < QUEUE_BUFFER_SIZE) {
        if (!audioQueueBufferUsed[index]) {
            audioQueueBufferUsed[index] = YES;
            return audioQueueBuffers[index];
        }
        index++;
    }
    return NULL;
}
拿到pcm数据播放
// 播放数据
// Copies one PCM chunk into a free queue buffer and enqueues it for
// playback. Blocks (polling every 0.5 ms) until a buffer becomes free.
-(void)playWithData:(NSData *)data
{
    [sysnLock lock];
    tempData = [NSMutableData new];
    [tempData appendData: data];
    // fix: clamp to the buffer capacity — the original memcpy'd the full
    // chunk into the MIN_SIZE_PER_FRAME-byte pcmDataBuffer and queue buffer,
    // overflowing both heap allocations for oversized input
    NSUInteger len = MIN(tempData.length, (NSUInteger)MIN_SIZE_PER_FRAME);
    [tempData getBytes:pcmDataBuffer length:len];
    // Poll for a free buffer. NOTE(review): the lock is held while waiting;
    // the AQ callback frees buffers without taking sysnLock, so this does
    // not deadlock, but the used-flag array is touched by two threads
    // without synchronization — confirm.
    AudioQueueBufferRef audioQueueBuffer = NULL;
    while (true) {
        [NSThread sleepForTimeInterval:0.0005];
        audioQueueBuffer = [self getNotUsedBuffer];
        if (audioQueueBuffer != NULL) {
            break;
        }
    }
    audioQueueBuffer->mAudioDataByteSize = (unsigned int)len;
    // Copy len bytes into the claimed buffer and hand it to the queue;
    // from here playback is driven entirely by the system
    memcpy(audioQueueBuffer->mAudioData, pcmDataBuffer, len);
    AudioQueueEnqueueBuffer(audioQueue, audioQueueBuffer, 0, NULL);
    [sysnLock unlock];
}
回调函数重置缓冲区状态
// Output-queue callback: a buffer finished playing. Hand it back to the
// player so it can be marked free (or refilled with silence when the last
// submitted chunk was empty).
static void AudioPlayerAQInputCallback(void* inUserData,AudioQueueRef audioQueueRef, AudioQueueBufferRef audioQueueBufferRef) {
    AudioQueuePlay *player = (__bridge AudioQueuePlay *)inUserData;
    [player resetBufferState:audioQueueRef and:audioQueueBufferRef];
}
// Marks the completed buffer as free again. When the last submitted chunk
// was empty, re-enqueues one zero byte of silence as a guard so the queue
// does not stall permanently on empty data.
- (void)resetBufferState:(AudioQueueRef)audioQueueRef and:(AudioQueueBufferRef)audioQueueBufferRef {
    if (tempData.length == 0) {
        audioQueueBufferRef->mAudioDataByteSize = 1;
        // fix: the original wrote `Byte* byte = ...; byte = 0;`, which
        // nulled the LOCAL POINTER and left the audio byte untouched —
        // zero the first payload byte instead
        ((Byte *)audioQueueBufferRef->mAudioData)[0] = 0;
        AudioQueueEnqueueBuffer(audioQueueRef, audioQueueBufferRef, 0, NULL);
    }
    // Release the buffer back to the free pool
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
        if (audioQueueBufferRef == audioQueueBuffers[i]) {
            audioQueueBufferUsed[i] = false;
        }
    }
}
使用方法
self.audioQueuePlay = [[AudioQueuePlay alloc]init];
self.audioQueueRecorder = [[AudioQueueRecorder alloc]init];
self.audioQueueRecorder.deledate = self;
代理方法
// AudioQueueRecorderDelegate: receives each captured PCM chunk on the
// recorder's queue thread and hops to the main queue before feeding it to
// the player (playWithData: can block while waiting for a free buffer).
-(void)AudioQueueRecorder:(AudioQueueRecorder *)recorder pcmData:(NSData *)pcmData
{
dispatch_async(dispatch_get_main_queue(), ^{
[self.audioQueuePlay playWithData:pcmData];
});
}
总结:本文基本上都是代码的实现,并没有太多原理上的介绍,不久便会补上原理性的文章,多多关注我哟!
网友评论