Create a class, as follows:
#import "DBYAudioQueuePlayer.h"
@implementation DBYAudioQueuePlayer
@end
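The header itself is not shown in the post; here is a minimal sketch of what DBYAudioQueuePlayer.h might contain, based on the two methods used below (the AudioToolbox import and the exact declarations are my assumptions):
// DBYAudioQueuePlayer.h -- hypothetical sketch, not from the original post
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

@interface DBYAudioQueuePlayer : NSObject

/// Create a player whose AudioStreamBasicDescription uses the given sample rate.
- (instancetype)initWithSampleRate:(int)sampleRate;

/// Copy `length` bytes of raw PCM into a free buffer and enqueue it for playback.
- (void)playWith:(void *)audioData length:(unsigned int)length;

@end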
Give it the instance variables it will need (this block goes right after the @implementation line):
{
    AudioStreamBasicDescription streamDescription; // audio format parameters
    AudioQueueRef audioQueue; // audio playback queue
    AudioQueueBufferRef audioQueueBuffers[kNumberBuffers]; // audio buffers
    BOOL audioQueueUsed[kNumberBuffers]; // whether each buffer is currently in use
    UInt32 mNumPacketsToRead; // packets per buffer, filled in by CalculateBytesForTime
    UInt32 bufferByteSize; // bytes per buffer, filled in by CalculateBytesForTime
}
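kNumberBuffers and kMinSizePerFrame are used throughout but never defined in the post; the values below are my assumptions (the post does say three buffers are used). The two forward declarations are needed because the C functions at the bottom of the file are called inside setup:
#define kNumberBuffers 3        // three buffers rotated by the audio queue
#define kMinSizePerFrame 2048   // bytes allocated per buffer (value is an assumption)

static void outputCallback(void *inUserData, AudioQueueRef outAQ, AudioQueueBufferRef outBuffer);
void CalculateBytesForTime(AudioStreamBasicDescription inDesc, UInt32 inMaxPacketSize, Float64 inSeconds, UInt32 *outBufferSize, UInt32 *outNumPackets);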
Override its initializers to make it convenient to use:
- (instancetype)initWithSampleRate:(int)sampleRate {
    if (self = [self init]) {
        streamDescription.mSampleRate = sampleRate; // sample rate
    }
    return self;
}
- (instancetype)init {
    if (self = [super init]) {
        [self setup];
    }
    return self;
}
Do a bit of work during initialization.
Set the audio parameters to match your actual audio data, otherwise playback will sound wrong; that is also why the initWithSampleRate: initializer was added. (Note that setUpStreamDescription below hard-codes 16000, so if you pass a different rate you need to make sure it is not overwritten when setup runs again.)
- (void)setUpStreamDescription {
    // set the audio format parameters
    streamDescription.mSampleRate = 16000; // sample rate
    streamDescription.mFormatID = kAudioFormatLinearPCM;
    streamDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    streamDescription.mChannelsPerFrame = 1; // mono
    streamDescription.mFramesPerPacket = 1; // one frame of data per packet
    streamDescription.mBitsPerChannel = 16; // 0 for compressed formats
    streamDescription.mBytesPerFrame = streamDescription.mChannelsPerFrame * (streamDescription.mBitsPerChannel / 8);
    streamDescription.mBytesPerPacket = streamDescription.mBytesPerFrame;
    streamDescription.mReserved = 0;
}
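As a quick worked example of what this format implies: mBytesPerFrame = 1 × 16 / 8 = 2 bytes, so one second of audio is 16000 × 2 = 32000 bytes, and the 0.01 s passed to CalculateBytesForTime later corresponds to 16000 × 0.01 × 2 = 320 bytes.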
Create the playback queue. The @synchronized lock is there to control access to the shared resources.
- (void)setup {
    [self setUpStreamDescription];
    @synchronized (self) {
        OSStatus status;
        // create the playback queue; passing NULL for the run loop makes the queue use its own internal thread
        status = AudioQueueNewOutput(&streamDescription, outputCallback, (__bridge void * _Nullable)self, NULL, NULL, 0, &audioQueue);
        if (status != noErr) {
            NSLog(@"error: %d", (int)status);
            return;
        }
    }
}
Next we create three buffers for the audio queue to schedule. The steps that come before the buffers are actually allocated may look puzzling at first; they are walked through right after the snippet below.
- (void)setup {
    [self setUpStreamDescription];
    @synchronized (self) {
        OSStatus status;
        // ... create the queue with AudioQueueNewOutput as shown above ...
        // query the largest packet the queue will produce for this format
        UInt32 maxPacketSize = 1;
        UInt32 size = sizeof(maxPacketSize);
        AudioQueueGetProperty(audioQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize, &size);
        // work out buffer size and packet count for 0.01 s of audio
        CalculateBytesForTime(streamDescription, maxPacketSize, 0.01, &bufferByteSize, &mNumPacketsToRead);
        bool isFormatVBR = (streamDescription.mBytesPerPacket == 0 || streamDescription.mFramesPerPacket == 0);
        // allocate the audio buffers
        for (int i = 0; i < kNumberBuffers; i++) {
            status = AudioQueueAllocateBufferWithPacketDescriptions(audioQueue, kMinSizePerFrame, (isFormatVBR ? mNumPacketsToRead : 0), &audioQueueBuffers[i]);
            if (status != noErr) {
                NSLog(@"Audio Queue alloc buffer error %d %d", i, (int)status);
                return;
            }
        }
        // force the audio queue to decode in software
        UInt32 value = kAudioQueueHardwareCodecPolicy_UseSoftwareOnly;
        status = AudioQueueSetProperty(audioQueue, kAudioQueueProperty_HardwareCodecPolicy, &value, sizeof(value));
        if (status != noErr) {
            NSLog(@"software codec not in use");
        }
        // set the volume
        Float32 gain = 1.0;
        AudioQueueSetParameter(audioQueue, kAudioQueueParam_Volume, gain);
    }
}
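To make those preliminary steps concrete: for this linear PCM format, mBytesPerPacket = 2 and mFramesPerPacket = 1, so isFormatVBR is false and the packet-description path is skipped. kAudioQueueProperty_MaximumOutputPacketSize reports the largest packet for the output format, which should be 2 bytes here (I have not verified the exact value the queue returns). CalculateBytesForTime then computes numPacketsForTime = 16000 / 1 × 0.01 = 160 packets, roughly 160 × 2 = 320 bytes, which falls below the 1 KB minimum and is clamped up, giving bufferByteSize = 1024 and mNumPacketsToRead = 1024 / 2 = 512. Note that the allocation above actually passes kMinSizePerFrame rather than the computed bufferByteSize.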
Next, the callback. As I understand it, it works like a notification: the audio queue tells us it has finished with a buffer (its data has been played out), and we can then react — here, by marking that buffer as free again. My reading may differ slightly from the official documentation.
static void outputCallback(void *inUserData, AudioQueueRef outAQ, AudioQueueBufferRef outBuffer) {
    DBYAudioQueuePlayer *player = (__bridge DBYAudioQueuePlayer *)inUserData;
    for (int i = 0; i < kNumberBuffers; i++) {
        if (outBuffer == player->audioQueueBuffers[i]) {
            // the audio queue has finished with this buffer; mark it free so it can be refilled
            player->audioQueueUsed[i] = NO;
        }
    }
}
Now for the key part: pushing data into the buffers.
- (void)playWith:(void *)audioData length:(unsigned int)length {
    OSStatus status;
    if (audioQueue == NULL || ![self checkBufferHasUsed]) {
        [self setup];
        // start playback
        status = AudioQueueStart(audioQueue, NULL);
        if (status != noErr) {
            NSLog(@"error: %d", (int)status);
            return;
        }
    }
    @synchronized (self) {
        AudioQueueBufferRef audioQueueBuffer = NULL;
        // busy-wait until a free buffer becomes available
        while (true) {
            audioQueueBuffer = [self getNotUsedBuffer];
            if (audioQueueBuffer != NULL) {
                break;
            }
        }
        // copy `length` bytes of PCM data into the buffer
        memcpy(audioQueueBuffer->mAudioData, audioData, length);
        audioQueueBuffer->mAudioDataByteSize = length;
        // enqueue the buffer onto the audio queue
        AudioQueueEnqueueBuffer(audioQueue, audioQueueBuffer, 0, NULL);
    }
}
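One thing to watch here: length must not exceed the number of bytes allocated for each buffer (kMinSizePerFrame in the allocation above), otherwise the memcpy writes past the end of mAudioData.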
The code above uses two helpers, checkBufferHasUsed and getNotUsedBuffer; let me explain them.
- checkBufferHasUsed: we have an array of three buffers and a parallel BOOL array of three flags tracking their state. In outputCallback we learn that outBuffer has been consumed, and we mark its flag NO (free to be refilled). checkBufferHasUsed then tells us whether any buffer is still in use; if none is, the queue has gone idle, so playWith: calls setup and AudioQueueStart again.
- getNotUsedBuffer: if any buffer in the array is not in use, take it out to be filled with data and mark it YES (in use).
Their implementations look like this:
- (BOOL)checkBufferHasUsed
{
    for (int i = 0; i < kNumberBuffers; i++) {
        if (YES == audioQueueUsed[i]) {
            return YES;
        }
    }
    // no buffer is currently in use, so playback needs to be (re)started
    NSLog(@"start playback............");
    return NO;
}
- (AudioQueueBufferRef)getNotUsedBuffer
{
    for (int i = 0; i < kNumberBuffers; i++) {
        if (NO == audioQueueUsed[i]) {
            // mark the buffer as in use and hand it out
            audioQueueUsed[i] = YES;
            return audioQueueBuffers[i];
        }
    }
    return NULL;
}
Alright, here is the complete code; grab it and give it a try. Reposting is welcome!
{
    AudioStreamBasicDescription streamDescription; // audio format parameters
    AudioQueueRef audioQueue; // audio playback queue
    AudioQueueBufferRef audioQueueBuffers[kNumberBuffers]; // audio buffers
    BOOL audioQueueUsed[kNumberBuffers]; // whether each buffer is currently in use
    UInt32 mNumPacketsToRead; // packets per buffer, filled in by CalculateBytesForTime
    UInt32 bufferByteSize; // bytes per buffer, filled in by CalculateBytesForTime
}
- (instancetype)initWithSampleRate:(int)sampleRate {
    if (self = [self init]) {
        streamDescription.mSampleRate = sampleRate; // sample rate
    }
    return self;
}
- (instancetype)init {
    if (self = [super init]) {
        [self setup];
    }
    return self;
}
- (void)dealloc
{
    if (audioQueue != NULL) {
        AudioQueueStop(audioQueue, true);
        AudioQueueDispose(audioQueue, true); // release the queue and its buffers
    }
    audioQueue = NULL;
    NSLog(@"DBYAudioQueuePlayer dealloc...");
}
- (void)setup {
    [self setUpStreamDescription];
    @synchronized (self) {
        OSStatus status;
        // create the playback queue; passing NULL for the run loop makes the queue use its own internal thread
        status = AudioQueueNewOutput(&streamDescription, outputCallback, (__bridge void * _Nullable)self, NULL, NULL, 0, &audioQueue);
        if (status != noErr) {
            NSLog(@"error: %d", (int)status);
            return;
        }
        // query the largest packet the queue will produce for this format
        UInt32 maxPacketSize = 1;
        UInt32 size = sizeof(maxPacketSize);
        AudioQueueGetProperty(audioQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize, &size);
        // work out buffer size and packet count for 0.01 s of audio
        CalculateBytesForTime(streamDescription, maxPacketSize, 0.01, &bufferByteSize, &mNumPacketsToRead);
        bool isFormatVBR = (streamDescription.mBytesPerPacket == 0 || streamDescription.mFramesPerPacket == 0);
        // allocate the audio buffers
        for (int i = 0; i < kNumberBuffers; i++) {
            status = AudioQueueAllocateBufferWithPacketDescriptions(audioQueue, kMinSizePerFrame, (isFormatVBR ? mNumPacketsToRead : 0), &audioQueueBuffers[i]);
            if (status != noErr) {
                NSLog(@"Audio Queue alloc buffer error %d %d", i, (int)status);
                return;
            }
        }
        // force the audio queue to decode in software
        UInt32 value = kAudioQueueHardwareCodecPolicy_UseSoftwareOnly;
        status = AudioQueueSetProperty(audioQueue, kAudioQueueProperty_HardwareCodecPolicy, &value, sizeof(value));
        if (status != noErr) {
            NSLog(@"software codec not in use");
        }
        // set the volume
        Float32 gain = 1.0;
        AudioQueueSetParameter(audioQueue, kAudioQueueParam_Volume, gain);
    }
}
- (void)setUpStreamDescription {
    // set the audio format parameters
    streamDescription.mSampleRate = 16000; // sample rate, e.g. 44100 or 16000
    streamDescription.mFormatID = kAudioFormatLinearPCM;
    streamDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    streamDescription.mChannelsPerFrame = 1; // mono
    streamDescription.mFramesPerPacket = 1; // one frame of data per packet
    streamDescription.mBitsPerChannel = 16; // 0 for compressed formats
    streamDescription.mBytesPerFrame = streamDescription.mChannelsPerFrame * (streamDescription.mBitsPerChannel / 8);
    streamDescription.mBytesPerPacket = streamDescription.mBytesPerFrame;
    streamDescription.mReserved = 0;
}
- (void)playWith:(void *)audioData length:(unsigned int)length {
    OSStatus status;
    if (audioQueue == NULL || ![self checkBufferHasUsed]) {
        [self setup];
        // start playback
        status = AudioQueueStart(audioQueue, NULL);
        if (status != noErr) {
            NSLog(@"error: %d", (int)status);
            return;
        }
    }
    @synchronized (self) {
        AudioQueueBufferRef audioQueueBuffer = NULL;
        // busy-wait until a free buffer becomes available
        while (true) {
            audioQueueBuffer = [self getNotUsedBuffer];
            if (audioQueueBuffer != NULL) {
                break;
            }
        }
        // copy `length` bytes of PCM data into the buffer
        memcpy(audioQueueBuffer->mAudioData, audioData, length);
        audioQueueBuffer->mAudioDataByteSize = length;
        // enqueue the buffer onto the audio queue
        AudioQueueEnqueueBuffer(audioQueue, audioQueueBuffer, 0, NULL);
    }
}
- (BOOL)checkBufferHasUsed
{
    for (int i = 0; i < kNumberBuffers; i++) {
        if (YES == audioQueueUsed[i]) {
            return YES;
        }
    }
    // no buffer is currently in use, so playback needs to be (re)started
    NSLog(@"start playback............");
    return NO;
}
- (AudioQueueBufferRef)getNotUsedBuffer
{
    for (int i = 0; i < kNumberBuffers; i++) {
        if (NO == audioQueueUsed[i]) {
            // mark the buffer as in use and hand it out
            audioQueueUsed[i] = YES;
            return audioQueueBuffers[i];
        }
    }
    return NULL;
}
#pragma mark - C helper functions
void CalculateBytesForTime(AudioStreamBasicDescription inDesc, UInt32 inMaxPacketSize, Float64 inSeconds, UInt32 *outBufferSize, UInt32 *outNumPackets)
{
    // we only use time here as a guideline
    // we're really trying to get somewhere between 16K and 64K buffers, but not allocate too much if we don't need it
    static const int maxBufferSize = 0x1000; // limit size to 4K (4096)
    static const int minBufferSize = 0x400;  // limit size to 1K (1024)
    if (inDesc.mFramesPerPacket) {
        Float64 numPacketsForTime = inDesc.mSampleRate / inDesc.mFramesPerPacket * inSeconds;
        *outBufferSize = numPacketsForTime * inMaxPacketSize;
    } else {
        // if frames per packet is zero, then the codec has no predictable packet == time
        // so we can't tailor this (we don't know how many packets represent a time period)
        // we'll just return a default buffer size
        *outBufferSize = maxBufferSize > inMaxPacketSize ? maxBufferSize : inMaxPacketSize;
    }
    // we're going to limit our size to our default
    if (*outBufferSize > maxBufferSize && *outBufferSize > inMaxPacketSize)
        *outBufferSize = maxBufferSize;
    else {
        // also make sure we're not too small - we don't want to go to the disk for too small chunks
        if (*outBufferSize < minBufferSize)
            *outBufferSize = minBufferSize;
    }
    *outNumPackets = *outBufferSize / inMaxPacketSize;
}
static void outputCallback(void *inUserData, AudioQueueRef outAQ, AudioQueueBufferRef outBuffer) {
    DBYAudioQueuePlayer *player = (__bridge DBYAudioQueuePlayer *)inUserData;
    for (int i = 0; i < kNumberBuffers; i++) {
        if (outBuffer == player->audioQueueBuffers[i]) {
            // the audio queue has finished with this buffer; mark it free so it can be refilled
            player->audioQueueUsed[i] = NO;
        }
    }
}
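Finally, a hypothetical usage sketch. The file path, the way the PCM bytes are obtained, and the chunk size are my assumptions, not part of the original post; the data is assumed to be raw 16 kHz, mono, 16-bit PCM with no header:
// Hypothetical usage -- feed raw PCM to the player in small chunks.
DBYAudioQueuePlayer *player = [[DBYAudioQueuePlayer alloc] initWithSampleRate:16000];
NSData *pcmData = [NSData dataWithContentsOfFile:@"/path/to/audio.pcm"]; // assumed source of raw PCM
NSUInteger chunkSize = 640; // 20 ms of 16 kHz mono 16-bit audio; keep this below kMinSizePerFrame
for (NSUInteger offset = 0; offset < pcmData.length; offset += chunkSize) {
    NSUInteger len = MIN(chunkSize, pcmData.length - offset);
    [player playWith:(void *)((const char *)pcmData.bytes + offset) length:(unsigned int)len];
}
Because playWith: busy-waits until one of the three buffers is free, this loop is throttled to roughly the playback rate.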