AudioUnit: A Simple Usage Flow

Author: 当AI已成往事55 | Published 2016-04-16 09:41

//  RemoteAudioProcessor.h
//  AOIAudioSimulator

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

#define kOutputBus 0
#define kInputBus 1

#define kSampleRate 8000
#define kFramesPerPacket 1
#define kChannelsPerFrame 1
#define kBitsPerChannel 16

#define SEND_PORT 9533
#define REC_PORT 9534
#define MAX_CLIENT_COUNT 20
#define BUFFER_SIZE 1024

@interface RemoteAudioProcessor : NSObject

@property (readonly) AudioComponentInstance audioUnit;
@property (readonly) AudioBuffer audioBuffer;
@property (strong, readwrite) NSMutableData *mIn;   // PCM received over the rec socket, waiting to be played
@property (strong, readwrite) NSMutableData *mOut;  // PCM captured from the mic, waiting to be sent

- (void)start;
- (void)stop;
- (void)stopSocketServer;
- (void)hasError:(int)statusCode file:(char *)file line:(int)line;
- (void)processBuffer:(AudioBufferList *)audioBufferList;

@end
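The interface above is the whole story in miniature: a RemoteIO AudioUnit captures mic input into mOut while two TCP servers (SEND_PORT and REC_PORT) stream PCM out and in. Before the implementation, here is a minimal sketch of how a caller might drive the class; the call site is hypothetical, since the original post only shows the class itself:

// Hypothetical call site: a minimal usage sketch, not part of the original post.
#import "RemoteAudioProcessor.h"

RemoteAudioProcessor *processor = [[RemoteAudioProcessor alloc] init];
// -init also detaches the two socket-server threads (SEND_PORT / REC_PORT).
[processor start];            // start the RemoteIO unit; captured PCM accumulates in mOut
// ... run for as long as the call lasts ...
[processor stop];             // stop the unit and clear both PCM queues
[processor stopSocketServer]; // let the accept loops exit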

//  RemoteAudioProcessor.m
//  AOIAudioSimulator

#import "RemoteAudioProcessor.h"

#include <arpa/inet.h>
#include <netinet/in.h>
#include <strings.h>
#include <sys/socket.h>
#include <unistd.h>

static NSMutableData *mIn;     // queue of PCM to play (filled by the rec socket)
static NSMutableData *mOut;    // queue of captured PCM (drained by the send socket)
static bool mIsStarted;        // audio unit started
static bool mSendServerStart;  // keep the send-server accept loop running
static bool mRecServerStart;   // keep the rec-server accept loop running
static bool mIsTele;           // telephone call in progress

#pragma mark Recording callback

static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {
    // The recorded samples get rendered into this buffer.
    AudioBuffer buffer;

    // A variable where we check the status.
    OSStatus status;

    // This is the reference to the object that owns the callback.
    RemoteAudioProcessor *audioProcessor = (__bridge RemoteAudioProcessor *)inRefCon;

    // At this point we define the number of channels, which is mono for the
    // iPhone. The number of frames is usually 512 or 1024.
    buffer.mDataByteSize = inNumberFrames * 2; // 16-bit samples, so 2 bytes per frame
    buffer.mNumberChannels = 1;                // one channel
    buffer.mData = malloc(inNumberFrames * 2); // buffer size

    // We put our buffer into a buffer list array for rendering.
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0] = buffer;

    // Render the input and check for errors.
    status = AudioUnitRender([audioProcessor audioUnit], ioActionFlags, inTimeStamp,
                             inBusNumber, inNumberFrames, &bufferList);
    [audioProcessor hasError:status file:__FILE__ line:__LINE__];

    // Process the buffer list in the audio processor.
    [audioProcessor processBuffer:&bufferList];

    // Clean up the buffer.
    free(bufferList.mBuffers[0].mData);

    return noErr;
}

#pragma mark Playback callback

static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData) {
    long len = [mIn length];
    len = len > 1024 ? 1024 : len;
    if (len <= 0) {
        // Nothing queued yet: fill the output with silence rather than
        // leaving stale data in the buffers.
        for (int i = 0; i < ioData->mNumberBuffers; i++) {
            memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
        }
        return noErr;
    }

    for (int i = 0; i < ioData->mNumberBuffers; i++) {
        ASLog(@"len:%ld", len);
        AudioBuffer buffer = ioData->mBuffers[i];
        NSData *pcmBlock = [mIn subdataWithRange:NSMakeRange(0, len)];
        UInt32 size = (UInt32)MIN(buffer.mDataByteSize, [pcmBlock length]);
        memcpy(buffer.mData, [pcmBlock bytes], size);
        // Remove the bytes we just played from the queue.
        [mIn replaceBytesInRange:NSMakeRange(0, size) withBytes:NULL length:0];
        // buffer is a local copy of the struct, so write the size back into ioData.
        ioData->mBuffers[i].mDataByteSize = size;
    }
    return noErr;
}

@implementation RemoteAudioProcessor

@synthesize audioUnit;
@synthesize audioBuffer;

/*
 * Intended as a singleton. The flow is:
 *   init (when no instance exists yet) -> initializeAudioConfig (set the audio format,
 *                                         the I/O buses and the callback functions)
 *                                      -> recordingCallback -> processBuffer
 *                                      -> playbackCallback
 */

- (RemoteAudioProcessor *)init {
    self = [super init];
    if (self) {
        [self initializeAudioConfig];
        mIn = [[NSMutableData alloc] init];
        mOut = [[NSMutableData alloc] init];
        mIsStarted = false;
        mSendServerStart = false;
        mRecServerStart = false;
        mIsTele = false;
        // Spin up the two socket servers on background threads.
        [NSThread detachNewThreadSelector:@selector(initSendSocketServer)
                                 toTarget:self
                               withObject:nil];
        [NSThread detachNewThreadSelector:@selector(initRecSocketServer)
                                 toTarget:self
                               withObject:nil];
    }
    return self;
}

- (void)initializeAudioConfig {
    OSStatus status;

    // Describe the RemoteIO unit, which gives us both input and output.
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;          // we want output
    desc.componentSubType = kAudioUnitSubType_RemoteIO;  // ...and input, via RemoteIO
    desc.componentFlags = 0;                             // must be zero
    desc.componentFlagsMask = 0;                         // must be zero
    desc.componentManufacturer = kAudioUnitManufacturer_Apple; // select provider

    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
    status = AudioComponentInstanceNew(inputComponent, &audioUnit);
    [self hasError:status file:__FILE__ line:__LINE__];

    // Enable recording IO on the input bus.
    UInt32 flag = 1;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  kInputBus,              // input bus (1)
                                  &flag,
                                  sizeof(flag));
    [self hasError:status file:__FILE__ line:__LINE__];

    // Enable playback IO on the output bus.
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output,
                                  kOutputBus,             // output bus (0)
                                  &flag,
                                  sizeof(flag));
    [self hasError:status file:__FILE__ line:__LINE__];

    // Specify the format we want to work with: 8 kHz, 16-bit, signed, mono PCM.
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate = kSampleRate;
    audioFormat.mFormatID = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
    audioFormat.mFramesPerPacket = kFramesPerPacket;
    audioFormat.mChannelsPerFrame = kChannelsPerFrame;
    audioFormat.mBitsPerChannel = kBitsPerChannel;
    audioFormat.mBytesPerPacket = kBitsPerChannel * kChannelsPerFrame * kFramesPerPacket / 8; // = 2
    audioFormat.mBytesPerFrame = kBitsPerChannel * kChannelsPerFrame / 8;                     // = 2

    // Set the format on the output side of the input bus (the data we read from the mic)...
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    [self hasError:status file:__FILE__ line:__LINE__];

    // ...and on the input side of the output bus (the data we feed to the speaker).
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  kOutputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    [self hasError:status file:__FILE__ line:__LINE__];

    // A callback struct holds a pointer to the callback function plus a
    // reference back to the audio processor object.
    AURenderCallbackStruct callbackStruct;

    // Install recordingCallback as the input callback on the input bus.
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Global,
                                  kInputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    [self hasError:status file:__FILE__ line:__LINE__];

    // Install playbackCallback as the render callback on the output bus.
    callbackStruct.inputProc = playbackCallback;
    callbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  kAudioUnitScope_Global,
                                  kOutputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    [self hasError:status file:__FILE__ line:__LINE__];

    // Tell the unit NOT to allocate its own render buffer on the input bus;
    // we supply our own in recordingCallback and write into it directly.
    flag = 0;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_ShouldAllocateBuffer,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));
    [self hasError:status file:__FILE__ line:__LINE__];

    // Mono buffer, preallocated at 1024 bytes (512 16-bit samples).
    // kiki: I don't know where the size 1024 bytes comes from...
    audioBuffer.mNumberChannels = kChannelsPerFrame;
    audioBuffer.mDataByteSize = 512 * 2;
    audioBuffer.mData = malloc(512 * 2);

    // Initialize the audio unit and cross fingers =)
    status = AudioUnitInitialize(audioUnit);
    [self hasError:status file:__FILE__ line:__LINE__];
}

- (void)processBuffer:(AudioBufferList *)audioBufferList {
    AudioBuffer sourceBuffer = audioBufferList->mBuffers[0];

    // Check whether the incoming data byte size has changed.
    if (audioBuffer.mDataByteSize != sourceBuffer.mDataByteSize) {
        // Clear the old buffer, then assign the new byte size and reallocate mData.
        free(audioBuffer.mData);
        audioBuffer.mDataByteSize = sourceBuffer.mDataByteSize;
        audioBuffer.mData = malloc(sourceBuffer.mDataByteSize);
    }

    // Copy the incoming audio into the buffer and queue it for the send socket.
    memcpy(audioBuffer.mData, sourceBuffer.mData, sourceBuffer.mDataByteSize);
    NSData *pcmBlock = [NSData dataWithBytes:sourceBuffer.mData length:sourceBuffer.mDataByteSize];
    [mOut appendData:pcmBlock];
}

- (void)start {
    if (mIsStarted) {
        ASLog(@"-- already started --");
        return;
    }
    ASLog(@"-- start --");
    mIsStarted = true;
    // Empty both PCM queues before starting.
    [mIn replaceBytesInRange:NSMakeRange(0, [mIn length]) withBytes:NULL length:0];
    [mOut replaceBytesInRange:NSMakeRange(0, [mOut length]) withBytes:NULL length:0];
    OSStatus status = AudioOutputUnitStart(audioUnit);
    [self hasError:status file:__FILE__ line:__LINE__];
}

- (void)stop {
    ASLog(@"-- stop --");
    OSStatus status = AudioOutputUnitStop(audioUnit);
    [self hasError:status file:__FILE__ line:__LINE__];
    mIsStarted = false;
    [mIn replaceBytesInRange:NSMakeRange(0, [mIn length]) withBytes:NULL length:0];
    [mOut replaceBytesInRange:NSMakeRange(0, [mOut length]) withBytes:NULL length:0];
}

#pragma mark Error handling

- (void)hasError:(int)statusCode file:(char *)file line:(int)line {
    if (statusCode) {
        ASLog(@"Error code %d in file %s on line %d", statusCode, file, line);
        exit(-1);
    }
}
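One practical note on those status codes: many Core Audio errors are packed four-character codes (e.g. 'fmt?' for an unsupported data format), while others are plain negative integers. A small debugging helper along these lines, not part of the original class, can make the log line readable:

// Hypothetical debugging helper, not in the original post: renders an OSStatus
// either as a four-character code (e.g. 'fmt?') or as a plain integer.
#include <ctype.h>

static NSString *statusToString(OSStatus status) {
    uint32_t bigEndian = CFSwapInt32HostToBig((uint32_t)status);
    char chars[5] = {0};
    memcpy(chars, &bigEndian, 4);
    for (int i = 0; i < 4; i++) {
        if (!isprint((unsigned char)chars[i])) {
            // Not printable as a code; fall back to the raw number.
            return [NSString stringWithFormat:@"%d", (int)status];
        }
    }
    return [NSString stringWithFormat:@"'%s'", chars];
}

hasError:file:line: could then log statusToString(statusCode) instead of the bare integer.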

#pragma mark Socket Implementation

- (int)initSendSocketServer {
    struct sockaddr_in server_addr;
    bzero(&server_addr, sizeof(server_addr));
    server_addr.sin_family = AF_INET;
    server_addr.sin_addr.s_addr = htonl(INADDR_ANY); // htonl, not htons: s_addr is 32-bit
    server_addr.sin_port = htons(SEND_PORT);

    int server_socket = socket(AF_INET, SOCK_STREAM, 0);
    if (server_socket < 0) {
        ASLog(@"Create send socket failed");
        return -1;
    }
    int yes = 1;
    setsockopt(server_socket, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int));
    if (bind(server_socket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
        return [self closeSocket:server_socket error:@"Send server bind failed"];
    }
    if (listen(server_socket, MAX_CLIENT_COUNT)) {
        return [self closeSocket:server_socket error:@"Send server listen failed"];
    }

    mSendServerStart = true;
    while (mSendServerStart) {
        ASLog(@"waiting for send client");
        struct sockaddr_in client_addr;
        socklen_t length = sizeof(client_addr);
        int client_socket = accept(server_socket, (struct sockaddr *)&client_addr, &length);
        if (client_socket < 0) {
            ASLog(@"Accept send client failed");
            break;
        }
        // Serve each client on its own thread.
        [NSThread detachNewThreadSelector:@selector(sendClientRun:)
                                 toTarget:self
                               withObject:[NSNumber numberWithInt:client_socket]];
    }
    close(server_socket);
    return 0;
}

- (int)initRecSocketServer {
    struct sockaddr_in server_addr;
    bzero(&server_addr, sizeof(server_addr));
    server_addr.sin_family = AF_INET;
    server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
    server_addr.sin_port = htons(REC_PORT);

    int server_socket = socket(AF_INET, SOCK_STREAM, 0);
    if (server_socket < 0) {
        ASLog(@"Create rec socket failed");
        return -1;
    }
    int yes = 1;
    setsockopt(server_socket, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int));
    if (bind(server_socket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
        return [self closeSocket:server_socket error:@"Rec server bind failed"];
    }
    if (listen(server_socket, MAX_CLIENT_COUNT)) {
        return [self closeSocket:server_socket error:@"Rec server listen failed"];
    }

    mRecServerStart = true;
    while (mRecServerStart) {
        ASLog(@"waiting for rec client");
        struct sockaddr_in client_addr;
        socklen_t length = sizeof(client_addr);
        int client_socket = accept(server_socket, (struct sockaddr *)&client_addr, &length);
        if (client_socket < 0) {
            ASLog(@"Accept rec client failed");
            break;
        }
        [NSThread detachNewThreadSelector:@selector(recClientRun:)
                                 toTarget:self
                               withObject:[NSNumber numberWithInt:client_socket]];
    }
    close(server_socket);
    return 0;
}

- (int)closeSocket:(int)socket error:(NSString *)errorMsg {
    close(socket);
    if (errorMsg) {
        ASLog(@"%@", errorMsg);
        return -1;
    }
    return 0;
}

- (void)stopSocketServer {
    mSendServerStart = false;
    mRecServerStart = false;
}

- (void)sendClientRun:(NSNumber *)socket {
    int client_socket = [socket intValue];
    // Restart the audio unit so the new client starts with empty PCM queues.
    [self stop];
    [self start];
    ASLog(@"send client connected: %d", client_socket);
    bool isClientAlive = true;
    while (isClientAlive) {
        if ([mOut length] <= 0) {
            usleep(10 * 1000); // don't busy-spin while the capture queue is empty
            continue;
        }
        ssize_t res = send(client_socket, [mOut bytes], [mOut length], 0);
        if (res > 0) {
            // Drop the bytes that were actually sent.
            [mOut replaceBytesInRange:NSMakeRange(0, res) withBytes:NULL length:0];
        } else {
            isClientAlive = false;
            [self closeSocket:client_socket error:@"send socket died"];
        }
    }
}

- (void)recClientRun:(NSNumber *)socket {
    char buffer[BUFFER_SIZE];
    int client_socket = [socket intValue];
    ASLog(@"rec client connected: %d", client_socket);
    bool isClientAlive = true;
    while (isClientAlive) {
        ssize_t res = recv(client_socket, buffer, BUFFER_SIZE, 0);
        if (res > 0) {
            // Queue the received PCM for the playback callback.
            [mIn appendBytes:buffer length:res];
            ASLog(@"rec:%zd", res);
        } else {
            isClientAlive = false;
            [self closeSocket:client_socket error:@"rec socket died"];
        }
    }
}

@end
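To exercise the two servers from the other end of the wire, a client only needs raw TCP connections. The sketch below is not from the original post (the device address is a placeholder): it pulls captured PCM from SEND_PORT and dumps it to stdout, where it can be redirected to a .pcm file and inspected as 8 kHz / 16-bit / mono audio.

// Hypothetical test client: a minimal sketch, assuming the app is reachable at DEVICE_IP.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define DEVICE_IP   "192.168.0.10"  // placeholder address
#define SEND_PORT   9533            // the app streams captured PCM out of this port
#define BUFFER_SIZE 1024

int main(void) {
    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(SEND_PORT);
    inet_pton(AF_INET, DEVICE_IP, &addr.sin_addr);

    int sock = socket(AF_INET, SOCK_STREAM, 0);
    if (sock < 0 || connect(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("connect");
        return 1;
    }

    // Pull captured PCM and write it to stdout.
    char buffer[BUFFER_SIZE];
    ssize_t n;
    while ((n = recv(sock, buffer, sizeof(buffer), 0)) > 0) {
        fwrite(buffer, 1, (size_t)n, stdout);
    }
    close(sock);
    return 0;
}

The mirror direction works the same way: connect to REC_PORT (9534) and send() raw PCM, which the playback callback drains from mIn.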


Original link: https://www.haomeiwen.com/subject/uxonlttx.html