美文网首页
iOS 音频学习边录音边转码AVCaptureSession

iOS 音频学习边录音边转码AVCaptureSession

作者: 床前明月_光 | 来源:发表于2017-07-17 15:06 被阅读0次

    这种方法用来录音相对比较少, 主要是用来录视频的多一些, 不过也是一种方法, 所以就说一下, 主要的难点是在AudioToolbox这个框架, 用起来不是很熟。

    1.录制声音

    利用AVCaptureSession来录音, 这部分是属于AVFoundation的内容, 相对比较基础, 所以直接看代码就好

        // Create the capture session that coordinates the audio input and output.
        AVCaptureSession *captureSession = [[AVCaptureSession alloc] init];
        self.captureSession = captureSession;
        // Attach the default microphone as the input device.
        // (devicesWithMediaType: is deprecated; defaultDeviceWithMediaType: is the
        // supported way to get the built-in mic.)
        AVCaptureDevice *audioDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
        NSError *inputError = nil;
        AVCaptureDeviceInput *captureAudioInput = [AVCaptureDeviceInput deviceInputWithDevice:audioDevice error:&inputError];
        if (captureAudioInput && [captureSession canAddInput:captureAudioInput]) {
            [captureSession addInput:captureAudioInput];
        } else {
            // Don't silently ignore a failed input (no mic permission, etc.).
            NSLog(@"failed to add audio input: %@", inputError);
        }
        // Attach the raw PCM data output.
        AVCaptureAudioDataOutput *captureAudioOutput = [[AVCaptureAudioDataOutput alloc] init];
        self.captureAudioOutput = captureAudioOutput;
        if ([captureSession canAddOutput:captureAudioOutput]) {
            [captureSession addOutput:captureAudioOutput];
        }
        // The sample-buffer delegate queue MUST be a serial queue so buffers are
        // delivered in order; the original used dispatch_get_global_queue, which
        // is concurrent and can deliver sample buffers out of order.
        dispatch_queue_t captureAudioOutputQueue = dispatch_queue_create("audio.capture.output", DISPATCH_QUEUE_SERIAL);
        [captureAudioOutput setSampleBufferDelegate:self queue:captureAudioOutputQueue];
        // Destination file: Documents/abc.aac, recreated from scratch on each run.
        NSString *audioFilePath = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES).lastObject stringByAppendingPathComponent:@"abc.aac"];
        self.audioFilePath = audioFilePath;
        [[NSFileManager defaultManager] removeItemAtPath:audioFilePath error:nil];
        [[NSFileManager defaultManager] createFileAtPath:audioFilePath contents:nil attributes:nil];
        NSFileHandle *audioFileHandle = [NSFileHandle fileHandleForWritingAtPath:audioFilePath];
        self.audioFileHandle = audioFileHandle;
        [captureSession startRunning];
    

    然后根据代理获取录制声音的原始数据回调, 然后根据自己自定义的编码器对原始数据进行编码, 我这里给出的是AAC数据格式的编码样例

    #pragma mark - AVCaptureAudioDataOutputSampleBufferDelegate
    /// Called on the capture output queue with each raw PCM sample buffer.
    /// Forwards the buffer to the AAC encoder and appends the encoded bytes
    /// to the output file as they become available.
    -(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
        if (captureOutput != self.captureAudioOutput) {
            return;
        }
        // (Removed the per-buffer NSLog of the sample buffer: it is expensive in
        // this hot path and leaks noisy logs into production builds.)
        [self.aacEncoder encodeSampleBuffer:sampleBuffer completionBlock:^(NSData *encodedData, NSError *error) {
            // encodedData is nil when encoding failed; NSFileHandle's writeData:
            // raises on nil, so guard it instead of crashing.
            if (encodedData) {
                [self.audioFileHandle writeData:encodedData];
            } else if (error) {
                NSLog(@"AAC encode error: %@", error);
            }
        }];
    }
    

    2.转码器

    外部属性和API:

    
    @property (nonatomic) dispatch_queue_t encoderQueue;// serial queue on which all encoding work runs
    @property (nonatomic) dispatch_queue_t callbackQueue;// serial queue on which completion blocks are invoked
    
    
    
    // Feed one PCM sample buffer in; the AAC-encoded bytes (or an error) come back via the completion block.
    - (void) encodeSampleBuffer:(CMSampleBufferRef)sampleBuffer completionBlock:(void (^)(NSData *encodedData, NSError* error))completionBlock;
    
    
    

    内部属性:

    @property (nonatomic) AudioConverterRef audioConverter;// lazily-created PCM-to-AAC format converter
    @property (nonatomic) uint8_t *aacBuffer;// scratch buffer that receives one encoded AAC packet
    @property (nonatomic) NSUInteger aacBufferSize;// capacity of aacBuffer in bytes
    @property (nonatomic) char *pcmBuffer;// pointer into the current sample buffer's PCM bytes (not owned)
    @property (nonatomic) size_t pcmBufferSize;// byte count of PCM data still pending conversion
    
    
    

    初始化:

    /// Designated initializer: creates the serial work/callback queues and
    /// pre-allocates a zeroed output buffer sized for one AAC packet.
    - (id) init {
        self = [super init];
        if (self) {
            _encoderQueue = dispatch_queue_create("AAC Encoder Queue", DISPATCH_QUEUE_SERIAL);
            _callbackQueue = dispatch_queue_create("AAC Encoder Callback Queue", DISPATCH_QUEUE_SERIAL);
            // Converter is created lazily from the first sample buffer's format.
            _audioConverter = NULL;
            _pcmBuffer = NULL;
            _pcmBufferSize = 0;
            _aacBufferSize = 1024;
            // calloc == malloc + zero-fill, equivalent to the malloc/memset pair.
            _aacBuffer = calloc(_aacBufferSize, sizeof(uint8_t));
        }
        return self;
    }
    
    

    开始编码:

    /**
     *  Asynchronously encodes one PCM sample buffer to AAC and invokes the
     *  completion block (on callbackQueue) with the AAC packet prefixed by an
     *  ADTS header, or with an error.
     *
     *  @param sampleBuffer    raw PCM audio from the capture output
     *  @param completionBlock receives (encodedData, error); exactly one is non-nil
     */
    - (void) encodeSampleBuffer:(CMSampleBufferRef)sampleBuffer completionBlock:(void (^)(NSData * encodedData, NSError* error))completionBlock {
        CFRetain(sampleBuffer); // keep the buffer alive across the async hop
        dispatch_async(_encoderQueue, ^{
            if (!_audioConverter) {
                // Lazily build the converter from the first buffer's input format.
                [self setupEncoderFromSampleBuffer:sampleBuffer];
            }
            CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
            if (!blockBuffer) {
                // Original code called CFRetain(NULL) here, which crashes.
                NSError *noDataError = [NSError errorWithDomain:NSOSStatusErrorDomain code:kCMBlockBufferStructureAllocationFailedErr userInfo:nil];
                if (completionBlock) {
                    dispatch_async(_callbackQueue, ^{
                        completionBlock(nil, noDataError);
                    });
                }
                CFRelease(sampleBuffer);
                return;
            }
            CFRetain(blockBuffer);
            OSStatus status = CMBlockBufferGetDataPointer(blockBuffer, 0, NULL, &_pcmBufferSize, &_pcmBufferSize ? &_pcmBuffer : &_pcmBuffer);
            NSError *error = nil;
            NSData *data = nil;
            if (status != kCMBlockBufferNoErr) {
                // Original code kept converting with a stale/NULL PCM pointer on
                // failure; skip conversion when the PCM bytes were never obtained.
                error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
            } else {
                memset(_aacBuffer, 0, _aacBufferSize);
                // Output buffer list: one mono buffer backed by _aacBuffer.
                AudioBufferList outAudioBufferList = {0};
                outAudioBufferList.mNumberBuffers = 1;
                outAudioBufferList.mBuffers[0].mNumberChannels = 1;
                outAudioBufferList.mBuffers[0].mDataByteSize = (UInt32)_aacBufferSize;
                outAudioBufferList.mBuffers[0].mData = _aacBuffer;
                UInt32 ioOutputDataPacketSize = 1; // request exactly one AAC packet
                // Pulls PCM through inInputDataProc and fills outAudioBufferList
                // with converted AAC data.
                status = AudioConverterFillComplexBuffer(_audioConverter, inInputDataProc, (__bridge void *)(self), &ioOutputDataPacketSize, &outAudioBufferList, NULL);
                if (status == noErr) {
                    NSData *rawAAC = [NSData dataWithBytes:outAudioBufferList.mBuffers[0].mData length:outAudioBufferList.mBuffers[0].mDataByteSize];
                    // Each AAC packet needs its own ADTS header to be playable as a stream.
                    NSData *adtsHeader = [self adtsDataForPacketLength:rawAAC.length];
                    NSMutableData *fullData = [NSMutableData dataWithData:adtsHeader];
                    [fullData appendData:rawAAC];
                    data = fullData;
                } else {
                    error = [NSError errorWithDomain:NSOSStatusErrorDomain code:status userInfo:nil];
                }
            }
            if (completionBlock) {
                dispatch_async(_callbackQueue, ^{
                    completionBlock(data, error);
                });
            }
            CFRelease(blockBuffer);
            CFRelease(sampleBuffer);
        });
    }
    
    

    输出参数的配置和根据输出参数创建转换器:

    /**
     *  Lazily creates the AAC AudioConverter. The input format (PCM) is taken
     *  from the sample buffer's stream description; the output format is
     *  AAC-LC at the same sample rate, mono.
     *
     *  @param sampleBuffer the first captured audio buffer; defines the input format
     */
    - (void) setupEncoderFromSampleBuffer:(CMSampleBufferRef)sampleBuffer {
        AudioStreamBasicDescription inAudioStreamBasicDescription = *CMAudioFormatDescriptionGetStreamBasicDescription((CMAudioFormatDescriptionRef)CMSampleBufferGetFormatDescription(sampleBuffer));
        
        AudioStreamBasicDescription outAudioStreamBasicDescription = {0}; // zero-init the whole struct — important
        outAudioStreamBasicDescription.mSampleRate = inAudioStreamBasicDescription.mSampleRate; // keep the capture sample rate
        outAudioStreamBasicDescription.mFormatID = kAudioFormatMPEG4AAC; // encode to AAC
        outAudioStreamBasicDescription.mFormatFlags = kMPEG4Object_AAC_LC; // AAC-LC profile (lossy — the original comment calling this "lossless" was wrong)
        outAudioStreamBasicDescription.mBytesPerPacket = 0; // variable packet size for compressed formats
        outAudioStreamBasicDescription.mFramesPerPacket = 1024; // AAC packs 1024 frames per packet
        outAudioStreamBasicDescription.mBytesPerFrame = 0; // 0 for compressed formats
        outAudioStreamBasicDescription.mChannelsPerFrame = 1; // mono
        outAudioStreamBasicDescription.mBitsPerChannel = 0; // 0 for compressed formats
        outAudioStreamBasicDescription.mReserved = 0; // pad to 8-byte alignment
        AudioClassDescription *description = [self
                                              getAudioClassDescriptionWithType:kAudioFormatMPEG4AAC
                                              fromManufacturer:kAppleSoftwareAudioCodecManufacturer]; // software encoder
        if (!description) {
            // Passing inClassDescriptionsCount == 1 with a NULL array to
            // AudioConverterNewSpecific is invalid; bail out instead.
            NSLog(@"no suitable AAC encoder found");
            return;
        }
        OSStatus status = AudioConverterNewSpecific(&inAudioStreamBasicDescription, &outAudioStreamBasicDescription, 1, description, &_audioConverter); // create the converter
        if (status != noErr) {
            NSLog(@"setup converter: %d", (int)status);
        }
    }
    
    
    /**
     *  Looks up the AudioClassDescription for the given codec type and
     *  manufacturer (software vs. hardware implementation).
     *
     *  @param type         codec format, e.g. kAudioFormatMPEG4AAC
     *  @param manufacturer kAppleSoftwareAudioCodecManufacturer or the hardware constant
     *
     *  A codec (coder/decoder) is a device or program that transforms a signal
     *  or data stream — here it selects which AAC encoder implementation the
     *  converter will use.
     *
     *  @return pointer to a matching description (static storage — not
     *          thread-safe), or nil if none is available
     */
    - (AudioClassDescription *)getAudioClassDescriptionWithType:(UInt32)type
                                               fromManufacturer:(UInt32)manufacturer
    {
        static AudioClassDescription desc;
        
        UInt32 encoderSpecifier = type;
        OSStatus st;
        
        // Ask how many encoder descriptions exist for this format.
        UInt32 size;
        st = AudioFormatGetPropertyInfo(kAudioFormatProperty_Encoders,
                                        sizeof(encoderSpecifier),
                                        &encoderSpecifier,
                                        &size);
        if (st) {
            NSLog(@"error getting audio format property info: %d", (int)(st));
            return nil;
        }
        
        unsigned int count = size / sizeof(AudioClassDescription);
        if (count == 0) {
            // A zero-length VLA is undefined behavior in C; and zero entries
            // means no encoder exists anyway.
            return nil;
        }
        AudioClassDescription descriptions[count];
        st = AudioFormatGetProperty(kAudioFormatProperty_Encoders,
                                    sizeof(encoderSpecifier),
                                    &encoderSpecifier,
                                    &size,
                                    descriptions);
        if (st) {
            NSLog(@"error getting audio format property: %d", (int)(st));
            return nil;
        }
        
        // Pick the first description matching both codec type and manufacturer.
        for (unsigned int i = 0; i < count; i++) {
            if ((type == descriptions[i].mSubType) &&
                (manufacturer == descriptions[i].mManufacturer)) {
                memcpy(&desc, &(descriptions[i]), sizeof(desc));
                return &desc;
            }
        }
        
        return nil;
    }
    
    

    转换的回调函数:

    /**
     *  A callback function that supplies audio data to convert. This callback is invoked repeatedly as the converter is ready for new input data.
     *  inUserData carries the AACEncoder instance (bridged from the
     *  AudioConverterFillComplexBuffer call in encodeSampleBuffer:).
     */
    OSStatus inInputDataProc(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData)
    {
        AACEncoder *encoder = (__bridge AACEncoder *)(inUserData);
        UInt32 requestedPackets = *ioNumberDataPackets;
        
        // Hand the pending PCM bytes (if any) to the converter's buffer list.
        size_t copiedSamples = [encoder copyPCMSamplesIntoBuffer:ioData];
        if (copiedSamples < requestedPackets) {
            // The PCM buffer is not full yet: report zero packets and a
            // non-zero status so the converter stops asking for more input.
            *ioNumberDataPackets = 0;
            return -1;
        }
        *ioNumberDataPackets = 1;
        
        return noErr;
    }
    
    /**
     *  Hands the buffered PCM bytes over to the converter's input buffer list,
     *  then clears the internal pointer so the data is consumed exactly once.
     *
     *  @param ioData the converter-supplied buffer list to point at our PCM
     *  @return number of PCM bytes handed over, or 0 if nothing is buffered
     */
    - (size_t) copyPCMSamplesIntoBuffer:(AudioBufferList*)ioData {
        size_t availableBytes = _pcmBufferSize;
        if (availableBytes == 0) {
            return 0;
        }
        // Point the converter directly at our PCM bytes (no copy is made).
        ioData->mBuffers[0].mData = _pcmBuffer;
        ioData->mBuffers[0].mDataByteSize = (UInt32)availableBytes;
        // Mark the buffer as consumed.
        _pcmBuffer = NULL;
        _pcmBufferSize = 0;
        return availableBytes;
    }
    
    

    转换后的要拼上AAC的格式头:

    /**
     *  Add ADTS header at the beginning of each and every AAC packet.
     *  This is needed as MediaCodec encoder generates a packet of raw
     *  AAC data.
     *
     *  Note the packetLen must count in the ADTS header itself.
     *  See: http://wiki.multimedia.cx/index.php?title=ADTS
     *  Also: http://wiki.multimedia.cx/index.php?title=MPEG-4_Audio#Channel_Configurations
     *
     *  NOTE(review): profile/freqIdx/chanCfg are hard-coded for AAC-LC,
     *  44.1 kHz, mono. The converter above copies the capture sample rate,
     *  so if the device captures at another rate (e.g. 48 kHz) freqIdx here
     *  must change to match — confirm against the output ASBD.
     **/
    - (NSData*) adtsDataForPacketLength:(NSUInteger)packetLength {
        int adtsLength = 7;  // ADTS header is 7 bytes (no CRC variant)
        char *packet = malloc(sizeof(char) * adtsLength);
        // Variables Recycled by addADTStoPacket
        int profile = 2;  //AAC LC
        //39=MediaCodecInfo.CodecProfileLevel.AACObjectELD;
        int freqIdx = 4;  //44.1KHz
        int chanCfg = 1;  //MPEG-4 Audio Channel Configuration. 1 Channel front-center
        NSUInteger fullLength = adtsLength + packetLength;  // frame length counts the header itself
        // fill in ADTS data
        packet[0] = (char)0xFF; // 11111111     = syncword
        packet[1] = (char)0xF9; // 1111 1 00 1  = syncword MPEG-2 Layer CRC
        packet[2] = (char)(((profile-1)<<6) + (freqIdx<<2) +(chanCfg>>2)); // profile, sampling-freq index, top bit of channel config
        packet[3] = (char)(((chanCfg&3)<<6) + (fullLength>>11)); // low channel-config bits + top 2 bits of frame length
        packet[4] = (char)((fullLength&0x7FF) >> 3); // middle 8 bits of frame length
        packet[5] = (char)(((fullLength&7)<<5) + 0x1F); // low 3 bits of frame length + buffer-fullness high bits
        packet[6] = (char)0xFC; // buffer-fullness low bits + "1 raw data block"
        // NSData takes ownership of `packet` and free()s it (freeWhenDone:YES).
        NSData *data = [NSData dataWithBytesNoCopy:packet length:adtsLength freeWhenDone:YES];
        return data;
    }
    

    注意点:

    录音的数据是可以根据帧为单位进行切割来播放的。

    1. 未压缩的PCM数据, 看你是封装成什么文件格式(而不是数据格式), 根据文件格式拼上格式头就可以播放了。
      例如: pcm数据格式的wav文件的格式头:
    // Builds the 44-byte WAV (RIFF) header; prepending it to raw PCM makes the file playable.
    // NOTE(review): header[32] and header[34] hard-code 16-bit samples — the
    // byteRate/blockAlign math assumes 16-bit PCM; confirm callers always pass
    // 16-bit data.
    NSData* WriteWavFileHeader(long totalAudioLen, long totalDataLen, long longSampleRate,int channels, long byteRate)
    {
        Byte  header[44];
        // 4 bytes: resource interchange file marker "RIFF"
        header[0] = 'R';  // RIFF/WAVE header
        header[1] = 'I';
        header[2] = 'F';
        header[3] = 'F';
        // 4 bytes, little-endian: total bytes from the next address to end of file
        header[4] = (Byte) (totalDataLen & 0xff);  //file-size (equals file-size - 8)
        header[5] = (Byte) ((totalDataLen >> 8) & 0xff);
        header[6] = (Byte) ((totalDataLen >> 16) & 0xff);
        header[7] = (Byte) ((totalDataLen >> 24) & 0xff);
        // 4 bytes: WAV file marker "WAVE"
        header[8] = 'W';  // Mark it as type "WAVE"
        header[9] = 'A';
        header[10] = 'V';
        header[11] = 'E';
        // 4 bytes: format chunk marker "fmt " (note the trailing space)
        header[12] = 'f';  // Mark the format section 'fmt ' chunk
        header[13] = 'm';
        header[14] = 't';
        header[15] = ' ';
        // 4 bytes: size of the 'fmt ' chunk
        header[16] = 16;   // 4 bytes: size of 'fmt ' chunk, Length of format data.  Always 16
        header[17] = 0;
        header[18] = 0;
        header[19] = 0;
        // 2 bytes: audio format (1 = linear PCM / WAVE_FORMAT_PCM)
        header[20] = 1;  // format = 1 ,Wave type PCM
        header[21] = 0;
        // 2 bytes: channel count
        header[22] = (Byte) channels;  // channels
        header[23] = 0;
        // 4 bytes, little-endian: sample rate
        header[24] = (Byte) (longSampleRate & 0xff);
        header[25] = (Byte) ((longSampleRate >> 8) & 0xff);
        header[26] = (Byte) ((longSampleRate >> 16) & 0xff);
        header[27] = (Byte) ((longSampleRate >> 24) & 0xff);
        // 4 bytes: byte rate = sampleRate * channels * bitsPerSample / 8
        // (e.g. 22050 bytes/s = 11025 * 1 * 16 / 8)
        header[28] = (Byte) (byteRate & 0xff);
        header[29] = (Byte) ((byteRate >> 8) & 0xff);
        header[30] = (Byte) ((byteRate >> 16) & 0xff);
        header[31] = (Byte) ((byteRate >> 24) & 0xff);
        // 2 bytes: block align = channels * bitsPerSample / 8 (e.g. 2 = 1 * 16 / 8)
        header[32] = (Byte) (channels * 16 / 8);
        header[33] = 0;
        // 2 bytes: bits per sample (PCM bit depth)
        header[34] = 16; // bits per sample
        header[35] = 0;
        // 4 bytes: data chunk marker "data"
        header[36] = 'd'; //"data" marker
        header[37] = 'a';
        header[38] = 't';
        header[39] = 'a';
        // 4 bytes: length of the raw PCM data that follows the 44-byte header
        header[40] = (Byte) (totalAudioLen & 0xff);  //data-size (equals file-size - 44).
        header[41] = (Byte) ((totalAudioLen >> 8) & 0xff);
        header[42] = (Byte) ((totalAudioLen >> 16) & 0xff);
        header[43] = (Byte) ((totalAudioLen >> 24) & 0xff);
        
        return [[NSData alloc] initWithBytes:header length:44];;
    }
    

    2.压缩的数据, 压缩(编码)后的数据, 一般都是每一帧都有独立的格式头的, 所以, 根据帧切割后,直接就可以播放了, 我自己测的时候用AVAudioPlayer是播放不了AAC数据格式文件的, 要用 AudioServicesPlaySystemSound(),或者AudioQueueStart()来播放

    最后附上AAC录音编码, 和AAC解码播放的DEMO

    相关文章

      网友评论

          本文标题:iOS 音频学习边录音边转码AVCaptureSession

          本文链接:https://www.haomeiwen.com/subject/brgfkxtx.html