Part 14: Record-while-playing with MediaCodec, and PCM packet splitting


Author: 最美下雨天 | Published 2018-12-06 14:51
    ADTS header information:
    Reference: https://blog.csdn.net/jay100500/article/details/52955232
    (Figures: ADTS header structure)
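    For reference, the 7-byte ADTS header that is prepended to every encoded AAC frame breaks down as follows. The function below is only a C sketch mirroring the Java addADtsHeader() shown later (field widths per the ADTS spec), not code from the original project:

    #include <cstdint>

    // 7-byte ADTS header, no CRC. profile is the audio object type minus 1 (AAC LC = object
    // type 2), freqIdx is the sampling-frequency index (4 = 44100 Hz, see getADTSsamplerate()),
    // chanCfg is the channel configuration, and packetLen is the frame length *including*
    // these 7 header bytes.
    static void writeAdtsHeader(uint8_t *p, int packetLen, int freqIdx, int chanCfg) {
        const int profile = 2;                              // AAC LC
        p[0] = 0xFF;                                        // syncword 0xFFF: high 8 of its 12 bits
        p[1] = 0xF9;                                        // syncword low 4 bits, ID=1 (MPEG-2), layer=00, protection_absent=1
        p[2] = (uint8_t) (((profile - 1) << 6)              // profile, 2 bits
                          + (freqIdx << 2)                  // sampling-frequency index, 4 bits
                          + (chanCfg >> 2));                // channel configuration, top bit of 3
        p[3] = (uint8_t) (((chanCfg & 3) << 6)              // channel configuration, low 2 bits
                          + (packetLen >> 11));             // frame length (13 bits), top 2 bits
        p[4] = (uint8_t) ((packetLen & 0x7FF) >> 3);        // frame length, middle 8 bits
        p[5] = (uint8_t) (((packetLen & 7) << 5) + 0x1F);   // frame length, low 3 bits + buffer fullness (0x7FF = VBR), top 5 bits
        p[6] = 0xFC;                                        // buffer fullness, low 6 bits + 0 extra raw data blocks
    }

    Because packetLen includes the header, the Java code later adds 7 to bufferInfo.size before writing the header.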

    An introduction to MediaCodec: https://juejin.im/entry/5aa234f751882555712bf210
    Player.java

    private native int n_samplerate();
    
        private MediaFormat mediaFormat;
        private MediaCodec mediaCodec;
        private MediaCodec.BufferInfo bufferInfo;
        private FileOutputStream outputStream;
    
        public void startRecord(File outfile)
        {
            if(n_samplerate() > 0)
            {
                initMediaCodec(n_samplerate(), outfile);
            }
        }
    
        public void initMediaCodec(int samplerate,File outfile)
        {
            try {
                aacSampleRate=getADTSsamplerate(samplerate);
                mediaFormat=MediaFormat.createAudioFormat(MediaFormat.MIMETYPE_AUDIO_AAC,samplerate,2);
                mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE,96000);//bit rate: 96 kbps
                mediaFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
                mediaFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE,1024*4);
                mediaCodec=MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_AUDIO_AAC);
    
                bufferInfo=new MediaCodec.BufferInfo();
                if(mediaCodec==null)
                {
                    Log.e("VoicePlayer","[Java] failed to create the encoder");
                    return;
                }
                mediaCodec.configure(mediaFormat,null,null,MediaCodec.CONFIGURE_FLAG_ENCODE);
                outputStream=new FileOutputStream(outfile);
                mediaCodec.start();
    
            }
            catch (Exception e)
            {
                e.printStackTrace();
            }
    
        }
    
        private int perpcmsize=0;
        private byte[] outByteBuffer;
        private void encodePcm2AAC(int size,byte[] buffer)
        {
            if(buffer!=null&&mediaCodec!=null)
            {
                int inputBufferIndex=mediaCodec.dequeueInputBuffer(0);
                if(inputBufferIndex>=0)
                {
                    ByteBuffer byteBuffer=mediaCodec.getInputBuffers()[inputBufferIndex];
                    byteBuffer.clear();
                    byteBuffer.put(buffer);
                    mediaCodec.queueInputBuffer(inputBufferIndex,0,size,0,0);
    
                }
                int index=mediaCodec.dequeueOutputBuffer(bufferInfo,0);
                while (index>=0)//0 is a valid output buffer index
                {
                    try {
                        perpcmsize=bufferInfo.size+7;
                        outByteBuffer=new byte[perpcmsize];
    
                        ByteBuffer byteBuffer=mediaCodec.getOutputBuffers()[index];
                        byteBuffer.position(bufferInfo.offset);
                        byteBuffer.limit(bufferInfo.offset+bufferInfo.size);
    
                        addADtsHeader(outByteBuffer,perpcmsize,aacSampleRate);
    
                        byteBuffer.get(outByteBuffer,7,bufferInfo.size);
                        byteBuffer.position(bufferInfo.offset);
                        outputStream.write(outByteBuffer,0,perpcmsize);
    
                        mediaCodec.releaseOutputBuffer(index,false);
                        index=mediaCodec.dequeueOutputBuffer(bufferInfo,0);
                        outByteBuffer=null;
    
                    }
                    catch (Exception e)
                    {
                        e.printStackTrace();
                    }
                }
            }
    
    
        }
    
        private int aacSampleRate=4;
    
        private void addADtsHeader(byte[] packet, int packetLen, int samplerate)
        {
            int profile = 2; // AAC LC (audio object type 2)
            int freqIdx = samplerate; // sampling-frequency index, see getADTSsamplerate()
            int chanCfg = 2; // channel configuration: 2 = stereo (CPE)

            packet[0] = (byte) 0xFF; // syncword is 0xFFF (12 bits); only the high 8 bits fit here, the low 4 go into the next byte
            packet[1] = (byte) 0xF9; // low 4 syncword bits, ID=1 (MPEG-2), layer=00, protection_absent=1
            packet[2] = (byte) (((profile - 1) << 6) + (freqIdx << 2) + (chanCfg >> 2));
            packet[3] = (byte) (((chanCfg & 3) << 6) + (packetLen >> 11)); // packetLen is a 13-bit length that includes the 7 header bytes
            packet[4] = (byte) ((packetLen & 0x7FF) >> 3);
            packet[5] = (byte) (((packetLen & 7) << 5) + 0x1F); // low 3 length bits + top of buffer fullness (0x7FF)
            packet[6] = (byte) 0xFC; // rest of buffer fullness + 0 extra raw data blocks
        }
    
        private int getADTSsamplerate(int samplerate)
        {
            int rate = 4;
            switch (samplerate)
            {
                case 96000:
                    rate = 0;
                    break;
                case 88200:
                    rate = 1;
                    break;
                case 64000:
                    rate = 2;
                    break;
                case 48000:
                    rate = 3;
                    break;
                case 44100:
                    rate = 4;
                    break;
                case 32000:
                    rate = 5;
                    break;
                case 24000:
                    rate = 6;
                    break;
                case 22050:
                    rate = 7;
                    break;
                case 16000:
                    rate = 8;
                    break;
                case 12000:
                    rate = 9;
                    break;
                case 11025:
                    rate = 10;
                    break;
                case 8000:
                    rate = 11;
                    break;
                case 7350:
                    rate = 12;
                    break;
            }
            return rate;
        }
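        //Not shown in the article: a matching stop routine. A rough sketch (an assumption, not the
        //author's code): stop and release the encoder and close the AAC output file.
        public void stopRecord()
        {
            try {
                if(mediaCodec!=null)
                {
                    mediaCodec.stop();
                    mediaCodec.release();
                    mediaCodec=null;
                }
                if(outputStream!=null)
                {
                    outputStream.close();
                    outputStream=null;
                }
            }
            catch (Exception e)
            {
                e.printStackTrace();
            }
        }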
    }
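    The native n_samplerate() method is not shown in the article; it presumably just forwards to HAudio::getSampleRate() below. A minimal JNI sketch of that glue (the package name in the mangled function name, the header name and the global hAudio pointer are assumptions):

    #include <jni.h>
    #include "HAudio.h"          // assumed header for the HAudio class

    extern HAudio *hAudio;       // assumed: global audio object created when the player is prepared

    extern "C"
    JNIEXPORT jint JNICALL
    Java_com_example_player_Player_n_1samplerate(JNIEnv *env, jobject instance) {
        if (hAudio != NULL) {
            return hAudio->getSampleRate();   // i.e. avCodecContext->sample_rate
        }
        return 0;                             // startRecord() treats a non-positive value as "not ready"
    }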
    

    HAudio.cpp

    int HAudio::getSampleRate() {
    
        return avCodecContext->sample_rate;
    //    return sample_rate;
    
    }
    
    void pcmBufferCallBack(SLAndroidSimpleBufferQueueItf bf, void * context)
    {
        HAudio *wlAudio = (HAudio *) context;
        if(wlAudio != NULL)
        {
            if(LOG_DEBUG)
            {
            LOGI("resampling called again from the buffer queue callback");
            }
            int buffersize = wlAudio->getSoundTouchData();
    
            //int buffersize = wlAudio->resampleAudio((void **) &wlAudio->out_buffer);;
            if(buffersize > 0)
            {
                int out_channels=av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
                int totalBytes=buffersize*out_channels*av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
            wlAudio->clock+=totalBytes/(double)(wlAudio->sample_rate*2*2);//bytes / (sample rate * 2 channels * 2 bytes per sample) = seconds
            //notify the Java layer at most once every 0.1 s
            if(wlAudio->clock-wlAudio->last_time>0.1)
            {
                wlAudio->last_time=wlAudio->clock;
                //call back into the Java layer to update the displayed time
                wlAudio->callBackJava->onShowTime(CHILD_THREAD,wlAudio->duration,wlAudio->clock);

            }
            //hand the PCM data to the Java layer, where it is encoded to AAC and written to file
            wlAudio->callBackJava->onCallPcmToAAc(CHILD_THREAD, totalBytes, wlAudio->sampleBuffer);
            //(* wlAudio-> pcmBufferQueue)->Enqueue( wlAudio->pcmBufferQueue, (char *) wlAudio-> buffer, buffersize);
            //note the change here compared with the earlier version: the enqueue size is now totalBytes
            //(* wlAudio-> pcmBufferQueue)->Enqueue( wlAudio->pcmBufferQueue, (char *) wlAudio-> sampleBuffer, buffersize*out_channels*av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
            (* wlAudio-> pcmBufferQueue)->Enqueue( wlAudio->pcmBufferQueue, (char *) wlAudio-> sampleBuffer, totalBytes);
            }
        }
    }
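    The CallBackJava class is not part of this excerpt. Roughly, onCallPcmToAAc has to attach the OpenSL ES callback thread to the JVM, copy the PCM bytes into a jbyteArray and invoke the Java encoder. A hedged sketch, assuming members javaVM and jobj plus a method ID cached with GetMethodID(clazz, "encodePcm2AAC", "(I[B)V"), and handling only the CHILD_THREAD case used above:

    // Sketch only: the real CallBackJava implementation is not shown in the article.
    void CallBackJava::onCallPcmToAAc(int type, int size, void *buffer) {
        if (type == CHILD_THREAD) {
            JNIEnv *env = NULL;
            // The OpenSL ES buffer-queue callback runs on a thread the JVM does not know about.
            if (javaVM->AttachCurrentThread(&env, NULL) != JNI_OK) {
                return;
            }
            jbyteArray jbuffer = env->NewByteArray(size);
            env->SetByteArrayRegion(jbuffer, 0, size, static_cast<const jbyte *>(buffer));
            env->CallVoidMethod(jobj, jmid_pcm2aac, size, jbuffer);   // Player.encodePcm2AAC(int, byte[])
            env->DeleteLocalRef(jbuffer);
            javaVM->DetachCurrentThread();
        }
    }

    Attaching and detaching on every callback keeps the sketch simple; a real implementation would usually cache the attached JNIEnv per thread.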
    

    Testing


    During testing, an exception is thrown as soon as a decoded PCM frame is larger than the maximum input size configured here:
    mediaFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE,1024*4);
    

    Raising this value (for example to 1024*8) makes the problem go away in testing.

    But there is no concrete standard for how large is large enough, so we keep it at 4096 and instead split oversized PCM frames into packets to avoid the exception. For example, a 9216-byte frame becomes two 4096-byte packets plus a 1024-byte remainder.

    Splitting large PCM frames into packets


    How it works:
    a. A new thread checks the size of every PCM frame in a queue and splits any frame that is larger than the default packet size.
    b. The OpenSL ES playback callback pushes each PCM frame into that queue (a sketch of this change follows the WlBufferQueue implementation below).
    Two classes are added for this: WlPcmBean and WlBufferQueue.
    WlPcmBean.h

    //
    // Created by yangw on 2018-4-1.
    //
    
    #ifndef WLMUSIC_PCMBEAN_H
    #define WLMUSIC_PCMBEAN_H
    
    #include <SoundTouch.h>
    
    using namespace soundtouch;
    
    class WlPcmBean {
    
    public:
        char *buffer;
        int buffsize;
    
    public:
        WlPcmBean(SAMPLETYPE *buffer, int size);
        ~WlPcmBean();
    
    
    };
    
    
    #endif //WLMUSIC_PCMBEAN_H
    
    

    WlPcmBean.cpp

    //
    // Created by yangw on 2018-4-1.
    //
    
    #include "WlPcmBean.h"
    #include <cstdlib>   // malloc / free
    #include <cstring>   // memcpy
    
    WlPcmBean::WlPcmBean(SAMPLETYPE *buffer, int size) {
    
        this->buffer = (char *) malloc(size);
        this->buffsize = size;
        memcpy(this->buffer, buffer, size);
    
    }
    
    WlPcmBean::~WlPcmBean() {
        free(buffer);
        buffer = NULL;
    }
    
    

    WlBufferQueue.h

    //
    // Created by ywl on 2017-12-3.
    //
    
    #ifndef WLPLAYER_BUFFERQUEUE_H
    #define WLPLAYER_BUFFERQUEUE_H
    
    #include <deque>
    #include "HPlayStatus.h"
    #include "WlPcmBean.h"
    
    extern "C"
    {
    #include <libavcodec/avcodec.h>
    #include <pthread.h>
    };
    
    class WlBufferQueue {
    
    public:
        std::deque<WlPcmBean *> queueBuffer;
        pthread_mutex_t mutexBuffer;
        pthread_cond_t condBuffer;
        HPlayStatus *wlPlayStatus = NULL;
    
    public:
        WlBufferQueue(HPlayStatus *playStatus);
        ~WlBufferQueue();
        int putBuffer(SAMPLETYPE *buffer, int size);
        int getBuffer(WlPcmBean **pcmBean);
        int clearBuffer();
    
        void release();
        int getBufferSize();
    
        int noticeThread();
    };
    
    
    #endif //WLPLAYER_BUFFERQUEUE_H
    
    

    WlBufferQueue.cpp

    //
    // Created by ywl on 2017-12-3.
    //
    
    #include "WlBufferQueue.h"
    #include "Log.h"
    #include "HPlayStatus.h"
    
    WlBufferQueue::WlBufferQueue(HPlayStatus *playStatus) {
        wlPlayStatus = playStatus;
        pthread_mutex_init(&mutexBuffer, NULL);
        pthread_cond_init(&condBuffer, NULL);
    }
    
    WlBufferQueue::~WlBufferQueue() {
        wlPlayStatus = NULL;
        pthread_mutex_destroy(&mutexBuffer);
        pthread_cond_destroy(&condBuffer);
        if(LOG_DEBUG)
        {
        LOGI("WlBufferQueue released");
        }
    }
    
    void WlBufferQueue::release() {
    
        if(LOG_DEBUG)
        {
            LOGI("WlBufferQueue::release");
        }
        noticeThread();
        clearBuffer();
    
        if(LOG_DEBUG)
        {
            LOGI("WlBufferQueue::release success");
        }
    }
    
    int WlBufferQueue::putBuffer(SAMPLETYPE *buffer, int size) {
        pthread_mutex_lock(&mutexBuffer);
        WlPcmBean *pcmBean = new WlPcmBean(buffer, size);
        queueBuffer.push_back(pcmBean);
        pthread_cond_signal(&condBuffer);
        pthread_mutex_unlock(&mutexBuffer);
        return 0;
    }
    
    int WlBufferQueue::getBuffer(WlPcmBean **pcmBean) {
    
        pthread_mutex_lock(&mutexBuffer);
    
        while(wlPlayStatus != NULL && !wlPlayStatus->exit)
        {
            if(queueBuffer.size() > 0)
            {
                *pcmBean = queueBuffer.front();
                queueBuffer.pop_front();
                break;
            } else{
                if(!wlPlayStatus->exit)
                {
                    pthread_cond_wait(&condBuffer, &mutexBuffer);
                }
            }
        }
        pthread_mutex_unlock(&mutexBuffer);
        return 0;
    }
    
    int WlBufferQueue::clearBuffer() {
    
        pthread_cond_signal(&condBuffer);
        pthread_mutex_lock(&mutexBuffer);
        while (!queueBuffer.empty())
        {
            WlPcmBean *pcmBean = queueBuffer.front();
            queueBuffer.pop_front();
            delete(pcmBean);
        }
        pthread_mutex_unlock(&mutexBuffer);
        return 0;
    }
    
    int WlBufferQueue::getBufferSize() {
        int size = 0;
        pthread_mutex_lock(&mutexBuffer);
        size = queueBuffer.size();
        pthread_mutex_unlock(&mutexBuffer);
        return size;
    }
    
    
    int WlBufferQueue::noticeThread() {
        pthread_cond_signal(&condBuffer);
        return 0;
    }
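    With the queue implemented, the OpenSL ES callback in HAudio.cpp (shown earlier calling onCallPcmToAAc directly) is changed, per principle (b), to push each resampled frame into the queue instead; pcmCallBack() in the next listing drains it. A sketch of how the inner block of pcmBufferCallBack might look after this change (assuming sampleBuffer is the SAMPLETYPE buffer filled by getSoundTouchData()):

        if(buffersize > 0)
        {
            int out_channels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
            int totalBytes = buffersize * out_channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
            wlAudio->clock += totalBytes / (double)(wlAudio->sample_rate * 2 * 2);
            if(wlAudio->clock - wlAudio->last_time > 0.1)
            {
                wlAudio->last_time = wlAudio->clock;
                wlAudio->callBackJava->onShowTime(CHILD_THREAD, wlAudio->duration, wlAudio->clock);
            }
            // changed: push the frame into the buffer queue instead of calling onCallPcmToAAc here;
            // the pcm callback thread pops it, splits oversized frames and hands the packets to the Java encoder
            wlAudio->bufferQueue->putBuffer(wlAudio->sampleBuffer, totalBytes);
            (* wlAudio->pcmBufferQueue)->Enqueue(wlAudio->pcmBufferQueue, (char *) wlAudio->sampleBuffer, totalBytes);
        }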
    
    
    

    HAudio.cpp

    void *pcmCallBack(void *data)
    {
        HAudio *audio = (HAudio *) (data);
        while(audio->hPlayStatus != NULL && !audio->hPlayStatus->exit)
        {
            WlPcmBean *pcmBean = NULL;
            audio->bufferQueue->getBuffer(&pcmBean);
            if(pcmBean == NULL)
            {
                continue;
            }
    
            LOGI("pcmbean buffer size is %d", pcmBean->buffsize);
    
        if(pcmBean->buffsize <= audio->defaultPcmSize)//no need to split this frame
            {
                audio->callBackJava->onCallPcmToAAc(CHILD_THREAD, pcmBean->buffsize, pcmBean->buffer);
            } else{
    
                int pack_num = pcmBean->buffsize / audio->defaultPcmSize;
                int pack_sub = pcmBean->buffsize % audio->defaultPcmSize;
    
                for(int i = 0; i < pack_num; i++)
                {
                    char *bf = static_cast<char *>(malloc(audio->defaultPcmSize));
                    memcpy(bf, pcmBean->buffer + i * audio->defaultPcmSize, audio->defaultPcmSize);
                    audio->callBackJava->onCallPcmToAAc(CHILD_THREAD, audio->defaultPcmSize, bf);
    
                    free(bf);
                    bf = NULL;
                }
    
            if(pack_sub > 0)
            {
                char *bf = static_cast<char *>(malloc(pack_sub));
                memcpy(bf, pcmBean->buffer + pack_num * audio->defaultPcmSize, pack_sub);
                audio->callBackJava->onCallPcmToAAc(CHILD_THREAD, pack_sub, bf);
                free(bf);    // free the remainder packet too, mirroring the loop above
                bf = NULL;
            }
            }
            delete(pcmBean);
            pcmBean = NULL;
        }
        pthread_exit(&audio->pcmCallBackThread);
    }
    void HAudio::play() {

        //create the buffer queue first so the threads started below can safely use it
        bufferQueue = new WlBufferQueue(hPlayStatus);
        //create a thread that decodes and feeds PCM data to OpenSL ES
        pthread_create(&pthread,NULL,writeData,this);
        //create the thread that drains the queue, splits oversized frames and hands the packets to the Java encoder
        pthread_create(&pcmCallBackThread, NULL, pcmCallBack, this);

    }
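    The article does not show teardown for the new thread and queue. A hedged sketch of what a stop path could look like (stopPcmCallBack() is a hypothetical helper, not in the original source), using only the WlBufferQueue API declared above:

    // Hypothetical helper: shut down the pcm callback thread and the buffer queue.
    void HAudio::stopPcmCallBack() {
        if (hPlayStatus != NULL) {
            hPlayStatus->exit = true;            // getBuffer()'s loop checks this flag and returns
        }
        if (bufferQueue != NULL) {
            bufferQueue->noticeThread();         // wake getBuffer() if it is blocked on the condition variable
            pthread_join(pcmCallBackThread, NULL);
            bufferQueue->release();              // clears any WlPcmBean packets still queued
            delete bufferQueue;
            bufferQueue = NULL;
        }
    }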
    
