Audio & Video: A Study Summary of Android Audio Rendering

Author: Oneminute | Published 2018-07-19 13:54

    1. Audio rendering options on the Android platform

    MediaPlayer (high-level Java API, simple to use)
    SoundPool (suited to playing short audio clips)
    AudioTrack (lower-level API, low latency, useful when performance matters; a minimal usage sketch follows this list)
    OpenSL ES (cross-platform C API, powerful audio-effect processing, low latency)
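
    The detailed walkthrough below uses OpenSL ES. For comparison, here is a minimal sketch of playing the same kind of raw PCM (44.1 kHz, 16-bit, stereo) with AudioTrack from Java. It is only an illustration: the PcmPlayer class name and the streaming loop are assumptions rather than code from this article, and newer projects would typically construct the track with AudioTrack.Builder instead of the constructor shown here.

    import android.media.AudioFormat;
    import android.media.AudioManager;
    import android.media.AudioTrack;

    import java.io.FileInputStream;
    import java.io.IOException;

    public class PcmPlayer {
        public void play(String path) throws IOException {
            int sampleRate = 44100;
            int channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
            int encoding = AudioFormat.ENCODING_PCM_16BIT;
            // ask the framework for the minimum buffer size it needs for this format
            int bufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig, encoding);

            AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
                    channelConfig, encoding, bufferSize, AudioTrack.MODE_STREAM);
            track.play();

            byte[] buffer = new byte[bufferSize];
            try (FileInputStream in = new FileInputStream(path)) {
                int read;
                // keep feeding raw PCM bytes; write() blocks until the track accepts them
                while ((read = in.read(buffer)) > 0) {
                    track.write(buffer, 0, read);
                }
            } finally {
                track.stop();
                track.release();
            }
        }
    }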

    2. Audio data

    Raw audio data (PCM):

    PCM is the data the device can ultimately play back; it is also called raw or original data, and it is comparatively large.

    Compressed audio data:

    MP3, AAC, and similar formats are compressed audio data. Because raw data is too large to be convenient for storage and network transmission, it is compressed into a defined format (compression is essentially the encoding process).

    3. The process of digitizing audio

    Digitizing an analog signal involves sampling, quantization, and encoding.

    Sampling: the number of samples taken per second is the sampling rate (44.1 kHz means 44,100 samples per second).

    Quantization: each sample is converted to a number; 16 bits are typically used to represent the amplitude range.

    Encoding: the sampled and quantized values are compressed and stored in a defined format (PCM -> AAC, PCM -> MP3, ...); a sketch of the raw data rate these parameters produce follows below.
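
    A minimal sketch of that calculation for the 44.1 kHz, 16-bit, stereo format used later in this article (the PcmSize class name is only illustrative):

    public class PcmSize {
        public static void main(String[] args) {
            int sampleRate = 44100;      // samples per second, per channel
            int channels = 2;            // stereo
            int bytesPerSample = 16 / 8; // 16-bit samples -> 2 bytes

            int bytesPerSecond = sampleRate * channels * bytesPerSample; // 176400
            System.out.println("1 second of raw PCM = " + bytesPerSecond + " bytes"
                    + " (~" + (bytesPerSecond / 1024) + " KiB)");
            // this is exactly the 44100 * 2 * 2 buffer size used in the OpenSL ES code below
        }
    }

    At roughly 172 KiB per second (about 10 MB per minute), it is clear why raw PCM is encoded to AAC or MP3 for storage and streaming.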

    4. Using OpenSL ES

    Basic flow:
    1. Create the engine object and get its interface
    2. Set up the output mix
    3. Create the player (or recorder)
    4. Set up the buffer queue and the callback
    5. Set the play state
    6. Invoke the callback once to start playback

    //native-lib.cpp
    #include <jni.h>
    #include <string>
    
    
    extern "C"
    {
    #include <SLES/OpenSLES.h>
    #include <SLES/OpenSLES_Android.h>
    #include <malloc.h>
    }
    
    #include <android/log.h>
    #define LOGI(FORMAT,...) __android_log_print(ANDROID_LOG_INFO,"zds",FORMAT,##__VA_ARGS__);
    #define LOGE(FORMAT,...) __android_log_print(ANDROID_LOG_ERROR,"zds",FORMAT,##__VA_ARGS__);
    
    // engine object and engine interface
    SLObjectItf engineObject = NULL;
    SLEngineItf engineEngine = NULL;
    
    // output mix (optionally with environmental reverb)
    SLObjectItf outputMixObject = NULL;
    SLEnvironmentalReverbItf outputMixEnvironmentalReverb = NULL;
    SLEnvironmentalReverbSettings reverbSettings = SL_I3DL2_ENVIRONMENT_PRESET_STONECORRIDOR;
    
    
    // PCM player object and its interfaces
    SLObjectItf pcmPlayerObject = NULL;
    SLPlayItf pcmPlayerPlay = NULL;
    SLVolumeItf pcmPlayerVolume = NULL;
    
    // Android simple buffer queue interface
    SLAndroidSimpleBufferQueueItf pcmBufferQueue;
    
    FILE *pcmFile;
    void *buffer;
    
    uint8_t *out_buffer;
    
    // read up to one second of PCM into out_buffer; *pcm is set to NULL at end of file
    void getPcmData(void **pcm)
    {
        *pcm = NULL;
        if (!feof(pcmFile))
        {
            // note: a final partial read is still enqueued as a full one-second buffer
            size_t bytesRead = fread(out_buffer, 1, 44100 * 2 * 2, pcmFile);
            if (bytesRead > 0)
            {
                LOGI("%s", "reading");
                *pcm = out_buffer;
            }
            else
            {
                LOGI("%s", "read end");
            }
        }
    }
    
    // called by OpenSL ES each time a buffer finishes playing: fetch and enqueue the next one
    void pcmBufferCallBack(SLAndroidSimpleBufferQueueItf bf, void * context)
    {
        //assert(NULL == context);
        getPcmData(&buffer);
        // for streaming playback, replace this test by logic to find and fill the next buffer
        if (NULL != buffer) {
            SLresult result;
            // enqueue another buffer
            result = (*pcmBufferQueue)->Enqueue(pcmBufferQueue, buffer, 44100 * 2 * 2);
            // the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT,
            // which for this code example would indicate a programming error
        }
    }
    
    extern "C"
    JNIEXPORT void JNICALL
    Java_com_example_yangw_androidopenslaudio_MainActivity_palypcm(JNIEnv *env, jobject instance,
                                                                   jstring url_) {
        const char *url = env->GetStringUTFChars(url_, 0);
    
        // open the PCM file in binary mode
        pcmFile = fopen(url, "rb");
        if(pcmFile == NULL)
        {
            LOGE("%s", "fopen file error");
            return;
        }
        out_buffer = (uint8_t *) malloc(44100 * 2 * 2); // one second of 44.1 kHz, 16-bit, stereo PCM
    
    
        SLresult result;
        // step 1 ------------------------------------------
        // create and realize the engine object, then get the engine interface
        slCreateEngine(&engineObject, 0, 0, 0, 0, 0);
        (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
        (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
    
    
        // step 2 -------------------------------------------
        // create the output mix
        const SLInterfaceID mids[1] = {SL_IID_ENVIRONMENTALREVERB};
        const SLboolean mreq[1] = {SL_BOOLEAN_FALSE};
        result = (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 1, mids, mreq);
        (void)result;
        result = (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);
        (void)result;
        result = (*outputMixObject)->GetInterface(outputMixObject, SL_IID_ENVIRONMENTALREVERB, &outputMixEnvironmentalReverb);
        if (SL_RESULT_SUCCESS == result) {
            result = (*outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(
                    outputMixEnvironmentalReverb, &reverbSettings);
            (void)result;
        }
        SLDataLocator_OutputMix outputMix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
    
        // step 3 --------------------------------------------
        // create the audio player
        SLDataLocator_AndroidSimpleBufferQueue android_queue={SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,2};// buffer queue with 2 buffers
        SLDataFormat_PCM pcm={
                SL_DATAFORMAT_PCM,//raw PCM data
                2,//2 channels (stereo)
                SL_SAMPLINGRATE_44_1,//44100 Hz sampling rate
                SL_PCMSAMPLEFORMAT_FIXED_16,//16 bits per sample
                SL_PCMSAMPLEFORMAT_FIXED_16,//container size, same as the sample size
                SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT,//channel mask: front left + front right (stereo)
                SL_BYTEORDER_LITTLEENDIAN//little-endian byte order
        };
    
        SLDataSource slDataSource = {&android_queue, &pcm};
        SLDataSink audioSnk = {&outputMix, NULL};
        const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_EFFECTSEND, SL_IID_VOLUME};
        const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
    
        result = (*engineEngine)->CreateAudioPlayer(engineEngine, &pcmPlayerObject, &slDataSource, &audioSnk, 3, ids, req);
        // realize (initialize) the player
        (*pcmPlayerObject)->Realize(pcmPlayerObject, SL_BOOLEAN_FALSE);
    
        // get the play interface
        (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_PLAY, &pcmPlayerPlay);
    
        // step 4 ---------------------------------------
        // get the buffer queue and register the callback
        (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_BUFFERQUEUE, &pcmBufferQueue);
    
        // register the buffer queue callback
        (*pcmBufferQueue)->RegisterCallback(pcmBufferQueue, pcmBufferCallBack, NULL);
        // get the volume interface
        (*pcmPlayerObject)->GetInterface(pcmPlayerObject, SL_IID_VOLUME, &pcmPlayerVolume);
    
        // step 5 ----------------------------------------
        // set the play state to playing
        (*pcmPlayerPlay)->SetPlayState(pcmPlayerPlay, SL_PLAYSTATE_PLAYING);
    
    
        // step 6 ----------------------------------------
        // call the callback once by hand to enqueue the first buffer and start playback
        pcmBufferCallBack(pcmBufferQueue, NULL);
    
        env->ReleaseStringUTFChars(url_, url);
    }
    
    // Activity (Java side)
    // the native library must be loaded before palypcm() is called; the name
    // "native-lib" is an assumption based on the native-lib.cpp filename above
    static {
        System.loadLibrary("native-lib");
    }

    public native void palypcm(String url);

    public void play(View view) {
        String path = Environment.getExternalStorageDirectory()+"/mydream.pcm";
        palypcm(path);
    }
    

    The code above plays raw PCM data; in practice it would be combined with a decoder (for example, decoding AAC or MP3 into PCM first). For brevity the example also omits cleanup: stopping playback, destroying the OpenSL ES objects, closing the file, and freeing the buffer.

    getPcmData reads one second of audio at a time, so each enqueued buffer plays for roughly one second.
