Adding Filters with FFmpeg

Author: 张俊峰0613 | Published 2018-12-27 11:42

First, create an Android project by following the earlier article.

Declare the native method:

    /**
     * Decodes the video, runs it through an AVFilter graph, and renders it.
     *
     * @param filePath      path of the source video file
     * @param surface       the Surface obtained from the SurfaceView
     * @param filterType    the filter-graph description (e.g. "hflip")
     * @return 0 on success, a negative value on failure
     */
    public native int filter(String filePath, Object surface, String filterType);
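
A minimal sketch of calling it from an Activity once the SurfaceView's surface is ready. The library name, input path, and filter string below are assumptions for illustration, not taken from the original project:

    public class FFmpeg {
        static {
            // Name of the .so built from the JNI code below -- an assumption.
            System.loadLibrary("ffmpeg-avfilter");
        }

        public native int filter(String filePath, Object surface, String filterType);
    }

    // In the Activity, once the surface exists (filter() blocks until
    // playback ends, so keep it off the UI thread):
    new Thread(() -> new FFmpeg().filter(
            "/sdcard/test.mp4",                       // hypothetical input file
            surfaceView.getHolder().getSurface(),
            "hflip"                                   // flip the video horizontally
    )).start();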
    

Generate the header file:

    javah com.example.zjf.ffmpegavfilter.FFmpeg
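
javah generates a header whose prototype must match the implementation below. A sketch of the expected output (the exact guard macros and comments in the generated file may differ):

    /* com_example_zjf_ffmpegavfilter_FFmpeg.h -- generated by javah */
    #include <jni.h>

    #ifdef __cplusplus
    extern "C" {
    #endif

    JNIEXPORT jint JNICALL Java_com_example_zjf_ffmpegavfilter_FFmpeg_filter
            (JNIEnv *, jobject, jstring, jobject, jstring);

    #ifdef __cplusplus
    }
    #endif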
    

Implement the JNI method:

    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;     // just think of it as an instance of an AVFilter
    AVFilterGraph *filter_graph;
    // Playback-control flags; assumed to be toggled from other JNI methods
    // (not shown here), so they are declared as globals in this sketch.
    volatile int again = 0;
    volatile int is_playing = 0;
    
    JNIEXPORT jint JNICALL Java_com_example_zjf_ffmpegavfilter_FFmpeg_filter
            (JNIEnv *env, jobject obj, jstring filePath, jobject surface, jstring filterDescr) {
        LOGD("play");
        is_playing = 1;     // mark playback as active; cleared again on exit

        const char *file_name = env->GetStringUTFChars(filePath, NULL);
        const char *filter_descr = env->GetStringUTFChars(filterDescr, NULL);

        av_register_all();

        avfilter_register_all();    // register all filter components
    
        AVFormatContext *pFormatCtx = avformat_alloc_context();
    
        // Open the video file
        if (avformat_open_input(&pFormatCtx, file_name, NULL, NULL) != 0) {
            LOGD("Couldn't open file:%s\n", file_name);
            return -1; // Couldn't open file
        }
    
        // Retrieve stream information
        if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
            LOGD("Couldn't find stream information.");
            return -1;
        }
    
        // Find the first video stream
        int videoStream = -1, i;
        for (i = 0; i < pFormatCtx->nb_streams; i++) {
            if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0){
                videoStream = i;
            }
        }
        if (videoStream == -1) {
            LOGD("Didn't find a video stream.");
            return -1; // Didn't find a video stream
        }
    
        // Allocate a codec context and copy the stream's codec parameters into it
        AVCodecContext *pCodecCtx = avcodec_alloc_context3(NULL);
        if (pCodecCtx == NULL)
        {
            LOGD("Could not allocate AVCodecContext\n");
            return -1;
        }
        avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[videoStream]->codecpar);
        
        /*
         * Initialize the filter graph.
         */
        char args[512];
        int ret;

        // Any filter already registered in libavfilter can be looked up by
        // name; the definition covers the filter's name, description,
        // input/output pads and the relevant callbacks.
        AVFilter *buffersrc  = avfilter_get_by_name("buffer");     /* input buffer filter */
        AVFilter *buffersink = avfilter_get_by_name("buffersink"); /* output buffer filter */

        // AVFilterInOut describes the inputs/outputs of the graph's two
        // endpoint filters, buffer and buffersink.
        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs  = avfilter_inout_alloc();

        enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
        AVRational time_base = pFormatCtx->streams[videoStream]->time_base; /* stream time base */
        AVBufferSinkParams *buffersink_params;
    
        filter_graph = avfilter_graph_alloc();
    
        snprintf(args, sizeof(args),
                 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                 pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                 time_base.num, time_base.den,
                 pCodecCtx->sample_aspect_ratio.num, pCodecCtx->sample_aspect_ratio.den);
    
        // Combine the named filter (here "buffer") with the init string args
        // to create a filter instance and insert it into filter_graph.
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                           args, NULL, filter_graph);
        if (ret < 0) {
            LOGD("Cannot create buffer source === %d\n",ret);
            return ret;
        }
    
        /* buffer video sink: to terminate the filter chain. */
        buffersink_params = av_buffersink_params_alloc();
        buffersink_params->pixel_fmts = pix_fmts;
        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                           NULL, buffersink_params, filter_graph);
        av_free(buffersink_params);
        if (ret < 0) {
            LOGD("Cannot create buffer sink\n");
            return ret;
        }
    
        /* Endpoints for the filter graph. */
        outputs->name       = av_strdup("in");
        outputs->filter_ctx = buffersrc_ctx;
        outputs->pad_idx    = 0;
        outputs->next       = NULL;
    
        inputs->name       = av_strdup("out");
        inputs->filter_ctx = buffersink_ctx;
        inputs->pad_idx    = 0;
        inputs->next       = NULL;
    
        // filter_descr is a filter-graph description such as "overlay=iw:ih";
        // avfilter_graph_parse_ptr() parses it and links all the filters in
        // the graph automatically.
        if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
                                            &inputs, &outputs, NULL)) < 0) {
            LOGD("Cannot avfilter_graph_parse_ptr\n");
            return ret;
        }
    
        // Check that the constructed graph is complete and usable
        if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
            LOGD("Cannot avfilter_graph_config\n");
            return ret;
        }
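
        /*
         * At this point the graph is fully linked:
         *   buffer ("in") -> [chain parsed from filter_descr] -> buffersink ("out")
         * e.g. with filter_descr = "hflip" the effective chain is
         *   in -> hflip -> out.
         */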
    
      
    
        // Find a decoder for the video stream
        AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (pCodec == NULL) {
            LOGD("Codec not found.");
            return -1; // Codec not found
        }
    
        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
            LOGD("Could not open codec.");
            return -1; // Could not open codec
        }
    
        // Get the native window from the Surface
        ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);
    
        // Video dimensions
        int videoWidth = pCodecCtx->width;
        int videoHeight = pCodecCtx->height;
    
        // Set the native window's buffer size; the content is scaled automatically
        ANativeWindow_setBuffersGeometry(nativeWindow, videoWidth, videoHeight,
                                         WINDOW_FORMAT_RGBA_8888);
        ANativeWindow_Buffer windowBuffer;
    
        AVFrame *pFrame = av_frame_alloc();
    
        // Frame that will hold the RGBA data for rendering
        AVFrame *pFrameRGBA = av_frame_alloc();
        if (pFrameRGBA == NULL || pFrame == NULL) {
            LOGD("Could not allocate video frame.");
            return -1;
        }
    
        // The buffer holds the RGBA pixels that will actually be rendered
        int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGBA, pCodecCtx->width, pCodecCtx->height,
                                                1);
        uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
        av_image_fill_arrays(pFrameRGBA->data, pFrameRGBA->linesize, buffer, AV_PIX_FMT_RGBA,
                             pCodecCtx->width, pCodecCtx->height, 1);
    
        // Decoded frames are not in RGBA, so convert them before rendering
        struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width,
                                                    pCodecCtx->height,
                                                    pCodecCtx->pix_fmt,
                                                    pCodecCtx->width,
                                                    pCodecCtx->height,
                                                    AV_PIX_FMT_RGBA,
                                                    SWS_BILINEAR,
                                                    NULL,
                                                    NULL,
                                                    NULL);
    
        AVPacket packet;
        while (av_read_frame(pFormatCtx, &packet) >= 0) {
            if (again) {
                // A restart was requested from Java; leave the loop so that
                // the resources below are still released.
                break;
            }
            if (packet.stream_index == videoStream) {
                // Decode the packet
                if (avcodec_send_packet(pCodecCtx, &packet) == 0) {
                    while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {

                        // Get the PTS (av_frame_get_best_effort_timestamp()
                        // is deprecated; read the field directly)
                        pFrame->pts = pFrame->best_effort_timestamp;
    
                        // Push the decoded frame into the filter graph
                        if (av_buffersrc_add_frame(buffersrc_ctx, pFrame) < 0) {
                            LOGD("Could not av_buffersrc_add_frame");
                            break;
                        }
                        // Pull the filtered frame back out of the filter graph
                        ret = av_buffersink_get_frame(buffersink_ctx, pFrame);
                        if (ret < 0) {
                            LOGD("Could not av_buffersink_get_frame");
                            break;
                        }
    
                       
                        ANativeWindow_lock(nativeWindow, &windowBuffer, 0);
    
                        // Convert the filtered frame to RGBA
                        sws_scale(sws_ctx, (uint8_t const *const *) pFrame->data,
                                  pFrame->linesize, 0, pCodecCtx->height,
                                  pFrameRGBA->data, pFrameRGBA->linesize);
    
                        // Source and destination strides
                        uint8_t *dst = (uint8_t *) windowBuffer.bits;
                        int dstStride = windowBuffer.stride * 4;
                        uint8_t *src = (pFrameRGBA->data[0]);
                        int srcStride = pFrameRGBA->linesize[0];
    
                        // The window stride and the frame stride differ, so copy line by line
                        int h;
                        for (h = 0; h < videoHeight; h++) {
                            memcpy(dst + h * dstStride, src + h * srcStride, srcStride);
                        }
                        ANativeWindow_unlockAndPost(nativeWindow);
                    }
                }
            }
            av_packet_unref(&packet);
        }
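
        // Note: a complete implementation would also flush the decoder here
        // (avcodec_send_packet(pCodecCtx, NULL), then drain the remaining
        // frames with avcodec_receive_frame()) before releasing resources.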
    
        is_playing = 0;
        // Release resources and close the file. Note that
        // avfilter_graph_free() also frees the filter contexts it owns, so
        // buffersrc_ctx and buffersink_ctx must not be freed separately.
        av_free(buffer);
        av_frame_free(&pFrameRGBA);
        av_frame_free(&pFrame);
        sws_freeContext(sws_ctx);
        avcodec_free_context(&pCodecCtx);
        avformat_close_input(&pFormatCtx);
        avfilter_graph_free(&filter_graph);

        ANativeWindow_release(nativeWindow);
        env->ReleaseStringUTFChars(filePath, file_name);
        env->ReleaseStringUTFChars(filterDescr, filter_descr);
        LOGE("do release...");
        again = 0;
        return ret;
    }
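
The filterType string passed down from Java can be any libavfilter graph description. A few standard filters worth trying (all built into stock FFmpeg; the option values are only illustrative):

    "hflip"                                    // mirror the image horizontally
    "lutyuv='u=128:v=128'"                     // grayscale: neutralize both chroma planes
    "boxblur=2:1"                              // blur (luma radius 2, applied once)
    "edgedetect"                               // edge-detection sketch effect
    "drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5"   // overlay a translucent 3x3 grid

Because the buffersink was constrained to AV_PIX_FMT_YUV420P above, the graph negotiates formats so that whatever a filter outputs is converted back to YUV420P before the sws_scale() RGBA conversion.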
    

Full source code: https://github.com/Xiaoben336/FFmpegAVfilter
