Decoding AVI with FFmpeg and Displaying It on a SurfaceView

Author: 张俊峰0613 | Published 2018-12-24 15:54

    Approach

    1. Decode the container's video data into YUV frames.
    2. Convert the YUV data to RGB.
    3. Hand the frames one by one to the SurfaceView for display.

    This simply adds the method below on top of the previous post (the setup it relies on is sketched right after this).
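    The listing starts at the JNI function itself; assuming the same project setup as the previous post, the file needs roughly the headers below plus a LOGE logging macro. This is a sketch, not the original source, and the log tag is an arbitrary choice:

    #include <jni.h>
    #include <android/log.h>
    #include <android/native_window.h>
    #include <android/native_window_jni.h>

    extern "C" {
    #include "libavformat/avformat.h"
    #include "libavcodec/avcodec.h"
    #include "libswscale/swscale.h"
    #include "libavutil/imgutils.h"
    }

    // The log tag "FFmpegDecoder" is an assumption; any tag works
    #define LOGE(format, ...) __android_log_print(ANDROID_LOG_ERROR, "FFmpegDecoder", format, ##__VA_ARGS__)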

    JNIEXPORT jint JNICALL Java_com_example_zjf_ffmpegdecoder_FFmpeg_play
            (JNIEnv *env, jclass clazz,jstring file_path, jobject surface){
    
        LOGE("play");
    
        av_register_all();
    
        // Copy the Java path string into a local buffer and release the JNI string
        const char *path = env->GetStringUTFChars(file_path, NULL);
        char file_name[500] = {0};
        sprintf(file_name, "%s", path);
        env->ReleaseStringUTFChars(file_path, path);
    
        AVFormatContext *pFormatCtx = avformat_alloc_context();
    
        // Open the video file
        int err_code = avformat_open_input(&pFormatCtx, file_name, NULL, NULL);
        if (err_code != 0) {
            char buf[1024] = {0};
            av_strerror(err_code, buf, sizeof(buf));
            LOGE("Couldn't open file %s, error %d: %s", file_name, err_code, buf);
            return -1;
        }
    
        // Read the stream information
        if (avformat_find_stream_info(pFormatCtx,NULL) < 0) {
            LOGE("Couldn't find stream information.");
            return -1;
        }
    
    
        // Find the first video stream
        int videoStream = -1, i;
        for (i = 0; i < pFormatCtx->nb_streams; i++) {
            if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
                && videoStream < 0) {
                videoStream = i;
            }
        }
        if (videoStream == -1) {
            LOGE("Didn't find a video stream.");
            return -1; // Didn't find a video stream
        }
    
    
        // Get the codec context of the video stream and find a matching decoder
        AVCodecContext *pCodecCtx = pFormatCtx->streams[videoStream]->codec;
        AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (pCodec == NULL) {
            LOGE("Codec not found.");
            return -1; // Codec not found
        }
    
        if (avcodec_open2(pCodecCtx,pCodec,NULL) < 0) {
            LOGE("Could not open codec.");
            return -1; // Could not open codec
        }
    
    
        // Get a native window from the Java Surface to render into
        ANativeWindow *nativeWindow = ANativeWindow_fromSurface(env, surface);
    
        int videoWidth = pCodecCtx->width;
        int videoHeight = pCodecCtx->height;
    
        // Size the window buffers to the video and use RGBA_8888 pixels
        ANativeWindow_setBuffersGeometry(nativeWindow, videoWidth, videoHeight,
                                         WINDOW_FORMAT_RGBA_8888);
    
        ANativeWindow_Buffer windowBuffer;
    
        // Allocate video frame
        AVFrame *pFrame = av_frame_alloc();
        // RGBA frame used for rendering
        AVFrame *pFrameRGBA = av_frame_alloc();
        if (pFrameRGBA == NULL || pFrame == NULL) {
            LOGE("Could not allocate video frame.");
            return -1;
        }
        /* (a) Compute the required buffer size with av_image_get_buffer_size()
           (b) Allocate that much memory with av_malloc()
           (c) Wire the allocated memory into the frame with av_image_fill_arrays() */
        // The data in this buffer is what gets rendered, and its format is RGBA
        int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGBA, pCodecCtx->width, pCodecCtx->height,1);
        uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
        av_image_fill_arrays(pFrameRGBA->data, pFrameRGBA->linesize, buffer, AV_PIX_FMT_RGBA,
                             pCodecCtx->width, pCodecCtx->height, 1);
    
        // Decoded frames are not RGBA, so they must be converted before rendering
        struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width,
                                                    pCodecCtx->height,
                                                    pCodecCtx->pix_fmt,
                                                    pCodecCtx->width,
                                                    pCodecCtx->height,
                                                    AV_PIX_FMT_RGBA,
                                                    SWS_BILINEAR,
                                                    NULL,
                                                    NULL,
                                                    NULL);
    
        int frameFinished;
        AVPacket packet;
        while (av_read_frame(pFormatCtx, &packet) >= 0) {
            // Is this a packet from the video stream?
            if (packet.stream_index == videoStream) {
    
                // Decode video frame
                avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
    
                // A single decode call does not always yield a complete frame
                if (frameFinished) {
    
                    // lock native window buffer
                    ANativeWindow_lock(nativeWindow, &windowBuffer, 0);
    
                    // Convert the frame to RGBA
                    sws_scale(sws_ctx, (uint8_t const *const *) pFrame->data,
                              pFrame->linesize, 0, pCodecCtx->height,
                              pFrameRGBA->data, pFrameRGBA->linesize);
    
                    // Source/destination pointers and strides (in bytes)
                    uint8_t *dst = (uint8_t *) windowBuffer.bits;
                    int dstStride = windowBuffer.stride * 4;
                    uint8_t *src = (pFrameRGBA->data[0]);
                    int srcStride = pFrameRGBA->linesize[0];
    
                    // The window stride and the frame stride can differ, so copy row by row
                    int h;
                    for (h = 0; h < videoHeight; h++) {
                        memcpy(dst + h * dstStride, src + h * srcStride, srcStride);
                    }
    
                    ANativeWindow_unlockAndPost(nativeWindow);
                }
    
            }
            av_packet_unref(&packet);
        }
    
        // Free the scaler context and release the native window
        sws_freeContext(sws_ctx);
        ANativeWindow_release(nativeWindow);

        av_free(buffer);
        av_frame_free(&pFrameRGBA);

        // Free the YUV frame
        av_frame_free(&pFrame);

        // Close the codec
        avcodec_close(pCodecCtx);

        // Close the video file
        avformat_close_input(&pFormatCtx);
        return 0;
    }
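
    Note that avcodec_decode_video2() has been deprecated since FFmpeg 3.1 in favour of the send/receive API. If you build against a newer FFmpeg, the decode step inside the read loop can be replaced along these lines (a sketch only, reusing the variables from the listing above):

        // Inside the av_read_frame() loop, instead of avcodec_decode_video2():
        if (avcodec_send_packet(pCodecCtx, &packet) == 0) {
            // One packet can yield zero or more frames
            while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
                // ... sws_scale() and the ANativeWindow copy exactly as above ...
            }
        }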
    

    Source code: https://github.com/Xiaoben336/FFmpegDecoder
