FFmpeg Decoder

Author: Renbing | Published 2017-11-17 07:50

    Libraries to link (quick reference): avcodec.lib; avdevice.lib; avformat.lib; avutil.lib; swscale.lib
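
    On MSVC (which the code below targets, given stdafx.h and sprintf_s), the same libraries can also be linked from source instead of through the project settings; a minimal sketch, assuming the import .lib files are on the linker's search path:

    // MSVC-specific: link the FFmpeg import libraries directly from source.
    // Assumes the .lib files are on the linker's library search path.
    #pragma comment(lib, "avcodec.lib")
    #pragma comment(lib, "avdevice.lib")
    #pragma comment(lib, "avformat.lib")
    #pragma comment(lib, "avutil.lib")
    #pragma comment(lib, "swscale.lib")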

    Video decoding layers, in order:

    1. Protocol: udp, TCP, http, rtp, file...

    2. Container (demuxing): mkv, mp4, mpegts, avi, flv...

    3. Codec: h265, h264, mpeg2, aac...

    4. Raw pixel data: RGB24, YUV...

    Video decoding steps:

    1. av_register_all() // register all codecs and formats

    2. avformat_open_input() // open the video file (demuxing starts here)

    3. avformat_find_stream_info() // retrieve stream information; returns >= 0 if streams are found, < 0 otherwise

    4. avcodec_find_decoder() // look up the matching decoder

    5. avcodec_open2() // open the codec

    6. av_frame_alloc() // allocate the frames; the output buffer size in bytes comes from numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height)

    7. avpicture_fill() // attach the buffer to the appropriate planes of the frame

    8. av_read_frame() // read one packet; returns < 0 on error or end of file, 0 on success

    9. avcodec_decode_video2() // decode the packet into pFrame

    10. sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize); // convert the picture to RGB. sws_ctx, created earlier, acts like a configuration object holding the source and destination format parameters; the converted data is written into pFrameRGB

    11. SaveFrame() // custom helper that writes the converted frame to disk

    12. av_free_packet(&packet); // release the packet

    13. When done, free buffer, pFrameRGB and pFrame, then close the decoder and close the file
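
    The calls above are the legacy API of the FFmpeg version this article was written against (2017-era 2.x/3.x), and they are what the full program below uses. On FFmpeg 4.0+ several of them are deprecated or removed; as a rough orientation (not part of the original article), here is a minimal sketch of the modern equivalents, where fmtCtx, dec, vs, frame, frameRGB and pkt stand in for the corresponding objects in the program below and the image helpers come from <libavutil/imgutils.h>:

    // av_register_all() is no longer required on FFmpeg 4.0+.
    AVCodecContext *ctx = avcodec_alloc_context3(dec);
    avcodec_parameters_to_context(ctx, fmtCtx->streams[vs]->codecpar); // replaces streams[vs]->codec / avcodec_copy_context()
    avcodec_open2(ctx, dec, NULL);

    // Replaces avpicture_get_size() / avpicture_fill(); PIX_FMT_RGB24 is now AV_PIX_FMT_RGB24.
    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, ctx->width, ctx->height, 1);
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes);
    av_image_fill_arrays(frameRGB->data, frameRGB->linesize, buffer,
                         AV_PIX_FMT_RGB24, ctx->width, ctx->height, 1);

    // Replaces avcodec_decode_video2(): one packet in, zero or more frames out.
    while (av_read_frame(fmtCtx, pkt) >= 0) {
        if (pkt->stream_index == vs) {
            avcodec_send_packet(ctx, pkt);
            while (avcodec_receive_frame(ctx, frame) == 0) {
                // sws_scale() and SaveFrame() exactly as in the legacy version
            }
        }
        av_packet_unref(pkt); // replaces av_free_packet()
    }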

    Source code:

    #include "stdafx.h"
    extern "C"
    {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
    }
    #include "stdio.h"
    #include <iostream>
    // compatibility with newer API
    #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
    #define av_frame_alloc avcodec_alloc_frame
    #define av_frame_free avcodec_free_frame
    #endif
    
    using namespace std;
    
    
    void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
        FILE *pFile;
        char szFilename[32];
        int  y;
    
        // Open file
        sprintf_s(szFilename, "frame%d.ppm", iFrame);
        errno_t err;
        err = fopen_s(&pFile, szFilename, "wb");
        if (pFile == NULL)
            return;
    
        // Write header
        fprintf(pFile, "P6\n%d %d\n255\n", width, height);
    
        // Write pixel data
        for (y = 0; y<height; y++)
            fwrite(pFrame->data[0] + y*pFrame->linesize[0], 1, width * 3, pFile);
    
        // Close file
        fclose(pFile);
    }
    
    int main() {
    
        char path[] = "D:\\documents\\SA\\FFmpegTest\\Debug\\vtest.mp4";
        // Initalizing these to NULL prevents segfaults!
        AVFormatContext   *pFormatCtx = NULL;
        int               i, videoStream;
        AVCodecContext    *pCodecCtxOrig = NULL;
        AVCodecContext    *pCodecCtx = NULL;
        AVCodec           *pCodec = NULL;
        AVFrame           *pFrame = NULL;
        AVFrame           *pFrameRGB = NULL;
        AVPacket          packet;
        int               frameFinished;
        int               numBytes;
        uint8_t           *buffer = NULL;
        struct SwsContext *sws_ctx = NULL;
    
        /*if (argc < 2) {
            printf("Please provide a movie file\n");
            return -1;
        }*/
        // Register all formats and codecs
        av_register_all();
    
        // Open video file
        if (avformat_open_input(&pFormatCtx, path, NULL, NULL) != 0)
            return -1; // Couldn't open file
    
        // Retrieve stream information
        if (avformat_find_stream_info(pFormatCtx, NULL)<0)
            return -1; // Couldn't find stream information
    
        // Dump information about file onto standard error
        av_dump_format(pFormatCtx, 0, path, 0);
    
        // Find the first video stream
        videoStream = -1;
    
        for (i = 0; i<pFormatCtx->nb_streams; i++)
            if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                videoStream = i;
                break;
            }
        if (videoStream == -1)
            return -1; // Didn't find a video stream
    
        // Get a pointer to the codec context for the video stream
        pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec;
        // Find the decoder for the video stream
        pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
        if (pCodec == NULL) {
            fprintf(stderr, "Unsupported codec!\n");
            return -1; // Codec not found
        }
        // Copy context
        pCodecCtx = avcodec_alloc_context3(pCodec);
        if (avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
            fprintf(stderr, "Couldn't copy codec context");
            return -1; // Error copying codec context
        }
    
        // Open codec
        if (avcodec_open2(pCodecCtx, pCodec, NULL)<0)
            return -1; // Could not open codec
    
        // Allocate video frame
        pFrame = av_frame_alloc();
    
        // Allocate an AVFrame structure
        pFrameRGB = av_frame_alloc();
        if (pFrameRGB == NULL)
            return -1;
    
        // Determine required buffer size and allocate buffer
        numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
            pCodecCtx->height);
    
        cout << "test: " << pCodecCtx->width * pCodecCtx->height << endl;
    
        buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    
        // Assign appropriate parts of buffer to image planes in pFrameRGB
        // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
        // of AVPicture
        avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
            pCodecCtx->width, pCodecCtx->height);
    
        // initialize SWS context for software scaling
        sws_ctx = sws_getContext(pCodecCtx->width,
            pCodecCtx->height,
            pCodecCtx->pix_fmt,
            pCodecCtx->width,
            pCodecCtx->height,
            PIX_FMT_RGB24,
            SWS_BILINEAR,
            NULL,
            NULL,
            NULL
        );
        // Read frames and save first five frames to disk
        i = 0;
        while (av_read_frame(pFormatCtx, &packet) >= 0) {
            // Is this a packet from the video stream?
            if (packet.stream_index == videoStream) {
                // Decode video frame
                avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
    
                // Did we get a video frame?
                if (frameFinished) {
                    // Convert the image from its native format to RGB
                    sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                        pFrame->linesize, 0, pCodecCtx->height,
                        pFrameRGB->data, pFrameRGB->linesize);
    
                    // Save the frame to disk
                    if (++i <= 5)
                        SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
                            i);
                }
            }
    
            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);
        }
    
        // Free the RGB image
        av_free(buffer);
        av_frame_free(&pFrameRGB);
    
        // Free the YUV frame
        av_frame_free(&pFrame);
    
        // Close the codecs
        avcodec_close(pCodecCtx);
        avcodec_close(pCodecCtxOrig);
    
        // Close the video file
        avformat_close_input(&pFormatCtx);
    
        return 0;
    }
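
    One thing the read loop above does not do is drain the decoder at end of file: codecs with frame delay (e.g. H.264 with B-frames) may still hold a few decoded pictures internally when av_read_frame() stops returning packets, so the last frames of the clip can be missed. A minimal sketch of flushing with the same legacy API, to be placed after the while loop and before the cleanup (flushPkt is new; everything else reuses the program's variables):

    // Flush delayed frames: feed the decoder empty packets until it stops
    // producing output (legacy avcodec_decode_video2() API).
    AVPacket flushPkt;
    av_init_packet(&flushPkt);
    flushPkt.data = NULL;
    flushPkt.size = 0;
    frameFinished = 1;
    while (frameFinished) {
        frameFinished = 0;
        if (avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &flushPkt) < 0)
            break;
        if (frameFinished) {
            // Convert and save exactly as in the main loop.
            sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                pFrame->linesize, 0, pCodecCtx->height,
                pFrameRGB->data, pFrameRGB->linesize);
            if (++i <= 5)
                SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
        }
    }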
    
