FFMPEG Common Code, Part 1

Author: 何亮hook_8285 | Published 2022-12-17 20:10

Description

This article uses the FFMPEG API to implement video conversion, audio extraction, video extraction, remuxing of raw audio/video streams, screenshots, YUV-to-JPEG conversion, desktop capture, dynamic M3U8 generation, queue operations, time-base operations, enumerating camera devices on Windows, enumerating audio devices on Windows, and querying the screen width/height on Windows.

ffmpeg commands

#Extract the first second at 25 fps as raw RGB
ffmpeg -i 1.mp4 -s 1280x720 -vf fps=fps=25 -ss 00:00:00 -to 00:00:01 -pix_fmt rgb24  1.rgb
#Extract yuv420p
ffmpeg -i 1.mp4 -s 800x400 -ss 00:00:02 -to 00:00:10 -pix_fmt yuv420p 1.yuv
#Extract an rgba file
ffmpeg -i 1.mp4 -s 800x400 -pix_fmt rgba 1.rgb
#Extract s16 PCM
ffmpeg -i 1.mp4 -ar 48000 -ac 2 -f s16le 48000_2_s16le.pcm
#Extract float (f32le) PCM
ffmpeg -i 1.aac -ar 48000 -ac 2 -f f32le 48000_2_f32le.pcm
#Play rgba
ffplay -pixel_format rgba -video_size 800x400 -framerate 5 -i 1.rgb
#Play yuv420p
ffplay -pixel_format yuv420p -video_size 300x300 -framerate 5 -i 300_300.yuv
#Play pcm
ffplay -ar 48000 -ac 2 -f s16le 1.pcm
#Play bgra
ffplay -pixel_format bgra -video_size 1366x768 -framerate 15 -i deskTop.rgb

Bit-rate calculation

Resolution

Number of pixels along the x axis * number of pixels along the y axis.

Common aspect ratios are 16:9 and 4:3.

Typical 16:9 resolutions: 360P / 720P / 1K / 2K.

Frame rate

The number of images captured/played per second.

Animation traditionally runs at 25 frames/s.

Common frame rates: 15 frames/s, 30 frames/s, 60 frames/s.

Raw (unencoded) RGB data rate
RGB = resolution (width * height) * 3 bytes * frame rate (25 fps). For example: 1280*720*3*25 = 69,120,000 bytes/s, roughly 69 MB per second.
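
As a quick cross-check of the formula above, here is a minimal stand-alone C++ sketch (my own helper, not part of the original article) that prints the raw data rate for RGB24 and YUV420P frames:

#include <cstdio>

//raw bytes per second of uncompressed video:
//RGB24 is 3 bytes per pixel, YUV420P is 1.5 bytes per pixel
long long rawBytesPerSecond(int width, int height, int fps, double bytesPerPixel)
{
    return (long long)(width * (long long)height * bytesPerPixel * fps);
}

int main()
{
    //1280x720 at 25 fps, as in the example above
    std::printf("RGB24   : %lld bytes/s\n", rawBytesPerSecond(1280, 720, 25, 3.0)); //69120000
    std::printf("YUV420P : %lld bytes/s\n", rawBytesPerSecond(1280, 720, 25, 1.5)); //34560000
    return 0;
}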

The AVFrame structure

AVFrame is the FFMPEG structure that holds one frame of uncompressed data. The code below demonstrates basic AVFrame operations.

#include <iostream>


extern "C"{
    #include <libavformat/avformat.h>
};


//heliang
int main()
{
    //创建avframe空间
    AVFrame *frame=av_frame_alloc();

    //宽度
    frame->width = 400;
    //高度
    frame->height = 300;
    //格式
    frame->format=AV_PIX_FMT_ARGB;

    //allocate the frame buffers
    //av_frame_get_buffer: pass 0 for align to let FFmpeg choose a suitable alignment; 16 is forced here
    int re=av_frame_get_buffer(frame,16);
    if(re != 0)
    {
        char buff[1024]={0};
        //打印错误
        av_strerror(re,buff,sizeof(buff));
        std::cout << buff << std::endl;
    }
    //rgba 只有第一个索引有值,如果是yuv,linesize的是3个索引值
    std::cout << frame->linesize[0] << std::endl;

    if(frame->buf[0]!=NULL)
    {
        std::cout << "frame ref count =" << av_buffer_get_ref_count(frame->buf[0]) << std::endl;
    }
    //allocate a second AVFrame
    AVFrame *frame2=av_frame_alloc();
    //av_frame_ref makes frame2 reference frame's buffers (it adds a reference, it does not deep-copy the data)
    av_frame_ref(frame2,frame);
    //drop frame2's reference
    av_frame_unref(frame2);
    //drop frame's reference
    av_frame_unref(frame);

    if(frame2->buf[0]!=NULL)
    {
        std::cout << "frame2 ref count = " << av_buffer_get_ref_count(frame2->buf[0]) << std::endl;
    }

    //销毁avframe
    av_frame_free(&frame);
    av_frame_free(&frame2);
    return 0;
}

Frame-rate test

#include <iostream>
#include <ctime>
#include <thread>


//自定义休眠,解决sleep_for不准确问题
void MySleep(unsigned int ms)
{
    clock_t beg=clock();
    for(int i=0;i<ms;i++)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
        if((clock()-beg)/(CLOCKS_PER_SEC/1000)>=ms)
        {
            break;
        }
    }
}
//heliang
int main()
{
    //开始时间
    clock_t beg=clock();
    //帧率
    int fps=0;
    for(;;)
    {
        fps++;
        clock_t  tmp=clock();
        //sleep for 10 ms; sleep_for on its own is not precise
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
//        MySleep(10);
        std::cout << clock() -tmp  << " " << std::flush ;
        //1秒钟开始统计,CLOCKS_PER_SEC CPU每秒跳数
        //间隔毫秒数
        if((clock()-beg)/(CLOCKS_PER_SEC/1000) >1000)
        {
            std::cout << "sleep for fps :" << fps << std::endl;
            break;
        }
    }
    return 0;
}

Setting the frame rate

//target frames per second
int fps=25;
//divide one second by the frame rate to get the per-frame sleep in milliseconds
int sleepMS=1000/fps;
//sleep between frames
MySleep(sleepMS);

Bit-rate formula

File size (bits) / duration (seconds) / 1024 = kbps (kilobits transferred per second)
For example, a 2 MB video with a duration of 20 s:

2 MB = 2*1024*1024*8 = 16777216 bits
bit rate = 16777216 / 20 / 1024 = 819.2 kbps
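
A small C++ sketch of the same calculation (my own helper; the numbers match the worked example above):

#include <cstdio>

//bit rate in kbps = file size in bits / duration in seconds / 1024
double bitrateKbps(long long fileSizeBytes, double durationSeconds)
{
    return (fileSizeBytes * 8.0) / durationSeconds / 1024.0;
}

int main()
{
    long long size = 2LL * 1024 * 1024;                    //a 2 MB file
    std::printf("%.1f kbps\n", bitrateKbps(size, 20.0));   //prints 819.2
    return 0;
}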

Suggested bit rates for mobile

Based on the above and on a few phone projects I have worked on, I use the following formulas to choose a bit rate (as sketched in the code after the table):

Formula                               192X144   320X240   480X360   640X480   1280X720   1920X1080
Very low  (width X height X 3) / 4    30kbps    60kbps    120kbps   250kbps   500kbps    1mbps
Low       (width X height X 3) / 2    60kbps    120kbps   250kbps   500kbps   1mbps      2mbps
Medium    (width X height X 3)        120kbps   250kbps   500kbps   1mbps     2mbps      4mbps
High      (width X height X 3) X 2    250kbps   500kbps   1mbps     2mbps     4mbps      8mbps
Very high (width X height X 3) X 4    500kbps   1mbps     2mbps     4mbps     8mbps      16mbps
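
The rows above can be read as bits per second derived from the pixel count. A minimal sketch of that rule of thumb (my own interpretation of the table's formulas, not code from the article):

#include <cstdio>

//rough bit-rate suggestion in bits/s: (width * height * 3) scaled by a quality factor
//factor 0.25 = very low, 0.5 = low, 1 = medium, 2 = high, 4 = very high
long long suggestedBitrate(int width, int height, double factor)
{
    return (long long)(width * (long long)height * 3 * factor);
}

int main()
{
    //1280x720 "medium" comes out at about 2.8 Mbps, the same order as the 2 mbps entry above
    std::printf("%lld bps\n", suggestedBitrate(1280, 720, 1.0));
    return 0;
}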

Image rescaling (swscale): YUV to RGBA

#include <iostream>
#include <fstream>


typedef unsigned char Uint8;

extern "C"{
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
};

//heliang
int main()
{
    //源yuv宽度
    int yuvWidth=1364;
    //源yuv高度
    int yuvHeight=768;

    //yuv的linesize
    int yuv_linesize[3]={yuvWidth,yuvWidth/2,yuvWidth/2};

    //存储yuv数据
    Uint8 *yuv[3]={0};
    yuv[0]=new Uint8 [yuvWidth*yuvHeight];
    yuv[1]=new Uint8 [yuvWidth*yuvHeight/4];
    yuv[2]=new Uint8 [yuvWidth*yuvHeight/4];

    //打开yuv视频文件
    std::ifstream ifs;
    ifs.open("d:\\ds.yuv",std::ios::binary);

    //yuv重采样rgb
    SwsContext *yuvToRgbCtx= nullptr;
    //rgb width
    int rgbWidth=800;
    //rgb height
    int rgbHeight=600;

    //存储rgba数据
    Uint8 *rgbaOne=new Uint8[rgbWidth*rgbHeight*4];

    //输出rgba数据
    std::ofstream ofs;
    ofs.open("d:\\1.rgba",std::ios::binary);

    //read one yuv frame per iteration and convert it to rgba
    for(;;)
    {
        //读取yuv数据
        ifs.read((char*)yuv[0],yuvWidth*yuvHeight);
        ifs.read((char*)yuv[1],yuvWidth*yuvHeight/4);
        ifs.read((char*)yuv[2],yuvWidth*yuvHeight/4);
        if(ifs.gcount() ==0)break;


        //创建重采样的上下文
        yuvToRgbCtx=sws_getCachedContext(
                yuvToRgbCtx,  //重采样上下文
                yuvWidth, //源yuv宽度
                yuvHeight, //源yuv高度
                AV_PIX_FMT_YUV420P, //yuv存储格式
                rgbWidth, //转rgb宽度
                rgbHeight, //转rgb高度
                AV_PIX_FMT_RGBA, //rgba格式
                SWS_BILINEAR, //重采样算法,线性算法
                NULL, //源过滤,不使用
                NULL, //目标过滤,不使用
                0  //过滤参数,不使用
                );

        //重采样上下文创建失败
        if(!yuvToRgbCtx)
        {
            std::cerr << "sws_getCachedContext failed" << std::endl;
            break;
        }

        Uint8 *rgbData[1];
        rgbData[0]=rgbaOne;

        int rgbaLineSize[1];
        rgbaLineSize[0]=rgbWidth*4;

        //重采样
        int re=sws_scale(yuvToRgbCtx, //重采样上下文
                yuv, //yuv数据
                yuv_linesize, //yuv设置一行大小
                0, //设置y,不考虑,设置0
                yuvHeight, //设置yuv高度
                rgbData, //设置rgba的存储空间
                rgbaLineSize //rgba存储空间
        );

        std::cout << re << " " ;
        //写出rgb数据
        ofs.write((char *)rgbData[0],rgbWidth*rgbHeight*4);

    }

    ifs.close();
    ofs.close();
    //free the conversion context and the buffers
    sws_freeContext(yuvToRgbCtx);
    delete [] yuv[0];
    delete [] yuv[1];
    delete [] yuv[2];
    delete [] rgbaOne;
    return 0;
}

Image rescaling (swscale): RGBA to YUV

#include <iostream>
#include <fstream>


typedef unsigned char Uint8;

extern "C"{
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
};

//heliang
int main()
{
    //rgba宽度
    int rgbaWidth=800;
    //rgba高度
    int rgbaHeight=400;
    //rgba像素字节
    int rgbaPixSize=4;
    //存储rgba数据
    Uint8 *rgbaData=new Uint8[rgbaWidth*rgbaHeight*rgbaPixSize];
    std::ifstream ifs;
    ifs.open("d:\\1\\1.rgb",std::ios::binary);
    if(!ifs.is_open())
    {
        std::cout << "open rgb file is failed !" << std::endl;
        return 0;
    }

    SwsContext *rgbaToYuvCtx= NULL;

    int yuvWidth=300;
    int yuvHeight=300;

    Uint8 *yuv[3];
    yuv[0]=new Uint8[yuvWidth*yuvHeight];
    yuv[1]=new Uint8[yuvWidth*yuvHeight/4];
    yuv[2]=new Uint8[yuvWidth*yuvHeight/4];
    int yuvLineSize[3]={yuvWidth,yuvWidth/2,yuvWidth/2};
    std::ofstream ofs;
    ofs.open("d:\\300_300.yuv",std::ios::binary);

    for(;;)
    {
        //读取rgba数据
        ifs.read((char*)rgbaData,rgbaWidth*rgbaHeight*rgbaPixSize);
        if(ifs.gcount()==0)break;


        //初始化重采样上下文
        rgbaToYuvCtx=sws_getCachedContext(
                rgbaToYuvCtx, //上下文
                rgbaWidth, //源宽度
                rgbaHeight, //源高度
                AV_PIX_FMT_RGBA, //rgba存储格式
                yuvWidth, //目标宽度
                yuvHeight, //目标高度
                AV_PIX_FMT_YUV420P, //重采样格式,yuv420
                SWS_BILINEAR, //重采样算法,线性算法
                0, //过滤器参数,不使用
                0, //过滤器参数,不使用
                0 //过滤器参数,不使用
                );


        Uint8 *srcSlice[1];
        srcSlice[0]=rgbaData;
        int srcStride[1];
        srcStride[0]=rgbaWidth*rgbaPixSize;



        //重采样
        int reH=sws_scale(
                rgbaToYuvCtx, //重采样对象
                srcSlice, //rgba存储空间
                srcStride, //rgba 每行存储大小
                0, //y轴
                rgbaHeight, //高度
                yuv,   //设置yuv存储空间
                yuvLineSize //设置yuv每行存储大小
                );

        std::cout << reH << " ";

        ofs.write((char*)yuv[0],yuvWidth*yuvHeight);
        ofs.write((char*)yuv[1],yuvWidth*yuvHeight/4);
        ofs.write((char*)yuv[2],yuvWidth*yuvHeight/4);

    }

    ofs.close();
    ifs.close();
    sws_freeContext(rgbaToYuvCtx);
    delete [] yuv[0];
    delete [] yuv[1];
    delete [] yuv[2];
    delete [] rgbaData;
    return 0;
}

Audio resampling: PCM

#include <iostream>
#include <fstream>

extern "C"{
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <libavutil/opt.h>
};


//heliang
int main()
{

    //打印编解码器信息
    std::cout << avcodec_configuration() << std::endl;


    //sample format of the source PCM
    AVSampleFormat audioOldFormat=AV_SAMPLE_FMT_S16;

    //sample format to resample to
    AVSampleFormat audioNewFormat=AV_SAMPLE_FMT_FLTP;

    //1.音频重采样上下文开辟空间
    SwrContext *audioSwrCtx=swr_alloc();

    //2.设置重采样上下文
    swr_alloc_set_opts(
            audioSwrCtx, //上下文
            AV_CH_LAYOUT_STEREO, //输出的layout, 如:5.1声道
            audioNewFormat, // 输出的采样格式
            44100,  //输出采样率,频率,单位:Hz
            AV_CH_LAYOUT_STEREO, //输⼊的layout
            audioOldFormat, //输⼊的采样格式
            48000,  // 输⼊的采样率,频率,单位:Hz
            0, //⽇志相关,不⽤管先,直接为0
            NULL
    );

    //3.初始化重采样上下文的参数
    swr_init(audioSwrCtx);

    //4.存储未重采样的数据
    AVFrame *audioOldframe=av_frame_alloc();
    //通道布局情况
    audioOldframe->channel_layout=AV_CH_LAYOUT_STEREO;
    //通道数
    audioOldframe->channels=av_get_channel_layout_nb_channels(audioOldframe->channel_layout);
    //采样格式
    audioOldframe->format=audioOldFormat;
    //采样率
    audioOldframe->sample_rate=48000;
    //nb_samples 必须设置
    //AAC的LC编码级:1024,ACC的HE编码级:2048,MP3:1152
    audioOldframe->nb_samples=1024;
    //packed:多个声道数据交错存放,frame中buffer会data[0],linesize[0] LRLR LRLR
    //planar 每个声道数据单独存放,frame中buffer会data[0],data[1],linesize[0]和 linesize[1]  LLLL RRRR
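    //e.g. packed S16 stereo: L0 R0 L1 R1 ... all in data[0]; planar FLTP stereo: L0 L1 ... in data[0], R0 R1 ... in data[1]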
    /* 为frame分配buffer */
    int re = av_frame_get_buffer(audioOldframe, 0);
    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "av_frame_get_buffer failed error msg :" << buf << std::endl;
        return 0;
    }

    //存储重采样数据
    AVFrame  *audioNewframe=av_frame_alloc();

    //通道布局情况
    audioNewframe->channel_layout=AV_CH_LAYOUT_STEREO;
    //通道数
    audioNewframe->channels=av_get_channel_layout_nb_channels(audioOldframe->channel_layout);
    //采样格式
    audioNewframe->format=audioNewFormat;
    //采样率
    audioNewframe->sample_rate=44100;
    //nb_samples 必须设置
    //AAC的LC编码级:1024,ACC的HE编码级:2048,MP3:1152
    audioNewframe->nb_samples= av_rescale_rnd(audioOldframe->nb_samples, audioNewframe->sample_rate, audioOldframe->sample_rate, AV_ROUND_UP);
    re = av_frame_get_buffer(audioNewframe, 0);

    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "av_frame_get_buffer failed error msg :" << buf << std::endl;
        return 0;
    }


    //读取PCM数据,并且重采样
    std::ifstream ifs("d:\\48000_2_s16le.pcm",std::ios::binary);
    //获取未重采样帧的大小
    int audioOldFrameDataSize=audioOldframe->nb_samples*av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)*audioOldframe->channels;

    std::ofstream ofs("d:\\44100_2_fltp.pcm",std::ios::binary);

    for(int i=0;;i++)
    {
        if(ifs.eof())break;
        ifs.read((char*)audioOldframe->data[0],audioOldFrameDataSize);

        //4.重采样音频
        int out_samples =swr_convert(audioSwrCtx,
                audioNewframe->data,
                audioNewframe->nb_samples,
                (const uint8_t**)audioOldframe->data,
                audioOldframe->nb_samples
                );

        //planar (LLLL RRRR): write the channels interleaved into the output file
        if(av_sample_fmt_is_planar(audioNewFormat)) {
            //bytes per sample of the resampled format
            int size=av_get_bytes_per_sample(audioNewFormat);
            for(int s = 0; s < out_samples; s++) {
                for(int c = 0; c < audioNewframe->channels; c++)
                    ofs.write((char*)audioNewframe->data[c]+s*size,size);
            }
        }
        else {  // packed (LRLR LRLR): data[0] already holds all channels interleaved
            //bytes actually produced by swr_convert
            int audioNewFrameDataSize=out_samples*av_get_bytes_per_sample(audioNewFormat)*audioNewframe->channels;
            ofs.write((char*)audioNewframe->data[0], audioNewFrameDataSize);
        }

    }

    ofs.close();
    ifs.close();
    av_frame_free(&audioOldframe);
    av_frame_free(&audioNewframe);
    swr_free(&audioSwrCtx);
    return 0;
}

Video encoder initialization

#include <iostream>

extern "C"{
#include <libavcodec/avcodec.h>
};

//heliang
int main()
{
    //1. find the encoder; use AV_CODEC_ID_HEVC for H.265
    //avcodec_find_encoder_by_name() looks an encoder up by name
    AVCodec *codec=avcodec_find_encoder(AV_CODEC_ID_H264);

    //2创建编码器上下文
    AVCodecContext *codecCtx=avcodec_alloc_context3(codec);

    //3.设置编码器上下文
    //视频宽度
    codecCtx->width=400;
    //视频高度
    codecCtx->height=300;
    //设置帧的时间单位,pts*time_base =播放时间秒
    codecCtx->time_base={1,25};
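    //e.g. with time_base {1,25}, a frame with pts=50 is presented at 50*(1/25)=2.0 s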
    //设置像素格式
    codecCtx->pix_fmt=AV_PIX_FMT_YUV420P;
    //编码线程数
    codecCtx->thread_count=16;


    //4. open the encoder context
    int re=avcodec_open2(codecCtx,codec,NULL);
    if(re!=0)
    {
      char buf[1024]={0};
      av_strerror(re,buf,sizeof(buf));
      std::cerr << buf << std::endl;
      return 0;
    }

    //释放上下文
    avcodec_free_context(&codecCtx);
    return 0;
}

Video encoder: compressing frames

#include <iostream>
#include <fstream>


extern "C"{
#include <libavcodec/avcodec.h>
};

//heliang
int main()
{
    //1.查到编码器 AV_CODEC_ID_HEVC  AV_CODEC_ID_H264
    //avcodec_find_encoder_by_name() 按名称获取编码器
    AVCodec *codec=avcodec_find_encoder(AV_CODEC_ID_H264);

    //2创建编码器上下文
    AVCodecContext *codecCtx=avcodec_alloc_context3(codec);

    //3.设置编码器上下文
    //视频宽度
    codecCtx->width=400;
    //视频高度
    codecCtx->height=300;
    //设置帧的时间单位,pts*time_base =播放时间秒
    codecCtx->time_base={1,25};
    //设置像素格式
    codecCtx->pix_fmt=AV_PIX_FMT_YUV420P;
    //编码线程数
    codecCtx->thread_count=16;


    //4. open the encoder context
    int re=avcodec_open2(codecCtx,codec,NULL);
    if(re!=0)
    {
      char buf[1024]={0};
      av_strerror(re,buf,sizeof(buf));
      std::cerr << buf << std::endl;
      return 0;
    }

    //创建原始帧空间
    AVFrame *frame=av_frame_alloc();
    frame->width=codecCtx->width;
    frame->height=codecCtx->height;
    frame->format=codecCtx->pix_fmt;
    //设置buffer,并且设置对齐方式
    av_frame_get_buffer(frame,0);

    //初始化编码压缩包的空间
    AVPacket *packet=av_packet_alloc();

    //打包文件
    std::ofstream ofs;
    ofs.open("d:\\400_300.h264",std::ios::binary);


    //生成h264数据
    for(int i=0;i<220;i++)
    {
        //生成y数据
        for(int y=0;y<codecCtx->height;y++)
        {
            for(int x=0;x<codecCtx->width;x++)
            {
                frame->data[0][y*frame->linesize[0]+x]=x+y+i*3;
            }
        }

        //生成uv数据
        for(int y=0;y<codecCtx->height/2;y++)
        {
            for(int x=0;x<codecCtx->width/2;x++)
            {
                frame->data[1][y*frame->linesize[1]+x]=128+y+i*2;
                frame->data[2][y*frame->linesize[2]+x]=64+x+i*5;
            }
        }

        frame->pts= i ; //显示时间

        //发送未压缩帧到线程中压缩
        re=avcodec_send_frame(codecCtx,frame);
        if(re!=0)
        {
            break;
        }

        //返回多帧
        while(re >=0)
        {
            //接收压缩帧,一般前几次调用返回空(缓冲,编码未完成)
            //独立线程编码
            re=avcodec_receive_packet(codecCtx,packet);
            if(re == AVERROR(EAGAIN) || re == AVERROR_EOF)
                break;
            if(re <0)
            {
                char buf[1024]={0};
                av_strerror(re,buf,sizeof(buf));
                std::cerr << "avcodec_receive_packet error:" << buf << std::endl;
                break;
            }

            std::cout << packet->size << " " << std::flush;

            //写文件
            ofs.write((char*)packet->data,packet->size);
            //解引用
            av_packet_unref(packet);


        }


    }

    ofs.close();
    //释放帧
    av_frame_free(&frame);
    //释放压缩包
    av_packet_free(&packet);
    //释放上下文
    avcodec_free_context(&codecCtx);
    return 0;
}

Video: ABR bit-rate setting (not recommended)

//average bit rate (ABR); not recommended, it tends to produce visible artifacts
codecCtx->bit_rate=400000; //400 kbps

Video: CQP quality setting

#include <libavutil/opt.h>

//qp range is 0-51; the larger the qp, the worse the picture quality
av_opt_set(codecCtx->priv_data,"qp","23",0);

Video: CBR bit-rate setting

#include <libavutil/opt.h>

//constant bit rate (CBR); MP4 does not support NAL filler data, so the output container should be MPEG-2 TS
//400 kbps
int br=400000;
codecCtx->rc_min_rate=br;
codecCtx->rc_max_rate=br;
codecCtx->rc_buffer_size=br;
codecCtx->bit_rate=br;
av_opt_set(codecCtx->priv_data,"nal-hrd","cbr",0);

Video: CRF quality setting (recommended)

#include <libavutil/opt.h>

//crf range is 0-51; the larger the value, the worse the picture quality (x264 default is 23)
av_opt_set(codecCtx->priv_data,"crf","23",0);

Video: VBV bit-rate setting

#include <libavutil/opt.h>

av_opt_set(codecCtx->priv_data,"crf","23",0);
int rate=400000;
codecCtx->rc_max_rate=rate;
codecCtx->rc_buffer_size=rate*2;

Video: presets

//set B-frames to 0 to reduce latency (at the cost of a larger output)
codecCtx->max_b_frames=0;
//encoder preset: trades encoding speed against compression efficiency
av_opt_set(codecCtx->priv_data,"preset","ultrafast",0);
//tune for zero latency (e.g. video conferencing); B-frames must be disabled for this
av_opt_set(codecCtx->priv_data,"tune","zerolatency",0);

Audio encoder

#include <iostream>
#include <fstream>

extern "C"{
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <libavutil/opt.h>
};


//heliang
int main()
{

    //打印编解码器信息
    std::cout << avcodec_configuration() << std::endl;

    AVCodec *audioCodec=avcodec_find_encoder(AV_CODEC_ID_MP3);
    AVCodecContext *audioCodecCtx=avcodec_alloc_context3(audioCodec);
    audioCodecCtx->time_base={1,25};
    //音频流
    //编码类型 比特率64K
    audioCodecCtx->bit_rate=64000;
    //样本采样大小,2个字节存储
    audioCodecCtx->sample_fmt=AV_SAMPLE_FMT_S16;
    //音频格式
    audioCodecCtx->codec_type=AVMEDIA_TYPE_AUDIO;
    //采样频率,44100
    audioCodecCtx->sample_rate=48000;
    //音频通道格式类型 如 单通道 双通道
    //av_get_default_channel_layout(2)
    //AV_CH_LAYOUT_STEREO 表示双通道
    audioCodecCtx->channel_layout=AV_CH_LAYOUT_STEREO;
    //音频的通道数
    audioCodecCtx->channels=av_get_channel_layout_nb_channels(audioCodecCtx->channel_layout);
    //设置每一帧大小
    audioCodecCtx->frame_size=1024;

    //open the encoder context
    int re=avcodec_open2(audioCodecCtx,audioCodec,NULL);

    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avcodec_open2 failed error msg :" << buf << std::endl;
        return 0;
    }




    //原始帧
    AVFrame *frame=av_frame_alloc();
    //每帧单个通道的采样点数
    frame->nb_samples=audioCodecCtx->frame_size;
    //采样点格式
    frame->format=audioCodecCtx->sample_fmt;
    //通道布局情况
    frame->channel_layout=audioCodecCtx->channel_layout;
    //通道数
    frame->channels=av_get_channel_layout_nb_channels(frame->channel_layout);
    //采样率
    frame->sample_rate=audioCodecCtx->sample_rate;

     re = av_frame_get_buffer(frame, 0);
    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "av_frame_get_buffer failed error msg :" << buf << std::endl;
        return 0;
    }
    //初始化音频压缩帧
    AVPacket *pkt=av_packet_alloc();
    std::ifstream  ifs("d:\\48000_2_s16le.pcm",std::ios::binary);
    std::ofstream  ofs("d:\\1.mp3",std::ios::binary);
    //获取一帧数据长度
    int size = frame->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * frame->channels;
    for(int i=0;;i++) {

        if (ifs.eof())break;;
        ifs.read((char *) frame->data[0], size);
        frame->pts = i;
        //发送未压缩帧到线程中压缩
        re=avcodec_send_frame(audioCodecCtx,frame);
        if(re<0)
        {
            break;
        }

        while(re >=0) {
            //接收压缩帧,一般前几次调用返回空(缓冲,编码未完成)
            //独立线程编码
            re = avcodec_receive_packet(audioCodecCtx, pkt);
            if(re == AVERROR(EAGAIN) || re == AVERROR_EOF)
                break;
            if(re <0){
                char buf[1024]={0};
                av_strerror(re,buf,sizeof(buf));
                std::cerr << "avcodec_receive_packet error:" << buf << std::endl;
                break;
            }
            av_packet_rescale_ts(pkt, audioCodecCtx->time_base, audioCodecCtx->time_base);


            ofs.write((char*)pkt->data,pkt->size);
            //解引用
            av_packet_unref(pkt);
        }

    }


    //将未消费的帧,消费掉
    int ret = avcodec_send_frame(audioCodecCtx, NULL);
    while (ret >= 0)
    {
        ret = avcodec_receive_packet(audioCodecCtx, pkt);
        if (ret < 0)
            break;

        av_packet_rescale_ts(pkt, audioCodecCtx->time_base, audioCodecCtx->time_base);

        ofs.write((char*)pkt->data,pkt->size);
        pkt->pts=-1;
        //解引用
        av_packet_unref(pkt);
    }


    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_close(audioCodecCtx);
    return 0;
}

Decoder code

#include <iostream>
#include <fstream>
#include <chrono>

extern "C"{
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
};



//heliang
int main()
{
    std::cout << avcodec_configuration() << std::endl;
    //1.查到解码器 AV_CODEC_ID_HEVC  AV_CODEC_ID_H264
    //avcodec_find_decoder_by_name() 按名称获取解码器
    AVCodec *codec=avcodec_find_decoder(AV_CODEC_ID_H264);

    //2. create the decoder context
    AVCodecContext *codecCtx=avcodec_alloc_context3(codec);
    //线程数量
    codecCtx->thread_count=16;

    //3. open the decoder context
    int re=avcodec_open2(codecCtx,codec,NULL);

    //分割上下文
    AVCodecParserContext *codecPCtx=av_parser_init(AV_CODEC_ID_H264);
    AVPacket *pkt= av_packet_alloc();
    AVFrame *frame=av_frame_alloc();

    unsigned char buf[4068]={0};
    std::ifstream ifs("d:\\1\\test.h264", std::ios::binary);
    auto begTime=std::chrono::steady_clock::now();
    int count=0;

    while (!ifs.eof())
    {
        ifs.read((char*)buf,sizeof(buf));
        //读取字节数
        int dataSize=ifs.gcount();
        if(dataSize<=0)break;

        auto data = buf;
        //一次有多帧数据
        while(dataSize>0)
        {
            int ret=av_parser_parse2(
                    codecPCtx,
                    codecCtx,
                    &pkt->data, //输出
                    &pkt->size,  //输出
                    data,dataSize, //输入
                    AV_NOPTS_VALUE,AV_NOPTS_VALUE,0
                    );
            data+=ret;
            dataSize-=ret;

            if(pkt->size)
            {
                //cout << pkt->size << " "<<flush;
                //发送packet到解码线程
                ret = avcodec_send_packet(codecCtx, pkt);
                if (ret < 0)
                    break;
                //获取多帧解码数据
                while (ret >= 0)
                {
                    //每次回调用av_frame_unref
                    ret = avcodec_receive_frame(codecCtx, frame);
                    if (ret < 0)
                        break;
//                    std::cout << frame->format << " " << std::flush;

                    auto curTime=std::chrono::steady_clock::now();
                    auto interval=std::chrono::duration_cast<std::chrono::milliseconds>(curTime-begTime);
                    //1/10秒钟计算法fps
                    if(interval.count()>=100)
                    {
                        std::cout << " fps=" << count*10 << std::endl;
                        count=0;
                        begTime=curTime;
                    }
                    count++;
                }
            }
        }

    }


    //取出结尾数据
    ///取出缓存数据
    int ret = avcodec_send_packet(codecCtx, NULL);
    while (ret >= 0)
    {
        ret = avcodec_receive_frame(codecCtx, frame);
        if (ret < 0)
            break;
        std::cout << frame->format << "-" << std::flush;
    }

    av_parser_close(codecPCtx);
    avcodec_free_context(&codecCtx);
    av_frame_free(&frame);
    av_packet_free(&pkt);
    return 0;
}

Hardware-accelerated decoding

#include <iostream>
#include <fstream>
#include <chrono>

extern "C"{
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
};



//heliang
int main()
{
    //查找解码器
    AVCodec *codec=avcodec_find_decoder(AV_CODEC_ID_H264);
    //初始化上下文
    AVCodecContext *codecContext=avcodec_alloc_context3(codec);


    //打印所有支持硬件加速方式
    for(int i=0;;i++)
    {
        auto hwconfig=avcodec_get_hw_config(codec,i);
        if(!hwconfig)break;
        if(!hwconfig->device_type)break;
        const char *hwName=av_hwdevice_get_type_name(hwconfig->device_type);
        std::cout << hwName << std::endl;
    }

    //初始化硬件加速上下文
    AVBufferRef *hw_ctx=NULL;
    av_hwdevice_ctx_create(&hw_ctx,AV_HWDEVICE_TYPE_DXVA2,NULL,NULL,0);
    //绑定硬件解码器
    codecContext->hw_device_ctx=av_buffer_ref(hw_ctx);
    codecContext->thread_count=16;

    //打开上下文
    int re=avcodec_open2(codecContext,codec,NULL);

    //分割上下文
    AVCodecParserContext *codecPCtx=av_parser_init(AV_CODEC_ID_H264);
    AVPacket *pkt= av_packet_alloc();
    AVFrame *frame=av_frame_alloc();
    AVFrame *hwframe=av_frame_alloc();

    unsigned char buf[4068]={0};
    std::ifstream ifs("d:\\1\\test.h264", std::ios::binary);
    std::ofstream ofs("d:\\test.yuv",std::ios::binary);

    auto begTime=std::chrono::steady_clock::now();
    int count=0;

    while (!ifs.eof())
    {
        ifs.read((char*)buf,sizeof(buf));
        //读取字节数
        int dataSize=ifs.gcount();
        if(dataSize<=0)break;

        auto data = buf;
        //一次有多帧数据
        while(dataSize>0)
        {
            int ret=av_parser_parse2(
                    codecPCtx,
                    codecContext,
                    &pkt->data, //输出
                    &pkt->size,  //输出
                    data,dataSize, //输入
                    AV_NOPTS_VALUE,AV_NOPTS_VALUE,0
            );
            data+=ret;
            dataSize-=ret;

            if(pkt->size)
            {
                //cout << pkt->size << " "<<flush;
                //发送packet到解码线程
                ret = avcodec_send_packet(codecContext, pkt);
                if (ret < 0)
                    break;
                //获取多帧解码数据
                while (ret >= 0)
                {
                    //每次回调用av_frame_unref
                    ret = avcodec_receive_frame(codecContext, frame);
                    if (ret < 0)
                        break;


                    //临时frame
                    AVFrame *tempFrame=frame;
                    //硬解码
                    if(codecContext->hw_device_ctx)
                    {
                        //硬解码 返回的格式是AV_PIX_FMT_NV12,需要转换成420P
                        av_hwframe_transfer_data(hwframe,tempFrame,0);
                        tempFrame=hwframe;
                    }

                    std::cout << tempFrame->format << " " << std::flush;
                    if(tempFrame->format==AV_PIX_FMT_YUV420P)
                    {
                        uint8_t **pBuf = tempFrame->data;//plane pointers
                        int*    pStride = tempFrame->linesize;//per-plane stride
                        for (int color_idx = 0; color_idx < 3; color_idx++)
                        {
                            int  nWidth = color_idx == 0 ? tempFrame->width : tempFrame->width / 2;
                            int  nHeight = color_idx == 0 ? tempFrame->height : tempFrame->height / 2;
                            for (int idx = 0; idx < nHeight; idx++)
                            {
                                ofs.write((char*)pBuf[color_idx],nWidth);
                                pBuf[color_idx] += pStride[color_idx];
                            }
                        }
                    }else if(tempFrame->format==AV_PIX_FMT_NV12)
                    {
                        uint8_t  *nvData=new uint8_t[tempFrame->width * tempFrame->height * 1.5];
                        if(tempFrame->linesize[0]==tempFrame->width)
                        {
                            memcpy(nvData, tempFrame->data[0], tempFrame->linesize[0] * tempFrame->height); //Y
                            memcpy(nvData + tempFrame->linesize[0] * tempFrame->height, tempFrame->data[1], tempFrame->linesize[1] * tempFrame->height / 2); //UV
                            ofs.write((char*)nvData,(tempFrame->linesize[0] * tempFrame->height)+(tempFrame->linesize[1] * tempFrame->height / 2));
                        }else //逐行复制
                        {
                            for (int i = 0; i < tempFrame->height; i++) //Y
                            {
                                memcpy(nvData + i * tempFrame->width,
                                       tempFrame->data[0] + i * tempFrame->linesize[0],
                                       tempFrame->width
                                );
                            }
                            for (int i = 0; i < tempFrame->height/2; i++)  //UV
                            {
                                auto p = nvData + tempFrame->height * tempFrame->width;// 移位Y
                                memcpy(p + i * tempFrame->width,
                                       tempFrame->data[1] + i * tempFrame->linesize[1],
                                       tempFrame->width
                                );
                            }

                            ofs.write((char*)nvData,(tempFrame->width*tempFrame->height)+(tempFrame->width*tempFrame->height/2));

                        }
                        delete [] nvData;

                    }

                    auto curTime=std::chrono::steady_clock::now();
                    auto interval=std::chrono::duration_cast<std::chrono::milliseconds>(curTime-begTime);

                    //1秒钟计算法fps
                    if(interval.count()>=1000)
                    {
                        std::cout << " fps=" << count << std::endl;
                        count=0;
                        begTime=curTime;
                    }
                    count++;
                }
            }
        }

    }


    //取出结尾数据
    ///取出缓存数据
    int ret = avcodec_send_packet(codecContext, NULL);
    while (ret >= 0)
    {
        ret = avcodec_receive_frame(codecContext, frame);
        if (ret < 0)
            break;
        std::cout << frame->format << "-" << std::flush;
    }

    ofs.close();
    ifs.close();
    av_parser_close(codecPCtx);
    avcodec_free_context(&codecContext);
    av_frame_free(&frame);
    av_packet_free(&pkt);

    return 0;
}

Demuxing

#include <iostream>
#include <fstream>
#include <sstream>

extern "C"{
#include <libavformat/avformat.h>
};

const std::string GetDurationStr(int duration)
{
    std::stringstream durationStr;
    //时
    int hour= duration / 3600;
    //分
    int minute=(duration % 3600)/60;
    //秒
    int second= duration % 60;
    durationStr << hour << ":" << minute <<":" << second;
    return durationStr.str();
}

//写出420文件
void Yuv420Write(std::ofstream &ofs,AVFrame *frame)
{
    uint8_t **pBuf = frame->data;//保存地址
    int*    pStride = frame->linesize;//保存位宽
    for (int color_idx = 0; color_idx < 3; color_idx++)
    {
        int  nWidth = color_idx == 0 ? frame->width : frame->width / 2;
        int  nHeight = color_idx == 0 ? frame->height : frame->height / 2;
        for (int idx = 0; idx < nHeight; idx++)
        {
            ofs.write((char*)pBuf[color_idx],nWidth);
            pBuf[color_idx] += pStride[color_idx];
        }
    }
}

//写出pcm文件
void PcmWrite(std::ofstream &ofs,AVFrame *frame,AVSampleFormat sample_fmt)
{
    //获取格式存储占用几个字节
    int data_size = av_get_bytes_per_sample(sample_fmt);

    for (int i = 0; i < frame->nb_samples; i++)
    {
        for (int ch = 0; ch < frame->channels; ch++)  // 交错的方式写入, 大部分float的格式输出
        {
            ofs.write((char*)frame->data[ch] + data_size*i,data_size);
        }
    }
}

//heliang
int main()
{
    //打开媒体文件
    AVFormatContext *afctxt=avformat_alloc_context();
    const char *url="d:\\1\\1.flv";
    //打开解封装上下文
    int re=avformat_open_input(&afctxt,
            url, //打开文件路径
            NULL, //封装格式,自动探测
            NULL //参数设置,如rtmp,rstp
            );

    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avformat_open_input failed! error msg:"  << buf << std::endl;
        return 0;
    }

    //获取媒体信息
    re=avformat_find_stream_info(afctxt,NULL);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avformat_find_stream_info failed! error msg:"  << buf << std::endl;
        return 0;
    }

    //打印封装信息
    av_dump_format(afctxt,0,url,0);
    //查找视频索引
    int videoStrIndex=av_find_best_stream(afctxt,AVMEDIA_TYPE_VIDEO,-1,0,NULL,0);
    //获取视频流
    AVStream *videoStream=afctxt->streams[videoStrIndex];

    //视频信息
    //流的总时间
    int duration_video=0;
    //播放时间有时候获取不到
    if(videoStream->duration != AV_NOPTS_VALUE)
    {
         duration_video =  (videoStream->duration) * av_q2d(videoStream->time_base);;
    } else
    {
        // 1000us = 1ms, 1000ms = 1秒
         duration_video = (afctxt->duration)  / AV_TIME_BASE;
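         //e.g. afctxt->duration is in AV_TIME_BASE (microsecond) units, so a value of 10000000 means 10 seconds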
    }


    //帧率
    int fps= (int)av_q2d(videoStream->avg_frame_rate);
    //宽度
    int width=videoStream->codecpar->width;
    //高度
    int height=videoStream->codecpar->height;
    //编码格式
    int videoCodecId=videoStream->codecpar->codec_id;

    std::cout << "width=" << width << ",height=" << height << ",duration=" << GetDurationStr(duration_video) << ",fps=" <<fps<<  ",videoCodecId=" << videoCodecId << std::endl;

    //find the audio stream index
    int audioStrIndex=av_find_best_stream(afctxt,AVMEDIA_TYPE_AUDIO,-1,0,NULL,0);
    //获取音频流
    AVStream *audioStream=afctxt->streams[audioStrIndex];

    int audio_video=0;
    if(audioStream->duration != AV_NOPTS_VALUE)
    {
        audio_video = (audioStream->duration) * av_q2d(audioStream->time_base);;
    } else
    {
        // 1000us = 1ms, 1000ms = 1秒
        audio_video = (afctxt->duration)  / AV_TIME_BASE;
    }
    //音频编解码器的采样率,单位为Hz
    int aduioSampleRate=audioStream->codecpar->sample_rate;
    //音频采样格式
    int audioFormat=audioStream->codecpar->format;
    //音频通道数
    int audioChanel=audioStream->codecpar->channels;
    //音频压缩编码格式
    int audioCodecId= audioStream->codecpar->codec_id;
    std::cout << "SampleRate=" << aduioSampleRate << ",Format=" << audioFormat << ",duration=" << GetDurationStr(audio_video) << ",Chanel=" <<audioChanel<<  ",CodecId=" << audioCodecId << std::endl;


    //解包
    //视频解码器
    AVCodec *videoCodec=avcodec_find_decoder(videoStream->codecpar->codec_id);
    AVCodecContext *videoCodecCtx=avcodec_alloc_context3(videoCodec);
    videoCodecCtx->thread_count=8;
    avcodec_parameters_to_context(videoCodecCtx,videoStream->codecpar);
    //打开视频解码器
    avcodec_open2(videoCodecCtx,videoCodec,NULL);
    //视频解码帧
    AVFrame  *videoFrame=av_frame_alloc();


    //音频解码器
    AVCodec *audioCodec=avcodec_find_decoder(audioStream->codecpar->codec_id);
    AVCodecContext  *audioCodecCtx=avcodec_alloc_context3(audioCodec);
    audioCodecCtx->thread_count=8;
    avcodec_parameters_to_context(audioCodecCtx,audioStream->codecpar);
    //打开音频解码器
    avcodec_open2(audioCodecCtx,audioCodec,NULL);

    //音频解码帧
    AVFrame *audioFrame=av_frame_alloc();


    //yuv视频文件存储对象
    std::ofstream videoOfs("d:\\test.yuv",std::ios::binary);
    //pcm音频文件
    std::ofstream audioOfs("d:\\1.pcm",std::ios::binary);

    AVPacket *pkt=av_packet_alloc();

    for(;;)
    {
        re=av_read_frame(afctxt,pkt);
        if(re!=0)
        {
            //av_read_frame returns an error (e.g. AVERROR_EOF) once the file is exhausted, so stop the loop
            char buf[1024]={0};
            av_strerror(re,buf,sizeof(buf));
            std::cout << "av_read_frame stopped! msg:"  << buf << std::endl;
            break;
        }

        int curDuration=0;
        //视频包
        if(pkt->stream_index==videoStrIndex)
        {
            curDuration=pkt->duration * av_q2d(videoStream->time_base);
//            std::cout << "video=" << "pts:"<< pkt->pts << ",dts:" << pkt->dts  << ",duration:"  << curDuration <<",format:"<< videoStream->codecpar->format<< std::endl;
            //发送到解码队列
            re = avcodec_send_packet(videoCodecCtx, pkt);
            if(re<0)break;
            while(re>=0)
            {
                //接入到视频解码数据
                re=avcodec_receive_frame(videoCodecCtx,videoFrame);
                if(re<0)break;
                std::cout << "width=" <<videoFrame->width <<",height=" <<videoFrame->height << ",format=" << videoStream->codecpar->format << std::endl;
                //将文件写出yuv
                Yuv420Write(videoOfs,videoFrame);
            }



        }
        //音频包
        else if(pkt->stream_index==audioStrIndex)
        {
            curDuration=pkt->duration * av_q2d(audioStream->time_base);
            std::cout << "audio=" << "pts:"<< pkt->pts << ",dts:" << pkt->dts  << ",duration:"  << curDuration  << ",format=" << audioStream->codecpar->format  << std::endl;
            //发送到解码队列
            re=avcodec_send_packet(audioCodecCtx,pkt);
            if(re<0)break;
            while (re>=0)
            {
                //接入到音频解码数据
                re=avcodec_receive_frame(audioCodecCtx,audioFrame);
                if(re<0)break;
                PcmWrite(audioOfs,audioFrame,audioCodecCtx->sample_fmt);
            }

        }


        //解引用
        av_packet_unref(pkt);
    }


    //将未处理完成音频数据,处理掉
    re = avcodec_send_packet(audioCodecCtx, NULL);
    while (re >= 0)
    {
        re = avcodec_receive_frame(audioCodecCtx, audioFrame);
        if (re < 0)
            break;
        PcmWrite(audioOfs,audioFrame,audioCodecCtx->sample_fmt);
    }

    //将未处理完成音频数据,处理掉
    re = avcodec_send_packet(videoCodecCtx, NULL);
    while (re >= 0)
    {
        re = avcodec_receive_frame(videoCodecCtx, videoFrame);
        if (re < 0)
            break;
        Yuv420Write(videoOfs,videoFrame);
    }

    //释放
    videoOfs.close();
    audioOfs.close();
    av_frame_free(&videoFrame);
    av_frame_free(&audioFrame);
    av_packet_free(&pkt);
    avcodec_free_context(&videoCodecCtx);
    avcodec_free_context(&audioCodecCtx);
    avformat_close_input(&afctxt);
    avformat_free_context(afctxt);
    return 0;
}

Remuxing: video file (encoding raw YUV into MP4)

#include <iostream>
#include <fstream>

extern "C"{
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
};


//heliang
int main()
{
    //yuv 宽度
    int yuvWidth=1920;
    //yuv 高度
    int yuvHeight=818;
    //size in bytes of one yuv420p frame (yuvWidth*yuvHeight*1.5); kept for reference, the frames below are read plane by plane
    int yuvTotalBytes=av_image_get_buffer_size(AV_PIX_FMT_YUV420P,yuvWidth,yuvHeight,1);


    //yuv文件输入流
    std::ifstream ifs("d:\\1\\1920_818.yuv",std::ios::binary);

    //创建重封装空间
    AVFormatContext *outFCtx=avformat_alloc_context();

    const char *outFileUrl="d:\\1920_818.mp4";

    //初始化重封装上下文
    int re=avformat_alloc_output_context2(&outFCtx,NULL,"mp4",outFileUrl);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avformat_alloc_output_context2 failed error msg :" << buf << std::endl;
        return 0;
    }

    //创建视频流
    AVStream *videoStream=avformat_new_stream(outFCtx,NULL);
    videoStream->time_base={1,25};

    //查询编码器
    AVCodec *videoCodec=avcodec_find_encoder(AV_CODEC_ID_H264);
    //创建编码器上下文
    AVCodecContext *videoCodecCtx=avcodec_alloc_context3(videoCodec);

    //视频流
    videoCodecCtx->codec_type=AVMEDIA_TYPE_VIDEO;
    //宽度
    videoCodecCtx->width=yuvWidth;
    //高度
    videoCodecCtx->height=yuvHeight;
    //编码格式
    videoCodecCtx->codec_id=AV_CODEC_ID_H264;
    //输出格式
    videoCodecCtx->pix_fmt=AV_PIX_FMT_YUV420P;
    //输出视频比特率
    videoCodecCtx->bit_rate=1200000;
    //预设值
    av_opt_set(videoCodecCtx->priv_data, "preset", "ultrafast", 0);
    av_opt_set(videoCodecCtx->priv_data,"tune","zerolatency", 0);
    av_opt_set(videoCodecCtx->priv_data,"crf","23",0);

    videoCodecCtx->qmin = 10;
    videoCodecCtx->qmax = 51;
    videoCodecCtx->qcompress = 0.6;
    videoCodecCtx->profile= FF_PROFILE_H264_BASELINE;


    //时间基数
    videoCodecCtx->time_base={1,25};
    //分组
    videoCodecCtx->gop_size=25;
    //线程编码
    videoCodecCtx->thread_count=8;

    //AVCodecContext复制到AVCodecParameters中
    avcodec_parameters_from_context(videoStream->codecpar,videoCodecCtx);

    //open the encoder context
    re=avcodec_open2(videoCodecCtx,videoCodec,NULL);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cerr << buf << std::endl;
        return 0;
    }

    //打开文件输出
    re=avio_open(&outFCtx->pb, outFileUrl, AVIO_FLAG_READ_WRITE);
    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avio_open failed error msg :" << buf << std::endl;
        return 0;
    }

    //写入文件头
    re=avformat_write_header(outFCtx,NULL);
    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avformat_write_header failed error msg :" << buf << std::endl;
        return 0;
    }

    //打印信息
    av_dump_format(outFCtx,0,outFileUrl,1);

    //初始化压缩包
    AVPacket *pkt=av_packet_alloc();
    //创建原始帧空间
    AVFrame *frame=av_frame_alloc();
    frame->width=videoCodecCtx->width;
    frame->height=videoCodecCtx->height;
    frame->format=videoCodecCtx->pix_fmt;
    //设置buffer,并且设置对齐方式
    av_frame_get_buffer(frame,0);

    //设置当前帧的时间戳
    int frame_pts=0;

    //读取yuv数据
    while(!ifs.eof())
    {
        //读取一帧yuv数据
        ifs.read((char*)frame->data[0],frame->linesize[0]*yuvHeight);
        ifs.read((char*)frame->data[1],frame->linesize[1]*yuvHeight/2);
        ifs.read((char*)frame->data[2],frame->linesize[2]*yuvHeight/2);
        //显示时间
        frame->pts= frame_pts++ ;

        //发送未压缩帧到线程中压缩
        re=avcodec_send_frame(videoCodecCtx,frame);
        if(re<0)
        {
            break;
        }

        //返回多帧
        while(re >=0) {
            //接收压缩帧,一般前几次调用返回空(缓冲,编码未完成)
            //独立线程编码
            re = avcodec_receive_packet(videoCodecCtx, pkt);
            if(re == AVERROR(EAGAIN) || re == AVERROR_EOF)
                break;
            if(re <0){
                char buf[1024]={0};
                av_strerror(re,buf,sizeof(buf));
                std::cerr << "avcodec_receive_packet error:" << buf << std::endl;
                break;
            }

            /////它的作用是计算 "a * b / c" 的值并分五种方式来取整.
            //根据timebase计算输出流的时间戳,av_packet_rescale_ts函数会将pts、dts、duration计算并且绑定到pkt中
            av_packet_rescale_ts(pkt, videoCodecCtx->time_base, videoStream->time_base);

            //分别计算 pts
//            pkt->pts = av_rescale_q_rnd(pkt->pts,videoCodecCtx->time_base,videoStream->time_base,(AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));

            //分别计算 dts
//            pkt->dts = av_rescale_q_rnd(pkt->dts, videoCodecCtx->time_base,
//                                        videoStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)
//            );
            //分别计算 duration
//            pkt->duration = av_rescale_q(pkt->duration, videoCodecCtx->time_base, videoStream->time_base);

            //写入音视频帧 会清理pkt
            re = av_interleaved_write_frame(outFCtx,
                                            pkt);
            if (re != 0)
            {
                char buf[1024]={0};
                av_strerror(re,buf,sizeof(buf));
                std::cout << "av_interleaved_write_frame failed error msg :" << buf << std::endl;
            }
            //解引用
            av_packet_unref(pkt);
        }
    }


    //将未消费的帧,消费掉
    int ret = avcodec_send_frame(videoCodecCtx, NULL);
    while (ret >= 0)
    {
        ret = avcodec_receive_packet(videoCodecCtx, pkt);
        if (ret < 0)
            break;

        av_packet_rescale_ts(pkt, videoCodecCtx->time_base, videoStream->time_base);
        //写入视频帧 会清理pkt
        re = av_interleaved_write_frame(outFCtx,
                                        pkt);
        if (re != 0)
        {
            char buf[1024]={0};
            av_strerror(re,buf,sizeof(buf));
            std::cout << "av_interleaved_write_frame failed error msg :" << buf << std::endl;
        }
        //解引用
        av_packet_unref(pkt);
    }




    //写入结尾 包含文件偏移索引
    re = av_write_trailer(outFCtx);
    if (re != 0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "av_write_trailer failed error msg :" << buf << std::endl;
    }

    ifs.close();
    //close the output file and free the contexts
    avio_closep(&outFCtx->pb);
    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_free_context(&videoCodecCtx);
    avformat_free_context(outFCtx);
    return 0;
}

Remuxing: audio file

Sampling rate: 44100 Hz, i.e. 44.1 kHz.
Sample size (sample_size): 8 bits (256 levels), 16 bits (65536 levels), 32 bits (~4G levels).
Quantization noise determines quality and is expressed as the signal-to-noise ratio, measured in decibels (dB):
S/N = 6n + 1.8 (dB), where n is the number of quantization bits; the higher the S/N, the better.
Channel storage: interleaved (packed) or planar (a short sketch of the two layouts follows below):
Interleaved: LR LR LR
Planar: LLLL RRRR

PCM (pulse code modulation)

Sampling: digitizing a continuous analog signal (a voltage).

Quantization: rounding each sampled value to the nearest quantization level. The more levels there are, the smaller the quantization error; that error is the quantization noise.

Encoding: representing the quantized values in binary.

Audio codecs:

 MPEG-1: Layer1 CD (192 kb/s), Layer2 digital TV broadcast (128 kb/s), Layer3 MP3 (64 kb/s)

 MPEG-2: AAC (advanced audio coding)

 MPEG-4: MPEG-4 AAC (m4a, the MPEG-4 audio file format)

 FLAC, APE, ALAC (in m4a): lossless audio codecs
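
Before the full muxing example below, a minimal stand-alone sketch of the interleaved-vs-planar layouts mentioned above (my own illustration; the sample values are made up):

#include <cstdint>
#include <vector>

//convert interleaved stereo S16 samples (L R L R ...) into two planar buffers (L L L ... and R R R ...)
void interleavedToPlanar(const int16_t *interleaved, int nbSamples,
                         std::vector<int16_t> &left, std::vector<int16_t> &right)
{
    left.resize(nbSamples);
    right.resize(nbSamples);
    for (int i = 0; i < nbSamples; i++)
    {
        left[i]  = interleaved[2 * i];     //channel 0 (L)
        right[i] = interleaved[2 * i + 1]; //channel 1 (R)
    }
}

int main()
{
    const int16_t pcm[] = {1, -1, 2, -2, 3, -3}; //3 stereo sample frames
    std::vector<int16_t> L, R;
    interleavedToPlanar(pcm, 3, L, R);           //L = {1,2,3}, R = {-1,-2,-3}
    return 0;
}
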
#include <iostream>
#include <fstream>

extern "C"{
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
};


//heliang
int main()
{

    std::cout << avcodec_configuration() << std::endl;

    AVFormatContext *audioFCtx=avformat_alloc_context();

    const char *audioOutPutUrl="d:\\3.mp4";
    int re=avformat_alloc_output_context2(&audioFCtx, NULL,"mp4",audioOutPutUrl);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avformat_alloc_output_context2 failed error msg :" << buf << std::endl;
        return 0;
    }

    //创建音频流
    AVStream *audioStream=avformat_new_stream(audioFCtx,NULL);

    //查询编码器
    AVCodec *audioCodec=avcodec_find_encoder(AV_CODEC_ID_MP3);
    //创建编码器上下文
    AVCodecContext *audioCodecCtx=avcodec_alloc_context3(audioCodec);


    //音频流
    //编码类型
    audioCodecCtx->codec_id=audioCodec->id;
    //比特率64K
    audioCodecCtx->bit_rate=64000;
    //样本采样大小,2个字节存储
    audioCodecCtx->sample_fmt=AV_SAMPLE_FMT_S16;
    //音频格式
    audioCodecCtx->codec_type=AVMEDIA_TYPE_AUDIO;
    //采样频率,44100
    audioCodecCtx->sample_rate=48000;
    //音频通道格式类型 如 单通道 双通道
    //av_get_default_channel_layout(2)
    //AV_CH_LAYOUT_STEREO 表示双通道
    audioCodecCtx->channel_layout=AV_CH_LAYOUT_STEREO;
    //音频的通道数
    audioCodecCtx->channels=av_get_channel_layout_nb_channels(audioCodecCtx->channel_layout);
    //设置每一帧大小
    audioCodecCtx->frame_size=1024;

    audioCodecCtx->time_base={1,25};

    //open the encoder context
    re=avcodec_open2(audioCodecCtx,audioCodec,NULL);

    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avcodec_open2 failed error msg :" << buf << std::endl;
        return 0;
    }

    avcodec_parameters_from_context(audioStream->codecpar,audioCodecCtx);

    //打开文件输出
    re=avio_open(&audioFCtx->pb, audioOutPutUrl, AVIO_FLAG_READ_WRITE);
    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avio_open failed error msg :" << buf << std::endl;
        return 0;
    }


    //写入文件头
    re=avformat_write_header(audioFCtx,NULL);
    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avformat_write_header failed error msg :" << buf << std::endl;
        return 0;
    }

    //打印信息
    av_dump_format(audioFCtx,0,audioOutPutUrl,1);

    //设置音频帧参数
    AVFrame *frame=av_frame_alloc();
    //每帧单个通道的采样点数
    frame->nb_samples=audioCodecCtx->frame_size;
    //采样点格式
    frame->format=audioCodecCtx->sample_fmt;
    //通道布局情况
    frame->channel_layout=audioCodecCtx->channel_layout;
    //通道数
    frame->channels=av_get_channel_layout_nb_channels(frame->channel_layout);
    /* 为frame分配buffer */
    re = av_frame_get_buffer(frame, 0);
    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "av_frame_get_buffer failed error msg :" << buf << std::endl;
        return 0;
    }
    //初始化音频压缩帧
    AVPacket *pkt=av_packet_alloc();

    std::ifstream  ifs("d:\\48000_2_s16le.pcm",std::ios::binary);

    for(int i=0;;i++)
    {
        //获取一帧数据长度
        int length=frame->nb_samples*av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)*frame->channels;
        if(ifs.eof())break;;
        ifs.read((char*)frame->data[0],length);

        frame->pts = i;

        //发送未压缩帧到线程中压缩
        re=avcodec_send_frame(audioCodecCtx,frame);
        if(re<0)
        {
            break;
        }

        while(re >=0) {
            //接收压缩帧,一般前几次调用返回空(缓冲,编码未完成)
            //独立线程编码
            re = avcodec_receive_packet(audioCodecCtx, pkt);
            if(re == AVERROR(EAGAIN) || re == AVERROR_EOF)
                break;
            if(re <0){
                char buf[1024]={0};
                av_strerror(re,buf,sizeof(buf));
                std::cerr << "avcodec_receive_packet error:" << buf << std::endl;
                break;
            }
            av_packet_rescale_ts(pkt, audioCodecCtx->time_base, audioCodecCtx->time_base);

            //写入音频帧 会清理pkt
            re = av_interleaved_write_frame(audioFCtx,
                                            pkt);

            if (re != 0)
            {
                char buf[1024]={0};
                av_strerror(re,buf,sizeof(buf));
                std::cout << "av_interleaved_write_frame failed error msg :" << buf << std::endl;
            }
            //解引用
            av_packet_unref(pkt);
        }

    }

    //将未消费的帧,消费掉
    int ret = avcodec_send_frame(audioCodecCtx, NULL);
    while (ret >= 0)
    {
        ret = avcodec_receive_packet(audioCodecCtx, pkt);
        if (ret < 0)
            break;

        av_packet_rescale_ts(pkt, audioCodecCtx->time_base, audioCodecCtx->time_base);
        //写入视频帧 会清理pkt
        re = av_interleaved_write_frame(audioFCtx,
                                        pkt);
        if (re != 0)
        {
            char buf[1024]={0};
            av_strerror(re,buf,sizeof(buf));
            std::cout << "av_interleaved_write_frame failed error msg :" << buf << std::endl;
        }
        pkt->pts=-1;
        //解引用
        av_packet_unref(pkt);
    }

    //写入结尾 包含文件偏移索引
    re = av_write_trailer(audioFCtx);
    if (re != 0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "av_write_trailer failed error msg :" << buf << std::endl;
    }

    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_close(audioCodecCtx);
    avformat_close_input(&audioFCtx);
    return 0;
}

Remuxing: audio and video file

#include <iostream>
#include <string>
#include <fstream>

extern "C"{
#include <libavformat/avformat.h>
#include <libavutil/pixdesc.h>
#include <libavutil/opt.h>
};


struct MediaCtx
{
    //输出上下文
    AVFormatContext *outPCtx;
    AVCodecContext *encodecCtx;
    AVStream  *stream;
    AVFrame  *frame;
    int next_pts;
    AVPacket *pkt;
    std::ifstream ifs;
    bool  flag;
};

AVFrame* createVideoFrame(MediaCtx &mediaCtx)
{
    //创建原始帧空间
    AVFrame *frame=av_frame_alloc();
    frame->width=mediaCtx.stream->codecpar->width;
    frame->height=mediaCtx.stream->codecpar->height;
    frame->format=mediaCtx.stream->codecpar->format;
    int re=av_frame_get_buffer(frame,0);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "av_frame_get_buffer failed error msg :" << buf << std::endl;
        av_frame_free(&frame);
        return nullptr;
    }
    return frame;
}


//处理音频
AVFrame* createAudioFrame(MediaCtx &mediaCtx)
{
    //设置音频帧参数
    AVFrame *frame=av_frame_alloc();
    //每帧单个通道的采样点数
    frame->nb_samples=mediaCtx.encodecCtx->frame_size;
    //采样点格式
    frame->format=mediaCtx.encodecCtx->sample_fmt;
    //通道布局情况
    frame->channel_layout=mediaCtx.encodecCtx->channel_layout;
    //通道数
    frame->channels=av_get_channel_layout_nb_channels(frame->channel_layout);
    /* 为frame分配buffer */
    int re = av_frame_get_buffer(frame, 0);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "av_frame_get_buffer failed error msg :" << buf << std::endl;
        return nullptr;
    }

    return frame;
}

void sendPack(MediaCtx& mediaCtx)
{
    if(mediaCtx.ifs.eof()){
        mediaCtx.flag= false;
        return;
    };

    if(mediaCtx.encodecCtx->codec_type==AVMEDIA_TYPE_VIDEO) //读取视频
    {
        mediaCtx.ifs.read((char*)mediaCtx.frame->data[0],mediaCtx.frame->linesize[0]*mediaCtx.frame->height);
        mediaCtx.ifs.read((char*)mediaCtx.frame->data[1],mediaCtx.frame->linesize[1]*mediaCtx.frame->height/2);
        mediaCtx.ifs.read((char*)mediaCtx.frame->data[2],mediaCtx.frame->linesize[2]*mediaCtx.frame->height/2);
        //设置视频pts
        mediaCtx.frame->pts=mediaCtx.next_pts++;
    }else if(mediaCtx.encodecCtx->codec_type==AVMEDIA_TYPE_AUDIO)  //读取音频
    {
        int length=mediaCtx.frame->nb_samples*av_get_bytes_per_sample(mediaCtx.encodecCtx->sample_fmt)*mediaCtx.frame->channels;
        mediaCtx.ifs.read((char*)mediaCtx.frame->data[0],length);
        //设置音频pts
        mediaCtx.frame->pts=mediaCtx.next_pts;
        mediaCtx.next_pts +=mediaCtx.frame->nb_samples;
    }else{
        return;
    }

    int re=avcodec_send_frame(mediaCtx.encodecCtx,mediaCtx.frame);
    if(re<0)
    {
        return;
    }


    while (re >= 0) {
        re = avcodec_receive_packet(mediaCtx.encodecCtx, mediaCtx.pkt);
        if(re == AVERROR(EAGAIN) || re == AVERROR_EOF)return;
        //index of the output stream this packet belongs to
        mediaCtx.pkt->stream_index=mediaCtx.stream->index;
        //设置每个包所属时间
        av_packet_rescale_ts(mediaCtx.pkt, mediaCtx.encodecCtx->time_base, mediaCtx.stream->time_base);
        //写入到文件中
        av_interleaved_write_frame(mediaCtx.outPCtx,mediaCtx.pkt);
        av_packet_unref(mediaCtx.pkt);
    }

}

void tailPkt(MediaCtx& mediaCtx)
{
    int ret = avcodec_send_frame(mediaCtx.encodecCtx, NULL);
    while (ret >= 0)
    {
        ret = avcodec_receive_packet(mediaCtx.encodecCtx, mediaCtx.pkt);
        if (ret < 0)
            break;

        mediaCtx.pkt->stream_index=mediaCtx.stream->index;
        av_packet_rescale_ts(mediaCtx.pkt, mediaCtx.encodecCtx->time_base, mediaCtx.stream->time_base);
        //写入视频帧 会清理pkt
        ret = av_interleaved_write_frame(mediaCtx.outPCtx,
                                         mediaCtx.pkt);
        if (ret != 0)
        {
            char buf[1024]={0};
            av_strerror(ret,buf,sizeof(buf));
            std::cout << "av_interleaved_write_frame failed error msg :" << buf << std::endl;
        }
        //解引用
        av_packet_unref(mediaCtx.pkt);
    }
}

//heliang
//Extract the raw video: ffmpeg -i test.mp4 -s 1920x818  -pix_fmt yuv420p test.yuv
//Extract the raw audio: ffmpeg -i test.mp4   -ar 48000 -ac 2 -f  s16le  test.pcm
int main()
{

    AVFormatContext *outPCtx = avformat_alloc_context();
    const char *outFileUrl = "d:\\bak_test.mp4";
    AVOutputFormat* fmt = av_guess_format(NULL, outFileUrl, NULL);
    int re = avformat_alloc_output_context2(&outPCtx,fmt,NULL,outFileUrl);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avformat_alloc_output_context2 failed error msg :" << buf << std::endl;
        return 0;
    }

    //视频
    AVStream *videoStream=avformat_new_stream(outPCtx,NULL);
    AVCodec *videoCodec=avcodec_find_encoder(AV_CODEC_ID_H264);
    AVCodecContext *videoCodecCtx=avcodec_alloc_context3(videoCodec);
    videoCodecCtx->codec_id=videoCodec->id;
    videoCodecCtx->width=1920;
    videoCodecCtx->height=818;
    videoCodecCtx->pix_fmt=AV_PIX_FMT_YUV420P;
    videoCodecCtx->codec_type=AVMEDIA_TYPE_VIDEO;
    //时间基数
    videoCodecCtx->time_base={1,25};
    //帧率(Frame rate)也叫帧频率,帧率是视频文件中每一秒的帧数,肉眼想看到连续移动图像至少需要15帧
    videoCodecCtx->framerate={25,1};
    //码率也叫比特率(Bit rate)(也叫数据率)是一个确定整体视频/音频质量的参数,秒为单位处理的字节数,码率和视频质量成正比,在视频文件中中比特率用bps来表达
    videoCodecCtx->bit_rate=1100*1024;
    //分组
    videoCodecCtx->gop_size=25;
    videoCodecCtx->thread_count=8;
    //设置视频时间基,很重要
    videoStream->time_base=videoCodecCtx->time_base;
    //预设值
    av_opt_set(videoCodecCtx->priv_data, "preset", "ultrafast", 0);
    av_opt_set(videoCodecCtx->priv_data,"tune","zerolatency", 0);
    av_opt_set(videoCodecCtx->priv_data,"crf","23",0);


    re=avcodec_open2(videoCodecCtx,videoCodec,NULL);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avcodec_open2 failed error msg :" << buf << std::endl;
        return 0;
    }
    //AVCodecContext复制到AVCodecParameters中
    avcodec_parameters_from_context(videoStream->codecpar,videoCodecCtx);


    //音频
    AVStream *audioStream=avformat_new_stream(outPCtx,NULL);
    AVCodec *audioCodec=avcodec_find_encoder_by_name("libfdk_aac");
    AVCodecContext *audioCodecCtx=avcodec_alloc_context3(audioCodec);
    audioCodecCtx->codec_id=audioCodec->id;
    //sample format: signed 16-bit, 2 bytes per sample (the format libfdk_aac expects)
    audioCodecCtx->sample_fmt=AV_SAMPLE_FMT_S16;
    //media type: audio
    audioCodecCtx->codec_type=AVMEDIA_TYPE_AUDIO;
    //sample rate: 48000 Hz
    audioCodecCtx->sample_rate=48000;
    //channel layout, e.g. mono or stereo
    audioCodecCtx->channel_layout=AV_CH_LAYOUT_STEREO;
    //number of channels derived from the layout
    audioCodecCtx->channels=av_get_channel_layout_nb_channels(audioCodecCtx->channel_layout);
    audioCodecCtx->bit_rate=128*1024;
    //set the audio stream time base, this is important for correct timestamps
    audioStream->time_base= {1,audioCodecCtx->sample_rate};

    re=avcodec_open2(audioCodecCtx,audioCodec,NULL);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avcodec_open2 failed error msg :" << buf << std::endl;
        return 0;
    }
    //copy the encoder settings from the AVCodecContext into the stream's AVCodecParameters
    avcodec_parameters_from_context(audioStream->codecpar,audioCodecCtx);

    //print the output format information
    av_dump_format(outPCtx,0,outFileUrl,1);

    //open the output file for writing
    re=avio_open(&outPCtx->pb, outFileUrl, AVIO_FLAG_READ_WRITE);
    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avio_open failed error msg :" << buf << std::endl;
        return 0;
    }
    //write the container header
    re=avformat_write_header(outPCtx,NULL);

    //video muxing context
    MediaCtx videoMediaCtx;
    videoMediaCtx.encodecCtx=videoCodecCtx;
    videoMediaCtx.stream=videoStream;
    videoMediaCtx.outPCtx=outPCtx;
    videoMediaCtx.frame=createVideoFrame(videoMediaCtx);
    videoMediaCtx.pkt=av_packet_alloc();
    videoMediaCtx.next_pts=0;
    videoMediaCtx.flag=true;
    videoMediaCtx.ifs.open("d:\\test.yuv",std::ios::binary);
    //audio muxing context
    MediaCtx audioMediaCtx;
    audioMediaCtx.encodecCtx=audioCodecCtx;
    audioMediaCtx.stream=audioStream;
    audioMediaCtx.outPCtx=outPCtx;
    audioMediaCtx.frame=createAudioFrame(audioMediaCtx);
    audioMediaCtx.pkt=av_packet_alloc();
    audioMediaCtx.next_pts=0;
    audioMediaCtx.flag=true;
    audioMediaCtx.ifs.open("d:\\test.pcm",std::ios::binary);


    //encode and interleave video and audio until both inputs are drained
    while (videoMediaCtx.flag || audioMediaCtx.flag)
    {

        std::cout << av_compare_ts(videoMediaCtx.next_pts, videoMediaCtx.encodecCtx->time_base,
                                   audioMediaCtx.next_pts, audioMediaCtx.encodecCtx->time_base) << std::endl;

        if((av_compare_ts(videoMediaCtx.next_pts, videoMediaCtx.encodecCtx->time_base,
                         audioMediaCtx.next_pts, audioMediaCtx.encodecCtx->time_base) <= 0) || !audioMediaCtx.flag ){
            sendPack(videoMediaCtx);
        }else{
            sendPack(audioMediaCtx);
        }

    }



    tailPkt(videoMediaCtx);
    tailPkt(audioMediaCtx);
    //write the trailer, which contains the file's index/offset information
    re = av_write_trailer(outPCtx);
    //clean up
    avio_close(outPCtx->pb);
    av_packet_free(&videoMediaCtx.pkt);
    av_packet_free(&audioMediaCtx.pkt);
    av_frame_free(&videoMediaCtx.frame);
    av_frame_free(&audioMediaCtx.frame);
    avcodec_free_context(&videoCodecCtx);
    avcodec_free_context(&audioCodecCtx);
    avformat_free_context(outPCtx);
    return 0;
}
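
The interleaving decision in the main loop above relies on av_compare_ts, which compares two timestamps expressed in different time bases without rounding either one first. Below is a minimal sketch of that decision in isolation; the time bases and counter values are illustrative assumptions, not values taken from the program above.

#include <iostream>

extern "C"{
    #include <libavutil/mathematics.h>
};

int main()
{
    //video counts encoded frames: time base 1/25, next_pts is the frame index
    AVRational videoTb = {1, 25};
    //audio counts submitted samples: time base 1/48000, next_pts is the sample index
    AVRational audioTb = {1, 48000};

    int64_t videoNextPts = 10;     //10 frames     -> 0.400 s
    int64_t audioNextPts = 18432;  //18432 samples -> 0.384 s

    //av_compare_ts returns -1 if the first timestamp is earlier, 0 if equal, 1 if later
    if (av_compare_ts(videoNextPts, videoTb, audioNextPts, audioTb) <= 0)
        std::cout << "video is not ahead, encode the next video frame" << std::endl;
    else
        std::cout << "audio is behind, encode the next audio frame" << std::endl; //printed here: 0.400 s > 0.384 s
    return 0;
}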

Remuxing: cutting a segment out of a video

This example copies packets without re-encoding: it seeks to the keyframe at or before the start time, then shifts and rescales every packet's pts/dts into the output streams' time bases.

#include <iostream>

extern "C"{
#include <libavformat/avformat.h>
};


//heliang
int main()
{

    int re=0;
    const char *inputUrl="d:\\1\\2.mp4";
    //input demuxer context
    AVFormatContext *inputFCtx=avformat_alloc_context();
    //open the input file
    re=avformat_open_input(&inputFCtx,inputUrl,NULL,NULL);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avformat_alloc_output_context2 failed error msg :" << buf << std::endl;
        return 0;
    }
    //read the stream information
    re=avformat_find_stream_info(inputFCtx,NULL);
    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avformat_alloc_output_context2 failed error msg :" << buf << std::endl;
        return 0;
    }

    int videoInputIndex=av_find_best_stream(inputFCtx,AVMEDIA_TYPE_VIDEO,-1,0,NULL,0);
    int audioInputIndex=av_find_best_stream(inputFCtx,AVMEDIA_TYPE_AUDIO,-1,0,NULL,0);
    AVStream *videoInputStream=inputFCtx->streams[videoInputIndex];
    AVStream *audioInputStream=inputFCtx->streams[audioInputIndex];

    //output muxer: avformat_alloc_output_context2 allocates the context itself, so start from NULL
    AVFormatContext *outFctx=NULL;
    const char *outUrl="d:\\test.mp4";
    //allocate the output context for the given file name
     re=avformat_alloc_output_context2(
            &outFctx, NULL,NULL,
            outUrl
            );
    if(re!=0)
    {
      char buf[1024]={0};
      av_strerror(re,buf,sizeof(buf));
      std::cout << "avformat_alloc_output_context2 failed error msg :" << buf << std::endl;
      return 0;
    }

    //video output stream
    AVStream *videoOutStream=avformat_new_stream(outFctx,NULL);
    //copy the time base from the input stream
    videoOutStream->time_base=videoInputStream->time_base;
    //copy the codec parameters straight from the demuxer (no re-encoding)
    avcodec_parameters_copy(videoOutStream->codecpar,videoInputStream->codecpar);


    //audio output stream
    AVStream *audioOutStream=avformat_new_stream(outFctx,NULL);
    audioOutStream->time_base=audioInputStream->time_base;
    //copy the codec parameters straight from the demuxer (no re-encoding)
    avcodec_parameters_copy(audioOutStream->codecpar,audioInputStream->codecpar);


    //open the output IO context
    re=avio_open(&outFctx->pb,outUrl,AVIO_FLAG_WRITE);
    if(re!=0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avio_open failed error msg :" << buf << std::endl;
        return 0;
    }

    //write the container header
    re=avformat_write_header(outFctx,NULL);
    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "avformat_write_header failed error msg :" << buf << std::endl;
        return 0;
    }

    //print the output format information
    av_dump_format(outFctx, 0, outUrl, 1);


    //cut the audio and video between begin_sec and end_sec (40 s to 60 s here); keep a little extra rather than cut short
    //start of the cut, in seconds
    double begin_sec=1.0*40;
    //end of the cut, in seconds
    double end_sec=1.0*60;
    long long begin_pts=0;
    //start of the cut in the audio stream's time base
    long long begin_audio_pts=0;
    long long end_pts=0;

    //convert the seconds into pts of the input streams, using the video stream as the reference
    if(videoInputStream&&videoInputStream->time_base.num>0)
    {
        //seconds * (den/num) = pts in the stream time base
        double t=(double)videoInputStream->time_base.den/(double )videoInputStream->time_base.num;
        begin_pts = begin_sec * t;
        end_pts = end_sec * t;
    }


    if (audioInputStream&&audioInputStream->time_base.num>0)
        begin_audio_pts = begin_sec * ((double)audioInputStream->time_base.den / (double)audioInputStream->time_base.num);

    //seek the input to the keyframe at or before begin_sec (40 s here)
    if(videoInputStream)
        re = av_seek_frame(inputFCtx, videoInputStream->index, begin_pts,
                           AVSEEK_FLAG_FRAME | AVSEEK_FLAG_BACKWARD); //seek backwards to the previous keyframe

    if(re<0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "av_seek_frame failed error msg :" << buf << std::endl;
        return 0;
    }


    AVPacket *pkt=av_packet_alloc();
    for (;;)
    {
        re = av_read_frame(inputFCtx, pkt);
        if (re != 0)
        {
            char buf[1024]={0};
            av_strerror(re,buf,sizeof(buf));
            std::cout << "av_read_frame failed error msg :" << buf << std::endl;
            break;
        }
        long long offset_pts = 0; //pts offset, subtracted so the cut segment starts from pts 0

        AVStream *in_stream=inputFCtx->streams[pkt->stream_index];
        AVStream *out_stream= nullptr;
        //video packet
        if(pkt->stream_index==videoInputIndex)
        {
            //stop once the pts passes end_sec; only the begin_sec to end_sec segment is kept
            if(pkt->pts > end_pts)
            {
                av_packet_unref(pkt);
                break;
            }
            offset_pts = begin_pts;
            out_stream=videoOutStream;
        }
        //audio packet
        else if(pkt->stream_index==audioInputIndex)
        {
            offset_pts = begin_audio_pts;
            out_stream=audioOutStream;
        }

        std::cout << pkt->pts << " : " << pkt->dts << " :" << pkt->size << std::endl;

        //recompute pts, dts and duration for the output stream
        //av_rescale_q_rnd computes a * bq (input time base) / cq (output time base)
        if(out_stream)
        {
            pkt->pts = av_rescale_q_rnd(pkt->pts- offset_pts, in_stream->time_base,
                                        out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)
            );
            pkt->dts = av_rescale_q_rnd(pkt->dts- offset_pts, in_stream->time_base,
                                        out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)
            );
            pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base);
        }

        pkt->pos = -1;

        //write the packet; on success av_interleaved_write_frame takes ownership of pkt and resets it
        re = av_interleaved_write_frame(outFctx,
                                        pkt);
        if (re != 0)
        {
            char buf[1024]={0};
            av_strerror(re,buf,sizeof(buf));
            std::cout << "av_interleaved_write_frame failed error msg :" << buf << std::endl;
        }


    }


    //write the trailer, which contains the file's index/offset information
    re = av_write_trailer(outFctx);
    if (re != 0)
    {
        char buf[1024]={0};
        av_strerror(re,buf,sizeof(buf));
        std::cout << "av_write_trailer failed error msg :" << buf << std::endl;
    }

    //free the packet and close the input and output contexts
    av_packet_free(&pkt);
    avformat_close_input(&inputFCtx);

    avio_closep(&outFctx->pb);
    avformat_free_context(outFctx);

    return 0;
}
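
The pts arithmetic in the cut loop can be checked by hand. The sketch below uses illustrative time bases (a 1/90000 input track and a 1/12800 output track, assumptions rather than values read from the file above): 40 seconds becomes pts 40 * 90000 = 3600000 in the input time base, and after subtracting that offset av_rescale_q_rnd maps the remainder into the output time base.

#include <iostream>

extern "C"{
    #include <libavutil/mathematics.h>
};

int main()
{
    //illustrative time bases: 1/90000 for the input track, 1/12800 for the output track
    AVRational inTb  = {1, 90000};
    AVRational outTb = {1, 12800};

    //the cut starts at 40 s: seconds * (den/num) in the input time base
    long long begin_pts = (long long)(40.0 * inTb.den / inTb.num);   //3600000

    //a packet read 2 s after the cut point
    long long pkt_pts = begin_pts + 2 * 90000;                       //3780000

    //same formula as the remux loop: (pts - offset) * inTb / outTb, rounded to nearest
    long long out_pts = av_rescale_q_rnd(pkt_pts - begin_pts, inTb, outTb,
                                         (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));

    std::cout << "begin_pts=" << begin_pts     //3600000
              << " out_pts=" << out_pts        //2 s * 12800 = 25600
              << std::endl;
    return 0;
}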
