解码流程:
- 获取文件信息,数据存储在AVFormatContext里面
- 根据AVFormatContext获取对应的AVCodecContext
- 解码原始数据AVPacket,解码为自己需要的数据AVFrame
- 释放相关资源
(图片来源于网络)
#include "lang.h"
#include <string>
//封装格式
//解码
#include "log.h"
extern "C" {
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
};
static void video_decode_example(const char *outfilename, const char *filename) {
//1.注册
av_register_all();
AVFormatContext *pFormatCtx = NULL;
//2. 打开文件,从文件头获取格式信息,数据封装在AVFormatContext里面
if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) != 0) {
LOGE ("从文件头获取格式信息失败");
return;
}
//3. 获取流信息,数据封装在AVFormatContext里面
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
LOGE ("获取流信息失败");
return;
}
//只输出输入文件的格式信息
av_dump_format(pFormatCtx, 0, filename, 0);
int video_index = -1;
//4. 从流中遍历获取video的index
for (int i = 0; i < pFormatCtx->nb_streams; i++) {
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
video_index = i;
LOGE ("video_index = %d", video_index);
break;
}
}
if (video_index == -1) {
LOGE ("遍历获取video_index失败");
return;
}
AVCodecContext *pCodecCtxOrg = NULL;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
//5. 解码器获取
//5.1 根据video_index获取解码器上下文AVCodecContext
pCodecCtxOrg = pFormatCtx->streams[video_index]->codec; // codec context
//5.1 根据AVCodecContext获取解码器
pCodec = avcodec_find_decoder(pCodecCtxOrg->codec_id);
if (!pCodec) {
LOGE ("解码器获取失败");
return;
}
//6.获取一个AVCodecContext实例,并将第五步获取的AVCodecContext数据copy过来,解码的时候需要用这个
pCodecCtx = avcodec_alloc_context3(pCodec);
if (avcodec_copy_context(pCodecCtx, pCodecCtxOrg) != 0) {
LOGE ("解码器上下文数据copy失败");
return;
}
//7. 打开解码器
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
LOGE ("打开解码器失败");
return;
}
//原始数据帧
AVFrame *pFrame = NULL;
//yuv数据帧
AVFrame *pFrameYUV = NULL;
//内存开辟 不要忘记free
pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();
int numBytes = 0;
uint8_t *buffer = NULL;
//根据需要解码的类型,获取需要的buffer,不要忘记free
numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
//根据指定的图像参数和提供的数组设置数据指针和行数 ,数据填充到对应的pFrameYUV里面
av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, buffer, AV_PIX_FMT_YUV420P,
pCodecCtx->width,
pCodecCtx->height, 1);
//获取SwsContext
struct SwsContext *sws_ctx = NULL;
sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC,
NULL, NULL, NULL);
FILE *pFile = fopen(outfilename, "wb+");
int ret;
AVPacket packet;
int frameFinished = 0;
//8. 根据AVFormatContext 读取帧数据,读取的编码数据存储到AVPacket里面
while (av_read_frame(pFormatCtx, &packet) >= 0) {
if (packet.stream_index == video_index) {
//9. 将读取到的AVPacket,转换为AVFrame
ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
if (ret < 0) {
LOGE("解码失败");
return;
}
if (frameFinished) {
//10. 将原始的AVFrame数据转换为自己需要的YUV AVFrame数据
sws_scale(sws_ctx, (uint8_t const *const *) pFrame->data, pFrame->linesize, 0,
pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
//11. 根据YUV AVFrame数据保存文件
if (pFile == NULL)
return;
int y_size = pCodecCtx->width * pCodecCtx->height;
//yuv420 存储为4:1:1
fwrite(pFrame->data[0], 1, static_cast<size_t>(y_size), pFile); //y
fwrite(pFrame->data[1], 1, static_cast<size_t>(y_size / 4), pFile);//u
fwrite(pFrame->data[2], 1, static_cast<size_t>(y_size / 4), pFile);//v
}
}
av_packet_unref(&packet);
}
//flush decoder
//FIX: Flush Frames remained in Codec
//12. 刷新解码器
while (1) {
ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
if (ret < 0)
break;
if (!frameFinished)
break;
sws_scale(sws_ctx, (const unsigned char *const *) pFrame->data, pFrame->linesize, 0,
pCodecCtx->height,
pFrameYUV->data, pFrameYUV->linesize);
int y_size = pCodecCtx->width * pCodecCtx->height;
fwrite(pFrameYUV->data[0], 1, static_cast<size_t>(y_size), pFile); //Y
fwrite(pFrameYUV->data[1], 1, static_cast<size_t>(y_size / 4), pFile); //U
fwrite(pFrameYUV->data[2], 1, static_cast<size_t>(y_size / 4), pFile); //V
LOGE("Flush Decoder: Succeed to decode 1 frame!\n");
}
//release resource
sws_freeContext(sws_ctx);
fclose(pFile);
av_free(buffer);
av_frame_free(&pFrameYUV);
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
avcodec_close(pCodecCtxOrg);
avformat_close_input(&pFormatCtx);
}
extern "C"
JNIEXPORT jint JNICALL
Java_zzw_com_ffmpegdemo_VideoUtils_decode(JNIEnv *env, jclass type, jstring input_,
                                          jstring output_) {
    // Bridge the Java strings to C strings for the native decoder.
    const char *in_path = env->GetStringUTFChars(input_, NULL);
    const char *out_path = env->GetStringUTFChars(output_, NULL);

    // Note the argument order: the decoder takes (output, input).
    video_decode_example(out_path, in_path);

    // Hand the UTF buffers back to the JVM.
    env->ReleaseStringUTFChars(input_, in_path);
    env->ReleaseStringUTFChars(output_, out_path);
    return 0;
}
参考:
https://blog.csdn.net/leixiaohua1020/article/details/46889389
https://blog.csdn.net/u011913612/article/details/53419986
网友评论