Notes on the pitfalls I ran into while capturing from a camera with FFmpeg, decoding, converting the color space, and playing the result back with SDL2.
First, FFmpeg 3.2 changed quite a lot compared with earlier versions: the old interfaces make the compiler complain that the declarations have been deprecated, so they have to be swapped for the new ones. For example, avcodec_decode_video2 becomes
avcodec_send_packet(pCodecCtx, packet) + avcodec_receive_frame(pCodecCtx, pFrame),
and avcodec_receive_frame has to sit in a loop that keeps pulling frames until the decoder has nothing more to give.
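For reference, a minimal sketch of how the new send/receive pair is meant to be used (assuming pCodecCtx, packet and pFrame are already set up, as in the full program further down):
if (avcodec_send_packet(pCodecCtx, packet) == 0) {
    while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
        // a decoded frame is now in pFrame; convert/display it here
    }
    // avcodec_receive_frame returns AVERROR(EAGAIN) once the decoder
    // needs more input, which is what ends the inner loop
}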
Likewise, the old avpicture_fill-style calls are replaced by:
out_buffer = (uint8_t *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height, 1));
av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height, 1);
These are the API changes to watch out for while porting.
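For comparison, the deprecated calls that the two lines above replace looked roughly like this (shown only to make the mapping obvious; BGR24 is just the example format used above):
// old, deprecated style (roughly equivalent):
// out_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height));
// avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);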
Another pitfall: when calling SDL_CreateTexture I initially passed an AVPixelFormat value where the SDL_PIXELFORMAT_BGR24-style macro belongs, so the image never showed up. Note also that SDL_PIXELFORMAT_YV12 is a planar YUV 4:2:0 format (same sampling as YUV420P, with the U and V planes swapped), not YUV422. These macros are defined in SDL_pixels.h.
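In other words, SDL_CreateTexture wants an SDL_PixelFormatEnum value, not an AVPixelFormat; for YUV420P data a YV12 (or IYUV) texture is the natural choice. A minimal illustration, assuming render and pCodecCtx already exist:
// correct: an SDL_PIXELFORMAT_* macro
texture = SDL_CreateTexture(render, SDL_PIXELFORMAT_YV12,
    SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
// wrong: an AVPixelFormat value such as AV_PIX_FMT_BGR24 compiles
// (it is just an enum/int) but gives a texture that never displays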
The fourth parameter of sws_scale cost me the most time. I kept assuming it took the width, but it is actually srcSliceY, the index of the first source row to process; pass 0 to start from the top of the frame (the slice height goes in the fifth parameter). Get this wrong and the color-space conversion comes out garbled.
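Annotated, the call looks like this (same variables as in the program below):
sws_scale(img_convert_ctx,
    (const uint8_t* const*)pFrame->data, pFrame->linesize,
    0,                  // srcSliceY: first source row to process, NOT the width
    pCodecCtx->height,  // srcSliceH: number of rows in the slice (whole frame here)
    pFrameYUV->data, pFrameYUV->linesize);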
On to the code (FFmpeg 3.2, SDL 2.0, VS2017). The project is called ffopencamera; the camera-capture part is taken straight from 雷神 (Lei Xiaohua)'s sample:
#include <iostream>
using namespace std;
#define SDL_MAIN_HANDLED
extern "C" {
#include "libavformat\avformat.h"
#include "libavcodec\avcodec.h"
#include "libavdevice\avdevice.h"
#include "libswscale\swscale.h"
#include "libavutil\imgutils.h"
#include "SDL\SDL.h"
#include "SDL\SDL_main.h"
}
int main()
{
    int ret = 0;
    avdevice_register_all();
    av_register_all();

    AVFormatContext *pFormatCtx = avformat_alloc_context();

    // This first open with list_options=true only prints the device's supported
    // formats/resolutions to the log; the real open follows below.
    AVDictionary *pOptions = NULL;
    av_dict_set(&pOptions, "list_options", "true", 0);
    AVInputFormat *pIformat = av_find_input_format("dshow");
    cout << "========Device Option Info======" << endl;
    avformat_open_input(&pFormatCtx, "video=A4 TECH HD PC Camera", pIformat, &pOptions);
    cout << "================================" << endl;

    const char *filename = "video=A4 TECH HD PC Camera";
    if ((ret = avformat_open_input(&pFormatCtx, filename, pIformat, NULL)) < 0) {
        cout << "Cannot open input file" << endl;
        return ret;
    }
    if ((ret = avformat_find_stream_info(pFormatCtx, NULL)) < 0) {
        cout << "Cannot find stream information" << endl;
        return ret;
    }
    int video_index = -1;
    AVCodec *dec;
    if ((ret = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0)) < 0) {
        cout << "Cannot find a video stream in the input file" << endl;
        return ret;
    }
    for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_index = i;
        }
    }
    if (video_index == -1) {
        cout << "Couldn't find a video stream." << endl;
        return -1;
    }
    AVCodecContext *pCodecCtx = avcodec_alloc_context3(NULL);
    //pCodecCtx = pFormatCtx->streams[video_index]->codec; // deprecated; newer versions fill the context from codecpar instead:
    avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[video_index]->codecpar);
    dec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (dec == NULL) {
        cout << "Codec not found." << endl;
        return -1;
    }
    if ((ret = avcodec_open2(pCodecCtx, dec, NULL)) < 0) {
        cout << "Cannot open video decoder" << endl;
        return ret;
    }

    if (SDL_Init(SDL_INIT_AUDIO | SDL_INIT_VIDEO)) {
        cout << "Could not initialize SDL" << endl;
        return -1;
    }
    SDL_Window *window;
    SDL_Renderer *render;
    SDL_Texture *texture;
    SDL_Rect rect;
    rect.x = 0;
    rect.y = 0;
    rect.w = pCodecCtx->width;
    rect.h = pCodecCtx->height;
    window = SDL_CreateWindow("show", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
        pCodecCtx->width, pCodecCtx->height, SDL_WINDOW_RESIZABLE);
    render = SDL_CreateRenderer(window, -1, 0);
    texture = SDL_CreateTexture(render, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING,
        pCodecCtx->width, pCodecCtx->height);

    AVFrame *pFrame = av_frame_alloc();
    AVFrame *pFrameYUV = av_frame_alloc();
    AVPacket *packet = av_packet_alloc();
    uint8_t *out_buffer;
    out_buffer = (uint8_t *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

    struct SwsContext *img_convert_ctx;
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
        pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    while (1) {
        if ((ret = av_read_frame(pFormatCtx, packet)) < 0) {
            break;
        }
        if (packet->stream_index == video_index) {
            if ((ret = avcodec_send_packet(pCodecCtx, packet)) < 0) {
                cout << "Error sending packet to decoder" << endl;
                break;
            }
            while (1) {
                ret = avcodec_receive_frame(pCodecCtx, pFrame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    // not an error: the decoder just needs more input (or is drained)
                    break;
                }
                else if (ret < 0) {
                    cout << "Error decoding video frame" << endl;
                    break;
                }
                pFrame->pts = av_frame_get_best_effort_timestamp(pFrame);
                //SDL_UpdateTexture(texture, NULL,
                //    pFrame->data[0], pFrame->linesize[0]);
                // Pitfall: the 4th argument (srcSliceY) is the first source row to
                // process, so it is 0 here; the 5th argument is the slice height.
                // Read the declaration of sws_scale carefully.
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                    pFrameYUV->data, pFrameYUV->linesize);
                //SDL_UpdateTexture(texture, NULL,
                //    pFrameYUV->data[0], pFrameYUV->linesize[0]);
                //SDL---------------------------
                SDL_UpdateYUVTexture(texture, &rect,
                    pFrameYUV->data[0], pFrameYUV->linesize[0],
                    pFrameYUV->data[1], pFrameYUV->linesize[1],
                    pFrameYUV->data[2], pFrameYUV->linesize[2]);
                SDL_RenderClear(render);
                SDL_RenderCopy(render, texture, NULL, &rect);
                SDL_RenderPresent(render);
                //SDL End-----------------------
                //Delay 40ms
                SDL_Delay(40);
            }
        }
        av_packet_unref(packet);
    }
    return 0;
}
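The listing stops at return 0 and leaves out teardown; a minimal cleanup sketch with the matching free/destroy calls for what the program allocates (not part of the original project) would be:
sws_freeContext(img_convert_ctx);
av_free(out_buffer);
av_frame_free(&pFrameYUV);
av_frame_free(&pFrame);
av_packet_free(&packet);
avcodec_free_context(&pCodecCtx);
avformat_close_input(&pFormatCtx);
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(render);
SDL_DestroyWindow(window);
SDL_Quit();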
Project source code download:
