#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <time.h>
#include <unistd.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/time.h>
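/*
 * Overview: pull an RTSP stream, decode the video, burn a text watermark into each
 * frame with the libavfilter "drawtext" filter, re-encode, and push the result to an
 * RTSP server. The code targets the FFmpeg 4.x API (AVBufferSinkParams is used below,
 * which newer FFmpeg releases have removed) and needs FFmpeg built with libfreetype
 * for drawtext, plus the font file referenced below in the working directory.
 * A build line along these lines should work (file name and paths are placeholders):
 *
 *   gcc rtsp_watermark.c -o rtsp_watermark -lavformat -lavcodec -lavfilter -lavutil -lpthread
 */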
AVDictionary* pOptionsDict;
int vi_stream_out_ = 0;
AVFormatContext* ifmtCtx = NULL;
AVFormatContext* ofmtCtx = NULL;
AVCodecParameters *origin_par = NULL;
const AVCodec *videoCodec = NULL;
int inVideoStreamIndex = 0;
int inAudioStreamIndex = 0;
int byte_buffer_size;
unsigned char *byte_buffer = NULL;
AVFrame* inFrame = NULL;
AVFrame* outFrame = NULL;
AVCodecContext* encodeCtx = NULL;
AVCodecContext* decodecCtx = NULL;
AVStream *inStream = NULL;
AVStream *outStream = NULL;
AVPacket *in_pkt = NULL;
AVPacket *out_pkt = NULL;
char WaterStr[100] = {0};
const char* rtspUrl = ""; /* RTSP source URL, fill in before running */
const char* pushUrl = ""; /* RTSP push (output) URL, fill in before running */
static int open_input_file(const char *filename)
{
AVCodec *dec;
int ret;
av_dict_set(&pOptionsDict, "rtsp_transport", "tcp", 0);
//av_dict_set(&pOptionsDict, "stimeout", "1000000", 0);
//av_dict_set(&pOptionsDict, "max_delay", "500000", 0);
//av_dict_set(&pOptionsDict, "buffer_size", "409600000", 0);
if ((ret = avformat_open_input(&ifmtCtx, filename, NULL, &pOptionsDict)) < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return ret;
}
if ((ret = avformat_find_stream_info(ifmtCtx, NULL)) < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return ret;
}
#if 1
// dump information
for (unsigned int i = 0; i < ifmtCtx->nb_streams; i++)
{
av_dump_format(ifmtCtx, i, filename, 0);
}
#endif
inVideoStreamIndex = av_find_best_stream(ifmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
if (inVideoStreamIndex < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return ret;
}
inStream = ifmtCtx->streams[inVideoStreamIndex];
inAudioStreamIndex = av_find_best_stream(ifmtCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
if (inAudioStreamIndex < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return ret;
}
decodecCtx = avcodec_alloc_context3((const AVCodec*)dec);
if (!decodecCtx)
{
printf("%s,%d\n",__func__,__LINE__);
return AVERROR(ENOMEM);
}
ret = avcodec_parameters_to_context(decodecCtx, inStream->codecpar);
if(ret < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return ret;
}
/* init the video decoder */
if ((ret = avcodec_open2(decodecCtx, dec, NULL)) != 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return ret;
}
return 0;
}
static int open_output_file(const char *filename)
{
int ret;
int i = 0;
enum AVMediaType media_type;
AVCodec *encoder;
ret = avformat_alloc_output_context2(&ofmtCtx, NULL, "rtsp", filename);
if (ret < 0 || NULL == ofmtCtx)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return AVERROR_UNKNOWN;
}
av_opt_set(ofmtCtx->priv_data, "rtsp_transport", "tcp", 0);
//ofmtCtx->max_interleave_delta = 1000000;
//ofmtCtx->max_delay = 100000;
for(i = 0; i < ifmtCtx->nb_streams; i++)
{
inStream = ifmtCtx->streams[i];
media_type = inStream->codecpar->codec_type;
if(AVMEDIA_TYPE_VIDEO != media_type)
{
continue;
}
encoder = avcodec_find_encoder(decodecCtx->codec_id);
if(NULL == encoder)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return -1;
}
encodeCtx = avcodec_alloc_context3(encoder);
if(NULL == encodeCtx)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return -1;
}
encodeCtx->codec_id = inStream->codecpar->codec_id;
encodeCtx->codec_type = inStream->codecpar->codec_type;
encodeCtx->height = inStream->codecpar->height;
encodeCtx->width = inStream->codecpar->width;
encodeCtx->time_base = inStream->time_base;
//encodeCtx->framerate = decodecCtx->framerate;
encodeCtx->gop_size = decodecCtx->gop_size;
//encodeCtx->max_b_frames = 0;
//av_opt_set(encodeCtx->priv_data, "tune", "zerolatency", 0);
if (encoder->pix_fmts) /* prefer the encoder's first supported pixel format */
{
encodeCtx->pix_fmt = encoder->pix_fmts[0];
}
else
{
encodeCtx->pix_fmt = decodecCtx->pix_fmt; /* otherwise keep the decoder's format */
}
outStream = avformat_new_stream(ofmtCtx, NULL);
if(!outStream)
{
printf("%s,%d\n",__func__,__LINE__);
return AVERROR_UNKNOWN;
}
//outStream->avg_frame_rate = inStream->avg_frame_rate;
//outStream->time_base = inStream->time_base;
encodeCtx->level = decodecCtx->level;
if ((ret = avcodec_open2(encodeCtx, encoder, NULL)) != 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return ret;
}
if (avcodec_parameters_from_context(outStream->codecpar, encodeCtx) < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return -1;
}
ofmtCtx->video_codec_id = ofmtCtx->oformat->video_codec;
if((ret = avformat_write_header(ofmtCtx, NULL)) < 0)
{
avformat_free_context(ofmtCtx);
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
return -5;
}
av_dump_format(ofmtCtx, 0, filename, 1);
}
return 0;
}
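/*
 * Note: the "rtsp" muxer handles its own network I/O (AVFMT_NOFILE), so no avio_open()
 * is needed in open_output_file(); avformat_write_header() is what actually connects
 * to the RTSP server, so the push URL must be reachable at that point.
 */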
// add watermark (burn text into a frame with the drawtext filter)
int addWaterMark(AVFrame *frame_in,AVFrame *frame_out,int w,int h, char *str)
{
int ret;
const AVFilter *buffersrc=avfilter_get_by_name("buffer");//input (source) end of the graph
const AVFilter *buffersink=avfilter_get_by_name("buffersink");//output (sink) end of the graph
AVFilterInOut *outputs=avfilter_inout_alloc();
AVFilterInOut *inputs=avfilter_inout_alloc();
AVFilterGraph *filter_graph;//the filter graph that connects all filters
filter_graph = avfilter_graph_alloc();
enum AVPixelFormat pix_fmts[]={AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};//accepted output pixel formats
/* describe the properties of the decoded frames that will be fed to the buffer source */
char args[256];
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
w,h,AV_PIX_FMT_YUV420P,1,25,1,1);//width, height, pixel format, time base (hard-coded 1/25; ideally the input stream's time base), pixel aspect ratio
/* create the buffer source filter context */
AVFilterContext *buffersrc_ctx;
ret=avfilter_graph_create_filter(&buffersrc_ctx,buffersrc,"in",args,NULL,filter_graph);
if(ret<0)
{
printf("����src������������ʧ��AVFilterContext\n");
return -1;
}
/* create the buffer sink filter context; the output pixel formats are passed via buffersink_params */
AVBufferSinkParams *buffersink_params;
buffersink_params=av_buffersink_params_alloc();
buffersink_params->pixel_fmts=pix_fmts;//restrict the sink to the formats above
AVFilterContext *buffersink_ctx;
ret=avfilter_graph_create_filter(&buffersink_ctx,buffersink,"out",NULL,buffersink_params,filter_graph);
av_free(buffersink_params);
if(ret<0)
{
printf("����sink������������ʧ��AVFilterContext\n");
return -2;
}
/* wire up the input/output endpoints of the filter graph */
outputs->name =av_strdup("in");
outputs->filter_ctx =buffersrc_ctx;
outputs->pad_idx =0;
outputs->next =NULL;
inputs->name =av_strdup("out");
inputs->filter_ctx =buffersink_ctx;
inputs->pad_idx =0;
inputs->next =NULL;
char filter_desrc[200]={0};//drawtext filter description carrying the watermark text
snprintf(filter_desrc,sizeof(filter_desrc),"drawtext=fontfile=AvenirLTPro-Black.ttf:fontcolor=red:fontsize=50:x=(w-text_w)/2:y=(h-text_h)/2-text_h:text='%s\n'",str);
if(avfilter_graph_parse_ptr(filter_graph,filter_desrc,&inputs,&outputs, NULL)<0)//parse the filter description into the graph
{
printf("�����ַ�����Ϣʧ��\n");
return -3;
}
/* verify that the graph is fully and validly connected */
if(avfilter_graph_config(filter_graph,NULL)<0)
{
printf("������Ϣ����\n");
return -4;
}
#if 0
/*
Find the drawtext filter instance that the graph created and set its options directly.
Note: the watermark text is already passed in through the filter description above, so
this lookup is normally unnecessary; the block is kept (disabled) for reference.
*/
AVFilterContext* filter_ctx;//the located drawtext filter context
int parsed_drawtext_0_index = -1;
for(int i=0;i<filter_graph->nb_filters;i++)//walk the filters actually present in the graph
{
AVFilterContext *filter_ctxn=filter_graph->filters[i];
printf("[%s %d]:filter_ctxn_name=%s\n",__FUNCTION__,__LINE__,filter_ctxn->name);
if(!strcmp(filter_ctxn->name,"Parsed_drawtext_0"))
{
parsed_drawtext_0_index=i;
}
}
if(parsed_drawtext_0_index==-1)
{
printf("[%s %d]:no Parsed_drawtext_0\n",__FUNCTION__,__LINE__);//drawtext filter not found
}
filter_ctx=filter_graph->filters[parsed_drawtext_0_index];//use the filter located above
/* read the current system time and hand it to drawtext as the text to render */
char sys_time[64];
time_t sec,sec2 = 0;
sec=time(NULL);
if(sec!=sec2)
{
sec2=sec;
struct tm* today = localtime(&sec2);
strftime(sys_time, sizeof(sys_time), "%Y/%m/%d %H\\:%M\\:%S", today); //24-hour format
}
av_opt_set(filter_ctx->priv, "text", sys_time, 0 ); //set the drawtext "text" option
#endif
/* push the input frame into the buffer source */
if(av_buffersrc_add_frame(buffersrc_ctx,frame_in)<0)
{
return -5;
}
/* pull the filtered frame from the buffer sink */
if(av_buffersink_get_frame(buffersink_ctx, frame_out)<0)
{
return -6;
}
avfilter_inout_free(&outputs);
avfilter_inout_free(&inputs);
avfilter_graph_free(&filter_graph);
return 0;
}
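/*
 * Design note: addWaterMark() builds and frees a complete filter graph for every frame,
 * which is simple but costly, and the early error returns above do not free the graph
 * or the AVFilterInOut lists. A common alternative is to build the graph once at
 * start-up and only push/pull frames afterwards; with a persistent graph the text can
 * be updated at runtime, e.g. via avfilter_graph_send_command() on the drawtext filter.
 * This is a suggestion, not part of the original code.
 */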
#if 0
//create the video encoder context
AVCodecContext* createVideoEncoderCtx(AVStream *pVst)
{
AVCodec* codec = NULL;
AVCodecContext* codecCtx = NULL;
if(NULL == pVst)
{
printf("%s,%d\n",__func__,__LINE__);
return NULL;
}
//codec = avcodec_find_encoder_by_name("h264_omx");
codec = avcodec_find_encoder(pVst->codecpar->codec_id);
if (NULL == codec)
{
printf("%s,%d\n",__func__,__LINE__);
return NULL;
}
codecCtx = avcodec_alloc_context3(codec);
if (NULL == codecCtx)
{
printf("%s,%d\n",__func__,__LINE__);
return NULL;
}
codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
codecCtx->width = pVst->codecpar->width;
codecCtx->height = pVst->codecpar->height;
codecCtx->bit_rate = 1024000;
codecCtx->gop_size = 50;
codecCtx->framerate = pVst->avg_frame_rate;
codecCtx->time_base = pVst->time_base;
codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
av_opt_set(codecCtx->priv_data, "tune", "zerolatency", 0);
if (avcodec_open2(codecCtx, codec, NULL) < 0)
{
printf("%s,%d\n",__func__,__LINE__);
avcodec_free_context(&codecCtx);
return NULL;
}
printf("%s,%d\n",__func__,__LINE__);
return codecCtx;
}
//create the video decoder context
AVCodecContext* createVideoDecoderCtx(AVStream *pVst)
{
AVCodec* videoCodec = NULL;
AVCodecContext* codecCtx = NULL;
videoCodec = avcodec_find_decoder(pVst->codecpar->codec_id);
if(NULL == videoCodec)
{
printf("%s,%d\n",__func__,__LINE__);
return NULL;
}
codecCtx = avcodec_alloc_context3(videoCodec);
if(NULL == codecCtx)
{
printf("%s,%d\n",__func__,__LINE__);
return NULL;
}
avcodec_parameters_to_context(codecCtx, pVst->codecpar);
codecCtx->time_base = pVst->time_base;
if (avcodec_open2(codecCtx, videoCodec, NULL) < 0)
{
printf("%s,%d\n",__func__,__LINE__);
avcodec_free_context(&codecCtx);
return NULL;
}
return codecCtx;
}
#endif
//decode a compressed packet into a raw frame
AVFrame* decodePacketToFrame(AVCodecContext *ctx, AVPacket pkt)
{
int ret = 0;
AVFrame* frame = NULL;
if(NULL == ctx)
{
printf("%s,%d\n",__func__,__LINE__);
return NULL;
}
ret = avcodec_send_packet(ctx, &pkt);
if (ret != 0)
{
printf("%s,%d,ret = %d\n",__func__,__LINE__,ret);
return NULL;
}
frame = av_frame_alloc();
if (NULL == frame)
{
printf("%s,%d\n",__func__,__LINE__);
return NULL;
}
ret = avcodec_receive_frame(ctx, frame);
if (ret != 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
av_frame_free(&frame); /* avoid leaking the frame when no output is available */
return NULL;
}
return frame;
}
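/*
 * Note: avcodec_receive_frame() returns AVERROR(EAGAIN) while the decoder is still
 * buffering input (common for the first packets of a stream), which the helper above
 * reports as a failure. A minimal sketch of the usual send/receive pattern is shown
 * below; the helper name decodePacketInto() is hypothetical and not part of the
 * original code.
 */
static int decodePacketInto(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frame)
{
int ret = avcodec_send_packet(ctx, pkt);
if (ret < 0)
return ret;
/* AVERROR(EAGAIN)/AVERROR_EOF mean "no frame yet", not a hard error */
return avcodec_receive_frame(ctx, frame);
}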
int createPushStream(AVFormatContext *ctx, AVFormatContext *ofmtCtx, AVCodecContext *encodeCtx)
{
int ret = -1;
int i = 0;
AVStream *in_stream = NULL;
AVStream *out_stream = NULL;
enum AVMediaType media_type;
for(i = 0; i < ctx->nb_streams; i++)
{
in_stream = ctx->streams[i];
media_type = in_stream->codecpar->codec_type;
if(media_type != AVMEDIA_TYPE_VIDEO)
{
continue;
}
out_stream = avformat_new_stream(ofmtCtx, NULL);
if(NULL == out_stream)
{
return -1;
}
if (avcodec_parameters_from_context(out_stream->codecpar, encodeCtx) < 0)
{
return -2;
}
vi_stream_out_ = out_stream->index;
out_stream->time_base = in_stream->time_base;
out_stream->avg_frame_rate = in_stream->avg_frame_rate;
}
return 0 ;
}
int startPushStrem(const char *url, AVFormatContext *ifmtCtx, AVFormatContext *ofmtCtx, AVCodecContext *encodeCtx)
{
int ret = 0;
if (ofmtCtx != NULL)
{
return -1;
}
ret = avformat_alloc_output_context2(&ofmtCtx, NULL, "rtsp", url);
if (ret < 0)
{
return -2;
}
av_opt_set(ofmtCtx->priv_data, "rtsp_transport", "tcp", 0);
ofmtCtx->max_interleave_delta = 1000000; // us
ofmtCtx->max_delay = 100000;
ret = createPushStream(ifmtCtx,ofmtCtx,encodeCtx);
if (ret < 0) {
avformat_free_context(ofmtCtx);
return -3;
}
av_dump_format(ofmtCtx, 0, url, 1);
if (!(ofmtCtx->flags & AVFMT_NOFILE))
{
ret = avio_open(&ofmtCtx->pb, url, AVIO_FLAG_WRITE);
if (ret < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
avformat_free_context(ofmtCtx);
return -4;
}
}
ofmtCtx->video_codec_id = ofmtCtx->oformat->video_codec;
if(avformat_write_header(ofmtCtx, NULL) < 0)
{
avformat_free_context(ofmtCtx);
return -5;
}
return 0;
}
void releaseObj()
{
if(NULL != pOptionsDict)
{
av_dict_free(&pOptionsDict);
}
if(NULL != in_pkt)
{
av_packet_free(&in_pkt);
}
if(NULL != out_pkt)
{
av_packet_free(&out_pkt);
}
if(NULL != inFrame)
{
av_frame_free(&inFrame);
}
if(NULL != outFrame)
{
av_frame_free(&outFrame);
}
if(NULL != encodeCtx)
{
avcodec_free_context(&encodeCtx);
}
if(NULL != decodecCtx)
{
avcodec_free_context(&decodecCtx);
}
if(NULL != ifmtCtx)
{
avformat_close_input(&ifmtCtx);
}
if(NULL != ofmtCtx)
{
/* output context: a clean shutdown would also call av_write_trailer() after the push loop */
avformat_free_context(ofmtCtx);
ofmtCtx = NULL;
}
}
void *myThread1(void *arg)
{
(void)arg; /* unused */
int i = 1;
printf("!!str = %s",WaterStr);
while(1)
{
printf("!!str = %s\n",WaterStr);
snprintf(WaterStr,sizeof(WaterStr),"%d", i);
i++;
sleep(2);
}
}
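/*
 * Caveat: WaterStr is written by this thread and read by the main loop without any
 * synchronization, so the watermark text may be observed partially updated. A mutex
 * (or copying the string under a lock before building the filter description) would
 * be needed for strictly correct behavior.
 */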
int main()
{
int ret = 0;
int i = 0;
unsigned int stream_index;
int firstFrame = 1;
pthread_t test_id;
ret= pthread_create(&test_id,NULL,myThread1,NULL); //create the watermark-text update thread
if(ret)
{
printf("create error\n");
return -1;
}
ret = open_input_file(rtspUrl);
if(ret < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
goto end;
}
ret = open_output_file(pushUrl);
if(ret < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
goto end;
}
in_pkt = av_packet_alloc();
out_pkt = av_packet_alloc();
outFrame = av_frame_alloc();
if(!in_pkt || !out_pkt || !outFrame)
{
printf("%s,%d\n",__func__,__LINE__);
goto end;
}
//pthread_join(test_id,NULL);
while(1)
{
ret = av_read_frame(ifmtCtx, in_pkt);
if (ret == AVERROR(EAGAIN))
{
av_usleep(10000);
continue;
}
if(ret < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
break;
}
stream_index = in_pkt->stream_index;
if(inVideoStreamIndex != stream_index) /* only the selected video stream is processed */
{
av_packet_unref(in_pkt);
continue;
}
inFrame = decodePacketToFrame(decodecCtx, *in_pkt);
if(NULL == inFrame)
{
/* the decoder may simply need more input (AVERROR(EAGAIN)); skip this packet instead of aborting */
printf("%s,%d\n",__func__,__LINE__);
av_packet_unref(in_pkt);
continue;
}
ret = addWaterMark(inFrame,outFrame, encodeCtx->width, encodeCtx->height , WaterStr);
if(ret < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
av_frame_free(&inFrame);
av_packet_unref(in_pkt);
continue;
}
//outFrame = inFrame;
//printf("@@@type = %d\n", inFrame->pict_type);
outFrame->pict_type = inFrame->pict_type; /* pass the picture type through; AV_PICTURE_TYPE_NONE would let the encoder decide */
ret = avcodec_send_frame(encodeCtx, outFrame);
if(ret < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
break;
}
ret = avcodec_receive_packet(encodeCtx, out_pkt);
if(ret >= 0)
{
out_pkt->stream_index = outStream->index; /* the output context only has the video stream */
av_packet_rescale_ts(out_pkt,
ifmtCtx->streams[stream_index]->time_base,
ofmtCtx->streams[out_pkt->stream_index]->time_base);
if (out_pkt->dts < 0 || out_pkt->pts < 0 || out_pkt->dts > out_pkt->pts || firstFrame)
{
firstFrame=0;
out_pkt->dts = out_pkt->pts = out_pkt->duration = 0;
}
ret = av_interleaved_write_frame(ofmtCtx,out_pkt);
if(ret < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
break;
}
}
#if 0
if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
break;
}
out_pkt->stream_index = stream_index;
av_packet_rescale_ts(out_pkt,
ifmtCtx->streams[stream_index]->time_base,
ofmtCtx->streams[stream_index]->time_base);
ret = av_interleaved_write_frame(ofmtCtx,out_pkt);
if(ret < 0)
{
printf("%s,%d, ret = %d\n",__func__,__LINE__,ret);
break;
}
#endif
av_frame_free(&inFrame); /* decodePacketToFrame() allocates a fresh frame every call */
av_frame_unref(outFrame); /* drop the reference returned by the buffer sink */
av_packet_unref(in_pkt);
}
end:
releaseObj();
return 0;
}