美文网首页
ffmpeg视频加水印

ffmpeg视频加水印

作者: 嘿嘿和露红叶 | 来源:发表于2018-06-15 11:22 被阅读48次

// Timing state carried across frames during the transcode loop.
int64_t            lastPts;
int64_t            lastDts;
int64_t            lastFrameRealtime;
int64_t            startTime;

// Encoder context for the H.264 output stream.
AVCodecContext      *outputEncContext;

// One decoder context per input (0 = main video, 1 = watermark image).
AVCodecContext      *decoderContext[2];

// Fix: scrape fused "struct SwsContext" into one token.
struct SwsContext  *pSwsContext;

// Filter-graph endpoints produced by avfilter_graph_parse2().
AVFilterInOut      *inputs;
AVFilterInOut      *outputs;
AVFilterGraph      *filter_graph;
AVFilterContext    *inputFilterContext[2];
AVFilterContext    *outputFilterContext;

// Nominal source / destination frame sizes.
// Fix: macro names had been fused with their values (SrcWidth1920 …).
#define SrcWidth  1920
#define SrcHeight 1080
#define DstWidth  640
#define DstHeight 480

int64_t            firstPts = AV_NOPTS_VALUE;

// Overlay the watermark at x=100, y=100 in the main video.
const char         *filter_descr = "overlay=100:100";

// AVIO interrupt callback: returning 0 means "never abort" blocking I/O.
// Fix: scrape fused "int interrupt_cb" and "void *ctx".
int interrupt_cb(void *ctx)
{
    return 0;
}

/// Opens the media file at fileName into the global context[inputIndex]
/// and probes its streams.
/// @return 0 or positive on success, a negative AVERROR on failure.
+ (int)openInput:(char const *)fileName inputIndex:(int)inputIndex {
    context[inputIndex] = avformat_alloc_context();
    context[inputIndex]->interrupt_callback.callback = interrupt_cb;
    AVDictionary *format_opts = NULL;
    // On failure avformat_open_input frees the context and NULLs the pointer.
    int ret = avformat_open_input(&context[inputIndex], fileName, NULL, &format_opts);
    av_dict_free(&format_opts);  // fix: the options dictionary was never released
    if (ret < 0) {
        return ret;
    }
    ret = avformat_find_stream_info(context[inputIndex], NULL);
    av_dump_format(context[inputIndex], 0, fileName, 0);
    if (ret >= 0) {
        NSLog(@"open input stream successfully!");
    }
    return ret;
}

/// Reads the next packet from context[inputIndex].
/// @return a heap-allocated AVPacket the caller must unref and free,
///         or NULL at EOF / on read error.
+ (AVPacket *)readPacketFromSource:(int)inputIndex {
    AVPacket *packet = av_malloc(sizeof(AVPacket));
    av_init_packet(packet);
    int ret = av_read_frame(context[inputIndex], packet);
    if (ret >= 0) {
        return packet;
    }
    // Fix: the original leaked the malloc'd packet on every failed read.
    av_packet_unref(packet);
    av_free(packet);
    return NULL;
}

/// Creates an MPEG-TS muxer for fileName, mirrors the input's stream
/// count, and writes the container header.
/// @return 0 or positive on success, a negative AVERROR on failure
///         (the output context is torn down on the error path).
+ (int)openOutput:(char const *)fileName inputIndex:(int)inputIndex {
    int ret = 0;
    ret = avformat_alloc_output_context2(&outputContext, NULL, "mpegts", fileName);
    if (ret < 0) {
        goto Error;
    }
    ret = avio_open2(&outputContext->pb, fileName, AVIO_FLAG_READ_WRITE, NULL, NULL);
    if (ret < 0) {
        goto Error;
    }
    // Fix: the original condition was "context[...]->nb_streams" with no
    // "i <" comparison, so the loop never terminated.
    for (int i = 0; i < context[inputIndex]->nb_streams; i++) {
        AVStream *stream = avformat_new_stream(outputContext, outputEncContext->codec);
        if (!stream) {
            // Fix: the original tested ret here but never assigned it.
            ret = AVERROR(ENOMEM);
            goto Error;
        }
        // Deprecated stream->codec assignment kept to match the rest of the file.
        stream->codec = outputEncContext;
    }
    av_dump_format(outputContext, 0, fileName, 1);
    ret = avformat_write_header(outputContext, NULL);
    if (ret < 0) {
        goto Error;
    }
    NSLog(@"open output stream successfully!");
    return ret;

Error:
    if (outputContext) {
        avformat_close_input(&outputContext);
    }
    return ret;
}

/// Closes and frees the demuxer context for the given input slot.
/// Safe to call when the slot was never opened (NULL is skipped).
+ (void)closeInput:(int)inputIndex {
    if (!context[inputIndex]) {
        return;
    }
    avformat_close_input(&context[inputIndex]);
}

/// Closes every output stream's codec context and tears down the muxer.
+ (void)closeOutput {
    if (!outputContext) {
        return;
    }
    // Fix: the original loop condition was garbled to "i nb_streams";
    // reconstructed as a bound on the output's stream count.
    for (int i = 0; i < outputContext->nb_streams; i++) {
        AVCodecContext *codecContext = outputContext->streams[i]->codec;
        avcodec_close(codecContext);
    }
    avformat_close_input(&outputContext);
}

/// Configures and opens the global H.264 encoder context at the given
/// frame size, copying the time base from input stream 0.
/// @return 1 on success, -1 or a negative AVERROR on failure.
+ (int)initEncoderCodecWithWidth:(int)width height:(int)height inputIndex:(int)inputIndex {
    AVCodec *pH264Codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (pH264Codec == NULL) {
        printf("%s", "avcodec_find_encoder failed !");
        return -1;
    }
    outputEncContext = avcodec_alloc_context3(pH264Codec);
    outputEncContext->gop_size = 30;
    // No B-frames: keeps output pts monotonic for this simple muxer.
    outputEncContext->has_b_frames = 0;
    outputEncContext->max_b_frames = 0;
    outputEncContext->codec_id = pH264Codec->id;
    // Inherit the input's time base (deprecated stream->codec access
    // kept to match the rest of this file).
    outputEncContext->time_base.num = context[inputIndex]->streams[0]->codec->time_base.num;
    outputEncContext->time_base.den = context[inputIndex]->streams[0]->codec->time_base.den;
    // First pixel format the encoder advertises as supported.
    outputEncContext->pix_fmt = *pH264Codec->pix_fmts;
    outputEncContext->width  = width;
    outputEncContext->height = height;
    // Speed-oriented settings: no subpel ME, single reference frame,
    // no scene-change detection, no trellis quantization.
    outputEncContext->me_subpel_quality = 0;
    outputEncContext->refs = 1;
    outputEncContext->scenechange_threshold = 0;
    outputEncContext->trellis = 0;
    AVDictionary *options = NULL;
    // Global header so extradata lands in the container, not the stream.
    outputEncContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    int ret = avcodec_open2(outputEncContext, pH264Codec, &options);
    av_dict_free(&options);  // fix: release any options the codec rejected
    if (ret < 0) {
        printf("%s", "open codec failed !\n");
        return ret;
    }
    return 1;
}

/// Finds a decoder for codecId, binds it to the (deprecated) codec
/// context of input stream 0, and opens it.
/// @return >= 0 on success, -1 or a negative AVERROR on failure.
+ (int)initDecodeCodecWith:(enum AVCodecID)codecId inputIndex:(int)inputIndex {
    AVCodec *deCodec = avcodec_find_decoder(codecId);
    if (!deCodec) {
        return -1;
    }
    decoderContext[inputIndex] = context[inputIndex]->streams[0]->codec;
    if (!decoderContext[inputIndex]) {
        printf("Could not allocate video codec context\n");
        return -1;
    }
    // Allow feeding truncated packets when the codec supports it.
    if (deCodec->capabilities & AV_CODEC_CAP_TRUNCATED) {
        decoderContext[inputIndex]->flags |= AV_CODEC_FLAG_TRUNCATED;
    }
    int ret = avcodec_open2(decoderContext[inputIndex], deCodec, NULL);
    if (ret < 0) {
        printf("%s", "open decodec failed !");
    }
    return ret;
}

/// Decodes one packet into frame using decoderContext[inputIndex].
/// @return YES only when a complete frame was produced; the packet's
///         pts is copied onto the frame on success.
+ (BOOL)decodeVideo:(AVPacket *)packet frame:(AVFrame *)frame inputIndex:(int)inputIndex {
    int gotFrame = 0;
    int hr = avcodec_decode_video2(decoderContext[inputIndex], frame, &gotFrame, packet);
    if (hr >= 0 && gotFrame != 0) {
        frame->pts = packet->pts;
        return YES;  // fix: use ObjC BOOL constants, not C99 true/false
    }
    return NO;
}

/// Creates a "buffer" source filter describing the input's video
/// parameters and links it to the pad described by `input`.
/// @return 0 on success, a negative AVERROR otherwise.
+ (int)initInputFilter:(AVFilterInOut *)input fileName:(const char *)fileName inputIndex:(int)inputIndex {
    char args[512];
    memset(args, 0, sizeof(args));
    AVFilterContext *padFilterContext = input->filter_ctx;
    const AVFilter *filter = avfilter_get_by_name("buffer");
    AVCodecContext *codecContext = context[inputIndex]->streams[0]->codec;
    // Fix: the original called sprintf() with sizeof(args) where the
    // format string belongs (undefined behavior), and wrote
    // "pixel_aspect+" instead of "pixel_aspect=".
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             codecContext->width, codecContext->height, codecContext->pix_fmt,
             codecContext->time_base.num,
             codecContext->time_base.den / codecContext->ticks_per_frame,
             codecContext->sample_aspect_ratio.num,
             codecContext->sample_aspect_ratio.den);
    int ret = avfilter_graph_create_filter(&inputFilterContext[inputIndex], filter,
                                           fileName, args, NULL, filter_graph);
    if (ret < 0) {
        return ret;
    }
    return avfilter_link(inputFilterContext[inputIndex], 0, padFilterContext, input->pad_idx);
}

/// Creates the "buffersink" filter and links the graph's output pad
/// to it.
/// @return 0 on success, a negative AVERROR otherwise.
+ (int)initOutputFilter:(AVFilterInOut *)output filterName:(const char *)filterName {
    AVFilterContext *padFilterContext = output->filter_ctx;
    const AVFilter *filter = avfilter_get_by_name("buffersink");
    int ret = avfilter_graph_create_filter(&outputFilterContext, filter, filterName,
                                           NULL, NULL, filter_graph);
    if (ret < 0) {
        return ret;
    }
    return avfilter_link(padFilterContext, output->pad_idx, outputFilterContext, 0);
}

/// Releases the filter-graph in/out descriptors after linking.
+ (void)freeInout {
    // Fix: guard the inputs->next dereference — it crashes when
    // avfilter_graph_parse2() failed and left inputs NULL.
    if (inputs) {
        avfilter_inout_free(&inputs->next);
    }
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
}

/// Entry point: decodes test.mp4 (input 0) and test.jpg (input 1),
/// overlays the image onto every video frame via the filter graph,
/// re-encodes to H.264 and muxes the result to outputFileName.
/// @return always 0; failures are logged and abort the pipeline.
+ (int)_tmain {
    char const *fileInput1 = [[[NSBundle mainBundle] pathForResource:@"test" ofType:@"mp4"] UTF8String];
    char const *fileInput2 = [[[NSBundle mainBundle] pathForResource:@"test" ofType:@"jpg"] UTF8String];
    char const *outputFileName = "/Users/ubaby/Library/Containers/bylh.testFFmpegOS/Data/Documents/test0.mp4";

    av_register_all();
    avfilter_register_all();
    avformat_network_init();
    avdevice_register_all();
    av_log_set_level(AV_LOG_ERROR);

    // Open both inputs: slot 0 = main video, slot 1 = watermark image.
    char const *fileName = fileInput1;
    for (int i = 0; i < 2; i++) {
        if (i > 0) {
            fileName = fileInput2;
        }
        if ([self openInput:fileName inputIndex:i] < 0) {
            printf("Open file Input failed !");
            return 0;
        }
    }

    for (int i = 0; i < 2; i++) {
        int ret = [self initDecodeCodecWith:context[i]->streams[0]->codec->codec_id inputIndex:i];
        if (ret < 0) {
            printf("initDecodeCodec failed !");
            return 0;
        }
    }

    int ret = [self initEncoderCodecWithWidth:decoderContext[0]->width
                                       height:decoderContext[0]->height
                                   inputIndex:0];
    if (ret < 0) {
        printf("open encoder failed ret is %d", ret);
        printf("initEncoderCodec failed !");
        return 0;
    }

    // Build the overlay graph: two buffer sources feeding one buffersink.
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        printf("graph alloc failed !");
        goto End;
    }
    avfilter_graph_parse2(filter_graph, filter_descr, &inputs, &outputs);
    [self initInputFilter:inputs fileName:"MainFrame" inputIndex:0];
    [self initInputFilter:inputs->next fileName:"OverlayFrame" inputIndex:1];
    [self initOutputFilter:outputs filterName:"output"];
    [self freeInout];
    ret = avfilter_graph_config(filter_graph, NULL);
    if (ret < 0) {
        goto End;
    }

    if ([self openOutput:outputFileName inputIndex:0] < 0) {
        printf("open file Output failed !");
        return 0;
    }

    AVFrame *pSrcFrame[2];
    AVFrame *inputFrame[2];
    pSrcFrame[0] = av_frame_alloc();
    pSrcFrame[1] = av_frame_alloc();
    inputFrame[0] = av_frame_alloc();
    inputFrame[1] = av_frame_alloc();
    AVFrame *filterFrame = av_frame_alloc();

    int got_output = 0;
    int64_t timeRecord = 0;
    int64_t firstPacketTime = 0;
    int64_t outLastTime = av_gettime();
    int64_t inLastTime = av_gettime();
    int64_t videoCount = 0;

    // Decode a single frame of the JPEG; it is reused as the overlay
    // for every video frame below.
    while (1) {
        AVPacket *packet = [self readPacketFromSource:1];
        if (!packet) {
            break;  // fix: the original spun forever if the image never decoded
        }
        BOOL decoded = [self decodeVideo:packet frame:pSrcFrame[1] inputIndex:1];
        av_packet_unref(packet);  // fix: the original leaked every packet
        av_free(packet);
        if (decoded) {
            break;
        }
    }

    // Main transcode loop: decode -> push both frames -> pull filtered
    // frame -> encode -> write.
    while (1) {
        outLastTime = av_gettime();
        AVPacket *packet = [self readPacketFromSource:0];
        if (!packet) {
            break;  // EOF or read error
        }
        if ([self decodeVideo:packet frame:pSrcFrame[0] inputIndex:0]) {
            av_frame_ref(inputFrame[0], pSrcFrame[0]);
            if (av_buffersrc_add_frame_flags(inputFilterContext[0], inputFrame[0],
                                             AV_BUFFERSRC_FLAG_PUSH) >= 0) {
                // Keep the overlay frame in step with the video timeline.
                pSrcFrame[1]->pts = pSrcFrame[0]->pts;
                // Fix: without KEEP_REF the source consumes pSrcFrame[1]
                // on the first push, leaving nothing for later frames.
                if (av_buffersrc_add_frame_flags(inputFilterContext[1], pSrcFrame[1],
                                                 AV_BUFFERSRC_FLAG_KEEP_REF | AV_BUFFERSRC_FLAG_PUSH) >= 0) {
                    ret = av_buffersink_get_frame_flags(outputFilterContext, filterFrame,
                                                        AV_BUFFERSINK_FLAG_NO_REQUEST);
                    if (ret >= 0) {
                        AVPacket *pTmpPkt = av_malloc(sizeof(AVPacket));
                        av_init_packet(pTmpPkt);
                        pTmpPkt->data = NULL;
                        pTmpPkt->size = 0;
                        ret = avcodec_encode_video2(outputEncContext, pTmpPkt, filterFrame, &got_output);
                        if (ret >= 0 && got_output) {
                            if (av_write_frame(outputContext, pTmpPkt) < 0) {
                                printf("av_write_frame failed !");
                                av_packet_unref(pTmpPkt);
                                av_free(pTmpPkt);
                                av_packet_unref(packet);
                                av_free(packet);
                                return 0;
                            }
                        }
                        av_packet_unref(pTmpPkt);  // fix: encoded packet was leaked
                        av_free(pTmpPkt);
                    }
                    // Fix: the original av_frame_free()'d filterFrame here,
                    // leaving a NULL pointer for the next iteration; unref
                    // clears it for reuse instead.
                    av_frame_unref(filterFrame);
                }
            }
        }
        av_packet_unref(packet);  // fix: source packets were leaked
        av_free(packet);
    }

    // Fix: the original never finalized the container.
    av_write_trailer(outputContext);

End:
    [self closeInput:0];
    [self closeInput:1];
    [self closeOutput];
    printf("Transcode file end !");
    return 0;
}

相关文章

  • ffmpeg命令基础

    ffmpeg 命令 去水印命令 快影去水印ffmpeg命令(建议二) 命令一: 命令二:show=1 调试 视频加...

  • FFmpeg滤镜(3)

    FFmpeg为视频加图片水印 1、movie滤镜 如图,通过movie读取图片文件作为水印,显示在x坐标30、y坐...

  • FFmpeg滤镜(2)

    FFmpeg为视频加文字水印 1、准备条件 需要提前准备好环境:1、下载安装freetype-2.9。2、下载fo...

  • mac ffmpeg简单应用

    homebrew安装ffmpeg(附带ffplay)加水印 去水印(马赛克) 用播放器播放视频 ,找出水印的具体位...

  • FFmpeg滤镜(9)

    FFmpeg视频抠图合并 FFmpeg除了半透明、透明水印处理外,还支持视频抠图与背景视频合并的操作——chrom...

  • FFMpeg常用指令

    水印篇 1、添加水印ffmpeg官方提供了另外一种添加图片水印的方法在视频右下角的添加图片水印 在视频左下角添加图...

  • ffmpeg视频加水印

    int64_t lastPts;int64_t lastDts;int64_t la...

  • FFmpeg 学习4 -- 加水印--使用文本加水印;

    加实时时间的水印: ffmpeg -i dy.mp4 -vf drawtext=fontcolor=white:f...

  • FFmpeg功能命令集合

    前言 如此强大的FFmpeg,能够实现视频采集、视频格式转化、视频截图、视频添加水印、视频切片、视频录制、视频推流...

  • FFmpeg功能命令汇总

    前言 如此强大的FFmpeg,能够实现视频采集、视频格式转化、视频截图、视频添加水印、视频切片、视频录制、视频推流...

网友评论

      本文标题:ffmpeg视频加水印

      本文链接:https://www.haomeiwen.com/subject/cybpeftx.html