当前位置:网站首页>Add watermark to ffmpeg video
Add watermark to ffmpeg video
2022-07-26 04:29:00 【Mr.codeee】
1. brief introduction
This example adds a logo image to a video and saves the watermarked frames locally.
2. technological process

2.1 Open the input file
First open the input video file, find the video stream index, find the corresponding video decoder, copy the important stream parameters into the decoder context, and finally open the decoder.
//av_register_all();
avformat_network_init();
/// Open the input stream
int ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL);
if (ret != 0)
{
printf("Couldn't open input stream.\n");
return -1;
}
// Find stream information
if (avformat_find_stream_info(fmt_ctx, NULL) < 0)
{
printf("Couldn't find stream information.\n");
return -1;
}
// Find the video stream index
video_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
AVStream* st = fmt_ctx->streams[video_index];
AVCodec* codec = nullptr;
// Find the decoder
codec = avcodec_find_decoder(st->codecpar->codec_id);
if (!codec)
{
fprintf(stderr, "Codec not found\n");
return -1;
}
// apply AVCodecContext
dec_ctx = avcodec_alloc_context3(codec);
if (!dec_ctx)
{
return -1;
}
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_index]->codecpar);
// Turn on the decoder
if ((ret = avcodec_open2(dec_ctx, codec, NULL) < 0))
{
return -1;
}
return 0;
2.2 Initialize filter
2.2.1 Get the source of filter processing
Obtain the buffer source and buffer sink filters, and allocate the AVFilterInOut structures used to describe the graph's input and output.
const AVFilter* buffersrc = avfilter_get_by_name("buffer");
const AVFilter* buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut* outputs = avfilter_inout_alloc();
AVFilterInOut* inputs = avfilter_inout_alloc();
2.2.2 Handle AVFilterGraph
After the required AVFilter and AVFilterInOut structures have been allocated, an AVFilterGraph must be allocated to store the filters' input and output description information.
AVFilterGraph* filter_graph = NULL;
filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph)
{
ret = AVERROR(ENOMEM);
return ret;
}
2.2.3 Create the AVFilterContext
Next, create a AVFilterContext Structure is used to store Filter The processing content of , Include input And output Of Filter Information , Creating input Information , You need to add information about the original video , such as pix_fmt、time_base etc. .
First enter the parameters :
char args[512];
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
time_base.num, time_base.den,
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
Then create the AVFilterContext:
AVFilterContext* buffersink_ctx = NULL;
AVFilterContext* buffersrc_ctx = NULL;
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
return ret;
}
/* buffer video sink: to terminate the filter chain. */
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
return ret;
}
2.2.4 Set other parameters
Create input and output AVFilterContext after , If you need to set some other and Filter Related parameters . By using av_opt_set_int_list Set it up , For example, set the output pix_fmt Parameters .
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
return ret;
}
2.2.5 Build the filter parser
After parameter setting , It can be set for Filter Related content to establish filter parser .
const char* filter_descr = "movie=logo.jpg[wm];[in][wm]overlay=5:5[out]";
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
&inputs, &outputs, NULL)) < 0)
{
return ret;
}
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
{
return ret;
}
2.3 Read the data, decode it, and obtain the watermarked frames.
while (av_read_frame(fmt_ctx, pkt) >= 0)
{
if (pkt->stream_index == video_index)
{
int ret = avcodec_send_packet(dec_ctx, pkt);
if (ret >= 0)
{
ret = avcodec_receive_frame(dec_ctx, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
continue;
}
else if (ret < 0)
{
continue;
}
frame->pts = frame->best_effort_timestamp;
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
{
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
break;
}
/* pull filtered frames from the filtergraph */
while (1)
{
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
break;
switch (dec_ctx->pix_fmt)
{
case AV_PIX_FMT_YUV420P:
{
int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, filt_frame->width, filt_frame->height, 1);
char fileName[20] = { 0 };
sprintf(fileName, "img2/%d.yuv", dec_ctx->frame_number);
FILE* fp;
fp = fopen(fileName, "wb");
for (int i = 0; i < filt_frame->height; i++)
{
fwrite(filt_frame->data[0] + filt_frame->linesize[0] * i, 1, filt_frame->width, fp);
}
for (int i = 0; i < filt_frame->height / 2; i++)
{
fwrite(filt_frame->data[1] + filt_frame->linesize[1] * i, 1, filt_frame->width / 2, fp);
}
for (int i = 0; i < filt_frame->height / 2; i++)
{
fwrite(filt_frame->data[2] + filt_frame->linesize[2] * i, 1, filt_frame->width / 2, fp);
}
fclose(fp);
}
break;
default:
return -1;
}
av_frame_unref(filt_frame);
}
av_frame_unref(frame);
}
}
}
3. Effect
logo The graph is as follows

The renderings are as follows , You can see that it is added to the upper left corner .

4. Source code
#include "pch.h"
#include <iostream>
#include <Windows.h>
extern "C"
{
#include "libavformat/avformat.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "libavutil/avutil.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavutil/imgutils.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
};
static AVFormatContext* fmt_ctx = NULL;   // demuxer context for the input file
static AVCodecContext* dec_ctx = NULL;    // decoder context for the video stream
AVFilterContext* buffersink_ctx = NULL;   // filter graph output ("out" / buffersink)
AVFilterContext* buffersrc_ctx = NULL;    // filter graph input ("in" / buffer source)
AVFilterGraph* filter_graph = NULL;       // graph that runs the movie+overlay filters
int video_index = -1;                     // index of the video stream inside fmt_ctx
// Filter description: load logo.jpg ([wm]) and overlay it at (5,5) on each input frame.
const char* filter_descr = "movie=logo.jpg[wm];[in][wm]overlay=5:5[out]";
static int64_t last_pts = AV_NOPTS_VALUE; // NOTE(review): unused here; leftover from the upstream FFmpeg example
static int open_input_file(const char* filename)
{
//av_register_all();
avformat_network_init();
/// Open the input stream
int ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL);
if (ret != 0)
{
printf("Couldn't open input stream.\n");
return -1;
}
// Find stream information
if (avformat_find_stream_info(fmt_ctx, NULL) < 0)
{
printf("Couldn't find stream information.\n");
return -1;
}
// Find the video stream index
video_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
AVStream* st = fmt_ctx->streams[video_index];
AVCodec* codec = nullptr;
// Find the decoder
codec = avcodec_find_decoder(st->codecpar->codec_id);
if (!codec)
{
fprintf(stderr, "Codec not found\n");
return -1;
}
// apply AVCodecContext
dec_ctx = avcodec_alloc_context3(codec);
if (!dec_ctx)
{
return -1;
}
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_index]->codecpar);
// Turn on the decoder
if ((ret = avcodec_open2(dec_ctx, codec, NULL) < 0))
{
return -1;
}
return 0;
}
/**
 * Build the filter graph: buffer source ("in") -> filters_descr -> buffersink ("out").
 * Requires fmt_ctx / dec_ctx / video_index to be initialised by open_input_file().
 * On success the globals buffersrc_ctx, buffersink_ctx and filter_graph are set.
 *
 * @param filters_descr  filter chain, e.g. "movie=logo.jpg[wm];[in][wm]overlay=5:5[out]"
 * @return 0 on success, a negative AVERROR code on failure
 */
static int init_filters(const char* filters_descr)
{
    char args[512];
    int ret = 0;
    const AVFilter* buffersrc = avfilter_get_by_name("buffer");
    const AVFilter* buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut* outputs = avfilter_inout_alloc();
    AVFilterInOut* inputs = avfilter_inout_alloc();
    AVRational time_base = fmt_ctx->streams[video_index]->time_base;
    // Restrict the sink to YUV420P so the frame dump in main() is always valid
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };

    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph)
    {
        ret = AVERROR(ENOMEM);
        goto end;   // FIX: previously returned directly, leaking inputs/outputs
    }
    /* buffer video source: the decoded frames from the decoder will be inserted here.
     * The source must know the original video's geometry, pixel format and timing. */
    snprintf(args, sizeof(args),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
        dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
        time_base.num, time_base.den,
        dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
        args, NULL, filter_graph);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
        goto end;
    }
    /* buffer video sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
        NULL, NULL, filter_graph);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        goto end;
    }
    // Constrain the output pixel format of the sink
    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
        AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
        goto end;
    }
    /* The graph parser sees the description from the outside:
     * "outputs" feeds the chain ("in"), "inputs" drains it ("out"). */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
        &inputs, &outputs, NULL)) < 0)
    {
        goto end;
    }
    ret = avfilter_graph_config(filter_graph, NULL);

end:
    // FIX: every error path now releases the AVFilterInOut lists; previously
    // they were only freed on the success path, leaking on each failure.
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}
int main()
{
///1. Open file
const char* inputUrl = "test.mp4";
int ret = -1;
if ((ret = open_input_file(inputUrl) < 0))
{
return -1;
}
///2. Initialize filter
if ((ret = init_filters(filter_descr)) < 0)
{
return -1;
}
AVPacket* pkt = av_packet_alloc();
//av_init_packet(pkt);
AVFrame* frame = av_frame_alloc();
AVFrame *filt_frame = av_frame_alloc();
while (av_read_frame(fmt_ctx, pkt) >= 0)
{
if (pkt->stream_index == video_index)
{
int ret = avcodec_send_packet(dec_ctx, pkt);
if (ret >= 0)
{
ret = avcodec_receive_frame(dec_ctx, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
continue;
}
else if (ret < 0)
{
continue;
}
frame->pts = frame->best_effort_timestamp;
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
{
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
break;
}
/* pull filtered frames from the filtergraph */
while (1)
{
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
break;
switch (dec_ctx->pix_fmt)
{
case AV_PIX_FMT_YUV420P:
{
int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, filt_frame->width, filt_frame->height, 1);
char fileName[20] = { 0 };
sprintf(fileName, "img2/%d.yuv", dec_ctx->frame_number);
FILE* fp;
fp = fopen(fileName, "wb");
for (int i = 0; i < filt_frame->height; i++)
{
fwrite(filt_frame->data[0] + filt_frame->linesize[0] * i, 1, filt_frame->width, fp);
}
for (int i = 0; i < filt_frame->height / 2; i++)
{
fwrite(filt_frame->data[1] + filt_frame->linesize[1] * i, 1, filt_frame->width / 2, fp);
}
for (int i = 0; i < filt_frame->height / 2; i++)
{
fwrite(filt_frame->data[2] + filt_frame->linesize[2] * i, 1, filt_frame->width / 2, fp);
}
fclose(fp);
}
break;
default:
return -1;
}
av_frame_unref(filt_frame);
}
av_frame_unref(frame);
}
}
}
avfilter_graph_free(&filter_graph);
avcodec_close(dec_ctx);
avcodec_free_context(&dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_frame_free(&filt_frame);
av_packet_free(&pkt);
return 0;
}
边栏推荐
- Steam科学教育赋予课堂教学的创造力
- Weights & Biases (二)
- 低成本、快速、高效搭建数字藏品APP、H5系统,扇贝科技专业开发更放心!
- 1. Mx6u system migration-6-uboot graphical configuration
- 生活相关——十年的职业历程(转)
- Can literature | relationship research draw causal conclusions
- Life related - ten years of career experience (turn)
- What are the consequences and problems of computer system restoration
- How does win11 set the theme color of the status bar? Win11 method of setting theme color of status bar
- MySQL的优化分析及效率执行
猜你喜欢

Sangi diagram of machine learning (for user behavior analysis)

支持代理直连Oracle数据库,JumpServer堡垒机v2.24.0发布

Unable to find sygwin.s file during vscode debugging

A series of problems about the number of DP paths

7、 Restful

idea插件离线安装(持续更新)

VM virtual machine has no un bridged host network adapter, unable to restore the default configuration

Huawei issued another global convening order of "genius youth", and some people once gave up their annual salary of 3.6 million to join

Steam科学教育赋予课堂教学的创造力

MySQL - multi table query - Cartesian product sum, correct multi table query, equivalent connection and unequal connection, inner connection and outer connection
随机推荐
2022杭电多校第二场 A.Static Query on Tree(树剖)
Matlab drawing
Use Baidu PaddlePaddle easydl to complete garbage classification
Li Kou daily question - day 42 -661. Picture smoother
数组排序3
How to write the introduction and conclusion of an overview paper?
Keil v5安装和使用
How does win11 22h2 skip networking and Microsoft account login?
5、 Domain objects share data
Sweet butter
Sangi diagram of machine learning (for user behavior analysis)
How does win11 change the power mode? Win11 method of changing power mode
Cnosdb Nirvana Rebirth: abandon go and fully embrace rust
Postman 导入curl 、导出成curl、导出成对应语言代码
qt编译报错整理及Remote模块下载
Soft simulation rasterization renderer
远坂凛壁纸
The difference between positive samples, negative samples, simple samples and difficult samples in deep learning (simple and easy to understand)
吴恩达机器学习课后习题——线性回归
生活相关——十年的职业历程(转)