当前位置:网站首页>Add watermark to ffmpeg video
Add watermark to ffmpeg video
2022-07-26 04:29:00 【Mr.codeee】
1. brief introduction
This example adds a logo image (watermark) to a video and saves the watermarked frames locally.
2. technological process

2.1 Open the input file
First open the input video file, find the video stream index and the corresponding video decoder, copy the important stream parameters into the decoder context, and finally open the decoder.
//av_register_all();
avformat_network_init();
/// Open the input stream
int ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL);
if (ret != 0)
{
printf("Couldn't open input stream.\n");
return -1;
}
// Find stream information
if (avformat_find_stream_info(fmt_ctx, NULL) < 0)
{
printf("Couldn't find stream information.\n");
return -1;
}
// Find the video stream index
video_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
AVStream* st = fmt_ctx->streams[video_index];
AVCodec* codec = nullptr;
// Find the decoder
codec = avcodec_find_decoder(st->codecpar->codec_id);
if (!codec)
{
fprintf(stderr, "Codec not found\n");
return -1;
}
// apply AVCodecContext
dec_ctx = avcodec_alloc_context3(codec);
if (!dec_ctx)
{
return -1;
}
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_index]->codecpar);
// Turn on the decoder
if ((ret = avcodec_open2(dec_ctx, codec, NULL) < 0))
{
return -1;
}
return 0;
2.2 Initialize filter
2.2.1 Get the source of filter processing
Obtain the filter source ("buffer") and sink ("buffersink") filters, and allocate the AVFilterInOut structures that describe the filter graph's input and output.
const AVFilter* buffersrc = avfilter_get_by_name("buffer");
const AVFilter* buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut* outputs = avfilter_inout_alloc();
AVFilterInOut* inputs = avfilter_inout_alloc();
2.2.2 Handle AVFilterGraph
After the required AVFilter and AVFilterInOut structures have been allocated, an AVFilterGraph must be allocated to hold the filters' "in" and "out" description information.
AVFilterGraph* filter_graph = NULL;
filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph)
{
ret = AVERROR(ENOMEM);
return ret;
}
2.2.3 Create the AVFilterContext
Next, create AVFilterContext structures to hold each filter's processing state, including the input and output filter information. When building the input, parameters of the original video — such as pix_fmt and time_base — must be supplied.
First enter the parameters :
char args[512];
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
time_base.num, time_base.den,
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
Then create the AVFilterContext:
AVFilterContext* buffersink_ctx = NULL;
AVFilterContext* buffersrc_ctx = NULL;
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
return ret;
}
/* buffer video sink: to terminate the filter chain. */
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
return ret;
}
2.2.4 Set other parameters
After the input and output AVFilterContext are created, other filter-related parameters can be set with av_opt_set_int_list — for example, the output pix_fmt.
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
return ret;
}
2.2.5 Build the filter parser
After parameter setting , It can be set for Filter Related content to establish filter parser .
const char* filter_descr = "movie=logo.jpg[wm];[in][wm]overlay=5:5[out]";
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
&inputs, &outputs, NULL)) < 0)
{
return ret;
}
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
{
return ret;
}
2.3 Read the data, decode it, and obtain the watermarked frames.
while (av_read_frame(fmt_ctx, pkt) >= 0)
{
if (pkt->stream_index == video_index)
{
int ret = avcodec_send_packet(dec_ctx, pkt);
if (ret >= 0)
{
ret = avcodec_receive_frame(dec_ctx, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
continue;
}
else if (ret < 0)
{
continue;
}
frame->pts = frame->best_effort_timestamp;
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
{
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
break;
}
/* pull filtered frames from the filtergraph */
while (1)
{
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
break;
switch (dec_ctx->pix_fmt)
{
case AV_PIX_FMT_YUV420P:
{
int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, filt_frame->width, filt_frame->height, 1);
char fileName[20] = { 0 };
sprintf(fileName, "img2/%d.yuv", dec_ctx->frame_number);
FILE* fp;
fp = fopen(fileName, "wb");
for (int i = 0; i < filt_frame->height; i++)
{
fwrite(filt_frame->data[0] + filt_frame->linesize[0] * i, 1, filt_frame->width, fp);
}
for (int i = 0; i < filt_frame->height / 2; i++)
{
fwrite(filt_frame->data[1] + filt_frame->linesize[1] * i, 1, filt_frame->width / 2, fp);
}
for (int i = 0; i < filt_frame->height / 2; i++)
{
fwrite(filt_frame->data[2] + filt_frame->linesize[2] * i, 1, filt_frame->width / 2, fp);
}
fclose(fp);
}
break;
default:
return -1;
}
av_frame_unref(filt_frame);
}
av_frame_unref(frame);
}
}
}
3. Effect
The logo image is as follows

The resulting frames are shown below; you can see the logo has been added in the upper-left corner.

4. Source code
#include "pch.h"
#include <iostream>
#include <Windows.h>
extern "C"
{
#include "libavformat/avformat.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "libavutil/avutil.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavutil/imgutils.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
};
/* Demuxer context for the input file (initialized by open_input_file). */
static AVFormatContext* fmt_ctx = NULL;
/* Decoder context for the selected video stream. */
static AVCodecContext* dec_ctx = NULL;
/* Filter-graph endpoints: decoded frames are pushed in through
 * buffersrc_ctx and pulled out, watermarked, through buffersink_ctx. */
AVFilterContext* buffersink_ctx = NULL;
AVFilterContext* buffersrc_ctx = NULL;
AVFilterGraph* filter_graph = NULL;
/* Index of the video stream inside fmt_ctx->streams (-1 until found). */
int video_index = -1;
/* Filter description: load logo.jpg and overlay it at position (5,5). */
const char* filter_descr = "movie=logo.jpg[wm];[in][wm]overlay=5:5[out]";
/* NOTE(review): last_pts is never read or written in this file. */
static int64_t last_pts = AV_NOPTS_VALUE;
/*
 * Open the input media file, locate the best video stream, and open a
 * decoder for it.
 *
 * On success, the file-scope globals fmt_ctx, dec_ctx and video_index are
 * initialized and 0 is returned; any failure returns -1.
 */
static int open_input_file(const char* filename)
{
	//av_register_all(); // deprecated since FFmpeg 4.0, no longer needed
	avformat_network_init();
	/// Open the input stream
	int ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL);
	if (ret != 0)
	{
		printf("Couldn't open input stream.\n");
		return -1;
	}
	// Read a few packets to fill in stream information
	if (avformat_find_stream_info(fmt_ctx, NULL) < 0)
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}
	// Find the video stream index.
	// BUG FIX: the result was previously used unchecked; a file without a
	// video stream would have indexed streams[] with a negative value.
	video_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if (video_index < 0)
	{
		fprintf(stderr, "No video stream found\n");
		return -1;
	}
	AVStream* st = fmt_ctx->streams[video_index];
	// Find the decoder for the stream's codec
	AVCodec* codec = avcodec_find_decoder(st->codecpar->codec_id);
	if (!codec)
	{
		fprintf(stderr, "Codec not found\n");
		return -1;
	}
	// Allocate the decoder context
	dec_ctx = avcodec_alloc_context3(codec);
	if (!dec_ctx)
	{
		return -1;
	}
	// Copy the demuxer's codec parameters into the decoder context.
	// BUG FIX: the return value was previously ignored.
	if (avcodec_parameters_to_context(dec_ctx, st->codecpar) < 0)
	{
		return -1;
	}
	// Open the decoder.
	// BUG FIX: the original wrote `(ret = avcodec_open2(...) < 0)`, which
	// assigns the *comparison result* (0 or 1) to ret due to operator
	// precedence; the parentheses are now placed correctly.
	if ((ret = avcodec_open2(dec_ctx, codec, NULL)) < 0)
	{
		return -1;
	}
	return 0;
}
/*
 * Build the filter graph described by filters_descr
 * (e.g. "movie=logo.jpg[wm];[in][wm]overlay=5:5[out]").
 *
 * Creates the "buffer" source (fed with decoded frames) and the
 * "buffersink" output (constrained to YUV420P), then parses the textual
 * description and configures the graph. Requires open_input_file() to have
 * run first (uses fmt_ctx, dec_ctx, video_index).
 *
 * Returns 0 on success or a negative AVERROR code on failure.
 */
static int init_filters(const char* filters_descr)
{
	char args[512];
	int ret = 0;
	const AVFilter* buffersrc = avfilter_get_by_name("buffer");
	const AVFilter* buffersink = avfilter_get_by_name("buffersink");
	AVFilterInOut* outputs = avfilter_inout_alloc();
	AVFilterInOut* inputs = avfilter_inout_alloc();
	AVRational time_base = fmt_ctx->streams[video_index]->time_base;
	// Constrain the sink output to YUV420P (terminated list for av_opt_set_int_list)
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
	filter_graph = avfilter_graph_alloc();
	if (!outputs || !inputs || !filter_graph)
	{
		ret = AVERROR(ENOMEM);
		goto end;
	}
	/* buffer video source: the decoded frames from the decoder will be inserted here. */
	snprintf(args, sizeof(args),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
		time_base.num, time_base.den,
		dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
	ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
		args, NULL, filter_graph);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
		goto end;
	}
	/* buffer video sink: to terminate the filter chain. */
	ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
		NULL, NULL, filter_graph);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
		goto end;
	}
	ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
		AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
		goto end;
	}
	// "outputs" describes the graph input (our buffer source)...
	outputs->name = av_strdup("in");
	outputs->filter_ctx = buffersrc_ctx;
	outputs->pad_idx = 0;
	outputs->next = NULL;
	// ...and "inputs" describes the graph output (our buffer sink).
	inputs->name = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx = 0;
	inputs->next = NULL;
	if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
		&inputs, &outputs, NULL)) < 0)
	{
		goto end;
	}
	ret = avfilter_graph_config(filter_graph, NULL);
end:
	// BUG FIX: the original returned early on every error without freeing
	// inputs/outputs, leaking them. Freeing here covers all paths — the
	// standard FFmpeg example pattern (avfilter_inout_free is NULL-safe).
	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs);
	return ret;
}
int main()
{
///1. Open file
const char* inputUrl = "test.mp4";
int ret = -1;
if ((ret = open_input_file(inputUrl) < 0))
{
return -1;
}
///2. Initialize filter
if ((ret = init_filters(filter_descr)) < 0)
{
return -1;
}
AVPacket* pkt = av_packet_alloc();
//av_init_packet(pkt);
AVFrame* frame = av_frame_alloc();
AVFrame *filt_frame = av_frame_alloc();
while (av_read_frame(fmt_ctx, pkt) >= 0)
{
if (pkt->stream_index == video_index)
{
int ret = avcodec_send_packet(dec_ctx, pkt);
if (ret >= 0)
{
ret = avcodec_receive_frame(dec_ctx, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
continue;
}
else if (ret < 0)
{
continue;
}
frame->pts = frame->best_effort_timestamp;
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
{
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
break;
}
/* pull filtered frames from the filtergraph */
while (1)
{
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
break;
switch (dec_ctx->pix_fmt)
{
case AV_PIX_FMT_YUV420P:
{
int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, filt_frame->width, filt_frame->height, 1);
char fileName[20] = { 0 };
sprintf(fileName, "img2/%d.yuv", dec_ctx->frame_number);
FILE* fp;
fp = fopen(fileName, "wb");
for (int i = 0; i < filt_frame->height; i++)
{
fwrite(filt_frame->data[0] + filt_frame->linesize[0] * i, 1, filt_frame->width, fp);
}
for (int i = 0; i < filt_frame->height / 2; i++)
{
fwrite(filt_frame->data[1] + filt_frame->linesize[1] * i, 1, filt_frame->width / 2, fp);
}
for (int i = 0; i < filt_frame->height / 2; i++)
{
fwrite(filt_frame->data[2] + filt_frame->linesize[2] * i, 1, filt_frame->width / 2, fp);
}
fclose(fp);
}
break;
default:
return -1;
}
av_frame_unref(filt_frame);
}
av_frame_unref(frame);
}
}
}
avfilter_graph_free(&filter_graph);
avcodec_close(dec_ctx);
avcodec_free_context(&dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_frame_free(&filt_frame);
av_packet_free(&pkt);
return 0;
}
边栏推荐
猜你喜欢

Scroll view pull-down refresh and pull-up load (bottom)

Low cost, fast and efficient construction of digital collection app and H5 system, professional development of scallop technology is more assured!

机器学习之桑基图(用于用户行为分析)

Comprehensive evaluation and decision-making method

Acwing_ 12. Find a specific solution for the knapsack problem_ dp

Analyzing the curriculum design evaluation system of steam Education

力扣每日一题-第42天-661. 图片平滑器
![[C language foundation] 13 preprocessor](/img/4c/ab25d88e9a0cf29bde6e33a2b14225.jpg)
[C language foundation] 13 preprocessor

Acwing brush questions

Optimization analysis and efficiency execution of MySQL
随机推荐
Acwing brush questions
生活相关——一个华科研究生导师的肺腑之言(主要适用于理工科)
UE4 通过按键控制物体的旋转
1. Mx6u-alpha development board (GPIO interrupt experiment)
Phaser(一):平台跳跃收集游戏
10、 Interceptor
UE4 靠近物体时显示文字,远离时文字消失
dijango学习
Dijikstra (preprocessing first) +dfs, relocation truncated to fit
UE4 键盘控制开关灯
UE4 获取玩家控制权的两种方式
这种是我的vs没连上数据库吗
Credit card fraud detection based on machine learning
吴恩达机器学习课后习题——逻辑回归
The auxiliary role of rational cognitive educational robot in teaching and entertainment
吴恩达机器学习课后习题——线性回归
egg-ts-sequelize-CLI
Function knowledge points
A series of problems about the number of DP paths
Scroll view pull-down refresh and pull-up load (bottom)