1. Create the graph
/**
* Allocate a filter graph.
*
* @return the allocated filter graph on success or NULL.
*/
AVFilterGraph *avfilter_graph_alloc(void);
This is essentially a blueprint: every filter operation is carried out according to this graph.
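A minimal sketch of this step (an illustration only; error handling is reduced to a log call, and the names mirror the complete program further down):
#include <libavfilter/avfilter.h>

AVFilterGraph *graph = avfilter_graph_alloc();   // allocate an empty filter graph
if (!graph)
{
    av_log(NULL, AV_LOG_ERROR, "avfilter_graph_alloc fail");
    // allocation failed; the complete program below jumps to its error label here
}
// ... add filters, parse the description, configure the graph ...
avfilter_graph_free(&graph);   // frees the graph together with every filter that was added to it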
2. Create the buffer filter context
From the graph described above we know that the decoded frames first have to be stored in the buffer filter,
so we start by creating the buffer filter's context.
// Step 2: create buffer_filter_ctx
const AVFilter *bufsrc = avfilter_get_by_name("buffer");
if (!bufsrc)
{
av_log(NULL, AV_LOG_ERROR, "avfilter_get_by_name fail");
goto __INIT_FILTER_ERROR;
}
// the args values come from: ffmpeg -h filter=buffer
snprintf(args,
512,
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
video_codec_par->width,
video_codec_par->height,
video_codec_par->format,
time_base.num,
time_base.den,
video_codec_ctx->sample_aspect_ratio.num,
video_codec_ctx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffer_filter_ctx, bufsrc, "in", args, NULL, graph);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "avfilter_graph_create_filter buffer_filter_ctx %s", av_err2str(ret));
goto __INIT_FILTER_ERROR;
}
const AVFilter *bufsrc = avfilter_get_by_name("buffer");
avfilter_get_by_name gives us the buffer filter easily.
Once we have it, we create the filter instance and insert it into the graph with
/**
* Create and add a filter instance into an existing graph.
* The filter instance is created from the filter filt and inited
* with the parameter args. opaque is currently ignored.
*
* In case of success put in *filt_ctx the pointer to the created
* filter instance, otherwise set *filt_ctx to NULL.
*
* @param name the instance name to give to the created filter instance
* @param graph_ctx the filter graph
* @return a negative AVERROR error code in case of failure, a non
* negative value otherwise
*/
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
const char *name, const char *args, void *opaque,
AVFilterGraph *graph_ctx);
filt_ctx: on success, receives the context of the created buffer filter
filt: the filter we obtained with avfilter_get_by_name("buffer")
name: the instance name; the buffer filter serves as the input, so we call it "in"
args: the options we want to set on the buffer filter
Let's inspect the buffer filter's options with ffmpeg -h filter=buffer:
Filter buffer
Buffer video frames, and make them accessible to the filterchain.
Inputs:
none (source filter)
Outputs:
#0: default (video)
buffer AVOptions:
width <int> ..FV....... (from 0 to INT_MAX) (default 0)
video_size <image_size> ..FV.......
height <int> ..FV....... (from 0 to INT_MAX) (default 0)
pix_fmt <pix_fmt> ..FV....... (default none)
sar <rational> ..FV....... sample aspect ratio (from 0 to DBL_MAX) (default 0/1)
pixel_aspect <rational> ..FV....... sample aspect ratio (from 0 to DBL_MAX) (default 0/1)
time_base <rational> ..FV....... (from 0 to DBL_MAX) (default 0/1)
frame_rate <rational> ..FV....... (from 0 to DBL_MAX) (default 0/1)
sws_param <string> ..FV.......
So we build args as follows:
snprintf(args,
512,
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
video_codec_par->width,
video_codec_par->height,
video_codec_par->format,
time_base.num,
time_base.den,
video_codec_ctx->sample_aspect_ratio.num,
video_codec_ctx->sample_aspect_ratio.den);
opaque: currently unused, pass NULL
graph_ctx: the graph we created in step 1
3. Create the buffersink filter
The filtered data ends up in the buffersink filter.
As with the buffer filter, we pass "buffersink" to avfilter_get_by_name to obtain the buffersink filter:
const AVFilter *bufsink = avfilter_get_by_name("buffersink");
We also need avfilter_graph_create_filter to insert bufsink into the graph, as sketched below.
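A sketch of that call, reusing the names from the complete program further down (bufsink from avfilter_get_by_name above, buffer_shink_filter_ctx for the resulting context). args is NULL here because buffersink's only option, pix_fmts, is binary and is set afterwards with av_opt_set_int_list:
ret = avfilter_graph_create_filter(&buffer_shink_filter_ctx, // receives the created sink context
                                   bufsink,                  // the buffersink filter
                                   "out",                    // instance name: this is the graph output
                                   NULL,                     // no string options
                                   NULL,                     // opaque, currently unused
                                   graph);
if (ret < 0)
{
    av_log(NULL, AV_LOG_ERROR, "avfilter_graph_create_filter buffer_shink_filter_ctx %s", av_err2str(ret));
    goto __INIT_FILTER_ERROR;
}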
Now let's look at the buffersink options:
Filter buffersink
Buffer video frames, and make them available to the end of the filter graph.
Inputs:
#0: default (video)
Outputs:
none (sink filter)
buffersink AVOptions:
pix_fmts <binary> ..FV....... set the supported pixel formats
So it takes a single option, pix_fmts, of type binary.
How do we set it?
enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE};
av_opt_set_int_list(buffer_shink_filter_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
The fourth argument of av_opt_set_int_list, AV_PIX_FMT_NONE, is the terminator of the pix_fmts array.
AV_OPT_SEARCH_CHILDREN means the option lookup also searches the child objects of the filter context.
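For reference, av_opt_set_int_list is not a function but a convenience macro from libavutil/opt.h: it measures the terminated list and forwards it to av_opt_set_bin. A roughly equivalent direct call (a sketch only, this time checking the return value, which the line above omits) would be:
#include <libavutil/opt.h>

ret = av_opt_set_bin(buffer_shink_filter_ctx,
                     "pix_fmts",
                     (const uint8_t *)pix_fmts,
                     sizeof(pix_fmts) - sizeof(pix_fmts[0]), // size in bytes, not counting the AV_PIX_FMT_NONE terminator
                     AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
    av_log(NULL, AV_LOG_ERROR, "av_opt_set_bin pix_fmts %s", av_err2str(ret));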
4. Parse the filter description and add it to the AVFilterGraph
/**
* Add a graph described by a string to a graph.
*
* In the graph filters description, if the input label of the first
* filter is not specified, "in" is assumed; if the output label of
* the last filter is not specified, "out" is assumed.
*
* @param graph the filter graph where to link the parsed graph context
* @param filters string to be parsed
* @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
* If non-NULL, *inputs is updated to contain the list of open inputs
* after the parsing, should be freed with avfilter_inout_free().
* @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
* If non-NULL, *outputs is updated to contain the list of open outputs
* after the parsing, should be freed with avfilter_inout_free().
* @return non negative on success, a negative AVERROR code on error
*/
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
AVFilterInOut **inputs, AVFilterInOut **outputs,
void *log_ctx);
filters: the filter chain we want to apply, e.g. "drawbox=x=30:y=10:w=64:h=64:c=red"
inputs and outputs deserve special attention:
inputs describes the filter that the processed data is delivered to (our buffersink, labelled "out");
outputs describes the filter that feeds the data in before processing (our buffer source, labelled "in").
Hence the wiring code below (the parse call itself is sketched right after it):
input->name = av_strdup("out");
input->filter_ctx = buffer_shink_filter_ctx;
input->pad_idx = 0;
input->next = NULL;
output->name = av_strdup("in");
output->filter_ctx = buffer_filter_ctx;
output->pad_idx = 0;
output->next = NULL;
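The parse call itself then looks like the fragment below (it appears again in the complete program). Note that once parsing has succeeded, the two AVFilterInOut wrappers should be released with avfilter_inout_free, since avfilter_graph_parse_ptr only needs them during parsing:
ret = avfilter_graph_parse_ptr(graph, filter_desc, &input, &output, NULL);
if (ret < 0)
{
    av_log(NULL, AV_LOG_ERROR, "avfilter_graph_parse_ptr %s", av_err2str(ret));
    goto __INIT_FILTER_ERROR;
}
avfilter_inout_free(&input);    // the wrappers are no longer needed after parsing
avfilter_inout_free(&output);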
5. Validate and activate the built AVFilterGraph
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx);
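A short sketch of this step; the avfilter_graph_dump call is optional and only added here as a debugging aid (it is not part of the program below):
ret = avfilter_graph_config(graph, NULL);   // check the graph's validity and configure all links and formats
if (ret < 0)
{
    av_log(NULL, AV_LOG_ERROR, "avfilter_graph_config %s", av_err2str(ret));
    goto __INIT_FILTER_ERROR;
}

// optional: print the negotiated graph for debugging
char *dump = avfilter_graph_dump(graph, NULL);
if (dump)
{
    av_log(NULL, AV_LOG_INFO, "%s\n", dump);
    av_free(dump);
}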
The complete code follows:
#include "filter1.h"
static AVFormatContext *fmt_ctx = NULL;
static AVCodec *video_codec = NULL;
static AVCodecParameters *video_codec_par = NULL;
static AVCodecContext *video_codec_ctx = NULL;
static int video_stream_index = -1;
static int open_input_file(const char *path)
{
int ret = 0;
ret = avformat_open_input(&fmt_ctx, path, NULL, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "avformat_open_input failed path=%s", path);
return ret;
}
ret = avformat_find_stream_info(fmt_ctx, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "avformat_find_stream_info failed");
return ret;
}
av_dump_format(fmt_ctx, 0, path, 0);
video_stream_index = av_find_best_stream(fmt_ctx,
AVMEDIA_TYPE_VIDEO,
-1,
-1,
&video_codec,
0);
if (video_stream_index < 0)
{
av_log(NULL, AV_LOG_ERROR, "av_find_best_stream failed");
ret = -1;
return ret;
}
video_codec_par = fmt_ctx->streams[video_stream_index]->codecpar;
video_codec_ctx = avcodec_alloc_context3(video_codec);
ret = avcodec_parameters_to_context(video_codec_ctx, video_codec_par);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "avcodec_parameters_to_context failed");
return ret;
}
ret = avcodec_open2(video_codec_ctx, video_codec, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "avcodec_open2 failed");
return ret;
}
return ret;
}
static int init_filters(const char *filter_desc)
{
int ret = -1;
// the filter graph and the objects built on it
AVFilterGraph *graph = NULL;
AVFilterInOut *input = NULL;
AVFilterInOut *output = NULL;
AVFilterContext *buffer_filter_ctx = NULL;
AVFilterContext *buffer_shink_filter_ctx = NULL;
char args[512] = {};
// "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d"
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
// Step 1: create the graph
graph = avfilter_graph_alloc();
if (!graph)
{
av_log(NULL, AV_LOG_ERROR, "avfilter_graph_alloc fail");
goto __INIT_FILTER_ERROR;
}
// Step 2: create buffer_filter_ctx (the buffer source)
const AVFilter *bufsrc = avfilter_get_by_name("buffer");
if (!bufsrc)
{
av_log(NULL, AV_LOG_ERROR, "avfilter_get_by_name fail");
goto __INIT_FILTER_ERROR;
}
// the args values come from: ffmpeg -h filter=buffer
snprintf(args,
512,
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
video_codec_par->width,
video_codec_par->height,
video_codec_par->format,
time_base.num,
time_base.den,
video_codec_ctx->sample_aspect_ratio.num,
video_codec_ctx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffer_filter_ctx, bufsrc, "in", args, NULL, graph);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "avfilter_graph_create_filter buffer_filter_ctx %s", av_err2str(ret));
goto __INIT_FILTER_ERROR;
}
// Step 3: create the buffersink filter
const AVFilter *bufsink = avfilter_get_by_name("buffersink");
if (!bufsink)
{
av_log(NULL, AV_LOG_ERROR, "create bufsink fail");
goto __INIT_FILTER_ERROR;
}
ret = avfilter_graph_create_filter(&buffer_shink_filter_ctx, bufsink, "out", NULL, NULL, graph);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "avfilter_graph_create_filter buffer_shink_filter_ctx %s", av_err2str(ret));
goto __INIT_FILTER_ERROR;
}
// buffersink options: ffmpeg -h filter=buffersink
enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P,
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_NONE};
av_opt_set_int_list(buffer_shink_filter_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
// Step 4: create the AVFilterInOut lists
input = avfilter_inout_alloc();
output = avfilter_inout_alloc();
if (!input || !output)
{
av_log(NULL, AV_LOG_ERROR, "avfilter_inout_alloc fail");
goto __INIT_FILTER_ERROR;
}
input->name = av_strdup("out");
input->filter_ctx = buffer_shink_filter_ctx;
input->pad_idx = 0;
input->next = NULL;
output->name = av_strdup("in");
output->filter_ctx = buffer_filter_ctx;
output->pad_idx = 0;
output->next = NULL;
// Step 5: parse the filter description and add it to the AVFilterGraph
ret = avfilter_graph_parse_ptr(graph, filter_desc, &input, &output, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "avfilter_graph_parse_ptr %s", av_err2str(ret));
goto __INIT_FILTER_ERROR;
}
// Step 6: validate and activate the built AVFilterGraph
ret = avfilter_graph_config(graph, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "avfilter_graph_config %s", av_err2str(ret));
goto __INIT_FILTER_ERROR;
}
// release the AVFilterInOut wrappers; avfilter_graph_parse_ptr() no longer needs them
avfilter_inout_free(&input);
avfilter_inout_free(&output);
return 0;
__INIT_FILTER_ERROR:
if (graph)
{
avfilter_graph_free(&graph);
}
if (input)
{
avfilter_inout_free(&input);
}
if (output)
{
avfilter_inout_free(&output);
}
// buffer_filter_ctx and buffer_shink_filter_ctx were created on the graph,
// so avfilter_graph_free() above has already freed them
return ret;
}
int main(int argc, char *argv[])
{
int ret = 0;
// const char *filter_desc = "drawbox=30:10:64:64:red";
const char *filter_desc="drawbox=x=30:y=10:w=64:h=64:c=red";
const char *path = "/Users/yuanxuzhen/study/ffmpeg_study/ffmpeg/resource/in_audio.mp4";
av_log_set_level(AV_LOG_INFO);
ret = open_input_file(path);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "open_input_file fail");
goto __ERROR;
}
ret = init_filters(filter_desc);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "init_filters fail");
goto __ERROR;
}
__ERROR:
if (video_codec_ctx)
{
avcodec_close(video_codec_ctx);
avcodec_free_context(&video_codec_ctx);
}
if (fmt_ctx)
{
// avformat_close_input() also frees the context, no separate avformat_free_context() is needed
avformat_close_input(&fmt_ctx);
}
return ret;
}
With the steps above, the filter graph is fully built.
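The fragment below is only a sketch of how such a graph is normally driven and is not part of the program above: each decoded frame is pushed into the buffer source with av_buffersrc_add_frame_flags, and the filtered frames are pulled from the buffersink with av_buffersink_get_frame. frame, filt_frame, buffer_filter_ctx and buffer_shink_filter_ctx are assumed to be accessible from the decode loop (in the program above the two filter contexts are local to init_filters, so they would have to be exposed first).
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>

// push one decoded frame into the graph
ret = av_buffersrc_add_frame_flags(buffer_filter_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
if (ret < 0)
    av_log(NULL, AV_LOG_ERROR, "av_buffersrc_add_frame_flags %s", av_err2str(ret));

// pull every frame the graph has ready
while (1)
{
    ret = av_buffersink_get_frame(buffer_shink_filter_ctx, filt_frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;                    // no more output for now / end of stream
    if (ret < 0)
        break;                    // a real error
    // ... use filt_frame here, e.g. display it or feed it to an encoder ...
    av_frame_unref(filt_frame);
}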