Testing FFmpeg calls on Windows (YUV420P encoding)

1. Download the static and dynamic libraries

If you load FFmpeg at runtime (dlopen/LoadLibrary and friends), you can use the dynamic libraries directly; otherwise use the dynamic + static approach (load-time linking against the DLLs through their import libraries); or compile the static libraries straight into your executable (purely static use of the components).

https://www.gyan.dev/ffmpeg/builds/#libraries

 Adding the components

VS2022: Properties -> C/C++ -> Additional Include Directories; add the header directory there.

 Properties -> Linker -> Additional Library Directories; add the import/static library path. When running, remember to copy the DLLs next to the EXE.
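If you'd rather not list the libraries in the project settings, the import libraries can also be pulled in from code with MSVC pragmas. A small sketch; the .lib names below are the ones shipped in the gyan.dev shared package, so adjust them to whatever your build actually contains:

#pragma comment(lib, "avcodec.lib")   // core encode/decode API
#pragma comment(lib, "avutil.lib")    // AVFrame, options, error helpers
#pragma comment(lib, "swscale.lib")   // only needed if you use libswscale for pixel conversion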

 

 2. Encoding YUV420P to H.264

I chose YUV420P (YV12) because this data can be produced from a file with the FFmpeg command line for testing, and because in a planar format the Y, V and U planes are stored contiguously and separately, with no UV interleaving. One YUV frame is width * height * 1.5 bytes. (Note: I420 and YV12 differ only in whether the U or the V plane comes first.)

Grab any MP4 recording and use the FFmpeg command line to dump YUV data for testing: ffmpeg -i input.mp4 -vf "format=yuv420p" output.yuv. If you already have YUV test data, or your data comes from a live recording, screen capture, etc., you can use that directly.
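Before feeding the dump into the encoder, it is worth checking that the file size is an exact multiple of one frame (width * height * 1.5 bytes). A minimal sketch; the path and resolution are placeholders for whatever your test clip uses:

#include <cstdio>

int main()
{
    const long long width = 1920, height = 1080;          // must match the source video
    const long long frame_size = width * height * 3 / 2;  // one YUV420P frame in bytes

    FILE* fp = std::fopen("20230.yuv", "rb");              // placeholder test file
    if (!fp) return 1;
    std::fseek(fp, 0, SEEK_END);
    long long total = std::ftell(fp);                      // use _ftelli64 for files > 2 GB
    std::fclose(fp);

    std::printf("frames: %lld, leftover bytes: %lld\n",
                total / frame_size, total % frame_size);
    return 0;
}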

Below is the source of the test demo that ships with FFmpeg. It generates the YUV itself, sets the encoding parameters, and writes H.264 output. I will make a few small changes to this demo later, but it is mostly used as-is.

/*
 * Copyright (c) 2001 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file libavcodec encoding video API usage example
 * @example encode_video.c
 *
 * Generate synthetic video data and encode it to an output file.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libavcodec/avcodec.h>

#include <libavutil/opt.h>
#include <libavutil/imgutils.h>

static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *outfile)
{
    int ret;

    /* send the frame to the encoder */
    if (frame)
        printf("Send frame %3"PRId64"\n", frame->pts);

    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame for encoding\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during encoding\n");
            exit(1);
        }

        printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
        fwrite(pkt->data, 1, pkt->size, outfile);
        av_packet_unref(pkt);
    }
}

int main(int argc, char **argv)
{
    const char *filename, *codec_name;
    const AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, ret, x, y;
    FILE *f;
    AVFrame *frame;
    AVPacket *pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    if (argc <= 2) {
        fprintf(stderr, "Usage: %s <output file> <codec name>\n", argv[0]);
        exit(0);
    }
    filename = argv[1];
    codec_name = argv[2];

    /* find the mpeg1video encoder */
    codec = avcodec_find_encoder_by_name(codec_name);
    if (!codec) {
        fprintf(stderr, "Codec '%s' not found\n", codec_name);
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base = (AVRational){1, 25};
    c->framerate = (AVRational){25, 1};

    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if (codec->id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate the video frame data\n");
        exit(1);
    }

    /* encode 1 second of video */
    for (i = 0; i < 25; i++) {
        fflush(stdout);

        /* Make sure the frame data is writable.
           On the first round, the frame is fresh from av_frame_get_buffer()
           and therefore we know it is writable.
           But on the next rounds, encode() will have called
           avcodec_send_frame(), and the codec may have kept a reference to
           the frame in its internal structures, that makes the frame
           unwritable.
           av_frame_make_writable() checks that and allocates a new buffer
           for the frame only if necessary.
         */
        ret = av_frame_make_writable(frame);
        if (ret < 0)
            exit(1);

        /* Prepare a dummy image.
           In real code, this is where you would have your own logic for
           filling the frame. FFmpeg does not care what you put in the
           frame.
         */
        /* Y */
        for (y = 0; y < c->height; y++) {
            for (x = 0; x < c->width; x++) {
                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for (y = 0; y < c->height/2; y++) {
            for (x = 0; x < c->width/2; x++) {
                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        frame->pts = i;

        /* encode the image */
        encode(c, frame, pkt, f);
    }

    /* flush the encoder */
    encode(c, NULL, pkt, f);

    /* Add sequence end code to have a real MPEG file.
       It makes only sense because this tiny examples writes packets
       directly. This is called "elementary stream" and only works for some
       codecs. To create a valid file, you usually need to write packets
       into a proper file format or protocol; see mux.c.
     */
    if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
        fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_free_context(&c);
    av_frame_free(&frame);
    av_packet_free(&pkt);

    return 0;
}

  


 

Function walkthrough

1. Find the encoder; avcodec_find_encoder(codec_id) can also be used

 const AVCodec*  codec = avcodec_find_encoder_by_name(codec_name);//codec_name = "libx264";

The function returns a pointer to an AVCodec structure, which holds the codec ID, supported frame rates, supported pixel formats, audio sample rates, channel layouts, supported profiles, and so on.

/**
 * AVCodec.
 */
typedef struct AVCodec {
    /**
     * Name of the codec implementation.
     * The name is globally unique among encoders and among decoders (but an
     * encoder and a decoder can share the same name).
     * This is the primary way to find a codec from the user perspective.
     */
    const char *name;
    /**
     * Descriptive name for the codec, meant to be more human readable than name.
     * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
     */
    const char *long_name;
    enum AVMediaType type;
    enum AVCodecID id;
    /**
     * Codec capabilities.
     * see AV_CODEC_CAP_*
     */
    int capabilities;
    uint8_t max_lowres;                     ///< maximum value for lowres supported by the decoder
    const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
    const enum AVPixelFormat *pix_fmts;     ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
    const int *supported_samplerates;       ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
    const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
#if FF_API_OLD_CHANNEL_LAYOUT
    /**
     * @deprecated use ch_layouts instead
     */
    attribute_deprecated
    const uint64_t *channel_layouts;         ///< array of support channel layouts, or NULL if unknown. array is terminated by 0
#endif
    const AVClass *priv_class;              ///< AVClass for the private context
    const AVProfile *profiles;              ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}

    /**
     * Group name of the codec implementation.
     * This is a short symbolic name of the wrapper backing this codec. A
     * wrapper uses some kind of external implementation for the codec, such
     * as an external library, or a codec implementation provided by the OS or
     * the hardware.
     * If this field is NULL, this is a builtin, libavcodec native codec.
     * If non-NULL, this will be the suffix in AVCodec.name in most cases
     * (usually AVCodec.name will be of the form "<codec_name>_<wrapper_name>").
     */
    const char *wrapper_name;

    /**
     * Array of supported channel layouts, terminated with a zeroed layout.
     */
    const AVChannelLayout *ch_layouts;
} AVCodec;
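A small sketch of how the struct above can be used in practice: look the encoder up by name (falling back to the codec ID) and verify that it advertises YUV420P. It assumes the same headers as the demo above; the variable names are only for illustration.

const AVCodec* codec = avcodec_find_encoder_by_name("libx264");
if (!codec)
    codec = avcodec_find_encoder(AV_CODEC_ID_H264);   // fall back to the default H.264 encoder

bool supports_yuv420p = false;
if (codec && codec->pix_fmts) {
    for (const enum AVPixelFormat* p = codec->pix_fmts; *p != AV_PIX_FMT_NONE; ++p)
        if (*p == AV_PIX_FMT_YUV420P)
            supports_yuv420p = true;
}
printf("encoder: %s, yuv420p supported: %d\n",
       codec ? codec->name : "none", (int)supports_yuv420p);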

2. Allocate/create the encoder context

This creates the context and lets you set the encoding parameters; once the context exists, any parameters you set on it are applied to the encoder.

 AVCodecContext* c = NULL; c = avcodec_alloc_context3(codec);

  /* put sample parameters */
    c->bit_rate = 400000;   // target output bitrate, 400 kbps here
    /* resolution must be a multiple of two */
    c->width = 1920;    // input image width; must match the YUV data
    c->height = 1080;   // input image height
    /* frames per second */
    c->time_base.num = 1;
    c->time_base.den = 25;
    c->framerate.num = 25;   // 25 fps
    c->framerate.den = 1;

    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    c->gop_size = 10;        // GOP size is 10 frames
    c->max_b_frames = 1;     // at most 1 B-frame between I/P frames
    c->pix_fmt = AV_PIX_FMT_YUV420P;   // planar 4:2:0; Y/U/V planes stored separately, no UV interleaving
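Beyond the generic fields above, libx264 exposes its own options through the context's private data. A hedged sketch (it reuses the codec and c variables from the demo); whether "crf" actually overrides the bit_rate setting depends on the wrapper, so treat it as a starting point rather than the definitive configuration:

    if (codec->id == AV_CODEC_ID_H264) {
        av_opt_set(c->priv_data, "preset", "slow", 0);       // speed/quality trade-off
        av_opt_set(c->priv_data, "tune", "zerolatency", 0);  // e.g. for live/low-latency use
        av_opt_set(c->priv_data, "crf", "23", 0);            // constant-quality mode
    }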

3. Initialize the encoder context: after creating it and setting the parameters in step 2, initialize the context and open the encoder

avcodec_open2(c, codec, NULL /* optional user-defined encoder options (AVDictionary**) */); // const AVCodec* codec;  AVCodecContext* c = NULL;
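On MSVC the av_err2str() macro from the headers does not compile as-is (it relies on a C compound literal), so it is handy to print errors with av_strerror() directly. A small sketch:

    int ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        char errbuf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
        av_strerror(ret, errbuf, sizeof(errbuf));   // convert the error code to readable text
        fprintf(stderr, "avcodec_open2 failed: %s\n", errbuf);
    }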

4. Create the frame and packet structures that carry the data

The uncompressed image goes into the frame structure and the encoded data goes into the packet structure. Not every codebase uses "frame" for uncompressed data only; it depends on the project. In the test case in this post the frame holds uncompressed YUV, while elsewhere a "frame" may hold H.264/H.265 data, so the meaning depends on how it is used.

In FFmpeg, data holds uncompressed samples, stored either planar or packed (interleaved); the difference mainly shows up with audio, e.g. whether the left and right channels are interleaved or each channel is stored separately.

  frame = av_frame_alloc();

  pkt = av_packet_alloc();

 frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;   // pixel format; e.g. AV_PIX_FMT_YUV420P
    frame->width = c->width;
    frame->height = c->height;

    ret = av_frame_get_buffer(frame, 0);   // before reusing the buffer for new YUV each round, make sure it is writable with av_frame_make_writable()
    if (ret < 0) {
        fprintf(stderr, "Could not allocate the video frame data\n");
        exit(1);
    }
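Instead of repointing frame->data at an external buffer (as done in step 5 below), the safer pattern is to copy into the frame's own planes row by row, honouring linesize, which may be padded. A sketch assuming a contiguous I420 buffer (swap the last two destinations for YV12):

    av_frame_make_writable(frame);                   // ensure the encoder no longer references the planes
    const uint8_t* src_y = buffer;
    const uint8_t* src_u = src_y + c->width * c->height;
    const uint8_t* src_v = src_u + (c->width / 2) * (c->height / 2);
    for (int row = 0; row < c->height; row++)        // luma plane
        memcpy(frame->data[0] + row * frame->linesize[0], src_y + row * c->width, c->width);
    for (int row = 0; row < c->height / 2; row++) {  // chroma planes (half resolution)
        memcpy(frame->data[1] + row * frame->linesize[1], src_u + row * (c->width / 2), c->width / 2);
        memcpy(frame->data[2] + row * frame->linesize[2], src_v + row * (c->width / 2), c->width / 2);
    }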

5. Read the YUV data into frame->data and encode it. Note that the input and output buffers are reused here; this is a serial read -> encode -> write loop.

    int res_len = fread(buffer, len, 1, fpin);
    if (res_len > 0)
    {
        frame->data[0] = buffer;                                           // Y plane
        frame->data[2] = buffer + frame->width * frame->height;           // V plane (YV12 order)
        frame->data[1] = buffer + frame->width * frame->height * 5 / 4;   // U plane
        frame->pts += 1;                                                   // presentation timestamp
        // The frame can also be tagged with a picture type, a forced keyframe, etc.
        /* encode the image */
        encode(c, frame, pkt, f);   // f is the output FILE*
    }
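Alternatively, libavutil can compute the plane pointers for a contiguous buffer so you don't do the pointer arithmetic by hand. A sketch; like the manual version it does not copy, it only points frame->data at buffer (it assumes I420 plane order, so swap data[1]/data[2] for YV12):

    uint8_t* data[4] = { NULL };
    int linesize[4] = { 0 };
    av_image_fill_arrays(data, linesize, buffer, AV_PIX_FMT_YUV420P,
                         c->width, c->height, 1);   // align=1: planes are tightly packed
    for (int i = 0; i < 3; i++) {
        frame->data[i] = data[i];
        frame->linesize[i] = linesize[i];
    }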

6. AVPacket holds the encoded output; with H.264 this is the H.264 NAL data, i.e. the elementary stream. It also carries the PTS and DTS of the current frame.

   The encoding calls are: send the YUV data, then fetch packets: avcodec_send_frame(enc_ctx, frame);   avcodec_receive_packet(enc_ctx, pkt);

   Write the data to the file, then drop the reference to the packet: fwrite(pkt->data, 1, pkt->size, outfile);   av_packet_unref(pkt);

typedef struct AVPacket {
    /**
     * A reference to the reference-counted buffer where the packet data is
     * stored.
     * May be NULL, then the packet data is not reference-counted.
     */
    AVBufferRef *buf;
    /**
     * Presentation timestamp in AVStream->time_base units; the time at which
     * the decompressed packet will be presented to the user.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     * pts MUST be larger or equal to dts as presentation cannot happen before
     * decompression, unless one wants to view hex dumps. Some formats misuse
     * the terms dts and pts/cts to mean something different. Such timestamps
     * must be converted to true pts/dts before they are stored in AVPacket.
     */
    int64_t pts;
    /**
     * Decompression timestamp in AVStream->time_base units; the time at which
     * the packet is decompressed.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     */
    int64_t dts;
    uint8_t *data;
    int   size;
    int   stream_index;
    /**
     * A combination of AV_PKT_FLAG values
     */
    int   flags;
    /**
     * Additional packet data that can be provided by the container.
     * Packet can contain several types of side information.
     */
    AVPacketSideData *side_data;
    int side_data_elems;

    /**
     * Duration of this packet in AVStream->time_base units, 0 if unknown.
     * Equals next_pts - this_pts in presentation order.
     */
    int64_t duration;

    int64_t pos;                            ///< byte position in stream, -1 if unknown

    /**
     * for some private data of the user
     */
    void *opaque;

    /**
     * AVBufferRef for free use by the API user. FFmpeg will never check the
     * contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when
     * the packet is unreferenced. av_packet_copy_props() calls create a new
     * reference with av_buffer_ref() for the target packet's opaque_ref field.
     *
     * This is unrelated to the opaque field, although it serves a similar
     * purpose.
     */
    AVBufferRef *opaque_ref;

    /**
     * Time base of the packet's timestamps.
     * In the future, this field may be set on packets output by encoders or
     * demuxers, but its value will be by default ignored on input to decoders
     * or muxers.
     */
    AVRational time_base;
} AVPacket;
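A small sketch of inspecting a packet before writing it out; AV_PKT_FLAG_KEY marks packets carrying a keyframe (an IDR frame for H.264):

    if (pkt->flags & AV_PKT_FLAG_KEY)
        printf("keyframe at pts=%lld, %d bytes\n", (long long)pkt->pts, pkt->size);
    fwrite(pkt->data, 1, pkt->size, outfile);
    av_packet_unref(pkt);   // release our reference; the packet can then be reused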

7. After all frames are sent, call encode once more with a NULL frame to flush any data still buffered inside the encoder.

/* flush the encoder */
encode(c, NULL, pkt, fout);

and append the MPEG sequence end code at the end of the output file: uint8_t endcode[] = { 0, 0, 1, 0xb7 }; (the demo only writes it for MPEG-1/MPEG-2 streams).
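Spelled out without the encode() helper, the flush is just a NULL send followed by draining the remaining packets (a sketch, reusing the variables from the demo):

    avcodec_send_frame(c, NULL);                    // signal end of stream to the encoder
    while (avcodec_receive_packet(c, pkt) >= 0) {   // returns AVERROR_EOF once fully drained
        fwrite(pkt->data, 1, pkt->size, f);
        av_packet_unref(pkt);
    }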

8. Finally, free the context, free the frame and the packet, and close the files.

avcodec_free_context(&c);
av_frame_free(&frame);
av_packet_free(&pkt);


 

Below is a real, working encoding example (Visual Studio 2022):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>

char av_error[AV_ERROR_MAX_STRING_SIZE] = { 0 };
#define av_err2str(errnum) av_make_error_string(av_error, AV_ERROR_MAX_STRING_SIZE, errnum)


static void encode(AVCodecContext* enc_ctx, AVFrame* frame, AVPacket* pkt,
    FILE* outfile)
{
    int ret;

    /* send the frame to the encoder */
    if (frame)
        printf("Send frame %3ld\n", frame->pts);

    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame for encoding\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during encoding\n");
            exit(1);
        }

        printf("Write packet %3ld (size=%5d)\n", pkt->pts, pkt->size);
        fwrite(pkt->data, 1, pkt->size, outfile);
        av_packet_unref(pkt);
    }
}

int main(int argc, char** argv)
{
    const char* filename, * codec_name,* filename_in;
    //
    const AVCodec* codec;
    AVCodecContext* c = NULL;
    int i, ret, x, y;
    FILE* f;
    AVFrame* frame;
    AVPacket* pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };
    int len = 1920 * 1080 * 3 / 2;
    uint8_t* buffer = new uint8_t[len];

   
    filename = "D:/Emscripten/cmaketest/20230801230523.264";//编码输出的264,文件没有会自动创建
    filename_in = "D:/Emscripten/cmaketest/20230.yuv";//待编码的YUV,YV12;需要有
    codec_name = "libx264";//可以自己指定其他编码器
    
    FILE* fpin = fopen(filename_in, "rb");   // open the YUV input read-only
    if (!fpin)
    {
        fprintf(stderr, "Could not open input file '%s'\n", filename_in);
        exit(1);
    }

    // find the encoder; avcodec_find_encoder(codec_id) also works
    codec = avcodec_find_encoder_by_name(codec_name);
    if (!codec) {
        fprintf(stderr, "Codec '%s' not found\n", codec_name);
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 1920;
    c->height = 1080;
    /* frames per second */
    c->time_base.num = 1;
    c->time_base.den = 25;   // time base = 1/25
    c->framerate.num = 25;   // 25 fps
    c->framerate.den = 1;

    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;   // planar YUV 4:2:0

    if (codec->id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;

    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate the video frame data\n");
        exit(1);
    }

    /* encode frames until the input file is exhausted */
    frame->pts = 0;   // av_frame_alloc() leaves pts at AV_NOPTS_VALUE, so start it at 0
    while (!feof(fpin)) {
       
        fflush(stdout);

        /* Make sure the frame data is writable.
           On the first round, the frame is fresh from av_frame_get_buffer()
           and therefore we know it is writable.
           But on the next rounds, encode() will have called
           avcodec_send_frame(), and the codec may have kept a reference to
           the frame in its internal structures, that makes the frame
           unwritable.
           av_frame_make_writable() checks that and allocates a new buffer
           for the frame only if necessary.
         */
        ret = av_frame_make_writable(frame);
        if (ret < 0)
            exit(1);

        /* Read one YUV frame from the input file and point the frame planes at it. */
        int res_len = fread(buffer, len, 1, fpin);
        if (res_len > 0)
        {   // You could also fread() straight into the frame planes; the intermediate
            // buffer is only a staging area here, the layout just has to match.
            frame->data[0] = buffer;                                           // Y
            frame->data[2] = buffer + frame->width * frame->height;           // V (YV12 plane order)
            frame->data[1] = buffer + frame->width * frame->height * 5 / 4;   // U (swap data[1]/data[2] if the file is I420)
            frame->pts += 1;   // one tick per frame in the 1/25 time base

            /* encode the image */
            encode(c, frame, pkt, f);
        }
    }

    /* flush the encoder */
    encode(c, NULL, pkt, f);
    if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
        fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_free_context(&c);
    av_frame_free(&frame);
    av_packet_free(&pkt);

    delete[] buffer;
    fclose(fpin);
    return 0;
}

Result:

 The H.264 data opened in Elecard:

 This is the encoded output after I changed gop_size to 25 and disabled B-frames.
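If you don't have Elecard at hand, the raw stream can also be sanity-checked with the FFmpeg tools, e.g. ffprobe -show_frames 20230801230523.264 to list the frame types, or ffplay 20230801230523.264 to play it back; a bare Annex-B H.264 elementary stream is detected automatically.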

 
