c++调用ffmpeg

在自己编译好ffmpeg库后,已经迫不及待地想尝试用vs2010来调用ffmpeg,在开始调用的时候遇到了些问题,但还是解决了。

配置vs

1.右键工程-属性,然后选择 配置属性 -> C/C++ -> 常规 -> 附加包含目录,添加编译好的头文件;

2. 设置ffmpeg的lib文件位置 
鼠标右键点击工程名,选择属性, 然后选择 配置属性 -> 链接器 -> 常规 -> 附加库目录,添加编译好的lib目录。
3. 设置ffmpeg的所引用的lib文件  鼠标右键点击工程名,选择属性, 然后选择 配置属性 -> 链接器 -> 输入 -> 附加依赖项,添加编译好的lib文件
然后把编译好的dll文件拷贝到debug文件下
另外,C99中添加了几个新的头文件,Visual Studio中没有,所以需要你自己下载。并放至相应目录。对于VS2010来说通常是:C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\include。
代码如下:
注意:编译成功后,F5遇到avformat_open_input报错,可能是因为第一个参数没有初始化。
// FFmpegOpenFile.cpp : 定义控制台应用程序的入口点。
//
// ffmpeg-example.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"

#define inline _inline
#ifndef INT64_C
#define INT64_C(c) (c ## LL)
#define UINT64_C(c) (c ## ULL)
#endif

#ifdef __cplusplus
extern "C" {
#endif

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
}
#endif

#include <stdio.h>


static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame);

int main (int argc, const char * argv[])
{
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame; 
    AVFrame         *pFrameRGB;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer;

    // Register all formats and codecs
    av_register_all();
    const char* filename = "F:\\开发笔记实例\\C++\\FFmpegOpenFile\\Wildlife.wmv";
    // Open video file
    //if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    int ii = avformat_open_input(&pFormatCtx, filename, NULL, NULL);
    if(ii!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], false);

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
        {
            videoStream=i;
            break;
        }
        if(videoStream==-1)
            return -1; // Didn't find a video stream

        // Get a pointer to the codec context for the video stream
        pCodecCtx=pFormatCtx->streams[videoStream]->codec;

        // Find the decoder for the video stream
        pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
        if(pCodec==NULL)
            return -1; // Codec not found

        // Open codec
        if(avcodec_open(pCodecCtx, pCodec)<0)
            return -1; // Could not open codec

        // Hack to correct wrong frame rates that seem to be generated by some codecs
        if(pCodecCtx->time_base.num>1000 && pCodecCtx->time_base.den==1)
            pCodecCtx->time_base.den=1000;

        // Allocate video frame
        pFrame=avcodec_alloc_frame();

        // Allocate an AVFrame structure
        pFrameRGB=avcodec_alloc_frame();
        if(pFrameRGB==NULL)
            return -1;

        // Determine required buffer size and allocate buffer
        numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
            pCodecCtx->height);

        //buffer=malloc(numBytes);
        buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

        // Assign appropriate parts of buffer to image planes in pFrameRGB
        avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
            pCodecCtx->width, pCodecCtx->height);

        // Read frames and save first five frames to disk
        i=0;
        while(av_read_frame(pFormatCtx, &packet)>=0)
        {
            // Is this a packet from the video stream?
            if(packet.stream_index==videoStream)
            {
                // Decode video frame
                avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

                // Did we get a video frame?
                if(frameFinished)
                {
                    static struct SwsContext *img_convert_ctx;

#if 0
                    // Older removed code
                    // Convert the image from its native format to RGB swscale
                    img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24, 
                        (AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, 
                        pCodecCtx->height);

                    // function template, for reference
                    int sws_scale(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
                        int srcSliceH, uint8_t* dst[], int dstStride[]);
#endif
                    // Convert the image into YUV format that SDL uses
                    if(img_convert_ctx == NULL) {
                        int w = pCodecCtx->width;
                        int h = pCodecCtx->height;

                        img_convert_ctx = sws_getContext(w, h, 
                            pCodecCtx->pix_fmt, 
                            w, h, PIX_FMT_RGB24, SWS_BICUBIC,
                            NULL, NULL, NULL);
                        if(img_convert_ctx == NULL) {
                            fprintf(stderr, "Cannot initialize the conversion context!\n");
                            exit(1);
                        }
                    }
                    int ret = sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, 
                        pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
#if 0 
                    // this use to be true, as of 1/2009, but apparently it is no longer true in 3/2009
                    if(ret) {
                        fprintf(stderr, "SWS_Scale failed [%d]!\n", ret);
                        exit(-1);
                    }
#endif
                    // Save the frame to disk
                    if(i++<=5)
                        SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
                }
            }

            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);
        }

        // Free the RGB image
        //free(buffer);
        av_free(buffer);
        av_free(pFrameRGB);

        // Free the YUV frame
        av_free(pFrame);

        // Close the codec
        avcodec_close(pCodecCtx);

        // Close the video file
        av_close_input_file(pFormatCtx);

        return 0;
}

/*
 * Write one RGB24 frame to disk as a binary PPM (P6) file named
 * "frame<iFrame>.ppm" in the current directory.
 *
 * pFrame  - frame whose data[0]/linesize[0] hold packed RGB24 pixels
 * width   - frame width in pixels
 * height  - frame height in pixels (rows to write)
 * iFrame  - index used in the output file name
 *
 * Best-effort: returns silently if the file cannot be opened; stops
 * writing on the first short write.
 */
static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
    FILE *pFile;
    char szFilename[32];
    int  y;

    // BUG FIX: use a bounded, always-NUL-terminating format instead of
    // sprintf (the 32-byte buffer was safe for int, but unguarded).
    snprintf(szFilename, sizeof szFilename, "frame%d.ppm", iFrame);
    pFile=fopen(szFilename, "wb");
    if(pFile==NULL)
        return;

    // PPM header: magic "P6", dimensions, max channel value.
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    // Rows in the frame buffer may be padded out to linesize[0]; emit
    // exactly width*3 bytes (R,G,B per pixel) per row.
    // BUG FIX: the original ignored fwrite's return value, silently
    // producing a corrupt file on I/O error; stop on the first short write.
    for(y=0; y<height; y++)
    {
        if(fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile)
               != (size_t)(width*3))
            break;
    }

    // Close (and flush) the file; this is a best-effort saver, so fclose
    // errors are deliberately ignored.
    fclose(pFile);
}

 

posted @ 2013-09-24 16:25  ronaldo9  阅读(5709)  评论(0编辑  收藏  举报