How to configure and build ffmpeg with x264 on Linux, including SDL2
I. Environment preparation
ffmpeg download: http://www.ffmpeg.org/download.html
x264 download: http://download.videolan.org/x264/snapshots/
yasm download: http://yasm.tortall.net/Download.html
II. Building
1. Build yasm. Recent x264 snapshots require yasm 1.2 or later.
./configure --prefix=/usr/local/yasm
make
make install
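To confirm the version requirement mentioned above, you can query the freshly installed binary (the path follows the --prefix used here):
/usr/local/yasm/bin/yasm --version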
2. Extract x264, enter its directory, and run:
./configure --prefix=/usr/local/x264 --enable-shared --enable-static --enable-yasm
make
make install
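A quick sanity check that both the shared and static libraries landed under the prefix (paths follow the --prefix above):
ls /usr/local/x264/lib        # should list libx264.a and libx264.so*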
Since yasm was installed by hand, later steps may complain that yasm cannot be found. Add the following line to /etc/profile:
export PATH=$PATH:/usr/local/yasm/bin
and then run:
source /etc/profile
3. Extract ffmpeg and enter its directory.
ffmpeg has quite a few dependencies, which need to be installed first:
apt-get install libfaac-dev libmp3lame-dev libtheora-dev libvorbis-dev libxvidcore-dev libxext-dev libxfixes-dev
Then run:
./configure --prefix=/usr/local/ffmpeg --enable-libmp3lame --enable-libvorbis --enable-gpl --enable-version3 --enable-nonfree --enable-pthreads --enable-libfaac --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libx264 --enable-libxvid --enable-postproc --enable-ffserver --enable-ffplay --enable-shared --extra-cflags=-I/usr/local/x264/include --extra-ldflags=-L/usr/local/x264/lib
configure may still report missing libraries; install whatever it asks for.
make
make install
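If the build and install succeeded, the new binary should run and report its configuration, for example:
/usr/local/ffmpeg/bin/ffmpeg -version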
III. Configuring environment variables and library paths
First, the command search path: edit /etc/profile and add
export PATH=$PATH:/usr/local/ffmpeg/bin:/usr/local/yasm/bin:/usr/local/x264/bin
Next, the shared-library path: edit /etc/ld.so.conf and add
/usr/local/ffmpeg/lib
/usr/local/x264/lib
Then run sudo ldconfig.
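You can confirm that the dynamic linker now sees the new libraries with:
ldconfig -p | grep libavcodec        # should point into /usr/local/ffmpeg/lib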
The compiler's default search path does not include these two directories, so even with ld.so.conf set up as above, linking will still fail at build time unless you pass -L/usr/local/ffmpeg/lib -L/usr/local/x264/lib so the libraries can be found.
To simplify things, you can copy the .so files from /usr/local/ffmpeg/lib and /usr/local/x264/lib into /usr/local/lib once and be done with it.
IV. Setting up an ffmpeg project in Eclipse
1. Create an empty C project.
2. Set the include paths.
3. Set the library search paths and the libraries.
Specifically: all of the ffmpeg libraries (avcodec, avdevice, avfilter, avformat, avutil, swresample, swscale) plus four other required libraries: pthread, m, x264, and mp3lame. pthread is the Linux threading library, m is the math library, x264 is the H.264 encoder library, and mp3lame is the MP3 encoder library.
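For reference, outside Eclipse the same settings correspond to a gcc command roughly like the following (only a sketch; the include and library paths assume the install prefixes used above):
gcc main.c -o app -I/usr/local/ffmpeg/include \
    -L/usr/local/ffmpeg/lib -L/usr/local/x264/lib \
    -lavformat -lavfilter -lavdevice -lavcodec -lswresample -lswscale -lavutil \
    -lx264 -lmp3lame -lpthread -lm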
4. main.c:
/**
 * @file
 * libavcodec API use example.
 *
 * @example decoding_encoding.c
 * Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
 * not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...).
 * See library 'libavformat' for the format handling
 */

#include <math.h>

#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>

#define INBUF_SIZE 4096
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096

/* check that a given sample format is supported by the encoder */
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;

    while (*p != AV_SAMPLE_FMT_NONE) {
        if (*p == sample_fmt)
            return 1;
        p++;
    }
    return 0;
}

/* just pick the highest supported samplerate */
static int select_sample_rate(AVCodec *codec)
{
    const int *p;
    int best_samplerate = 0;

    if (!codec->supported_samplerates)
        return 44100;

    p = codec->supported_samplerates;
    while (*p) {
        best_samplerate = FFMAX(*p, best_samplerate);
        p++;
    }
    return best_samplerate;
}

/* select layout with the highest channel count */
static int select_channel_layout(AVCodec *codec)
{
    const uint64_t *p;
    uint64_t best_ch_layout = 0;
    int best_nb_channels = 0;

    if (!codec->channel_layouts)
        return AV_CH_LAYOUT_STEREO;

    p = codec->channel_layouts;
    while (*p) {
        int nb_channels = av_get_channel_layout_nb_channels(*p);

        if (nb_channels > best_nb_channels) {
            best_ch_layout = *p;
            best_nb_channels = nb_channels;
        }
        p++;
    }
    return best_ch_layout;
}

/*
 * Audio encoding example
 */
static void audio_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    AVFrame *frame;
    AVPacket pkt;
    int i, j, k, ret, got_output;
    int buffer_size;
    FILE *f;
    uint16_t *samples;
    float t, tincr;

    printf("Encode audio file %s\n", filename);

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    if (!check_sample_fmt(codec, c->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
                av_get_sample_fmt_name(c->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    c->sample_rate    = select_sample_rate(codec);
    c->channel_layout = select_channel_layout(codec);
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* frame containing input raw audio */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->nb_samples     = c->frame_size;
    frame->format         = c->sample_fmt;
    frame->channel_layout = c->channel_layout;

    /* the codec gives us the frame size, in samples,
     * we calculate the size of the samples buffer in bytes */
    buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
                                             c->sample_fmt, 0);
    if (buffer_size < 0) {
        fprintf(stderr, "Could not get sample buffer size\n");
        exit(1);
    }
    samples = av_malloc(buffer_size);
    if (!samples) {
        fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
                buffer_size);
        exit(1);
    }
    /* setup the data pointers in the AVFrame */
    ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                                   (const uint8_t*)samples, buffer_size, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not setup audio frame\n");
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for (i = 0; i < 200; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        for (j = 0; j < c->frame_size; j++) {
            samples[2*j] = (int)(sin(t) * 10000);

            for (k = 1; k < c->channels; k++)
                samples[2*j + k] = samples[2*j];
            t += tincr;
        }
        /* encode the samples */
        ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }
        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }
    fclose(f);

    av_freep(&samples);
    av_frame_free(&frame);
    avcodec_close(c);
    av_free(c);
}

/*
 * Audio decoding.
 */
static void audio_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int len;
    FILE *f, *outfile;
    uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;
    AVFrame *decoded_frame = NULL;

    av_init_packet(&avpkt);

    printf("Decode audio file %s to %s\n", filename, outfilename);

    /* find the mpeg audio decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    outfile = fopen(outfilename, "wb");
    if (!outfile) {
        av_free(c);
        exit(1);
    }

    /* decode until eof */
    avpkt.data = inbuf;
    avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

    while (avpkt.size > 0) {
        int got_frame = 0;

        if (!decoded_frame) {
            if (!(decoded_frame = av_frame_alloc())) {
                fprintf(stderr, "Could not allocate audio frame\n");
                exit(1);
            }
        }

        len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding\n");
            exit(1);
        }
        if (got_frame) {
            /* if a frame has been decoded, output it */
            int data_size = av_samples_get_buffer_size(NULL, c->channels,
                                                       decoded_frame->nb_samples,
                                                       c->sample_fmt, 1);
            if (data_size < 0) {
                /* This should not occur, checking just for paranoia */
                fprintf(stderr, "Failed to calculate data size\n");
                exit(1);
            }
            fwrite(decoded_frame->data[0], 1, data_size, outfile);
        }
        avpkt.size -= len;
        avpkt.data += len;
        avpkt.dts = avpkt.pts = AV_NOPTS_VALUE;
        if (avpkt.size < AUDIO_REFILL_THRESH) {
            /* Refill the input buffer, to avoid trying to decode
             * incomplete frames. Instead of this, one could also use
             * a parser, or use a proper container format through
             * libavformat. */
            memmove(inbuf, avpkt.data, avpkt.size);
            avpkt.data = inbuf;
            len = fread(avpkt.data + avpkt.size, 1,
                        AUDIO_INBUF_SIZE - avpkt.size, f);
            if (len > 0)
                avpkt.size += len;
        }
    }

    fclose(outfile);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_frame_free(&decoded_frame);
}

/*
 * Video encoding example
 */
static void video_encode_example(const char *filename, int codec_id)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *frame;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Encode video file %s\n", filename);

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base = (AVRational){1,25};
    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if (codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }

    /* encode 1 second of video */
    for (i = 0; i < 25; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for (y = 0; y < c->height; y++) {
            for (x = 0; x < c->width; x++) {
                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for (y = 0; y < c->height/2; y++) {
            for (x = 0; x < c->width/2; x++) {
                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        frame->pts = i;

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    av_frame_free(&frame);
    printf("\n");
}

/*
 * Video decoding example
 */
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     char *filename)
{
    FILE *f;
    int i;

    f = fopen(filename,"w");
    fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
    for (i = 0; i < ysize; i++)
        fwrite(buf + i * wrap, 1, xsize, f);
    fclose(f);
}

static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
                              AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
    int len, got_frame;
    char buf[1024];

    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (len < 0) {
        fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
        return len;
    }
    if (got_frame) {
        printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
        fflush(stdout);

        /* the picture is allocated by the decoder, no need to free it */
        snprintf(buf, sizeof(buf), outfilename, *frame_count);
        pgm_save(frame->data[0], frame->linesize[0],
                 avctx->width, avctx->height, buf);
        (*frame_count)++;
    }
    if (pkt->data) {
        pkt->size -= len;
        pkt->data += len;
    }
    return 0;
}

static void video_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int frame_count;
    FILE *f;
    AVFrame *frame;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;

    av_init_packet(&avpkt);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    printf("Decode video file %s to %s\n", filename, outfilename);

    /* find the mpeg1 video decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    if (codec->capabilities & CODEC_CAP_TRUNCATED)
        c->flags |= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    frame_count = 0;
    for (;;) {
        avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
        if (avpkt.size == 0)
            break;

        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it.

           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before initializing them. */

        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */

        /* here, we use a stream based decoder (mpeg1video), so we
           feed decoder and see if it could decode a frame */
        avpkt.data = inbuf;
        while (avpkt.size > 0)
            if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
                exit(1);
    }

    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    avpkt.data = NULL;
    avpkt.size = 0;
    decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);

    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_frame_free(&frame);
    printf("\n");
}

int main(int argc, char **argv)
{
    const char *output_type;

    /* register all the codecs */
    avcodec_register_all();

    if (argc < 2) {
        printf("usage: %s output_type\n"
               "API example program to decode/encode a media stream with libavcodec.\n"
               "This program generates a synthetic stream and encodes it to a file\n"
               "named test.h264, test.mp2 or test.mpg depending on output_type.\n"
               "The encoded stream is then decoded and written to a raw data output.\n"
               "output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
               argv[0]);
        return 1;
    }
    output_type = argv[1];
    // video_encode_example("test.h264", AV_CODEC_ID_H264);
    if (!strcmp(output_type, "h264")) {
        video_encode_example("1080P.h264", AV_CODEC_ID_H264);
    } else if (!strcmp(output_type, "mp2")) {
        audio_encode_example("test.mp2");
        audio_decode_example("test.sw", "test.mp2");
    } else if (!strcmp(output_type, "mpg")) {
        video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
        video_decode_example("test%02d.pgm", "test.mpg");
    } else {
        fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
                output_type);
        return 1;
    }

    return 0;
}
test.h264 (sample input):
Link: http://pan.baidu.com/s/1o8pRflS  Password: x6a8
After building, open a terminal in the Debug directory, copy test.h264 into that directory, and run ./app h264.
On success, a file named 1080P.h264 is generated, which can be played back with ffplay.
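To check the result (binary path assumed from the install prefix above):
/usr/local/ffmpeg/bin/ffplay 1080P.h264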
Reference:
V. Building SDL2 and SDL2_image
Download SDL2 and SDL2_image (SDL2_image depends on libpng 1.5), as well as libpng 1.5.
Build and install them as follows.
libpng 1.5:
./configure    (all defaults are fine)
make
make install
SDL2 and SDL2_image:
./configure --prefix=/usr/local/sdl2
Note: SDL2_image needs the following extra step; skip it for SDL2.
vim Makefile
Search for png (type /png in vim).
Change the version number in the libpng-related entries to 15. (The system probably ships its own libpng12, or some other version. In that case the program will still compile without errors, but fail at runtime with messages like:
libpng warning: Application was compiled with png.h from libpng-1.4.3
libpng warning: Application is running with png.c from libpng-1.2.44
libpng error: Incompatible libpng version in application and library
The problem is introduced at SDL2_image's configure step: the headers and libraries it picks up must be consistent, so unify them to a single version.)
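If you would rather not edit the Makefile by hand, a one-liner along these lines can do the substitution; this is only a sketch and assumes the stale entries literally contain the string png12:
sed -i 's/png12/png15/g' Makefile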
make -j4
sudo make install
Configure /etc/profile and /etc/ld.so.conf in the same way as for ffmpeg.
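For example, following the prefix used above, /etc/profile would get
export PATH=$PATH:/usr/local/sdl2/bin
and /etc/ld.so.conf would get the line /usr/local/sdl2/lib, followed by another sudo ldconfig.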
Create the project in Eclipse (include paths, library paths, and libraries, as before).
/*
 * main.c
 *
 *  Created on: Sep 16, 2016
 *      Author: tla001
 */
#include <stdio.h>
#include <stdlib.h>   /* for exit() */
#include <SDL2/SDL.h>
#include <SDL2/SDL_image.h>

void example00();

int main()
{
    SDL_Window *window = NULL;
    SDL_Renderer *render = NULL;
    SDL_Texture *texture = NULL;
    SDL_Rect src, dst;
    int width, height;

    SDL_Init(SDL_INIT_EVERYTHING);
    window = SDL_CreateWindow("hello", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
                              640, 480, SDL_WINDOW_SHOWN);
    render = SDL_CreateRenderer(window, -1,
                                SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
    texture = IMG_LoadTexture(render, "./lufi.bmp");
    //SDL_UpdateTexture(texture);
    if (texture == NULL) {
        printf("err");
        exit(1);
    }
    SDL_QueryTexture(texture, NULL, NULL, &width, &height);
    printf("w=%d h=%d\n", width, height);
    src.x = src.y = 0;
    src.w = width;
    src.h = height;
    dst.x = 0;
    dst.y = 0;
    dst.w = width / 2;
    dst.h = height / 2;
    SDL_SetRenderDrawColor(render, 0, 255, 0, 255);
    SDL_RenderClear(render);
    SDL_RenderCopy(render, texture, NULL, &src);
    SDL_RenderPresent(render);
    SDL_Delay(3000);
    SDL_DestroyWindow(window);
    SDL_DestroyRenderer(render);
    SDL_Quit();

//  SDL_Window *pw = SDL_CreateWindow("hello1",SDL_WINDOWPOS_CENTERED,SDL_WINDOWPOS_CENTERED,640,480,SDL_WINDOW_SHOWN);
//  SDL_Renderer *pr = SDL_CreateRenderer(pw, -1, 0);
//  SDL_Surface *ps = IMG_Load("/home/tla001/Desktop/lufi.png");
//  if(ps==NULL){
//      exit(-1);
//  }
//  SDL_Texture *pt = SDL_CreateTextureFromSurface(pr, ps);
//  SDL_RenderClear(pr);
//  SDL_Rect r;
//  r.x = 0;
//  r.y = 0;
//  r.w = 1000;
//  r.h = 1000;
//  SDL_RenderCopy(pr, pt, NULL, &r);
//  SDL_RenderPresent(pr);
//  //SDL_Flip(pw);
//  SDL_Delay(3000);
//  SDL_Quit();

    //example00();
    return 0;
}

void example00()
{
    SDL_Window *pWindow = NULL;
    SDL_Renderer *pRenderer = NULL;

    // 1. initialize SDL
    if (SDL_Init(SDL_INIT_EVERYTHING) < 0) {
        printf("SDL initialize fail:%s\n", SDL_GetError());
        return;
    }
    // 2. create window
    pWindow = SDL_CreateWindow("example00:Setting up SDL",
                               SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
                               640, 480, SDL_WINDOW_SHOWN);
    if (NULL == pWindow) {
        printf("Create window fail:%s\n", SDL_GetError());
    }
    // 3. create renderer
    pRenderer = SDL_CreateRenderer(pWindow, -1, 0);

    // 4. clear the window to green
    SDL_SetRenderDrawColor(pRenderer, 0, 255, 0, 255);
    SDL_RenderClear(pRenderer);

    // 5. show the window
    SDL_RenderPresent(pRenderer);
    SDL_Delay(5000); // for display

    // 6. exit
    SDL_Quit();
}
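Outside Eclipse, the same program can be built from the command line with something along these lines (a sketch, assuming SDL2 and SDL2_image were both installed under the /usr/local/sdl2 prefix above):
gcc main.c -o sdltest -I/usr/local/sdl2/include -L/usr/local/sdl2/lib -lSDL2 -lSDL2_image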
Please credit the source when reposting: http://www.cnblogs.com/tla001/
Let's learn together and improve together.