Setting up an ffmpeg build environment on Ubuntu and developing against it
The steps are as follows:
1. Download
The official site is always the way to go: http://ffmpeg.org/download.html
Alternatively: svn checkout svn://svn.mplayerhq.hu/ffmpeg/trunk ffmpeg
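Note: the SVN repository above reflects the project at the time of writing; FFmpeg development has since moved to Git, so the sources are now typically fetched with something like:

git clone https://git.ffmpeg.org/ffmpeg.git ffmpeg

Check the download page above for the currently recommended repository URL.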
2. Build
- Run ./configure
  Unfortunately, configure stopped with an error:
  "yasm not found, use --disable-yasm for a crippled build"
  Solution: sudo apt-get install yasm, then re-run ./configure. Done.
- make
- make install
  If your permissions are insufficient, prefix the command with sudo.
- Compiling your own source against the installed libraries: pay close attention to the order in which the libraries are linked (see the compile-line sketch below).
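A minimal compile-line sketch for the player code below, assuming the FFmpeg libraries and SDL 1.2 provide pkg-config files on your system (the source file name player.c is just a placeholder). The key point is that the libraries must come after the source/object files that use them:

gcc player.c -o player \
    $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil sdl)

Without pkg-config, the explicit order is roughly -lavformat -lavcodec -lswscale -lavutil -lSDL (plus -lm -lz on some setups), i.e. higher-level FFmpeg libraries before the ones they depend on.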
- Reference code (open a video file, decode its video stream, and display the frames through an SDL YUV overlay):
#include <SDL/SDL.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <stdio.h>
#include <libswscale/swscale.h>

int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx;
    int i, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameYUV;
    AVPacket packet;
    int frameFinished;
    int numBytes;

    // Register all formats and codecs
    av_register_all();

    // Open video file
    if (av_open_input_file(&pFormatCtx,
            "/home/user/workspace/panda/media/video/4f5a9c384d94eb21e5273ec263457535.mp4",
            NULL, 0, NULL) != 0) {
        printf("=== cannot open file ===\n");
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if (av_find_stream_info(pFormatCtx) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    // dump_format(pFormatCtx, 0, argv[1], false);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // SDL initialization: screen and overlay at the source video size
    SDL_Surface *screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, SDL_HWSURFACE);
    SDL_Overlay *overlay = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);
    static SDL_Rect rect;
    rect.x = 0;
    rect.y = 0;
    rect.w = pCodecCtx->width;
    rect.h = pCodecCtx->height;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
        return -1; // Codec not found

    // Open codec
    if (avcodec_open(pCodecCtx, pCodec) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    // Allocate an AVFrame structure
    pFrameYUV = avcodec_alloc_frame();
    if (pFrameYUV == NULL)
        return -1;

    static struct SwsContext *img_convert_ctx;
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height,
                                     // PIX_FMT_RGB24,
                                     PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

    // Set SDL events
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_ShowCursor(SDL_DISABLE);

    // Read frames
    while ((av_read_frame(pFormatCtx, &packet) >= 0) && (SDL_PollEvent(NULL) == 0)) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);

            // Did we get a video frame?
            if (frameFinished) {
                // Convert the image from its native format to YUV and display it.
                // YV12 stores the V plane before U, hence the swapped plane indices.
                SDL_LockYUVOverlay(overlay);
                pFrameYUV->data[0] = overlay->pixels[0];
                pFrameYUV->data[1] = overlay->pixels[2];
                pFrameYUV->data[2] = overlay->pixels[1];
                pFrameYUV->linesize[0] = overlay->pitches[0];
                pFrameYUV->linesize[1] = overlay->pitches[2];
                pFrameYUV->linesize[2] = overlay->pitches[1];
                // img_convert((AVPicture *) pFrameYUV, PIX_FMT_YUV420P, (AVPicture *) pFrame,
                //             pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

                // Convert the decoded frame to YUV420P directly into the overlay planes
                sws_scale(img_convert_ctx, (const uint8_t* const *) pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                SDL_UnlockYUVOverlay(overlay);
                SDL_DisplayYUVOverlay(overlay, &rect);
                SDL_Delay(30);
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    // Free the output frame
    av_free(pFrameYUV);
    // Free the decoded frame
    av_free(pFrame);
    // Close the codec
    avcodec_close(pCodecCtx);
    // Close the video file
    av_close_input_file(pFormatCtx);
    // Free the SDL overlay
    SDL_FreeYUVOverlay(overlay);
    return 0;
}
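The code above targets the FFmpeg API of its time. In current FFmpeg releases several of these calls have been removed or renamed (av_open_input_file became avformat_open_input, av_find_stream_info became avformat_find_stream_info, avcodec_open became avcodec_open2, avcodec_alloc_frame became av_frame_alloc, avcodec_decode_video was replaced by the avcodec_send_packet / avcodec_receive_frame pair, av_free_packet became av_packet_unref, av_close_input_file became avformat_close_input, and the CODEC_TYPE_* / PIX_FMT_* constants gained AV prefixes). A rough sketch of the equivalent open/decode skeleton against the newer API; error handling is mostly omitted and exact signatures vary slightly between versions:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Sketch: open a file and decode video frames with the post-3.x FFmpeg API. */
static int decode_sketch(const char *path) {
    AVFormatContext *fmt = NULL;
    if (avformat_open_input(&fmt, path, NULL, NULL) < 0) return -1;
    if (avformat_find_stream_info(fmt, NULL) < 0) return -1;

    /* Pick the "best" video stream instead of scanning manually. */
    int vidx = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (vidx < 0) return -1;

    const AVCodec *dec = avcodec_find_decoder(fmt->streams[vidx]->codecpar->codec_id);
    AVCodecContext *ctx = avcodec_alloc_context3(dec);
    avcodec_parameters_to_context(ctx, fmt->streams[vidx]->codecpar);
    if (avcodec_open2(ctx, dec, NULL) < 0) return -1;

    AVFrame  *frame = av_frame_alloc();
    AVPacket *pkt   = av_packet_alloc();
    while (av_read_frame(fmt, pkt) >= 0) {
        if (pkt->stream_index == vidx && avcodec_send_packet(ctx, pkt) == 0) {
            while (avcodec_receive_frame(ctx, frame) == 0) {
                /* frame->data / frame->linesize are ready for sws_scale here */
            }
        }
        av_packet_unref(pkt);
    }

    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_free_context(&ctx);
    avformat_close_input(&fmt);
    return 0;
}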
- Scaled-up playback: the same program, except that the SDL screen/overlay and the sws_scale output are set to 1920x1080, so each decoded frame is enlarged during the conversion:
#include <SDL/SDL.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <stdio.h>
#include <libswscale/swscale.h>
int avcodec_main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx;
    int i, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameYUV;
    AVPacket packet;
    int frameFinished;
    int numBytes;

    // Register all formats and codecs
    av_register_all();

    // Open video file
    if (av_open_input_file(&pFormatCtx,
            "/home/user/workspace/panda/media/video/4f5a9c384d94eb21e5273ec263457535.mp4",
            NULL, 0, NULL) != 0) {
        printf("=== cannot open file ===\n");
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if (av_find_stream_info(pFormatCtx) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    // dump_format(pFormatCtx, 0, argv[1], false);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // SDL initialization: screen and overlay use the target (output) size
    int w = 1920, h = 1080;
    SDL_Surface *screen = SDL_SetVideoMode(w, h, 0, SDL_HWSURFACE);
    SDL_Overlay *overlay = SDL_CreateYUVOverlay(w, h, SDL_YV12_OVERLAY, screen);
    static SDL_Rect rect;
    rect.x = 0;
    rect.y = 0;
    rect.w = w;
    rect.h = h;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
        return -1; // Codec not found

    // Open codec
    if (avcodec_open(pCodecCtx, pCodec) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    // Allocate an AVFrame structure
    pFrameYUV = avcodec_alloc_frame();
    if (pFrameYUV == NULL)
        return -1;

    // The scaler converts from the source size/format to w x h in YUV420P
    static struct SwsContext *img_convert_ctx;
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     w, h,
                                     // PIX_FMT_RGB24,
                                     PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

    // Set SDL events
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_ShowCursor(SDL_DISABLE);

    // Read frames
    while ((av_read_frame(pFormatCtx, &packet) >= 0) && (SDL_PollEvent(NULL) == 0)) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);

            // Did we get a video frame?
            if (frameFinished) {
                // Convert the image from its native format to YUV and display it.
                // YV12 stores the V plane before U, hence the swapped plane indices.
                SDL_LockYUVOverlay(overlay);
                pFrameYUV->data[0] = overlay->pixels[0];
                pFrameYUV->data[1] = overlay->pixels[2];
                pFrameYUV->data[2] = overlay->pixels[1];
                pFrameYUV->linesize[0] = overlay->pitches[0];
                pFrameYUV->linesize[1] = overlay->pitches[2];
                pFrameYUV->linesize[2] = overlay->pitches[1];
                // img_convert((AVPicture *) pFrameYUV, PIX_FMT_YUV420P, (AVPicture *) pFrame,
                //             pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

                // Scale and convert the decoded frame directly into the overlay planes
                sws_scale(img_convert_ctx, (const uint8_t* const *) pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                SDL_UnlockYUVOverlay(overlay);
                SDL_DisplayYUVOverlay(overlay, &rect);
                SDL_Delay(30);
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    // Free the output frame
    av_free(pFrameYUV);
    // Free the decoded frame
    av_free(pFrame);
    // Close the codec
    avcodec_close(pCodecCtx);
    // Close the video file
    av_close_input_file(pFormatCtx);
    // Free the SDL overlay
    SDL_FreeYUVOverlay(overlay);
    return 0;
}
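One design note on the hard-coded 1920x1080 target: since the source frame is stretched to fill the whole overlay, the aspect ratio can be distorted. A small sketch of computing a letterboxed destination size first (the helper name fit_rect is hypothetical, not part of the code above):

#include <stdint.h>

/* Hypothetical helper: fit src_w x src_h inside max_w x max_h, preserving aspect ratio. */
static void fit_rect(int src_w, int src_h, int max_w, int max_h, int *out_w, int *out_h) {
    /* Compare src_w/src_h with max_w/max_h via cross-multiplication to avoid floats. */
    if ((int64_t)src_w * max_h > (int64_t)src_h * max_w) {
        *out_w = max_w;                               /* width-limited: letterbox top/bottom   */
        *out_h = (int)((int64_t)src_h * max_w / src_w);
    } else {
        *out_h = max_h;                               /* height-limited: pillarbox left/right  */
        *out_w = (int)((int64_t)src_w * max_h / src_h);
    }
    *out_w &= ~1;  /* keep dimensions even for YUV420 chroma subsampling */
    *out_h &= ~1;
}

The resulting out_w/out_h would replace the fixed w and h passed to SDL_CreateYUVOverlay and sws_getContext, with rect.x/rect.y offset to center the image on the screen.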
References:
- ffmpeg: http://blog.csdn.net/byxdaz/article/details/7316304
- A complete guide to building and using ffmpeg: http://lvzun.iteye.com/blog/706121
- Highly recommended: http://dranger.com/ffmpeg/ (An ffmpeg and SDL Tutorial)
- http://www.libsdl.org/release/SDL-1.2.15/test/ official SDL examples; the overlay test includes an RGB-to-YUV conversion.