ffmpeg c++代码推流RTSP/RTMP
由于 ffmpeg 推流的 RTSP/RTMP 地址需要有服务端在监听对应端口,所以要先启动一个流媒体服务器开启 TCP/UDP 端口监听,之后再用 ffmpeg 向该端口推送数据
第一,安装rtsp-simple-server
release下载地址https://github.com/aler9/rtsp-simple-server/releases/tag/v0.20.1
源码下载github地址
下载后解压后配置好端口,运行执行文件即可进行端口监听。
./rtsp-simple-server
第二,测试
FFmpeg 常用的命令行参数有
-c:指定编码器
-c copy:直接复制,不经过重新编码(这样比较快)
-c:v:指定视频编码器
-c:a:指定音频编码器
-i:指定输入文件
-an:去除音频流,不处理音频
-vn:去除视频流,不处理视频
-preset:指定编码速度与压缩率之间的平衡(间接影响画质与文件的生成速度),候选值有:ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow.
-y:不经过确认,输出时直接覆盖同名文件
-vcodec: 设定视频编解码器,未设定时则使用与输入流相同的编解码器
-b: 设定视频流量,默认为200Kbit/s
-r: 设定帧速率,默认为25
-s: 设定画面的宽与高
-aspect: 设定画面的比例
-ss: 开始时间
# 音频参数
-ar: 设定采样率
-ac: 设定声音的Channel数
-acodec: 设定声音编解码器,未设定时则使用与输入流相同的编解码器
-an: 不处理音频
转码
H264 转 MP4:$ ffmpeg -i "xx.h264" -c:v copy -f mp4 "xx.mp4"
MP4 转 TS:$ ffmpeg -i xx.mp4 -c copy -bsf:v h264_mp4toannexb faded.ts
推流
监听 UDP 端口转推为 HLS 流:$ ffmpeg -i udp://127.0.0.1:9000 -c copy -f hls xx.m3u8
将 MP4 通过 UDP 协议推送为 RTSP 流ffmpeg -re -i xx.mp4 -rtsp_transport tcp -vcodec h264 -f rtsp rtsp://192.168.3.4/xx
将 h264 文件推流到 RTMP 服务器 ffmpeg -re -i hik.h264 -c copy -f flv rtmp://192.168.3.4/live/fromh264
转推海康RTSP到RTMP服务器ffmpeg -rtsp_transport tcp -i rtsp://username:password@192.168.42.128:554/h264/ch1/main/av_stream -vcodec copy -acodec copy -ar 44100 -strict -2 -ac 1 -f flv -s 1280x720 -q 10 -f flv rtmp://192.168.3.4/live/fromrtsp
第三,引用头文件
代码推送视频
#include <iostream>
extern "C"
{
#include "libavformat/avformat.h"
#include "libavutil/mathematics.h"
#include "libavutil/time.h"
};
int avError(int errNum);
int main(int argc, char* argv[]){
//输入文件
const char *fileAddress = "../xx.mp4";
//推流地址
const char *rtmpAddress = "rtsp://0.0.0.0:8554/test";
//注册所有库
av_register_all();
//初始化网络库
avformat_network_init();
// 输入流处理部分 ///
AVFormatContext *ictx = NULL;
//打开文件
int ret = avformat_open_input(&ictx, fileAddress, 0, NULL);
if (ret < 0)
{
return avError(ret);
}
std::cout << "avformat_open_input succeeded" << std::endl;
//获取流信息
ret = avformat_find_stream_info(ictx, 0);
if (ret != 0)
{
return avError(ret);
}
//打印视频信息
av_dump_format(ictx, 0, fileAddress, 0);
// 输出流处理部分 ///
AVFormatContext *octx = NULL;
//创建输出上下文
ret = avformat_alloc_output_context2(&octx, NULL, "rtsp", rtmpAddress);
// ret = avformat_alloc_output_context2(&octx, NULL, "rtsp", rtmpAddress);
if (ret < 0) {
return avError(ret);
}
std::cout << "avformat_alloc_output_context2 succeeded" << std::endl;
//配置输出流
for (int i = 0; i < ictx->nb_streams; i++) {
//创建一个新的流
AVStream *outStream = avformat_new_stream(octx, ictx->streams[i]->codec->codec);
if (!outStream) {
return avError(0);
}
//复制配置信息
ret = avcodec_parameters_copy(outStream->codecpar, ictx->streams[i]->codecpar);
if (ret < 0) {
return avError(ret);
}
outStream->codec->codec_tag = 0;
}
//打印输出流的信息
av_dump_format(octx, 0, rtmpAddress, 1);
// 准备推流 ///
//打开io
ret = avio_open(&octx->pb, rtmpAddress, AVIO_FLAG_WRITE);
if (ret < 0) {
avError(ret);
}
//写入头部信息
ret = avformat_write_header(octx, NULL);
if ( ret < 0) {
avError(ret);
}
std::cout << "avformat_write_header succeeded" << std::endl;
//推流每一帧数据
AVPacket avPacket;
long long startTime = av_gettime();
while (true)
{
ret = av_read_frame(ictx, &avPacket);
if (ret < 0 )
{
break;
}
std::cout << avPacket.pts << " " << std::flush;
//计算转换时间戳
//获取时间基数
AVRational itime = ictx->streams[avPacket.stream_index]->time_base;
AVRational otime = octx->streams[avPacket.stream_index]->time_base;
avPacket.pts = av_rescale_q_rnd(avPacket.pts, itime, otime, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_NEAR_INF));
avPacket.dts = av_rescale_q_rnd(avPacket.dts, itime, otime, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_NEAR_INF));
//到这一帧经历了多长时间
avPacket.duration = av_rescale_q_rnd(avPacket.duration, itime, otime, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_NEAR_INF));
avPacket.pos = -1;
//视频帧推送速度
if (ictx->streams[avPacket.stream_index]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
AVRational tb = ictx->streams[avPacket.stream_index]->time_base;
//已经过去的时间
long long now = av_gettime() - startTime;
long long dts = 0;
dts = avPacket.dts * (1000 * 1000 * av_q2d(tb));
if (dts > now)
{
av_usleep(dts - now);
}
}
ret = av_interleaved_write_frame(octx, &avPacket);
if (ret < 0)
{
break;
}
}
std::cin.get();
return 0;
}
// Print a human-readable description of an FFmpeg error code and return -1,
// so call sites can simply write `return avError(ret);`.
int avError(int errNum) {
    // BUGFIX: zero-initialise the buffer — av_strerror can fail for an
    // unknown code and leave the buffer untouched, which previously
    // printed uninitialised garbage.
    char buf[1024] = { 0 };
    av_strerror(errNum, buf, sizeof(buf));
    std::cout << " failed! " << buf << std::endl;
    return -1;
}
代码推送图片
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/core/mat.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
//FFMPEG
extern "C"
{
#include <libswscale/swscale.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avformat.lib")
// Encode a static JPEG to H.264 in a loop and push it to an RTSP server.
// Returns 0 only if setup succeeds (the push loop itself is endless);
// returns -1 on any setup failure.
int main()
{
    // Camera handle kept from the original code path; the streamed frames
    // actually come from the JPEG loaded below.
    VideoCapture capture(0);
    const char *outUrl = "rtsp://0.0.0.0:8554/test";

    // Register codecs, muxers and network protocols
    // (required before FFmpeg 4.0; no-ops afterwards).
    avcodec_register_all();
    av_register_all();
    avformat_network_init();

    SwsContext *vsc = NULL;      // BGR -> YUV420P conversion context
    AVFrame *yuv = NULL;         // frame handed to the encoder
    AVCodecContext *vc = NULL;   // H.264 encoder context
    AVFormatContext *ic = NULL;  // output (RTSP) muxer context
    AVOutputFormat *ofmt = NULL;

    int inWidth = 4160;
    int inHeight = 1024;
    int fps = 16;

    /// 2. pixel-format conversion context
    vsc = sws_getCachedContext(vsc,
        inWidth, inHeight, AV_PIX_FMT_BGR24,    // source: OpenCV BGR
        inWidth, inHeight, AV_PIX_FMT_YUV420P,  // destination: encoder input
        SWS_BICUBIC,                            // scaling algorithm
        0, 0, 0);
    if (!vsc)
    {
        // BUGFIX: the original left this branch empty and crashed later.
        cout << "sws_getCachedContext failed!" << endl;
        return -1;
    }

    /// 3. output frame the converted pixels are written into
    yuv = av_frame_alloc();
    yuv->format = AV_PIX_FMT_YUV420P;
    yuv->width = inWidth;
    yuv->height = inHeight;
    yuv->pts = 0;
    int ret = av_frame_get_buffer(yuv, 32);  // 32-byte aligned planes
    if (ret != 0)
    {
        char buf[1024] = { 0 };
        av_strerror(ret, buf, sizeof(buf) - 1);
        // BUGFIX: abort instead of continuing with an unallocated frame.
        cout << "av_frame_get_buffer failed: " << buf << endl;
        return -1;
    }

    /// 4. encoder setup
    // a) find the H.264 encoder
    AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec)
    {
        cout << "avcodec_find_encoder AV_CODEC_ID_H264 failed!" << endl;
        return -1;
    }
    // b) allocate its context
    vc = avcodec_alloc_context3(codec);
    if (!vc)
    {
        cout << "avcodec_alloc_context3 failed!" << endl;
        return -1;
    }
    // c) configure it
    vc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;  // SPS/PPS in extradata, as muxers expect
    vc->codec_id = codec->id;
    vc->thread_count = 8;
    vc->bit_rate = 50 * 1024 * 8;  // target ~50 KB of compressed video per second
    vc->width = inWidth;
    vc->height = inHeight;
    vc->time_base = { 1, fps };
    vc->framerate = { fps, 1 };
    vc->gop_size = 50;       // keyframe interval (frames per GOP)
    vc->max_b_frames = 0;    // no B-frames -> lower latency
    vc->pix_fmt = AV_PIX_FMT_YUV420P;
    // d) open the encoder
    ret = avcodec_open2(vc, codec, 0);
    if (ret != 0)
    {
        char buf[1024] = { 0 };
        av_strerror(ret, buf, sizeof(buf) - 1);
        cout << "avcodec_open2 failed: " << buf << endl;
        return -1;
    }
    cout << "avcodec_open2 success!" << endl;

    /// 5. output muxer and stream
    // a) allocate the output context for RTSP
    ret = avformat_alloc_output_context2(&ic, NULL, "rtsp", outUrl);
    if (ret != 0)
    {
        char buf[1024] = { 0 };
        av_strerror(ret, buf, sizeof(buf) - 1);
        cout << "avformat_alloc_output_context2 failed: " << buf << endl;
        return -1;
    }
    ofmt = ic->oformat;
    // b) add the video stream
    AVStream *vs = avformat_new_stream(ic, codec);
    if (!vs)
    {
        cout << "avformat_new_stream failed!" << endl;
        return -1;
    }
    // BUGFIX: the original ignored this return value.
    ret = avcodec_parameters_from_context(vs->codecpar, vc);
    if (ret < 0)
    {
        cout << "avcodec_parameters_from_context failed!" << endl;
        return -1;
    }
    av_dump_format(ic, 0, outUrl, 1);

    // Open the network output IO (skipped for formats that manage their own IO).
    if (!(ofmt->flags & AVFMT_NOFILE))
    {
        ret = avio_open(&ic->pb, outUrl, AVIO_FLAG_WRITE);
        if (ret != 0)
        {
            char buf[1024] = { 0 };
            av_strerror(ret, buf, sizeof(buf) - 1);
            cout << "avio_open failed: " << buf << endl;
            return -1;
        }
    }
    // Write the container header.
    ret = avformat_write_header(ic, NULL);
    if (ret != 0)
    {
        char buf[1024] = { 0 };
        av_strerror(ret, buf, sizeof(buf) - 1);
        // BUGFIX: abort instead of streaming into an unopened muxer.
        cout << "avformat_write_header failed: " << buf << endl;
        return -1;
    }

    AVPacket pack;
    memset(&pack, 0, sizeof(pack));
    int vpts = 0;
    cv::Mat color_image = cv::imread("../11.jpg");  // change picture here
    // BUGFIX: a missing file previously produced a null-data Mat that
    // crashed inside sws_scale.
    if (color_image.empty())
    {
        cout << "failed to load ../11.jpg" << endl;
        return -1;
    }

    while (1)
    {
        /// ffmpeg push section
        // BGR (OpenCV layout) -> YUV420P (encoder input)
        uint8_t *indata[AV_NUM_DATA_POINTERS] = { 0 };
        indata[0] = color_image.data;
        int insize[AV_NUM_DATA_POINTERS] = { 0 };
        insize[0] = color_image.cols * color_image.elemSize();  // bytes per row
        int h = sws_scale(vsc, indata, insize, 0, color_image.rows,
            yuv->data, yuv->linesize);
        if (h <= 0)
        {
            continue;  // conversion produced nothing; try again
        }

        /// H.264 encode
        yuv->pts = vpts;
        vpts++;
        ret = avcodec_send_frame(vc, yuv);
        if (ret != 0)
            continue;
        ret = avcodec_receive_packet(vc, &pack);
        // BUGFIX: the original condition (ret != 0 || pack.size > 0) treated
        // a FAILED receive as success and went on to write an invalid packet.
        // EAGAIN just means the encoder needs more input before it can emit.
        if (ret != 0 || pack.size <= 0)
        {
            continue;
        }

        // Rescale from encoder time base to stream time base, then push.
        pack.pts = av_rescale_q(pack.pts, vc->time_base, vs->time_base);
        pack.dts = av_rescale_q(pack.dts, vc->time_base, vs->time_base);
        pack.duration = av_rescale_q(pack.duration, vc->time_base, vs->time_base);
        ret = av_interleaved_write_frame(ic, &pack);
        // BUGFIX: release the packet's buffer each iteration — the original
        // leaked one AVBufferRef per encoded frame. (On success the muxer
        // blanks the packet, so this unref is a safe no-op there.)
        av_packet_unref(&pack);
        if (ret != 0)
        {
            // transient network error: keep trying with the next frame
        }
    }
}