Deploying nginx-http-flv-module

Environment

  • Server: Alibaba Cloud Simple Application Server
  • OS: Ubuntu 20.04.3 LTS (GNU/Linux 5.4.0-90-generic x86_64)
  • Specs:
    • vCPU: 2 cores
    • Memory: 2 GB
    • ESSD: 60 GB

Deployment

1. Download (into the same directory) and extract; see the command sketch after this list

  • Nginx

    Choose the Stable version

  • nginx-http-flv-module

    git clone https://github.com/winshining/nginx-http-flv-module.git

  • OpenSSL

    git clone https://github.com/openssl/openssl.git

  • PCRE

    Note: PCRE, not pcre2
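
A minimal command sketch for this step, fetching everything into one working directory. The nginx version and the PCRE download URL are assumptions (pick the current stable nginx; 8.45 is used here only because it matches the --with-pcre path in the configure step below):

    # download all sources into the same directory
    wget https://nginx.org/download/nginx-1.20.2.tar.gz    # stable version is an assumption; adjust as needed
    tar -xzf nginx-1.20.2.tar.gz
    # PCRE (not pcre2); any pcre-8.45 source tarball works
    wget -O pcre-8.45.tar.gz https://sourceforge.net/projects/pcre/files/pcre/8.45/pcre-8.45.tar.gz/download
    tar -xzf pcre-8.45.tar.gz
    git clone https://github.com/winshining/nginx-http-flv-module.git
    git clone https://github.com/openssl/openssl.git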

2. Build and configure (run each step inside the corresponding source directory)

a. Install PCRE

./configure

b. Install OpenSSL

./Configure --prefix=/usr/local/ssl --openssldir=/usr/local/ssl '-Wl,-rpath,$(LIBRPATH)'

c. Install Nginx

./configure --add-module=../nginx-http-flv-module --with-pcre=../pcre-8.45 --with-openssl=../openssl --with-http_ssl_module
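
The configure lines above only prepare the builds; because ../pcre-8.45 and ../openssl are passed to nginx as source directories, nginx compiles and links them statically during its own make. A minimal sketch of the remaining steps (the default /usr/local/nginx install prefix is an assumption):

    # run in the nginx source directory, after ./configure above
    make
    sudo make install
    # confirm the module was compiled in (nginx -V prints build info to stderr);
    # the path assumes the default --prefix of /usr/local/nginx
    /usr/local/nginx/sbin/nginx -V 2>&1 | grep -o nginx-http-flv-module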

3. nginx.conf example

rtmp {
    server {
        listen 1935;
        chunk_size 4000;

        # TV mode: one publisher, many subscribers
        application mytv {
            # enable live streaming
            live on;
            # record first 1K of stream
            record all;
            record_path /tmp/av;
            record_max_size 1K;
            # append current timestamp to each flv
            record_unique on;
            # publish only from localhost
            allow publish 127.0.0.1;
            deny publish all;
            #allow play all;
        }
        # Transcoding (ffmpeg needed)
        application big {
            live on;
            # On every published stream run this command (ffmpeg)
            # with substitutions: $app/${app}, $name/${name} for application & stream name.
            #
            # This ffmpeg call receives stream from this application &
            # reduces the resolution down to 32x32. The stream is then published to
            # 'small' application (see below) under the same name.
            #
            # ffmpeg can do anything with the stream like video/audio
            # transcoding, resizing, altering container/codec params etc
            #
            # Multiple exec lines can be specified.
            exec ffmpeg -re -i rtmp://localhost:1935/$app/$name -vcodec flv -acodec copy -s 32x32
                 -f flv rtmp://localhost:1935/small/${name};
        }

        application small {
            live on;
            # Video with reduced resolution comes here from ffmpeg
        }

        application webcam {
            live on;
            # Stream from local webcam
            exec_static ffmpeg -f video4linux2 -i /dev/video0 -c:v libx264 -an
                        -f flv rtmp://localhost:1935/webcam/mystream;
        }

        application mypush {
            live on;
            # Every stream published here
            # is automatically pushed to
            # these two machines
            push rtmp1.example.com;
            push rtmp2.example.com:1934;
        }

        application mypull {
            live on;
            # Pull all streams from remote machine
            # and play locally
            pull rtmp://rtmp3.example.com pageUrl=www.example.com/index.html;
        }

        application mystaticpull {
            live on;
            # Static pull is started at nginx start
            pull rtmp://rtmp4.example.com pageUrl=www.example.com/index.html name=mystream static;
        }

        # video on demand
        application vod {
            play /var/flvs;
        }

        application vod2 {
            play /var/mp4s;
        }
        # Many publishers, many subscribers
        # no checks, no recording
        application videochat {
            live on;
            # The following notifications receive all
            # the session variables as well as
            # particular call arguments in HTTP POST
            # request
            # Make HTTP request & use HTTP retcode
            # to decide whether to allow publishing
            # from this connection or not
            on_publish http://localhost:8080/publish;
            # Same with playing
            on_play http://localhost:8080/play;
            # Publish/play end (repeats on disconnect)
            on_done http://localhost:8080/done;
            # All above mentioned notifications receive
            # standard connect() arguments as well as
            # play/publish ones. If any arguments are sent
            # with GET-style syntax to play & publish
            # these are also included.
            # Example URL:
            #   rtmp://localhost/myapp/mystream?a=b&c=d
            # record 10 video keyframes (no audio) every 2 minutes
            record keyframes;
            record_path /tmp/vc;
            record_max_frames 10;
            record_interval 2m;
            # Async notify about an flv recorded
            on_record_done http://localhost:8080/record_done;
        }
        # HLS
        # For HLS to work please create a directory in tmpfs (/tmp/hls here)
        # for the fragments. The directory contents are served via HTTP (see
        # the http{} section in this config)
        #
        # Incoming stream must be in H264/AAC. For iPhones use baseline H264
        # profile (see ffmpeg example).
        # This example creates an RTMP stream from a movie, ready for HLS:
        #
        # ffmpeg -loglevel verbose -re -i movie.avi -vcodec libx264
        #     -vprofile baseline -acodec libmp3lame -ar 44100 -ac 1
        #     -f flv rtmp://localhost:1935/hls/movie
        #
        # If you need to transcode a live stream, use the 'exec' feature.
        #
        application hls {
            live on;
            hls on;
            hls_path /tmp/hls;
        }

        # MPEG-DASH is similar to HLS
        application dash {
            live on;
            dash on;
            dash_path /tmp/dash;
        }
    }
}
# HTTP can be used for accessing RTMP stats
http {
    server {
        listen 8080;

        # This URL provides RTMP statistics in XML
        location /stat {
            rtmp_stat all;
            # Use this stylesheet to view XML as web page
            # in browser
            rtmp_stat_stylesheet stat.xsl;
        }

        location /stat.xsl {
            # XML stylesheet to view RTMP stats.
            # Copy stat.xsl wherever you want
            # and put the full directory path here
            root /path/to/stat.xsl/;
        }

        location /hls {
            # Serve HLS fragments
            types {
                application/vnd.apple.mpegurl m3u8;
                video/mp2t ts;
            }
            root /tmp;
            add_header Cache-Control no-cache;
        }

        location /dash {
            # Serve DASH fragments
            root /tmp;
            add_header Cache-Control no-cache;
        }
    }
}
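
The file above is essentially the stock nginx-rtmp-style example: it exposes RTMP, HLS, DASH and statistics, but no HTTP-FLV playback, which is the point of nginx-http-flv-module. Following the module's README, playback is enabled by adding a location with flv_live on to the http server block; a minimal sketch (the /live path is only a convention):

        location /live {
            flv_live on;                      # serve live streams over HTTP-FLV
            chunked_transfer_encoding on;     # deliver the open-ended FLV via chunked encoding
            add_header 'Access-Control-Allow-Origin' '*';           # allow browser players such as flv.js
            add_header 'Access-Control-Allow-Credentials' 'true';
        }

With this location in place, a stream published (from localhost, per the allow/deny rules in mytv) to rtmp://localhost:1935/mytv/mystream can be played at http://your-server:8080/live?port=1935&app=mytv&stream=mystream.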