Pushing a video stream from Python and displaying it in HTML
1. WebSocket
Install Node.js, then install the ws package and start the relay server:
npm install ws
node server.js
// server.js: a minimal relay that broadcasts every received message to all connected clients
let WebSocketServer = require('ws').Server,
    wss = new WebSocketServer({ port: 3303 });

wss.on('connection', function (ws) {
    console.log('client connected');
    ws.on('message', function (message) {
        // forward the frame to every client (including the sender)
        wss.clients.forEach(function each(client) {
            client.send(message);
        });
        console.log(message.length);
    });
});
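Once the relay is running it can be sanity-checked before any video is involved. Below is a minimal throwaway client (a sketch, using the same websocket-client package as the sender further down); since the server broadcasts to every connected client, the test client should receive its own message back:

# relay smoke test (assumes `pip install websocket-client`)
import websocket

ws = websocket.WebSocket()
ws.connect('ws://127.0.0.1:3303')
ws.send('hello relay')
print(ws.recv())  # the relay echoes the message back to all clients, including us
ws.close()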
HTML page:
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Title</title>
</head>
<body>
<div>
    <img id="resImg" src=""/>
</div>
<script src="jquery.min.js"></script>
<script>
    let ws = new WebSocket("ws://127.0.0.1:3303/");

    ws.onopen = function (evt) {
        console.log("Connection open ...");
        ws.send("Hello WebSockets!");
    };

    ws.onmessage = function (evt) {
        // each message is a Blob holding a "data:image/jpeg;base64,..." string;
        // read it as text and use it directly as the image source
        evt.data.text().then(res => {
            $("#resImg").attr("src", res);
        });
        // ws.close();
    };

    ws.onclose = function (evt) {
        console.log("Connection closed.");
    };
</script>
</body>
</html>
Python sender (install FFmpeg and add it to the environment variables):
import base64

import cv2
import websocket

# Playback quality: 4 = smooth, 3 = SD, 2 = HD, 1 = ultra HD
_quality = 1


def image_compression(frame, frame_width, frame_height):
    img_base64 = ''
    # downscale the frame according to the quality level
    r = int(2 + _quality * 0.5)
    w = int(frame_width / r)
    h = int(frame_height / r)
    frame = cv2.resize(frame, (w, h))
    # JPEG quality, 1-100
    jpeg_quality = 100 - 10 * _quality
    img_param = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality]
    # encode the frame as JPEG and wrap it in a data URI
    ret, frame = cv2.imencode('.jpg', frame, img_param)
    if ret:
        img_base64 = "data:image/jpeg;base64," + base64.b64encode(frame).decode("utf-8")
    return img_base64


if __name__ == '__main__':
    # https://blog.csdn.net/xietansheng/article/details/115558069
    ws = websocket.WebSocket()
    ws.connect('ws://127.0.0.1:3303')
    capture = cv2.VideoCapture("test.mp4")
    if not capture.isOpened():
        print('quit')
        quit()
    frame_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    while True:
        ret, frame = capture.read()
        if not ret:  # end of file
            break
        img_base64 = image_compression(frame, frame_width, frame_height)
        ws.send(img_base64)
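One caveat with the loop above: VideoCapture decodes a file as fast as the CPU allows, so frames arrive much faster than real time and can flood the relay. A paced variant of the send loop (a sketch; it reuses the ws, capture, frame_width and frame_height objects defined above):

import time

fps = capture.get(cv2.CAP_PROP_FPS) or 25      # fall back to 25 fps if the file reports 0
frame_interval = 1.0 / fps

while True:
    start = time.time()
    ret, frame = capture.read()
    if not ret:
        break
    ws.send(image_compression(frame, frame_width, frame_height))
    # sleep away whatever is left of this frame's time slot
    time.sleep(max(0.0, frame_interval - (time.time() - start)))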
2. FFmpeg pipe streaming + a streaming media server (m7s)
#!/usr/local/bin/python3
# encoding: utf-8
import os
import subprocess
import threading
import time

import cv2


class RTSCapture(cv2.VideoCapture):
    _cur_frame = None
    _reading = False
    # Playback quality: 4 = smooth, 3 = SD, 2 = HD, 1 = ultra HD
    _quality = 3

    @staticmethod
    def create(srcstream, dststream):
        rtscap = RTSCapture(srcstream)
        rtscap.frame_receiver = threading.Thread(target=rtscap.recv_frame, daemon=True)
        rtscap.frame_processer = threading.Thread(target=rtscap.process_frame, daemon=True)
        fps = int(rtscap.get(cv2.CAP_PROP_FPS))
        r = int(2 + rtscap._quality * 0.5)
        rtscap.width = int(rtscap.get(cv2.CAP_PROP_FRAME_WIDTH) / r)
        rtscap.height = int(rtscap.get(cv2.CAP_PROP_FRAME_HEIGHT) / r)
        command = ['G:\\ZLMediaKit_Release\\ffmpeg.exe', '-y',
                   '-f', 'rawvideo',
                   '-vcodec', 'rawvideo',
                   '-pix_fmt', 'bgr24',
                   '-s', "{}x{}".format(rtscap.width, rtscap.height),
                   '-r', str(fps),
                   '-i', '-',
                   '-c:v', 'libx264',
                   '-pix_fmt', 'yuv420p',
                   '-preset', 'ultrafast',
                   '-f', 'rtsp',  # flv / rtsp
                   '-rtsp_transport', 'tcp',  # push over TCP; required on Linux
                   dststream]
        # pipe raw BGR frames into FFmpeg's stdin
        rtscap.p = subprocess.Popen(command, shell=False, stdin=subprocess.PIPE)
        rtscap._reading = True
        return rtscap

    def isOK(self):
        return self.isOpened() and self._reading

    def recv_frame(self):
        # keep only the latest decoded frame so the pusher never lags behind the source
        while self.isOK():
            time.sleep(0.01)
            ok, frame = self.read()
            if not ok:
                break
            self._cur_frame = frame
        self._reading = False

    def process_frame(self):
        while self.isOK():
            time.sleep(0.01)
            try:
                ok, frame = self.read_latest_frame()
                if not ok:
                    continue
                frame1 = cv2.resize(frame, (self.width, self.height))  # downscale before pushing
                self.p.stdin.write(frame1.tobytes())
            except Exception as e:
                ss = str(e)

    def read2(self):
        frame = self._cur_frame
        self._cur_frame = None
        return frame is not None, frame

    def start_read(self):
        # pick the frame source before the worker threads start using it
        self.read_latest_frame = self.read2 if self._reading else self.read
        self.frame_receiver.start()
        self.frame_processer.start()

    def stop_read(self):
        self._reading = False
        if self.frame_receiver.is_alive():
            self.frame_receiver.join()


if __name__ == '__main__':
    rtscap1 = RTSCapture.create("test.mp4", "rtsp://127.0.0.1:554/live/test1")
    rtscap1.start_read()
    os.system('pause')
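Stripped of the threading and quality settings, the core of the pipe approach is simply: open the source, start FFmpeg reading raw BGR frames from stdin, and write each frame into the pipe. A minimal sketch, assuming ffmpeg is on the PATH and an RTSP server such as m7s accepts publishes at rtsp://127.0.0.1:554/live/test1:

import subprocess

import cv2

cap = cv2.VideoCapture("test.mp4")
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS)) or 25

proc = subprocess.Popen([
    'ffmpeg', '-y',
    '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', '{}x{}'.format(w, h), '-r', str(fps),
    '-i', '-',                                  # raw frames arrive on stdin
    '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-preset', 'ultrafast',
    '-f', 'rtsp', '-rtsp_transport', 'tcp',
    'rtsp://127.0.0.1:554/live/test1',
], stdin=subprocess.PIPE)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    proc.stdin.write(frame.tobytes())           # one raw BGR frame per iteration

proc.stdin.close()
proc.wait()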
qq:505645074