Applying design patterns in Python
Factory + Strategy + Registration
from abc import abstractmethod

class VideoFactory(object):
    def __init__(self):
        self.video = {}

    def register_video(self, name, video):
        self.video[name] = video

    def create_video(self, name):
        if name in self.video:
            return self.video[name]()
        else:
            raise ValueError("Unknown video key")

    # VideoFactory doubles as the strategy base class: every registered
    # product subclasses it and overrides exec().
    @abstractmethod
    def exec(self, frame):
        pass
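A quick sketch of how the registry is meant to be used (PrintVideo here is a throwaway placeholder, not part of the project; the videoFactory instance mirrors the one created in app):

import numpy as np

class PrintVideo(VideoFactory):
    def exec(self, frame):
        print("frame shape:", frame.shape)

videoFactory = VideoFactory()
videoFactory.register_video(0, PrintVideo)
# create_video looks up and instantiates the registered class;
# exec then runs that strategy on the frame
videoFactory.create_video(0).exec(np.zeros((480, 640, 3), dtype=np.uint8))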
The three strategy classes below each implement this exec method for real.
import ctypes
from datetime import datetime
import cv2
import snap7
from NetSDK import SDK_Enum
from NetSDK.SDK_Enum import SDK_PTZ_ControlType
from NetSDK.SDK_Struct import SDK_PTZ_LOCATION_INFO
from app import client, redis_client, sdk, loginID, Commworking
from app.factory.create_factory import VideoFactory
# Move the camera until the detected target is centered below it
class MoveVideo(VideoFactory):
    def exec(self, frame):
        print("MoveVideo.exec")
        if Commworking:
            XToPLC = client.read_area(snap7.types.Areas.DB, 1, 2000, 2)
            YToPLC = client.read_area(snap7.types.Areas.DB, 1, 2002, 2)
        img = frame
        # Downscale the image to cut processing cost
        scale_percent = 50  # scaling percentage
        width = int(img.shape[1] * scale_percent / 100)
        height = int(img.shape[0] * scale_percent / 100)
        dim = (width, height)
        img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
        # Center of the (resized) image, so it matches the cx/cy found below
        height, width, _ = img.shape
        img_center_x = int(width / 2)
        img_center_y = int(height / 2)
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Edge detection
        edges = cv2.Canny(gray, 50, 300)
        # Morphological closing and blurring to clean up the edge map
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
        blurred = cv2.GaussianBlur(closed, (5, 5), 0)
        cx, cy = 0, 0
        # Find contours: cv2.RETR_EXTERNAL keeps only the outermost contours,
        # cv2.CHAIN_APPROX_SIMPLE stores them as simplified point lists.
        contours, hierarchy = cv2.findContours(blurred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            # Compute the contour area and skip contours that are too small
            area = cv2.contourArea(cnt)
            if area < 90000:
                continue
            # Fit an ellipse to the contour; fitEllipse returns the
            # center point, axis lengths and rotation angle
            ellipse = cv2.fitEllipse(cnt)
            center, axes, angle = ellipse
            cx, cy = int(center[0]), int(center[1])
            # Draw the fitted ellipse in green with a 5 px line,
            # and mark its center point
            cv2.ellipse(img, ellipse, (0, 255, 0), 5)
            cv2.circle(img, (cx, cy), 5, (0, 255, 0), -1)
        # Requiring the target to hit the exact center only works if there is
        # a single circle; a tolerance region around the center would be more
        # robust. Mark the image center for reference.
        cv2.circle(img, (img_center_x, img_center_y), 5, (0, 255, 0), 1)
        if Commworking:
            try:
                # Write the two values into the PLC buffers
                snap7.util.set_int(XToPLC, 0, cy)
                snap7.util.set_int(YToPLC, 0, img_center_y)
                # Push the updated buffers back to the PLC
                client.write_area(snap7.types.Areas.DB, 1, 2000, XToPLC)
                client.write_area(snap7.types.Areas.DB, 1, 2002, YToPLC)
            except Exception:
                pass
        cv2.imshow("window_name", img)
        cv2.waitKey(1)
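The detection pipeline above (resize, grayscale, Canny, closing, blur, findContours, fitEllipse) can be exercised offline on a still image before wiring it to the camera and PLC. A minimal sketch, assuming a test image at a made-up path:

import cv2

img = cv2.imread("d:/z_images/test.jpg")  # hypothetical path; use a real capture
assert img is not None, "test image not found"
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 300)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
blurred = cv2.GaussianBlur(closed, (5, 5), 0)
contours, _ = cv2.findContours(blurred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    # fitEllipse needs at least 5 points; keep the same area gate as above
    if len(cnt) >= 5 and cv2.contourArea(cnt) >= 90000:
        center, axes, angle = cv2.fitEllipse(cnt)
        print("ellipse center:", int(center[0]), int(center[1]))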
# Step where the camera moves down
class DownVideo(VideoFactory):
    def exec(self, frame):
        print("DownVideo.exec: move down")
# Take a picture once the camera has reached the target position
class CaptureVideo(VideoFactory):
    def exec(self, frame):
        print("CaptureVideo.exec: capture")
        # 1. Determine which preset position to capture next: read the
        #    counter from Redis, then look up that position's pan/tilt values.
        value = redis_client.get("current_capture")
        if value is None:
            print("current_capture key not found in Redis")
            return
        # Positions 1-5 store their pan/tilt in Redis as X1/Y1 .. X5/Y5
        # (this lookup could itself be driven by a small factory).
        valueX = 0
        valueY = 0
        idx = int(value)
        if 1 <= idx <= 5:
            valueX = int(redis_client.get("X%d" % idx))
            valueY = int(redis_client.get("Y%d" % idx))
            print("valueX%d=%d valueY%d=%d" % (idx, valueX, idx, valueY))
        # idx == 6 means all five positions are done; nothing to look up
        # 2. Drive the PTZ to the position and verify arrival.
        #    This needs the login handle; capture and save once in place.
        buf_size = ctypes.sizeof(SDK_PTZ_LOCATION_INFO)
        pBuf = ctypes.create_string_buffer(buf_size)
        nBufLen = buf_size
        pRetLen = ctypes.c_int()
        waittime = 100000
        print(str(redis_client.get("exec")))
        if int(redis_client.get("exec")) == 0:
            d = sdk.PTZControlEx2(loginID, 0, SDK_PTZ_ControlType.EXACTGOTO, valueX, valueY, 15, False)
            redis_client.set("exec", 1)
        s = sdk.QueryDevState(loginID, SDK_Enum.EM_QUERY_DEV_STATE_TYPE.PTZ_LOCATION, pBuf, nBufLen, 100, waittime)
        # Reinterpret the pBuf buffer as s SDK_PTZ_LOCATION_INFO structs
        ptz_pos_arr = (SDK_PTZ_LOCATION_INFO * s).from_buffer(pBuf)
        num_x = 0
        num_y = 0
        for i in range(s):
            ptz_pos = ptz_pos_arr[i]
            print("channel:", i + 1)
            print("PTZ pan:", ptz_pos.nPTZPan)
            print("PTZ tilt:", ptz_pos.nPTZTilt)
            num_x = ptz_pos.nPTZPan
            num_y = ptz_pos.nPTZTilt
        if int(num_x) == valueX and int(num_y) == valueY:
            print("X=" + str(num_x) + " Y=" + str(num_y))
            current_time = datetime.now()
            file_name = current_time.strftime("%Y-%m-%d_%H-%M-%S.jpg")
            # Save the frame to the target folder
            save_folder = "d:/z_images/"
            cv2.imwrite(save_folder + file_name, frame)
            redis_client.set("current_capture", int(value) + 1)
            redis_client.set("exec", 0)
Execution
def my_thread():
    # By the time the camera finishes moving, a frame grabbed earlier is
    # stale; reading the shared global_frame gets the latest frame each time.
    cap = loginCap()
    videoFactory.register_video(0, MoveVideo)
    videoFactory.register_video(1, DownVideo)
    videoFactory.register_video(2, CaptureVideo)
    while True:
        frame = global_frame
        print("worker thread running")
        frameStep(frame, loginID)
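frameStep is imported from app.putil.test_piple and is not shown in this post. A hedged sketch of what such a dispatcher could look like, assuming a Redis key (here named step, an invented name) that tracks the current stage:

from app import redis_client, videoFactory

def frameStep(frame, loginID):
    # Hypothetical dispatcher: read the current stage from Redis and let the
    # registered strategy handle the frame (0 = move, 1 = down, 2 = capture)
    step = int(redis_client.get("step") or 0)
    videoFactory.create_video(step).exec(frame)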
Note the shared image variable: global_frame is a module-level variable that stores the most recently read frame, and this is the frame the worker thread picks up. The worker thread runs fewer iterations than the main thread, but that hardly matters: the main thread loops many times per second and updates global_frame with every frame it reads, so as long as the worker gets roughly one frame per second the difference is negligible.
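Sharing a bare module-level variable works here because the worker only ever reads the latest value, but a lock-protected holder makes the hand-off explicit. A sketch, not part of the original code:

import threading

class LatestFrame:
    """Holds only the most recent frame; readers never block on a backlog."""
    def __init__(self):
        self._lock = threading.Lock()
        self._frame = None

    def set(self, frame):
        with self._lock:
            self._frame = frame

    def get(self):
        with self._lock:
            return self._frame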
import datetime
import threading
import cv2
import snap7
from NetSDK.NetSDK import NetClient
from NetSDK.SDK_Enum import EM_LOGIN_SPAC_CAP_TYPE
from NetSDK.SDK_Struct import NET_OUT_LOGIN_WITH_HIGHLEVEL_SECURITY, NET_IN_LOGIN_WITH_HIGHLEVEL_SECURITY
# Streaming camera frames back over HTTP
from flask import Response, request, jsonify
from app import app, db, sdk, getLoginId, loginID, redis_client, user_name, user_pwd, camera_ip, client, PLC_IP, \
    videoFactory
from app.factory.video_factory import MoveVideo, DownVideo, CaptureVideo
from app.putil.test_piple import frameStep
def loginCap():
cap = cv2.VideoCapture(
"rtsp://%s:%s@%s/cam/realmonitor?channel=%d&subtype=0" % (user_name, user_pwd, camera_ip, 1))
return cap
# global_frame is shared because the worker thread needs a frame to act on.
# If the worker opened its own capture it would read too slowly (the main
# thread keeps its own loop running and hogs the stream), so real-time reads
# from a second consumer are impractical. Instead the main thread reads in
# real time and keeps updating this shared frame; the worker cannot see every
# frame, but the frame it gets is at most about a second old, which is
# acceptable for taking snapshots.
global_frame = 0
def gen():
    # Logging in only once is not enough: a page refresh restarts any step
    # counter kept here, so the current step cannot simply be counted locally.
    # It has to be derived from the device's actual state: has it reached the
    # position, and has it gone down? If it has not gone down we are still
    # locating; if it has, we wait for the result. The changing state tells
    # us which step we are in.
    step = 1
    cap = loginCap()
    now = datetime.datetime.now()
    future_time = now + datetime.timedelta(minutes=10)
    ret, frame = cap.read()
    if ret:
        global global_frame
        global_frame = frame
    t = threading.Thread(target=my_thread)
    # Start the worker only once; it runs its own endless loop until the
    # task is done.
    t.start()
    while True:
        # Re-open the RTSP capture every 10 minutes to keep it alive
        if datetime.datetime.now() >= future_time:
            cap = loginCap()
            future_time = datetime.datetime.now() + datetime.timedelta(minutes=10)
        frame = get_frameTwo(cap)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
# Open question: how should the PLC deliver its signal?
def get_frameTwo(cap):
    ret, frame = cap.read()
    if ret:
        global global_frame
        global_frame = frame
    else:
        # Reuse the last good frame if this read failed
        frame = global_frame
    # Handing each frame to a freshly created thread would be far too slow;
    # the worker reads the shared global_frame instead.
    frame = cv2.resize(frame, (900, 600))
    # Compress the frame as JPEG for the MJPEG stream
    ret, jpeg = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 50])
    return jpeg.tobytes()
def my_thread():
    # By the time the camera finishes moving, a frame grabbed earlier is
    # stale; reading the shared global_frame gets the latest frame each time.
    cap = loginCap()
    videoFactory.register_video(0, MoveVideo)
    videoFactory.register_video(1, DownVideo)
    videoFactory.register_video(2, CaptureVideo)
    while True:
        frame = global_frame
        print("worker thread running")
        frameStep(frame, loginID)
@app.route('/api/video_feed')
def video_feed():
    # Stream the multipart JPEG frames (MJPEG) produced by gen()
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
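The endpoint can be viewed directly in a browser or embedded with an img tag pointing at /api/video_feed. For a quick scripted check, a sketch that pulls a few JPEG frames out of the multipart stream (the host and port are assumptions about where the Flask app runs):

import requests

# Assumed local dev address; adjust to wherever the Flask app is served
stream = requests.get("http://127.0.0.1:5000/api/video_feed", stream=True)
buf = b""
frames = 0
for chunk in stream.iter_content(chunk_size=4096):
    buf += chunk
    start = buf.find(b"\xff\xd8")  # JPEG start-of-image marker
    end = buf.find(b"\xff\xd9")    # JPEG end-of-image marker
    if start != -1 and end != -1 and end > start:
        jpg = buf[start:end + 2]
        buf = buf[end + 2:]
        with open("frame_%d.jpg" % frames, "wb") as f:
            f.write(jpg)
        frames += 1
        if frames >= 3:
            break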