from facenet_pytorch import MTCNN, InceptionResnetV1
from PIL import Image, ImageDraw
import cv2
import time
import numpy as np
import torch
import faiss
class FaissDB(object):
    """Thin wrapper around a flat (brute-force) L2 faiss index for similarity search."""

    def __init__(self, d=512):
        """Create an exact Euclidean-distance index for d-dimensional vectors."""
        self.index = faiss.IndexFlatL2(d)

    def add(self, data):
        """Append feature vectors to the index (shape (n, d))."""
        self.index.add(data)

    def search(self, target, k=1):
        """Look up the k nearest stored vectors for each query vector.

        Args:
            target: query vector(s), shape (n, d).
            k: number of nearest neighbours to return per query.
        Returns:
            Index array of shape (n, k); the distance array is discarded.
        """
        distances, indices = self.index.search(target, k)
        # NOTE(review): a distance threshold (e.g. < 0.6) was sketched here but
        # never enabled — distances are computed and thrown away.
        return indices
class Video(object):
    """Webcam face detection + recognition loop using MTCNN and InceptionResnetV1."""

    def __init__(self, mtcnn, resnet):
        # Face detector (MTCNN) and face-embedding network (InceptionResnetV1),
        # both expected to already be in eval mode.
        self.mtcnn = mtcnn
        self.resnet = resnet

    def detection_face(self, npy_path="/Users/wb-fcj414969/project/facenet-pytorch-master/test/img/fcj.npy"):
        """Open the default camera, detect the most prominent face per frame,
        query its embedding against a faiss index and draw the result on screen.

        Args:
            npy_path: path to a saved (n, 512) embedding array of the known
                person. Defaults to the original hard-coded path so existing
                callers are unaffected.

        Press ESC to quit.
        """
        my_faiss = FaissDB()
        d = 512  # embedding dimension produced by InceptionResnetV1
        # Pad the index with 100k random vectors so lookups are non-trivial;
        # the known person's embeddings are appended after, so any returned
        # index >= 100000 corresponds to the known person.
        xb = torch.randn((100000, d), dtype=torch.float)
        my_faiss.add(xb)
        known = torch.tensor(np.load(npy_path))
        my_faiss.add(known)

        capture = cv2.VideoCapture(0)  # 0 = built-in camera
        try:
            # Resizable preview window.
            cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
            capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
            while True:
                frame_start = time.time()
                ret, frame = capture.read()
                if not ret:
                    # Camera unavailable / stream ended: stop instead of
                    # crashing in cvtColor on a None frame.
                    break
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame = cv2.flip(frame, 1)  # mirror so the preview feels natural
                # Detect faces with landmarks.
                boxes, probs, points = self.mtcnn.detect(frame, landmarks=True)
                if boxes is not None:
                    max_box, max_prob, max_point = self.mtcnn.select_boxes(
                        boxes, probs, points, frame)
                    img = Image.fromarray(frame)  # ndarray -> PIL Image
                    draw = ImageDraw.Draw(img)
                    draw.rectangle(
                        (tuple(max_box[0][:2].tolist()), tuple(max_box[0][2:].tolist())),
                        width=1)
                    for landmark in max_point[0]:
                        draw.point(tuple(landmark), fill=(255, 0, 0))
                    # Crop the face and compute its embedding.
                    img_cropped = self.mtcnn(frame)
                    if img_cropped is not None:  # cropping can fail even when detect() found boxes
                        img_embedding = self.resnet(img_cropped).detach()
                        data = my_faiss.search(img_embedding)
                        # Heuristic kept from the original code: any non-zero
                        # nearest-neighbour index is treated as a match and
                        # labelled with the known person's name.
                        if data[0]:
                            draw.text(tuple(max_box[0][:2].tolist()), "fjc")
                    frame = np.array(img)  # PIL Image -> ndarray
                # RGB -> BGR for OpenCV display.
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                cv2.imshow("frame", frame)
                elapsed = time.time() - frame_start
                # Report the *measured* frame time / effective FPS. The original
                # printed cv2.CAP_PROP_FPS, which is an OpenCV property ID
                # constant, not a frame rate.
                print("frame time: %.3fs (%.1f FPS)" % (elapsed, 1.0 / elapsed if elapsed else 0.0))
                # ESC exits the loop.
                if cv2.waitKey(100) == 27:
                    break
        finally:
            capture.release()  # original leaked the camera handle
            cv2.destroyAllWindows()
if __name__ == "__main__":
    # Face detector: keep_all=True returns every detected face; select_boxes
    # inside Video.detection_face later picks the most prominent one.
    image_size = 160
    mtcnn = MTCNN(image_size=image_size, margin=0, keep_all=True)
    mtcnn.eval()
    # Embedding network pretrained on VGGFace2, switched to inference mode.
    resnet = InceptionResnetV1(pretrained='vggface2')
    resnet.eval()
    video = Video(mtcnn, resnet)
    video.detection_face()
# (removed: unrelated web-page advertisement/link text accidentally appended
#  to this source file by a scraper — it was not Python and broke parsing)