Python Face Detection (Offline)
Python version: 3.7.0
1. Install cmake
pip install cmake
2. Install boost
pip install boost
3. Install dlib (pip builds it from source, which is why CMake is installed first)
pip install dlib
4. Install face_recognition
pip install face_recognition
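To confirm the installation worked, a quick import check from Python is enough. A minimal sketch; the printed version number will depend on your environment:

import dlib
import face_recognition

# If both imports succeed the build is fine; print dlib's version as a sanity check
print("dlib", dlib.__version__)
print("face_recognition imported successfully")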
5. Verify
face_recognition <known-faces folder> <image(s) to recognize>
Output: the file name of each image followed by the recognized person's name
Note: name each known-face image file after the person it shows
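The same check can be done from the Python API with compare_faces. A minimal sketch, assuming two placeholder files known.jpg and unknown.jpg:

import face_recognition

# Placeholder file names: one image of a known person, one image to test
known_image = face_recognition.load_image_file("known.jpg")
unknown_image = face_recognition.load_image_file("unknown.jpg")

known_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encoding = face_recognition.face_encodings(unknown_image)[0]

# Prints [True] if the two faces are judged to be the same person (default tolerance 0.6)
print(face_recognition.compare_faces([known_encoding], unknown_encoding))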
6. Find face locations
face_detection <image path>
Output: the pixel coordinates of each face
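The output is CSV, one line per detected face, in the order filename,top,right,bottom,left. For example (the file name and numbers below are only illustrative):

face_detection ./example.jpg
./example.jpg,71,185,269,33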
7. Adjust the tolerance
face_recognition --tolerance <value> <known-faces folder> <image(s) to recognize>
Note: the default is 0.6; a lower value makes matching stricter
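For example, to require a stricter match than the default (the paths are placeholders):

face_recognition --tolerance 0.54 ./known_faces/ ./unknown.jpg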
8. Show the face distance for each match
face_recognition --show-distance true <known-faces folder> <image(s) to recognize>
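With --show-distance true, each output line gains a third field holding the distance; a smaller distance means a closer match. Illustrative output:

unknown.jpg,Barack Obama,0.38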
9. If you only want the name of the person in each photo and don't care about the file name, do this:
face_recognition <known-faces folder> <image(s) to recognize> | cut -d ',' -f2
10. Speed up recognition
face_recognition --cpus <number of cores> <known-faces folder> <image(s) to recognize>
Recognize using four cores:
face_recognition --cpus 4 <known-faces folder> <image(s) to recognize>
Recognize using all cores:
face_recognition --cpus -1 <known-faces folder> <image(s) to recognize>
11. Automatically find all faces in an image
import face_recognition
image = face_recognition.load_image_file("吴京.jpg")
face_locations = face_recognition.face_locations(image)
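Continuing from the snippet above, the detected locations can be printed like this (a small sketch; face_locations is the list returned above):

# Each location is a (top, right, bottom, left) tuple of pixel coordinates
print("Found {} face(s) in this image".format(len(face_locations)))
for top, right, bottom, left in face_locations:
    print("Face at top={}, right={}, bottom={}, left={}".format(top, right, bottom, left))

The longer example below runs the same pipeline on live video from a webcam.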
import face_recognition
import cv2
import numpy as np

# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
#   1. Process each video frame at 1/4 resolution (though still display it at full resolution)
#   2. Only detect faces in every other frame of video.

# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]

# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]

# Create arrays of known face encodings and their names
known_face_encodings = [
    obama_face_encoding,
    biden_face_encoding
]
known_face_names = [
    "Barack Obama",
    "Joe Biden"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"

            # # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]

            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

    # Display the resulting image
    cv2.imshow('Video', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
Bonus: recognize faces from two cameras at once (one thread per camera)
import cv2
import threading
import face_recognition
import numpy as np
import os


class camThread(threading.Thread):
    """One thread per camera; each thread runs its own preview/recognition loop."""

    def __init__(self, previewName, camID):
        threading.Thread.__init__(self)
        self.previewName = previewName
        self.camID = camID

    def run(self):
        print("Starting " + self.previewName)
        camPreview(self.previewName, self.camID)


def camPreview(previewName, camID):
    cv2.namedWindow(previewName)
    video_capture = cv2.VideoCapture(camID)
    if video_capture.isOpened():
        rval, frame = video_capture.read()
    else:
        rval = False

    # Build the known-face database from every image in ./face/
    # (each file name, minus its extension, is used as the person's name)
    known_face_encodings = []
    known_face_names = []
    imagelist = os.listdir('./face/')
    for imagename in imagelist:
        image = face_recognition.load_image_file("./face/" + imagename)
        face_encoding = face_recognition.face_encodings(image)[0]
        known_face_encodings.append(face_encoding)
        subname = imagename.split('.')[0]
        known_face_names.append(subname)

    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    while rval:
        rval, frame = video_capture.read()

        # Work on a quarter-size RGB copy of the frame to speed up detection
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video
        if process_this_frame:
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"
                # Use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                face_names.append(name)

        process_this_frame = not process_this_frame

        # Draw a box and name label for each face, scaling coordinates back up to full size
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        cv2.imshow(previewName, frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyWindow(previewName)


# One thread per camera: camera 0 and camera 1
thread1 = camThread("Camera 1", 0)
thread2 = camThread("Camera 2", 1)
thread1.start()
thread2.start()