# USAGE:
#   python detect_face_parts.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/liudehua.jpg

| |
from collections import OrderedDict
import numpy as np
import argparse
import dlib
import cv2


# Parse the required command-line arguments: the path to dlib's trained
# facial-landmark predictor model file and the path to the input image.
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
    help="path to facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
    help="path to input image")
args = vars(ap.parse_args())
| |
# Map each facial region name to its (start, end) half-open index range
# in dlib's 68-point landmark model; shape[start:end] yields that
# region's (x, y) points.
FACIAL_LANDMARKS_68_IDXS = OrderedDict([
    ("mouth", (48, 68)),
    ("right_eyebrow", (17, 22)),
    ("left_eyebrow", (22, 27)),
    ("right_eye", (36, 42)),
    ("left_eye", (42, 48)),
    ("nose", (27, 36)),
    ("jaw", (0, 17)),
])

# Same mapping for dlib's 5-point landmark model.
# BUG FIX: the original wrote ("nose", (4)) — parentheses without a comma
# do not create a tuple, so the value was the bare int 4, inconsistent
# with every other (start, end) entry and unusable as a slice pair.
# Point 4 is the nose tip, so the half-open range is (4, 5).
FACIAL_LANDMARKS_5_IDXS = OrderedDict([
    ("right_eye", (2, 3)),
    ("left_eye", (0, 1)),
    ("nose", (4, 5)),
])
| |
def shape_to_np(shape, dtype="int"):
    """Convert a dlib landmark detection into a NumPy coordinate array.

    Reads ``shape.num_parts`` points via ``shape.part(i)`` and packs them
    into an array of shape (num_parts, 2), one (x, y) row per landmark.
    """
    # Preallocate so an empty detection still yields shape (0, 2).
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)
    for idx in range(shape.num_parts):
        pt = shape.part(idx)
        coords[idx] = (pt.x, pt.y)
    return coords
| |
def visualize_facial_landmarks(image, shape, colors=None, alpha=0.75):
    """Render every 68-point facial region as a translucent overlay.

    For each region in FACIAL_LANDMARKS_68_IDXS the jaw is drawn as an
    open polyline and every other region as a filled convex hull, then
    the painted overlay is alpha-blended onto a copy of ``image``.
    Returns the blended image; ``image`` itself is not modified.
    """
    # One BGR color per region, in the same order as the OrderedDict keys.
    if colors is None:
        colors = [(19, 199, 109), (79, 76, 240), (230, 159, 23),
            (168, 100, 168), (158, 163, 32),
            (163, 38, 32), (180, 42, 220)]

    overlay = image.copy()
    output = image.copy()

    for idx, region in enumerate(FACIAL_LANDMARKS_68_IDXS.keys()):
        start, end = FACIAL_LANDMARKS_68_IDXS[region]
        pts = shape[start:end]

        if region == "jaw":
            # The jaw is an open curve, not a closed region: connect
            # each consecutive pair of points with a line segment.
            for pt_a, pt_b in zip(pts[:-1], pts[1:]):
                cv2.line(overlay, tuple(pt_a), tuple(pt_b), colors[idx], 2)
        else:
            # Closed regions get a filled convex hull.
            hull = cv2.convexHull(pts)
            cv2.drawContours(overlay, [hull], -1, colors[idx], -1)

    # Blend the painted overlay onto the untouched copy in place.
    cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
    return output
| |
| |
# Initialize dlib's frontal face detector and load the landmark
# predictor model from the path supplied on the command line.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])


# Load the input image and resize it to a fixed width of 500 px,
# scaling the height by the same ratio to preserve aspect.
image = cv2.imread(args["image"])
(h, w) = image.shape[:2]
width = 500
r = width / float(w)
dim = (width, int(h * r))
image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)


# Detect faces in the grayscale image; the second argument is the
# number of image-pyramid upsampling steps.
rects = detector(gray, 1)
| |
| |
# Process each detected face in turn.
for (i, rect) in enumerate(rects):

    # Predict the 68 landmarks for this face and convert them to a
    # NumPy array of (x, y) coordinates.
    shape = predictor(gray, rect)
    shape = shape_to_np(shape)

    # Show each named facial region one at a time.
    # NOTE(review): the inner loop variable `i` shadows the outer
    # enumerate index, and (x, y, w, h) below shadow the module-level
    # w/h — harmless here because the shadowed values are not reused,
    # but worth renaming.
    for (name, (i, j)) in FACIAL_LANDMARKS_68_IDXS.items():
        clone = image.copy()
        cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
            0.7, (0, 0, 255), 2)

        # Draw every landmark point belonging to the current region.
        for (x, y) in shape[i:j]:
            cv2.circle(clone, (x, y), 3, (0, 0, 255), -1)

        # Crop the region's bounding box out of the image and resize
        # the crop to a fixed width of 250 px, preserving aspect.
        (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))

        roi = image[y:y + h, x:x + w]
        (h, w) = roi.shape[:2]
        width = 250
        r = width / float(w)
        dim = (width, int(h * r))
        roi = cv2.resize(roi, dim, interpolation=cv2.INTER_AREA)

        # Display the cropped region next to the annotated image and
        # block until a key is pressed before moving on.
        cv2.imshow("ROI", roi)
        cv2.imshow("Image", clone)
        cv2.waitKey(0)

    # Finally, render all regions at once as a translucent overlay.
    output = visualize_facial_landmarks(image, shape)
    cv2.imshow("Image", output)
    cv2.waitKey(0)
点击查看详情

【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】凌霞软件回馈社区,博客园 & 1Panel & Halo 联合会员上线
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】博客园社区专享云产品让利特惠,阿里云新客6.5折上折
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· DeepSeek “源神”启动!「GitHub 热点速览」
· 微软正式发布.NET 10 Preview 1:开启下一代开发框架新篇章
· C# 集成 DeepSeek 模型实现 AI 私有化(本地部署与 API 调用教程)
· DeepSeek R1 简明指南:架构、训练、本地部署及硬件要求
· 2 本地部署DeepSeek模型构建本地知识库+联网搜索详细步骤
2022-03-04 实时视频通讯
2021-03-04 内部类