| import numpy as np |
| import cv2 |
| |
class Stitcher:
    """Stitch two horizontally overlapping images into a panorama.

    Pipeline: SIFT keypoints + descriptors -> Lowe's ratio-test matching
    -> RANSAC homography -> perspective warp of the right image onto the
    left image's plane.
    """

    def stitch(self, images, ratio=0.75, reprojThresh=4.0, showMatches=False):
        """Stitch a (left, right) image pair.

        Parameters:
            images: sequence of two BGR images ordered (left, right);
                the right image is warped into the left image's plane.
            ratio: Lowe's ratio-test threshold for descriptor matching.
            reprojThresh: RANSAC reprojection threshold in pixels.
            showMatches: when True, also return a side-by-side match
                visualization image.

        Returns:
            The stitched panorama, or (panorama, visualization) when
            showMatches is True, or None when too few keypoint matches
            were found to estimate a homography.

        Side effects: displays intermediate results via cv_show (blocks
        until a key is pressed).
        """
        # images[0] is the left/base image (imageB), images[1] the right
        # image (imageA) that gets warped onto imageB's plane.
        (imageB, imageA) = images

        # Keypoint coordinates and SIFT descriptors for both images.
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)

        # Estimate the homography H mapping imageA points onto imageB.
        M = self.matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)

        # Not enough reliable matches -> no homography -> nothing to stitch.
        if M is None:
            return None

        (matches, H, status) = M

        # Warp the right image into a canvas wide enough to hold both
        # images side by side; height is taken from the right image.
        result = cv2.warpPerspective(
            imageA, H, (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        self.cv_show('result', result)

        # Paste the left image into the top-left corner of the canvas,
        # overwriting the warped content in that region.
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
        self.cv_show('result', result)

        if showMatches:
            # Build the inlier-match visualization for debugging.
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches, status)
            return (result, vis)

        return result

    def cv_show(self, name, img):
        """Display img in a window titled name; block until any key is pressed."""
        cv2.imshow(name, img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def detectAndDescribe(self, image):
        """Detect SIFT keypoints and compute descriptors for a BGR image.

        Returns:
            (kps, features): kps is an (N, 2) float32 array of keypoint
            (x, y) coordinates; features is the (N, 128) SIFT descriptor
            matrix (or None when no keypoints were found).
        """
        # SIFT operates on intensity; convert explicitly so detection is
        # performed on the grayscale image.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # SIFT lives in the main module since OpenCV 4.4; fall back to the
        # old xfeatures2d location for older contrib builds.
        try:
            descriptor = cv2.SIFT_create()
        except AttributeError:
            descriptor = cv2.xfeatures2d.SIFT_create()

        # BUG FIX: gray was previously computed but the color image was
        # passed to detectAndCompute; use the grayscale image as intended.
        (kps, features) = descriptor.detectAndCompute(gray, None)

        # Keep only the (x, y) coordinates as a float32 array, which is
        # what cv2.findHomography expects downstream.
        kps = np.float32([kp.pt for kp in kps])

        return (kps, features)

    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh):
        """Match descriptors between two images and estimate a homography.

        Parameters:
            kpsA, kpsB: (N, 2) keypoint coordinate arrays.
            featuresA, featuresB: corresponding SIFT descriptor matrices.
            ratio: Lowe's ratio-test threshold.
            reprojThresh: RANSAC reprojection threshold in pixels.

        Returns:
            (matches, H, status) where matches is a list of
            (trainIdx, queryIdx) pairs, H the 3x3 homography mapping
            A-points onto B-points, and status the RANSAC inlier mask;
            or None when 4 or fewer matches survive the ratio test.
        """
        # Brute-force matcher; take the two nearest neighbors per
        # descriptor for Lowe's ratio test.
        matcher = cv2.BFMatcher()
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)

        matches = []
        for m in rawMatches:
            # Keep a match only when the best neighbor is clearly better
            # than the second best (Lowe's ratio test).
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        # A homography has 8 degrees of freedom, so at least 4 point
        # correspondences are required; demand strictly more for RANSAC.
        if len(matches) > 4:
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # Robustly estimate H (A -> B) with RANSAC; status flags the
            # inlier correspondences.
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, reprojThresh)

            return (matches, H, status)

        # Too few matches to estimate a homography.
        return None

    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        """Render the two images side by side with green lines joining inlier matches.

        Returns the visualization image (A on the left, B on the right).
        """
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        # Canvas tall enough for the taller image, wide enough for both.
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB

        for ((trainIdx, queryIdx), s) in zip(matches, status):
            # Draw only RANSAC inliers (status == 1).
            if s == 1:
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                # Shift B's x-coordinate by wA since B sits to the right of A.
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

        return vis
import cv2


def main():
    """Load the left/right image pair, stitch them, and display all results."""
    imageA = cv2.imread("01_Picture/21_Left_01.png")
    imageB = cv2.imread("01_Picture/22_Right_01.png")
    # cv2.imread returns None (no exception) for a missing/unreadable file;
    # fail loudly instead of crashing later inside the stitcher.
    if imageA is None or imageB is None:
        raise FileNotFoundError(
            "could not read input images under 01_Picture/")

    stitcher = Stitcher()
    # stitch() returns None when too few keypoint matches were found;
    # guard before unpacking the (result, vis) tuple.
    stitched = stitcher.stitch([imageA, imageB], showMatches=True)
    if stitched is None:
        raise RuntimeError("not enough keypoint matches to stitch the images")
    (result, vis) = stitched

    cv2.imshow("Image A", imageA)
    cv2.imshow("Image B", imageB)
    cv2.imshow("Keypoint Matches", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


# Guard so importing this module does not open GUI windows.
if __name__ == "__main__":
    main()
点击查看详情

【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· TypeScript + Deepseek 打造卜卦网站:技术与玄学的结合
· 阿里巴巴 QwQ-32B真的超越了 DeepSeek R-1吗?
· 如何调用 DeepSeek 的自然语言处理 API 接口并集成到在线客服系统
· 【译】Visual Studio 中新的强大生产力特性
· 2025年我用 Compose 写了一个 Todo App