[转]OpenCV_Find Basis F-Matrix and computeCorrespondEpilines(获取一对图像的基础矩阵及对应极线)

代码如下:

复制代码
// BasisMatrixCalculate.cpp : 定义控制台应用程序的入口点。
//
 
#include "stdafx.h"

#include <cmath>
#include <iostream>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
// cv::KeyPoint and the common feature-detector interface
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/nonfree/nonfree.hpp> // SurfFeatureDetector (moved out of features2d)
#include <opencv2/legacy/legacy.hpp>   // BruteForceMatcher
int main()
{
    //读取2张图像
    cv::Mat image1 = cv::imread("../../aTestImage/church01.jpg", 0);
    cv::Mat image2 = cv::imread("../../aTestImage/church03.jpg", 0);
    if (!image1.data || !image2.data)
        return 0;
    //使用SURF特征 获取图像特征点
    std::vector<cv::KeyPoint> keyPoints1;
    std::vector<cv::KeyPoint> keyPoints2;
    cv::SurfFeatureDetector surf(3000);
    surf.detect(image1, keyPoints1);
    surf.detect(image2, keyPoints2); //获取两幅图像的特征点
 
    // //展示图像中的keyPoints
    //cv::Mat imagekeyPt;
    //cv::drawKeypoints(image1, keyPoints1, imagekeyPt, cv::Scalar(255, 255, 255), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    //cv::namedWindow("image1SURFkeyPt");
    //cv::imshow("image1SURFkeyPt", imagekeyPt);
    //cv::drawKeypoints(image2, keyPoints2, imagekeyPt, cv::Scalar(255, 255, 255), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    //cv::namedWindow("image2SURFkeyPt");
    //cv::imshow("image2SURFkeyPt", imagekeyPt);
 
    //通过特征点获取描述子
    cv::SurfDescriptorExtractor  surfDesc;  // 构造描述子提取器
    cv::Mat descriptors1, descriptors2;  //描述子记录局部强度差值/梯度变化/位置等信息
    surfDesc.compute(image1, keyPoints1, descriptors1);
    surfDesc.compute(image2, keyPoints2, descriptors2);
 
    //匹配图像的描述子 descriptors
    cv::BruteForceMatcher< cv::L2<float> > matcher; //构造匹配器
    std::vector<cv::DMatch> matches; //匹配描述子
    matcher.match(descriptors1, descriptors2, matches);
    std::cout << "matches size= " << matches.size() << std::endl;
 
    //选择部分描述子 使用 一对图像的基础矩阵 进行匹配观测
    std::vector<cv::DMatch> partmatches; //部分匹配描述子 | 特征点对 的对应 索引结构体
    partmatches.push_back(matches[14]);  //选择第n个匹配描述子
    partmatches.push_back(matches[16]);
    partmatches.push_back(matches[141]);
    partmatches.push_back(matches[242]);
    partmatches.push_back(matches[236]);
    partmatches.push_back(matches[238]);
    //partmatches.push_back(matches[100]);
    partmatches.push_back(matches[200]);
 
 
    //画出选择的匹配描述子 两幅图像连线
    cv::Mat imagePartMatches;
    cv::drawMatches(image1, keyPoints1, image2, keyPoints2,
        partmatches, imagePartMatches, cv::Scalar(255, 255, 255));
    cv::namedWindow("imagePartMatches");
    cv::imshow("imagePartMatches", imagePartMatches);
 
    //将一维向量的keyPoints点转换成二维的Point2f点
    std::vector <int> pointIndexes1;//记录选择的匹配特征点的索引向量
    std::vector <int> pointIndexes2;
 
    for (std::vector<cv::DMatch>::const_iterator it = partmatches.begin();
        it != partmatches.end(); ++it)
    {
        pointIndexes1.push_back(it->queryIdx); //查询图像1特征点索引
        pointIndexes2.push_back(it->trainIdx);  //训练图像2特征点索引
    }
    std::vector <cv::Point2f>  selPoints1, selPoints2;
    cv::KeyPoint::convert(keyPoints1, selPoints1, pointIndexes1);//将索引指定的 特征点 转换成2D点
    cv::KeyPoint::convert(keyPoints2, selPoints2, pointIndexes2);
    画出转换后的点二维点到原图像
    std::vector<cv::Point2f>::const_iterator  it = selPoints1.begin();
    //while (it != selPoints1.end())
    //{
    //    cv::circle(image1, *it, 3, cv::Scalar(255, 255, 255), 5);
    //    ++it;
    //}
    it = selPoints2.begin();
    while (it != selPoints2.end())
    {
        cv::circle(image2, *it, 3, cv::Scalar(255, 255, 255), 2);
        ++it;
    }
    //cv::namedWindow("image1");
    //cv::imshow("image1", image1);
    //cv::namedWindow("image2");
    //cv::imshow("image2", image2);
 
    // 获取该对图像的基础矩阵 (使用7个匹配描述子matches) CV_FM_7POINT
 
    cv::Mat fundemental = cv::findFundamentalMat(cv::Mat(selPoints1), cv::Mat(selPoints2),CV_FM_8POINT);//CV_FM_LMEDS
    std::cout << "F-Matrix size= " << fundemental.rows << "," << fundemental.cols << std::endl;
    
    //使用基础矩阵 在对应图像上绘制外极线
    std::vector<cv::Vec3f> lines1; //存储外极线
    cv::computeCorrespondEpilines(cv::Mat(selPoints1), 1, fundemental, lines1);//获取图像1中的二维特征点 在图像2中对应的外极线
    for (std::vector<cv::Vec3f>::const_iterator it = lines1.begin(); 
          it != lines1.end(); ++it)
    {
        cv::line(image2, 
            cv::Point(0, -(*it)[2] / (*it)[1] ),
            cv::Point(image2.cols ,  -( (*it)[2] + (*it)[0] * image2.cols )/(*it)[1] ),
            cv::Scalar(255,255,255));
    }
    cv::namedWindow("Image2 Epilines");
    cv::imshow("Image2 Epilines", image2);
 
    cv::waitKey(0);
    return 0;
}
复制代码

结果:

 

原文链接:https://blog.csdn.net/shyjhyp11/article/details/66526685

 

posted @   rainbow70626  阅读(92)  评论(0编辑  收藏  举报
相关博文:
阅读排行:
· 终于写完轮子一部分:tcp代理 了,记录一下
· 震惊!C++程序真的从main开始吗?99%的程序员都答错了
· 别再用vector<bool>了!Google高级工程师:这可能是STL最大的设计失误
· 单元测试从入门到精通
· 【硬核科普】Trae如何「偷看」你的代码?零基础破解AI编程运行原理
历史上的今天:
2020-10-09 Python设计模式(第2版)中文的pdf电子书
点击右上角即可分享
微信分享提示