https://github.com/gaoxiang12/slambook/blob/master/ch7/pose_estimation_2d2d.cpp
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 | cmake_minimum_required( VERSION 2.8 ) project( vo1 ) set ( CMAKE_BUILD_TYPE "Release" ) set ( CMAKE_CXX_FLAGS "-std=c++11 -O3" ) # 添加cmake模块以使用g2o list ( APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR} / cmake_modules ) find_package( OpenCV 3.1 REQUIRED ) # find_package( OpenCV REQUIRED ) # use this if in OpenCV2 find_package( G2O REQUIRED ) find_package( CSparse REQUIRED ) include_directories( ${OpenCV_INCLUDE_DIRS} ${G2O_INCLUDE_DIRS} ${CSPARSE_INCLUDE_DIR} "/usr/include/eigen3/" ) add_executable( feature_extraction feature_extraction.cpp ) target_link_libraries( feature_extraction ${OpenCV_LIBS} ) # add_executable( pose_estimation_2d2d pose_estimation_2d2d.cpp extra.cpp ) # use this if in OpenCV2 add_executable( pose_estimation_2d2d pose_estimation_2d2d.cpp ) target_link_libraries( pose_estimation_2d2d ${OpenCV_LIBS} ) # add_executable( triangulation triangulation.cpp extra.cpp) # use this if in opencv2 add_executable( triangulation triangulation.cpp ) target_link_libraries( triangulation ${OpenCV_LIBS} ) add_executable( pose_estimation_3d2d pose_estimation_3d2d.cpp ) target_link_libraries( pose_estimation_3d2d ${OpenCV_LIBS} ${CSPARSE_LIBRARY} g2o_core g2o_stuff g2o_types_sba g2o_csparse_extension ) add_executable( pose_estimation_3d3d pose_estimation_3d3d.cpp ) target_link_libraries( pose_estimation_3d3d ${OpenCV_LIBS} g2o_core g2o_stuff g2o_types_sba g2o_csparse_extension ${CSPARSE_LIBRARY} ) |
pose_estimation_2d2d.cpp
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 | #include <iostream> #include <opencv2/core/core.hpp> #include <opencv2/features2d/features2d.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/calib3d/calib3d.hpp> / / #include "extra.h" // use this if in OpenCV2 using namespace std; using namespace cv; / * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * 本程序演示了如何使用 2D - 2D 的特征匹配估计相机运动 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * / void find_feature_matches ( const Mat& img_1, const Mat& img_2, std::vector<KeyPoint>& keypoints_1, std::vector<KeyPoint>& keypoints_2, std::vector< DMatch >& matches ); void pose_estimation_2d2d ( std::vector<KeyPoint> keypoints_1, std::vector<KeyPoint> keypoints_2, std::vector< DMatch > matches, Mat& R, Mat& t ); / / 像素坐标转相机归一化坐标 Point2d pixel2cam ( const Point2d& p, const Mat& K ); int main ( int argc, char * * argv ) { if ( argc ! 
= 3 ) { cout<< "usage: pose_estimation_2d2d img1 img2" <<endl; return 1 ; } / / - - 读取图像 Mat img_1 = imread ( argv[ 1 ], CV_LOAD_IMAGE_COLOR ); Mat img_2 = imread ( argv[ 2 ], CV_LOAD_IMAGE_COLOR ); vector<KeyPoint> keypoints_1, keypoints_2; vector<DMatch> matches; find_feature_matches ( img_1, img_2, keypoints_1, keypoints_2, matches ); cout<< "一共找到了" <<matches.size() << "组匹配点" <<endl; / / - - 估计两张图像间运动 Mat R,t; pose_estimation_2d2d ( keypoints_1, keypoints_2, matches, R, t ); / / - - 验证E = t^R * scale Mat t_x = ( Mat_<double> ( 3 , 3 ) << 0 , - t.at<double> ( 2 , 0 ), t.at<double> ( 1 , 0 ), t.at<double> ( 2 , 0 ), 0 , - t.at<double> ( 0 , 0 ), - t.at<double> ( 1 , 0 ), t.at<double> ( 0 , 0 ), 0 ); cout<< "t^R=" <<endl<<t_x * R<<endl; / / - - 验证对极约束 Mat K = ( Mat_<double> ( 3 , 3 ) << 520.9 , 0 , 325.1 , 0 , 521.0 , 249.7 , 0 , 0 , 1 ); for ( DMatch m: matches ) { Point2d pt1 = pixel2cam ( keypoints_1[ m.queryIdx ].pt, K ); Mat y1 = ( Mat_<double> ( 3 , 1 ) << pt1.x, pt1.y, 1 ); Point2d pt2 = pixel2cam ( keypoints_2[ m.trainIdx ].pt, K ); Mat y2 = ( Mat_<double> ( 3 , 1 ) << pt2.x, pt2.y, 1 ); Mat d = y2.t() * t_x * R * y1; cout << "epipolar constraint = " << d << endl; } return 0 ; } void find_feature_matches ( const Mat& img_1, const Mat& img_2, std::vector<KeyPoint>& keypoints_1, std::vector<KeyPoint>& keypoints_2, std::vector< DMatch >& matches ) { / / - - 初始化 Mat descriptors_1, descriptors_2; / / used in OpenCV3 Ptr<FeatureDetector> detector = ORB::create(); Ptr<DescriptorExtractor> descriptor = ORB::create(); / / use this if you are in OpenCV2 / / Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" ); / / Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" ); Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create ( "BruteForce-Hamming" ); / / - - 第一步:检测 Oriented FAST 角点位置 detector - >detect ( img_1,keypoints_1 ); detector - >detect ( img_2,keypoints_2 ); / / - - 第二步:根据角点位置计算 BRIEF 描述子 descriptor - >compute ( img_1, 
keypoints_1, descriptors_1 ); descriptor - >compute ( img_2, keypoints_2, descriptors_2 ); / / - - 第三步:对两幅图像中的BRIEF描述子进行匹配,使用 Hamming 距离 vector<DMatch> match; / / BFMatcher matcher ( NORM_HAMMING ); matcher - >match ( descriptors_1, descriptors_2, match ); / / - - 第四步:匹配点对筛选 double min_dist = 10000 , max_dist = 0 ; / / 找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离 for ( int i = 0 ; i < descriptors_1.rows; i + + ) { double dist = match[i].distance; if ( dist < min_dist ) min_dist = dist; if ( dist > max_dist ) max_dist = dist; } printf ( "-- Max dist : %f \n" , max_dist ); printf ( "-- Min dist : %f \n" , min_dist ); / / 当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值 30 作为下限. for ( int i = 0 ; i < descriptors_1.rows; i + + ) { if ( match[i].distance < = max ( 2 * min_dist, 30.0 ) ) { matches.push_back ( match[i] ); } } } Point2d pixel2cam ( const Point2d& p, const Mat& K ) { return Point2d ( ( p.x - K.at<double> ( 0 , 2 ) ) / K.at<double> ( 0 , 0 ), ( p.y - K.at<double> ( 1 , 2 ) ) / K.at<double> ( 1 , 1 ) ); } void pose_estimation_2d2d ( std::vector<KeyPoint> keypoints_1, std::vector<KeyPoint> keypoints_2, std::vector< DMatch > matches, Mat& R, Mat& t ) { / / 相机内参,TUM Freiburg2 Mat K = ( Mat_<double> ( 3 , 3 ) << 520.9 , 0 , 325.1 , 0 , 521.0 , 249.7 , 0 , 0 , 1 ); / / - - 把匹配点转换为vector<Point2f>的形式 vector<Point2f> points1; vector<Point2f> points2; for ( int i = 0 ; i < ( int ) matches.size(); i + + ) { points1.push_back ( keypoints_1[matches[i].queryIdx].pt ); points2.push_back ( keypoints_2[matches[i].trainIdx].pt ); } / / - - 计算基础矩阵 Mat fundamental_matrix; fundamental_matrix = findFundamentalMat ( points1, points2, CV_FM_8POINT ); cout<< "fundamental_matrix is " <<endl<< fundamental_matrix<<endl; / / - - 计算本质矩阵 Point2d principal_point ( 325.1 , 249.7 ); / / 相机光心, TUM dataset标定值 double focal_length = 521 ; / / 相机焦距, TUM dataset标定值 Mat essential_matrix; essential_matrix = findEssentialMat ( points1, points2, focal_length, principal_point ); cout<< "essential_matrix is 
" <<endl<< essential_matrix<<endl; / / - - 计算单应矩阵 Mat homography_matrix; homography_matrix = findHomography ( points1, points2, RANSAC, 3 ); cout<< "homography_matrix is " <<endl<<homography_matrix<<endl; / / - - 从本质矩阵中恢复旋转和平移信息. recoverPose ( essential_matrix, points1, points2, R, t, focal_length, principal_point ); cout<< "R is " <<endl<<R<<endl; cout<< "t is " <<endl<<t<<endl; } |
分类:
1_1_6G2O优化学习(7)
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· 25岁的心里话
· 闲置电脑爆改个人服务器(超详细) #公网映射 #Vmware虚拟网络编辑器
· 零经验选手,Compose 一天开发一款小游戏!
· 通过 API 将Deepseek响应流式内容输出到前端
· 因为Apifox不支持离线,我果断选择了Apipost!
2021-11-16 同步连接和读取来自 intel realsense t265 和 d435 的数据
2021-11-16 排线相机模组使用
2020-11-16 openMVG+openMVS +三维重建测试
2020-11-16 qq能用网页不能用
2019-11-16 opencv调用Caffe、TensorFlow、Torch、PyTorch训练好的模型