Preface
When aligning and evaluating SLAM trajectories, the Umeyama algorithm is the usual tool.
The problem it solves is the following:
Given two point sets $\{\mathbf{x}_i\}$ and $\{\mathbf{y}_i\}$ (where $i = 1, 2, \ldots, n$) in $m$-dimensional space, find the parameters of the similarity transformation (Similarity Transformation) that is optimal in the least-squares sense.
The algorithm first appeared in S. Umeyama's paper "Least-squares estimation of transformation parameters between two point patterns" [1].
Similarity Transformation
Similarity transformation = rotation $\mathbf{R}$ + translation $\mathbf{t}$ + scale $c$.
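Written out, and matching the formula quoted later from the Eigen documentation, the algorithm finds the $c$, $\mathbf{R}$, $\mathbf{t}$ that minimize the mean squared residual:

$$
\min_{c,\,\mathbf{R},\,\mathbf{t}} \; \frac{1}{n} \sum_{i=1}^{n} \left\| \mathbf{y}_i - \left( c\,\mathbf{R}\,\mathbf{x}_i + \mathbf{t} \right) \right\|_2^2,
\qquad \mathbf{R}^{\top}\mathbf{R} = \mathbf{I}_m,\ \det \mathbf{R} = 1,\ c > 0.
$$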
The Umeyama algorithm recovers, from two associated point sets in Euclidean space, the similarity-transformation parameters (rotation + translation + scale) that are optimal in the least-squares sense [1].
The previous post derived the mathematics of the algorithm in detail.
Here we look at how the algorithm is implemented in real code, namely:
- the Umeyama source in Eigen [2]
- the Umeyama source in PCL [3]
- the Umeyama source in evo [4]
(Reading finished source code is still a bit easier than deriving the formulas.)
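For reference, the closed-form solution that all three implementations share (the equation numbers cited in their comments follow Umeyama's paper [1]): with centroids $\boldsymbol{\mu}_x$, $\boldsymbol{\mu}_y$ and source variance $\sigma_x^2 = \frac{1}{n}\sum_i \|\mathbf{x}_i - \boldsymbol{\mu}_x\|^2$, decompose the cross-covariance

$$
\Sigma_{xy} = \frac{1}{n}\sum_{i=1}^{n} (\mathbf{y}_i - \boldsymbol{\mu}_y)(\mathbf{x}_i - \boldsymbol{\mu}_x)^{\top} = U D V^{\top},
$$

then

$$
\mathbf{R} = U S V^{\top}, \qquad
c = \frac{1}{\sigma_x^2}\,\mathrm{tr}(D S), \qquad
\mathbf{t} = \boldsymbol{\mu}_y - c\,\mathbf{R}\,\boldsymbol{\mu}_x,
$$

where $S = \mathbf{I}$ if $\det(U)\det(V) \ge 0$ and $S = \mathrm{diag}(1, \ldots, 1, -1)$ otherwise, which guarantees $\det \mathbf{R} = 1$.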
0 My own code, with random sampling (RANSAC) added
API_3D_3D_sRt.h
#ifndef API_3D_3D_sRt_H
#define API_3D_3D_sRt_H

#include <iostream>
#include <Eigen/Dense>
#include <vector>
#include <cmath>
#include <random>

using namespace Eigen;
using namespace std;

// Root-mean-square error between two trajectories
double cal_rmse_t_v3s_v3s(vector<Eigen::Vector3d> &groundtruth,
                          vector<Eigen::Vector3d> &estimated);

// Core 3D-3D SVD-based estimation.
/*
Inputs:
    source_points  point cloud, typically the ENU positions from SLAM/VO
    target_points  point cloud, typically the ENU positions from GNSS
Outputs:
    scale, R, t  used as  target_points = scale * R * source_points + t
*/
void API_ICP_3D_3D_sRt(const vector<Vector3d>& source_points,
                       const vector<Vector3d>& target_points,
                       double& scale, Matrix3d& R, Vector3d& t);

// Find inliers using the centered point clouds (s and R only)
void API_ransac_ICP_3D_3D_sRt_inliner_sR(const vector<Vector3d>& source_points,
                                         const vector<Vector3d>& target_points,
                                         int num_iterations,           // number of RANSAC trials
                                         const double error_threshold, // error threshold, e.g. 3
                                         double& best_scale, Matrix3d& best_R, Vector3d& best_t);

// Find inliers by transforming the points directly (s, R and t)
void API_ransac_ICP_3D_3D_sRt_inliner_sRt(const vector<Vector3d>& source_points,
                                          const vector<Vector3d>& target_points,
                                          int num_iterations,           // number of RANSAC trials
                                          const double error_threshold, // error threshold, e.g. 3
                                          double& best_scale, Matrix3d& best_R, Vector3d& best_t);

#endif
API_3D_3D_sRt.cc
#ifndef API_3D_3D_sRt_CPP
#define API_3D_3D_sRt_CPP
#include "API_3D_3D_sRt.h"

/*
1. RANSAC (RANdom SAmple Consensus)
The goal is to pick the correct data out of all the data before estimating. Some definitions:
    point        a single datum; in SLAM, a matched point pair
    outlier      a wrong point
    inlier       a correct point
    inlier set   the set of inliers; outlier set: the set of outliers
    model        the parameters to be estimated
    s            the minimal number of points needed to estimate the model
    S            the set of all points
The inlier set is the correct data we are after. RANSAC randomly draws s points from S,
estimates a model from them, and checks whether the remaining points agree with that model.
If most points agree, we have found a suitable model, and the agreeing points are the inliers.
Because the draw is random, a single trial may miss the correct model, so several trials are
needed. The full procedure:
    STEP 1. Randomly draw s points from S as a sample and estimate a model.
    STEP 2. Collect the points within the distance threshold t of the model; this is the inlier set S_i.
    STEP 3. If |S_i| exceeds some threshold T, re-estimate a more accurate model from all inliers in S_i.
    STEP 4. If |S_i| is below T, go back to STEP 1.
    STEP 5. After N trials, take the largest consensus set S_i and re-estimate the model from all its points.
RANSAC therefore has three important parameters to choose: t, T and N.
*/

double cal_rmse_t_v3s_v3s(vector<Eigen::Vector3d> &groundtruth,
                          vector<Eigen::Vector3d> &estimated) {
    // compute the RMSE
    double rmse = 0;
    for (size_t i = 0; i < estimated.size(); i++) {
        Eigen::Vector3d p1 = estimated[i];
        Eigen::Vector3d p2 = groundtruth[i];
        Eigen::Vector3d error = (p2 - p1);
        rmse += error.squaredNorm();  // squaredNorm(): sum of squares; norm(): its square root
    }
    rmse = rmse / double(estimated.size());
    rmse = sqrt(rmse);
    // cout << "RMSE = " << rmse << endl;
    return rmse;
}

// Core 3D-3D SVD-based estimation.
/*
Inputs:
    source_points  point cloud, typically the ENU positions from SLAM/VO
    target_points  point cloud, typically the ENU positions from GNSS
Outputs:
    scale, R, t  used as  target_points = scale * R * source_points + t
*/
void API_ICP_3D_3D_sRt(const vector<Vector3d>& source_points,
                       const vector<Vector3d>& target_points,
                       double& scale, Matrix3d& R, Vector3d& t) {
    int N = source_points.size();

    // centroids
    Vector3d centroid_source = Vector3d::Zero();
    Vector3d centroid_target = Vector3d::Zero();
    for (int i = 0; i < N; i++) {
        centroid_source += source_points[i];
        centroid_target += target_points[i];
    }
    centroid_source /= N;
    centroid_target /= N;

    // centered point clouds
    vector<Vector3d> centered_source_points(N);
    vector<Vector3d> centered_target_points(N);
    for (int i = 0; i < N; i++) {
        centered_source_points[i] = source_points[i] - centroid_source;
        centered_target_points[i] = target_points[i] - centroid_target;
    }

    // scale factor s, method 2: ratio of the summed point norms
    double sum_source = 0.0;
    double sum_target = 0.0;
    for (int i = 0; i < N; ++i) {
        sum_source += centered_source_points[i].norm();  // norm = sqrt(x^2+y^2+z^2)
        sum_target += centered_target_points[i].norm();
    }
    scale = sum_target / sum_source;

    // scale factor s, method 1 (needs R first):
    // double trace = 0.0;
    // for (int i = 0; i < N; ++i) {
    //     trace += centered_target_points[i].transpose() * R * centered_source_points[i];
    // }
    // double src_norm = 0.0;
    // for (int i = 0; i < N; ++i) {
    //     src_norm += centered_source_points[i].squaredNorm();
    // }
    // scale = trace / src_norm;

    // covariance matrix H
    Matrix3d H = Matrix3d::Zero();
    for (int i = 0; i < N; ++i) {
        // multiplying by scale here makes no difference to the result
        H += centered_target_points[i] * centered_source_points[i].transpose();
        // H += scale * centered_source_points[i] * centered_target_points[i].transpose();
    }

    // SVD of H
    JacobiSVD<Matrix3d> svd(H, ComputeFullU | ComputeFullV);
    Matrix3d U = svd.matrixU();
    Matrix3d V = svd.matrixV();

    // rotation R mapping src to dst, consistent with the computation of t below
    R = U * V.transpose();
    if (U.determinant() * V.determinant() < 0) {  // det(R) < 0: flip the sign of U's last column
        for (int x = 0; x < 3; ++x) {
            U(x, 2) *= -1;
        }
        R = U * V.transpose();
    }
    // R = V * U.transpose();

    // translation t from the two centroids
    t = centroid_target - scale * R * centroid_source;
}

// https://zhuanlan.zhihu.com/p/62175983
// Inlier test on the centered points: transform the centered source points with s*R and
// compare them against the centered target points.
// 1. randomly sample and compute sRt   2. find inliers on the centered point sets
// 3. recompute the best sRt from the largest inlier set
void API_ransac_ICP_3D_3D_sRt_inliner_sR(const vector<Vector3d>& source_points,
                                         const vector<Vector3d>& target_points,
                                         int num_iterations,           // number of RANSAC trials
                                         const double error_threshold, // error threshold, e.g. 3
                                         double& best_scale, Matrix3d& best_R, Vector3d& best_t) {
    // 1. data preparation
    int best_inlier_count = 0;  // largest inlier count so far
    int N = source_points.size();
    // cout << "total points " << N << endl;
    int num_ransac_temp = N * (N - 1) / 2;  // number of distinct point pairs
    if (num_ransac_temp < num_iterations) { // cap the trial count to save time; 1000 trials are usually enough
        num_iterations = num_ransac_temp;
    }

    // centroids over all points
    Vector3d centroid_source = Vector3d::Zero();
    Vector3d centroid_target = Vector3d::Zero();
    for (int i = 0; i < N; i++) {
        centroid_source += source_points[i];
        centroid_target += target_points[i];
    }
    centroid_source /= N;
    centroid_target /= N;

    // centered point clouds over all points
    vector<Vector3d> centered_source_points(N);
    vector<Vector3d> centered_target_points(N);
    for (int i = 0; i < N; i++) {
        centered_source_points[i] = source_points[i] - centroid_source;
        centered_target_points[i] = target_points[i] - centroid_target;
    }

    // RANSAC: find inliers, reject outliers, and compute a more accurate sRt
    std::random_device rd;   // true-random seed
    std::mt19937 gen(rd());  // Mersenne Twister engine seeded with rd()
    uniform_int_distribution<int> dist(0, N - 1);  // uniform integers in [0, N-1]

    for (int iteration = 0; iteration < num_iterations; ++iteration) {
        // randomly select a minimal sample (3 points are the minimum; 4 are drawn here)
        vector<Vector3d> source_points_temp;
        vector<Vector3d> target_points_temp;
        for (int num = 0; num < 4; num++) {
            int idx = dist(gen);
            source_points_temp.push_back(source_points[idx]);
            target_points_temp.push_back(target_points[idx]);
        }

        // estimate sRt from this random sample
        double scale;
        Matrix3d R;
        Vector3d t;
        API_ICP_3D_3D_sRt(source_points_temp, target_points_temp, scale, R, t);

        // compute the error over all points and count inliers
        int inlier_count = 0;
        vector<Vector3d> source_points_inliner;
        vector<Vector3d> target_points_inliner;
        for (int i = 0; i < N; ++i) {
            // transform each centered source point with the sampled s*R
            Vector3d centered_source2target_points_i = scale * R * centered_source_points[i];
            double source_norm_i = centered_source2target_points_i.norm();  // 2-norm of the centered point
            double target_norm_i = centered_target_points[i].norm();
            // chi-square, 3 DoF: (x-x')^2+(y-y')^2+(z-z')^2 < 7.815 at 95%
            // chi-square, 2 DoF: (x-x')^2+(y-y')^2          < 5.99  at 95%
            double error = abs(source_norm_i - target_norm_i);  // in meters if the target is GNSS ENU
            if (error < error_threshold) {  // e.g. 3 m
                inlier_count++;  // count this inlier
                // collect the inlier set
                source_points_inliner.push_back(source_points[i]);
                target_points_inliner.push_back(target_points[i]);
            }
        }

        // if this trial beats the best inlier count so far, keep it as the best result
        if (inlier_count > best_inlier_count) {
            // recompute the optimal sRt from the inlier set
            API_ICP_3D_3D_sRt(source_points_inliner, target_points_inliner, scale, R, t);
            best_inlier_count = inlier_count;
            best_scale = scale;
            best_R = R;
            best_t = t;
        }
        if (iteration == num_iterations - 1) {
            cout << "API_3D_3D_sRt sRt: total points " << N
                 << ", max inlier count: " << best_inlier_count << endl;
        }
    }
}

// Inlier test on the raw points transformed with the full sRt.
// Not recommended: t is derived from s and R, which are themselves estimates, so including t
// compounds the error. Prefer comparing the centered point sets transformed with s*R only.
void API_ransac_ICP_3D_3D_sRt_inliner_sRt(const vector<Vector3d>& source_points,
                                          const vector<Vector3d>& target_points,
                                          int num_iterations,           // number of RANSAC trials
                                          const double error_threshold, // error threshold, e.g. 3
                                          double& best_scale, Matrix3d& best_R, Vector3d& best_t) {
    // 1. data preparation (same as above)
    int best_inlier_count = 0;
    int N = source_points.size();
    int num_ransac_temp = N * (N - 1) / 2;
    if (num_ransac_temp < num_iterations) {
        num_iterations = num_ransac_temp;
    }

    // centroids over all points
    Vector3d centroid_source = Vector3d::Zero();
    Vector3d centroid_target = Vector3d::Zero();
    for (int i = 0; i < N; i++) {
        centroid_source += source_points[i];
        centroid_target += target_points[i];
    }
    centroid_source /= N;
    centroid_target /= N;

    // centered point clouds over all points
    vector<Vector3d> centered_source_points(N);
    vector<Vector3d> centered_target_points(N);
    for (int i = 0; i < N; i++) {
        centered_source_points[i] = source_points[i] - centroid_source;
        centered_target_points[i] = target_points[i] - centroid_target;
    }

    // RANSAC: find inliers, reject outliers, and compute a more accurate sRt
    std::random_device rd;
    std::mt19937 gen(rd());
    uniform_int_distribution<int> dist(0, N - 1);

    for (int iteration = 0; iteration < num_iterations; ++iteration) {
        // randomly select a minimal sample (3 points minimum; 4 are drawn here)
        vector<Vector3d> source_points_temp;
        vector<Vector3d> target_points_temp;
        for (int num = 0; num < 4; num++) {
            int idx = dist(gen);
            source_points_temp.push_back(source_points[idx]);
            target_points_temp.push_back(target_points[idx]);
        }

        // estimate sRt from this random sample
        double scale;
        Matrix3d R;
        Vector3d t;
        API_ICP_3D_3D_sRt(source_points_temp, target_points_temp, scale, R, t);

        // compute the error over all points and count inliers
        int inlier_count = 0;
        vector<Vector3d> source_points_inliner;
        vector<Vector3d> target_points_inliner;
        for (int i = 0; i < N; ++i) {
            // transform each raw source point with the sampled full sRt
            Vector3d source_target_point_i = scale * R * source_points[i] + t;
            Vector3d target_point_i = target_points[i];
            // chi-square, 3 DoF: (x-x')^2+(y-y')^2+(z-z')^2 < 7.815 at 95%
            // chi-square, 2 DoF: (x-x')^2+(y-y')^2          < 5.99  at 95%
            double error = (source_target_point_i - target_point_i).norm();  // in meters if the target is GNSS ENU
            error = error * error;
            if (error < error_threshold) {
                inlier_count++;
                source_points_inliner.push_back(source_points[i]);
                target_points_inliner.push_back(target_points[i]);
            }
        }

        // if this trial beats the best inlier count so far, keep it as the best result
        if (inlier_count > best_inlier_count) {
            API_ICP_3D_3D_sRt(source_points_inliner, target_points_inliner, scale, R, t);
            best_inlier_count = inlier_count;
            best_scale = scale;
            best_R = R;
            best_t = t;
        }
        if (iteration == num_iterations - 1) {
            cout << "API_3D_3D_sRt == sRt: total points " << N
                 << ", max inlier count: " << best_inlier_count << endl;
        }
    }
}

#endif
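Before looking at the in-project call, here is a minimal synthetic sanity check of API_ICP_3D_3D_sRt (my own sketch, not part of the original project): generate random points, transform them with a known s, R, t, and confirm that the estimate recovers them. With noise-free data all three errors should be near machine precision.

// test_sRt.cc -- hypothetical stand-alone check, assuming the header above
#include "API_3D_3D_sRt.h"

int main() {
    // ground-truth similarity transform: s = 2, R = 30 degrees about Z, t = (1, 2, 3)
    double s_gt = 2.0;
    Matrix3d R_gt = AngleAxisd(M_PI / 6.0, Vector3d::UnitZ()).toRotationMatrix();
    Vector3d t_gt(1.0, 2.0, 3.0);

    // random source points, transformed exactly into the target frame
    vector<Vector3d> src, dst;
    for (int i = 0; i < 100; ++i) {
        Vector3d p = 10.0 * Vector3d::Random();
        src.push_back(p);
        dst.push_back(s_gt * R_gt * p + t_gt);
    }

    // estimate and compare against the ground truth
    double s;
    Matrix3d R;
    Vector3d t;
    API_ICP_3D_3D_sRt(src, dst, s, R, t);
    cout << "scale error:       " << abs(s - s_gt) << endl;
    cout << "rotation error:    " << (R - R_gt).norm() << endl;  // Frobenius norm
    cout << "translation error: " << (t - t_gt).norm() << endl;
    return 0;
}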
Usage
// All keyframe poses participate in the sRt estimation ============================
std::vector<data::keyframe*> all_dbkeyfrms = map_db_->get_all_keyframes();
// std::sort(all_keyfrms.begin(), all_keyfrms.end(),
//           [&](data::keyframe *keyfrm_1, data::keyframe *keyfrm_2)
//           { return *keyfrm_1 < *keyfrm_2; });
vector<Eigen::Vector3d> source_points_vo;
vector<Eigen::Vector3d> target_points_gnss;
Eigen::Matrix4d T_VO_to_GNSS_ENU = Eigen::Matrix4d::Identity();
bool use_gnss_optimize = 0;

// estimate the sRt transformation
if (all_dbkeyfrms.size() > 4) {
    for (int i = 0; i < all_dbkeyfrms.size(); i++) {
        data::keyframe* local_keyfrm = all_dbkeyfrms[i];
        if (!local_keyfrm) {
            continue;
        }
        if (local_keyfrm->will_be_erased()) {
            continue;
        }
        Eigen::Matrix4d cam_pose_cw_i = local_keyfrm->get_cam_pose();
        const Eigen::Matrix4d cam_pose_wc_i = cam_pose_cw_i.inverse();
        Eigen::Vector3d cam_pose_i_t = cam_pose_wc_i.block<3, 1>(0, 3);
        gnss_data gnss_data_i = local_keyfrm->Gnss_data;
        bool can_use = local_keyfrm->Gnss_data.can_use;
        if (!can_use) {  // skip keyframes whose GNSS is unusable
            continue;
        }
        Eigen::Vector3d gnss_data_i_xyz(gnss_data_i.x, gnss_data_i.y, gnss_data_i.z);
        // gnss_data_i_xyz[2] = 0;  // optionally fix the height
        source_points_vo.push_back(cam_pose_i_t);
        target_points_gnss.push_back(gnss_data_i_xyz);
    }
    if (source_points_vo.size() > 4 && source_points_vo.size() == target_points_gnss.size()) {
        double s;
        Eigen::Matrix3d R;
        Eigen::Vector3d t;
        double error_th = 7.841;
        API_ransac_ICP_3D_3D_sRt_inliner_sR(source_points_vo, target_points_gnss,
                                            1000,     // number of RANSAC trials
                                            error_th, // error threshold
                                            s, R, t);
        T_VO_to_GNSS_ENU.block<3, 3>(0, 0) = s * R;
        T_VO_to_GNSS_ENU.block<3, 1>(0, 3) = t;
        // std::cout << T_VO_to_GNSS_ENU << std::endl;
        for (int i = 0; i < source_points_vo.size(); i++) {
            source_points_vo[i] = s * R * source_points_vo[i] + t;
        }
        double rmse = cal_rmse_t_v3s_v3s(target_points_gnss, source_points_vo);
        cout << "current keyframe " << curr_keyfrm->id_
             << " local-VO-only optimization, sRt over all keyframes, RMSE = " << rmse
             << ", points used: " << source_points_vo.size()
             << ", error_th " << error_th << endl;
    }
}
I. The Umeyama implementation in Eigen
This is the direct computation, without RANSAC outlier rejection.
1 Calling it from my own code
bool system::Proccess_GPS_testerror(cv::Mat &SR, cv::Mat &T,
                                    std::vector<vslam::data::keyframe *> all_keyfrms,
                                    double &erro) {
    std::sort(all_keyfrms.begin(), all_keyfrms.end(),
              [&](vslam::data::keyframe *keyfrm_1, vslam::data::keyframe *keyfrm_2) {
                  return *keyfrm_1 < *keyfrm_2;
              });
    int num = all_keyfrms.size();
    Eigen::Matrix<double, 3, Eigen::Dynamic> srcPoints(3, num);
    Eigen::Matrix<double, 3, Eigen::Dynamic> dstPoints(3, num);
    // these containers are used for the error computation below
    std::vector<cv::Point3d> src_p;
    std::vector<cv::Point3d> dst_p;
    if (all_keyfrms.size() < 3) {
        spdlog::warn("fewer than 3 keyframes, cannot compute the SRT");
        return false;
    }
    int i = 0;
    for (auto keyframe : all_keyfrms) {
        // world-to-camera pose, inverted to camera-to-world
        const Eigen::Matrix4d cam_pose_cw = keyframe->get_cam_pose();
        const Eigen::Matrix4d cam_pose_wc = cam_pose_cw.inverse();
        const Eigen::Matrix3d &temp_R_wc = cam_pose_wc.block<3, 3>(0, 0);
        const Eigen::Vector3d &temp_T_wc = cam_pose_wc.block<3, 1>(0, 3);
        // model (VO) coordinates
        srcPoints(0, i) = temp_T_wc(0);
        srcPoints(1, i) = temp_T_wc(1);
        srcPoints(2, i) = temp_T_wc(2);
        cv::Point3d src_temp;
        src_temp.x = temp_T_wc(0);
        src_temp.y = temp_T_wc(1);
        src_temp.z = temp_T_wc(2);
        src_p.push_back(src_temp);
        // keyframes store GPS (LLA), which must be converted to ECEF
        auto keyfrem_ecef = LLA2ECEF(keyframe->Key_GPS_.x, keyframe->Key_GPS_.y, keyframe->Key_GPS_.z);
        dstPoints(0, i) = keyfrem_ecef(0, 0);
        dstPoints(1, i) = keyfrem_ecef(1, 0);
        dstPoints(2, i) = keyfrem_ecef(2, 0);
        dst_p.push_back(cv::Point3d(dstPoints(0, i), dstPoints(1, i), dstPoints(2, i)));
        i++;
    }
    // SRT from world (VO) coordinates to model (ECEF) coordinates
    Eigen::Matrix4d rt = Eigen::umeyama(srcPoints, dstPoints, true);
    // format conversion
    cv::Mat sr;
    Eigen::Matrix<double, 3, 3> E_SR = rt.block<3, 3>(0, 0);
    cv::eigen2cv(E_SR, sr);
    SR = sr;
    cv::Mat t;
    Eigen::Matrix<double, 3, 1> E_T = rt.block<3, 1>(0, 3);
    cv::eigen2cv(E_T, t);
    T = t;
    // error statistics: per-point residual of SR * src + t against dst
    double error_sum = 0.0;
    double max_error, mean_error, cur_erro;
    max_error = mean_error = 0.0;
    for (int i = 0; i < num; ++i) {
        cv::Mat temp_srcpoints = (cv::Mat_<double>(3, 1) << src_p[i].x, src_p[i].y, src_p[i].z);
        cv::Mat temp_dstpoints = (cv::Mat_<double>(3, 1) << dst_p[i].x, dst_p[i].y, dst_p[i].z);
        cv::Mat dst_pose(3, 1, CV_64FC1);
        dst_pose = sr * temp_srcpoints + t;
        cv::Mat error_Mat = dst_pose - temp_dstpoints;
        // square each element
        cv::Mat temp_error = error_Mat.mul(error_Mat);
        cv::Mat error, error_end;
        // reduce to one row: sum the squared components
        cv::reduce(temp_error, error, 0, cv::REDUCE_SUM);
        // take the square root: Euclidean distance of this residual
        cv::sqrt(error, error_end);
        error_sum += error_end.at<double>(0, 0);
        cur_erro = error_end.at<double>(0, 0);
        if (error_end.at<double>(0, 0) > max_error) {
            max_error = error_end.at<double>(0, 0);
        }
    }
    mean_error = error_sum / num;
    erro = cur_erro;  // residual of the last keyframe
    // std::cout << "cur_erro:" << cur_erro << std::endl;
    // std::cout << "error_sum:" << error_sum << std::endl;
    // std::cout << "max_error:" << max_error << std::endl;
    // std::cout << "mean_error:" << mean_error << std::endl;
    return true;
}

// update the SRT
void system::update_srt() {
    // fetch all keyframes (with their GPS in ECEF)
    auto kfrms = map_db_->get_all_keyframes();
    // compute the SRT
    cv::Mat SR;
    cv::Mat T;
    Proccess_GPS_testerror(SR, T, kfrms, cur_erro);
    cv::Mat SRT;
    cv::hconcat(SR, T, SRT);
    cv2eigen(SRT, srt_slam2real);
}
1-2 The official Eigen/src/Geometry/Umeyama.h source
/**
 * \geometry_module \ingroup Geometry_Module
 *
 * \brief Returns the transformation between two point sets.
 *
 * The algorithm is based on:
 * "Least-squares estimation of transformation parameters between two point patterns",
 * Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573
 *
 * It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that
 * \f{align*}
 *   \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2
 * \f}
 * is minimized.
 *
 * The algorithm is based on the analysis of the covariance matrix
 * \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$
 * of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$ where
 * \f$d\f$ is corresponding to the dimension (which is typically small).
 * The analysis is involving the SVD having a complexity of \f$O(d^3)\f$
 * though the actual computational effort lies in the covariance
 * matrix computation which has an asymptotic lower bound of \f$O(dm)\f$ when
 * the input point sets have dimension \f$d \times m\f$.
 *
 * Currently the method is working only for floating point matrices.
 *
 * \todo Should the return type of umeyama() become a Transform?
 *
 * \param src Source points \f$ \mathbf{x} = \left( x_1, \hdots, x_n \right) \f$.
 * \param dst Destination points \f$ \mathbf{y} = \left( y_1, \hdots, y_n \right) \f$.
 * \param with_scaling Sets \f$ c=1 \f$ when <code>false</code> is passed.
 * \return The homogeneous transformation
 * \f{align*}
 *   T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix}
 * \f}
 * minimizing the residual above. This transformation is always returned as an
 * Eigen::Matrix.
 */
template <typename Derived, typename OtherDerived>
typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type
umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true)
{
  typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;
  typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;

  EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };

  typedef Matrix<Scalar, Dimension, 1> VectorType;
  typedef Matrix<Scalar, Dimension, Dimension> MatrixType;
  typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType;

  const Index m = src.rows(); // dimension
  const Index n = src.cols(); // number of measurements

  // required for demeaning ...
  const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n);

  // computation of mean
  const VectorType src_mean = src.rowwise().sum() * one_over_n;
  const VectorType dst_mean = dst.rowwise().sum() * one_over_n;

  // demeaning of src and dst points
  const RowMajorMatrixType src_demean = src.colwise() - src_mean;
  const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean;

  // Eq. (36)-(37)
  const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n;

  // Eq. (38)
  const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose();

  JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV);

  // Initialize the resulting transformation with an identity matrix...
  TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1);

  // Eq. (39)
  VectorType S = VectorType::Ones(m);

  if ( svd.matrixU().determinant() * svd.matrixV().determinant() < 0 )
    S(m-1) = -1;

  // Eq. (40) and (43)
  Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();

  if (with_scaling)
  {
    // Eq. (42)
    const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S);

    // Eq. (41)
    Rt.col(m).head(m) = dst_mean;
    Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;
    Rt.block(0,0,m,m) *= c;
  }
  else
  {
    Rt.col(m).head(m) = dst_mean;
    Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean;
  }

  return Rt;
}
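One practical note: Eigen::umeyama returns a single homogeneous matrix whose top-left block is the fused product c*R. If c and R are needed separately (for example, to report the scale of a monocular trajectory), they can be recovered from the determinant, since det(cR) = c^3 for a 3x3 proper rotation. A small sketch (the function name and layout are mine, not Eigen's):

#include <Eigen/Dense>
#include <cmath>

// Split the homogeneous result of Eigen::umeyama into c, R and t.
// Works because det(cR) = c^3 * det(R) = c^3 for a proper rotation R.
void decompose_umeyama(const Eigen::Matrix4d& Rt,
                       double& c, Eigen::Matrix3d& R, Eigen::Vector3d& t) {
    Eigen::Matrix3d cR = Rt.block<3, 3>(0, 0);
    c = std::cbrt(cR.determinant());  // cube root of the determinant recovers the scale
    R = cR / c;
    t = Rt.block<3, 1>(0, 3);
}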
Test
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Hauke Heibel <hauke.heibel@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/LU>  // required for MatrixBase::determinant
#include <Eigen/SVD> // required for SVD

#include <iostream>
#include <unistd.h>

using namespace Eigen;

// Constructs a random matrix from the unitary group U(size).
template <typename T>
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> randMatrixUnitary(int size)
{
  typedef T Scalar;
  typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> MatrixType;

  MatrixType Q;

  int max_tries = 40;
  bool is_unitary = false;

  while (!is_unitary && max_tries > 0)
  {
    // initialize random matrix
    Q = MatrixType::Random(size, size);

    // orthogonalize columns using the Gram-Schmidt algorithm
    for (int col = 0; col < size; ++col)
    {
      typename MatrixType::ColXpr colVec = Q.col(col);
      for (int prevCol = 0; prevCol < col; ++prevCol)
      {
        typename MatrixType::ColXpr prevColVec = Q.col(prevCol);
        colVec -= colVec.dot(prevColVec)*prevColVec;
      }
      Q.col(col) = colVec.normalized();
    }

    // this additional orthogonalization is not necessary in theory but should enhance
    // the numerical orthogonality of the matrix
    for (int row = 0; row < size; ++row)
    {
      typename MatrixType::RowXpr rowVec = Q.row(row);
      for (int prevRow = 0; prevRow < row; ++prevRow)
      {
        typename MatrixType::RowXpr prevRowVec = Q.row(prevRow);
        rowVec -= rowVec.dot(prevRowVec)*prevRowVec;
      }
      Q.row(row) = rowVec.normalized();
    }

    // final check
    is_unitary = Q.isUnitary();
    --max_tries;
  }

  if (max_tries == 0)
    eigen_assert(false && "randMatrixUnitary: Could not construct unitary matrix!");

  return Q;
}

// Constructs a random matrix from the special unitary group SU(size).
template <typename T>
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> randMatrixSpecialUnitary(int size)
{
  typedef T Scalar;
  typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> MatrixType;

  // initialize unitary matrix
  MatrixType Q = randMatrixUnitary<Scalar>(size);

  // tweak the first column to make the determinant be 1
  Q.col(0) *= numext::conj(Q.determinant());

  return Q;
}

template <typename MatrixType>
void run_test(int dim, int num_elements)
{
  using std::abs;
  typedef typename internal::traits<MatrixType>::Scalar Scalar;
  typedef Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> MatrixX;
  typedef Matrix<Scalar, Eigen::Dynamic, 1> VectorX;

  // MUST be positive because in any other case det(cR_t) may become negative for
  // odd dimensions!
  srand((unsigned)time(NULL));
  const Scalar c = abs(internal::random<Scalar>());

  MatrixX R = randMatrixSpecialUnitary<Scalar>(dim);
  VectorX t = Scalar(50)*VectorX::Random(dim,1);

  MatrixX cR_t = MatrixX::Identity(dim+1,dim+1);
  cR_t.block(0,0,dim,dim) = c*R;
  cR_t.block(0,dim,dim,1) = t;

  std::cout << "[c]:\n" << c << "\n" << std::endl;
  std::cout << "[R]:\n" << R << "\n" << std::endl;
  std::cout << "[t]:\n" << t << "\n" << std::endl;
  std::cout << "[Original cR_t]:\n" << cR_t << "\n" << std::endl;

  MatrixX src = MatrixX::Random(dim+1, num_elements);
  src.row(dim) = Matrix<Scalar, 1, Dynamic>::Constant(num_elements, Scalar(1));

  MatrixX dst = cR_t*src;

  MatrixX cR_t_umeyama = umeyama(src.block(0,0,dim,num_elements), dst.block(0,0,dim,num_elements));

  const Scalar error = ( cR_t_umeyama*src - dst ).norm() / dst.norm();
  // VERIFY(error < Scalar(40)*std::numeric_limits<Scalar>::epsilon());

  std::cout << "[cR_t_umeyama]:\n" << cR_t_umeyama << "\n" << std::endl;
  std::cout << "[error]:\n" << error << "\n" << std::endl;
}

int main(int argc, char** argv)
{
  std::cout << "\n----- First Experiment -----" << std::endl;
  run_test<MatrixXf>(3, 20);
  sleep(1);
  std::cout << "\n----- Second Experiment -----" << std::endl;
  run_test<MatrixXf>(4, 10);

  return 0;
}
II. The Umeyama implementation in PCL
The source in registration/include/pcl/registration/impl/transformation_estimation_svd.hpp is as follows:
template <typename PointSource, typename PointTarget, typename Scalar>
inline void
TransformationEstimationSVD<PointSource, PointTarget, Scalar>::
    estimateRigidTransformation(ConstCloudIterator<PointSource>& source_it,
                                ConstCloudIterator<PointTarget>& target_it,
                                Matrix4& transformation_matrix) const
{
  // Convert to Eigen format
  const int npts = static_cast<int>(source_it.size());

  if (use_umeyama_) {
    Eigen::Matrix<Scalar, 3, Eigen::Dynamic> cloud_src(3, npts);
    Eigen::Matrix<Scalar, 3, Eigen::Dynamic> cloud_tgt(3, npts);

    for (int i = 0; i < npts; ++i) {
      cloud_src(0, i) = source_it->x;
      cloud_src(1, i) = source_it->y;
      cloud_src(2, i) = source_it->z;
      ++source_it;

      cloud_tgt(0, i) = target_it->x;
      cloud_tgt(1, i) = target_it->y;
      cloud_tgt(2, i) = target_it->z;
      ++target_it;
    }

    // Call Umeyama directly from Eigen (PCL patched version until Eigen is released)
    transformation_matrix = pcl::umeyama(cloud_src, cloud_tgt, false);
  }
  else {
    source_it.reset();
    target_it.reset();
    // <cloud_src,cloud_src> is the source dataset
    transformation_matrix.setIdentity();

    Eigen::Matrix<Scalar, 4, 1> centroid_src, centroid_tgt;
    // Estimate the centroids of source, target
    compute3DCentroid(source_it, centroid_src);
    compute3DCentroid(target_it, centroid_tgt);
    source_it.reset();
    target_it.reset();

    // Subtract the centroids from source, target
    Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> cloud_src_demean, cloud_tgt_demean;
    demeanPointCloud(source_it, centroid_src, cloud_src_demean);
    demeanPointCloud(target_it, centroid_tgt, cloud_tgt_demean);

    getTransformationFromCorrelation(
        cloud_src_demean, centroid_src, cloud_tgt_demean, centroid_tgt, transformation_matrix);
  }
}

template <typename PointSource, typename PointTarget, typename Scalar>
void
TransformationEstimationSVD<PointSource, PointTarget, Scalar>::
    getTransformationFromCorrelation(
        const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_src_demean,
        const Eigen::Matrix<Scalar, 4, 1>& centroid_src,
        const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>& cloud_tgt_demean,
        const Eigen::Matrix<Scalar, 4, 1>& centroid_tgt,
        Matrix4& transformation_matrix) const
{
  transformation_matrix.setIdentity();

  // Assemble the correlation matrix H = source * target'
  Eigen::Matrix<Scalar, 3, 3> H =
      (cloud_src_demean * cloud_tgt_demean.transpose()).topLeftCorner(3, 3);

  // Compute the Singular Value Decomposition
  Eigen::JacobiSVD<Eigen::Matrix<Scalar, 3, 3>> svd(
      H, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix<Scalar, 3, 3> u = svd.matrixU();
  Eigen::Matrix<Scalar, 3, 3> v = svd.matrixV();

  // Compute R = V * U'
  if (u.determinant() * v.determinant() < 0) {
    for (int x = 0; x < 3; ++x)
      v(x, 2) *= -1;
  }

  Eigen::Matrix<Scalar, 3, 3> R = v * u.transpose();

  // Return the correct transformation
  transformation_matrix.topLeftCorner(3, 3) = R;
  const Eigen::Matrix<Scalar, 3, 1> Rc(R * centroid_src.head(3));
  transformation_matrix.block(0, 3, 3, 1) = centroid_tgt.head(3) - Rc;
}

} // namespace registration
} // namespace pcl
PCL handles rigid transformations in 3D Euclidean space only; it does not deal with higher dimensions, and it estimates only the rotation and translation parameters, not scale.
There are two code paths: one calls the Umeyama implementation in Eigen directly, the other is PCL's own. PCL's own path is similar to the Umeyama algorithm; strictly speaking it is the algorithm's predecessor, the SVD method of Arun et al. See reference [5]; we will not expand on it here.
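For completeness, a minimal call into PCL's estimator might look like the sketch below (the cloud contents are toy data invented for illustration; estimateRigidTransformation also has an overload taking two whole clouds, which is what is used here). Since the scale is fixed to 1, the result is a pure rigid transform.

#include <iostream>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/registration/transformation_estimation_svd.h>

int main() {
    pcl::PointCloud<pcl::PointXYZ> src, tgt;
    // fill the two clouds with corresponding points
    // (here: a 90-degree rotation about Z plus a shift of (1, 0, 0))
    src.push_back(pcl::PointXYZ(0.f, 0.f, 0.f));  tgt.push_back(pcl::PointXYZ(1.f, 0.f, 0.f));
    src.push_back(pcl::PointXYZ(1.f, 0.f, 0.f));  tgt.push_back(pcl::PointXYZ(1.f, 1.f, 0.f));
    src.push_back(pcl::PointXYZ(0.f, 1.f, 0.f));  tgt.push_back(pcl::PointXYZ(0.f, 0.f, 0.f));
    src.push_back(pcl::PointXYZ(0.f, 0.f, 1.f));  tgt.push_back(pcl::PointXYZ(1.f, 0.f, 1.f));

    pcl::registration::TransformationEstimationSVD<pcl::PointXYZ, pcl::PointXYZ> est;
    Eigen::Matrix4f T;
    est.estimateRigidTransformation(src, tgt, T);  // rigid only: R and t, no scale
    std::cout << T << std::endl;
    return 0;
}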
III. The Umeyama implementation in evo
1. The evo/core/geometry.py source
def umeyama_alignment(x: np.ndarray, y: np.ndarray,
                      with_scale: bool = False) -> UmeyamaResult:
    """
    Computes the least squares solution parameters of an Sim(m) matrix
    that minimizes the distance between a set of registered points.
    Umeyama, Shinji: Least-squares estimation of transformation parameters
                     between two point patterns. IEEE PAMI, 1991
    :param x: mxn matrix of points, m = dimension, n = nr. of data points
    :param y: mxn matrix of points, m = dimension, n = nr. of data points
    :param with_scale: set to True to align also the scale (default: 1.0 scale)
    :return: r, t, c - rotation matrix, translation vector and scale factor
    """
    if x.shape != y.shape:
        raise GeometryException("data matrices must have the same shape")

    # m = dimension, n = nr. of data points
    m, n = x.shape

    # means, eq. 34 and 35
    mean_x = x.mean(axis=1)
    mean_y = y.mean(axis=1)

    # variance, eq. 36
    # "transpose" for column subtraction
    sigma_x = 1.0 / n * (np.linalg.norm(x - mean_x[:, np.newaxis])**2)

    # covariance matrix, eq. 38
    outer_sum = np.zeros((m, m))
    for i in range(n):
        outer_sum += np.outer((y[:, i] - mean_y), (x[:, i] - mean_x))
    cov_xy = np.multiply(1.0 / n, outer_sum)

    # SVD (text betw. eq. 38 and 39)
    u, d, v = np.linalg.svd(cov_xy)
    if np.count_nonzero(d > np.finfo(d.dtype).eps) < m - 1:
        raise GeometryException("Degenerate covariance rank, "
                                "Umeyama alignment is not possible")

    # S matrix, eq. 43
    s = np.eye(m)
    if np.linalg.det(u) * np.linalg.det(v) < 0.0:
        # Ensure a RHS coordinate system (Kabsch algorithm).
        s[m - 1, m - 1] = -1

    # rotation, eq. 40
    r = u.dot(s).dot(v)

    # scale & translation, eq. 42 and 41
    c = 1 / sigma_x * np.trace(np.diag(d).dot(s)) if with_scale else 1.0
    t = mean_y - np.multiply(c, r.dot(mean_x))

    return r, t, c
2. Code test
Based on the doc/alignment_demo.py example program, lightly modified for testing.
import copy
import logging
import sys

import evo.core.lie_algebra as lie
from evo.core import trajectory
from evo.tools import plot, file_interface, log

import numpy as np
import matplotlib.pyplot as plt

logger = logging.getLogger("evo")
log.configure_logging(verbose=True)

traj_ref = file_interface.read_kitti_poses_file("../test/data/KITTI_00_gt.txt")
traj_est = file_interface.read_kitti_poses_file("../test/data/KITTI_00_ORB.txt")

# add artificial Sim(3) transformation
traj_est.transform(lie.se3(lie.so3_exp([-0.7, 2.3, 1.2]),
                           np.array([-215.7, -114.1, -198.3])))
# traj_est.transform(lie.se3(np.eye(3), np.array([0, 0, 0])))
traj_est.scale(0.7)

logger.info("\nUmeyama alignment without scaling")
traj_est_aligned = copy.deepcopy(traj_est)
traj_est_aligned.align(traj_ref)

logger.info("\nUmeyama alignment with scaling")
traj_est_aligned_scaled = copy.deepcopy(traj_est)
traj_est_aligned_scaled.align(traj_ref, correct_scale=True)

logger.info("\nUmeyama alignment with scaling only")
traj_est_aligned_only_scaled = copy.deepcopy(traj_est)
traj_est_aligned_only_scaled.align(traj_ref, correct_only_scale=True)

fig = plt.figure(figsize=(8, 8))
plot_mode = plot.PlotMode.xyz

ax = plot.prepare_axis(fig, plot_mode, subplot_arg=221)
plot.traj(ax, plot_mode, traj_ref, '--', 'red')
plot.traj(ax, plot_mode, traj_est, '-', 'blue')
fig.axes.append(ax)
plt.title('not aligned')

ax = plot.prepare_axis(fig, plot_mode, subplot_arg=222)
plot.traj(ax, plot_mode, traj_ref, '--', 'red')
plot.traj(ax, plot_mode, traj_est_aligned, '-', 'blue')
fig.axes.append(ax)
plt.title('$\mathrm{SE}(3)$ alignment without scaling')

ax = plot.prepare_axis(fig, plot_mode, subplot_arg=223)
plot.traj(ax, plot_mode, traj_ref, '--', 'red')
plot.traj(ax, plot_mode, traj_est_aligned_scaled, '-', 'blue')
fig.axes.append(ax)
plt.title('$\mathrm{Sim}(3)$ alignment')

ax = plot.prepare_axis(fig, plot_mode, subplot_arg=224)
plot.traj(ax, plot_mode, traj_ref, '--', 'red')
plot.traj(ax, plot_mode, traj_est_aligned_only_scaled, '-', 'blue')
fig.axes.append(ax)
plt.title('only scaled')

fig.tight_layout()
plt.show()
The script produces a 2x2 figure: the top-left panel shows the two raw trajectories (not aligned); the top-right shows the "alignment" with only translation and rotation (no scaling); the bottom-right shows the "alignment" with only scaling (no rotation or translation); and the bottom-left shows the alignment after the full similarity transform (rotation, translation and scaling). The similarity alignment clearly fits best, which illustrates the strength of the Umeyama algorithm.
References
[1] S. Umeyama, "Least-squares estimation of transformation parameters between two point patterns," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 13, no. 4, pp. 376-380, April 1991.
[2] Eigen, https://eigen.tuxfamily.org/index.php?title=Main_Page
[3] The Point Cloud Library (PCL), https://pointclouds.org/
[4] Michael Grupp, "evo: Python package for the evaluation of odometry and SLAM," https://github.com/MichaelGrupp/evo
[5] K. S. Arun, T. S. Huang and S. D. Blostein, "Least-Squares Fitting of Two 3-D Point Sets," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. PAMI-9, no. 5, pp. 698-700, 1987.