https://blog.csdn.net/weixin_44153180/article/details/110688599
https://wenku.baidu.com/view/b9db40255901020207409c1a.html?_wkts_=1704788291655
I cannot explain it all that clearly myself, so here is the PPT link again: https://wenku.baidu.com/view/b9db40255901020207409c1a.html — work through it yourself for a fuller picture.
Still, I decided to write down, in plain language, the understanding I have pieced together over a long time and the algorithm workflow, for reference (it is not guaranteed to be correct; feel free to point out any mistakes).
The inverse (indirect) method works like this: for a point P on the orthophoto, back-compute the scan coordinates of that point on the original image, and assign the pixel value found at those scan coordinates to P on the orthophoto.
Before you can back-compute pixel by pixel, you first need the size of the rectified orthophoto. So step one is to determine the orthophoto size:
1. Determine the size of the orthophoto (image width and height)
Given: the scan coordinates of the four corners of the original image, (0,0), (cols,0), (0,rows), (cols,rows), and the ground height at each of these points (without a DEM, you can simply pick a rough average ground height and use it as the DEM).
From the four corner scan coordinates, compute the corresponding image-plane coordinates.
Scan coordinates (column c, row r) and image-plane coordinates (x, y) are related by an affine transformation:
x = a0 + a1*c + a2*r,  y = b0 + b1*c + b2*r
with a0 = -(cols/2)*pixelSize, a1 = pixelSize, a2 = 0; b0 = -(rows/2)*pixelSize, b1 = 0, b2 = pixelSize.
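As a minimal sketch of that conversion (the function name scanToPhoto and the example numbers are mine, not from the original post; the CDomPho class further below does the same job in Scan2Pho):

```cpp
#include <cstdio>

// Scan (column c, row r) -> image-plane (x, y) in mm, using the affine
// coefficients a0..b2 defined in the text above.
void scanToPhoto(double c, double r, int cols, int rows, double pixelSize,
                 double* x, double* y) {
    double a0 = -(cols / 2.0) * pixelSize, a1 = pixelSize, a2 = 0.0;
    double b0 = -(rows / 2.0) * pixelSize, b1 = 0.0,       b2 = pixelSize;
    *x = a0 + a1 * c + a2 * r;
    *y = b0 + b1 * c + b2 * r;
}

int main() {
    double x, y;
    // e.g. the corner (cols, rows) of a 6000 x 4000 image with 0.00488 mm pixels
    scanToPhoto(6000, 4000, 6000, 4000, 0.00488, &x, &y);
    printf("x = %.4f mm, y = %.4f mm\n", x, y);
    return 0;
}
```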
Then compute the ground-point coordinates from the image-plane coordinates. The principle is the inverse form of the collinearity equations, written out below.
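Using the same notation as PhoZ2Grd in the code below (this is the standard inverse collinearity relation, nothing specific to this post), a point with image-plane coordinates (x, y) and known ground height Z maps to ground coordinates

$$
X = X_s + (Z - Z_s)\,\frac{a_1 x + a_2 y - a_3 f}{c_1 x + c_2 y - c_3 f}, \qquad
Y = Y_s + (Z - Z_s)\,\frac{b_1 x + b_2 y - b_3 f}{c_1 x + c_2 y - c_3 f}
$$

where (X_s, Y_s, Z_s) is the exposure station, f the focal length, and a_i, b_i, c_i are the rows of the rotation matrix built from phi, omega, kappa.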
The bounding rectangle of the four computed ground points gives the approximate ground extent.
The orthophoto width and height then follow from the chosen resolution. For example, if the previous step gives a ground rectangle of 500 m × 300 m and the required resolution is 1 m, the orthophoto is 500 × 300 pixels (a few pixels more is fine); at a resolution of 0.5 m it would be 1000 × 600 pixels.
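As a small sketch of that arithmetic (the extent values here are made up; the main program below hard-codes the 0.5 m case as dx * 2 + 1):

```cpp
#include <cmath>
#include <cstdio>

int main() {
    // Ground bounding rectangle of the four projected corners (example values)
    double mingx = 0.0, maxgx = 500.0;   // metres
    double mingy = 0.0, maxgy = 300.0;   // metres
    double gsd   = 0.5;                  // orthophoto resolution in metres per pixel

    // One pixel per ground cell; +1 so the far edge is still covered
    int domCols = (int)std::ceil((maxgx - mingx) / gsd) + 1;
    int domRows = (int)std::ceil((maxgy - mingy) / gsd) + 1;
    printf("orthophoto size: %d x %d pixels\n", domCols, domRows); // 1001 x 601
    return 0;
}
```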
2. Generate the orthophoto
Since the orthophoto size is determined by the ground extent and the resolution, the ground can be divided into a grid at the resolution spacing, with each grid cell corresponding to one pixel of the orthophoto (this is easier to see in a diagram than to describe).
So, taking the lower-left corner of the ground extent as the starting point, sample the ground at intervals of one resolution unit; for each sample, back-compute the corresponding image-plane coordinates on the original image, convert them to scan coordinates, interpolate the grey value there (nearest-neighbour or bilinear interpolation both work), and assign that value to the orthophoto pixel. Repeating this pixel by pixel fills in the whole orthophoto.
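The program below resamples with the nearest-neighbour method; as a rough sketch of the bilinear alternative mentioned above (sampleBilinear is an illustrative helper, not part of the original code, and the caller is still responsible for flipping the scan y coordinate to an OpenCV row, e.g. rows - iy, before calling it):

```cpp
#include <opencv2/opencv.hpp>
#include <cmath>

// Bilinear resampling of a 3-channel 8-bit image at a fractional position
// (ix, iy) given in OpenCV pixel coordinates (origin at the top-left).
// Returns black when the point falls outside the image.
cv::Vec3b sampleBilinear(const cv::Mat& img, float ix, float iy) {
    int x0 = (int)std::floor(ix), y0 = (int)std::floor(iy);
    if (x0 < 0 || y0 < 0 || x0 + 1 >= img.cols || y0 + 1 >= img.rows)
        return cv::Vec3b(0, 0, 0);
    float dx = ix - x0, dy = iy - y0;
    cv::Vec3b out;
    for (int ch = 0; ch < 3; ch++) {
        float v00 = img.at<cv::Vec3b>(y0,     x0    )[ch];
        float v01 = img.at<cv::Vec3b>(y0,     x0 + 1)[ch];
        float v10 = img.at<cv::Vec3b>(y0 + 1, x0    )[ch];
        float v11 = img.at<cv::Vec3b>(y0 + 1, x0 + 1)[ch];
        out[ch] = (uchar)((1 - dy) * ((1 - dx) * v00 + dx * v01) +
                          dy       * ((1 - dx) * v10 + dx * v11) + 0.5f);
    }
    return out;
}
```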
II. Required data
1. One original image
2. The exterior orientation elements of the image: Xs, Ys, Zs, Phi, Omega, Kappa
3. Camera focal length and pixel size
4. Distortion parameters k1, k2, p1, p2
5. Principal point X0, Y0
6. DEM (a rough average ground height can be used instead)
III. Environment
VS2019 + OpenCV 3.4
Link: https://pan.baidu.com/s/1fkhP-oJCCHxDOVuHPT6fPg?pwd=b4uf
Extraction code: b4uf
Contents of para.txt (the first line is a header that the program skips; the second line holds the values):

```
aopX aopY aopZ aopP aopO aopK f pxlSz k1 k2 p1 p2 camX0 camY0
296434.462720 3141533.705270 2005.025270 0.019684289657 0.087014797127 1.579867547070 28.2459977000 0.0048800000 -0.00014607300000 0.00000017201343 -0.00000596983230 0.00001741155700 0.000000 0.000000
```
#include "opencv2/opencv.hpp" #include "iostream" #include "cv.h" #include "highgui.h" #include <iostream> #include <fstream> using namespace std; using namespace cv; class CDomPho { public: CDomPho() {//构造函数 m_f = m_Pxz = m_k1 = m_k2 = m_p1 = m_k2 = 0; m_X0 = m_Y0 = 0; m_cols = m_rows = 0; }; public: void POK2M(double p, double o, double k, double* r) {//旋转矩阵 double cosp = cos(p); double cosw = cos(o); double cosk = cos(k); double sinp = sin(p); double sinw = sin(o); double sink = sin(k); r[0] = cosp * cosk - sinp * sinw * sink; r[1] = -cosp * sink - sinp * sinw * cosk; r[2] = -sinp * cosw; r[3] = cosw * sink; r[4] = cosw * cosk; r[5] = -sinw; r[6] = sinp * cosk + cosp * sinw * sink; r[7] = -sinp * sink + cosp * sinw * cosk; r[8] = cosp * cosw; }; void SetPara(double aopX, double aopY, double aopZ, double aopP, double aopO, double aopK,//初始化参数 double f, double pxz, int cols, int rows, double k1, double k2, double p1, double p2, double X0, double Y0) { POK2M(aopP, aopO, aopK, m_rotM); m_aopX = aopX; m_aopY = aopY; m_aopZ = aopZ; m_aopP = aopP; m_aopO = aopO; m_aopK = aopK; m_f = f; m_Pxz = pxz; m_k1 = k1; m_k2 = k2; m_p1 = p1; m_p2 = p2; m_X0 = X0; m_Y0 = Y0; m_ref[0] = -(cols / 2) * pxz; m_ref[1] = pxz; m_ref[2] = 0; m_ref[3] = -(rows / 2) * pxz; m_ref[4] = 0; m_ref[5] = -pxz; }; //图像->地面,即扫描坐标->地面坐标; //其中*xyz为数组,储存(x1,y1,Z1...xn.yn,Zn),x,y为扫描坐标,Z为地面高 //sum为点的个数n void ImgZ2Grd(double* xyz, int sum) { double px, py; for (int i = 0; i < sum; i++) { Scan2Pho(*(xyz + i * 3 + 0), *(xyz + i * 3 + 1), &px, &py, m_ref);//扫描坐标->像平面坐标 PhoZ2Grd(px, py, *(xyz + i * 3 + 2), xyz + i * 3 + 0, xyz + i * 3 + 1, m_aopX, m_aopY, m_aopZ, m_rotM, m_f);//像平面坐标->地面 } }; //扫描坐标->像平面坐标 //ref6为仿射变换参数 void Scan2Pho(double scX, double scY, double* phoX, double* phoY, double* ref6) { *phoX = ref6[0] + ref6[1] * scX - ref6[2] * scY; *phoY = ref6[3] + ref6[4] * scX - ref6[5] * scY; // Fix Distortion矫正畸变 if (m_k1 > 1.E-12 || m_k1 < -1.E-12) { // unit is mm double x = *phoX, y = *phoY; double r2 = x * x + y * y; double r4 = r2 * r2; double dx = x * (m_k1 * r2 + m_k2 * r4) + m_p1 * (r2 + 2 * x * x) + m_p2 * 2 * x * y; double dy = y * (m_k1 * r2 + m_k2 * r4) + m_p2 * (r2 + 2 * y * y) + m_p1 * 2 * x * y; *phoX -= dx; *phoY -= dy; } }; //地面->扫描坐标 void Grd2Img(double gx, double gy, double gz, float* ix, float* iy) { double px, py; Grd2Pho_(gx, gy, gz, &px, &py, m_f, m_aopX, m_aopY, m_aopZ, m_rotM); Pho2Scan(px, py, &px, &py, m_ref); *ix = float(px); *iy = float(py); }; void Grd2Pho_(double gx, double gy, double gz, double* px, double* py, double focus, double x0, double y0, double z0, double* rotM) { double fm = -focus / (rotM[2] * (gx - x0) + rotM[5] * (gy - y0) + rotM[8] * (gz - z0)); *px = float((rotM[0] * (gx - x0) + rotM[3] * (gy - y0) + rotM[6] * (gz - z0)) * fm); *py = float((rotM[1] * (gx - x0) + rotM[4] * (gy - y0) + rotM[7] * (gz - z0)) * fm); }; void Pho2Scan(double phoX, double phoY, double* scX, double* scY, double* ref6) { // Fix Distortion if (m_k1 > 1.E-12 || m_k1 < -1.E-12) { // unit is mm double x, y, r2, r4, dx, dy, dx0, dy0; // attention: this is an approximation ,for high accuracy ,it must be iteration x = phoX; y = phoY; r2 = x * x + y * y; r4 = r2 * r2; dx0 = dx = x * (m_k1 * r2 + m_k2 * r4) + m_p1 * (r2 + 2 * x * x) + m_p2 * 2 * x * y; dy0 = dy = y * (m_k1 * r2 + m_k2 * r4) + m_p2 * (r2 + 2 * y * y) + m_p1 * 2 * x * y; x = phoX + dx; y = phoY + dy; r2 = x * x + y * y; r4 = r2 * r2; dx = x * (m_k1 * r2 + m_k2 * r4) + m_p1 * (r2 + 2 * x * x) + m_p2 * 2 * x * y; dy = y * (m_k1 * r2 + m_k2 * r4) + m_p2 * 
(r2 + 2 * y * y) + m_p1 * 2 * x * y; phoX += dx; phoY += dy; } double tt = ref6[1] * ref6[5] - ref6[2] * ref6[4]; phoX -= ref6[0]; phoY -= ref6[3]; *scX = (ref6[5] * phoX - ref6[2] * phoY) / tt; *scY = -(-ref6[4] * phoX + ref6[1] * phoY) / tt; }; void PhoZ2Grd(double px, double py, double gz, double* gx, double* gy, double xs, double ys, double zs, double* Rot, double focus) { double A = Rot[0] * px + Rot[1] * py - Rot[2] * focus; double B = Rot[3] * px + Rot[4] * py - Rot[5] * focus; double C = Rot[6] * px + Rot[7] * py - Rot[8] * focus; double N = (C == 0 ? 0 : (gz - zs) / C); *gx = xs + A * N; *gy = ys + B * N; }; public: double m_aopX,m_aopY,m_aopZ,m_aopP,m_aopO,m_aopK;//Xs,Ys,Zs,phi,omega,kappa外方位元素 double m_f;//相机焦距 double m_Pxz;//像素大小 double m_k1, m_k2, m_p1, m_p2;//畸变参数 double m_X0,m_Y0;//像主点 int m_rows, m_cols;//原始影像大小 double m_rotM[9]; double m_ref[6]; }; int main(int argc, char** argv) { double aopX,aopY, aopZ, aopP, aopO, aopK;//Xs,Ys,Zs,phi,omega,kappa外方位元素 double f;//相机焦距 double Pxz;//像素大小 double k1, k2, p1, p2;//畸变参数 double X0,Y0;//像主点 int rows, cols;//原始影像大小 CDomPho dompho; string s; ifstream file("para.txt"); getline(file,s); file >> aopX >> aopY >> aopZ >> aopP >> aopO >> aopK >> f >> Pxz >> k1 >> k2 >> p1 >> p2 >> X0 >> Y0;//读取参数 Mat m_img; m_img = imread("DSC_3342.JPG");//读取原始影像 rows = m_img.rows; cols = m_img.cols; double corPt[12] = { 0.0,0.0,700,cols,0 ,700,0,rows ,700,cols,rows ,700 };//存放原始影像四个角点的扫描行坐标及地面高,无DEM,使用地面均高700m dompho.SetPara(aopX, aopY, aopZ, aopP, aopO, aopK, f, Pxz, cols, rows, k1, k2, p1, p2, X0, Y0); dompho.ImgZ2Grd(corPt, 4);//投影至地面的四个点 double mingx = 9999999; double mingy = 9999999; double maxgx = 0; double maxgy = 0; double h = 700; for (int i = 0; i < 4; i++) { mingx = mingx > corPt[i * 3] ? corPt[i * 3] : mingx; mingy = mingy > corPt[i * 3 + 1] ? corPt[i * 3 + 1] : mingy; maxgx = maxgx < corPt[i * 3] ? corPt[i * 3] : maxgx; maxgy = maxgy < corPt[i * 3 + 1] ? corPt[i * 3 + 1] : maxgy; } double dx = maxgx - mingx; double dy = maxgy - mingy;//确定地面范围 int domCols = dx*2 +1; int domRows = dy*2 + 1;//分辨率=0.5m,确定正射影像大小,(*2即为/0.5,加上1是使正射影像宽高向上取整) Mat domImg(domRows, domCols, CV_8UC3, Scalar(0, 0, 0)); float ix, iy; double gx = mingx; double gy = mingy; for (int r = 1; r < domRows+1; r++) { double gx = mingx; for (int c = 0; c < domCols; c++) { dompho.Grd2Img(gx, gy, h, &ix, &iy); //最近邻差值 int i = (int)(ix + 0.5); int j = (int)(iy + 0.5); if (i < 0 || i >= cols || j <= 0 || j > rows) { domImg.at<Vec3b>(domRows-r, c)[0] = 0; } else { //!!!!opencv中图像坐标原点在左上角,而扫描坐标系坐标原点在左下角 domImg.at<Vec3b>(domRows-r, c)[0] = m_img.at<Vec3b>(rows-j, i)[0]; domImg.at<Vec3b>(domRows-r, c)[1] = m_img.at<Vec3b>(rows-j, i)[1]; domImg.at<Vec3b>(domRows-r, c)[2] = m_img.at<Vec3b>(rows-j, i)[2]; } gx += 0.5;//分辨率为0.5米 } gy += 0.5; } namedWindow("dom",0); imshow("dom", domImg); //显示图像 cvWaitKey(0); //等待按键 imwrite("domimg.jpg", domImg); }