
Learning OpenCV: Simplified ORB & Faster Location

2016-04-17 19:36  GarfieldEr007

Following the structure of the earlier simplified SURF version, I simplified the ORB detection code in the same way. The speed is unchanged, but it does save quite a few lines of code, mainly because BruteForceMatcher<HammingLUT> matcher does the matching directly, so there is no need to write a separate matching function.

A really powerful option is class gpu::BruteForceMatcher_GPU; a small sketch of how it could be used follows.
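
A minimal sketch of dropping in that GPU matcher, under a few assumptions: OpenCV must be built with CUDA and the 2.3-era gpu module, gpuMatch is just an illustrative helper name, and descriptor1/descriptor2 are the ORB descriptor matrices from the listing below.

[cpp]
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/gpu/gpu.hpp"

using namespace cv;
using namespace std;

// match two sets of binary ORB descriptors on the GPU with Hamming distance
vector<DMatch> gpuMatch(const Mat& descriptor1, const Mat& descriptor2)
{
    gpu::GpuMat d_desc1(descriptor1), d_desc2(descriptor2);  // upload descriptors to the device
    gpu::BruteForceMatcher_GPU<Hamming> matcher;
    vector<DMatch> matches;
    matcher.match(d_desc1, d_desc2, matches);                // one nearest neighbour per query row
    return matches;
}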

Adding findHomography followed by perspectiveTransform performs the location, but done this way it is quite slow.

So I changed the approach: take the mean of the x and y coordinates of the matched keypoints, which is essentially the center of the object.

Drawing a rectangle of the same size as the original object around this point gives a rough location. It is certainly less accurate than the perspective transform, and it is not scale-invariant.

But it should be more robust: as long as the matching succeeds, the center can almost always be located, whereas the perspective transform sometimes draws a completely unreliable box because of a large scale change or similar factors.

 

[cpp]
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"

#include <string>
#include <vector>
#include <iostream>

using namespace cv;
using namespace std;

const char* image_filename1 = "D:/src.jpg";
const char* image_filename2 = "D:/Demo.jpg";

int main()
{
    Mat img1 = imread( image_filename1, CV_LOAD_IMAGE_GRAYSCALE );
    Mat img2 = imread( image_filename2, CV_LOAD_IMAGE_GRAYSCALE );

    int64 st, et;

    // ORB detectors/extractors (OpenCV 2.3 API): feature count, scale factor 1.2, 1 pyramid level
    ORB orb1(30, ORB::CommonParams(1.2, 1));
    ORB orb2(100, ORB::CommonParams(1.2, 1));

    vector<KeyPoint> keys1, keys2;
    Mat descriptor1, descriptor2;

    // detect keypoints and compute binary descriptors
    orb1(img1, Mat(), keys1, descriptor1, false);
    st = getTickCount();
    orb2(img2, Mat(), keys2, descriptor2, false);
    et = getTickCount() - st;
    et = et * 1000 / getTickFrequency();
    cout << "extract time:" << et << "ms" << endl;

    vector<DMatch> matches;
    // GPU alternative: class gpu::BruteForceMatcher_GPU
    BruteForceMatcher<HammingLUT> matcher;  // BruteForceMatcher supports Hamming, L1<float>, L2<float>
    // FlannBasedMatcher matcher;           // not supported for binary ORB descriptors
    st = getTickCount();
    matcher.match(descriptor1, descriptor2, matches);
    et = getTickCount() - st;
    et = et * 1000 / getTickFrequency();
    cout << "match time:" << et << "ms" << endl;

    Mat img_matches;
    drawMatches( img1, keys1, img2, keys2,
                matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    imshow("match", img_matches);

    cout << "match size:" << matches.size() << endl;
    /*
    Mat showImg;
    drawMatches(img1, keys1, img2, keys2, matches, showImg);
    imshow( "win", showImg );
    */
    waitKey(0);

    st = getTickCount();

    // collect matched point pairs; the mean of the matched points in img2
    // is taken as the approximate object center
    vector<Point2f> pt1;
    vector<Point2f> pt2;
    float x = 0, y = 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        pt1.push_back(keys1[matches[i].queryIdx].pt);
        pt2.push_back(keys2[matches[i].trainIdx].pt);
        x += keys2[matches[i].trainIdx].pt.x;
        y += keys2[matches[i].trainIdx].pt.y;
    }
    x = x / matches.size();
    y = y / matches.size();

    // homography-based location: project the corners of img1 into img2
    Mat homo;
    homo = findHomography(pt1, pt2, CV_RANSAC);

    vector<Point2f> src_cornor(4);
    vector<Point2f> dst_cornor(4);
    src_cornor[0] = cvPoint(0, 0);
    src_cornor[1] = cvPoint(img1.cols, 0);
    src_cornor[2] = cvPoint(img1.cols, img1.rows);
    src_cornor[3] = cvPoint(0, img1.rows);
    perspectiveTransform(src_cornor, dst_cornor, homo);

    Mat img = imread(image_filename2, 1);

    // blue quadrilateral: result of the perspective transform
    line(img, dst_cornor[0], dst_cornor[1], Scalar(255, 0, 0), 2);
    line(img, dst_cornor[1], dst_cornor[2], Scalar(255, 0, 0), 2);
    line(img, dst_cornor[2], dst_cornor[3], Scalar(255, 0, 0), 2);
    line(img, dst_cornor[3], dst_cornor[0], Scalar(255, 0, 0), 2);
    /*
    line(img, cvPoint((int)dst_cornor[0].x, (int)dst_cornor[0].y), cvPoint((int)dst_cornor[1].x, (int)dst_cornor[1].y), Scalar(255, 0, 0), 2);
    line(img, cvPoint((int)dst_cornor[1].x, (int)dst_cornor[1].y), cvPoint((int)dst_cornor[2].x, (int)dst_cornor[2].y), Scalar(255, 0, 0), 2);
    line(img, cvPoint((int)dst_cornor[2].x, (int)dst_cornor[2].y), cvPoint((int)dst_cornor[3].x, (int)dst_cornor[3].y), Scalar(255, 0, 0), 2);
    line(img, cvPoint((int)dst_cornor[3].x, (int)dst_cornor[3].y), cvPoint((int)dst_cornor[0].x, (int)dst_cornor[0].y), Scalar(255, 0, 0), 2);
    */

    // red dot and rectangle: centroid-based location, same size as img1
    circle(img, Point(x, y), 10, Scalar(0, 0, 255), CV_FILLED);
    line(img, Point(x - img1.cols/2, y - img1.rows/2), Point(x + img1.cols/2, y - img1.rows/2), Scalar(0, 0, 255), 2);
    line(img, Point(x + img1.cols/2, y - img1.rows/2), Point(x + img1.cols/2, y + img1.rows/2), Scalar(0, 0, 255), 2);
    line(img, Point(x + img1.cols/2, y + img1.rows/2), Point(x - img1.cols/2, y + img1.rows/2), Scalar(0, 0, 255), 2);
    line(img, Point(x - img1.cols/2, y + img1.rows/2), Point(x - img1.cols/2, y - img1.rows/2), Scalar(0, 0, 255), 2);

    imshow("location", img);

    et = getTickCount() - st;
    et = et * 1000 / getTickFrequency();
    cout << "location time:" << et << "ms" << endl;

    waitKey(0);
    return 0;
}
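
Note that the listing above uses the OpenCV 2.3 interface (ORB::CommonParams, BruteForceMatcher<HammingLUT>), which was removed in later releases. As a rough, untested sketch, the same detect-and-match step written against the OpenCV 2.4.x API would look something like this:

[cpp]
#include <vector>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"

using namespace cv;
using namespace std;

int main()
{
    Mat img1 = imread("D:/src.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat img2 = imread("D:/Demo.jpg", CV_LOAD_IMAGE_GRAYSCALE);

    // OpenCV 2.4 ORB: only the feature counts are set, other parameters use defaults
    ORB orb1(30), orb2(100);
    vector<KeyPoint> keys1, keys2;
    Mat descriptor1, descriptor2;
    orb1(img1, Mat(), keys1, descriptor1);
    orb2(img2, Mat(), keys2, descriptor2);

    // BFMatcher with the Hamming norm replaces BruteForceMatcher<HammingLUT>
    BFMatcher matcher(NORM_HAMMING);
    vector<DMatch> matches;
    matcher.match(descriptor1, descriptor2, matches);
    cout << "match size:" << matches.size() << endl;
    return 0;
}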
from: http://blog.csdn.net/yangtrees/article/details/7545820