#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/features2d.hpp" // SURF lives in the nonfree module in OpenCV 2.4+

using namespace cv;

void readme();

/** @function main */
int main( int argc, char** argv )
{
  if( argc != 3 )
  { readme(); return -1; }

  Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
  Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );

  if( !img_object.data || !img_scene.data )
  { std::cout << " --(!) Error reading images " << std::endl; return -1; }

  //-- Step 1: Detect the keypoints using the SURF detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_object, keypoints_scene;

  detector.detect( img_object, keypoints_object );
  detector.detect( img_scene, keypoints_scene );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_object, descriptors_scene;

  extractor.compute( img_object, keypoints_object, descriptors_object );
  extractor.compute( img_scene, keypoints_scene, descriptors_scene );

  //-- Step 3: Match descriptor vectors using the FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between matched keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  //-- Keep only "good" matches (i.e. matches whose distance is less than 3*min_dist )
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_object.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i] ); }
  }

  Mat img_matches;
  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  //-- Localize the object
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( size_t i = 0; i < good_matches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    //   (queryIdx indexes the object descriptors, trainIdx the scene descriptors)
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
  }

  Mat H = findHomography( obj, scene, CV_RANSAC );

  //-- Get the corners from image_1 ( the object to be "detected" )
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = Point2f( 0, 0 );
  obj_corners[1] = Point2f( img_object.cols, 0 );
  obj_corners[2] = Point2f( img_object.cols, img_object.rows );
  obj_corners[3] = Point2f( 0, img_object.rows );
  std::vector<Point2f> scene_corners(4);

  perspectiveTransform( obj_corners, scene_corners, H );

  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
  //   The offset Point2f( img_object.cols, 0 ) shifts the corners into the scene (right) half of img_matches.
  line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

  //-- Show detected matches
  imshow( "Good Matches & Object detection", img_matches );

  waitKey(0);
  return 0;
}

/** @function readme */
void readme()
{ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }
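
// A possible way to build and run this sample (a sketch only: the source file name and
// the test images are assumptions, and it presumes an OpenCV 2.4 install whose
// pkg-config package is named "opencv" and includes the nonfree module):
//
//   g++ SURF_Homography.cpp -o SURF_Homography `pkg-config --cflags --libs opencv`
//   ./SURF_Homography object.png scene.png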