Calling a C++ dynamic library from Python for image stitching (the output returned by the call was wrong)

The C++ side is adapted from OpenCV's stitching_detailed.cpp sample and exported as a DLL; the Python side drives it through ctypes. The broken output came from the return path: the DLL copied too few bytes (width*height instead of width*height*channels, and of a 16-bit image at that) into a buffer that was too small, and the Python side reshaped the result as single-channel. The fixes are marked in the comments below.
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/timelapsers.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d/nonfree.hpp"
#endif

#define ENABLE_LOG 1
#define LOG(msg) std::cout << msg
#define LOGLN(msg) std::cout << msg << std::endl

using namespace std;
using namespace cv;
using namespace cv::detail;

// Default command line args

#if 1
#define DLL_API __declspec(dllexport)
#else
#define DLL_API __declspec(dllimport)
#endif

extern "C" { // Python's ctypes only binds unmangled C symbols, so export a C interface
    typedef struct ImageBase {
        int w;               // image width
        int h;               // image height
        int c;               // number of channels
        unsigned char *data; // pixel buffer shared between Python and C++: one byte per value, 0-255
    } ImageMeta;
    //typedef ImageBase ImageMeta;

    DLL_API int Stitch(ImageMeta *im1, ImageMeta *im2); // exported entry point (signature may still evolve)
};

//vector<String> img_names;
int num_images;
bool preview = false;
bool try_cuda = false;
double work_megapix = 0.6;
double seam_megapix = 0.1;
double compose_megapix = -1;
float conf_thresh = 1.f;
#ifdef HAVE_OPENCV_XFEATURES2D
string features_type = "surf";
#else
string features_type = "orb";
#endif
string matcher_type = "homography";
string estimator_type = "homography";
string ba_cost_func = "ray";
string ba_refine_mask = "xxxxx";
bool do_wave_correct = true;
WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ;
bool save_graph = false;
std::string save_graph_to;
string warp_type = "spherical";
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
int expos_comp_nr_feeds = 1;
int expos_comp_nr_filtering = 2;
int expos_comp_block_size = 32;
float match_conf = 0.3f;
string seam_find_type = "gc_color";
int blend_type = Blender::MULTI_BAND;
int timelapse_type = Timelapser::AS_IS; // timelapse mode
float blend_strength = 5;
string result_name = "D:/result.jpg";
bool timelapse = false; // timelapse defaults to false
int range_width = -1;

DLL_API int Stitch(ImageMeta *im1, ImageMeta *im2) // in: two struct pointers; out: the result is returned through the second one
{
    // A future interface could take an image count plus a list of images
    // (vector<Mat> img_list); the Python side would check the length first and
    // pass it along as a parameter.
    //preview = true;
    //try_cuda = true;
    //result_name = "D:/result.jpg";
    //work_megapix = -1;
    //features_type = "orb";

    // Wrap the buffers passed from Python in Mat headers using the same struct
    // fields (w, h, data), then clone so OpenCV owns its own copies. Building
    // the Mat directly from the pointer replaces the original
    // Mat::zeros + `img.data = im->data` hack, which left the Mat's reference
    // counting pointing at the wrong buffer.
    Mat img1(im1->h, im1->w, CV_8UC3, im1->data);
    Mat img2(im2->h, im2->w, CV_8UC3, im2->data);

    //Mat img1, img2;
    //img1 = imread("D:/1Hill.jpg");
    //img2 = imread("D:/2Hill.jpg");
    vector<Mat> ALLimages(2);
    ALLimages[0] = img1.clone();
    ALLimages[1] = img2.clone();
    //img_names.push_back("D:/1Hill.jpg");
    //img_names.push_back("D:/2Hill.jpg");
    //img_names.push_back("D:/3Hill.jpg");
    num_images = 2;
#if ENABLE_LOG
    int64 app_start_time = getTickCount();
#endif
#if 0
    cv::setBreakOnError(true);
#endif
    //int retval = parseCmdArgs(argc, argv);
    //if (retval)
    //    return retval;
    // Check if have enough images
    //int num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }
    double work_scale = 1, seam_scale = 1, compose_scale = 1;
    bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;
    LOGLN("Finding features...");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif
    Ptr<Feature2D> finder;
    if (features_type == "orb")
    {
        finder = ORB::create();
    }
    else if (features_type == "akaze")
    {
        finder = AKAZE::create();
    }
#ifdef HAVE_OPENCV_XFEATURES2D
    else if (features_type == "surf")
    {
        finder = xfeatures2d::SURF::create();
    }
    else if (features_type == "sift")
    {
        finder = xfeatures2d::SIFT::create();
    }
#endif
    else
    {
        cout << "Unknown 2D features type: '" << features_type << "'.\n";
        return -1;
    }
    Mat full_img, img;
    vector<ImageFeatures> features(num_images);
    vector<Mat> images(num_images);
    vector<Size> full_img_sizes(num_images);
    double seam_work_aspect = 1;
    for (int i = 0; i < num_images; ++i)
    {
        full_img = ALLimages[i];
        // Design notes: full_img now holds a decoded image (an imread-style Mat).
        // Debug one side of the interop at a time (divide and conquer) before
        // wiring the whole pipeline together. Python could alternatively pass in
        // n base64-encoded images and decode them here; the DLL would hand back
        // an array that Python converts to a cv2 image (and then to base64).
        full_img_sizes[i] = full_img.size(); // e.g. full_img_sizes = [(500,300),(200,100)]
        if (full_img.empty())
        {
            //LOGLN("Can't open image " << img_names[i]); // disabled: img_names is unused here, and indexing it dereferenced an empty vector
            return -2;
        }
        if (work_megapix < 0)
        {
            img = full_img;
            work_scale = 1;
            is_work_scale_set = true;
        }
        else
        {
            if (!is_work_scale_set)
            {
                work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
                is_work_scale_set = true;
            }
            resize(full_img, img, Size(), work_scale, work_scale, INTER_LINEAR_EXACT);
        }
        if (!is_seam_scale_set)
        {
            seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
            seam_work_aspect = seam_scale / work_scale;
            is_seam_scale_set = true;
        }
        computeImageFeatures(finder, img, features[i]);
        features[i].img_idx = i;
        LOGLN("Features in image #" << i + 1 << ": " << features[i].keypoints.size());
        resize(full_img, img, Size(), seam_scale, seam_scale, INTER_LINEAR_EXACT);
        images[i] = img.clone();
        // The loop computes features for every image, then keeps a seam-scale copy in `images`.
    }
    full_img.release();
    img.release();
    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    LOG("Pairwise matching");
#if ENABLE_LOG
    t = getTickCount();
#endif
    vector<MatchesInfo> pairwise_matches;
    Ptr<FeaturesMatcher> matcher;
    if (matcher_type == "affine")
        matcher = makePtr<AffineBestOf2NearestMatcher>(false, try_cuda, match_conf);
    else if (range_width == -1)
        matcher = makePtr<BestOf2NearestMatcher>(try_cuda, match_conf);
    else
        matcher = makePtr<BestOf2NearestRangeMatcher>(range_width, try_cuda, match_conf);
    (*matcher)(features, pairwise_matches);
    matcher->collectGarbage();
    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    // Check if we should save matches graph
    //if (save_graph)
    //{
    //    LOGLN("Saving matches graph...");
    //    ofstream f(save_graph_to.c_str());
    //    f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
    //}
    // Leave only images we are sure are from the same panorama
    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
    if (indices.size() != 2) // both images must survive the filtering, i.e. they overlap enough
        return -1;

    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }
    Ptr<Estimator> estimator;
    if (estimator_type == "affine")
        estimator = makePtr<AffineBasedEstimator>();
    else
        estimator = makePtr<HomographyBasedEstimator>();
    vector<CameraParams> cameras;
    if (!(*estimator)(features, pairwise_matches, cameras))
    {
        cout << "Homography estimation failed.\n";
        return -1;
    }
    for (size_t i = 0; i < cameras.size(); ++i)
    {
        Mat R;
        cameras[i].R.convertTo(R, CV_32F);
        cameras[i].R = R;
        //LOGLN("Initial camera intrinsics #" << indices[i] + 1 << ":\nK:\n" << cameras[i].K() << "\nR:\n" << cameras[i].R);
    }
    Ptr<detail::BundleAdjusterBase> adjuster;
    if (ba_cost_func == "reproj") adjuster = makePtr<detail::BundleAdjusterReproj>();
    else if (ba_cost_func == "ray") adjuster = makePtr<detail::BundleAdjusterRay>();
    else if (ba_cost_func == "affine") adjuster = makePtr<detail::BundleAdjusterAffinePartial>();
    else if (ba_cost_func == "no") adjuster = makePtr<NoBundleAdjuster>();
    else
    {
        cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
        return -1;
    }
    adjuster->setConfThresh(conf_thresh);
    Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
    if (ba_refine_mask[0] == 'x') refine_mask(0, 0) = 1;
    if (ba_refine_mask[1] == 'x') refine_mask(0, 1) = 1;
    if (ba_refine_mask[2] == 'x') refine_mask(0, 2) = 1;
    if (ba_refine_mask[3] == 'x') refine_mask(1, 1) = 1;
    if (ba_refine_mask[4] == 'x') refine_mask(1, 2) = 1;
    adjuster->setRefinementMask(refine_mask);
    if (!(*adjuster)(features, pairwise_matches, cameras))
    {
        cout << "Camera parameters adjusting failed.\n";
        return -1;
    }
    // Find median focal length
    vector<double> focals;
    for (size_t i = 0; i < cameras.size(); ++i)
    {
        //LOGLN("Camera #" << indices[i] + 1 << ":\nK:\n" << cameras[i].K() << "\nR:\n" << cameras[i].R);
        focals.push_back(cameras[i].focal);
    }
    sort(focals.begin(), focals.end());
    float warped_image_scale;
    if (focals.size() % 2 == 1)
        warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
    else
        warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
    if (do_wave_correct)
    {
        vector<Mat> rmats;
        for (size_t i = 0; i < cameras.size(); ++i)
            rmats.push_back(cameras[i].R.clone());
        waveCorrect(rmats, wave_correct);
        for (size_t i = 0; i < cameras.size(); ++i)
            cameras[i].R = rmats[i];
    }
    LOGLN("Warping images (auxiliary)...");
#if ENABLE_LOG
    t = getTickCount();
#endif
    vector<Point> corners(num_images);
    vector<UMat> masks_warped(num_images);
    vector<UMat> images_warped(num_images);
    vector<Size> sizes(num_images);
    vector<UMat> masks(num_images);
    // Prepare images masks
    for (int i = 0; i < num_images; ++i)
    {
        masks[i].create(images[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }
    // Warp images and their masks
    Ptr<WarperCreator> warper_creator;
#ifdef HAVE_OPENCV_CUDAWARPING
    if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
    {
        if (warp_type == "plane")
            warper_creator = makePtr<cv::PlaneWarperGpu>();
        else if (warp_type == "cylindrical")
            warper_creator = makePtr<cv::CylindricalWarperGpu>();
        else if (warp_type == "spherical")
            warper_creator = makePtr<cv::SphericalWarperGpu>();
    }
    else
#endif
    {
        if (warp_type == "plane")
            warper_creator = makePtr<cv::PlaneWarper>();
        else if (warp_type == "affine")
            warper_creator = makePtr<cv::AffineWarper>();
        else if (warp_type == "cylindrical")
            warper_creator = makePtr<cv::CylindricalWarper>();
        else if (warp_type == "spherical")
            warper_creator = makePtr<cv::SphericalWarper>();
        else if (warp_type == "fisheye")
            warper_creator = makePtr<cv::FisheyeWarper>();
        else if (warp_type == "stereographic")
            warper_creator = makePtr<cv::StereographicWarper>();
        else if (warp_type == "compressedPlaneA2B1")
            warper_creator = makePtr<cv::CompressedRectilinearWarper>(2.0f, 1.0f);
        else if (warp_type == "compressedPlaneA1.5B1")
            warper_creator = makePtr<cv::CompressedRectilinearWarper>(1.5f, 1.0f);
        else if (warp_type == "compressedPlanePortraitA2B1")
            warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(2.0f, 1.0f);
        else if (warp_type == "compressedPlanePortraitA1.5B1")
            warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
        else if (warp_type == "paniniA2B1")
            warper_creator = makePtr<cv::PaniniWarper>(2.0f, 1.0f);
        else if (warp_type == "paniniA1.5B1")
            warper_creator = makePtr<cv::PaniniWarper>(1.5f, 1.0f);
        else if (warp_type == "paniniPortraitA2B1")
            warper_creator = makePtr<cv::PaniniPortraitWarper>(2.0f, 1.0f);
        else if (warp_type == "paniniPortraitA1.5B1")
            warper_creator = makePtr<cv::PaniniPortraitWarper>(1.5f, 1.0f);
        else if (warp_type == "mercator")
            warper_creator = makePtr<cv::MercatorWarper>();
        else if (warp_type == "transverseMercator")
            warper_creator = makePtr<cv::TransverseMercatorWarper>();
    }
    if (!warper_creator)
    {
        cout << "Can't create the following warper '" << warp_type << "'\n";
        return 1;
    }
    Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
    for (int i = 0; i < num_images; ++i)
    {
        Mat_<float> K;
        cameras[i].K().convertTo(K, CV_32F);
        float swa = (float)seam_work_aspect;
        K(0, 0) *= swa; K(0, 2) *= swa;
        K(1, 1) *= swa; K(1, 2) *= swa;
        corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();
        warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }
    vector<UMat> images_warped_f(num_images);
    for (int i = 0; i < num_images; ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);
    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    LOGLN("Compensating exposure...");
#if ENABLE_LOG
    t = getTickCount();
#endif
    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
    if (dynamic_cast<GainCompensator*>(compensator.get()))
    {
        GainCompensator* gcompensator = dynamic_cast<GainCompensator*>(compensator.get());
        gcompensator->setNrFeeds(expos_comp_nr_feeds);
    }
    if (dynamic_cast<ChannelsCompensator*>(compensator.get()))
    {
        ChannelsCompensator* ccompensator = dynamic_cast<ChannelsCompensator*>(compensator.get());
        ccompensator->setNrFeeds(expos_comp_nr_feeds);
    }
    if (dynamic_cast<BlocksCompensator*>(compensator.get()))
    {
        BlocksCompensator* bcompensator = dynamic_cast<BlocksCompensator*>(compensator.get());
        bcompensator->setNrFeeds(expos_comp_nr_feeds);
        bcompensator->setNrGainsFilteringIterations(expos_comp_nr_filtering);
        bcompensator->setBlockSize(expos_comp_block_size, expos_comp_block_size);
    }
    compensator->feed(corners, images_warped, masks_warped);
    LOGLN("Compensating exposure, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    LOGLN("Finding seams...");
#if ENABLE_LOG
    t = getTickCount();
#endif
    Ptr<SeamFinder> seam_finder;
    if (seam_find_type == "no")
        seam_finder = makePtr<detail::NoSeamFinder>();
    else if (seam_find_type == "voronoi")
        seam_finder = makePtr<detail::VoronoiSeamFinder>();
    else if (seam_find_type == "gc_color")
    {
#ifdef HAVE_OPENCV_CUDALEGACY
        if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
            seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR);
        else
#endif
            seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR);
    }
    else if (seam_find_type == "gc_colorgrad")
    {
#ifdef HAVE_OPENCV_CUDALEGACY
        if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
            seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
        else
#endif
            seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
    }
    else if (seam_find_type == "dp_color")
        seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR);
    else if (seam_find_type == "dp_colorgrad")
        seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR_GRAD);
    if (!seam_finder)
    {
        cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
        return 1;
    }
    seam_finder->find(images_warped_f, corners, masks_warped);
    LOGLN("Finding seams, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    // Release unused memory
    images.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();
    LOGLN("Compositing...");
#if ENABLE_LOG
    t = getTickCount();
#endif
    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;
    Ptr<Blender> blender;
    Ptr<Timelapser> timelapser;
    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;
    for (int img_idx = 0; img_idx < num_images; ++img_idx)
    {
        //LOGLN("Compositing image #" << indices[img_idx] + 1);
        // Read image and resize it if necessary
        full_img = ALLimages[img_idx];
        if (!is_compose_scale_set)
        {
            if (compose_megapix > 0)
                compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;
            // Compute relative scales
            //compose_seam_aspect = compose_scale / seam_scale;
            compose_work_aspect = compose_scale / work_scale;
            // Update warped image scale
            warped_image_scale *= static_cast<float>(compose_work_aspect);
            warper = warper_creator->create(warped_image_scale);
            // Update corners and sizes
            for (int i = 0; i < num_images; ++i)
            {
                // Update intrinsics
                cameras[i].focal *= compose_work_aspect;
                cameras[i].ppx *= compose_work_aspect;
                cameras[i].ppy *= compose_work_aspect;
                // Update corner and size
                Size sz = full_img_sizes[i];
                Mat K;
                cameras[i].K().convertTo(K, CV_32F);
                Rect roi = warper->warpRoi(sz, K, cameras[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        if (abs(compose_scale - 1) > 1e-1) // not taken with the defaults (compose_scale stays 1)
            resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();
        Mat K;
        cameras[img_idx].K().convertTo(K, CV_32F);
        // Warp the current image
        warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
        // Warp the current image mask
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
        // Compensate exposure
        compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
        img_warped.convertTo(img_warped_s, CV_16S);
        img_warped.release();
        img.release();
        mask.release();
        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
        mask_warped = seam_mask & mask_warped;
        if (!blender && !timelapse) // blender is still null and timelapse is false, so this branch runs
        { // set up the multi-band blender
            blender = Blender::createDefault(blend_type, try_cuda);
            Size dst_sz = resultRoi(corners, sizes).size();
            float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
            if (blend_width < 1.f)
                blender = Blender::createDefault(Blender::NO, try_cuda);
            else if (blend_type == Blender::MULTI_BAND)
            {
                MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(blender.get());
                mb->setNumBands(static_cast<int>(ceil(log(blend_width) / log(2.)) - 1.));
                LOGLN("Multi-band blender, number of bands: " << mb->numBands());
            }
            else if (blend_type == Blender::FEATHER) // not executed with the defaults
            {
                FeatherBlender* fb = dynamic_cast<FeatherBlender*>(blender.get());
                fb->setSharpness(1.f / blend_width);
                LOGLN("Feather blender, sharpness: " << fb->sharpness());
            }
            blender->prepare(corners, sizes);
        }
        else if (!timelapser && timelapse) // timelapse is false, so this branch never runs
        {
            timelapser = Timelapser::createDefault(timelapse_type);
            timelapser->initialize(corners, sizes);
            cout << "---------------------- reached (timelapser setup) ----------------------" << endl;
        }
        // Blend the current image
        if (timelapse) // false by default
        {
            cout << "---------------------- reached (timelapse branch) ----------------------" << endl;
        }
        else
        { // runs twice: once per image in the loop
            blender->feed(img_warped_s, mask_warped, corners[img_idx]);
            cout << "---------------------- reached (blender feed) ----------------------" << endl;
        }
    }
    if (!timelapse) // executed
    {
        Mat result, result_mask;
        blender->blend(result, result_mask);
        LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
        // The blender produces a CV_16SC3 image; convert to 8-bit before saving
        // and before copying it back to Python.
        Mat result_8u;
        result.convertTo(result_8u, CV_8U);
        imwrite(result_name, result_8u);
        // Copy width * height * channels bytes. The original copied only
        // width * height bytes of the 16-bit multi-channel image, which is why
        // the returned picture was garbage. im2->data must point to a buffer
        // the Python caller over-allocated: the panorama is larger than image 2.
        memcpy(im2->data, result_8u.data, (size_t)result_8u.cols * result_8u.rows * result_8u.channels());
        im2->w = result_8u.cols;
        im2->h = result_8u.rows;
        im2->c = result_8u.channels();
    }
    LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
    return 0;
}
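Before involving the stitching pipeline at all, it is worth verifying the struct marshalling on its own, in the divide-and-conquer spirit of the comments above. The following is a minimal sketch, not part of the original post: it re-declares the same IMAGE layout used by the script below, and `to_struct` is a hypothetical helper. It only checks that a numpy image survives the numpy-to-ctypes round trip byte for byte, with no DLL involved.

from ctypes import Structure, POINTER, c_int, c_uint8
import numpy as np

class IMAGE(Structure):  # same field order and types as the C++ ImageBase struct
    _fields_ = [("w", c_int), ("h", c_int), ("c", c_int), ("data", POINTER(c_uint8))]

def to_struct(arr):
    """Flatten an HWC uint8 image into the ctypes struct (copies the bytes)."""
    h, w, c = arr.shape
    flat = arr.flatten()
    buf = (c_uint8 * flat.size)()
    buf[:] = flat
    return IMAGE(w, h, c, buf)

# Round-trip check: pack a random image, view the bytes again, compare.
src = np.random.randint(0, 256, (4, 5, 3), dtype=np.uint8)
im = to_struct(src)
back = np.ctypeslib.as_array(im.data, shape=(im.h, im.w, im.c))
assert (back == src).all(), "marshalling is broken"
print("struct round-trip OK:", im.w, im.h, im.c)

If this assertion holds, any remaining corruption has to come from the DLL side (byte count, buffer size, or pixel depth), which narrows the search considerably.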
from ctypes import *
import numpy as np
import cv2


# The marshalling below follows darknet.py from YOLO.

def c_array(ctype, values):  # copy the image data into one contiguous ctypes array so C++ can use this memory
    arr = (ctype * len(values))()
    arr[:] = values
    return arr


def array_to_image(arr):
    c = arr.shape[2]
    h = arr.shape[0]
    w = arr.shape[1]
    arr = arr.flatten()  # the image becomes 1-D after flattening
    data = c_array(c_uint8, arr)
    im = IMAGE(w, h, c, data)  # wrap the flattened array in the struct the DLL expects
    return im


class IMAGE(Structure):  # must stay field-for-field identical to the struct on the C++ side (ImgSegmentation.hpp)
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_uint8))]


img1 = cv2.imread('D:/1Hill.jpg')
img2 = cv2.imread('D:/2Hill.jpg')

im1 = array_to_image(img1)  # convert the cv2 (numpy) image into the flat struct

# im2 doubles as the output parameter: Stitch() overwrites it with the stitched
# panorama, which is larger than image 2. The original code backed im2 with a
# buffer exactly the size of image 2, so the DLL's memcpy wrote past the end.
# Back it with an over-sized buffer instead (a heuristic upper bound; a cleaner
# interface would let the DLL report the required size first).
capacity = (img1.shape[0] + img2.shape[0]) * (img1.shape[1] + img2.shape[1]) * 3
buf = (c_uint8 * capacity)()
flat2 = img2.flatten()
buf[:flat2.size] = flat2
im2 = IMAGE(img2.shape[1], img2.shape[0], img2.shape[2], buf)

lib = cdll.LoadLibrary('./image_stiching.dll')  # load the dynamic library
lib.Stitch.argtypes = [POINTER(IMAGE), POINTER(IMAGE)]  # two struct pointers; ctypes spells C's `IMAGE*` as POINTER(IMAGE)
lib.Stitch(im1, im2)  # ctypes passes the structs by reference, so the DLL mutates im2's buffer and fields in place

y = im2.data  # the data pointer now points at the stitched result
array_length = im2.h * im2.w * im2.c  # h*w*channels bytes, not h*w: the result has 3 channels

# Wrap the raw C buffer in a memoryview, then view it as a numpy ndarray.
buffer_from_memory = pythonapi.PyMemoryView_FromMemory  # the Python 3 API for wrapping raw memory
buffer_from_memory.restype = py_object  # the call returns a Python object (a memoryview)
PyBUF_READ = 0x100
buffer = buffer_from_memory(y, array_length, PyBUF_READ)  # (mem, size, flags); the flags argument is required, the original omitted it
img = np.frombuffer(buffer, dtype=np.uint8)  # uint8 view over the buffer
print("----------------------")
print(img.shape)
img = np.reshape(img, (im2.h, im2.w, im2.c))  # HWC, 3-channel BGR; the original reshaped to (h, w, 1)
print("-------2---------")
print(img.shape)
print(img)
cv2.imshow('test', img)
cv2.imwrite("D:/RESULT_PY.JPG", img)
cv2.waitKey(0)
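As an aside, the PyMemoryView_FromMemory step can be avoided entirely: numpy's own ctypes bridge wraps a pointer directly. A minimal sketch, assuming im2 has just been filled in by Stitch() as in the script above; the array is a view over the ctypes buffer, so copy it before the buffer can go away.

import numpy as np
import cv2

# View the result buffer directly as an HWC uint8 array (no copy yet).
result = np.ctypeslib.as_array(im2.data, shape=(im2.h, im2.w, im2.c))
result = result.copy()  # detach from the ctypes buffer before it is freed
cv2.imwrite("D:/RESULT_PY.JPG", result)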