GIS: How to Seamlessly Stitch Two Images (OpenCV Image Stitching)

The code below is the preparation stage of a MyVideoStitcher class built on OpenCV's stitching module: Prepare() validates the input and optionally uploads the precomputed maps to the GPU, PrepareAPAP() builds the APAP warp maps and then estimates seams, exposure compensation and blend weights, FindFeatures() extracts SURF or ORB features, and MatchImages() matches them pairwise and rejects noise images.


int MyVideoStitcher::Prepare(vector<Mat> &src)
{
	cv::setBreakOnError(true);
	int num_images = static_cast<int>(src.size());
	if (num_images < 2)
	{
		printf("Need more images\n");
		return STITCH_CONFIG_ERROR;
	}

	// Probe the CUDA device; if GPU stitching was requested but the test fails, report and bail out
	int cudaStatus = testGPU();
	if (is_try_gpu_ && cudaStatus != 0)
	{
		//printf("GPU acceleration failed! Error code: %d. Please ensure that you have a CUDA-capable GPU installed!\n", cudaStatus);
		printf("Stitching with CPU next ...\n");
		return STITCH_CONFIG_ERROR;
	}
	
	// The warp type is hard-coded to APAP here; otherwise the classical OpenCV warpers are used
	int flag;
	warp_type_ = "apap";
	if (warp_type_ == "apap")
		flag = PrepareAPAP(src);
	else
		flag = PrepareClassical(src);
	
	// If CPU-side preparation succeeded and GPU use is enabled, upload the per-image
	// remap tables, weight maps, sizes and corner offsets to the GPU
	if (flag == 0 && is_try_gpu_)
	{
		flag = initGPU(num_images);
		if (flag != 0)
			return flag;
		C2GInitData *c2g_data = new C2GInitData[num_images];
		for(int i = 0; i < num_images; i++)
		{
			c2g_data[i].xmap			= xmaps_[i].ptr<float>(0);
			c2g_data[i].ymap			= ymaps_[i].ptr<float>(0);
			c2g_data[i].ec_weight		= ec_weight_maps_[i].ptr<float>(0);
			c2g_data[i].blend_weight	= blend_weight_maps_[i].ptr<float>(0);
			c2g_data[i].total_weight	= total_weight_maps_[i].ptr<float>(0);
			c2g_data[i].height			= src[i].rows;
			c2g_data[i].width			= src[i].cols;
			c2g_data[i].warped_height	= xmaps_[i].rows;
			c2g_data[i].warped_width	= xmaps_[i].cols;
			c2g_data[i].corner_x		= corners_[i].x - dst_roi_.x;
			c2g_data[i].corner_y		= corners_[i].y - dst_roi_.y;
		}
		initdataCopy2GPU(c2g_data, dst_roi_.height, dst_roi_.width);
		delete[] c2g_data;	// free the host-side descriptors (assumes initdataCopy2GPU copies rather than retains them)
	}

	if(flag == STITCH_SUCCESS)
	{
		printf("\t~Prepare complete");
		is_prepared_ = true;
	}

	return flag;
}
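
Prepare() is meant to run once, before any frames are stitched; the remap tables and weight maps it builds are then reused for every frame. A minimal driver sketch is shown below. MyVideoStitcher's per-frame stitching call is not included in this excerpt, so the StitchFrame name (and treating 0 as STITCH_SUCCESS) are assumptions for illustration only.

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
	// Grab one frame from each camera (device indices 0 and 1 are placeholders)
	std::vector<cv::VideoCapture> caps { cv::VideoCapture(0), cv::VideoCapture(1) };
	std::vector<cv::Mat> frames(caps.size());
	for (size_t i = 0; i < caps.size(); ++i)
		if (!caps[i].read(frames[i]))
			return -1;

	// One-time preparation: builds the warp maps, seams and blend weights
	MyVideoStitcher stitcher;
	if (stitcher.Prepare(frames) != 0)	// assuming 0 == STITCH_SUCCESS
		return -1;

	// Per-frame stitching would then reuse the precomputed maps, e.g.:
	// cv::Mat pano;
	// stitcher.StitchFrame(frames, pano);	// hypothetical per-frame call, not shown in this post
	return 0;
}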


int MyVideoStitcher::PrepareAPAP(vector<Mat> &src)
{
	int num_images = static_cast<int>(src.size());

	this->InitMembers(num_images);

	// Compute scaling factors: for efficiency, the source images may be downscaled
	// for feature detection and for seam estimation
	work_megapix_ = -1;	// no downscaling for now
	seam_megapix_ = -1;	// no downscaling for now
	this->SetScales(src);

	// Feature detection
	vector<ImageFeatures> features(num_images);
	this->FindFeatures(src, features);

	// Feature matching; discard noise images
	vector<MatchesInfo> pairwise_matches;
	this->MatchImages(features, pairwise_matches);

	// The APAP algorithm: build the per-image remap tables and corners
	APAPWarper apap_warper;
	apap_warper.buildMaps(src, features, pairwise_matches, xmaps_, ymaps_, corners_);
	for(int i = 0; i < num_images; i++)
		sizes_[i] = xmaps_[i].size();
	dst_roi_ = resultRoi(corners_, sizes_);

	// Seam estimation
	vector<Mat> seamed_masks(num_images);
	vector<Mat> images_warped(num_images);
	vector<Mat> init_masks(num_images);
	for(int i = 0; i < num_images; i++)
	{
		init_masks[i].create(src[i].size(), CV_8U);
		init_masks[i].setTo(Scalar::all(255));
		remap(src[i], images_warped[i], xmaps_[i], ymaps_[i], INTER_LINEAR);
		remap(init_masks[i], final_warped_masks_[i], xmaps_[i], ymaps_[i], INTER_NEAREST, BORDER_CONSTANT);
		final_warped_masks_[i].copyTo(seamed_masks[i]);
	}
	this->FindSeam(images_warped, seamed_masks);
	printf("find seam");

	// Exposure compensation
	compensator_.createWeightMaps(corners_, images_warped, final_warped_masks_, ec_weight_maps_);
	// The per-pixel gain weight maps also need to be resized to the warped sizes
	compensator_.gainMapResize(sizes_, ec_weight_maps_);
	printf("compensate\n");

	images_warped.clear();

	// Compute the per-pixel blending weights: the feather width is blend_strength_ percent
	// of the panorama's linear size, and the blender's sharpness is its reciprocal
	Size dst_sz = dst_roi_.size();
	//cout << "dst size: " << dst_sz << endl;
	float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength_ / 100.f;
	blender_.setSharpness(1.f / blend_width);
	for(int i = 0; i < num_images; i++)
		final_blend_masks_[i] = final_warped_masks_[i] & seamed_masks[i];
	blender_.createWeightMaps(dst_roi_, corners_, seamed_masks, blend_weight_maps_);

	return STITCH_SUCCESS;
}
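
PrepareAPAP() lays out the whole pipeline by hand: feature detection, pairwise matching, warping into a common frame, seam estimation, exposure compensation and blend-weight computation. For comparison, OpenCV's high-level cv::Stitcher class wraps those same stages behind a single call (using its own warpers rather than APAP); a minimal sketch, with placeholder file names, that can serve as a sanity check against the custom pipeline:

#include <opencv2/imgcodecs.hpp>
#include <opencv2/stitching.hpp>
#include <iostream>
#include <vector>

int main()
{
	std::vector<cv::Mat> imgs { cv::imread("left.jpg"), cv::imread("right.jpg") };

	// One call runs features, matching, warping, seam finding, exposure compensation and blending
	cv::Mat pano;
	cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
	cv::Stitcher::Status status = stitcher->stitch(imgs, pano);

	if (status != cv::Stitcher::OK)
	{
		std::cout << "stitching failed, status = " << int(status) << "\n";
		return -1;
	}
	cv::imwrite("pano.jpg", pano);
	return 0;
}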

int MyVideoStitcher::FindFeatures(vector<Mat> &src, vector<ImageFeatures> &features)
{
	Ptr<Feature2D> finder;
	if (features_type_ == "surf")
	{
#ifdef HAVE_OPENCV_GPU
		if (is_try_gpu_ && gpu::getCudaEnabledDeviceCount() > 0)
			finder = new SurfFeaturesFinderGpu();
		else
#endif
			finder = xfeatures2d::SURF::create();
	}
	else if (features_type_ == "orb")
	{
		finder = ORB::create();
	}
	else
	{
		cout << "Unknown 2D features type: '" << features_type_ << "'.\n";
		return STITCH_CONFIG_ERROR;
	}

	int num_images = static_cast<int>(src.size());
	Mat full_img, img;

	for (int i = 0; i < num_images; ++i)
	{
		full_img = src[i].clone();

		if (work_megapix_ < 0)
			img = full_img;
		else
			resize(full_img, img, Size(), work_scale_, work_scale_);

		computeImageFeatures(finder, img, features[i]);
		//LOGLN("Features in image #" << i+1 << "("<<img.size()<< "): " << features[i].keypoints.size());
		features[i].img_idx = i;
	}

	//finder->collectGarbage();
	full_img.release();
	img.release();

	return STITCH_SUCCESS;
}
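
FindFeatures() is essentially a loop around cv::detail::computeImageFeatures() with the chosen detector. Below is a standalone sketch of that step with ORB (SURF would additionally require the opencv_xfeatures2d module); the image file names are placeholders:

#include <opencv2/imgcodecs.hpp>
#include <opencv2/stitching.hpp>
#include <iostream>
#include <vector>

int main()
{
	using namespace cv;
	std::vector<Mat> imgs { imread("left.jpg"), imread("right.jpg") };

	Ptr<Feature2D> finder = ORB::create();
	std::vector<detail::ImageFeatures> features(imgs.size());
	for (size_t i = 0; i < imgs.size(); ++i)
	{
		// Detects keypoints and computes descriptors into an ImageFeatures struct
		detail::computeImageFeatures(finder, imgs[i], features[i]);
		features[i].img_idx = static_cast<int>(i);
		std::cout << "image " << i << ": " << features[i].keypoints.size() << " keypoints\n";
	}
	return 0;
}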



/*
 * Match features, then discard noise images. In this implementation the algorithm
 * is aborted as soon as a noise image is found.
 * Return values:
 *		0	--	success
 *		-2	--	noise images present
 */
int MyVideoStitcher::MatchImages(vector<ImageFeatures> &features, vector<MatchesInfo> &pairwise_matches)
{
	int total_num_images = static_cast<int>(features.size());

	BestOf2NearestMatcher matcher(is_try_gpu_, match_conf_);
	matcher(features, pairwise_matches);
	matcher.collectGarbage();
	
	// Discard noise images: keep only the biggest connected component of matched images
	vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh_);
	
	// Abort as soon as any noise image is found
	int num_images = static_cast<int>(indices.size());
	if (num_images != total_num_images)
	{
		//LOGLN(total_num_images - num_images << " videos are invalid");
		return STITCH_NOISE;
	}
	
	return STITCH_SUCCESS;
}
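
The same matching-and-filtering step can be exercised on its own. In the sketch below, match_conf = 0.3f and conf_thresh = 1.0f mirror the defaults of OpenCV's stitching_detailed sample; the actual values of match_conf_ and conf_thresh_ are not shown in this excerpt, so they are assumptions, and the file names are placeholders:

#include <opencv2/imgcodecs.hpp>
#include <opencv2/stitching.hpp>
#include <iostream>
#include <vector>

int main()
{
	using namespace cv;
	std::vector<Mat> imgs { imread("left.jpg"), imread("right.jpg") };

	// Feature detection, as in FindFeatures()
	Ptr<Feature2D> finder = ORB::create();
	std::vector<detail::ImageFeatures> features(imgs.size());
	for (size_t i = 0; i < imgs.size(); ++i)
		detail::computeImageFeatures(finder, imgs[i], features[i]);

	// Pairwise matching, as in MatchImages()
	detail::BestOf2NearestMatcher matcher(false /*try_use_gpu*/, 0.3f /*match_conf*/);
	std::vector<detail::MatchesInfo> pairwise_matches;
	matcher(features, pairwise_matches);
	matcher.collectGarbage();

	// Keep only the largest subset of images that match each other confidently;
	// anything dropped here is what the post calls a "noise image"
	std::vector<int> kept = detail::leaveBiggestComponent(features, pairwise_matches, 1.0f /*conf_thresh*/);
	std::cout << kept.size() << " of " << imgs.size() << " images kept\n";
	return 0;
}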

 
