

C++ Mat_::reshape Method Code Examples

This article collects typical usage examples of the C++ cv::Mat_::reshape method. If you are wondering how Mat_::reshape is used in practice, or are looking for concrete examples of it, the selected code examples below may help. You can also explore further usage examples of the enclosing cv::Mat_ class.


Two code examples of the Mat_::reshape method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
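Before the collected examples, here is a minimal self-contained sketch (not taken from either project below) of what cv::Mat_::reshape does: it returns a new matrix header over the same data with a different number of channels and/or rows, without copying any pixels.

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
	// A 4x3 single-channel float matrix filled with increasing values
	cv::Mat_<float> m(4, 3);
	for (int r = 0; r < m.rows; ++r)
		for (int c = 0; c < m.cols; ++c)
			m(r, c) = static_cast<float>(r * m.cols + c);

	// reshape(channels, rows): same underlying data, new shape, no copy
	cv::Mat flat = m.reshape(1, 12);   // 12x1 column vector
	cv::Mat wide = m.reshape(1, 2);    // 2x6 matrix

	std::cout << "flat: " << flat.rows << "x" << flat.cols << std::endl;
	std::cout << "wide: " << wide.rows << "x" << wide.cols << std::endl;
	return 0;
}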

Example 1: Response

//===========================================================================
void CCNF_patch_expert::Response(cv::Mat_<float> &area_of_interest, cv::Mat_<float> &response)
{
	
	int response_height = area_of_interest.rows - height + 1;
	int response_width = area_of_interest.cols - width + 1;

	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}
		
	response.setTo(0);
	
	// Placeholders for the DFT of the image and for the integral and squared-integral images, so they are not recomputed for every neuron response
	cv::Mat_<double> area_of_interest_dft;
	cv::Mat integral_image, integral_image_sq;
	
	cv::Mat_<float> neuron_response;

	// responses from the neural layers
	for(size_t i = 0; i < neurons.size(); i++)
	{		
		// Skip this neuron if its alpha is tiny, since it would contribute little to the overall result
		if(neurons[i].alpha > 1e-4)
		{
			neurons[i].Response(area_of_interest, area_of_interest_dft, integral_image, integral_image_sq, neuron_response);
			response = response + neuron_response;						
		}
	}

	int s_to_use = -1;

	// Find the matching sigma
	for(size_t i=0; i < window_sizes.size(); ++i)
	{
		if(window_sizes[i] == response_height)
		{
			// Found the correct sigma
			s_to_use = i;			
			break;
		}
	}

	cv::Mat_<float> resp_vec_f = response.reshape(1, response_height * response_width);

	cv::Mat out = Sigmas[s_to_use] * resp_vec_f;
	
	response = out.reshape(1, response_height);

	// Making sure the response does not have negative numbers
	double min;

	minMaxIdx(response, &min, 0);
	if(min < 0)
	{
		response = response - min;
	}

}
Developer: hcmlab, Project: mobileSSI, Lines: 60, Source: CCNF_patch_expert.cpp
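The reshape calls near the end of Response() flatten the 2-D response map into a column vector so it can be multiplied by the precomputed matrix Sigmas[s_to_use], and then restore the 2-D layout. Below is a stripped-down, hypothetical sketch of that round-trip pattern; the sigma argument here is any (h*w) x (h*w) matrix supplied by the caller, not CCNF's learned one.

#include <opencv2/core.hpp>

cv::Mat_<float> apply_linear_transform(const cv::Mat_<float>& response_map,
                                       const cv::Mat_<float>& sigma)
{
	int h = response_map.rows;
	int w = response_map.cols;

	// Flatten the h x w map into an (h*w) x 1 column vector (no data copy)
	cv::Mat_<float> vec = response_map.reshape(1, h * w);

	// sigma is assumed to be (h*w) x (h*w), so the product is again (h*w) x 1
	cv::Mat out = sigma * vec;

	// Restore the original 2-D layout
	return out.reshape(1, h);
}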

Example 2: AlignFaceMask

	// Aligning a face to a common reference frame
	void AlignFaceMask(cv::Mat& aligned_face, const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, cv::Vec6f params_global, const LandmarkDetector::PDM& pdm, const cv::Mat_<int>& triangulation, bool rigid, double sim_scale, int out_width, int out_height)
	{
		// Will warp to scaled mean shape
		cv::Mat_<float> similarity_normalised_shape = pdm.mean_shape * sim_scale;
	
		// Discard the z component
		similarity_normalised_shape = similarity_normalised_shape(cv::Rect(0, 0, 1, 2*similarity_normalised_shape.rows/3)).clone();

		cv::Mat_<float> source_landmarks = detected_landmarks.reshape(1, 2).t();
		cv::Mat_<float> destination_landmarks = similarity_normalised_shape.reshape(1, 2).t();

		// Aligning only the more rigid points
		if(rigid)
		{
			extract_rigid_points(source_landmarks, destination_landmarks);
		}

		cv::Matx22f scale_rot_matrix = AlignShapesWithScale(source_landmarks, destination_landmarks);
		cv::Matx23f warp_matrix;

		warp_matrix(0,0) = scale_rot_matrix(0,0);
		warp_matrix(0,1) = scale_rot_matrix(0,1);
		warp_matrix(1,0) = scale_rot_matrix(1,0);
		warp_matrix(1,1) = scale_rot_matrix(1,1);

		float tx = params_global[4];
		float ty = params_global[5];

		cv::Vec2f T(tx, ty);
		T = scale_rot_matrix * T;

		// Make sure centering is correct
		warp_matrix(0,2) = -T(0) + out_width/2;
		warp_matrix(1,2) = -T(1) + out_height/2;

		cv::warpAffine(frame, aligned_face, warp_matrix, cv::Size(out_width, out_height), cv::INTER_LINEAR);

		// Move the destination landmarks there as well
		cv::Matx22f warp_matrix_2d(warp_matrix(0,0), warp_matrix(0,1), warp_matrix(1,0), warp_matrix(1,1));
		
		destination_landmarks = cv::Mat(detected_landmarks.reshape(1, 2).t()) * cv::Mat(warp_matrix_2d).t();

		destination_landmarks.col(0) = destination_landmarks.col(0) + warp_matrix(0,2);
		destination_landmarks.col(1) = destination_landmarks.col(1) + warp_matrix(1,2);
		
		// Move the eyebrows up to include more of the upper face
		float eyebrow_shift = (float)((30.0 / 0.7) * sim_scale);
		destination_landmarks.at<float>(0, 1) -= eyebrow_shift;
		destination_landmarks.at<float>(16, 1) -= eyebrow_shift;
		for (int p = 17; p <= 26; ++p)
		{
			destination_landmarks.at<float>(p, 1) -= eyebrow_shift;
		}

		destination_landmarks = cv::Mat(destination_landmarks.t()).reshape(1, 1).t();

		LandmarkDetector::PAW paw(destination_landmarks, triangulation, 0, 0, aligned_face.cols-1, aligned_face.rows-1);
		
		// Mask each of the channels (a bit of a roundabout way, but OpenCV 3.1 in debug mode doesn't seem to be able to handle a more direct way using split and merge)
		vector<cv::Mat> aligned_face_channels(aligned_face.channels());
		
		for (int c = 0; c < aligned_face.channels(); ++c)
		{
			cv::extractChannel(aligned_face, aligned_face_channels[c], c);
		}

		for(size_t i = 0; i < aligned_face_channels.size(); ++i)
		{
			cv::multiply(aligned_face_channels[i], paw.pixel_mask, aligned_face_channels[i], 1.0, CV_8U);
		}

		if(aligned_face.channels() == 3)
		{
			cv::Mat planes[] = { aligned_face_channels[0], aligned_face_channels[1], aligned_face_channels[2] };
			cv::merge(planes, 3, aligned_face);
		}
		else
		{
			aligned_face = aligned_face_channels[0];
		}
	}
Developer: JeffreyCastellano, Project: OpenFace, Lines: 88, Source: Face_utils.cpp
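The detected_landmarks.reshape(1, 2).t() idiom in AlignFaceMask assumes the OpenFace convention of storing 2D landmarks as a single 2n x 1 column vector with all x coordinates followed by all y coordinates; reshape(1, 2) views it as a 2 x n matrix, and the transpose yields one (x, y) point per row. A minimal sketch under that assumption, with three made-up landmarks:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
	// Three hypothetical landmarks (10,40), (20,50), (30,60),
	// stored as a 6x1 column vector [x0 x1 x2 y0 y1 y2]^T
	cv::Mat_<float> landmarks = (cv::Mat_<float>(6, 1) << 10, 20, 30, 40, 50, 60);

	// reshape(1, 2) -> 2x3 matrix (row 0 holds the x's, row 1 the y's)
	// .t()          -> 3x2 matrix (one landmark per row)
	cv::Mat_<float> points = landmarks.reshape(1, 2).t();

	std::cout << points << std::endl;  // prints [10, 40; 20, 50; 30, 60]
	return 0;
}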


Note: The cv::Mat_::reshape examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, who retain the copyright; please refer to each project's license before distributing or using the code. Do not reproduce without permission.