当前位置: 首页>>代码示例>>C++>>正文


C++ Blob::getPt1方法代码示例

本文整理汇总了C++中Blob::getPt1方法的典型用法代码示例。如果您正苦于以下问题:C++ Blob::getPt1方法的具体用法?C++ Blob::getPt1怎么用?C++ Blob::getPt1使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在Blob的用法示例。


在下文中一共展示了Blob::getPt1方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。

示例1: detectEyes

//==============================================================================
//==============================================================================
// Detect candidate eye blobs inside the eye region of the Kinect IR frame.
// Binarizes the region with a distance-tuned adaptive threshold, erodes it,
// extracts contours, and filters them by size/aspect ratio into left/right
// eye candidates.  Reads globals: frameIR, eyesAreaBlob, userDistance,
// eyesFilteringEnable.  Writes globals: MIN_EYE_BLOB_SIZE, MAX_EYE_BLOB_SIZE.
// NOTE: the function body is truncated in this excerpt.
void detectEyes()
{
	//Variables
	Mat binaryFrame;
	Mat histBinaryFaceFrame;
	Mat contoursFrame;
	Mat temp1;

	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;
	
	vector<Rect> leftBlobs;
	vector<Rect> rightBlobs;
	vector<Rect> leftCandidatesEye;
	vector<Rect> rightCandidatesEye;
	
	Blob candidatedBlob;
	Rect aBlob;
	Rect searchAreaForEyesFiltering;
	
	unsigned int blobSize = 0;
	float blobRatio = 0.0;
	float blobsDistance = 0;
	int xDiff = 0;
	int yDiff = 0;
	
	bool isLeft = false;
	bool isRight = false;
	
	//Convert IRImage from Kinect into grayScale image and cut eyesArea
	//cvtColor(frameIR, binaryFrame, CV_BGR2GRAY);
	// frameIR is already single-channel here, so a plain copy replaces the
	// (commented-out) color conversion.
	frameIR.copyTo(binaryFrame);
	
	//Cut eyesBinaryFrame to obtain eyesArea image
	// temp2 is a view (shared data) into binaryFrame restricted to the eye ROI.
	Mat temp2 (binaryFrame, eyesAreaBlob.getRect());	
			
	//Distance handler
	// The adaptive-threshold block size and the accepted blob-size window are
	// empirically retuned per distance band (values calibrated in the AirLab).
	if (userDistance < 700)
	{	
		//Define blobs dimension
		MIN_EYE_BLOB_SIZE = 30;
		MAX_EYE_BLOB_SIZE = 300;
		
		//Get binary image and optimize it for blob analysis
		adaptiveThreshold (temp2, temp1, 255, 
						   ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 89, 0); //AirLab 125
		erode(temp1,contoursFrame, Mat());
	}
	else if ((userDistance >= 700)&&(userDistance < 760))
	{
		//Define blobs dimension
		MIN_EYE_BLOB_SIZE = 40;
		MAX_EYE_BLOB_SIZE = 300;
		
		//Get binary image and optimize it for blob analysis
		adaptiveThreshold (temp2, temp1, 255, 
						   ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 91, 0); //AirLab 125
		erode(temp1,contoursFrame, Mat());

		//imshow("Binary Eyes Image", temp1);
		//imshow("Eroded Eyes Image", contoursFrame); 
	}
	else
	{
		//Define blobs dimension
		MIN_EYE_BLOB_SIZE = 35;
		MAX_EYE_BLOB_SIZE = 300;
		
		//Get binary image and optimize it for blob analysis
		adaptiveThreshold (temp2, temp1, 255, 
							ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 75, 0); //Airlab 111
		erode(temp1,contoursFrame, Mat());
	}
	
	//Find eyesBlob
	//-----TRY TO USE CANNY FIRST-------//
	// The offset argument (eyesAreaBlob.getPt1()) shifts every contour point
	// back into full-frame coordinates, since contoursFrame covers only the ROI.
	findContours(contoursFrame, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE , eyesAreaBlob.getPt1());
	
	//Filter contours and get the best ones
	// Walks the top-level contours via hierarchy[i][0] (next-sibling link).
	// NOTE(review): hierarchy[0] is dereferenced before checking that any
	// contour was found — undefined behavior when contours is empty; guard
	// with !contours.empty() first.  TODO confirm upstream guarantees.
	for(int i = 0; i >= 0 ; i = hierarchy[i][0] )
	{
		if ((int)contours[i].size() > 4)
		{
			aBlob = boundingRect(contours[i]);
			if(eyesFilteringEnable)
			{
				//Data for filtering on blob dimensions
				blobSize = ((int)aBlob.width)*((int)aBlob.height);
				// NOTE(review): both operands are cast to int, so this is
				// integer division and blobRatio loses its fractional part
				// before the float assignment — float division was likely
				// intended for the BLOB_EYE_RATIO comparison below.
				blobRatio = ((int)aBlob.width)/((int)aBlob.height);
				//Save blob into vector of candidated blobs
				candidatedBlob = Blob(cvPoint(aBlob.x, aBlob.y), aBlob.height, aBlob.width);
				
				if (((blobSize > MIN_EYE_BLOB_SIZE) && (blobSize < MAX_EYE_BLOB_SIZE)) && (blobRatio > BLOB_EYE_RATIO))
				{
					//Get distance between blob center and left/right edge of eyesAreaBlob
					unsigned int distDX = eyesAreaBlob.getPt2().x - candidatedBlob.getCenter().x;
					unsigned int distSX = candidatedBlob.getCenter().x - eyesAreaBlob.getPt1().x;

//......... remainder of this function omitted in this excerpt .........
开发者ID:AIRLab-POLIMI,项目名称:e2,代码行数:101,代码来源:head_analyzer.cpp

示例2: detectFace

//==============================================================================
//==============================================================================
// Detect the user's head in the binary IR frame and store it in the global
// headBlob.  Thresholds frameIR around the expected head position
// (headPosition), finds the largest contour in that ROI, and rejects heads
// too close to the frame's left/right borders.  Returns false when the head
// is rejected (and clears userDistanceReady); the rest of the return paths
// are truncated in this excerpt.
// Reads globals: frameIR, userDistance, headPosition.  Writes globals:
// headBlob, userDistanceReady.
bool detectFace()
{
	//Variables 
	int biggerContourIdx = 0;
	int contourArea = -1;
	
	Mat binaryFrame;
	Mat binaryFrameCopy;
	
	vector<vector<Point> > contours;
	vector<Vec4i> hierarchy;
	
	Rect headROI;
	Rect faceROI;
	Blob faceBlobempirical;
	Rect eyesROI;
	
	// Distance-dependent ROI size and binarization threshold: a slightly
	// smaller ROI and different threshold are used beyond 750 (presumably mm
	// — TODO confirm units against the caller).
	if(userDistance < 750)
	{
		//Face area into frameIR 
		headROI = cvRect(	(headPosition.x - BLOB_HEAD_WIDTH/2),  //X
							headPosition.y, 					   //Y
							BLOB_HEAD_WIDTH, BLOB_HEAD_HEIGHT);
		//Convert IRImage into gray image and then into binary one
		//cvtColor(frameIR, binaryFrame, CV_BGR2GRAY);
		frameIR.copyTo(binaryFrame);
		// Mat comparison operator yields a 0/255 binary mask.
		binaryFrame = binaryFrame > THRESHOLD_HEAD_NEAR;
	}
	else
	{
		//Face area into frameIR 
		headROI = cvRect(	headPosition.x - (BLOB_HEAD_WIDTH-10)/2,  //X
							headPosition.y,							  //Y
							BLOB_HEAD_WIDTH-10, BLOB_HEAD_HEIGHT-20);
		//Convert IRImage into gray image and then into binary one
		//cvtColor(frameIR, binaryFrame, CV_BGR2GRAY);
		frameIR.copyTo(binaryFrame);
		binaryFrame = binaryFrame > THRESHOLD_HEAD_FAR;
	}

	//Chech out-of-frame error
	//check outOfImage error
	// Clamp the ROI inside the frame bounds so the sub-image ctor below
	// cannot throw.
	if(headROI.x < 0)
		headROI.x = 0;
	if(headROI.x > ( FRAME_WIDTH - headROI.width))
		headROI.x = FRAME_WIDTH - headROI.width;
	
	if(headROI.y < 0)
		headROI.y = 0;
	if(headROI.y > (FRAME_HEIGHT - headROI.height))
		// NOTE(review): asymmetric with the x clamp — subtracts an extra 10px
		// margin here; looks intentional (bottom border slack) but confirm.
		headROI.y = FRAME_HEIGHT - (headROI.height+10);
	
	//Define a sub-image for head detection algorithm
	binaryFrame.copyTo(binaryFrameCopy);
	Mat headBinaryFrame (binaryFrame, headROI);
	
	//OpenCV find contours algorithm
	// Offset cvPoint(headROI.x, headROI.y) maps contour points from ROI
	// coordinates back to full-frame coordinates.
	findContours(headBinaryFrame, contours, hierarchy, CV_RETR_CCOMP, 
				CV_CHAIN_APPROX_SIMPLE, cvPoint(headROI.x, headROI.y));
	
	//Filter contours and get the biggest one
	// NOTE(review): hierarchy[0] is dereferenced before checking that any
	// contour exists — undefined behavior when contours is empty; guard with
	// !contours.empty() first.
	for (int i = 0; i >= 0; i = hierarchy[i][0])
	{
		//Draw contours
		//vScalar color = CV_RGB(rand()&255,rand()&255,rand()&255);
		//drawContours(frameDrawn, contours, i, color, CV_FILLED,8, hierarchy);
			
		// headROI is reused here as a scratch bounding box for each contour.
		headROI = boundingRect(contours[i]);
		
		//Get the biggest area
		int temp = headROI.width * headROI.height;
		if(temp > contourArea)
		{
			contourArea = temp;
			biggerContourIdx = i;
		}
	}

	//Save head dimensions
	if(contourArea > 0)
	{
		headROI = boundingRect(contours[biggerContourIdx]);
		headBlob = Blob(cvPoint(headROI.x, headROI.y), headROI.height, headROI.width);
		
		//imshow("BinaryFrame", binaryFrame);
		//rectangle(frameDrawn, headROI, CV_RGB(0,255,0), 2, 8, 0);

		//Take some border around the image
		// Reject detections hugging the left (<150) or right (>600) frame
		// edge; magic pixel bounds presumably calibrated for a 640px-wide
		// frame — TODO confirm against FRAME_WIDTH.
		if(headBlob.getPt1().x < 150)
		{
			userDistanceReady = false;
			return false;
		}
		else if (headBlob.getPt2().x > 600)
		{	
			userDistanceReady = false;
			return false;
		}
//......... remainder of this function omitted in this excerpt .........
开发者ID:AIRLab-POLIMI,项目名称:e2,代码行数:101,代码来源:head_analyzer.cpp

示例3: detectMovements

//==============================================================================
//==============================================================================
// Estimate global head movement between the previous and current IR frames.
// Pipeline: Shi-Tomasi corners (goodFeaturesToTrack) restricted to a mask
// over the eye area -> Lucas-Kanade pyramidal optical flow -> median of the
// tracked point pairs -> one movement vector (hypotenuse + angle), which is
// drawn over the face and accumulated into the global movement classifier.
// Reads globals: frameIR, prevFrameIR, eyesAreaBlob, faceBlob,
// FEATURES_NUMBER.  Writes globals: countHolesMove, moveIntensity, move.
// NOTE: the function body is truncated in this excerpt.
void detectMovements()
{
	Mat frameIR_1C, prevFrameIR_1C;
	
	//Convert prevFrameIR in a 1 channel image (prevFrameIR_1C)
	//cvtColor(prevFrameIR, prevFrameIR_1C, CV_BGR2GRAY);
	// Frames are already single-channel, so a copy replaces the conversion.
	prevFrameIR.copyTo(prevFrameIR_1C);
	
	//Convert frameIR in a 1 channel image (frameIR_1C)
	//cvtColor(frameIR, frameIR_1C, CV_BGR2GRAY);
	frameIR.copyTo(frameIR_1C);
	
	//-----	SHI & TOMASI ALGORITHM	-----
	
	//Saving of prevFrameIR_1C features
	vector<Point2f> prevFrameIRFeatures;
	//mask setting
	// Non-zero mask pixels restrict feature detection to the eye area,
	// shrunk by 10px on each side horizontally.
	Mat mask;
	CvPoint pt1Mask, pt2Mask;
	
	CvSize dim = cv::Size(frameIR.cols, frameIR.rows);
	mask = cv::Mat::zeros(dim, CV_8U);
	pt1Mask.x = eyesAreaBlob.getPt1().x + 10;
	pt1Mask.y = eyesAreaBlob.getPt1().y;
	pt2Mask.x = eyesAreaBlob.getPt2().x - 10;
	pt2Mask.y = eyesAreaBlob.getPt2().y;

	cv::rectangle(mask, pt1Mask, pt2Mask, CV_RGB(255, 255, 255), CV_FILLED, 8, 0);
	
	//draw mask dimensions
	//cv::rectangle(frameDrawn, pt1Mask, pt2Mask, CV_RGB(0, 0, 255), 2, 8, 0);
	
	//Computation of prevFrameIR_1C features
	// Up to FEATURES_NUMBER corners, quality level 0.2, min distance 0.1.
	// NOTE(review): fewer than FEATURES_NUMBER corners (possibly zero) may be
	// returned; nothing below checks for an empty feature set before the
	// median computation — verify medianComputation tolerates that.
	goodFeaturesToTrack(prevFrameIR_1C, prevFrameIRFeatures, FEATURES_NUMBER, .2, .1, mask, 3, false, 0.04);
	
	//-----	LUCAS AND KANADE ALGORITHM	-----
	
	//Saving of frameIR_1C features
	vector<Point2f> frameIRFeatures;
	vector<uchar> foundFeatures;
	vector<float> featuresError;
	
	TermCriteria terminationCriteria = TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .03);
	
	// Track the Shi-Tomasi corners into the current frame (15x15 window,
	// 3 pyramid levels).
	calcOpticalFlowPyrLK(prevFrameIR_1C, frameIR_1C, 
						 prevFrameIRFeatures, frameIRFeatures,
						 foundFeatures, featuresError,
						 cv::Size(15,15), 3, terminationCriteria, 0.5, 0);
	
	//----- PRINT ALL GOOD FEATURES FOUND -----
	/*for(int i=0; i< FEATURES_NUMBER; i++)
	{
		pointer arrow;
		arrow.pt1.x = prevFrameIRFeatures[i].x;
		arrow.pt1.y = prevFrameIRFeatures[i].y;
		arrow.pt2.x = frameIRFeatures[i].x;
		arrow.pt2.y = frameIRFeatures[i].y;
		
		double angle = atan2((double) arrow.pt1.y - arrow.pt2.y, (double) arrow.pt1.x - arrow.pt2.x);
		double hypotenuse = sqrt(pow((arrow.pt1.y - arrow.pt2.y),2) + pow((arrow.pt1.x - arrow.pt2.x),2));
		
		drawArrow(arrow, hypotenuse, angle, CV_RGB(0,255,0), 1);
	}*/
	
	//----- MEDIAN COMPUTATION -----
	// Median of each point cloud is used instead of the mean to reject
	// outlier tracks.
	pointer median;
	median.pt1 = medianComputation(prevFrameIRFeatures);
	median.pt2 = medianComputation(frameIRFeatures);
	
	//----- MOVEMENTS DATA COMPUTATION -----
	double medianHypotenuse = sqrt(pow((median.pt1.y - median.pt2.y),2) + pow((median.pt1.x - median.pt2.x),2));
	double medianAngle = atan2((double)(median.pt1.y - median.pt2.y), (double)(median.pt1.x - median.pt2.x));
	
	//----- PRINT MOVEMENT FOUND -----
	pointer moveArrow;
	
	//Print arrow upon the face
	// Arrow base: horizontal center of the face blob, at its top edge.
	moveArrow.pt1.x = faceBlob.getPt1().x + (((faceBlob.getPt2().x)-(faceBlob.getPt1().x))/2);
	moveArrow.pt1.y = faceBlob.getPt1().y;
	
	//Considering only meaningful arrows
	// Displacements below 10px are treated as noise; above 100px as tracking
	// failure.
	if(((unsigned int)medianHypotenuse > 10) && ((unsigned int)medianHypotenuse < 100))
	{
		//reset count of holes during movements
		countHolesMove = 0;

		//draw arrow
		drawArrow(moveArrow, medianHypotenuse, medianAngle, CV_RGB(255,0,0), 2);
		
		//save hypotenuse  count
		moveIntensity = moveIntensity + medianHypotenuse;
		
		//save data of arrow
		//ROS_INFO("Angle: %f, AngleClass:%d", medianAngle, defineAngleClass(medianAngle));
		Point2DSample moveSample = Point2DSample(defineAngleClass(medianAngle), 1, 1.0, 'U');		
		move.insert(moveSample);
		
	}
//......... remainder of this function omitted in this excerpt .........
开发者ID:AIRLab-POLIMI,项目名称:e2,代码行数:101,代码来源:head_analyzer.cpp


注:本文中的Blob::getPt1方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。