本文整理汇总了C++中Blob::getRect方法的典型用法代码示例。如果您正苦于以下问题:C++ Blob::getRect方法的具体用法?C++ Blob::getRect怎么用?C++ Blob::getRect使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Blob的用法示例。
在下文中一共展示了Blob::getRect方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: detectFace
//.........这里部分代码省略.........
headROI = boundingRect(contours[i]);
//Get the biggest area
int temp = headROI.width * headROI.height;
if(temp > contourArea)
{
contourArea = temp;
biggerContourIdx = i;
}
}
//Save head dimensions
if(contourArea > 0)
{
headROI = boundingRect(contours[biggerContourIdx]);
headBlob = Blob(cvPoint(headROI.x, headROI.y), headROI.height, headROI.width);
//imshow("BinaryFrame", binaryFrame);
//rectangle(frameDrawn, headROI, CV_RGB(0,255,0), 2, 8, 0);
//Take some border around the image
if(headBlob.getPt1().x < 150)
{
userDistanceReady = false;
return false;
}
else if (headBlob.getPt2().x > 600)
{
userDistanceReady = false;
return false;
}
if( headBlob.getPt1().y < 20)
{
userDistanceReady = false;
return false;
}
else if(headBlob.getPt2().y > 360 )
{
userDistanceReady = false;
return false;
}
//Define eyes area
eyesROI = cvRect(headROI.x, (headROI.y + headROI.height/8 + 15),
headROI.width, 3*headROI.height/8);
//Shrink headROI width with findContours algorithm applied on eyesArea sub-image
//Define a sub-image for face detection algorithm
Mat faceBinaryFrame (binaryFrameCopy, eyesROI);
//Find face contours
contours.clear();
hierarchy.clear();
findContours(faceBinaryFrame, contours, hierarchy, CV_RETR_CCOMP,
CV_CHAIN_APPROX_SIMPLE, cvPoint(eyesROI.x, eyesROI.y));
//Filter contours and get the biggest one
biggerContourIdx = 0;
contourArea = -1;
for (int i = 0; i >= 0; i = hierarchy[i][0])
{
faceROI = boundingRect(contours[i]);
//Get the biggest area
int temp = faceROI.width * faceROI.height;
if(temp > contourArea)
{
contourArea = temp;
biggerContourIdx = i;
}
}
//Save face dimensions
if(contourArea > 0)
{
faceROI = boundingRect(contours[biggerContourIdx]);
faceBlob = Blob(cvPoint(faceROI.x, headROI.y), headROI.height, faceROI.width);
//faceBlobempirical = Blob(cvPoint(faceROI.x, headROI.y), headROI.height, (headROI.height/4)*3);
//rectangle(frameDrawn, faceBlobempirical.getRect(), CV_RGB(0,0,255), 2, 8, 0);
eyesAreaBlob = Blob( cvPoint((faceROI.x), (eyesROI.y-5)), //Pt1
cvPoint((faceROI.x+faceROI.width),eyesROI.y+eyesROI.height)); //Pt2
//Drawn face blob and eye area
rectangle(frameDrawn, faceBlob.getRect(), CV_RGB(0,255,0), 2, 8, 0);
rectangle(frameDrawn, eyesAreaBlob.getRect(), CV_RGB(255,0,0), 2, 8, 0);
//Save ratio
userHeadRatio = (float)faceBlob.getWidth() / (float)faceBlob.getHeight();
if (userHeadRatio > 0.9) {userHeadRatio = -1.0; }
userHeadPitch = faceBlob.getPt1().y - headPosition.y;
headDataMessageReady = true;
return true;
}
}
return false;
}
示例2: detectEyes
//==============================================================================
//==============================================================================
void detectEyes()
{
//Variables
Mat binaryFrame;
Mat histBinaryFaceFrame;
Mat contoursFrame;
Mat temp1;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
vector<Rect> leftBlobs;
vector<Rect> rightBlobs;
vector<Rect> leftCandidatesEye;
vector<Rect> rightCandidatesEye;
Blob candidatedBlob;
Rect aBlob;
Rect searchAreaForEyesFiltering;
unsigned int blobSize = 0;
float blobRatio = 0.0;
float blobsDistance = 0;
int xDiff = 0;
int yDiff = 0;
bool isLeft = false;
bool isRight = false;
//Convert IRImage from Kinect into grayScale image and cut eyesArea
//cvtColor(frameIR, binaryFrame, CV_BGR2GRAY);
frameIR.copyTo(binaryFrame);
//Cut eyesBinaryFrame to obtain eyesArea image
Mat temp2 (binaryFrame, eyesAreaBlob.getRect());
//Distance handler
if (userDistance < 700)
{
//Define blobs dimension
MIN_EYE_BLOB_SIZE = 30;
MAX_EYE_BLOB_SIZE = 300;
//Get binary image and optimize it for blob analysis
adaptiveThreshold (temp2, temp1, 255,
ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 89, 0); //AirLab 125
erode(temp1,contoursFrame, Mat());
}
else if ((userDistance >= 700)&&(userDistance < 760))
{
//Define blobs dimension
MIN_EYE_BLOB_SIZE = 40;
MAX_EYE_BLOB_SIZE = 300;
//Get binary image and optimize it for blob analysis
adaptiveThreshold (temp2, temp1, 255,
ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 91, 0); //AirLab 125
erode(temp1,contoursFrame, Mat());
//imshow("Binary Eyes Image", temp1);
//imshow("Eroded Eyes Image", contoursFrame);
}
else
{
//Define blobs dimension
MIN_EYE_BLOB_SIZE = 35;
MAX_EYE_BLOB_SIZE = 300;
//Get binary image and optimize it for blob analysis
adaptiveThreshold (temp2, temp1, 255,
ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, 75, 0); //Airlab 111
erode(temp1,contoursFrame, Mat());
}
//Find eyesBlob
//-----TRY TO USE CANNY FIRST-------//
findContours(contoursFrame, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE , eyesAreaBlob.getPt1());
//Filter contours and get the best ones
for(int i = 0; i >= 0 ; i = hierarchy[i][0] )
{
if ((int)contours[i].size() > 4)
{
aBlob = boundingRect(contours[i]);
if(eyesFilteringEnable)
{
//Data for filtering on blob dimensions
blobSize = ((int)aBlob.width)*((int)aBlob.height);
blobRatio = ((int)aBlob.width)/((int)aBlob.height);
//Save blob into vector of candidated blobs
candidatedBlob = Blob(cvPoint(aBlob.x, aBlob.y), aBlob.height, aBlob.width);
if (((blobSize > MIN_EYE_BLOB_SIZE) && (blobSize < MAX_EYE_BLOB_SIZE)) && (blobRatio > BLOB_EYE_RATIO))
{
//Get distance between blob center and left/right edge of eyesAreaBlob
unsigned int distDX = eyesAreaBlob.getPt2().x - candidatedBlob.getCenter().x;
unsigned int distSX = candidatedBlob.getCenter().x - eyesAreaBlob.getPt1().x;
//.........这里部分代码省略.........