This article collects typical usage examples of the C++ method CBlob::GetBoundingBox. If you have been wondering what CBlob::GetBoundingBox does, how to call it, or what working code that uses it looks like, the curated examples here may help. You can also explore further usage examples of its containing class, CBlob.
Seven code examples of CBlob::GetBoundingBox are shown below, sorted by popularity by default.
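All seven examples share the same pattern: build a CBlobResult from a binary image, optionally filter blobs by area, then call GetBoundingBox() on each CBlob. A minimal sketch of that pattern, assuming the cvBlobsLib API and the OpenCV 2.x C interface used throughout these examples (the header name, function name, and the area threshold of 100 are assumptions for illustration):

#include <opencv2/opencv.hpp>
#include "BlobResult.h" // main cvBlobsLib header; adjust to your installation

void drawBlobBoxes(IplImage *binaryImg, IplImage *canvas)
{
    // Extract blobs from a binary image with a black background.
    CBlobResult blobs(binaryImg, NULL, 0);

    // Discard blobs whose area is below 100 pixels.
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 100);

    for (int i = 0; i < blobs.GetNumBlobs(); i++)
    {
        // GetBoundingBox() returns the blob's axis-aligned CvRect.
        CvRect box = blobs.GetBlob(i)->GetBoundingBox();
        cvRectangle(canvas,
                    cvPoint(box.x, box.y),
                    cvPoint(box.x + box.width, box.y + box.height),
                    CV_RGB(255, 0, 0), 2);
    }
}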
Example 1: nextIteration
void ForegroundDetector::nextIteration(const Mat &img)
{
    if(bgImg.empty())
    {
        return;
    }

    // cv::Mat takes (rows, cols, type).
    Mat absImg = Mat(img.rows, img.cols, img.type());
    Mat threshImg = Mat(img.rows, img.cols, img.type());

    // Foreground mask: pixels that differ from the background by more than fgThreshold.
    absdiff(bgImg, img, absImg);
    threshold(absImg, threshImg, fgThreshold, 255, CV_THRESH_BINARY);

    // Extract blobs from the binary mask and drop those smaller than minBlobSize.
    IplImage im = (IplImage)threshImg;
    CBlobResult blobs = CBlobResult(&im, NULL, 0);
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobSize);

    // Store the bounding box of every remaining blob as a foreground region.
    vector<Rect> *fgList = detectionResult->fgList;
    fgList->clear();

    for(int i = 0; i < blobs.GetNumBlobs(); i++)
    {
        CBlob *blob = blobs.GetBlob(i);
        CvRect rect = blob->GetBoundingBox();
        fgList->push_back(rect);
    }
}
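Example 1 references members of ForegroundDetector that the excerpt does not show (bgImg, fgThreshold, minBlobSize, detectionResult). A plausible shape for the class, assumed purely for illustration and not the original declaration:

// Assumed declarations consistent with how Example 1 uses them.
struct DetectionResult
{
    vector<Rect> *fgList; // foreground bounding boxes for the current frame
};

class ForegroundDetector
{
public:
    void nextIteration(const Mat &img);
private:
    Mat bgImg;                        // background model frame
    double fgThreshold;               // per-pixel difference threshold
    int minBlobSize;                  // minimum blob area to keep
    DetectionResult *detectionResult; // output of the current iteration
};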
Example 2: Update
//... portions of code omitted ...
    Image src = camera.SubImage(crop_x, crop_y, crop_w, crop_h);
    out = new Image(src.m_Image->width,
                    src.m_Image->height, 8, 4);

    // Copy the crop into the RGBA output, keying out pure green (0,255,0)
    // mask pixels by giving them a transparent alpha.
    for(int y=0; y<src.m_Image->height; y++)
    {
        for(int x=0; x<src.m_Image->width; x++)
        {
            CvScalar col = cvGet2D(src.m_Image,y,x);
            CvScalar alpha = cvGet2D(tofill.m_Image,y,x);
            if (alpha.val[0]==0 &&
                alpha.val[1]==255 &&
                alpha.val[2]==0)
                col.val[3]=0;
            else
                col.val[3]=255;
            cvSet2D(out->m_Image,y,x,col);
        }
    }
}

if (key=='s')
{
    cerr<<"deleting old images in islands/"<<endl;
    int r=system("rm islands/*");
}

// Collect the bounding box of every blob.
list<CvRect> allrects;
for (int i = 0; i < blobs.GetNumBlobs(); i++ )
{
    currentBlob = blobs.GetBlob(i);
    allrects.push_back(currentBlob->GetBoundingBox());
}

list<CvRect> filteredrects=allrects;

/* for (list<CvRect>::iterator i=allrects.begin();
        i!=allrects.end(); ++i)
{
    bool in=false;
    for (list<CvRect>::iterator j=allrects.begin();
         j!=allrects.end(); ++j)
    {
        if (Inside(*i,*j)) in=true;
    }
    if (!in) filteredrects.push_back(*i);
}*/

unsigned int instance = rand();
unsigned int count=0;

// Save each bounding-box crop as an "island" image when 's' is pressed.
for (list<CvRect>::iterator i=filteredrects.begin();
     i!=filteredrects.end(); ++i)
{
    CvRect rect = *i;
    if (key=='s')
    {
        Image island = out->SubImage(rect.x,rect.y,
                                     rect.width,rect.height);
        char buf[256];
        sprintf(buf,"islands/island-%d-%d-%d.png",count,
                rect.x+rect.width/2,
//... portions of code omitted ...
Example 3: main
int main(int argc, char *argv[])
{
    CvCapture* capture = cvCreateFileCapture( "recording_01.avi");
    handOrientation rightOrientationLast = NONE, leftOrientationLast = NONE;
    handOrientation rightOrientationCur = NONE, leftOrientationCur = NONE;

    //cvNamedWindow("Input Image", CV_WINDOW_AUTOSIZE);
    //cvNamedWindow("Skin Pixels", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Skin Blobs", CV_WINDOW_AUTOSIZE);

    while(1){
        Mat imageBGR = cvQueryFrame(capture);
        if(imageBGR.empty()) break;

        //imshow("Input Image", imageBGR);

        // Convert the image to HSV colors.
        Mat imageHSV = Mat(imageBGR.size(), CV_8UC3); // Full HSV color image.
        cvtColor(imageBGR, imageHSV, CV_BGR2HSV);     // Convert from a BGR to an HSV image.

        std::vector<Mat> channels(3);
        split(imageHSV, channels);

        Mat planeH = channels[0];
        Mat planeS = channels[1];
        Mat planeV = channels[2];

        // Detect which pixels in each of the H, S and V channels are probably skin pixels.
        threshold(channels[0], channels[0], 150, UCHAR_MAX, CV_THRESH_BINARY_INV); //18
        threshold(channels[1], channels[1], 60, UCHAR_MAX, CV_THRESH_BINARY);      //50
        threshold(channels[2], channels[2], 170, UCHAR_MAX, CV_THRESH_BINARY);     //80

        // Combine all 3 thresholded color components, so that an output pixel will only
        // be white if the H, S and V pixels were also white.
        Mat imageSkinPixels = Mat(imageBGR.size(), CV_8UC1);        // Greyscale output image.
        bitwise_and(channels[0], channels[1], imageSkinPixels);     // imageSkin = H {BITWISE_AND} S.
        bitwise_and(imageSkinPixels, channels[2], imageSkinPixels); // imageSkin = H {BITWISE_AND} S {BITWISE_AND} V.

        // Show the output image on the screen.
        //imshow("Skin Pixels", imageSkinPixels);

        IplImage ipl_imageSkinPixels = imageSkinPixels;

        // Find blobs in the image.
        CBlobResult blobs;
        blobs = CBlobResult(&ipl_imageSkinPixels, NULL, 0); // Use a black background color.

        // Ignore the blobs whose area is less than minArea.
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobArea);

        srand(time(NULL));

        // Show the large blobs.
        IplImage* imageSkinBlobs = cvCreateImage(imageBGR.size(), 8, 3); // Colored output (use 1 channel for greyscale).
        for (int i = 0; i < blobs.GetNumBlobs(); i++) {
            CBlob *currentBlob = blobs.GetBlob(i);
            currentBlob->FillBlob(imageSkinBlobs, CV_RGB(rand()%255,rand()%255,rand()%255)); // Fill each large blob with a random color.
            cvDrawRect(imageSkinBlobs,
                       cvPoint(currentBlob->GetBoundingBox().x, currentBlob->GetBoundingBox().y),
                       cvPoint(currentBlob->GetBoundingBox().x + currentBlob->GetBoundingBox().width,
                               currentBlob->GetBoundingBox().y + currentBlob->GetBoundingBox().height),
                       cvScalar(0,0,255),
                       2); // Draw bounding boxes.
        }
        cvShowImage("Skin Blobs", imageSkinBlobs);

        // Gestures
        //std::cout << "Number of Blobs: "<< blobs.GetNumBlobs() <<endl;
        if(blobs.GetNumBlobs() == 0){
            // picture empty
        }else if(blobs.GetNumBlobs() == 1) {
            // head detected
        }else if(blobs.GetNumBlobs() == 2 || blobs.GetNumBlobs() == 3){
            // head + one hand || head + two hands
            CvRect rect[3];
            int indexHead = -1, indexHandLeft = -1, indexHandRight = -1;

            // Get bounding boxes.
            for(int i = 0; i < blobs.GetNumBlobs(); i++){
                rect[i] = blobs.GetBlob(i)->GetBoundingBox();
            }

            // Detect head and hand indexes.
            if(blobs.GetNumBlobs() == 2){
                int indexHand = -1;
                if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y){
//... portions of code omitted ...
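The excerpt above calls a helper getCenterPoint() whose definition is not shown; a minimal implementation consistent with how it is used (the center of a CvRect) might look like this. This is an assumption, not the original code:

CvPoint getCenterPoint(const CvRect &rect)
{
    // Center of the bounding box: top-left corner plus half the extent.
    return cvPoint(rect.x + rect.width / 2, rect.y + rect.height / 2);
}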
Example 4: main
//... portions of code omitted ...
    // Find the indexes of the two largest blobs.
    for(int i=0;i<numBlobs;i++){
        int area = blobs.GetBlob(i)->Area();
        if(area > maxArea_1){
            maxArea_2 = maxArea_1;
            maxArea_1 = area;
            max_2 = max_1;
            max_1 = i;
        } else if(area > maxArea_2){
            maxArea_2 = area;
            max_2 = i;
        }
    }

    int i_1 = max_1;
    int i_2 = max_2;
    double area_left, area_right;
    Rect rect_1;
    Rect rect_2;

    // Determine which hand is left/right by comparing blob centers.
    blob_1 = blobs.GetBlob(i_1);
    blob_2 = blobs.GetBlob(i_2);
    center_1 = blob_1->getCenter();
    center_2 = blob_2->getCenter();
    bool left_is_1 = (center_1.x < center_2.x);
    leftBlob = (left_is_1)? blob_1 : blob_2;
    rightBlob = (left_is_1)? blob_2 : blob_1;
    center_left = leftBlob->getCenter();
    center_right = rightBlob->getCenter();

    // Determine the number of valid hands.
    // Validity is decided by whether or not the hand followed a logical movement,
    // and whether the area of the blob is large enough to be accepted.
    int valids = 0;

    rect_1 = leftBlob->GetBoundingBox();
    rectangle(copy, rect_1.tl(), rect_1.br(), leftColor_2, 5);
    error_left = norm(statePt_left - center_left);
    area_left = leftBlob->Area();
    left_valid = error_left < sensitivity && area_left > area;
    if(left_valid){
        leftBlob->FillBlob(copy, leftColor, true);
        valids++;
    }
    circle(copy, center_left, 5, leftColor_2, -1);

    rect_2 = rightBlob->GetBoundingBox();
    rectangle(copy, rect_2.tl(), rect_2.br(), rightColor_2, 5);
    error_right = norm(statePt_right - center_right);
    area_right = rightBlob->Area();
    right_valid = error_right < sensitivity && area_right > area;
    if(right_valid){
        rightBlob->FillBlob(copy, rightColor, true);
        valids++;
    }
    circle(copy, center_right, 5, rightColor_2, -1);

    // Find the blob representing the robot.
    // We could add a restriction to only choose a blob between the two hands
    // in terms of x-coordinate.
    // A Kalman check can easily be done for the robot.
    Point robot_center;
    maxArea_1 = 0;
    max_1 = 0;
    numBlobs = robot_blobs.GetNumBlobs();
    if(0 < numBlobs){
//... portions of code omitted ...
Example 5: main
//... portions of code omitted ...
    // End of general setup.

    while (1) // The main capture loop.
    {
        // Grab the next frame from each camera.
        rightImage = GetNextCameraShot(rightCamera);
        leftImage = GetNextCameraShot(leftCamera);
        frameNumber++;

        // Build motion masks by comparing each frame against its background model.
        HSVImageRight = rightImage.clone();
        cvtColor(HSVImageRight, HSVImageRight, CV_BGR2HSV);
        CompareWithBackground(HSVImageRight, backImageRight, motionImageRight);
        medianBlur(motionImageRight, motionImageRight, 3);

        HSVImageLeft = leftImage.clone();
        cvtColor(HSVImageLeft, HSVImageLeft, CV_BGR2HSV);
        CompareWithBackground(HSVImageLeft, backImageLeft, motionImageLeft);
        medianBlur(motionImageLeft, motionImageLeft, 3);

        cout << "\nFor frame #" << frameNumber << " :\n";

        // Extract motion blobs from the right image.
        IplImage imageblobPixels = motionImageRight;
        CBlobResult blobs;
        blobs = CBlobResult(&imageblobPixels, NULL, 0); // Use a black background color.
        int minArea = 100 / ((640 / width) * (640 / width)); // NOTE: assumes width <= 640; larger widths would make the divisor 0.
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minArea);
        int foundBlobs = blobs.GetNumBlobs();

        cout << "Found " << foundBlobs << " motion blobs\n";

        // Create copies of the original images for drawing and display.
        displayImageRight = rightImage.clone();
        displayImageLeft = leftImage.clone();

        // Cycle through the blobs.
        for (int blobIndex = 0; blobIndex < blobs.GetNumBlobs() && blobIndex < numberOfBlobs; blobIndex++)
        {
            cout << "Blob #" << blobIndex << " : ";

            // Get the blob's bounding box.
            CBlob * blob = blobs.GetBlob(blobIndex);
            int x = blob->GetBoundingBox().x;
            int y = blob->GetBoundingBox().y;
            int w = blob->GetBoundingBox().width;
            int h = blob->GetBoundingBox().height;

            int sep = 0;

            // The point for which we want to find depth: the box center.
            PixPoint inP = {x + w/2, y + h/2}, oP = {0, 0};
            cout << "inPoint = {" << inP.x << ", " << inP.y << "} ";

            // Initialize the rectangle in which the corresponding point likely lies.
            Rectangle rect;
            rect.location.x = -1;
            rect.location.y = inP.y - 5;
            rect.size.x = rightImage.cols;
            rect.size.y = 11;

            // Find the corresponding point in the left image and calculate the separation.
            oP = PointCorresponder::correspondPoint(rightImage, leftImage, inP, rect, motionImageLeft);
            sep = inP.x - oP.x;
            cout << "foundPoint = {" << oP.x << ", " << oP.y << "} ";

            // Just for visual presentation.
            DrawRect(displayImageRight, x, y, w, h);
            cv::circle(displayImageRight, Point(inP.x, inP.y), 10, Scalar(0), 3);
            cv::circle(displayImageLeft, Point(oP.x, oP.y), 10, Scalar(0), 3);

            // The thing we were looking for... how can we forget to print this? :P
            cout << "separation = " << sep << "\n";
        }

        // Show the windows.
        cv::namedWindow("RIGHT");
        cv::namedWindow("thresh");
        cv::namedWindow("LEFT");
        imshow("LEFT", displayImageLeft);
        imshow("RIGHT", displayImageRight);
        imshow("thresh", motionImageRight);

        // Loop terminating condition: stop on any key press (waits ~27 ms per frame).
        if (waitKey(27) >= 0) break;
    }

    // Mission successful! :D :)
    return 0;
}
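Example 5 depends on several project-specific helpers outside the excerpt (GetNextCameraShot, CompareWithBackground, DrawRect, PointCorresponder::correspondPoint). The two small structs it uses are also not shown; plausible definitions, assumed purely for illustration, would be:

// Assumed definitions of the small aggregate types used in Example 5.
struct PixPoint
{
    int x, y; // a pixel coordinate
};

struct Rectangle
{
    PixPoint location; // top-left corner (the example sets x to -1, presumably meaning the full row width)
    PixPoint size;     // width (x) and height (y)
};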
Example 6: blob_center
/* Find the center of a given blob. */
CvPoint MarkerCapture::blob_center(CBlob blob)
{
    CvPoint point;
    point.x = blob.GetBoundingBox().x + (blob.GetBoundingBox().width / 2);
    point.y = blob.GetBoundingBox().y + (blob.GetBoundingBox().height / 2);
    return point;
}
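A usage sketch for this helper: draw a dot at the center of every detected blob. The names markerCapture, blobs, and frame are assumptions for illustration:

// Sketch: mark the center of each blob found in the current frame.
for (int i = 0; i < blobs.GetNumBlobs(); i++)
{
    CvPoint c = markerCapture.blob_center(*blobs.GetBlob(i)); // blob is passed by value
    cvCircle(frame, c, 3, CV_RGB(0, 255, 0), -1);             // filled green dot
}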
Example 7: main
int main()
{
    CBlobResult blobs;
    CBlob *currentBlob;
    CvPoint pt1, pt2;
    CvRect cvRect;
    int key = 0;
    IplImage* frame = 0;

    // Initialize capturing live feed from video file or camera.
    CvCapture* capture = cvCaptureFromFile( "MOV.MPG" );

    // Can't get device? Complain and quit.
    if( !capture )
    {
        printf( "Could not initialize capturing...\n" );
        return -1;
    }

    // Get the frames per second (only meaningful once capture has succeeded).
    int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
    if( fps <= 0 )
        fps = 25; // fall back to a sane default if the container reports no FPS

    // Windows used to display input video with bounding rectangles
    // and the thresholded video.
    cvNamedWindow( "video" );
    cvNamedWindow( "thresh" );

    // An infinite loop
    while( key != 'x' )
    {
        // If we couldn't grab a frame... quit.
        if( !( frame = cvQueryFrame( capture ) ) )
            break;

        // Get object's thresholded image (blue = white, rest = black).
        IplImage* imgThresh = GetThresholdedImageHSV( frame );

        // Detect the white blobs from the black background.
        blobs = CBlobResult( imgThresh, NULL, 0 );

        // Exclude white blobs smaller than the given value (10).
        // The bigger the last parameter, the bigger the blobs need
        // to be for inclusion.
        blobs.Filter( blobs,
                      B_EXCLUDE,
                      CBlobGetArea(),
                      B_LESS,
                      10 );

        // Attach a bounding rectangle for each blob discovered.
        int num_blobs = blobs.GetNumBlobs();
        for ( int i = 0; i < num_blobs; i++ )
        {
            currentBlob = blobs.GetBlob( i );
            cvRect = currentBlob->GetBoundingBox();

            pt1.x = cvRect.x;
            pt1.y = cvRect.y;
            pt2.x = cvRect.x + cvRect.width;
            pt2.y = cvRect.y + cvRect.height;

            // Attach bounding rect to blob in original video input.
            cvRectangle( frame,
                         pt1,
                         pt2,
                         cvScalar(0, 0, 0, 0),
                         1,
                         8,
                         0 );
        }

        // Show the black-and-white and original images.
        cvShowImage( "thresh", imgThresh );
        cvShowImage( "video", frame );

        // Optional - used to slow down the display of frames.
        key = cvWaitKey( 2000 / fps );

        // Prevent memory leaks by releasing the thresholded image.
        cvReleaseImage( &imgThresh );
    }

    // We're through with using the camera.
    cvReleaseCapture( &capture );

    return 0;
}