本文整理汇总了C++中CBlobResult::GetBlob方法的典型用法代码示例。如果您正苦于以下问题:C++ CBlobResult::GetBlob方法的具体用法?C++ CBlobResult::GetBlob怎么用?C++ CBlobResult::GetBlob使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类CBlobResult
的用法示例。
在下文中一共展示了CBlobResult::GetBlob方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: getNearestBlob
// Returns the blob whose bounding-box centre is closest (Euclidean distance)
// to the given coordinate.  If the result contains no blobs, a
// default-constructed CBlob is returned.
CBlob getNearestBlob(CBlobResult blobs, coord coordinate){
    int tot = blobs.GetNumBlobs();
    CBlob Blob;
    coord tempCoord;
    // Robustness: the original indexed GetBlob(0) even when empty.
    if (tot == 0) return Blob;
    int minDistanceId = 0;
    double minDistance = -1.0; // sentinel: "no distance computed yet"
    // Single pass: compute the Euclidean distance from the requested
    // coordinate to every blob centre and remember the nearest one.
    // (The original buffered distances in a fixed float[10], which
    // overflowed whenever more than 10 blobs were detected, and its
    // comment claimed Manhattan distance while the code was Euclidean.)
    for (int i = 0; i < tot; i++){
        Blob = blobs.GetBlob(i);
        tempCoord.set( (int) Blob.MaxX(), (int) Blob.MinX(), (int) Blob.MaxY(), (int) Blob.MinY());
        double dx = tempCoord.cX - coordinate.cX;
        double dy = tempCoord.cY - coordinate.cY;
        double dist = sqrt(dx * dx + dy * dy);
        if (minDistance < 0.0 || dist < minDistance){
            minDistance = dist;
            minDistanceId = i;
        }
    }
    // Return the blob corresponding to the minimum distance.
    Blob = blobs.GetBlob( minDistanceId );
    return Blob;
}
示例2: drawInitialBlobs
// Draws the bounding box of every blob in `blobs` onto `tmp_frame`
// (colour 255,255,0 — yellow, as interpreted by drawBlob).
void drawInitialBlobs(IplImage * tmp_frame, CBlobResult blobs){
    coord drawCoord;
    const int numBlobs = blobs.GetNumBlobs(); // hoisted: evaluated once, not per iteration
    for (int i = 0; i < numBlobs; i++){
        // Fetch the blob once instead of four separate GetBlob(i) calls.
        CBlob blob = blobs.GetBlob(i);
        //!Creating the coordinate struct
        drawCoord.set( (int) blob.MaxX(), (int) blob.MinX(), (int) blob.MaxY(), (int) blob.MinY());
        drawBlob(tmp_frame, drawCoord, 255, 255, 0);
    }
}
示例3: nextIteration
// Segments the foreground of `img` against the stored background image:
// absolute difference, fixed binary threshold, blob detection, size filter,
// then publishes each surviving blob's bounding box into detectionResult->fgList.
void ForegroundDetector::nextIteration(const Mat &img)
{
    // Nothing to diff against until a background frame has been set.
    if(bgImg.empty())
    {
        return;
    }
    // BUG FIX: cv::Mat's constructor takes (rows, cols, type); the original
    // passed (img.cols, img.rows, ...), transposing the allocation.
    // absdiff/threshold would have reallocated anyway, but allocate correctly.
    Mat absImg = Mat(img.rows, img.cols, img.type());
    Mat threshImg = Mat(img.rows, img.cols, img.type());
    // |bg - img|, then binarize: pixels differing by more than fgThreshold
    // become foreground (255).
    absdiff(bgImg, img, absImg);
    threshold(absImg, threshImg, fgThreshold, 255, CV_THRESH_BINARY);
    // Blob detection uses the legacy C API, so wrap the Mat header.
    IplImage im = (IplImage)threshImg;
    CBlobResult blobs = CBlobResult(&im, NULL, 0);
    // Discard blobs smaller than the configured minimum area.
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobSize);
    // Publish the bounding boxes of the remaining blobs.
    vector<Rect>* fgList = detectionResult->fgList;
    fgList->clear();
    for(int i = 0; i < blobs.GetNumBlobs(); i++)
    {
        CBlob *blob = blobs.GetBlob(i);
        CvRect rect = blob->GetBoundingBox();
        fgList->push_back(rect);
    }
}
示例4: findBlobs
void ScheinrieseApp::findBlobs() {
CBlobResult blobs;
int i;
CBlob *currentBlob;
IplImage *original, *originalThr;
// load an image and threshold it
original = cvLoadImage("pic1.png", 0);
cvThreshold( original, originalThr, 100, 0, 255, CV_THRESH_BINARY );
// find non-white blobs in thresholded image
blobs = CBlobResult( originalThr, NULL, 255 );
// exclude the ones smaller than param2 value
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, param2 );
// get mean gray color of biggest blob
CBlob biggestBlob;
CBlobGetMean getMeanColor( original );
double meanGray;
blobs.GetNth( CBlobGetArea(), 0, biggestBlob );
meanGray = getMeanColor( biggestBlob );
// display filtered blobs
cvMerge( originalThr, originalThr, originalThr, NULL, displayedImage );
for (i = 0; i < blobs.GetNumBlobs(); i++ )
{
currentBlob = blobs.GetBlob(i);
currentBlob->FillBlob( displayedImage, CV_RGB(255,0,0));
}
}
示例5: getBubbles
// Collects all bubble-sized blobs inside the rectangle (xi1,yi1)-(xi2,yi2)
// of the raw sheet and wraps each as a Bubble with its centroid in pixels
// and in millimetres.
vector<Bubble> OMRSheet::getBubbles(int xi1, int yi1, int xi2, int yi2){
    vector <Bubble> bubbles;
    cout<<"Bubble area "<<bubbleArea;
    // accept blobs between 50% and 150% of the nominal bubble area
    int minArea = bubbleArea/2, maxArea = bubbleArea*1.5;
    CBlobResult blobs = ImageUtils::findBlobs(rawSheet, minArea, maxArea, cvRect(xi1, yi1, xi2-xi1, yi2-yi1));
    for (int i = 0; i < blobs.GetNumBlobs(); i++ )
    {
        CvRect rect = blobs.GetBlob(i)->GetBoundingBox();
        Point centroid = ImageUtils::findCentroid(rawSheet, &rect);
        // pixel coords -> millimetres relative to sheet origin (x1, y1);
        // assumes 15 px per mm — TODO confirm against sheet calibration
        Point centroidMM((centroid.x() - x1)/15, (centroid.y() - y1)/15);
        // BUG FIX: the scraped source read "¢roidMM"/"¢roid" — HTML-entity
        // corruption of "&centroidMM"/"&centroid" — which did not compile.
        // NOTE(review): Bubble receives pointers to these loop-local Points;
        // verify Bubble copies them rather than storing the pointers.
        Bubble bubble(blobs.GetBlob(i), &centroidMM, &centroid);
        bubbles.push_back(bubble);
    }
    return bubbles;
}
示例6: computeWhiteMaskOtsu
// Builds a "white region" mask by combining two adaptively tuned thresholds —
// one on the RGB red band, one on the HSV saturation band — then accumulates
// the resulting blobs (length-filtered to [bmin, bmax]) into `blobs`.
// If new blobs were found and both masks are non-trivial, it masks the
// detected regions out of the inputs and recurses on the remainder.
// Parameters:
//   imgRGBin / imgHSVIn  - input images (imgHSVIn is converted BGR->HSV here)
//   blobs                - accumulator; new blobs are appended and returned
//   limitRGB / limitHSV  - reference pixel counts bounding each mask's size
//   RGBratio / HSVratio  - scale factors applied to those limits
//   bmin / bmax          - accepted blob length range
//   i                    - index of the first blob in `blobs` to consider
//                          newly-added on this recursion level
// Returns the (possibly grown) blob accumulator.
CBlobResult computeWhiteMaskOtsu(Mat& imgRGBin, Mat& imgHSVIn, CBlobResult& blobs, int limitRGB, int limitHSV, double RGBratio, double HSVratio, int bmin, int bmax, int i){
waitKey(30);
Mat BGRbands[3];
split(imgRGBin,BGRbands);
Mat imgHSV;
cvtColor(imgHSVIn,imgHSV,CV_BGR2HSV);
Mat HSVbands[3];
split(imgHSV,HSVbands);
Mat maskHSV, maskRGB, maskT;
// Start from the Otsu threshold on the red band, then raise it until the
// mask shrinks below RGBratio*limitRGB pixels (or the threshold saturates).
// NOTE: `&` is a bitwise AND of two bool conditions — both operands are
// always evaluated; result is equivalent to && here.
int otsuTRGB = getThreshVal_Otsu_8u(BGRbands[2]);
do{
threshold(BGRbands[2],maskRGB,otsuTRGB,255,THRESH_BINARY);
otsuTRGB++;
}while(countNonZero(maskRGB)>(RGBratio*limitRGB) & otsuTRGB<=255);
// Same idea on the saturation band, but inverted threshold and lowered
// until the mask is small enough (or the threshold reaches 0).
int otsuTHSV = getThreshVal_Otsu_8u(HSVbands[1]);
do{
threshold(HSVbands[1],maskHSV,otsuTHSV,255,THRESH_BINARY_INV);
otsuTHSV--;
}while(countNonZero(maskHSV)>(HSVratio*limitHSV) & otsuTHSV>=0); // 0.1
// Union of both masks is the candidate "white" area.
bitwise_or(maskHSV,maskRGB,maskT);
int blobSizeBefore = blobs.GetNumBlobs();
// Append this iteration's blobs to the accumulator, then drop those whose
// length falls outside [bmin, bmax].
blobs = blobs + CBlobResult( maskT ,Mat(),8);
blobs.Filter( blobs, B_EXCLUDE, CBlobGetLength(), B_GREATER, bmax );
blobs.Filter( blobs, B_EXCLUDE, CBlobGetLength(), B_LESS, bmin );
int blobSizeAfter = blobs.GetNumBlobs();
Mat newMask(maskT.size(),maskT.type());
newMask.setTo(0);
// Paint only the newly-added blobs (index i onward) with a plausible area
// (400..5000 px) into newMask.
for(;i<blobs.GetNumBlobs();i++){
double area = blobs.GetBlob(i)->Area();
if(area < 5000 && area > 400)
blobs.GetBlob(i)->FillBlob(newMask,CV_RGB(255,255,255),0,0,true);
}
// Recurse on the image with the found regions masked out, but only if both
// masks were non-trivial and this pass actually added blobs (termination
// guarantee).  Note newMask is inverted in between: RGB keeps the detected
// regions, HSV gets their complement — presumably intentional; verify.
if(countNonZero(maskRGB)>400 && countNonZero(maskHSV)>400 && blobSizeBefore!=blobSizeAfter){
vector<Mat> BGRbands; split(imgRGBin,BGRbands);
Mat maskedRGB = applyMaskBandByBand(newMask,BGRbands);
bitwise_not(newMask,newMask);
split(imgHSVIn,BGRbands);
Mat maskedHSV = applyMaskBandByBand(newMask,BGRbands);
blobs = computeWhiteMaskOtsu(maskedRGB, maskedHSV, blobs, countNonZero(maskRGB),countNonZero(maskHSV),RGBratio, HSVratio, bmin, bmax, i-1);
}
return blobs;
}
示例7: extractBall
// Isolates ball-coloured pixels, filters the resulting blobs by area and
// compactness, and copies the survivors into the global `blobs` array.
// Prints the remaining blob count after each filtering stage.
void extractBall()
{
    imgTransform(BALL_HUE_U, BALL_HUE_L, BALL_SAT_U, BALL_SAT_L, VAL_U, VAL_L);
    blobRes = CBlobResult(dst, NULL, 0);

    // Stage 1: keep blobs larger than BLOB_SIZE_MIN.
    blobRes.Filter( blobRes, B_EXCLUDE, CBlobGetArea(), B_LESS, BLOB_SIZE_MIN );
    numOfBlobs = blobRes.GetNumBlobs();
    cout << numOfBlobs << endl;

    // Stage 2: keep blobs smaller than BALL_SIZE_MAX.
    blobRes.Filter( blobRes, B_EXCLUDE, CBlobGetArea(), B_GREATER, BALL_SIZE_MAX );
    numOfBlobs = blobRes.GetNumBlobs();
    cout << numOfBlobs << endl;

    // Stage 3: keep blobs more compact than BALL_COMPACTNESS.
    blobRes.Filter( blobRes, B_INCLUDE, CBlobGetCompactness(), B_GREATER, BALL_COMPACTNESS );
    numOfBlobs = blobRes.GetNumBlobs();
    cout << numOfBlobs << endl;

    // Export the survivors.
    for (int idx = 0; idx < numOfBlobs; ++idx) {
        blobs[idx] = blobRes.GetBlob(idx);
    }
};
示例8: extractBots
void extractBots()
{
//RED TEAM
imgTransform(TEAM_R_HUE_U, TEAM_R_HUE_L, TEAM_R_SAT_U, TEAM_R_SAT_L, VAL_U, VAL_L);
blobRes = CBlobResult(dst, NULL, 0);
blobRes.Filter( blobRes, B_EXCLUDE, CBlobGetArea(), B_LESS, BLOB_SIZE_MIN );// keep blobs larger than BLOB_SIZE_MIN
numOfBlobs = blobRes.GetNumBlobs(); cout << numOfBlobs << endl;
if(numOfBlobs == 2)
{
for (int i=0; i<2; i++)
blobRes.GetBlob(i)
for(int i=0; i<numOfBlobs; i++)
blobs[i] = blobRes.GetBlob(i);
};
void printBlobs()
{
CBlobGetXCenter getXC;
CBlobGetYCenter getYC;
CBlobGetArea getArea;
CBlobGetCompactness getCompactness;
printf("-----Printng Blobs------\n");
for(int i=0; i<numOfBlobs; i++)
{
printf("%d\t(%3.2f,%3.2f),%3.2f %3.2f\n", i, getXC(blobs[i]), getYC(blobs[i]), getArea(blobs[i]), getCompactness(blobs[i]));
}
printf("\n");
cvNamedWindow("old", 1);
cvNamedWindow("new", 1);
cvMoveWindow("old", 0,0);
cvMoveWindow("new", 0,400);
cvShowImage("old", img);
cvShowImage("new", dst);
cvWaitKey();
};
示例9: on_trackbar
// threshold trackbar callback
void on_trackbar( int dummy )
{
if(!originalThr)
{
originalThr = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U,1);
}
if(!displayedImage)
{
displayedImage = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U,3);
}
// threshold input image
cvThreshold( original, originalThr, param1, 255, CV_THRESH_BINARY );
// get blobs and filter them using its area
CBlobResult blobs;
int i;
CBlob *currentBlob;
// find blobs in image
blobs = CBlobResult( originalThr, NULL, 255 );
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, param2 );
// display filtered blobs
cvMerge( originalThr, originalThr, originalThr, NULL, displayedImage );
for (i = 0; i < blobs.GetNumBlobs(); i++ )
{
currentBlob = blobs.GetBlob(i);
currentBlob->FillBlob( displayedImage, CV_RGB(255,0,0));
}
cvShowImage( wndname, displayedImage );
}
示例10: main
int main(int argc, char *argv[])
{
CvCapture* capture = cvCreateFileCapture( "recording_01.avi");
handOrientation rightOrientationLast = NONE, leftOrientationLast = NONE;
handOrientation rightOrientationCur = NONE, leftOrientationCur = NONE;
//cvNamedWindow("Input Image", CV_WINDOW_AUTOSIZE);
//cvNamedWindow("Skin Pixels", CV_WINDOW_AUTOSIZE);
cvNamedWindow("Skin Blobs", CV_WINDOW_AUTOSIZE);
while(1){
Mat imageBGR = cvQueryFrame(capture);
if(imageBGR.empty())break;
//imshow("Input Image", imageBGR);
// Convert the image to HSV colors.
Mat imageHSV = Mat(imageBGR.size(), CV_8UC3); // Full HSV color image.
cvtColor(imageBGR, imageHSV, CV_BGR2HSV); // Convert from a BGR to an HSV image.
std::vector<Mat> channels(3);
split(imageHSV, channels);
Mat planeH = channels[0];
Mat planeS = channels[1];
Mat planeV = channels[2];
// Detect which pixels in each of the H, S and V channels are probably skin pixels.
threshold(channels[0], channels[0], 150, UCHAR_MAX, CV_THRESH_BINARY_INV);//18
threshold(channels[1], channels[1], 60, UCHAR_MAX, CV_THRESH_BINARY);//50
threshold(channels[2], channels[2], 170, UCHAR_MAX, CV_THRESH_BINARY);//80
// Combine all 3 thresholded color components, so that an output pixel will only
// be white if the H, S and V pixels were also white.
Mat imageSkinPixels = Mat( imageBGR.size(), CV_8UC3); // Greyscale output image.
bitwise_and(channels[0], channels[1], imageSkinPixels); // imageSkin = H {BITWISE_AND} S.
bitwise_and(imageSkinPixels, channels[2], imageSkinPixels); // imageSkin = H {BITWISE_AND} S {BITWISE_AND} V.
// Show the output image on the screen.
//imshow("Skin Pixels", imageSkinPixels);
IplImage ipl_imageSkinPixels = imageSkinPixels;
// Find blobs in the image.
CBlobResult blobs;
blobs = CBlobResult(&ipl_imageSkinPixels, NULL, 0); // Use a black background color.
// Ignore the blobs whose area is less than minArea.
blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobArea);
srand (time(NULL));
// Show the large blobs.
IplImage* imageSkinBlobs = cvCreateImage(imageBGR.size(), 8, 3); //Colored Output//,1); Greyscale output image.
for (int i = 0; i < blobs.GetNumBlobs(); i++) {
CBlob *currentBlob = blobs.GetBlob(i);
currentBlob->FillBlob(imageSkinBlobs, CV_RGB(rand()%255,rand()%255,rand()%255)); // Draw the large blobs as white.
cvDrawRect(imageSkinBlobs,
cvPoint(currentBlob->GetBoundingBox().x,currentBlob->GetBoundingBox().y),
cvPoint(currentBlob->GetBoundingBox().x + currentBlob->GetBoundingBox().width,currentBlob->GetBoundingBox().y + currentBlob->GetBoundingBox().height),
cvScalar(0,0,255),
2);//Draw Bounding Boxes
}
cvShowImage("Skin Blobs", imageSkinBlobs);
//Gestures
//std::cout << "Number of Blobs: "<< blobs.GetNumBlobs() <<endl;
if(blobs.GetNumBlobs() == 0){
//picture empty
}else if(blobs.GetNumBlobs() == 1) {
//head detected
}else if(blobs.GetNumBlobs() == 2 || blobs.GetNumBlobs() == 3){
//head + one hand || head + two hands
CvRect rect[3];
int indexHead = -1, indexHandLeft = -1, indexHandRight = -1;
//Get Bounding Boxes
for(int i = 0; i< blobs.GetNumBlobs(); i++){
rect[i] = blobs.GetBlob(i)->GetBoundingBox();
}
//Detect Head and Hand indexes
if(blobs.GetNumBlobs() == 2){
int indexHand = -1;
if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y){
//.........这里部分代码省略.........
示例11: blobTracking
//==============================================================================
// Detects blobs in the binary hsv_mask, fills them in blue on a display
// image, draws numbered target reticles, and copies the annotated display
// into the lower-right quadrant of the pFour composite image.
void PanTiltCameraClass::blobTracking(IplImage* hsv_mask,
IplImage* pFour,
IplImage* pImg)
{
    //--- Get blobs and filter them using the blob area
    CBlobResult blobs;
    CBlob *currentBlob;
    //--- Create a thresholded image and display image --------------------
    //--- Creates binary image
    IplImage* originalThr = cvCreateImage(cvGetSize(hsv_mask), IPL_DEPTH_8U,1);
    //--- Create 3-channel image
    IplImage* display = cvCreateImage(cvGetSize(hsv_mask),IPL_DEPTH_8U,3);
    //--- Copies the original
    cvMerge( hsv_mask, hsv_mask, hsv_mask, NULL, display );
    //--- Makes a copy for processing
    cvCopy(hsv_mask,originalThr);
    //--- Find blobs in image ---------------------------------------------
    int blobThreshold = 0;
    bool blobFindMoments = true;
    blobs = CBlobResult( originalThr, originalThr, blobThreshold, blobFindMoments);
    //--- filters blobs according to size and radius constraints
    blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, this->minBlobSize );
    //--- display filtered blobs ------------------------------------------
    //--- copies the original in (for background)
    cvMerge( originalThr, originalThr, originalThr, NULL, display );
    CvPoint pts[this->NUMBER_OF_CIRCLES];
    //--- This sequence marks all the blobs
    for (int i = 0; i < blobs.GetNumBlobs(); i++ )
    {
        currentBlob = blobs.GetBlob(i);
        currentBlob->FillBlob( display, CV_RGB(0,0,255));
        //--- Get blobs centerpoint
        CvPoint bcg;
        bcg.x = (int)(currentBlob->MinX()+((currentBlob->MaxX()-currentBlob->MinX())/2));
        bcg.y = (int)(currentBlob->MinY()+((currentBlob->MaxY()-currentBlob->MinY())/2));
        // NOTE(review): bcg is computed but never stored into pts[], so the
        // reticles below are drawn at whatever pts[] contains — confirm
        // whether TargetReticle fills pts or `pts[i] = bcg` was intended.
        //--- Print the CG on the picture
        char blobtext[40];
        for(int k=0;k<this->NUMBER_OF_CIRCLES;k++)
        {
            sprintf(blobtext,"%d",k+1);
            TargetReticle(display,&pts[k],blobtext,6,CV_RGB(255,0,0));
        }//for
    }//for each blob
    //--- Set the ROI in the pFour image and copy the annotated display in
    cvSetImageROI(pFour,cvRect(pImg->width,pImg->height+80,pImg->width,pImg->height));
    cvCopy(display,pFour);
    // BUG FIX: the ROI was set on pFour but the original called
    // cvResetImageROI(display) — display never had an ROI — leaving
    // pFour's ROI active for subsequent operations.
    cvResetImageROI(pFour);
    //Clean up
    cvReleaseImage( &originalThr );
    cvReleaseImage( &display);
}
示例12: main
//.........这里部分代码省略.........
int numBlobs = blobs.GetNumBlobs();
if(0 == numBlobs){
cout << "can't find blobs!" << endl;
continue;
}
// detects robot as a blob
CBlobResult robot_blobs;
IplImage robot_temp = (IplImage) white_masked;
robot_blobs = CBlobResult(&robot_temp, NULL, 1);
robot_blobs = CBlobResult(white_masked, Mat(), NUMCORES);
if(0 == robot_blobs.GetNumBlobs()){
cout << "can't find robot_blobs!" << endl;
continue;
}
CBlob *curblob;
CBlob* blob_1;
CBlob* blob_2;
CBlob* leftBlob;
CBlob* rightBlob;
CBlob* robotBlob;
copy.setTo(Vec3b(0,0,0));
// chooses the two largest blobs for the hands
Point center_1, center_2;
int max_1 = 0;
int max_2 = 0;
int maxArea_1 = 0;
int maxArea_2 = 0;
for(int i=0;i<numBlobs;i++){
int area = blobs.GetBlob(i)->Area();
if(area > maxArea_1){
maxArea_2 = maxArea_1;
maxArea_1 = area;
max_2 = max_1;
max_1 = i;
} else if(area > maxArea_2){
maxArea_2 = area;
max_2 = i;
}
}
int i_1 = max_1;
int i_2 = max_2;
double area_left, area_right;
Rect rect_1;
Rect rect_2;
//determines which hand is left/right
blob_1 = blobs.GetBlob(i_1);
blob_2 = blobs.GetBlob(i_2);
center_1 = blob_1->getCenter();
center_2 = blob_2->getCenter();
bool left_is_1 = (center_1.x < center_2.x)? true : false;
leftBlob = (left_is_1)? blob_1 : blob_2;
rightBlob = (left_is_1)? blob_2 : blob_1;
center_left = leftBlob->getCenter();
center_right = rightBlob->getCenter();
//determine the number of valid hands
//validity is decided by whether or not the hand followed a logical movement,
//and if the area of the blob is large enough to be accepted
int valids = 0;
rect_1 = leftBlob->GetBoundingBox();
示例13: main
//.........这里部分代码省略.........
//End of General Stuff
while (1) //The infinite loop
{
//Beginning getting camera shots
rightImage = GetNextCameraShot(rightCamera);
leftImage = GetNextCameraShot(leftCamera);
frameNumber++;
//Done getting camera shots
//Beginning getting motion images
HSVImageRight = rightImage.clone();
cvtColor(HSVImageRight, HSVImageRight, CV_BGR2HSV);
CompareWithBackground(HSVImageRight, backImageRight, motionImageRight);
medianBlur(motionImageRight, motionImageRight, 3);
HSVImageLeft = leftImage.clone();
cvtColor(HSVImageLeft, HSVImageLeft, CV_BGR2HSV);
CompareWithBackground(HSVImageLeft, backImageLeft, motionImageLeft);
medianBlur(motionImageLeft, motionImageLeft, 3);
//Ended getting motion images
cout << "\nFor frame #" << frameNumber << " :\n";
//Beginning Getting Blobs
IplImage imageblobPixels = motionImageRight;
CBlobResult blobs;
blobs = CBlobResult(&imageblobPixels, NULL, 0); // Use a black background color.
int minArea = 100 / ((640 / width) * (640 / width));
blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minArea);
int foundBlobs = blobs.GetNumBlobs();
//Ended Getting Blobs
cout << "Found " << foundBlobs << " motion blobs\n";
//Creating copies of original images for modifying and displaying
displayImageRight = rightImage.clone();
displayImageLeft = leftImage.clone();
//Done creating copies
//Cycling through the blobs
for (int blobIndex = 0; blobIndex < blobs.GetNumBlobs() && blobIndex < numberOfBlobs; blobIndex++)
{
cout << "Blob #" << blobIndex << " : ";
//Getting blob details
CBlob * blob = blobs.GetBlob(blobIndex);
int x = blob->GetBoundingBox().x;
int y = blob->GetBoundingBox().y;
int w = blob->GetBoundingBox().width;
int h = blob->GetBoundingBox().height;
//Done getting blob details
int sep = 0;
//The point for which we want to find depth
PixPoint inP = {x + w/2, y + h/2}, oP = {0, 0};
cout << "inPoint = {" << inP.x << ", " << inP.y << "} ";
//Initialing the rectangle in which the corressponding point is likely in
Rectangle rect;
rect.location.x = -1;
rect.location.y = inP.y - 5;
rect.size.x = rightImage.cols;
rect.size.y = 11;
//Done initialising the target rectangle
//Find the corressponding point and calculate the sepertion
oP = PointCorresponder::correspondPoint(rightImage, leftImage, inP, rect, motionImageLeft);
sep = inP.x - oP.x;
cout << "foundPoint = {" << oP.x << ", " << oP.y << "} ";
//Just for visual presentation
DrawRect(displayImageRight, x, y, w, h);
cv::circle(displayImageRight, Point(inP.x, inP.y), 10, Scalar(0), 3);
cv::circle(displayImageLeft, Point(oP.x, oP.y), 10, Scalar(0), 3);
//Done decoration
//The thing we were looking for... how can we forget to print this? :P
cout << "seperation = " << sep << "\n";
}
//Show the windows
cv::namedWindow("RIGHT");
cv::namedWindow("thresh");
cv::namedWindow("LEFT");
imshow("LEFT", displayImageLeft);
imshow("RIGHT", displayImageRight);
imshow("thresh", motionImageRight);
//End of code for showing windows
//The loop terminating condition
if (waitKey(27) >= 0) break;
}
//Mission Successful!! :D :)
return 0;
}
示例14: init
//.........这里部分代码省略.........
threshold(planeV, planeV, 170, UCHAR_MAX, CV_THRESH_BINARY);//80
// Combine all 3 thresholded color components, so that an output pixel will only
// be white if the H, S and V pixels were also white.
Mat imageSkinPixels = Mat(img_hsv.size(), CV_8UC3); // Greyscale output image.
bitwise_and(planeH, planeS, imageSkinPixels); // imageSkin = H {BITWISE_AND} S.
bitwise_and(imageSkinPixels, planeV, imageSkinPixels); // imageSkin = H {BITWISE_AND} S {BITWISE_AND} V.
// Assing the Mat (C++) to an IplImage (C), this is necessary because the blob detection is writtn in old opnCv C version
IplImage ipl_imageSkinPixels = imageSkinPixels;
// RECODING: record the video using the C container variable
// RECODING: store the size (in memory meaning) of the image for recording purpouse
//size = img_cam->getSize();
//videoFile.write((char*) ipl_imageSkinPixels.imageData, size/3);
// Set up the blob detection.
CBlobResult blobs;
blobs.ClearBlobs();
blobs = CBlobResult(&ipl_imageSkinPixels, NULL, 0); // Use a black background color.
// Ignore the blobs whose area is less than minArea.
blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobArea);
// ##### Gestures #####
std::cout << "Number of Blobs: " << blobs.GetNumBlobs() <<endl;
if(blobs.GetNumBlobs() == 0)
{
//picture empty
}
else if(blobs.GetNumBlobs() == 1)
{
//head detected
trackHead(getCenterPoint(blobs.GetBlob(0)->GetBoundingBox()).x, getCenterPoint(blobs.GetBlob(0)->GetBoundingBox()).y);
}
else if(blobs.GetNumBlobs() == 2 || blobs.GetNumBlobs() == 3)
{
//head + one hand || head + two hands
Rect rect[3];
int indexHead = -1, indexHandLeft = -1, indexHandRight = -1;
//Get Bounding Boxes
for(int i = 0; i< blobs.GetNumBlobs(); i++)
{
rect[i] = blobs.GetBlob(i)->GetBoundingBox();
}
//Detect Head and Hand indexes
if(blobs.GetNumBlobs() == 2)
{
// head and one hand
int indexHand = -1;
if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y)
{
// rect[0] is head
indexHead = 0;
indexHand = 1;
}
else
{
// rect[1] is head
indexHead = 1;
indexHand = 0;
}
示例15: main
int main(int argc, char * argv[])
{
vector <string> imgNames;
vector <string> imgNamesMask;
char strFrame[20];
readImageSequenceFiles(imgNames, imgNamesMask);
list<TrackLine> trackLineArr;
// read org frame and forground for process
// you can modify it to read video by add a segment alg
for(unsigned int i = 40; i < imgNames.size() - 1; i++)
{
Mat frame = imread(imgNames[i]);
Mat grayImg;
cvtColor(frame, grayImg, CV_RGB2GRAY);
Mat maskImage = imread(imgNamesMask[i], 0);
// get blobs and filter them using its area
// use 'cvblobslib' to get the object blobs
threshold( maskImage, maskImage, 81, 255, CV_THRESH_BINARY );
medianBlur(maskImage, maskImage, 3);
IplImage ipl_maskImage = maskImage;
CBlobResult blobs = CBlobResult( &ipl_maskImage, NULL, 0 );
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 30 ); // filter blobs that area smaller than a certern num
list<CBlob *> remBlob;
for (int k = 0; k < blobs.GetNumBlobs(); k++)
{
remBlob.push_back(blobs.GetBlob(k));
}
printf("%d\n", trackLineArr.size());
for (list<TrackLine>::iterator trackIter = trackLineArr.begin(); trackIter != trackLineArr.end(); )
{
//kf predicition, get kfRect
Mat kfPrediction = (trackIter->kf).predict();
Point kfPrePt((int)(kfPrediction.at<float>(0)), (int)(kfPrediction.at<float>(1)));
Rect kfRect(kfPrePt.x - (trackIter->box).width / 2, kfPrePt.y - (trackIter->box).height / 2, (trackIter->box).width, (trackIter->box).height);
//ct predicition, get ctRect
int ctError = 0;
Rect ctRect(trackIter->box);
float score = (trackIter->ct).predicition(grayImg, ctRect);
rectangle(frame, kfRect, Scalar(0, 200, 0)); //green, kf predicition box
rectangle(frame, ctRect, Scalar(0, 0, 200)); //red, ct predicition box
//union predicit rectangle
//if they have no same area, we consider ct is wrong, because kalman is physical movement
float areaScale = (float)(sqrt((kfRect & ctRect).area() *1.0 / kfRect.area()));
Point movePoint((int)((ctRect.x - kfRect.x) * areaScale), (int)((ctRect.y - kfRect.y) * areaScale));
Rect unionPreRect = kfRect + movePoint;
//calc object box
Rect objRect;
int j = 0;
for (list<CBlob *>::iterator blobIter = remBlob.begin(); blobIter != remBlob.end(); )
{
Rect detRect((*blobIter)->GetBoundingBox());
float detArea = (float)((*blobIter)->Area());
if ((unionPreRect & detRect).area() > 0)
{
if (j++ == 0) objRect = detRect;
else objRect = objRect | detRect;
blobIter = remBlob.erase(blobIter);
}
else blobIter++;
}
// let box's area equal
float objArea = (float)(objRect.area());
objRect = Rect((int)(objRect.x + objRect.width / 2.0 - unionPreRect.width / 2.0),
(int)(objRect.y + objRect.height / 2.0 - unionPreRect.height / 2.0),
unionPreRect.width, unionPreRect.height);
float detAreaScale = (float)(sqrt(objArea * 1.0 / unionPreRect.area()));
if (detAreaScale > 1.0) detAreaScale = 1.0;
Point detMovePoint((int)((objRect.x - unionPreRect.x) * detAreaScale), (int)((objRect.y - unionPreRect.y) * detAreaScale));
Rect unionCorrRect = unionPreRect + detMovePoint;
// if detect area > 0
if (objArea > 0)
{
trackIter->box = unionCorrRect;
rectangle(frame, unionCorrRect, Scalar(200,0,0), 1);
//kf correct
Mat_<float> measurement(2,1);
measurement(0) = (float)((trackIter->box).x + (trackIter->box).width / 2.0);
measurement(1) = (float)((trackIter->box).y + (trackIter->box).height / 2.0);
(trackIter->kf).correct(measurement);
//ct update
(trackIter->ct).update(grayImg, trackIter->box);
trackIter++;
}
// else we beleve tracking miss
else
{
if ((trackIter->miss)++ == 5) trackIter = trackLineArr.erase(trackIter);
else trackIter++;
//.........这里部分代码省略.........