本文整理汇总了C++中CBlobResult::Filter方法的典型用法代码示例。如果您正苦于以下问题:C++ CBlobResult::Filter方法的具体用法?C++ CBlobResult::Filter怎么用?C++ CBlobResult::Filter使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类CBlobResult
的用法示例。
在下文中一共展示了CBlobResult::Filter方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: getBlobs
CBlobResult getBlobs(IplImage* tmp_frame, IplImage* binFore){
    //! Extracts blobs from a binary foreground mask and filters them:
    //! small noise blobs are dropped, and blobs that nearly cover the
    //! whole frame (a known library artifact) are excluded.
    //! @param tmp_frame original frame, used only for its dimensions
    //! @param binFore   binary foreground mask the blobs are taken from
    //! @return          the filtered blob set
    CBlobResult blobs;
    //! get the blobs from the image, with no mask, using a threshold of 10
    blobs = CBlobResult( binFore, NULL, 10, true );
    //! dump all found blobs for debugging
    blobs.PrintBlobs( "blobs.txt" );
    //! discard blobs with too small an area (noise)
    blobs.Filter( blobs, B_INCLUDE, CBlobGetArea(), B_GREATER, 40);
    //! These two filters work around a library bug that can report one blob
    //! matching the whole image: drop blobs close to the full-frame area and
    //! blobs whose perimeter approaches the full-frame perimeter.
    blobs.Filter( blobs, B_INCLUDE, CBlobGetArea(), B_LESS, (tmp_frame->height)*(tmp_frame->width)*0.8);
    //! BUGFIX: the bound used to be h + w*2*0.8 due to a missing
    //! parenthesis; the intended bound is 80% of the frame perimeter,
    //! i.e. (h + w) * 2 * 0.8.
    blobs.Filter( blobs, B_INCLUDE, CBlobGetPerimeter(), B_LESS, ((tmp_frame->height)+(tmp_frame->width))*2*0.8);
    //! dump the filtered blobs for debugging
    blobs.PrintBlobs( "filteredBlobs.txt" );
    return blobs;
}
示例2: extractBall
void extractBall()
{
    // Isolate the ball's colour range in the working image.
    imgTransform(BALL_HUE_U, BALL_HUE_L, BALL_SAT_U, BALL_SAT_L, VAL_U, VAL_L);
    blobRes = CBlobResult(dst, NULL, 0);

    // Drop blobs below the minimum size.
    blobRes.Filter(blobRes, B_EXCLUDE, CBlobGetArea(), B_LESS, BLOB_SIZE_MIN);
    numOfBlobs = blobRes.GetNumBlobs();
    cout << numOfBlobs << endl;

    // Drop blobs above the maximum ball size.
    blobRes.Filter(blobRes, B_EXCLUDE, CBlobGetArea(), B_GREATER, BALL_SIZE_MAX);
    numOfBlobs = blobRes.GetNumBlobs();
    cout << numOfBlobs << endl;

    // Keep only sufficiently compact (round-ish) blobs.
    blobRes.Filter(blobRes, B_INCLUDE, CBlobGetCompactness(), B_GREATER, BALL_COMPACTNESS);
    numOfBlobs = blobRes.GetNumBlobs();
    cout << numOfBlobs << endl;

    // Copy the surviving blobs into the global blob table.
    for (int idx = 0; idx < numOfBlobs; ++idx)
        blobs[idx] = blobRes.GetBlob(idx);
};
示例3: findBlobs
void ScheinrieseApp::findBlobs() {
CBlobResult blobs;
int i;
CBlob *currentBlob;
IplImage *original, *originalThr;
// load an image and threshold it
original = cvLoadImage("pic1.png", 0);
cvThreshold( original, originalThr, 100, 0, 255, CV_THRESH_BINARY );
// find non-white blobs in thresholded image
blobs = CBlobResult( originalThr, NULL, 255 );
// exclude the ones smaller than param2 value
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, param2 );
// get mean gray color of biggest blob
CBlob biggestBlob;
CBlobGetMean getMeanColor( original );
double meanGray;
blobs.GetNth( CBlobGetArea(), 0, biggestBlob );
meanGray = getMeanColor( biggestBlob );
// display filtered blobs
cvMerge( originalThr, originalThr, originalThr, NULL, displayedImage );
for (i = 0; i < blobs.GetNumBlobs(); i++ )
{
currentBlob = blobs.GetBlob(i);
currentBlob->FillBlob( displayedImage, CV_RGB(255,0,0));
}
}
示例4: detect_blobs
/* Detect blobs larger than min_size in a given IplImage. */
CBlobResult MarkerCapture::detect_blobs(IplImage *img, int min_size = 10){
    // Extract white blobs from the thresholded image, then filter out
    // anything whose area falls below the requested minimum.
    CBlobResult result(img, NULL, 0);
    result.Filter(result, B_EXCLUDE, CBlobGetArea(), B_LESS, min_size);
    return result;
}
示例5: nextIteration
void ForegroundDetector::nextIteration(const Mat &img)
{
    // Diffs the frame against the stored background model, thresholds the
    // absolute difference, and publishes the bounding boxes of all
    // sufficiently large foreground blobs into detectionResult->fgList.
    if(bgImg.empty())
    {
        return; // no background model yet, nothing to diff against
    }
    // BUGFIX: cv::Mat's constructor is (rows, cols, type); the arguments
    // were swapped, which breaks non-square frames.
    Mat absImg = Mat(img.rows, img.cols, img.type());
    Mat threshImg = Mat(img.rows, img.cols, img.type());

    absdiff(bgImg, img, absImg);
    threshold(absImg, threshImg, fgThreshold, 255, CV_THRESH_BINARY);

    // cvBlobsLib operates on the legacy C image type.
    IplImage im = (IplImage)threshImg;
    CBlobResult blobs = CBlobResult(&im, NULL, 0);
    // discard blobs smaller than the configured minimum
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobSize);

    vector<Rect>* fgList = detectionResult->fgList;
    fgList->clear();
    for(int i = 0; i < blobs.GetNumBlobs(); i++)
    {
        CBlob *blob = blobs.GetBlob(i);
        CvRect rect = blob->GetBoundingBox();
        fgList->push_back(rect);
    }
}
示例6: computeWhiteMaskOtsu
// Builds a binary "white" mask by combining an adaptively relaxed Otsu
// threshold on the RGB red channel with one on the HSV saturation channel,
// appends the resulting blobs to `blobs`, and recurses once on the
// masked-out remainder when enough new pixels/blobs were found.
//
// Parameters:
//   imgRGBin/imgHSVIn  - input images (imgHSVIn is converted BGR->HSV here)
//   blobs              - accumulator; newly found blobs are appended
//   limitRGB/limitHSV  - pixel-count budgets, scaled by RGBratio/HSVratio
//   bmin/bmax          - blob-length bounds used for filtering
//   i                  - starting blob index for the fill loop below
// Returns the accumulated blob set.
CBlobResult computeWhiteMaskOtsu(Mat& imgRGBin, Mat& imgHSVIn, CBlobResult& blobs, int limitRGB, int limitHSV, double RGBratio, double HSVratio, int bmin, int bmax, int i){
waitKey(30);
Mat BGRbands[3];
split(imgRGBin,BGRbands);
Mat imgHSV;
cvtColor(imgHSVIn,imgHSV,CV_BGR2HSV);
Mat HSVbands[3];
split(imgHSV,HSVbands);
Mat maskHSV, maskRGB, maskT;
// Raise the Otsu threshold on the red channel until the mask fits the
// pixel budget (or the threshold saturates at 255).
// NOTE(review): the loop conditions use bitwise '&' on boolean operands;
// it behaves like '&&' here but both sides are always evaluated.
int otsuTRGB = getThreshVal_Otsu_8u(BGRbands[2]);
do{
threshold(BGRbands[2],maskRGB,otsuTRGB,255,THRESH_BINARY);
otsuTRGB++;
}while(countNonZero(maskRGB)>(RGBratio*limitRGB) & otsuTRGB<=255);
// Same idea on the saturation channel, lowering an inverted threshold.
int otsuTHSV = getThreshVal_Otsu_8u(HSVbands[1]);
do{
threshold(HSVbands[1],maskHSV,otsuTHSV,255,THRESH_BINARY_INV);
otsuTHSV--;
}while(countNonZero(maskHSV)>(HSVratio*limitHSV) & otsuTHSV>=0); // 0.1
// Union of the two per-channel masks.
bitwise_or(maskHSV,maskRGB,maskT);
int blobSizeBefore = blobs.GetNumBlobs();
// Append blobs found in the combined mask (8-connectivity).
blobs = blobs + CBlobResult( maskT ,Mat(),8);
// Keep only blobs whose length lies within [bmin, bmax].
blobs.Filter( blobs, B_EXCLUDE, CBlobGetLength(), B_GREATER, bmax );
blobs.Filter( blobs, B_EXCLUDE, CBlobGetLength(), B_LESS, bmin );
int blobSizeAfter = blobs.GetNumBlobs();
Mat newMask(maskT.size(),maskT.type());
newMask.setTo(0);
// Paint the mid-sized blobs (area in (400, 5000)) into a fresh mask,
// starting from index i (presumably the caller's blob count before this
// call — verify against callers).
for(;i<blobs.GetNumBlobs();i++){
double area = blobs.GetBlob(i)->Area();
if(area < 5000 && area > 400)
blobs.GetBlob(i)->FillBlob(newMask,CV_RGB(255,255,255),0,0,true);
}
// If both channel masks found a meaningful number of pixels and new blobs
// were added, recurse on the input with the found regions masked out.
if(countNonZero(maskRGB)>400 && countNonZero(maskHSV)>400 && blobSizeBefore!=blobSizeAfter){
vector<Mat> BGRbands; split(imgRGBin,BGRbands);
Mat maskedRGB = applyMaskBandByBand(newMask,BGRbands);
bitwise_not(newMask,newMask);
split(imgHSVIn,BGRbands);
Mat maskedHSV = applyMaskBandByBand(newMask,BGRbands);
// NOTE(review): the recursion passes i-1 as the new start index —
// looks intentional, but confirm it cannot revisit an already-filled blob.
blobs = computeWhiteMaskOtsu(maskedRGB, maskedHSV, blobs, countNonZero(maskRGB),countNonZero(maskHSV),RGBratio, HSVratio, bmin, bmax, i-1);
}
return blobs;
}
示例7: extractBots
void extractBots()
{
//RED TEAM
imgTransform(TEAM_R_HUE_U, TEAM_R_HUE_L, TEAM_R_SAT_U, TEAM_R_SAT_L, VAL_U, VAL_L);
blobRes = CBlobResult(dst, NULL, 0);
blobRes.Filter( blobRes, B_EXCLUDE, CBlobGetArea(), B_LESS, BLOB_SIZE_MIN );// keep blobs larger than BLOB_SIZE_MIN
numOfBlobs = blobRes.GetNumBlobs(); cout << numOfBlobs << endl;
if(numOfBlobs == 2)
{
for (int i=0; i<2; i++)
blobRes.GetBlob(i)
for(int i=0; i<numOfBlobs; i++)
blobs[i] = blobRes.GetBlob(i);
};
void printBlobs()
{
    // Functors for the blob properties we report.
    CBlobGetXCenter getXC;
    CBlobGetYCenter getYC;
    CBlobGetArea getArea;
    CBlobGetCompactness getCompactness;

    // Dump index, centre, area and compactness of every tracked blob.
    printf("-----Printng Blobs------\n");
    for (int k = 0; k < numOfBlobs; ++k)
    {
        printf("%d\t(%3.2f,%3.2f),%3.2f %3.2f\n", k, getXC(blobs[k]), getYC(blobs[k]), getArea(blobs[k]), getCompactness(blobs[k]));
    }
    printf("\n");

    // Show the source and processed images side by side and wait for a key.
    cvNamedWindow("old", 1);
    cvNamedWindow("new", 1);
    cvMoveWindow("old", 0, 0);
    cvMoveWindow("new", 0, 400);
    cvShowImage("old", img);
    cvShowImage("new", dst);
    cvWaitKey();
};
示例8: givedepth
// Segments an HSV colour window (globals hl/hh, sl/sh, vl/vh) in the given
// BGR image, cleans the resulting binary mask with erode/dilate, extracts
// the two largest blobs (treated as the two hands), paints them blue and
// green on a display image, and hands it to coordinates().
// NOTE(review): the pixel loops are bounded by the global hi2's dimensions
// while indexing into localimage — this assumes hi2 and localimagergb have
// identical sizes; verify.
// NOTE(review): none of the images created here are released — this leaks
// on every call.
void givedepth(IplImage *localimagergb)
{ IplImage*localimage=cvCreateImage(cvGetSize(localimagergb),IPL_DEPTH_8U,3);
// work in HSV space
cvCvtColor(localimagergb,localimage,CV_BGR2HSV);
IplImage *blobbedscaling=cvCreateImage(cvGetSize(localimagergb),IPL_DEPTH_8U,3);
uchar *itemp=(uchar *)(localimage->imageData);
IplImage *binaryscaling=cvCreateImage(cvGetSize(localimagergb),IPL_DEPTH_8U,1);
uchar *itemp1=(uchar *)(binaryscaling ->imageData);
for(int i=0;i<hi2->height;i++){
for(int j=0;j<hi2->width;j++){
// pixel inside the configured H/S/V window -> mark black (foreground)
if((itemp[i*localimage->widthStep+j*localimage->nChannels] <hh)
&&
(itemp[i*localimage->widthStep+j*localimage->nChannels]>hl)
&&
(itemp[i*localimage->widthStep+j*localimage->nChannels+1]<sh)
&&
(itemp[i*localimage->widthStep+j*localimage->nChannels+1]>sl)
&&
( itemp[i*localimage->widthStep+j*localimage->nChannels+2]<vh)
&&
( itemp[i*localimage->widthStep+j*localimage->nChannels+2]>vl) //previous 124
) {
itemp1[i*binaryscaling->widthStep+j]=0; //dark regions black rest white
}
else
itemp1[i*binaryscaling->widthStep+j]=255;
}}
// morphological clean-up of the binary mask
cvErode( binaryscaling, binaryscaling, NULL, 4);
cvDilate(binaryscaling, binaryscaling, NULL, 4);
CBlobResult blob;
CBlob *currentBlob=NULL;
blob=CBlobResult(binaryscaling,NULL,255);
// drop small noise blobs
blob.Filter(blob,B_EXCLUDE,CBlobGetArea(),B_LESS,500);
cvMerge(binaryscaling,binaryscaling,binaryscaling,NULL,blobbedscaling);
CBlob hand1,hand2; //two blobs,one for each hand
// largest blob by area -> hand2, second largest -> hand1
blob.GetNthBlob( CBlobGetArea(), 0, (hand2));
blob.GetNthBlob( CBlobGetArea(), 1, (hand1 ));
hand1.FillBlob(blobbedscaling,CV_RGB(0,0,255)); //fill the color of blob of hand one with blue
hand2.FillBlob(blobbedscaling,CV_RGB(0,255,0)); //fill the color of blob of hand two with green
coordinates (blobbedscaling,0);
}
示例9: on_trackbar
// threshold trackbar callback
void on_trackbar( int dummy )
{
if(!originalThr)
{
originalThr = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U,1);
}
if(!displayedImage)
{
displayedImage = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U,3);
}
// threshold input image
cvThreshold( original, originalThr, param1, 255, CV_THRESH_BINARY );
// get blobs and filter them using its area
CBlobResult blobs;
int i;
CBlob *currentBlob;
// find blobs in image
blobs = CBlobResult( originalThr, NULL, 255 );
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, param2 );
// display filtered blobs
cvMerge( originalThr, originalThr, originalThr, NULL, displayedImage );
for (i = 0; i < blobs.GetNumBlobs(); i++ )
{
currentBlob = blobs.GetBlob(i);
currentBlob->FillBlob( displayedImage, CV_RGB(255,0,0));
}
cvShowImage( wndname, displayedImage );
}
示例10: init
//.........这里部分代码省略.........
// Get the separate HSV color components of the color input image.
std::vector<Mat> channels(3);
split(img_hsv, channels);
Mat planeH = channels[0];
Mat planeS = channels[1];
Mat planeV = channels[2];
// Detect which pixels in each of the H, S and V channels are probably skin pixels.
threshold(planeH, planeH, 150, UCHAR_MAX, CV_THRESH_BINARY_INV);//18
threshold(planeS, planeS, 60, UCHAR_MAX, CV_THRESH_BINARY);//50
threshold(planeV, planeV, 170, UCHAR_MAX, CV_THRESH_BINARY);//80
// Combine all 3 thresholded color components, so that an output pixel will only
// be white if the H, S and V pixels were also white.
Mat imageSkinPixels = Mat(img_hsv.size(), CV_8UC3); // Greyscale output image.
bitwise_and(planeH, planeS, imageSkinPixels); // imageSkin = H {BITWISE_AND} S.
bitwise_and(imageSkinPixels, planeV, imageSkinPixels); // imageSkin = H {BITWISE_AND} S {BITWISE_AND} V.
// Assing the Mat (C++) to an IplImage (C), this is necessary because the blob detection is writtn in old opnCv C version
IplImage ipl_imageSkinPixels = imageSkinPixels;
// RECODING: record the video using the C container variable
// RECODING: store the size (in memory meaning) of the image for recording purpouse
//size = img_cam->getSize();
//videoFile.write((char*) ipl_imageSkinPixels.imageData, size/3);
// Set up the blob detection.
CBlobResult blobs;
blobs.ClearBlobs();
blobs = CBlobResult(&ipl_imageSkinPixels, NULL, 0); // Use a black background color.
// Ignore the blobs whose area is less than minArea.
blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobArea);
// ##### Gestures #####
std::cout << "Number of Blobs: " << blobs.GetNumBlobs() <<endl;
if(blobs.GetNumBlobs() == 0)
{
//picture empty
}
else if(blobs.GetNumBlobs() == 1)
{
//head detected
trackHead(getCenterPoint(blobs.GetBlob(0)->GetBoundingBox()).x, getCenterPoint(blobs.GetBlob(0)->GetBoundingBox()).y);
}
else if(blobs.GetNumBlobs() == 2 || blobs.GetNumBlobs() == 3)
{
//head + one hand || head + two hands
Rect rect[3];
int indexHead = -1, indexHandLeft = -1, indexHandRight = -1;
//Get Bounding Boxes
for(int i = 0; i< blobs.GetNumBlobs(); i++)
{
rect[i] = blobs.GetBlob(i)->GetBoundingBox();
}
//Detect Head and Hand indexes
if(blobs.GetNumBlobs() == 2)
{
// head and one hand
int indexHand = -1;
if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y)
{
示例11: findShadow
double findShadow(IplImage *l_img, int hue,int sat,int val,int threshold, double blobLowLimit,double blobHighLimit){
// Input HSV value of color blob your seeking, acceptable threshold of that color, and Min and Max blob sizes beeing sought out.
// Input HSV value of color blob your seeking, acceptable threshold of that color, and Min and Max blob sizes beeing sought out.
//Ouput: pointer to data array, size[#ofblobs*3+1]; Format data=[Number of Blobs, Area1,X of center1, y of center1, Area2,X of center2,y of center2,...,areaN,X of centerN, Y of centerN];
// Image variables
IplImage* local_copy = cvCloneImage(l_img);
IplImage* imageSmooth = cvCreateImage( cvGetSize(l_img),8,3);//Gausian Filtered image
IplImage* imageSuperSmooth = cvCreateImage( cvGetSize(l_img),8,3);//Gausian Filtered image
IplImage* imageHSV = cvCreateImage( cvGetSize(l_img),8,3); //HSV image
IplImage* i1 = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
IplImage* i2 = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
IplImage* i_ts = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
IplImage* planeH = cvCreateImage(cvGetSize(l_img),8,1); //Hue
IplImage* planeS = cvCreateImage(cvGetSize(l_img),8,1); //Saturation
IplImage* planeV = cvCreateImage(cvGetSize(l_img),8,1); //Brightness
IplImage* planeSmoothV = cvCreateImage(cvGetSize(l_img),8,1); //Brightness
IplImage* imageSmoothHSV = cvCreateImage( cvGetSize(l_img),8,3); //HSV image
IplImage* obsdetmask = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
IplImage* obsdetmask_dil = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
IplImage* obsdetmask_b = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
IplImage* obsdetmask_bdil = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
//Blob variables
CBlobResult mask_bls;
CBlob mask_bl;
CBlobResult blobs;
CBlob blob;
CBlobResult blobs1;
CBlob blob1;
CBlobGetXCenter getXCenter;
CBlobGetYCenter getYCenter;
//Output Variable
//Gausian Filter
cvSmooth(l_img,imageSmooth,CV_GAUSSIAN,13,13,0,0);
cvSmooth(l_img,imageSuperSmooth,CV_GAUSSIAN,41,41,0,0);
//cvShowImage("View2a",imageSmooth);
//Covert RGB to HSV
cvCvtColor(imageSmooth,imageHSV,CV_BGR2HSV);
cvCvtColor(imageSuperSmooth,imageSmoothHSV,CV_BGR2HSV);
cvCvtPixToPlane(imageSuperSmooth,NULL,NULL,planeSmoothV,0);
cvCvtPixToPlane(imageHSV, planeH,planeS,planeV,0);//Extract the 3 color components
cvSetImageROI(imageHSV,cvRect(0,imageHSV->height/3,imageHSV->width,imageHSV->height*2/3));
IplImage* planeH1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Hue
IplImage* planeS1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Saturation
IplImage* planeV1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Brightness
cvCvtPixToPlane(imageHSV, planeH1,planeS1,planeV1,0);//Extract the 3 color components
cvResetImageROI(imageHSV);
cvShowImage("Dark_Value",planeV);
cvShowImage("Dark_Sat",planeS);
cvShowImage("Dark_Hue",planeH);
cvSet(obsdetmask, cvScalar(0,0,0));
cv::waitKey(3);
int maxDark = 0;
int minDark = 255;
int minDarknessValue=0;
int maxDarknessValue = 0;
int midDarknessValue = 0;
//Filter image for desired Color, output image with only desired color highlighted remaining
for( int y = 0; y < planeH1->height; y++ ){
unsigned char* h = &CV_IMAGE_ELEM( planeH1, unsigned char, y, 0 );
unsigned char* s = &CV_IMAGE_ELEM( planeS1, unsigned char, y, 0 );
unsigned char* v = &CV_IMAGE_ELEM( planeV1, unsigned char, y, 0 );
for( int x = 0; x < planeH1->width*planeH1->nChannels; x += planeH1->nChannels ){
//if(x<5){ROS_INFO("hsv[x] is %d,%d,%d",h[x],v[x],x]);}
//int f= HSV_filter(h[x],s[x],v[x],threshold,minDarknessValue,maxDarknessValue,midDarknessValue,hue,sat,val);
int diff = abs((h[x]-hue));
if(((diff < threshold)||(v[x]<MIN_BRIGHT)||(s[x]<MIN_SAT)))
{
((uchar *)(obsdetmask->imageData + (y+planeH->height-planeH1->height)*obsdetmask->widthStep))[x]=255;
if(v[x]<minDark)
{minDark=v[x];}
if(v[x]>maxDark)
{maxDark=v[x];}
}
else
{
((uchar *)(obsdetmask->imageData + (y+planeH->height-planeH1->height)*obsdetmask->widthStep))[x]=0;
}
}
}//debug
cvDilate(obsdetmask,obsdetmask_dil,NULL,1);
cvShowImage("Dark_ObsDetPre",obsdetmask_dil);
mask_bls = CBlobResult(obsdetmask_dil,NULL,0);
mask_bls.Filter(mask_bls,B_EXCLUDE,CBlobGetArea(),B_LESS,MASK_MIN_BLOB); // Filter Blobs with min and max size
mask_bls.GetNthBlob( CBlobGetArea(), 0, mask_bl );
cvSet(obsdetmask_b, cvScalar(0,0,0));
mask_bl.FillBlob(obsdetmask_b,CV_RGB(255,255,255));
cvDilate(obsdetmask_b,obsdetmask_bdil,NULL,5);
cvShowImage("Dark_ObsDet",obsdetmask_bdil);
cvWaitKey(3);
//.........这里部分代码省略.........
示例12: on_startstopbutton_clicked
// starts the auto targeting sequence
// Grabs the current camera image, thresholds it, extracts blob targets
// of at least 150 px, converts their centroids to turret angles, and
// aims/fires at each in turn while honouring a stop request.
// BUGFIX: the function was missing its closing brace in this source
// (apparently truncated); restored to make the file well-formed.
void MainWindow::on_startstopbutton_clicked()
{
    shootingstopped = false;
    QImage* currimage = getQImage();
    n = currimage->width();
    k = currimage->height();
    IplImage* curriplimage = Qimage2IplImage(&currimage->convertToFormat(QImage::Format_RGB32));
    IplImage* threshedimage = threshimage(curriplimage);

    // Find candidate targets as blobs of at least 150 px area.
    CBlobResult blobs;
    CBlob* currentblob;
    blobs = CBlobResult(threshedimage, NULL, 0);
    blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 150 );
    int j = blobs.GetNumBlobs();
    if (j == 0)
    {
        QMessageBox::information(this, "No Targets", "No Targets Found!");
        cvReleaseImage(&threshedimage);
        cvReleaseImage(&curriplimage);
        return;
    }

    // Convert each blob centre to turret angles and queue it.
    CBlobGetXCenter XCenter;
    CBlobGetYCenter YCenter;
    for (int i = 0; i < blobs.GetNumBlobs(); i++)
    {
        tmptargetcenter = new targetcenter;
        currentblob = blobs.GetBlob(i);
        tmptargetcenter->x = XCenter(*currentblob);
        tmptargetcenter->y = YCenter(*currentblob);
        getangles(tmptargetcenter);
        targets.append(tmptargetcenter);
    }

    checkformissiles();
    ui->targetcountdisplay->display(targets.size());
    setupautobuttons();
    qApp->processEvents();
    ui->timeNumber->display(0);
    timeshooting.start(100);
    turr->initAngle();
    if (shootingstopped)
    {
        timeshooting.stop();
        targets.clear();
        return;
    }

    // Aim and shoot at each queued target.
    foreach (targetcenter* target, targets)
    {
        checkformissiles();
        qApp->processEvents();
        turr->setAngle(target->beta, target->betav);
        ui->shotcountdisplay->display(turr->currentmissilecount());
        if (shootingstopped)
        {
            timeshooting.stop();
            targets.clear();
            delete target;
            return;
        }
        ui->targetcountdisplay->display(ui->targetcountdisplay->value() - 1);
        qApp->processEvents();
        delete target;
    }
    // NOTE(review): curriplimage/threshedimage are not released on the
    // success path — kept as-is pending confirmation of ownership.
}
示例13: blobTracking
//==============================================================================
// Extracts blobs from the HSV mask, filters them by minimum size, fills
// each on a 3-channel display image, computes each blob's bounding-box
// centre, draws target reticles, and copies the result into the pFour
// composite image at the region below pImg.
// NOTE(review): pts[] is passed to TargetReticle without visible
// initialization — presumably TargetReticle fills it; verify.
// NOTE(review): the CBlobResult call passes originalThr as both image and
// mask — confirm that is the intended cvBlobsLib usage here.
void PanTiltCameraClass::blobTracking(IplImage* hsv_mask,
IplImage* pFour,
IplImage* pImg)
{
//--- Get blobs and filter them using the blob area
CBlobResult blobs;
CBlob *currentBlob;
//--- Create a thresholded image and display image --------------------
//--- Creates binary image
IplImage* originalThr = cvCreateImage(cvGetSize(hsv_mask), IPL_DEPTH_8U,1);
//--- Create 3-channel image
IplImage* display = cvCreateImage(cvGetSize(hsv_mask),IPL_DEPTH_8U,3);
//--- Copies the original
cvMerge( hsv_mask, hsv_mask, hsv_mask, NULL, display );
//--- Makes a copy for processing
cvCopy(hsv_mask,originalThr);
//--- Find blobs in image ---------------------------------------------
int blobThreshold = 0;
bool blobFindMoments = true;
blobs = CBlobResult( originalThr, originalThr, blobThreshold, blobFindMoments);
//--- filters blobs according to size and radius constraints
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, this->minBlobSize );
//--- display filtered blobs ------------------------------------------
//--- copies the original in (for background)
cvMerge( originalThr, originalThr, originalThr, NULL, display );
CvPoint pts[this->NUMBER_OF_CIRCLES];
//--- This sequence marks all the blobs
for (int i = 0; i < blobs.GetNumBlobs(); i++ )
{
currentBlob = blobs.GetBlob(i);
currentBlob->FillBlob( display, CV_RGB(0,0,255));
//--- Get blobs centerpoint (bounding-box midpoint)
CvPoint bcg;
bcg.x = (int)(currentBlob->MinX()+((currentBlob->MaxX()-currentBlob->MinX())/2));
bcg.y = (int)(currentBlob->MinY()+((currentBlob->MaxY()-currentBlob->MinY())/2));
//--- Print the CG on the picture
// NOTE(review): bcg is computed but never used by the reticle loop below.
char blobtext[40];
for(int k=0;k<this->NUMBER_OF_CIRCLES;k++)
{
sprintf(blobtext,"%d",k+1);
TargetReticle(display,&pts[k],blobtext,6,CV_RGB(255,0,0));
}//for
}//for each blob
//--- Set the ROI in the pFour image
cvSetImageROI(pFour,cvRect(pImg->width,pImg->height+80,pImg->width,pImg->height));
cvCopy(display,pFour);
//Reset region of interest
cvResetImageROI(display);
//Clean up
cvReleaseImage( &originalThr );
cvReleaseImage( &display);
}
示例14: markerDetect
// Continuously grabs frames, isolates a blue-ish HSV range, removes small
// blobs, finds the largest-area minimum-area rectangle among the contours,
// publishes its centre/angle on the "marker_data" ROS topic, and draws the
// rectangle on the displayed frame. Loops until ROS shutdown, a null
// frame, or the Esc key.
// NOTE(review): img_con is allocated every iteration and never released —
// leaks per frame.
// NOTE(review): if cvFindContours finds nothing, `contours` may be NULL
// and `contours->total` would dereference it; verify.
// NOTE(review): `rect` may be used uninitialized by cvBoxPoints when no
// contour passes the while loop.
void iptask::markerDetect(void)
{
IplImage * frame,*img_hsv,*img_proc,* new1;
CvMemStorage * storage = cvCreateMemStorage(0);
ros::NodeHandle n;
ros::Publisher marker = n.advertise<ikat_ip_data::ip_marker_data>("marker_data",3);
ros::Rate looprate(5);
int count = 0;
CvSeq * contours,*final_contour;
int total_con;
double maxarea;
marker_data * Data =(marker_data *)malloc(sizeof(marker_data));
CBlobResult blobs;
CBlob * currentblob;
CvPoint2D32f vertices[4];
//CvCapture * img_video=cvCaptureFromAVI("downward-pipe-15_56_17.avi");
// initial grab to size the scratch images (img is presumably a capture
// handle owned by the class — verify)
frame=cvQueryFrame(img);
cvNamedWindow("Image Actual");
cvNamedWindow("final Image");
img_hsv=cvCreateImage(cvGetSize(frame),8,3);
img_proc=cvCreateImage(cvGetSize(frame),8,1);
new1=cvCreateImage(cvGetSize(frame),8,1);
while(ros::ok())
{
ikat_ip_data::ip_marker_data msg;
IplImage * img_con=cvCreateImage(cvGetSize(frame),8,1);
frame=cvQueryFrame(img);
if(!frame)
break;
cvShowImage("Image Actual",frame);
// isolate the marker colour range in HSV
cvCvtColor(frame,img_hsv,CV_RGB2HSV);
cvInRangeS(img_hsv,cvScalar(100,100,100),cvScalar(120,170,255),img_proc);
cvSmooth(img_proc,img_proc,CV_GAUSSIAN,11,11);
cvErode(img_proc,img_proc);
// drop blobs smaller than 75 px, re-fill the rest into the mask
blobs=CBlobResult(img_proc,NULL,0);
blobs.Filter(blobs,B_EXCLUDE,CBlobGetArea(),B_LESS,75);
for (int i = 0; i < blobs.GetNumBlobs(); i++ )
{
currentblob = blobs.GetBlob(i);
currentblob->FillBlob(img_proc,cvScalar(255));
}
// find contours on the edge map and keep the largest min-area rect
cvCanny(img_proc,img_proc,10,200);
total_con=cvFindContours(img_proc,storage,&contours,sizeof(CvContour),CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
if(contours->total==0)
continue;
final_contour=cvApproxPoly(contours,sizeof(CvContour),storage,CV_POLY_APPROX_DP,1,1);
maxarea=0;
cvZero(img_con);
CvBox2D rect;
while(final_contour)
{
rect=cvMinAreaRect2(final_contour, storage);
if(rect.size.height*rect.size.width>maxarea)
{
Data->center.x=rect.center.x;
Data->center.y=rect.center.y;
Data->size.x=rect.size.width;
Data->size.y=rect.size.height;
Data->angle=rect.angle;
maxarea=rect.size.height*rect.size.width;
msg.Marker_data[0]=Data->center.x;
msg.Marker_data[1]=Data->center.y;
msg.Marker_data[2]=Data->angle;
}
final_contour=final_contour->h_next;
}
// draw the winning rectangle on the original frame
cvBoxPoints(rect,vertices);
cvLine(frame,cvPointFrom32f(vertices[0]),cvPointFrom32f(vertices[1]),cvScalarAll(255),2);
cvLine(frame,cvPointFrom32f(vertices[1]),cvPointFrom32f(vertices[2]),cvScalarAll(255),2);
cvLine(frame,cvPointFrom32f(vertices[2]),cvPointFrom32f(vertices[3]),cvScalarAll(255),2);
cvLine(frame,cvPointFrom32f(vertices[3]),cvPointFrom32f(vertices[0]),cvScalarAll(255),2);
ROS_INFO("center x :[%f]",msg.Marker_data[0]);
ROS_INFO("center y :[%f]",msg.Marker_data[1]);
ROS_INFO("angle : [%f]",msg.Marker_data[2]);
marker.publish(msg);
cvShowImage("final Image",frame);
char c=cvWaitKey(33);
if (c==27)
break;
ros::spinOnce();
++count;
looprate.sleep();
}
cvDestroyWindow("Image Actual");
cvDestroyWindow("final Image");
free(Data);
}
示例15: main
int main(int argc, char * argv[])
{
vector <string> imgNames;
vector <string> imgNamesMask;
char strFrame[20];
readImageSequenceFiles(imgNames, imgNamesMask);
list<TrackLine> trackLineArr;
// read org frame and forground for process
// you can modify it to read video by add a segment alg
for(unsigned int i = 40; i < imgNames.size() - 1; i++)
{
Mat frame = imread(imgNames[i]);
Mat grayImg;
cvtColor(frame, grayImg, CV_RGB2GRAY);
Mat maskImage = imread(imgNamesMask[i], 0);
// get blobs and filter them using its area
// use 'cvblobslib' to get the object blobs
threshold( maskImage, maskImage, 81, 255, CV_THRESH_BINARY );
medianBlur(maskImage, maskImage, 3);
IplImage ipl_maskImage = maskImage;
CBlobResult blobs = CBlobResult( &ipl_maskImage, NULL, 0 );
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 30 ); // filter blobs that area smaller than a certern num
list<CBlob *> remBlob;
for (int k = 0; k < blobs.GetNumBlobs(); k++)
{
remBlob.push_back(blobs.GetBlob(k));
}
printf("%d\n", trackLineArr.size());
for (list<TrackLine>::iterator trackIter = trackLineArr.begin(); trackIter != trackLineArr.end(); )
{
//kf predicition, get kfRect
Mat kfPrediction = (trackIter->kf).predict();
Point kfPrePt((int)(kfPrediction.at<float>(0)), (int)(kfPrediction.at<float>(1)));
Rect kfRect(kfPrePt.x - (trackIter->box).width / 2, kfPrePt.y - (trackIter->box).height / 2, (trackIter->box).width, (trackIter->box).height);
//ct predicition, get ctRect
int ctError = 0;
Rect ctRect(trackIter->box);
float score = (trackIter->ct).predicition(grayImg, ctRect);
rectangle(frame, kfRect, Scalar(0, 200, 0)); //green, kf predicition box
rectangle(frame, ctRect, Scalar(0, 0, 200)); //red, ct predicition box
//union predicit rectangle
//if they have no same area, we consider ct is wrong, because kalman is physical movement
float areaScale = (float)(sqrt((kfRect & ctRect).area() *1.0 / kfRect.area()));
Point movePoint((int)((ctRect.x - kfRect.x) * areaScale), (int)((ctRect.y - kfRect.y) * areaScale));
Rect unionPreRect = kfRect + movePoint;
//calc object box
Rect objRect;
int j = 0;
for (list<CBlob *>::iterator blobIter = remBlob.begin(); blobIter != remBlob.end(); )
{
Rect detRect((*blobIter)->GetBoundingBox());
float detArea = (float)((*blobIter)->Area());
if ((unionPreRect & detRect).area() > 0)
{
if (j++ == 0) objRect = detRect;
else objRect = objRect | detRect;
blobIter = remBlob.erase(blobIter);
}
else blobIter++;
}
// let box's area equal
float objArea = (float)(objRect.area());
objRect = Rect((int)(objRect.x + objRect.width / 2.0 - unionPreRect.width / 2.0),
(int)(objRect.y + objRect.height / 2.0 - unionPreRect.height / 2.0),
unionPreRect.width, unionPreRect.height);
float detAreaScale = (float)(sqrt(objArea * 1.0 / unionPreRect.area()));
if (detAreaScale > 1.0) detAreaScale = 1.0;
Point detMovePoint((int)((objRect.x - unionPreRect.x) * detAreaScale), (int)((objRect.y - unionPreRect.y) * detAreaScale));
Rect unionCorrRect = unionPreRect + detMovePoint;
// if detect area > 0
if (objArea > 0)
{
trackIter->box = unionCorrRect;
rectangle(frame, unionCorrRect, Scalar(200,0,0), 1);
//kf correct
Mat_<float> measurement(2,1);
measurement(0) = (float)((trackIter->box).x + (trackIter->box).width / 2.0);
measurement(1) = (float)((trackIter->box).y + (trackIter->box).height / 2.0);
(trackIter->kf).correct(measurement);
//ct update
(trackIter->ct).update(grayImg, trackIter->box);
trackIter++;
}
// else we beleve tracking miss
else
{
if ((trackIter->miss)++ == 5) trackIter = trackLineArr.erase(trackIter);
else trackIter++;
//.........这里部分代码省略.........