This article collects typical usage examples of the C++ method CBlob::Area. If you have been wondering what exactly CBlob::Area does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help; they are also a good way to get to know CBlob, the class the method belongs to.
The following shows 9 code examples of CBlob::Area, sorted by popularity by default.
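Before diving into the examples, here is a minimal, self-contained sketch of the pattern most of them share: label a binary image with CBlobResult, drop the small blobs, and read CBlob::Area for what is left. The header name, the image argument and the 100-pixel threshold are assumptions for illustration, not taken from the examples themselves.
#include "BlobResult.h"   // cvBlobsLib header (assumed name)
// Sketch only: sums the area of every blob of at least 100 pixels (placeholder
// threshold) found in a binary 8-bit image.
double totalForegroundArea( IplImage *binaryImg )
{
    CBlobResult blobs( binaryImg, NULL, 0 );                        // label connected components
    blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 100 );  // drop blobs under 100 px
    double total = 0.0;
    for (int i = 0; i < blobs.GetNumBlobs(); i++)
        total += blobs.GetBlob(i)->Area();                          // Area(): blob size in pixels
    return total;
}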
Example 1: operator
/**
- FUNCTION: CBlobGetCompactness
- FUNCTIONALITY: Calculates the compactness of the blob
( equal to 1 for circle shaped blobs, larger for any other shape )
- PARAMETERS:
- RESULT:
- RESTRICTIONS:
- AUTHOR: Ricard Borràs
- CREATION DATE: 25-05-2005.
- MODIFICATION: Date. Author. Description.
*/
double CBlobGetCompactness::operator()(CBlob &blob)
{
if( blob.Area() != 0.0 )
return (double) pow(blob.Perimeter(),2)/(4*CV_PI*blob.Area());
else
return 0.0;
}
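For a circle of radius r, Perimeter() is 2*pi*r and Area() is pi*r^2, so the expression above evaluates to (2*pi*r)^2 / (4*pi * pi*r^2) = 1; every other shape gives a larger value, and a blob with zero area returns 0. A minimal usage sketch of the functor, where blob stands for any CBlob obtained from a CBlobResult:
CBlobGetCompactness compactness;
double c = compactness( blob );   // 1.0 for a perfect disc, larger for elongated or ragged blobs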
Example 2: operator
/**
- FUNCTION: CBlobGetElongation
- FUNCTIONALITY: Calculates the elongation of the blob ( length/breadth )
- PARAMETERS:
- RESULT:
- RESTRICTIONS:
- See below for how the length and the breadth are
approximated
- AUTHOR: Ricard Borràs
- CREATION DATE: 25-05-2005.
- MODIFICATION: Date. Author. Description.
*/
double CBlobGetElongation::operator()(const CBlob &blob) const
{
double ampladaC,longitudC,amplada,longitud;
ampladaC=(double) (blob.Perimeter()+sqrt(pow(blob.Perimeter(),2)-16*blob.Area()))/4;
if(ampladaC<=0.0) return 0;
longitudC=(double) blob.Area()/ampladaC;
longitud=MAX( longitudC , ampladaC );
amplada=MIN( longitudC , ampladaC );
return (double) longitud/amplada;
}
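The code above treats the blob as a rectangle of length L and breadth W that has the same perimeter P and area A: from P = 2(L + W) and A = L*W, the two side lengths are the roots (P ± sqrt(P^2 - 16A)) / 4. `ampladaC` takes one root, `longitudC = A / ampladaC` the other, and the MAX/MIN pair divides the larger by the smaller. A tiny sanity-check sketch with a made-up 12x3 rectangle:
double P = 2 * (12 + 3);                      // perimeter of a 12x3 rectangle
double A = 12 * 3;                            // its area
double w = (P + sqrt(P * P - 16 * A)) / 4;    // = 12, the "+" root
double l = A / w;                             // = 3
// MAX(l, w) / MIN(l, w) == 4, the expected elongation of a 12x3 rectangle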
Example 3: cvCreateMemStorage
/**
- FUNCTION: CBlob
- FUNCTIONALITY: Copy constructor
- PARAMETERS:
- RESULT:
- RESTRICTIONS:
- AUTHOR: Ricard Borràs
- CREATION DATE: 25-05-2005.
- MODIFICATION: Date. Author. Description.
*/
CBlob::CBlob( const CBlob &src )
{
// copy the properties of the source blob into the current one
etiqueta = src.etiqueta;
exterior = src.exterior;
area = src.Area();
perimeter = src.Perimeter();
parent = src.parent;
minx = src.minx;
maxx = src.maxx;
miny = src.miny;
maxy = src.maxy;
sumx = src.sumx;
sumy = src.sumy;
sumxx = src.sumxx;
sumyy = src.sumyy;
sumxy = src.sumxy;
mean = src.mean;
stddev = src.stddev;
externPerimeter = src.externPerimeter;
// copy the edges of the source blob into the current one
CvSeqReader reader;
CvSeqWriter writer;
CvPoint edgeactual;
// create an empty sequence for the edges
m_storage = cvCreateMemStorage(0);
edges = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2,
sizeof(CvContour),
sizeof(CvPoint),m_storage);
cvStartReadSeq( src.Edges(), &reader);
cvStartAppendToSeq( edges, &writer );
for( int i=0; i< src.Edges()->total; i++)
{
CV_READ_SEQ_ELEM( edgeactual ,reader);
CV_WRITE_SEQ_ELEM( edgeactual , writer );
}
cvEndWriteSeq( &writer );
}
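A short sketch of what the deep copy above buys you (the setup around it is assumed): because the copy allocates its own m_storage and duplicates the edge sequence element by element, the copied edges and scalar properties stay valid even after the CBlobResult that produced the source is released. The parent pointer is copied as-is, so it is the one field that may dangle.
// Sketch only: "original" stands for a CBlob, e.g. *blobs.GetBlob(i).
CBlob *snapshot = new CBlob( original );   // deep copy: edges land in the snapshot's own m_storage
// ... the CBlobResult that produced "original" may be destroyed here ...
double area = snapshot->Area();            // still safe: area, perimeter and the copied edges live in the snapshot
delete snapshot;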
Example 4: blobDetect
void PSTouch::blobDetect(cv::Mat& image){
CBlobResult res(image,cv::Mat(),NUMCORES);
std::vector<TouchEvent> events;
qRegisterMetaType<std::vector<TouchEvent > >("std::vector<TouchEvent>");
for (unsigned int i = 0; i<res.GetNumBlobs(); i++){
CBlob blob = res.GetBlob(i);
if(blob.Area()<3) {
continue;
}
cv::Point point = blob.getCenter();
cv::Point3f camPoint(point.x,point.y,groundTruth->at<openni::DepthPixel>(point.y,point.x));
cv::Point2i p = transform->transformPointfromCamToProjector(camPoint);
if(p.x < 1200 && p.y < 700 && p.x>0 && p.y >0){
TouchEvent event(p,camPoint);
events.push_back(event);
}
}
if(events.size() >10){
qDebug("RECALIBRATE");
calibrateTouch();
}
emit updateEvents(events);
//Timing
timerCount++;
//qDebug()<<"Timer: "<< timerCount;
if(timerCount==60){
timerCount=0;
int x = timer.restart();
float fps = 60.0/((float)x/1000.0);   // 60 frames have elapsed since the previous restart
qDebug() << " working with: " << fps << "fps " << x;
timer.restart();
}
}
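The `blob.Area() < 3` check above skips tiny blobs one at a time; the same cut could also be made up front with CBlobResult::Filter, as the later examples do. A hedged alternative sketch (it assumes this build of the library offers the same Filter overload used in Examples 8 and 9; the threshold 3 comes from the code above):
res.Filter( res, B_EXCLUDE, CBlobGetArea(), B_LESS, 3 );   // drop blobs smaller than 3 px
for (int i = 0; i < res.GetNumBlobs(); i++) {
    CBlob blob = res.GetBlob(i);                           // every remaining blob has Area() >= 3
    // ... turn blob.getCenter() into a TouchEvent exactly as above ...
}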
Example 5: CBlobResult
void
Auvsi_Recognize::extractLetter( void )
{
typedef cv::Vec<unsigned char, 1> VT_binary;
#ifdef TWO_CHANNEL
typedef cv::Vec<T, 2> VT;
#else
typedef cv::Vec<T, 3> VT;
#endif
typedef cv::Vec<int, 1> IT;
// Erode input slightly
cv::Mat input;
cv::erode( _shape, input, cv::Mat() );
// Remove any small white blobs left over
CBlobResult blobs;
CBlob * currentBlob;
CBlob biggestBlob;
IplImage binaryIpl = input;
blobs = CBlobResult( &binaryIpl, NULL, 0 );
blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );
for (int i = 0; i < blobs.GetNumBlobs(); i++ )
{
currentBlob = blobs.GetBlob(i);
currentBlob->FillBlob( &binaryIpl, cvScalar(0));
}
// Perform k-means on this region only
int areaLetter = (int)biggestBlob.Area();
cv::Mat kMeansInput = cv::Mat( areaLetter, 1, _image.type() );
// Discard if we couldn't extract a letter
if( areaLetter <= 0 )
{
_letter = cv::Mat( _shape );
_letter = cv::Scalar(0);
return;
}
cv::MatIterator_<VT_binary> binaryIterator = input.begin<VT_binary>();
cv::MatIterator_<VT_binary> binaryEnd = input.end<VT_binary>();
cv::MatIterator_<VT> kMeansIterator = kMeansInput.begin<VT>();
for( ; binaryIterator != binaryEnd; ++binaryIterator )
{
if( (*binaryIterator)[0] > 0 )
{
(*kMeansIterator) = _image.at<VT>( binaryIterator.pos() );
++kMeansIterator;
}
}
// Get k-means labels
cv::Mat labels = doClustering<T>( kMeansInput, 2, false );
int numZeros = areaLetter - cv::countNonZero( labels );
bool useZeros = numZeros < cv::countNonZero( labels );
// Reshape into original form
_letter = cv::Mat( _shape.size(), _shape.type() );
_letter = cv::Scalar(0);
binaryIterator = input.begin<VT_binary>();
binaryEnd = input.end<VT_binary>();
cv::MatIterator_<IT> labelsIterator = labels.begin<IT>();
for( int index = 0; binaryIterator != binaryEnd; ++binaryIterator )
{
if( (*binaryIterator)[0] > 0 )
{
// Whichever label was the minority, we make that value white and all other values black
unsigned char value = (*labelsIterator)[0];
if( useZeros )
{
    if( value )
        value = 0;
    else
        value = 255;
}
else
{
    if( value )
        value = 255;
    else
        value = 0;
}
_letter.at<VT_binary>( binaryIterator.pos() ) = VT_binary( value );
++labelsIterator;
}
}
}
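The nested ifs near the end keep whichever of the two k-means clusters covers fewer pixels, on the assumption that the letter occupies less area than the shape it sits on. The same decision can be written more compactly (a sketch, relying on cv::kmeans labelling the two clusters exactly 0 and 1):
unsigned char minority = useZeros ? 0 : 1;                             // label of the smaller cluster
unsigned char value = ( (*labelsIterator)[0] == minority ) ? 255 : 0;  // white only for the minority label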
Example 6: convertToGray
void
Auvsi_Recognize::extractShape( void )
{
typedef cv::Vec<T, 1> VT;
// Reduce input to two colors
cv::Mat reducedColors = doClustering<T>( _image, 2 );
cv::Mat grayScaled, binary;
// Make output grayscale
grayScaled = convertToGray( reducedColors );
//cv::cvtColor( reducedColors, grayScaled, CV_RGB2GRAY );
// Make binary
double min, max;
cv::minMaxLoc( grayScaled, &min, &max );
cv::threshold( grayScaled, binary, min, 1.0, cv::THRESH_BINARY );
// ensure that background is black, image white
if( binary.at<VT>(0, 0)[0] > 0.0f )
cv::threshold( grayScaled, binary, min, 1.0, cv::THRESH_BINARY_INV );
binary.convertTo( binary, CV_8U, 255.0f );
// Fill in all black regions smaller than largest black region with white
CBlobResult blobs;
CBlob * currentBlob;
IplImage binaryIpl = binary;
blobs = CBlobResult( &binaryIpl, NULL, 255 );
// Get area of biggest blob
CBlob biggestBlob;
blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );
// Remove all blobs of smaller area
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );
for (int i = 0; i < blobs.GetNumBlobs(); i++ )
{
currentBlob = blobs.GetBlob(i);
currentBlob->FillBlob( &binaryIpl, cvScalar(255));
}
// Fill in all small white regions black
blobs = CBlobResult( &binaryIpl, NULL, 0 );
blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );
for (int i = 0; i < blobs.GetNumBlobs(); i++ )
{
currentBlob = blobs.GetBlob(i);
currentBlob->FillBlob( &binaryIpl, cvScalar(0));
}
binary = cv::Scalar(0);
biggestBlob.FillBlob( &binaryIpl, cvScalar(255));
_shape = binary;
}
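The two filter-and-fill passes above boil down to "keep only the largest connected component, with its holes filled". Ignoring the hole-filling pass, the core of that pattern can be sketched with the same cvBlobsLib calls (the helper name is made up; the input is assumed to be a binary 8-bit IplImage):
void keepOnlyBiggestBlob( IplImage *binary )
{
    CBlobResult blobs( binary, NULL, 0 );              // label the white components
    CBlob biggest;
    blobs.GetNthBlob( CBlobGetArea(), 0, biggest );    // 0 = largest blob by area
    cvZero( binary );                                  // wipe the image ...
    biggest.FillBlob( binary, cvScalar(255) );         // ... and repaint only the winner
}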
Example 7: main
//......... part of the code omitted here .........
IplImage temp = (IplImage)skin_masked;
blobs = CBlobResult(&temp,NULL,1);
blobs = CBlobResult(skin_masked,Mat(),NUMCORES);
int numBlobs = blobs.GetNumBlobs();
if(0 == numBlobs){
cout << "can't find blobs!" << endl;
continue;
}
// detects robot as a blob
CBlobResult robot_blobs;
IplImage robot_temp = (IplImage) white_masked;
robot_blobs = CBlobResult(&robot_temp, NULL, 1);
robot_blobs = CBlobResult(white_masked, Mat(), NUMCORES);
if(0 == robot_blobs.GetNumBlobs()){
cout << "can't find robot_blobs!" << endl;
continue;
}
CBlob *curblob;
CBlob* blob_1;
CBlob* blob_2;
CBlob* leftBlob;
CBlob* rightBlob;
CBlob* robotBlob;
copy.setTo(Vec3b(0,0,0));
// chooses the two largest blobs for the hands
Point center_1, center_2;
int max_1 = 0;
int max_2 = 0;
int maxArea_1 = 0;
int maxArea_2 = 0;
for(int i=0;i<numBlobs;i++){
int area = blobs.GetBlob(i)->Area();
if(area > maxArea_1){
maxArea_2 = maxArea_1;
maxArea_1 = area;
max_2 = max_1;
max_1 = i;
} else if(area > maxArea_2){
maxArea_2 = area;
max_2 = i;
}
}
int i_1 = max_1;
int i_2 = max_2;
double area_left, area_right;
Rect rect_1;
Rect rect_2;
//determines which hand is left/right
blob_1 = blobs.GetBlob(i_1);
blob_2 = blobs.GetBlob(i_2);
center_1 = blob_1->getCenter();
center_2 = blob_2->getCenter();
bool left_is_1 = (center_1.x < center_2.x);
leftBlob = (left_is_1)? blob_1 : blob_2;
rightBlob = (left_is_1)? blob_2 : blob_1;
center_left = leftBlob->getCenter();
center_right = rightBlob->getCenter();
//determine the number of valid hands
//validity is decided by whether or not the hand followed a logical movement,
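Example 7 is truncated at both ends; the part that matters for CBlob::Area is the single pass above that records the indices of the two largest blobs (the two hands). The same idea as a standalone sketch, with made-up names:
// Hypothetical helper: indices of the two largest blobs, or -1 if not present.
void twoLargestBlobs( CBlobResult &blobs, int &first, int &second )
{
    first = second = -1;
    double a1 = 0, a2 = 0;                                  // largest and second-largest areas so far
    for (int i = 0; i < blobs.GetNumBlobs(); i++) {
        double area = blobs.GetBlob(i)->Area();
        if (area > a1)      { a2 = a1; second = first; a1 = area; first = i; }
        else if (area > a2) { a2 = area; second = i; }
    }
}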
Example 8: cvCreateImage
IplImage* blobDetection2(IplImage* imgThreshRed, IplImage* imgThreshGreen) {
// get blobs and filter them using its area
int i, j;
// int areaBlob = 100;   // (areaBlob itself is used below but defined outside this excerpt)
float distMark = 10;
CBlobResult blobsRed, blobsGreen, whiteRedBlobs, whiteGreenBlobs;
CBlob *currentBlob;
double px, py;
// Create Image
IplImage* displayedImage = cvCreateImage(cvGetSize(imgThreshRed), IPL_DEPTH_8U, 3);
// find all the RED related blobs in the image
blobsRed = CBlobResult(imgThreshRed, NULL, 0);
// find all the GREEN related blobs in the image
blobsGreen = CBlobResult(imgThreshGreen, NULL, 0);
// keep only the blobs with an area of at least 1 pixel and put
// them in the whiteRedBlobs / whiteGreenBlobs variables
blobsRed.Filter(whiteRedBlobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 1.0);
blobsGreen.Filter(whiteGreenBlobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 1.0);
#ifdef DEBUG_PRINT
printf("White Blobs: %d\n", whiteBlobs.GetNumBlobs());
#endif
// display filtered blobs
cvMerge(imgThreshRed, imgThreshRed, imgThreshRed, NULL, displayedImage);
// RED
CvPoint2D32f redCenter[whiteRedBlobs.GetNumBlobs()];
for (i = 0; i < whiteRedBlobs.GetNumBlobs(); i++) {
currentBlob = whiteRedBlobs.GetBlob(i);
px = (currentBlob->MaxX() + currentBlob->MinX()) / 2.0;
py = (currentBlob->MaxY() + currentBlob->MinY()) / 2.0;
redCenter[i] = cvPoint2D32f(px, py);
#ifdef DEBUG_PRINT
printf("%2.2f\t%2.2f\n", px, py);
#endif
if (currentBlob->Area() > areaBlob) {
// Add Cross to the image
currentBlob->FillBlob(displayedImage, CV_RGB(255, 0, 0));
cvCircle(displayedImage, cvPointFrom32f(redCenter[i]), 2, cvScalar(255, 0, 0), 10, 8, 0);
}
}
// GREEN
CvPoint2D32f greenCenter[whiteGreenBlobs.GetNumBlobs()];
for (i = 0; i < whiteGreenBlobs.GetNumBlobs(); i++) {
currentBlob = whiteGreenBlobs.GetBlob(i);
px = (currentBlob->MaxX() + currentBlob->MinX()) / 2.0;
py = (currentBlob->MaxY() + currentBlob->MinY()) / 2.0;
greenCenter[i] = cvPoint2D32f(px, py);
#ifdef DEBUG_PRINT
printf("%2.2f\t%2.2f\n", px, py);
#endif
if (currentBlob->Area() > areaBlob) {
// Add Cross to the image
currentBlob->FillBlob(displayedImage, CV_RGB(255, 0, 0));
cvCircle(displayedImage, cvPointFrom32f(greenCenter[i]), 2, cvScalar(0, 255, 0), 10, 8, 0);
}
}
// Populating the list of potential robots
potRobList.robNum = 0;
for (i = 0; i < robMax; i++)
potRobList.robList[i].active = 0;
int redUsage[whiteRedBlobs.GetNumBlobs()];
int greenUsage[whiteGreenBlobs.GetNumBlobs()];
for (i = 0; i < whiteRedBlobs.GetNumBlobs(); i++)
redUsage[i] = 0;
for (j = 0; j < whiteGreenBlobs.GetNumBlobs(); j++)
greenUsage[j] = 0;
// Detect Robots
float distCenter[whiteRedBlobs.GetNumBlobs()][whiteGreenBlobs.GetNumBlobs()];
for (i = 0; i < min(whiteRedBlobs.GetNumBlobs(), robMax); i++) {
currentBlob = whiteRedBlobs.GetBlob(i);
if (currentBlob->Area() > areaBlob) {
for (j = 0; j < min(whiteGreenBlobs.GetNumBlobs(), robMax); j++) {
currentBlob = whiteGreenBlobs.GetBlob(j);
if (currentBlob->Area() > areaBlob) {
distCenter[i][j] = computeDist(redCenter[i], greenCenter[j]);
//printf("[%d] - [%d]: %2.2f\n", i, j, distCenter[i][j]);
//printf("[%d] - [%d]: %2.2f\n", i, j, distCenter[i][j]);
// Print a connection line if this could be a robot
if (redUsage[i] == 0 && greenUsage[j] == 0 && checkDistMarker(distCenter[i][j], distMark)) {
//......... part of the code omitted here .........
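The red and green centres above are just the midpoints of each blob's bounding box (MinX/MaxX and MinY/MaxY), not pixel-weighted centroids. Pulled out as a standalone sketch (the helper name is made up):
CvPoint2D32f blobBoxCenter( CBlob *blob )
{
    double px = ( blob->MaxX() + blob->MinX() ) / 2.0;   // horizontal midpoint of the bounding box
    double py = ( blob->MaxY() + blob->MinY() ) / 2.0;   // vertical midpoint
    return cvPoint2D32f( px, py );
}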
Example 9: findBiggestBlobImage
bool findBiggestBlobImage(IplImage* img, int color, IplImage* &output)
{
CBlobResult blobs;
CBlob *currentBlob;
blobs = CBlobResult( img, NULL, 0 );
blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, m_minBlobSize );
double biggestArea = m_minBlobSize;
int biggestBlob = -1;
for (int i = 0; i < blobs.GetNumBlobs(); i++ )
{
currentBlob = blobs.GetBlob(i);
double blobArea = currentBlob->Area();
if(blobArea > biggestArea)
{
biggestBlob = i;
biggestArea = blobArea;
}
}
if(biggestBlob >= 0)
{
int x = (int) blobs.GetBlob(biggestBlob)->MinX();
int y = (int) blobs.GetBlob(biggestBlob)->MinY();
int width= (int) blobs.GetBlob(biggestBlob)->MaxX()-x;
int height= (int) blobs.GetBlob(biggestBlob)->MaxY()-y;
IplImage* temp = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U, 1);
cvZero(temp);   // cvCreateImage leaves the buffer uninitialized; clear it so only blob pixels end up at 255
IplImage* temp2 = cvCreateImage(cvSize(width, height),IPL_DEPTH_8U, 1);
IplImage* result = cvCreateImage(cvSize(width, height),IPL_DEPTH_8U, 1);
blobs.GetBlob(biggestBlob)->FillBlob(temp,cvScalar(255),x,y);
cvSetImageROI(temp, cvRect(x, y, width, height));
cvCopy(temp,temp2);
uchar* tempData = (uchar *)(temp2->imageData);
uchar* resultData = (uchar *)(result->imageData);
// walk row by row so the 4-byte row padding (widthStep) of IplImage is respected
for (int row = 0; row < height; row++)
{
uchar* tempRow = tempData + row * temp2->widthStep;
uchar* resultRow = resultData + row * result->widthStep;
for (int col = 0; col < width; col++)
{
if (tempRow[col] == 255) resultRow[col] = color;
else resultRow[col] = 0;
}
}
cvResize(result, output);
cvReleaseImage(&temp);
cvReleaseImage(&temp2);
cvReleaseImage(&result);
return true;
}
else
return false;
}
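A hedged usage sketch for the function above. It reads the member m_minBlobSize, so it clearly belongs to a class; `detector`, `binaryInput`, the 64x64 size and the colour value 255 below are all placeholders. Note that `output` has to be allocated by the caller, because the function only cvResize's the extracted blob into it:
IplImage *mask = cvCreateImage( cvSize(64, 64), IPL_DEPTH_8U, 1 );
if( detector.findBiggestBlobImage( binaryInput, 255, mask ) )
{
    // mask now holds the largest blob of binaryInput, scaled to 64x64 and drawn with value 255
}
cvReleaseImage( &mask );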