

C++ CBlob Class Code Examples

This article collects and summarizes typical usage examples of the C++ CBlob class. If you are struggling with questions such as: what exactly does the C++ CBlob class do? How is CBlob used? What does CBlob look like in real code? Then the curated class examples below may help.


Below, 15 code examples of the CBlob class are shown, ordered by popularity by default.
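
All of the examples share the same cvblobslib workflow: build a CBlobResult from a binary image, filter the blobs, then iterate over them. Here is a minimal, self-contained sketch of that pattern (a hedged illustration only; the header name BlobResult.h, the input file name, and the area threshold are assumptions that may vary with your cvblobslib and OpenCV 1.x setup):

#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include "BlobResult.h"

int main()
{
    // Load a binary (black-background) image to label
    IplImage* img = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (!img) return -1;

    // Extract blobs: (image, mask, background color)
    CBlobResult blobs = CBlobResult(img, NULL, 0);

    // Drop blobs smaller than 100 pixels
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 100);

    for (int i = 0; i < blobs.GetNumBlobs(); i++)
    {
        CBlob* b = blobs.GetBlob(i);
        printf("blob %d: area=%.0f bbox=(%.0f,%.0f)-(%.0f,%.0f)\n",
               i, b->Area(), b->MinX(), b->MinY(), b->MaxX(), b->MaxY());
    }

    cvReleaseImage(&img);
    return 0;
}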

Example 1: ShapeModel

SHModel* ShapeModel( CvCapture* g_capture,StaticBGModel* BGModel , BGModelParams* BGParams){

	int num_frames = 0;
	int total_blobs=0;
	float Sumatorio = 0;
	float SumatorioDes = 0;
	IplImage* frame = NULL;

	STFrame* frameData = NULL;
	SHModel* Shape = NULL;

	CBlobResult blobs;
	CBlob *currentBlob;

	IplImage* ImGris = cvCreateImage(cvGetSize( BGModel->Imed ), 8, 1 );
	IplImage* Imblob = cvCreateImage(cvGetSize( BGModel->Imed ), 8, 3 );
	IplImage* lastBG = cvCreateImage( cvGetSize( BGModel->Imed ),8, 1 );
	IplImage* lastIdes = cvCreateImage( cvGetSize( BGModel->Imed ), IPL_DEPTH_32F, 1);
	cvZero(Imblob);
	// Initialize the shape model structure

	Shape = ( SHModel *) malloc( sizeof( SHModel));
	if ( !Shape ) {error(4);return 0;}
	Shape->FlyAreaDes = 0;
	Shape->FlyAreaMedia=0;
	//Zero out the values of the areas vector


	//BLOB EXTRACTION AND COMPUTATION OF MEDIAN/MEAN AND STANDARD DEVIATION OVER ALL FRAMES
	cvSetCaptureProperty( g_capture,1,BGParams->initDelay ); // set the start position (property 1 = CV_CAP_PROP_POS_FRAMES)
	while( num_frames < ShParams->FramesTraining ){
		frame = cvQueryFrame( g_capture );
		if ( !frame ) {
			error(2);
			break;
		}
		if ( (cvWaitKey(10) & 255) == 27 ) break;

		ImPreProcess( frame, ImGris, BGModel->ImFMask, 0, BGModel->DataFROI);

		// Load the background data
		if(!frameData ) { // on the first iteration, initialize the dynamic model from the static one
			// Initialize the data structure for the new frame
			frameData = InitNewFrameData( frame );
			cvCopy(  BGModel->Imed,frameData->BGModel);
			cvSet(frameData->IDesvf, cvScalar(1));
			cvCopy(  BGModel->Imed,lastBG);
		}
		else{	// load the most recent background parameters.
			cvCopy( lastBG, frameData->BGModel);
			cvCopy( lastIdes,frameData->IDesvf );
		}
	//	obtain the FG mask and the list with its blob data.
		//// BACKGROUND UPDATE
		// Background update
		// set parameters

		UpdateBGModel( ImGris,frameData->BGModel,frameData->IDesvf, BGParams, BGModel->DataFROI, BGModel->ImFMask );
		/////// BACKGROUND DIFFERENCE. Obtain the foreground mask
		BackgroundDifference( ImGris, frameData->BGModel,frameData->IDesvf, frameData->FG ,BGParams, BGModel->DataFROI);

		// save these images to initialize the next frame
		cvCopy( frameData->BGModel, lastBG);
		cvCopy(  frameData->IDesvf,lastIdes);

		//Get the blobs and exclude those whose size is not of interest
//		cvSetImageROI(  frameData->FG , BGModel->DataFROI);

		blobs = CBlobResult( frameData->FG, NULL, 100, true );
		blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(),B_GREATER,100);
		blobs.Filter( blobs, B_EXCLUDE, CBlobGetPerimeter(),B_GREATER,1000);

		int j = blobs.GetNumBlobs();// number of blobs found in this frame

		total_blobs=total_blobs+j; // Tally of blobs found across all frames

		//Walk blob by blob and extract the AREA feature of each one

		for (int i = 0; i < blobs.GetNumBlobs(); i++ ){ //for 1

			currentBlob = blobs.GetBlob(i);

			CBlobGetArea(); // note: constructs and discards a functor; this statement has no effect
			if(ShParams->SHOW_DATA_AREAS) {
				//printf("Area blob %d = %f ",i,currentBlob->area);
			}
			//Estimate the mean of the areas

			Sumatorio = Sumatorio + currentBlob->area;
			SumatorioDes = SumatorioDes + currentBlob->area*currentBlob->area;

			muestrearAreas( currentBlob->area);
			currentBlob->FillBlob( Imblob, CV_RGB(255,0,0));

		}//End of for 1

		Shape->FlyAreaMedia = Sumatorio / total_blobs;
		Shape->FlyAreaDes = (SumatorioDes / total_blobs) - Shape->FlyAreaMedia*Shape->FlyAreaMedia;

		num_frames += 1;
//......... remainder of code omitted .........
Developer ID: beetecu, Project: trackingdrosophila, Lines: 101, Source: ShapeModel.cpp
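
A note on the statistics in this example: FlyAreaMedia and FlyAreaDes are maintained incrementally from a running sum and a running sum of squares, via the identity Var(A) = E[A^2] - (E[A])^2. Despite the name, FlyAreaDes therefore appears to hold the variance rather than the standard deviation. A compact sketch of the same idiom (my illustration, not code from the project):

#include <cmath>
#include <vector>

// Population mean/variance from a sum and a sum of squares,
// mirroring Sumatorio / SumatorioDes in the example above.
static void areaStats(const std::vector<double>& areas,
                      double& mean, double& variance)
{
    double sum = 0.0, sumSq = 0.0;
    for (double a : areas) { sum += a; sumSq += a * a; }
    const double n = static_cast<double>(areas.size());
    mean = sum / n;
    variance = sumSq / n - mean * mean;  // take sqrt() for the deviation
}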

Example 2: binaris

/**
- FUNCTION: ComponentLabeling
- FUNCTIONALITY: Computes the binary components (blobs) of an image using 8-connectivity
- PARAMETERS:
	- inputImage: image to segment (pixel values different than blobColor are treated as background)
	- maskImage: if not NULL, all the pixels equal to 0 in mask are skipped in input image
	- backgroundColor: color of background (ignored pixels)
	- blobs: blob vector destination
- RESULT:
	- 
- RESTRICTIONS:
	- 
- AUTHOR: rborras
- CREATION DATE: 2008/04/21
- MODIFICATION: Date. Author. Description.
- NOTE: Algorithm based on "A linear-time component labeling algorithm using contour tracing technique", 
		F. Chang et al.
*/
bool ComponentLabeling(	IplImage* inputImage,
						IplImage* maskImage,
						unsigned char backgroundColor,
						Blob_vector &blobs )
{
	int i,j;
	// row major vector with visited points 
	bool *visitedPoints, *pVisitedPoints, internalContour, externalContour;
	unsigned char *pInputImage, *pMask, *pAboveInputImage, *pBelowInputImage,
				  *pAboveMask, *pBelowMask;
	int imageWidth, imageHeight, currentLabel, contourLabel;
	// row major vector with labelled image 
	t_labelType *labelledImage, *pLabels;
	//! current blob pointer
	CBlob *currentBlob;
	CvSize imageSizes;
	CvPoint currentPoint;

	// verify input image
	if( !CV_IS_IMAGE( inputImage ) )
		return false;

	// verify that the input image and the mask image have the same size
	if( maskImage )
	{
		if( !CV_IS_IMAGE(maskImage) || 
			maskImage->width != inputImage->width || 
			maskImage->height != inputImage->height )
		return false;
	}
	else
	{
		pMask = NULL;
		pAboveMask = NULL;
		pBelowMask = NULL;
	}

	imageSizes = cvSize(inputImage->width,inputImage->height);
	
	imageWidth = inputImage->width;
	imageHeight = inputImage->height;

	// create auxiliary buffers
	labelledImage = (t_labelType*) malloc( inputImage->width * inputImage->height * sizeof(t_labelType) );
	visitedPoints = (bool*) malloc( inputImage->width * inputImage->height * sizeof(bool) );

	// initialize it to 0
	memset(labelledImage, 0, inputImage->width * inputImage->height * sizeof(t_labelType) ) ;
	memset(visitedPoints, false, inputImage->width * inputImage->height * sizeof(bool) ) ;

	// initialize pointers and label counter
	pLabels = labelledImage;
	pVisitedPoints = visitedPoints;
	currentLabel = 1;

	for (j = 0; j < imageHeight; j++ )
	{
		// don't check whether we are on the first or last row; that is verified on pointer access
		pAboveInputImage = (unsigned char*) inputImage->imageData + (j-1) * inputImage->widthStep;
		pBelowInputImage = (unsigned char*) inputImage->imageData + (j+1) * inputImage->widthStep;
	
		pInputImage = (unsigned char*) inputImage->imageData + j * inputImage->widthStep;

		if( maskImage )
		{
			pMask = (unsigned char*) maskImage->imageData + j * maskImage->widthStep;
			// don't check whether we are on the first or last row; that is verified on pointer access
			pAboveMask = (unsigned char*) maskImage->imageData + (j-1) * maskImage->widthStep;
			pBelowMask = (unsigned char*) maskImage->imageData + (j+1) * maskImage->widthStep;

		}
		
		for (i = 0; i < imageWidth; i++, pInputImage++, pMask++, pAboveInputImage++, pBelowInputImage++,
										 pAboveMask++, pBelowMask++ )
		{
			// ignore background pixels or 0 pixels in mask
			if ( (*pInputImage == backgroundColor) || (maskImage && *pMask == 0 ))
			{
				pLabels++;
				pVisitedPoints++;
				continue;
			}
//......... remainder of code omitted .........
Developer ID: lupnfer, Project: camc, Lines: 101, Source: ComponentLabeling.cpp
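
The function above keeps two tightly packed row-major buffers (labelledImage and visitedPoints) alongside the IplImage input, whose rows are padded to widthStep bytes. A small sketch of the two addressing schemes, to make the pointer arithmetic explicit (illustration only; t_labelType comes from the cvblobslib headers):

// IplImage rows are widthStep bytes apart (widthStep >= width; padding allowed)
static unsigned char pixelAt(const IplImage* img, int x, int y)
{
    return ((unsigned char*)img->imageData)[y * img->widthStep + x];
}

// The auxiliary label buffer is tightly packed: exactly 'width' labels per row
static t_labelType labelAt(const t_labelType* labels, int width, int x, int y)
{
    return labels[y * width + x];
}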

Example 3: blobify

void Blobs::blobify()
{
	//mm not entirely sure yet, but I think this function might just "draw" the blobs based on the color model of each pixel; the color model is already determined in the "packed" data (see unpack(), where the color-model information is extracted from the packed data)
	
	
    uint32_t i, j, k;
    CBlob *blob;
    uint16_t *blobsStart;
    uint16_t numBlobsStart, invalid, invalid2;
    uint16_t left, top, right, bottom;
    //uint32_t timer, timer2=0;

    unpack(); //mm as is clear in unpack(), at this point, we already know the model to which each blob belongs.

    // copy blobs into memory //mm does this refer to the unpack() above??
    invalid = 0;
	
    // mutex keeps interrupt routine from stepping on us
    m_mutex = true;
	
	//mm iterate through models:
    for (i=0, m_numBlobs=0; i<NUM_MODELS; i++)
    {
		//mm iterate through blobs in this model:
        for (j=m_numBlobs*5, k=0, blobsStart=m_blobs+j, numBlobsStart=m_numBlobs, blob=m_assembler[i].finishedBlobs;
             blob && m_numBlobs<m_maxBlobs && k<m_maxBlobsPerModel; blob=blob->next, k++)
        {
            if (blob->GetArea()<(int)m_minArea)
                continue;
            blob->getBBox((short &)left, (short &)top, (short &)right, (short &)bottom);
            m_blobs[j + 0] = i+1;
            m_blobs[j + 1] = left;
            m_blobs[j + 2] = right;
            m_blobs[j + 3] = top;
            m_blobs[j + 4] = bottom;
            m_numBlobs++;
            j += 5;

        }
        //setTimer(&timer);
        if (true)
        {
            while(1)
            {
                invalid2 = combine2(blobsStart, m_numBlobs-numBlobsStart);
                if (invalid2==0)
                    break;
                invalid += invalid2;
            }
        }
        //timer2 += getTimer(timer);
    }
    //setTimer(&timer);
    invalid += combine(m_blobs, m_numBlobs);
    if (false)
    {
        m_codedBlobs = (BlobB *)(m_blobs + m_numBlobs*5);
        processCoded();
    }
    if (invalid)
    {
        invalid2 = compress(m_blobs, m_numBlobs);
        m_numBlobs -= invalid2;
        if (invalid2!=invalid)
            cprintf("**** %d %d\n", invalid2, invalid);

    }
    //timer2 += getTimer(timer);
    //cprintf("time=%d\n", timer2); // never seen this greater than 200us.  or 1% of frame period

    // reset read index-- new frame
    m_blobReadIndex = 0;
    m_mutex = false;

    // free memory
    for (i=0; i<NUM_MODELS; i++)
        m_assembler[i].Reset();

#if 0
    static int frame = 0;
    if (m_numBlobs>0)
        cprintf("%d: blobs %d %d %d %d %d\n", frame, m_numBlobs, m_blobs[1], m_blobs[2], m_blobs[3], m_blobs[4]);
    else
        cprintf("%d: blobs 0\n", frame);
    frame++;
#endif
}
Developer ID: mjlm, Project: pixy, Lines: 87, Source: blobs.cpp
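
The blob list produced by blobify() is a flat uint16_t array with five words per blob: model (stored 1-based), left, right, top, bottom. A hedged sketch of reading one record back out (the struct and function names are mine, not Pixy's):

#include <stdint.h>

struct BlobRecord
{
    uint16_t model;                     // 1-based model index (i+1 above)
    uint16_t left, right, top, bottom;  // bounding box
};

static BlobRecord getBlobRecord(const uint16_t* blobs, uint16_t index)
{
    const uint16_t* p = blobs + index * 5;  // five words per blob
    BlobRecord r = { p[0], p[1], p[2], p[3], p[4] };
    return r;
}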

Example 4: blobify

void Blobs::blobify()
{
    uint32_t i, j, k;
    bool colorCode;
    CBlob *blob;
    uint16_t *blobsStart;
    uint16_t numBlobsStart, invalid, invalid2;
    uint16_t left, top, right, bottom;
    //uint32_t timer, timer2=0;

    unpack();

    // copy blobs into memory
    invalid = 0;
    // mutex keeps interrupt routine from stepping on us
    m_mutex = true;
    for (i=0, m_numBlobs=0; i<NUM_MODELS; i++)
    {
        colorCode = m_clut->getType(i+1)==CL_MODEL_TYPE_COLORCODE;

        for (j=m_numBlobs*5, k=0, blobsStart=m_blobs+j, numBlobsStart=m_numBlobs, blob=m_assembler[i].finishedBlobs;
             blob && m_numBlobs<m_maxBlobs && k<m_maxBlobsPerModel; blob=blob->next, k++)
        {
            if ((colorCode && blob->GetArea()<MIN_COLOR_CODE_AREA) ||
                    (!colorCode && blob->GetArea()<(int)m_minArea))
                continue;
            blob->getBBox((short &)left, (short &)top, (short &)right, (short &)bottom);
            m_blobs[j + 0] = i+1;
            m_blobs[j + 1] = left;
            m_blobs[j + 2] = right;
            m_blobs[j + 3] = top;
            m_blobs[j + 4] = bottom;
            m_numBlobs++;
            j += 5;

        }
        //setTimer(&timer);
        if (!colorCode) // do not combine color code models
        {
            while(1)
            {
                invalid2 = combine2(blobsStart, m_numBlobs-numBlobsStart);
                if (invalid2==0)
                    break;
                invalid += invalid2;
            }
        }
        //timer2 += getTimer(timer);
    }
    //setTimer(&timer);
    invalid += combine(m_blobs, m_numBlobs);
    if (m_codedMode)
    {
        m_codedBlobs = (BlobB *)(m_blobs + m_numBlobs*5);
        // calculate number of codedblobs left
        processCoded();
    }
    if (invalid || m_codedMode)
    {
        invalid2 = compress(m_blobs, m_numBlobs);
        m_numBlobs -= invalid2;
    }
    //timer2 += getTimer(timer);
    //cprintf("time=%d\n", timer2); // never seen this greater than 200us.  or 1% of frame period

    // reset read index-- new frame
    m_blobReadIndex = 0;
    m_mutex = false;

    // free memory
    for (i=0; i<NUM_MODELS; i++)
        m_assembler[i].Reset();

#if 0
    static int frame = 0;
    if (m_numBlobs>0)
        cprintf("%d: blobs %d %d %d %d %d\n", frame, m_numBlobs, m_blobs[1], m_blobs[2], m_blobs[3], m_blobs[4]);
    else
        cprintf("%d: blobs 0\n", frame);
    frame++;
#endif
}
Developer ID: Srinivas-E, Project: pixy, Lines: 82, Source: blobs.cpp

Example 5: findBiggestBlobImage

	bool findBiggestBlobImage(IplImage* img, int color, IplImage* &output)
	{
		CBlobResult blobs;
		CBlob *currentBlob;

		blobs = CBlobResult( img, NULL, 0 );
		blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, m_minBlobSize );

		double biggestArea = m_minBlobSize;
		int biggestBlob = -1;

		for (int i = 0; i < blobs.GetNumBlobs(); i++ )
		{
			currentBlob = blobs.GetBlob(i);
			double blobArea = currentBlob->Area();
			if(blobArea > biggestArea) 
			{
				biggestBlob = i;
				biggestArea = blobArea;
			}
		}

		if(biggestBlob >= 0)
		{
			int x = (int) blobs.GetBlob(biggestBlob)->MinX();
			int y = (int) blobs.GetBlob(biggestBlob)->MinY();
			int width= (int) blobs.GetBlob(biggestBlob)->MaxX()-x;
			int height= (int) blobs.GetBlob(biggestBlob)->MaxY()-y;

			IplImage* temp = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U, 1);
			IplImage* temp2 = cvCreateImage(cvSize(width, height),IPL_DEPTH_8U, 1);
			IplImage* result = cvCreateImage(cvSize(width, height),IPL_DEPTH_8U, 1);

			if(biggestBlob>=0) blobs.GetBlob(biggestBlob)->FillBlob(temp,cvScalar(255),x,y); // note: this check is redundant; biggestBlob >= 0 is already guaranteed here

			cvSetImageROI(temp, cvRect(x, y, width, height));

			cvCopy(temp,temp2);

			uchar* tempData;

			uchar* resultData;

			tempData = (uchar *)(temp2->imageData);
			resultData = (uchar *) (result->imageData);

			for (int j = 0; j < width*height; j++)
			{
				if (tempData[j]==255) resultData[j] = color;
				else	resultData[j] = 0;
			}

			cvResize(result, output);

			cvReleaseImage(&temp);
			cvReleaseImage(&temp2);
			cvReleaseImage(&result);

			return true;
		}
		else
			return false;
	}
Developer ID: anasancho, Project: _2RealFramework, Lines: 63, Source: ShapeRecordingBlock.cpp
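
findBiggestBlobImage() is a method of a class not shown in the excerpt (it reads the member m_minBlobSize), so the following is only a hypothetical usage sketch with placeholder file names and sizes:

IplImage* mask = cvLoadImage("mask.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage* out  = cvCreateImage(cvSize(64, 64), IPL_DEPTH_8U, 1);

// Extract the biggest blob, recolor it to gray level 128, resize to 64x64
if (mask && findBiggestBlobImage(mask, 128, out))
    cvSaveImage("biggest_blob.png", out);

cvReleaseImage(&mask);
cvReleaseImage(&out);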

Example 6: main

/*
arg1: Width of each frame
arg2: Height of each frame
arg3: Target frames per second of the program
arg4: Maximum number of blobs to track. Each blob MAY correspond to a person in front of the camera
*/
int main(int argc, char* argv[])
{
    if (argc < 5)
    {
        cout << "Too few arguments to the program. Exiting...\n";
        return 0;
    }

    int width, height, fps, numberOfBlobs;
    try
    {
        //Read the arguments
        width = atoi(argv[1]);
        height = atoi(argv[2]);
        fps = atoi(argv[3]);
        numberOfBlobs = atoi(argv[4]);
        //Done reading arguments
    }
    catch(...)
    {
        cout << "One or more arguments are invalid!. Exiting...\n";
        return 0;
    }


    /*
    int width = 320;
    int height = 240;
    int fps = 10;
    int numberOfBlobs = 2;
    */

    tempImageV4L = cvCreateImage(cvSize(width, height), 8, 3);
    frameNumber = 0;

    //Beginning initialising cameras
    rightCamera = new Camera("/dev/video0", width, height, fps);
    leftCamera = new Camera("/dev/video1", width, height, fps);
	//leftCamera = rightCamera; //If only one camera is available, uncomment this line and comment the line above this.
    //Done initialising cameras

    //Waste some frames so as to get the cameras running in full flow
    WasteNFrames(10);

    //Beginning capturing background
    backImageRight = GetNextCameraShot(rightCamera);
    backImageLeft = GetNextCameraShot(leftCamera);
    frameNumber++;
    cvtColor(backImageRight, backImageRight, CV_BGR2HSV);
    cvtColor(backImageLeft, backImageLeft, CV_BGR2HSV);
    //Done capturing background

    //General Stuff
    Mat motionImageRight(backImageRight.rows, backImageRight.cols, CV_8UC1);
    Mat motionImageLeft(backImageLeft.rows, backImageLeft.cols, CV_8UC1);
    Mat HSVImageRight, HSVImageLeft;
    Mat displayImageRight, displayImageLeft;
    //End of General Stuff


    while (1) //The infinite loop
    {
        //Beginning getting camera shots
        rightImage = GetNextCameraShot(rightCamera);
        leftImage = GetNextCameraShot(leftCamera);
        frameNumber++;
        //Done getting camera shots


        //Beginning getting motion images
        HSVImageRight = rightImage.clone();
        cvtColor(HSVImageRight, HSVImageRight, CV_BGR2HSV);
        CompareWithBackground(HSVImageRight, backImageRight, motionImageRight);
        medianBlur(motionImageRight, motionImageRight, 3);

        HSVImageLeft = leftImage.clone();
        cvtColor(HSVImageLeft, HSVImageLeft, CV_BGR2HSV);
        CompareWithBackground(HSVImageLeft, backImageLeft, motionImageLeft);
        medianBlur(motionImageLeft, motionImageLeft, 3);
        //Ended getting motion images

        cout << "\nFor frame #" << frameNumber << " :\n";

        //Beginning Getting Blobs
        IplImage  imageblobPixels = motionImageRight;
        CBlobResult blobs;
        blobs = CBlobResult(&imageblobPixels, NULL, 0);	// Use a black background color.
        int minArea = 100 / ((640 / width) * (640 / width));
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minArea);
        int foundBlobs = blobs.GetNumBlobs();
        //Ended Getting Blobs

        cout << "Found " << foundBlobs << " motion blobs\n";

//......... remainder of code omitted .........
Developer ID: ruppeshnalwaya1993, Project: SmartLAB-v1.0, Lines: 101, Source: main_stereo.cpp

Example 7: CBlobResult

void
Auvsi_Recognize::extractLetter( void )
{
	typedef cv::Vec<unsigned char, 1> VT_binary;
	#ifdef TWO_CHANNEL
		typedef cv::Vec<T, 2> VT;
	#else
		typedef cv::Vec<T, 3> VT;
	#endif
	typedef cv::Vec<int, 1> IT;
	
	// Erode input slightly
	cv::Mat input;
	cv::erode( _shape, input, cv::Mat() );

	// Remove any small white blobs left over
	CBlobResult blobs;
	CBlob * currentBlob;
	CBlob biggestBlob;
	IplImage binaryIpl = input;
	
	blobs = CBlobResult( &binaryIpl, NULL, 0 );
	blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );
	
	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );
	
	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{
    	currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( &binaryIpl, cvScalar(0));
	}
	
	// Perform k-means on this region only
	int areaLetter = (int)biggestBlob.Area();
	cv::Mat kMeansInput = cv::Mat( areaLetter, 1, _image.type() );

	// Discard if we couldn't extract a letter
	if( areaLetter <= 0 )
	{
		_letter = cv::Mat( _shape );
		_letter = cv::Scalar(0);
		return;
	}

	cv::MatIterator_<VT_binary> binaryIterator = input.begin<VT_binary>();
	cv::MatIterator_<VT_binary> binaryEnd = input.end<VT_binary>();
	cv::MatIterator_<VT> kMeansIterator = kMeansInput.begin<VT>();

	for( ; binaryIterator != binaryEnd; ++binaryIterator )
	{
		if( (*binaryIterator)[0] > 0 )
		{
			(*kMeansIterator) = _image.at<VT>( binaryIterator.pos() );
			++kMeansIterator;
		}
	}

	// Get k-means labels
	cv::Mat labels = doClustering<T>( kMeansInput, 2, false );	
	int numZeros = areaLetter - cv::countNonZero( labels );
	bool useZeros = numZeros < cv::countNonZero( labels );

	// Reshape into original form
	_letter = cv::Mat( _shape.size(), _shape.type() );
	_letter = cv::Scalar(0);

	binaryIterator = input.begin<VT_binary>();
	binaryEnd = input.end<VT_binary>();
	cv::MatIterator_<IT> labelsIterator = labels.begin<IT>();

	for( int index = 0; binaryIterator != binaryEnd; ++binaryIterator )
	{
		if( (*binaryIterator)[0] > 0 )
		{
			// Whichever label was the minority, we make that value white and all other values black
			unsigned char value = (*labelsIterator)[0];

			if( useZeros )
				if( value )
					value = 0;
				else
					value = 255;
			else
				if( value )
					value = 255;
				else
					value = 0;

			_letter.at<VT_binary>( binaryIterator.pos() ) = VT_binary( value );
			++labelsIterator;
		}
	}
	
	// Attempt to deal with any spurious locations that are left
	//	If we can be fairly confident that one of the blobs left is not a letter, remove it
//......... remainder of code omitted .........
Developer ID: UCSD-AUVSI, Project: OCR, Lines: 101, Source: Auvsi_Recognize.cpp
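
The interesting decision in extractLetter() is the minority rule: after 2-class k-means over the letter region, whichever cluster has fewer pixels is assumed to be the letter and painted white. That rule in isolation (a sketch of the logic above, not project code):

// Map a k-means label (0 or 1) to a binary pixel value, making the
// minority cluster white, exactly as the useZeros branch above does.
static unsigned char letterValue(int label, int numZeros, int numOnes)
{
    const bool zeroIsMinority = numZeros < numOnes;
    if (zeroIsMinority)
        return label == 0 ? 255 : 0;
    return label != 0 ? 255 : 0;
}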

Example 8: cvCloneImage

/*
 * thread for displaying the opencv content
 */
void *cv_threadfunc (void *ptr) {
	IplImage* timg = cvCloneImage(rgbimg); // Image we do our processing on
	IplImage* dimg = cvCloneImage(rgbimg); // Image we draw on
	CvSize sz = cvSize( timg->width & -2, timg->height & -2);
	IplImage* outimg = cvCreateImage(sz, 8, 3);

	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* squares; // Sequence for squares - sets of 4 points
	CvSeq* contours; // Raw contours list
	CvSeq* result; // Single contour being processed

	CBlobResult blobs;
	CBlob *currentBlob;

	IplImage *pyr = cvCreateImage(cvSize(sz.width/2, sz.height/2), 8, 1);

	// Set region of interest
	cvSetImageROI(timg, cvRect(0, 0, sz.width, sz.height));
	cvSetImageROI(dimg, cvRect(0, 0, sz.width, sz.height));

	// Processing and contours
	while (1) {
		squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

		pthread_mutex_lock( &mutex_rgb );
		cvCopy(rgbimg, dimg, 0);
		cvCopy(rgbimg, timg, 0);
		pthread_mutex_unlock( &mutex_rgb );

		// BLUR TEST
		// cvPyrDown(dimg, pyr, 7);
		// cvPyrUp(pyr, timg, 7);

		// DILATE TEST
		IplConvKernel* element = cvCreateStructuringElementEx(5, 5, 2, 2, 0);
		IplConvKernel* element2 = cvCreateStructuringElementEx(3, 3, 1, 1, 0);
		cvDilate(timg, timg, element, 2);
		cvErode(timg, timg, element2, 3);

		// THRESHOLD TEST 
		cvThreshold(timg, timg, 200, 255, CV_THRESH_BINARY);

		// Output processed or raw image.
		cvCvtColor(timg, outimg, CV_GRAY2BGR);

		// BLOB TEST
		blobs = CBlobResult( timg, (IplImage*)NULL, 0, true );
		// blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 50 );
		
		printf("Blobs: %d\n", blobs.GetNumBlobs());

		CBlob biggestBlob;
		blobs.GetNthBlob( CBlobGetArea(), 1, biggestBlob );
		biggestBlob.FillBlob( outimg, CV_RGB(255, 0, 0) );
		CvSeq* dest;
		biggestBlob.GetConvexHull(dest);
		
		// for (int i = 0; i < blobs.GetNumBlobs(); i++ )
		// {
		// 	currentBlob = blobs.GetBlob(i);
		// 	currentBlob->FillBlob( outimg, CV_RGB(255,0,0) );
		// }
		

//		// CONTOUR FINDING
//		cvFindContours(timg, storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
//
//		while (contours)
//		{
//			// Approximate contour, accuracy proportional to perimeter of contour; may want to tune accuracy.
//			result = cvApproxPoly(contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, cvContourPerimeter(contours) * 0.02, 0);
//			// Filter small contours and contours w/o 4 vertices (filters noise, finds rectangles)
//			if (result->total == 4 && 
//				fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 600 && 
//				cvCheckContourConvexity(result))
//			{
//				// Skipped checking whether angles were close to 90 degrees here; may want to implement.
//				// Probably also want to check if it's square enough to filter out ex. long windows.
//
//				for (int i = 0; i < 4; i++)
//				{
//					// Write vertices to output sequence
//					cvSeqPush(squares, (CvPoint*)cvGetSeqElem(result, i));
//				}
//			}
//
//			// Take next contour
//			contours = contours->h_next;
//		}
//
//
//		// DRAW RECTANGLES
//		CvSeqReader reader;
//		cvStartReadSeq(squares, &reader, 0);
//
//		// Read 4 points at a time
//		CvPoint pt[4];
//......... remainder of code omitted .........
Developer ID: CRRobotics, Project: Kinect, Lines: 101, Source: testblobs.cpp

Example 9: cc_getMaxBlob

int32_t cc_getMaxBlob(uint32_t *qvals, uint32_t numRls, int16_t *bdata)
{
	
	int16_t* c_components = new int16_t[MAX_BLOBS*4];
	
	uint32_t result;//, prebuf;
	
	CBlobAssembler blobber;
	
	int32_t row;
	uint32_t i, startCol, length;
	uint8_t model;

	for (i=0, row=-1; i<numRls; i++)
	{
			if (qvals[i]==0)
			{
					row++;
					continue;
			}
			model = qvals[i]&0x03;
			qvals[i] >>= 3;
			startCol = qvals[i]&0x1ff;
			qvals[i] >>= 9;
			length = qvals[i]&0x1ff;
			if(!handleRL(&blobber, model, row, startCol, length))
				break;
	}
	
	blobber.EndFrame();
	blobber.SortFinished();

	int16_t top, right, bottom, left;
	CBlob *blob;
	blob = blobber.finishedBlobs;
	if (blob->GetArea()>MIN_AREA)
	{
		blob->getBBox(left, top, right, bottom);
		bdata[0] = left;
		bdata[1] = right;
		bdata[2] = top;
		bdata[3] = bottom;
	}
	else 
		bdata[0] = -1;

#if 0	
	//
	// Take Finished blobs and return with chirp
	//
	CBlob *blob, *temp;
	blob = blobber.finishedBlobs;
	
	uint32_t cc_num = 0;
	temp = blob;
	while (temp)
	{
		int16_t top, right, bottom, left;
		temp->getBBox(left, top, right, bottom);
		
		// Don't want objects with area less than 9...
		if ((right-left)*(bottom-top) < 9)
			break;
		
		temp = temp->next;
		cc_num++;
	}
	
	// Remove the rest that we don't care about
	/*while(temp)
	{
		CBlob *next = temp->next;
		temp->~CBlob();
		temp = NULL;
		temp = next;
	}*/
	
	cc_num = (cc_num < 15) ? cc_num : MAX_BLOBS;

	// Populate return w/ result
	//void* mem = malloc(sizeof(int16_t)*cc_num*4);
	//if (mem == NULL)
	//	int i = 0;
	//free(mem);
	//int16_t* c_components = new int16_t[cc_num*4];
	//g_mem += sizeof(int16_t)*cc_num*4;
	memset((void *)c_components, 0, sizeof(uint16_t)*cc_num*4);

	for (int i = 0; i < cc_num; i++)
	{
		int16_t top, right, bottom, left;
		blob->getBBox(left, top, right, bottom);
		c_components[(i*4)+0] = top;
		c_components[(i*4)+1] = right;
		c_components[(i*4)+2] = bottom;
		c_components[(i*4)+3] = left;

		blob = blob->next;
	}
	
//......... remainder of code omitted .........
Developer ID: UIKit0, Project: pixy, Lines: 101, Source: conncomp.cpp
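
The decoder at the top of cc_getMaxBlob() implies a packed run-length word format: bits [2:0] carry the model (only the low two bits are read), bits [11:3] the start column, bits [20:12] the run length, and an all-zero word advances to the next row. A sketch of a matching encoder (my reconstruction from the decode logic, not Pixy source):

#include <stdint.h>

// Pack one run-length word in the layout the decoder above unpacks.
static uint32_t packRL(uint8_t model, uint16_t startCol, uint16_t length)
{
    return  (uint32_t)(model    & 0x07)          // bits [2:0]
         | ((uint32_t)(startCol & 0x1ff) <<  3)  // bits [11:3]
         | ((uint32_t)(length   & 0x1ff) << 12); // bits [20:12]
}
// A literal 0 word (no run) marks the start of a new row.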

Example 10: main

 int main()  
 {  
     CBlobResult blobs;    
     CBlob *currentBlob;   
     CvPoint pt1, pt2;  
     CvRect cvRect;  
     int key = 0;  
     IplImage* frame = 0;  
   
     // Initialize capturing live feed from video file or camera  
     CvCapture* capture = cvCaptureFromFile( "MOV.MPG" );  
   
     // Get the frames per second  
     int fps = ( int )cvGetCaptureProperty( capture,  
                                            CV_CAP_PROP_FPS );    
   
     // Can't get device? Complain and quit  
     if( !capture )  
     {  
         printf( "Could not initialize capturing...\n" );  
         return -1;  
     }  
   
     // Windows used to display input video with bounding rectangles  
     // and the thresholded video  
     cvNamedWindow( "video" );  
     cvNamedWindow( "thresh" );        
   
     // An infinite loop  
     while( key != 'x' ) 
     { 
         // If we couldn't grab a frame... quit  
         if( !( frame = cvQueryFrame( capture ) ) )  
             break;        
   
         // Get object's thresholded image (blue = white, rest = black)  
         IplImage* imgThresh = GetThresholdedImageHSV( frame );        
   
         // Detect the white blobs from the black background  
         blobs = CBlobResult( imgThresh, NULL, 0 );    
   
         // Exclude white blobs smaller than the given value (10)    
         // The bigger the last parameter, the bigger the blobs need    
         // to be for inclusion    
         blobs.Filter( blobs,  
                       B_EXCLUDE,  
                       CBlobGetArea(),  
                       B_LESS,  
                       10 );           
   
         // Attach a bounding rectangle for each blob discovered  
         int num_blobs = blobs.GetNumBlobs();  
   
         for ( int i = 0; i < num_blobs; i++ )    
         {                 
             currentBlob = blobs.GetBlob( i );               
             cvRect = currentBlob->GetBoundingBox();  
   
             pt1.x = cvRect.x;  
             pt1.y = cvRect.y;  
             pt2.x = cvRect.x + cvRect.width;  
             pt2.y = cvRect.y + cvRect.height;  
   
             // Attach bounding rect to blob in original video input  
             cvRectangle( frame,  
                          pt1,   
                          pt2,  
                          cvScalar(0, 0, 0, 0),  
                          1,  
                          8,  
                          0 );  
         }  
   
         // Add the black and white and original images  
         cvShowImage( "thresh", imgThresh );  
         cvShowImage( "video", frame );  
   
         // Optional - used to slow up the display of frames  
         key = cvWaitKey( 2000 / fps );  
   
         // Prevent memory leaks by releasing thresholded image  
         cvReleaseImage( &imgThresh );        
     }  
   
     // We're through with using the camera.   
     cvReleaseCapture( &capture );  
   
     return 0;  
 }  
Developer ID: akashwar, Project: Eye-NAB, Lines: 89, Source: blob1.cpp

Example 11: main

 int main() {
  CvPoint pt1,pt2;
  CvRect regt;
   CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,144);
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,216);	 
  // Create a window in which the captured images will be presented
   cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   // Show the image captured from the camera in the window and repeat
   while ( 1 ) {
     // Get one frame
     IplImage* frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }
     int modfheight, modfwidth;

     modfheight = frame->height;
     modfwidth = frame->width;
     // create modified frame with 1/4th the original size
     IplImage* modframe = cvCreateImage(cvSize((int)(modfwidth/4),(int)(modfheight/4)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
     // create HSV(Hue, Saturation, Value) frame
     IplImage* hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
     // create a frame within threshold. 
     IplImage* threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
     cvInRangeS(hsvframe,cvScalar(30, 25, 150),cvScalar(60, 60, 220),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     // created dilated image
     IplImage* dilframe = cvCreateImage(cvGetSize(threshframe),8,1);
     cvDilate(threshframe,dilframe,NULL,2); //cvDilate(input frame, output frame, mask, number of times to dilate)

     CBlobResult blobs; 
     blobs = CBlobResult(dilframe,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs.Filter(blobs,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob;
     blobs.GetNthBlob(CBlobGetArea(),0,biggestblob); //GetNthBlob(criteria, number, output) Get only the largest blob based on CBlobGetArea()
     // get 4 points to define the rectangle
     pt1.x = biggestblob.MinX()*4;
     pt1.y = biggestblob.MinY()*4;
     pt2.x = biggestblob.MaxX()*4;
     pt2.y = biggestblob.MaxY()*4;
     cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob

     cvShowImage( "mywindow", frame); // show output image
     // Do not release the frame!
     //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27 ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   cvDestroyWindow( "mywindow" );
   return 0;
 }
Developer ID: bhuneshwar21, Project: AUV, Lines: 62, Source: colortest1.cpp

Example 12: findShadow

double findShadow(IplImage *l_img, int hue,int sat,int val,int threshold, double blobLowLimit,double blobHighLimit){
	// Input: the HSV value of the color blob you're seeking, an acceptable threshold for that color, and the min and max blob sizes being sought.
	//Output: pointer to data array, size [#ofblobs*3+1]; Format: data=[Number of Blobs, Area1, X of center1, Y of center1, Area2, X of center2, Y of center2, ..., AreaN, X of centerN, Y of centerN];
    
	// Image variables
	IplImage* local_copy = cvCloneImage(l_img);
	IplImage* imageSmooth = cvCreateImage( cvGetSize(l_img),8,3);//Gausian Filtered image
	IplImage* imageSuperSmooth = cvCreateImage( cvGetSize(l_img),8,3);//Gausian Filtered image
	IplImage* imageHSV = cvCreateImage( cvGetSize(l_img),8,3); //HSV image
	IplImage* i1 = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
	IplImage* i2 = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
	IplImage* i_ts = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
	IplImage* planeH = cvCreateImage(cvGetSize(l_img),8,1); //Hue
	IplImage* planeS = cvCreateImage(cvGetSize(l_img),8,1); //Saturation
	IplImage* planeV = cvCreateImage(cvGetSize(l_img),8,1); //Brightness
	IplImage* planeSmoothV = cvCreateImage(cvGetSize(l_img),8,1); //Brightness
	IplImage* imageSmoothHSV = cvCreateImage( cvGetSize(l_img),8,3); //HSV image
	IplImage* obsdetmask = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	IplImage* obsdetmask_dil = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	IplImage* obsdetmask_b = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	IplImage* obsdetmask_bdil = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	//Blob variables
	CBlobResult mask_bls;
	CBlob	mask_bl;
	CBlobResult blobs;
	CBlob blob;
	CBlobResult blobs1;
	CBlob blob1;
	CBlobGetXCenter getXCenter;
	CBlobGetYCenter getYCenter;
	//Output Variable
	//Gausian Filter
	cvSmooth(l_img,imageSmooth,CV_GAUSSIAN,13,13,0,0);
	cvSmooth(l_img,imageSuperSmooth,CV_GAUSSIAN,41,41,0,0);
	//cvShowImage("View2a",imageSmooth);
	
	
	
	//Convert RGB to HSV
	cvCvtColor(imageSmooth,imageHSV,CV_BGR2HSV);
	cvCvtColor(imageSuperSmooth,imageSmoothHSV,CV_BGR2HSV);
	cvCvtPixToPlane(imageSuperSmooth,NULL,NULL,planeSmoothV,0);
	cvCvtPixToPlane(imageHSV, planeH,planeS,planeV,0);//Extract the 3 color components
	cvSetImageROI(imageHSV,cvRect(0,imageHSV->height/3,imageHSV->width,imageHSV->height*2/3));
	IplImage* planeH1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Hue
	IplImage* planeS1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Saturation
	IplImage* planeV1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Brightness
	cvCvtPixToPlane(imageHSV, planeH1,planeS1,planeV1,0);//Extract the 3 color components
	cvResetImageROI(imageHSV);
	
	
	cvShowImage("Dark_Value",planeV);
	cvShowImage("Dark_Sat",planeS);
	cvShowImage("Dark_Hue",planeH);
	cvSet(obsdetmask, cvScalar(0,0,0));
	cv::waitKey(3);
	
	
	int maxDark = 0;
	int minDark = 255;
	int minDarknessValue=0;
	int maxDarknessValue = 0;
	int midDarknessValue = 0;
	//Filter the image for the desired color; output an image where only the desired color remains highlighted
	for( int y = 0; y < planeH1->height; y++ ){
		unsigned char* h = &CV_IMAGE_ELEM( planeH1, unsigned char, y, 0 );
		unsigned char* s = &CV_IMAGE_ELEM( planeS1, unsigned char, y, 0 );
		unsigned char* v = &CV_IMAGE_ELEM( planeV1, unsigned char, y, 0 );
		for( int x = 0; x < planeH1->width*planeH1->nChannels; x += planeH1->nChannels ){
		  //if(x<5){ROS_INFO("hsv[x] is %d,%d,%d",h[x],v[x],x]);}
			//int f= HSV_filter(h[x],s[x],v[x],threshold,minDarknessValue,maxDarknessValue,midDarknessValue,hue,sat,val);
			int diff = abs((h[x]-hue));
			if(((diff < threshold)||(v[x]<MIN_BRIGHT)||(s[x]<MIN_SAT)))
			{ 
			  ((uchar *)(obsdetmask->imageData + (y+planeH->height-planeH1->height)*obsdetmask->widthStep))[x]=255;
			   if(v[x]<minDark)
			   {minDark=v[x];}
			    if(v[x]>maxDark)
			    {maxDark=v[x];}
			}
			else
			{
			  ((uchar *)(obsdetmask->imageData + (y+planeH->height-planeH1->height)*obsdetmask->widthStep))[x]=0;
			}
		}
	}//debug
	cvDilate(obsdetmask,obsdetmask_dil,NULL,1);
	cvShowImage("Dark_ObsDetPre",obsdetmask_dil);
	mask_bls = CBlobResult(obsdetmask_dil,NULL,0);
	mask_bls.Filter(mask_bls,B_EXCLUDE,CBlobGetArea(),B_LESS,MASK_MIN_BLOB); // Filter Blobs with min and max size
	mask_bls.GetNthBlob( CBlobGetArea(), 0, mask_bl );
	cvSet(obsdetmask_b, cvScalar(0,0,0));
	mask_bl.FillBlob(obsdetmask_b,CV_RGB(255,255,255));
	cvDilate(obsdetmask_b,obsdetmask_bdil,NULL,5);
	cvShowImage("Dark_ObsDet",obsdetmask_bdil);
	cvWaitKey(3);
//......... remainder of code omitted .........
Developer ID: jhawke, Project: cs7630_robotics, Lines: 101, Source: raptor_find_dark.cpp

Example 13: locator

	void locator()
	{
		namedWindow("Tracking");
		int hMin, hMax, sMin, sMax, vMin, vMax,area_min;
		hMin = 0;
		//hMax = 124; // night values/???
		hMax = 255;
		//sMin = 95;
		sMin = 126;
		sMax = 255;
		//vMin = 139;
		vMin = 173;
		vMax = 255;
		area_min = 100;
		Mat smoothed, hsvImg, t_img;
		createTrackbar("blob min area","Tracking" ,&area_min ,1000);
		createTrackbar("Hue Min", "Tracking", &hMin, 255);
		createTrackbar("Hue Max", "Tracking", &hMax, 255);
		createTrackbar("Sat Min", "Tracking", &sMin, 255);
		createTrackbar("Sat Max", "Tracking", &sMax, 255);
		createTrackbar("Val Min", "Tracking", &vMin, 255);
		createTrackbar("Val MaX", "Tracking", &vMax, 255);
		while(ros::ok())
		{
			Mat source = imageB;
			Mat copy = imageB.clone();
			GaussianBlur(source, smoothed, Size(9,9), 4);
			cvtColor(smoothed, hsvImg, CV_BGR2HSV);
			inRange(hsvImg, Scalar(hMin, sMin, vMin), Scalar(hMax, sMax, vMax), t_img);

			CBlobResult blob;
			IplImage i_img = t_img;
			blob = CBlobResult(&i_img,NULL,0);
			int num_blobs = blob.GetNumBlobs();

			blob.Filter(blob, B_INCLUDE, CBlobGetArea(), B_INSIDE, area_min, blob_area_absolute_max_);
			num_blobs = blob.GetNumBlobs();

			std::string reference_frame = "/virtual_table"; // Table frame at ball_radius above the actual table plane

			tf::StampedTransform transform;
			tf_.waitForTransform(reference_frame, model.tfFrame(), ros::Time(0), ros::Duration(0.5));
			tf_.lookupTransform(reference_frame, model.tfFrame(), ros::Time(0), transform);

			for(int i =0;i<num_blobs;i++)
			{
				CBlob* bl = blob.GetBlob(i);
				Point2d uv(CBlobGetXCenter()(*bl), CBlobGetYCenter()(*bl));
				//Use the width as the height
				uv.y = bl->MinY() + (bl->MaxX() - bl->MinX()) * 0.5;
				circle(copy,uv,50,Scalar(255,0,0),5);

				cv::Point3d xyz;
				model.projectPixelTo3dRay(uv, xyz);
		
				// Intersect ray with plane in virtual table frame
				//Origin of camera frame wrt virtual table frame
				tf::Point P0 = transform.getOrigin();
				//Point at end of unit ray wrt virtual table frame
				tf::Point P1 = transform * tf::Point(xyz.x, xyz.y, xyz.z);
				// Origin of virtual table frame
				tf::Point V0 = tf::Point(0.0,0.0,0.0);
				// normal to the table plane
				tf::Vector3 n(0, 0, 1);
				// finding scaling value
				double scale = (n.dot(V0-P0))/(n.dot(P1-P0));
				tf::Point ball_pos = P0 + (P1-P0)*scale;
				cout <<ball_pos.x() << " " << ball_pos.y() << " " << ball_pos.z() <<endl;
			}
			imshow(WINDOW, copy);
			waitKey(3);

			imshow("edited", t_img);
			waitKey(3);

			ros::spinOnce();
		}
	}
Developer ID: ashokzg, Project: cpb, Lines: 78, Source: locator.cpp
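
The ball localization rests on a standard ray/plane intersection: parameterize the camera ray as P(t) = P0 + t*(P1 - P0) and the table as the plane through V0 with normal n; solving n . (P(t) - V0) = 0 gives the scale t = n.(V0 - P0) / n.(P1 - P0) used above. Factored out as a helper (a sketch under the same tf types; it assumes the ray is not parallel to the plane):

#include <tf/transform_datatypes.h>

static tf::Point intersectRayWithPlane(const tf::Point& P0,   // ray origin
                                       const tf::Point& P1,   // point on ray
                                       const tf::Point& V0,   // point on plane
                                       const tf::Vector3& n)  // plane normal
{
    // n . (P0 + t*(P1 - P0) - V0) = 0  =>  t = n.(V0 - P0) / n.(P1 - P0)
    const double t = n.dot(V0 - P0) / n.dot(P1 - P0);
    return P0 + (P1 - P0) * t;
}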

Example 14: blobDetection2

IplImage* blobDetection2(IplImage* imgThreshRed, IplImage* imgThreshGreen) {
    // get blobs and filter them using its area
    int i, j;
    //  int areaBlob = 100;
    float distMark = 10;
    CBlobResult blobsRed, blobsGreen, whiteRedBlobs, whiteGreenBlobs;
    CBlob *currentBlob;
    double px, py;

    // Create Image
    IplImage* displayedImage = cvCreateImage(cvGetSize(imgThreshRed), IPL_DEPTH_8U, 3);

    // find all the RED related blobs in the image
    blobsRed = CBlobResult(imgThreshRed, NULL, 0);
    // find all the GREEN related blobs in the image
    blobsGreen = CBlobResult(imgThreshGreen, NULL, 0);

    // keep only blobs with area >= 1.0 and store them in the
    // whiteRedBlobs / whiteGreenBlobs variables
    blobsRed.Filter(whiteRedBlobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 1.0);
    blobsGreen.Filter(whiteGreenBlobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 1.0);

#ifdef DEBUG_PRINT    
    printf("White Blobs: %d\n", whiteBlobs.GetNumBlobs());
#endif

    // display filtered blobs
    cvMerge(imgThreshRed, imgThreshRed, imgThreshRed, NULL, displayedImage);

    // RED
    CvPoint2D32f redCenter[whiteRedBlobs.GetNumBlobs()];

    for (i = 0; i < whiteRedBlobs.GetNumBlobs(); i++) {
        currentBlob = whiteRedBlobs.GetBlob(i);
        px = (currentBlob->MaxX() + currentBlob->MinX()) / 2.0;
        py = (currentBlob->MaxY() + currentBlob->MinY()) / 2.0;
        redCenter[i] = cvPoint2D32f(px, py);

#ifdef DEBUG_PRINT    
        printf("%2.2f\t%2.2f\n", px, py);
#endif

        if (currentBlob->Area() > areaBlob) {
            // Add Cross to the image
            currentBlob->FillBlob(displayedImage, CV_RGB(255, 0, 0));
            cvCircle(displayedImage, cvPointFrom32f(redCenter[i]), 2, cvScalar(255, 0, 0), 10, 8, 0);
        }
    }

    // GREEN
    CvPoint2D32f greenCenter[whiteGreenBlobs.GetNumBlobs()];

    for (i = 0; i < whiteGreenBlobs.GetNumBlobs(); i++) {
        currentBlob = whiteGreenBlobs.GetBlob(i);
        px = (currentBlob->MaxX() + currentBlob->MinX()) / 2.0;
        py = (currentBlob->MaxY() + currentBlob->MinY()) / 2.0;
        greenCenter[i] = cvPoint2D32f(px, py);

#ifdef DEBUG_PRINT    
        printf("%2.2f\t%2.2f\n", px, py);
#endif

        if (currentBlob->Area() > areaBlob) {
            // Add Cross to the image
            currentBlob->FillBlob(displayedImage, CV_RGB(255, 0, 0));
            cvCircle(displayedImage, cvPointFrom32f(greenCenter[i]), 2, cvScalar(0, 255, 0), 10, 8, 0);
        }
    }

    // Populating the list of potential robots
    RobotList potRobList;
    potRobList.robNum = 0;

    for (i = 0; i < robMax; i++)
        potRobList.robList[i].active = 0;

    int redUsage[whiteRedBlobs.GetNumBlobs()];
    int greenUsage[whiteGreenBlobs.GetNumBlobs()];

    for (i = 0; i < whiteRedBlobs.GetNumBlobs(); i++)
        redUsage[i] = 0;

    for (j = 0; j < whiteGreenBlobs.GetNumBlobs(); j++)
        greenUsage[j] = 0;



    // Detect Robots
    float distCenter[whiteRedBlobs.GetNumBlobs()][whiteGreenBlobs.GetNumBlobs()];
    for (i = 0; i < min(whiteRedBlobs.GetNumBlobs(), robMax); i++) {
        currentBlob = whiteRedBlobs.GetBlob(i);
        if (currentBlob->Area() > areaBlob) {
            for (j = 0; j < min(whiteGreenBlobs.GetNumBlobs(), robMax); j++) {
                currentBlob = whiteGreenBlobs.GetBlob(j);
                if (currentBlob->Area() > areaBlob) {
                    distCenter[i][j] = computeDist(redCenter[i], greenCenter[j]);
                    //printf("[%d] - [%d]: %2.2f\n", i, j, distCenter[i][j]);
                    //printf("[%d] - [%d]: %2.2f\n", i, j, distCenter[i][j]);
                    // Print a connection line if this could be a robot
                    if (redUsage[i] == 0 && greenUsage[j] == 0 && checkDistMarker(distCenter[i][j], distMark)) {
//......... remainder of code omitted .........
Developer ID: iarwain88, Project: multi-camera-tracking, Lines: 101, Source: image_pub_sciame.cpp
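
The helpers computeDist() and checkDistMarker() are used above but not included in the excerpt. Plausible implementations consistent with how they are called (hypothetical reconstructions, not the project's code) would be:

#include <math.h>

// Euclidean distance between a red and a green blob center.
static float computeDist(CvPoint2D32f a, CvPoint2D32f b)
{
    const float dx = a.x - b.x;
    const float dy = a.y - b.y;
    return sqrtf(dx * dx + dy * dy);
}

// Accept a red/green pair as one robot marker if the centers are no
// farther apart than the expected marker spacing (distMark = 10 above).
static bool checkDistMarker(float dist, float distMark)
{
    return dist <= distMark;
}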

Example 15: blob_center

/* Find the center of a given blob. */
CvPoint MarkerCapture::blob_center(CBlob blob){
    CvPoint point;
    point.x = blob.GetBoundingBox().x + (blob.GetBoundingBox().width / 2);
    point.y = blob.GetBoundingBox().y + (blob.GetBoundingBox().height / 2);
    return point;
}
Developer ID: kenkeiter, Project: interface, Lines: 7, Source: markers.cpp
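
A hypothetical call site (the MarkerCapture instance, the frame, and the detected blob are assumed for illustration):

CvPoint c = capture.blob_center(blob);
cvCircle(frame, c, 3, CV_RGB(0, 255, 0), -1, 8, 0);  // mark the center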


Note: The CBlob class examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.