

C++ cvAdd Function Code Examples

This article collects typical usage examples of the cvAdd function in C++. If you are wondering what exactly cvAdd does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


The sections below show 15 code examples of the cvAdd function, sorted by popularity by default.
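Before the project examples, here is a minimal, self-contained sketch of the basic call pattern. It assumes OpenCV's legacy C API; the file names "a.png", "b.png" and "sum.png" are placeholders for illustration only. cvAdd computes the per-element saturated sum dst(I) = src1(I) + src2(I), takes an optional mask as its fourth argument, and expects all arrays to have the same size and type.

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main()
{
	// "a.png" and "b.png" are placeholder file names for illustration.
	IplImage *a = cvLoadImage("a.png", CV_LOAD_IMAGE_COLOR);
	IplImage *b = cvLoadImage("b.png", CV_LOAD_IMAGE_COLOR);
	if (!a || !b || a->width != b->width || a->height != b->height)
		return -1;

	// The destination must match the inputs in size, depth and channel count.
	IplImage *sum = cvCreateImage(cvGetSize(a), a->depth, a->nChannels);

	// dst(I) = saturate(src1(I) + src2(I)); pass a non-NULL mask to restrict the operation.
	cvAdd(a, b, sum, NULL);

	cvSaveImage("sum.png", sum);

	cvReleaseImage(&a);
	cvReleaseImage(&b);
	cvReleaseImage(&sum);
	return 0;
}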

Example 1: blur_function

void blur_function(const IplImage *latent_image, IplImage *blur_image, const CvMat *hom1, const CvMat *hom2)
{
	const int T = 20;
	const int tau = 10;
	CvMat *id_mat = cvCreateMat(3, 3, CV_32FC1);
	cvSetIdentity(id_mat, cvRealScalar(1));
	CvMat *invhom1 = cvCreateMat(3, 3, CV_32FC1);
	cvInvert(hom1, invhom1, CV_LU);
	
	CvMat *h1 = cvCreateMat(3, 3, CV_32FC1);
	CvMat *h2 = cvCreateMat(3, 3, CV_32FC1);
	CvSize size = cvSize(latent_image->width, latent_image->height);
	IplImage *temp = cvCreateImage(size, latent_image->depth, latent_image->nChannels);
	IplImage *blur = cvCreateImage(size, IPL_DEPTH_32F, latent_image->nChannels);
	cvSetZero(blur);
	
	// Accumulate warped copies of the latent image, interpolating the warp
	// between the identity matrix and invhom1 (resp. hom2) at each step.
	for (int i = 1; i <= tau; ++i)
	{
		cvAddWeighted(id_mat, (double)(T-i)/T, invhom1, (double)i/T, 0, h1);
		cvAddWeighted(id_mat, (double)(T-i)/T, hom2, (double)i/T, 0, h2);
		cvWarpPerspective(latent_image, temp, h1, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
		cvAdd(blur, temp, blur, NULL);
		cvWarpPerspective(latent_image, temp, h2, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
		cvAdd(blur, temp, blur, NULL);
	}
	cvAdd(blur, latent_image, blur, NULL);
	// Average the 2*tau accumulated warps plus the original latent image.
	cvConvertScale(blur, blur_image, 1.0/(2*tau+1), 0);
	
	cvReleaseMat(&id_mat);
	cvReleaseMat(&invhom1);
	cvReleaseMat(&h1);
	cvReleaseMat(&h2);
	cvReleaseImage(&temp);
	cvReleaseImage(&blur);
}
Developer: capslock1874, Project: newDeblur, Lines: 35, Source: blurfunc.c

Example 2: cvCreateImage

void ImageProcessorCV::CalculateGradientImageHSV(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pInputImage->type != CByteImage::eRGB24 || pOutputImage->type != CByteImage::eGrayScale)
		return;

	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);

	// Gradient image computation by Irina Wächter.
	// Instead of the usual norm sqrt(x*x + y*y), use |x| + |y| because it is much faster.
	IplImage *singleChannel0 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel1 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel2 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
	IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
	cvCvtPixToPlane(pIplInputImage, singleChannel0, singleChannel1, singleChannel2, NULL);
	
	// calculate gradients on S-channel
	//cvSmooth(singleChannel1, singleChannel1, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel1, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel1, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, pIplOutputImage);
	cvAdd(abs, pIplOutputImage, pIplOutputImage, 0);
	
	// threshold the S channel to create a mask for the gradients of the H channel
	cvThreshold(singleChannel1, singleChannel1, 60, 255, CV_THRESH_BINARY);
	cvDilate(singleChannel1, singleChannel1);
	
	// calculate gradients on H-channel
	//cvSmooth(singleChannel0, singleChannel0, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel0, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel0, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, singleChannel0);
	cvAdd(abs, singleChannel0, singleChannel0, 0);
	
	// filter gradients of H-channel with mask
	cvAnd(singleChannel0, singleChannel1, singleChannel0);
	
	// combine the two gradient images
	cvMax(pIplOutputImage, singleChannel0, pIplOutputImage);
	
	// free memory
	cvReleaseImage(&singleChannel0);
	cvReleaseImage(&singleChannel1);
	cvReleaseImage(&singleChannel2);
	cvReleaseImage(&diff);
	cvReleaseImage(&abs);
	
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
Developer: junaidnaseer, Project: ivt, Lines: 55, Source: ImageProcessorCV.cpp

Example 3: easyplot

void easyplot(IplImage *fr, IplImage *fr0)
{
	int rmean = 0.5*(r1+r2), rthick = r1-r2;
	CvPoint up, cp, bp;
	
	up.x = coo2pix(upc.x);
	up.y = coo2pix(upc.y);
	
	// pause button
	if(sqr(pbuttonp.x-up.x)+sqr(pbuttonp.y-up.y)<sqr(r1+buttonr)) {
		plot_circular_button(fr, yellow);
	}
	
	// user handle
	cvCircle(fr, up, rmean,   red, rthick+2, CV_AA, 0);
	cvCircle(fr, up, rmean, white, rthick-4, CV_AA, 0);
	
	// computer handle
	cp.x = coo2pix(cpc.x);
	cp.y = coo2pix(cpc.y);
	cvCircle(fr, cp, rmean, green, rthick+2, CV_AA, 0);
	cvCircle(fr, cp, rmean, white, rthick-4, CV_AA, 0);
	
	// ball
	bp.x = coo2pix(bpc.x);
	bp.y = coo2pix(bpc.y);
	if(bp.y>winy+r0) {
		cvCircle(fr, cvPoint(winx/2,winy-bound), criticr, CV_RGB(150,150,150), 10, CV_AA, 0);
		cvCircle(fr, cvPoint(winx/2,winy-bound), explosr, CV_RGB(150,150,150), criticr-explosr, CV_AA, 0);
		explosr+=7;
	}
	else if(bp.y<-r0) {
		cvCircle(fr, cvPoint(winx/2,bound), criticr, CV_RGB(150,150,150), 10, CV_AA, 0);
		cvCircle(fr, cvPoint(winx/2,bound), explosr, CV_RGB(150,150,150), criticr-explosr, CV_AA, 0);
		explosr+=7;
	}
	else {
		cvCircle(fr, bp, r0,  white, -1, CV_AA, 0);
		cvCircle(fr, bp, r0,   blue, 3, CV_AA, 0);
	}
	
	// blur processing
	cvSmooth(fr, fr, CV_BLUR, 15, 15, 0.0, 0.0);
	cvAddWeighted(fr0, 0.55, fr, 1.0, -10.0, fr);
	
	// score
	cvSetImageROI(fr, scoreroi1);
	cvAdd(fr, scoretext1, fr);
	cvSetImageROI(fr, scoreroi2);
	cvAdd(fr, scoretext2, fr);
	cvResetImageROI(fr);
	cvSmooth(fr, fr, CV_BLUR, 5, 5, 0.0, 0.0);
	
	cvCopy(fr, fr0);
}
Developer: jjlschen, Project: Air-Hockey-Game-practice, Lines: 55, Source: air_hockey_02.cpp

Example 4: stack_imgs

IplImage* stack_imgs( IplImage* img1, IplImage* img2 )
{
	IplImage* stacked = cvCreateImage( cvSize( MAX(img1->width, img2->width), img1->height + img2->height ), IPL_DEPTH_8U, 3 );
	cvZero( stacked );
	cvSetImageROI( stacked, cvRect( 0, 0, img1->width, img1->height ) );
	cvAdd( img1, stacked, stacked, NULL );
	cvSetImageROI( stacked, cvRect(0, img1->height, img2->width, img2->height) );
	cvAdd( img2, stacked, stacked, NULL );
	cvResetImageROI( stacked );
//	cvShowImage( "stack", stacked );
	return stacked;
}
Developer: fxia22, Project: tinker, Lines: 12, Source: main.cpp

Example 5: cvMinMaxLoc

void CueTemplate::adapt() {
	if(!m_init) return;

	CVImage* cvgrayimg = cvGrayImageIn.getBuffer();
	if(!cvgrayimg) { std::cerr<< getName() << "::ERROR::execute()::cvGrayImageIn is NULL!...\n"; return; }
	IplImage* grayimg = cvgrayimg->ipl;

	TrackData* track = trackIn.getBuffer();
//	if(!track){ std::cerr<< getName() << "::ERROR::execute()::trackIn is NULL!...\n"; return; }	

	float rel;
	CvPoint winner;
	if(track) {
		rel = track->reliability;
		winner = track->winnerPos;
	}
	else{
		double min, max;
		CvPoint minLoc, maxLoc;
		cvMinMaxLoc(mp_boundedoutputimg, &min, &max, &minLoc, &maxLoc, NULL);
		rel = (float)max;
		winner = maxLoc;
	}

	if(rel > m_threshold){
		// adapt toward new template
		int x = winner.x;
		int y = winner.y;
		if(x < m_halftemplatesizex) x = m_halftemplatesizex;
		if(y < m_halftemplatesizey) y = m_halftemplatesizey;
		if(x >= grayimg->width - m_halftemplatesizex) x = grayimg->width - m_halftemplatesizex-1;
		if(y >= grayimg->height - m_halftemplatesizey) y = grayimg->height - m_halftemplatesizey-1;
		CvRect rect;
		rect.x = x - m_halftemplatesizex;
		rect.y = y - m_halftemplatesizey;
		rect.width = m_templatesizex;
		rect.height = m_templatesizey;
		cvSetImageROI(grayimg, rect );
		cvCopy( grayimg, mp_newtemplateimg );
		cvScale( mp_templateimg, mp_templateimg, 1.0 - m_tfacs);
		cvScale( mp_newtemplateimg, mp_newtemplateimg, m_tfacs);
		cvAdd( mp_newtemplateimg, mp_templateimg, mp_templateimg );
		cvResetImageROI( grayimg );
		cvTemplateImageOut.out();
	}
	else{
		// adapting back to the original template
		cvScale( mp_templateimg, mp_templateimg, 1.0 - (m_tfacs/m_back) );
		cvScale( mp_origtemplateimg, mp_temptemplateimg, (m_tfacs/m_back) );
		cvAdd( mp_temptemplateimg, mp_templateimg, mp_templateimg );
		cvTemplateImageOut.out();
	}
}
Developer: gatsoulis, Project: cappocacciaactivevision, Lines: 53, Source: cuetemplate.cpp

Example 6: cvCreateImage

IplImage *stack(IplImage *img1, IplImage *img2) {
    IplImage *img = cvCreateImage(cvSize(img1->width + img2->width,
                _max(img1->height, img2->height)),
                IPL_DEPTH_8U, 3);
    cvZero(img);
    cvSetImageROI(img, cvRect(0, 0, img1->width, img1->height));
    cvAdd(img1, img, img, NULL);
    cvSetImageROI(img, cvRect(img1->width, 0, img2->width, img2->height));
    cvAdd(img2, img, img, NULL);
    cvResetImageROI(img);

    return img;
}
Developer: cherip, Project: dct, Lines: 13, Source: utils.cpp

Example 7: stack_imgs_horizontal

/* (Custom helper function)
Stacks two images into one, placed side by side horizontally.
Parameters: img1 - pointer to the image placed on the left; img2 - pointer to the image placed on the right
Returns: the combined image
*/
extern IplImage* stack_imgs_horizontal( IplImage* img1, IplImage* img2 )
{
    //allocate the combined image
    IplImage * stacked = cvCreateImage(cvSize(img1->width+img2->width, MAX(img1->height,img2->height)),
                                       IPL_DEPTH_8U, 3);
    cvZero(stacked);//clear it to zero
    cvSetImageROI(stacked, cvRect(0,0,img1->width,img1->height));
    cvAdd(img1,stacked,stacked,NULL);//paste in the first image
    cvSetImageROI(stacked, cvRect(img1->width,0,img2->width,img2->height));
    cvAdd(img2,stacked,stacked,NULL);//paste in the second image
    cvResetImageROI(stacked);

    return stacked;
}
Developer: BrownOfSummer, Project: sift-1, Lines: 19, Source: utils.c

Example 8: stack_imgs

IplImage* stack_imgs(const IplImage* img1, const IplImage* img2 )
{
  IplImage* stacked = cvCreateImage( cvSize( img1->width + img2->width,
					     MAX(img1->height, img2->height) ),
				     IPL_DEPTH_8U, 3 );

  cvZero( stacked );
  cvSetImageROI( stacked, cvRect( 0, 0, img1->width, img1->height ) );
  cvAdd( img1, stacked, stacked, NULL );
  cvSetImageROI( stacked, cvRect(img1->width, 0, img2->width, img2->height) );
  cvAdd( img2, stacked, stacked, NULL );
  cvResetImageROI( stacked );

  return stacked;
}
Developer: cherip, Project: dct, Lines: 15, Source: utils.cpp

Example 9: Segment

Segment* Segment::combine(Segment *s1, Segment *s2)
{
	Segment* newSegment = new Segment(*s1->seg, s1->label);

	IplImage* maskAddition = cvCreateImage(cvSize(s1->seg->width, s1->seg->height), IPL_DEPTH_8U, 1);
	cvAdd(s1->iplMask, s2->iplMask, maskAddition, NULL);
	cvReleaseImage(&s1->iplMask);
	newSegment->iplMask = maskAddition;
	newSegment->mask = BwImage(newSegment->iplMask);
	
	// weighted average colors
	int s1PixelCount = s1->pixels.size();
	int s2PixelCount = s2->pixels.size();
	int totalPixelCount = s1PixelCount + s2PixelCount;
	float s1Weight = float(s1PixelCount) / float(totalPixelCount);
	float s2Weight = float(s2PixelCount) / float(totalPixelCount);
	newSegment->color.r = s1->color.r * s1Weight + s2->color.r * s2Weight;
	newSegment->color.g = s1->color.g * s1Weight + s2->color.g * s2Weight;
	newSegment->color.b = s1->color.b * s1Weight + s2->color.b * s2Weight;

	newSegment->pixels.insert(newSegment->pixels.end(), s1->pixels.begin(), s1->pixels.end());
	newSegment->pixels.insert(newSegment->pixels.end(), s2->pixels.begin(), s2->pixels.end());

	newSegment->updateContour();

	return newSegment;
}
Developer: gimlids, Project: LTPM, Lines: 27, Source: Segment.cpp

Example 10: while

/* Tool function */
void motionDetection::accFrameFromVideo(CvCapture* capture){

	//cvNamedWindow( "Video", CV_WINDOW_AUTOSIZE ); // Create a window to display the video 

	while (mCount != mFrameNumber)
	{
		if (cvGrabFrame(capture))
		{
			mFrame = cvRetrieveFrame(capture);
			// convert rgb to gray 
			cvCvtColor(mFrame, mpFrame[mCount], CV_BGR2GRAY);
			// accumulate each frame
			cvAdd(mSum, mpFrame[mCount], mSum);
			//cvShowImage( "Video", mpFrame[mCount] );  // display current frame 

			++mCount;
			if (cvWaitKey(10) >= 0) {
				break;
			}
		}
		else {
			break;
		}
	}
	//cvDestroyWindow( "Video" );
}
Developer: KevinGuo0211, Project: EarlyFireDetection, Lines: 27, Source: motionDetection.cpp

Example 11: CalculateKernel

Img GaborImage::GaborTransform(Img Image, int Frequency, int Orientation) {
	orientation = Orientation;
	CalculateKernel(Orientation, Frequency);

	Img retImg  = (IplImage*) cvClone(Image);
	
	Img gabor_real = (IplImage*) cvClone(Image);
	Img gabor_img  = (IplImage*) cvClone(Image);
	cvFilter2D(Image, gabor_real, KernelRealData);	//image.Convolution(this.KernelRealData);
	cvFilter2D(Image, gabor_img , KernelImgData);	//image.Convolution(this.KernelImgData);
	
	cvPow(gabor_real, gabor_real, 2);
	cvPow(gabor_img,  gabor_img,  2);
	
	// Img gabor = (gabor_real + gabor_img).Pow(0.5);
	cvAdd(gabor_real, gabor_img, retImg);
	
	cv::Mat in = retImg;
	cv::Mat out;
	cv::sqrt(in, out); 
	
	IplImage dst_img = out;	
	
	cvReleaseImage(&gabor_real);
	cvReleaseImage(&gabor_img);
	
	retImg = (IplImage*) cvClone(&dst_img);
	
	return retImg;
}
Developer: Juxi, Project: icVision, Lines: 30, Source: GaborImage.cpp

Example 12: cvZero

/* Standard Deviation */
IplImage* motionDetection::getStandardDeviationFrame(void) {

	// Initialize
	cvZero(mSum);
	for (int i = 0; i < mFrameNumber; ++i) {
		// frame[i] <= | frame[i] - Background Model |
		cvAbsDiff(mpFrame[i], m_imgBackgroundModel, mTmp8U);
		// uchar->float
		cvConvert(mTmp8U, mTmp);
		// mTmp = mTmp * mTmp 
		cvPow(mTmp, mTmp, 2.0);
		// add mSum += mTmp
		cvAdd(mSum, mTmp, mSum);
	}

	// variance: mTmp <= mSum / (mFrameNumber-1)
	for (int i = 0; i < mSize.height; ++i) {
		for (int j = 0; j < mSize.width; ++j) {
			((float*)(mTmp->imageData + i*mTmp->widthStep))[j] = ((float*)(mSum->imageData + i*mSum->widthStep))[j] / (mFrameNumber - 1);
		}
	}

	// standard deviation
	cvPow(mTmp, mTmp, 0.5);

	// float->uchar
	cvConvert(mTmp, m_imgStandardDeviation);

	return m_imgStandardDeviation;
}
Developer: KevinGuo0211, Project: EarlyFireDetection, Lines: 31, Source: motionDetection.cpp

Example 13: createAnaglyph

//get the stereo pair and create the anaglyph
void createAnaglyph(IplImage *frameL, IplImage *frameR, IplImage **anaglyph){
    IplImage *grayR, *grayL;
    
    CvSize size = cvGetSize(frameL);
    grayR = cvCreateImage(size, frameL->depth, 1);
    grayL = cvCreateImage(size, frameL->depth, 1);
    
    //convert images to grayscale
    cvCvtColor(frameR, grayR, CV_BGR2GRAY);
    cvCvtColor(frameL, grayL, CV_BGR2GRAY);
    
    //revert to RGB (grayscale with 3 channels, all have the same pixel value)
    cvCvtColor(grayR, frameR, CV_GRAY2BGR);
    cvCvtColor(grayL ,frameL, CV_GRAY2BGR);
    
    //remove channels
    for(int row = 0; row < frameL->height; row++){
            //set pointer to the correct position in each row
            uchar* ptrR = (uchar*)(frameR->imageData + row * frameR->widthStep);
            uchar* ptrL = (uchar*)(frameL->imageData + row * frameL->widthStep);
            
            for(int col = 0; col < frameL->width; col++){
                    //remove blue and green channel from the right image
                    ptrR[3*col] = 0;
                    ptrR[3*col+1] = 0;
                    //remove red channel from the left image
                    ptrL[3*col+2] = 0;
            }            
    }
    
    //combine the two single-channel-stripped frames into the anaglyph
    cvAdd(frameR, frameL, *anaglyph);

    cvReleaseImage(&grayR);
    cvReleaseImage(&grayL);
}
Developer: andrecurvello, Project: tests-zingarelli, Lines: 34, Source: SBS-AB-to-GRAY-anaglyph-video.cpp

Example 14: createAnaglyph

//get the stereo pair and create the anaglyph
void createAnaglyph(IplImage *frameL, IplImage *frameR){
    IplImage *anaglyph;

    //remove channels
    for(int row = 0; row < frameL->height; row++){
            //set pointer to the correct position in each row
            uchar* ptrR = (uchar*)(frameR->imageData + row * frameR->widthStep);
            uchar* ptrL = (uchar*)(frameL->imageData + row * frameL->widthStep);

            for(int col = 0; col < frameL->width; col++){
                    //remove blue and green channel from the right image
                    ptrR[3*col] = 0;
                    ptrR[3*col+1] = 0;
                    //remove red channel from the left image
                    ptrL[3*col+2] = 0;

                    /*//uncomment to obtain the green-magenta anaglyph
                    ptrR[3*col] = 0;
                    ptrR[3*col+2] = 0;
                    ptrL[3*col+1] = 0;*/
            }
    }

    //prepare anaglyph image
    CvSize size = cvGetSize(frameL);
    anaglyph = cvCreateImage(size, frameL->depth, frameL->nChannels);
    cvZero(anaglyph);

    //combine the two frames into the anaglyph
    cvAdd(frameR, frameL, anaglyph);

    //save the combined image
    cvSaveImage("anaglyph.bmp", anaglyph);
    cvReleaseImage(&anaglyph);
}
Developer: andrecurvello, Project: tests-zingarelli, Lines: 35, Source: SBS-to-anaglyph-image.cpp

Example 15: AddError

void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 3) {
		AddError(wxT("The input image is not a color image."));
		return;
	}

	// Check and update the background
	if (! mFinalImage) {
		mFinalImage = cvCloneImage(inputimage);
	} else if (mMode == sMode_Addition) {
		cvAdd(mFinalImage, inputimage, mFinalImage);
	} else if (mMode == sMode_Subtraction) {
		cvSub(mFinalImage, inputimage, mFinalImage);
	} else if (mMode == sMode_Multiplication) {
	}

	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mFinalImage);
	}
}
Developer: gctronic, Project: swistrack, Lines: 28, Source: ComponentOutputImageOverlayColor.cpp


Note: The cvAdd examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not repost without permission.