

C++ cvSub Function Code Examples

This article collects and summarizes typical usage examples of the C++ function cvSub. If you are unsure what cvSub does, how to call it, or what it looks like in real code, the curated examples below should help.


The following 15 cvSub code examples are shown, ordered by popularity by default.
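Before the examples, here is a minimal sketch of the cvSub call itself against the legacy OpenCV C API: dst = src1 - src2, saturating at 0 for 8-bit images, with an optional mask. The wrapper function and the assumption that both inputs share size, depth, and channel count are illustrative and not taken from any of the projects cited below.

#include <opencv/cv.h>   // legacy OpenCV C API: IplImage, cvSub, ...

// Minimal sketch: compute dst = a - b, first unmasked, then restricted to a mask.
void subtract_demo(const IplImage* a, const IplImage* b, const IplImage* mask)
{
	// a and b are assumed to have the same size, depth, and channel count.
	IplImage* dst = cvCreateImage(cvGetSize(a), a->depth, a->nChannels);
	cvZero(dst);

	cvSub(a, b, dst, NULL);    // element-wise subtraction (clipped at 0 for 8-bit images)
	cvSub(a, b, dst, mask);    // masked form: only pixels where mask(x,y) != 0 are written

	cvReleaseImage(&dst);
}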

Example 1: lhMorpHMTC

// Morphological constrained hit-or-miss transform, for binary and grayscale images
void lhMorpHMTC(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);

	}
	
	IplImage*  temp1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp2 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp3 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp4 = cvCreateImage(cvGetSize(src), 8, 1);

	IplImage*  mask1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask2 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask3 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask4 = cvCreateImage(cvGetSize(src), 8, 1);

	cvZero(mask1);
	cvZero(mask2);
	cvZero(mask3);
	cvZero(mask4);

	cvZero(dst);

	//P107 (5.5)
	cvErode( src, temp1, sebg);
	cvDilate( src, temp2, sebg);
	cvErode( src, temp3, sefg);
	cvDilate( src, temp4, sefg);

	cvCmp(src, temp3, mask1, CV_CMP_EQ);
	cvCmp(temp2, src,  mask2, CV_CMP_LT);
	cvAnd(mask1, mask2, mask2);

	cvCmp(src, temp4, mask3 , CV_CMP_EQ);
	cvCmp(temp1, src, mask4 , CV_CMP_GT);
	cvAnd(mask3, mask4, mask4);

	cvSub(src, temp2, dst, mask2);
	cvSub(temp1, src, dst, mask4);

	cvReleaseImage(&mask1);
	cvReleaseImage(&mask2);
	cvReleaseImage(&mask3);
	cvReleaseImage(&mask4);

	cvReleaseImage(&temp1);
	cvReleaseImage(&temp2);
	cvReleaseImage(&temp3);
	cvReleaseImage(&temp4);

	cvReleaseStructuringElement(&sebg);

}
Author: CareShaw, Project: OCR, Lines: 61, Source: Morphology1.cpp
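A hedged usage sketch for lhMorpHMTC above, assuming an 8-bit single-channel input and that the function is linked in from Morphology1.cpp; the file name and the 3x3 rectangular foreground element are illustrative. One quirk worth noting: the function releases the background element it ends up using, including a caller-supplied sebg, so the caller must not release or reuse that element afterwards.

#include <opencv/cv.h>
#include <opencv/highgui.h>   // cvLoadImage

int main()
{
	IplImage* src = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);   // illustrative path
	if (!src) return 1;

	IplImage* dst = cvCreateImage(cvGetSize(src), 8, 1);
	IplConvKernel* fg = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);

	lhMorpHMTC(src, dst, fg, NULL);     // sebg == NULL: background element derived from fg internally

	cvReleaseStructuringElement(&fg);   // fg itself is still owned by the caller
	cvReleaseImage(&dst);
	cvReleaseImage(&src);
	return 0;
}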

Example 2: AddError

void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 3) {
		AddError(wxT("The input image is not a color image."));
		return;
	}

	// Check and update the background
	if (! mOutputImage) {
	  mOutputImage = cvCloneImage(inputimage);
	} else {
	  cvCopyImage(inputimage, mOutputImage);
	}
	if (! mBackgroundImage) {
		mBackgroundImage = cvCloneImage(mOutputImage);
	} else if (mUpdateProportion > 0) {
		if ((cvGetSize(mOutputImage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(mOutputImage).width != cvGetSize(mBackgroundImage).width)) {
			AddError(wxT("Input and background images do not have the same size."));
			return;
		}

		cvAddWeighted(mOutputImage, mUpdateProportion, mBackgroundImage, 1.0 - mUpdateProportion, 0, mBackgroundImage);
	}

	try {
		// Correct the tmpImage with the difference in image mean
		if (mCorrectMean) {
			mBackgroundImageMean = cvAvg(mBackgroundImage);
			CvScalar tmpScalar = cvAvg(mOutputImage);
			cvAddS(mOutputImage, cvScalar(mBackgroundImageMean.val[0] - tmpScalar.val[0], mBackgroundImageMean.val[1] - tmpScalar.val[1], mBackgroundImageMean.val[2] - tmpScalar.val[2]), mOutputImage);
		}

		// Background subtraction
		if (mMode == sMode_SubImageBackground) {
			cvSub(mOutputImage, mBackgroundImage, mOutputImage);
		} else if (mMode == sMode_SubBackgroundImage) {
			cvSub(mBackgroundImage, mOutputImage, mOutputImage);
		} else {
			cvAbsDiff(mOutputImage, mBackgroundImage, mOutputImage);
		}
	} catch (...) {
		AddError(wxT("Background subtraction failed."));
	}
	mCore->mDataStructureImageColor.mImage = mOutputImage;
	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mOutputImage);
	}
}
Author: dtbinh, Project: swistrackplus, Lines: 55, Source: ComponentAdaptiveBackgroundSubtractionColor.cpp
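A brief note on the mode switch above: because cvSub saturates negative results to zero for 8-bit images, sMode_SubImageBackground keeps only the pixels that are brighter than the background, sMode_SubBackgroundImage keeps only the pixels that are darker, and the cvAbsDiff fallback responds to changes in either direction.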

Example 3: filterFrame

/**
 * \brief	Takes frame and applies image processing techniques to filter out non-laser line points. Updates images used for runtime display.
 */
int filterFrame() {
	args[0] = frame;
	cvCvtColor(frame, frameHSV, CV_BGR2HSV);	//convert RGB values of frame to HSV and place in frameHSV
	cvSplit(frameHSV, hue, saturation, value, NULL);	//split frameHSV into constituent components and place appropriately; we are done with frameHSV
	args[1] = hue;
	args[2] = value;
	cvCopy(saturation, saturation2);	//make an additional copy of saturation for display
	//args[8] = saturation2;
	//cvShowImage("saturation", saturation2);
	cvSmooth(frame, frameHSV, CV_BLUR, 20, 20 );   //smooth frame and store in frameHSV
	//cvShowImage("Smoothed frame", frameHSV);
	cvSplit(frame, blue, green, red, NULL);	//split frame into its RGB components
	cvSplit(frameHSV, blue2, green2, red2, NULL);	//split the smoothed version into its RGB components
	cvMin(blue, green, min_bg);	//take the min of blue and green and store in min_bg
	args[3] = min_bg;
	//cvShowImage("minimum of blue and green", min_bg);
	cvSub(red, min_bg, red_last);	//take red less the min of the blue and green
	//cvShowImage("red_last = red - min_bg", red_last);
	cvThreshold(red_last, red_last, thresholdValue, 255, CV_THRESH_BINARY_INV);	//threshold the red_last
	//cvShowImage("threshold of red_last", red_last);
	args[4] = red_last;
	cvSub(red, red2, deltaRed);
	//cvShowImage("deltaRed = Original red - smooth red", deltaRed);
	cvThreshold(deltaRed, deltaRed, thresholdValue, 255, CV_THRESH_BINARY);
	//cvShowImage("threshold(deltaRed)", deltaRed);
	cvCopy(deltaRed, alpha);
	cvInRangeS(saturation, cvScalar(0), cvScalar(25), saturation);
	//cvShowImage("Low saturation in original frame", saturation);
	cvInRangeS(hue, cvScalar(49), cvScalar(125), beta);
	//cvShowImage("Mixed hue in original frame", beta);
	cvOr(beta, saturation, beta);
	//cvShowImage("beta = Low saturation OR mixed hue", beta);
	cvOr(beta, red_last, beta);
	//cvShowImage("beta = beta OR red_last", beta);
	//args[5] = alpha;
	args[5] = beta;

	IplConvKernel* mask = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);	// shape 2 == CV_SHAPE_ELLIPSE

	cvDilate(saturation2,dialated, mask, 20);
	//cvShowImage("dilate original saturation", dialated);
	args[6] = dialated;
	cvThreshold(dialated, dialated, 100, 255, CV_THRESH_BINARY);
	cvErode(dialated,eroded, mask, 30);

	args[7] = eroded;
	cvSub(alpha, beta, orig_filter);
	args[8] = orig_filter;
	cvAnd(orig_filter, eroded, zeta);
	args[9] = zeta;
	return 0;
}
Author: freeman94, Project: vision, Lines: 55, Source: intersect.c

Example 4: cvMorphologyEx

CV_IMPL void
cvMorphologyEx( const void* src, void* dst,
                void* temp, IplConvKernel* element, int op, int iterations )
{
    CV_FUNCNAME( "cvMorphologyEx" );

    __BEGIN__;

    if( (op == CV_MOP_GRADIENT ||
        ((op == CV_MOP_TOPHAT || op == CV_MOP_BLACKHAT) && src == dst)) && temp == 0 )
        CV_ERROR( CV_HeaderIsNull, "temp image required" );

    if( temp == src || temp == dst )
        CV_ERROR( CV_HeaderIsNull, "temp image is equal to src or dst" );

    switch (op)
    {
    case CV_MOP_OPEN:
        CV_CALL( cvErode( src, dst, element, iterations ));
        CV_CALL( cvDilate( dst, dst, element, iterations ));
        break;
    case CV_MOP_CLOSE:
        CV_CALL( cvDilate( src, dst, element, iterations ));
        CV_CALL( cvErode( dst, dst, element, iterations ));
        break;
    case CV_MOP_GRADIENT:
        CV_CALL( cvErode( src, temp, element, iterations ));
        CV_CALL( cvDilate( src, dst, element, iterations ));
        CV_CALL( cvSub( dst, temp, dst ));
        break;
    case CV_MOP_TOPHAT:
        if( src != dst )
            temp = dst;
        CV_CALL( cvErode( src, temp, element, iterations ));
        CV_CALL( cvDilate( temp, temp, element, iterations ));
        CV_CALL( cvSub( src, temp, dst ));
        break;
    case CV_MOP_BLACKHAT:
        if( src != dst )
            temp = dst;
        CV_CALL( cvDilate( src, temp, element, iterations ));
        CV_CALL( cvErode( temp, temp, element, iterations ));
        CV_CALL( cvSub( temp, src, dst ));
        break;
    default:
        CV_ERROR( CV_StsBadArg, "unknown morphological operation" );
    }

    __END__;
}
Author: 273k, Project: OpenCV-Android, Lines: 50, Source: cvmorph.cpp
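The switch above reduces CV_MOP_GRADIENT, CV_MOP_TOPHAT, and CV_MOP_BLACKHAT to erode/dilate passes followed by cvSub. A minimal, hedged sketch of calling the gradient case, where the temp image is mandatory; the 3x3 rectangular element and the file name are illustrative assumptions:

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main()
{
	IplImage* gray = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);   // illustrative path
	if (!gray) return 1;

	IplImage* dst = cvCreateImage(cvGetSize(gray), 8, 1);
	IplImage* tmp = cvCreateImage(cvGetSize(gray), 8, 1);   // required for CV_MOP_GRADIENT
	IplConvKernel* se = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);

	// Morphological gradient: dilate(src) - erode(src), computed with cvSub as shown above
	cvMorphologyEx(gray, dst, tmp, se, CV_MOP_GRADIENT, 1);

	cvReleaseStructuringElement(&se);
	cvReleaseImage(&tmp);
	cvReleaseImage(&dst);
	cvReleaseImage(&gray);
	return 0;
}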

Example 5: MatchAreaCount

// Get match area with two same size image, return area (by pixel count)
double MatchAreaCount(IplImage* imgfrom, IplImage* imgto) {
	cvSub(imgfrom, imgto, imgto, 0); 			// subtract the two images
	cvThreshold(imgto, imgto, 1, 255, CV_THRESH_BINARY); 	// threshold the difference to binary
	int white = 0, black, total;
	for (int y = 0; y < imgto->height; y++) {
		for (int x = 0; x < imgto->width; x++) {
			const int val = imgto->imageData[y * imgto->widthStep + x];
			if (val & 1) {
				++white; 			// white (differing) pixels
			}
		}
	}
	total = imgto->width * imgto->height; 			// total number of pixels
	black = total - white; 					// black (matching) area

	if (globalArgs.verbosity) {
		printf("white = %d  black = %d  total = %d\n", white, black,
				total);
		cvNamedWindow("Test", CV_WINDOW_AUTOSIZE);
		cvShowImage("Test", imgto);
		cvWaitKey(0);
		cvDestroyWindow("Test");
	}
	return black;
}
Author: quchunguang, Project: test, Lines: 27, Source: moneymatch.cpp
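A side note on the counting loop above: for a single-channel image the same count can be obtained directly with cvCountNonZero. A minimal sketch, reusing the variable names of the function above:

	int white = cvCountNonZero(imgto);                   // non-zero (white) pixels after thresholding
	int black = imgto->width * imgto->height - white;    // matching area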

Example 6: cvAvg

void thresholdCalculator::calculateAverages(ofxCvGrayscaleAdvanced & smallCurrentImg, ofxCvGrayscaleAdvanced & maskImg, ofRectangle & targetRect) {
	
	roi.x = targetRect.x / divisor;
	roi.y = targetRect.y / divisor;
	
	maskImg.setROI(roi);
	smallCurrentImg.setROI(roi);
	
	CvScalar tempPupilAvg = cvAvg(smallCurrentImg.getCvImage(), maskImg.getCvImage());
	cvNot(maskImg.getCvImage(), notDiffImg.getCvImage());
	pupilAvg = tempPupilAvg.val[0];
	
	// get average of pupil black iteratively(get average twice) to remove the influence of glint
	cvThreshold(smallCurrentImg.getCvImage(), farFromAvg, pupilAvg + 30, 255, CV_THRESH_BINARY);		// 30 is the distance from average.
	cvSub(maskImg.getCvImage(), farFromAvg, newMask);								// make a mask to get rid of those far points.
	CvScalar newPupilAvg = cvAvg(smallCurrentImg.getCvImage(), newMask);			// get new average value.
	
	// get average, min and max value of white area of an eye.
	CvScalar tempWhiteAvg = cvAvg(smallCurrentImg.getCvImage(), notDiffImg.getCvImage());
	for (int i = 0; i < 6; i++) notDiffImg.erode();				// this might be very useful to reduce the influence of small noise & glint
	cvMinMaxLoc(smallCurrentImg.getCvImage(), &whiteMin, &whiteMax, &whiteLocMin, &whiteLocMax, notDiffImg.getCvImage());

	maskImg.resetROI();
	smallCurrentImg.resetROI();
	
	pupilAvg = newPupilAvg.val[0];					// value is in the first element of CvScalar
	whiteAvg = tempWhiteAvg.val[0];
	
}
Author: BluntBlade, Project: eyewriter, Lines: 29, Source: thresholdCalculator.cpp

Example 7: display

void display(struct ctx *ctx)
{
    int i;
    static IplImage *oldimage = NULL;

    /*if (ctx->num_fingers == NUM_FINGERS)
    {

#if defined(SHOW_HAND_CONTOUR)
        cvDrawContours(ctx->image, ctx->contour,
                       CV_RGB(0,0,255), CV_RGB(0,255,0),
                       0, 1, CV_AA, cvPoint(0,0));
#endif


        cvCircle(ctx->image, ctx->hand_center, 5, CV_RGB(255, 255, 0),
                 1, CV_AA, 0);
        cvCircle(ctx->image, ctx->hand_center, ctx->hand_radius,
                 CV_RGB(255, 0, 0), 1, CV_AA, 0);

        for (i = 0; i < ctx->num_fingers; i++)
        {

            cvCircle(ctx->image, ctx->fingers[i], 10,
                     CV_RGB(0, 255, 0), 3, CV_AA, 0);

            cvLine(ctx->image, ctx->hand_center, ctx->fingers[i],
                   CV_RGB(255,255,0), 1, CV_AA, 0);
        }

        for (i = 0; i < ctx->num_defects; i++)
        {
            cvCircle(ctx->image, ctx->defects[i], 2,
                     CV_RGB(200, 200, 200), 2, CV_AA, 0);
        }
    }*/

    cvShowImage("output", ctx->image);
    IplImage *dst;
    if ( oldimage != NULL ) {
        dst = cvCloneImage(ctx->image);

        cvSub(ctx->image,oldimage,dst,NULL);

        cvShowImage("thresholded", dst);

        cvAddWeighted(oldimage, 0.25, ctx->image, 0.75, 0.0, oldimage);


        cvReleaseImage(&dst);
        //cvReleaseImage(&oldimage);


    }
    else {
        cvShowImage("thresholded", ctx->thr_image);
        oldimage=cvCloneImage(ctx->image);

    }
}
Author: light64, Project: project_nao, Lines: 60, Source: main.cpp

Example 8: cvCreateImage

void DifferenceOfGaussian::output(IplImage* src, IplImage* dst){
	CvArr* dog1 = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
	CvArr* dog2 = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
	cvSmooth(src, dog1, CV_GAUSSIAN, _kernelSize, _kernelSize, _sigma1, _sigma1);
	cvSmooth(src, dog2, CV_GAUSSIAN, _kernelSize, _kernelSize, _sigma2, _sigma2);
	cvSub(dog2, dog1, dst, 0);
}
Author: jcchiang, Project: Joseph-SampleCode, Lines: 7, Source: DifferenceOfGaussian.cpp
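The class wrapper above is project-specific, so here is a hedged, standalone sketch of the same difference-of-Gaussians idea; the kernel size and sigmas are illustrative, and note that cvSub clips negative responses to zero for 8-bit images:

#include <opencv/cv.h>

// Difference of Gaussians: subtract a narrow blur from a wide blur of the same image.
void dogFilter(const IplImage* src, IplImage* dst)
{
	IplImage* narrow = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
	IplImage* wide   = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);

	cvSmooth(src, narrow, CV_GAUSSIAN, 9, 9, 1.0, 1.0);   // small sigma
	cvSmooth(src, wide,   CV_GAUSSIAN, 9, 9, 3.0, 3.0);   // large sigma

	cvSub(wide, narrow, dst, NULL);   // band-pass response (clipped at 0 for 8-bit images)

	cvReleaseImage(&narrow);
	cvReleaseImage(&wide);
}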

Example 9: cvSmooth

void moHighpassModule::applyFilter(IplImage *src) {
	int b1 = this->property("size").asInteger()*2+1; //make sure its odd
	int b2 = this->property("blur").asInteger()*2+1; //make sure its odd
	cvSmooth(src, this->output_buffer, CV_GAUSSIAN, b1);
	cvSub(src, this->output_buffer, this->output_buffer);
	cvSmooth(this->output_buffer, this->output_buffer, CV_GAUSSIAN, b2);
}
Author: Giladx, Project: Movid, Lines: 7, Source: moHighpassModule.cpp

Example 10: cvWaitKey

/**
 * Update the displayed output.
 */
void EdgeDetector::update() {
	if (camera == NULL) return;

	cvWaitKey(33);

	cameraFrame = cvQueryFrame(camera);
	cvReleaseImage(&resultFrame);
	
	if (isGrayScaleEffect) {
		IplImage* tempFrame = cvCloneImage(cameraFrame);
		resultFrame = cvCreateImage(imageSize, cameraFrame->depth, 1);	// one channel for grayscale (CV_LOAD_IMAGE_GRAYSCALE is a cvLoadImage flag, not a channel count)
		cvCvtColor(tempFrame, resultFrame, CV_BGR2GRAY);
		cvReleaseImage(&tempFrame);
	} else resultFrame = cvCloneImage(cameraFrame);

	if (!isOriginalEffect) {
		if (isStrokeEffect) {
			IplImage* tempFrame = cvCloneImage(resultFrame);
			tempFrame = edgeDetectOperator->applyOperator(tempFrame);
			cvSub(resultFrame, tempFrame, resultFrame);
			cvReleaseImage(&tempFrame);
		} else resultFrame = edgeDetectOperator->applyOperator(resultFrame);
	}

	if (isInverseEffect) {
		IplImage* tempFrame = cvCloneImage(resultFrame);
		cvNot(tempFrame, resultFrame);
		cvReleaseImage(&tempFrame);
	}

	cvShowImage(getWindowName(), resultFrame);
}
Author: aNNiMON, Project: DonNUEdgeDetector, Lines: 35, Source: EdgeDetector.cpp

Example 11: lhMorpRBTH

// Morphological black top-hat by reconstruction
void lhMorpRBTH(const IplImage* src, IplImage* dst, IplConvKernel* se = NULL, int iterations=1)
{
	assert(src != NULL  && dst != NULL && src != dst );
	//p156
	lhMorpRClose(src, dst, se, iterations);
	cvSub(dst, src, dst);
}
Author: CareShaw, Project: OCR, Lines: 8, Source: Morphology1.cpp

Example 12: lhMorpHMTU

// Morphological unconstrained hit-or-miss transform, for binary and grayscale images
void lhMorpHMTU(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);

	}
	
	IplImage*  temp = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask = cvCreateImage(cvGetSize(src), 8, 1);
	cvZero(mask);

	//P106 (5.4)
	cvErode( src, temp, sefg);
	cvDilate(src, dst, sebg);
	cvCmp(temp, dst, mask, CV_CMP_GT);

	cvSub(temp, dst, dst, mask);
	cvNot(mask, mask);
	cvSet(dst, cvScalar(0), mask);

	//cvCopy(dst, mask);
	//cvSet(dst, cvScalar(255), mask);
	cvReleaseImage(&mask);
	cvReleaseImage(&temp);

	cvReleaseStructuringElement(&sebg);
}
Author: CareShaw, Project: OCR, Lines: 31, Source: Morphology1.cpp

Example 13: ofLogError

//--------------------------------------------------------------------------------
void ofxCvImage::operator -= ( ofxCvImage& mom ) {
	if( !mom.bAllocated ){
		ofLogError("ofxCvImage") << "operator-=: source image not allocated";	
		return;	
	}
	if( !bAllocated ){
		ofLogNotice("ofxCvImage") << "operator-=: allocating to match dimensions: "
			<< mom.getWidth() << " " << mom.getHeight();
		allocate(mom.getWidth(), mom.getHeight());
	}

	if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth )
    {
        if( matchingROI(getROI(), mom.getROI()) ) {
            cvSub( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            flagImageChanged();
        } else {
            ofLogError("ofxCvImage") << "operator-=: region of interest mismatch";
        }
	} else {
        ofLogError("ofxCvImage") << "operator-=: image type mismatch";
	}
}
Author: 4ker, Project: openFrameworks, Lines: 26, Source: ofxCvImage.cpp
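A hedged usage sketch of the subtraction operator above in an openFrameworks app, assuming ofxOpenCv's ofxCvGrayscaleImage (which derives from ofxCvImage); the image size is an arbitrary example:

#include "ofxOpenCv.h"

void exampleSubtract() {
	ofxCvGrayscaleImage current, background;
	current.allocate(320, 240);
	background.allocate(320, 240);
	// ... fill both images, e.g. from camera pixels ...

	current -= background;   // in-place saturating subtraction, implemented with cvSub
}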

Example 14: get_connected_components

/*	The function returns the connected components in 'comp',
	and the number of connected components as its return value 'nc'.
	At that point we have to determine whether the components form an eye pair or not.
	We use experimentally derived heuristics for this, based on the width,
	height, vertical distance, and horizontal distance of the components.
	To keep things simple, we only proceed if the number of connected components is exactly 2. */
int get_connected_components(IplImage* img, IplImage* prev, CvRect window, CvSeq** comp)
{
		IplImage* _diff;
 
		cvZero(diff);
 
    /* apply search window to images */
		cvSetImageROI(img, window);
		cvSetImageROI(prev, window);
		cvSetImageROI(diff, window);
 
    /* motion analysis */
		cvSub(img, prev, diff, NULL);
		cvThreshold(diff, diff, 5, 255, CV_THRESH_BINARY);
		cvMorphologyEx(diff, diff, NULL, kernel, CV_MOP_OPEN, 1);
 
    /* reset search window */
		cvResetImageROI(img);
		cvResetImageROI(prev);
		cvResetImageROI(diff);
 
		_diff = (IplImage*)cvClone(diff);
 
    /* get connected components */
		int nc = cvFindContours(_diff, storage, comp, sizeof(CvContour),
                            CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
 
		cvClearMemStorage(storage);		
		cvReleaseImage(&_diff);
	
		return nc;
}
Author: madhurjain, Project: TrackNoseBlinkEye, Lines: 38, Source: TrackNoseBlinkEye.cpp

Example 15: lhMorpBlackTopHat

// Morphological black top-hat operation
void lhMorpBlackTopHat(const IplImage* src, IplImage* dst, IplConvKernel* se=NULL, int iterations=1)
{
	assert(src != NULL && dst != NULL && src != dst);
	lhMorpClose(src, dst, se, iterations );
    cvSub(dst, src, dst );

}
Author: CareShaw, Project: OCR, Lines: 8, Source: Morphology1.cpp


Note: The cvSub examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. When redistributing or using the code, please follow the license of the corresponding project. Do not republish this page without permission.