

C++ cvAnd Function Code Examples

This article collects typical usage examples of the C++ cvAnd function. If you are wondering what exactly cvAnd does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.


A total of 15 cvAnd code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
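
Before the project examples, here is a minimal, self-contained sketch of how cvAnd is typically called. It is illustrative only: the input file name input.jpg, the threshold value 100, and the circle radius are placeholder choices, and the legacy C headers opencv/cv.h and opencv/highgui.h are assumed to be available. cvAnd(src1, src2, dst, mask) computes a per-pixel bitwise AND of src1 and src2 into dst; when a non-NULL mask is given, only pixels where the mask is non-zero are written.

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main()
{
	// Load a single-channel image (placeholder file name).
	IplImage* src = cvLoadImage("input.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	if (src == NULL) return -1;

	IplImage* bright = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage* mask   = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage* dst    = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

	// A binary map of pixels brighter than 100, and a filled circular region of interest.
	cvThreshold(src, bright, 100, 255, CV_THRESH_BINARY);
	cvZero(mask);
	cvCircle(mask, cvPoint(src->width / 2, src->height / 2), 80, cvScalarAll(255), -1, 8, 0);

	// Zero dst first: with a mask, cvAnd leaves pixels outside the mask untouched.
	cvZero(dst);
	cvAnd(bright, src, dst, mask);   // dst = bright & src, computed only where mask != 0
	cvSaveImage("masked_and.jpg", dst);

	cvReleaseImage(&src);
	cvReleaseImage(&bright);
	cvReleaseImage(&mask);
	cvReleaseImage(&dst);
	return 0;
}

Passing 0 or NULL as the last argument, as many of the examples below do, simply applies the AND to every pixel. Masking a back projection (Examples 1, 9, 11 and 12) and combining thresholded channels (Example 5) both follow this pattern.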

Example 1: skinDetect

void HandDetect::handDetecting()
{
	skinDetect();
	IplImage *tmp = cvCreateImage(cvGetSize(backproject), 8, 1);
	cvZero(tmp);

	if(track_comp.rect.height>0&&track_comp.rect.width>0)
	{
		cvCircle(tmp, handCen, track_box.size.width, CV_RGB(255, 255, 255), -1);
		cvDrawRect(tmp, cvPoint(track_window.x-(int)(track_box.size.width*0.2), track_window.y-(int)(track_box.size.height*0.2)), 
			cvPoint(track_window.x+(int)(track_box.size.width*1.2), track_window.y+track_box.size.height), CV_RGB(255, 255, 255), -1);
		
	}
	// Keep only the back-projection pixels that fall inside the estimated hand region
	cvAnd(backproject, tmp, backproject, 0);
	cvDilate(backproject, backproject, 0, 1);
	cvErode(backproject, backproject, 0, 1);
	
	UsingYCbCr();
	// Apply the same region mask to the YCbCr skin map
	cvAnd(gray, tmp, gray, 0);
	cvErode(gray, gray, 0, 1);
	cvDilate(gray, gray, 0, 1);
//	cvShowImage("52", gray);
	cvReleaseImage(&tmp);

	cvOr(gray, backproject, backproject, 0);
	
	handCen=cvPoint(track_box.center.x, track_box.center.y);
	
	setRad();
//	cvDrawRect(image, cvPoint(track_window.x, track_window.y), cvPoint(track_window.x+track_window.width, track_window.y+track_window.height), CV_RGB(255, 0, 0));
	cvCircle(image, handCen, 2, CV_RGB(255, 0, 0), 2);

}
Author: gunbaek, Project: Hand2Mouse, Lines of code: 33, Source: HandDetect.cpp

Example 2: lhMorpHMTC

//Morphological constrained hit-or-miss transform, for binary and grayscale images
void lhMorpHMTC(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);

	}
	
	IplImage*  temp1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp2 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp3 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp4 = cvCreateImage(cvGetSize(src), 8, 1);

	IplImage*  mask1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask2 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask3 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask4 = cvCreateImage(cvGetSize(src), 8, 1);

	cvZero(mask1);
	cvZero(mask2);
	cvZero(mask3);
	cvZero(mask4);

	cvZero(dst);

	//P107 (5.5)
	cvErode( src, temp1, sebg);
	cvDilate( src, temp2, sebg);
	cvErode( src, temp3, sefg);
	cvDilate( src, temp4, sefg);

	cvCmp(src, temp3, mask1, CV_CMP_EQ);
	cvCmp(temp2, src,  mask2, CV_CMP_LT);
	cvAnd(mask1, mask2, mask2);

	cvCmp(src, temp4, mask3 , CV_CMP_EQ);
	cvCmp(temp1, src, mask4 , CV_CMP_GT);
	cvAnd(mask3, mask4, mask4);

	cvSub(src, temp2, dst, mask2);
	cvSub(temp1, src, dst, mask4);

	cvReleaseImage(&mask1);
	cvReleaseImage(&mask2);
	cvReleaseImage(&mask3);
	cvReleaseImage(&mask4);

	cvReleaseImage(&temp1);
	cvReleaseImage(&temp2);
	cvReleaseImage(&temp3);
	cvReleaseImage(&temp4);

	cvReleaseStructuringElement(&sebg);

}
Author: CareShaw, Project: OCR, Lines of code: 61, Source: Morphology1.cpp

Example 3: cvSmooth

void TextLocation::thresholdImg(const IplImage* grayImg, int dilateDeg) {

    cvSmooth(grayImg, m_smoothImg, CV_GAUSSIAN, ksmoothSize, ksmoothSize);

    getEdgeImg(m_smoothImg, m_simpleEdgeImg, dilateDeg);

    double globalTh = getThForGlobalThImg(m_smoothImg, khistoRatio);

    if (globalTh > 50) {

        getGlobalThImg(m_smoothImg, globalTh, m_globalThImg, dilateDeg);

        cvAnd(m_globalThImg, m_simpleEdgeImg, m_advancedEdgeImg);
        cvAnd(m_advancedEdgeImg, blackPixelImg, m_edgeAndBlackImg);

        thrImgMode = ADVANCED_EDGE; // globalAndEdge
#ifdef DEBUG
        cvShowImage("advancedEdgeImg", m_advancedEdgeImg);
#endif

    } else {

        cvAnd(m_simpleEdgeImg, blackPixelImg, m_edgeAndBlackImg);

        thrImgMode = SIMPLE_EDGE; // edgeImg

    }
#ifdef DEBUG
    cvShowImage("edgeAndBlackImg", m_edgeAndBlackImg);
#endif

}
Author: browny, Project: invoiceq-core, Lines of code: 32, Source: textlocation.cpp

Example 4: crop

IplImage* crop(IplImage* src, CvPoint c, int r){
	IplImage* res, * roi;
 
    //src = cvLoadImage("x.jpg", 1);
    res = cvCreateImage(cvGetSize(src), 8, 3);
    roi = cvCreateImage(cvGetSize(src), 8, 1);
 
    /* prepare the 'ROI' image */
    cvZero(roi);
 
    /* Note that you can use any shape for the ROI */
    cvCircle(
        roi,
        c,
        r,
        CV_RGB(255, 255, 255),
        -1, 8, 0
    );
 
    /* extract subimage */
    cvAnd(src, src, res, roi);
 
 
    /* 'restore' subimage */
    IplImage* roi_C3 = cvCreateImage(cvGetSize(src), 8, 3);
    cvMerge(roi, roi, roi, NULL, roi_C3);
    cvAnd(res, roi_C3, res, NULL);

    /* release the intermediate masks (the original snippet leaked them) */
    cvReleaseImage(&roi);
    cvReleaseImage(&roi_C3);

    return res;
}
Author: dsatiate, Project: Facial-feature-tracking-OpenCv, Lines of code: 32, Source: main.cpp

Example 5: gstskin_find_skin_center_of_mass

gint gstskin_find_skin_center_of_mass(struct _GstSkin *skin, gint display)
{
  int skin_under_seed = 0;

  IplImage* imageRGB = cvCreateImageHeader( cvSize(skin->width, skin->height), IPL_DEPTH_8U, 3);
  imageRGB->imageData = skin->cvRGB->imageData;

  IplImage* imageHSV = cvCreateImage( cvSize(skin->width, skin->height), IPL_DEPTH_8U, 3);
  cvCvtColor(imageRGB, imageHSV, CV_RGB2HSV);

  IplImage* planeH = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Hue component.
  IplImage* planeH2= cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Hue component, 2nd threshold
  IplImage* planeS = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Saturation component.
  IplImage* planeV = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Brightness component.
  cvCvtPixToPlane(imageHSV, planeH, planeS, planeV, 0);	// Extract the 3 color components.

  // Detect which pixels in each of the H, S and V channels are probably skin pixels.
  // Assume that skin has a Hue between 10 and 20 (out of 180), a Saturation above 48, and a Brightness above 80 (matching the thresholds below).
  cvThreshold(planeH , planeH2, 10, UCHAR_MAX, CV_THRESH_BINARY);         //(hue > 10)
  cvThreshold(planeH , planeH , 20, UCHAR_MAX, CV_THRESH_BINARY_INV);     //(hue < 20)
  cvThreshold(planeS , planeS , 48, UCHAR_MAX, CV_THRESH_BINARY);         //(sat > 48)
  cvThreshold(planeV , planeV , 80, UCHAR_MAX, CV_THRESH_BINARY);         //(val > 80)

  // erode the HUE to get rid of noise.
  cvErode(planeH, planeH, NULL, 1);

  // Combine all 3 thresholded color components, so that an output pixel will only
  // be white (255) if the H, S and V pixels were also white.
  IplImage* imageSkinPixels = cvCreateImage( cvGetSize(imageHSV), 8, 1);        // Greyscale output image.
  // imageSkin = (hue > 10) ^ (hue < 20) ^ (sat > 48) ^ (val > 80), where ^ means pixel-wise AND
  cvAnd(planeH         , planeS , imageSkinPixels);	
  cvAnd(imageSkinPixels, planeH2, imageSkinPixels);	
  cvAnd(imageSkinPixels, planeV , imageSkinPixels);	

  if(display){
    if( skin->showH )
      cvCvtColor(planeH, imageRGB, CV_GRAY2RGB);
    else if( skin->showS )
      cvCvtColor(planeS, imageRGB, CV_GRAY2RGB);
    else if( skin->showV )
      cvCvtColor(planeV, imageRGB, CV_GRAY2RGB);
    else
      cvCvtColor(imageSkinPixels, imageRGB, CV_GRAY2RGB);
  }


  cvReleaseImage( &imageSkinPixels );
  cvReleaseImage( &planeH );
  cvReleaseImage( &planeH2);
  cvReleaseImage( &planeS );
  cvReleaseImage( &planeV );
  cvReleaseImage( &imageHSV );
  cvReleaseImage( &imageRGB );

  return(skin_under_seed);
}
Author: miguelao, Project: gst_plugins_tsunami, Lines of code: 56, Source: gstskin.c

Example 6: filterByHSV

int filterByHSV(IplImage *src, CvScalar minHSV, CvScalar maxHSV, IplImage *dst) {
	IplImage *tmp3d = cvCloneImage(src);
	cvSmooth(tmp3d, tmp3d, CV_GAUSSIAN, 13, 0, 0, 0);

	cvCvtColor(tmp3d, tmp3d, CV_BGR2HSV);
	IplImage *tmp1dH_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage *tmp1dS_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage *tmp1dV_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	cvSplit(tmp3d, tmp1dH_mask, tmp1dS_mask, tmp1dV_mask, NULL);

	//printf("\rmin: %03d,%03d,%03d", (int)minHSV.val[0], (int)minHSV.val[1], (int)minHSV.val[2]);
	//printf("\tmax: %03d,%03d,%03d", (int)maxHSV.val[0], (int)maxHSV.val[1], (int)maxHSV.val[2]);

	if (minHSV.val[0] < maxHSV.val[0]) {
		cvInRangeS(tmp1dH_mask, cvScalar(minHSV.val[0], 0, 0), cvScalar(maxHSV.val[0], 0, 0), tmp1dH_mask);
	} else {
		IplImage *tmp1d = cvCloneImage(tmp1dH_mask);
		cvInRangeS(tmp1dH_mask, cvScalar(0, 0, 0), cvScalar(maxHSV.val[0], 0, 0), tmp1d);
		cvInRangeS(tmp1dH_mask, cvScalar(minHSV.val[0], 0, 0), cvScalar(255, 0, 0), tmp1dH_mask);
		cvOr(tmp1d, tmp1dH_mask, tmp1dH_mask, NULL);
		cvReleaseImage(&tmp1d);
	}

	cvInRangeS(tmp1dS_mask, cvScalar(minHSV.val[1], 0, 0), cvScalar(maxHSV.val[1], 0, 0), tmp1dS_mask);
	cvInRangeS(tmp1dV_mask, cvScalar(minHSV.val[2], 0, 0), cvScalar(maxHSV.val[2], 0, 0), tmp1dV_mask);

	IplImage *tmp1d_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	cvSet(tmp1d_mask, cvScalarAll(255), NULL);
	cvAnd(tmp1d_mask, tmp1dH_mask, tmp1d_mask, NULL);
	cvAnd(tmp1d_mask, tmp1dS_mask, tmp1d_mask, NULL);
	cvAnd(tmp1d_mask, tmp1dV_mask, tmp1d_mask, NULL);

	cvReleaseImage(&tmp1dH_mask);
	cvReleaseImage(&tmp1dS_mask);
	cvReleaseImage(&tmp1dV_mask);

	cvClose(tmp1d_mask, tmp1d_mask, NULL, 2);

#define CONTROLS_WIDTHA  640/2
#define CONTROLS_HEIGHTA 480/2
#if 1
	cvNamedWindow(CONTROL_WINDOW  "4", 0);
	cvResizeWindow(CONTROL_WINDOW "4", CONTROLS_WIDTHA, CONTROLS_HEIGHTA);
	cvShowImage(CONTROL_WINDOW    "4", tmp1d_mask);
#endif

	cvCopy2(src, dst, tmp1d_mask);

	cvReleaseImage(&tmp1d_mask);

	return 0;
}
Author: changeyourdestiny, Project: DIP, Lines of code: 52, Source: operateImage.cpp

Example 7: create_frequency_filtered_image

IplImage*
create_frequency_filtered_image(const IplImage *pImage, int low, int high)
{

  CvPoint2D32f  center;
  center.x = pImage->width / 2;
  center.y = pImage->height / 2;
  CvBox2D box;
  box.center = center;

  box.size.width = high;
  box.size.height = high;

  IplImage *pFilterMask = rb_cvCreateImage( cvGetSize(pImage), IPL_DEPTH_64F, 1 );
  IplImage *pFiltered = rb_cvCreateImage( cvGetSize(pImage), IPL_DEPTH_64F, 1 );

  cvZero(pFilterMask);
  cvZero(pFiltered);

  if(high > 0)
    cvEllipseBox(pFilterMask, box, cvScalar(255, 255, 255, 255), CV_FILLED, 8, 0);

  box.size.width = low;
  box.size.height = low;
  if(low > 0)
    cvEllipseBox(pFilterMask, box, cvScalar(0, 0, 0, 0), CV_FILLED, 8, 0);

  cvAnd(pImage, pFilterMask, pFiltered, NULL);

  cvReleaseImage(&pFilterMask);

  return pFiltered;
}
Author: thenoseman, Project: ruby-opencv, Lines of code: 33, Source: iplimage.cpp

Example 8: ofBackground

//--------------------------------------------------------------
void testApp::update()
{
	ofBackground(100, 100, 100);
	kinect.update();

	grayImage.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height);
			
	if( bThreshWithOpenCV ){
		
		grayThreshFar = grayImage;
		grayThresh = grayImage;
		grayThreshFar.threshold(farThreshold, true);
		grayThresh.threshold(nearThreshold);
		cvAnd(grayThresh.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
		 
	}

	// if one blob found, find nearest point in blob area		
	//update the cv image
	grayImage.flagImageChanged();
    // find contours whose area is between 10 pixels and half of the w*h pixels.
    // find holes is set to false, so interior contours are ignored.
    contourFinder.findContours(grayImage, 10, (kinect.width*kinect.height)/2, 200, false);
	ofSoundUpdate();
	
	
}
Author: investic, Project: mano_oscg, Lines of code: 28, Source: testApp.cpp

Example 9: cvCreateImage

CvBox2D CamShiftPatch::getTrackBox(CvScalar maskRange, CvHistogram *hist)
{
	IplImage* backproject = cvCreateImage(cvGetSize(originImage), 8, 1); // back-projection image, single channel
	IplImage* hue = 0;
	hue = cvCreateImage(cvGetSize(originImage), 8, 1);
	IplImage *mask = getInRangeMask(maskRange, hue);

	cvCalcBackProject(&hue, backproject, hist); // compute the back projection of the hue plane
	cvAnd(backproject, mask, backproject, 0);   // AND backproject with mask, storing the result back in backproject

	CvConnectedComp track_comp;
	CvBox2D track_box; // box returned by tracking, including rotation angle

	CvRect zero;
	zero.x = 0; zero.y = 0; zero.width = 320; zero.height = 240;
	track_window = zero;

	for (int i = 0; i < 10; i++)
	{
		cvCamShift(
			backproject,    // color probability distribution image
			track_window,   // initial value of the Search Window
			cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1), // criteria used to decide when the search stops
			&track_comp,    // holds the result, including the new Search Window position and area
			&track_box     // smallest rectangle enclosing the tracked object
			);
		track_window = track_comp.rect;
	}

	cvReleaseImage(&backproject);
	cvReleaseImage(&hue);
	cvReleaseImage(&mask);

	return track_box;
}
Author: FIREoo, Project: 105_RobotCompetition_InternetOfRobotics, Lines of code: 35, Source: CamShiftPatch.cpp

Example 10: cvAnd

void ofApp::update() {
    kinect.update();
    
    if(kinect.isNewFrame()) {
        depthPixels = kinect.getDepthRawPixels();

        grayImage.setFromPixels(kinect.getDepthRawPixels());
        grayThreshNear = grayImage;
        grayThreshFar = grayImage;
        grayThreshNear.threshold(nearThreshold, true);
        grayThreshFar.threshold(farThreshold);
        cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
        grayImage.flagImageChanged();
        
        // set contour tracker parameters
        contourFinder.setMinArea(minArea);
        contourFinder.setMaxArea(maxArea);
        contourFinder.setThreshold(threshold);
        contourFinder.getTracker().setPersistence(persistence);
        contourFinder.getTracker().setMaximumDistance(maxDistance);
        
        // determine found contours
        contourFinder.findContours(grayImage);
    }
}
Author: Kulbhushan-Chand, Project: ofxKinectProjectorToolkit, Lines of code: 25, Source: ofApp.cpp

Example 11: updateHugeImage

void WebCamData::trackFace() {
  CvConnectedComp comps;
  updateHugeImage(d->data);

  cvCalcBackProject(&d->hueImage, d->prob, d->histogram);
  cvAnd(d->prob, d->mask, d->prob, 0);
  CvBox2D box;
  cvCamShift(d->prob, d->faceRect,
             cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1), &comps,
             &box);

  d->faceRect = comps.rect;

  int radius = cvRound((d->faceRect.width + d->faceRect.height) * 0.25);
  CvPoint center;
  center.x = cvRound(d->faceRect.x + d->faceRect.width * 0.5);
  center.y = cvRound(d->faceRect.y + d->faceRect.height * 0.5);
  /*
      qDebug() << Q_FUNC_INFO
      << comps.rect.x
      << comps.rect.y
      << comps.rect.width
      << comps.rect.height
      << box.angle
      << center.x
      << center.y
      << radius;
   */
  d->dataMap.clear();
  d->dataMap["z"] = QVariant(radius);
  d->dataMap["x"] = QVariant(center.x);
  d->dataMap["y"] = QVariant(center.y);
  d->dataMap["angle"] = QVariant(box.angle);
  Q_EMIT dataReady();
}
Author: shaheeqa, Project: plexydesk, Lines of code: 35, Source: webcam.cpp

Example 12: cvCalcBackProject

bool AdaptiveHistogramCamshift::ComputeCamshift(const IplImage* hue, const IplImage* mask)
{
  // Compute backproject
  cvCalcBackProject(&hue, m_imgBackproject, m_hist);
  cvAnd(m_imgBackproject, mask, m_imgBackproject, 0);

  // Init velocity
  m_trackPosTwoFramesBack = cvPoint(static_cast<int>(m_trackBox.center.x),
                                    static_cast<int>(m_trackBox.center.y));
  m_trackAreaTwoFramesBack = m_trackBox.size.width * m_trackBox.size.height;

  // DEBUG track window area
  //printf("track wnd area: %f\n", m_trackBox.size.width * m_trackBox.size.height);

  // Compute camshift this frame
  CvConnectedComp trackComp;
  assert((m_trackWindow.height > 0) && (m_trackWindow.width > 0));
  CvBox2D trackBox;
  const int camShiftRes = cvCamShift(m_imgBackproject,
                                     m_trackWindow,
                                     cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
                                     &trackComp,
                                     &trackBox);
  if (camShiftRes >= 0)
  {
    m_trackBox = trackBox;
    m_trackCompRect = trackComp.rect;
    return true;
  }
  else
  {
    return false;
  }
}
Author: blr246, Project: adaptive-histogram-camshift, Lines of code: 34, Source: adaptive_histogram_camshift.cpp

Example 13: node_composit_exec_cvAnd

static void node_composit_exec_cvAnd(void *data, bNode *node, bNodeStack **in, bNodeStack **out)
{
	CvArr* dst;
	CvArr* src1;
	CvArr* src2;
	CvArr* mask=NULL;
        CompBuf* dst_buf;

	if(out[0]->hasoutput==0) return;
	if((in[0]->data)){
                //Inputs
		src1 = BOCV_IplImage_attach(in[0]->data);
                mask = BOCV_Mask_attach(in[2]->data);
                
                //Output
                dst_buf=dupalloc_compbuf(in[0]->data);
                dst=BOCV_IplImage_attach(dst_buf);

                //Check Image - Mask sizes
                if(mask){
                    if (!BOCV_checkMask(src1, mask)){
                        node->error= 1;
                        return;
                    }
                }
                
                if(in[1]->data){
                    src2 = BOCV_IplImage_attach(in[1]->data);
                    //Checks
                    //Check Image Sizes
                    if(!BOCV_checkAreSameType(src1, src2)){
                        node->error= 1;
                        return;
                    }
                    //Check Image number Channels
                    if(!BOCV_checkSameNChannels(src1, src2)){
                        node->error= 1;
                        return;
                    }
                    cvAnd(src1, src2, dst, mask);
                    BOCV_IplImage_detach(src2);
                }else{
                    CvScalar s;
                    s.val[0]= (in[1]->vec[0]);
                    s.val[1]= (in[1]->vec[1]);
                    s.val[2]= (in[1]->vec[2]);
                    s.val[3]= 0;
                    cvAndS(src1, s, dst, mask);
                }
                
                out[0]->data= dst_buf;
		
                BOCV_IplImage_detach(src1);
                
                BOCV_IplImage_detach(mask);
                BOCV_IplImage_detach(dst);

	}
	
}
Author: gmaclair, Project: blendocv, Lines of code: 60, Source: CMP_CvAnd.c

Example 14: ofLogError

//--------------------------------------------------------------------------------
void ofxCvImage::operator &= ( ofxCvImage& mom ) {
	if( !mom.bAllocated ){
		ofLogError("ofxCvImage") << "operator&=: source image not allocated";	
		return;	
	}
	if( !bAllocated ){
		ofLogNotice("ofxCvImage") << "operator&=: allocating to match dimensions: "
			<< mom.getWidth() << " " << mom.getHeight();
		allocate(mom.getWidth(), mom.getHeight());
	}

	if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth )
    {
        if( matchingROI(getROI(), mom.getROI()) ) {
            cvAnd( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            flagImageChanged();
        } else {
            ofLogError("ofxCvImage") << "operator&=: region of interest mismatch";
        }
	} else {
        ofLogError("ofxCvImage") << "operator&=: images need to have matching type";
	}
}
Author: 4ker, Project: openFrameworks, Lines of code: 26, Source: ofxCvImage.cpp

Example 15: lhMorpHMTB

//Morphological binary hit-or-miss transform
void lhMorpHMTB(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);

	}
	IplImage*  temp1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp2 = cvCreateImage(cvGetSize(src), 8, 1);

	//P104 (5.2)
	cvErode( src, temp1, sefg);
	cvNot(src, temp2);
	cvErode( temp2, temp2, sebg);
	cvAnd(temp1, temp2, dst);

	cvReleaseImage(&temp1);
	cvReleaseImage(&temp2);

	cvReleaseStructuringElement(&sebg);

}
Author: CareShaw, Project: OCR, Lines of code: 26, Source: Morphology1.cpp


Note: The cvAnd function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.