

C++ cvSplit Function Code Examples

This article collects typical usage examples of the C++ cvSplit function. If you are wondering what cvSplit does, how to call it, or what real-world uses look like, the hand-picked examples below should help.


Fifteen cvSplit code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
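Before the examples, the function itself: cvSplit(src, dst0, dst1, dst2, dst3) copies each channel of a multi-channel array into a separate single-channel array of the same size and depth; pass NULL for outputs you do not need. cvMerge is its inverse. As a quick orientation, here is a minimal, hedged sketch of the split/merge round trip with the legacy C API (the input file name is a placeholder):

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    // "input.png" is a placeholder; any 3-channel BGR image works
    IplImage *img = cvLoadImage("input.png", CV_LOAD_IMAGE_COLOR);
    if (!img) return 1;

    CvSize size = cvGetSize(img);
    IplImage *b = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage *g = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage *r = cvCreateImage(size, IPL_DEPTH_8U, 1);

    // IplImage pixel data is stored B,G,R, so the planes come out in that order
    cvSplit(img, b, g, r, NULL);

    // ... process the individual planes here ...

    // recombine the (possibly modified) planes
    cvMerge(b, g, r, NULL, img);

    cvReleaseImage(&b);
    cvReleaseImage(&g);
    cvReleaseImage(&r);
    cvReleaseImage(&img);
    return 0;
}

The examples below show the same call in real projects: splitting BGR frames, pulling the hue plane out of an HSV image, and separating the real and imaginary parts of a DFT result.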

Example 1: block_coeffs

static int* block_coeffs(IplImage *img, int* plane_coeffs) {
    CvSize size = cvGetSize(img);
    IplImage *b = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage *g = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage *r = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage *trans = cvCreateImage(size, IPL_DEPTH_16S, 1);
    int dim = plane_coeffs[0] + plane_coeffs[1] + plane_coeffs[2];
    int sz = size.width*size.height/64*dim; // dim coefficients per 8x8 block
    int *buf = malloc(sizeof(int)*sz);
    unsigned *order_p0 = build_path(plane_coeffs[0], KERNS);
    unsigned *order_p1 = build_path(plane_coeffs[1], KERNS);
    unsigned *order_p2 = build_path(plane_coeffs[2], KERNS);

    cvSplit(img, b, g, r, NULL);

    wht2d(b, trans);
    quantize(trans, plane_coeffs[0], KERNS, order_p0, buf, dim);

    wht2d(g, trans);
    quantize(trans, plane_coeffs[1], KERNS, order_p1,
        buf+plane_coeffs[0], dim);

    wht2d(r, trans);
    quantize(trans, plane_coeffs[2], KERNS, order_p2,
        buf+plane_coeffs[0]+plane_coeffs[1], dim);

    cvReleaseImage(&trans);
    cvReleaseImage(&b);
    cvReleaseImage(&g);
    cvReleaseImage(&r);
    free(order_p0);
    free(order_p1);
    free(order_p2);

    return buf;
}
Author: j0sh | Project: thesis | Lines: 36 | Source: cd.c

Example 2: cvCvtColor

void CamShift::Track(IplImage *frame, CvRect &selection, bool calc_hist)
{

	cvCvtColor( frame, _hsv, CV_BGR2HSV );

	cvInRangeS( _hsv, cvScalar(0,_smin,MIN(_vmin,_vmax),0),
		cvScalar(180,256,MAX(_vmin,_vmax),0), _mask );
	cvSplit( _hsv, _hue, 0, 0, 0 );

	if(calc_hist)
	{
		float max_val = 0.f;
		cvSetImageROI( _hue, selection );
		cvSetImageROI( _mask, selection );
		cvCalcHist( &_hue, _hist, 0, _mask );
		cvGetMinMaxHistValue( _hist, 0, &max_val, 0, 0 );
		cvConvertScale( _hist->bins, _hist->bins, max_val ? 255. / max_val : 0., 0 );
		cvResetImageROI( _hue );
		cvResetImageROI( _mask );
		_track_window = selection; 
	}

	cvCalcBackProject( &_hue, _backproject, _hist );
	cvAnd( _backproject, _mask, _backproject, 0 );
	cvCamShift( _backproject, _track_window,
		cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
		&_track_comp, &_track_box );
	_track_window = _track_comp.rect;

	if( frame->origin )
		_track_box.angle = -_track_box.angle;

	selection = cvRect(_track_box.center.x-_track_box.size.width/2, _track_box.center.y-_track_box.size.height/2,
		selection.width, selection.height);
}
Author: ayushpurohit | Project: human-action-recognition | Lines: 36 | Source: CamShift.cpp

Example 3: EqualizeHistColorImage

IplImage* EqualizeHistColorImage(IplImage *pImage)
{
    IplImage *pEquaImage = cvCreateImage(cvGetSize(pImage), pImage->depth, 3);

    // Split the image into individual channels, equalize each, then merge:
    // this is histogram equalization of a color image
    const int MAX_CHANNEL = 4;
    IplImage *pImageChannel[MAX_CHANNEL] = {NULL};

    int i;
    for (i = 0; i < pImage->nChannels; i++)
        pImageChannel[i] = cvCreateImage(cvGetSize(pImage), pImage->depth, 1);

    cvSplit(pImage, pImageChannel[0], pImageChannel[1], pImageChannel[2], pImageChannel[3]);

    for (i = 0; i < pImage->nChannels; i++)
        cvEqualizeHist(pImageChannel[i], pImageChannel[i]);

    cvMerge(pImageChannel[0], pImageChannel[1], pImageChannel[2], pImageChannel[3], pEquaImage);

    for (i = 0; i < pImage->nChannels; i++)
        cvReleaseImage(&pImageChannel[i]);

    return pEquaImage;
}
Author: kyyang28 | Project: opencv | Lines: 24 | Source: main.cpp

Example 4: sum_rgb

void sum_rgb(IplImage* src, IplImage* dst) {
	// Allocate individual image planes.
	IplImage* r = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage* g = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage* b = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

	// Temporary storage.
	IplImage* s = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

	// Split image into color planes. Note: IplImage data is stored in BGR
	// order, so the plane named r here actually receives the blue channel.
	cvSplit(src, r, g, b, NULL);

	// Add the weighted color planes. The two-step combination below yields
	// 2/9*r + 2/9*g + 1/3*b, i.e. only roughly equal weights.
	cvAddWeighted(r, 1. / 3., g, 1. / 3., 0.0, s);
	cvAddWeighted(s, 2. / 3., b, 1. / 3., 0.0, s);

	// Truncate values above 100.
	cvThreshold(s, dst, 100, 100, CV_THRESH_TRUNC);

	cvReleaseImage(&r);
	cvReleaseImage(&g);
	cvReleaseImage(&b);
	cvReleaseImage(&s);
}
Author: quchunguang | Project: test | Lines: 24 | Source: ch5_ex5_2.cpp
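For context, a minimal driver for sum_rgb might look like the sketch below; the file name and window title are placeholders, not part of the original example:

int main(int argc, char** argv)
{
    IplImage* src = cvLoadImage(argc > 1 ? argv[1] : "input.png");
    if (!src) return 1;

    // destination is single-channel, as sum_rgb expects
    IplImage* dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    sum_rgb(src, dst);

    cvNamedWindow("sum_rgb", 1);
    cvShowImage("sum_rgb", dst);
    cvWaitKey(0);

    cvDestroyWindow("sum_rgb");
    cvReleaseImage(&src);
    cvReleaseImage(&dst);
    return 0;
}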

Example 5: convRGB

int convRGB(IplImage* srcRGB, IplImage* dstRGB, CvSize sizIm)
{
	// create the single-channel images
	srcR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcG = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcB = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );

	srcRR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcGR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcBR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );

	// split into channels
	cvSplit(srcRGB, srcB, srcG, srcR, 0);
		
	// threshold each channel against its [min, max] bounds
	cvInRangeS(srcR, cvScalar(Rmin), cvScalar(Rmax), srcRR);
	cvInRangeS(srcG, cvScalar(Gmin), cvScalar(Gmax), srcGR);
	cvInRangeS(srcB, cvScalar(Bmin), cvScalar(Bmax), srcBR);

	// "ñêëåèâàåì" êàíàëû
	cvAnd( srcRR, srcGR, dstRGB );
	cvAnd( dstRGB, srcBR, dstRGB );

	// display the result in a window
	cvShowImage("RGBVideo", dstRGB);

	// release resources
	cvReleaseImage( &srcR );
	cvReleaseImage( &srcG );
	cvReleaseImage( &srcB );
	cvReleaseImage( &srcRR );
	cvReleaseImage( &srcGR );
	cvReleaseImage( &srcBR );
	
	return 0;
}
Author: awg21 | Project: sikle_lin | Lines: 36 | Source: improc.cpp

Example 6: cvInRangeS

void BoatDetecting::initilizeTracking(){
	cvInRangeS(hsv, cvScalar(0, smin, MIN(vmin, vmax), 0), cvScalar(180, 256, MAX(vmin, vmax), 0), mask);
	// 10,256,30
	
	cvSplit(hsv, hue, 0, 0, 0);
	if (!isTrackingInitialized){ // if the tracking window has not been initialized yet
		float max_val = 0.f;		
		cvSetImageROI(hue, selection);
		cvSetImageROI(mask, selection);		
		cvCalcHist(&hue, hist, 0, mask);
		cvGetMinMaxHistValue(hist, 0, &max_val, 0, 0);
		cvConvertScale(hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0);
		cvResetImageROI(hue);
		cvResetImageROI(mask);
		trackWindow = selection;
		isTrackingInitialized = true;

	}

	cvCalcBackProject(&hue, backproject, hist);
	//cvShowImage("Hue Channel",backproject);
	
	cvAnd(backproject, mask, backproject, 0);
	
	cvCamShift(backproject, trackWindow, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 15, 2), &trackComp, 0); // once initialized, trackWindow itself is used for tracking and is updated every frame
	

	//if (trackComp.rect.width<90 && trackComp.rect.y<200){
	//	trackWindow = trackComp.rect;
	//}
	//if (trackComp.rect.y>200)
	//{
	//	trackWindow = trackComp.rect;
	//}
	trackWindow = trackComp.rect;
}
Author: IvelynHsu | Project: BridgeWarningSystem | Lines: 36 | Source: BoatDetecting.cpp

Example 7: main

int main()
{
    const IplImage* im1 = cvLoadImage("302.png",0);
    const IplImage* im2 = cvLoadImage("303.png",0);
    //int w_s = 10;
    int w = im1->width;
    int h = im1->height;
    //printf("Width = %d\nHeight = %d\n",w,h);
    CvMat* vel = cvCreateMat(h,w,CV_32FC2);
    CvMat* velx = cvCreateMat(h,w,CV_32FC1);
    CvMat* vely = cvCreateMat(h,w,CV_32FC1);
    CvMat* u = cvCreateMat(h/10, w/10, CV_32FC1); // Averaged Optical flows
    CvMat* v = cvCreateMat(h/10, w/10, CV_32FC1);

   //printf("matDimU = %d %d\nMatDimVel = %d %d\n ",cvGetMatSize(u),cvGetMatSize(velx));
   //printf("Ptr = %d %d \n",im1->data.ptr,velx->data.ptr);
    //cvCalcOpticalFlowLK(im1,im2,cvSize(4,4),velx,vely);
    //cvCalcOpticalFlowFarneback(const CvArr* prev, const CvArr* next, CvArr* flow,
    //          double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags) flag means to use Gaussian smoothing
    cvCalcOpticalFlowFarneback(im1, im2, vel,0.5, 1, 2, 2, 2, 0.17, 0);//, iterations, poly_n, poly_sigma
    cvSplit(vel, velx, vely, NULL, NULL);
    average_flow(velx, u);
    average_flow(vely, v);

    /*//cvSave("u.xml", u);
    //cvSave("v.xml", v);*/
    saveMat(u,"ux.m");
    saveMat(v,"vy.m");

/*    CvMat* Big = cvCreateMat(50,50,CV_32FC1);
    cvSetIdentity(Big);
    CvMat* small = cvCreateMat(5,5,CV_32FC1);
    average_flow(Big,small);
    printMat(small);*/
    return 0;
}
Author: bnjasim | Project: OpenCV-OpticalFlow | Lines: 36 | Source: myFlow.cpp

Example 8: gst_gcs_transform_ip

static GstFlowReturn gst_gcs_transform_ip(GstBaseTransform * btrans, GstBuffer * gstbuf) 
{
  GstGcs *gcs = GST_GCS (btrans);

  GST_GCS_LOCK (gcs);

  //////////////////////////////////////////////////////////////////////////////
  // get image data from the input, which is RGBA or BGRA
  gcs->pImageRGBA->imageData = (char*)GST_BUFFER_DATA(gstbuf);
  cvSplit(gcs->pImageRGBA,   gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, gcs->pImgChX );
  cvCvtColor(gcs->pImageRGBA,  gcs->pImgRGB, CV_BGRA2BGR);


  //////////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////MOTION CUES INTEGR////
  //////////////////////////////////////////////////////////////////////////////

  //////////////////////////////////////////////////////////////////////////////
  // Step 1: bilateral filtering. It cannot run in-place, so use the scratch image.
  cvSmooth(gcs->pImgRGB, gcs->pImgScratch, CV_BILATERAL, 3, 50, 3, 0);
  // create GRAY image
  cvCvtColor(gcs->pImgScratch, gcs->pImgGRAY, CV_BGR2GRAY);

  // Frame-difference the GRAY image against the previous one.
  // Not intuitive: smooth the frames first, then take the difference.
  cvCopy( gcs->pImgGRAY,   gcs->pImgGRAY_copy,  NULL);
  cvCopy( gcs->pImgGRAY_1, gcs->pImgGRAY_1copy, NULL);
  get_frame_difference( gcs->pImgGRAY_copy, gcs->pImgGRAY_1copy, gcs->pImgGRAY_diff);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);
  cvDilate( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);


  //////////////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  // ghost mapping
  gcs->dstTri[0].x = gcs->facepos.x - gcs->facepos.width/2 ;
  gcs->dstTri[0].y = gcs->facepos.y - gcs->facepos.height/2;
  gcs->dstTri[1].x = gcs->facepos.x - gcs->facepos.width/2;
  gcs->dstTri[1].y = gcs->facepos.y + gcs->facepos.height/2;
  gcs->dstTri[2].x = gcs->facepos.x + gcs->facepos.width/2;
  gcs->dstTri[2].y = gcs->facepos.y + gcs->facepos.height/2;

  if( gcs->ghostfilename){
    cvGetAffineTransform( gcs->srcTri, gcs->dstTri, gcs->warp_mat );
    cvWarpAffine( gcs->cvGhostBwResized, gcs->cvGhostBwAffined, gcs->warp_mat );
  }




  //////////////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  // GrabCut algorithm preparation and running

  gcs->facepos.x = gcs->facepos.x - gcs->facepos.width/2;
  gcs->facepos.y = gcs->facepos.y - gcs->facepos.height/2;

  // create an IplImage  with the skin colour pixels as 255
  compose_skin_matrix(gcs->pImgRGB, gcs->pImg_skin);
  // And the skin pixels with the movement mask
  cvAnd( gcs->pImg_skin,  gcs->pImgGRAY_diff,  gcs->pImgGRAY_diff);
  //cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5, 5, 3, 3, CV_SHAPE_RECT,NULL), 1);
  cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7,7, 5,5, CV_SHAPE_RECT,NULL), 2);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5,5, 3,3, CV_SHAPE_RECT,NULL), 2);

  // if the incoming alpha is all 1's, ignore it: guards against no vibe (background segmenter) running before us
  if((0.75*(gcs->width * gcs->height) <= cvCountNonZero(gcs->pImgChX)))
    cvZero(gcs->pImgChX);
  // OR the input Alpha
  cvOr( gcs->pImgChX,  gcs->pImgGRAY_diff,  gcs->pImgGRAY_diff);


  //////////////////////////////////////////////////////////////////////////////
  // try to consolidate a single mask from all the sub-patches
  cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7,7, 5,5, CV_SHAPE_RECT,NULL), 3);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5,5, 3,3, CV_SHAPE_RECT,NULL), 4);

  //////////////////////////////////////////////////////////////////////////////
  // use either Ghost or boxes-model to create a PR foreground starting point in gcs->grabcut_mask
  if( gcs->ghostfilename)
    compose_grabcut_seedmatrix3(gcs->grabcut_mask, gcs->cvGhostBwAffined, gcs->pImgGRAY_diff  );
  else{
    // toss it all to the bbox creation function, together with the face position and size
    compose_grabcut_seedmatrix2(gcs->grabcut_mask, gcs->facepos, gcs->pImgGRAY_diff, gcs->facefound );
  }


  //////////////////////////////////////////////////////////////////////////////
#ifdef KMEANS
  gcs->num_clusters = 18; // keep it even to simplify integer arithmetics
  cvCopy(gcs->pImgRGB, gcs->pImgRGB_kmeans, NULL);
  posterize_image(gcs->pImgRGB_kmeans);
  create_kmeans_clusters(gcs->pImgRGB_kmeans, gcs->kmeans_points, gcs->kmeans_clusters, 
                         gcs->num_clusters, gcs->num_samples);
  adjust_bodybbox_w_clusters(gcs->grabcut_mask, gcs->pImgRGB_kmeans, gcs->num_clusters, gcs->facepos);
#endif //KMEANS


  //////////////////////////////////////////////////////////////////////////////
  if( gcs->debug < 70)
//......... some code omitted .........
Author: miguelao | Project: gst_plugins_tsunami | Lines: 101 | Source: gstgcs.c

Example 9: cvShowDFT1

CvMat* cvShowDFT1(IplImage* im, int dft_M, int dft_N,char* src)
{
    IplImage* realInput;
    IplImage* imaginaryInput;
    IplImage* complexInput;

    CvMat* dft_A, tmp;

    IplImage* image_Re;
    IplImage* image_Im;

    char str[80];

    double m, M;

    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

    cvScale(im, realInput, 1.0, 0.0);
    cvZero(imaginaryInput);
    cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

    dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

    // copy A to dft_A and pad dft_A with zeros
    cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
    cvCopy( complexInput, &tmp, NULL );
    if( dft_A->cols > im->width )
    {
        cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
        cvZero( &tmp );
    }

    // no need to pad bottom part of dft_A with zeros because of
    // use nonzero_rows parameter in cvDFT() call below

    cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

    strcpy(str,"DFT -");
    strcat(str,src);
    cvNamedWindow(str, 0);

    // Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );

    // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0);
    cvPow( image_Im, image_Im, 2.0);
    cvAdd( image_Re, image_Im, image_Re, NULL);
    cvPow( image_Re, image_Re, 0.5 );

    // Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
    cvLog( image_Re, image_Re ); // log(1 + Mag)

    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
    cvShowImage(str, image_Re);
    return(dft_A);
}
Author: hamiltondematos | Project: computervision | Lines: 63 | Source: main.cpp

Example 10: main


//......... some code omitted .........
    // Populate matrix
    for (y = 0; y < rowLength; y++) //populate array with values
    {
        for (x = 0; x < rowLength; x++) {
            if (sqrt((x - (int)(radius) ) * (x - (int)(radius) ) + (y - (int)(radius))
                     * (y - (int)(radius))) <= (int)(radius)) {
                //kernels[y * rowLength + x] = 255;
                kernels[y * rowLength + x] =1.0/norm;
                printf("%f ",1.0/norm);
            }
            else{
                kernels[y * rowLength + x] =0;
            }
        }
    }

    kernel= cvMat(rowLength, // number of rows
                  rowLength, // number of columns
                  CV_32FC1, // matrix data type
                  &kernels);
    k_image_hdr = cvCreateImageHeader( cvSize(rowLength,rowLength), IPL_DEPTH_32F,1);
    k_image = cvGetImage(&kernel,k_image_hdr);

    height = k_image->height;
    width = k_image->width;
    step = k_image->widthStep/sizeof(float);
    depth = k_image->depth;

    channels = k_image->nChannels;
    //data1 = (float *)(k_image->imageData);
    data1 = (uchar *)(k_image->imageData);
    cvNamedWindow("blur kernel", 0);
    cvShowImage("blur kernel", k_image);

    dft_M = cvGetOptimalDFTSize( im->height - 1 );
    dft_N = cvGetOptimalDFTSize( im->width - 1 );

    //dft_M1 = cvGetOptimalDFTSize( im->height+99 - 1 );
    //dft_N1 = cvGetOptimalDFTSize( im->width+99 - 1 );

    dft_M1 = cvGetOptimalDFTSize( im->height+3 - 1 );
    dft_N1 = cvGetOptimalDFTSize( im->width+3 - 1 );

    printf("dft_N1=%d,dft_M1=%d/n",dft_N1,dft_M1);

    // Perform DFT of original image
    dft_A = cvShowDFT1(im, dft_M1, dft_N1,"original");
    //Perform inverse (check)
    //cvShowInvDFT1(im,dft_A,dft_M1,dft_N1, "original"); - Commented as it overwrites the DFT

    // Perform DFT of kernel
    dft_B = cvShowDFT1(k_image,dft_M1,dft_N1,"kernel");
    //Perform inverse of kernel (check)
    //cvShowInvDFT1(k_image,dft_B,dft_M1,dft_N1, "kernel");- Commented as it overwrites the DFT

    // Multiply numerator with complex conjugate
    dft_C = cvCreateMat( dft_M1, dft_N1, CV_64FC2 );

    printf("%d %d %d %d/n",dft_M,dft_N,dft_M1,dft_N1);

    // Multiply DFT(blurred image) * complex conjugate of blur kernel
    cvMulSpectrums(dft_A,dft_B,dft_C,CV_DXT_MUL_CONJ);
    //cvShowInvDFT1(im,dft_C,dft_M1,dft_N1,"blur1");

    // Split Fourier in real and imaginary parts
    image_ReC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
    image_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
    complex_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 2);

    printf("%d %d %d %d/n",dft_M,dft_N,dft_M1,dft_N1);

    cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );

    // Compute A^2 + B^2 of denominator or blur kernel
    image_ReB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
    image_ImB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);

    // Split Real and imaginary parts
    cvSplit( dft_B, image_ReB, image_ImB, 0, 0 );
    cvPow( image_ReB, image_ReB, 2.0);
    cvPow( image_ImB, image_ImB, 2.0);
    cvAdd(image_ReB, image_ImB, image_ReB,0);

    val = cvScalarAll(kappa);
    cvAddS(image_ReB,val,image_ReB,0);

    //Divide Numerator/A^2 + B^2
    cvDiv(image_ReC, image_ReB, image_ReC, 1.0);
    cvDiv(image_ImC, image_ReB, image_ImC, 1.0);

    // Merge Real and complex parts
    cvMerge(image_ReC, image_ImC, NULL, NULL, complex_ImC);

    // Perform Inverse
    cvShowInvDFT1(im, (CvMat *)complex_ImC,dft_M1,dft_N1,"O/p Wiener k=1 rad=2");

    cvWaitKey(-1);
    return 0;
}
Author: hamiltondematos | Project: computervision | Lines: 101 | Source: main.cpp

Example 11: camKalTrack

//=========================================
CvRect camKalTrack(IplImage* frame, camshift_kalman_tracker& camKalTrk) {
//=========================================
	if (!frame)
		printf("Input frame empty!\n");

	cvCopy(frame, camKalTrk.image, 0);
	cvCvtColor(camKalTrk.image, camKalTrk.hsv, CV_BGR2HSV); // BGR to HSV

	if (camKalTrk.trackObject) {
		int _vmin = vmin, _vmax = vmax;
		cvInRangeS(camKalTrk.hsv, cvScalar(0, smin, MIN(_vmin,_vmax), 0), cvScalar(180, 256, MAX(_vmin,_vmax), 0), camKalTrk.mask); // MASK
		cvSplit(camKalTrk.hsv, camKalTrk.hue, 0, 0, 0); //  HUE
		if (camKalTrk.trackObject < 0) {
			float max_val = 0.f;
			boundaryCheck(camKalTrk.originBox, frame->width, frame->height);
			cvSetImageROI(camKalTrk.hue, camKalTrk.originBox); // for ROI
			cvSetImageROI(camKalTrk.mask, camKalTrk.originBox); // for camKalTrk.mask
			cvCalcHist(&camKalTrk.hue, camKalTrk.hist, 0, camKalTrk.mask); //
			cvGetMinMaxHistValue(camKalTrk.hist, 0, &max_val, 0, 0);
			cvConvertScale(camKalTrk.hist->bins, camKalTrk.hist->bins, max_val ? 255. / max_val : 0., 0); // scale bins to [0,255]
			cvResetImageROI(camKalTrk.hue); // remove ROI
			cvResetImageROI(camKalTrk.mask);
			camKalTrk.trackWindow = camKalTrk.originBox;
			camKalTrk.trackObject = 1;
			camKalTrk.lastpoint = camKalTrk.predictpoint = cvPoint(camKalTrk.trackWindow.x + camKalTrk.trackWindow.width / 2,
					camKalTrk.trackWindow.y + camKalTrk.trackWindow.height / 2);
			getCurrState(camKalTrk.kalman, camKalTrk.lastpoint, camKalTrk.predictpoint);// feed in the current state
		}
		//(x,y,vx,vy),
		camKalTrk.prediction = cvKalmanPredict(camKalTrk.kalman, 0);// prediction = kalman->state_post

		camKalTrk.predictpoint = cvPoint(cvRound(camKalTrk.prediction->data.fl[0]), cvRound(camKalTrk.prediction->data.fl[1]));

		camKalTrk.trackWindow = cvRect(camKalTrk.predictpoint.x - camKalTrk.trackWindow.width / 2, camKalTrk.predictpoint.y
				- camKalTrk.trackWindow.height / 2, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		camKalTrk.trackWindow = checkRectBoundary(cvRect(0, 0, frame->width, frame->height), camKalTrk.trackWindow);

		camKalTrk.searchWindow = cvRect(camKalTrk.trackWindow.x - region, camKalTrk.trackWindow.y - region, camKalTrk.trackWindow.width + 2
				* region, camKalTrk.trackWindow.height + 2 * region);

		camKalTrk.searchWindow = checkRectBoundary(cvRect(0, 0, frame->width, frame->height), camKalTrk.searchWindow);

		cvSetImageROI(camKalTrk.hue, camKalTrk.searchWindow);
		cvSetImageROI(camKalTrk.mask, camKalTrk.searchWindow);
		cvSetImageROI(camKalTrk.backproject, camKalTrk.searchWindow);

		cvCalcBackProject( &camKalTrk.hue, camKalTrk.backproject, camKalTrk.hist ); // back project

		cvAnd(camKalTrk.backproject, camKalTrk.mask, camKalTrk.backproject, 0);

		camKalTrk.trackWindow = cvRect(region, region, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		if (camKalTrk.trackWindow.height > 5 && camKalTrk.trackWindow.width > 5) {
			// calling CAMSHIFT
			cvCamShift(camKalTrk.backproject, camKalTrk.trackWindow, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
					&camKalTrk.trackComp, &camKalTrk.trackBox);

			/*cvMeanShift( camKalTrk.backproject, camKalTrk.trackWindow,
			 cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
			 &camKalTrk.trackComp);*/
		}
		else {
			camKalTrk.trackComp.rect.x = 0;
			camKalTrk.trackComp.rect.y = 0;
			camKalTrk.trackComp.rect.width = 0;
			camKalTrk.trackComp.rect.height = 0;
		}

		cvResetImageROI(camKalTrk.hue);
		cvResetImageROI(camKalTrk.mask);
		cvResetImageROI(camKalTrk.backproject);
		camKalTrk.trackWindow = camKalTrk.trackComp.rect;
		camKalTrk.trackWindow = cvRect(camKalTrk.trackWindow.x + camKalTrk.searchWindow.x, camKalTrk.trackWindow.y
				+ camKalTrk.searchWindow.y, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		camKalTrk.measurepoint = cvPoint(camKalTrk.trackWindow.x + camKalTrk.trackWindow.width / 2, camKalTrk.trackWindow.y
				+ camKalTrk.trackWindow.height / 2);
		camKalTrk.realposition->data.fl[0] = camKalTrk.measurepoint.x;
		camKalTrk.realposition->data.fl[1] = camKalTrk.measurepoint.y;
		camKalTrk.realposition->data.fl[2] = camKalTrk.measurepoint.x - camKalTrk.lastpoint.x;
		camKalTrk.realposition->data.fl[3] = camKalTrk.measurepoint.y - camKalTrk.lastpoint.y;
		camKalTrk.lastpoint = camKalTrk.measurepoint;//keep the current real position

		//measurement x,y
		cvMatMulAdd( camKalTrk.kalman->measurement_matrix/*2x4*/, camKalTrk.realposition/*4x1*/,/*measurementstate*/0, camKalTrk.measurement );
		cvKalmanCorrect(camKalTrk.kalman, camKalTrk.measurement);

		cvRectangle(frame, cvPoint(camKalTrk.trackWindow.x, camKalTrk.trackWindow.y), cvPoint(camKalTrk.trackWindow.x
				+ camKalTrk.trackWindow.width, camKalTrk.trackWindow.y + camKalTrk.trackWindow.height), CV_RGB(255,128,0), 4, 8, 0);
	}
	// set new selection if it exists
	if (camKalTrk.selectObject && camKalTrk.selection.width > 0 && camKalTrk.selection.height > 0) {
		cvSetImageROI(camKalTrk.image, camKalTrk.selection);
		cvXorS(camKalTrk.image, cvScalarAll(255), camKalTrk.image, 0);
		cvResetImageROI(camKalTrk.image);
	}

	return camKalTrk.trackWindow;
//......... some code omitted .........
Author: miguelao | Project: gst_plugins_tsunami | Lines: 101 | Source: camshift.cpp

Example 12: cvCvtColor

void E_Saturation_Value::Edit(ImgFile_Ptr pFile)
{
	m_pEditDialog->SetProgPos(0);

	// convert to HSV
	cvCvtColor( m_editImage, m_hsvImage, CV_BGR2HSV );
	m_pEditDialog->SetProgPos(10);

	// split into channels
	cvSplit( m_hsvImage, m_hueImage, m_saturationImage, m_valueImage, NULL );
	m_pEditDialog->SetProgPos(20);

	// add the saturation offset
	cvSet( m_addData, cvScalar(abs(s_)), NULL );
	m_pEditDialog->SetProgPos(25);
	if(s_ >=0){
		cvAdd( m_saturationImage, m_addData, m_saturationImage );
	}
	else{
		cvSub( m_saturationImage, m_addData, m_saturationImage );
	}
	m_pEditDialog->SetProgPos(35);

	// add the value (brightness) offset
	cvSet( m_addData, cvScalar(abs(v_)), NULL );
	m_pEditDialog->SetProgPos(45);
	if(v_ >= 0){
		cvAdd( m_valueImage, m_addData, m_valueImage );
	}
	else{
		cvSub( m_valueImage, m_addData, m_valueImage );
	}
	m_pEditDialog->SetProgPos(55);

	// merge the channels
	cvMerge( m_hueImage, m_saturationImage, m_valueImage, NULL, m_hsvImage);
	m_pEditDialog->SetProgPos(65);

	// convert from HSV back to BGR
	cvCvtColor( m_hsvImage, m_hsvImage, CV_HSV2BGR );
	m_pEditDialog->SetProgPos(75);

	ucvCvtColor(m_hsvImage, m_editedImage, CV_BGR2BGRA);

	// copy the result
	m_pEditNode->edit_img.ImgBlt(
		m_pEditNode->blt_rect.left - m_pEditNode->node_rect.left,
		m_pEditNode->blt_rect.top - m_pEditNode->node_rect.top,
		m_pEditNode->blt_rect.right - m_pEditNode->blt_rect.left,
		m_pEditNode->blt_rect.bottom - m_pEditNode->blt_rect.top,
		m_editedImage,
		0, 0,
		IPLEXT_RASTER_CODE::COPY,
		m_mask,
		0, 0);
	m_pEditDialog->SetProgPos(85);

	//
	m_pEditLayerHandle->Update( &(m_pEditNode->blt_rect ));
	m_pEditDialog->SetProgPos(100);
}
Author: fughz | Project: frayer | Lines: 61 | Source: E_Saturation_Value.cpp

Example 13: Java_org_siprop_opencv_OpenCV_faceDetect

JNIEXPORT
jbooleanArray
JNICALL
Java_org_siprop_opencv_OpenCV_faceDetect(JNIEnv* env,
										jobject thiz,
										jintArray photo_data1,
										jintArray photo_data2,
										jint width,
										jint height) {
	LOGV("Load desp.");

	int i, x, y;
	int* pixels;
	IplImage *frameImage;
	
	IplImage *backgroundImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *grayImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *differenceImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	
	IplImage *hsvImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 3 );
	IplImage *hueImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *saturationImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *valueImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *thresholdImage1 = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *thresholdImage2 = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *thresholdImage3 = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *faceImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	
	CvMoments moment;
	double m_00;
	double m_10;
	double m_01;
	int gravityX;
	int gravityY;

	jbooleanArray res_array;
	int imageSize;



	// Load Image
	pixels = env->GetIntArrayElements(photo_data1, 0);
	frameImage = loadPixels(pixels, width, height);
	if(frameImage == 0) {
		LOGV("Error loadPixels.");
		return 0;
	}
	
	
	cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
	
	
	pixels = env->GetIntArrayElements(photo_data2, 0);
	frameImage = loadPixels(pixels, width, height);
	if(frameImage == 0) {
		LOGV("Error loadPixels.");
		return 0;
	}
	cvCvtColor( frameImage, grayImage, CV_BGR2GRAY );
	cvAbsDiff( grayImage, backgroundImage, differenceImage );
	
	cvCvtColor( frameImage, hsvImage, CV_BGR2HSV );
	LOGV("Load cvCvtColor.");
	cvSplit( hsvImage, hueImage, saturationImage, valueImage, 0 );
	LOGV("Load cvSplit.");
	cvThreshold( hueImage, thresholdImage1, THRESH_BOTTOM, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
	cvThreshold( hueImage, thresholdImage2, THRESH_TOP, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY_INV );
	cvAnd( thresholdImage1, thresholdImage2, thresholdImage3, 0 );
	LOGV("Load cvAnd.");
	
	cvAnd( differenceImage, thresholdImage3, faceImage, 0 );
	
	cvMoments( faceImage, &moment, 0 );
	m_00 = cvGetSpatialMoment( &moment, 0, 0 );
	m_10 = cvGetSpatialMoment( &moment, 1, 0 );
	m_01 = cvGetSpatialMoment( &moment, 0, 1 );
	gravityX = m_10 / m_00;
	gravityY = m_01 / m_00;
	LOGV("Load cvMoments.");


	cvCircle( frameImage, cvPoint( gravityX, gravityY ), CIRCLE_RADIUS,
		 CV_RGB( 255, 0, 0 ), LINE_THICKNESS, LINE_TYPE, 0 );




	CvMat stub, *mat_image;
    int channels, ipl_depth;
    mat_image = cvGetMat( frameImage, &stub );
    channels = CV_MAT_CN( mat_image->type );

    ipl_depth = cvCvToIplDepth(mat_image->type);

	WLNonFileByteStream* m_strm = new WLNonFileByteStream();
    loadImageBytes(mat_image->data.ptr, mat_image->step, mat_image->width,
                             mat_image->height, ipl_depth, channels, m_strm);
	LOGV("Load loadImageBytes.");


//......... some code omitted .........
Author: 273k | Project: OpenCV-Android | Lines: 101 | Source: cvjni.cpp

Example 14: setLowThreshold

void setLowThreshold( float scale ) {
	cvConvertScale( IdiffF, Iscratch, scale );
	cvAdd( Iscratch, IavgF, IlowF );
	cvSplit( IlowF, Ilow1, Ilow2, Ilow3, 0 );
}
Author: Ashwaray | Project: Image-Processing-OpenCV | Lines: 5 | Source: threshold.c

Example 15: setHighThreshold

void setHighThreshold( float scale ) {
	cvConvertScale( IdiffF, Iscratch, scale );
	cvAdd( Iscratch, IavgF, IhiF );
	cvSplit( IhiF, Ihi1, Ihi2, Ihi3, 0 );
}
Author: Ashwaray | Project: Image-Processing-OpenCV | Lines: 5 | Source: threshold.c
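Examples 14 and 15 are the two halves of the classic frame-averaging background model: IavgF holds the accumulated mean frame, IdiffF the mean absolute frame-to-frame difference, and scaling that difference up or down produces per-channel acceptance bounds (Ihi1..Ihi3, Ilow1..Ilow3). As a hedged sketch of how such bounds are typically consumed, a segmentation routine might look like the following; the globals Igray1..Igray3 and Imaskt are assumed scratch images (not shown in the source), and the frame I is assumed to be a 3-channel float image matching the model:

// Hedged sketch: segment frame I against the learned background bounds.
// Igray1..Igray3 and Imaskt are assumed scratch images; Ilow*/Ihi* are the
// per-channel bound images filled by the two helpers above.
void backgroundDiff(IplImage *I, IplImage *Imask) {
    cvSplit(I, Igray1, Igray2, Igray3, 0);   // one float plane per channel

    // channel 1: pixels inside [low, hi] count as background (255)
    cvInRange(Igray1, Ilow1, Ihi1, Imask);

    // channels 2 and 3: OR their in-range masks into the result
    cvInRange(Igray2, Ilow2, Ihi2, Imaskt);
    cvOr(Imask, Imaskt, Imask, 0);
    cvInRange(Igray3, Ilow3, Ihi3, Imaskt);
    cvOr(Imask, Imaskt, Imask, 0);

    // invert so foreground pixels come out as 255
    cvSubRS(Imask, cvScalarAll(255), Imask, 0);
}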


Note: the cvSplit examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.