

C++ Mat::locateROI Method Code Examples

This article collects typical usage examples of the C++ Mat::locateROI method. If you are wondering what exactly Mat::locateROI does, how it is called, or what real-world uses of it look like, the hand-picked code examples below should help. You can also explore further usage examples of the Mat class to which this method belongs.


The following presents 10 code examples of the Mat::locateROI method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
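
Before the project examples, here is a minimal, self-contained sketch (not taken from any project below; the matrix dimensions and the small main() are purely illustrative) showing what Mat::locateROI reports for a submatrix: the size of the parent matrix and the submatrix's top-left offset inside it.

#include <opencv2/core/core.hpp>
#include <iostream>

int main()
{
    // A 100x200 parent matrix and a 20x10 ROI whose top-left corner is at (30, 40).
    cv::Mat whole(100, 200, CV_8UC1, cv::Scalar(0));
    cv::Mat roi = whole(cv::Rect(30, 40, 20, 10));

    // locateROI fills in the parent matrix size and the ROI's offset within it,
    // even though roi is just a lightweight header over the same data.
    cv::Size wholeSize;
    cv::Point ofs;
    roi.locateROI(wholeSize, ofs);

    std::cout << "parent: " << wholeSize.width << "x" << wholeSize.height
              << ", offset: (" << ofs.x << ", " << ofs.y << ")" << std::endl;
    // Prints: parent: 200x100, offset: (30, 40)
    return 0;
}

Several of the examples below use exactly this pair of outputs (whole size plus offset) to copy the full parent buffer and then reconstruct the ROI afterwards, for instance on the GPU side.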

Example 1: create

void cv::ocl::oclMat::upload(const Mat &m)
{
    CV_DbgAssert(!m.empty());
    Size wholeSize;
    Point ofs;
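    // locateROI retrieves the size of m's parent matrix and m's top-left offset
    // inside it, so the whole parent buffer is uploaded and the ROI is
    // re-expressed on the device via the offset computed below.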
    m.locateROI(wholeSize, ofs);
    int type = m.type();
    //if(m.channels() == 3)
    //type = CV_MAKETYPE(m.depth(), 4);
    create(wholeSize, type);

    //if(m.channels() == 3)
    //{
    //int pitch = GPU_MATRIX_MALLOC_STEP(wholeSize.width * 3 * m.elemSize1());
    //int err;
    //cl_mem temp = clCreateBuffer(clCxt->clContext,CL_MEM_READ_WRITE,
    //pitch*wholeSize.height,0,&err);
    //CV_DbgAssert(err==0);

    //openCLMemcpy2D(clCxt,temp,pitch,m.datastart,m.step,wholeSize.width*m.elemSize(),wholeSize.height,clMemcpyHostToDevice);
    //convert_C3C4(temp, *this, pitch);
    //}
    //else
    openCLMemcpy2D(clCxt, data, step, m.datastart, m.step, wholeSize.width * elemSize(), wholeSize.height, clMemcpyHostToDevice);

    rows = m.rows;
    cols = m.cols;
    offset = ofs.y * step + ofs.x * elemSize();
    download_channels = m.channels();
}
Developer ID: BRAINSia, Project: OpenCV_TruncatedSVN, Lines of code: 30, Source file: matrix_operations.cpp

Example 2: create

void cv::ocl::oclMat::upload(const Mat &m)
{
    if (!Context::getContext()->supportsFeature(FEATURE_CL_DOUBLE) && m.depth() == CV_64F)
    {
        CV_Error(Error::OpenCLDoubleNotSupported, "Selected device doesn't support double");
        return;
    }

    CV_DbgAssert(!m.empty());
    Size wholeSize;
    Point ofs;
    m.locateROI(wholeSize, ofs);
    create(wholeSize, m.type());

    if (m.channels() == 3)
    {
        int pitch = wholeSize.width * 3 * m.elemSize1();
        int tail_padding = m.elemSize1() * 3072;
        int err;
        cl_mem temp = clCreateBuffer(*(cl_context*)clCxt->getOpenCLContextPtr(), CL_MEM_READ_WRITE,
                                     (pitch * wholeSize.height + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
        openCLVerifyCall(err);

        openCLMemcpy2D(clCxt, temp, pitch, m.datastart, m.step, wholeSize.width * m.elemSize(), wholeSize.height, clMemcpyHostToDevice, 3);
        convert_C3C4(temp, *this);
        openCLSafeCall(clReleaseMemObject(temp));
    }
    else
        openCLMemcpy2D(clCxt, data, step, m.datastart, m.step, wholeSize.width * elemSize(), wholeSize.height, clMemcpyHostToDevice);

    rows = m.rows;
    cols = m.cols;
    offset = ofs.y * step + ofs.x * elemSize();
}
Developer ID: AngryNetBeans, Project: opencv, Lines of code: 34, Source file: matrix_operations.cpp

Example 3: create

void cv::ocl::oclMat::upload(const Mat &m)
{
    CV_DbgAssert(!m.empty());
    Size wholeSize;
    Point ofs;
    m.locateROI(wholeSize, ofs);
    //   int type = m.type();
    //   if(m.oclchannels() == 3)
    //{
    //	type = CV_MAKETYPE(m.depth(), 4);
    //}
    create(wholeSize, m.type());

    if(m.channels() == 3)
    {
        int pitch = wholeSize.width * 3 * m.elemSize1();
        int tail_padding = m.elemSize1() * 3072;
        int err;
        cl_mem temp = clCreateBuffer((cl_context)clCxt->oclContext(), CL_MEM_READ_WRITE,
                                     (pitch * wholeSize.height + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
        openCLVerifyCall(err);

        openCLMemcpy2D(clCxt, temp, pitch, m.datastart, m.step, wholeSize.width * m.elemSize(), wholeSize.height, clMemcpyHostToDevice, 3);
        convert_C3C4(temp, *this);
        //int* cputemp=new int[wholeSize.height*wholeSize.width * 3];
        //int* cpudata=new int[this->step*this->wholerows/sizeof(int)];
        //openCLSafeCall(clEnqueueReadBuffer(clCxt->impl->clCmdQueue, temp, CL_TRUE,
        //						0, wholeSize.height*wholeSize.width * 3* sizeof(int), cputemp, 0, NULL, NULL));
        //openCLSafeCall(clEnqueueReadBuffer(clCxt->impl->clCmdQueue, (cl_mem)data, CL_TRUE,
        //						0, this->step*this->wholerows, cpudata, 0, NULL, NULL));
        //for(int i=0;i<wholeSize.height;i++)
        //{
        //	int *a = cputemp+i*wholeSize.width * 3,*b = cpudata + i*this->step/sizeof(int);
        //	for(int j=0;j<wholeSize.width;j++)
        //	{
        //		if((a[3*j] != b[4*j])||(a[3*j+1] != b[4*j+1])||(a[3*j+2] != b[4*j+2]))
        //			printf("rows=%d,cols=%d,cputtemp=%d,%d,%d;cpudata=%d,%d,%d\n",
        //			i,j,a[3*j],a[3*j+1],a[3*j+2],b[4*j],b[4*j+1],b[4*j+2]);
        //	}
        //}
        //delete []cputemp;
        //delete []cpudata;
        openCLSafeCall(clReleaseMemObject(temp));
    }
    else
    {
        openCLMemcpy2D(clCxt, data, step, m.datastart, m.step, wholeSize.width * elemSize(), wholeSize.height, clMemcpyHostToDevice);
    }

    rows = m.rows;
    cols = m.cols;
    offset = ofs.y * step + ofs.x * elemSize();
    //download_channels = m.channels();
}
Developer ID: HVisionSensing, Project: Face-Expression-Recognition, Lines of code: 54, Source file: matrix_operations.cpp

Example 4: write_hdf5_image

void write_hdf5_image(H5File h5f, const char *name, const Mat &im)
{
    DSetCreatPropList cparms;
    hsize_t chunk_dims[2] = {256, 256};
    hsize_t dims[2];
    dims[0] = im.size().height;
    dims[1] = im.size().width;
  
    if (chunk_dims[0] > dims[0]) {
        chunk_dims[0] = dims[0];
    }
    if (chunk_dims[1] > dims[1]) {
        chunk_dims[1] = dims[1];
    }

    cparms.setChunk(2, chunk_dims);
    cparms.setShuffle();
    cparms.setDeflate(5);

    DataSet dataset = h5f.createDataSet(name, PredType::NATIVE_FLOAT,
                                        DataSpace(2, dims, dims),
                                        cparms);

    Mat image;
    if (im.type() !=  CV_32F)
        im.convertTo(image, CV_32F);
    else
        image = im;
    
    DataSpace imspace;
    float *imdata;
    if (image.isContinuous()) {
        imspace = dataset.getSpace(); // same size as 
        imspace.selectAll();
        imdata = image.ptr<float>();
    } else {
        // we are working with an ROI
        assert (image.isSubmatrix());
        Size parent_size; Point parent_ofs;
        image.locateROI(parent_size, parent_ofs);
        hsize_t parent_count[2];
        parent_count[0] = parent_size.height; parent_count[1] = parent_size.width;
        imspace.setExtentSimple(2, parent_count);
        hsize_t im_offset[2], im_size[2];
        im_offset[0] = parent_ofs.y; im_offset[1] = parent_ofs.x;
        im_size[0] = image.size().height; im_size[1] = image.size().width;
        imspace.selectHyperslab(H5S_SELECT_SET, im_size, im_offset);
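        // Step the data pointer back to the parent matrix origin so that the
        // hyperslab selected above (ROI offset and size within the parent extent)
        // addresses the correct elements of the underlying buffer.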
        imdata = image.ptr<float>() - parent_ofs.x - parent_ofs.y * parent_size.width;
    }
    dataset.write(imdata, PredType::NATIVE_FLOAT, imspace);
}
Developer ID: thouis, Project: align, Lines of code: 51, Source file: opencv_hdf5.cpp

Example 5: orientImage

/**
 * OrientImage - Readjust image orientation to be correctly upright
 */
void ImageReader::orientImage( cv::Mat &examImage, cv::Point2f &UL, cv::Point2f &UR, 
	cv::Point2f &LL, cv::Point2f &LR, float &widthRatio, float &heightRatio ) {
	// Set up lengths and widths
	float vLength = sqrt( pow( abs(LL.x - UL.x), 2 ) + pow( abs(LL.y - UL.y), 2 ) );
	float hLength = sqrt( pow( abs(UR.x - UL.x), 2 ) + pow( abs(UR.y - UL.y), 2 ) );
	// handle rotation issue (if on side - vertical length "less" than horizontal)
	if( vLength < hLength ) {
		float vTemp = vLength;
		vLength = hLength;
		hLength = vTemp;
	} 
	// Locates the ROI offset for its ul
	Point2f srcQuad[4], dstQuad[4];
	vector< Point2f > eRectPoints(4);
	eRectPoints[0] = UL;
	eRectPoints[1] = UR;
	eRectPoints[2] = LL;
	eRectPoints[3] = LR;
	Rect examRegion = minAreaRect( eRectPoints ).boundingRect();
	Size size;
	Point ofs;
	Mat examCopy = examImage( examRegion );
	examCopy.locateROI( size, ofs );
	// Set up points for the "source" array
	srcQuad[0] = Point2f( ofs.x + abs(UL.x - ofs.x), abs( UL.y - ofs.y ) );
	srcQuad[1] = Point2f( srcQuad[0].x + hLength, srcQuad[0].y );
	srcQuad[2] = Point2f( srcQuad[0].x, srcQuad[0].y + vLength );
	srcQuad[3] = Point2f( srcQuad[0].x + hLength, srcQuad[0].y + vLength );
	// Set up points for the "destination" array
	dstQuad[ 0 ] = UL;
	dstQuad[ 1 ] = UR;
	dstQuad[ 2 ] = LL;
	dstQuad[ 3 ] = LR;
	// Calc perspective transform, prepare the clone, & warp
	Mat warp_matrix = getPerspectiveTransform( srcQuad, dstQuad );
	warpPerspective( examImage.clone(), examImage, warp_matrix,
	 Size(examImage.cols * 1.5f, examImage.rows * 1.5f), WARP_INVERSE_MAP );
	// Resize examImage to only have coordinate values
	Rect rect( srcQuad[0], srcQuad[3] );
	Mat fittedImage = examImage( rect );
	examImage = fittedImage;
	// Recalculates size ratios
	widthRatio = examImage.cols / (mainUR.x - mainUL.x);
	heightRatio = examImage.rows / (mainLL.y - mainUL.y);
	// Reassign points
	UL = Point2f( 0, 0 );
	UR = Point2f( hLength, 0 );
	LL = Point2f( 0, vLength );
	LR = Point2f( hLength, vLength );
}
Developer ID: nikkoschaff, Project: GradesnapiOS, Lines of code: 53, Source file: ImageReader.cpp

Example 6: create

void cv::ocl::oclMat::upload(const Mat &m)
{
    CV_DbgAssert(!m.empty());
    Size wholeSize;
    Point ofs;
    m.locateROI(wholeSize, ofs);
    if(m.channels() == 3)
    {
        create(wholeSize, m.type());
        int pitch = wholeSize.width * 3 * m.elemSize1();
        int tail_padding = m.elemSize1() * 3072;
        int err;
        cl_mem temp;
        if(gDeviceMemType!=DEVICE_MEM_UHP && gDeviceMemType!=DEVICE_MEM_CHP){
            temp = clCreateBuffer((cl_context)clCxt->oclContext(), CL_MEM_READ_WRITE,
                                  (pitch * wholeSize.height + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
            openCLVerifyCall(err);
            openCLMemcpy2D(clCxt, temp, pitch, m.datastart, m.step,
                           wholeSize.width * m.elemSize(), wholeSize.height, clMemcpyHostToDevice, 3);
        }
        else{
            temp = clCreateBuffer((cl_context)clCxt->oclContext(), CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR,
                                  (pitch * wholeSize.height + tail_padding - 1) / tail_padding * tail_padding, m.datastart, &err);
            openCLVerifyCall(err);
        }


        convert_C3C4(temp, *this);
        openCLSafeCall(clReleaseMemObject(temp));
    }
    else
    {
        // try to use host ptr
        createEx(wholeSize, m.type(), gDeviceMemRW, gDeviceMemType, m.datastart);
        if(gDeviceMemType!=DEVICE_MEM_UHP && gDeviceMemType!=DEVICE_MEM_CHP)
            openCLMemcpy2D(clCxt, data, step, m.datastart, m.step,
                           wholeSize.width * elemSize(), wholeSize.height, clMemcpyHostToDevice);
    }

    rows = m.rows;
    cols = m.cols;
    offset = ofs.y * step + ofs.x * elemSize();
}
Developer ID: WillwoTechnologies, Project: opencv, Lines of code: 43, Source file: matrix_operations.cpp

Example 7: read_hdf5_image

void read_hdf5_image(H5File h5f, Mat &image_out, const char *name, const Rect &roi=Rect(0,0,0,0))
{
    DataSet dataset = h5f.openDataSet(name);
    DataSpace dspace = dataset.getSpace();
    assert (dspace.getSimpleExtentNdims() == 2);
    hsize_t dims[2];
    dspace.getSimpleExtentDims(dims);
    if ((roi.width == 0) && (roi.height == 0)) {
        image_out.create(dims[0], dims[1], CV_32F);
        dspace.selectAll();
    } else {
        image_out.create(roi.height, roi.width, CV_32F);
        hsize_t _offset[2], _size[2];
        _offset[0] = roi.y; _offset[1] = roi.x;
        _size[0] = roi.height; _size[1] = roi.width;
        dspace.selectHyperslab(H5S_SELECT_SET, _size, _offset);
    }
    
    DataSpace imspace;
    float *imdata;
    if (image_out.isContinuous()) {
        dims[0] = image_out.size().height; dims[1] = image_out.size().width;
        imspace = DataSpace(2, dims);
        imspace.selectAll();
        imdata = image_out.ptr<float>();
    } else {
        // we are working with an ROI
        assert (image_out.isSubmatrix());
        Size parent_size; Point parent_ofs;
        image_out.locateROI(parent_size, parent_ofs);
        hsize_t parent_count[2];
        parent_count[0] = parent_size.height; parent_count[1] = parent_size.width;
        imspace.setExtentSimple(2, parent_count);
        hsize_t im_offset[2], im_size[2];
        im_offset[0] = parent_ofs.y; im_offset[1] = parent_ofs.x;
        im_size[0] = image_out.size().height; im_size[1] = image_out.size().width;
        imspace.selectHyperslab(H5S_SELECT_SET, im_size, im_offset);
        imdata = image_out.ptr<float>() - parent_ofs.x - parent_ofs.y * parent_size.width;
    }
    dataset.read(imdata, PredType::NATIVE_FLOAT, imspace, dspace);
}
Developer ID: thouis, Project: align, Lines of code: 41, Source file: opencv_hdf5.cpp

Example 8: write_feature

void write_feature(H5File h5f, const Mat &image_in, const char *name)
{
    // make sure the sizes match
    assert (imsize == image_in.size());

    // make sure the image is in native float
    Mat image;
    if (image_in.type() !=  CV_32F)
        image_in.convertTo(image, CV_32F);
    else
        image = image_in;
    
    DataSet dataset = create_dataset(h5f, name);

    DataSpace imspace;
    float *imdata;
    if (image.isContinuous()) {
        imspace = dataset.getSpace(); // same size as 
        imspace.selectAll();
        imdata = image.ptr<float>();
    } else {
        // we are working with an ROI
        assert (image.isSubmatrix());
        Size parent_size; Point parent_ofs;
        image.locateROI(parent_size, parent_ofs);
        hsize_t parent_count[2];
        parent_count[0] = parent_size.height; parent_count[1] = parent_size.width;
        imspace.setExtentSimple(2, parent_count);
        hsize_t im_offset[2], im_size[2];
        im_offset[0] = parent_ofs.y; im_offset[1] = parent_ofs.x;
        im_size[0] = image.size().height; im_size[1] = image.size().width;
        imspace.selectHyperslab(H5S_SELECT_SET, im_size, im_offset);
        imdata = image.ptr<float>() - parent_ofs.x - parent_ofs.y * parent_size.width;
    }
    dataset.write(imdata, PredType::NATIVE_FLOAT, imspace);
}
Developer ID: Rhoana, Project: feature-computation, Lines of code: 36, Source file: opencv_hdf5.cpp

Example 9: crossCorr


//......... part of the code is omitted here .........
        int yofs = k*dftsize.height;
        Mat src = templ;
        Mat dst(dftTempl, Rect(0, yofs, dftsize.width, dftsize.height));
        Mat dst1(dftTempl, Rect(0, yofs, templ.cols, templ.rows));

        if( tcn > 1 )
        {
            src = tdepth == maxDepth ? dst1 : Mat(templ.size(), tdepth, &buf[0]);
            int pairs[] = {k, 0};
            mixChannels(&templ, 1, &src, 1, pairs, 1);
        }

        if( dst1.data != src.data )
            src.convertTo(dst1, dst1.depth());

        if( dst.cols > templ.cols )
        {
            Mat part(dst, Range(0, templ.rows), Range(templ.cols, dst.cols));
            part = Scalar::all(0);
        }
        dft(dst, dst, 0, templ.rows);
    }

    int tileCountX = (corr.cols + blocksize.width - 1)/blocksize.width;
    int tileCountY = (corr.rows + blocksize.height - 1)/blocksize.height;
    int tileCount = tileCountX * tileCountY;

    Size wholeSize = img.size();
    Point roiofs(0,0);
    Mat img0 = img;

    if( !(borderType & BORDER_ISOLATED) )
    {
        img.locateROI(wholeSize, roiofs);
        img0.adjustROI(roiofs.y, wholeSize.height-img.rows-roiofs.y,
                       roiofs.x, wholeSize.width-img.cols-roiofs.x);
    }
    borderType |= BORDER_ISOLATED;

    // calculate correlation by blocks
    for( i = 0; i < tileCount; i++ )
    {
        int x = (i%tileCountX)*blocksize.width;
        int y = (i/tileCountX)*blocksize.height;

        Size bsz(std::min(blocksize.width, corr.cols - x),
                 std::min(blocksize.height, corr.rows - y));
        Size dsz(bsz.width + templ.cols - 1, bsz.height + templ.rows - 1);
        int x0 = x - anchor.x + roiofs.x, y0 = y - anchor.y + roiofs.y;
        int x1 = std::max(0, x0), y1 = std::max(0, y0);
        int x2 = std::min(img0.cols, x0 + dsz.width);
        int y2 = std::min(img0.rows, y0 + dsz.height);
        Mat src0(img0, Range(y1, y2), Range(x1, x2));
        Mat dst(dftImg, Rect(0, 0, dsz.width, dsz.height));
        Mat dst1(dftImg, Rect(x1-x0, y1-y0, x2-x1, y2-y1));
        Mat cdst(corr, Rect(x, y, bsz.width, bsz.height));

        for( k = 0; k < cn; k++ )
        {
            Mat src = src0;
            dftImg = Scalar::all(0);

            if( cn > 1 )
            {
                src = depth == maxDepth ? dst1 : Mat(y2-y1, x2-x1, depth, &buf[0]);
                int pairs[] = {k, 0};
Developer ID: 2december, Project: opencv, Lines of code: 67, Source file: templmatch.cpp

Example 10: trackBlobs

void TrackerCvBlobsLib::trackBlobs(const Mat &mat, const Mat &areaMask, bool history, std::vector<Area> *pareas)
{
	double min_area = *m_pmin_area;
	double max_area = *m_pmax_area;
	double max_radius_2 = *m_pmax_radius * *m_pmax_radius;
	double x, y, min_x, min_y, max_x, max_y;
	double min_depth,max_depth;
	Scalar sdepth, sstddev;
	cBlob temp;
	bool new_hand(true);
	int mat_area(mat.size().width*mat.size().height);

	// we will convert the matrix object passed from our cFilter class to an object of type IplImage for calling the CBlobResult constructor
	IplImage img;
	IplImage areaImg;

	// storage of the current blobs and the blobs from the previous frame
	Size s;	Point p;
	mat.locateROI(s,p);
	areaImg = areaMask;

	// convert our OpenCV matrix object to one of type IplImage
	img = mat;

	// cvblobslib blob extraction
	blob_result = CBlobResult(&img, NULL, 1/*img∈{0,255}->thresh unimportant*/, false);
	blob_result.Filter(blob_result, B_EXCLUDE, CBlobGetArea(), B_LESS, min_area); 
	blob_result.Filter(blob_result, B_EXCLUDE, CBlobGetArea(), B_GREATER, max_area);

	// clear the blobs from two frames ago
	blobs_previous.clear();
	
	// before we populate the blobs vector with the current frame, we need to store the live blobs in blobs_previous
	for (int i = 0; i < blobs.size(); i++)
		if (blobs[i].event != BLOB_UP)
			blobs_previous.push_back(blobs[i]);


	// populate the blobs vector with the current frame
	blobs.clear();
	for (int i = 0; i < blob_result.GetNumBlobs(); i++) {
		current_blob = blob_result.GetBlob(i);

		x     = XCenter(current_blob)/*+m_pSettingKinect->m_kinectProp.roi.x*/;
		y     = YCenter(current_blob)/*+m_pSettingKinect->m_kinectProp.roi.y*/;

//		temp.areaid = areaMask.at<uchar>((int)x+p.x,(int)y+p.y);//?!not works
		temp.areaid = (uchar) areaImg.imageData[ ((int)x+p.x) + ((int)y+p.y)*areaMask.size().width];//works
		if( temp.areaid == 0 ) continue;

		min_x = MinX(current_blob);
		min_y = MinY(current_blob);
		max_x = MaxX(current_blob);
		max_y = MaxY(current_blob);

		if( (max_x-min_x)*(max_y-min_y) > 0.9*mat_area) continue;// fix blob detection issue?!

		temp.location.x = temp.origin.x = x;
		temp.location.y = temp.origin.y = y;
		temp.min.x = min_x; temp.min.y = min_y;
		temp.max.x = max_x; temp.max.y = max_y;

		//Rect r(min_x+p.x,min_y+p.x, max_x-min_y, max_y-min_y);
		//Rect r(min_x,min_y, max_x-min_x, max_y-min_y);//width, height +1?!
		Rect r( x-3, y-3, min(7,max_x-min_x), min(7, max_y-min_y));


		//z = mean( mat(r), mat(r) )[0];/* mean is not good. The blob can include many pixel behind the frame depth*/

		/* Depth detection. The measurement method is flexible. */
		if( m_pSettingKinect->m_kinectProp.areaThresh ){
			/* Mean is ok, because all pixels of the blob are in front of the frame. */
			max_depth = mean( mat(r), mat(r) )[0]+4;/*correct blur(1) and area thresh shift (3)*/
			//meanStdDev( mat(r), sdepth, sstddev, mat(r) );
			//max_depth = sdepth[0]+3*sstddev[0];
			//minMaxLoc( mat(r), &min_depth, &max_depth, NULL, NULL, mat(r) );

		}else	if( pareas != NULL){
			/* Remove values behind the area depth and count mean of rest.
				This is problematic/choppy if to many pixels are removed.
			*/
			max_depth = max( (*pareas)[temp.areaid-1].depth-22, 
					mean( mat(r), mat(r)>(*pareas)[temp.areaid-1].depth-2 )[0] + 1);

		}else{
			/* Very few information. Use maximum of blob. (Choppy).
			 * Can be improved, if mean of i.e. 10 biggest values is used
			 * minMaxLoc require filtered/blured images.
			 * */
			//max_depth = 0;
			minMaxLoc( mat(r), &min_depth, &max_depth, NULL, NULL, mat(r) );
		}
		//printf("Compared depth of area/blob: %i %f\n",(*pareas)[temp.areaid-1].depth ,max_depth);

		/* Compare depth of hand with depth of area and throw blob away if hand to far away. */
		if(pareas != NULL && max_depth - (*pareas)[temp.areaid-1].depth < -1 ){
			//printf("Hand not reached area depth.\n");
			continue ;
		}

//......... part of the code is omitted here .........
Developer ID: StuartGuo, Project: KinectGrid, Lines of code: 101, Source file: TrackerCvBlobsLib.cpp


Note: The Mat::locateROI examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.