

C++ Mat_::convertTo Method Code Examples

This article collects typical usage examples of the C++ method Mat_::convertTo. If you are wondering what Mat_::convertTo does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the containing class Mat_.


The sections below present 10 code examples of the Mat_::convertTo method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
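
Before the numbered examples, here is a minimal, self-contained sketch of the method itself (a hypothetical illustration written for this article, not taken from any of the projects below). convertTo(m, rtype, alpha, beta) computes m(x,y) = saturate_cast<rtype>(alpha*src(x,y) + beta), combining a type change with an affine rescale; the alpha = 1/255 and alpha = 255 round trip seen in several examples below converts between 8-bit and normalized float images:

#include <opencv2/core.hpp>
using namespace cv;

int main()
{
    // an 8-bit grayscale image, all pixels set to 128
    Mat_<uchar> img8(240, 320, (uchar)128);

    // scale into [0,1] floats: dst = src * (1/255.0) + 0
    Mat_<float> img32;
    img8.convertTo(img32, CV_32F, 1.0/255.0);

    // ... do float-precision processing here ...

    // convert back; results are saturate_cast'ed into 0..255
    Mat_<uchar> img8back;
    img32.convertTo(img8back, CV_8U, 255.0);
    return 0;
}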

Example 1: OnBnClickedRate

void CRenderCenterDlg::OnBnClickedRate()
{
	// TODO: Add your control notification handler code here
	if(img1.size()!=img2.size())
	{
		MessageBox(TEXT("Two images are not the same size!"),TEXT("error"),MB_OK);
		return;
	}
	RateBlending lp;
	Mat_<Vec3f> l; img1.convertTo(l,CV_32F,1.0/255.0); // Vec3f means three channels, i.e. l[row][column][channel]
	Mat_<Vec3f> r; img2.convertTo(r,CV_32F,1.0/255.0);
	Mat_<float> m(l.rows,l.cols,0.0);
	m(Range::all(),Range(0,m.cols/2)) = 1.0; 
	Mat_<Vec3f> blend = lp.RateBlend(l, r, m);
	blend.convertTo(imgfusion,CV_8UC3,255);

	CWnd *pWnd=GetDlgItem(IDC_IMGFUSION);
	CDC *pDC=pWnd->GetDC();
	HDC hDC=pDC->GetSafeHdc();
	IplImage img=imgfusion;
	CvvImage cimg;
	cimg.CopyOf(&img);
	CRect rect;
	GetDlgItem(IDC_IMGFUSION)->GetClientRect(&rect);
	cimg.DrawToHDC(hDC,&rect);
}
Developer: gordongithub, Project: RadarSim, Lines: 26, Source: RenderCenterDlg+-+副本.cpp

Example 2: unique

Mat_< float > Saliency::saliency( const Mat_< Vec3b >& im ) const {
	// Convert the image to the lab space
	Mat_<Vec3f> rgbim, labim;
	im.convertTo( rgbim, CV_32F, 1.0/255. );
	cvtColor( rgbim, labim, CV_BGR2Lab );
	
	// Superpixel superpixel_( 300, 50.0 );
	// Do the abstraction
	Mat_<int> segmentation = superpixel_.segment( labim );
	std::vector< SuperpixelStatistic > stat = superpixel_.stat( labim, im, segmentation );
	
	// Compute the uniqueness
	std::vector<float> unique( stat.size(), 1 );
	if (settings_.uniqueness_) {
		if (settings_.filter_uniqueness_)
			unique = uniquenessFilter( stat );
		else
			unique = uniqueness( stat );
	}
	
	// Compute the distribution
	std::vector<float> dist( stat.size(), 0 );
	if (settings_.distribution_) {
		if (settings_.filter_distribution_)
			dist = distributionFilter( stat );
		else
			dist = distribution( stat );
	}
	
	// Combine the two measures
	std::vector<float> sp_saliency( stat.size() );
	for( int i=0; i<stat.size(); i++ )
		sp_saliency[i] = unique[i] * exp( - settings_.k_ * dist[i] );
	
	// Upsampling
	Mat_<float> r;
	if (settings_.upsample_)
		r = assignFilter( im, segmentation, stat, sp_saliency );
	else
		r = assign( segmentation, sp_saliency );
	
	// Rescale the saliency to [0..1]
	double mn, mx;
	minMaxLoc( r, &mn, & mx );
	r = (r - mn) / (mx - mn);
	
	// Increase the saliency values while the total is below the minimal threshold
	double m_sal = settings_.min_saliency_ * r.size().area();
	for( float sm = sum( r )[0]; sm < m_sal; sm = sum( r )[0] )
		r =  min( r*m_sal/sm, 1.0f );
	
	return r;
}
Developer: Domino2015, Project: ComputerVision_DRC, Lines: 53, Source: saliency.cpp

Example 3: opticalFlowRefine

void opticalFlow::opticalFlowRefine(Mat_<Vec2f> &flow_in, Mat_<uchar> &occMap,const Mat_<Vec3b> &weightColorImg, Mat_<Vec2f> &flow_refined)
{
	Mat_<float> flow_in_single[2];
	split(flow_in,flow_in_single);
	Mat_<float> flow_out_single[2];
	Mat_<float> occ_fgs;
	occMap.convertTo(occ_fgs,CV_32FC1);
	// Normalized (confidence-weighted) filtering: weight the flow by the
	// occlusion map, smooth flow and weights with FGS, then divide to renormalize
	multiply(flow_in_single[0],occ_fgs,flow_in_single[0]);
	multiply(flow_in_single[1],occ_fgs,flow_in_single[1]);
	FGS(flow_in_single[0],weightColorImg,flow_out_single[0], 0.01, 100);
	FGS(flow_in_single[1],weightColorImg,flow_out_single[1], 0.01, 100);
	FGS(occMap,weightColorImg,occ_fgs, 0.01, 100);
	divide(flow_out_single[0],occ_fgs,flow_out_single[0]);
	divide(flow_out_single[1],occ_fgs,flow_out_single[1]);

	merge(flow_out_single,2,flow_refined);
}
Developer: ems0000, Project: spm-bp, Lines: 18, Source: opticalFLow.cpp

Example 4: warpWholeImage

void PatchGenerator::warpWholeImage(const Mat& image, Mat& matT, Mat& buf,
                                    Mat& warped, int border, RNG& rng) const
{
    Mat_<double> T = matT;
    Rect roi(INT_MAX, INT_MAX, INT_MIN, INT_MIN);

    for( int k = 0; k < 4; k++ )
    {
        Point2f pt0, pt1;
        pt0.x = (float)(k == 0 || k == 3 ? 0 : image.cols);
        pt0.y = (float)(k < 2 ? 0 : image.rows);
        pt1.x = (float)(T(0,0)*pt0.x + T(0,1)*pt0.y + T(0,2));
        pt1.y = (float)(T(1,0)*pt0.x + T(1,1)*pt0.y + T(1,2));

        roi.x = std::min(roi.x, cvFloor(pt1.x));
        roi.y = std::min(roi.y, cvFloor(pt1.y));
        roi.width = std::max(roi.width, cvCeil(pt1.x));
        roi.height = std::max(roi.height, cvCeil(pt1.y));
    }

    // roi.width/roi.height currently hold the maximum corner x/y; convert
    // them into a true width and height (inclusive bounds, hence the +1)
    roi.width -= roi.x - 1;
    roi.height -= roi.y - 1;
    int dx = border - roi.x;
    int dy = border - roi.y;

    if( (roi.width+border*2)*(roi.height+border*2) > buf.cols )
        buf.create(1, (roi.width+border*2)*(roi.height+border*2), image.type());

    warped = Mat(roi.height + border*2, roi.width + border*2,
                 image.type(), buf.data);

    T(0,2) += dx;
    T(1,2) += dy;
    (*this)(image, T, warped, warped.size(), rng);

    if( T.data != matT.data )
        T.convertTo(matT, matT.type());
}
Developer: BenJamesbabala, Project: OpenTracking, Lines: 38, Source: PatchGenerator.cpp

Example 5: main

int main(int argc, char **argv)
{
    fstream fs("test.txt", ios::out);
    VO_CohenDaubechiesFeauveau voCDF;
    VO_Coiflets vocoiflets;
    VO_Daubechies vodaubechies;
    VO_Haar vohaar;
    VO_Symlets vosymlets;
    VO_Gabor vogabor;
    vogabor.VO_PrepareGaborKernel(  4,
                                    2.0f, 
                                    0.0f,
                                    0.0f,
                                    4,
                                    1.0f);
    vogabor.GetWindowFunc()->VO_DisplayWindowFuncKernel("gaborkernel.jpg");
    Mat iImg = imread ( "/usr/local/share/opencv/samples/c/lena.jpg", 0 );
    cout << iImg.channels() << endl;
    
    Mat_<float> inputImg;
    iImg.copyTo(inputImg);
    inputImg.convertTo(inputImg, CV_32FC1);
    Mat_<float> waveletImg;
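    // round-trip sanity check: unnormalized forward DFT, then inverse with
    // DFT_SCALE so the reconstruction matches the original image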
    cv::dft(inputImg, waveletImg );
    imwrite("dft.jpg", waveletImg);
    cv::idft(waveletImg, inputImg, DFT_SCALE);
    imwrite("idft.jpg", inputImg);
    Mat oImg = Mat::zeros(iImg.size(), iImg.type());
//    vogabor.VO_ForwardTransform(inputImg, waveletImg);
    vogabor.VO_ForwardTransform(inputImg, Point(256, 256), waveletImg);
    imwrite("gabored.jpg", waveletImg);
    vogabor.VO_BackwardTransform(waveletImg, inputImg);
    imwrite("igabored.jpg", inputImg);
    
    return 0;
}
Developer: HVisionSensing, Project: mc-vosm, Lines: 36, Source: integraltransform.cpp

Example 6: main


//......... part of the code omitted here .........
    // Display the current configuration
    cout << "\nDepth generator output mode:" << endl <<
        "FRAME_WIDTH      " << capture.get( CAP_PROP_FRAME_WIDTH ) << endl <<
        "FRAME_HEIGHT     " << capture.get( CAP_PROP_FRAME_HEIGHT ) << endl <<
        "FRAME_MAX_DEPTH  " << capture.get( CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
        "FPS              " << capture.get( CAP_PROP_FPS ) << endl <<
        "REGISTRATION     " << capture.get( CAP_PROP_OPENNI_REGISTRATION ) << endl;


    //---------------------------------------
    // Specify camera properties and geometry
    //--------------------------------------

    //TODO: Fill in the values for your setup.

    // Depth camera intrinsics
    Matx33f unregisteredCameraMatrix = Matx33f::eye();
    unregisteredCameraMatrix(0,0) = 570.0f;
    unregisteredCameraMatrix(1,1) = 570.0f;
    unregisteredCameraMatrix(0,2) = 320.0f-0.5f;
    unregisteredCameraMatrix(1,2) = 240.0f-0.5f;

    // NOTE: The depth distortion coefficients are currently not used by the Registration class.
    Vec<float, 5> unregisteredDistCoeffs(0,0,0,0,0);


    // RGB camera intrinsics
    Matx33f registeredCameraMatrix = Matx33f::eye();
    registeredCameraMatrix(0,0) = 570.0f;
    registeredCameraMatrix(1,1) = 570.0f;
    registeredCameraMatrix(0,2) = 320.0f-0.5f;
    registeredCameraMatrix(1,2) = 240.0f-0.5f;

    Vec<float, 5> registeredDistCoeffs(0,0,0,0,0);

    Size2i registeredImagePlaneSize = Size2i(640, 480);

    // The rigid body transformation between cameras.
    // Used as: uv_rgb = K_rgb * [R | t] * z * inv(K_ir) * uv_ir
    Matx44f registrationRbt = Matx44f::eye();
    registrationRbt(0,3) = .04;


    //------------------------------
    // Create our registration class
    //------------------------------
    oc::Registration registration(unregisteredCameraMatrix,
                                  unregisteredDistCoeffs,
                                  registeredCameraMatrix,
                                  registeredDistCoeffs,
                                  registrationRbt);

    for (;;) {

        Mat_<uint16_t> depthMap;

        if( !capture.grab() )
        {
            cout << "Can't grab depth." << endl;
            return -1;
        }
        else
        {
            if( capture.retrieve( depthMap, CAP_OPENNI_DEPTH_MAP ) )
            {

                // Actually perform the registration
                Mat_<uint16_t> registeredDepth;
                bool performDilation = false;
                registration.registerDepthToColor(depthMap,
                                                  registeredImagePlaneSize,
                                                  registeredDepth,
                                                  performDilation);


                //Display the unregistered and registered depth
                const float scaleFactor = 0.05f;
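                // depth is in millimetres, so a scale of 0.05 maps ~5000 mm
                // (5 m) to ~255, filling the 8-bit display range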
                {
                    Mat_<uint8_t> show;
                    depthMap.convertTo( show, CV_8UC1, scaleFactor );
                    imshow( "depth map", show );
                }
                {
                    Mat_<uint8_t> show;
                    registeredDepth.convertTo( show, CV_8UC1, scaleFactor );
                    imshow( "registered map", show );
                }

            }

        }

        if( waitKey( 1 ) >= 0 )
            break;
    }



    return 0;
}
Developer: Michael-Lfx, Project: RGBDTutorial-CVPR2014, Lines: 101, Source: main.cpp

Example 7: visualize_3d_points

void visualize_3d_points(const Mat_<cv::Vec3f>& points_3d, std::string name) {
  Mat points_3d_display;
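  // assuming the 3D coordinates are in metres, a gain of 100 maps ~2.55 m into the 8-bit range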
  points_3d.convertTo(points_3d_display, CV_8UC3, 100);
  imshow(name, points_3d_display);
}
Developer: mtamburrano, Project: ICP_renderer, Lines: 5, Source: main_icp.cpp

Example 8: R

    //
    // thanks again, Haris. i wouldn't be anywhere without your mind here.
    //
    Mat project3d(const Mat & test) const
    {
        PROFILEX("project3d");

        int mid = mdl.cols/2;
        int midi = test.cols/2;
        Rect R(mid-crop/2,mid-crop/2,crop,crop);
        Rect Ri(midi-crop/2,midi-crop/2,crop,crop);

        // get landmarks
        vector<Point2d> pts2d;
        getkp2d(test, pts2d, Ri);
        //cerr << "nose :" << pts2d[30].x << endl;

        // get pose mat for our landmarks
        Mat KP = pnp(test.size(), pts2d);

        // project img to head, count occlusions
        Mat_<uchar> test2(mdl.size(),127);
        Mat_<uchar> counts(mdl.size(),0);
	    for (int i=R.y; i<R.y+R.height; i++)
        {
            PROFILEX("proj_1");
	        for (int j=R.x; j<R.x+R.width; j++)
            {
                Mat1d p = project_vec(KP, i, j);
		        int x = int(p(0) / p(2));
		        int y = int(p(1) / p(2));
                if (y < 0 || y > test.rows - 1) continue;
                if (x < 0 || x > test.cols - 1) continue;
                // stare hard at the coord transformation ;)
                test2(i, j) = test.at<uchar>(y, x);
                // each point used more than once is occluded
                counts(y, x) ++;
	        }
        }

        // project the occlusion counts in the same way
        Mat_<uchar> counts1(mdl.size(),0);
	    for (int i=R.y; i<R.y+R.height; i++)
        {
            PROFILEX("proj_2");
	        for (int j=R.x; j<R.x+R.width; j++)
            {
                Mat1d p = project_vec(KP, i, j);
		        int x = int(p(0) / p(2));
		        int y = int(p(1) / p(2));
                if (y < 0 || y > test.rows - 1) continue;
                if (x < 0 || x > test.cols - 1) continue;
                counts1(i, j) = counts(y, x);
	        }
        }
        blur(counts1, counts1, Size(9,9));
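        // uchar subtraction saturates at 0, so subtracting the eye mask twice
        // forcibly zeroes the eye region in the occlusion counts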
        counts1 -= eyemask;
        counts1 -= eyemask;

        // count occlusions in left & right half
        Rect left (0,  0,mid,counts1.rows);
        Rect right(mid,0,mid,counts1.rows);
        double sleft=sum(counts1(left))[0];
        double sright=sum(counts1(right))[0];

        // fix occlusions with soft symmetry
        Mat_<double> weights;
        Mat_<uchar> sym = test2.clone();
        if (abs(sleft-sright)>symThresh)
        {
            PROFILEX("proj_3");

            // make weights
            counts1.convertTo(weights,CV_64F);

            Point p,P;
            double m,M;
            minMaxLoc(weights,&m,&M,&p,&P);

            double *wp = weights.ptr<double>();
            for (size_t i=0; i<weights.total(); ++i)
                wp[i] = (1.0 - 1.0 / exp(symBlend+(wp[i]/M)));
            // cerr << weights(Rect(mid,mid,6,6)) << endl;

            for (int i=R.y; i<R.y+R.height; i++)
            {
                if (sleft-sright>symThresh) // left side needs fixing
                {
                    for (int j=R.x; j<mid; j++)
                    {
                        int k = mdl.cols-j-1;
                        sym(i,j) = test2(i,j) * (1-weights(i,j)) + test2(i,k) * (weights(i,j));
                    }
                }
                if (sright-sleft>symThresh) // right side needs fixing
                {
                    for (int j=mid; j<R.x+R.width; j++)
                    {
                        int k = mdl.cols-j-1;
                        sym(i,j) = test2(i,j) * (1-weights(i,j)) + test2(i,k) * (weights(i,j));
//......... part of the code omitted here .........
Developer: memoarch, Project: uniform-lbp, Lines: 101, Source: frontalize.cpp

Example 9: main


//......... part of the code omitted here .........
			Mat_<uchar> gray;
			cvtColor(img, gray, CV_BGR2GRAY);
		
			// Don't resize if it's unneeded
			Mat_<uchar> img_scaled;		
			if(dimx != gray.cols || dimy != gray.rows)
			{
				resize( gray, img_scaled, Size( dimx, dimy ) );
				resize(img, disp, Size( dimx, dimy));
			}
			else
			{
				img_scaled = gray;
				disp = img.clone();
			}
		
			namedWindow("colour",1);

			// Get depth image
			if(readDepth)
			{
				char* dst = new char[100];
				std::stringstream sstream;
				//sstream << dDir << "\\depth%06d.png";
				sstream << dDirs[f_n] << "\\depth%05d.png";
				sprintf(dst, sstream.str().c_str(), frameProc + 1);
				Mat_<short> dImg = imread(string(dst), -1);
				if(!dImg.empty())
				{
					if(dimx != dImg.cols || dimy != dImg.rows)
					{
						Mat_<short> dImgT;
						resize(dImg, dImgT, Size( dimx, dimy));
						dImgT.convertTo(depth, CV_32F);
					}
					else
					{
						dImg.convertTo(depth, CV_32F);
					}
				}
				else
				{
					WARN_STREAM( "Can't find depth image" );
				}
			}

			Vec6d poseEstimateHaar;
			Matx66d poseEstimateHaarUncertainty;

			Rect faceRegion;

			// The start place where CLM should start a search (or if it fails, can use the frame detection)
			if(!trackingInitialised || (!success && ( frameProc  % 2 == 0)))
			{
				INFO_STREAM( "Attempting to initialise a face");
				// The tracker can return multiple head pose observation
				vector<Vec6d> poseEstimatesInitialiser;
				vector<Matx66d> covariancesInitialiser;			
				vector<Rect> regionsInitialiser;

				bool initSuccess = PoseDetectorHaar::InitialisePosesHaar(img_scaled, depth, poseEstimatesInitialiser, covariancesInitialiser, regionsInitialiser, classifier, fx, fy, cx, cy, haarParams);
					
				if(initSuccess)
				{
					INFO_STREAM( "Face(s) detected");
					if(poseEstimatesInitialiser.size() > 1)
Developer: AshwinRajendraprasad, Project: FacialAgeSynthesis, Lines: 67, Source: SimpleCLM.cpp

Example 10: MatchFeatures


//......... part of the code omitted here .........
			calcOpticalFlowPyrLK(img_1, img_2, corners, nextPts, status, err, Size(45,45));
			for (unsigned int i=0; i<corners.size(); i++) {
				if(status[i] == 1) {
#ifdef __SFM__DEBUG__
					line(outputflow, corners[i], nextPts[i], Scalar(0,255), 1);
#endif
//					imgpts1.push_back(KeyPoint(corners[i],1));
//					imgpts2.push_back(KeyPoint(nextPts[i],1));
					good_matches_.push_back(DMatch(imgpts1.size()-1,imgpts1.size()-1,1.0));
					keypoints_1.push_back(KeyPoint(corners[i],1));
					keypoints_2.push_back(KeyPoint(nextPts[i],1));
				}
			}
		}
		t = ((double)getTickCount() - t)/getTickFrequency();
		cout << "Done. (" << t <<"s)"<< endl;
#ifdef __SFM__DEBUG__
		imshow("flow", outputflow);
		waitKey(100);
		destroyWindow("flow");
#endif
	} 
	else if(use_horiz_disparity) 
	{		
		double downscale = 0.6;
		Mat small_im1; resize(img_1_orig,small_im1,Size(),downscale,downscale);
		Mat small_im2; resize(img_2_orig,small_im2,Size(),downscale,downscale);
		int numberOfDisparities = ((small_im1.cols/8) + 15) & -16;
		
		StereoSGBM sgbm;
		sgbm.preFilterCap = 63;
		sgbm.SADWindowSize = 3;
		
		int cn = img_1_orig.channels();
		
		sgbm.P1 = 8*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
		sgbm.P2 = 32*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
		sgbm.minDisparity = 0;
		sgbm.numberOfDisparities = numberOfDisparities;
		sgbm.uniquenessRatio = 10;
		sgbm.speckleWindowSize = 100;
		sgbm.speckleRange = 32;
		sgbm.disp12MaxDiff = 1;
		sgbm.fullDP = false;
		
		Mat_<short> disp;
		sgbm(small_im1, small_im2, disp);
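		// StereoSGBM produces fixed-point disparities scaled by 16,
		// hence the extra factor of 16 when normalizing to 8 bits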
		Mat disp8; disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));
#ifdef __SFM__DEBUG__
		imshow("disparity",disp8);
		waitKey(0);
		destroyWindow("disparity");
#endif		
		Mat outputflow; img_1_orig.copyTo(outputflow);
		Mat_<short> disp_orig_scale; resize(disp,disp_orig_scale,img_1.size());
		
		for (int x=0;x<disp_orig_scale.cols; x+=1) {
			for (int y=0; y<disp_orig_scale.rows; y+=1) {
				float _d = ((float)disp_orig_scale(y,x))/(16.0 * downscale);
				if (fabsf(_d) > 150.0f || fabsf(_d) < 5.0f) {
					continue; //discard strange points 
				}
				Point2f p(x,y),p1(x-_d,y);
#ifdef __SFM__DEBUG__
				circle(outputflow, p, 1, Scalar(0,255*_d/50.0), 1);
#endif
				if (x%10 == 0 && y%10 == 0) {
//					imgpts1.push_back(KeyPoint(p,1));
//					imgpts2.push_back(KeyPoint(p1,1));
					good_matches_.push_back(DMatch(imgpts1.size()-1,imgpts1.size()-1,1.0));
					keypoints_1.push_back(KeyPoint(p,1));
					keypoints_2.push_back(KeyPoint(p1,1));
				}
				fullpts1.push_back(KeyPoint(p,1));
				fullpts2.push_back(KeyPoint(p1,1));
			}
		}		
#ifdef __SFM__DEBUG__		
		imshow("outputflow", outputflow);
		waitKey(0);
		destroyWindow("outputflow");
#endif
	}
	
	//Draw matches
//	if(0) 
#ifdef __SFM__DEBUG__
	{
		//-- Draw only "good" matches
		Mat img_matches;
		drawMatches( img_1, keypoints_1, img_2, keypoints_2,
					good_matches_, img_matches, Scalar::all(-1), Scalar::all(-1),
					vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );		
		//-- Show detected matches
		imshow( "Good Matches", img_matches );
		waitKey(100);
		destroyWindow("Good Matches");
	}
#endif
}
Developer: AshwinRajendraprasad, Project: SfM-Toy-Library, Lines: 101, Source: FeatureMatching.cpp


Note: The Mat_::convertTo method examples on this page were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.