C++ Mat_ Class Code Examples

This article collects typical usage examples of the C++ Mat_ class from real projects. If you are wondering what Mat_ is for, how to use it, or what Mat_ code looks like in practice, the curated examples below may help.

The following presents 15 code examples of the Mat_ class, ordered roughly by popularity.
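Before the project examples, here is a minimal, self-contained sketch of the basic Mat_ usage they all rely on: typed construction, operator()(row, col) element access, matrix expressions, and multi-channel element types. It is written for illustration only and is not taken from any of the projects below.

#include <opencv2/core/core.hpp>
#include <iostream>

int main()
{
    // Typed matrix: 3x3 of double, zero-initialized.
    cv::Mat_<double> A = cv::Mat_<double>::zeros(3, 3);

    // operator()(row, col) gives typed element access without .at<double>().
    A(0, 0) = 1.0;
    A(1, 1) = 2.0;
    A(2, 2) = 3.0;

    // Mat_ interoperates with cv::Mat and supports matrix expressions.
    cv::Mat_<double> B = cv::Mat_<double>::eye(3, 3);
    cv::Mat_<double> C = A * B.t() + 0.5 * B;

    // Multi-channel element types work too, e.g. a dense field of 2D points.
    cv::Mat_<cv::Point2f> flow(2, 2, cv::Point2f(0.f, 0.f));
    flow(0, 1) = cv::Point2f(1.5f, -0.5f);

    std::cout << "C = " << std::endl << C << std::endl;
    return 0;
}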

Example 1: findEssentialMatrix

void findEssentialMatrix(MFramePair& pair, Mat_<double> K) {
    vector<Point2f> k1, k2,n1,n2;
    vector<int> usedIndex1,usedIndex2;
    vector<uchar> status(pair.imgpts1.size());
    Mat F = findFundamentalMat(pair.matchPts1, pair.matchPts2, CV_FM_RANSAC, 0.2, 0.9,
                               status);
    Mat_<double> E;
    E = K.t() * F * K; //according to HZ (9.12)
    for (unsigned int i = 0; i < status.size(); i++) { // queryIdx is the "left" image
        if (status[i]) {
            usedIndex1.push_back(pair.matchedIndex1[i]);
            k1.push_back(pair.matchPts1[i]);
            usedIndex2.push_back(pair.matchedIndex2[i]);
            k2.push_back(pair.matchPts2[i]);
        }
    }
    correctMatches(F,k1,k2,n1,n2);
    pair.matchPts1 = n1;
    pair.matchPts2 = n2;
    pair.matchedIndex1=usedIndex1;
    pair.matchedIndex2=usedIndex2;
    pair.F = F;
    pair.E = E;
}
Developer: caomw, Project: AndroidSFMDemo, Lines: 24, Source: Utils.cpp

Example 2: sampleIdx

float TrainableStatModel::leaveOneOutCrossValidation(const Mat_<float> &samples, const Mat_<int> &classes) {
	int correctResults = 0;
	Mat_<int> sampleIdx(samples.rows - 1, 1);

	for (int i = 1; i < samples.rows; i++) {
		sampleIdx(i - 1, 0) = i;
	}

	for (int i = 0; i < samples.rows; i++) {
		this->clear();
		this->train(samples, classes, sampleIdx);
		int actual = (int)this->predict(samples.row(i));

		if (actual == classes(i,0)) {
			correctResults++;
		}

		cout<<"actual = "<<actual<<", expected = "<<classes(i,0)<<endl;

		if (i < sampleIdx.rows) { // guard: sampleIdx has samples.rows - 1 entries
			sampleIdx(i, 0) = i;  // put sample i back so that sample i + 1 is left out in the next fold
		}
	}

	return (float)correctResults/(float)samples.rows;
}
Developer: alexisVallet, Project: animation-character-identification, Lines: 24, Source: TrainableStatModel.cpp

Example 3: drawOpticalFlow

	static void drawOpticalFlow(const Mat_<Point2f>& flow, Mat& dst, float maxmotion = -1)
	{
	    dst.create(flow.size(), CV_8UC3);
	    dst.setTo(Scalar::all(0));
	
	    // determine motion range:
	    float maxrad = maxmotion;
	
	    if (maxmotion <= 0)
	    {
	        maxrad = 1;
	        for (int y = 0; y < flow.rows; ++y)
	        {
	            for (int x = 0; x < flow.cols; ++x)
	            {
	                Point2f u = flow(y, x);
	
	                if (!isFlowCorrect(u))
	                    continue;
	
	                maxrad = max(maxrad, sqrt(u.x * u.x + u.y * u.y));
	            }
	        }
	    }
	
	    for (int y = 0; y < flow.rows; ++y)
	    {
	        for (int x = 0; x < flow.cols; ++x)
	        {
	            Point2f u = flow(y, x);
	
	            if (isFlowCorrect(u))
	                dst.at<Vec3b>(y, x) = computeColor(u.x / maxrad, u.y / maxrad);
	        }
	    }
	}
Developer: pedrodparkes, Project: tests, Lines: 36, Source: tvl1_optical_flow.cpp
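drawOpticalFlow above expects a dense CV_32FC2 flow field viewed as a Mat_<Point2f>, and it depends on the isFlowCorrect and computeColor helpers defined elsewhere in the same sample file. Below is a hedged usage sketch that must live in the same translation unit (the function is declared static); using calcOpticalFlowFarneback to produce the flow is only an assumption, since the sample itself computes TV-L1 flow.

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/tracking.hpp>

using namespace cv;

// Sketch only: assumes drawOpticalFlow / isFlowCorrect / computeColor from the sample are visible here.
static void visualizeFlow(const Mat& frame0, const Mat& frame1)
{
    Mat gray0, gray1;
    cvtColor(frame0, gray0, CV_BGR2GRAY);
    cvtColor(frame1, gray1, CV_BGR2GRAY);

    Mat_<Point2f> flow;                                      // one (dx, dy) vector per pixel
    calcOpticalFlowFarneback(gray0, gray1, flow, 0.5, 3, 15, 3, 5, 1.2, 0);

    Mat vis;
    drawOpticalFlow(flow, vis);                              // color-codes direction and magnitude
    imshow("flow", vis);
    waitKey(0);
}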

Example 4: input_data

bool SimpleNN::predict(const Mat_<double> &test_X, Mat_<double> &result, string &err_msg){
    
    Mat_<double> input_data = test_X.reshape(0, test_X.rows*test_X.cols); // make it column vector
    
    if (input_data.rows != this->structure[0]){
        err_msg = "wrong input size";
        return false;
    }
    
    for (int row_index = 1; row_index < this->layers[0].rows; ++row_index){
        this->layers[0](row_index, 0) = input_data(row_index-1, 0);
    }

    int num_layers = (int) this->layers.size();
    
    for (int layer_id = 0; layer_id < num_layers - 2; ++layer_id){
        Mat_<double> product = tanh(this->weights[layer_id]*this->layers[layer_id]);
        
        for (int row_index = 1; row_index < this->layers[layer_id+1].rows; ++row_index){
            this->layers[layer_id+1](row_index, 0) = product(row_index-1, 0);
        }
    }
    
    // compute the output layer
    {
        int layer_id = num_layers - 2;
        this->layers[layer_id + 1] = tanh(this->weights[layer_id] * this->layers[layer_id]);
    }
    
    result = this->layers[num_layers - 1]; // return last layers (output layer).
    cout << "result:\n" << result << endl;
    
    err_msg = "";
    
    return true;
}
Developer: dboyliao, Project: Talks_MLDM, Lines: 36, Source: SimpleNN.cpp
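Note that tanh applied to a whole Mat_<double> expression is a helper from the SimpleNN project, not an OpenCV function. A minimal sketch of what such an element-wise tanh might look like, built on cv::exp; this is an assumption about the helper, not the project's actual code.

#include <opencv2/core/core.hpp>

// Element-wise tanh for a matrix of doubles: tanh(x) = (exp(2x) - 1) / (exp(2x) + 1).
// Kept deliberately simple; very large |x| would need a guarded formulation to avoid inf/inf.
static cv::Mat_<double> tanh(const cv::Mat_<double>& x)
{
    cv::Mat_<double> e2x;
    cv::exp(2.0 * x, e2x);
    return (e2x - 1.0) / (e2x + 1.0);   // per-element division
}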

Example 5: optical_flow

Mat optical_flow(const Mat_<float>& ImgA, const Mat_<float>& ImgB, 
		 int num_it, float threshold) {

	// Compute the Gaussian pyramid
	int nl = 5;
	float ds = 0.5;
	stack<pair<Mat, Mat> > gp = compute_gaussian_pyramids(ImgA, ImgB, nl, ds);
	
	Mat_<float> u = Mat::zeros(ImgA.size(), CV_32F);
	Mat_<float> v = Mat::zeros(ImgA.size(), CV_32F);
	while (!gp.empty()) {
	    Mat imgA = (gp.top()).first;
	    Mat imgB = (gp.top()).second;
	    
	    // Warp the second image using the current flow estimate.
	    Mat_<float> imgBw = imgB.clone();
	   
	    /* Compute warping here from u and v. */
	    imgBw = compute_warp(imgB, u, v, ds);

	    /* Compute the derivatives. */
	    Mat_<float> Ix, Iy, It;
	    compute_derivatives(imgBw, imgA, Ix, Iy, It); // papers       
	    Mat_<float> du = Mat::zeros(imgA.size(), CV_32F);
	    Mat_<float> dv = Mat::zeros(imgA.size(), CV_32F);
	    for (int i = 0; i < 500; i ++) 
		  iterative_computation(du, dv, Ix, Iy, It);

	    u = u - du;
	    v = v - dv;
	    gp.pop();
	}
	
	Mat Mflow = color_map(u, v);
	return Mflow;
}
Developer: qingchen1984, Project: Spring15_3d_photography, Lines: 36, Source: main.cpp
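A possible way to call the routine above, assuming it is compiled together with its compute_gaussian_pyramids, compute_warp, compute_derivatives, iterative_computation and color_map helpers; the image file names are placeholders.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;

// Declared in the project (see the listing above).
Mat optical_flow(const Mat_<float>& ImgA, const Mat_<float>& ImgB, int num_it, float threshold);

int main()
{
    // Two consecutive frames as single-channel float images scaled to [0, 1].
    Mat_<float> a, b;
    imread("frame0.png", 0).convertTo(a, CV_32F, 1.0 / 255.0);
    imread("frame1.png", 0).convertTo(b, CV_32F, 1.0 / 255.0);

    Mat flowVis = optical_flow(a, b, /*num_it=*/500, /*threshold=*/0.01f);
    imshow("optical flow", flowVis);
    waitKey(0);
    return 0;
}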

Example 6: scannls

void NNLSOptimizer::scannls(const Mat& A, const Mat& b,Mat &x)
{
    int iter = 0;
    int m = A.size().height;
    int n = A.size().width;
    Mat_<double> AT = A.t();
    double error = 1e-8;
    Mat_<double> H = AT*A;
    Mat_<double> f = -AT*b;

    Mat_<double> x_old = Mat_<double>::zeros(n,1);
    Mat_<double> x_new = Mat_<double>::zeros(n,1);

    Mat_<double> mu_old = Mat_<double>::zeros(n,1);
    Mat_<double> mu_new = Mat_<double>::zeros(n,1);
    Mat_<double> r = Mat_<double>::zeros(n,1);
    f.copyTo(mu_old);

    while(iter < NNLS_MAX_ITER)
    {
        iter++;
        for(int k=0;k<n;k++)
        {
            x_old.copyTo(x_new);
            x_new(k,0) = std::max(0.0, x_old(k,0) - (mu_old(k,0)/H(k,k)) );

            if(x_new(k,0) != x_old(k,0))
            {
                r = mu_old + (x_new(k,0) - x_old(k,0))*H.col(k);
                r.copyTo(mu_new);
            }
            x_new.copyTo(x_old);
            mu_new.copyTo(mu_old);
        }

        if(eKKT(H,f,x_new,error) == true)
        {            
            break;
        }
    }
    x_new.copyTo(x);
}
Developer: dem42, Project: exp_tran, Lines: 42, Source: nnlsoptimizer.cpp
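scannls is a coordinate-descent solver for the nonnegative least-squares problem min ||Ax - b|| subject to x >= 0, working on H = A^T A and f = -A^T b; the eKKT call (not shown here) presumably tests the KKT optimality conditions of that quadratic program. The sketch below shows what such a test could look like under that assumption; it is a guess at eKKT's role, not the project's code.

#include <opencv2/core/core.hpp>
#include <cmath>

// KKT conditions for min 0.5 x^T H x + f^T x subject to x >= 0:
// x >= 0, gradient g = Hx + f >= 0, and x(k) * g(k) == 0 for every k.
static bool kktSatisfied(const cv::Mat_<double>& H, const cv::Mat_<double>& f,
                         const cv::Mat_<double>& x, double tol)
{
    cv::Mat_<double> g = H * x + f;                          // gradient of the quadratic objective
    for (int k = 0; k < x.rows; ++k)
    {
        if (x(k, 0) < -tol) return false;                    // primal feasibility
        if (g(k, 0) < -tol) return false;                    // dual feasibility
        if (std::abs(x(k, 0) * g(k, 0)) > tol) return false; // complementary slackness
    }
    return true;
}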

Example 7: GaussianBlur

void SegmenterHumanSimple::segment(const cv::Mat& img, Mat_<uchar>& mask)
{
	Mat imgBGR;
	Mat imgLAB;
	Mat imgBGRo;

	float rate = 500.0f/img.cols;

	GaussianBlur(img,imgBGRo,Size(),0.8,0.8);

	vector<Rect> faces;

	resize(imgBGRo,imgBGRo,Size(),rate,rate);
	cv::CascadeClassifier faceModel(this->_m_filenameFaceModel);
	faceModel.detectMultiScale(imgBGRo,faces);

	imgBGRo.convertTo( imgBGR, CV_32F, 1.0/255. );

	cvtColor( imgBGR, imgLAB, CV_BGR2Lab );

	Superpixel sp(1000,1,5);

	Mat_<int> segmentation = sp.segment(imgLAB);
	vector<SuperpixelStatistic> stat = sp.stat(imgLAB,imgBGR,segmentation);

	Mat_<float> prob;
	this->getPixelProbability(imgBGRo,prob,faces);
	Mat_<float> sprob;
	UtilsSuperpixel::Stat(segmentation,prob,stat,sprob);

	Mat_<int> initial(int(stat.size()),1);
	initial.setTo(1,sprob>0.5);
	initial.setTo(0,sprob<=0.5);
	Mat_<float> probaColor;
	int myx = cv::countNonZero(initial);
	this->_getColorProba(stat,initial,probaColor);

	Mat_<float> fgdInit,bgdInit,fgdColor,bgdColor;
	this->_prob2energy(sprob,fgdInit,bgdInit);
	this->_prob2energy(probaColor,fgdColor,bgdColor);
	Mat_<float> fgdEnergy, bgdEnergy;
	
	fgdEnergy = fgdInit + fgdColor;
	bgdEnergy = bgdInit + bgdColor;

	Mat_<int> label;
	mask.create(imgBGRo.rows,imgBGRo.cols);

	UtilsSegmentation::MaxFlowSuperpixel(stat,fgdEnergy,bgdEnergy,50.0,label);

	for( int i=0;i<mask.rows;i++)
	{
		for(int j=0;j<mask.cols;j++)
		{
			if ( label(segmentation(i,j)) > 0.5)
			{
				mask(i,j) = 255;
			}
			else
			{
				mask(i,j) = 0;
			}
		}
	}

	cv::resize(mask,mask,Size(img.cols,img.rows));
	mask.setTo(255,mask>128);
	mask.setTo(0,mask<=128);
}
Developer: zouxiaochuan, Project: icome2013, Lines: 69, Source: SegmenterHumanSimple.cpp

示例8: if

//===========================================================================
void SVR_patch_expert::Response(const Mat_<float>& area_of_interest, Mat_<double>& response)
{

	int response_height = area_of_interest.rows - weights.rows + 1;
	int response_width = area_of_interest.cols - weights.cols + 1;
	
	// the patch area on which we will calculate responses
	cv::Mat_<float> normalised_area_of_interest;
  
	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}

	// If type is raw just normalise mean and standard deviation
	if(type == 0)
	{
		// Perform normalisation across whole patch
		cv::Scalar mean;
		cv::Scalar std;

		cv::meanStdDev(area_of_interest, mean, std);
		// Avoid division by zero
		if(std[0] == 0)
		{
			std[0] = 1;
		}
		normalised_area_of_interest = (area_of_interest - mean[0]) / std[0];
	}
	// If type is gradient, perform the image gradient computation
	else if(type == 1)
	{
		Grad(area_of_interest, normalised_area_of_interest);
	}
  	else
	{
		printf("ERROR(%s,%d): Unsupported patch type %d!\n", __FILE__,__LINE__, type);
		abort();
	}
	
	Mat_<float> svr_response;

	// The empty matrix as we don't pass precomputed dft's of image
	Mat_<double> empty_matrix_0(0,0,0.0);
	Mat_<float> empty_matrix_1(0,0,0.0);
	Mat_<float> empty_matrix_2(0,0,0.0);

	// Efficient calc of patch expert SVR response across the area of interest
	matchTemplate_m(normalised_area_of_interest, empty_matrix_0, empty_matrix_1, empty_matrix_2, weights, weights_dfts, svr_response, CV_TM_CCOEFF_NORMED); 

	response.create(svr_response.size());
	MatIterator_<double> p = response.begin();

	cv::MatIterator_<float> q1 = svr_response.begin(); // response for each pixel
	cv::MatIterator_<float> q2 = svr_response.end();

	while(q1 != q2)
	{
		// the SVR response passed into logistic regressor
		*p++ = 1.0/(1.0 + exp( -(*q1++ * scaling + bias )));
	}

}
Developer: yeshbourne, Project: VRBallGame, Lines: 64, Source: SVR_patch_expert.cpp

Example 9: ConstrainShapeInImage

/**
 * @author     	JIA Pei
 * @version    	2010-05-20
 * @brief      	Basic AAM Fitting, for dynamic image sequence
 * @param      	iImg			Input - image to be fitted
 * @param      	ioShape         Input and Output - the fitted shape
 * @param      	oImg            Output - the fitted image
 * @param		epoch			Input - the iteration epoch
*/
float VO_FittingAAMBasic::VO_BasicAAMFitting(const Mat& iImg,
											VO_Shape& ioShape,
											Mat& oImg,
											unsigned int epoch)
{
	this->m_VOFittingShape.clone(ioShape);
	double t = (double)cvGetTickCount();

    this->SetProcessingImage(iImg, this->m_VOAAMBasic);
    this->m_iIteration = 0;

    // Get m_MatModelAlignedShapeParam and m_fScale, m_vRotateAngles, m_MatCenterOfGravity
    this->m_VOAAMBasic->VO_CalcAllParams4AnyShapeWithConstrain(	this->m_VOFittingShape,
																this->m_MatModelAlignedShapeParam,
																this->m_fScale,
																this->m_vRotateAngles,
																this->m_MatCenterOfGravity);
	this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

	// Get m_MatModelNormalizedTextureParam
    VO_TextureModel::VO_LoadOneTextureFromShape(this->m_VOFittingShape,
												this->m_ImageProcessing,
												this->m_vTriangle2D,
												this->m_vPointWarpInfo,
												this->m_VOFittingTexture );
	// estimate the texture model parameters
    this->m_VOAAMBasic->VO_CalcAllParams4AnyTexture(this->m_VOFittingTexture, this->m_MatModelNormalizedTextureParam);

    // Calculate m_MatCurrentC
    this->m_VOAAMBasic->VO_SParamTParamProjectToCParam(	this->m_MatModelAlignedShapeParam,
														this->m_MatModelNormalizedTextureParam,
														this->m_MatCurrentC );
    // Set m_MatCurrentT, m_MatDeltaT, m_MatEstimatedT, m_MatDeltaC, m_MatEstimatedC, etc.
	this->m_MatCurrentT 	= Mat_<float>::zeros(this->m_MatCurrentT.size());
	this->m_MatDeltaT 		= Mat_<float>::zeros(this->m_MatDeltaT.size());
	this->m_MatEstimatedT 	= Mat_<float>::zeros(this->m_MatEstimatedT.size());
	this->m_MatDeltaC 		= Mat_<float>::zeros(this->m_MatDeltaC.size());
	this->m_MatEstimatedC 	= Mat_<float>::zeros(this->m_MatEstimatedC.size());
	
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	// explained by JIA Pei. 2010-05-20
	// For the first round, this->m_VOFittingShape should not change after calling "VO_CParamTParam2FittingShape"
	// But this is not the case. why?
	// Before calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by 
	// a) assigning m_VOTemplateAlignedShape
	// b) align to the real-size face using detected eyes and mouth
	// c) constrain the shape within the image
	// d) constrain the shape parameters and calculate those rigid transform parameters
	// cout << this->m_VOFittingShape << endl;
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	// Estimate m_VOFittingShape and m_VOFittingTexture
	this->VO_CParamTParam2FittingShape(	this->m_MatCurrentC,
										this->m_MatCurrentT,
										this->m_VOModelNormalizedTexture,
										this->m_VOFittingShape,
										this->m_fScale,
										this->m_vRotateAngles,
										this->m_MatCenterOfGravity );
	this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);		// Remember to call ConstrainShapeInImage() whenever you update m_VOFittingShape
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	// When calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by
	// a) c parameters to reconstruct shape parameters
	// b) shape parameters to reconstruct shape
	// c) align to the real-size face by global shape normalization
	// cout << this->m_VOFittingShape << endl;
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	
	this->m_E_previous = this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
															this->m_VOFittingShape,
															this->m_VOModelNormalizedTexture,
															this->m_VOTextureError);

    do
    {
		float estScale = this->m_fScale;
		vector<float> estRotateAngles = this->m_vRotateAngles;
		Mat_<float> estCOG = this->m_MatCenterOfGravity.clone();
		bool cBetter 	= false;
		bool poseBetter = false;

        /**First shape parameters, c parameters. refer to equation (9.3)
		* Cootes "Statistical Model of Appearance for Computer Vision" */
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMBasic->m_MatRc, -1, Mat(), 0.0, this->m_MatDeltaC, GEMM_2_T);

        // damp -- C
        for(unsigned int i = 0; i < k_values.size(); i++)
        {
            // make damped c prediction
            cv::scaleAdd(this->m_MatDeltaC, k_values[i], this->m_MatCurrentC, this->m_MatEstimatedC);

            // make sure m_MatEstimatedC are constrained
//......... part of the code omitted here .........
Developer: haifaben, Project: vosm, Lines: 101, Source: VO_FittingAAMBasic.cpp

Example 10: fit

int BaseDecisionTree::fit(Mat_<double> _X,
                          Mat_<double> _y,
                          Mat_<double> sample_weight)
{
    // Validation
    if (_X.rows == 0 || _X.cols == 0)
        return 1;

    // Determine output setting
    _n_samples = _X.rows;
    _n_features = _X.cols;

    // Reshape y to shape[n_samples, 1]
    _y = _y.reshape(1, _y.total());

    // Validation
    if (_y.rows != _n_samples)
        return 2;

    // Calculate class_weight
    Mat expended_class_weight(0, 0, CV_32F);
    // Get class_weight
    if (_class_weight.total() != 0)
        expended_class_weight = compute_sample_weight(_class_weight, _y);

    // Validation
    if (_max_depth <= 0)
        _max_depth = static_cast<int>(pow(2, 31) - 1);
    if (_max_leaf_nodes <= 0)
        _max_leaf_nodes = -1;
    if (_max_features <= 0)
        _max_features = _n_features;
    if (_max_leaf_nodes > -1 && _max_leaf_nodes < 2)
        return 3;
    if (_min_samples_split <= 0)
        return 4;
    if (_min_samples_leaf <= 0)
        return 5;
    if (_min_weight_fraction_leaf < 0 || _min_weight_fraction_leaf > 0.5)
        return 6;

    // Fold the expanded class weights into the per-sample weights
    if (expended_class_weight.total())
    {
        if (sample_weight.total())
        {
            for (int i = 0; i < (int)sample_weight.total(); i++)
            {
                sample_weight.at<double>(i, 0) = sample_weight.at<double>(i, 0) *
                                                 expended_class_weight.at<double>(i, 0);
            }
        }
        else
        {
            sample_weight = expended_class_weight;
        }
    }

    // Set min_weight_fraction_leaf
    if (_min_weight_fraction_leaf != 0.)
        _min_weight_fraction_leaf = _min_weight_fraction_leaf * cv::sum(sample_weight)[0];
    else
        _min_weight_fraction_leaf = 0.;

    // Set min_samples_split
    _min_samples_split = max(_min_samples_split, 2 * _min_samples_leaf);




    // Parameter checks passed; the actual tree construction is omitted from this snippet.
    return 0;
}
Developer: xs2maverick, Project: GBRT, Lines: 68, Source: tree.cpp

Example 11: assert

/**
* @param    avgSParam       - input        mean shape parameters
* @param    icovSParam      - input        covariance matrix of shape parameters
* @param    avgTParam       - input        mean texture parameters
* @param    icovTParam      - input        covariance matrix of texture parameters
* @param    iSParams        - input        the vector of multiple input shape parameters
* @param    iTParams        - input        the vector of multiple input texture parameter
* @param    ShapeDistMean   - input        mean texture parameters
* @param    ShapeDistStddev - input        covariance matrix of texture parameters
* @param    TextureDistMean - input        the input shape parameter
* @param    TextureDistStddev   - input    the input texture parameter
* @param    WeakFitting     - input        only shape parameter is used?
* @return   whether the fitting is acceptable
*/
bool CRecognitionAlgs::CalcFittingEffect4ImageSequence( 
    const Mat_<float>& avgSParam,
    const Mat_<float>& icovSParam,
    const Mat_<float>& avgTParam,
    const Mat_<float>& icovTParam,
    const Mat_<float>& iSParams,
    const Mat_<float>& iTParams,
    const Scalar& ShapeDistMean,
    const Scalar& ShapeDistStddev,
    const Scalar& TextureDistMean,
    const Scalar& TextureDistStddev,
    bool WeakFitting )
{
    assert(iSParams.rows == iTParams.rows);
    unsigned int NbOfSamples = iSParams.rows;
    vector<float> sDists, tDists;
    sDists.resize(NbOfSamples);
    tDists.resize(NbOfSamples);

    for(unsigned int i = 0; i < NbOfSamples; ++i)
    {
        CRecognitionAlgs::CalcFittingEffect4StaticImage(
            avgSParam,
            icovSParam,
            avgTParam,
            icovTParam,
            iSParams.row(i),
            iTParams.row(i),
            ShapeDistMean,
            ShapeDistStddev,
            TextureDistMean,
            TextureDistStddev,
            sDists[i],
            tDists[i],
            WeakFitting );
    }

    unsigned int NbOfGood1 = 0;
    unsigned int NbOfGood2 = 0;

    for(unsigned int i = 0; i < NbOfSamples; ++i)
    {
        if( ( fabs( sDists[i] - ShapeDistMean.val[0] )
            < 1.5f * ShapeDistStddev.val[0] ) )
        {
            NbOfGood1++;
            if( ( fabs( tDists[i] - TextureDistMean.val[0] )
                < 3.0f*TextureDistStddev.val[0] ) )
            {
                NbOfGood2++;
            }
        }
    }

    if(WeakFitting)
    {
        if(NbOfGood1 >= (unsigned int )(0.75*NbOfSamples) )
            return true;
        else
            return false;
    }
    else
    {
        if(NbOfGood2 >= (unsigned int )(0.75*NbOfGood1) )
            return true;
        else
            return false;
    }
}
Developer: HVisionSensing, Project: mc-vosm, Lines: 83, Source: VO_RecognitionAlgs.cpp

Example 12: edgeVertex

void UtilsSegmentation::MaxFlowSuperpixel(std::vector<SuperpixelStatistic>& spstat, const Mat_<float>& fgdEnergy,
		const Mat_<float>& bgdEnergy, float gamma, Mat_<int>& label)
{
	//::Graph<float,float,float> graph(nNode,nEdge,errfunc);
	//graph
	int nEdge = UtilsSuperpixel::CountEdge(spstat);
	Mat_<int> edgeVertex(nEdge,2);
	Mat_<float> edgeWeight(nEdge,1);
	Mat_<float> edgeLen(nEdge,1);

	int idx = 0;
	for(int i=0;i<spstat.size();i++)
	{
		SuperpixelStatistic& sp = spstat[i];
		for( set<int>::iterator j=sp.conn.begin();
			j!= sp.conn.end();
			j++)
		{
			int d = (*j);
			SuperpixelStatistic& dsp = spstat[d];
			if ( i != d)
			{
				edgeVertex(idx,0) = min(i,d);
				edgeVertex(idx,1) = max(i,d);
				float diff = (float) norm(sp.mean_color_ - dsp.mean_color_);
				edgeWeight(idx) = diff*diff;
				edgeLen(idx) = (float) cv::norm(sp.mean_position_-dsp.mean_position_);
				idx++;
			}
		}
	}

	float beta = (float) cv::mean(edgeWeight)[0];

	Graph<float,float,float> graph((int)spstat.size(), nEdge, errfunc);

	graph.add_node((int)spstat.size());

	for(int i=0;i<fgdEnergy.total();i++)
	{
		graph.add_tweights(i,bgdEnergy(i),fgdEnergy(i));
	}

	edgeWeight = - edgeWeight / beta;
	cv::exp(edgeWeight,edgeWeight);
	edgeWeight *= gamma;
	cv::divide(edgeWeight, edgeLen,edgeWeight);

	for(int i=0;i<nEdge;i++)
	{
		float w = edgeWeight(i);
		graph.add_edge(edgeVertex(i,0),edgeVertex(i,1),w,w);
	}

	graph.maxflow();

	label.create((int)spstat.size(),1);
	for(int i=0;i<spstat.size();i++)
	{
		if ( graph.what_segment(i) == Graph<float,float,float>::SOURCE)
		{
			label(i) = 1;
		}
		else
		{
			label(i) = 0;
		}
	}
}
Developer: zouxiaochuan, Project: icome2013, Lines: 69, Source: UtilsSegmentation.cpp

Example 13: fftw_plan_dft_3d

bool c_FourierTransfrom::ifftw_complex_3d(const Mat_<Vec6d> &_input,
                                         Mat_<Vec6d> &_output)
{
    size_t height = _input.rows;
    size_t width = _input.cols;
    size_t n_channels = _input.channels() / 2;
    size_t n_pixels = height * width;
    size_t n_data = n_pixels * n_channels;

    fftw_complex *in, *out;
    fftw_plan p;

    in = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * n_data);
    out = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * n_data);

    p = fftw_plan_dft_3d(height, width, n_channels, in, out, FFTW_BACKWARD,
                         FFTW_ESTIMATE);

    /*!< prepare the data */
    for (size_t i_row = 0; i_row < height; ++i_row)
    {
        const Vec6d *p = _input.ptr<Vec6d>(i_row); // Vec6d: 3 real + 3 imaginary components per pixel
        for (size_t i_col = 0; i_col < width; ++i_col)
        {
            size_t index = i_row * width + i_col;
            for (size_t k = 0; k < n_channels; ++k)
            {
                in[n_pixels * k + index][0] = p[i_col][k];
                in[n_pixels * k + index][1] = p[i_col][k + n_channels];
            }
#if 0
            in[index][0] = p[i_col][4];
            in[index][1] = p[i_col][5];
            in[n_pixels + index][0] = p[i_col][2];
            in[n_pixels + index][1] = p[i_col][3];
            in[n_pixels * 2 + index][0] = p[i_col][0];
            in[n_pixels * 2 + index][1] = p[i_col][1];
#endif
        }
    }

    fftw_execute(p);

    /*!< write back data */
    _output = Mat_<Vec6d>::zeros(_input.size());
    for (size_t i_row = 0; i_row < height; ++i_row)
    {
        Vec6d *p = _output.ptr<Vec6d>(i_row);
        for (size_t i_col = 0; i_col < width; ++i_col)
        {
            size_t index = i_row * width + i_col;
            for (size_t k = 0; k < n_channels; ++k)
            {
                p[i_col][k] = out[n_pixels * k + index][0];
                p[i_col][k + n_channels] = out[n_pixels * k + index][1];
            }
#if 0
            p[i_col][0] = out[n_pixels * 2 + index][0];
            p[i_col][1] = out[n_pixels + index][0];
            p[i_col][2] = out[index][0];
            p[i_col][3] = out[n_pixels * 2 + index][1];
            p[i_col][4] = out[n_pixels + index][1];
            p[i_col][5] = out[index][1];
#endif
        }
    }

    _output /= n_data;

    fftw_destroy_plan(p);
    fftw_free(in);
    fftw_free(out);

    return true;
}
Developer: xiehao, Project: Image-Vectorization, Lines: 75, Source: fourier_transfrom.cpp
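The Vec6d elements here pack three complex channels: components 0..2 carry the real parts and components 3..5 the matching imaginary parts, which is what the in[...] indexing above implies. A small sketch of building such a complex image from a real 3-channel matrix, for example as input to a matching forward transform; the helper name is made up for illustration.

#include <opencv2/core/core.hpp>

// Pack a 3-channel real image (CV_64FC3) into the (re0, re1, re2, im0, im1, im2) layout,
// with all imaginary parts set to zero.
static cv::Mat_<cv::Vec6d> packComplex(const cv::Mat_<cv::Vec3d>& real)
{
    cv::Mat_<cv::Vec6d> out(real.size(), cv::Vec6d::all(0));
    for (int r = 0; r < real.rows; ++r)
        for (int c = 0; c < real.cols; ++c)
            for (int k = 0; k < 3; ++k)
                out(r, c)[k] = real(r, c)[k];   // real part; out(r, c)[k + 3] stays 0
    return out;
}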

Example 14: collectData

void collectData(int subjId,
				 CascadeClassifier &classifier,
				 ShapePredictor &predictor,
				 Mat_<float> &labels,
				 Mat_<float> &multihog,
				 Mat_<float> &landmarks)
{
	int H[] = { -15, -10, -5, 0, 5, 10, 15 };
	int V[] = { -10, 0, 10 };

	string path = to_string(subjId) + "/";
	if (subjId < 10) path = "columbia/000" + path;
	else path = "columbia/00" + path;
	ifstream fin(path + "annotation.txt");

	for (int imgId = 0; imgId < 105; imgId++) {
		int p, v, h;
		fin >> p >> v >> h;
		if (abs(p) > 15) continue;
		string imgpath = path + to_string(imgId) + ".jpg";
		Mat_<uchar> img = imread(imgpath, 0);
		BBox bbox = getTestBBox(img, classifier);
		if (EmptyBox(bbox)) continue;

		int l = 0;
		// EYE, MOUTH, NOF
		if (abs(h) <= 5 && v == 0) l = 0;
		else if (abs(h) <= 5 && v == -10) l = 1;
		else l = 2;

		if (l == 2) {
			RNG rng(getTickCount());
			double num = rng.uniform(0.0, 1.0);
			if (num > 0.5) continue;
		}

		// alternative labeling: top / middle / bottom
		/*if (v < 0) l = 0;
		else if (v == 0) l = 1;
		else l = 2;*/

		// alternative labeling: 9 classes
		/*if (h < -5) l += 0;
		else if (h > 5) l += 2;
		else l += 1;
		if (v < 0) l += 0;
		else if (v > 0) l += 2 * 3;
		else l += 1 * 3;*/

		Mat_<float> lab = l*Mat_<float>::ones(1, 1);
		labels.push_back(lab);

		Mat_<double> shape = predictor(img, bbox);
		Geom G;	initGeom(shape, G);
		Pose P; calcPose(G, P);

		Mat_<uchar> lEye, rEye;
		regularize(img, bbox, P, shape, lEye, rEye);

		vector<float> lRlt;
		vector<float> rRlt;
		calcMultiHog(lEye, lRlt);
		calcMultiHog(rEye, rRlt);

		vector<float> _hog2nd_vec;
		for (int k = 0; k < lRlt.size(); k++)
			_hog2nd_vec.push_back(lRlt[k]);
		for (int k = 0; k < rRlt.size(); k++)
			_hog2nd_vec.push_back(rRlt[k]);
		Mat_<float> _hog2nd_row = Mat_<float>(_hog2nd_vec).reshape(1, 1);
		multihog.push_back(_hog2nd_row);

		vector<float> _ldmks;
		for (int i = 28; i < 48; i++) {
			_ldmks.push_back((shape(i, 0) - bbox.cx) / bbox.w);
			_ldmks.push_back((shape(i, 1) - bbox.cy) / bbox.h);
		}
		float mouthx = (shape(51, 0) + shape(62, 0) + shape(66, 0) + shape(57, 0)) / 4;
		float mouthy = (shape(51, 1) + shape(62, 1) + shape(66, 1) + shape(57, 1)) / 4;
		_ldmks.push_back((mouthx - bbox.cx) / bbox.w);
		_ldmks.push_back((mouthy - bbox.cy) / bbox.h);
		float maxVal = *std::max_element(_ldmks.begin(), _ldmks.end());
		for (int i = 0; i < _ldmks.size(); i++) _ldmks[i] *= 1.0 / maxVal; // scale to [-1, 1]

		Mat_<float> ldmks = Mat_<float>(_ldmks).reshape(1, 1);
		landmarks.push_back(ldmks);
	}
	fin.close();
}
Developer: tiejian, Project: GazeClassification, Lines: 89, Source: TestMain.cpp

Example 15: computePoseDifference


//......... part of the code omitted here .........
   drawMatches(img1, KeyPoints_1, img2, KeyPoints_2, // draw only inliers given by mask
         matches, img_matches, Scalar::all(-1), Scalar::all(-1), mask);

   vector<Point2f> imgpts1_masked, imgpts2_masked;
   for (int i = 0; i < imgpts1.size(); i++) 
   {
      if (mask.at<uchar>(i,0) == 1) 
      {
         imgpts1_masked.push_back(imgpts1[i]);
         imgpts2_masked.push_back(imgpts2[i]);
      }
   }

   Mat pnts4D;
   Mat P1 = camera_matrix * Mat::eye(3, 4, CV_64FC1), P2;
   Mat p2[2] = { R, t }; 
   hconcat(p2, 2, P2);
   P2 = camera_matrix * P2;

#define USE_OPENCV_TRIANGULATION
#ifndef USE_OPENCV_TRIANGULATION // strangely, both methods yield identical results
   vector<Point3d> homogPoints1, homogPoints2;
   for (int i = 0; i < imgpts1_masked.size(); i++) 
   {
      Point2f currentPoint1 = imgpts1_masked[i];
      homogPoints1.push_back(Point3d(currentPoint1.x, currentPoint1.y, 1));
      Point2f currentPoint2 = imgpts2_masked[i];
      homogPoints2.push_back(Point3d(currentPoint2.x, currentPoint2.y, 1));
   }

   Mat dehomogenized(imgpts1_masked.size(), 3, CV_64FC1);
   for (int i = 0; i < imgpts1_masked.size(); i++) 
   {
      Mat_<double> triangulatedPoint = IterativeLinearLSTriangulation(homogPoints1[i], P1, homogPoints2[i], P2);
      Mat r = triangulatedPoint.t();
      r.colRange(0,3).copyTo(dehomogenized.row(i)); // directly assigning to dehomogenized.row(i) compiles but does nothing, wtf?
   }
#else
   triangulatePoints(P1, P2, imgpts1_masked, imgpts2_masked, pnts4D);
   pnts4D = pnts4D.t();
   Mat dehomogenized;
   convertPointsFromHomogeneous(pnts4D, dehomogenized);
   dehomogenized = dehomogenized.reshape(1); // instead of 3 channels and 1 col, we want 1 channel and 3 cols
#endif


   double mDist = 0;
   int n = 0;
   int pos = 0, neg = 0;

   /* Write ply file header */
   ofstream ply_file("points.ply", ios_base::trunc);
   ply_file << 
      "ply\n"
      "format ascii 1.0\n"
      "element vertex " << dehomogenized.rows << "\n"
      "property float x\n"
      "property float y\n"
      "property float z\n"
      "property uchar red\n"
      "property uchar green\n"
      "property uchar blue\n"
      "end_header" << endl;

   Mat_<double> row;
   for (int i = 0; i < dehomogenized.rows; i++) 
Developer: AnnKatrinBecker, Project: OpenCV-test-crap, Lines: 67, Source: stereo_v3.cpp


Note: The Mat_ class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not reproduce without permission.