

C++ VectorXd::array Method Code Examples

This article collects and summarizes typical usage examples of the C++ VectorXd::array method. If you have been wondering what exactly VectorXd::array does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the VectorXd class to which the method belongs.


Four code examples of the VectorXd::array method are shown below, sorted by popularity by default. Each example is an excerpt from an open-source project; the attribution line at the end of each example names the author, project, and source file.
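
Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the variable names are purely illustrative) of what VectorXd::array does: it reinterprets a vector as an Eigen Array so that coefficient-wise operations such as log, abs, comparisons, and element-wise products become available.

#include <iostream>
#include <Eigen/Dense>

using Eigen::VectorXd;

int main()
{
    VectorXd v(4);
    v << 1.0, 2.0, 4.0, 8.0;

    // .array() switches to coefficient-wise semantics; the results can be
    // assigned straight back to a VectorXd.
    VectorXd logs    = v.array().log();          // element-wise natural log
    VectorXd squares = v.array() * v.array();    // element-wise product
    bool allPositive = (v.array() > 0.0).all();  // coefficient-wise comparison

    std::cout << logs.transpose() << "\n"
              << squares.transpose() << "\n"
              << std::boolalpha << allPositive << std::endl;
    return 0;
}

The examples below use these same patterns inside larger numerical routines.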

Example 1: nLLeval

mfloat_t CKroneckerLMM::nLLeval(mfloat_t ldelta, const MatrixXdVec& A,const MatrixXdVec& X, const MatrixXd& Y, const VectorXd& S_C1, const VectorXd& S_R1, const VectorXd& S_C2, const VectorXd& S_R2)
{
//#define debugll
	muint_t R = (muint_t)Y.rows();
	muint_t C = (muint_t)Y.cols();
	assert(A.size() == X.size());
	assert(R == (muint_t)S_R1.rows());
	assert(C == (muint_t)S_C1.rows());
	assert(R == (muint_t)S_R2.rows());
	assert(C == (muint_t)S_C2.rows());
	muint_t nWeights = 0;
	for(muint_t term = 0; term < A.size();++term)
	{
		assert((muint_t)(X[term].rows())==R);
		assert((muint_t)(A[term].cols())==C);
		nWeights+=(muint_t)(A[term].rows()) * (muint_t)(X[term].cols());
	}
	mfloat_t delta = exp(ldelta);
	mfloat_t ldet = 0.0;//R * C * ldelta;

	//build D and compute the logDet of D
	MatrixXd D = MatrixXd(R,C);
	for (muint_t r=0; r<R;++r)
	{
		if(S_R2(r)>1e-10)
		{
			ldet += (mfloat_t)C * log(S_R2(r));//ldet
		}
		else
		{
			std::cout << "S_R2(" << r << ")="<< S_R2(r)<<"\n";
		}
	}
#ifdef debugll
	std::cout << ldet;
	std::cout << "\n";
#endif
	for (muint_t c=0; c<C;++c)
	{
		if(S_C2(c)>1e-10)
		{
			ldet += (mfloat_t)R * log(S_C2(c));//ldet
		}
		else
		{
			std::cout << "S_C2(" << c << ")="<< S_C2(c)<<"\n";
		}
	}
#ifdef debugll
	std::cout << ldet;
	std::cout << "\n";
#endif
	for (muint_t r=0; r<R;++r)
	{
		for (muint_t c=0; c<C;++c)
		{
			mfloat_t SSd = S_R1.data()[r]*S_C1.data()[c] + delta;
			ldet+=log(SSd);
			D(r,c) = 1.0/SSd;
		}
	}
#ifdef debugll
	std::cout << ldet;
	std::cout << "\n";
#endif
	MatrixXd DY = Y.array() * D.array();

	VectorXd XYA = VectorXd(nWeights);

	muint_t cumSumR = 0;

	MatrixXd covW = MatrixXd(nWeights,nWeights);
	for(muint_t termR = 0; termR < A.size();++termR){
		muint_t nW_AR = A[termR].rows();
		muint_t nW_XR = X[termR].cols();
		muint_t rowsBlock = nW_AR * nW_XR;
		MatrixXd XYAblock = X[termR].transpose() * DY * A[termR].transpose();
		XYAblock.resize(rowsBlock,1);
		XYA.block(cumSumR,0,rowsBlock,1) = XYAblock;

		muint_t cumSumC = 0;

		for(muint_t termC = 0; termC < A.size(); ++termC){
			muint_t nW_AC = A[termC].rows();
			muint_t nW_XC = X[termC].cols();
			muint_t colsBlock = nW_AC * nW_XC;
			MatrixXd block = MatrixXd::Zero(rowsBlock,colsBlock);
			if (R<C)
			{
				for(muint_t r=0; r<R; ++r)
				{
					MatrixXd AD = A[termR];
					AD.array().rowwise() *= D.row(r).array();
					MatrixXd AA = AD * A[termC].transpose();
					//sum up col matrices
					MatrixXd XX = X[termR].row(r).transpose() * X[termC].row(r);
					akron(block,AA,XX,true);
				}
			}
			else
//......... part of the code is omitted here .........
Developer ID: MMesbahU, Project: limix, Lines: 101, Source: kronecker_lmm.cpp
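
The .array() calls in Example 1 build the coefficient-wise weight matrix D(r,c) = 1 / (S_R1(r) * S_C1(c) + delta) and then form the element-wise product Y.array() * D.array(). A condensed sketch of just that pattern (the function name and inputs are illustrative, not part of limix):

#include <Eigen/Dense>

using Eigen::MatrixXd;
using Eigen::VectorXd;

// Coefficient-wise reweighting of Y, as in CKroneckerLMM::nLLeval above.
MatrixXd reweight(const MatrixXd& Y, const VectorXd& sR, const VectorXd& sC, double delta)
{
    // The outer product sR * sC^T gives S_R1(r) * S_C1(c); adding delta and
    // taking the coefficient-wise inverse reproduces the double loop that fills D.
    MatrixXd D  = ((sR * sC.transpose()).array() + delta).inverse();
    MatrixXd DY = Y.array() * D.array();   // element-wise product, stored as a matrix
    return DY;
}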

Example 2: ExtendTree_NHC_Sort_OnlyGS_TermCond_Heading

extendTreeR_t ExtendTree_NHC_Sort_OnlyGS_TermCond_Heading(MatrixXd tree, VectorXd vecEndNode, worldLine_t world, setting_t P,
				double dKmax, double c4, double dMax, double dMaxBeta, sigma_t sigma, int iMaxIte, int iINDC)
{
	int iFlag, kk, iTemp, n, idx, flagRet;
	double p, r, theta, Sx, Sy;

	Vector3d vec3RandomPoint;
	VectorXd vecTempDiag, vecIdx, vecP1, vecP2, vecP3, vecNewNode;
	MatrixXd matTemp, matTempSq, matWP, newTree, matdKappa;
	VectorXd vecBeta;

	extendTreeR_t funcReturn;

	iFlag = 0;

	while (iFlag==0) {
		// select a biased random point
		p=Uniform01();

		if ( (iINDC==0 && p<0.1) || (iINDC==1 && p<0.05) ) {
			vec3RandomPoint << vecEndNode(0), vecEndNode(1), vecEndNode(2);
		} else {
			r = sigma.r*Uniform01();
			theta = sigma.theta0 + sigma.theta*quasi_normal_random();

			Sx = sigma.sp(0) + r*cos(theta);
			Sy = sigma.sp(1) + r*sin(theta);
			vec3RandomPoint << Sx, Sy, vecEndNode(2);
		}
		std::cout << "Random Point : " << vec3RandomPoint(0) << " " << vec3RandomPoint(1) << " " << vec3RandomPoint(2) << std::endl;

		// Find node that is closest to random point
		matTemp.resize(tree.rows(),3);

		for (iTemp=0; iTemp<tree.rows(); iTemp++)	{
			matTemp(iTemp,0) = tree(iTemp,0) - vec3RandomPoint(0);
			matTemp(iTemp,1) = tree(iTemp,1) - vec3RandomPoint(1);
			matTemp(iTemp,2) = tree(iTemp,2) - vec3RandomPoint(2);
		}
		matTempSq = matTemp*matTemp.transpose();

		vecTempDiag.resize(matTemp.rows());
		for (iTemp=0; iTemp<matTemp.rows(); iTemp++)
			vecTempDiag(iTemp) = matTempSq(iTemp, iTemp);

		SortVec(vecTempDiag, vecIdx); // vecTempDiag : sorted vector, vecIdx : index of vecTempDiag

		// Modification 3
		if ( vecIdx.rows() > iMaxIte )
			n = iMaxIte;
		else
			n = vecIdx.rows();

		/// Nonholonomic Length Decision
		kk=-1;

		// Modification 4
		if (tree.rows() == 2) {
			vecP1.resize(3); vecP2.resize(3); vecP3.resize(3);
			vecP1 << tree(0,0), tree(0,1), tree(0,2);
			vecP2 << tree(1,0), tree(1,1), tree(1,2);
			vecP3 = vec3RandomPoint;

			matWP.resize(3,3);
			matWP.row(0) = vecP1.segment(0,3).transpose();
			matWP.row(1) = vecP2.segment(0,3).transpose();
			matWP.row(2) = vecP3.segment(0,3).transpose();
			Kappa_Max_Calculation(matWP, P, matdKappa, vecBeta);

			if ( (vecBeta.array().abs()<= dMaxBeta).all() )
				// Method 2 : use the maximum length margin - when there is a straight line, there is more margin
				P.L = 2*dMax;
			else if ( (vecBeta.array().abs() > dMaxBeta).all() ) {
				funcReturn.flag = 0;
				funcReturn.newTree = tree;
				funcReturn.INDC = iINDC;
				funcReturn.sigma = sigma;
				funcReturn.maxIte = iMaxIte;

				return funcReturn;
			}
		} else if (tree.rows() > 2)	{
			for (iTemp=0; iTemp<n; iTemp++)	{
				kk = iTemp;

				if ( tree(vecIdx(iTemp), tree.cols()-1) == 0 )	{
					funcReturn.flag = 0;
					funcReturn.newTree = tree;
					funcReturn.INDC = iINDC;
					funcReturn.sigma = sigma;
					funcReturn.maxIte = iMaxIte;

					return funcReturn;
				}

				vecP2.resize(tree.cols());
				vecP2 = tree.row(vecIdx(iTemp)).transpose();
				vecP1.resize(tree.cols());
				vecP1 = tree.row( vecP2(vecP2.rows()-1)-1 ).transpose();
				vecP3.resize(vec3RandomPoint.rows());
//......... part of the code is omitted here .........
Developer ID: amakou, Project: rrt, Lines: 101, Source: ExtendTree.cpp
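
In Example 2, .array() is used for coefficient-wise comparisons: (vecBeta.array().abs() <= dMaxBeta).all() tests every heading angle against the bound at once. A minimal sketch of that guard in isolation (the function name is mine; the bound is whatever dMaxBeta holds in the caller):

#include <Eigen/Dense>

using Eigen::VectorXd;

// True only if every |beta| stays within the allowed bound, mirroring the
// termination check in ExtendTree_NHC_Sort_OnlyGS_TermCond_Heading above.
bool withinHeadingBound(const VectorXd& vecBeta, double dMaxBeta)
{
    return (vecBeta.array().abs() <= dMaxBeta).all();
}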

Example 3: infer


//......... part of the code is omitted here .........

        //cout << "----------------------" << endl;
        //cout << nodeBeliefs[ ID1 ] << endl;
        //cout << "----------------------" << endl;
        //cout << message2To1 << endl;

        VectorXd node1Belief = nodeBeliefs[ ID1 ].cwiseQuotient( message2To1 );
        VectorXd node2Belief = nodeBeliefs[ ID2 ].cwiseQuotient( message1To2 );

        //cout << "----------------------" << endl;

        MatrixXd node1BeliefMatrix ( edgePotentials.rows(), edgePotentials.cols() );
        for ( size_t row = 0; row < edgePotentials.rows(); row++ )
            for ( size_t col = 0; col < edgePotentials.cols(); col++ )
                node1BeliefMatrix(row,col) = node1Belief(row);

        //cout << "Node 1 belief matrix: " << endl << node1BeliefMatrix << endl;

        edgeBelief = edgeBelief.cwiseProduct( node1BeliefMatrix );

        MatrixXd node2BeliefMatrix ( edgePotentials.rows(), edgePotentials.cols() );
        for ( size_t row = 0; row < edgePotentials.rows(); row++ )
            for ( size_t col = 0; col < edgePotentials.cols(); col++ )
                node2BeliefMatrix(row,col) = node2Belief(col);

        //cout << "Node 2 belief matrix: " << endl << node2BeliefMatrix << endl;

        edgeBelief = edgeBelief.cwiseProduct( node2BeliefMatrix );

        //cout << "Edge potentials" << endl << edgePotentials << endl;
        //cout << "Edge beliefs" << endl << edgeBelief << endl;

        // Normalize
        edgeBelief = edgeBelief / edgeBelief.sum();



        edgeBeliefs[ edgeID ] = edgeBelief;
    }

    //
    // 4. Compute logZ
    //

    double energyNodes  = 0;
    double energyEdges  = 0;
    double entropyNodes = 0;
    double entropyEdges = 0;

    // Compute energy and entropy from nodes

    for ( size_t nodeIndex = 0; nodeIndex < nodes.size(); nodeIndex++ )
    {
        CNodePtr nodePtr     = nodes[ nodeIndex ];
        size_t   nodeID      = nodePtr->getID();
        size_t   N_Neighbors = graph.getNumberOfNodeNeighbors( nodeID );

        // Useful computations and shortcuts
        VectorXd &nodeBelief        = nodeBeliefs[nodeID];
        VectorXd logNodeBelief      = nodeBeliefs[nodeID].array().log();
        VectorXd nodePotentials    = nodePtr->getPotentials( m_options.considerNodeFixedValues );
        VectorXd logNodePotentials = nodePotentials.array().log();

        // Entropy from the node
        energyNodes += N_Neighbors*( nodeBelief.cwiseProduct( logNodeBelief ).sum() );

        // Energy from the node
        entropyNodes += N_Neighbors*( nodeBelief.cwiseProduct( logNodePotentials ).sum() );
    }

    // Compute energy and entropy from edges

    for ( size_t edgeIndex = 0; edgeIndex < N_edges; edgeIndex++ )
    {
        CEdgePtr edgePtr = edges[ edgeIndex ];
        size_t   edgeID  = edgePtr->getID();

        // Useful computations and shortcuts
        MatrixXd &edgeBelief       = edgeBeliefs[ edgeID ];
        MatrixXd logEdgeBelief     = edgeBelief.array().log();
        MatrixXd &edgePotentials   = edgePtr->getPotentials();
        MatrixXd logEdgePotentials = edgePotentials.array().log();

        // Entropy from the edge
        energyEdges += edgeBelief.cwiseProduct( logEdgeBelief ).sum();

        // Energy from the edge
        entropyEdges += edgeBelief.cwiseProduct( logEdgePotentials ).sum();

    }

    // Final Bethe free energy

    double BethefreeEnergy = ( energyNodes - energyEdges ) - ( entropyNodes - entropyEdges );

    // Compute logZ

    logZ = - BethefreeEnergy;

}
Developer ID: mintar, Project: upgmpp, Lines: 101, Source: inference.cpp
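
Example 3 relies on .array().log() for the element-wise logarithms of beliefs and potentials that feed the Bethe energy and entropy sums. A reduced sketch of the node-level term (the epsilon guard against log(0) is an addition of mine, not part of UPGM++):

#include <Eigen/Dense>

using Eigen::VectorXd;

// Computes sum_i b(i) * log(b(i)), the kind of term accumulated per node above.
double beliefLogSum(const VectorXd& belief)
{
    const double eps = 1e-300;                          // assumption: avoid log(0)
    VectorXd logBelief = (belief.array() + eps).log();  // element-wise log
    return belief.cwiseProduct(logBelief).sum();
}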

Example 4: sparseAECost

lbfgsfloatval_t sparseAECost(
    void* netParam,
    const lbfgsfloatval_t *ptheta,
    lbfgsfloatval_t *grad,
    const int n,
    const lbfgsfloatval_t step)
{
    instanceSP* pStruct = (instanceSP*)(netParam);
    int hiddenSize = pStruct->hiddenSize;
    int visibleSize = pStruct->visibleSize;
    double lambda = pStruct->lambda;
    double beta = pStruct->beta;
    double sp = pStruct->sparsityParam;
    MatrixXd& data = pStruct->data;
    double cost = 0;

    MatrixXd w1(hiddenSize, visibleSize);
    MatrixXd w2(visibleSize, hiddenSize);
    VectorXd b1(hiddenSize);
    VectorXd b2(visibleSize);

    for (int i=0; i<hiddenSize*visibleSize; i++)
    {
        *(w1.data()+i) = *ptheta;
        ptheta++;
    }
    for (int i=0; i<visibleSize*hiddenSize; i++)
    {
        *(w2.data()+i) = *ptheta;
        ptheta++;
    }
    for (int i=0; i<hiddenSize; i++)
    {
        *(b1.data()+i) = *ptheta;
        ptheta++;
    }
    for (int i=0; i<visibleSize; i++)
    {
        *(b2.data()+i) = *ptheta;
        ptheta++;
    }

    int ndim = data.rows();
    int ndata = data.cols();

    MatrixXd z2 = w1 * data + b1.replicate(1, ndata);
    MatrixXd a2 = sigmoid(z2);
    MatrixXd z3 = w2 * a2 + b2.replicate(1, ndata);
    MatrixXd a3 = sigmoid(z3);

    VectorXd rho = a2.rowwise().sum() / ndata;
    VectorXd sparsityDelta = -sp / rho.array() + (1 - sp) / (1 - rho.array());

    MatrixXd delta3 = (a3 - data).array() * sigmoidGrad(z3).array();
    MatrixXd delta2 = (w2.transpose() * delta3 + beta * sparsityDelta.replicate(1, ndata)).array() 
                      * sigmoidGrad(z2).array();

    MatrixXd w1Grad = delta2 * data.transpose() / ndata + lambda * w1;
    VectorXd b1Grad = delta2.rowwise().sum() / ndata;
    MatrixXd w2Grad = delta3 * a2.transpose() / ndata + lambda * w2;
    VectorXd b2Grad = delta3.rowwise().sum() / ndata;

    cost = (0.5 * (a3 - data).array().pow(2)).matrix().sum() / ndata
            + 0.5 * lambda * ((w1.array().pow(2)).matrix().sum() 
            + (w2.array().pow(2)).matrix().sum())
            + beta * (sp * (sp / rho.array()).log() 
            + (1 - sp) * ((1 - sp) / (1 - rho.array())).log() ).matrix().sum();

    double* pgrad = grad;
    for (int i=0; i<hiddenSize*visibleSize; i++)
    {
        *pgrad = *(w1Grad.data()+i);
        pgrad++;
        
    }
    for (int i=0; i<visibleSize*hiddenSize; i++)
    {
        *pgrad = *(w2Grad.data()+i);
        pgrad++;
    }
    for (int i=0; i<hiddenSize; i++)
    {
        *pgrad = *(b1Grad.data()+i);
        pgrad++;
    }
    for (int i=0; i<visibleSize; i++)
    {
        *pgrad = *(b2Grad.data()+i);
        pgrad++;
    }

    return cost;
}
Developer ID: George-Zhu, Project: UFLDL-Tutorial-Exercises-C, Lines: 93, Source: sparseAECost.cpp
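
Example 4 uses .array() to evaluate the KL-divergence sparsity penalty of the sparse autoencoder coefficient-wise over the mean hidden activations rho. A condensed sketch of just that term (the function name is illustrative):

#include <Eigen/Dense>

using Eigen::VectorXd;

// KL(sp || rho_j) summed over hidden units, as in the cost expression above:
// sum_j [ sp*log(sp/rho_j) + (1-sp)*log((1-sp)/(1-rho_j)) ]
double sparsityPenalty(const VectorXd& rho, double sp)
{
    return ( sp * (sp / rho.array()).log()
           + (1.0 - sp) * ((1.0 - sp) / (1.0 - rho.array())).log() ).sum();
}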


Note: The VectorXd::array examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult the license of the corresponding project before distributing or using the code; do not reproduce without permission.