This article collects typical usage examples of the C++ method VectorXd::transpose. If you have been wondering what exactly VectorXd::transpose does in C++, how to call it, or what example code looks like, the curated examples here may help. You can also look further into usage examples for the class VectorXd that the method belongs to.
Below are 15 code examples of the VectorXd::transpose method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
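Before diving into the examples, here is a minimal self-contained sketch (not taken from any of the examples below) of what VectorXd::transpose itself does: it returns a row-vector expression, which is what makes inner products, outer products, and row assignments read naturally in Eigen.

#include <iostream>
#include <Eigen/Dense>
using Eigen::MatrixXd;
using Eigen::RowVectorXd;
using Eigen::VectorXd;

int main() {
    VectorXd v(3), w(3);
    v << 1, 2, 3;
    w << 4, 5, 6;

    RowVectorXd vt = v.transpose();       // explicit row-vector copy
    double inner = v.transpose() * w;     // 1x1 product, convertible to a scalar (same as v.dot(w))
    MatrixXd outer = v * w.transpose();   // 3x3 outer product

    MatrixXd M = MatrixXd::Zero(2, 3);
    M.row(0) = v.transpose();             // write a column vector into a matrix row

    std::cout << vt << "\n\n" << inner << "\n\n" << outer << "\n\n" << M << std::endl;
    return 0;
}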
Example 1: compute_empirical_T3
// alpha0 and KHID are globals defined elsewhere in this source file.
MatrixXd compute_empirical_T3(SparseMatrix<double> whitened_data, VectorXd y_mean){
    int nx = (int)whitened_data.cols();
    //printf("nx = %d\n", nx);
    // Shift coefficients for the empirical third-moment tensor.
    double shift12 = (alpha0 + 1.0)*(alpha0 + 2.0) / (2.0*(double)nx);
    double shift01 = -alpha0*(alpha0 + 1.0) / (2.0*(double)nx);
    double shift00 = alpha0*alpha0;
    // emp_T3_whitened is a KHID x KHID x KHID tensor flattened to KHID x KHID^2;
    // the KHID-column block starting at column i*KHID accumulates slice i.
    MatrixXd emp_T3_whitened = MatrixXd::Zero(KHID, KHID * KHID);
    for(int n=0; n<nx; n++){
        VectorXd y = whitened_data.col(n);
        MatrixXd temp0, temp1, temp2, temp3, temp4;
        temp0.noalias() = shift12 * (y * y.transpose());
        temp1.noalias() = shift01 * (y * y.transpose());
        temp2.noalias() = shift01 * (y * y_mean.transpose());
        temp3.noalias() = shift01 * (y_mean * y.transpose());
        temp4.noalias() = shift00 * (y_mean * y_mean.transpose());
        for(int i=0; i < KHID; i++){
            emp_T3_whitened.block(0, i*KHID, KHID, KHID).noalias() += temp0*y(i);
            emp_T3_whitened.block(0, i*KHID, KHID, KHID).noalias() += temp1*y_mean(i);
            emp_T3_whitened.block(0, i*KHID, KHID, KHID).noalias() += temp2*y(i);
            emp_T3_whitened.block(0, i*KHID, KHID, KHID).noalias() += temp3*y(i);
            emp_T3_whitened.block(0, i*KHID, KHID, KHID).noalias() += temp4*y_mean(i);
        }
    }
    return emp_T3_whitened;
}
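As the comments above note, emp_T3_whitened stores a third-order tensor flattened block by block along the columns. A hedged helper sketch (my own illustration, not part of the original source) for reading back slice i and contracting it with a vector:

// Sketch only: slice i of the flattened tensor is the KHID x KHID block starting at column i*KHID.
VectorXd applyT3Slice(const MatrixXd& T3_flat, int i, const VectorXd& w, int KHID_dim){
    return T3_flat.block(0, i*KHID_dim, KHID_dim, KHID_dim) * w;
}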
Example 2: body
void DMultivariateGaussian::body(const VectorXd &x )
{
    // See http://en.wikipedia.org/wiki/Multivariate_normal_distribution
    double n = x.size();
    double value = 0.0;
    if (isStandard)
    {
        double factor1 = 1.0/pow(2.0*M_PI, n/2.0);
        double factor2 = 1.0/( sqrt(std::abs(covar.determinant())) );
        VectorXd dx = x - mean;
        RowVectorXd dxt = dx.transpose();
        VectorXd x1 = invCovar*dx;
        double arg = -0.5*(x1.dot(dxt));
        value = factor1*factor2*exp(arg);
    }
    else
    {
        double factor1 = 1.0/pow(2.0*M_PI, n/2.0);
        double sigma = 1.0;
        double factor2 = 1.0/sigma;
        VectorXd dx = x - mean;
        RowVectorXd dxt = dx.transpose();
        VectorXd x1 = invCovar*dx;
        double arg = -0.5*(x1.dot(dxt));
        value = factor1*factor2*exp(arg);
    }
    result = value;
}
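A hedged side note: the quadratic form in the exponent can be written without the intermediate row vector, and when the covariance matrix itself is available an LLT (Cholesky) solve avoids forming an explicit inverse. A minimal sketch under those assumptions (the function name is hypothetical; covar and mean play the same roles as the members above):

// Sketch only: -0.5 * (x - mean)' * covar^{-1} * (x - mean) without an explicit inverse.
#include <Eigen/Dense>
double gaussianExponent(const Eigen::VectorXd& x,
                        const Eigen::VectorXd& mean,
                        const Eigen::MatrixXd& covar)
{
    Eigen::VectorXd dx = x - mean;
    Eigen::LLT<Eigen::MatrixXd> llt(covar);   // Cholesky factorization covar = L * L'
    Eigen::VectorXd z = llt.solve(dx);        // z = covar^{-1} * dx
    return -0.5 * dx.dot(z);
    // With a precomputed inverse this is equivalent to:
    //   double arg = -0.5 * dx.transpose() * invCovar * dx;
}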
Example 3: gibbsSamplerC
// [[Rcpp::depends(RcppEigen)]]
// [[Rcpp::export]]
MatrixXd gibbsSamplerC(int nMC,
                       VectorXd y,
                       MatrixXd X,
                       double a = 1.1,
                       double b = 1.1,
                       double kappa = 0.1) {
    int n = X.rows();
    int p = X.cols();
    VectorXd beta = VectorXd::Zero(p);
    MatrixXd Res(nMC, p+1);
    // take Cholesky decomposition of M matrix
    MatrixXd M = ( kappa * MatrixXd::Identity(p, p) ) + ( X.transpose() * X );
    LLT<MatrixXd> lltOfM(M);
    MatrixXd L = lltOfM.matrixL();
    // to be used in draws of sigmasq estimates
    double A = a + 0.5*(n+p);
    // to be used in draws of beta estimates
    VectorXd beta_means = M.inverse() * (X.transpose() * y);
    // run Markov Chain (rgamma/rnorm are Rcpp sugar functions)
    for ( int i=0; i<nMC; ++i ) {
        // UPDATE SIGMASQ ESTIMATE
        // update scale parameter
        VectorXd mat = y - (X * beta);
        double beta_val = (beta.transpose() * beta).sum();
        double mat_val = (mat.transpose() * mat).sum();
        double B = ( b + 0.5 * kappa * beta_val ) + ( 0.5 * mat_val );
        // new sigmasq inverse gamma draw
        double gamma_draw = rgamma(1, A, 1/B)[0];
        double new_sigmasq = 1.0/gamma_draw;
        // UPDATE BETA ESTIMATES
        for ( int c=0; c<p; ++c ) {
            beta(c) = beta_means(c) + sqrt(new_sigmasq) * L.inverse().sum() * rnorm(1, 0, 1)[0];
        }
        // STORE RESULTS
        Res(i, p) = new_sigmasq;
        Res.row(i).leftCols(p) = beta;
    }
    return Res;
}
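One remark, offered as a hedged sketch of an assumption about intent rather than as the original author's method: in this conjugate model the full conditional of beta is N(beta_means, new_sigmasq * M^{-1}), and the standard way to sample it from the Cholesky factor M = L*L' is to back-solve a triangular system against a standard-normal vector, rather than summing the entries of L.inverse(). A possible replacement for the inner beta update loop, reusing p, beta_means, new_sigmasq, L and rnorm from the example above:

// Sketch only: draw beta ~ N(beta_means, new_sigmasq * M^{-1}) using M = L * L'.
VectorXd z(p);
for (int c = 0; c < p; ++c) {
    z(c) = rnorm(1, 0, 1)[0];                                      // z ~ N(0, I)
}
beta = beta_means
     + sqrt(new_sigmasq)
       * L.transpose().triangularView<Eigen::Upper>().solve(z);    // L'^{-1} z has covariance M^{-1}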
Example 4: getEnergy
double cRBLayer::getEnergy(VectorXd& vNodes){
    VectorXd wx_b = vNodes.transpose()*W;
    wx_b = wx_b + hb;
    double vxb = vNodes.transpose()*vb;
    VectorXd ones(h);
    ones.setOnes();
    // note: array operations return expressions, so assign the results back to wx_b
    wx_b = wx_b.array().exp().matrix();
    wx_b += ones;
    wx_b = wx_b.array().log().matrix();
    return -wx_b.sum() - vxb;
}
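For reference, a hedged compact rewrite of the same quantity as a free function (the name is hypothetical; W, hb and vb are assumed to have the shapes used by the class above):

// Sketch only: -sum_j log(1 + exp((W'v + hb)_j)) - v'vb in one pass.
double rbmEnergyCompact(const VectorXd& vNodes, const MatrixXd& W,
                        const VectorXd& hb, const VectorXd& vb){
    VectorXd wx_b = W.transpose()*vNodes + hb;              // same quantity as wx_b above
    double hiddenTerm = (1.0 + wx_b.array().exp()).log().sum();
    return -hiddenTerm - vNodes.dot(vb);
}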
Example 5: _getNextQuadraticHitTime
void HmcSampler::_getNextQuadraticHitTime(const VectorXd & a, const VectorXd & b, double & hit_time, int & cn, const bool first_bounce ){
    hit_time = 0;
    double mint;
    if (first_bounce) { mint = 0; }
    else { mint = min_t; }
    for (int i = 0; i != quadraticConstraints.size(); i++ ){
        QuadraticConstraint qc = quadraticConstraints[i];
        double q1 = -((a.transpose())*(qc.A))*a;
        q1 = q1 + ((b.transpose())*(qc.A))*b;
        double q2 = (qc.B).dot(b);
        double q3 = qc.C + a.transpose()*(qc.A)*a;
        double q4 = 2*b.transpose()*(qc.A)*a;
        double q5 = (qc.B).dot(a);
        double r4 = q1*q1 + q4*q4;
        double r3 = 2*q1*q2 + 2*q4*q5;
        double r2 = q2*q2 + 2*q1*q3 + q5*q5 - q4*q4;
        double r1 = 2*q2*q3 - 2*q4*q5;
        double r0 = q3*q3 - q5*q5;
        double roots[] = {0,0,0,0};
        double aa = r3/r4;
        double bb = r2/r4;
        double cc = r1/r4;
        double dd = r0/r4;
        // Solve quartics of the form x^4 + aa x^3 + bb x^2 + cc x + dd == 0
        int sols = quarticSolve(aa, bb, cc, dd, roots[0], roots[1], roots[2], roots[3]);
        for (int j = 0; j < sols; j++){
            double r = roots[j];
            if (abs(r) <= 1){
                double l1 = q1*r*r + q2*r + q3;
                double l2 = -sqrt(1 - r*r)*(q4*r + q5);
                if (l1/l2 > 0){
                    double t = acos(r);
                    if (t > mint && (hit_time == 0 || t < hit_time)){
                        hit_time = t;
                        cn = i;
                    }
                }
            }
        }
    }
}
Example 6: reduceBasisVectorSet
void SOGP::reduceBasisVectorSet(unsigned int index) {
    unsigned int end = this->current_size - 1;
    VectorXd zero_vector = VectorXd::Zero(this->current_size);
    // move the last basis vector's alpha row into the deleted slot
    VectorXd alpha_star = this->alpha.row(index);
    VectorXd last_item = this->alpha.row(end);
    alpha.block(index, 0, 1, this->output_dim) = last_item.transpose();
    alpha.block(end, 0, 1, this->output_dim) = VectorXd::Zero(this->output_dim).transpose();
    double cstar = this->C(index, index);
    VectorXd Cstar = this->C.col(index);
    Cstar(index) = Cstar(end);
    Cstar.conservativeResize(end);
    VectorXd Crep = C.col(end);
    Crep(index) = Crep(end);
    C.block(index, 0, 1, this->current_size) = Crep.transpose();
    C.block(0, index, this->current_size, 1) = Crep;
    C.block(end, 0, 1, this->current_size) = zero_vector.transpose();
    C.block(0, end, this->current_size, 1) = zero_vector;
    double qstar = this->Q(index, index);
    VectorXd Qstar = this->Q.col(index);
    Qstar(index) = Qstar(end);
    Qstar.conservativeResize(end);
    VectorXd Qrep = Q.col(end);
    Qrep(index) = Qrep(end);
    Q.block(index, 0, 1, this->current_size) = Qrep.transpose();
    Q.block(0, index, this->current_size, 1) = Qrep;
    Q.block(end, 0, 1, this->current_size) = zero_vector.transpose();
    Q.block(0, end, this->current_size, 1) = zero_vector;
    VectorXd qc = (Qstar + Cstar)/(qstar + cstar);
    for (unsigned int i = 0; i < this->output_dim; i++) {
        VectorXd diffAlpha = alpha.block(0, i, end, 1) - alpha_star(i)*qc;
        alpha.block(0, i, end, 1) = diffAlpha;
    }
    MatrixXd oldC = C.block(0, 0, end, end);
    C.block(0, 0, end, end) = oldC + (Qstar*Qstar.transpose())/qstar -
            ((Qstar + Cstar)*((Qstar + Cstar).transpose()))/(qstar + cstar);
    MatrixXd oldQ = Q.block(0, 0, end, end);
    Q.block(0, 0, end, end) = oldQ - (Qstar*Qstar.transpose())/qstar;
    this->basis_vectors[index] = this->basis_vectors[end];
    this->basis_vectors.pop_back();
    this->current_size = end;
}
Example 7: predict
void SOGP::predict(const VectorXd &state, VectorXd &prediction,
                   VectorXd &prediction_variance) {
    //check if we have initialised the system
    if (!this->initialized) {
        throw OTLException("SOGP not yet initialised");
    }
    double kstar = kernel->eval(state, state);
    //check if we have not been trained
    if (this->current_size == 0) {
        prediction = VectorXd::Zero(this->output_dim);
        prediction_variance = VectorXd::Ones(this->output_dim)*
                (kstar + this->noise);
        return;
    }
    VectorXd k;
    kernel->eval(state, this->basis_vectors, k);
    //std::cout << "K: \n" << k << std::endl;
    //std::cout << "alpha: \n" << this->alpha.block(0,0,this->current_size, this->output_dim) << std::endl;
    prediction = k.transpose() * this->alpha.block(0, 0, this->current_size, this->output_dim);
    prediction_variance = VectorXd::Ones(this->output_dim)*
            (k.dot(this->C.block(0, 0, this->current_size, this->current_size)*k)
             + kstar + this->noise);
    return;
}
Example 8: Likelihood
void parameters::Likelihood(const VectorXd & eff){
    VectorXd loc;
    loc.resize(m_proba.rows());
    loc = (m_proba.rowwise().sum().array().log());
    m_loglikelihood = eff.transpose()*loc;
    m_bic = m_loglikelihood - 0.5*m_nbparam*log(eff.sum());
}
Example 9: WishartUnit
MatrixXd WishartUnit(int m, int df)
{
    // Bartlett decomposition: c is upper triangular with chi-distributed diagonal
    // entries and standard-normal entries above the diagonal.
    // nrandn and rng are helpers/state defined elsewhere in this source.
    MatrixXd c(m,m);
    c.setZero();
    for ( int i = 0; i < m; i++ ) {
        std::gamma_distribution<> gam(0.5*(df - i));
        c(i,i) = sqrt(2.0 * gam(rng));
        VectorXd r = nrandn(m-i-1);
        c.block(i,i+1,1,m-i-1) = r.transpose();
    }
    MatrixXd ret = c.transpose() * c;
#ifdef TEST_MVNORMAL
    cout << "WISHART UNIT {\n" << endl;
    cout << "  m:\n" << m << endl;
    cout << "  df:\n" << df << endl;
    cout << "  ret;\n" << ret << endl;
    cout << "  c:\n" << c << endl;
    cout << "}\n" << ret << endl;
#endif
    return ret;
}
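A hedged usage sketch (my own illustration; WishartUnit is the function above, everything else is assumed): a draw from a Wishart distribution with a general scale matrix S can be obtained by sandwiching the unit draw between a Cholesky factor of S.

// Sketch only: W ~ Wishart(S, df), given the scale matrix S via its Cholesky factor.
MatrixXd WishartDraw(const MatrixXd& S, int df)
{
    Eigen::LLT<MatrixXd> llt(S);
    MatrixXd Ls = llt.matrixL();                          // S = Ls * Ls'
    return Ls * WishartUnit((int)S.rows(), df) * Ls.transpose();
}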
Example 10: TrainOnevsAll
void CLogistic::TrainOnevsAll(const Matrix<double, Dynamic, Dynamic, RowMajor>& X, const VectorXd& y_class, int num_labels, double lambda)
{
    /*
    trains multiple logistic regression classifiers and returns all
    the classifiers in a matrix classifier, where the i-th row of classifier
    corresponds to the classifier for label i
    */
    int m = X.rows();
    int n = X.cols();
    classifier = Matrix<double, Dynamic, Dynamic, RowMajor>::Zero(num_labels, n);
    // Iterate through all the classification classes
    for(int class_ndx = 0; class_ndx < num_labels; class_ndx++)
    {
        VectorXd theta = VectorXd::Zero(n);
        // build the one-vs.-all label vector: 1 for this class, 0 otherwise
        VectorXd c = VectorXd::Zero(y_class.rows());
        for (int point_ndx = 0; point_ndx < y_class.rows(); point_ndx++)
            c(point_ndx) = (y_class(point_ndx) == class_ndx ? 1.0 : 0.0);
        GradientDescent(X, c, theta, lambda);
        // store the result inside classifier
        classifier.row(class_ndx) = theta.transpose();
    }
}
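A hedged companion sketch (a hypothetical free function, not a method of the original class) showing how the trained classifier matrix would typically be used: score every row of X against each one-vs.-all classifier and pick the label with the largest score.

// Sketch only: one-vs.-all prediction from the classifier matrix built above.
Eigen::VectorXi PredictOnevsAll(const MatrixXd& X, const MatrixXd& classifier)
{
    MatrixXd scores = X * classifier.transpose();   // rows x num_labels
    Eigen::VectorXi labels(X.rows());
    for (int i = 0; i < X.rows(); ++i) {
        int best;
        scores.row(i).maxCoeff(&best);              // argmax over labels
        labels(i) = best;
    }
    return labels;
}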
Example 11: _verifyConstraints
double HmcSampler::_verifyConstraints(const VectorXd & b){
    double r = 0;
    int idx = 0;
    for (int i = 0; i != (int)quadraticConstraints.size(); i++ ){
        QuadraticConstraint qc = quadraticConstraints[i];
        double check = ((b.transpose())*(qc.A))*b + (qc.B).dot(b) + qc.C;
        if (i == 0 || check < r) {
            r = check;
        }
    }
    for (int i = 0; i != (int)linearConstraints.size(); i++ ){
        LinearConstraint lc = linearConstraints[i];
        double check = (lc.f).dot(b) + lc.g;
        if (i == 0 || check < r) {
            r = check;
            idx = i;
        }
    }
    // if (r < 0) {
    //     LinearConstraint lc = linearConstraints[idx];
    //     double fpart = (lc.f).dot(b);
    //     double gpart = lc.g;
    //     printf("The %ith constraint is negative. fb: %g, g: %g\n", idx, fpart, gpart);
    // }
    return r;
}
Example 12: squaredDistsToVectors
// Returns the matrix of pairwise squared distances between rows of X and rows of V,
// using ||x - v||^2 = ||x||^2 - 2*x.v + ||v||^2. The result is X.rows() x V.rows(),
// so a 2-D ArrayXXd is returned rather than a 1-D ArrayXd.
ArrayXXd squaredDistsToVectors(const MatrixXd& X, const MatrixXd& V) {
    auto prods = -2. * (X * V.transpose());
    MatrixXd dists = prods;
    VectorXd rowSquaredNorms = X.rowwise().squaredNorm();
    VectorXd colSquaredNorms = V.rowwise().squaredNorm();
    RowVectorXd colSquaredNormsAsRow = colSquaredNorms.transpose();
    dists.colwise() += rowSquaredNorms;
    dists.rowwise() += colSquaredNormsAsRow;
    // // does the above compute the distances properly? -> yes
    // ArrayXXd trueDists = ArrayXXd(X.rows(), V.rows());
    // for (int i = 0; i < X.rows(); i++) {
    //     for (int j = 0; j < V.rows(); j++) {
    //         VectorXd diff = X.row(i) - V.row(j);
    //         trueDists(i, j) = diff.squaredNorm();
    //         auto gap = fabs(trueDists(i, j) - dists(i, j));
    //         if (gap > .001) {
    //             printf("WE'RE COMPUTING THE DISTANCES WRONG!!!");
    //         }
    //         assert(gap < .001);
    //     }
    // }
    return dists.array();
}
Example 13: insertRow
void insertRow(MatrixXd &mat, const VectorXd &row)
{
    assert(mat.cols() == row.size());
    int rows = mat.rows();
    int cols = mat.cols();
    mat.noalias() = (MatrixXd(rows+1,cols) << mat, row.transpose()).finished();
}
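A hedged side note: the comma-initializer trick above builds the result in an explicit temporary. An equivalent sketch (hypothetical function name) using conservativeResize, which preserves the existing coefficients and avoids the explicit temporary in user code:

// Sketch only: append a row by resizing in place instead of rebuilding the matrix.
void insertRowInPlace(MatrixXd &mat, const VectorXd &row)
{
    assert(mat.cols() == row.size());
    mat.conservativeResize(mat.rows() + 1, Eigen::NoChange);
    mat.row(mat.rows() - 1) = row.transpose();
}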
Example 14: evalwa
// Evaluates a weighted automaton on the symbol sequence s: a0' * (prod_t wa[s_t]) * ainf.
double evalwa(const VectorXd & a0, const VectorXd & ainf, const vector<MatrixXd> & wa, const vector<int> s) {
    RowVectorXd a = a0.transpose();
    for (vector<int>::const_iterator it = s.begin(); it != s.end(); it++) {
        a *= wa[*it];
    }
    return a*ainf;
}
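A minimal hedged usage sketch (the values are made up for illustration):

// Sketch only: evaluate a 2-state weighted automaton on the string 0,1,0.
VectorXd a0(2), ainf(2);
a0 << 1, 0;
ainf << 0, 1;
std::vector<MatrixXd> wa(2, MatrixXd::Identity(2, 2));
wa[1](0, 1) = 0.5;                       // arbitrary transition weight
std::vector<int> s = {0, 1, 0};
double value = evalwa(a0, ainf, wa, s);  // a0' * wa[0] * wa[1] * wa[0] * ainf = 0.5 here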
Example 15: main
int main(int argc, const char * argv[])
{
    // set up a small linear program and solve it with the Simplex class
    int m = 2;
    int n = 4;
    MatrixXd A(m,n);
    VectorXd c(n);
    VectorXd b(m);
    VectorXd index(m);
    /*
    A << 4, 3, 1, 0, 0,
         2, 3, 0, 1, 0,
         4, 2, 0, 0, 1;
    c << 9, 12, 0, 0, 0;
    b << 180, 150, 160;
    index << 2, 3, 4;
    */
    A << -1, 2, 1, 0,
          1, 0, 0, 1;
    c << -1, 4, 0, 0;
    b << 30, 30;
    Simplex sim(A, b, c);
    VectorXd sol = sim.SolveLP(1);
    index = sim.getIdx();
    double J = sim.getZ();
    cout << "the solution is:\n" << index.transpose() << endl << sol.transpose() << endl;
    cout << "the optimal value is: " << J << endl;
    return 0;
}