This article collects typical usage examples of the C++ method eigen::MatrixXf::array. If you are wondering what MatrixXf::array does, how to call it, and what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class eigen::MatrixXf.
Thirteen code examples of MatrixXf::array are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
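Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what MatrixXf::array does: it reinterprets a matrix as an Eigen array so coefficient-wise operations become available, and .matrix() converts back.
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::MatrixXf m(2, 2);
    m << 1.f, 2.f,
         3.f, 4.f;
    // .array() gives a coefficient-wise view; .matrix() converts back.
    Eigen::MatrixXf squared = m.array().square().matrix();   // element-wise square
    Eigen::MatrixXf shifted = (m.array() + 1.0f).matrix();   // add a scalar to every entry
    float sum_of_products = (m.array() * m.array()).sum();   // element-wise product, then sum
    std::cout << squared << "\n\n" << shifted << "\n\n" << sum_of_products << "\n";
    return 0;
}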
Example 1: VectorXf
virtual Eigen::VectorXf gradient( const Eigen::MatrixXf & a, const Eigen::MatrixXf & b ) const {
    if (ktype_ == CONST_KERNEL)
        return Eigen::VectorXf();
    Eigen::MatrixXf fg = featureGradient( a, b );
    if (ktype_ == DIAG_KERNEL)
        return (f_.array()*fg.array()).rowwise().sum();
    else {
        Eigen::MatrixXf p = fg*f_.transpose();
        p.resize( p.cols()*p.rows(), 1 );
        return p;
    }
}
Example 2: K_function
float SaddlePointApproximation::K_function(float t, const Eigen::MatrixXf& G,
                                           const Eigen::MatrixXf& mu) const {
  // NOTE:
  // Computing log(1 - mu.array() + mu.array() * exp(G.array() * t)) directly
  // can be very inaccurate: when mu*exp(G*t) is very close to mu,
  // 1 - mu + mu*exp(G*t) cancels out many useful digits.
  // Refer to: google "when log1p should be used".
  Eigen::ArrayXf ret = (log1p(
      mu.array() * (G.array() * t).unaryExpr<float (*)(const float)>(&expm1f)));
  ret -= t * (G.array() * mu.array());
  return ret.isFinite().select(ret, 0).sum();
}
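The NOTE in Example 2 is about catastrophic cancellation. A minimal scalar sketch (values chosen arbitrarily, independent of the project code) of why the log1p/expm1 pair is used instead of the naive expression:
#include <cmath>
#include <cstdio>

int main() {
    // log(1 - mu + mu*exp(G*t)) == log1p(mu * expm1(G*t)), but when G*t is tiny
    // the left-hand form loses most of its significant digits in float arithmetic.
    const float mu = 0.5f, G = 1.0f, t = 1e-6f;
    const float naive  = std::log(1.0f - mu + mu * std::exp(G * t));
    const float stable = std::log1p(mu * std::expm1(G * t));
    std::printf("naive = %.9g, stable = %.9g\n", naive, stable);
    return 0;
}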
Example 3: featureGradient
Eigen::MatrixXf featureGradient( const Eigen::MatrixXf & a, const Eigen::MatrixXf & b ) const {
    if (ntype_ == NO_NORMALIZATION )
        return kernelGradient( a, b );
    else if (ntype_ == NORMALIZE_SYMMETRIC ) {
        Eigen::MatrixXf fa = lattice_.compute( a*norm_.asDiagonal(), true );
        Eigen::MatrixXf fb = lattice_.compute( b*norm_.asDiagonal() );
        Eigen::MatrixXf ones = Eigen::MatrixXf::Ones( a.rows(), a.cols() );
        Eigen::VectorXf norm3 = norm_.array()*norm_.array()*norm_.array();
        Eigen::MatrixXf r = kernelGradient( 0.5*( a.array()*fb.array() + fa.array()*b.array() ).matrix()*norm3.asDiagonal(), ones );
        return - r + kernelGradient( a*norm_.asDiagonal(), b*norm_.asDiagonal() );
    }
    else if (ntype_ == NORMALIZE_AFTER ) {
        Eigen::MatrixXf fb = lattice_.compute( b );
        Eigen::MatrixXf ones = Eigen::MatrixXf::Ones( a.rows(), a.cols() );
        Eigen::VectorXf norm2 = norm_.array()*norm_.array();
        Eigen::MatrixXf r = kernelGradient( ( a.array()*fb.array() ).matrix()*norm2.asDiagonal(), ones );
        return - r + kernelGradient( a*norm_.asDiagonal(), b );
    }
    else /*if (ntype_ == NORMALIZE_BEFORE )*/ {
        Eigen::MatrixXf fa = lattice_.compute( a, true );
        Eigen::MatrixXf ones = Eigen::MatrixXf::Ones( a.rows(), a.cols() );
        Eigen::VectorXf norm2 = norm_.array()*norm_.array();
        Eigen::MatrixXf r = kernelGradient( ( fa.array()*b.array() ).matrix()*norm2.asDiagonal(), ones );
        return -r+kernelGradient( a, b*norm_.asDiagonal() );
    }
}
Example 4: from_melspectrum
Eigen::MatrixXf mfcc::from_melspectrum(const Eigen::MatrixXf& mel)
{
    MINILOG(logTRACE) << "Computing MFCCs.";
    Eigen::MatrixXf mfcc_coeffs = dct.compress((1.0f + mel.array()).log());
    MINILOG(logTRACE) << "MFCCS: " << mfcc_coeffs;
    MINILOG(logTRACE) << "Finished Computing MFCCs.";
    return mfcc_coeffs;
}
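dct.compress() above is project-specific and not shown on this page. As a rough sketch of the same idea with plain Eigen, one can log-compress the mel spectrum via .array() and project it onto a DCT-II basis; the basis construction below is an assumption for illustration, not the project's implementation.
#include <Eigen/Dense>
#include <cmath>

// Hypothetical stand-in for dct.compress(): log-compress, then apply a DCT-II basis.
Eigen::MatrixXf mfcc_sketch(const Eigen::MatrixXf& mel, int num_coeffs) {
    const int bands = mel.rows();
    const float kPi = 3.14159265358979f;
    Eigen::MatrixXf dct(num_coeffs, bands);
    for (int k = 0; k < num_coeffs; ++k)
        for (int n = 0; n < bands; ++n)
            dct(k, n) = std::cos(kPi / bands * (n + 0.5f) * k);
    // (1 + mel).log() is coefficient-wise thanks to .array()
    return dct * (1.0f + mel.array()).log().matrix();
}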
Example 5: K_prime_function
float SaddlePointApproximation::K_prime_function(
    float t, const Eigen::MatrixXf& G, const Eigen::MatrixXf& mu) const {
  // K'(t) = sum( mu*G*exp(G*t) / (1 - mu + mu*exp(G*t)) ) - sum(G*mu).
  // Multiplying numerator and denominator by exp(-G*t) keeps exp() from
  // overflowing for large t; the (1 - mu) factor matches K_prime2_function
  // and CGF_equation below.
  return (mu.array() * G.array() /
              ((1.0 - mu.array()) * (-G.array() * t).exp() + mu.array()) -
          G.array() * mu.array())
      .sum();
}
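The denominator in K_prime_function is the direct derivative of K(t) with numerator and denominator multiplied by exp(-G*t). A small scalar check (arbitrary values) that the two forms agree:
#include <cmath>
#include <cstdio>

int main() {
    const float mu = 0.3f, G = 2.0f, t = 0.7f;
    // direct derivative of log(1 - mu + mu*exp(G*t)) w.r.t. t, minus G*mu
    const float direct = mu * G * std::exp(G * t) / (1.0f - mu + mu * std::exp(G * t)) - G * mu;
    // rescaled form used in K_prime_function above
    const float rescaled = mu * G / ((1.0f - mu) * std::exp(-G * t) + mu) - G * mu;
    std::printf("direct = %g, rescaled = %g\n", direct, rescaled);
    return 0;
}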
Example 6: exp
float SaddlePointApproximation::K_prime2_function(
    float t, const Eigen::MatrixXf& G, const Eigen::MatrixXf& mu) const {
  Eigen::ArrayXf denom =
      (1.0 - mu.array()) * (-G.array() * t).exp() + mu.array();
  Eigen::ArrayXf tmp = ((1.0 - mu.array()) * mu.array() * G.array() *
                        G.array() * exp(-G.array() * t) / (denom * denom));
  return tmp.isNaN().select(0, tmp).sum();
  // dumpToFile(tmp, "tmp.tmp");
  // return ((1.0 - mu.array()) * mu.array() * G.array() * G.array() *
  //         exp(-G.array() * t) / (denom * denom))
  //     .sum();
}
Example 7: klDivergence
// Compute the KL-divergence of a set of marginals
double DenseCRF::klDivergence( const Eigen::MatrixXf & Q ) const {
    double kl = 0;
    // Add the entropy term
    for( int i=0; i<Q.cols(); i++ )
        for( int l=0; l<Q.rows(); l++ )
            kl += Q(l,i)*log(std::max( Q(l,i), 1e-20f) );
    // Add the unary term
    if( unary_ ) {
        Eigen::MatrixXf unary = unary_->get();
        for( int i=0; i<Q.cols(); i++ )
            for( int l=0; l<Q.rows(); l++ )
                kl += unary(l,i)*Q(l,i);
    }
    // Add all pairwise terms
    Eigen::MatrixXf tmp;
    for( unsigned int k=0; k<pairwise_.size(); k++ ) {
        pairwise_[k]->apply( tmp, Q );
        kl += (Q.array()*tmp.array()).sum();
    }
    return kl;
}
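Since this page is about .array(), it is worth noting that the two scalar loops above have a direct coefficient-wise equivalent. A sketch (same 1e-20 clamp, same label-by-pixel layout of Q):
#include <Eigen/Dense>

// Vectorized form of the entropy and unary terms from klDivergence above.
double kl_entropy_and_unary(const Eigen::MatrixXf& Q, const Eigen::MatrixXf& unary) {
    double kl = (Q.array() * Q.array().max(1e-20f).log()).sum();  // entropy term
    kl += (unary.array() * Q.array()).sum();                      // unary term
    return kl;
}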
Example 8: make_tuple
std::tuple<Eigen::MatrixXf, Eigen::VectorXf> dart::L2NormOfWeightedError::computeGNParam(const Eigen::VectorXf &diff) {
    // compute error from joint deviation
    // error: L2 norm of the weighted joint angle difference
    const float e = (_weight * diff).norm();
    // Jacobian of the error, i.e. the partial derivative of the error w.r.t. each joint value.
    // For an error of zero the partial derivative is not defined, so we set it to 0.
    const Eigen::MatrixXf J = (diff.array()==0).select(0, - pow(_weight, 2) * diff.array()/diff.norm());
    const Eigen::VectorXf JTe = J.array()*e;
    return std::make_tuple(J, JTe);
}
Example 9: TestCovariate
int TestCovariate(Matrix& Xnull, Matrix& Y, Matrix& Xcol,
                  const EigenMatrix& kinshipU, const EigenMatrix& kinshipS){
  Eigen::MatrixXf g;
  G_to_Eigen(Xcol, &g);
  // store U'*G for computing AF later.
  const Eigen::MatrixXf& U = kinshipU.mat;
  this->ug = U.transpose() * g;

  Eigen::RowVectorXf g_mean = g.colwise().mean();
  g = g.rowwise() - g_mean;

  double gTg = g.array().square().sum();
  double t_new = (g.array() * this->transformedY.array()).sum();
  t_new = t_new * t_new / gTg;
  double t_score = t_new / this->gamma;
  this->betaG = (g.transpose() * this->transformedY).sum() / gTg / this->gamma;
  this->betaGVar = this->ySigmaY / gTg / this->gamma;

  this->pvalue = gsl_cdf_chisq_Q(t_score, 1.0);
  return 0;
}
Example 10: matlabFormat
ConverterPlaneFromTo3d::ConverterPlaneFromTo3d(float fx, float fy, float cx, float cy, int height, int width)
{
    Eigen::VectorXf colIndicies = Eigen::VectorXf::LinSpaced(Eigen::Sequential, width, 1, (float)width);
    Eigen::VectorXf rowIndicies = Eigen::VectorXf::LinSpaced(Eigen::Sequential, height, 1, (float)height);
    Eigen::VectorXf onesColSize = Eigen::VectorXf::Ones(width, 1);
    Eigen::VectorXf onesRowSize = Eigen::VectorXf::Ones(height, 1);
    Eigen::MatrixXf indiciesMatrixAxisX = onesRowSize * colIndicies.transpose(); // each row = 1, 2, 3, 4, ...
    Eigen::MatrixXf indiciesMatrixAxisY = rowIndicies * onesColSize.transpose();
    xAdjustment_ = (indiciesMatrixAxisX.array() - cx) / fx;
    yAdjustment_ = (indiciesMatrixAxisY.array() - cy) / fy;

    Eigen::IOFormat matlabFormat(Eigen::StreamPrecision, Eigen::DontAlignCols, ", ", "\n", "", "", "", "");
    std::ostringstream saveFileNameString0;
    saveFileNameString0 << "indiciesMatrixAxisX.csv";
    std::ofstream matrixFile;
    matrixFile.open(saveFileNameString0.str());
    if (matrixFile.is_open())
    {
        matrixFile << indiciesMatrixAxisX.format(matlabFormat);
    }
    std::ostringstream saveFileNameString;
    saveFileNameString << "xAdjustment.csv";
    std::ofstream matrixFile1;
    matrixFile1.open(saveFileNameString.str());
    if (matrixFile1.is_open())
    {
        matrixFile1 << xAdjustment_.format(matlabFormat);
    }
}
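Only the constructor is shown above. A hedged sketch of how the precomputed xAdjustment_ / yAdjustment_ grids are typically used with a pinhole camera model (the struct and function names here are hypothetical, not part of the original class):
#include <Eigen/Dense>

// With the pinhole model, X = Z*(u - cx)/fx and Y = Z*(v - cy)/fy, so a depth
// image can be back-projected with pure coefficient-wise products.
struct BackProjectSketch {
    Eigen::MatrixXf xAdjustment_;  // (u - cx)/fx per pixel
    Eigen::MatrixXf yAdjustment_;  // (v - cy)/fy per pixel

    void depthTo3d(const Eigen::MatrixXf& depth,
                   Eigen::MatrixXf* X, Eigen::MatrixXf* Y, Eigen::MatrixXf* Z) const {
        *X = (depth.array() * xAdjustment_.array()).matrix();
        *Y = (depth.array() * yAdjustment_.array()).matrix();
        *Z = depth;
    }
};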
Example 11: CGF_equation
// CGF - S = K'(t) - S
float SaddlePointApproximation::CGF_equation(float t, const Eigen::MatrixXf& G,
                                             const Eigen::MatrixXf& mu,
                                             const Eigen::MatrixXf& y) const {
  float val = (mu.array() * G.array() /
               ((1.0 - mu.array()) * (-G.array() * t).exp() + mu.array()))
                  .sum() -
              (G.array() * (y).array()).sum();
  static int counter = 0;
  counter++;
  fprintf(stderr, "[%d] t = %g, CGF_equation(t) = %g\n", counter, t, val);
  return val;
}
Example 12: calculatePvalue
int SaddlePointApproximation::calculatePvalue(const Eigen::MatrixXf& g_tilde,
                                              float* newPvalue) {
  // find root
  // 1. find boundary of the root
  // first try [0, 5] or [0, -5]
  const float s = (g_tilde.array() * resid_.array()).sum();
  const float var_s =
      (g_tilde.array().square() * (mu_.array() * (1.0 - mu_.array())).abs())
          .sum();
  // if (fabs(s) < 2.0 * sqrt(var_s) &&
  //     !std::getenv("BOLTLMM_FORCE_SADDLEPOINT")) {
  //   fprintf(stderr, "Skip saddle point approximation (far from mean, |%g| >
  //   2.0 * sqrt(%g) )!\n", s, var_s);
  //   return -2;
  // }
  float t_grid[2];
  float y_grid[2];
  if (s > 0) {  // \hat{t} > 0
    t_grid[0] = -0.01;
    t_grid[1] = std::min(s / K_prime2_function(0, g_tilde, mu_), (float)5.);
  } else {
    t_grid[0] = 0.01;
    t_grid[1] = std::max(s / K_prime2_function(0, g_tilde, mu_), (float)-5.);
  }
  y_grid[0] = CGF_equation(t_grid[0], g_tilde, mu_, y_);
  y_grid[1] = CGF_equation(t_grid[1], g_tilde, mu_, y_);

  int iter = 0;
  // extend the boundary
  // e.g. [0, 5] => [5, 15] => [15, 60]...
  while (y_grid[0] * y_grid[1] > 0) {
    t_grid[0] = t_grid[1];
    y_grid[0] = y_grid[1];
    t_grid[1] *= 4;
    y_grid[1] = CGF_equation(t_grid[1], g_tilde, mu_, y_);
    ++iter;
    fprintf(stderr, "iter %d, %g -> %g \n", iter, t_grid[1], y_grid[1]);
    if (iter > 10) {
      fprintf(stderr,
              "after 10 iterations, still cannot find boundary conditions\n");
      dumpToFile(y_, "tmp.y");
      dumpToFile(g_tilde, "tmp.g_tilde");
      dumpToFile(mu_, "tmp.mu");
      dumpToFile(resid_, "tmp.resid");
      return 1;  // exit(1);
    }
  }
  if (t_grid[0] > t_grid[1]) {
    std::swap(t_grid[0], t_grid[1]);
    std::swap(y_grid[0], y_grid[1]);
  }
  assert(y_grid[0] < 0 && y_grid[1] > 0);  // TODO: make this more robust
  if (y_grid[0] * y_grid[1] > 0) {
    fprintf(stderr, "Wrong boundary conditions!\n");
    // dumpToFile(covEffect, "tmp.covEffect");
    // dumpToFile(xbeta, "tmp.xbeta");
    dumpToFile(g_tilde, "tmp.g_tilde");
    dumpToFile(mu_, "tmp.mu");
    dumpToFile(resid_, "tmp.resid");
    // exit(1);
    return 1;
  }

  float t_new = t_grid[0];
  float y_new;
  iter = 0;
  // stop conditions:
  // 1. the secant method has narrowed the bracket to a small enough region
  // 2. the newly proposed point has a different sign from the statistic s.
  //    Usually they have the same sign, unless a numerical issue arises.
  while (fabs(t_grid[1] - t_grid[0]) > 1e-3 || t_new * s < 0) {
    t_new = t_grid[0] +
            (t_grid[1] - t_grid[0]) * (-y_grid[0]) / (y_grid[1] - y_grid[0]);
    // switch to bisection?
    const float dist = t_grid[1] - t_grid[0];
    if (t_grid[1] - t_new < 0.1 * dist) {
      t_new = t_grid[0] + (t_grid[1] - t_grid[0]) * 0.8;
    }
    if (t_new - t_grid[0] < 0.1 * dist) {
      t_new = t_grid[0] + (t_grid[1] - t_grid[0]) * 0.2;
    }
    y_new = CGF_equation(t_new, g_tilde, mu_, y_);
    if (y_new == 0) {
      break;
    } else if (y_new > 0) {
      t_grid[1] = t_new;
      y_grid[1] = y_new;
    } else if (y_new < 0) {
      t_grid[0] = t_new;
      y_grid[0] = y_new;
    }
    ++iter;
    fprintf(stderr, "%g -> %g, %g -> %g \n", t_grid[0], y_grid[0], t_grid[1],
            y_grid[1]);
    if (iter > 10) {
//......... the rest of the code is omitted here .........
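Example 12 is cut off above. The visible loop is a secant step that is clamped away from the ends of the current bracket (falling back toward a bisection-like step). A self-contained sketch of that bracketing pattern applied to an arbitrary function, independent of the SaddlePointApproximation class:
#include <cmath>
#include <cstdio>

// Safeguarded secant: propose the secant root, but keep it at least 20% away
// from either end of the current bracket, then shrink the bracket by sign.
// f must be continuous with f(lo) < 0 < f(hi).
template <typename F>
float safeguarded_secant(F f, float lo, float hi, float tol = 1e-3f) {
  float flo = f(lo), fhi = f(hi);
  while (hi - lo > tol) {
    float t = lo + (hi - lo) * (-flo) / (fhi - flo);  // secant proposal
    const float dist = hi - lo;
    if (hi - t < 0.1f * dist) t = lo + 0.8f * dist;   // too close to the right edge
    if (t - lo < 0.1f * dist) t = lo + 0.2f * dist;   // too close to the left edge
    const float ft = f(t);
    if (ft == 0.0f) return t;
    if (ft > 0.0f) { hi = t; fhi = ft; } else { lo = t; flo = ft; }
  }
  return 0.5f * (lo + hi);
}

int main() {
  // example: root of x^3 - 2 on [0, 2]
  const float root = safeguarded_secant([](float x) { return x * x * x - 2.0f; }, 0.0f, 2.0f);
  std::printf("root ~ %g (expected %g)\n", root, std::cbrt(2.0f));
  return 0;
}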
Example 13: FitNullModel
int FitNullModel(Matrix& mat_Xnull, Matrix& mat_y,
                 const EigenMatrix& kinshipU, const EigenMatrix& kinshipS){
  // type conversion
  Eigen::MatrixXf x;
  Eigen::MatrixXf y;
  G_to_Eigen(mat_Xnull, &x);
  G_to_Eigen(mat_y, &y);
  this->lambda = kinshipS.mat;
  const Eigen::MatrixXf& U = kinshipU.mat;
  // rotate
  this->ux = U.transpose() * x;
  this->uy = U.transpose() * y;

  // get beta, sigma2_g and delta
  // where delta = sigma2_e / sigma2_g
  double loglik[101];
  int maxIndex = -1;
  double maxLogLik = 0;
  for (int i = 0; i <= 100; ++i ){
    double d = exp(-10 + i * 0.2);
    getBetaSigma2(d);
    loglik[i] = getLogLikelihood(d);
    // fprintf(stderr, "%d\tdelta=%g\tll=%lf\n", i, delta, loglik[i]);
    if (std::isnan(loglik[i])) {
      continue;
    }
    if (maxIndex < 0 || loglik[i] > maxLogLik) {
      maxIndex = i;
      maxLogLik = loglik[i];
    }
  }
  if (maxIndex < 0) {  // every grid point gave a NaN log-likelihood
    fprintf(stderr, "Cannot optimize\n");
    return -1;
  }
  if (maxIndex == 0 || maxIndex == 100) {
    // on the boundary
    // do not try to maximize it.
  } else {
    gsl_function F;
    F.function = goalFunction;
    F.params = this;

    Minimizer minimizer;
    double lb = exp(-10 + (maxIndex-1) * 0.2);
    double ub = exp(-10 + (maxIndex+1) * 0.2);
    double start = exp(-10 + maxIndex * 0.2);
    if (minimizer.minimize(F, start, lb, ub)) {
      // fprintf(stderr, "Minimization failed, fall back to initial guess.\n");
      this->delta = start;
    } else {
      this->delta = minimizer.getX();
      // fprintf(stderr, "minimization succeeded when delta = %g, sigma2_g = %g\n", this->delta, this->sigma2_g);
    }
  }
  // store some intermediate results
  // fprintf(stderr, "maxIndex = %d, delta = %g, Try brent\n", maxIndex, delta);
  // fprintf(stderr, "beta[%d][%d] = %g\n", (int)beta.rows(), (int)beta.cols(), beta(0,0));
  this->h2 = 1.0 /(1.0 + this->delta);
  this->sigma2 = this->sigma2_g * this->h2;

  // we derive a different formula to replace the original eqn (7)
  this->gamma = (this->lambda.array() / (this->lambda.array() + this->delta)).sum() / this->sigma2_g / (this->ux.rows() - 1 ) ;
  // fprintf(stderr, "gamma = %g\n", this->gamma);

  // transformedY = \Sigma^{-1} * (y_tilda) and y_tilda = y - X * \beta
  // since \Sigma = (\sigma^2_g * h^2 ) * (U * (\lambda + delta) * U')
  // transformedY = 1 / (\sigma^2_g * h^2 ) * (U * (\lambda+delta)^{-1} * U' * (y_tilda))
  //              = 1 / (\sigma^2_g * h^2 ) * (U * \lambda^{-1} * (uResid))
  // since h^2 = 1 / (1+delta)
  // transformedY = (1 + delta) / \sigma^2_g * (U * \lambda^{-1} * (uResid))
  Eigen::MatrixXf resid = y - x * (x.transpose() * x).eval().ldlt().solve(x.transpose() * y); // this is y_tilda
  this->transformedY.noalias() = U.transpose() * resid;
  this->transformedY = (this->lambda.array() + this->delta).inverse().matrix().asDiagonal() * this->transformedY;
  this->transformedY = U * this->transformedY;
  this->transformedY /= this->sigma2_g;
  // fprintf(stderr, "transformedY(0,0) = %g\n", transformedY(0,0));

  this->ySigmaY= (resid.array() * transformedY.array()).sum();
  return 0;
}
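The transformedY update above mixes the array and matrix views: the coefficient-wise reciprocal (lambda + delta)^{-1} is built with .array().inverse() and then applied as a diagonal scaling through .matrix().asDiagonal(). A minimal sketch (arbitrary sizes, standalone) showing that this idiom is the same as an element-wise division:
#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::VectorXf lambda = Eigen::VectorXf::LinSpaced(4, 1.0f, 4.0f);
    Eigen::VectorXf v = Eigen::VectorXf::Ones(4);
    const float delta = 0.5f;
    // diagonal-scaling form, as in FitNullModel above
    Eigen::VectorXf a = (lambda.array() + delta).inverse().matrix().asDiagonal() * v;
    // equivalent coefficient-wise form
    Eigen::VectorXf b = (v.array() / (lambda.array() + delta)).matrix();
    std::cout << (a - b).norm() << std::endl;  // prints ~0
    return 0;
}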