本文整理汇总了C++中VectorXd::append方法的典型用法代码示例。如果您正苦于以下问题:C++ VectorXd::append方法的具体用法?C++ VectorXd::append怎么用?C++ VectorXd::append使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VectorXd的用法示例。
在下文中一共展示了VectorXd::append方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: fastLasso
// barebones version of the lasso for fixed lambda
// Eigen library is used for linear algebra
// x .............. predictor matrix
// y .............. response
// lambda ......... penalty parameter
// useSubset ...... logical indicating whether lasso should be computed on a
// subset
// subset ......... indices of subset on which lasso should be computed
// normalize ...... logical indicating whether predictors should be normalized
// useIntercept ... logical indicating whether intercept should be included
// eps ............ small numerical value (effective zero)
// useGram ........ logical indicating whether Gram matrix should be computed
// in advance
// useCrit ........ logical indicating whether to compute objective function
void fastLasso(const MatrixXd& x, const VectorXd& y, const double& lambda,
const bool& useSubset, const VectorXi& subset, const bool& normalize,
const bool& useIntercept, const double& eps, const bool& useGram,
const bool& useCrit,
// intercept, coefficients, residuals and objective function are returned
// through the following parameters
double& intercept, VectorXd& beta, VectorXd& residuals, double& crit) {
// data initializations
int n, p = x.cols();
MatrixXd xs;
VectorXd ys;
if(useSubset) {
n = subset.size();
xs.resize(n, p);
ys.resize(n);
int s;
for(int i = 0; i < n; i++) {
s = subset(i);
xs.row(i) = x.row(s);
ys(i) = y(s);
}
} else {
n = x.rows();
xs = x; // does this copy memory?
ys = y; // does this copy memory?
}
double rescaledLambda = n * lambda / 2;
// center data and store means
RowVectorXd meanX;
double meanY;
if(useIntercept) {
meanX = xs.colwise().mean(); // columnwise means of predictors
xs.rowwise() -= meanX; // sweep out columnwise means
meanY = ys.mean(); // mean of response
for(int i = 0; i < n; i++) {
ys(i) -= meanY; // sweep out mean
}
} else {
meanY = 0; // just to avoid warning, this is never used
// intercept = 0; // zero intercept
}
// some initializations
VectorXi inactive(p); // inactive predictors
int m = 0; // number of inactive predictors
VectorXi ignores; // indicates variables to be ignored
int s = 0; // number of ignored variables
// normalize predictors and store norms
RowVectorXd normX;
if(normalize) {
normX = xs.colwise().norm(); // columnwise norms
double epsNorm = eps * sqrt(n); // R package 'lars' uses n, not n-1
for(int j = 0; j < p; j++) {
if(normX(j) < epsNorm) {
// variance is too small: ignore variable
ignores.append(j, s);
s++;
// set norm to tolerance to avoid numerical problems
normX(j) = epsNorm;
} else {
inactive(m) = j; // add variable to inactive set
m++; // increase number of inactive variables
}
xs.col(j) /= normX(j); // sweep out norm
}
// resize inactive set and update number of variables if necessary
if(m < p) {
inactive.conservativeResize(m);
p = m;
}
} else {
for(int j = 0; j < p; j++) inactive(j) = j; // add variable to inactive set
m = p;
}
// compute Gram matrix if requested (saves time if number of variables is
// not too large)
MatrixXd Gram;
if(useGram) {
Gram.noalias() = xs.transpose() * xs;
}
// further initializations for iterative steps
//.........这里部分代码省略.........