本文整理汇总了C++中VectorXd::mean方法的典型用法代码示例。如果您正苦于以下问题:C++ VectorXd::mean方法的具体用法?C++ VectorXd::mean怎么用?C++ VectorXd::mean使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VectorXd
的用法示例。
在下文中一共展示了VectorXd::mean方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: standard_deviation
// Sample standard deviation of the entries of xx.
//
// Fixes: the original computed sqrt(accum)/(n-1), which is not the
// standard deviation; the correct sample formula is sqrt(accum/(n-1)).
// Also guards against n < 2, where the original divided by zero.
//
// xx ... vector of samples
// returns the unbiased (n-1 denominator) sample standard deviation,
// or 0.0 when fewer than two samples are available.
double standard_deviation(const VectorXd& xx)
{
    const int n = xx.size();
    if (n < 2)
        return 0.0; // stddev is undefined for <2 samples; avoid division by zero
    const double mean = xx.mean();
    double accum = 0.0;
    for (int kk = 0; kk < n; kk++)
    {
        const double dev = xx(kk) - mean;
        accum += dev * dev;
    }
    return sqrt(accum / (n - 1));
}
示例2: main
// Load a mesh and a per-vertex scalar function, compute the gradient of the
// function over the mesh, and visualize the function as pseudocolor with a
// black segment per face pointing in the gradient direction.
int main(int argc, char *argv[])
{
using namespace Eigen;
using namespace std;
MatrixXd V;
MatrixXi F;
// Load a mesh in OFF format
igl::readOFF("../shared/cheburashka.off", V, F);
// Read scalar function values from a file, U: #V by 1
VectorXd U;
igl::readDMAT("../shared/cheburashka-scalar.dmat",U);
// Compute gradient operator: #F*3 by #V
SparseMatrix<double> G;
igl::grad(V,F,G);
// Compute gradient of U; G*U stacks the x, y, z components, so remap the
// (#F*3)-vector into a #F by 3 matrix (one gradient row per face).
MatrixXd GU = Map<const MatrixXd>((G*U).eval().data(),F.rows(),3);
// Compute gradient magnitude per face
const VectorXd GU_mag = GU.rowwise().norm();
igl::viewer::Viewer viewer;
viewer.data.set_mesh(V, F);
// Compute pseudocolor for original function
MatrixXd C;
igl::jet(U,true,C);
// // Or for gradient magnitude
//igl::jet(GU_mag,true,C);
viewer.data.set_colors(C);
// Scale factor for the drawn segments: average edge length divided by the
// average gradient magnitude, so arrows are sized relative to the mesh.
const double max_size = igl::avg_edge_length(V,F) / GU_mag.mean();
// Draw a black segment in direction of gradient at face barycenters
MatrixXd BC;
igl::barycenter(V,F,BC);
const RowVector3d black(0,0,0);
viewer.data.add_edges(BC,BC+max_size*GU, black);
// Hide wireframe
viewer.core.show_lines = false;
viewer.launch();
}
示例3: poisson
void linear::poisson(const VectorXd& lapl, VectorXd& f) {
if(stiffp1.size()==0) fill_stiff();
if(mass.size()==0) fill_mass();
VectorXd massl = mass * lapl ;
int N=massl.size();
VectorXd massll = massl.tail( N-1 );
VectorXd ff= solver_stiffp1.solve( massll );
f(0) = 0;
f.tail(N-1) = ff;
// zero mean
f = f.array() - f.mean();
return;
}
示例4: costsToWeights
void UpdaterMean::costsToWeights(const VectorXd& costs, string weighting_method, double eliteness, VectorXd& weights) const
{
weights.resize(costs.size());
if (weighting_method.compare("PI-BB")==0)
{
// PI^2 style weighting: continuous, cost exponention
double h = eliteness; // In PI^2, eliteness parameter is known as "h"
double range = costs.maxCoeff()-costs.minCoeff();
if (range==0)
weights.fill(1);
else
weights = (-h*(costs.array()-costs.minCoeff())/range).exp();
}
else if (weighting_method.compare("CMA-ES")==0 || weighting_method.compare("CEM")==0 )
{
// CMA-ES and CEM are rank-based, so we must first sort the costs, and the assign a weight to
// each rank.
VectorXd costs_sorted = costs;
std::sort(costs_sorted.data(), costs_sorted.data()+costs_sorted.size());
// In Python this is more elegant because we have argsort.
// indices = np.argsort(costs)
// It is possible to do this with fancy lambda functions or std::pair in C++ too, but I don't
// mind writing two for loops instead ;-)
weights.fill(0.0);
int mu = eliteness; // In CMA-ES, eliteness parameter is known as "mu"
assert(mu<costs.size());
for (int ii=0; ii<mu; ii++)
{
double cur_cost = costs_sorted[ii];
for (int jj=0; jj<costs.size(); jj++)
{
if (costs[jj] == cur_cost)
{
if (weighting_method.compare("CEM")==0)
weights[jj] = 1.0/mu; // CEM
else
weights[jj] = log(mu+0.5) - log(ii+1); // CMA-ES
break;
}
}
}
// For debugging
//MatrixXd print_mat(3,costs.size());
//print_mat.row(0) = costs_sorted;
//print_mat.row(1) = costs;
//print_mat.row(2) = weights;
//cout << print_mat << endl;
}
else
{
cout << __FILE__ << ":" << __LINE__ << ":WARNING: Unknown weighting method '" << weighting_method << "'. Calling with PI-BB weighting." << endl;
costsToWeights(costs, "PI-BB", eliteness, weights);
return;
}
// Relative standard deviation of total costs
double mean = weights.mean();
double std = sqrt((weights.array()-mean).pow(2).mean());
double rel_std = std/mean;
if (rel_std<1e-10)
{
// Special case: all costs are the same
// Set same weights for all.
weights.fill(1);
}
// Normalize weights
weights = weights/weights.sum();
}
示例5: calculateRowMean
// Return the arithmetic mean of the entries of dataRow.
double DataPackage::calculateRowMean(const VectorXd &dataRow)
{
    const double rowMean = dataRow.mean();
    return rowMean;
}
示例6: msac
// MSAC (M-estimator SAmple Consensus) estimation of a 3x3 transform mapping
// pointsFrom to pointsTo: repeatedly fit a transform to two random point
// correspondences, keep the one with the lowest accumulated (thresholded)
// distance, then refit on the inliers of that best model.
//
// pointsFrom/pointsTo ... 2xN matching point sets
// maxNumTrials .......... number of random sampling iterations
// confidence ............ currently unused in this implementation
// maxDistance ........... per-point distance threshold
// returns the refitted transform, or identity if fewer than 2 inliers.
Matrix3d msac( const Eigen::Matrix2Xd& pointsFrom, const Eigen::Matrix2Xd& pointsTo,
int maxNumTrials, double confidence, double maxDistance ) {
double threshold = maxDistance;
int numPts = pointsFrom.cols();
int idxTrial = 1;
int numTrials = maxNumTrials;
// Worst possible score: every point at the threshold distance.
double maxDis = threshold * numPts;
double bestDist = maxDis;
Matrix3d bestT;
bestT << 1, 0, 0, 0, 1, 0, 0, 0, 1;
int index1;
int index2;
// Get two random, different numbers in [0:pointsFrom.cols()-1]
std::uniform_int_distribution<int> distribution1( 0, pointsFrom.cols()-1 );
std::uniform_int_distribution<int> distribution2( 0, pointsFrom.cols()-2 );
while ( idxTrial <= numTrials ) {
// Get two random, different numbers in [0:pointsFrom.cols()-1]
// (drawing index2 from a range one smaller and shifting it past index1
// guarantees the two indices differ).
index1 = distribution1( msacGenerator );
index2 = distribution2( msacGenerator );
if ( index2 >= index1 )
index2++;
// Indices are stored as doubles in a Vector2d; computeTform presumably
// converts them back — TODO confirm.
Vector2d indices( index1, index2 );
/*std::cout << "indices: " << indices.transpose()
<< " pointsFrom.cols: " << pointsFrom.cols()
<< " pointsTo.cols: " << pointsTo.cols() << std::endl;*/
// Get T form Calculated from this set of points
Matrix3d T = computeTform( pointsFrom, pointsTo, indices );
// evaluateTform is expected to return per-point distances capped at
// threshold (MSAC scoring) — TODO confirm.
VectorXd dis = evaluateTform( pointsFrom, pointsTo, T, threshold );
double accDis = dis.sum();
// Keep the transform with the lowest accumulated distance.
if ( accDis < bestDist ) {
bestDist = accDis;
bestT = T;
}
idxTrial++;
}
// Re-score the best model and adapt the inlier threshold to the mean
// distance under that model.
VectorXd dis = evaluateTform( pointsFrom, pointsTo, bestT, threshold );
threshold *= dis.mean();
// Count inliers under the adapted threshold.
int numInliers = 0;
for ( int i = 0; i < dis.rows(); i++ ){
if ( dis(i) < threshold )
numInliers++;
}
// Collect the inlier indices (again stored as doubles in a VectorXd).
VectorXd inliers( numInliers );
int j = 0;
for ( int i = 0; i < dis.rows(); i++ ){
if ( dis(i) < threshold )
inliers(j++) = i;
}
// Final refit on all inliers; fall back to identity if underdetermined.
Matrix3d T;
if ( numInliers >= 2 )
T = computeTform( pointsFrom, pointsTo, inliers );
else
T << 1, 0, 0, 0, 1, 0, 0, 0, 1;
return T;
}
示例7: fastLasso
// barebones version of the lasso for fixed lambda
// Eigen library is used for linear algebra
// x .............. predictor matrix
// y .............. response
// lambda ......... penalty parameter
// useSubset ...... logical indicating whether lasso should be computed on a
// subset
// subset ......... indices of subset on which lasso should be computed
// normalize ...... logical indicating whether predictors should be normalized
// useIntercept ... logical indicating whether intercept should be included
// eps ............ small numerical value (effective zero)
// useGram ........ logical indicating whether Gram matrix should be computed
// in advance
// useCrit ........ logical indicating whether to compute objective function
void fastLasso(const MatrixXd& x, const VectorXd& y, const double& lambda,
const bool& useSubset, const VectorXi& subset, const bool& normalize,
const bool& useIntercept, const double& eps, const bool& useGram,
const bool& useCrit,
// intercept, coefficients, residuals and objective function are returned
// through the following parameters
double& intercept, VectorXd& beta, VectorXd& residuals, double& crit) {
// data initializations
int n, p = x.cols();
MatrixXd xs;
VectorXd ys;
if(useSubset) {
n = subset.size();
xs.resize(n, p);
ys.resize(n);
int s;
for(int i = 0; i < n; i++) {
s = subset(i);
xs.row(i) = x.row(s);
ys(i) = y(s);
}
} else {
n = x.rows();
xs = x; // does this copy memory?
ys = y; // does this copy memory?
}
double rescaledLambda = n * lambda / 2;
// center data and store means
RowVectorXd meanX;
double meanY;
if(useIntercept) {
meanX = xs.colwise().mean(); // columnwise means of predictors
xs.rowwise() -= meanX; // sweep out columnwise means
meanY = ys.mean(); // mean of response
for(int i = 0; i < n; i++) {
ys(i) -= meanY; // sweep out mean
}
} else {
meanY = 0; // just to avoid warning, this is never used
// intercept = 0; // zero intercept
}
// some initializations
VectorXi inactive(p); // inactive predictors
int m = 0; // number of inactive predictors
VectorXi ignores; // indicates variables to be ignored
int s = 0; // number of ignored variables
// normalize predictors and store norms
RowVectorXd normX;
if(normalize) {
normX = xs.colwise().norm(); // columnwise norms
double epsNorm = eps * sqrt(n); // R package 'lars' uses n, not n-1
for(int j = 0; j < p; j++) {
if(normX(j) < epsNorm) {
// variance is too small: ignore variable
ignores.append(j, s);
s++;
// set norm to tolerance to avoid numerical problems
normX(j) = epsNorm;
} else {
inactive(m) = j; // add variable to inactive set
m++; // increase number of inactive variables
}
xs.col(j) /= normX(j); // sweep out norm
}
// resize inactive set and update number of variables if necessary
if(m < p) {
inactive.conservativeResize(m);
p = m;
}
} else {
for(int j = 0; j < p; j++) inactive(j) = j; // add variable to inactive set
m = p;
}
// compute Gram matrix if requested (saves time if number of variables is
// not too large)
MatrixXd Gram;
if(useGram) {
Gram.noalias() = xs.transpose() * xs;
}
// further initializations for iterative steps
//.........这里部分代码省略.........