This article collects typical usage examples of the C++ method eigen::VectorXd::array. If you have been wondering what VectorXd::array does, how to use it, and what real-world calls look like, the hand-picked code samples below should help. You can also explore further usage of the containing class eigen::VectorXd.
The section below presents 15 code examples of the VectorXd::array method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
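Before the project examples, here is a minimal, self-contained sketch (not taken from any of the examples below; the variable names are purely illustrative) of what VectorXd::array does: it exposes a coefficient-wise view of the vector, so element-wise operations such as abs(), square(), comparisons and scalar offsets become available, and the resulting array expression can be assigned back to a VectorXd.

#include <iostream>
#include <Eigen/Dense>

int main()
{
    // Illustrative data only: a small vector used to demonstrate .array().
    Eigen::VectorXd v(4);
    v << 1.0, -2.0, 3.0, -4.0;

    // .array() returns a coefficient-wise (ArrayXd) view of the vector, so
    // element-wise operations such as abs(), square() and scalar offsets work;
    // assigning the array expression back to a VectorXd is allowed.
    Eigen::VectorXd absPlusOne = v.array().abs() + 1.0;  // |v_i| + 1
    double sumOfSquares = v.array().square().sum();      // sum of v_i^2

    // Comparisons on the array view yield a boolean array; count() tallies matches.
    Eigen::Index numPositive = (v.array() > 0.0).count();

    std::cout << absPlusOne.transpose() << "\n"
              << sumOfSquares << "\n"
              << numPositive << std::endl;
    return 0;
}

Compiled against Eigen 3, this prints the transformed vector (2 3 4 5), the sum of squares (30) and the number of positive entries (2).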
Example 1: execute
output fc_rnn::execute(input const& in)
{
    // Set activation of input neurons
    auto const num_input = in.size();
    for(size_t n = 0; n < num_input; ++n)
    {
        vInput[n] = in[n];
    }

    // Summation for hidden neurons
    Eigen::VectorXd vHiddenSums =
        wmInput * vInput +
        wmHidden * vHidden;
    // Transfer function
    vHidden =
        evaluate(af_hidden, vHiddenSums.array());

    // TODO: Maybe should just store as a single vector?
    Eigen::VectorXd joined(input_layer_count() + hidden_count());
    joined << vInput, vHidden;
    Eigen::VectorXd vOutputSums =
        wmOutput * joined;
    Eigen::VectorXd vOutput =
        evaluate(af_output, vOutputSums.array());

    // Return the output values
    output out{ output_count() };
    std::copy(vOutput.data(), vOutput.data() + output_count(), out.begin());
    return out;
}
Example 2: bouncyBounds
// A function to bounce the MCMC proposal off the hard boundaries.
Eigen::VectorXd bouncyBounds(const Eigen::VectorXd& val,
    const Eigen::VectorXd& min, const Eigen::VectorXd& max)
{
    Eigen::VectorXd delta = max - min;
    Eigen::VectorXd result = val;
    Eigen::Matrix<bool, Eigen::Dynamic, 1> tooBig = (val.array() > max.array());
    Eigen::Matrix<bool, Eigen::Dynamic, 1> tooSmall = (val.array() < min.array());
    for (uint i=0; i < result.size(); i++)
    {
        bool big = tooBig(i);
        bool small = tooSmall(i);
        if (big)
        {
            double overstep = val(i)-max(i);
            int nSteps = (int)(overstep / delta(i));
            double stillToGo = overstep - nSteps*delta(i);
            if (nSteps % 2 == 0)
                result(i) = max(i) - stillToGo;
            else
                result(i) = min(i) + stillToGo;
        }
        if (small)
        {
            double understep = min(i) - val(i);
            int nSteps = (int)(understep / delta(i));
            double stillToGo = understep - nSteps*delta(i);
            if (nSteps % 2 == 0)
                result(i) = min(i) + stillToGo;
            else
                result(i) = max(i) - stillToGo;
        }
    }
    return result;
}
Example 3: initialize
void corrClass::initialize(Eigen::VectorXd a, Eigen::VectorXd b){
    vecA = a.array();
    vecB = b.array();

    sumA = vecA.sum();
    sumB = vecB.sum();
    sumA2 = vecA.square().sum();
    sumB2 = vecB.square().sum();
    sumAB = (vecA*vecB).sum();
}
Example 4: numericalDampingForce
double numericalDampingForce(const EnergyCondition<double> &c, const Eigen::VectorXd &uv, const Eigen::VectorXd &x, const Eigen::VectorXd &v, double d, int i, double dx)
{
    // find dC/dt
    Eigen::VectorXd dCdt = numericalCTimeDerivative(c, x, v, uv, dx);

    // find dC/dx
    Eigen::VectorXd dCdx = numericalFirstCDerivative(c, x, uv, i, dx);

    // fd = -d * sum( i, dC_i/dx * dC_i/dt )
    return -d * (dCdx.array() * dCdt.array()).sum();
}
Example 5: main
int main(int argc, char *argv[])
{
    using namespace Eigen;
    using namespace std;
    cout<<"Usage:"<<endl;
    cout<<"[space] toggle showing input mesh, output mesh or slice "<<endl;
    cout<<" through tet-mesh of convex hull."<<endl;
    cout<<"'.'/',' push back/pull forward slicing plane."<<endl;
    cout<<endl;

    // Load mesh: (V,T) tet-mesh of convex hull, F contains facets of input
    // surface mesh _after_ self-intersection resolution
    igl::readMESH(TUTORIAL_SHARED_PATH "/big-sigcat.mesh",V,T,F);
    // Compute barycenters of all tets
    igl::barycenter(V,T,BC);

    // Compute generalized winding number at all barycenters
    cout<<"Computing winding number over all "<<T.rows()<<" tets..."<<endl;
    igl::winding_number(V,F,BC,W);

    // Extract interior tets
    MatrixXi CT((W.array()>0.5).count(),4);
    {
        size_t k = 0;
        for(size_t t = 0;t<T.rows();t++)
        {
            if(W(t)>0.5)
            {
                CT.row(k) = T.row(t);
                k++;
            }
        }
    }
    // find boundary facets of interior tets
    igl::boundary_facets(CT,G);
    // boundary_facets seems to be reversed...
    G = G.rowwise().reverse().eval();

    // normalize
    W = (W.array() - W.minCoeff())/(W.maxCoeff()-W.minCoeff());

    // Plot the generated mesh
    igl::opengl::glfw::Viewer viewer;
    update_visualization(viewer);
    viewer.callback_key_down = &key_down;
    viewer.launch();
}
Example 6: result
std::vector<double> ClassifyMotion::classify_motion(const Eigen::MatrixXd &motion )
{
    double realmin = numeric_limits<double>::min();
    std::vector<double> result( m_nb_classes );
    std::vector<Eigen::MatrixXd> Pxi( m_nb_classes );

    // Compute probability of each class
    for(int i=0;i<m_nb_classes;i++)
    {
        Pxi[i].setZero( motion.cols(), m_nb_states );
        for(int j=0;j<m_nb_states;j++)
        {
            // Compute the new probability p(x|i)
            Pxi[i].col(j) = gauss_pdf( motion, i, j );
        }

        // Compute the log likelihood of the class
        Eigen::VectorXd F = Pxi[i]*m_priors[i];
        for(int k=0;k<F.size();k++)
            if( F(k) < realmin || std::isnan(F(k)) )
                F(k) = realmin;

        // for(int k=0;k<F.size();k++)
        //     if( F(k) > 1 )
        //         cout << "F(" << k << ") : " << F(k) << endl;

        result[i] = F.array().log().sum()/F.size();
    }
    return result;
}
Example 7: varianceToWeights
Eigen::VectorXd TrajectoryThread::varianceToWeights(Eigen::VectorXd& desiredVariance, const double beta)
{
    desiredVariance /= maximumVariance;
    desiredVariance = desiredVariance.array().min(varianceThresh); //limit desiredVariance to 0.99 maximum
    Eigen::VectorXd desiredWeights = (Eigen::VectorXd::Ones(desiredVariance.rows()) - desiredVariance) / beta;
    return desiredWeights;
}
Example 8: estimateF
void KukaRMControllerRTNET::estimateF(Eigen::VectorXd& F)
{
    for(unsigned int i=0;i<LWRDOF;i++)
    {
        m_bufSum(i) = m_bufSum(i)-m_FTab(i,m_Fidx);
        m_FTab(i,m_Fidx) = F(i);
        m_bufSum(i) = m_bufSum(i)+F(i);
    }

    m_Fidx = m_Fidx+1;
    if(m_Fidx>m_delta-1){
        m_Fidx = 0;
        m_FBoucle = true;
    }

    if(m_FBoucle){
        F = m_bufSum/m_delta;
    }
    else{
        F = m_bufSum/m_Fidx;
    }

    for(unsigned int i=0;i<LWRDOF;i++)
    {
        if((F.array().abs())(i)>m_FSat)
        {
            if(F(i)>0.0){
                F(i) = m_FSat;
            }
            else{
                F(i) = -m_FSat;
            }
        }
    }
}
Example 9: inv
Mat Mat::inv()
{
    if (nrow() != ncol()) throw runtime_error("Mat::inv() error: only symmetric positive definite matrices can be inverted with Mat::inv()");
    if (mattype == MatType::DIAGONAL)
    {
        Eigen::VectorXd diag = matrix.diagonal();
        diag = 1.0 / diag.array();
        vector<Eigen::Triplet<double>> triplet_list;
        for (int i = 0; i != diag.size(); ++i)
        {
            triplet_list.push_back(Eigen::Triplet<double>(i, i, diag[i]));
        }
        Eigen::SparseMatrix<double> inv_mat(triplet_list.size(), triplet_list.size());
        inv_mat.setZero();
        inv_mat.setFromTriplets(triplet_list.begin(), triplet_list.end());
        return Mat(row_names, col_names, inv_mat);
    }

    //Eigen::ConjugateGradient<Eigen::SparseMatrix<double>> solver;
    Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>> solver;
    solver.compute(matrix);
    Eigen::SparseMatrix<double> I(nrow(), nrow());
    I.setIdentity();
    Eigen::SparseMatrix<double> inv_mat = solver.solve(I);
    return Mat(row_names, col_names, inv_mat);
}
Example 10: evaluate
void gaussian_process::evaluate( const Eigen::MatrixXd& domains, Eigen::VectorXd& means, Eigen::VectorXd& variances ) const
{
    if( domains.cols() != domains_.cols() ) { COMMA_THROW( comma::exception, "expected " << domains_.cols() << " column(s) in domains, got " << domains.cols() << std::endl ); }
    Eigen::MatrixXd Kxsx = Eigen::MatrixXd::Zero( domains.rows(), domains_.rows() );
    for( std::size_t r = 0; r < std::size_t( domains.rows() ); ++r )
    {
        const Eigen::VectorXd& row = domains.row( r );
        for( std::size_t c = 0; c < std::size_t( domains_.rows() ); ++c )
        {
            Kxsx( r, c ) = covariance_( row, domains_.row( c ) );
        }
    }
    means = Kxsx * alpha_;
    means.array() += offset_;

    Eigen::MatrixXd Kxxs = Kxsx.transpose();
    L_.matrixL().solveInPlace( Kxxs );
    Eigen::MatrixXd& variance = Kxxs;
    variance = variance.array() * variance.array();
    variances = variance.colwise().sum();

    // for each diagonal variance, set v(r) = -v(r,r) + Kxsxs
    for( std::size_t r = 0; r < std::size_t( domains.rows() ); ++r )
    {
        variances( r ) = -variances( r ) + self_covariance_;
    }
}
Example 11: nextDesVal
void KukaRMControllerRTNET::nextDesVal()
{
    Eigen::VectorXd period = m_freq*m_time;
    Eigen::VectorXd amp = Eigen::VectorXd::Zero(LWRDOF);

    if(m_time<1.0){
        amp = m_amp*m_time;
    }
    else{
        amp = m_amp;
    }

    for(unsigned int i=0;i<LWRDOF;i++)
    {
        m_q_des(i) = amp(i)*(period.array().sin())(i)+m_bias(i);
        m_qp_des(i) = amp(i)*m_freq(i)*(period.array().cos())(i);
        m_qpp_des(i) = -amp(i)*m_freq(i)*m_freq(i)*(period.array().sin())(i);
    }
}
Example 12: TimeIntegrator
TimeIntegrator::TimeIntegrator(Eigen::VectorXd &napp)
{
    _napp = napp;

    // Determine nstep, dt, F
    _nstep = 10;
    _dt = 1.0/_nstep;

    Eigen::VectorXd x; x.setLinSpaced(_nstep,0.0,60.0);
    F1 = 200*x.array().sin();
}
Example 13: zero_vec
Eigen::VectorXd Shrinkage
(
    const Eigen::VectorXd& vec, const double kappa
) const
{
    Eigen::ArrayXd zero_vec(vec.size());
    zero_vec.setZero();
    return zero_vec.max( vec.array() - kappa) -
           zero_vec.max(-vec.array() - kappa);
}
Example 14: polyfit
double polyfit(size_t n, size_t deg, const double* xp, const double* yp,
               const double* wp, double* pp)
{
    ConstMappedVector x(xp, n);
    Eigen::VectorXd y = ConstMappedVector(yp, n);
    MappedVector p(pp, deg+1);

    if (deg >= n) {
        throw CanteraError("polyfit", "Polynomial degree ({}) must be less "
                           "than number of input data points ({})", deg, n);
    }

    // Construct A such that each row i of A has the elements
    // 1, x[i], x[i]^2, x[i]^3 ... + x[i]^deg
    Eigen::MatrixXd A(n, deg+1);
    A.col(0).setConstant(1.0);

    if (deg > 0) {
        A.col(1) = x;
    }
    for (size_t i = 1; i < deg; i++) {
        A.col(i+1) = A.col(i).array() * x.array();
    }

    if (wp != nullptr && wp[0] > 0) {
        // For compatibility with old Fortran dpolft, input weights are the
        // squares of the weight vector used in this algorithm
        Eigen::VectorXd w = ConstMappedVector(wp, n).cwiseSqrt().eval();

        // Multiply by the weights on both sides
        A = w.asDiagonal() * A;
        y.array() *= w.array();
    }

    // Solve W*A*p = W*y to find the polynomial coefficients
    p = A.colPivHouseholderQr().solve(y);

    // Evaluate the computed polynomial at the input x coordinates to compute
    // the RMS error as the return value
    return (A*p - y).eval().norm() / sqrt(n);
}
Example 15: operator/=
normal_fullrank operator/=(const normal_fullrank& rhs) {
    static const char* function =
        "stan::variational::normal_fullrank::operator/=";

    stan::math::check_size_match(function,
                                 "Dimension of lhs", dimension_,
                                 "Dimension of rhs", rhs.dimension());

    mu_.array() /= rhs.mu().array();
    L_chol_.array() /= rhs.L_chol().array();
    return *this;
}