This article collects typical usage examples of the C++ Eigen::Matrix::size method. If you have been struggling with questions such as: how exactly does C++ Matrix::size work? How is it used? Where can I find examples? Then the curated examples below may help. You can also explore further usage examples of the enclosing class Eigen::Matrix.
Fifteen code examples of the Matrix::size method are shown below, sorted by popularity by default. You can upvote any example you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: y
std::vector<double>
unit_vector_grad(Eigen::Matrix<double, Eigen::Dynamic, 1>& y_dbl,
                 int k) {
  using Eigen::Matrix;
  using Eigen::Dynamic;
  using stan::math::var;
  Matrix<var, Dynamic, 1> y(y_dbl.size());
  for (int i = 0; i < y.size(); ++i)
    y(i) = y_dbl(i);
  std::vector<var> x(y.size());
  for (size_t i = 0; i < x.size(); ++i)
    x[i] = y(i);
  var fx_k = stan::math::unit_vector_constrain(y)[k];
  std::vector<double> grad(y.size());
  fx_k.grad(x, grad);
  return grad;
}
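A minimal driver sketch for this helper, assuming the Stan math headers are available under the include path used by this test code (the test values and the single umbrella include are illustrative assumptions, not part of the original):

#include <stan/math.hpp>

int main() {
  // Gradient of the 2nd constrained coordinate w.r.t. each unconstrained input.
  Eigen::Matrix<double, Eigen::Dynamic, 1> y_dbl(3);
  y_dbl << 0.5, -1.0, 2.0;
  std::vector<double> g = unit_vector_grad(y_dbl, 1);
  return 0;
}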
Example 2: alpha
// Compute the gradient using the templated definition in stan::math
// to check the custom derivatives.
std::vector<double>
softmax_grad(Eigen::Matrix<double, Eigen::Dynamic, 1>& alpha_dbl,
             int k) {
  using Eigen::Matrix;
  using Eigen::Dynamic;
  using stan::agrad::var;
  Matrix<var, Dynamic, 1> alpha(alpha_dbl.size());
  for (int i = 0; i < alpha.size(); ++i)
    alpha(i) = alpha_dbl(i);
  std::vector<var> x(alpha.size());
  for (size_t i = 0; i < x.size(); ++i)
    x[i] = alpha(i);
  var fx_k = stan::math::softmax(alpha)[k];
  std::vector<double> grad(alpha.size());
  fx_k.grad(x, grad);
  return grad;
}
Example 3: log_determinant_spd
template <int R, int C>
inline var log_determinant_spd(const Eigen::Matrix<var, R, C>& m) {
  using stan::math::domain_error;
  using Eigen::Matrix;
  math::check_square("log_determinant_spd", "m", m);
  Matrix<double, R, C> m_d(m.rows(), m.cols());
  for (int i = 0; i < m.size(); ++i)
    m_d(i) = m(i).val();
  Eigen::LDLT<Matrix<double, R, C> > ldlt(m_d);
  if (ldlt.info() != Eigen::Success) {
    double y = 0;
    domain_error("log_determinant_spd",
                 "matrix argument", y,
                 "failed LDLT factorization");
  }
  // Compute the inverse of A (needed for the derivative).
  m_d.setIdentity(m.rows(), m.cols());
  ldlt.solveInPlace(m_d);
  if (ldlt.isNegative() || (ldlt.vectorD().array() <= 1e-16).any()) {
    double y = 0;
    domain_error("log_determinant_spd",
                 "matrix argument", y,
                 "matrix is negative definite");
  }
  double val = ldlt.vectorD().array().log().sum();
  if (!boost::math::isfinite(val)) {
    double y = 0;
    domain_error("log_determinant_spd",
                 "matrix argument", y,
                 "log determinant is infinite");
  }
  // Record operands and precomputed gradients on the autodiff stack.
  vari** operands = ChainableStack::memalloc_
    .alloc_array<vari*>(m.size());
  for (int i = 0; i < m.size(); ++i)
    operands[i] = m(i).vi_;
  double* gradients = ChainableStack::memalloc_
    .alloc_array<double>(m.size());
  for (int i = 0; i < m.size(); ++i)
    gradients[i] = m_d(i);
  return var(new precomputed_gradients_vari(val, m.size(), operands, gradients));
}
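The precomputed gradient relies on the standard matrix-calculus identity for a symmetric positive-definite matrix A:

\[
\frac{\partial}{\partial A} \log \det A = A^{-\top} = A^{-1},
\]

which is why the code reuses m_d to hold the inverse (via setIdentity followed by solveInPlace) and stores its entries as the gradients of the returned var.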
Example 4: stick_len
template <typename T>
Eigen::Matrix<T, Eigen::Dynamic, 1>
simplex_free(const Eigen::Matrix<T, Eigen::Dynamic, 1>& x) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using std::log;
  typedef typename index_type<Matrix<T, Dynamic, 1> >::type size_type;
  check_simplex("stan::math::simplex_free",
                "Simplex variable", x);
  int Km1 = x.size() - 1;
  Eigen::Matrix<T, Eigen::Dynamic, 1> y(Km1);
  T stick_len(x(Km1));
  for (size_type k = Km1; --k >= 0; ) {
    stick_len += x(k);
    T z_k(x(k) / stick_len);
    y(k) = logit(z_k) + log(Km1 - k);
    // note: log(Km1 - k) = logit((Km1 - k) / (Km1 + 1 - k))
  }
  return y;
}
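This function inverts Stan's stick-breaking simplex transform. Writing K = Km1 + 1 for the simplex dimension, the loop computes, for k = 0, ..., K - 2,

\[
z_k = \frac{x_k}{1 - \sum_{i<k} x_i}, \qquad
y_k = \operatorname{logit}(z_k) + \log(K - 1 - k),
\]

where the running stick_len accumulates \(1 - \sum_{i<k} x_i\) by iterating backwards from the last coordinate.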
Example 5: x
template <typename T>
Eigen::Matrix<T, Eigen::Dynamic, 1>
positive_ordered_free(const Eigen::Matrix<T, Eigen::Dynamic, 1>& y) {
  using Eigen::Matrix;
  using Eigen::Dynamic;
  using std::log;
  using stan::math::index_type;
  typedef typename index_type<Matrix<T, Dynamic, 1> >::type size_type;
  stan::math::check_positive_ordered("stan::math::positive_ordered_free",
                                     "Positive ordered variable",
                                     y);
  size_type k = y.size();
  Matrix<T, Dynamic, 1> x(k);
  if (k == 0)
    return x;
  x[0] = log(y[0]);
  for (size_type i = 1; i < k; ++i)
    x[i] = log(y[i] - y[i-1]);
  return x;
}
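The transform is the exact inverse of the positive-ordered constraining map, so the round trip reproduces the input:

\[
y_0 = e^{x_0}, \quad y_i = y_{i-1} + e^{x_i}
\qquad \Longleftrightarrow \qquad
x_0 = \log y_0, \quad x_i = \log(y_i - y_{i-1}), \quad i \ge 1.
\]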
Example 6: print_ijv
template <typename T>
IGL_INLINE void igl::print_ijv(
  const Eigen::SparseMatrix<T>& X,
  const int offset)
{
  Eigen::Matrix<int, Eigen::Dynamic, 1> I;
  Eigen::Matrix<int, Eigen::Dynamic, 1> J;
  Eigen::Matrix<T, Eigen::Dynamic, 1> V;
  igl::find(X, I, J, V);
  // Concatenate I, J, V into one #nnz-by-3 matrix.
  Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> IJV(I.size(), 3);
  IJV.col(0) = I.cast<T>();
  IJV.col(1) = J.cast<T>();
  IJV.col(2) = V;
  // Apply index offset (e.g. 1 for MATLAB-style one-based output).
  if(offset != 0)
  {
    IJV.col(0).array() += offset;
    IJV.col(1).array() += offset;
  }
  std::cout << IJV;
}
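A brief usage sketch, assuming libigl is available and igl/print_ijv.h is included (the matrix entries here are illustrative):

#include <igl/print_ijv.h>
#include <Eigen/Sparse>

int main() {
  Eigen::SparseMatrix<double> X(3, 3);
  X.insert(0, 0) = 1.0;   // two nonzeros for demonstration
  X.insert(2, 1) = -2.5;
  X.makeCompressed();
  igl::print_ijv(X, 1);   // one (row, col, value) row per nonzero, 1-based
  return 0;
}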
Example 7: p
void
MitsubishiH7::setMotorPulse(const ::Eigen::Matrix< ::std::int32_t, ::Eigen::Dynamic, 1>& p)
{
  assert(p.size() >= this->getDof());
  this->out.dat2.pls.p1 = 0;
  this->out.dat2.pls.p2 = 0;
  this->out.dat2.pls.p3 = 0;
  this->out.dat2.pls.p4 = 0;
  this->out.dat2.pls.p5 = 0;
  this->out.dat2.pls.p6 = 0;
  this->out.dat2.pls.p7 = 0;
  this->out.dat2.pls.p8 = 0;
  // Intentional fall-through: starting at the robot's degree of freedom,
  // every lower-numbered axis also receives its pulse value.
  switch (this->getDof())
  {
  case 8:
    this->out.dat2.pls.p8 = p(7);
  case 7:
    this->out.dat2.pls.p7 = p(6);
  case 6:
    this->out.dat2.pls.p6 = p(5);
  case 5:
    this->out.dat2.pls.p5 = p(4);
  case 4:
    this->out.dat2.pls.p4 = p(3);
  case 3:
    this->out.dat2.pls.p3 = p(2);
  case 2:
    this->out.dat2.pls.p2 = p(1);
  case 1:
    this->out.dat2.pls.p1 = p(0);
  default:
    break;
  }
  this->out.command = MXT_COMMAND_MOVE;
  this->out.sendType = MXT_TYPE_PULSE;
}
Example 8: f
template <typename F>
void
grad_hessian(const F& f,
             const Eigen::Matrix<double, Eigen::Dynamic, 1>& x,
             double& fx,
             Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>& H,
             std::vector<Eigen::Matrix<double,
                                       Eigen::Dynamic, Eigen::Dynamic> >&
               grad_H) {
  using Eigen::Matrix;
  using Eigen::Dynamic;
  fx = f(x);
  int d = x.size();
  H.resize(d, d);
  grad_H.resize(d, Matrix<double, Dynamic, Dynamic>(d, d));
  try {
    for (int i = 0; i < d; ++i) {
      for (int j = i; j < d; ++j) {
        start_nested();
        Matrix<fvar<fvar<var> >, Dynamic, 1> x_ffvar(d);
        for (int k = 0; k < d; ++k)
          x_ffvar(k) = fvar<fvar<var> >(fvar<var>(x(k), i == k),
                                        fvar<var>(j == k, 0));
        fvar<fvar<var> > fx_ffvar = f(x_ffvar);
        H(i, j) = fx_ffvar.d_.d_.val();
        H(j, i) = H(i, j);
        grad(fx_ffvar.d_.d_.vi_);
        for (int k = 0; k < d; ++k) {
          grad_H[i](j, k) = x_ffvar(k).val_.val_.adj();
          grad_H[j](i, k) = grad_H[i](j, k);
        }
        recover_memory_nested();
      }
    }
  } catch (const std::exception& e) {
    recover_memory_nested();
    throw;
  }
}
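A hedged sketch of a functor that grad_hessian can consume; cube_sum is a name invented here for illustration. With f(x) = sum of x_i cubed, the expected output is easy to verify by hand: H(i, i) = 6·x(i) and grad_H[i](i, i) = 6, with all other entries zero.

// Hypothetical test functor: f(x) = sum_i x_i^3.
struct cube_sum {
  template <typename T>
  T operator()(const Eigen::Matrix<T, Eigen::Dynamic, 1>& x) const {
    T sum(0);
    for (int i = 0; i < x.size(); ++i)
      sum += x(i) * x(i) * x(i);
    return sum;
  }
};
// usage: grad_hessian(cube_sum(), x, fx, H, grad_H);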
Example 9: result
template <typename T1, typename T2, int R, int C>
inline Eigen::Matrix<
    typename boost::math::tools::promote_args<T1, T2>::type,
    Eigen::Dynamic, Eigen::Dynamic>
quad_form_diag(const Eigen::Matrix<T1, Eigen::Dynamic, Eigen::Dynamic>& mat,
               const Eigen::Matrix<T2, R, C>& vec) {
  using boost::math::tools::promote_args;
  check_vector("quad_form_diag", "vec", vec);
  check_square("quad_form_diag", "mat", mat);
  int size = vec.size();
  check_equal("quad_form_diag", "matrix size", mat.rows(),
              size);
  Eigen::Matrix<typename promote_args<T1, T2>::type,
                Eigen::Dynamic, Eigen::Dynamic> result(size, size);
  for (int i = 0; i < size; ++i) {
    result(i, i) = vec(i) * vec(i) * mat(i, i);
    for (int j = i + 1; j < size; ++j) {
      typename promote_args<T1, T2>::type temp = vec(i) * vec(j);
      result(j, i) = temp * mat(j, i);
      result(i, j) = temp * mat(i, j);
    }
  }
  return result;
}
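Entrywise, the function computes \(\operatorname{diag}(v)\, M \,\operatorname{diag}(v)\):

\[
\texttt{result}_{ij} = v_i \, v_j \, M_{ij},
\]

and the loop fills the symmetric index pairs (i, j) and (j, i) from a single shared product v_i·v_j.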
Example 10: test_sort_indices_desc3
template <typename T, int R, int C>
void test_sort_indices_desc3(Eigen::Matrix<T, R, C> val) {
  using stan::math::sort_indices_desc;
  typedef Eigen::Matrix<fvar<fvar<double> >, R, C> AVEC;
  const size_t size = val.size();
  AVEC x(size);
  for (size_t i = 0U; i < size; i++)
    x.data()[i] = fvar<fvar<double> >(val[i]);
  std::vector<int> val_sorted = sort_indices_desc(val);
  std::vector<int> x_sorted = sort_indices_desc(x);
  for (size_t i = 0U; i < size; i++)
    EXPECT_EQ(val_sorted.data()[i], x_sorted.data()[i]);
  for (size_t i = 0U; i < size; i++)
    for (size_t j = 0U; j < size; j++)
      if (val_sorted.data()[i] == val.data()[j])
        EXPECT_EQ(x_sorted.data()[i], x.data()[j]);
      else
        EXPECT_FALSE(x_sorted.data()[i] == x.data()[j]);
}
Example 11: check_simplex
template <typename T_prob>
bool check_simplex(const char* function,
                   const char* name,
                   const Eigen::Matrix<T_prob, Eigen::Dynamic, 1>& theta) {
  using Eigen::Dynamic;
  using Eigen::Matrix;
  using stan::math::index_type;
  typedef typename index_type<Matrix<T_prob, Dynamic, 1> >::type size_t;
  check_nonzero_size(function, name, theta);
  if (!(fabs(1.0 - theta.sum()) <= CONSTRAINT_TOLERANCE)) {
    std::stringstream msg;
    T_prob sum = theta.sum();
    msg << "is not a valid simplex.";
    msg.precision(10);
    msg << " sum(" << name << ") = " << sum
        << ", but should be ";
    std::string msg_str(msg.str());
    domain_error(function, name, 1.0,
                 msg_str.c_str());
    return false;
  }
  for (size_t n = 0; n < theta.size(); n++) {
    if (!(theta[n] >= 0)) {
      std::ostringstream msg;
      msg << "is not a valid simplex. "
          << name << "[" << n + stan::error_index::value << "]"
          << " = ";
      std::string msg_str(msg.str());
      domain_error(function, name, theta[n],
                   msg_str.c_str(),
                   ", but should be greater than or equal to 0");
      return false;
    }
  }
  return true;
}
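The two branches enforce the simplex definition up to numerical tolerance:

\[
\theta_n \ge 0 \;\; \text{for all } n, \qquad
\left| 1 - \sum_n \theta_n \right| \le \texttt{CONSTRAINT\_TOLERANCE}.
\]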
Example 12: test_sort_indices_desc
template <typename T, int R, int C>
void test_sort_indices_desc(Eigen::Matrix<T, R, C> val) {
  using stan::math::sort_indices_desc;
  typedef Eigen::Matrix<AVAR, R, C> AVEC;
  const size_t size = val.size();
  AVEC x(size);
  for (size_t i = 0U; i < size; i++)
    x.data()[i] = AVAR(val[i]);
  std::vector<int> val_sorted = sort_indices_desc(val);
  std::vector<int> x_sorted = sort_indices_desc(x);
  for (size_t i = 0U; i < size; i++)
    EXPECT_EQ(val_sorted.data()[i], x_sorted.data()[i]);
  for (size_t i = 0U; i < size; i++)
    for (size_t j = 0U; j < size; j++)
      if (val_sorted.data()[i] == val.data()[j])
        EXPECT_EQ(x_sorted.data()[i], x.data()[j]);
      else
        EXPECT_FALSE(x_sorted.data()[i] == x.data()[j]);
}
Example 13: sum
template <bool propto, typename T_prob>
typename boost::math::tools::promote_args<T_prob>::type
categorical_logit_log(const std::vector<int>& ns,
                      const Eigen::Matrix<T_prob, Eigen::Dynamic, 1>& beta) {
  static const char* function = "stan::prob::categorical_logit_log(%1%)";
  using stan::math::check_bounded;
  using stan::math::check_finite;
  using stan::math::log_softmax;
  using stan::math::sum;
  double lp = 0.0;
  for (size_t k = 0; k < ns.size(); ++k)
    if (!check_bounded(function, ns[k], 1, beta.size(),
                       "categorical outcome out of support",
                       &lp))
      return lp;
  if (!check_finite(function, beta, "log odds parameter", &lp))
    return lp;
  if (!include_summand<propto, T_prob>::value)
    return 0.0;
  if (ns.size() == 0)
    return 0.0;
  Eigen::Matrix<T_prob, Eigen::Dynamic, 1> log_softmax_beta
    = log_softmax(beta);
  // FIXME: replace with more efficient sum()
  Eigen::Matrix<typename boost::math::tools::promote_args<T_prob>::type,
                Eigen::Dynamic, 1> results(ns.size());
  for (size_t i = 0; i < ns.size(); ++i)
    results[i] = log_softmax_beta(ns[i] - 1);
  return sum(results);
}
Example 14: beta
template <bool propto, typename T_prob>
typename boost::math::tools::promote_args<T_prob>::type
categorical_logit_log(int n,
                      const Eigen::Matrix<T_prob, Eigen::Dynamic, 1>& beta) {
  static const char* function = "stan::prob::categorical_logit_log(%1%)";
  using stan::math::check_bounded;
  using stan::math::check_finite;
  using stan::math::log_sum_exp;
  double lp = 0.0;
  if (!check_bounded(function, n, 1, beta.size(),
                     "categorical outcome out of support",
                     &lp))
    return lp;
  if (!check_finite(function, beta, "log odds parameter", &lp))
    return lp;
  if (!include_summand<propto, T_prob>::value)
    return 0.0;
  // FIXME: wasteful vs. creating term (n-1) if not vectorized
  return beta(n-1) - log_sum_exp(beta);  // == log_softmax(beta)(n-1)
}
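Both overloads evaluate the categorical-logit log probability mass

\[
\log \Pr(y = n \mid \beta)
= \beta_n - \log \sum_{k=1}^{K} \exp(\beta_k)
= \operatorname{log\_softmax}(\beta)_n,
\]

with the vectorized overload in Example 13 summing this term over all outcomes in ns.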
Example 15: softmax
template <typename T>
inline
Eigen::Matrix<fvar<T>, Eigen::Dynamic, 1>
softmax(const Eigen::Matrix<fvar<T>, Eigen::Dynamic, 1>& alpha) {
  using stan::math::softmax;
  using Eigen::Matrix;
  using Eigen::Dynamic;
  Matrix<T, Dynamic, 1> alpha_t(alpha.size());
  for (int k = 0; k < alpha.size(); ++k)
    alpha_t(k) = alpha(k).val_;
  Matrix<T, Dynamic, 1> softmax_alpha_t = softmax(alpha_t);
  Matrix<fvar<T>, Dynamic, 1> softmax_alpha(alpha.size());
  for (int k = 0; k < alpha.size(); ++k) {
    softmax_alpha(k).val_ = softmax_alpha_t(k);
    softmax_alpha(k).d_ = 0;
  }
  // For each input position, precompute the shared term
  // -alpha_m' * softmax(alpha)_m, then chain it into each output position.
  for (int m = 0; m < alpha.size(); ++m) {
    T negative_alpha_m_d_times_softmax_alpha_t_m
      = -alpha(m).d_ * softmax_alpha_t(m);
    for (int k = 0; k < alpha.size(); ++k) {
      if (m == k) {
        softmax_alpha(k).d_
          += softmax_alpha_t(k)
             * (alpha(m).d_
                + negative_alpha_m_d_times_softmax_alpha_t_m);
      } else {
        softmax_alpha(k).d_
          += negative_alpha_m_d_times_softmax_alpha_t_m
             * softmax_alpha_t(k);
      }
    }
  }
  return softmax_alpha;
}
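The tangent propagation implements the softmax Jacobian. Writing s = softmax(α),

\[
\frac{\partial s_k}{\partial \alpha_m} = s_k \,(\delta_{km} - s_m),
\]

so each output tangent accumulates \( s_k\,\dot{\alpha}_k - s_k \sum_m s_m \dot{\alpha}_m \), which is exactly what the two branches of the inner loop compute.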