This article collects typical usage examples of the MatrixX class in C++. If you are wondering what MatrixX is for, how to use it, or want worked examples, the curated class code examples here may help.
The sections below present 15 code examples of the MatrixX class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
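In most of the projects below, MatrixX is not a single library class but a project-local alias for a dynamically sized Eigen matrix. A minimal sketch of the usual pattern (the Scalar choice and alias names here are illustrative assumptions, not taken from any one project):

#include <Eigen/Dense>

// Hypothetical project-local aliases in the style of the examples below.
typedef double Scalar;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> MatrixX;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> VectorX;

int main()
{
  MatrixX A = MatrixX::Zero(3, 3);                 // 3x3 zero matrix
  A.block(0, 0, 2, 2) = MatrixX::Identity(2, 2);   // write into a sub-block
  A(2, 2) = 4.0;
  VectorX b = VectorX::Constant(3, 1.0);
  VectorX x = A.fullPivLu().solve(b);              // solve A x = b
  return x.size() == 3 ? 0 : 1;
}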
Example 1: assert
template <typename Scalar>
const typename Type<Scalar>::MatrixX& HumanoidLipComJerkMinimizationObjective<Scalar>::getHessian()
{
assert(feetSupervisor_.getNbSamples() == lipModel_.getNbSamples());
int N = lipModel_.getNbSamples();
int M = feetSupervisor_.getNbPreviewedSteps();
int nb = feetSupervisor_.getNbOfCallsBeforeNextSample() - 1;
const LinearDynamic<Scalar>& dynCopX = lipModel_.getCopXLinearDynamic(nb);
const LinearDynamic<Scalar>& dynCopY = lipModel_.getCopYLinearDynamic(nb);
MatrixX tmp = MatrixX::Zero(2*N, 2*N + 2*M);
tmp.block(0, 0, 2*N, 2*N) = feetSupervisor_.getRotationMatrixT();
tmp.block(0, 2*N, N, M) = feetSupervisor_.getFeetPosLinearDynamic().U;
tmp.block(N, 2*N + M, N, M) = feetSupervisor_.getFeetPosLinearDynamic().U;
MatrixX tmp2 = MatrixX::Zero(2*N, 2*N);
const MatrixX& weight = feetSupervisor_.getSampleWeightMatrix();
tmp2.block(0, 0, N, N) = dynCopX.UTinv*weight*dynCopX.Uinv;
tmp2.block(N, N, N, N) = dynCopY.UTinv*weight*dynCopY.Uinv;
hessian_.noalias() = tmp.transpose()*tmp2*tmp;
return hessian_;
}
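Reading off the block layout above, with R the rotation matrix, U_f the feet-position dynamics, U_x and U_y the CoP dynamics, and W the sample weight matrix, the function assembles a Gauss-Newton-style Hessian:

H = M^T Q M, \quad M = \begin{pmatrix} R^T & \mathrm{blkdiag}(U_f, U_f) \end{pmatrix}, \quad Q = \mathrm{blkdiag}\left( U_x^{-T} W U_x^{-1},\; U_y^{-T} W U_y^{-1} \right)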
Example 2: run_fixed_size_test
void run_fixed_size_test(int num_elements)
{
using std::abs;
typedef Matrix<Scalar, Dimension+1, Dynamic> MatrixX;
typedef Matrix<Scalar, Dimension+1, Dimension+1> HomMatrix;
typedef Matrix<Scalar, Dimension, Dimension> FixedMatrix;
typedef Matrix<Scalar, Dimension, 1> FixedVector;
const int dim = Dimension;
// MUST be positive because in any other case det(cR_t) may become negative for
// odd dimensions!
const Scalar c = abs(internal::random<Scalar>());
FixedMatrix R = randMatrixSpecialUnitary<Scalar>(dim);
FixedVector t = Scalar(50)*FixedVector::Random(dim,1);
HomMatrix cR_t = HomMatrix::Identity(dim+1,dim+1);
cR_t.block(0,0,dim,dim) = c*R;
cR_t.block(0,dim,dim,1) = t;
MatrixX src = MatrixX::Random(dim+1, num_elements);
src.row(dim) = Matrix<Scalar, 1, Dynamic>::Constant(num_elements, Scalar(1));
MatrixX dst = cR_t*src;
Block<MatrixX, Dimension, Dynamic> src_block(src,0,0,dim,num_elements);
Block<MatrixX, Dimension, Dynamic> dst_block(dst,0,0,dim,num_elements);
HomMatrix cR_t_umeyama = umeyama(src_block, dst_block);
const Scalar error = ( cR_t_umeyama*src - dst ).array().square().sum();
VERIFY(error < Scalar(10)*std::numeric_limits<Scalar>::epsilon());
}
Example 3: pxi
GMMExpectationMaximization::uint GMMExpectationMaximization::execute(const MatrixX & dataset)
{
const uint data_count = dataset.rows();
const uint num_gaussians = m_means.size();
const uint dim = dataset.cols();
MatrixX pxi(data_count,num_gaussians);
MatrixX pix(data_count,num_gaussians);
VectorX pxidatatot(data_count);
VectorX weights(num_gaussians);
VectorX ex(data_count);
MatrixX ts(dim,dim);
VectorX dif(dim);
Real prev_log_likelihood = 1.0;
uint it_num;
for (it_num = 0; it_num < m_max_iterations; it_num++)
{
for (uint g = 0; g < num_gaussians; g++)
weights[g] = m_weights[g];
for (uint d = 0; d < data_count; d++)
for (uint g = 0; g < num_gaussians; g++)
pxi(d,g) = gauss(m_means[g],m_covs[g],dataset.row(d).transpose());
pxidatatot = pxi * weights;
Real log_likelihood = pxidatatot.array().log().sum() / Real(data_count);
if (it_num != 0 && (std::abs(log_likelihood / prev_log_likelihood - 1.0) < m_termination_threshold))
break;
prev_log_likelihood = log_likelihood;
for (uint d = 0; d < data_count; d++)
pix.row(d) = (pxi.row(d).transpose().array() * weights.array()).transpose() / pxidatatot[d];
ex = pix.colwise().sum();
for(uint g = 0; g < num_gaussians; g++)
{
m_weights[g] = ex[g] / Real(data_count);
m_means[g] = (dataset.transpose() * pix.col(g)) / ex[g];
ts = MatrixX::Zero(dim,dim);
for (uint d = 0; d < data_count; d++)
{
dif = dataset.row(d).transpose() - m_means[g];
ts.noalias() += (dif * dif.transpose()) * pix(d,g);
}
m_covs[g] = (ts / ex[g]) + MatrixX::Identity(dim,dim) * m_epsilon;
}
// interruption point here
if (m_termination_handler && m_termination_handler->isTerminated())
return it_num;
}
return it_num;
}
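In the code's notation (D = data_count, mixture weights w_g, responsibilities pix(d,g)), each iteration computes

\ell = \frac{1}{D} \sum_{d=1}^{D} \log \sum_{g} w_g\, p(x_d \mid g), \qquad pix(d,g) = \frac{w_g\, p(x_d \mid g)}{\sum_{g'} w_{g'}\, p(x_d \mid g')},

stops once \ell changes by less than m_termination_threshold in relative terms, and otherwise applies the standard M-step updates w_g = \frac{1}{D}\sum_d pix(d,g), \; \mu_g = \frac{\sum_d pix(d,g)\,x_d}{\sum_d pix(d,g)}, \; \Sigma_g = \frac{\sum_d pix(d,g)\,(x_d-\mu_g)(x_d-\mu_g)^T}{\sum_d pix(d,g)} + \epsilon I, where the \epsilon I term keeps each covariance invertible.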
Example 4: TYPED_TEST
/**
* In this test we check that the gradient is correct by applying
* a finite difference method.
*/
TYPED_TEST(TestSecondOrderMultinomialLogisticRegression, Gradient) {
// Gradient checking should only be made with a double type
if (is_float<TypeParam>::value) {
return;
}
// eta is typically of size KxC, where K is the number of topics and C the
// number of different classes.
// Here we arbitrarily choose K=10 and C=5 for convenience.
MatrixX<TypeParam> eta = MatrixX<TypeParam>::Random(10, 5);
// X is of size KxD, where D is the total number of documents.
// In this test a single document is used, so D=1.
MatrixX<TypeParam> X = MatrixX<TypeParam>::Random(10, 1);
// y is vector of size Dx1
VectorXi y(1);
for (int i=0; i<1; i++) {
y(i) = rand() % 5;
}
std::vector<MatrixX<TypeParam> > X_var = {MatrixX<TypeParam>::Random(10, 10).array().abs()};
TypeParam L = 1;
SecondOrderLogisticRegressionApproximation<TypeParam> mlr(X, X_var, y, L);
// grad is the gradient according to the equation
// implemented in MultinomialLogisticRegression.cpp
// gradient function
// grad is of same size as eta, which is KxC
MatrixX<TypeParam> grad(10, 5);
// Calculate the gradients
mlr.gradient(eta, grad);
// Grad's approximation
TypeParam grad_hat;
TypeParam t = 1e-6;
for (int i=0; i < eta.rows(); i++) {
for (int j=0; j < eta.cols(); j++) {
eta(i, j) += t;
TypeParam ll1 = mlr.value(eta);
eta(i, j) -= 2*t;
TypeParam ll2 = mlr.value(eta);
// Compute gradients approximation
grad_hat = (ll1 - ll2) / (2 * t);
auto absolute_error = std::abs(grad(i, j) - grad_hat);
if (grad_hat != 0) {
auto relative_error = absolute_error / std::abs(grad_hat);
EXPECT_TRUE(
relative_error < 1e-4 ||
absolute_error < 1e-5
) << relative_error << " " << absolute_error;
}
else {
EXPECT_LT(absolute_error, 1e-5);
}
}
}
}
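The loop implements the standard central-difference check with step t = 10^{-6}: for each entry of eta,

\hat g_{ij} = \frac{\ell(\eta + t\,E_{ij}) - \ell(\eta - t\,E_{ij})}{2t},

where E_{ij} is zero except for a one in entry (i, j). The test then compares \hat g_{ij} against grad(i, j) under mixed relative/absolute tolerances.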
Example 5: predictions
Eigen::VectorXi LDA<Scalar>::predict(const MatrixX &scores) {
Eigen::VectorXi predictions(scores.cols());
for (int d=0; d<scores.cols(); d++) {
scores.col(d).maxCoeff( &predictions[d] );
}
return predictions;
}
Example 6: TYPED_TEST
TYPED_TEST(TestMultinomialMaximizationStep, Maximization) {
// Build the corpus
std::mt19937 rng;
rng.seed(0);
MatrixXi X(100, 50);
VectorXi y(50);
std::uniform_int_distribution<> class_generator(0, 5);
std::exponential_distribution<> words_generator(0.1);
for (int d=0; d<50; d++) {
for (int w=0; w<100; w++) {
X(w, d) = static_cast<int>(words_generator(rng));
}
y(d) = class_generator(rng);
}
// Create the corpus and the model
auto corpus = std::make_shared<corpus::EigenClassificationCorpus>(X, y);
MatrixX<TypeParam> beta = MatrixX<TypeParam>::Random(10, 100);
beta.array() -= beta.minCoeff();
beta.array().rowwise() /= beta.array().colwise().sum();
auto model = std::make_shared<parameters::SupervisedModelParameters<TypeParam> >(
VectorX<TypeParam>::Constant(10, 0.1),
beta,
MatrixX<TypeParam>::Constant(10, 6, 1. / 6)
);
em::MultinomialSupervisedEStep<TypeParam> e_step(10, 1e-2, 2);
em::MultinomialSupervisedMStep<TypeParam> m_step(2);
for (size_t i=0; i<corpus->size(); i++) {
m_step.doc_m_step(
corpus->at(i),
e_step.doc_e_step(
corpus->at(i),
model
),
model
);
}
std::vector<TypeParam> progress;
m_step.get_event_dispatcher()->add_listener(
[&progress](std::shared_ptr<events::Event> event) {
if (event->id() == "MaximizationProgressEvent") {
auto prog_ev = std::static_pointer_cast<events::MaximizationProgressEvent<TypeParam> >(event);
progress.push_back(prog_ev->likelihood());
}
}
);
m_step.m_step(model);
ASSERT_EQ(1, progress.size());
ASSERT_GT(0, progress[0]);
}
Author: angeloskath | Project: supervised-lda | Lines: 57 | Source file: test_multinomial_supervised_maximization_step.cpp
Example 7: getBIC
GMMExpectationMaximization::Real GMMExpectationMaximization::getBIC(const MatrixX & dataset) const
{
const uint dim = dataset.cols();
const uint num_gaussians = m_means.size();
Real number_of_parameters = (num_gaussians * dim * (dim + 1) / 2) + num_gaussians * dim + num_gaussians - 1;
uint data_count = dataset.rows();
Real sum = 0.0;
for(uint i = 0; i < data_count; i++)
sum += log(expectation(dataset.row(i).transpose()));
return -sum + (number_of_parameters / 2.0) * log(Real(data_count));
}
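For reference, with k free parameters and n data points the usual Bayesian information criterion is

\mathrm{BIC} = -2 \ln \hat L + k \ln n;

the function returns half of that, -\ln \hat L + \tfrac{k}{2} \ln n, which preserves the ranking between models. The parameter count matches a full-covariance mixture of G gaussians in d dimensions: k = G\,\frac{d(d+1)}{2} + G d + (G - 1).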
Example 8: gauss
GMMExpectationMaximization::Real GMMExpectationMaximization::gauss(const VectorX & mean,
const MatrixX & cov,const VectorX & pt) const
{
Real det = cov.determinant();
uint dim = mean.size();
// check that the covariance matrix is invertible
if (std::abs(det) < std::pow(m_epsilon,dim) * 0.1)
return 0.0; // the gaussian has approximately zero width: the probability of any point falling into it is approximately 0.
// else, compute pdf
MatrixX inverse_cov = cov.inverse();
VectorX dist = pt - mean;
Real exponent = - (dist.dot(inverse_cov * dist)) / 2.0;
Real den = std::sqrt(std::pow(2.0 * M_PI,dim) * std::abs(det));
return std::exp(exponent) / den;
}
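This is the multivariate normal density; in the code's notation, with dist = pt - mean and d = dim:

N(\mathrm{pt};\, \mathrm{mean},\, \mathrm{cov}) = \frac{1}{\sqrt{(2\pi)^d\, |\det(\mathrm{cov})|}} \exp\left( -\tfrac{1}{2}\, \mathrm{dist}^T\, \mathrm{cov}^{-1}\, \mathrm{dist} \right)

The early return treats a covariance with near-zero determinant as a degenerate (zero-width) gaussian rather than inverting it.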
Example 9: callSolver
bool BCCoreSiconos::callSolver(MatrixX& Mlcp, VectorX& b, VectorX& solution, VectorX& contactIndexToMu, ofstream& os)
{
#ifdef BUILD_BCPLUGIN_WITH_SICONOS
int NC3 = Mlcp.rows();
if(NC3<=0) return true;
int NC = NC3/3;
int CFS_DEBUG = 0;
int CFS_DEBUG_VERBOSE = 0;
if(CFS_DEBUG)
{
if(NC3%3 != 0 ){ os << " warning-1 " << std::endl;return false;}
if( b.rows()!= NC3){ os << " warning-2 " << std::endl;return false;}
if(solution.rows()!= NC3){ os << " warning-3 " << std::endl;return false;}
}
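// index remapping ((i==0)?(ia):(2*ia+i+NC-1)): the normal components fill the
// first NC entries, and the two tangential components follow in pairs.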
for(int ia=0; ia<NC; ia++)
  for(int i=0; i<3; i++)
    prob->q[3*ia+i] = b(((i==0) ? (ia) : (2*ia+i+NC-1)));
for(int ia=0; ia<NC; ia++)
  prob->mu[ia] = contactIndexToMu[ia];
prob->numberOfContacts = NC;
if( USE_FULL_MATRIX )
{
prob->M->storageType = 0;
prob->M->size0 = NC3;
prob->M->size1 = NC3;
double* ptmp = prob->M->matrix0;
for(int ia=0; ia<NC; ia++) for(int i=0; i<3; i++)
{
for(int ja=0; ja<NC; ja++) for(int j=0; j<3; j++)
{
ptmp[NC3*(3*ia+i)+(3*ja+j)] = Mlcp(((i==0) ? (ia) : (2*ia+i+NC-1)), ((j==0) ? (ja) : (2*ja+j+NC-1)));
}
}
}
else
{
prob->M->storageType = 1;
prob->M->size0 = NC3;
prob->M->size1 = NC3;
sparsify_A( prob->M->matrix1 , Mlcp , NC , &os);
}
fc3d_driver(prob,reaction,velocity,solops, numops);
double* prea = reaction ;
for(int ia=0; ia<NC; ia++)
  for(int i=0; i<3; i++)
    solution(((i==0) ? (ia) : (2*ia+i+NC-1))) = prea[3*ia+i];
if(CFS_DEBUG_VERBOSE)
{
os << "=---------------------------------="<< std::endl;
os << "| res_error =" << solops->dparam[1] << std::endl;
os << "=---------------------------------="<< std::endl;
}
#endif
return true;
}
Example 10: run_test
void run_test(int dim, int num_elements)
{
using std::abs;
typedef typename internal::traits<MatrixType>::Scalar Scalar;
typedef Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> MatrixX;
typedef Matrix<Scalar, Eigen::Dynamic, 1> VectorX;
// MUST be positive because in any other case det(cR_t) may become negative for
// odd dimensions!
const Scalar c = abs(internal::random<Scalar>());
MatrixX R = randMatrixSpecialUnitary<Scalar>(dim);
VectorX t = Scalar(50)*VectorX::Random(dim,1);
MatrixX cR_t = MatrixX::Identity(dim+1,dim+1);
cR_t.block(0,0,dim,dim) = c*R;
cR_t.block(0,dim,dim,1) = t;
MatrixX src = MatrixX::Random(dim+1, num_elements);
src.row(dim) = Matrix<Scalar, 1, Dynamic>::Constant(num_elements, Scalar(1));
MatrixX dst = cR_t*src;
MatrixX cR_t_umeyama = umeyama(src.block(0,0,dim,num_elements), dst.block(0,0,dim,num_elements));
const Scalar error = ( cR_t_umeyama*src - dst ).norm() / dst.norm();
VERIFY(error < Scalar(40)*std::numeric_limits<Scalar>::epsilon());
}
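Both Umeyama tests follow the same recipe: build a known scaled rigid transform, apply it to random points, and check the recovered transform. A minimal standalone sketch against the public Eigen API (Eigen::umeyama from <Eigen/Geometry>; the point count, transform values, and use of double here are arbitrary choices for illustration):

#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <iostream>

int main()
{
  const int n = 100;
  // Source cloud (columns are points) and a known similarity transform.
  Eigen::MatrixXd src = Eigen::MatrixXd::Random(3, n);
  Eigen::Matrix3d R = Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitZ()).toRotationMatrix();
  Eigen::Vector3d t(1.0, -2.0, 0.5);
  const double c = 2.0;
  Eigen::MatrixXd dst = (c * R * src).colwise() + t;
  // Recover c, R, t as a 4x4 homogeneous matrix and report the fit error.
  Eigen::Matrix4d T = Eigen::umeyama(src, dst, /*with_scaling=*/true);
  std::cout << "residual: "
            << (T.topLeftCorner(3, 3) * src
                + T.topRightCorner(3, 1) * Eigen::RowVectorXd::Ones(n) - dst).norm()
            << std::endl;
  return 0;
}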
Example 11: PseudoInverse
const MatrixX& Jacobian::GetNullspace()
{
if(computeNullSpace_)
{
computeNullSpace_ = false;
/*jacobianInverseNoDls_ = jacobian_;
PseudoInverse(jacobianInverseNoDls_); // tmp while figuring out how to chose lambda*/
//ComputeSVD();
MatrixX id = MatrixX::Identity(jacobian_.cols(), jacobian_.cols());
ComputeSVD();
//Eigen::JacobiSVD<MatrixX> svd(jacobian_, Eigen::ComputeThinU | Eigen::ComputeThinV);
MatrixX res = MatrixX::Zero(id.rows(), id.cols());
for(int i = 0; i < svd_.matrixV().cols(); ++i)
{
VectorX v = svd_.matrixV().col(i);
res += v * v.transpose();
}
Identitymin_ = id - res;
//Identitymin_ = id - (jacobianInverseNoDls_* jacobian_);
}
return Identitymin_;
}
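With the thin SVD J = U \Sigma V^T, the accumulated sum of outer products is V V^T, the projector onto the row space of J, so the loop computes the orthogonal projector onto the nullspace (assuming the retained columns of V all correspond to nonzero singular values):

P_{N(J)} = I - \sum_i v_i v_i^T = I - V V^T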
Example 12:
typename LDA<Scalar>::MatrixX LDA<Scalar>::decision_function(const MatrixX &X) {
// this function requires a supervised LDA so let's cast our models
// parameters accordingly
auto model = std::static_pointer_cast<parameters::SupervisedModelParameters<Scalar> >(
model_parameters_
);
// the linear model is trained on
// E_q[\bar z] = \frac{\gamma - \alpha}{\sum_i \gamma_i}
MatrixX expected_z_bar = X.colwise() - model->alpha;
expected_z_bar.array().rowwise() /= expected_z_bar.array().colwise().sum();
// finally return the linear scores like a boss
return model->eta.transpose() * expected_z_bar;
}
Example 13: s
void SlaterSet::initCalculation()
{
if (m_initialized)
return;
m_normalized.resize(m_overlap.cols(), m_overlap.rows());
SelfAdjointEigenSolver<MatrixX> s(m_overlap);
MatrixX p = s.eigenvectors();
MatrixX m = p * s.eigenvalues().array().inverse().array().sqrt()
.matrix().asDiagonal() * p.inverse();
m_normalized = m * m_eigenVectors;
if (!(m_overlap * m * m).eval().isIdentity())
cout << "Identity test FAILED - do you need a newer version of Eigen?\n";
m_factors.resize(m_zetas.size());
m_PQNs = m_pqns;
// Calculate the normalizations of the orbitals.
for (size_t i = 0; i < m_zetas.size(); ++i) {
switch (m_slaterTypes[i]) {
case S:
m_factors[i] = pow(2.0 * m_zetas[i], m_pqns[i] + 0.5)
* sqrt(1.0 / (4.0 * M_PI) / factorial(2 * m_pqns[i]));
m_PQNs[i] -= 1;
break;
case PX:
case PY:
case PZ:
m_factors[i] = pow(2.0 * m_zetas[i], m_pqns[i] + 0.5)
* sqrt(3.0 / (4.0 * M_PI) / factorial(2 * m_pqns[i]));
m_PQNs[i] -= 2;
break;
case X2:
m_factors[i] = 0.5 * pow(2.0 * m_zetas[i], m_pqns[i] + 0.5)
* sqrt(15.0 / (4.0 * M_PI) / factorial(2 * m_pqns[i]));
m_PQNs[i] -= 3;
break;
case XZ:
m_factors[i] = pow(2.0 * m_zetas[i], m_pqns[i] + 0.5)
* sqrt(15.0 / (4.0 * M_PI) / factorial(2 * m_pqns[i]));
m_PQNs[i] -= 3;
break;
case Z2:
m_factors[i] = (0.5 / sqrt(3.0)) * pow(2.0 * m_zetas[i], m_pqns[i] + 0.5)
* sqrt(15.0 / (4.0 * M_PI) / factorial(2 * m_pqns[i]));
m_PQNs[i] -= 3;
break;
case YZ:
case XY:
m_factors[i] = pow(2.0 * m_zetas[i], m_pqns[i] + 0.5) *
sqrt(15.0 / (4.0*M_PI) / factorial(2*m_pqns[i]));
m_PQNs[i] -= 3;
break;
default:
cout << "Orbital " << i << " not handled, type " << m_slaterTypes[i]
<< endl;
}
}
// Convert the exponents into Angstroms
for (size_t i = 0; i < m_zetas.size(); ++i)
m_zetas[i] = m_zetas[i] / BOHR_TO_ANGSTROM;
m_initialized = true;
}
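The eigendecomposition step is Löwdin (symmetric) orthogonalization. With the overlap matrix factored as S = P \Lambda P^T, the code forms

S^{-1/2} = P \Lambda^{-1/2} P^{-1} = P \Lambda^{-1/2} P^T,

so that S^{-1/2} S S^{-1/2} = I, which is exactly the identity check performed after computing m.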
Example 14: setSpinDensityMatrix
bool GaussianSet::setSpinDensityMatrix(const MatrixX &m)
{
m_spinDensity.resize(m.rows(), m.cols());
m_spinDensity = m;
return true;
}
Example 15: index
void GMMExpectationMaximization::autoInitializeByEqualIntervals(uint num_gaussians,uint col,const MatrixX & dataset)
{
uint data_count = dataset.rows();
uint dim = dataset.cols();
std::vector<std::vector<uint> > index(num_gaussians);
for(uint g = 0; g < num_gaussians; g++)
index[g].reserve(data_count / num_gaussians);
m_weights.clear();
m_weights.resize(num_gaussians);
m_means.clear();
m_means.resize(num_gaussians,VectorX::Zero(dim));
m_covs.clear();
m_covs.resize(num_gaussians,MatrixX::Zero(dim,dim));
// find max and min value for column col
Real cmax = dataset(0,col);
Real cmin = dataset(0,col);
for(uint n = 1; n < data_count; n++)
{
if (dataset(n,col) > cmax) cmax = dataset(n,col);
if (dataset(n,col) < cmin) cmin = dataset(n,col);
}
Real cspan = cmax - cmin;
for(uint n = 0; n < data_count; n++)
{
// compute gaussian index to which this point belongs
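// (dividing by cspan + 1.0 rather than cspan keeps gi strictly below num_gaussians)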
uint gi = uint((dataset(n,col) - cmin) / (cspan + 1.0) * Real(num_gaussians));
// sum the points to obtain means
m_means[gi] += dataset.row(n).transpose();
index[gi].push_back(n);
}
for (uint g = 0; g < num_gaussians; g++)
{
uint popsize = index[g].size();
// avoid division by zero: if no samples are available, initialize to something from somewhere
if (popsize == 0)
{
m_means[g] = dataset.row(g % data_count);
m_covs[g] = MatrixX::Identity(dim,dim);
m_weights[g] = 1.0f / Real(num_gaussians);
continue;
}
// average by popsize
m_means[g] /= Real(popsize);
// same weight for all gaussians
m_weights[g] = 1.0f / Real(num_gaussians);
// compute covariance matrix
for (uint p = 0; p < popsize; p++)
{
const VectorX r = dataset.row(index[g][p]).transpose();
const VectorX & m = m_means[g];
m_covs[g] += (r - m) * (r - m).transpose();
}
m_covs[g] /= Real(popsize);
m_covs[g] += MatrixX::Identity(dim,dim) * m_epsilon;
}
}