本文整理汇总了C++中Eigen::Ref::rows方法的典型用法代码示例。如果您正苦于以下问题:C++ Ref::rows方法的具体用法?C++ Ref::rows怎么用?C++ Ref::rows使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Eigen::Ref
的用法示例。
在下文中一共展示了Ref::rows方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: train
// Train the Gaussian Process Regression approximator on the given data.
// inputs : one training sample per row (n_samples x n_input_dims)
// targets: one target per row, same row count as inputs
// May only be called once; use reTrain to retrain.
void FunctionApproximatorGPR::train(const Eigen::Ref<const Eigen::MatrixXd>& inputs, const Eigen::Ref<const Eigen::MatrixXd>& targets)
{
  if (isTrained())
  {
    cerr << "WARNING: You may not call FunctionApproximatorGPR::train more than once. Doing nothing." << endl;
    cerr << " (if you really want to retrain, call reTrain function instead)" << endl;
    return;
  }

  assert(inputs.rows() == targets.rows()); // One target per training sample
  assert(inputs.cols()==getExpectedInputDim());

  const MetaParametersGPR* meta_parameters_gpr =
    dynamic_cast<const MetaParametersGPR*>(getMetaParameters());
  // BUGFIX: dynamic_cast returns NULL when the meta-parameters are not of
  // type MetaParametersGPR; the original dereferenced it unconditionally.
  if (meta_parameters_gpr==NULL)
  {
    cerr << "WARNING: Meta-parameters are not of type MetaParametersGPR. Doing nothing." << endl;
    return;
  }

  double max_covar = meta_parameters_gpr->maximum_covariance();
  VectorXd sigmas = meta_parameters_gpr->sigmas();

  // Compute the gram matrix
  // In a gram matrix, every input point is itself a center
  MatrixXd centers = inputs;
  // Replicate sigmas, because they are the same for each data point/center
  MatrixXd widths = sigmas.transpose().colwise().replicate(centers.rows());

  MatrixXd gram(inputs.rows(),inputs.rows());
  bool normalize_activations = false;
  bool asymmetric_kernels = false;
  BasisFunction::Gaussian::activations(centers,widths,inputs,gram,normalize_activations,asymmetric_kernels);

  // Scale kernel activations by the maximum covariance of the process
  gram *= max_covar;

  setModelParameters(new ModelParametersGPR(inputs,targets,gram,max_covar,sigmas));
}
示例2: pointsPerDimension
// create initial points distribution
// Generates the initial point set for the distmesh algorithm:
//  1. lay a regular grid of points over the bounding box (off-axis rows are
//     shifted to approximate hexagonal packing),
//  2. drop points outside the region described by distanceFunction,
//  3. drop grid points that coincide with a fixed point,
//  4. randomly thin points so density follows elementSizeFunction,
//  5. prepend the fixed points to the surviving grid points.
Eigen::ArrayXXd distmesh::utils::createInitialPoints(
Functional const& distanceFunction, double const initialPointDistance,
Functional const& elementSizeFunction, Eigen::Ref<Eigen::ArrayXXd const> const boundingBox,
Eigen::Ref<Eigen::ArrayXXd const> const fixedPoints) {
// extract dimension of mesh
unsigned const dimension = boundingBox.cols();
// initially distribute points evenly in complete bounding box
// spacing along dim 0 is initialPointDistance; other dimensions use
// sqrt(3)/2 of it, the row spacing of an equilateral-triangle grid
Eigen::ArrayXi pointsPerDimension(dimension);
for (int dim = 0; dim < dimension; ++dim) {
pointsPerDimension(dim) = ceil((boundingBox(1, dim) - boundingBox(0, dim)) /
(initialPointDistance * (dim == 0 ? 1.0 : sqrt(3.0) / 2.0)));
}
// enumerate every grid node; pointIndex is this point's index along dim
// (row-major style decomposition of the flat point index)
Eigen::ArrayXXd points(pointsPerDimension.prod(), dimension);
for (int point = 0; point < points.rows(); ++point)
for (int dim = 0; dim < dimension; ++dim) {
int const pointIndex = (point / std::max(pointsPerDimension.topRows(dim).prod(), 1)) %
pointsPerDimension(dim);
points(point, dim) = boundingBox(0, dim) + (double)pointIndex * initialPointDistance *
(dim == 0 ? 1.0 : sqrt(3.0) / 2.0);
// shift every other row by half a spacing in the previous dimension to
// approximate hexagonal packing
if (dim > 0) {
points(point, dim - 1) += pointIndex % 2 != 0 ? initialPointDistance / 2.0 : 0.0;
}
}
// reject points outside of region defined by distance function
// (positive distance means outside, cf. projectPointsToBoundary; points
// within the evaluation threshold of the boundary are kept)
points = selectMaskedArrayElements<double>(points,
distanceFunction(points) < constants::geometryEvaluationThreshold * initialPointDistance);
// clear duplicate points
// i.e. mark grid points coinciding with any fixed point as non-unique
Eigen::Array<bool, Eigen::Dynamic, 1> isUniquePoint =
Eigen::Array<bool, Eigen::Dynamic, 1>::Constant(points.rows(), true);
for (int i = 0; i < fixedPoints.rows(); ++i)
for (int j = 0; j < points.rows(); ++j) {
isUniquePoint(j) &= !(fixedPoints.row(i) == points.row(j)).all();
}
points = selectMaskedArrayElements<double>(points, isUniquePoint);
// calculate probability to keep points
// smaller desired element size -> higher keep probability (rejection sampling)
Eigen::ArrayXd probability = 1.0 / elementSizeFunction(points).pow(dimension);
probability /= probability.maxCoeff();
// reject points with wrong probability
// Random() is uniform in [-1, 1]; 0.5*(1 + Random()) maps it to [0, 1]
points = selectMaskedArrayElements<double>(points,
0.5 * (1.0 + Eigen::ArrayXd::Random(points.rows())) < probability);
// combine fixed and variable points to one array
Eigen::ArrayXXd finalPoints(points.rows() + fixedPoints.rows(), dimension);
finalPoints << fixedPoints, points;
return finalPoints;
}
示例3: if
// Solve PnP via SO(3) optimization, refining an initial p3p estimate.
// obj_pts/img_pts: 3d/2d correspondences (accepted as n x 3 / n x 2 or
// transposed); n: number of correspondences; cam_intrinsic: 3x3 camera
// matrix. On success the pose [t; q.vec()] is written to pose_mat and the
// solver's status is returned; dimension errors print a diagnostic and
// return false.
bool mrpt::vision::pnp::CPnP::so3(const Eigen::Ref<Eigen::MatrixXd> obj_pts, const Eigen::Ref<Eigen::MatrixXd> img_pts, int n, const Eigen::Ref<Eigen::MatrixXd> cam_intrinsic, Eigen::Ref<Eigen::MatrixXd> pose_mat)
{
    try
    {
        // Input 2d/3d correspondences and camera intrinsic matrix
        Eigen::MatrixXd cam_in_eig, img_pts_eig, obj_pts_eig;

        // Check for consistency of input matrix dimensions
        if (img_pts.rows() != obj_pts.rows() || img_pts.cols() != obj_pts.cols())
            throw(2);
        else if (cam_intrinsic.rows() != 3 || cam_intrinsic.cols() != 3)
            throw(3);

        // Accept both points-in-rows and points-in-columns layouts
        if (obj_pts.rows() < obj_pts.cols())
        {
            cam_in_eig = cam_intrinsic.transpose();
            img_pts_eig = img_pts.transpose().block(0, 0, n, 2);
            obj_pts_eig = obj_pts.transpose();
        }
        else
        {
            cam_in_eig = cam_intrinsic;
            img_pts_eig = img_pts.block(0, 0, n, 2);
            obj_pts_eig = obj_pts;
        }

        // Output pose
        Eigen::Matrix3d R;
        Eigen::Vector3d t;

        // Compute pose: p3p provides the initial estimate, so3 refines it
        mrpt::vision::pnp::p3p p(cam_in_eig);
        p.solve(R, t, obj_pts_eig, img_pts_eig);

        mrpt::vision::pnp::so3 s(obj_pts_eig, img_pts_eig, cam_in_eig, n);
        bool ret = s.compute_pose(R, t);

        Eigen::Quaterniond q(R);
        pose_mat << t, q.vec();

        return ret;
    }
    catch (int e)
    {
        switch (e)
        {
            // BUGFIX: these cases used to fall through, so error code 2 also
            // printed the diagnostic for error code 3.
            case 2:
                std::cout << "2d/3d correspondences mismatch\n Check dimension of obj_pts and img_pts" << std::endl;
                break;
            case 3:
                std::cout << "Camera intrinsic matrix does not have 3x3 dimensions " << std::endl;
                break;
        }
        return false;
    }
}
示例4: polygon
// Point-in-polygon test (crossing-number method), vectorized over all query
// points at once. Returns 1.0 for points inside the polygon, 0.0 outside.
Eigen::ArrayXd distmesh::utils::pointsInsidePoly(
    Eigen::Ref<Eigen::ArrayXXd const> const points,
    Eigen::Ref<Eigen::ArrayXXd const> const polygon) {
    Eigen::ArrayXd result = Eigen::ArrayXd::Zero(points.rows());
    int const vertexCount = polygon.rows();
    for (int current = 0; current < vertexCount; ++current) {
        // previous vertex, wrapping around so the polygon is closed
        int const previous = (current + vertexCount - 1) % vertexCount;
        // does the edge straddle the horizontal line through each point?
        Eigen::Array<bool, Eigen::Dynamic, 1> const straddles =
            (points.col(1) < polygon(current, 1)) !=
            (points.col(1) < polygon(previous, 1));
        // x coordinate where the edge crosses that horizontal line
        Eigen::ArrayXd const crossing =
            (polygon(previous, 0) - polygon(current, 0)) *
            (points.col(1) - polygon(current, 1)) /
            (polygon(previous, 1) - polygon(current, 1)) + polygon(current, 0);
        // every crossing to the right of the point toggles inside/outside
        result = (straddles && (points.col(0) < crossing)).select(1.0 - result, result);
    }
    return result;
}
示例5: boundary
// Determine the boundary edges of the given triangulation: an edge lies on
// the boundary iff it belongs to exactly one triangle. Optional precomputed
// edge list / per-triangle edge indices may be passed; empty arrays trigger
// their computation here.
Eigen::ArrayXi distmesh::utils::boundEdges(
    Eigen::Ref<Eigen::ArrayXXi const> const triangulation,
    Eigen::Ref<Eigen::ArrayXXi const> const _edges,
    Eigen::Ref<Eigen::ArrayXXi const> const _edgeIndices) {
    // fall back to a freshly computed edge list when none was supplied
    Eigen::ArrayXXi edgeList;
    if (_edges.rows() == 0) {
        edgeList = utils::findUniqueEdges(triangulation);
    } else {
        edgeList = _edges;
    }
    // same for the per-triangle edge indices
    Eigen::ArrayXXi triangleEdges;
    if (_edgeIndices.rows() == 0) {
        triangleEdges = utils::getTriangulationEdgeIndices(triangulation, edgeList);
    } else {
        triangleEdges = _edgeIndices;
    }
    // track edges seen so far; an edge seen once is tentatively on the
    // boundary, an edge seen a second time is interior and gets removed
    std::set<int> seen;
    std::vector<int> boundary;
    for (int tri = 0; tri < triangulation.rows(); ++tri) {
        for (int localEdge = 0; localEdge < triangulation.cols(); ++localEdge) {
            int const index = triangleEdges(tri, localEdge);
            if (seen.insert(index).second) {
                // first appearance: candidate boundary edge
                boundary.push_back(index);
            } else {
                // repeated appearance: interior edge, drop the candidate
                auto const position = std::find(boundary.begin(), boundary.end(), index);
                if (position != boundary.end()) {
                    boundary.erase(position);
                }
            }
        }
    }
    // copy the surviving edge indices into an eigen array
    Eigen::ArrayXi result(boundary.size());
    std::copy(boundary.begin(), boundary.end(), result.data());
    return result;
}
示例6: predictVariance
// Compute the GPR predictive variance for each input sample:
//   var(x) = k(x,x) - k_*^T K^{-1} k_*
// where k(x,x) = maximum_covariance and k_* are the kernel activations of x
// against the training inputs. One variance per input row is written to
// variances (n_samples x 1).
void FunctionApproximatorGPR::predictVariance(const Eigen::Ref<const Eigen::MatrixXd>& inputs, MatrixXd& variances)
{
  if (!isTrained())
  {
    // BUGFIX: this warning previously named FunctionApproximatorLWPR::predict.
    cerr << "WARNING: You may not call FunctionApproximatorGPR::predictVariance if you have not trained yet. Doing nothing." << endl;
    return;
  }

  const ModelParametersGPR* model_parameters_gpr = static_cast<const ModelParametersGPR*>(getModelParameters());

  assert(inputs.cols()==getExpectedInputDim());

  unsigned int n_samples = inputs.rows();
  variances.resize(n_samples,1);

  // Kernel activations of each input against the training samples
  MatrixXd ks;
  model_parameters_gpr->kernelActivations(inputs, ks);

  double maximum_covariance = model_parameters_gpr->maximum_covariance();
  MatrixXd gram_inv = model_parameters_gpr->gram_inv();

  for (unsigned int ii=0; ii<n_samples; ii++)
    variances(ii) = maximum_covariance - (ks.row(ii)*gram_inv).dot(ks.row(ii).transpose());
}
示例7: edgeIndices
// Find every distinct edge of the triangulation, each oriented from its
// lower to its higher node index, returned as a two-column array.
Eigen::ArrayXXi distmesh::utils::findUniqueEdges(Eigen::Ref<Eigen::ArrayXXi const> const triangulation) {
    // every way to pick 2 of an element's node slots defines an edge
    auto const nodePairs = nOverK(triangulation.cols(), 2);
    // collect each edge exactly once via a set of ordered node pairs
    std::set<std::array<int, 2>> edgeSet;
    for (int pair = 0; pair < nodePairs.rows(); ++pair) {
        for (int element = 0; element < triangulation.rows(); ++element) {
            int const a = triangulation(element, nodePairs(pair, 0));
            int const b = triangulation(element, nodePairs(pair, 1));
            // normalize orientation: lower node index first
            std::array<int, 2> const orderedEdge = a < b ?
                std::array<int, 2>{{a, b}} : std::array<int, 2>{{b, a}};
            edgeSet.insert(orderedEdge);
        }
    }
    // flatten the set into a two-column eigen array
    Eigen::ArrayXXi result(edgeSet.size(), 2);
    int row = 0;
    for (auto const& e : edgeSet) {
        result(row, 0) = e[0];
        result(row, 1) = e[1];
        ++row;
    }
    return result;
}
示例8: predictVariance
void FunctionApproximatorGMR::predictVariance(const Eigen::Ref<const Eigen::MatrixXd>& inputs, Eigen::MatrixXd& variances)
{
// Compute the predictive variance for each input row by delegating to
// predict(), passing empty_prealloc_ in place of the mean outputs.
// NOTE(review): empty_prealloc_ presumably signals predict() to skip the
// mean computation -- confirm against the predict() overload.
ENTERING_REAL_TIME_CRITICAL_CODE
// NOTE(review): resize() may allocate inside the real-time critical section
// when variances is not already the right shape.
variances.resize(inputs.rows(),getExpectedOutputDim());
predict(inputs,empty_prealloc_,variances);
EXITING_REAL_TIME_CRITICAL_CODE
}
示例9: A_extended
// Compute the (right) null vector of A as the right singular vector that
// belongs to the smallest singular value; that singular value is returned.
double Nullspace
(
  const Eigen::Ref<const Mat> & A,
  Eigen::Ref<Vec> nullspace
)
{
  if ( A.rows() < A.cols() )
  {
    // Under-determined case: pad A with zero rows to make it square, since
    // Eigen's SVD needs rows >= cols. Padding with zeros does not change the
    // null space, so recurse on the square matrix.
    Mat padded = Mat::Zero( A.cols(), A.cols() );
    padded.topRows( A.rows() ) = A;
    return Nullspace( padded, nullspace );
  }
  Eigen::JacobiSVD<Mat> svd( A, Eigen::ComputeFullV );
  const auto last = A.cols() - 1;
  nullspace = svd.matrixV().col( last );
  return svd.singularValues()( last );
}
示例10: re
// Train the softmax classifier with mini-batch gradient descent.
// train : feature matrix, one column per sample (features x samples)
// labels: class label of each sample column
// Iterates up to max_iter_ times, stopping early once the cost change falls
// below epsillon_ (or the cost becomes negative).
void softmax<T>::train(const Eigen::Ref<const EigenMat> &train,
                       const std::vector<int> &labels)
{
#ifdef OCV_TEST_SOFTMAX
    gradient_check();
#endif

    auto const UniqueLabels = get_unique_labels(labels);
    auto const NumClass = UniqueLabels.size();
    // one weight/gradient row per class, one column per feature
    weight_ = EigenMat::Random(NumClass, train.rows());
    grad_ = EigenMat::Zero(NumClass, train.rows());

    auto const TrainCols = static_cast<int>(train.cols());
    // one-hot encoding of the labels (NumClass x TrainCols)
    EigenMat const GroundTruth = get_ground_truth(static_cast<int>(NumClass),
                                                  TrainCols,
                                                  UniqueLabels,
                                                  labels);

    std::random_device rd;
    std::default_random_engine re(rd());
    int const Batch = (get_batch_size(TrainCols));
    // highest admissible start column for a contiguous mini-batch
    int const RandomSize = TrainCols != Batch ?
                TrainCols - Batch - 1 : 0;
    std::uniform_int_distribution<int>
            uni_int(0, RandomSize);
    for(size_t i = 0; i != params_.max_iter_; ++i){
        // pick a random contiguous mini-batch of Batch sample columns
        auto const Cols = uni_int(re);
        auto const &TrainBlock =
                train.block(0, Cols, train.rows(), Batch);
        // BUGFIX: restored "&GTBlock", which had been corrupted into
        // ">Block" (HTML-entity mangling); GTBlock is referenced below and
        // the corrupted form does not compile.
        auto const &GTBlock =
                GroundTruth.block(0, Cols, NumClass, Batch);
        auto const Cost = compute_cost(TrainBlock, weight_, GTBlock);
        // stop when the cost no longer improves measurably
        if(std::abs(params_.cost_ - Cost) < params_.epsillon_ ||
                Cost < 0){
            break;
        }
        params_.cost_ = Cost;
        compute_gradient(TrainBlock, weight_, GTBlock);
        weight_.array() -= grad_.array() * params_.lrate_;//*/
    }
}
示例11: pinvDampedEigen
//**************************************************************************************************
// Damped pseudoinverse of A: V * diag(s_i / (s_i^2 + damp^2)) * U^T.
// Approaches the Moore-Penrose pseudoinverse as damp -> 0 while remaining
// bounded near singularities.
Eigen::MatrixRXd wholeBodyReach::pinvDampedEigen(const Eigen::Ref<Eigen::MatrixRXd> &A, double damp)
{
    int const rows = A.rows();
    int const cols = A.cols();
    int const rank = rows < cols ? rows : cols;
    // thin SVD of A (default Eigen SVD)
    JacobiSVD<MatrixRXd> svd(A, ComputeThinU | ComputeThinV);
    VectorXd const singularValues = svd.singularValues();
    // damped reciprocal of each singular value
    double const dampSquared = damp * damp;
    VectorXd dampedInverse = VectorXd::Zero(rank);
    for (int i = 0; i < rank; ++i)
        dampedInverse(i) = singularValues(i) / (singularValues(i) * singularValues(i) + dampSquared);
    // recompose the damped pseudoinverse
    return svd.matrixV() * dampedInverse.asDiagonal() * svd.matrixU().transpose();
}
示例12: gradient
// project points outside of domain back to boundary
// Moves every point with positive signed distance (distance > 0.0 is treated
// as outside below) along the numerical gradient of the distance function
// back onto the boundary; points inside the domain are left untouched.
void distmesh::utils::projectPointsToBoundary(
Functional const& distanceFunction, double const initialPointDistance,
Eigen::Ref<Eigen::ArrayXXd> points) {
Eigen::ArrayXd distance = distanceFunction(points);
// check for points outside of boundary
Eigen::Array<bool, Eigen::Dynamic, 1> outside = distance > 0.0;
if (outside.any()) {
// calculate gradient
// forward finite differences with step deltaX * initialPointDistance,
// one dimension perturbed at a time
Eigen::ArrayXXd gradient(points.rows(), points.cols());
Eigen::ArrayXXd deltaX = Eigen::ArrayXXd::Zero(points.rows(), points.cols());
for (int dim = 0; dim < points.cols(); ++dim) {
deltaX.col(dim).fill(constants::deltaX * initialPointDistance);
gradient.col(dim) = (distanceFunction(points + deltaX) - distance) /
(constants::deltaX * initialPointDistance);
// reset this column so only one dimension is perturbed per iteration
deltaX.col(dim).fill(0.0);
}
// project points back to boundary
// step = distance * gradient / |gradient|^2, applied only to outside points
points -= outside.replicate(1, points.cols()).select(
gradient.colwise() * distance / gradient.square().rowwise().sum(), 0.0);
}
}
示例13: nullSpaceProjector
//**************************************************************************************************
// Null-space projector of A: N = I - pinv(A) * A, computed through a thin
// SVD with singular values at or below tol treated as zero.
Eigen::MatrixRXd wholeBodyReach::nullSpaceProjector(const Eigen::Ref<MatrixRXd> A, double tol)
{
    int const rows = A.rows();
    int const cols = A.cols();
    int const rank = rows < cols ? rows : cols;
    // thin SVD of A (default Eigen SVD)
    JacobiSVD<MatrixRXd> svd(A, ComputeThinU | ComputeThinV);
    VectorXd const singularValues = svd.singularValues();
    // pseudoinverse of the singular-value matrix, thresholded at tol
    MatrixRXd sigmaPinv = MatrixRXd::Zero(rank, rank);
    for (int i = 0; i < rank; ++i)
        if (singularValues(i) > tol)
            sigmaPinv(i, i) = 1 / singularValues(i);
    // subtract the projection onto the row space of A from the identity
    MatrixRXd projector = MatrixRXd::Identity(cols, cols);
    projector -= svd.matrixV() * sigmaPinv * svd.matrixU().transpose() * A;
    return projector;
}
示例14: predict
// Compute the GPR mean prediction for each input sample:
//   y(x) = k_*^T * weights
// where k_* are the kernel activations of x against the training inputs.
// One prediction per input row is written to outputs (n_samples x 1).
void FunctionApproximatorGPR::predict(const Eigen::Ref<const Eigen::MatrixXd>& inputs, MatrixXd& outputs)
{
  if (!isTrained())
  {
    // BUGFIX: this warning previously named FunctionApproximatorLWPR::predict.
    cerr << "WARNING: You may not call FunctionApproximatorGPR::predict if you have not trained yet. Doing nothing." << endl;
    return;
  }

  const ModelParametersGPR* model_parameters_gpr = static_cast<const ModelParametersGPR*>(getModelParameters());

  assert(inputs.cols()==getExpectedInputDim());

  unsigned int n_samples = inputs.rows();
  outputs.resize(n_samples,1);

  // BUGFIX: ks was preallocated as n_samples x n_samples, but the kernel
  // matrix is n_samples x n_centers; kernelActivations() sizes it itself
  // (predictVariance declares it unsized too), so do the same here.
  MatrixXd ks;
  model_parameters_gpr->kernelActivations(inputs, ks);

  VectorXd weights = model_parameters_gpr->weights();
  for (unsigned int ii=0; ii<n_samples; ii++)
    outputs(ii) = ks.row(ii).dot(weights);
}
示例15: train
void FunctionApproximatorGMR::train(const Eigen::Ref<const Eigen::MatrixXd>& inputs, const Eigen::Ref<const Eigen::MatrixXd>& targets)
{
if (isTrained())
{
cerr << "WARNING: You may not call FunctionApproximatorGMR::train more than once. Doing nothing." << endl;
cerr << " (if you really want to retrain, call reTrain function instead)" << endl;
return;
}
assert(inputs.rows() == targets.rows()); // Must have same number of examples
assert(inputs.cols() == getExpectedInputDim());
const MetaParametersGMR* meta_parameters_GMR =
static_cast<const MetaParametersGMR*>(getMetaParameters());
const ModelParametersGMR* model_parameters_GMR =
static_cast<const ModelParametersGMR*>(getModelParameters());
int n_gaussians;
if(meta_parameters_GMR!=NULL)
n_gaussians = meta_parameters_GMR->number_of_gaussians_;
else if(model_parameters_GMR!=NULL)
n_gaussians = model_parameters_GMR->priors_.size();
else
cerr << "FunctionApproximatorGMR::train Something wrong happened, both ModelParameters and MetaParameters are not initialized." << endl;
int n_dims_in = inputs.cols();
int n_dims_out = targets.cols();
int n_dims_gmm = n_dims_in + n_dims_out;
// Initialize the means, priors and covars
std::vector<VectorXd> means(n_gaussians);
std::vector<MatrixXd> covars(n_gaussians);
std::vector<double> priors(n_gaussians);
int n_observations = 0;
for (int i = 0; i < n_gaussians; i++)
{
means[i] = VectorXd(n_dims_gmm);
priors[i] = 0.0;
covars[i] = MatrixXd(n_dims_gmm, n_dims_gmm);
}
// Put the input/output data in one big matrix
MatrixXd data = MatrixXd(inputs.rows(), n_dims_gmm);
data << inputs, targets;
n_observations = data.rows();
// Initialization
if (inputs.cols() == 1)
firstDimSlicingInit(data, means, priors, covars);
else
kMeansInit(data, means, priors, covars);
// Expectation-Maximization
expectationMaximization(data, means, priors, covars);
// Extract the different input/output components from the means/covars which contain both
std::vector<Eigen::VectorXd> means_x(n_gaussians);
std::vector<Eigen::VectorXd> means_y(n_gaussians);
std::vector<Eigen::MatrixXd> covars_x(n_gaussians);
std::vector<Eigen::MatrixXd> covars_y(n_gaussians);
std::vector<Eigen::MatrixXd> covars_y_x(n_gaussians);
for (int i_gau = 0; i_gau < n_gaussians; i_gau++)
{
means_x[i_gau] = means[i_gau].segment(0, n_dims_in);
means_y[i_gau] = means[i_gau].segment(n_dims_in, n_dims_out);
covars_x[i_gau] = covars[i_gau].block(0, 0, n_dims_in, n_dims_in);
covars_y[i_gau] = covars[i_gau].block(n_dims_in, n_dims_in, n_dims_out, n_dims_out);
covars_y_x[i_gau] = covars[i_gau].block(n_dims_in, 0, n_dims_out, n_dims_in);
}
setModelParameters(new ModelParametersGMR(n_observations, priors, means_x, means_y, covars_x, covars_y, covars_y_x));
// After training, we know the sizes of the matrices that should be cached
preallocateMatrices(n_gaussians,n_dims_in,n_dims_out);
// std::vector<VectorXd> centers;
// std::vector<MatrixXd> slopes;
// std::vector<VectorXd> biases;
// std::vector<MatrixXd> inverseCovarsL;
// // int n_dims_in = inputs.cols();
// // int n_dims_out = targets.cols();
// for (int i_gau = 0; i_gau < n_gaussians; i_gau++)
// {
// centers.push_back(VectorXd(means[i_gau].segment(0, n_dims_in)));
// slopes.push_back(MatrixXd(covars[i_gau].block(n_dims_in, 0, n_dims_out, n_dims_in) * covars[i_gau].block(0, 0, n_dims_in, n_dims_in).inverse()));
// biases.push_back(VectorXd(means[i_gau].segment(n_dims_in, n_dims_out) -
// slopes[i_gau]*means[i_gau].segment(0, n_dims_in)));
// MatrixXd L = covars[i_gau].block(0, 0, n_dims_in, n_dims_in).inverse().llt().matrixL();
// inverseCovarsL.push_back(MatrixXd(L));
// }
// setModelParameters(new ModelParametersGMR(centers, priors, slopes, biases, inverseCovarsL));
//.........这里部分代码省略.........