本文整理汇总了C++中DataPoints::getDescriptorViewByName方法的典型用法代码示例。如果您正苦于以下问题:C++ DataPoints::getDescriptorViewByName方法的具体用法?C++ DataPoints::getDescriptorViewByName怎么用?C++ DataPoints::getDescriptorViewByName使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类DataPoints的用法示例。
在下文中一共展示了DataPoints::getDescriptorViewByName方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: InvalidField
// Re-orient every normal so it points either toward or away from the sensor,
// depending on the towardCenter parameter. Requires the "normals" and
// "observationDirections" descriptors; throws InvalidField when one is missing.
void DataPointsFiltersImpl<T>::OrientNormalsDataPointsFilter::inPlaceFilter(
    DataPoints& cloud)
{
    // Fail fast when a required descriptor is absent.
    if (!cloud.descriptorExists("normals"))
        throw InvalidField("OrientNormalsDataPointsFilter: Error, cannot find normals in descriptors.");
    if (!cloud.descriptorExists("observationDirections"))
        throw InvalidField("OrientNormalsDataPointsFilter: Error, cannot find observation directions in descriptors.");

    BOOST_AUTO(normals, cloud.getDescriptorViewByName("normals"));
    const BOOST_AUTO(observationDirections, cloud.getDescriptorViewByName("observationDirections"));
    assert(normals.rows() == observationDirections.rows());

    const int nbPoints = cloud.features.cols();
    for (int idx = 0; idx < nbPoints; ++idx)
    {
        // The sign of the dot product tells whether the normal currently
        // agrees (positive) or disagrees (negative) with the direction
        // from the point toward the observer.
        const Vector toObserver = observationDirections.col(idx);
        const Vector normal = normals.col(idx);
        const double projection = toObserver.dot(normal);

        // Flip when the orientation disagrees with the requested convention.
        // projection == 0 never triggers a flip, matching either convention.
        const bool mustFlip = towardCenter ? (projection < 0) : (projection > 0);
        if (mustFlip)
            normals.col(idx) = -normal;
    }
}
示例2: dim
// Add an "observationDirections" descriptor holding, for each point, the
// (non-normalized) vector from the point to the sensor position given by
// the centerX/centerY/centerZ parameters. Works in 2D or 3D only.
void ObservationDirectionDataPointsFilter<T>::inPlaceFilter(DataPoints& cloud)
{
    // Features are homogeneous: the last row is padding, so the Euclidean
    // dimension is rows - 1.
    const int euclideanDim = cloud.features.rows() - 1;
    const int nbPoints = cloud.features.cols();

    if (euclideanDim != 2 && euclideanDim != 3)
    {
        throw InvalidField(
            (boost::format("ObservationDirectionDataPointsFilter: Error, works only in 2 or 3 dimensions, cloud has %1% dimensions.") % euclideanDim).str()
        );
    }

    // Sensor position from the filter parameters (z only meaningful in 3D).
    Vector sensorCenter(euclideanDim);
    sensorCenter[0] = centerX;
    sensorCenter[1] = centerY;
    if (euclideanDim == 3)
        sensorCenter[2] = centerZ;

    cloud.allocateDescriptor("observationDirections", euclideanDim);
    BOOST_AUTO(observationDirections, cloud.getDescriptorViewByName("observationDirections"));

    // Store sensor - point for every column.
    for (int col = 0; col < nbPoints; ++col)
    {
        const Vector point(cloud.features.block(0, col, euclideanDim, 1));
        observationDirections.col(col) = sensorCenter - point;
    }
}
示例3: BOOST_AUTO
// Write one VTK point-data attribute section (e.g. SCALARS, COLOR_SCALARS,
// VECTORS) for the descriptor named nameTag, padded to forcedDim components.
// Emits nothing when the descriptor is absent or empty.
void InspectorsImpl<T>::AbstractVTKInspector::buildGenericAttributeStream(std::ostream& stream, const std::string& attribute, const std::string& nameTag, const DataPoints& cloud, const int forcedDim)
{
    // Nothing to emit when the cloud carries no such descriptor.
    if (!cloud.descriptorExists(nameTag))
        return;

    const BOOST_AUTO(descriptor, cloud.getDescriptorViewByName(nameTag));
    assert(descriptor.rows() <= forcedDim);
    if (descriptor.rows() == 0)
        return;

    if (attribute.compare("COLOR_SCALARS") == 0)
    {
        // COLOR_SCALARS: the header carries the component count, and
        // missing components are padded with ones.
        stream << attribute << " " << nameTag << " " << forcedDim << "\n";
        stream << padWithOnes(descriptor, forcedDim, descriptor.cols()).transpose();
    }
    else
    {
        // Other attributes are written as floats, padded with zeros;
        // SCALARS additionally needs a lookup-table declaration.
        stream << attribute << " " << nameTag << " float\n";
        if (attribute.compare("SCALARS") == 0)
            stream << "LOOKUP_TABLE default\n";
        stream << padWithZeros(descriptor, forcedDim, descriptor.cols()).transpose();
    }
    stream << "\n";
}
示例4: squaredValues
// Attach a one-row "simpleSensorNoise" descriptor holding a per-point noise
// estimate derived from the configured sensorType. Laser scanners use the
// range-based model of computeLaserNoise(); the Kinect model grows
// quadratically with range. Throws InvalidParameter on an unknown id.
void SimpleSensorNoiseDataPointsFilter<T>::inPlaceFilter(DataPoints& cloud)
{
    cloud.allocateDescriptor("simpleSensorNoise", 1);
    BOOST_AUTO(noise, cloud.getDescriptorViewByName("simpleSensorNoise"));

    if (sensorType == 0)
    {
        // Sick LMS-1xx
        noise = computeLaserNoise(0.012, 0.0068, 0.0008, cloud.features);
    }
    else if (sensorType == 1)
    {
        // Hokuyo URG-04LX
        noise = computeLaserNoise(0.028, 0.0013, 0.0001, cloud.features);
    }
    else if (sensorType == 2)
    {
        // Hokuyo UTM-30LX
        noise = computeLaserNoise(0.018, 0.0006, 0.0015, cloud.features);
    }
    else if (sensorType == 3)
    {
        // Kinect / Xtion: noise proportional to squared range
        // (homogeneous row excluded from the norm).
        const int dim = cloud.features.rows();
        const Matrix squaredValues(cloud.features.topRows(dim-1).colwise().norm().array().square());
        noise = squaredValues*(0.5*0.00285);
    }
    else if (sensorType == 4)
    {
        // Sick Tim3xx
        noise = computeLaserNoise(0.004, 0.0053, -0.0092, cloud.features);
    }
    else
    {
        throw InvalidParameter(
            (boost::format("SimpleSensorNoiseDataPointsFilter: Error, cannot compute noise for sensorType id %1% .") % sensorType).str());
    }
}
示例5: covariance
// Estimate the 6x6 covariance of a point-to-plane ICP solution by
// propagating sensor range noise through the error function
// (closed-form, Censi-style): cov = H^-1 (d2J/dZdX d2J/dZdX^T) H^-1,
// finally scaled by the sensor noise variance.
// Inputs: matched reading/reference clouds, matches, their outlier
// weights, and the converged transformation.
// Returns max()*Identity(6,6) when the reference has no usable normals.
typename ErrorMinimizersImpl<T>::Matrix
ErrorMinimizersImpl<T>::PointToPlaneWithCovErrorMinimizer::estimateCovariance(const DataPoints& reading, const DataPoints& reference, const Matches& matches, const OutlierWeights& outlierWeights, const TransformationParameters& transformation)
{
int max_nbr_point = outlierWeights.cols();
Matrix covariance(Matrix::Zero(6,6));
Matrix J_hessian(Matrix::Zero(6,6));
// Per-point second derivatives of the cost w.r.t. (measurement, state),
// one 6-vector column per accepted match, for each of the two clouds.
Matrix d2J_dReadingdX(Matrix::Zero(6, max_nbr_point));
Matrix d2J_dReferencedX(Matrix::Zero(6, max_nbr_point));
Vector reading_point(Vector::Zero(3));
Vector reference_point(Vector::Zero(3));
Vector normal(3);
Vector reading_direction(Vector::Zero(3));
Vector reference_direction(Vector::Zero(3));
Matrix normals = reference.getDescriptorViewByName("normals");
if (normals.rows() < 3) // Make sure there are normals in DataPoints
return std::numeric_limits<T>::max() * Matrix::Identity(6,6);
// Extract Euler angles (alpha, beta, gamma about x, y, z) from the
// rotation part of the transformation.
// NOTE(review): division by cos(beta) is unguarded -- degenerates when
// cos(beta) == 0 (gimbal lock).
T beta = -asin(transformation(2,0));
T alpha = atan2(transformation(2,1), transformation(2,2));
T gamma = atan2(transformation(1,0)/cos(beta), transformation(0,0)/cos(beta));
T t_x = transformation(0,3);
T t_y = transformation(1,3);
T t_z = transformation(2,3);
Vector tmp_vector_6(Vector::Zero(6));
int valid_points_count = 0;
// Accumulate over every match kept by the outlier filter (weight > 0).
for(int i = 0; i < max_nbr_point; ++i)
{
if (outlierWeights(0,i) > 0.0)
{
reading_point = reading.features.block(0,i,3,1);
int reference_idx = matches.ids(0,i);
reference_point = reference.features.block(0,reference_idx,3,1);
normal = normals.block(0,reference_idx,3,1);
// Range/bearing decomposition of both points (sensor at the origin).
T reading_range = reading_point.norm();
reading_direction = reading_point / reading_range;
T reference_range = reference_point.norm();
reference_direction = reference_point / reference_range;
// Components of normal x reading_direction: sensitivity of the
// residual to each of the three rotation angles.
T n_alpha = normal(2)*reading_direction(1) - normal(1)*reading_direction(2);
T n_beta = normal(0)*reading_direction(2) - normal(2)*reading_direction(0);
T n_gamma = normal(1)*reading_direction(0) - normal(0)*reading_direction(1);
// E: point-to-plane residual with the rotation linearized in the angles.
T E = normal(0)*(reading_point(0) - gamma*reading_point(1) + beta*reading_point(2) + t_x - reference_point(0));
E += normal(1)*(gamma*reading_point(0) + reading_point(1) - alpha*reading_point(2) + t_y - reference_point(1));
E += normal(2)*(-beta*reading_point(0) + alpha*reading_point(1) + reading_point(2) + t_z - reference_point(2));
// N_reading / N_reference: sensitivity of the residual to range noise
// on the reading and reference measurements respectively.
T N_reading = normal(0)*(reading_direction(0) - gamma*reading_direction(1) + beta*reading_direction(2));
N_reading += normal(1)*(gamma*reading_direction(0) + reading_direction(1) - alpha*reading_direction(2));
N_reading += normal(2)*(-beta*reading_direction(0) + alpha*reading_direction(1) + reading_direction(2));
T N_reference = -(normal(0)*reference_direction(0) + normal(1)*reference_direction(1) + normal(2)*reference_direction(2));
// update the hessian and d2J/dzdx
tmp_vector_6 << normal(0), normal(1), normal(2), reading_range * n_alpha, reading_range * n_beta, reading_range * n_gamma;
J_hessian += tmp_vector_6 * tmp_vector_6.transpose();
tmp_vector_6 << normal(0) * N_reading, normal(1) * N_reading, normal(2) * N_reading, n_alpha * (E + reading_range * N_reading), n_beta * (E + reading_range * N_reading), n_gamma * (E + reading_range * N_reading);
d2J_dReadingdX.block(0,valid_points_count,6,1) = tmp_vector_6;
tmp_vector_6 << normal(0) * N_reference, normal(1) * N_reference, normal(2) * N_reference, reference_range * n_alpha * N_reference, reference_range * n_beta * N_reference, reference_range * n_gamma * N_reference;
d2J_dReferencedX.block(0,valid_points_count,6,1) = tmp_vector_6;
valid_points_count++;
} // if (outlierWeights(0,i) > 0.0)
}
// Stack the per-point columns of both clouds into one 6 x 2n matrix.
Matrix d2J_dZdX(Matrix::Zero(6, 2 * valid_points_count));
d2J_dZdX.block(0,0,6,valid_points_count) = d2J_dReadingdX.block(0,0,6,valid_points_count);
d2J_dZdX.block(0,valid_points_count,6,valid_points_count) = d2J_dReferencedX.block(0,0,6,valid_points_count);
// NOTE(review): J_hessian can be singular for degenerate geometry
// (e.g. all normals coplanar); inverse() is unguarded here.
Matrix inv_J_hessian = J_hessian.inverse();
covariance = d2J_dZdX * d2J_dZdX.transpose();
covariance = inv_J_hessian * covariance * inv_J_hessian;
// Scale by the sensor noise variance (member sensorStdDev).
return (sensorStdDev * sensorStdDev) * covariance;
}
示例6: InvalidField
// Covariance-based subsampling: selects nbSample points whose normals best
// constrain a 6-DOF registration (appears to follow Gelfand et al.'s
// geometrically-stable sampling -- the "Eq. (4)" reference below suggests
// so; TODO confirm against the paper). Requires 3D homogeneous points
// (4 feature rows) and a "normals" descriptor.
// NOTE: this excerpt is truncated; the tail of the function is not shown.
void CovarianceSamplingDataPointsFilter<T>::inPlaceFilter(DataPoints& cloud)
{
const std::size_t featDim(cloud.features.rows());
assert(featDim == 4); //3D pts only
//Check number of points
const std::size_t nbPoints = cloud.getNbPoints();
// Nothing to do when the cloud is already at or below the target size.
if(nbSample >= nbPoints)
return;
//Check if there is normals info
if (!cloud.descriptorExists("normals"))
// NOTE(review): the message names the wrong filter (copy-pasted from
// OrientNormalsDataPointsFilter); it should mention
// CovarianceSamplingDataPointsFilter.
throw InvalidField("OrientNormalsDataPointsFilter: Error, cannot find normals in descriptors.");
const auto& normals = cloud.getDescriptorViewByName("normals");
std::vector<std::size_t> keepIndexes;
keepIndexes.resize(nbSample);
///---- Part A, as we compare the cloud with himself, the overlap is 100%, so we keep all points
//A.1 and A.2 - Compute candidates
// Every point is a candidate (self-comparison, full overlap).
std::vector<std::size_t> candidates ;
candidates.resize(nbPoints);
for (std::size_t i = 0; i < nbPoints; ++i) candidates[i] = i;
const std::size_t nbCandidates = candidates.size();
//Compute centroid
Vector3 center;
for(std::size_t i = 0; i < featDim - 1; ++i) center(i) = T(0.);
// NOTE(review): the two loops below index center(3), but center is a
// fixed-size Vector3 -- out of bounds (Eigen assertion in debug builds,
// undefined behavior in release). The bounds should read f < 3 and
// i < 3 (the 4th feature row is the homogeneous padding); verify
// against the upstream source.
for (std::size_t i = 0; i < nbCandidates; ++i)
for (std::size_t f = 0; f <= 3; ++f)
center(f) += cloud.features(f,candidates[i]);
for(std::size_t i = 0; i <= 3; ++i) center(i) /= T(nbCandidates);
//Compute torque normalization
// Lnorm rescales the torque (cross-product) half of the 6-vectors so
// the rotational and translational components are comparable.
T Lnorm = 1.0;
if(normalizationMethod == TorqueNormMethod::L1)
{
Lnorm = 1.0;
}
else if(normalizationMethod == TorqueNormMethod::Lavg)
{
// Average distance of the candidates to the centroid.
Lnorm = 0.0;
for (std::size_t i = 0; i < nbCandidates; ++i)
Lnorm += (cloud.features.col(candidates[i]).head(3) - center).norm();
Lnorm /= nbCandidates;
}
else if(normalizationMethod == TorqueNormMethod::Lmax)
{
// Half the largest extent of the cloud's axis-aligned bounding box.
const Vector minValues = cloud.features.rowwise().minCoeff();
const Vector maxValues = cloud.features.rowwise().maxCoeff();
const Vector3 radii = maxValues.head(3) - minValues.head(3);
Lnorm = radii.maxCoeff() / 2.; //radii.mean() / 2.;
}
//A.3 - Compute 6x6 covariance matrix + EigenVectors
// NOTE(review): "¢er" below is an HTML-entity mangling of "&center"
// introduced by the page extraction; the capture list should read
// [..., &center, ...] as in the upstream source.
auto computeCovariance = [Lnorm, nbCandidates, &cloud, ¢er, &normals, &candidates](Matrix66 & cov) -> void {
//Compute F matrix, see Eq. (4)
Eigen::Matrix<T, 6, Eigen::Dynamic> F(6, nbCandidates);
for(std::size_t i = 0; i < nbCandidates; ++i)
{
const Vector3 p = cloud.features.col(candidates[i]).head(3) - center; // pi-c
const Vector3 ni = normals.col(candidates[i]).head(3);
//compute (1 / L) * (pi - c) x ni
F.template block<3, 1>(0, i) = (1. / Lnorm) * p.cross(ni);
//set ni part
F.template block<3, 1>(3, i) = ni;
}
// Compute the covariance matrix Cov = FF'
cov = F * F.transpose();
};
Matrix66 covariance;
computeCovariance(covariance);
// Real parts of the eigen-decomposition of the 6x6 covariance.
Eigen::EigenSolver<Matrix66> solver(covariance);
const Matrix66 eigenVe = solver.eigenvectors().real();
const Vector6 eigenVa = solver.eigenvalues().real();
///---- Part B
//B.1 - Compute the v-6 for each candidate
std::vector<Vector6, Eigen::aligned_allocator<Vector6>> v; // v[i] = [(pi-c) x ni ; ni ]'
v.resize(nbCandidates);
for(std::size_t i = 0; i < nbCandidates; ++i)
{
const Vector3 p = cloud.features.col(candidates[i]).head(3) - center; // pi-c
const Vector3 ni = normals.col(candidates[i]).head(3);
v[i].template block<3, 1>(0, 0) = (1. / Lnorm) * p.cross(ni);
v[i].template block<3, 1>(3, 0) = ni;
//......... part of the code is omitted here .........
示例7: pointsCount
// Subsample the cloud while computing surface normals (and optionally
// densities and eigen decompositions) on the neighborhoods used for
// sampling. buildNew() fills the descriptor views and the list of indices
// to keep; the kept columns are then compacted to the front of the arrays
// and the arrays shrunk in place to the surviving point count.
void DataPointsFiltersImpl<T>::SamplingSurfaceNormalDataPointsFilter::inPlaceFilter(
DataPoints& cloud)
{
typedef Matrix Features;
typedef typename DataPoints::View View;
typedef typename DataPoints::Label Label;
typedef typename DataPoints::Labels Labels;
const int pointsCount(cloud.features.cols());
const int featDim(cloud.features.rows());
const int descDim(cloud.descriptors.rows());
int insertDim(0);
if (averageExistingDescriptors)
{
// TODO: this should be in the form of an assert
// Validate descriptors and labels
// The label spans must tile the descriptor rows exactly.
for(unsigned int i = 0; i < cloud.descriptorLabels.size(); i++)
insertDim += cloud.descriptorLabels[i].span;
if (insertDim != descDim)
throw InvalidField("SamplingSurfaceNormalDataPointsFilter: Error, descriptor labels do not match descriptor data");
}
// Compute space requirement for new descriptors
// (features are homogeneous, hence the featDim-1 Euclidean dimension).
const int dimNormals(featDim-1);
const int dimDensities(1);
const int dimEigValues(featDim-1);
const int dimEigVectors((featDim-1)*(featDim-1));
// Allocate space for new descriptors
Labels cloudLabels;
if (keepNormals)
cloudLabels.push_back(Label("normals", dimNormals));
if (keepDensities)
cloudLabels.push_back(Label("densities", dimDensities));
if (keepEigenValues)
cloudLabels.push_back(Label("eigValues", dimEigValues));
if (keepEigenVectors)
cloudLabels.push_back(Label("eigVectors", dimEigVectors));
cloud.allocateDescriptors(cloudLabels);
// we keep build data on stack for reentrant behaviour
View cloudExistingDescriptors(cloud.descriptors.block(0,0,cloud.descriptors.rows(),cloud.descriptors.cols()));
BuildData buildData(cloud.features, cloud.descriptors);
// get views
// Optional views are valid only when the matching keep* flag is set;
// the compaction loop below guards on the same flags.
if (keepNormals)
buildData.normals = cloud.getDescriptorViewByName("normals");
if (keepDensities)
buildData.densities = cloud.getDescriptorViewByName("densities");
if (keepEigenValues)
buildData.eigenValues = cloud.getDescriptorViewByName("eigValues");
if (keepEigenVectors)
buildData.eigenVectors = cloud.getDescriptorViewByName("eigVectors");
// build the new point cloud
// buildNew processes the whole index range within the cloud's bounding
// box, filling buildData.indicesToKeep and the descriptor views
// (presumably by recursive spatial splitting -- see its definition).
buildNew(
buildData,
0,
pointsCount,
cloud.features.rowwise().minCoeff(),
cloud.features.rowwise().maxCoeff()
);
// Bring the data we keep to the front of the arrays then
// wipe the leftover unused space.
std::sort(buildData.indicesToKeep.begin(), buildData.indicesToKeep.end());
int ptsOut = buildData.indicesToKeep.size();
for (int i = 0; i < ptsOut; i++){
int k = buildData.indicesToKeep[i];
// Sorted order guarantees i <= k, so a copy never clobbers a kept
// column that has not been moved yet.
assert(i <= k);
cloud.features.col(i) = cloud.features.col(k);
if (cloud.descriptors.rows() != 0)
cloud.descriptors.col(i) = cloud.descriptors.col(k);
if(keepNormals)
buildData.normals->col(i) = buildData.normals->col(k);
if(keepDensities)
(*buildData.densities)(0,i) = (*buildData.densities)(0,k);
if(keepEigenValues)
buildData.eigenValues->col(i) = buildData.eigenValues->col(k);
if(keepEigenVectors)
buildData.eigenVectors->col(i) = buildData.eigenVectors->col(k);
}
// Shrink to the surviving points; column count drops to ptsOut.
cloud.features.conservativeResize(Eigen::NoChange, ptsOut);
cloud.descriptors.conservativeResize(Eigen::NoChange, ptsOut);
// warning if some points were dropped
if(buildData.unfitPointsCount != 0)
LOG_INFO_STREAM(" SamplingSurfaceNormalDataPointsFilter - Could not compute normal for " << buildData.unfitPointsCount << " pts.");
}