本文整理汇总了C++中DataPoints类的典型用法代码示例。如果您正苦于以下问题:C++ DataPoints类的具体用法?C++ DataPoints怎么用?C++ DataPoints使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DataPoints类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: point
// In-place filter: keeps (or, when removeInside is set, removes) the points
// falling inside the axis-aligned box [xMin,xMax] x [yMin,yMax] x [zMin,zMax].
// For clouds with 3 feature rows (2-D homogeneous points) the z test always
// passes, so the box degenerates to a 2-D rectangle.
void DataPointsFiltersImpl<T>::BoundingBoxDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	const int pointCount = cloud.features.cols();
	const int featRows = cloud.features.rows();
	int kept = 0;
	for (int idx = 0; idx < pointCount; ++idx)
	{
		const Vector pt = cloud.features.col(idx);
		// FIXME: improve performance by using Eigen array operations
		const bool insideX = pt(0) > xMin && pt(0) < xMax;
		const bool insideY = pt(1) > yMin && pt(1) < yMax;
		const bool insideZ = (pt(2) > zMin && pt(2) < zMax) || featRows == 3;
		const bool insideBox = insideX && insideY && insideZ;
		const bool keep = removeInside ? !insideBox : insideBox;
		if (keep)
		{
			// Compact survivors towards the front of the cloud.
			cloud.setColFrom(kept, cloud, idx);
			++kept;
		}
	}
	cloud.conservativeResize(kept);
}
示例2: InvalidParameter
// In-place filter: keeps only the points whose coordinate along axis `dim`
// lies strictly below the `ratio`-quantile of that coordinate over the cloud.
// Throws InvalidParameter when `dim` exceeds the feature dimensionality.
void DataPointsFiltersImpl<T>::MaxQuantileOnAxisDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	if (int(dim) >= cloud.features.rows())
		throw InvalidParameter((boost::format("MaxQuantileOnAxisDataPointsFilter: Error, filtering on dimension number %1%, larger than feature dimensionality %2%") % dim % cloud.features.rows()).str());
	const int inCount = cloud.features.cols();
	const int outCount = inCount * ratio;
	// Gather the coordinate of every point along the filtered axis.
	vector<T> axisValues;
	axisValues.reserve(cloud.features.cols());
	for (int col = 0; col < cloud.features.cols(); ++col)
		axisValues.push_back(cloud.features(dim, col));
	// Partial sort so the element at the quantile position is in sorted place.
	nth_element(axisValues.begin(), axisValues.begin() + (axisValues.size() * ratio), axisValues.end());
	const T threshold = axisValues[outCount];
	// Compact the surviving points towards the beginning of the cloud.
	int kept = 0;
	for (int src = 0; src < inCount; ++src)
	{
		if (cloud.features(dim, src) < threshold)
		{
			assert(kept <= src);
			cloud.setColFrom(kept, cloud, src);
			++kept;
		}
	}
	assert(kept <= outCount);
	cloud.conservativeResize(kept);
}
示例3: BOOST_AUTO
// Writes one VTK point-data attribute section for the descriptor `nameTag`
// of `cloud`, padded to `forcedDim` components. COLOR_SCALARS sections are
// padded with ones, all other attribute kinds with zeros; SCALARS sections
// additionally get the default lookup table line. Writes nothing when the
// descriptor is missing or empty.
void InspectorsImpl<T>::AbstractVTKInspector::buildGenericAttributeStream(std::ostream& stream, const std::string& attribute, const std::string& nameTag, const DataPoints& cloud, const int forcedDim)
{
	if (!cloud.descriptorExists(nameTag))
		return;
	const BOOST_AUTO(desc, cloud.getDescriptorViewByName(nameTag));
	assert(desc.rows() <= forcedDim);
	if (desc.rows() == 0)
		return;
	if (attribute.compare("COLOR_SCALARS") == 0)
	{
		// Colors: missing channels are padded with ones.
		stream << attribute << " " << nameTag << " " << forcedDim << "\n";
		stream << padWithOnes(desc, forcedDim, desc.cols()).transpose();
	}
	else
	{
		stream << attribute << " " << nameTag << " float\n";
		if (attribute.compare("SCALARS") == 0)
			stream << "LOOKUP_TABLE default\n";
		stream << padWithZeros(desc, forcedDim, desc.cols()).transpose();
	}
	stream << "\n";
}
示例4: dim
// Adds an "observationDirections" descriptor holding, for each point, the
// vector pointing from the point towards the sensor position
// (centerX, centerY[, centerZ]). Works only for 2-D or 3-D clouds.
void ObservationDirectionDataPointsFilter<T>::inPlaceFilter(DataPoints& cloud)
{
	const int dim(cloud.features.rows() - 1);
	const int pointCount(cloud.features.cols());
	if (dim != 2 && dim != 3)
	{
		throw InvalidField(
			(boost::format("ObservationDirectionDataPointsFilter: Error, works only in 2 or 3 dimensions, cloud has %1% dimensions.") % dim).str()
		);
	}
	// Sensor position expressed in the cloud's frame.
	Vector sensorCenter(dim);
	sensorCenter[0] = centerX;
	sensorCenter[1] = centerY;
	if (dim == 3)
		sensorCenter[2] = centerZ;
	cloud.allocateDescriptor("observationDirections", dim);
	BOOST_AUTO(observationDirections, cloud.getDescriptorViewByName("observationDirections"));
	for (int i = 0; i < pointCount; ++i)
	{
		const Vector p(cloud.features.block(0, i, dim, 1));
		observationDirections.col(i) = sensorCenter - p;
	}
}
示例5: InvalidField
// Removes "shadow points": points whose viewing ray is nearly perpendicular
// to the surface normal (|normal . unit-point| <= eps). Requires the cloud
// to carry a "normals" descriptor.
void DataPointsFiltersImpl<T>::ShadowDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	// Normals are needed to measure the ray/surface alignment.
	if (!cloud.descriptorExists("normals"))
	{
		throw InvalidField("ShadowDataPointsFilter, Error: cannot find normals in descriptors");
	}
	const int featRows = cloud.features.rows();
	const BOOST_AUTO(normals, cloud.getDescriptorViewByName("normals"));
	int kept = 0;
	for (int i = 0; i < cloud.features.cols(); ++i)
	{
		const Vector unitNormal = normals.col(i).normalized();
		const Vector unitRay = cloud.features.block(0, i, featRows - 1, 1).normalized();
		const T alignment = anyabs(unitNormal.dot(unitRay));
		if (alignment > eps) // test to keep the points
		{
			// Copy features and descriptors of survivors towards the front.
			cloud.features.col(kept) = cloud.features.col(i);
			cloud.descriptors.col(kept) = cloud.descriptors.col(i);
			++kept;
		}
	}
	cloud.conservativeResize(kept);
}
示例6:
// Bernoulli subsampling: each point is kept independently with
// probability `prob`, using std::rand for the random draw.
void DataPointsFiltersImpl<T>::RandomSamplingDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	const int pointCount = cloud.features.cols();
	int kept = 0;
	for (int i = 0; i < pointCount; ++i)
	{
		const float draw = static_cast<float>(std::rand()) / static_cast<float>(RAND_MAX);
		if (draw < prob)
		{
			cloud.setColFrom(kept, cloud, i);
			++kept;
		}
	}
	cloud.conservativeResize(kept);
}
示例7: BOOST_AUTO
// Drops every point that has at least one NaN among its feature coordinates.
void RemoveNaNDataPointsFilter<T>::inPlaceFilter(
	DataPoints& cloud)
{
	const int pointCount = cloud.features.cols();
	int kept = 0;
	for (int i = 0; i < pointCount; ++i)
	{
		// NaN is the only value for which x == x is false, so the column is
		// NaN-free exactly when every coefficient equals itself.
		const BOOST_AUTO(coords, cloud.features.col(i).array());
		const bool allFinite = (coords == coords).all();
		if (allFinite)
		{
			cloud.setColFrom(kept, cloud, i);
			++kept;
		}
	}
	cloud.conservativeResize(kept);
}
示例8: descRef
// Writes a VTK TENSORS section for descriptor `name`: first the reference
// cloud's values, then the reading cloud's, each padded with zeros to the
// 9 components VTK tensors require. Writes nothing when either cloud lacks
// the descriptor.
void InspectorsImpl<T>::AbstractVTKInspector::buildTensorStream(std::ostream& stream,
	const std::string& name,
	const DataPoints& ref,
	const DataPoints& reading)
{
	const Matrix refDesc(ref.getDescriptorByName(name));
	const Matrix readDesc(reading.getDescriptorByName(name));
	if (refDesc.rows() == 0 || readDesc.rows() == 0)
		return;
	stream << "TENSORS " << name << " float\n";
	stream << padWithZeros(refDesc, 9, ref.descriptors.cols()).transpose();
	stream << "\n";
	stream << padWithZeros(readDesc, 9, reading.descriptors.cols()).transpose();
	stream << "\n";
}
示例9: BOOST_AUTO
// Adds a 1-row "simpleSensorNoise" descriptor with a per-point noise
// estimate chosen by `sensorType`. Laser scanners use computeLaserNoise
// with per-model constants; the Kinect/Xtion model scales with the squared
// point range. Unknown sensor ids raise InvalidParameter.
void SimpleSensorNoiseDataPointsFilter<T>::inPlaceFilter(DataPoints& cloud)
{
	cloud.allocateDescriptor("simpleSensorNoise", 1);
	BOOST_AUTO(noise, cloud.getDescriptorViewByName("simpleSensorNoise"));
	switch (sensorType)
	{
	case 0: // Sick LMS-1xx
		noise = computeLaserNoise(0.012, 0.0068, 0.0008, cloud.features);
		break;
	case 1: // Hokuyo URG-04LX
		noise = computeLaserNoise(0.028, 0.0013, 0.0001, cloud.features);
		break;
	case 2: // Hokuyo UTM-30LX
		noise = computeLaserNoise(0.018, 0.0006, 0.0015, cloud.features);
		break;
	case 3: // Kinect / Xtion
	{
		// Noise proportional to the squared range of each point
		// (the last feature row, the homogeneous pad, is excluded).
		const int featRows = cloud.features.rows();
		const Matrix squaredRanges(cloud.features.topRows(featRows - 1).colwise().norm().array().square());
		noise = squaredRanges * (0.5 * 0.00285);
		break;
	}
	case 4: // Sick Tim3xx
		noise = computeLaserNoise(0.004, 0.0053, -0.0092, cloud.features);
		break;
	default:
		throw InvalidParameter(
			(boost::format("SimpleSensorNoiseDataPointsFilter: Error, cannot compute noise for sensorType id %1% .") % sensorType).str());
	}
}
示例10: iStep
// Keeps every `step`-th point starting from a random phase, then updates
// the member `step` geometrically by `stepMult`, clamped at `endStep`.
void DataPointsFiltersImpl<T>::FixStepSamplingDataPointsFilter::inPlaceFilter(
	DataPoints& cloud)
{
	const int stride(step);
	const int pointCount = cloud.features.cols();
	const int offset(rand() % stride);
	int kept = 0;
	for (int src = offset; src < pointCount; src += stride)
	{
		cloud.setColFrom(kept, cloud, src);
		++kept;
	}
	cloud.conservativeResize(kept);
	// The sign of deltaStep tells whether step grows or shrinks over calls,
	// which decides from which side endStep clamps the updated value.
	const double deltaStep(startStep * stepMult - startStep);
	step *= stepMult;
	if (deltaStep < 0 && step < endStep)
		step = endStep;
	if (deltaStep > 0 && step > endStep)
		step = endStep;
}
示例11: covariance
// Estimates the 6x6 covariance of the point-to-plane ICP pose estimate from
// the per-match Hessian and measurement sensitivities:
//   cov = sensorStdDev^2 * H^-1 * (d2J/dZdX)(d2J/dZdX)' * H^-1
// NOTE(review): this appears to follow Censi's closed-form ICP covariance
// formulation — confirm against the cited paper. Assumes 3-D points (blocks
// of size 3 are read from features) and a pose parameterized as
// (t_x, t_y, t_z, alpha, beta, gamma) with a first-order (small-angle)
// rotation approximation in the residual E below.
typename ErrorMinimizersImpl<T>::Matrix
ErrorMinimizersImpl<T>::PointToPlaneWithCovErrorMinimizer::estimateCovariance(const DataPoints& reading, const DataPoints& reference, const Matches& matches, const OutlierWeights& outlierWeights, const TransformationParameters& transformation)
{
int max_nbr_point = outlierWeights.cols();
Matrix covariance(Matrix::Zero(6,6));
Matrix J_hessian(Matrix::Zero(6,6));
// Sensitivity of the gradient w.r.t. reading / reference measurements,
// one 6-vector column per (potentially) valid match.
Matrix d2J_dReadingdX(Matrix::Zero(6, max_nbr_point));
Matrix d2J_dReferencedX(Matrix::Zero(6, max_nbr_point));
Vector reading_point(Vector::Zero(3));
Vector reference_point(Vector::Zero(3));
Vector normal(3);
Vector reading_direction(Vector::Zero(3));
Vector reference_direction(Vector::Zero(3));
Matrix normals = reference.getDescriptorViewByName("normals");
// Without 3-D normals the covariance cannot be computed; return an
// effectively infinite covariance instead of throwing.
if (normals.rows() < 3) // Make sure there are normals in DataPoints
return std::numeric_limits<T>::max() * Matrix::Identity(6,6);
// Recover Euler angles (alpha, beta, gamma) and the translation
// (t_x, t_y, t_z) from the homogeneous transformation matrix.
T beta = -asin(transformation(2,0));
T alpha = atan2(transformation(2,1), transformation(2,2));
T gamma = atan2(transformation(1,0)/cos(beta), transformation(0,0)/cos(beta));
T t_x = transformation(0,3);
T t_y = transformation(1,3);
T t_z = transformation(2,3);
Vector tmp_vector_6(Vector::Zero(6));
int valid_points_count = 0;
for(int i = 0; i < max_nbr_point; ++i)
{
// Only inlier matches (positive outlier weight) contribute.
if (outlierWeights(0,i) > 0.0)
{
reading_point = reading.features.block(0,i,3,1);
int reference_idx = matches.ids(0,i);
reference_point = reference.features.block(0,reference_idx,3,1);
normal = normals.block(0,reference_idx,3,1);
// Split each point into range * unit direction (sensor-noise model
// perturbs the range along the viewing direction).
T reading_range = reading_point.norm();
reading_direction = reading_point / reading_range;
T reference_range = reference_point.norm();
reference_direction = reference_point / reference_range;
// Components of (reading_direction x normal), used as the rotational
// part of the Jacobian.
T n_alpha = normal(2)*reading_direction(1) - normal(1)*reading_direction(2);
T n_beta = normal(0)*reading_direction(2) - normal(2)*reading_direction(0);
T n_gamma = normal(1)*reading_direction(0) - normal(0)*reading_direction(1);
// Point-to-plane residual E = n . (R_linearized * p_read + t - p_ref),
// with the rotation linearized to first order in the Euler angles.
T E = normal(0)*(reading_point(0) - gamma*reading_point(1) + beta*reading_point(2) + t_x - reference_point(0));
E += normal(1)*(gamma*reading_point(0) + reading_point(1) - alpha*reading_point(2) + t_y - reference_point(1));
E += normal(2)*(-beta*reading_point(0) + alpha*reading_point(1) + reading_point(2) + t_z - reference_point(2));
// dE/d(reading range): normal projected onto the rotated viewing direction.
T N_reading = normal(0)*(reading_direction(0) - gamma*reading_direction(1) + beta*reading_direction(2));
N_reading += normal(1)*(gamma*reading_direction(0) + reading_direction(1) - alpha*reading_direction(2));
N_reading += normal(2)*(-beta*reading_direction(0) + alpha*reading_direction(1) + reading_direction(2));
// dE/d(reference range): minus the normal projected onto the reference direction.
T N_reference = -(normal(0)*reference_direction(0) + normal(1)*reference_direction(1) + normal(2)*reference_direction(2));
// update the hessian and d2J/dzdx
tmp_vector_6 << normal(0), normal(1), normal(2), reading_range * n_alpha, reading_range * n_beta, reading_range * n_gamma;
J_hessian += tmp_vector_6 * tmp_vector_6.transpose();
tmp_vector_6 << normal(0) * N_reading, normal(1) * N_reading, normal(2) * N_reading, n_alpha * (E + reading_range * N_reading), n_beta * (E + reading_range * N_reading), n_gamma * (E + reading_range * N_reading);
d2J_dReadingdX.block(0,valid_points_count,6,1) = tmp_vector_6;
tmp_vector_6 << normal(0) * N_reference, normal(1) * N_reference, normal(2) * N_reference, reference_range * n_alpha * N_reference, reference_range * n_beta * N_reference, reference_range * n_gamma * N_reference;
d2J_dReferencedX.block(0,valid_points_count,6,1) = tmp_vector_6;
valid_points_count++;
} // if (outlierWeights(0,i) > 0.0)
}
// Stack the reading and reference sensitivities side by side, using only
// the columns actually filled by inlier matches.
Matrix d2J_dZdX(Matrix::Zero(6, 2 * valid_points_count));
d2J_dZdX.block(0,0,6,valid_points_count) = d2J_dReadingdX.block(0,0,6,valid_points_count);
d2J_dZdX.block(0,valid_points_count,6,valid_points_count) = d2J_dReferencedX.block(0,0,6,valid_points_count);
Matrix inv_J_hessian = J_hessian.inverse();
covariance = d2J_dZdX * d2J_dZdX.transpose();
covariance = inv_J_hessian * covariance * inv_J_hessian;
// Scale by the sensor noise variance.
return (sensorStdDev * sensorStdDev) * covariance;
}
示例12: featDim
void CovarianceSamplingDataPointsFilter<T>::inPlaceFilter(DataPoints& cloud)
{
const std::size_t featDim(cloud.features.rows());
assert(featDim == 4); //3D pts only
//Check number of points
const std::size_t nbPoints = cloud.getNbPoints();
if(nbSample >= nbPoints)
return;
//Check if there is normals info
if (!cloud.descriptorExists("normals"))
throw InvalidField("OrientNormalsDataPointsFilter: Error, cannot find normals in descriptors.");
const auto& normals = cloud.getDescriptorViewByName("normals");
std::vector<std::size_t> keepIndexes;
keepIndexes.resize(nbSample);
///---- Part A, as we compare the cloud with himself, the overlap is 100%, so we keep all points
//A.1 and A.2 - Compute candidates
std::vector<std::size_t> candidates ;
candidates.resize(nbPoints);
for (std::size_t i = 0; i < nbPoints; ++i) candidates[i] = i;
const std::size_t nbCandidates = candidates.size();
//Compute centroid
Vector3 center;
for(std::size_t i = 0; i < featDim - 1; ++i) center(i) = T(0.);
for (std::size_t i = 0; i < nbCandidates; ++i)
for (std::size_t f = 0; f <= 3; ++f)
center(f) += cloud.features(f,candidates[i]);
for(std::size_t i = 0; i <= 3; ++i) center(i) /= T(nbCandidates);
//Compute torque normalization
T Lnorm = 1.0;
if(normalizationMethod == TorqueNormMethod::L1)
{
Lnorm = 1.0;
}
else if(normalizationMethod == TorqueNormMethod::Lavg)
{
Lnorm = 0.0;
for (std::size_t i = 0; i < nbCandidates; ++i)
Lnorm += (cloud.features.col(candidates[i]).head(3) - center).norm();
Lnorm /= nbCandidates;
}
else if(normalizationMethod == TorqueNormMethod::Lmax)
{
const Vector minValues = cloud.features.rowwise().minCoeff();
const Vector maxValues = cloud.features.rowwise().maxCoeff();
const Vector3 radii = maxValues.head(3) - minValues.head(3);
Lnorm = radii.maxCoeff() / 2.; //radii.mean() / 2.;
}
//A.3 - Compute 6x6 covariance matrix + EigenVectors
auto computeCovariance = [Lnorm, nbCandidates, &cloud, ¢er, &normals, &candidates](Matrix66 & cov) -> void {
//Compute F matrix, see Eq. (4)
Eigen::Matrix<T, 6, Eigen::Dynamic> F(6, nbCandidates);
for(std::size_t i = 0; i < nbCandidates; ++i)
{
const Vector3 p = cloud.features.col(candidates[i]).head(3) - center; // pi-c
const Vector3 ni = normals.col(candidates[i]).head(3);
//compute (1 / L) * (pi - c) x ni
F.template block<3, 1>(0, i) = (1. / Lnorm) * p.cross(ni);
//set ni part
F.template block<3, 1>(3, i) = ni;
}
// Compute the covariance matrix Cov = FF'
cov = F * F.transpose();
};
Matrix66 covariance;
computeCovariance(covariance);
Eigen::EigenSolver<Matrix66> solver(covariance);
const Matrix66 eigenVe = solver.eigenvectors().real();
const Vector6 eigenVa = solver.eigenvalues().real();
///---- Part B
//B.1 - Compute the v-6 for each candidate
std::vector<Vector6, Eigen::aligned_allocator<Vector6>> v; // v[i] = [(pi-c) x ni ; ni ]'
v.resize(nbCandidates);
for(std::size_t i = 0; i < nbCandidates; ++i)
{
const Vector3 p = cloud.features.col(candidates[i]).head(3) - center; // pi-c
const Vector3 ni = normals.col(candidates[i]).head(3);
v[i].template block<3, 1>(0, 0) = (1. / Lnorm) * p.cross(ni);
v[i].template block<3, 1>(3, 0) = ni;
//.........这里部分代码省略.........
示例13: getValue
// Reads member `strMemberName` of the wrapped object into `strValue`.
// "value" is served from the generic ValueObject interface; all other
// member names are served from the DataPoints-specific accessors, and only
// when the corresponding hasValue_* flag is set.
// Returns true iff strValue was assigned.
bool DataPointsNode::getValue (const String& strMemberName, String& strValue)
{
	bool bValueSet = false;
	DataPoints* pObject = dynamic_cast<DataPoints*>(m_pObject);
	if (strMemberName == L"value")
	{
		ValueObject* pValueObj = dynamic_cast<ValueObject*>(m_pObject);
		if (pValueObj)
		{
			if (!pValueObj->isNothing())
			{
				strValue = pValueObj->toString();
				bValueSet = true;
			}
		}
	}
	// Guard: the branches below dereference pObject, which is null when
	// m_pObject is null or not a DataPoints. The original code crashed here.
	else if (!pObject)
	{
		bValueSet = false;
	}
	else if (strMemberName == L"Name")
	{
		if (pObject->hasValue_Name())
		{
			strValue = (StringObjectImpl(pObject->getName())).toString();
			bValueSet = true;
		}
	}
	else if (strMemberName == L"Desc")
	{
		if (pObject->hasValue_Desc())
		{
			strValue = (StringObjectImpl(pObject->getDesc())).toString();
			bValueSet = true;
		}
	}
	else if (strMemberName == L"Code")
	{
		if (pObject->hasValue_Code())
		{
			strValue = (StringObjectImpl(pObject->getCode())).toString();
			bValueSet = true;
		}
	}
	else if (strMemberName == L"State")
	{
		if (pObject->hasValue_State())
		{
			strValue = (EnumStateTypeImpl(pObject->getState())).toString();
			bValueSet = true;
		}
	}
	else if (strMemberName == L"PntRef")
	{
		if (pObject->hasValue_PntRef())
		{
			strValue = (StringObjectImpl(pObject->getPntRef())).toString();
			bValueSet = true;
		}
	}
	else if (strMemberName == L"PointGeometry")
	{
		if (pObject->hasValue_PointGeometry())
		{
			strValue = (EnumPointGeometryTypeImpl(pObject->getPointGeometry())).toString();
			bValueSet = true;
		}
	}
	else if (strMemberName == L"DTMAttribute")
	{
		if (pObject->hasValue_DTMAttribute())
		{
			strValue = (EnumDTMAttributeTypeImpl(pObject->getDTMAttribute())).toString();
			bValueSet = true;
		}
	}
	return bValueSet;
}
示例14: setValue
// Writes member `strMemberName` of the wrapped DataPoints object.
// A null `pstrValue` resets the member (and leaves the return value false,
// matching the original behavior); otherwise the string is parsed into the
// member's type and the member is set.
// Returns true iff a value was parsed and stored.
// Fixes vs. original: guards against a failed dynamic_cast (null pObject was
// dereferenced unconditionally) and chains the name comparisons with
// else-if, consistent with getValue, instead of re-testing every name.
bool DataPointsNode::setValue (const String& strMemberName, const String* pstrValue)
{
	bool bValueSet = false;
	DataPoints* pObject = dynamic_cast<DataPoints*>(m_pObject);
	if (!pObject)
		return false;
	if (strMemberName == L"Name")
	{
		if (!pstrValue)
		{
			pObject->resetValue_Name();
		}
		else
		{
			pObject->setName(StringObjectImpl::parseString(pstrValue->c_str(), pstrValue->length()));
			bValueSet = true;
		}
	}
	else if (strMemberName == L"Desc")
	{
		if (!pstrValue)
		{
			pObject->resetValue_Desc();
		}
		else
		{
			pObject->setDesc(StringObjectImpl::parseString(pstrValue->c_str(), pstrValue->length()));
			bValueSet = true;
		}
	}
	else if (strMemberName == L"Code")
	{
		if (!pstrValue)
		{
			pObject->resetValue_Code();
		}
		else
		{
			pObject->setCode(StringObjectImpl::parseString(pstrValue->c_str(), pstrValue->length()));
			bValueSet = true;
		}
	}
	else if (strMemberName == L"State")
	{
		if (!pstrValue)
		{
			pObject->resetValue_State();
		}
		else
		{
			pObject->setState(EnumStateTypeImpl::parseString(pstrValue->c_str(), pstrValue->length()));
			bValueSet = true;
		}
	}
	else if (strMemberName == L"PntRef")
	{
		if (!pstrValue)
		{
			pObject->resetValue_PntRef();
		}
		else
		{
			pObject->setPntRef(StringObjectImpl::parseString(pstrValue->c_str(), pstrValue->length()));
			bValueSet = true;
		}
	}
	else if (strMemberName == L"PointGeometry")
	{
		if (!pstrValue)
		{
			pObject->resetValue_PointGeometry();
		}
		else
		{
			pObject->setPointGeometry(EnumPointGeometryTypeImpl::parseString(pstrValue->c_str(), pstrValue->length()));
			bValueSet = true;
		}
	}
	else if (strMemberName == L"DTMAttribute")
	{
		if (!pstrValue)
		{
			pObject->resetValue_DTMAttribute();
		}
		else
		{
			pObject->setDTMAttribute(EnumDTMAttributeTypeImpl::parseString(pstrValue->c_str(), pstrValue->length()));
			bValueSet = true;
		}
	}
	return bValueSet;
}
示例15: numPoints
void DataPointsFiltersImpl<T>::VoxelGridDataPointsFilter::inPlaceFilter(DataPoints& cloud)
{
const int numPoints(cloud.features.cols());
const int featDim(cloud.features.rows());
const int descDim(cloud.descriptors.rows());
assert (featDim == 3 || featDim == 4);
int insertDim(0);
if (averageExistingDescriptors)
{
// TODO: this should be in the form of an assert
// Validate descriptors and labels
for(unsigned int i = 0; i < cloud.descriptorLabels.size(); i++)
insertDim += cloud.descriptorLabels[i].span;
if (insertDim != descDim)
throw InvalidField("VoxelGridDataPointsFilter: Error, descriptor labels do not match descriptor data");
}
// TODO: Check that the voxel size is not too small, given the size of the data
// Calculate number of divisions along each axis
Vector minValues = cloud.features.rowwise().minCoeff();
Vector maxValues = cloud.features.rowwise().maxCoeff();
T minBoundX = minValues.x() / vSizeX;
T maxBoundX = maxValues.x() / vSizeX;
T minBoundY = minValues.y() / vSizeY;
T maxBoundY = maxValues.y() / vSizeY;
T minBoundZ = 0;
T maxBoundZ = 0;
if (featDim == 4)
{
minBoundZ = minValues.z() / vSizeZ;
maxBoundZ = maxValues.z() / vSizeZ;
}
// number of divisions is total size / voxel size voxels of equal length + 1
// with remaining space
unsigned int numDivX = 1 + maxBoundX - minBoundX;
unsigned int numDivY = 1 + maxBoundY - minBoundY;;
unsigned int numDivZ = 0;
// If a 3D point cloud
if (featDim == 4 )
numDivZ = 1 + maxBoundZ - minBoundZ;
unsigned int numVox = numDivX * numDivY;
if ( featDim == 4)
numVox *= numDivZ;
// Assume point cloud is randomly ordered
// compute a linear index of the following type
// i, j, k are the component indices
// nx, ny number of divisions in x and y components
// idx = i + j * nx + k * nx * ny
std::vector<unsigned int> indices(numPoints);
// vector to hold the first point in a voxel
// this point will be ovewritten in the input cloud with
// the output value
std::vector<Voxel>* voxels;
// try allocating vector. If too big return error
try {
voxels = new std::vector<Voxel>(numVox);
} catch (std::bad_alloc&) {
throw InvalidParameter((boost::format("VoxelGridDataPointsFilter: Memory allocation error with %1% voxels. Try increasing the voxel dimensions.") % numVox).str());
}
for (int p = 0; p < numPoints; p++ )
{
unsigned int i = floor(cloud.features(0,p)/vSizeX - minBoundX);
unsigned int j = floor(cloud.features(1,p)/vSizeY- minBoundY);
unsigned int k = 0;
unsigned int idx;
if ( featDim == 4 )
{
k = floor(cloud.features(2,p)/vSizeZ - minBoundZ);
idx = i + j * numDivX + k * numDivX * numDivY;
}
else
{
idx = i + j * numDivX;
}
unsigned int pointsInVox = (*voxels)[idx].numPoints + 1;
if (pointsInVox == 1)
{
(*voxels)[idx].firstPoint = p;
}
(*voxels)[idx].numPoints = pointsInVox;
indices[p] = idx;
//.........这里部分代码省略.........