This article collects typical code examples of the C++ method ScalarField::resize. If you are unsure what ScalarField::resize does, how to call it, or what real-world uses look like, the hand-picked examples below should help; they are also a good starting point for exploring the ScalarField class itself.
Below are 4 code examples of ScalarField::resize, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
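Before diving into the examples, here is a minimal sketch of the basic resize pattern they all rely on: check the boolean return value (it signals an allocation failure) and release the reference-counted field when done. It assumes CCLib, the CloudCompare core library these examples appear to be taken from; the header name and the createAndFillField helper are illustrative only, not library code.

#include <ScalarField.h>

using namespace CCLib;

//a minimal sketch, assuming CCLib's ScalarField; not part of the library
bool createAndFillField(unsigned pointCount)
{
	ScalarField* sf = new ScalarField("Intensity");

	//resize returns false on allocation failure; the optional arguments
	//request that the new elements be initialized with a default value
	if (!sf->resize(pointCount, true, 0))
	{
		sf->release(); //ScalarField is reference-counted: release() instead of delete
		return false;
	}

	for (unsigned i = 0; i < pointCount; ++i)
		sf->setValue(i, static_cast<ScalarType>(i));

	sf->release(); //drop our reference once done
	return true;
}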
Example 1: enableScalarField

bool ChunkedPointCloud::enableScalarField()
{
	ScalarField* currentInScalarField = getCurrentInScalarField();
	if (!currentInScalarField)
	{
		//if we get here, it means that either the caller has forgotten to create
		//(and assign) a scalar field to the cloud, or that we are in a compatibility
		//mode with the old/basic behaviour: a unique SF for everything (input/output)

		//we look for any already existing "default" scalar field
		m_currentInScalarFieldIndex = getScalarFieldIndexByName("Default");
		if (m_currentInScalarFieldIndex < 0)
		{
			//if there is none, we create it
			m_currentInScalarFieldIndex = addScalarField("Default");
			if (m_currentInScalarFieldIndex < 0) //something went wrong
				return false;
		}

		currentInScalarField = getCurrentInScalarField();
		assert(currentInScalarField);
	}

	//if there's no output scalar field either, we set this new scalar field as output as well
	if (!getCurrentOutScalarField())
		m_currentOutScalarFieldIndex = m_currentInScalarFieldIndex;

	return currentInScalarField->resize(m_points->capacity());
}
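A hypothetical caller sketch for the method above: enableScalarField guarantees that an input scalar field exists and is sized to the cloud's capacity, after which per-point values can be written. The fillDefaultField helper, the written value and the setPointScalarValue setter call are assumptions for illustration.

//hypothetical caller, not part of the library
bool fillDefaultField(ChunkedPointCloud& cloud)
{
	if (!cloud.enableScalarField())
		return false; //the "Default" scalar field could not be created or resized

	for (unsigned i = 0; i < cloud.size(); ++i)
		cloud.setPointScalarValue(i, static_cast<ScalarType>(0)); //write each point's value

	return true;
}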
Example 2: addScalarField

int ChunkedPointCloud::addScalarField(const char* uniqueName)
{
	//we don't accept two SFs with the same name!
	if (getScalarFieldIndexByName(uniqueName) >= 0)
		return -1;

	//create the requested scalar field
	ScalarField* sf = new ScalarField(uniqueName);
	if (!sf || (size() && !sf->resize(size())))
	{
		//not enough memory!
		if (sf)
			sf->release();
		return -1;
	}

	try
	{
		//we don't want 'm_scalarFields' to grow by 50% each time! (default behavior of std::vector::push_back)
		m_scalarFields.resize(m_scalarFields.size() + 1);
	}
	catch (const std::bad_alloc&) //out of memory
	{
		sf->release();
		return -1;
	}

	m_scalarFields.back() = sf;
	sf->link();

	return (int)m_scalarFields.size() - 1;
}
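addScalarField returns the index of the new field, or -1 if the name is already taken or the allocation fails. Below is a hedged sketch of how a caller might use that index; the field name and the addRoughnessField helper are made up, and setCurrentInScalarField, getScalarField and currentSize are assumed to be the accessors exposed by ChunkedPointCloud and ScalarField.

//hypothetical caller, not part of the library
bool addRoughnessField(ChunkedPointCloud& cloud)
{
	int sfIdx = cloud.addScalarField("Roughness");
	if (sfIdx < 0)
		return false; //duplicate name, or not enough memory

	cloud.setCurrentInScalarField(sfIdx);          //make it the active input field
	ScalarField* sf = cloud.getScalarField(sfIdx); //direct access to the field

	//if points were added to the cloud afterwards, bring the field back in sync
	if (sf->currentSize() != cloud.size())
		return sf->resize(cloud.size());

	return true;
}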
Example 3: frontPropagationBasedSegmentation

bool AutoSegmentationTools::frontPropagationBasedSegmentation(GenericIndexedCloudPersist* theCloud,
							ScalarType minSeedDist,
							uchar octreeLevel,
							ReferenceCloudContainer& theSegmentedLists,
							GenericProgressCallback* progressCb,
							DgmOctree* inputOctree,
							bool applyGaussianFilter,
							float alpha)
{
	unsigned numberOfPoints = (theCloud ? theCloud->size() : 0);
	if (numberOfPoints == 0)
		return false;

	//compute the octree if none was provided
	DgmOctree* theOctree = inputOctree;
	if (!theOctree)
	{
		theOctree = new DgmOctree(theCloud);
		if (theOctree->build(progressCb) < 1)
		{
			delete theOctree;
			return false;
		}
	}

	//compute the gradient (this will overwrite the distance field)
	if (ScalarFieldTools::computeScalarFieldGradient(theCloud,true,true,progressCb,theOctree) < 0)
	{
		if (!inputOctree)
			delete theOctree;
		return false;
	}

	//and smooth the result
	if (applyGaussianFilter)
	{
		uchar level = theOctree->findBestLevelForAGivenPopulationPerCell(NUMBER_OF_POINTS_FOR_GRADIENT_COMPUTATION);
		PointCoordinateType cellSize = theOctree->getCellSize(level);
		ScalarFieldTools::applyScalarFieldGaussianFilter(static_cast<float>(cellSize/3),theCloud,-1,progressCb,theOctree);
	}

	unsigned seedPoints = 0;
	unsigned numberOfSegmentedLists = 0;

	//the propagation itself is done with Fast Marching
	FastMarchingForPropagation* fm = new FastMarchingForPropagation();
	fm->setJumpCoef(50.0);
	fm->setDetectionThreshold(alpha);

	int result = fm->init(theCloud,theOctree,octreeLevel);
	int octreeLength = OCTREE_LENGTH(octreeLevel)-1;
	if (result < 0)
	{
		if (!inputOctree)
			delete theOctree;
		delete fm;
		return false;
	}

	if (progressCb)
	{
		progressCb->reset();
		progressCb->setMethodTitle("FM Propagation");
		char buffer[256];
		sprintf(buffer,"Octree level: %i\nNumber of points: %u",octreeLevel,numberOfPoints);
		progressCb->setInfo(buffer);
		progressCb->start();
	}

	ScalarField* theDists = new ScalarField("distances");
	{
		ScalarType d = theCloud->getPointScalarValue(0);
		if (!theDists->resize(numberOfPoints,true,d))
		{
			//not enough memory
			if (!inputOctree)
				delete theOctree;
			delete fm;
			theDists->release();
			return false;
		}
	}

	unsigned maxDistIndex = 0, begin = 0;
	CCVector3 startPoint;

	while (true)
	{
		ScalarType maxDist = NAN_VALUE;

		//look for the first distance greater than or equal to 'minSeedDist'
		while (begin < numberOfPoints)
		{
			const CCVector3 *thePoint = theCloud->getPoint(begin);
			const ScalarType& theDistance = theDists->getValue(begin);
			++begin;

			//FIXME DGM: what happens if SF is negative?!
			//(note that 'begin' has already been incremented, so this reads the next point's SF value)
			if (theCloud->getPointScalarValue(begin) >= 0 && theDistance >= minSeedDist)
			{
//......... part of the code omitted here .........
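The call theDists->resize(numberOfPoints, true, d) above shows the three-argument overload: the field is grown to numberOfPoints elements and every newly created element is initialized with the value d. Here is a small sketch of that pattern in isolation, used to back up a cloud's current per-point scalar values; the backupSF name and the copy loop are assumptions, only the resize overload, setValue, getPointScalarValue and release() follow the example above.

//sketch only: snapshot the cloud's scalar values into a working field
ScalarField* backupSF = new ScalarField("distances.backup");
if (backupSF->resize(numberOfPoints, true, 0))
{
	for (unsigned i = 0; i < numberOfPoints; ++i)
		backupSF->setValue(i, theCloud->getPointScalarValue(i));
}
else
{
	backupSF->release(); //allocation failed
	backupSF = 0;
}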
Example 4: RegisterClouds

//......... part of the code omitted here .........
//			delete dataCloud;
//			dataCloud=0;
//		}

			unsigned n = dataCloud->size();
			if (!c->reserve(n) || !newCPSet->reserve(n) || (newdataWeights && !newdataWeights->reserve(n)))
			{
				//not enough memory
				delete c;
				delete newCPSet;
				if (newdataWeights)
					newdataWeights->release();
				result = ICP_ERROR_REGISTRATION_STEP;
				break;
			}

			//we keep only the points with "not too high" distances
			DistanceType maxDist = mu + 3.0f*sqrt(sigma2);
			unsigned realSize = 0;
			for (unsigned i=0; i<n; ++i)
			{
				unsigned index = dataCloud->getPointGlobalIndex(i);
				if (dataCloud->getAssociatedCloud()->getPointScalarValue(index) < maxDist)
				{
					c->addPointIndex(index);
					newCPSet->addPointIndex(CPSet->getPointGlobalIndex(i));
					if (newdataWeights)
						newdataWeights->addElement(_dataWeights->getValue(index));
					++realSize;
				}
			}

			//resize should be OK as we have called reserve first
			c->resize(realSize); //should always succeed as realSize <= n
			newCPSet->resize(realSize); //idem
			if (newdataWeights)
				newdataWeights->resize(realSize); //idem

			//replace the old structures with the new ones
			delete CPSet;
			CPSet = newCPSet;
			delete dataCloud;
			dataCloud = c;

			if (_dataWeights)
			{
				_dataWeights->release();
				_dataWeights = newdataWeights;
			}
		}
	}

	//update CPSet weights (if any)
	if (_modelWeights)
	{
		unsigned count = CPSet->size();
		assert(CPSetWeights);
		if (CPSetWeights->currentSize() != count)
		{
			if (!CPSetWeights->resize(count))
			{
				result = ICP_ERROR_REGISTRATION_STEP;
				break;
			}
		}

		for (unsigned i=0; i<count; ++i)
			CPSetWeights->setValue(i, _modelWeights->getValue(CPSet->getPointGlobalIndex(i)));