This article collects typical usage examples of the C++ method VO_Shape::GetA2DPoint. If you are wondering how VO_Shape::GetA2DPoint is used in practice, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing class VO_Shape.
Seven code examples of VO_Shape::GetA2DPoint are shown below, sorted by popularity by default.
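As the examples below show, GetA2DPoint(i) returns the i-th landmark of a shape as a cv::Point2f, and SetA2DPoint(pt, i) writes it back. The following is a minimal sketch of that read-modify-write pattern; it assumes only the VO_Shape accessors that appear in the examples on this page (GetNbOfPoints, GetA2DPoint, SetA2DPoint) and the corresponding VOSM header being available.

// Minimal sketch: shift every landmark of a shape by a fixed offset.
// Assumes the VO_Shape accessors used throughout the examples below.
void ShiftShape(VO_Shape& ioShape, float dx, float dy)
{
    unsigned int NbOfPoints = ioShape.GetNbOfPoints();
    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        cv::Point2f pt = ioShape.GetA2DPoint(i);    // read the i-th 2D landmark
        pt.x += dx;
        pt.y += dy;
        ioShape.SetA2DPoint(pt, i);                 // write the modified point back
    }
}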
Example 1: VO_CMUInverseCompositional
/**
 * @author      Yao Wei
 * @brief       CMU inverse compositional update
 * @param       matDeltaP   Input  -- delta p, the shape parameter increments
 * @param       matDeltaQ   Input  -- delta q, the global transform parameter increments
 * @param       s           Input  -- the current shape
 * @param       estShape    Output -- the shape newly estimated by the inverse compositional update
 */
void VO_FittingAAMInverseIA::VO_CMUInverseCompositional( const Mat_<float>& matDeltaP,
                                                         const Mat_<float>& matDeltaQ,
                                                         const VO_Shape& s,
                                                         VO_Shape& estShape)
{
    VO_Shape S0;
    this->VO_PParamQParam2ModelAlignedShape( matDeltaP, matDeltaQ, S0);
//    cvConvertScale(dpq, __inv_pq, -1);
//    __shape.CalcShape(__inv_pq, __update_s0);    // __update_s0 = N.W(s0, -delta_p, -delta_q)

    // Secondly: composing the incremental warp with the current warp estimate.
    Point2f res, tmp;
    int count = 0;
    vector<unsigned int> vertexIdxes;

    for(unsigned int i = 0; i < this->m_VOAAMInverseIA->m_iNbOfPoints; i++)
    {
        res.x = 0.0;  res.y = 0.0;
        count = 0;
        // The only question with this approach is which triangle to use:
        // in general there will be several triangles that share the i-th vertex.
        for(unsigned int j = 0; j < this->m_VOAAMInverseIA->m_iNbOfTriangles; j++)    // see Figure (11)
        {
            if ( this->m_vTriangle2D[j].HasNode(i) )
            {
                vertexIdxes = this->m_vTriangle2D[j].GetVertexIndexes();

                VO_WarpingPoint::WarpOnePoint(  S0.GetA2DPoint(i),
                                                this->m_vTriangle2D[j],
                                                tmp,
                                                s.GetA2DPoint(vertexIdxes[0]),
                                                s.GetA2DPoint(vertexIdxes[1]),
                                                s.GetA2DPoint(vertexIdxes[2]) );
                res.x += tmp.x;
                res.y += tmp.y;
                count++;
            }
        }

        // Average the result so as to smooth the warp at each vertex.
        if(count == 0)
        {
            cerr << "CMU inverse compositional: vertex " << i << " is not shared by any triangle!" << endl;
            estShape.SetA2DPoint(S0.GetA2DPoint(i), i);    // fall back to the un-composed point to avoid dividing by zero
            continue;
        }
        res.x /= count;
        res.y /= count;
        estShape.SetA2DPoint(res, i);
    }
}
Example 2: VO_ConstrainAllPoints
/**
 * @author      JIA Pei
 * @version     2010-06-07
 * @brief       Constrain all points respectively
 * @param       ioShape     Input and Output -- the input and output shape
 */
void VO_Point2DDistributionModel::VO_ConstrainAllPoints(VO_Shape& ioShape)
{
    unsigned int NbOfPoints = ioShape.GetNbOfPoints();
    Point2f pt;

    for(unsigned int i = 0; i < NbOfPoints; i++)
    {
        pt = ioShape.GetA2DPoint(i);
        VO_Point2DDistributionModel::VO_ConstrainSinglePoint( pt, this->m_VONormalizedEllipses[i] );
        ioShape.SetA2DPoint(pt, i);
    }
}
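VO_ConstrainSinglePoint itself is not shown on this page. Purely for illustration, one common way to constrain a 2D point is to clamp it onto an axis-aligned ellipse; the sketch below shows that generic idea (cx, cy, a, b are assumed ellipse parameters) and is not claimed to match the library's actual per-point distribution model.

#include <cmath>
#include <opencv2/core/core.hpp>

// Generic illustration: clamp pt onto (or inside) the axis-aligned ellipse
// ((x-cx)/a)^2 + ((y-cy)/b)^2 <= 1.  Parameters cx, cy, a, b are assumptions,
// not the library's VO_Point2DDistributionModel representation.
static void ClampPointToEllipse(cv::Point2f& pt, float cx, float cy, float a, float b)
{
    float dx = (pt.x - cx) / a;
    float dy = (pt.y - cy) / b;
    float d  = std::sqrt(dx*dx + dy*dy);    // radial distance in normalized coordinates
    if (d > 1.0f)                           // outside the ellipse: pull back onto its boundary
    {
        pt.x = cx + (pt.x - cx) / d;
        pt.y = cy + (pt.y - cy) / d;
    }
}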
Example 3: WritePTS
/**
 * @author      JIA Pei
 * @version     2010-02-07
 * @brief       Write all annotation data in a VO_Shape to a file
 * @param       filename    Input -- name of the .pts annotation file to write
 * @param       iAAMShape   Input -- the AAM shape whose annotation data is saved
 */
void CAnnotationDBIO::WritePTS( const std::string& filename,
                                const VO_Shape& iAAMShape)
{
    std::fstream fp;
    fp.open(filename.c_str(), std::ios::out);

    unsigned int NbOfPoints = iAAMShape.GetNbOfPoints();

    fp << "version: 1" << std::endl
       << "n_points: " << NbOfPoints << std::endl
       << "{" << std::endl;
    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        fp << iAAMShape.GetA2DPoint(i).x << " " << iAAMShape.GetA2DPoint(i).y << std::endl;
    }
    fp << "}" << std::endl << std::endl;

    fp.close();
}
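For completeness, a matching reader for the .pts layout produced above ("version:", "n_points:", then one "x y" pair per line between braces) could look like the sketch below. ReadPTS is a hypothetical helper, not part of CAnnotationDBIO as shown on this page.

#include <fstream>
#include <string>
#include <vector>
#include <opencv2/core/core.hpp>

// Hypothetical reader for the .pts format written by WritePTS above.
static std::vector<cv::Point2f> ReadPTS(const std::string& filename)
{
    std::vector<cv::Point2f> points;
    std::ifstream fp(filename.c_str());
    std::string token;
    unsigned int nPoints = 0;

    fp >> token >> token;          // "version:" "1"
    fp >> token >> nPoints;        // "n_points:" N
    fp >> token;                   // "{"
    for (unsigned int i = 0; i < nPoints; i++)
    {
        cv::Point2f pt;
        fp >> pt.x >> pt.y;        // one "x y" pair per point
        points.push_back(pt);
    }
    return points;
}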
Example 4: VO_LoadLTC4OneAnnotatedPoint
/**
 * @author      JIA Pei
 * @version     2010-02-22
 * @brief       Build the LTC features (e.g. wavelets) for one annotated key point
 * @param       iImg        Input -- the concerned image
 * @param       theShape    Input -- the concerned shape
 * @param       ptIdx       Input -- index of the point in question
 * @param       imgSize     Input -- size of the image patch to extract
 * @param       vofeatures  Input -- the feature extractor used to generate the LTC
 * @param       shiftX      Input -- shift in X direction
 * @param       shiftY      Input -- shift in Y direction
 * @return      void
 */
void VO_ASMLTCs::VO_LoadLTC4OneAnnotatedPoint(  const cv::Mat& iImg,
                                                const VO_Shape& theShape,
                                                unsigned int ptIdx,
                                                cv::Size imgSize,
                                                VO_Features* vofeatures,
                                                int shiftX,
                                                int shiftY)
{
    cv::Point2f pt = theShape.GetA2DPoint(ptIdx);
    pt.x += shiftX;
    pt.y += shiftY;

    cv::Rect rect    = VO_ASMLTCs::VO_CalcImagePatchRect(iImg, pt, imgSize);
    cv::Mat imgPatch = iImg(rect);
    vofeatures->VO_GenerateAllFeatures(imgPatch);
}
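VO_CalcImagePatchRect is not listed on this page. As an illustration only, a patch rectangle centered on a landmark and kept inside the image borders can be computed as in the generic sketch below; this is not claimed to be the library's actual implementation.

#include <algorithm>
#include <opencv2/core/core.hpp>

// Generic illustration: a patchSize rectangle centered at pt, shifted as needed
// so that it stays fully inside the image.  Not the actual VO_CalcImagePatchRect.
static cv::Rect CenteredPatchRect(const cv::Mat& img, const cv::Point2f& pt, cv::Size patchSize)
{
    int x = cvRound(pt.x) - patchSize.width  / 2;
    int y = cvRound(pt.y) - patchSize.height / 2;

    // Clamp the top-left corner so the whole patch lies within the image.
    x = std::max(0, std::min(x, img.cols - patchSize.width));
    y = std::max(0, std::min(y, img.rows - patchSize.height));

    return cv::Rect(x, y, patchSize.width, patchSize.height);
}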
Example 5: VO_DrawMesh
/**
 * @author      JIA Pei
 * @version     2010-05-07
 * @brief       Draw the shape mesh (edges and key points) on the image
 * @param       iShape      Input -- the input shape
 * @param       iModel      Input -- the model that provides the edge list
 * @param       ioImg       Input and Output -- the image
 * @return      void
 */
void VO_Fitting2DSM::VO_DrawMesh(const VO_Shape& iShape, const VO_AXM* iModel, cv::Mat& ioImg)
{
    cv::Point iorg, idst;
    std::vector<VO_Edge> edges = iModel->GetEdge();
    unsigned int NbOfEdges = iModel->GetNbOfEdges();

    for (unsigned int i = 0; i < NbOfEdges; i++)
    {
        iorg = cvPointFrom32f( iShape.GetA2DPoint( edges[i].GetIndex1() ) );
        idst = cvPointFrom32f( iShape.GetA2DPoint( edges[i].GetIndex2() ) );
        // Edge
        cv::line( ioImg, iorg, idst, colors[8], 1, 0, 0 );
        // Key points
        cv::circle( ioImg, iorg, 2, colors[0], -1, 8, 0 );
        cv::circle( ioImg, idst, 2, colors[0], -1, 8, 0 );
    }
}
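cvPointFrom32f comes from OpenCV's legacy C API. Using the C++ API only, the same float-to-integer point conversion can be written with cvRound, as in this small sketch. DrawShapePoints is a hypothetical helper (the colors[] array of the example is replaced by an explicit cv::Scalar), and the VO_Shape header is assumed to be available.

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Hypothetical helper: draw every landmark of a shape using only the C++ API.
static void DrawShapePoints(const VO_Shape& iShape, cv::Mat& ioImg)
{
    unsigned int NbOfPoints = iShape.GetNbOfPoints();
    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        cv::Point2f pt = iShape.GetA2DPoint(i);
        cv::Point ipt( cvRound(pt.x), cvRound(pt.y) );            // round to integer pixel coordinates
        cv::circle( ioImg, ipt, 2, cv::Scalar(0, 255, 0), -1 );   // filled dot at the landmark
    }
}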
Example 6: UpdateShape
/**
 * @author      YAO Wei, JIA Pei
 * @version     2010-05-20
 * @brief       Find the best offset for each point and update the shape accordingly
 * @param       asmmodel        Input -- the ASM model
 * @param       iImg            Input -- image to be fitted
 * @param       ioShape         Input and Output -- the input and output shape
 * @param       iShapeInfo      Input -- the shape information
 * @param       iMean           Input -- mean profile
 * @param       iCovInverse     Input -- inverse of the profile covariance
 * @param       offSetTolerance Input -- offset tolerance, used to determine whether a point has converged
 * @param       profdim         Input -- the profile dimension to use when updating the shape.
 *                              Sometimes the trained data holds 4D profiles, but the user may only use 1D for testing.
 * @note        Refer to "AAM Revisited", page 34, figure 13, particularly those steps.
 */
int VO_FittingASMNDProfiles::UpdateShape(   const VO_ASMNDProfiles* asmmodel,
                                            const cv::Mat& iImg,
                                            VO_Shape& ioShape,
                                            const std::vector<VO_Shape2DInfo>& iShapeInfo,
                                            const std::vector< VO_Profile >& iMean,
                                            const std::vector< std::vector< cv::Mat_<float> > >& iCovInverse,
                                            unsigned int offSetTolerance,
                                            unsigned int profdim)
{
    int nGoodLandmarks = 0;
    std::vector<int> nBestOffset(profdim, 0);
    unsigned int NbOfPoints     = ioShape.GetNbOfPoints();
    unsigned int NbOfShapeDim   = ioShape.GetNbOfDim();
    unsigned int ProfileLength  = iMean[0].GetProfileLength();
    //std::vector<float> dists(NbOfPoints, 0.0f);
    cv::Point2f pt;

    // Take care of the 1st direction first.
    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        /////////////////////////////////////////////////////////////////////////////
        /// Calculate the profile normal direction ///////////////////////////////////
        /** Here, this is not compatible with 3D */
        cv::Point2f PrevPoint = ioShape.GetA2DPoint ( iShapeInfo[i].GetFrom() );
        cv::Point2f ThisPoint = ioShape.GetA2DPoint ( i );
        cv::Point2f NextPoint = ioShape.GetA2DPoint ( iShapeInfo[i].GetTo() );

        float deltaX, deltaY;
        float normX, normY;
        float sqrtsum;
        float bestXOffset, bestYOffset;

        // Left side (connected "from" side)
        deltaX = ThisPoint.x - PrevPoint.x;
        deltaY = ThisPoint.y - PrevPoint.y;
        sqrtsum = sqrt ( deltaX*deltaX + deltaY*deltaY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        deltaX /= sqrtsum;  deltaY /= sqrtsum;      // normalize

        // Firstly, normX and normY record the left-side normal.
        normX = -deltaY;
        normY =  deltaX;

        // Right side (connected "to" side)
        deltaX = NextPoint.x - ThisPoint.x;
        deltaY = NextPoint.y - ThisPoint.y;
        sqrtsum = sqrt ( deltaX*deltaX + deltaY*deltaY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        deltaX /= sqrtsum;  deltaY /= sqrtsum;      // normalize

        // Secondly, normX and normY accumulate both the left-side and right-side normals.
        normX += -deltaY;
        normY +=  deltaX;

        // Average the left and right sides.
        sqrtsum = sqrt ( normX*normX + normY*normY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        normX /= sqrtsum;
        normY /= sqrtsum;                           // final normalization
        /////////////////////////////////////////////////////////////////////////////

        nBestOffset[0] = VO_FittingASMNDProfiles::VO_FindBestMatchingProfile1D( iImg,
                                                                                ThisPoint,
                                                                                iMean[i].Get1DimProfile(0),
                                                                                iCovInverse[i][0],
                                                                                ProfileLength,
                                                                                offSetTolerance,
                                                                                normX,
                                                                                normY);

        // Set ioShape(i) to the best offset from the current position.
        // One-dimensional profile: the point must move along the whisker.
        bestXOffset = nBestOffset[0] * normX;
        bestYOffset = nBestOffset[0] * normY;
        pt.x = ThisPoint.x + bestXOffset;
        pt.y = ThisPoint.y + bestYOffset;
        ioShape.SetA2DPoint(pt, i);
        //dists[i] = sqrt( pow( (double)bestXOffset, 2.0) + pow( (double)bestYOffset, 2.0) );

        //if (abs(nBestOffset[0]) <= offSetTolerance/2)
        if(profdim == 1)
        {
            if (abs(nBestOffset[0]) <= 1)
                nGoodLandmarks++;
        }
    }
//......... part of the code omitted here .........
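The normal direction computed above (the average of the normals of the two edges meeting at a landmark) is what the 1D profile search moves along. For reference, the same computation in isolation as a small standalone function; this is a sketch for illustration, not part of the library.

#include <cfloat>
#include <cmath>
#include <opencv2/core/core.hpp>

// Illustration only: unit normal at a landmark, averaged from the normals of the
// two edges (prev -> cur) and (cur -> next), matching the computation in UpdateShape.
static cv::Point2f LandmarkNormal(const cv::Point2f& prev, const cv::Point2f& cur, const cv::Point2f& next)
{
    cv::Point2f d1 = cur - prev;
    cv::Point2f d2 = next - cur;
    float n1 = std::sqrt(d1.x*d1.x + d1.y*d1.y);   if (n1 < FLT_EPSILON) n1 = 1.0f;
    float n2 = std::sqrt(d2.x*d2.x + d2.y*d2.y);   if (n2 < FLT_EPSILON) n2 = 1.0f;

    // Rotate each unit edge direction by 90 degrees and sum the results.
    cv::Point2f n(-d1.y/n1 - d2.y/n2, d1.x/n1 + d2.x/n2);
    float len = std::sqrt(n.x*n.x + n.y*n.y);      if (len < FLT_EPSILON) len = 1.0f;
    return cv::Point2f(n.x/len, n.y/len);
}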
Example 7: CalcFaceKeyPoint
/**
 * @brief       Calculate some key points on the face
 * @param       oPoint      Output -- the calculated key point
 * @param       iShape      Input -- the shape
 * @param       iFaceParts  Input -- the face parts
 * @param       ptType      Input -- the requested key point type
 * @return      void
 */
void VO_KeyPoint::CalcFaceKeyPoint( cv::Point2f& oPoint,
                                    const VO_Shape& iShape,
                                    const VO_FaceParts& iFaceParts,
                                    unsigned int ptType)
{
    std::vector<unsigned int> facePartsPoints;
    VO_Shape subiShape;
    // Very important, explained by JIA Pei:
    // "resize()" only resizes; it doesn't necessarily reset what is already
    // inside the std::vector to 0.  Therefore, clear() is a must before resize().
    switch(ptType)
    {
    case CENTEROFGRAVITY:
        if (iShape.GetNbOfPoints() > 0)
            oPoint = iShape.GetA2DPoint( VO_Shape::CENTER );
        break;
    case LEFTEYELEFTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::LEFTMOST);
            }
        }
        break;
    case LEFTEYERIGHTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::RIGHTMOST);
            }
        }
        break;
    case LEFTEYECENTER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint( VO_Shape::CENTER );
            }
        }
        break;
    case RIGHTEYELEFTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::LEFTMOST);
            }
        }
        break;
    case RIGHTEYERIGHTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::RIGHTMOST);
            }
        }
        break;
    case RIGHTEYECENTER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint( VO_Shape::CENTER );
            }
        }
        break;
    case NOSETIPKEY:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSETIP).GetIndexes();    // just one point
            if (facePartsPoints.size() == 1)
                oPoint = iShape.GetA2DPoint(facePartsPoints[0]);
        }
        break;
    case NOSTRILLEFT:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
//......... part of the code omitted here .........
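A typical call might look like the sketch below. It assumes CalcFaceKeyPoint can be invoked as a static member (otherwise call it on a VO_KeyPoint instance), that "shape" and "faceParts" are an already loaded VO_Shape and VO_FaceParts, and that the point-type constants are visible exactly as used in the switch above.

// Hypothetical usage: compute the centers of both eyes from a fitted shape.
cv::Point2f leftEyeCenter, rightEyeCenter;
VO_KeyPoint::CalcFaceKeyPoint( leftEyeCenter,  shape, faceParts, LEFTEYECENTER );
VO_KeyPoint::CalcFaceKeyPoint( rightEyeCenter, shape, faceParts, RIGHTEYECENTER );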