This article collects typical usage examples of the C++ method VO_Shape::GetNbOfDim. If you are wondering what VO_Shape::GetNbOfDim does in C++ or how to use it, the curated code examples below may help. You can also explore further usage of the containing class VO_Shape.
The following lists 8 code examples of the VO_Shape::GetNbOfDim method, sorted by popularity by default.
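Before the examples, here is a minimal sketch, not taken from the listing, of how GetNbOfDim is typically combined with GetNbOfPoints and the per-point accessor GetA2DPoint that appears in Example 8. The helper name PrintShapePoints and the printed output are illustrative only, and the OpenCV/VOSM headers declaring VO_Shape are assumed to be available.

#include <cstdio>
// Illustrative helper (not part of VOSM): print every point of a 2D shape.
void PrintShapePoints(const VO_Shape& shape)
{
    unsigned int nbOfDim    = shape.GetNbOfDim();      // 2 for planar shapes, 3 for 3D
    unsigned int nbOfPoints = shape.GetNbOfPoints();
    if(nbOfDim == 2)
    {
        for(unsigned int i = 0; i < nbOfPoints; i++)
        {
            cv::Point2f pt = shape.GetA2DPoint(i);     // per-point accessor, see Example 8
            printf("point %u: (%.2f, %.2f)\n", i, pt.x, pt.y);
        }
    }
}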
Example 1: CalcShapeFittingEffect
/**
 * @brief Calculate the shape fitting effect
 * @param refShape      - input   reference (ground-truth) shape
 * @param fittedShape   - input   fitting result
 * @param deviation     - output  deviation of fittedShape from refShape
 * @param ptErrorFreq   - output  point error frequency per evaluation level
 * @param nb            - input   number of evaluation levels to use
 * @param ptErrPerPoint - output  optional per-point errors (may be NULL)
 * @return void
 */
void CRecognitionAlgs::CalcShapeFittingEffect( const VO_Shape& refShape,
                                               const VO_Shape& fittedShape,
                                               float& deviation,
                                               vector<float>& ptErrorFreq,
                                               int nb,
                                               vector<float>* ptErrPerPoint)
{
    assert(refShape.GetNbOfDim() == fittedShape.GetNbOfDim());
    assert(refShape.GetNbOfPoints() == fittedShape.GetNbOfPoints());
    unsigned int NbOfShapeDim = refShape.GetNbOfDim();
    unsigned int NbOfPoints   = refShape.GetNbOfPoints();
    ptErrorFreq.assign(nb, 0.0f);   // one bin per evaluation level, zero-initialized

    // Euclidean distance between corresponding points of the two shapes
    vector<float> ptDists(NbOfPoints, 0.0f);
    for(unsigned int i = 0; i < NbOfPoints; i++)
    {
        ptDists[i] = 0.0f;
        for(unsigned int j = 0; j < NbOfShapeDim; j++)
        {
            ptDists[i] += pow(refShape.GetAShape(j*NbOfPoints+i) - fittedShape.GetAShape(j*NbOfPoints+i), 2.0f);
        }
        ptDists[i] = sqrt(ptDists[i]);
    }

    // ptErrorFreq[i] = fraction of points whose error is below i pixels
    for(int i = 0; i < nb; i++)
    {
        for (unsigned int j = 0; j < NbOfPoints; j++)
        {
            if (ptDists[j] < i)
            {
                ptErrorFreq[i]++;
            }
        }
        ptErrorFreq[i] /= static_cast<float>(NbOfPoints);
    }

    float sumPtDist = 0.0f;
    for(unsigned int i = 0; i < NbOfPoints; ++i)
    {
        sumPtDist += ptDists[i];
    }
    printf("Avg ptDists = %f\n", sumPtDist/NbOfPoints);

    deviation = CRecognitionAlgs::ShapeDistance(refShape, fittedShape);
    if(ptErrPerPoint != 0)
    {
        (*ptErrPerPoint) = ptDists;
    }
}
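A hypothetical call site for the method above, not part of the original listing. It assumes refShape comes from a ground-truth annotation, fittedShape from a fitting run, and that CalcShapeFittingEffect can be called as a static member, which the class-qualified call to ShapeDistance inside it suggests.

// Illustrative usage only; the helper name EvaluateFit and the value 20 are placeholders.
void EvaluateFit(const VO_Shape& refShape, const VO_Shape& fittedShape)
{
    float deviation = 0.0f;
    std::vector<float> ptErrorFreq;
    std::vector<float> ptErrPerPoint;
    CRecognitionAlgs::CalcShapeFittingEffect(refShape, fittedShape,
                                             deviation, ptErrorFreq,
                                             20,               // 20 evaluation levels (pixels)
                                             &ptErrPerPoint);  // optional per-point errors
    printf("deviation = %f, fraction of points within 5 pixels = %f\n",
           deviation, ptErrorFreq[5]);
}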
Example 2: CalcFaceYaw
float CRecognitionAlgs::CalcFaceYaw(const vector<float>& iLine,
                                    const VO_Shape& iShape,
                                    const VO_FaceParts& iFaceParts)
{
    float yaw = 0.0f;
    int dim = iShape.GetNbOfDim();

    // Theoretically, using eye corner is correct, but it's not stable at all.
    // Therefore, here we use COG_left and COG_right instead.
    ///////////////////////////////////////////////////////////////////////////////
    // float leftDist = 0.0f, rightDist = 0.0f;
    // vector<unsigned int> eyeCornerPoints = iFaceParts.GetEyeCornerPoints().GetIndexes();
    // Point2f leftmostEyeCorner = Point2f(FLT_MAX, 0.0f);
    // Point2f rightmostEyeCorner = Point2f(0.0f, 0.0f);
    //
    // for(unsigned int i = 0; i < eyeCornerPoints.size(); ++i)
    // {
    //     if(leftmostEyeCorner.x > iShape.GetAShape(dim*eyeCornerPoints[i]) )
    //     {
    //         leftmostEyeCorner.x = iShape.GetAShape(dim*eyeCornerPoints[i]);
    //         leftmostEyeCorner.y = iShape.GetAShape(dim*eyeCornerPoints[i]+1);
    //     }
    //     if(rightmostEyeCorner.x < iShape.GetAShape(dim*eyeCornerPoints[i]) )
    //     {
    //         rightmostEyeCorner.x = iShape.GetAShape(dim*eyeCornerPoints[i]);
    //         rightmostEyeCorner.y = iShape.GetAShape(dim*eyeCornerPoints[i]+1);
    //     }
    // }
    // leftDist = cvDistFromAPoint2ALine2D(leftmostEyeCorner, iLine);
    // rightDist = cvDistFromAPoint2ALine2D(rightmostEyeCorner, iLine);
    // float r = leftDist/rightDist;
    // Refer to my PhD dissertation. Chapter 4
    // yaw = atan ( ( 0.65*(r-1) ) / ( 0.24 * (r+1) ) ) * 180.0f / CV_PI;
    ///////////////////////////////////////////////////////////////////////////////
    float leftDist = 0.0f, rightDist = 0.0f;
    vector<unsigned int> leftSidePoints  = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTSIDEPOINTS).GetIndexes();
    vector<unsigned int> rightSidePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTSIDEPOINTS).GetIndexes();
    for(unsigned int i = 0; i < leftSidePoints.size(); ++i)
    {
        leftDist += cvDistFromAPoint2ALine2D(Point2f(iShape.GetAShape(dim*leftSidePoints[i]), iShape.GetAShape(dim*leftSidePoints[i]+1)), iLine);
    }
    for(unsigned int i = 0; i < rightSidePoints.size(); ++i)
    {
        rightDist += cvDistFromAPoint2ALine2D(Point2f(iShape.GetAShape(dim*rightSidePoints[i]), iShape.GetAShape(dim*rightSidePoints[i]+1)), iLine);
    }
    float r = leftDist/rightDist;
    // Refer to my PhD dissertation. Chapter 4
    // yaw = atan ( ( 0.65*(r-1) ) / ( 0.24 * (r+1) ) ) * 180.0f / CV_PI;
    yaw = atan( (r-1) / (r+1) ) * safeDoubleToFloat(180.0 / CV_PI);
    return yaw;
}
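The yaw computation above relies on the project helper cvDistFromAPoint2ALine2D, whose body is not shown in this listing. Given that Example 5 below stores the line as the coefficients of Ax + By + C = 0, a plausible sketch of such a point-to-line distance is shown here. This is an assumption about the helper's semantics, not the library's actual implementation.

#include <cmath>
// Assumed semantics only: distance from point pt to the line A*x + B*y + C = 0,
// with the line stored as {A, B, C} in iLine, matching Example 5's oLine layout.
float DistFromPointToLine2D(const cv::Point2f& pt, const std::vector<float>& iLine)
{
    float A = iLine[0], B = iLine[1], C = iLine[2];
    return std::fabs(A*pt.x + B*pt.y + C) / std::sqrt(A*A + B*B);
}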
Example 3: SaveShapeResults
/**
 * @param fd          - input  folder name
 * @param fnIdx       - input  file name index for the result file
 * @param deviation   - input  deviation of fittedShape from the reference shape
 * @param ptDists     - input  per-point distances from the ground truth
 * @param ptErrorFreq - input  point error frequency, for plotting the error curve
 * @param fittedShape - input  fitting result
 * @return void
 */
void CRecognitionAlgs::SaveShapeResults( const string& fd,
                                         const string& fnIdx,
                                         float deviation,
                                         vector<float>& ptDists,
                                         vector<float>& ptErrorFreq,
                                         const VO_Shape& fittedShape)
{
    string fn;
    fn = fd + "/" + fnIdx + ".res";
    fstream fp;
    fp.open(fn.c_str(), ios::out);

    fp << "Error per point -- Distance from ground truth" << endl;
    for(unsigned int i = 0; i < ptDists.size(); ++i)
    {
        fp << ptDists[i] << endl;
    }
    fp << endl;

    fp << "Total landmark error" << endl;
    float errSum = std::accumulate(ptDists.begin(), ptDists.end(), 0.0f);
    fp << errSum << endl;
    fp << "Average landmark distance" << endl;
    fp << errSum / ptDists.size() << endl;
    fp << endl;

    fp << "Total Deviation" << endl << deviation << endl; // deviation
    fp << "Point Error -- Frequency" << endl;
    for(unsigned int i = 0; i < ptErrorFreq.size(); i++)
    {
        fp << ptErrorFreq[i] << " ";
    }
    fp << endl;
    fp << endl;

    fp << "Fitted points" << endl;
    // output actual points along with error frequency
    unsigned int NbOfShapeDim = fittedShape.GetNbOfDim();
    unsigned int NbOfPoints   = fittedShape.GetNbOfPoints();
    for(unsigned int i = 0; i < NbOfPoints; i++)
    {
        for(unsigned int j = 0; j < NbOfShapeDim; j++)
        {
            fp << fittedShape.GetAShape(j*NbOfPoints+i) << " ";
        }
        fp << endl;
    }
    fp << endl;

    fp.close();
    fp.clear();
}
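A hypothetical way to chain Example 1 and Example 3, not from the original listing: compute the per-point errors and frequencies, then write them to a result file. The folder "./results" and file index "img_0001" are illustrative placeholders, and both methods are assumed to be callable as static members.

// Illustrative usage only; names and paths are placeholders.
void EvaluateAndSave(const VO_Shape& refShape, const VO_Shape& fittedShape)
{
    float deviation = 0.0f;
    std::vector<float> ptErrorFreq, ptDists;
    CRecognitionAlgs::CalcShapeFittingEffect(refShape, fittedShape,
                                             deviation, ptErrorFreq,
                                             20, &ptDists);
    CRecognitionAlgs::SaveShapeResults("./results", "img_0001",
                                       deviation, ptDists, ptErrorFreq, fittedShape);
}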
Example 4: SplitShapeTextureParams
/**
 * @author      JIA Pei
 * @version     2016-08-24
 * @brief       Decompose a pair of shape and texture into shape parameters and texture parameters
 * @param       iPairShapeTexture   Input - the pair of shape and texture
 * @param       oShapeParams        Output - shape parameters
 * @param       oTextureParams      Output - texture parameters
 * @return      void
 */
void VO_AXM::SplitShapeTextureParams( const std::pair<VO_Shape, VO_Texture>& iPairShapeTexture,
                                      cv::Mat_<float>& oShapeParams,
                                      cv::Mat_<float>& oTextureParams )
{
    VO_Shape iShape     = iPairShapeTexture.first;
    VO_Texture iTexture = iPairShapeTexture.second;
    unsigned int NbOfShapeDim = iShape.GetNbOfDim();

    float tempNorm = 0.0f;
    std::vector<float> tempTheta;
    tempTheta.resize(NbOfShapeDim == 2 ? 1 : 3);
    cv::Mat_<float> tempCOG = cv::Mat_<float>::zeros(1, NbOfShapeDim);

    this->VO_CalcAllParams4AnyShapeWithConstrain(iShape, oShapeParams, tempNorm, tempTheta, tempCOG);
    this->VO_CalcAllParams4AnyTexture(iTexture, oTextureParams);
}
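A hypothetical caller of the method above, not in the original listing: project one training sample into the model's shape and texture parameter spaces. The helper name DescribeSample is an assumption, and the parameter matrices are assumed to be row vectors as suggested by the 1 x NbOfShapeDim tempCOG above.

// Illustrative usage only.
void DescribeSample(VO_AXM* axm, const std::pair<VO_Shape, VO_Texture>& sample)
{
    cv::Mat_<float> shapeParams, textureParams;
    axm->SplitShapeTextureParams(sample, shapeParams, textureParams);
    printf("shape params: %dx%d, texture params: %dx%d\n",
           shapeParams.rows, shapeParams.cols,
           textureParams.rows, textureParams.cols);
}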
Example 5: CalcFaceKeyline
/**
 * @brief Calculate a key line on the face
 * @param oLine      Output - the fitted line, stored as the coefficients of Ax+By+C = 0
 * @param iShape     Input  - the known shape
 * @param iFaceParts Input  - the face parts
 * @param oSubshape  Output - the subshape, namely the points the line is fitted to
 * @param partIdx    Input  - which face part to use
 * @return void
 */
void VO_KeyPoint::CalcFaceKeyline(
    std::vector<float>& oLine,
    const VO_Shape& iShape,
    const VO_FaceParts& iFaceParts,
    VO_Shape& oSubshape,
    unsigned int partIdx)
{
    oLine.resize(3);
    int dim = iShape.GetNbOfDim();
    cv::Vec4f line;
    std::vector<unsigned int> linePoints;

    switch(partIdx)
    {
    case VO_FacePart::NOSTRIL:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
        break;
    case VO_FacePart::MOUTHCORNERPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::MOUTHCORNERPOINTS).GetIndexes();
        break;
    case VO_FacePart::PITCHAXISLINEPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::PITCHAXISLINEPOINTS).GetIndexes();
        break;
    case VO_FacePart::EYECORNERPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::EYECORNERPOINTS).GetIndexes();
        break;
    case VO_FacePart::MIDLINEPOINTS:
    default:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::EYECORNERPOINTS).GetIndexes();
        break;
    }
    oSubshape = iShape.GetSubShape(linePoints);

    // Explained by JIA Pei: sometimes there are no linePoints at all, which means
    // the specified part is not annotated in this particular database.
    if(linePoints.size() >= 2)
    {
        cv::fitLine( oSubshape.GetTheShape(), line, CV_DIST_L2, 0, 0.001, 0.001 );
        // Ax + By + C = 0
        oLine[0] = -line[1];
        oLine[1] = line[0];
        oLine[2] = line[1]*line[2] - line[0]*line[3];
    }
}
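A hypothetical chain of Example 5 and Example 2, not part of the listing: build the key line for a face part first, then hand it to CalcFaceYaw. The helper name EstimateYawDeg is illustrative, and both methods are assumed to be callable as static members.

// Illustrative usage only.
float EstimateYawDeg(const VO_Shape& iShape, const VO_FaceParts& iFaceParts)
{
    std::vector<float> line;   // coefficients A, B, C of Ax + By + C = 0
    VO_Shape subshape;         // the points the line was fitted to
    VO_KeyPoint::CalcFaceKeyline(line, iShape, iFaceParts, subshape,
                                 VO_FacePart::MIDLINEPOINTS);
    return CRecognitionAlgs::CalcFaceYaw(line, iShape, iFaceParts);
}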
Example 6: CalcFacePitch
// Refer to my PhD thesis, Chapter 4
float CRecognitionAlgs::CalcFacePitch( const VO_Shape& iShape,
                                       const VO_FaceParts& iFaceParts)
{
    float pitch = 0.0f;
    int dim = iShape.GetNbOfDim();
    float NNQ, ENQ, EQ, NO;

    // Theoretically, using eye corners is correct, but it's not quite stable.
    // It's better to use the two nostrils first, if a nostril part is defined in the face parts.
    ///////////////////////////////////////////////////////////////////////////////
    // unsigned int nosetipBottom = 0;
    // vector<unsigned int> nosePoints      = iFaceParts.GetNose().GetIndexes();
    // vector<unsigned int> midlinePoints   = iFaceParts.GetMidlinePoints().GetIndexes();
    // vector<unsigned int> pitchAxisPoints = iFaceParts.GetPitchAxisLinePoints().GetIndexes();
    // VO_Shape nose, midLine, pitchAxis;
    // nose.SetDim(dim);
    // midLine.SetDim(dim);
    // pitchAxis.SetDim(dim);
    // nose.SetSize( nosePoints.size()*dim );
    // midLine.SetSize( midlinePoints.size()*dim );
    // pitchAxis.SetSize( pitchAxisPoints.size()*dim );
    //
    // for(unsigned int i = 0; i < nosePoints.size(); ++i)
    // {
    //     for(unsigned int j = 0; j < midlinePoints.size(); ++j)
    //     {
    //         if(nosePoints[i] == midlinePoints[j])
    //         {
    //             nosetipBottom = nosePoints[i];
    //             break;
    //         }
    //     }
    // }
    //
    // Point2f ntPoint = Point2f(iShape.GetAShape(dim*nosetipBottom), iShape.GetAShape(dim*nosetipBottom+1));
    // Point2f paPoint1 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[0]), iShape.GetAShape(dim*pitchAxisPoints[0]+1));
    // Point2f paPoint2 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[1]), iShape.GetAShape(dim*pitchAxisPoints[1]+1));
    //
    // float NNQ = ( (ntPoint.y - paPoint1.y) + (ntPoint.y - paPoint2.y) ) / 2.0f;
    // float ENQ = fabs(ntPoint.x - paPoint1.x) > fabs(paPoint2.x - ntPoint.x) ? fabs(ntPoint.x - paPoint1.x) : fabs(paPoint2.x - ntPoint.x);
    // float EQ = sqrt(ENQ*ENQ + NNQ*NNQ);
    // float NO = sqrt(2.0f)/2.0f*EQ;
    ///////////////////////////////////////////////////////////////////////////////
    vector<unsigned int> nostrilPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
    if(nostrilPoints.size() != 0)
    {
        vector<unsigned int> pitchAxisPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::PITCHAXISLINEPOINTS).GetIndexes();
        Point2f ntPoint1 = Point2f(iShape.GetAShape(dim*nostrilPoints[0]), iShape.GetAShape(dim*nostrilPoints[0]+1));
        Point2f ntPoint2 = Point2f(iShape.GetAShape(dim*nostrilPoints[1]), iShape.GetAShape(dim*nostrilPoints[1]+1));
        Point2f paPoint1 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[0]), iShape.GetAShape(dim*pitchAxisPoints[0]+1));
        Point2f paPoint2 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[1]), iShape.GetAShape(dim*pitchAxisPoints[1]+1));
        NNQ = ( (ntPoint1.y - paPoint1.y) + (ntPoint2.y - paPoint2.y) ) / 2.0f;
        ENQ = fabs(ntPoint1.x - paPoint1.x) > fabs(paPoint2.x - ntPoint2.x) ? fabs(ntPoint1.x - paPoint1.x + (ntPoint2.x - ntPoint1.x) / 2.0f) : fabs(paPoint2.x - ntPoint2.x + (ntPoint2.x - ntPoint1.x) / 2.0f);
        EQ = sqrt(ENQ*ENQ + NNQ*NNQ);
        NO = sqrt(2.0f)/2.0f*EQ;
    }
    else
    {
        unsigned int nosetipBottom = 0;
        vector<unsigned int> nosePoints      = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSE).GetIndexes();
        vector<unsigned int> midlinePoints   = iFaceParts.VO_GetOneFacePart(VO_FacePart::MIDLINEPOINTS).GetIndexes();
        vector<unsigned int> pitchAxisPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::PITCHAXISLINEPOINTS).GetIndexes();
        // The nose tip bottom is the nose point that also lies on the mid line
        for(unsigned int i = 0; i < nosePoints.size(); ++i)
        {
            for(unsigned int j = 0; j < midlinePoints.size(); ++j)
            {
                if(nosePoints[i] == midlinePoints[j])
                {
                    nosetipBottom = nosePoints[i];
                    break;
                }
            }
        }
        Point2f ntPoint  = Point2f(iShape.GetAShape(dim*nosetipBottom), iShape.GetAShape(dim*nosetipBottom+1));
        Point2f paPoint1 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[0]), iShape.GetAShape(dim*pitchAxisPoints[0]+1));
        Point2f paPoint2 = Point2f(iShape.GetAShape(dim*pitchAxisPoints[1]), iShape.GetAShape(dim*pitchAxisPoints[1]+1));
        NNQ = ( (ntPoint.y - paPoint1.y) + (ntPoint.y - paPoint2.y) ) / 2.0f;
        ENQ = fabs(ntPoint.x - paPoint1.x) > fabs(paPoint2.x - ntPoint.x) ? fabs(ntPoint.x - paPoint1.x) : fabs(paPoint2.x - ntPoint.x);
        EQ = sqrt(ENQ*ENQ + NNQ*NNQ);
        NO = sqrt(2.0f)/2.0f*EQ;
    }

    if( fabs(NNQ/NO) < 1.0f )
        pitch = asin( NNQ / NO ) * safeDoubleToFloat(180.0 / CV_PI);
    else if (NNQ * NO < 0.0f)
        pitch = -90.0f;
    else
        pitch = 90.0f;
    return pitch;
}
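The final branch above guards asin against a ratio outside [-1, 1] and clamps the pitch to plus or minus 90 degrees. A minimal standalone sketch of that guard, an illustrative refactoring rather than library code, looks like this (CV_PI comes from the OpenCV headers):

#include <cmath>
// Illustrative helper: guarded asin in degrees, mirroring the branch above.
static float GuardedAsinDeg(float num, float den)
{
    if (std::fabs(num / den) < 1.0f)
        return std::asin(num / den) * static_cast<float>(180.0 / CV_PI);
    return (num * den < 0.0f) ? -90.0f : 90.0f;   // clamp, keeping the sign convention above
}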
Example 7: SaveFittingResults
/**
 * @param fd          - input  folder name
 * @param fnIdx       - input  file name index for the result file
 * @param deviation   - input  deviation of fittedShape from the reference shape
 * @param ptDists     - input  per-point distances from the ground truth
 * @param ptErrorFreq - input  point error frequency, for plotting the error curve
 * @param fittedShape - input  fitting result
 * @param gt_cP       - input  ground truth candidate points
 * @param t_cP        - input  tested candidate points (left eye, right eye, mouth)
 * @param fitTime     - input  fitting time
 * @return void
 */
void CRecognitionAlgs::SaveFittingResults( const string& fd,
                                           const string& fnIdx,
                                           float deviation,
                                           vector<float>& ptDists,
                                           vector<float>& ptErrorFreq,
                                           const VO_Shape& fittedShape,
                                           cv::Point2f* gt_cP,
                                           cv::Point2f* t_cP,
                                           float fitTime)
{
    string fn;
    fn = fd + "/" + fnIdx + ".res";
    fstream fp;
    fp.open(fn.c_str(), ios::out);

    fp << "Error per point -- Distance from ground truth" << endl;
    for(unsigned int i = 0; i < ptDists.size(); ++i)
    {
        fp << ptDists[i] << endl;
    }
    fp << endl;

    fp << "Total landmark error" << endl;
    float errSum = std::accumulate(ptDists.begin(), ptDists.end(), 0.0f);
    fp << errSum << endl;
    fp << "Average landmark distance" << endl;
    fp << errSum / ptDists.size() << endl;

    fp << "Candidate point error (Left eye, Right eye, Mouth)" << endl;
    // Euclidean distances between ground-truth and tested candidate points
    float le_dist = sqrt(pow(gt_cP[0].x - t_cP[0].x, 2) + pow(gt_cP[0].y - t_cP[0].y, 2));
    float re_dist = sqrt(pow(gt_cP[1].x - t_cP[1].x, 2) + pow(gt_cP[1].y - t_cP[1].y, 2));
    float m_dist  = sqrt(pow(gt_cP[2].x - t_cP[2].x, 2) + pow(gt_cP[2].y - t_cP[2].y, 2));
    fp << le_dist << endl;
    fp << re_dist << endl;
    fp << m_dist << endl;
    fp << endl;

    fp << "Fitting time" << endl;
    fp << fitTime << endl;
    fp << endl;

    fp << "Total deviation" << endl << deviation << endl; // deviation
    fp << "Point error -- Frequency" << endl;
    for(unsigned int i = 0; i < ptErrorFreq.size(); i++)
    {
        fp << ptErrorFreq[i] << " ";
    }
    fp << endl;
    fp << endl;

    fp << "Candidate points" << endl;
    fp << t_cP[0].x << " " << t_cP[0].y << endl;
    fp << t_cP[1].x << " " << t_cP[1].y << endl;
    fp << t_cP[2].x << " " << t_cP[2].y << endl;

    fp << "Fitted points" << endl;
    // output actual points along with error frequency
    unsigned int NbOfShapeDim = fittedShape.GetNbOfDim();
    unsigned int NbOfPoints   = fittedShape.GetNbOfPoints();
    for(unsigned int i = 0; i < NbOfPoints; i++)
    {
        for(unsigned int j = 0; j < NbOfShapeDim; j++)
        {
            fp << fittedShape.GetAShape(j*NbOfPoints+i) << " ";
        }
        fp << endl;
    }
    fp << endl;

    fp.close();
    fp.clear();
}
Example 8: UpdateShape
/**
 * @author     YAO Wei, JIA Pei
 * @version    2010-05-20
 * @brief      Find the best offset for every point and update the shape accordingly
 * @param      asmmodel        Input - the ASM model
 * @param      iImg            Input - image to be fitted
 * @param      ioShape         Input and output - the input and output shape
 * @param      iShapeInfo      Input - the shape information
 * @param      iMean           Input - mean profile
 * @param      iCovInverse     Input - inverse of the covariance matrix
 * @param      offSetTolerance Input - offset tolerance, used to determine whether a point has converged or not
 * @param      profdim         Input - the profile dimension to use when updating the shape.
 *                             Sometimes the trained data contains 4D profiles, but the user may only use 1D to test.
 * @note       Refer to "AAM Revisited, page 34, figure 13", particularly those steps.
 */
int VO_FittingASMNDProfiles::UpdateShape( const VO_ASMNDProfiles* asmmodel,
                                          const cv::Mat& iImg,
                                          VO_Shape& ioShape,
                                          const std::vector<VO_Shape2DInfo>& iShapeInfo,
                                          const std::vector< VO_Profile >& iMean,
                                          const std::vector< std::vector< cv::Mat_<float> > >& iCovInverse,
                                          unsigned int offSetTolerance,
                                          unsigned int profdim)
{
    int nGoodLandmarks = 0;
    std::vector<int> nBestOffset(profdim, 0);
    unsigned int NbOfPoints    = ioShape.GetNbOfPoints();
    unsigned int NbOfShapeDim  = ioShape.GetNbOfDim();
    unsigned int ProfileLength = iMean[0].GetProfileLength();
    //std::vector<float> dists(NbOfPoints, 0.0f);
    cv::Point2f pt;

    // Take care of the 1st direction first.
    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        /////////////////////////////////////////////////////////////////////////////
        /// Calculate the profile norm direction /////////////////////////////////////
        /** Here, this is not compatible with 3D */
        cv::Point2f PrevPoint = ioShape.GetA2DPoint ( iShapeInfo[i].GetFrom() );
        cv::Point2f ThisPoint = ioShape.GetA2DPoint ( i );
        cv::Point2f NextPoint = ioShape.GetA2DPoint ( iShapeInfo[i].GetTo() );

        float deltaX, deltaY;
        float normX, normY;
        float sqrtsum;
        float bestXOffset, bestYOffset;

        // left side (connected from side)
        deltaX = ThisPoint.x - PrevPoint.x;
        deltaY = ThisPoint.y - PrevPoint.y;
        sqrtsum = sqrt ( deltaX*deltaX + deltaY*deltaY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        deltaX /= sqrtsum; deltaY /= sqrtsum;   // Normalize
        // Firstly, normX normY record the left side norm.
        normX = -deltaY;
        normY = deltaX;

        // right side (connected to side)
        deltaX = NextPoint.x - ThisPoint.x;
        deltaY = NextPoint.y - ThisPoint.y;
        sqrtsum = sqrt ( deltaX*deltaX + deltaY*deltaY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        deltaX /= sqrtsum; deltaY /= sqrtsum;   // Normalize
        // Secondly, normX normY average both the left side and right side norms.
        normX += -deltaY;
        normY += deltaX;

        // Average left and right side
        sqrtsum = sqrt ( normX*normX + normY*normY );
        if ( sqrtsum < FLT_EPSILON ) sqrtsum = 1.0f;
        normX /= sqrtsum;
        normY /= sqrtsum;                       // Final normalize
        /////////////////////////////////////////////////////////////////////////////

        nBestOffset[0] = VO_FittingASMNDProfiles::VO_FindBestMatchingProfile1D( iImg,
                                                                                ThisPoint,
                                                                                iMean[i].Get1DimProfile(0),
                                                                                iCovInverse[i][0],
                                                                                ProfileLength,
                                                                                offSetTolerance,
                                                                                normX,
                                                                                normY);
        // Set OutShape(iPoint) to the best offset from the current position.
        // One dimensional profile: must move the point along the whisker.
        bestXOffset = nBestOffset[0] * normX;
        bestYOffset = nBestOffset[0] * normY;
        pt.x = ThisPoint.x + bestXOffset;
        pt.y = ThisPoint.y + bestYOffset;
        ioShape.SetA2DPoint(pt, i);
        //dists[i] = sqrt( pow( (double)bestXOffset, 2.0) + pow( (double)bestYOffset, 2.0) );

        //if (abs(nBestOffset[0]) <= offSetTolerance/2)
        if(profdim == 1)
        {
            if (abs(nBestOffset[0]) <= 1)
                nGoodLandmarks++;
        }
    }
    //......... the rest of the code is omitted here .........
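The listing above is truncated. The profile-normal ("whisker") direction it computes inside the loop, the average of the normals of the two edges meeting at the current point, can be summarized as a small standalone helper. This sketch is an illustrative refactoring under that assumption, not part of the original class; it needs <cmath> and <cfloat> in addition to the OpenCV headers.

// Illustrative helper: unit normal at "thisPt", averaging the normals of the
// edges prevPt->thisPt and thisPt->nextPt, as done in the loop above.
cv::Point2f WhiskerNormal(const cv::Point2f& prevPt,
                          const cv::Point2f& thisPt,
                          const cv::Point2f& nextPt)
{
    cv::Point2f d1 = thisPt - prevPt;
    cv::Point2f d2 = nextPt - thisPt;
    float n1 = std::sqrt(d1.x*d1.x + d1.y*d1.y);
    float n2 = std::sqrt(d2.x*d2.x + d2.y*d2.y);
    if (n1 < FLT_EPSILON) n1 = 1.0f;
    if (n2 < FLT_EPSILON) n2 = 1.0f;
    // The normal of each edge is (-dy, dx); sum the two unit normals, then normalize.
    cv::Point2f n(-d1.y/n1 - d2.y/n2, d1.x/n1 + d2.x/n2);
    float len = std::sqrt(n.x*n.x + n.y*n.y);
    if (len < FLT_EPSILON) len = 1.0f;
    return cv::Point2f(n.x/len, n.y/len);
}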