本文整理汇总了C++中VO_Shape::GetSubShape方法的典型用法代码示例。如果您正苦于以下问题:C++ VO_Shape::GetSubShape方法的具体用法?C++ VO_Shape::GetSubShape怎么用?C++ VO_Shape::GetSubShape使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VO_Shape
的用法示例。
在下文中一共展示了VO_Shape::GetSubShape方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: CalcFaceKeyline
/**
 * @brief       Calculate a key line on the face by least-squares fitting a
 *              straight line through the points of one face part.
 * @param       oLine       Output  line coefficients {A, B, C} of Ax+By+C = 0
 * @param       iShape      Input   the known shape
 * @param       iFaceParts  Input   the face parts description
 * @param       oSubshape   Output  the sub-shape holding the points the line
 *                                  was fitted through
 * @param       partIdx     Input   which face part to fit the line to
 * @return      void
 */
void VO_KeyPoint::CalcFaceKeyline(
    std::vector<float>& oLine,
    const VO_Shape& iShape,
    const VO_FaceParts& iFaceParts,
    VO_Shape& oSubshape,
    unsigned int partIdx)
{
    oLine.resize(3);
    cv::Vec4f line;
    std::vector<unsigned int> linePoints;

    // Pick the point indexes belonging to the requested face part.
    switch(partIdx)
    {
    case VO_FacePart::NOSTRIL:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
        break;
    case VO_FacePart::MOUTHCORNERPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::MOUTHCORNERPOINTS).GetIndexes();
        break;
    case VO_FacePart::PITCHAXISLINEPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::PITCHAXISLINEPOINTS).GetIndexes();
        break;
    case VO_FacePart::EYECORNERPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::EYECORNERPOINTS).GetIndexes();
        break;
    case VO_FacePart::MIDLINEPOINTS:
        // BUGFIX: this case previously fell through to the default and
        // fetched EYECORNERPOINTS instead of the requested MIDLINEPOINTS.
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::MIDLINEPOINTS).GetIndexes();
        break;
    default:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::EYECORNERPOINTS).GetIndexes();
        break;
    }
    oSubshape = iShape.GetSubShape(linePoints);
    // Explained by JIA Pei: sometimes there are no linePoints, which means the
    // specified part is not annotated in this particular database. A line can
    // only be fitted through at least 2 points.
    if(linePoints.size() >= 2 )
    {
        cv::fitLine( oSubshape.GetTheShape(), line, CV_DIST_L2, 0, 0.001, 0.001 );
        // fitLine yields (vx, vy, x0, y0); convert to the implicit form
        // Ax+By+C = 0 with (A, B) = (-vy, vx) and C = vy*x0 - vx*y0.
        oLine[0] = -line[1];
        oLine[1] = line[0];
        oLine[2] = line[1]*line[2]-line[0]*line[3];
    }
}
示例2: EvaluateFaceTrackedByCascadeDetection
/**
* @brief whether the tracked shape is really a face?
* If we can detect both eyes and mouth
* according to some prior knowledge due to its shape,
* we may regard this shape correctly describe a face.
* @param iImg - input input image
* @param iShape - input the current tracked shape
* @param iShapeInfo - input shape info
* @param iFaceParts - input face parts
* @return bool whether the tracked shape is acceptable?
*/
bool CRecognitionAlgs::EvaluateFaceTrackedByCascadeDetection(
const CFaceDetectionAlgs* fd,
const Mat& iImg,
const VO_Shape& iShape,
const vector<VO_Shape2DInfo>& iShapeInfo,
const VO_FaceParts& iFaceParts)
{
double t = (double)cvGetTickCount();
unsigned int ImgWidth = iImg.cols;
unsigned int ImgHeight = iImg.rows;
vector<unsigned int> leftEyePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
vector<unsigned int> rightEyePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
vector<unsigned int> lipOuterLinerPoints= iFaceParts.VO_GetOneFacePart(VO_FacePart::LIPOUTERLINE).GetIndexes();
VO_Shape leftEyeShape = iShape.GetSubShape(leftEyePoints);
VO_Shape rightEyeShape = iShape.GetSubShape(rightEyePoints);
VO_Shape lipOuterLinerShape = iShape.GetSubShape(lipOuterLinerPoints);
float dolEye = 12.0f;
float dolMouth = 12.0f;
unsigned int possibleLeftEyeMinX = 0.0f > (leftEyeShape.MinX() - dolEye) ? 0: (int)(leftEyeShape.MinX() - dolEye);
unsigned int possibleLeftEyeMinY = 0.0f > (leftEyeShape.MinY() - dolEye) ? 0: (int)(leftEyeShape.MinY() - dolEye);
unsigned int possibleLeftEyeMaxX = (leftEyeShape.MaxX() + dolEye) > ImgWidth ? ImgWidth : (int)(leftEyeShape.MaxX() + dolEye);
unsigned int possibleLeftEyeMaxY = (leftEyeShape.MaxY() + dolEye) > ImgHeight ? ImgHeight : (int)(leftEyeShape.MaxY() + dolEye);
unsigned int possibleLeftEyeWidth = possibleLeftEyeMaxX - possibleLeftEyeMinX;
unsigned int possibleLeftEyeHeight = possibleLeftEyeMaxY - possibleLeftEyeMinY;
unsigned int possibleRightEyeMinX = 0.0f > (rightEyeShape.MinX() - dolEye) ? 0: (int)(rightEyeShape.MinX() - dolEye);
unsigned int possibleRightEyeMinY = 0.0f > (rightEyeShape.MinY() - dolEye) ? 0: (int)(rightEyeShape.MinY() - dolEye);
unsigned int possibleRightEyeMaxX = (rightEyeShape.MaxX() + dolEye) > ImgWidth ? ImgWidth : (int)(rightEyeShape.MaxX() + dolEye);
unsigned int possibleRightEyeMaxY = (rightEyeShape.MaxY() + dolEye) > ImgHeight ? ImgHeight : (int)(rightEyeShape.MaxY() + dolEye);
unsigned int possibleRightEyeWidth = possibleRightEyeMaxX - possibleRightEyeMinX;
unsigned int possibleRightEyeHeight = possibleRightEyeMaxY - possibleRightEyeMinY;
unsigned int possibleMouthMinX = 0.0f > (lipOuterLinerShape.MinX() - dolMouth) ? 0: (int)(lipOuterLinerShape.MinX() - dolMouth);
unsigned int possibleMouthMinY = 0.0f > (lipOuterLinerShape.MinY() - dolMouth) ? 0: (int)(lipOuterLinerShape.MinY() - dolMouth);
unsigned int possibleMouthMaxX = (lipOuterLinerShape.MaxX() + dolMouth) > ImgWidth ? ImgWidth : (int)(lipOuterLinerShape.MaxX() + dolMouth);
unsigned int possibleMouthMaxY = (lipOuterLinerShape.MaxY() + dolMouth) > ImgHeight ? ImgHeight : (int)(lipOuterLinerShape.MaxY() + dolMouth);
unsigned int possibleMouthWidth = possibleMouthMaxX - possibleMouthMinX;
unsigned int possibleMouthHeight = possibleMouthMaxY - possibleMouthMinY;
Rect LeftEyePossibleWindow = Rect( possibleLeftEyeMinX, possibleLeftEyeMinY, possibleLeftEyeWidth, possibleLeftEyeHeight );
Rect RightEyePossibleWindow = Rect( possibleRightEyeMinX, possibleRightEyeMinY, possibleRightEyeWidth, possibleRightEyeHeight );
Rect MouthPossibleWindow = Rect( possibleMouthMinX, possibleMouthMinY, possibleMouthWidth, possibleMouthHeight );
Rect CurrentWindow = Rect( 0, 0, iImg.cols, iImg.rows );
Rect DetectedLeftEyeWindow, DetectedRightEyeWindow, DetectedMouthWindow;
bool LeftEyeDetected = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection ( iImg, LeftEyePossibleWindow, DetectedLeftEyeWindow, VO_FacePart::LEFTEYE);
bool RightEyeDetected = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection ( iImg, RightEyePossibleWindow, DetectedRightEyeWindow, VO_FacePart::RIGHTEYE );
bool MouthDetected = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection ( iImg, MouthPossibleWindow, DetectedMouthWindow, VO_FacePart::LIPOUTERLINE );
t = ((double)cvGetTickCount() - t )
/ (cvGetTickFrequency()*1000.0f);
cout << "Detection Confirmation time cost: " << t << "millisec" << endl;
if(LeftEyeDetected && RightEyeDetected && MouthDetected)
return true;
else
return false;
}
示例3: CalcFaceKeyPoint
/**
* @brief Calculate some key points on the face
* @param oPoint output point list
* @param iShape input shape
* @param iFaceParts inut faceparts
* @param ptType input point type
* @return void
*/
void VO_KeyPoint::CalcFaceKeyPoint( cv::Point2f& oPoint,
const VO_Shape& iShape,
const VO_FaceParts& iFaceParts,
unsigned int ptType)
{
std::vector<unsigned int> facePartsPoints;
VO_Shape subiShape;
// Very very very very important.
// Explained by JIA Pei.
// "resize()" is just for resize;
// it doesn't always set what's already inside the the std::vector to "0"
// Therefore, clear() is a must before resize().
switch(ptType)
{
case CENTEROFGRAVITY:
if (iShape.GetNbOfPoints() > 0)
oPoint = iShape.GetA2DPoint( VO_Shape::CENTER);
break;
case LEFTEYELEFTCORNER:
{
facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
if (facePartsPoints.size() > 0)
{
subiShape = iShape.GetSubShape(facePartsPoints);
oPoint = subiShape.GetA2DPoint(VO_Shape::LEFTMOST);
}
}
break;
case LEFTEYERIGHTCORNER:
{
facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
if (facePartsPoints.size() > 0)
{
subiShape = iShape.GetSubShape(facePartsPoints);
oPoint = subiShape.GetA2DPoint(VO_Shape::RIGHTMOST);
}
}
break;
case LEFTEYECENTER:
{
facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
if (facePartsPoints.size() > 0)
{
subiShape = iShape.GetSubShape(facePartsPoints);
oPoint = subiShape.GetA2DPoint( VO_Shape::CENTER);
}
}
break;
case RIGHTEYELEFTCORNER:
{
facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
if (facePartsPoints.size() > 0)
{
subiShape = iShape.GetSubShape(facePartsPoints);
oPoint = subiShape.GetA2DPoint(VO_Shape::LEFTMOST);
}
}
break;
case RIGHTEYERIGHTCORNER:
{
facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
if (facePartsPoints.size() > 0)
{
subiShape = iShape.GetSubShape(facePartsPoints);
oPoint = subiShape.GetA2DPoint(VO_Shape::RIGHTMOST);
}
}
break;
case RIGHTEYECENTER:
{
facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
if (facePartsPoints.size() > 0)
{
subiShape = iShape.GetSubShape(facePartsPoints);
oPoint = subiShape.GetA2DPoint( VO_Shape::CENTER);
}
}
break;
case NOSETIPKEY:
{
facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSETIP).GetIndexes(); // Just one point
if (facePartsPoints.size() == 1)
oPoint = iShape.GetA2DPoint(facePartsPoints[0]);
}
break;
case NOSTRILLEFT:
{
facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
if (facePartsPoints.size() > 0)
{
subiShape = iShape.GetSubShape(facePartsPoints);
//.........这里部分代码省略.........