This article collects typical usage examples of the C++ method VO_Shape::MaxX. If you are unsure what VO_Shape::MaxX does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help. You can also explore the containing class VO_Shape for more context.
The following shows 2 code examples of VO_Shape::MaxX, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: switch
/**
 * @author      JIA Pei
 * @version     2010-05-07
 * @brief       Draw a line on the image
 * @param       iShape          Input -- the input shape
 * @param       theSubshape     Input -- the subshape whose extent bounds the line when ws is false
 * @param       iLine           Input -- the line in homogeneous form (A, B, C), i.e. A*x + B*y + C = 0
 * @param       oImg            Output -- the output image the line is drawn on
 * @param       dir             Input -- direction (VERTICAL or HORIZONTAL)
 * @param       ws              Input -- use the whole shape's extent (true) or the subshape's (false)
 * @param       offset          Input -- extra offset added at both ends of the line segment
 * @param       ci              Input -- color index
 * @return      void
 */
void VO_Fitting2DSM::VO_DrawAline(  const VO_Shape& iShape,
                                    const VO_Shape& theSubshape,
                                    const std::vector<float>& iLine,
                                    cv::Mat& oImg,
                                    unsigned int dir,
                                    bool ws,
                                    unsigned int offset,
                                    unsigned int ci)
{
    switch(dir)
    {
    case VERTICAL:
        {
            float A = iLine[0];
            float B = iLine[1];
            float C = iLine[2];
            cv::Point2f ptf1, ptf2;
            if(ws)
            {
                ptf1.y = iShape.MinY() - offset;
                ptf2.y = iShape.MaxY() + offset;
            }
            else
            {
                ptf1.y = theSubshape.MinY() - offset;
                ptf2.y = theSubshape.MaxY() + offset;
            }
            ptf1.x = -(C + B*ptf1.y)/A;
            ptf2.x = -(C + B*ptf2.y)/A;
            cv::Point pt1 = cvPointFrom32f( ptf1 );
            cv::Point pt2 = cvPointFrom32f( ptf2 );
            cv::line( oImg, pt1, pt2, colors[ci], 2, 0, 0 );
        }
        break;
    case HORIZONTAL:
    default:
        {
            float A = iLine[0];
            float B = iLine[1];
            float C = iLine[2];
            cv::Point2f ptf1, ptf2;
            if(ws)
            {
                ptf1.x = iShape.MinX() - offset;
                ptf2.x = iShape.MaxX() + offset;
            }
            else
            {
                ptf1.x = theSubshape.MinX() - offset;
                ptf2.x = theSubshape.MaxX() + offset;
            }
            ptf1.y = -(C + A*ptf1.x)/B;
            ptf2.y = -(C + A*ptf2.x)/B;
            cv::Point pt1 = cvPointFrom32f( ptf1 );
            cv::Point pt2 = cvPointFrom32f( ptf2 );
            cv::line( oImg, pt1, pt2, colors[ci], 2, 0, 0 );
        }
        break;
    }
}
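The line passed to VO_DrawAline is in homogeneous form (A, B, C), i.e. the points satisfying A*x + B*y + C = 0; each branch fixes one coordinate from the shape's extent and solves for the other. The standalone sketch below (plain OpenCV, not part of VOSM; the image size, line coefficients, and x-range are made up for illustration) shows the same endpoint computation as the HORIZONTAL branch:

#include <opencv2/opencv.hpp>

int main()
{
    // Blank canvas standing in for the image the fitter draws on.
    cv::Mat img(480, 640, CV_8UC3, cv::Scalar::all(0));

    // Line in homogeneous form A*x + B*y + C = 0 (here: y = 0.5*x + 50).
    float A = 0.5f, B = -1.0f, C = 50.0f;

    // Choose an x-range (the role played by the shape extent plus offset above)
    // and solve for y at both ends, exactly as the HORIZONTAL branch does.
    float x1 = 100.0f, x2 = 500.0f;
    cv::Point2f ptf1(x1, -(C + A*x1)/B);
    cv::Point2f ptf2(x2, -(C + A*x2)/B);

    cv::Point pt1 = ptf1;   // saturate-cast to integer pixel coordinates
    cv::Point pt2 = ptf2;
    cv::line(img, pt1, pt2, cv::Scalar(0, 255, 0), 2);
    cv::imwrite("line_demo.png", img);
    return 0;
}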
Example 2: EvaluateFaceTrackedByCascadeDetection
/**
 * @brief   Is the tracked shape really a face?
 *          If both eyes and the mouth can be detected
 *          inside windows predicted from the shape,
 *          we regard the shape as correctly describing a face.
 * @param   fd          - input the face detection algorithms
 * @param   iImg        - input the input image
 * @param   iShape      - input the currently tracked shape
 * @param   iShapeInfo  - input the shape info
 * @param   iFaceParts  - input the face parts
 * @return  bool        whether the tracked shape is acceptable
 */
bool CRecognitionAlgs::EvaluateFaceTrackedByCascadeDetection(
    const CFaceDetectionAlgs* fd,
    const Mat& iImg,
    const VO_Shape& iShape,
    const vector<VO_Shape2DInfo>& iShapeInfo,
    const VO_FaceParts& iFaceParts)
{
    double t = (double)cvGetTickCount();

    unsigned int ImgWidth  = iImg.cols;
    unsigned int ImgHeight = iImg.rows;

    vector<unsigned int> leftEyePoints       = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
    vector<unsigned int> rightEyePoints      = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
    vector<unsigned int> lipOuterLinerPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LIPOUTERLINE).GetIndexes();

    VO_Shape leftEyeShape       = iShape.GetSubShape(leftEyePoints);
    VO_Shape rightEyeShape      = iShape.GetSubShape(rightEyePoints);
    VO_Shape lipOuterLinerShape = iShape.GetSubShape(lipOuterLinerPoints);

    float dolEye   = 12.0f;
    float dolMouth = 12.0f;

    unsigned int possibleLeftEyeMinX    = 0.0f > (leftEyeShape.MinX() - dolEye) ? 0 : (int)(leftEyeShape.MinX() - dolEye);
    unsigned int possibleLeftEyeMinY    = 0.0f > (leftEyeShape.MinY() - dolEye) ? 0 : (int)(leftEyeShape.MinY() - dolEye);
    unsigned int possibleLeftEyeMaxX    = (leftEyeShape.MaxX() + dolEye) > ImgWidth  ? ImgWidth  : (int)(leftEyeShape.MaxX() + dolEye);
    unsigned int possibleLeftEyeMaxY    = (leftEyeShape.MaxY() + dolEye) > ImgHeight ? ImgHeight : (int)(leftEyeShape.MaxY() + dolEye);
    unsigned int possibleLeftEyeWidth   = possibleLeftEyeMaxX - possibleLeftEyeMinX;
    unsigned int possibleLeftEyeHeight  = possibleLeftEyeMaxY - possibleLeftEyeMinY;

    unsigned int possibleRightEyeMinX   = 0.0f > (rightEyeShape.MinX() - dolEye) ? 0 : (int)(rightEyeShape.MinX() - dolEye);
    unsigned int possibleRightEyeMinY   = 0.0f > (rightEyeShape.MinY() - dolEye) ? 0 : (int)(rightEyeShape.MinY() - dolEye);
    unsigned int possibleRightEyeMaxX   = (rightEyeShape.MaxX() + dolEye) > ImgWidth  ? ImgWidth  : (int)(rightEyeShape.MaxX() + dolEye);
    unsigned int possibleRightEyeMaxY   = (rightEyeShape.MaxY() + dolEye) > ImgHeight ? ImgHeight : (int)(rightEyeShape.MaxY() + dolEye);
    unsigned int possibleRightEyeWidth  = possibleRightEyeMaxX - possibleRightEyeMinX;
    unsigned int possibleRightEyeHeight = possibleRightEyeMaxY - possibleRightEyeMinY;

    unsigned int possibleMouthMinX      = 0.0f > (lipOuterLinerShape.MinX() - dolMouth) ? 0 : (int)(lipOuterLinerShape.MinX() - dolMouth);
    unsigned int possibleMouthMinY      = 0.0f > (lipOuterLinerShape.MinY() - dolMouth) ? 0 : (int)(lipOuterLinerShape.MinY() - dolMouth);
    unsigned int possibleMouthMaxX      = (lipOuterLinerShape.MaxX() + dolMouth) > ImgWidth  ? ImgWidth  : (int)(lipOuterLinerShape.MaxX() + dolMouth);
    unsigned int possibleMouthMaxY      = (lipOuterLinerShape.MaxY() + dolMouth) > ImgHeight ? ImgHeight : (int)(lipOuterLinerShape.MaxY() + dolMouth);
    unsigned int possibleMouthWidth     = possibleMouthMaxX - possibleMouthMinX;
    unsigned int possibleMouthHeight    = possibleMouthMaxY - possibleMouthMinY;

    Rect LeftEyePossibleWindow  = Rect( possibleLeftEyeMinX,  possibleLeftEyeMinY,  possibleLeftEyeWidth,  possibleLeftEyeHeight );
    Rect RightEyePossibleWindow = Rect( possibleRightEyeMinX, possibleRightEyeMinY, possibleRightEyeWidth, possibleRightEyeHeight );
    Rect MouthPossibleWindow    = Rect( possibleMouthMinX,    possibleMouthMinY,    possibleMouthWidth,    possibleMouthHeight );
    Rect CurrentWindow          = Rect( 0, 0, iImg.cols, iImg.rows );
    Rect DetectedLeftEyeWindow, DetectedRightEyeWindow, DetectedMouthWindow;

    bool LeftEyeDetected  = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection( iImg, LeftEyePossibleWindow,  DetectedLeftEyeWindow,  VO_FacePart::LEFTEYE );
    bool RightEyeDetected = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection( iImg, RightEyePossibleWindow, DetectedRightEyeWindow, VO_FacePart::RIGHTEYE );
    bool MouthDetected    = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection( iImg, MouthPossibleWindow,    DetectedMouthWindow,    VO_FacePart::LIPOUTERLINE );

    t = ((double)cvGetTickCount() - t)
        / (cvGetTickFrequency()*1000.0f);
    cout << "Detection Confirmation time cost: " << t << " millisec" << endl;

    if(LeftEyeDetected && RightEyeDetected && MouthDetected)
        return true;
    else
        return false;
}
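The bulk of this function is a repeated clamp-and-expand pattern: take a face part's extent from MinX/MaxX/MinY/MaxY, grow it by a tolerance (dolEye / dolMouth), clip it to the image, and hand the resulting window to the cascade detector. Below is a minimal standalone sketch of that pattern (plain OpenCV, not VOSM; the helper name MakeSearchWindow and the sample landmark coordinates are invented for illustration):

#include <algorithm>
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>

// Hypothetical helper: build a search window around a set of landmark points,
// expanded by `margin` pixels and clamped to the image borders -- the same
// computation the possible*Min/Max variables perform in the example above.
static cv::Rect MakeSearchWindow(const std::vector<cv::Point2f>& pts,
                                 int margin, const cv::Size& imgSize)
{
    cv::Rect box = cv::boundingRect(pts);                               // tight box around the landmarks
    int minX = std::max(box.x - margin, 0);                             // clamp left/top at 0
    int minY = std::max(box.y - margin, 0);
    int maxX = std::min(box.x + box.width  + margin, imgSize.width);    // clamp right/bottom at the image edge
    int maxY = std::min(box.y + box.height + margin, imgSize.height);
    return cv::Rect(minX, minY, maxX - minX, maxY - minY);
}

int main()
{
    // Made-up left-eye landmarks in a 640x480 image.
    std::vector<cv::Point2f> leftEye = { {120.f, 95.f}, {140.f, 90.f}, {160.f, 96.f} };
    cv::Rect roi = MakeSearchWindow(leftEye, 12, cv::Size(640, 480));

    // `roi` would then restrict a cascade detector to the eye region,
    // playing the role of LeftEyePossibleWindow above.
    std::cout << roi.x << " " << roi.y << " "
              << roi.width << " " << roi.height << std::endl;
    return 0;
}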