本文整理汇总了C++中Mat::checkVector方法的典型用法代码示例。如果您正苦于以下问题:C++ Mat::checkVector方法的具体用法?C++ Mat::checkVector怎么用?C++ Mat::checkVector使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Mat的用法示例。
在下文中一共展示了Mat::checkVector方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: myConvexityDefects
// Computes the convexity defects of a contour and packs them into a CV_32SC4
// matrix: one row per defect holding (start_index, end_index,
// farthest_point_index, fixed_point_depth), where depth is encoded as
// depth*256 (8.8 fixed point). Releases _defects when no defect is found.
void myConvexityDefects( InputArray _points, InputArray _hull, OutputArray _defects ) {
    Mat points = _points.getMat();
    // Contour must be at least 4 two-channel CV_32S points.
    int ptnum = points.checkVector(2, CV_32S);
    CV_Assert( ptnum > 3 );
    Mat hull = _hull.getMat();
    // Hull is a list of indices into the contour; need at least 3 of them.
    CV_Assert( hull.checkVector(1, CV_32S) > 2 );

    std::vector<CvConvexityDefect> seq;
    convexityDefects(_points, _hull, seq);
    if( seq.empty() ) {
        _defects.release();
        return;
    }

    // Fix: cast size_t -> int explicitly; OutputArray::create takes int
    // dimensions, so the implicit narrowing conversion is made visible.
    _defects.create((int)seq.size(), 1, CV_32SC4);
    Mat defects = _defects.getMat();

    // The legacy defect struct stores raw pointers into the contour's data;
    // recover element indices via pointer arithmetic against the base address.
    // NOTE(review): this assumes `points` is continuous — TODO confirm callers.
    CvPoint* ptorg = (CvPoint*)points.data;
    auto it = seq.begin();
    for( unsigned i = 0; i < seq.size(); ++i, ++it ) {
        CvConvexityDefect& d = *it;
        int idx0 = (int)(d.start - ptorg);
        int idx1 = (int)(d.end - ptorg);
        int idx2 = (int)(d.depth_point - ptorg);
        CV_Assert( 0 <= idx0 && idx0 < ptnum );
        CV_Assert( 0 <= idx1 && idx1 < ptnum );
        CV_Assert( 0 <= idx2 && idx2 < ptnum );
        CV_Assert( d.depth >= 0 );
        int idepth = cvRound(d.depth*256);  // 8.8 fixed-point depth
        defects.at<Vec4i>(i) = Vec4i(idx0, idx1, idx2, idepth);
    }
}
示例2: convertPointsHomogeneous
void cv::convertPointsHomogeneous( const Mat& src, vector<Point3f>& dst )
{
int srccn = src.checkVector(2) >= 0 ? 2 : src.checkVector(4) >= 0 ? 4 : -1;
CV_Assert( srccn > 0 && (src.depth() == CV_32F || src.depth() == CV_32S));
dst.resize(src.cols*src.rows*src.channels()/srccn);
CvMat _src = src, _dst = Mat(dst);
cvConvertPointsHomogeneous(&_src, &_dst);
}
示例3: solveP3P
// Solves the Perspective-3-Point problem with either the classic P3P or the
// AP3P solver, returning the number of candidate (rvec, tvec) solutions and
// filling _rvecs/_tvecs with one Mat per solution when requested.
int solveP3P( InputArray _opoints, InputArray _ipoints,
              InputArray _cameraMatrix, InputArray _distCoeffs,
              OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags) {
    CV_INSTRUMENT_REGION();

    // Exactly three 3D-2D correspondences are required; checkVector() returns
    // -1 on a type mismatch, so max() accepts either float or double storage.
    Mat objPts = _opoints.getMat(), imgPts = _ipoints.getMat();
    int count = std::max(objPts.checkVector(3, CV_32F), objPts.checkVector(3, CV_64F));
    CV_Assert( count == 3 && count == std::max(imgPts.checkVector(2, CV_32F), imgPts.checkVector(2, CV_64F)) );
    CV_Assert( flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P );

    // Work in double precision regardless of the caller's matrix depth.
    Mat camMatSrc = _cameraMatrix.getMat();
    Mat distSrc = _distCoeffs.getMat();
    Mat camMat = Mat_<double>(camMatSrc);
    Mat dist = Mat_<double>(distSrc);

    // The solvers expect normalized (undistorted) image coordinates.
    Mat normPts;
    undistortPoints(imgPts, normPts, camMat, dist);

    std::vector<Mat> rotations, translations;
    int nsol = 0;
    if (flags == SOLVEPNP_P3P)
    {
        p3p solver(camMat);
        nsol = solver.solve(rotations, translations, objPts, normPts);
    }
    else if (flags == SOLVEPNP_AP3P)
    {
        ap3p solver(camMat);
        nsol = solver.solve(rotations, translations, objPts, normPts);
    }
    if (nsol == 0) {
        return 0;
    }

    if (_rvecs.needed()) {
        _rvecs.create(nsol, 1, CV_64F);
    }
    if (_tvecs.needed()) {
        _tvecs.create(nsol, 1, CV_64F);
    }

    // Convert each rotation matrix to a Rodrigues vector and publish the pair.
    for (int k = 0; k < nsol; k++) {
        Mat rv;
        Rodrigues(rotations[k], rv);
        _tvecs.getMatRef(k) = translations[k];
        _rvecs.getMatRef(k) = rv;
    }
    return nsol;
}
示例4: computeCorrespondEpilines
// Computes, for points in one image, the corresponding epipolar lines in the
// other image from the fundamental matrix F, via the legacy C implementation.
void cv::computeCorrespondEpilines( InputArray _points, int whichImage,
                                    InputArray _Fmat, OutputArray _lines )
{
    Mat pts = _points.getMat(), F = _Fmat.getMat();

    // Accept either 2-channel (Euclidean) or 3-channel (homogeneous) points.
    int count = pts.checkVector(2);
    if( count < 0 )
        count = pts.checkVector(3);
    CV_Assert( count >= 0 && (pts.depth() == CV_32F || pts.depth() == CV_32S));

    // One 3-element line (a, b, c) per input point.
    _lines.create(count, 1, CV_32FC3, -1, true);
    CvMat c_pts = pts, c_out = _lines.getMat(), c_F = F;
    cvComputeCorrespondEpilines(&c_pts, whichImage, &c_F, &c_out);
}
示例5: solvePnP
// Estimates the pose (Rodrigues rotation _rvec, translation _tvec) of an
// object from 3D-2D point correspondences, dispatching on the solver flag:
// CV_EPNP, CV_P3P (exactly 4 points), or CV_ITERATIVE (legacy C routine).
// Returns true on success; CV_P3P may return false when no solution exists.
bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
                   InputArray _cameraMatrix, InputArray _distCoeffs,
                   OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, int flags )
{
    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    // checkVector() returns -1 on a type mismatch, so max() accepts points
    // stored as either float or double.
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    _rvec.create(3, 1, CV_64F);
    _tvec.create(3, 1, CV_64F);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    if (flags == CV_EPNP)
    {
        // EPnP works on normalized (undistorted) image coordinates.
        cv::Mat undistortedPoints;
        cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        epnp PnP(cameraMatrix, opoints, undistortedPoints);

        cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        PnP.compute_pose(R, tvec);
        cv::Rodrigues(R, rvec);
        return true;
    }
    else if (flags == CV_P3P)
    {
        // P3P requires exactly 4 correspondences (3 + 1 for disambiguation).
        CV_Assert( npoints == 4);
        cv::Mat undistortedPoints;
        cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        p3p P3Psolver(cameraMatrix);

        cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        bool result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
        if (result)
            cv::Rodrigues(R, rvec);
        return result;
    }
    else if (flags == CV_ITERATIVE)
    {
        CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
        CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
        CvMat c_rvec = _rvec.getMat(), c_tvec = _tvec.getMat();
        // An empty distortion matrix means "no distortion" (pass NULL).
        cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
                                     c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
                                     &c_rvec, &c_tvec, useExtrinsicGuess );
        return true;
    }
    else
        // Fix: the previous message omitted CV_P3P even though that flag is
        // handled above, producing a misleading diagnostic.
        CV_Error(CV_StsBadArg, "The flags argument must be one of CV_ITERATIVE, CV_P3P or CV_EPNP");
    return false;
}
示例6: arcLength
// Calculates the length of a curve (e.g. a contour perimeter) by summing the
// Euclidean distances between consecutive vertices; when is_closed is true,
// the segment from the last vertex back to the first is included.
double cv::arcLength( InputArray _curve, bool is_closed )
{
    Mat curve = _curve.getMat();
    int total = curve.checkVector(2);
    int depth = curve.depth();
    CV_Assert( total >= 0 && (depth == CV_32F || depth == CV_32S));

    // Fewer than two vertices have no length.
    if( total <= 1 )
        return 0.;

    const bool isFloat = (depth == CV_32F);
    const Point* ipts = curve.ptr<Point>();
    const Point2f* fpts = curve.ptr<Point2f>();

    // Reads vertex `idx` as float coordinates regardless of storage depth.
    auto fetch = [&](int idx) {
        return isFloat ? fpts[idx] : Point2f((float)ipts[idx].x, (float)ipts[idx].y);
    };

    // Start from the last vertex when closed (counts the closing segment);
    // start from the first when open (the first iteration then adds zero).
    Point2f prev = fetch(is_closed ? total - 1 : 0);
    double length = 0;
    for( int i = 0; i < total; i++ )
    {
        Point2f cur = fetch(i);
        float dx = cur.x - prev.x, dy = cur.y - prev.y;
        length += std::sqrt(dx*dx + dy*dy);
        prev = cur;
    }
    return length;
}
示例7: fitLine
// Fits a line to a 2D or 3D point set using the M-estimator selected by
// distType. Writes (vx, vy, x0, y0) for 2D input or (vx, vy, vz, x0, y0, z0)
// for 3D input into _line.
void cv::fitLine( InputArray _points, OutputArray _line, int distType,
                  double param, double reps, double aeps )
{
    Mat pts = _points.getMat();
    float result[6] = {0.f};

    // Determine dimensionality; exactly one of these will be non-negative
    // for a valid 2- or 3-channel point matrix.
    int n2d = pts.checkVector(2, -1, false);
    int n3d = pts.checkVector(3, -1, false);
    CV_Assert( n2d >= 0 || n3d >= 0 );

    // The fitters below need continuous CV_32F data; convert when necessary.
    if( pts.depth() != CV_32F || !pts.isContinuous() )
    {
        Mat converted;
        pts.convertTo(converted, CV_32F);
        pts = converted;
    }

    if( n2d >= 0 )
        fitLine2D( pts.ptr<Point2f>(), n2d, distType,
                   (float)param, (float)reps, (float)aeps, result );
    else
        fitLine3D( pts.ptr<Point3f>(), n3d, distType,
                   (float)param, (float)reps, (float)aeps, result );

    Mat(n2d >= 0 ? 4 : 6, 1, CV_32F, result).copyTo(_line);
}
示例8: contourArea
// Computes the area enclosed by a contour using the shoelace (cross-product)
// formula. When `oriented` is false the absolute value is returned; otherwise
// the sign reflects the contour's winding direction.
double cv::contourArea( InputArray _contour, bool oriented )
{
    Mat contour = _contour.getMat();
    int total = contour.checkVector(2);
    int depth = contour.depth();
    CV_Assert(total >= 0 && (depth == CV_32F || depth == CV_32S));

    if( total == 0 )
        return 0.;

    const bool isFloat = (depth == CV_32F);
    const Point* ipts = (const Point*)contour.data;
    const Point2f* fpts = (const Point2f*)contour.data;

    // Reads vertex `idx` as float coordinates regardless of storage depth.
    auto vertex = [&](int idx) {
        return isFloat ? fpts[idx] : Point2f((float)ipts[idx].x, (float)ipts[idx].y);
    };

    // Shoelace accumulation, starting from the last vertex so the polygon
    // is implicitly closed.
    double acc = 0;
    Point2f prev = vertex(total - 1);
    for( int i = 0; i < total; i++ )
    {
        Point2f cur = vertex(i);
        acc += (double)prev.x * cur.y - (double)prev.y * cur.x;
        prev = cur;
    }

    acc *= 0.5;
    return oriented ? acc : fabs(acc);
}
示例9: convertPointsToHomogeneous
// Converts points to homogeneous coordinates by appending one extra channel:
// (x, y) -> (x, y, 1) or (x, y, z) -> (x, y, z, 1), via the legacy C routine.
void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
{
    Mat src = _src.getMat();

    // Detect whether the input is a 2- or 3-channel point matrix.
    int cn = 2;
    int count = src.checkVector(2);
    if( count < 0 )
    {
        count = src.checkVector(3);
        if( count >= 0 )
            cn = 3;
    }
    CV_Assert( count >= 0 && (src.depth() == CV_32F || src.depth() == CV_32S));

    // The output carries one extra channel for the homogeneous coordinate.
    _dst.create(count, 1, CV_MAKETYPE(CV_32F, cn+1));
    CvMat c_in = src, c_out = _dst.getMat();
    cvConvertPointsHomogeneous(&c_in, &c_out);
}
示例10:
// Robustly estimates a 3x4 affine transform mapping one 3D point set onto
// another using RANSAC. param1 is the inlier threshold (default 3), param2
// the confidence (clamped into (0,1), default 0.99). Returns the estimator's
// status and fills _out (3x4 CV_64F) and the per-point _inliers mask.
int cv::estimateAffine3D(InputArray _from, InputArray _to,
                         OutputArray _out, OutputArray _inliers,
                         double param1, double param2)
{
    Mat src = _from.getMat(), dst = _to.getMat();

    // Both sets must contain the same number of CV_32FC3 points.
    int count = src.checkVector(3, CV_32F);
    CV_Assert( count >= 0 && dst.checkVector(3, CV_32F) == count );

    _out.create(3, 4, CV_64F);
    Mat out = _out.getMat();

    // Start with every point marked as an inlier.
    _inliers.create(count, 1, CV_8U, -1, true);
    Mat inliers = _inliers.getMat();
    inliers = Scalar::all(1);

    // The estimator works in double precision.
    Mat src64, dst64;
    src.convertTo(src64, CV_64F);
    dst.convertTo(dst64, CV_64F);

    CvMat F3x4 = out;
    CvMat mask = inliers;
    CvMat m1 = src64;
    CvMat m2 = dst64;

    // Sanitize the RANSAC parameters before delegating.
    const double eps = numeric_limits<double>::epsilon();
    if( param1 <= 0 )
        param1 = 3;
    if( param2 < eps || param2 > 1 - eps )
        param2 = 0.99;

    return Affine3DEstimator().runRANSAC(&m1, &m2, &F3x4, &mask, param1, param2 );
}
示例11: collectCalibrationData
// Flattens per-view calibration point lists into single contiguous matrices:
// objPtMat (1 x total, CV_32FC3), imgPtMat1 and optional imgPtMat2
// (1 x total, CV_32FC2), plus npoints (1 x nimages, CV_32S) recording each
// view's point count. imgPtMat2 may be NULL for single-camera calibration.
// Also dumps the inputs to stdout via printMatOBJ/printMatIMG for debugging.
static void collectCalibrationData( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints1,
InputArrayOfArrays imagePoints2,
Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2,
Mat& npoints )
{
int nimages = (int)objectPoints.total();
int i, j = 0, ni = 0, total = 0;
// All per-view lists must contain the same number of views.
CV_Assert(nimages > 0 && nimages == (int)imagePoints1.total() &&
(!imgPtMat2 || nimages == (int)imagePoints2.total()));
cout << " Number of Frames: " << nimages << endl;
// First pass: print the inputs and count the total number of points so the
// flat output buffers can be allocated in one shot.
for( i = 0; i < nimages; i++ )
{
cout << endl << "Object Points: " << endl;
printMatOBJ(objectPoints.getMat(i));
cout << endl << "Image Points: " << endl;
printMatIMG(imagePoints1.getMat(i));
ni = objectPoints.getMat(i).checkVector(3, CV_32F);
CV_Assert( ni >= 0 );
total += ni;
}
npoints.create(1, (int)nimages, CV_32S);
objPtMat.create(1, (int)total, CV_32FC3);
imgPtMat1.create(1, (int)total, CV_32FC2);
Point2f* imgPtData2 = 0;
if( imgPtMat2 )
{
imgPtMat2->create(1, (int)total, CV_32FC2);
imgPtData2 = imgPtMat2->ptr<Point2f>();
}
Point3f* objPtData = objPtMat.ptr<Point3f>();
Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();
// Second pass: copy each view's points into the flat buffers. j is the
// running write offset, advanced by the previous view's count ni in the
// loop header.
for( i = 0; i < nimages; i++, j += ni )
{
Mat objpt = objectPoints.getMat(i);
Mat imgpt1 = imagePoints1.getMat(i);
ni = objpt.checkVector(3, CV_32F);
int ni1 = imgpt1.checkVector(2, CV_32F);
// Object and image point counts must agree within a view.
CV_Assert( ni > 0 && ni == ni1 );
npoints.at<int>(i) = ni;
memcpy( objPtData + j, objpt.data, ni*sizeof(objPtData[0]) );
memcpy( imgPtData1 + j, imgpt1.data, ni*sizeof(imgPtData1[0]) );
if( imgPtData2 )
{
Mat imgpt2 = imagePoints2.getMat(i);
int ni2 = imgpt2.checkVector(2, CV_32F);
CV_Assert( ni == ni2 );
memcpy( imgPtData2 + j, imgpt2.data, ni*sizeof(imgPtData2[0]) );
}
}
}
示例12: solvePnP
// Estimates the object pose (rvec, tvec) from 3D-2D correspondences using
// the legacy iterative routine cvFindExtrinsicCameraParams2.
void cv::solvePnP( InputArray _opoints, InputArray _ipoints,
                   InputArray _cameraMatrix, InputArray _distCoeffs,
                   OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess )
{
    Mat objPts = _opoints.getMat(), imgPts = _ipoints.getMat();

    // checkVector() returns -1 on a type mismatch, so max() accepts points
    // stored as either float or double; counts must match across the sets.
    int count = std::max(objPts.checkVector(3, CV_32F), objPts.checkVector(3, CV_64F));
    CV_Assert( count >= 0 && count == std::max(imgPts.checkVector(2, CV_32F), imgPts.checkVector(2, CV_64F)) );

    _rvec.create(3, 1, CV_64F);
    _tvec.create(3, 1, CV_64F);

    Mat camMat = _cameraMatrix.getMat(), dist = _distCoeffs.getMat();
    CvMat c_obj = objPts, c_img = imgPts;
    CvMat c_cam = camMat, c_dist = dist;
    CvMat c_r = _rvec.getMat(), c_t = _tvec.getMat();

    // An empty distortion matrix means "no distortion" (pass NULL).
    cvFindExtrinsicCameraParams2(&c_obj, &c_img, &c_cam,
                                 c_dist.rows*c_dist.cols ? &c_dist : 0,
                                 &c_r, &c_t, useExtrinsicGuess );
}
示例13: computeCorrespondEpilines
void cv::computeCorrespondEpilines( const Mat& points, int whichImage,
const Mat& F, vector<Vec3f>& lines )
{
CV_Assert(points.checkVector(2) >= 0 &&
(points.depth() == CV_32F || points.depth() == CV_32S));
lines.resize(points.cols*points.rows*points.channels()/2);
CvMat _points = points, _lines = Mat(lines), matF = F;
cvComputeCorrespondEpilines(&_points, whichImage, &matF, &_lines);
}
示例14: _findFundamentalMat
// Wraps the legacy cvFindFundamentalMat: estimates the 3x3 fundamental
// matrix from two 2-channel point sets, optionally filling a per-point
// inlier mask. Returns an all-zero matrix when no model is found.
static Mat _findFundamentalMat( const Mat& points1, const Mat& points2,
                                int method, double param1, double param2,
                                vector<uchar>* mask )
{
    CV_Assert(points1.checkVector(2) >= 0 && points2.checkVector(2) >= 0 &&
              (points1.depth() == CV_32F || points1.depth() == CV_32S) &&
              points1.depth() == points2.depth());

    Mat F(3, 3, CV_64F);
    CvMat c_pt1 = Mat(points1), c_pt2 = Mat(points2);
    CvMat c_F = F;

    // Optional inlier mask: one byte per point, backed by the caller's vector.
    CvMat c_mask;
    CvMat* maskPtr = 0;
    if( mask )
    {
        mask->resize(points1.cols*points1.rows*points1.channels()/2);
        c_mask = cvMat(1, (int)mask->size(), CV_8U, (void*)&(*mask)[0]);
        maskPtr = &c_mask;
    }

    int found = cvFindFundamentalMat( &c_pt1, &c_pt2, &c_F, method, param1, param2, maskPtr );
    if( found <= 0 )
        F = Scalar(0);  // no model found: signal with an all-zero matrix
    return F;
}
示例15: calcPosition
// Estimates a 3D position (state and covariance) from one image observation
// per camera view. Camera and distortion matrices may be supplied either as
// one-per-view vectors or as a single matrix shared by all views.
void calcPosition( InputArray _tvecs, InputArray _rvecs, InputArray _pts,
                   InputArray _cameraMatrices, InputArray _distortionMatrices,
                   OutputArray _state, OutputArray _covariance )
{
    Ptr< PositionCalculator > calculator = PositionCalculator::create();

    std::vector< Mat > translations, rotations;
    _tvecs.getMatVector( translations );
    _rvecs.getMatVector( rotations );

    // At least two views are required, with one rotation per translation.
    CV_Assert( translations.size() >= 2 );
    CV_Assert( translations.size() == rotations.size() );

    // One CV_32FC2 image point per view.
    Mat pts = _pts.getMat();
    CV_Assert( ( translations.size() == pts.checkVector( 2, CV_32F, true ) ) );

    // Camera matrices: either one 3x3 per view or a single shared 3x3.
    std::vector< Mat > cameras, distortions;
    if ( _cameraMatrices.kind() == _InputArray::STD_VECTOR_MAT )
    {
        _cameraMatrices.getMatVector( cameras );
        CV_Assert( translations.size() == cameras.size() );
    }
    else
    {
        cameras.push_back( _cameraMatrices.getMat() );
        CV_Assert( ( cameras[0].rows == 3 ) && ( cameras[0].cols == 3 ) );
    }

    // Distortion: one 5x1 vector per view, a single shared one, or empty.
    if ( _distortionMatrices.kind() == _InputArray::STD_VECTOR_MAT )
    {
        _distortionMatrices.getMatVector( distortions );
        CV_Assert( translations.size() == distortions.size() );
    }
    else
    {
        distortions.push_back( _distortionMatrices.getMat() );
        CV_Assert( ( ( distortions[0].rows == 5 ) && ( distortions[0].cols == 1 ) ) || distortions[0].empty() );
    }

    // Feed each view's measurement; when a single camera/distortion matrix
    // was supplied it is reused for every view.
    Mat camera = cameras[0];
    Mat dist = distortions[0];
    for ( size_t i = 0; i < translations.size(); ++i )
    {
        if ( cameras.size() == translations.size() )
            camera = cameras[i];
        if ( distortions.size() == translations.size() )
            dist = distortions[i];
        calculator->addMeasurement( translations[i], rotations[i], pts.at< Point2f >( i ), camera, dist );
    }
    calculator->computeState( _state, _covariance );
}