This article collects typical usage examples of the C++ method InputArray::depth. If you are wondering what InputArray::depth does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore the usage of the other methods of the InputArray class.
Below are 14 code examples of InputArray::depth, sorted by popularity by default.
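As background for the examples: depth() reports the per-element storage type (CV_8U, CV_16U, CV_32F, ...) regardless of the channel count. A minimal self-contained sketch illustrating this:

#include <opencv2/core.hpp>
#include <iostream>

// Prints the depth and channel count of whatever array is passed in.
static void report(cv::InputArray arr)
{
    std::cout << "depth=" << arr.depth() << " channels=" << arr.channels() << "\n";
}

int main()
{
    cv::Mat gray(4, 4, CV_8UC1);   // depth CV_8U (0), 1 channel
    cv::Mat color(4, 4, CV_8UC3);  // depth CV_8U (0), 3 channels: depth ignores channels
    cv::Mat flow(4, 4, CV_32FC2);  // depth CV_32F (5), 2 channels
    report(gray);
    report(color);
    report(flow);
    return 0;
}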
Example 1: getBoundingBoxes
void EdgeBoxesImpl::getBoundingBoxes(InputArray edge_map, InputArray orientation_map, std::vector<Rect> &boxes)
{
    CV_Assert(edge_map.depth() == CV_32F);
    CV_Assert(orientation_map.depth() == CV_32F);

    Mat E = edge_map.getMat().t();
    Mat O = orientation_map.getMat().t();

    h = E.cols;
    w = E.rows;

    clusterEdges(E, O);
    prepDataStructs(E);

    Boxes b;
    scoreAllBoxes(b);
    boxesNms(b, _beta, _eta, _maxBoxes);

    // create output boxes
    int n = (int) b.size();
    boxes.resize(n);
    for (int i = 0; i < n; i++)
    {
        boxes[i] = Rect((int)b[i].x + 1, (int)b[i].y + 1, (int)b[i].w, (int)b[i].h);
    }
}
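The assertions require both maps to be CV_32F. A hedged usage sketch of the public EdgeBoxes API from opencv_contrib's ximgproc module follows; the image path "input.jpg" and the trained structured-edge model "model.yml" are placeholders:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/ximgproc.hpp>
#include <vector>

int main()
{
    cv::Mat src = cv::imread("input.jpg");
    cv::cvtColor(src, src, cv::COLOR_BGR2RGB);  // the edge detector expects RGB...
    src.convertTo(src, CV_32F, 1.0 / 255.0);    // ...as CV_32F in [0, 1]

    cv::Ptr<cv::ximgproc::StructuredEdgeDetection> sed =
        cv::ximgproc::createStructuredEdgeDetection("model.yml");

    cv::Mat edges, orientation;
    sed->detectEdges(src, edges);                 // CV_32F edge map
    sed->computeOrientation(edges, orientation);  // CV_32F orientation map
    sed->edgesNms(edges, orientation, edges);     // suppress non-maximal edges

    std::vector<cv::Rect> boxes;
    cv::Ptr<cv::ximgproc::EdgeBoxes> eb = cv::ximgproc::createEdgeBoxes();
    eb->getBoundingBoxes(edges, orientation, boxes);  // satisfies the CV_32F asserts
    return 0;
}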
Example 2: stretchlim
//TODO: handle RGB or force user to do a channel at a time?
void stretchlim( const InputArray _image, double* low_value,
                 double* high_value, double low_fract, double high_fract )
{
    Mat image = _image.getMat();

    CV_Assert( image.type() == CV_8UC1 || image.type() == CV_16UC1 );

    if (low_fract == 0 && high_fract == 1.0) {
        // no need to waste time calculating the histogram
        *low_value = 0;
        *high_value = 1;
        return;
    }

    int nPixelValues = 1 << bitsFromDepth( image.depth() );
    int channels[] = { 0 };
    MatND hist;
    int histSize[] = { nPixelValues };
    float range[] = { 0, nPixelValues };
    const float* ranges[] = { range };
    calcHist( &image, 1, channels, Mat(), hist, 1, histSize, ranges );

    stretchlimFromHist( hist, low_value, high_value, low_fract, high_fract, image.rows * image.cols );

    //TODO: scaling to 0..1 here, but should be in stretchlimFromHist?
    unsigned int maxVal = (1 << bitsFromDepth( _image.depth() )) - 1;
    *low_value /= maxVal;
    *high_value /= maxVal;
}
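The helper bitsFromDepth is not part of this excerpt. Given the CV_8UC1/CV_16UC1 assertion above, a plausible reconstruction (an assumption, not the original code) would be:

// Hypothetical sketch of the bitsFromDepth helper referenced above.
static int bitsFromDepth( int depth )
{
    switch (depth)
    {
        case CV_8U:  return 8;   // 256 histogram bins, max value 255
        case CV_16U: return 16;  // 65536 histogram bins, max value 65535
        default:
            CV_Error( cv::Error::StsUnsupportedFormat, "unsupported depth" );
            return 0;            // not reached
    }
}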
Example 3: initSrcAndJoint
void AdaptiveManifoldFilterN::initSrcAndJoint(InputArray src_, InputArray joint_)
{
    srcSize = src_.size();
    smallSize = getSmallSize();
    srcCnNum = src_.channels();

    split(src_, srcCn);
    if (src_.depth() != CV_32F)
    {
        for (int i = 0; i < srcCnNum; i++)
            srcCn[i].convertTo(srcCn[i], CV_32F);
    }

    if (joint_.empty() || joint_.getObj() == src_.getObj())
    {
        jointCnNum = srcCnNum;

        if (src_.depth() == CV_32F)
        {
            jointCn = srcCn;
        }
        else
        {
            jointCn.resize(jointCnNum);
            for (int i = 0; i < jointCnNum; i++)
                srcCn[i].convertTo(jointCn[i], CV_32F, getNormalizer(src_.depth()));
        }
    }
    else
    {
        splitChannels(joint_, jointCn);

        jointCnNum = (int)jointCn.size();
        int jointDepth = jointCn[0].depth();
        Size jointSize = jointCn[0].size();

        CV_Assert( jointSize == srcSize && (jointDepth == CV_8U || jointDepth == CV_16U || jointDepth == CV_32F) );

        if (jointDepth != CV_32F)
        {
            for (int i = 0; i < jointCnNum; i++)
                jointCn[i].convertTo(jointCn[i], CV_32F, getNormalizer(jointDepth));
        }
    }
}
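getNormalizer is defined elsewhere in the filter implementation. Based on the CV_8U/CV_16U/CV_32F depths accepted by the assertion above, a plausible sketch (an assumption, not the excerpted source) is:

// Hypothetical sketch of getNormalizer: maps an integer depth to a factor
// that rescales pixel values into [0, 1] for the joint channels.
static double getNormalizer(int depth)
{
    if (depth == CV_8U)  return 1.0 / 255.0;    // 8-bit range [0, 255]
    if (depth == CV_16U) return 1.0 / 65535.0;  // 16-bit range [0, 65535]
    return 1.0;                                 // CV_32F assumed already normalized
}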
Example 4: filter
void GuidedFilterRefImpl::filter(InputArray src_, OutputArray dst_, int dDepth)
{
    if (dDepth == -1) dDepth = src_.depth();
    dst_.create(height, width, src_.type());
    Mat src = src_.getMat();
    Mat dst = dst_.getMat();
    int cNum = src.channels();  // channels of the filtered source
    // chNum (used below) is presumably a class member: the guide image's channel count

    CV_Assert(height == src.rows && width == src.cols);

    Mat *Ichannels, *exp_I, **vars_I, **alpha, *beta;
    Ichannels = new Mat[cNum];
    exp_I     = new Mat[cNum];
    beta      = new Mat[cNum];
    vars_I    = new Mat *[chNum];
    alpha     = new Mat *[chNum];
    for (int i = 0; i < chNum; ++i)
    {
        vars_I[i] = new Mat[cNum];
        alpha[i]  = new Mat[cNum];
    }

    split(src, Ichannels);
    for (int i = 0; i < cNum; ++i)
    {
        Ichannels[i].convertTo(Ichannels[i], CV_32F);
        meanFilter(Ichannels[i], exp_I[i]);
    }

    computeCovGuideAndSrc(cNum, vars_I, Ichannels, exp_I);

    computeAlpha(cNum, alpha, vars_I);

    computeBeta(cNum, beta, exp_I, alpha);

    for (int i = 0; i < chNum + 1; ++i)
        for (int j = 0; j < cNum; ++j)
            if (i < chNum)
                meanFilter(alpha[i][j], alpha[i][j]);
            else
                meanFilter(beta[j], beta[j]);

    applyTransform(cNum, Ichannels, beta, alpha, dDepth);
    merge(Ichannels, cNum, dst);

    delete [] Ichannels;
    delete [] exp_I;
    delete [] beta;
    for (int i = 0; i < chNum; ++i)
    {
        delete [] vars_I[i];
        delete [] alpha[i];
    }
    delete [] vars_I;
    delete [] alpha;
}
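This reference implementation sits behind the public guided filter in ximgproc. A minimal, hedged usage sketch of that public entry point (file names and parameter values are placeholders):

#include <opencv2/imgcodecs.hpp>
#include <opencv2/ximgproc.hpp>

int main()
{
    cv::Mat guide = cv::imread("guide.png");  // guidance image
    cv::Mat src   = cv::imread("noisy.png");  // image to be filtered
    cv::Mat dst;

    // dDepth = -1 keeps the source depth, mirroring the branch at the top of filter()
    cv::ximgproc::guidedFilter(guide, src, dst, /*radius=*/8, /*eps=*/100.0, /*dDepth=*/-1);

    cv::imwrite("filtered.png", dst);
    return 0;
}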
Example 5:
void cv::GlArrays::setNormalArray(InputArray normal)
{
    int cn = normal.channels();
    int depth = normal.depth();

    CV_Assert(cn == 3);
    CV_Assert(depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F);

    normal_.copyFrom(normal);
}
Example 6: points
cv::Mat cv::viz::vtkTrajectorySource::ExtractPoints(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    Mat points(1, (int)_traj.total(), CV_MAKETYPE(_traj.depth(), 3));
    const Affine3d* dpath = _traj.getMat().ptr<Affine3d>();
    const Affine3f* fpath = _traj.getMat().ptr<Affine3f>();

    if (_traj.depth() == CV_32F)
        for(int i = 0; i < points.cols; ++i)
            points.at<Vec3f>(i) = fpath[i].translation();

    if (_traj.depth() == CV_64F)
        for(int i = 0; i < points.cols; ++i)
            points.at<Vec3d>(i) = dpath[i].translation();

    return points;
}
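The accepted input is a contiguous array of 4x4 pose matrices: CV_64FC(16) corresponds to cv::Affine3d and CV_32FC(16) to cv::Affine3f. A hedged sketch of building such a trajectory (the straight-line path is purely illustrative):

#include <opencv2/core/affine.hpp>
#include <vector>

// Builds a simple straight-line trajectory. Viewed as an InputArray, the
// returned vector has kind STD_VECTOR and type CV_64FC(16), as asserted above.
std::vector<cv::Affine3d> makeTrajectory()
{
    std::vector<cv::Affine3d> path;
    for (int i = 0; i < 10; ++i)
        path.push_back(cv::Affine3d(cv::Vec3d(0, 0, 0),          // rvec: identity rotation
                                    cv::Vec3d(0.1 * i, 0, 0)));  // translate along x
    return path;
}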
Example 7:
void cv::ogl::Arrays::setTexCoordArray(InputArray texCoord)
{
    const int cn = texCoord.channels();
    const int depth = texCoord.depth();

    CV_Assert( cn >= 1 && cn <= 4 );
    CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (texCoord.kind() == _InputArray::OPENGL_BUFFER)
        texCoord_ = texCoord.getOGlBuffer();
    else
        texCoord_.copyFrom(texCoord);
}
Example 8: matchTemplate_CCORR
static bool matchTemplate_CCORR(InputArray _image, InputArray _templ, OutputArray _result)
{
    if (useNaive(_templ.size()))
        return( matchTemplateNaive_CCORR(_image, _templ, _result));
    else
    {
        if(_image.depth() == CV_8U)
        {
            UMat imagef, templf;
            UMat image = _image.getUMat();
            UMat templ = _templ.getUMat();
            image.convertTo(imagef, CV_32F);
            templ.convertTo(templf, CV_32F);
            return(convolve_32F(imagef, templf, _result));
        }
        else
        {
            return(convolve_32F(_image, _templ, _result));
        }
    }
}
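At the public API level this depth promotion is invisible: CV_8U inputs are converted to CV_32F before the FFT-based convolution, and the result matrix is always CV_32F. A hedged usage sketch (file names are placeholders):

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat image = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);  // CV_8U input
    cv::Mat templ = cv::imread("patch.png", cv::IMREAD_GRAYSCALE);
    cv::Mat result;

    // CV_8U inputs are promoted to CV_32F internally; result is CV_32FC1.
    cv::matchTemplate(image, templ, result, cv::TM_CCORR);

    double maxVal; cv::Point maxLoc;
    cv::minMaxLoc(result, nullptr, &maxVal, nullptr, &maxLoc);
    cv::rectangle(image, cv::Rect(maxLoc, templ.size()), cv::Scalar(255));
    return 0;
}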
Example 9: _prepareImgAndDrawKeypoints
static void _prepareImgAndDrawKeypoints( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                                         InputArray img2, const std::vector<KeyPoint>& keypoints2,
                                         InputOutputArray _outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, DrawMatchesFlags flags )
{
    Mat outImg;
    Size img1size = img1.size(), img2size = img2.size();
    Size size( img1size.width + img2size.width, MAX(img1size.height, img2size.height) );
    if( !!(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
    {
        outImg = _outImg.getMat();
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( Error::StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
        outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
    }
    else
    {
        const int cn1 = img1.channels(), cn2 = img2.channels();
        const int out_cn = std::max(3, std::max(cn1, cn2));
        _outImg.create( size, CV_MAKETYPE(img1.depth(), out_cn) );
        outImg = _outImg.getMat();
        outImg = Scalar::all(0);
        outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );

        _prepareImage(img1, outImg1);
        _prepareImage(img2, outImg2);
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat _outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat _outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
        drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
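This helper sits behind cv::drawMatches, so the output canvas inherits img1's depth. A hedged usage sketch of the public call (file names are placeholders):

#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat img1 = cv::imread("left.png",  cv::IMREAD_GRAYSCALE);
    cv::Mat img2 = cv::imread("right.png", cv::IMREAD_GRAYSCALE);

    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::Mat desc1, desc2;
    orb->detectAndCompute(img1, cv::noArray(), kp1, desc1);
    orb->detectAndCompute(img2, cv::noArray(), kp2, desc2);

    cv::BFMatcher matcher(cv::NORM_HAMMING);
    std::vector<cv::DMatch> matches;
    matcher.match(desc1, desc2, matches);

    cv::Mat outImg;  // created as CV_MAKETYPE(img1.depth(), 3) by the helper above
    cv::drawMatches(img1, kp1, img2, kp2, matches, outImg);
    cv::imwrite("matches.png", outImg);
    return 0;
}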
Example 10: switch
void cv::viz::vtkImageMatSource::SetImage(InputArray _image)
{
    CV_Assert(_image.depth() == CV_8U && (_image.channels() == 1 || _image.channels() == 3 || _image.channels() == 4));

    Mat image = _image.getMat();
    this->ImageData->SetDimensions(image.cols, image.rows, 1);
#if VTK_MAJOR_VERSION <= 5
    this->ImageData->SetNumberOfScalarComponents(image.channels());
    this->ImageData->SetScalarTypeToUnsignedChar();
    this->ImageData->AllocateScalars();
#else
    this->ImageData->AllocateScalars(VTK_UNSIGNED_CHAR, image.channels());
#endif

    switch(image.channels())
    {
        case 1: copyGrayImage(image, this->ImageData); break;
        case 3: copyRGBImage (image, this->ImageData); break;
        case 4: copyRGBAImage(image, this->ImageData); break;
    }

    this->ImageData->Modified();
}
Example 11: gatherResult
void AdaptiveManifoldFilterN::gatherResult(InputArray src_, OutputArray dst_)
{
    int dDepth = src_.depth();
    vector<Mat> dstCn(srcCnNum);

    if (!adjust_outliers_)
    {
        for (int i = 0; i < srcCnNum; i++)
            divide(sum_w_ki_Psi_blur_[i], sum_w_ki_Psi_blur_0_, dstCn[i], 1.0, dDepth);

        merge(dstCn, dst_);
    }
    else
    {
        Mat1f& alpha = minDistToManifoldSquared;

        double sigmaMember = -0.5 / (sigma_r_*sigma_r_);
        multiply(minDistToManifoldSquared, sigmaMember, minDistToManifoldSquared);
        cv::exp(minDistToManifoldSquared, alpha);

        for (int i = 0; i < srcCnNum; i++)
        {
            Mat& f = srcCn[i];
            Mat& g = dstCn[i];

            divide(sum_w_ki_Psi_blur_[i], sum_w_ki_Psi_blur_0_, g);

            subtract(g, f, g);
            multiply(alpha, g, g);
            add(g, f, g);

            g.convertTo(g, dDepth);
        }
        merge(dstCn, dst_);
    }
}
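gatherResult runs at the end of the adaptive manifold filter and converts the result back to the source depth. A hedged sketch of the public ximgproc entry point that ultimately drives it (file name and sigma values are placeholders):

#include <opencv2/imgcodecs.hpp>
#include <opencv2/ximgproc.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png");
    cv::Mat dst;

    // With adjust_outliers = true, the alpha-blending branch above is taken.
    cv::ximgproc::amFilter(cv::noArray(), src, dst,
                           /*sigma_s=*/16.0, /*sigma_r=*/0.2, /*adjust_outliers=*/true);

    cv::imwrite("smoothed.png", dst);
    return 0;
}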
Example 12: ocl_Canny
template <bool useCustomDeriv>  // compile-time flag: use the caller-provided dx_/dy_ derivatives
static bool ocl_Canny(InputArray _src, const UMat& dx_, const UMat& dy_, OutputArray _dst, float low_thresh, float high_thresh,
                      int aperture_size, bool L2gradient, int cn, const Size & size)
{
    CV_INSTRUMENT_REGION_OPENCL()

    UMat map;

    const ocl::Device &dev = ocl::Device::getDefault();
    int max_wg_size = (int)dev.maxWorkGroupSize();

    int lSizeX = 32;
    int lSizeY = max_wg_size / 32;

    if (lSizeY == 0)
    {
        lSizeX = 16;
        lSizeY = max_wg_size / 16;
    }
    if (lSizeY == 0)
    {
        lSizeY = 1;
    }

    if (aperture_size == 7)
    {
        low_thresh = low_thresh / 16.0f;
        high_thresh = high_thresh / 16.0f;
    }

    if (L2gradient)
    {
        low_thresh = std::min(32767.0f, low_thresh);
        high_thresh = std::min(32767.0f, high_thresh);

        if (low_thresh > 0)
            low_thresh *= low_thresh;
        if (high_thresh > 0)
            high_thresh *= high_thresh;
    }
    int low = cvFloor(low_thresh), high = cvFloor(high_thresh);

    if (!useCustomDeriv &&
        aperture_size == 3 && !_src.isSubmatrix())
    {
        /*
            stage1_with_sobel:
                Sobel operator
                Calc magnitudes
                Non maxima suppression
                Double thresholding
        */
        char cvt[40];
        ocl::Kernel with_sobel("stage1_with_sobel", ocl::imgproc::canny_oclsrc,
                               format("-D WITH_SOBEL -D cn=%d -D TYPE=%s -D convert_floatN=%s -D floatN=%s -D GRP_SIZEX=%d -D GRP_SIZEY=%d%s",
                                      cn, ocl::memopTypeToStr(_src.depth()),
                                      ocl::convertTypeStr(_src.depth(), CV_32F, cn, cvt),
                                      ocl::typeToStr(CV_MAKE_TYPE(CV_32F, cn)),
                                      lSizeX, lSizeY,
                                      L2gradient ? " -D L2GRAD" : ""));
        if (with_sobel.empty())
            return false;

        UMat src = _src.getUMat();
        map.create(size, CV_32S);
        with_sobel.args(ocl::KernelArg::ReadOnly(src),
                        ocl::KernelArg::WriteOnlyNoSize(map),
                        (float) low, (float) high);

        size_t globalsize[2] = { (size_t)size.width, (size_t)size.height },
                localsize[2] = { (size_t)lSizeX, (size_t)lSizeY };

        if (!with_sobel.run(2, globalsize, localsize, false))
            return false;
    }
    else
    {
        /*
            stage1_without_sobel:
                Calc magnitudes
                Non maxima suppression
                Double thresholding
        */
        double scale = 1.0;
        if (aperture_size == 7)
        {
            scale = 1 / 16.0;
        }

        UMat dx, dy;
        if (!useCustomDeriv)
        {
            Sobel(_src, dx, CV_16S, 1, 0, aperture_size, scale, 0, BORDER_REPLICATE);
            Sobel(_src, dy, CV_16S, 0, 1, aperture_size, scale, 0, BORDER_REPLICATE);
        }
        else
        {
            dx = dx_;
            dy = dy_;
        }
//......... the rest of the code is omitted here .........
Example 13: ipp_Deriv
static bool ipp_Deriv(InputArray _src, OutputArray _dst, int dx, int dy, int ksize, double scale, double delta, int borderType)
{
#ifdef HAVE_IPP_IW
    CV_INSTRUMENT_REGION_IPP()

    ::ipp::IwiSize size(_src.size().width, _src.size().height);
    IppDataType   srcType   = ippiGetDataType(_src.depth());
    IppDataType   dstType   = ippiGetDataType(_dst.depth());
    int           channels  = _src.channels();
    bool          useScale  = false;
    bool          useScharr = false;

    if(channels != _dst.channels() || channels > 1)
        return false;

    if(fabs(delta) > FLT_EPSILON || fabs(scale-1) > FLT_EPSILON)
        useScale = true;

    if(ksize <= 0)
    {
        ksize     = 3;
        useScharr = true;
    }

    IppiMaskSize maskSize = ippiGetMaskSize(ksize, ksize);
    if((int)maskSize < 0)
        return false;

#if IPP_VERSION_X100 <= 201703
    // Bug with mirror wrap
    if(borderType == BORDER_REFLECT_101 && (ksize/2+1 > size.width || ksize/2+1 > size.height))
        return false;
#endif

    IwiDerivativeType derivType = ippiGetDerivType(dx, dy, (useScharr)?false:true);
    if((int)derivType < 0)
        return false;

    // Acquire data and begin processing
    try
    {
        Mat src = _src.getMat();
        Mat dst = _dst.getMat();
        ::ipp::IwiImage iwSrc = ippiGetImage(src);
        ::ipp::IwiImage iwDst = ippiGetImage(dst);
        ::ipp::IwiImage iwSrcProc = iwSrc;
        ::ipp::IwiImage iwDstProc = iwDst;
        ::ipp::IwiBorderSize borderSize(maskSize);
        ::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
        if(!ippBorder)
            return false;

        if(srcType == ipp8u && dstType == ipp8u)
        {
            iwDstProc.Alloc(iwDst.m_size, ipp16s, channels);
            useScale = true;
        }
        else if(srcType == ipp8u && dstType == ipp32f)
        {
            iwSrc -= borderSize;
            iwSrcProc.Alloc(iwSrc.m_size, ipp32f, channels);
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiScale, iwSrc, iwSrcProc, 1, 0, ::ipp::IwiScaleParams(ippAlgHintFast));
            iwSrcProc += borderSize;
        }

        if(useScharr)
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterScharr, iwSrcProc, iwDstProc, derivType, maskSize, ::ipp::IwDefault(), ippBorder);
        else
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterSobel, iwSrcProc, iwDstProc, derivType, maskSize, ::ipp::IwDefault(), ippBorder);

        if(useScale)
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiScale, iwDstProc, iwDst, scale, delta, ::ipp::IwiScaleParams(ippAlgHintFast));
    }
    catch (::ipp::IwException)
    {
        return false;
    }

    return true;
#else
    CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(dx); CV_UNUSED(dy); CV_UNUSED(ksize); CV_UNUSED(scale); CV_UNUSED(delta); CV_UNUSED(borderType);
    return false;
#endif
}
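ipp_Deriv is an internal IPP fast path behind cv::Sobel and cv::Scharr; note how ksize <= 0 selects the Scharr kernel. A hedged sketch of the public calls it accelerates (file name is a placeholder):

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png", cv::IMREAD_GRAYSCALE);  // single channel, CV_8U
    cv::Mat dx, dy;

    // An 8u source with a 32f destination exercises the srcType==ipp8u / dstType==ipp32f branch.
    cv::Sobel(src, dx, CV_32F, 1, 0, /*ksize=*/3);

    // Scharr corresponds to the internal ksize <= 0 path above.
    cv::Scharr(src, dy, CV_32F, 0, 1);
    return 0;
}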
Example 14: solvePnP
bool solvePnP( InputArray _opoints, InputArray _ipoints,
               InputArray _cameraMatrix, InputArray _distCoeffs,
               OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, int flags )
{
    CV_INSTRUMENT_REGION()

    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    Mat rvec, tvec;
    if( flags != SOLVEPNP_ITERATIVE )
        useExtrinsicGuess = false;

    if( useExtrinsicGuess )
    {
        int rtype = _rvec.type(), ttype = _tvec.type();
        Size rsize = _rvec.size(), tsize = _tvec.size();
        CV_Assert( (rtype == CV_32F || rtype == CV_64F) &&
                   (ttype == CV_32F || ttype == CV_64F) );
        CV_Assert( (rsize == Size(1, 3) || rsize == Size(3, 1)) &&
                   (tsize == Size(1, 3) || tsize == Size(3, 1)) );
    }
    else
    {
        int mtype = CV_64F;
        // use CV_32F if all PnP inputs are CV_32F and outputs are empty
        if (_ipoints.depth() == _cameraMatrix.depth() && _ipoints.depth() == _opoints.depth() &&
            _rvec.empty() && _tvec.empty())
            mtype = _opoints.depth();

        _rvec.create(3, 1, mtype);
        _tvec.create(3, 1, mtype);
    }
    rvec = _rvec.getMat();
    tvec = _tvec.getMat();

    Mat cameraMatrix0 = _cameraMatrix.getMat();
    Mat distCoeffs0 = _distCoeffs.getMat();
    Mat cameraMatrix = Mat_<double>(cameraMatrix0);
    Mat distCoeffs = Mat_<double>(distCoeffs0);
    bool result = false;

    if (flags == SOLVEPNP_EPNP || flags == SOLVEPNP_DLS || flags == SOLVEPNP_UPNP)
    {
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        epnp PnP(cameraMatrix, opoints, undistortedPoints);

        Mat R;
        PnP.compute_pose(R, tvec);
        Rodrigues(R, rvec);
        result = true;
    }
    else if (flags == SOLVEPNP_P3P)
    {
        CV_Assert( npoints == 4 );
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        p3p P3Psolver(cameraMatrix);

        Mat R;
        result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
        if (result)
            Rodrigues(R, rvec);
    }
    else if (flags == SOLVEPNP_AP3P)
    {
        CV_Assert( npoints == 4 );
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
        ap3p P3Psolver(cameraMatrix);

        Mat R;
        result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
        if (result)
            Rodrigues(R, rvec);
    }
    else if (flags == SOLVEPNP_ITERATIVE)
    {
        CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
        CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
        CvMat c_rvec = rvec, c_tvec = tvec;
        cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
                                     c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
                                     &c_rvec, &c_tvec, useExtrinsicGuess );
        result = true;
    }
    /*else if (flags == SOLVEPNP_DLS)
    {
        Mat undistortedPoints;
        undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);

        dls PnP(opoints, undistortedPoints);

        Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
        bool result = PnP.compute_pose(R, tvec);
        if (result)
            Rodrigues(R, rvec);
        return result;
//......... the rest of the code is omitted here .........
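A hedged usage sketch of the public solvePnP call. The depth check in the else branch above is why all-CV_32F inputs with empty outputs yield CV_32F rvec/tvec; all point, intrinsics, and size values below are placeholders:

#include <opencv2/calib3d.hpp>
#include <vector>

int main()
{
    // Four coplanar 3D points (a 10x10 square) and their projections; placeholder data.
    std::vector<cv::Point3f> objectPoints = {
        {0, 0, 0}, {10, 0, 0}, {10, 10, 0}, {0, 10, 0}
    };
    std::vector<cv::Point2f> imagePoints = {
        {320, 240}, {420, 242}, {418, 338}, {322, 336}
    };

    // Placeholder pinhole intrinsics.
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320,
                                           0, 800, 240,
                                           0,   0,   1);

    cv::Mat rvec, tvec;  // left empty, so mtype follows the depth-matching logic above
    bool ok = cv::solvePnP(objectPoints, imagePoints, K, cv::noArray(),
                           rvec, tvec, /*useExtrinsicGuess=*/false, cv::SOLVEPNP_ITERATIVE);
    return ok ? 0 : 1;
}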