本文整理汇总了C++中InputArray::channels方法的典型用法代码示例。如果您正苦于以下问题:C++ InputArray::channels方法的具体用法?C++ InputArray::channels怎么用?C++ InputArray::channels使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类InputArray的用法示例。
在下文中一共展示了InputArray::channels方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: height
// Reference guided-filter implementation: splits the guide image into
// per-channel CV_32F planes, precomputes their means, and builds the
// guide covariance matrices used by the filtering passes.
GuidedFilterRefImpl::GuidedFilterRefImpl(InputArray _guide, int _rad, double _eps) :
height(_guide.rows()), width(_guide.cols()), rad(_rad), chNum(_guide.channels()), eps(_eps)
{
    Mat guide = _guide.getMat();
    CV_Assert(chNum > 0 && chNum <= 3);

    // Per-channel planes and their box-filtered means.
    channels = new Mat[chNum];
    exps     = new Mat[chNum];

    // chNum x chNum grids of Mats for the covariance terms and their inverse.
    A    = new Mat *[chNum];
    vars = new Mat *[chNum];
    for (int c = 0; c < chNum; ++c)
    {
        A[c]    = new Mat[chNum];
        vars[c] = new Mat[chNum];
    }

    split(guide, channels);
    for (int c = 0; c < chNum; ++c)
    {
        channels[c].convertTo(channels[c], CV_32F);
        meanFilter(channels[c], exps[c]);
    }

    computeCovGuide();
    computeCovGuideInv();
}
示例2: guiNLMUpsample
// Interactive demo: iteratively NLM-denoise `srcimage`, upsample the result
// by `resizeFactor` (hqx or Lanczos), optionally apply a shock filter, and
// blend with the reference image `ref` for visual comparison.
// Runs until 'q' is pressed; 'f' toggles the blend weight between 0 and 100.
void guiNLMUpsample(InputArray srcimage, OutputArray dest, int resizeFactor, InputArray ref)
{
    string windowName = "weighted mode";
    namedWindow(windowName);

    Mat src = srcimage.getMat();

    // Trackbar-controlled parameters.
    int alpha = 0; createTrackbar("a",windowName, &alpha, 100);       // blend weight with ref (percent)
    int sw = 0;    createTrackbar("sw",windowName, &sw, 1);           // upsampler: 0 = hqx, 1 = Lanczos
    int sw2 = 0;   createTrackbar("sw2",windowName, &sw2, 1);         // enable shock filter
    int tr = 0;    createTrackbar("tr",windowName, &tr, 10);          // NLM template radius
    int sr = 3;    createTrackbar("sr",windowName, &sr, 30);          // NLM search radius
    int h = 100;   createTrackbar("h/10",windowName, &h, 255);        // NLM strength x10
    int iter = 2;  createTrackbar("iteration",windowName, &iter, 10); // denoise iterations

    int key = 0;
    while(key!='q')
    {
        Mat srctemp;
        {
            // NOTE: a dead `medianBlur(srcimage, med, 1)` call was removed
            // here: its output was never used (and ksize=1 is a no-op).
            src.copyTo(srctemp);
            CalcTime t;
            // Iterated non-local-means denoising (gray or color variant).
            for(int i=0;i<iter;i++)
            {
                Mat tmp = srctemp.clone();
                if(srcimage.channels()==1) fastNlMeansDenoising(srctemp, tmp, h/10.f, 2*tr+1, 2*sr+1);
                else fastNlMeansDenoisingColored(srctemp, tmp, (float)h/10.f,(float)h/10.f, 2*tr+1, 2*sr+1);
                tmp.copyTo(srctemp);
            }
            //upsampling function
            if(sw==0) hqx(srctemp, dest, resizeFactor);
            else resize(srctemp, dest, Size(src.cols*resizeFactor, src.rows*resizeFactor), 0,0, INTER_LANCZOS4);
        }
        //shock filter
        if(sw2!=0)
        {
            Mat a = dest.getMat();
            blurRemoveMinMax(a,a,2);
            a.copyTo(dest);
        }
        //blending reference image for debug
        alphaBlend(ref, dest, alpha/100.0, dest);
        imshow(windowName, dest);

        key = waitKey(30);
        if(key=='f')
        {
            // Quick A/B toggle of the blend weight.
            alpha = (alpha != 0) ? 0:100;
            setTrackbarPos("a", windowName, alpha);
        }
    }
    destroyWindow(windowName);
}
示例3: result_
// Convolve a 32F image with a template via DFT, writing the valid-region
// result into _result. Multi-channel images are reshaped to a single
// interleaved channel for the DFT, after which the first channel of the
// widened result is extracted. Returns false if the DFT convolution fails.
static bool convolve_32F(InputArray _image, InputArray _templ, OutputArray _result)
{
    _result.create(_image.rows() - _templ.rows() + 1, _image.cols() - _templ.cols() + 1, CV_32F);

    if (_image.channels() == 1)
        return(convolve_dft(_image, _templ, _result));

    // Multi-channel: run the DFT on a planar view, then pull out the first
    // channel of the cn-times-wider intermediate result.
    UMat image = _image.getUMat();
    UMat templ = _templ.getUMat();
    UMat result_(image.rows-templ.rows+1,(image.cols-templ.cols+1)*image.channels(), CV_32F);
    bool ok = convolve_dft(image.reshape(1), templ.reshape(1), result_);
    if (!ok)
        return false;
    // (Removed a dead `UMat result = _result.getUMat();` — it was never used.)
    return (extractFirstChannel_32F(result_, _result, _image.channels()));
}
示例4:
// Set the vertex color array. `bgra` may only be true for 4-channel data;
// 3-channel input is accepted only in non-BGRA order.
void cv::GlArrays::setColorArray(InputArray color, bool bgra)
{
    const int cn = color.channels();
    CV_Assert(cn == 4 || (cn == 3 && !bgra));

    color_.copyFrom(color);
    bgra_ = bgra;
}
示例5: calcBtvRegularization
// Compute the BTV (bilateral total variation) regularization term of _src
// into _dst, dispatching on channel count (1 or 3 supported).
void calcBtvRegularization(InputArray _src, OutputArray _dst, int btvKernelSize,
                           const std::vector<float>& btvWeights, const UMat & ubtvWeights)
{
    // OpenCL fast path when the destination is a UMat.
    CV_OCL_RUN(_dst.isUMat(),
               ocl_calcBtvRegularization(_src, _dst, btvKernelSize, ubtvWeights))

    CV_UNUSED(ubtvWeights);

    switch (_src.channels())
    {
    case 1:
        calcBtvRegularizationImpl<float>(_src, _dst, btvKernelSize, btvWeights);
        break;
    case 3:
        calcBtvRegularizationImpl<Point3f>(_src, _dst, btvKernelSize, btvWeights);
        break;
    default:
        CV_Error(Error::StsBadArg, "Unsupported number of channels in _src");
    }
}
示例6:
// Bind `color` (3 or 4 channels) as the vertex color array: an existing
// OpenGL buffer is referenced directly, anything else is copied in.
void cv::ogl::Arrays::setColorArray(InputArray color)
{
    const int cn = color.channels();
    CV_Assert( cn == 3 || cn == 4 );

    if (color.kind() != _InputArray::OPENGL_BUFFER)
        color_.copyFrom(color);
    else
        color_ = color.getOGlBuffer();
}
示例7: compareRange
// Build destMask selecting pixels whose gray value lies in
// [validMin, validMax]. Color input is converted to gray first;
// single-channel input is used as-is.
void compareRange(InputArray src, OutputArray destMask, const double validMin, const double validMax)
{
    Mat gray;
    if (src.channels() != 1)
        cvtColor(src, gray, COLOR_BGR2GRAY);
    else
        gray = src.getMat();

    // Intersect the two one-sided range masks.
    Mat geMin;
    Mat leMax;
    compare(gray, validMin, geMin, cv::CMP_GE);
    compare(gray, validMax, leMax, cv::CMP_LE);
    bitwise_and(geMin, leMax, destMask);
}
示例8: _prepareImgAndDrawKeypoints
// Lay out img1 and img2 side by side in _outImg (allocating it unless
// DRAW_OVER_OUTIMG requests reuse of the caller's canvas), and optionally
// draw each image's keypoints. outImg1/outImg2 are returned as ROI views
// into the combined canvas.
static void _prepareImgAndDrawKeypoints( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                                         InputArray img2, const std::vector<KeyPoint>& keypoints2,
                                         InputOutputArray _outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, DrawMatchesFlags flags )
{
    Mat outImg;
    Size img1size = img1.size(), img2size = img2.size();
    Size size( img1size.width + img2size.width, MAX(img1size.height, img2size.height) );

    // Left and right halves of the combined canvas.
    const Rect roi1(0, 0, img1size.width, img1size.height);
    const Rect roi2(img1size.width, 0, img2size.width, img2size.height);

    if( !!(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
    {
        // Reuse the caller-provided canvas; just check it is large enough.
        outImg = _outImg.getMat();
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( Error::StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
        outImg1 = outImg( roi1 );
        outImg2 = outImg( roi2 );
    }
    else
    {
        // Allocate a fresh zeroed canvas wide enough for both images,
        // then copy each input into its half.
        const int cn1 = img1.channels(), cn2 = img2.channels();
        const int out_cn = std::max(3, std::max(cn1, cn2));
        _outImg.create(size, CV_MAKETYPE(img1.depth(), out_cn));
        outImg = _outImg.getMat();
        outImg = Scalar::all(0);
        outImg1 = outImg( roi1 );
        outImg2 = outImg( roi2 );

        _prepareImage(img1, outImg1);
        _prepareImage(img2, outImg2);
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat _outImg1 = outImg( roi1 );
        drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat _outImg2 = outImg( roi2 );
        drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
示例9: switch
// Load an 8-bit image (1, 3, or 4 channels) into this source's vtkImageData,
// allocating the scalar buffer to match the image size and channel count.
void cv::viz::vtkImageMatSource::SetImage(InputArray _image)
{
// Only CV_8U input is supported; channel count selects the copy helper below.
CV_Assert(_image.depth() == CV_8U && (_image.channels() == 1 || _image.channels() == 3 || _image.channels() == 4));
Mat image = _image.getMat();
// VTK image data is 3D; a single image is one z-slice of depth 1.
this->ImageData->SetDimensions(image.cols, image.rows, 1);
#if VTK_MAJOR_VERSION <= 5
// Legacy VTK allocation API: component count and scalar type set separately.
this->ImageData->SetNumberOfScalarComponents(image.channels());
this->ImageData->SetScalarTypeToUnsignedChar();
this->ImageData->AllocateScalars();
#else
this->ImageData->AllocateScalars(VTK_UNSIGNED_CHAR, image.channels());
#endif
// Dispatch to the copy helper matching the channel count.
switch(image.channels())
{
case 1: copyGrayImage(image, this->ImageData); break;
case 3: copyRGBImage (image, this->ImageData); break;
case 4: copyRGBAImage(image, this->ImageData); break;
}
// Mark the data modified so downstream VTK pipeline stages re-execute.
this->ImageData->Modified();
}
示例10: myDetector
// Cascade-classifier face detector callback: grayscale conversion plus
// histogram equalization, then multi-scale detection. Detected rectangles
// are written to `faces` as a Mat of Rects. Always returns true.
bool myDetector(InputArray image, OutputArray faces, CascadeClassifier *face_cascade)
{
    Mat gray;
    if (image.channels() == 1)
        gray = image.getMat().clone();
    else
        cvtColor(image, gray, COLOR_BGR2GRAY);

    // Normalize contrast before running the cascade.
    equalizeHist(gray, gray);

    std::vector<Rect> detections;
    face_cascade->detectMultiScale(gray, detections, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30));
    Mat(detections).copyTo(faces);
    return true;
}
示例11: calcBtvRegularization
// Compute the BTV (bilateral total variation) regularization term of _src
// into _dst via a function table indexed by channel count (1 or 3 supported).
void calcBtvRegularization(InputArray _src, OutputArray _dst, int btvKernelSize,
                           const std::vector<float>& btvWeights, const UMat & ubtvWeights)
{
    // OpenCL fast path when the destination is a UMat.
    CV_OCL_RUN(_dst.isUMat(),
               ocl_calcBtvRegularization(_src, _dst, btvKernelSize, ubtvWeights))

    (void)ubtvWeights;

    typedef void (*func_t)(InputArray _src, OutputArray _dst, int btvKernelSize, const std::vector<float>& btvWeights);
    static const func_t funcs[] =
    {
        0, calcBtvRegularizationImpl<float>, 0, calcBtvRegularizationImpl<Point3f>, 0
    };

    // Bound-check the table index: the original code read funcs[cn] without
    // a range check, which is an out-of-bounds read for cn > 4.
    const int cn = _src.channels();
    CV_Assert(cn >= 0 && cn < (int)(sizeof(funcs) / sizeof(funcs[0])));

    const func_t func = funcs[cn];
    CV_Assert(func != 0);

    func(_src, _dst, btvKernelSize, btvWeights);
}
示例12: initSrcAndJoint
// Split the filtering source and the (optional) joint/guide image into
// per-channel CV_32F planes, reusing the source planes when the joint image
// is absent or is the very same array object as the source.
void AdaptiveManifoldFilterN::initSrcAndJoint(InputArray src_, InputArray joint_)
{
srcSize = src_.size();
smallSize = getSmallSize();
srcCnNum = src_.channels();
split(src_, srcCn);
// Source planes are converted in place to CV_32F (no value scaling here).
if (src_.depth() != CV_32F)
{
for (int i = 0; i < srcCnNum; i++)
srcCn[i].convertTo(srcCn[i], CV_32F);
}
// No distinct joint image (empty, or the same underlying object as src):
// derive the joint channels from the source planes.
if (joint_.empty() || joint_.getObj() == src_.getObj())
{
jointCnNum = srcCnNum;
if (src_.depth() == CV_32F)
{
// Source was already float: share its planes directly.
jointCn = srcCn;
}
else
{
// Scale by the normalizer for the original (integer) depth so joint
// values land in the range the filter expects.
jointCn.resize(jointCnNum);
for (int i = 0; i < jointCnNum; i++)
srcCn[i].convertTo(jointCn[i], CV_32F, getNormalizer(src_.depth()));
}
}
else
{
// Separate joint image: split it and validate size/depth compatibility.
splitChannels(joint_, jointCn);
jointCnNum = (int)jointCn.size();
int jointDepth = jointCn[0].depth();
Size jointSize = jointCn[0].size();
CV_Assert( jointSize == srcSize && (jointDepth == CV_8U || jointDepth == CV_16U || jointDepth == CV_32F) );
// Normalize integer joint depths into CV_32F.
if (jointDepth != CV_32F)
{
for (int i = 0; i < jointCnNum; i++)
jointCn[i].convertTo(jointCn[i], CV_32F, getNormalizer(jointDepth));
}
}
}
示例13: ocl_norm
// OpenCL implementation of cv::norm for NORM_INF / NORM_L1 / NORM_L2 /
// NORM_L2SQR, optionally masked. Returns false when the case is unsupported
// so the caller can fall back to the CPU path.
static bool ocl_norm( InputArray _src, int normType, InputArray _mask, double & result )
{
const ocl::Device & d = ocl::Device::getDefault();
#ifdef __ANDROID__
// NOTE(review): NVidia devices on Android are excluded here — presumably a
// known driver issue; confirm before removing this early-out.
if (d.isNVidia())
return false;
#endif
const int cn = _src.channels();
if (cn > 4)
return false;
int type = _src.type(), depth = CV_MAT_DEPTH(type);
bool doubleSupport = d.doubleFPConfig() > 0,
haveMask = _mask.kind() != _InputArray::NONE;
// Reject unsupported norm types, and CV_64F data on devices without doubles.
if ( !(normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR) ||
(!doubleSupport && depth == CV_64F))
return false;
UMat src = _src.getUMat();
if (normType == NORM_INF)
{
// NORM_INF is the maximum absolute value: reuse the minMaxIdx kernel.
if (!ocl_minMaxIdx(_src, NULL, &result, NULL, NULL, _mask,
std::max(depth, CV_32S), depth != CV_8U && depth != CV_16U))
return false;
}
else if (normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR)
{
Scalar sc;
bool unstype = depth == CV_8U || depth == CV_16U;
// Without a mask the image can be flattened to one channel for a single
// reduction; with a mask, per-channel sums are accumulated and added below.
if ( !ocl_sum(haveMask ? src : src.reshape(1), sc, normType == NORM_L2 || normType == NORM_L2SQR ?
OCL_OP_SUM_SQR : (unstype ? OCL_OP_SUM : OCL_OP_SUM_ABS), _mask) )
return false;
double s = 0.0;
for (int i = 0; i < (haveMask ? cn : 1); ++i)
s += sc[i];
// L2 takes the square root of the summed squares; L1/L2SQR use the sum.
result = normType == NORM_L1 || normType == NORM_L2SQR ? s : std::sqrt(s);
}
return true;
}
示例14: ocl_matchTemplate
// OpenCL template-matching dispatcher: selects the implementation for the
// requested comparison method. Returns false (caller falls back to the CPU
// path) when the image has more than 4 channels.
static bool ocl_matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, int method)
{
    int cn = _img.channels();

    if (cn > 4)
        return false;

    typedef bool (*Caller)(InputArray _img, InputArray _templ, OutputArray _result);

    static const Caller callers[] =
    {
        matchTemplate_SQDIFF, matchTemplate_SQDIFF_NORMED, matchTemplate_CCORR,
        matchTemplate_CCORR_NORMED, matchTemplate_CCOEFF, matchTemplate_CCOEFF_NORMED
    };

    // Bound-check the dispatch index: the original code read callers[method]
    // without a range check, an out-of-bounds read for invalid method values.
    CV_Assert(method >= 0 && method < (int)(sizeof(callers) / sizeof(callers[0])));

    const Caller caller = callers[method];
    return caller(_img, _templ, _result);
}
示例15: upscale
// Upsample _src by an integer factor into _dst. Supports 1-, 3-, and
// 4-channel images; uses the OpenCL implementation when _dst is a UMat.
void upscale(InputArray _src, OutputArray _dst, int scale)
{
    const int cn = _src.channels();
    CV_Assert( cn == 1 || cn == 3 || cn == 4 );

    // OpenCL fast path when the destination is a UMat.
    CV_OCL_RUN(_dst.isUMat(),
               ocl_upscale(_src, _dst, scale))

    // Dispatch on channel count (the assert above restricts cn to 1/3/4).
    switch (cn)
    {
    case 1:
        upscaleImpl<float>(_src, _dst, scale);
        break;
    case 3:
        upscaleImpl<Point3f>(_src, _dst, scale);
        break;
    default:
        upscaleImpl<Point4f>(_src, _dst, scale);
        break;
    }
}