本文整理汇总了C++中UMat::channels方法的典型用法代码示例。如果您正苦于以下问题:C++ UMat::channels方法的具体用法?C++ UMat::channels怎么用?C++ UMat::channels使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类UMat
的用法示例。
在下文中一共展示了UMat::channels方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: ocl_fastNlMeansDenoisingColored
// OpenCL path for colored non-local-means denoising: convert the input to
// Lab, denoise the luminance (L) and chroma (ab) planes separately with their
// own filter strengths, then convert the result back to the caller's space.
// Returns true on success.
static bool ocl_fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst,
float h, float hForColorComponents,
int templateWindowSize, int searchWindowSize)
{
    UMat bgr = _src.getUMat();
    _dst.create(bgr.size(), bgr.type());
    UMat out = _dst.getUMat();

    // Work in Lab so luma and chroma can use different denoising strengths.
    UMat lab;
    cvtColor(bgr, lab, COLOR_LBGR2Lab);

    // Split Lab into a 1-channel L plane and a 2-channel ab plane.
    UMat luma(bgr.size(), CV_8U);
    UMat chroma(bgr.size(), CV_8UC2);
    std::vector<UMat> planes(2), denoised(2);
    planes[0] = luma;
    planes[1] = chroma;
    denoised[0].create(bgr.size(), CV_8U);
    denoised[1].create(bgr.size(), CV_8UC2);

    // Channel 0 -> planes[0] ch 0; channels 1,2 -> planes[1] ch 0,1.
    int from_to[] = { 0,0, 1,1, 2,2 };
    mixChannels(std::vector<UMat>(1, lab), planes, from_to, 3);

    // Luminance uses h; both chroma channels share hForColorComponents.
    fastNlMeansDenoising(planes[0], denoised[0], h, templateWindowSize, searchWindowSize);
    fastNlMeansDenoising(planes[1], denoised[1], hForColorComponents, templateWindowSize, searchWindowSize);

    // Reassemble the denoised Lab image and convert back to LBGR.
    UMat lab_denoised(bgr.size(), CV_8UC3);
    mixChannels(denoised, std::vector<UMat>(1, lab_denoised), from_to, 3);
    cvtColor(lab_denoised, out, COLOR_Lab2LBGR, bgr.channels());
    return true;
}
示例2: convertToCn
// Convert `src` to the requested `type`, reusing `buf0`/`buf1` as scratch
// buffers to avoid reallocations. Returns `src` itself when no conversion is
// needed, otherwise whichever buffer holds the final result.
UMat cv::superres::convertToType(const UMat& src, int type, UMat& buf0, UMat& buf1)
{
    CV_INSTRUMENT_REGION();

    // Fast path: already the requested type.
    if (src.type() == type)
        return src;

    const int targetDepth = CV_MAT_DEPTH(type);
    const int targetCn = CV_MAT_CN(type);

    // Depth already matches: only the channel count needs changing.
    if (src.depth() == targetDepth)
    {
        convertToCn(src, buf0, targetCn);
        return buf0;
    }

    // Channel count already matches: only the depth needs changing.
    if (src.channels() == targetCn)
    {
        convertToDepth(src, buf1, targetDepth);
        return buf1;
    }

    // Both differ: fix the channel count first, then the depth.
    convertToCn(src, buf0, targetCn);
    convertToDepth(buf0, buf1, targetDepth);
    return buf1;
}
示例3: result_
// DFT-based 32F convolution/correlation of _templ over _image, writing the
// single-channel CV_32F result into _result. Multi-channel inputs are
// flattened (channels become extra columns) before the DFT pass, and only the
// first channel of the widened result is kept. Returns false if the DFT
// convolution fails.
// Fix: the original declared `UMat result = _result.getUMat();` but never
// used it — the dead local has been removed.
static bool convolve_32F(InputArray _image, InputArray _templ, OutputArray _result)
{
    // 'Valid' correlation output size: (W - w + 1) x (H - h + 1).
    _result.create(_image.rows() - _templ.rows() + 1, _image.cols() - _templ.cols() + 1, CV_32F);

    if (_image.channels() == 1)
        return convolve_dft(_image, _templ, _result);

    UMat image = _image.getUMat();
    UMat templ = _templ.getUMat();

    // Reshape to 1 channel: each pixel's channels become adjacent columns,
    // so the intermediate result is `channels` times wider.
    UMat result_(image.rows - templ.rows + 1, (image.cols - templ.cols + 1) * image.channels(), CV_32F);
    if (!convolve_dft(image.reshape(1), templ.reshape(1), result_))
        return false;

    // Keep only the first channel's correlation values in _result.
    return extractFirstChannel_32F(result_, _result, _image.channels());
}
示例4: main
// Demo entry point: open a camera (no args) or a file (argv[1]), then loop
// over frames running an interactive Canny demo driven by trackbars.
// Relies on file-scope globals declared elsewhere in this file (imgInp,
// imgGray, alpha_slider, p2_slider, ps_array, on_trackbar_Canny, ...).
// NOTE(review): this function is truncated by the example aggregator (see the
// omission marker at the end) — the loop and function are never closed here.
int main(int argc, const char** argv)
{
// declare capture engine that can read images from camera or file
VideoCapture cap;
// if no cmd-line arguments other than the app name then camera flag is raised
bool camera = (1==argc);
if(camera)
// call open(int) method to init capture engine to read from camera
// In case of many cameras the index of camera can be passed as argument.
cap.open(0);
else
// call open(char*) method to init capture engine to read images from file
// the argument is file name that will be opened for reading
// it can be name of video file or still image
cap.open(argv[1]);
// check that capture engine open source (camera or file) successfully
if (!cap.isOpened())
{
printf("can not open %s\n",camera?"camera":argv[1]);
printf("trying to open test.jpg\n");
// in case of fail try to open simple test file to be able check pipeline working
cap.open("test.jpg");
if (!cap.isOpened())
{
printf("can not open test.jpg\n");
return EXIT_FAILURE;
}
}
// prepare for processing images
// declare mat objects to store input, intermediate and output images
// this is main loop over all input frames
for (;;)
{
// get next frame from input stream
//cap >> imgInp;
// NOTE(review): imread() takes a file-name string, not a VideoCapture, so
// this line as written should not compile; the commented-out `cap >> imgInp`
// above is almost certainly what was intended — confirm before use. Also,
// CV_LOAD_IMAGE_ANYDEPTH is the legacy C flag (modern name: IMREAD_ANYDEPTH).
imgInp = imread(cap, CV_LOAD_IMAGE_ANYDEPTH);
// check read result
// in case of reading from file the loop will be break after last frame is read and processed
// in case of camera this condition is always false until something wrong with camera
if (imgInp.empty())
{
// wait until user press any key and the break the loop
// we need to wait to ge
waitKey(0);
break;
}
// show the input image on the screen using opencv function
// this call creates window named "Input" and draws imgInp inside the window
imshow("Input", imgInp);
// convert input image into intermediate grayscale image
if (imgInp.channels() > 1)
{
printf("Preceding with blanks: %10d \n", imgInp.channels());
cvtColor(imgInp, imgGray, COLOR_BGR2GRAY);
// NOTE(review): split() normally writes to a std::vector<Mat> or a Mat
// array; a single Mat destination (and `spl[0]` indexing below) looks
// suspect — confirm this compiles/behaves as intended.
Mat spl;
split(imgInp, spl);
imshow("spl1", spl[0]);//b
imshow("spl2", spl[1]);//g
imshow("spl3", spl[2]);//r
}
else{ imgGray = imgInp; }
/// Initialize values
alpha_slider = 0;
p2_slider = 0;
// run canny processing on grayscale image
//Canny(imgGray, imgOut, 50, 150);
// show the result on the screen using opencv function
// this call creates window named "Canny" and draw imgOut inside the window
//imshow("Canny", imgOut);
/// Create Windows
namedWindow("Linear Blend",WINDOW_NORMAL);
/// Create Trackbars
char TrackbarName[50];
char TrackbarName2[50];
// NOTE(review): "/n" in the labels is likely a typo for "\n"; kept as-is
// since it is user-visible text. sprintf into a 50-byte buffer is also
// fragile — snprintf would be safer.
sprintf(TrackbarName, "Thresh#1 /n x %d", alpha_slider_max);
sprintf(TrackbarName2, "Thresh#2 /n 2 x %d", alpha_slider_max);
//createTrackbar(TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar);
createTrackbar(TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar_Canny);
createTrackbar(TrackbarName2, "Linear Blend", &p2_slider, p2_slider_max, on_trackbar_Canny);
ps_array[0] = alpha_slider;
ps_array[1] = p2_slider;
/// Show some stuff
on_trackbar_Canny(0,0);
//on_trackbar(alpha_slider, 0);
//......... part of the code is omitted here .........
示例5: meanShift
// Iteratively shift `window` toward the centroid of the probability mass in
// _probImage (a single-channel back-projection), stopping when the step is
// smaller than the criteria epsilon or the iteration budget is exhausted.
// Updates `window` in place and returns the number of iterations performed.
int cv::meanShift( InputArray _probImage, Rect& window, TermCriteria criteria )
{
    CV_INSTRUMENT_REGION()

    // Accept either a Mat or a UMat back-projection without forcing a copy.
    Size imgSize;
    int nchannels;
    Mat probMat;
    UMat probUMat;
    const bool useUMat = _probImage.isUMat();
    if (useUMat)
    {
        probUMat = _probImage.getUMat();
        nchannels = probUMat.channels();
        imgSize = probUMat.size();
    }
    else
    {
        probMat = _probImage.getMat();
        nchannels = probMat.channels();
        imgSize = probMat.size();
    }

    Rect rect = window;

    // The probability image must be single-channel.
    CV_Assert( nchannels == 1 );
    if( window.height <= 0 || window.width <= 0 )
        CV_Error( Error::StsBadArg, "Input window has non-positive sizes" );

    // Clip the caller's window to the image bounds.
    window = window & Rect(0, 0, imgSize.width, imgSize.height);

    // eps is compared against the SQUARED displacement per iteration.
    double eps = (criteria.type & TermCriteria::EPS) ? std::max(criteria.epsilon, 0.) : 1.;
    eps = cvRound(eps*eps);

    int iter, maxIters = (criteria.type & TermCriteria::MAX_ITER) ? std::max(criteria.maxCount, 1) : 100;
    for( iter = 0; iter < maxIters; iter++ )
    {
        rect = rect & Rect(0, 0, imgSize.width, imgSize.height);
        if( rect == Rect() )
        {
            // Window drifted entirely off the image: restart at the center.
            rect.x = imgSize.width/2;
            rect.y = imgSize.height/2;
        }
        rect.width = std::max(rect.width, 1);
        rect.height = std::max(rect.height, 1);

        Moments m = useUMat ? moments(probUMat(rect)) : moments(probMat(rect));

        // No probability mass inside the window: nothing to track.
        if( fabs(m.m00) < DBL_EPSILON )
            break;

        // Step toward the centroid, clamping so the window stays in bounds.
        int dx = cvRound( m.m10/m.m00 - window.width*0.5 );
        int dy = cvRound( m.m01/m.m00 - window.height*0.5 );
        int nx = std::min(std::max(rect.x + dx, 0), imgSize.width - rect.width);
        int ny = std::min(std::max(rect.y + dy, 0), imgSize.height - rect.height);
        dx = nx - rect.x;
        dy = ny - rect.y;
        rect.x = nx;
        rect.y = ny;

        // Converged when the (clamped) squared step falls below eps.
        if( dx*dx + dy*dy < eps )
            break;
    }

    window = rect;
    return iter;
}
示例6: main
// Load the image named on the command line, scale it down to 8 bits, and run
// an interactive Canny demo driven by two trackbars until ESC is pressed.
// Relies on file-scope globals declared elsewhere in this file (src, imgInp,
// imgGray, alpha_slider, alpha_slider_max, p2_slider, p2_slider_max,
// ps_array, on_trackbar_Canny, ...).
// Fixes vs. original: argv[1] is validated before use, imread failure is
// detected, sprintf is replaced with bounded snprintf, and a dead
// istringstream tokenization block (its results were never used) is removed.
int main(int argc, const char** argv)
{
    // Guard against a missing command-line argument before touching argv[1].
    if (argc < 2)
    {
        printf("usage: %s <image-file>\n", argv[0]);
        return EXIT_FAILURE;
    }

    // Read the image preserving its native bit depth (e.g. 16-bit sensors).
    // CV_LOAD_IMAGE_ANYDEPTH is the legacy flag name for IMREAD_ANYDEPTH.
    src = imread(argv[1], CV_LOAD_IMAGE_ANYDEPTH);
    if (src.empty())
    {
        printf("can not open %s\n", argv[1]);
        return EXIT_FAILURE;
    }

    // Map the input down to 8 bits: 0.00390625 == 1/256 (16-bit -> 8-bit).
    src.convertTo(imgInp, CV_8UC1, 0.00390625);
    //src.copyTo(imgInp);

    // Show the raw (converted) input in its own window.
    imshow("Input", imgInp);

    // Convert the input into an intermediate grayscale image.
    if (imgInp.channels() > 1)
    {
        printf("Preceding with blanks: %10d \n", imgInp.channels());
        cvtColor(imgInp, imgGray, COLOR_BGR2GRAY);
        // NOTE(review): split() normally writes to a std::vector<Mat> or a
        // Mat array; a single Mat destination looks suspect — confirm intent.
        Mat spl;
        split(imgInp, spl);
        //imshow("spl1", spl[0]);//b
        //imshow("spl2", spl[1]);//g
        // imshow("spl3", spl[2]);//r
    }
    else
    {
        printf("Preceding with blanks: %10d \n", imgInp.channels());
        imgGray = imgInp;
    }

    /// Initialize trackbar positions.
    alpha_slider = 0;
    p2_slider = 0;

    /// Create the control window.
    namedWindow("Linear Blend",WINDOW_NORMAL);

    /// Create trackbars. snprintf bounds the write to the buffer size.
    /// NOTE(review): "/n" in the labels is likely a typo for "\n"; kept
    /// byte-for-byte since it is user-visible text.
    char TrackbarName[50];
    char TrackbarName2[50];
    snprintf(TrackbarName, sizeof(TrackbarName), "Thresh#1 /n x %d", alpha_slider_max);
    snprintf(TrackbarName2, sizeof(TrackbarName2), "Thresh#2 /n 2 x %d", alpha_slider_max);
    //createTrackbar(TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar);
    createTrackbar(TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar_Canny);
    createTrackbar(TrackbarName2, "Linear Blend", &p2_slider, p2_slider_max, on_trackbar_Canny);
    ps_array[0] = alpha_slider;
    ps_array[1] = p2_slider;

    // Event loop: redraw, let HighGUI pump window messages, and poll for ESC.
    for (;;)
    {
        on_trackbar_Canny(0, 0);
        //on_trackbar(alpha_slider, 0);
        // waitKey serves two purposes: it detects key presses and gives the
        // "Input"/"Linear Blend" windows time to process their messages.
        int key = waitKey(100);
        // Exit when ESC (key code 27) is pressed.
        if (key == 27)
            break;
    }
    return EXIT_SUCCESS;
}