This article collects typical usage examples of the C++ method cv::Mat::channels. If you have been wondering what Mat::channels does, how it is used, and what real-world calls look like, the hand-picked examples below may help. You can also explore further usage examples of the enclosing class, cv::Mat.
Below are 15 code examples of Mat::channels, sorted by popularity by default.
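Before the full examples, here is a minimal standalone sketch (not taken from any of the projects below) of what cv::Mat::channels reports for a few common matrix types:

#include <opencv2/core.hpp>
#include <iostream>

int main() {
    cv::Mat gray(4, 4, CV_8UC1);   // 8-bit, 1 channel
    cv::Mat bgr(4, 4, CV_8UC3);    // 8-bit, 3 channels (B, G, R)
    cv::Mat flow(4, 4, CV_32FC2);  // 32-bit float, 2 channels
    std::cout << gray.channels() << " " << bgr.channels()
              << " " << flow.channels() << std::endl;  // prints "1 3 2"
    return 0;
}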
Example 1: Preprocess
void Classifier::Preprocess(const cv::Mat& img,
                            std::vector<cv::Mat>* input_channels) {
    /* Convert the input image to the input image format of the network. */
    cv::Mat sample;
    if (img.channels() == 3 && num_channels_ == 1)
        cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY);
    else if (img.channels() == 4 && num_channels_ == 1)
        cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY);
    else if (img.channels() == 4 && num_channels_ == 3)
        cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR);
    else if (img.channels() == 1 && num_channels_ == 3)
        cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR);
    else
        sample = img;

    cv::Mat sample_resized;
    if (sample.size() != input_geometry_)
        cv::resize(sample, sample_resized, input_geometry_);
    else
        sample_resized = sample;

    cv::Mat sample_float;
    if (num_channels_ == 3)
        sample_resized.convertTo(sample_float, CV_32FC3);
    else
        sample_resized.convertTo(sample_float, CV_32FC1);

    cv::Mat sample_normalized;
    cv::subtract(sample_float, mean_, sample_normalized);

    /* This operation will write the separate BGR planes directly to the
     * input layer of the network because it is wrapped by the cv::Mat
     * objects in input_channels. */
    cv::split(sample_normalized, *input_channels);

    CHECK(reinterpret_cast<float*>(input_channels->at(0).data)
          == net_->input_blobs()[0]->cpu_data())
        << "Input channels are not wrapping the input layer of the network.";
}
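For context, the CHECK at the end assumes the caller has already pointed input_channels at the network's input blob. A sketch of that companion step, modeled on the WrapInputLayer function from Caffe's bundled classification example (not shown in the snippet above):

void Classifier::WrapInputLayer(std::vector<cv::Mat>* input_channels) {
    caffe::Blob<float>* input_layer = net_->input_blobs()[0];
    int width = input_layer->width();
    int height = input_layer->height();
    float* input_data = input_layer->mutable_cpu_data();
    // One cv::Mat header per channel, all aliasing the blob's memory, so that
    // cv::split() in Preprocess writes straight into the network input.
    for (int i = 0; i < input_layer->channels(); ++i) {
        cv::Mat channel(height, width, CV_32FC1, input_data);
        input_channels->push_back(channel);
        input_data += width * height;
    }
}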
Example 2: toGrayScaleMat
void Camera::toGrayScaleMat(cv::Mat& m)
{
    // Copies the two luma bytes (Y0 at offset 0, Y1 at offset 2) out of each
    // 4-byte packed macropixel (apparently a YUYV-style capture buffer).
    unsigned char *m_ = (unsigned char *)(m.data);
    for (int x = 0; x < w2; x++) {
        for (int y = 0; y < height; y++) {
            int y0, y1;
            int i = (y * w2 + x) * 4;
            y0 = data[i];
            y1 = data[i + 2];
            i = y * m.cols * m.channels() + x * 2 * m.channels();
            m_[i + 0] = (unsigned char) (y0);
            m_[i + 1] = (unsigned char) (y1);
            // Clearer in logic, but slower:
            //m.at<uchar>(y, (2*x)) = (unsigned char) (y0);
            //m.at<uchar>(y, (2*x)+1) = (unsigned char) (y1);
        }
    }
}
Example 3: dest
QImage Tools::Mat2QImage(cv::Mat const& src)
{
    QImage dest(src.cols, src.rows, QImage::Format_ARGB32);
    const float scale = 255.0;
    if (src.depth() == CV_8U) {
        if (src.channels() == 1) {
            for (int i = 0; i < src.rows; ++i) {
                for (int j = 0; j < src.cols; ++j) {
                    int level = src.at<quint8>(i, j);
                    dest.setPixel(j, i, qRgb(level, level, level));
                }
            }
        } else if (src.channels() == 3) {
            for (int i = 0; i < src.rows; ++i) {
                for (int j = 0; j < src.cols; ++j) {
                    cv::Vec3b bgr = src.at<cv::Vec3b>(i, j);
                    dest.setPixel(j, i, qRgb(bgr[2], bgr[1], bgr[0]));
                }
            }
        }
    } else if (src.depth() == CV_32F) {
        if (src.channels() == 1) {
            for (int i = 0; i < src.rows; ++i) {
                for (int j = 0; j < src.cols; ++j) {
                    int level = scale * src.at<float>(i, j);
                    dest.setPixel(j, i, qRgb(level, level, level));
                }
            }
        } else if (src.channels() == 3) {
            for (int i = 0; i < src.rows; ++i) {
                for (int j = 0; j < src.cols; ++j) {
                    cv::Vec3f bgr = scale * src.at<cv::Vec3f>(i, j);
                    dest.setPixel(j, i, qRgb(bgr[2], bgr[1], bgr[0]));
                }
            }
        }
    }
    return dest;
}
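A hypothetical caller, assuming the Tools class above is in scope and a Qt widget context; the per-pixel setPixel conversion is simple but slow for large images:

#include <opencv2/imgcodecs.hpp>
#include <QLabel>
#include <QPixmap>

void showMat(const cv::Mat& img, QLabel* label) {
    // Convert, then hand the result to a Qt widget for display.
    QImage qimg = Tools::Mat2QImage(img);
    label->setPixmap(QPixmap::fromImage(qimg));
}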
Example 4: getProba
void NccHisto::getProba(const cv::Mat ncc, const cv::Mat sumstdev, cv::Mat proba)
{
    if (lut == 0) loadHistogram();
    assert(lut);
    if (lut == 0) return;
    assert(ncc.channels() == 1);
    assert(sumstdev.channels() == 1);
    assert(ncc.cols == proba.cols && ncc.rows == proba.rows);
    assert(proba.channels() == 1);

    const int w = ncc.cols;
    const int h = ncc.rows;
    for (int y = 0; y < h; y++) {
        float *dst = proba.ptr<float>(y);
        const float *src = ncc.ptr<float>(y);
        const float *sum = sumstdev.ptr<float>(y);
        for (int x = 0; x < w; x++) {
            dst[x] = lut[lut_idx(src[x], sum[x])];
        }
    }
}
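Because proba is passed by value, only the cv::Mat header is copied; the pixel buffer is shared with the caller's matrix, so the caller must preallocate it. A hypothetical caller sketch, with histo an NccHisto instance and ncc/sumstdev assumed to exist:

cv::Mat proba(ncc.size(), CV_32FC1);   // same size as ncc, float, single channel
histo.getProba(ncc, sumstdev, proba);  // results land in the caller's buffer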
Example 5: crosscorrelation
cv::Mat crosscorrelation(const cv::Mat& A, const cv::Mat& B)
{
    if (A.channels() != 2 || B.channels() != 2)
    {
        throw CustomException("crosscorrelation: Must be two two-channel images.");
    }
    cv::Mat aPadded;
    cv::Mat bPadded;
    /*
    // The following would expand the image to an optimal size to make the Fourier
    // transform efficient. It is not used right now, since the image is instead
    // double-sized to apply cross-correlation properly:
    int m = getOptimalDFTSize( A.rows );
    int n = getOptimalDFTSize( A.cols ); // on the border add zero values
    copyMakeBorder(A, aPadded, 0, m - A.rows, 0, n - A.cols, BORDER_CONSTANT, Scalar(0,0));
    copyMakeBorder(B, bPadded, 0, m - B.rows, 0, n - B.cols, BORDER_CONSTANT, Scalar(0,0));
    */
    // Remember: the result of the correlation is twice the size of the input arrays,
    // since there must be enough space for the overlap of both functions.
    cv::copyMakeBorder(A, aPadded, 0, A.rows, 0, A.cols, cv::BORDER_CONSTANT, cv::Scalar(0.0, 0.0));
    cv::copyMakeBorder(B, bPadded, 0, B.rows, 0, B.cols, cv::BORDER_CONSTANT, cv::Scalar(0.0, 0.0));
    // Caution: know the differences between DFT_COMPLEX_OUTPUT, DFT_SCALE and DFT_REAL_OUTPUT.
    cv::dft(aPadded, aPadded, cv::DFT_COMPLEX_OUTPUT + cv::DFT_SCALE);
    cv::dft(bPadded, bPadded, cv::DFT_COMPLEX_OUTPUT + cv::DFT_SCALE);
    cv::Mat C, tmpC;
    bool conjugateB(true); // optional parameter, false by default: set it to false to turn this operation into a convolution
    // Undo one of the two 1/(M*N) scale factors applied by DFT_SCALE above.
    cv::Mat bScaled = bPadded * double(aPadded.rows * aPadded.cols);
    cv::mulSpectrums(aPadded, bScaled, tmpC, 0, conjugateB);
    // Note: neither dft nor idft scales the result by default, so DFT_SCALE must be
    // passed to exactly one of dft/idft to make the transforms mutually inverse.
    cv::idft(tmpC, C, cv::DFT_COMPLEX_OUTPUT);
    // Remember to normalize: when calculating an OTF, the value at the origin must
    // equal unity (energy conservation).
    return C;
}
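A hedged usage sketch (names are illustrative): the function expects two-channel complex images, so real CV_32F inputs must first be packed with a zero imaginary plane; the relative shift can then be read off the magnitude peak of the result.

#include <opencv2/opencv.hpp>
#include <vector>

cv::Point correlationPeak(const cv::Mat& realA, const cv::Mat& realB) {
    // Pack each real CV_32F image with a zero imaginary plane.
    cv::Mat zeros = cv::Mat::zeros(realA.size(), CV_32F);
    cv::Mat A, B;
    cv::merge(std::vector<cv::Mat>{realA, zeros}, A);
    cv::merge(std::vector<cv::Mat>{realB, zeros}, B);
    cv::Mat C = crosscorrelation(A, B);
    // The location of the magnitude peak encodes the relative shift.
    std::vector<cv::Mat> planes;
    cv::split(C, planes);
    cv::Mat mag;
    cv::magnitude(planes[0], planes[1], mag);
    cv::Point peak;
    cv::minMaxLoc(mag, nullptr, nullptr, nullptr, &peak);
    return peak;
}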
Example 6: Apply
void MeanTransformer::Apply(size_t id, cv::Mat &mat)
{
    UNUSED(id);
    assert(m_meanImg.size() == cv::Size(0, 0) ||
           (m_meanImg.size() == mat.size() &&
            m_meanImg.channels() == mat.channels()));
    // REVIEW alexeyk: check type conversion (float/double).
    if (m_meanImg.size() == mat.size())
    {
        mat = mat - m_meanImg;
    }
}
Example 7: if
void cv::DisplayHelper::display(const cv::Mat& oInputImg, const cv::Mat& oDebugImg, const cv::Mat& oOutputImg, size_t nIdx) {
    CV_Assert(!oInputImg.empty() && (oInputImg.type()==CV_8UC1 || oInputImg.type()==CV_8UC3 || oInputImg.type()==CV_8UC4));
    CV_Assert(!oDebugImg.empty() && (oDebugImg.type()==CV_8UC1 || oDebugImg.type()==CV_8UC3 || oDebugImg.type()==CV_8UC4) && oDebugImg.size()==oInputImg.size());
    CV_Assert(!oOutputImg.empty() && (oOutputImg.type()==CV_8UC1 || oOutputImg.type()==CV_8UC3 || oOutputImg.type()==CV_8UC4) && oOutputImg.size()==oInputImg.size());
    cv::Mat oInputImgBYTE3, oDebugImgBYTE3, oOutputImgBYTE3;
    if(oInputImg.channels()==1)
        cv::cvtColor(oInputImg,oInputImgBYTE3,cv::COLOR_GRAY2BGR);
    else if(oInputImg.channels()==4)
        cv::cvtColor(oInputImg,oInputImgBYTE3,cv::COLOR_BGRA2BGR);
    else
        oInputImgBYTE3 = oInputImg;
    if(oDebugImg.channels()==1)
        cv::cvtColor(oDebugImg,oDebugImgBYTE3,cv::COLOR_GRAY2BGR);
    else if(oDebugImg.channels()==4)
        cv::cvtColor(oDebugImg,oDebugImgBYTE3,cv::COLOR_BGRA2BGR);
    else
        oDebugImgBYTE3 = oDebugImg;
    if(oOutputImg.channels()==1)
        cv::cvtColor(oOutputImg,oOutputImgBYTE3,cv::COLOR_GRAY2BGR);
    else if(oOutputImg.channels()==4)
        cv::cvtColor(oOutputImg,oOutputImgBYTE3,cv::COLOR_BGRA2BGR);
    else
        oOutputImgBYTE3 = oOutputImg;
    cv::Size oCurrDisplaySize;
    if(m_oMaxDisplaySize.area()>0 && (oOutputImgBYTE3.cols>m_oMaxDisplaySize.width || oOutputImgBYTE3.rows>m_oMaxDisplaySize.height)) {
        // Scale the longer edge down to the maximum size, keeping the aspect
        // ratio (the ratio must be computed in floating point).
        if(oOutputImgBYTE3.cols>m_oMaxDisplaySize.width && oOutputImgBYTE3.cols>oOutputImgBYTE3.rows)
            oCurrDisplaySize = cv::Size(m_oMaxDisplaySize.width,int(m_oMaxDisplaySize.width*(float(oOutputImgBYTE3.rows)/oOutputImgBYTE3.cols)));
        else
            oCurrDisplaySize = cv::Size(int(m_oMaxDisplaySize.height*(float(oOutputImgBYTE3.cols)/oOutputImgBYTE3.rows)),m_oMaxDisplaySize.height);
        cv::resize(oInputImgBYTE3,oInputImgBYTE3,oCurrDisplaySize);
        cv::resize(oDebugImgBYTE3,oDebugImgBYTE3,oCurrDisplaySize);
        cv::resize(oOutputImgBYTE3,oOutputImgBYTE3,oCurrDisplaySize);
    }
    else
        oCurrDisplaySize = oOutputImgBYTE3.size();
    std::stringstream sstr;
    sstr << "Input #" << nIdx;
    putText(oInputImgBYTE3,sstr.str(),cv::Scalar_<uchar>(0,0,255));
    putText(oDebugImgBYTE3,"Debug",cv::Scalar_<uchar>(0,0,255));
    putText(oOutputImgBYTE3,"Output",cv::Scalar_<uchar>(0,0,255));
    if(m_bFirstDisplay) {
        putText(oDebugImgBYTE3,"[Press space to continue]",cv::Scalar_<uchar>(0,0,255),true,cv::Point2i(oDebugImgBYTE3.cols/2-100,15),1,1.0);
        m_bFirstDisplay = false;
    }
    std::lock_guard<std::mutex> oLock(m_oEventMutex);
    const cv::Point2i& oDbgPt = m_oLatestMouseEvent.oPosition;
    const cv::Size& oLastDbgSize = m_oLatestMouseEvent.oDisplaySize;
    if(oDbgPt.x>=0 && oDbgPt.y>=0 && oDbgPt.x<oLastDbgSize.width*3 && oDbgPt.y<oLastDbgSize.height) {
        const cv::Point2i oDbgPt_rescaled(int(oCurrDisplaySize.width*(float(oDbgPt.x%oLastDbgSize.width)/oLastDbgSize.width)),int(oCurrDisplaySize.height*(float(oDbgPt.y)/oLastDbgSize.height)));
        cv::circle(oInputImgBYTE3,oDbgPt_rescaled,5,cv::Scalar(255,255,255));
        cv::circle(oDebugImgBYTE3,oDbgPt_rescaled,5,cv::Scalar(255,255,255));
        cv::circle(oOutputImgBYTE3,oDbgPt_rescaled,5,cv::Scalar(255,255,255));
    }
    cv::Mat displayH;
    cv::hconcat(oInputImgBYTE3,oDebugImgBYTE3,displayH);
    cv::hconcat(displayH,oOutputImgBYTE3,displayH);
    cv::imshow(m_sDisplayName,displayH);
    m_oLastDisplaySize = oCurrDisplaySize;
}
Example 8: imshowAllChannels
void imshowAllChannels(const std::string winname, cv::Mat img)
{
    std::vector<cv::Mat> imgs;
    cv::split(img, imgs);
    for (int i = 0; i < img.channels(); i++)
    {
        std::stringstream ss;
        ss << winname << ":" << i;
        cv::imshow(ss.str(), imgs[i]);
    }
}
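A quick usage sketch (the image path is a placeholder):

#include <opencv2/opencv.hpp>

int main() {
    cv::Mat img = cv::imread("input.png", cv::IMREAD_COLOR);  // placeholder path
    imshowAllChannels("input", img);  // opens "input:0", "input:1", "input:2"
    cv::waitKey(0);
    return 0;
}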
Example 9: if
QImage QNode::cvtCvMat2QImage(const cv::Mat & image)
{
    QImage qtemp;
    if(!image.empty() && image.depth() == CV_8U)
    {
        const unsigned char * data = image.data;
        qtemp = QImage(image.cols, image.rows, QImage::Format_RGB32);
        for(int y = 0; y < image.rows; ++y, data += image.cols*image.elemSize())
        {
            for(int x = 0; x < image.cols; ++x)
            {
                QRgb * p = ((QRgb*)qtemp.scanLine (y)) + x;
                // BGR -> RGB swap while copying into the QImage scanline
                // (assumes a 3- or 4-channel BGR(A) source).
                *p = qRgb(data[x * image.channels()+2], data[x * image.channels()+1], data[x * image.channels()]);
            }
        }
    }
    else if(!image.empty() && image.depth() != CV_8U)
    {
        printf("Wrong image format, must be 8 bits\n");
    }
    return qtemp;
}
Example 10: filter
void drwnLBPFilterBank::filter(const cv::Mat& img, std::vector<cv::Mat>& response) const
{
    // check input
    DRWN_ASSERT(img.data != NULL);
    if (response.empty()) {
        response.resize(this->numFilters());
    }
    DRWN_ASSERT(response.size() == this->numFilters());
    if (img.channels() != 1) {
        cv::Mat tmp(img.rows, img.cols, img.depth());
        cv::cvtColor(img, tmp, CV_RGB2GRAY);
        return filter(tmp, response);
    }
    DRWN_ASSERT_MSG(img.depth() == CV_8U, "image must be 8-bit");

    // allocate output channels as 32-bit floating point
    for (unsigned i = 0; i < response.size(); i++) {
        if ((response[i].rows == img.rows) && (response[i].cols == img.cols) &&
            (response[i].depth() == CV_32F) && (response[i].channels() == 1)) {
            response[i].setTo(0.0f);
        } else {
            response[i] = cv::Mat::zeros(img.rows, img.cols, CV_32FC1);
        }
    }

    for (int y = 0; y < img.rows; y++) {
        const unsigned char *p = img.ptr<const unsigned char>(y);
        const unsigned char *p_prev = (y == 0) ? p : img.ptr<const unsigned char>(y - 1);
        const unsigned char *p_next = (y == img.rows - 1) ? p : img.ptr<const unsigned char>(y + 1);

        // 4-connected neighbourhood
        for (int x = 0; x < img.cols; x++) {
            if (p[x] > p_prev[x]) response[0].at<float>(y, x) = 1.0f;
            if ((x < img.cols - 1) && (p[x] > p[x + 1])) response[1].at<float>(y, x) = 1.0f;
            if (p[x] > p_next[x]) response[2].at<float>(y, x) = 1.0f;
            if ((x > 0) && (p[x] > p[x - 1])) response[3].at<float>(y, x) = 1.0f;
        }

        // 8-connected neighbourhood
        if (_b8Neighbourhood) {
            for (int x = 0; x < img.cols; x++) {
                if ((p[x] > p_prev[x]) && (x < img.cols - 1) && (p[x] > p[x + 1])) response[4].at<float>(y, x) = 1.0f;
                if ((x < img.cols - 1) && (p[x] > p[x + 1]) && (p[x] > p_next[x])) response[5].at<float>(y, x) = 1.0f;
                if ((p[x] > p_next[x]) && (x > 0) && (p[x] > p[x - 1])) response[6].at<float>(y, x) = 1.0f;
                if ((x > 0) && (p[x] > p[x - 1]) && (p[x] > p_prev[x])) response[7].at<float>(y, x) = 1.0f;
            }
        }
    }
}
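The example above emits one binary plane per neighbour comparison; the classic LBP formulation instead packs the eight comparisons of each pixel into a single 8-bit code. A generic sketch of that variant (not part of drwn):

cv::Mat lbp8(const cv::Mat& gray) {
    CV_Assert(gray.type() == CV_8UC1);
    cv::Mat codes = cv::Mat::zeros(gray.size(), CV_8UC1);
    for (int y = 1; y < gray.rows - 1; y++) {
        for (int x = 1; x < gray.cols - 1; x++) {
            const uchar c = gray.at<uchar>(y, x);
            uchar code = 0;
            // Clockwise from the top-left neighbour; each comparison sets one bit.
            code |= (gray.at<uchar>(y - 1, x - 1) > c) << 7;
            code |= (gray.at<uchar>(y - 1, x    ) > c) << 6;
            code |= (gray.at<uchar>(y - 1, x + 1) > c) << 5;
            code |= (gray.at<uchar>(y,     x + 1) > c) << 4;
            code |= (gray.at<uchar>(y + 1, x + 1) > c) << 3;
            code |= (gray.at<uchar>(y + 1, x    ) > c) << 2;
            code |= (gray.at<uchar>(y + 1, x - 1) > c) << 1;
            code |= (gray.at<uchar>(y,     x - 1) > c) << 0;
            codes.at<uchar>(y, x) = code;
        }
    }
    return codes;
}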
Example 11: retVal
cv::Mat
Auvsi_Recognize::doClustering( cv::Mat input, int numClusters, bool colored = true )
{
#ifdef TWO_CHANNEL
    typedef cv::Vec<T, 2> VT;
#else
    typedef cv::Vec<T, 3> VT;
#endif
    typedef cv::Vec<int, 1> IT;
    const int NUMBER_OF_ATTEMPTS = 5;
    int inputSize = input.rows * input.cols;

    // Create destination image
    cv::Mat retVal( input.size(), input.type() );

    // Format input to k-means
    cv::Mat kMeansData( input );
    kMeansData = kMeansData.reshape( input.channels(), inputSize );

    // For the output of k-means
    cv::Mat labels( inputSize, 1, CV_32S );
    cv::Mat centers( numClusters, 1, input.type() );

    // Perform the actual k-means clustering
    // POSSIBLE FLAGS: KMEANS_PP_CENTERS KMEANS_RANDOM_CENTERS
    cv::kmeans( kMeansData, numClusters, labels, cv::TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0 ), NUMBER_OF_ATTEMPTS, cv::KMEANS_RANDOM_CENTERS, &centers );

    // Label the image according to the clustering results
    cv::MatIterator_<VT> retIterator = retVal.begin<VT>();
    cv::MatIterator_<VT> retEnd = retVal.end<VT>();
    cv::MatIterator_<IT> labelIterator = labels.begin<IT>();
    for( ; retIterator != retEnd; ++retIterator, ++labelIterator )
    {
        VT data = centers.at<VT>( cv::saturate_cast<int>((*labelIterator)[0]), 0 );
#ifdef TWO_CHANNEL
        *retIterator = VT( cv::saturate_cast<T>(data[0]), cv::saturate_cast<T>(data[1]) );
#else
        *retIterator = VT( cv::saturate_cast<T>(data[0]), cv::saturate_cast<T>(data[1]), cv::saturate_cast<T>(data[2]) );
#endif
    }

    if( colored )
        return retVal;
    else
        return labels;
}
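A note on the &centers argument: early OpenCV 2.x declared cv::kmeans with a trailing Mat* parameter, which is what this snippet targets. Modern OpenCV takes the centers as an OutputArray instead, so an equivalent call today (a sketch, reusing the same variables) would be:

cv::kmeans(kMeansData, numClusters, labels,
           cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 10, 1.0),
           NUMBER_OF_ATTEMPTS, cv::KMEANS_RANDOM_CENTERS, centers);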
Example 12: cvThresholdOtsu
void SkinSplit::cvThresholdOtsu(cv::Mat& src, cv::Mat& dst)
{
    assert(src.channels() == 1);
    int hist[256] = {0};
    double pro_hist[256] = {0.0};
    int height = src.rows;
    int width = src.cols;
    // Count how many pixels fall on each gray level.
    // (cv::Mat::at takes (row, col), so the row index j comes first.)
    for(int i = 0; i < width; i++)
    {
        for(int j = 0; j < height; j++)
        {
            int temp = src.at<uchar>(j, i);
            hist[temp]++;
        }
    }
    // Compute the probability of each gray level in the image.
    for(int i = 0; i < 256; i++)
    {
        pro_hist[i] = (double)hist[i] / (double)(width * height);
    }
    // Compute the mean gray level.
    double avgPixel = 0.0;
    for(int i = 0; i < 256; i++)
    {
        avgPixel += i * pro_hist[i];
    }
    // Pick the threshold that maximizes the between-class variance.
    int threshold = 0;
    double maxVariance = 0;
    double w = 0, u = 0;
    for(int i = 0; i < 256; i++)
    {
        w += pro_hist[i];
        u += i * pro_hist[i];
        double t = avgPixel * w - u;
        double variance = t * t / (w * (1 - w));
        if(variance > maxVariance)
        {
            maxVariance = variance;
            threshold = i;
        }
    }
    cv::threshold(src, dst, threshold, 255, CV_THRESH_BINARY);
}
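For reference, OpenCV ships Otsu's method built in; given the same src/dst, the hand-rolled loop above is equivalent to a single call whose return value is the chosen threshold:

double otsu = cv::threshold(src, dst, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);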
Example 13: set_img
void GUI::set_img(const cv::Mat &frame, cv::Mat &result, Controller &controller) {
    /** result == controller.current_image_to_display. */
    // Convert controller.current_image_to_process (as src) to RGB(A) controller.current_image_to_display (as dst).
    convert_image_to_gui_output_format(frame, result) ;
    if (widget_current_image_to_display != NULL) {
        delete widget_current_image_to_display ;
    }
    widget_current_image_to_display = Gtk::manage(new Gtk::Image());
    widget_current_image_to_display->clear() ;
    if (result.depth() != CV_8U) { // This shouldn't happen!
        result.assignTo(result, CV_8U) ;
    }
    // We resize the original image every time we display it.
    // This is better because the image is resized (if needed) only once per change,
    // instead of always resizing the same image.
    //controller.resize_image_to_display(result) ;
    if (controller.get_image_size_gt_layout()) {
        cv::resize(result, result, cv::Size(controller.display_image_size.first, controller.display_image_size.second), 1.0, 1.0, cv::INTER_LINEAR) ;
    }
    IplImage iplimg = _IplImage(result) ;
    widget_current_image_to_display->set(Gdk::Pixbuf::create_from_data( (const guint8 *) iplimg.imageData,
                                                                        Gdk::COLORSPACE_RGB,
                                                                        (result.channels() == 4),
                                                                        iplimg.depth,
                                                                        iplimg.width,
                                                                        iplimg.height,
                                                                        iplimg.widthStep)) ;
    widget_current_image_to_display->show() ;
    display_area.put(*widget_current_image_to_display, controller.current_image_position.first, controller.current_image_position.second) ;
}
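A minimal alternative sketch that skips the deprecated IplImage bridge, assuming result is the 8-bit RGB(A) image produced above and stays alive while the Pixbuf is in use (Gdk::Pixbuf::create_from_data does not copy the buffer):

Glib::RefPtr<Gdk::Pixbuf> pixbuf = Gdk::Pixbuf::create_from_data(
    result.data, Gdk::COLORSPACE_RGB,
    result.channels() == 4,          // has_alpha
    8,                               // bits per sample
    result.cols, result.rows,
    static_cast<int>(result.step)); // row stride in bytes
widget_current_image_to_display->set(pixbuf);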
Example 14: execute_current
cv::Mat CatOP::execute_current(const cv::Mat& img,
                               const vector<string>& fields) {
    string key = get_key(fields);
    cout << key << " " << img.cols << " " << img.rows
         << " " << img.channels() << endl;
    std::size_t last_dot = key.rfind(".");
    string ext;
    if (last_dot != string::npos) {
        ext = key.substr(last_dot);
    }
    vector<unsigned char> img_content;
    cv::imencode(ext, img, img_content);
    string base64_string = base64_encode(img_content.data(), img_content.size());
    // Detect whether we are running inside tmux, which needs the escape
    // sequence wrapped in a DCS passthrough.
    char const* term = std::getenv("TERM");
    char const* tmux = std::getenv("TMUX");
    bool in_tmux = false;
    if (term != NULL && tmux != NULL) {
        if (strncmp(term, "xterm", 5) == 0
            || strncmp(term, "screen", 6) == 0) {
            if (tmux[0] > 0) {
                in_tmux = true;
            }
        }
    }
    if (in_tmux) {
        std::cout << "\033Ptmux;\033\033]";
    } else {
        std::cout << "\033]";
    }
    const unsigned char * key_ptr =
        reinterpret_cast<const unsigned char*>(key.data());
    // iTerm2 inline-image escape sequence: file name and payload are base64-encoded.
    std::cout << "1337;File=name="
              << base64_encode(key_ptr, key.length())
              << ";size=" << img_content.size()
              << ";inline=1:" << base64_string;
    if (in_tmux) {
        std::cout << "\a\033\\";
    } else {
        std::cout << "\a";
    }
    std::cout << std::endl;
    return img;
}
Example 15: FuyangCalibra
float FuyangCalibra(const cv::Mat& in)
{
    if(in.empty() || in.channels() != 1)
        return 0;
    float r = in.rows;
    float c = in.cols;
    // Find the first and last rows containing a non-zero pixel.
    float start_y = r - 1;  // initialized to the last possible row index
    float end_y = 1;
    for(int i = 0; i < r; i++)
    {
        const uchar* dataI = in.ptr<uchar>(i);
        for(int j = 0; j < c; j++)
        {
            if(dataI[j] == 0)
                continue;
            else
            {
                if(start_y > i)
                {
                    start_y = i;
                }
                if(end_y < i)
                {
                    end_y = i;
                }
            } // if
        } // inner loop (columns)
    } // outer loop (rows)
    // Convert the row indices to offsets from the image center line.
    start_y = (r - 1) / 2 - start_y;
    end_y = (r - 1) / 2 - end_y;
    float A = 2 * start_y * tan(CHUIZHI) / r;
    float B = 2 * end_y * tan(CHUIZHI) / r;
    float C = float(DELTA_Y) / float(HEIGHT);
    ROS_INFO("A= %f ,B= %f C=%f ", A, B, C);
    // Solve the quadratic _a*x^2 + _b*x + _c = 0; the calibrated pitch
    // angle is atan(x1), converted to degrees below.
    float _a = A - B - A * B * C;
    float _b = A * C + B * C;
    float _c = A - B - C;
    float x1 = (-_b + sqrt(_b * _b - 4 * _a * _c)) / (2 * _a);
    float x2 = (-_b - sqrt(_b * _b - 4 * _a * _c)) / (2 * _a);
    float arcx = atan(x1) * 180 / CV_PI;
    ROS_INFO("a= %f ,b= %f ,c= %f, ", _a, _b, _c);
    ROS_INFO("start_y= %f ,end_y= %f ,x1= %f,,x2= %f ", start_y, end_y, x1, x2);
    ROS_INFO("x= %f,arcx= %f ", atan(x1), arcx);
    return arcx;
} // getPointset(); TODO: classify the recorded points, or remove isolated regions