This page collects typical usage examples of the C++ method Mat::channels. If you have been wondering what exactly Mat::channels does in C++, or how to call it, the curated code examples below may help. You can also explore further usage examples of the Mat class this method belongs to.
The following shows 15 code examples of Mat::channels, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
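As a quick orientation before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what Mat::channels reports for typical matrix types:
#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat gray(4, 4, CV_8UC1);   // single-channel 8-bit
    cv::Mat bgr (4, 4, CV_8UC3);   // three interleaved 8-bit channels (B, G, R)
    cv::Mat quad(4, 4, CV_32FC4);  // four float channels

    // channels() is the number of interleaved values per pixel;
    // elemSize() == elemSize1() * channels().
    std::cout << gray.channels() << " "       // 1
              << bgr.channels()  << " "       // 3
              << quad.channels() << std::endl; // 4
    return 0;
}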
Example 1: write
bool PxMEncoder::write( const Mat& img, const std::vector<int>& params )
{
bool isBinary = true;
int width = img.cols, height = img.rows;
int _channels = img.channels(), depth = (int)img.elemSize1()*8;
int channels = _channels > 1 ? 3 : 1;
int fileStep = width*(int)img.elemSize();
int x, y;
for( size_t i = 0; i < params.size(); i += 2 )
if( params[i] == CV_IMWRITE_PXM_BINARY )
isBinary = params[i+1] != 0;
WLByteStream strm;
if( m_buf )
{
if( !strm.open(*m_buf) )
return false;
int t = CV_MAKETYPE(img.depth(), channels);
m_buf->reserve( alignSize(256 + (isBinary ? fileStep*height :
((t == CV_8UC1 ? 4 : t == CV_8UC3 ? 4*3+2 :
t == CV_16UC1 ? 6 : 6*3+2)*width+1)*height), 256));
}
else if( !strm.open(m_filename) )
return false;
int lineLength;
int bufferSize = 128; // buffer that should fit a header
if( isBinary )
lineLength = width * (int)img.elemSize();
else
lineLength = (6 * channels + (channels > 1 ? 2 : 0)) * width + 32;
if( bufferSize < lineLength )
bufferSize = lineLength;
AutoBuffer<char> _buffer(bufferSize);
char* buffer = _buffer;
// write header;
sprintf( buffer, "P%c\n%d %d\n%d\n",
'2' + (channels > 1 ? 1 : 0) + (isBinary ? 3 : 0),
width, height, (1 << depth) - 1 );
strm.putBytes( buffer, (int)strlen(buffer) );
for( y = 0; y < height; y++ )
{
const uchar* const data = img.ptr(y);
if( isBinary )
{
if( _channels == 3 )
{
if( depth == 8 )
icvCvt_BGR2RGB_8u_C3R( (const uchar*)data, 0,
(uchar*)buffer, 0, cvSize(width,1) );
else
icvCvt_BGR2RGB_16u_C3R( (const ushort*)data, 0,
(ushort*)buffer, 0, cvSize(width,1) );
}
// swap endianness if necessary
if( depth == 16 && !isBigEndian() )
{
if( _channels == 1 )
memcpy( buffer, data, fileStep );
for( x = 0; x < width*channels*2; x += 2 )
{
uchar v = buffer[x];
buffer[x] = buffer[x + 1];
buffer[x + 1] = v;
}
}
strm.putBytes( (channels > 1 || depth > 8) ? buffer : (const char*)data, fileStep );
}
else
{
char* ptr = buffer;
if( channels > 1 )
{
if( depth == 8 )
{
for( x = 0; x < width*channels; x += channels )
{
sprintf( ptr, "% 4d", data[x + 2] );
ptr += 4;
sprintf( ptr, "% 4d", data[x + 1] );
ptr += 4;
sprintf( ptr, "% 4d", data[x] );
ptr += 4;
*ptr++ = ' ';
*ptr++ = ' ';
}
}
else
{
//......... part of the code omitted here .........
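The header written above derives the PNM magic number from the channel count and the binary flag; a standalone restatement of just that mapping (the helper name is made up for illustration):
// 'P2' = ASCII PGM, 'P3' = ASCII PPM, 'P5' = binary PGM, 'P6' = binary PPM
char pnmMagic(int channels, bool isBinary)
{
    return '2' + (channels > 1 ? 1 : 0) + (isBinary ? 3 : 0);
}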
Example 2: write
bool PngEncoder::write( const Mat& img, const std::vector<int>& params )
{
png_structp png_ptr = png_create_write_struct( PNG_LIBPNG_VER_STRING, 0, 0, 0 );
png_infop info_ptr = 0;
FILE * volatile f = 0;
int y, width = img.cols, height = img.rows;
int depth = img.depth(), channels = img.channels();
volatile bool result = false;
AutoBuffer<uchar*> buffer;
if( depth != CV_8U && depth != CV_16U )
return false;
if( png_ptr )
{
info_ptr = png_create_info_struct( png_ptr );
if( info_ptr )
{
if( setjmp( png_jmpbuf ( png_ptr ) ) == 0 )
{
if( m_buf )
{
png_set_write_fn(png_ptr, this,
(png_rw_ptr)writeDataToBuf, (png_flush_ptr)flushBuf);
}
else
{
f = fopen( m_filename.c_str(), "wb" );
if( f )
png_init_io( png_ptr, (png_FILE_p)f );
}
int compression_level = -1; // Invalid value to allow setting 0-9 as valid
int compression_strategy = IMWRITE_PNG_STRATEGY_RLE; // Default strategy
bool isBilevel = false;
for( size_t i = 0; i < params.size(); i += 2 )
{
if( params[i] == IMWRITE_PNG_COMPRESSION )
{
compression_strategy = IMWRITE_PNG_STRATEGY_DEFAULT; // Default strategy
compression_level = params[i+1];
compression_level = MIN(MAX(compression_level, 0), Z_BEST_COMPRESSION);
}
if( params[i] == IMWRITE_PNG_STRATEGY )
{
compression_strategy = params[i+1];
compression_strategy = MIN(MAX(compression_strategy, 0), Z_FIXED);
}
if( params[i] == IMWRITE_PNG_BILEVEL )
{
isBilevel = params[i+1] != 0;
}
}
if( m_buf || f )
{
if( compression_level >= 0 )
{
png_set_compression_level( png_ptr, compression_level );
}
else
{
// tune parameters for speed
// (see http://wiki.linuxquestions.org/wiki/Libpng)
png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_SUB);
png_set_compression_level(png_ptr, Z_BEST_SPEED);
}
png_set_compression_strategy(png_ptr, compression_strategy);
png_set_IHDR( png_ptr, info_ptr, width, height, depth == CV_8U ? isBilevel?1:8 : 16,
channels == 1 ? PNG_COLOR_TYPE_GRAY :
channels == 3 ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGBA,
PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_DEFAULT );
png_write_info( png_ptr, info_ptr );
if (isBilevel)
png_set_packing(png_ptr);
png_set_bgr( png_ptr );
if( !isBigEndian() )
png_set_swap( png_ptr );
buffer.allocate(height);
for( y = 0; y < height; y++ )
buffer[y] = img.data + y*img.step;
png_write_image( png_ptr, buffer );
png_write_end( png_ptr, info_ptr );
result = true;
}
}
}
}
png_destroy_write_struct( &png_ptr, &info_ptr );
//......... part of the code omitted here .........
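The channels() value drives the libpng color type in the png_set_IHDR call above. A minimal restatement of that mapping, assuming (as imwrite guarantees) that only 1-, 3- or 4-channel images ever reach the encoder; the helper name is made up for illustration:
#include <png.h>

int pngColorType(int channels)
{
    return channels == 1 ? PNG_COLOR_TYPE_GRAY
         : channels == 3 ? PNG_COLOR_TYPE_RGB
         :                 PNG_COLOR_TYPE_RGBA;  // channels == 4
}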
Example 3: main
int main(int argc, char **argv) {
InputData input_params(argc,argv);
// Init Caffe Net
LOG(INFO) << "Init caffe...";
if (input_params.gpu_en) {
Caffe::set_mode(Caffe::GPU);
Caffe::SetDevice(input_params.gpu_id);
} else {
Caffe::set_mode(Caffe::CPU);
}
Caffe::set_phase(Caffe::TEST);
Net<float> caffe_net(input_params.protoName);
caffe_net.CopyTrainedLayersFrom(input_params.modelName);
if (::strcmp(input_params.blobs[0].c_str(),"all")==0) { // extract all layers
input_params.blobs = caffe_net.blob_names();
input_params.wrPrefix = caffe_net.blob_names();
// '/' cannot appear in path, and we replace it with '_'
for (int i=0; i<input_params.wrPrefix.size(); ++i) {
for (int j=0; j<input_params.wrPrefix[i].length(); ++j) {
if (input_params.wrPrefix[i][j] == '/') {
input_params.wrPrefix[i][j] = '_';
}
}
}
}
CHECK_EQ(input_params.wrPrefix.size(),input_params.blobs.size())
<< "wr_prefix.size() != blobs.size()";
// Check feat specified
const int feat_nums = input_params.blobs.size();
CHECK_GT(feat_nums,0)
<< "No blob names specified in command line.";
for (size_t i = 0; i < feat_nums; i++) {
CHECK(caffe_net.has_blob(input_params.blobs[i]))
<< "Unknown feature blob name " << input_params.blobs[i]
<< " in the network " << input_params.protoName;
}
// Preprocess
LOG(INFO) << "Prepare image data...";
const vector<Blob<float>* >& input_blobs = caffe_net.input_blobs();
const int channels = input_blobs[0]->channels();
const int resize_height = input_blobs[0]->height();
const int resize_width = input_blobs[0]->width();
const int data_count = input_blobs[0]->count();
Mat img = cv::imread(input_params.imageName.c_str(),1);
if (!img.data) {
LOG(ERROR) << "Cannot open image.";
return -1;
}
CHECK_EQ(img.channels(),channels)
<< "Input image channel dismatch the Net.";
Mat resized_img;
cv::resize(img,resized_img,cv::Size(resize_width,resize_height)); // cv::Size takes (width, height)
float *_data_ptr = new float[data_count];
if (-1 == prepare_image(resized_img,_data_ptr,data_count) ) {
return -1;
}
const float* const data_ptr = reinterpret_cast<const float* const>(_data_ptr);
// Filling input and Extract feature
switch (Caffe::mode()) {
case Caffe::GPU:
caffe::caffe_copy(data_count,data_ptr,input_blobs[0]->mutable_gpu_data());
break;
case Caffe::CPU:
caffe::caffe_copy(data_count,data_ptr,input_blobs[0]->mutable_cpu_data());
break;
default:
LOG(ERROR) << "Unknow caffe mode";
}
delete[] _data_ptr;
LOG(INFO) << "Extracting features begin (total layers " << feat_nums << ")";
//const vector<Blob<float>*>& output_blobs =
caffe_net.ForwardPrefilled(); // Forward
for (int i=0; i<feat_nums; ++i) {
const shared_ptr<Blob<float> > feature_blob = caffe_net.blob_by_name(input_params.blobs[i]);
LOG(INFO) << "Extracting " << input_params.blobs[i] << " ("
<< feature_blob->channels()*feature_blob->num() << " Mat)";
vector<Mat> mat_feature;
blob2mat(feature_blob, mat_feature);
// Save to disk
for (int k=0; k<mat_feature.size(); ++k) {
char sn[128];
sprintf(sn,"%s%s_%.4d.png",input_params.wrRoot.c_str(),input_params.wrPrefix[i].c_str(),k);
imwrite(sn,mat_feature[k]);
}
}
LOG(INFO) << "Extracting features end";
}
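prepare_image is not shown in this example. As a hedged sketch of the kind of HWC-to-CHW copy such a helper typically performs before the data can be copied into a Caffe input blob (the function name is hypothetical, and any mean subtraction or scaling the real helper does is omitted):
#include <opencv2/core.hpp>

// Copy an 8-bit OpenCV image (interleaved HWC layout) into a float buffer
// laid out as planar CHW, which is the layout a Caffe input blob expects.
// Assumes a CV_8U input image; purely illustrative.
static int prepare_image_sketch(const cv::Mat& img, float* out, int count)
{
    const int c = img.channels(), h = img.rows, w = img.cols;
    if (c * h * w != count)
        return -1;                               // size mismatch with the blob
    for (int ch = 0; ch < c; ++ch)
        for (int y = 0; y < h; ++y)
            for (int x = 0; x < w; ++x)
                out[(ch * h + y) * w + x] =
                    static_cast<float>(img.ptr<uchar>(y)[x * c + ch]);
    return 0;
}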
Example 4: split
void cv::split(const Mat& m, vector<Mat>& mv)
{
mv.resize(!m.empty() ? m.channels() : 0);
if(!m.empty())
split(m, &mv[0]);
}
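A minimal caller for this overload; the vector is sized from m.channels() and each returned plane is single-channel:
#include <opencv2/core.hpp>
#include <vector>

void splitDemo(const cv::Mat& bgr)               // e.g. a CV_8UC3 image
{
    std::vector<cv::Mat> planes;
    cv::split(bgr, planes);                      // planes.size() == bgr.channels()
    // Each plane now has the same rows/cols as bgr, with a single channel.
}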
Example 5: guiAlphaBlend
void guiAlphaBlend(const Mat& src1, const Mat& src2)
{
showMatInfo(src1,"src1");
cout<<endl;
showMatInfo(src2,"src2");
double minv,maxv;
minMaxLoc(src1, &minv, &maxv);
bool isNormirized = (maxv<=1.0 &&minv>=0.0) ? true:false;
Mat s1,s2;
if(src1.depth()==CV_8U || src1.depth()==CV_32F)
{
if(src1.channels()==1)cvtColor(src1,s1,CV_GRAY2BGR);
else s1 = src1;
if(src2.channels()==1)cvtColor(src2,s2,CV_GRAY2BGR);
else s2 = src2;
}
else
{
Mat ss1,ss2;
src1.convertTo(ss1,CV_32F);
src2.convertTo(ss2,CV_32F);
if(src1.channels()==1)cvtColor(ss1,s1,CV_GRAY2BGR);
else s1 = ss1.clone();
if(src2.channels()==1)cvtColor(ss2,s2,CV_GRAY2BGR);
else s2 = ss2.clone();
}
namedWindow("alphaBlend");
int a = 0;
createTrackbar("a","alphaBlend",&a,100);
int key = 0;
Mat show;
while(key!='q')
{
addWeighted(s1,1.0-a/100.0,s2,a/100.0,0.0,show);
if(show.depth()==CV_8U)
{
imshow("alphaBlend",show);
}
else
{
if(isNormirized)
{
imshow("alphaBlend",show);
}
else
{
minMaxLoc(show, &minv, &maxv);
Mat s;
if(maxv<=255)
show.convertTo(s,CV_8U);
else
show.convertTo(s,CV_8U,255/maxv);
imshow("alphaBlend",s);
}
}
key = waitKey(1);
if(key=='f')
{
a = (a > 0) ? 0 : 100;
setTrackbarPos("a","alphaBlend",a);
}
}
destroyWindow("alphaBlend");
}
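The channels()==1 branches above implement a common promotion idiom: single-channel inputs are expanded to three channels so that addWeighted sees matching types. A standalone sketch of that idiom:
#include <opencv2/imgproc.hpp>

// Return a 3-channel version of src regardless of its channel count.
static cv::Mat toThreeChannels(const cv::Mat& src)
{
    if (src.channels() != 1)
        return src;                              // already multi-channel
    cv::Mat bgr;
    cv::cvtColor(src, bgr, cv::COLOR_GRAY2BGR);  // replicate the gray plane
    return bgr;
}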
Example 6: _getKnockMask
//knock point detection in HSV space, using Hue and Saturation
PTS32 _getKnockMask(Mat& srcImg, Mat& dstImg,PTSysEnum&eBoard)
{
PTDEBUG("Enter %s\n", __FUNCTION__);
//select the right desktop corner threshold according to the iPad model
int RightDesktopConerthreshold = 0;
switch(eBoard) {
case PT_APPLE_IPAD2:
case PT_APPLE_IPAD3:
case PT_APPLE_IPAD4: {
RightDesktopConerthreshold = 30;
break;
}
case PT_APPLE_MINI1: {
RightDesktopConerthreshold = 40;
break;
}
case PT_APPLE_MINI2: {
RightDesktopConerthreshold = 40;
break;
}
case PT_APPLE_MINI3: {
RightDesktopConerthreshold = 30;
break;
}
case PT_APPLE_AIR : {
RightDesktopConerthreshold = 50;
break;
}
case PT_APPLE_AIR2 : {
RightDesktopConerthreshold = 50;
break;
}
default: {
RightDesktopConerthreshold = 30;
break;
}
}
Mat temp(srcImg.size(), CV_8UC3);
cvtColor(srcImg, temp, CV_RGB2HSV);
vector<Mat> hsv;
split(temp, hsv);
Mat hueImg = hsv[0];
Mat satImg = hsv[1];
const int hueMin = KNOCKRANGE[0][0];
const int hueMax = KNOCKRANGE[0][1];
const int satMin = KNOCKRANGE[0][2];
const int satMax = KNOCKRANGE[0][3];
const int hueMin_ = KNOCKRANGE[1][0];
const int hueMax_ = KNOCKRANGE[1][1];
const int satMin_ = KNOCKRANGE[1][2];
const int satMax_ = KNOCKRANGE[1][3];
PTDEBUG("hueMin[%d], hueMax[%d], satMin[%d], satMax[%d]\n", hueMin, hueMax, satMin, satMax);
const int rows = hueImg.rows;
const int cols = hueImg.cols;
const int channels = hueImg.channels();
for(int i = 0; i < rows; i++) {
const uchar* pHue = hueImg.ptr<uchar>(i);
const uchar* pSat = satImg.ptr<uchar>(i);
uchar* pDst = dstImg.ptr<uchar>(i);
for(int j = 0; j < cols; j += channels) {
const PTU8 hue = pHue[j];
const PTU8 sat = pSat[j];
if((cols - j + i > RightDesktopConerthreshold/*exclude top right desktop corner*/)
&& ((hueMin<=hue && hue<=hueMax && satMin<=sat && sat<=satMax) || (hueMin_<=hue && hue<=hueMax_ && satMin_<=sat && sat<=satMax_))) {
pDst[j] = 255;
} else {
pDst[j] = 0;
}
}
}
//dilate(dstImg, dstImg, Mat());
dilate(dstImg, dstImg, cv::Mat(), cv::Point(-1, -1), 1);
PTDEBUG("Exit %s\n", __FUNCTION__);
return PT_RET_OK;
}
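Note that hueImg and satImg are planes produced by split, so channels() is 1 here and the inner loop advances one pixel at a time. The same two-band hue/saturation test can also be expressed with inRange; a hedged equivalent with placeholder ranges (the real values live in KNOCKRANGE):
#include <opencv2/imgproc.hpp>

// Build a binary mask of pixels whose (H, S) fall in either of two bands.
static cv::Mat hueSatMask(const cv::Mat& bgr)
{
    cv::Mat hsv, band1, band2;
    cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);
    cv::inRange(hsv, cv::Scalar(0,   80, 0), cv::Scalar(10,  255, 255), band1);
    cv::inRange(hsv, cv::Scalar(170, 80, 0), cv::Scalar(180, 255, 255), band2);
    return band1 | band2;                        // 255 inside either band, 0 outside
}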
Example 7: write
bool PngEncoder::write( const Mat& img, const Vector<int>& params )
{
int compression_level = 0;
for( size_t i = 0; i < params.size(); i += 2 )
{
if( params[i] == CV_IMWRITE_PNG_COMPRESSION )
{
compression_level = params[i+1];
compression_level = MIN(MAX(compression_level, 0), MAX_MEM_LEVEL);
}
}
png_structp png_ptr = png_create_write_struct( PNG_LIBPNG_VER_STRING, 0, 0, 0 );
png_infop info_ptr = 0;
FILE* f = 0;
int y, width = img.cols, height = img.rows;
int depth = img.depth(), channels = img.channels();
bool result = false;
AutoBuffer<uchar*> buffer;
if( depth != CV_8U && depth != CV_16U )
return false;
if( png_ptr )
{
info_ptr = png_create_info_struct( png_ptr );
if( info_ptr )
{
if( setjmp( png_ptr->jmpbuf ) == 0 )
{
if( m_buf )
{
png_set_write_fn(png_ptr, this,
(png_rw_ptr)writeDataToBuf, (png_flush_ptr)flushBuf);
}
else
{
f = fopen( m_filename.c_str(), "wb" );
if( f )
png_init_io( png_ptr, f );
}
if( m_buf || f )
{
if( compression_level > 0 )
{
png_set_compression_mem_level( png_ptr, compression_level );
}
else
{
// tune parameters for speed
// (see http://wiki.linuxquestions.org/wiki/Libpng)
png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_SUB);
png_set_compression_level(png_ptr, Z_BEST_SPEED);
}
png_set_compression_strategy(png_ptr, Z_HUFFMAN_ONLY);
png_set_IHDR( png_ptr, info_ptr, width, height, depth == CV_8U ? 8 : 16,
channels == 1 ? PNG_COLOR_TYPE_GRAY :
channels == 3 ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGBA,
PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_DEFAULT );
png_write_info( png_ptr, info_ptr );
png_set_bgr( png_ptr );
if( !isBigEndian() )
png_set_swap( png_ptr );
buffer.allocate(height);
for( y = 0; y < height; y++ )
buffer[y] = img.data + y*img.step;
png_write_image( png_ptr, buffer );
png_write_end( png_ptr, info_ptr );
result = true;
}
}
}
}
png_destroy_write_struct( &png_ptr, &info_ptr );
if(f) fclose( f );
return result;
}
Example 8: write
void write(std::ostream& out, const Mat& m, const int*, int) const
{
writeMat(out, m, ' ', ' ', m.rows*m.channels() == 1);
if(m.rows > 1)
out << "\n";
}
Example 9: main
//......... part of the code omitted here .........
//If we have image(s) passed as an argument do to classification using NBNN
if (argc>2)
{
//First extract features
//Load filters bank and withenning params
Mat filters, M, P;
FileStorage fs("first_layer_filters.xml", FileStorage::READ);
fs["D"] >> filters;
fs["M"] >> M;
fs["P"] >> P;
fs.release();
int src_height = 64;
int image_size = 32;
int quad_size = 12;
int patch_size = 8;
int num_quads = 25; //extract 25 quads (12x12) from each image
int num_tiles = 25; //extract 25 patches (8x8) from each quad
double alpha = 0.5; //used for feature representation:
//scalar non-linear function z = max(0, |D*a| - alpha)
Mat quad;
Mat tmp;
ofstream outfile;
outfile.open (argv[argc-1]);
for (int f=1; f<argc-1; f++)
{
cout << "Extracting features for image " << argv[f] << " ... "; cout.flush();
Mat src = imread(argv[f]);
if(src.channels() != 3)
return 0;
cvtColor(src,src,COLOR_RGB2GRAY);
int src_width = (src.cols*src_height)/src.rows;
resize(src,src,Size(src_width,src_height));
Mat query = Mat::zeros(0,1737,CV_64FC1);
// Do sliding window from x=0 to src_width-image_size in three rows (top,middle,bottom)
for (int y=0; y<=src_height-image_size; y=y+8)
{
for (int x=0; x<=src_width-image_size; x=x+8)
{
Mat img;
src(Rect(x,y,image_size,image_size)).copyTo(img); // img must be 32x32 pixels
vector< vector<double> > data_pool(9);
int quad_id = 1;
for (int q_x=0; q_x<=image_size-quad_size; q_x=q_x+(quad_size/2-1))
{
for (int q_y=0; q_y<=image_size-quad_size; q_y=q_y+(quad_size/2-1))
{
Rect quad_rect = Rect(q_x,q_y,quad_size,quad_size);
img(quad_rect).copyTo(quad);
//start sliding window (8x8) in each tile and store the patch as row in data_pool
for (int w_x=0; w_x<=quad_size-patch_size; w_x++)
{
for (int w_y=0; w_y<=quad_size-patch_size; w_y++)
{
quad(Rect(w_x,w_y,patch_size,patch_size)).copyTo(tmp);
tmp = tmp.reshape(0,1);
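reshape(0, 1) in the last line keeps the channel count (the 0) and flattens the patch into a single row that shares data with the original; in isolation:
#include <opencv2/core.hpp>

void reshapeDemo()
{
    // reshape(cn, rows) with cn == 0 preserves the channel count; the result
    // shares data with the original matrix instead of copying it.
    cv::Mat patch(8, 8, CV_64FC1);
    cv::Mat row = patch.reshape(0, 1);   // 1 x 64, still CV_64FC1
}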
Example 10: meanStdDev
void meanStdDev(const Mat& m, Scalar& mean, Scalar& stddev, const Mat& mask) {
static MeanStdDevFunc tab[] = {
meanStdDev_<SqrC1<uchar, double> >, 0,
meanStdDev_<SqrC1<ushort, double> >,
meanStdDev_<SqrC1<short, double> >,
meanStdDev_<SqrC1<int, double> >,
meanStdDev_<SqrC1<float, double> >,
meanStdDev_<SqrC1<double, double> >, 0,
meanStdDev_<SqrC2<uchar, double> >, 0,
meanStdDev_<SqrC2<ushort, double> >,
meanStdDev_<SqrC2<short, double> >,
meanStdDev_<SqrC2<int, double> >,
meanStdDev_<SqrC2<float, double> >,
meanStdDev_<SqrC2<double, double> >, 0,
meanStdDev_<SqrC3<uchar, double> >, 0,
meanStdDev_<SqrC3<ushort, double> >,
meanStdDev_<SqrC3<short, double> >,
meanStdDev_<SqrC3<int, double> >,
meanStdDev_<SqrC3<float, double> >,
meanStdDev_<SqrC3<double, double> >, 0,
meanStdDev_<SqrC4<uchar, double> >, 0,
meanStdDev_<SqrC4<ushort, double> >,
meanStdDev_<SqrC4<short, double> >,
meanStdDev_<SqrC4<int, double> >,
meanStdDev_<SqrC4<float, double> >,
meanStdDev_<SqrC4<double, double> >, 0
};
static MeanStdDevMaskFunc mtab[] = {
meanStdDevMask_<SqrC1<uchar, double> >, 0,
meanStdDevMask_<SqrC1<ushort, double> >,
meanStdDevMask_<SqrC1<short, double> >,
meanStdDevMask_<SqrC1<int, double> >,
meanStdDevMask_<SqrC1<float, double> >,
meanStdDevMask_<SqrC1<double, double> >, 0,
meanStdDevMask_<SqrC2<uchar, double> >, 0,
meanStdDevMask_<SqrC2<ushort, double> >,
meanStdDevMask_<SqrC2<short, double> >,
meanStdDevMask_<SqrC2<int, double> >,
meanStdDevMask_<SqrC2<float, double> >,
meanStdDevMask_<SqrC2<double, double> >, 0,
meanStdDevMask_<SqrC3<uchar, double> >, 0,
meanStdDevMask_<SqrC3<ushort, double> >,
meanStdDevMask_<SqrC3<short, double> >,
meanStdDevMask_<SqrC3<int, double> >,
meanStdDevMask_<SqrC3<float, double> >,
meanStdDevMask_<SqrC3<double, double> >, 0,
meanStdDevMask_<SqrC4<uchar, double> >, 0,
meanStdDevMask_<SqrC4<ushort, double> >,
meanStdDevMask_<SqrC4<short, double> >,
meanStdDevMask_<SqrC4<int, double> >,
meanStdDevMask_<SqrC4<float, double> >,
meanStdDevMask_<SqrC4<double, double> >, 0
};
CV_Assert(m.channels() <= 4);
if (!mask.data) {
MeanStdDevFunc func = tab[m.type()];
CV_Assert(func != 0);
func(m, mean, stddev);
} else {
MeanStdDevMaskFunc func = mtab[m.type()];
CV_Assert(mask.size() == m.size() && mask.type() == CV_8U && func != 0);
func(m, mask, mean, stddev);
}
}
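Both tables are indexed by m.type(), which packs the depth and the channel count into one integer: type() == CV_MAKETYPE(depth, channels) == depth + (channels - 1) * 8, which is why the tables list eight depth slots per channel count. A small sketch of that packing:
#include <opencv2/core.hpp>
#include <cassert>

void typePackingDemo()
{
    cv::Mat m(2, 2, CV_32FC3);
    assert(m.type()     == CV_MAKETYPE(CV_32F, 3));   // == CV_32F + 2 * 8
    assert(m.depth()    == CV_32F);
    assert(m.channels() == 3);
}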
Example 11: crossCorr
void crossCorr( const Mat& img, const Mat& _templ, Mat& corr,
Size corrsize, int ctype,
Point anchor, double delta, int borderType )
{
const double blockScale = 4.5;
const int minBlockSize = 256;
std::vector<uchar> buf;
Mat templ = _templ;
int depth = img.depth(), cn = img.channels();
int tdepth = templ.depth(), tcn = templ.channels();
int cdepth = CV_MAT_DEPTH(ctype), ccn = CV_MAT_CN(ctype);
CV_Assert( img.dims <= 2 && templ.dims <= 2 && corr.dims <= 2 );
if( depth != tdepth && tdepth != std::max(CV_32F, depth) )
{
_templ.convertTo(templ, std::max(CV_32F, depth));
tdepth = templ.depth();
}
CV_Assert( depth == tdepth || tdepth == CV_32F);
CV_Assert( corrsize.height <= img.rows + templ.rows - 1 &&
corrsize.width <= img.cols + templ.cols - 1 );
CV_Assert( ccn == 1 || delta == 0 );
corr.create(corrsize, ctype);
int maxDepth = depth > CV_8S ? CV_64F : std::max(std::max(CV_32F, tdepth), cdepth);
Size blocksize, dftsize;
blocksize.width = cvRound(templ.cols*blockScale);
blocksize.width = std::max( blocksize.width, minBlockSize - templ.cols + 1 );
blocksize.width = std::min( blocksize.width, corr.cols );
blocksize.height = cvRound(templ.rows*blockScale);
blocksize.height = std::max( blocksize.height, minBlockSize - templ.rows + 1 );
blocksize.height = std::min( blocksize.height, corr.rows );
dftsize.width = std::max(getOptimalDFTSize(blocksize.width + templ.cols - 1), 2);
dftsize.height = getOptimalDFTSize(blocksize.height + templ.rows - 1);
if( dftsize.width <= 0 || dftsize.height <= 0 )
CV_Error( CV_StsOutOfRange, "the input arrays are too big" );
// recompute block size
blocksize.width = dftsize.width - templ.cols + 1;
blocksize.width = MIN( blocksize.width, corr.cols );
blocksize.height = dftsize.height - templ.rows + 1;
blocksize.height = MIN( blocksize.height, corr.rows );
Mat dftTempl( dftsize.height*tcn, dftsize.width, maxDepth );
Mat dftImg( dftsize, maxDepth );
int i, k, bufSize = 0;
if( tcn > 1 && tdepth != maxDepth )
bufSize = templ.cols*templ.rows*CV_ELEM_SIZE(tdepth);
if( cn > 1 && depth != maxDepth )
bufSize = std::max( bufSize, (blocksize.width + templ.cols - 1)*
(blocksize.height + templ.rows - 1)*CV_ELEM_SIZE(depth));
if( (ccn > 1 || cn > 1) && cdepth != maxDepth )
bufSize = std::max( bufSize, blocksize.width*blocksize.height*CV_ELEM_SIZE(cdepth));
buf.resize(bufSize);
// compute DFT of each template plane
for( k = 0; k < tcn; k++ )
{
int yofs = k*dftsize.height;
Mat src = templ;
Mat dst(dftTempl, Rect(0, yofs, dftsize.width, dftsize.height));
Mat dst1(dftTempl, Rect(0, yofs, templ.cols, templ.rows));
if( tcn > 1 )
{
src = tdepth == maxDepth ? dst1 : Mat(templ.size(), tdepth, &buf[0]);
int pairs[] = {k, 0};
mixChannels(&templ, 1, &src, 1, pairs, 1);
}
if( dst1.data != src.data )
src.convertTo(dst1, dst1.depth());
if( dst.cols > templ.cols )
{
Mat part(dst, Range(0, templ.rows), Range(templ.cols, dst.cols));
part = Scalar::all(0);
}
dft(dst, dst, 0, templ.rows);
}
int tileCountX = (corr.cols + blocksize.width - 1)/blocksize.width;
int tileCountY = (corr.rows + blocksize.height - 1)/blocksize.height;
int tileCount = tileCountX * tileCountY;
Size wholeSize = img.size();
Point roiofs(0,0);
Mat img0 = img;
//......... part of the code omitted here .........
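The per-plane handling above relies on mixChannels to pull one channel of the template (or image block) into a single-channel matrix before the DFT. That step in isolation, with an illustrative helper name:
#include <opencv2/core.hpp>

// Extract channel k of a multi-channel matrix into a single-channel matrix.
static cv::Mat extractPlane(const cv::Mat& src, int k)
{
    cv::Mat plane(src.size(), CV_MAKETYPE(src.depth(), 1));
    int pairs[] = { k, 0 };                      // src channel k -> dst channel 0
    cv::mixChannels(&src, 1, &plane, 1, pairs, 1);
    return plane;
}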
Example 12: matchTemplate
void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, int method )
{
CV_Assert( CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED );
int numType = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
bool isNormed = method == CV_TM_CCORR_NORMED ||
method == CV_TM_SQDIFF_NORMED ||
method == CV_TM_CCOEFF_NORMED;
Mat img = _img.getMat(), templ = _templ.getMat();
if( img.rows < templ.rows || img.cols < templ.cols )
std::swap(img, templ);
CV_Assert( (img.depth() == CV_8U || img.depth() == CV_32F) &&
img.type() == templ.type() );
Size corrSize(img.cols - templ.cols + 1, img.rows - templ.rows + 1);
_result.create(corrSize, CV_32F);
Mat result = _result.getMat();
int cn = img.channels();
crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0);
if( method == CV_TM_CCORR )
return;
double invArea = 1./((double)templ.rows * templ.cols);
Mat sum, sqsum;
Scalar templMean, templSdv;
double *q0 = 0, *q1 = 0, *q2 = 0, *q3 = 0;
double templNorm = 0, templSum2 = 0;
if( method == CV_TM_CCOEFF )
{
integral(img, sum, CV_64F);
templMean = mean(templ);
}
else
{
integral(img, sum, sqsum, CV_64F);
meanStdDev( templ, templMean, templSdv );
templNorm = CV_SQR(templSdv[0]) + CV_SQR(templSdv[1]) +
CV_SQR(templSdv[2]) + CV_SQR(templSdv[3]);
if( templNorm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
{
result = Scalar::all(1);
return;
}
templSum2 = templNorm +
CV_SQR(templMean[0]) + CV_SQR(templMean[1]) +
CV_SQR(templMean[2]) + CV_SQR(templMean[3]);
if( numType != 1 )
{
templMean = Scalar::all(0);
templNorm = templSum2;
}
templSum2 /= invArea;
templNorm = sqrt(templNorm);
templNorm /= sqrt(invArea); // care of accuracy here
q0 = (double*)sqsum.data;
q1 = q0 + templ.cols*cn;
q2 = (double*)(sqsum.data + templ.rows*sqsum.step);
q3 = q2 + templ.cols*cn;
}
double* p0 = (double*)sum.data;
double* p1 = p0 + templ.cols*cn;
double* p2 = (double*)(sum.data + templ.rows*sum.step);
double* p3 = p2 + templ.cols*cn;
int sumstep = sum.data ? (int)(sum.step / sizeof(double)) : 0;
int sqstep = sqsum.data ? (int)(sqsum.step / sizeof(double)) : 0;
int i, j, k;
for( i = 0; i < result.rows; i++ )
{
float* rrow = (float*)(result.data + i*result.step);
int idx = i * sumstep;
int idx2 = i * sqstep;
for( j = 0; j < result.cols; j++, idx += cn, idx2 += cn )
{
double num = rrow[j], t;
double wndMean2 = 0, wndSum2 = 0;
if( numType == 1 )
{
for( k = 0; k < cn; k++ )
{
t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];
wndMean2 += CV_SQR(t);
//......... part of the code omitted here .........
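A minimal caller for matchTemplate, showing the minMaxLoc step it is usually paired with (the comparison method here is just one possible choice):
#include <opencv2/imgproc.hpp>

// Find the top-left corner of the best match of templ inside img.
static cv::Point bestMatch(const cv::Mat& img, const cv::Mat& templ)
{
    cv::Mat result;
    cv::matchTemplate(img, templ, result, cv::TM_CCOEFF_NORMED);
    cv::Point maxLoc;
    cv::minMaxLoc(result, 0, 0, 0, &maxLoc);     // result is single-channel CV_32F
    return maxLoc;
}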
Example 13: divSpectrums
static void divSpectrums( InputArray _srcA, InputArray _srcB, OutputArray _dst, int flags, bool conjB)
{
Mat srcA = _srcA.getMat(), srcB = _srcB.getMat();
int depth = srcA.depth(), cn = srcA.channels(), type = srcA.type();
int rows = srcA.rows, cols = srcA.cols;
int j, k;
CV_Assert( type == srcB.type() && srcA.size() == srcB.size() );
CV_Assert( type == CV_32FC1 || type == CV_32FC2 || type == CV_64FC1 || type == CV_64FC2 );
_dst.create( srcA.rows, srcA.cols, type );
Mat dst = _dst.getMat();
bool is_1d = (flags & DFT_ROWS) || (rows == 1 || (cols == 1 &&
srcA.isContinuous() && srcB.isContinuous() && dst.isContinuous()));
if( is_1d && !(flags & DFT_ROWS) )
cols = cols + rows - 1, rows = 1;
int ncols = cols*cn;
int j0 = cn == 1;
int j1 = ncols - (cols % 2 == 0 && cn == 1);
if( depth == CV_32F )
{
const float* dataA = srcA.ptr<float>();
const float* dataB = srcB.ptr<float>();
float* dataC = dst.ptr<float>();
float eps = FLT_EPSILON; // prevent div0 problems
size_t stepA = srcA.step/sizeof(dataA[0]);
size_t stepB = srcB.step/sizeof(dataB[0]);
size_t stepC = dst.step/sizeof(dataC[0]);
if( !is_1d && cn == 1 )
{
for( k = 0; k < (cols % 2 ? 1 : 2); k++ )
{
if( k == 1 )
dataA += cols - 1, dataB += cols - 1, dataC += cols - 1;
dataC[0] = dataA[0] / (dataB[0] + eps);
if( rows % 2 == 0 )
dataC[(rows-1)*stepC] = dataA[(rows-1)*stepA] / (dataB[(rows-1)*stepB] + eps);
if( !conjB )
for( j = 1; j <= rows - 2; j += 2 )
{
double denom = (double)dataB[j*stepB]*dataB[j*stepB] +
(double)dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + (double)eps;
double re = (double)dataA[j*stepA]*dataB[j*stepB] +
(double)dataA[(j+1)*stepA]*dataB[(j+1)*stepB];
double im = (double)dataA[(j+1)*stepA]*dataB[j*stepB] -
(double)dataA[j*stepA]*dataB[(j+1)*stepB];
dataC[j*stepC] = (float)(re / denom);
dataC[(j+1)*stepC] = (float)(im / denom);
}
else
for( j = 1; j <= rows - 2; j += 2 )
{
double denom = (double)dataB[j*stepB]*dataB[j*stepB] +
(double)dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + (double)eps;
double re = (double)dataA[j*stepA]*dataB[j*stepB] -
(double)dataA[(j+1)*stepA]*dataB[(j+1)*stepB];
double im = (double)dataA[(j+1)*stepA]*dataB[j*stepB] +
(double)dataA[j*stepA]*dataB[(j+1)*stepB];
dataC[j*stepC] = (float)(re / denom);
dataC[(j+1)*stepC] = (float)(im / denom);
}
if( k == 1 )
dataA -= cols - 1, dataB -= cols - 1, dataC -= cols - 1;
}
}
for( ; rows--; dataA += stepA, dataB += stepB, dataC += stepC )
{
if( is_1d && cn == 1 )
{
dataC[0] = dataA[0] / (dataB[0] + eps);
if( cols % 2 == 0 )
dataC[j1] = dataA[j1] / (dataB[j1] + eps);
}
if( !conjB )
for( j = j0; j < j1; j += 2 )
{
double denom = (double)(dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps);
double re = (double)(dataA[j]*dataB[j] + dataA[j+1]*dataB[j+1]);
double im = (double)(dataA[j+1]*dataB[j] - dataA[j]*dataB[j+1]);
dataC[j] = (float)(re / denom);
dataC[j+1] = (float)(im / denom);
}
else
for( j = j0; j < j1; j += 2 )
{
//......... part of the code omitted here .........
Example 14: main
int main( int argc, char** argv )
{
Mat img;
/// Load image
if( argc != 2 || !(img=imread(argv[1], 1)).data || img.channels()!=3 ) return -1;
/// Resize (downsize) the image
double scale = SIZE / ((img.rows>img.cols)?img.rows:img.cols);
Mat small;
resize(img, small, small.size(), scale, scale, INTER_AREA);
#ifdef DEBUG
printf("Source image resized to %d x %d\n", small.cols, small.rows);
#endif
/// Separate the image in 3 places ( B, G and R )
vector<Mat> rgbPlanes;
split( small, rgbPlanes );
/// Finding the petri-film circle
// TODO keep time
std::vector<cv::Point> circle;
findCircle(rgbPlanes[1], circle);
if(circle.size() <= 0) {
printf("Could not find the petri-film circle!\nPlease make sure that 20 percent of center of the image is entirely on the perti-film.\n");
exit(1);
}
#ifdef DEBUG
printf("Petri-film contour: %d points\n", circle.size());
Mat debugImg1 = small.clone();
std::vector< std::vector<cv::Point> > debugContours;
debugContours.push_back(circle);
drawContours(debugImg1, debugContours, 0, Scalar(0,255,0), 3, 8);
namedWindow("Found petri-film circle", CV_WINDOW_KEEPRATIO);
imshow("Found petri-film circle", debugImg1 );
//waitKey(0);
#endif
/// Get the petri-film region and circle mask
Mat petri;
Mat mask;
getMask(small, circle, petri, mask);
vector<Mat> maskPlanes;
split( mask, maskPlanes );
/// Separate the image in 3 places ( B, G and R )
vector<Mat> petriPlanes;
split( petri, petriPlanes );
#ifdef DEBUG
/// Histo-Quad
printf("\nHisto-Quad\n");
#endif
// TODO Check Background function (instead of mean)
/// Calculate 4 background colors
Rect tmpRect;
Mat tmpROI;
Mat tmpMask;
//cv::Scalar tmpMean;
Scalar tmpBG[4];
/// Top region
tmpRect.x = 0;
tmpRect.y = 0;
tmpRect.width = petri.cols;
tmpRect.height = petri.rows / 2;
tmpROI = Mat(petri, tmpRect);
tmpMask = Mat(maskPlanes[0], tmpRect);
tmpBG[0] = mean( tmpROI, tmpMask );
/// Buttom region
//tmpRect.x = 0;
tmpRect.y = petri.rows / 2;
//tmpRect.width = petri.cols;
//tmpRect.height = petri.rows / 2;
tmpROI = Mat(petri, tmpRect);
tmpMask = Mat(maskPlanes[0], tmpRect);
tmpBG[1] = mean( tmpROI, tmpMask );
/// Left region
//tmpRect.x = 0;
tmpRect.y = 0;
tmpRect.width = petri.cols / 2;
tmpRect.height = petri.rows;
tmpROI = Mat(petri, tmpRect);
tmpMask = Mat(maskPlanes[0], tmpRect);
tmpBG[2] = mean( tmpROI, tmpMask );
/// Right region
tmpRect.x = petri.cols / 2;
//tmpRect.y = 0;
//tmpRect.width = petri.cols / 2;
//tmpRect.height = petri.rows;
tmpROI = Mat(petri, tmpRect);
tmpMask = Mat(maskPlanes[0], tmpRect);
tmpBG[3] = mean( tmpROI, tmpMask );
#ifdef DEBUG
//......... part of the code omitted here .........
Example 15: start
void Dtmove::start(int i ,String pas= "./images/")
{
int sorr=0x05,ack;
clientinit();
send(sockfd,&sorr,4,0);
path =pas;
cap = VideoCapture(i);
cap >> frame;
datasize = frame.rows*frame.cols*frame.channels();
occ = 0;
color = Scalar( 0, 255, 0);
element = getStructuringElement( 0,Size( 3, 3 ), Point(1, 1 ) );
ckcamera();
// initialize the background frame
cap >> frame;
cvtColor(frame, avg, COLOR_BGR2GRAY);
GaussianBlur(avg, avg, Size(7,7), 1.5, 1.5);
gray = avg.clone();
frameold = gray.clone();
while(1) // main loop
{
recvall(sockfd,&ack,4);
if(ack != 0x77) perror("ack");
cap >> frame; // grab one frame
cvtColor(frame, gray, COLOR_BGR2GRAY); // convert to grayscale
GaussianBlur(gray, gray, Size(7,7), 1.5, 1.5); // Gaussian blur
absdiff(gray,avg,differ); // absolute difference of the two images, stored in differ
threshold(differ,thresh, 40, 255, THRESH_BINARY); // binarize with the given threshold
dilate(thresh,bigger, element,Point(-1,-1), 1); // dilate the image
findContours(bigger,contours, RETR_EXTERNAL,CHAIN_APPROX_SIMPLE); // find contours
for(unsigned int j = 0; j < contours.size(); j++ )
{
if (contourArea(contours[j])<1000) continue;
occ = 1;
ret = boundingRect(contours[j]);
rectangle(frame,ret,color,2);
}
// rebuild the background
if (occ==0)
{
times = 0;
mean = cv::mean(differ); // qualified with cv:: because of a clash with the local name 'mean'
//cout<<"m"<<mean[0]<<endl;
if (mean[0] > 2)
{
avg = gray.clone();
}
}else
{
if (times > 30)
{
/* rebuild the background */
times = 0;
absdiff(gray,frameold,frameold);
mean = cv::mean(frameold); // qualified with cv:: because of the name clash
//cout<<"f"<<mean[0]<<endl;
if ( mean[0] < 2 )
{
avg = gray.clone();
}
frameold = gray.clone();
}
times++;
occ = 0;
}
// display the image
sender();
//imshow("frame", frame);
//imshow("avg", avg);
//imshow("thresh", thresh);
//imshow("differ", differ);
//key = waitKey(25)&0xFF;
//if (key == 's')
//{
//now_time = time(NULL);
//p=localtime(&now_time);
//strftime(fmt_time, sizeof(fmt_time), "%Y_%m_%d_%H_%M_%S", p);
//svtime = path + format("%s.jpg",fmt_time);
//cout << svtime << endl;
//imwrite(svtime,frame);
//}
//if(key == 'q') break;
//timeuse=1000000*(tpend.tv_sec-tpstart.tv_sec)+
//tpend.tv_usec-tpstart.tv_usec;
//timeuse/=1000000;
//cout<<timeuse<<endl;
}
//destroyWindow("frame");
cap.release();
close(sockfd);
}
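Stripped of the socket and background-maintenance logic, the detection loop above is a standard frame-differencing pipeline. A compact restatement of just that pipeline, with the threshold and minimum-area values copied from the code:
#include <opencv2/imgproc.hpp>
#include <vector>

// Bounding boxes of regions that differ between a background and the current frame.
static std::vector<cv::Rect> detectMotion(const cv::Mat& background,
                                          const cv::Mat& grayFrame)
{
    cv::Mat diff, mask;
    cv::absdiff(grayFrame, background, diff);
    cv::threshold(diff, mask, 40, 255, cv::THRESH_BINARY);
    cv::dilate(mask, mask, cv::Mat(), cv::Point(-1, -1), 1);

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(mask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    std::vector<cv::Rect> boxes;
    for (size_t i = 0; i < contours.size(); ++i)
        if (cv::contourArea(contours[i]) >= 1000)    // ignore small blobs, as above
            boxes.push_back(cv::boundingRect(contours[i]));
    return boxes;
}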