This article collects typical usage examples of the C++ method cv::Mat::type. If you are wondering what Mat::type does, how to call it, and what it looks like in real code, the hand-picked examples below should help. You can also read further about the enclosing class, cv::Mat.
The following 15 code examples of Mat::type are shown, sorted by popularity by default.
Example 1: QImage
QImage img2qimg(cv::Mat& img)
{
    // convert the color to RGB (OpenCV uses BGR)
    switch (img.type()) {
    case CV_8UC1:
        cv::cvtColor(img, img, CV_GRAY2RGB);
        break;
    case CV_8UC3:
        cv::cvtColor(img, img, CV_BGR2RGB);
        break;
    }
    // return the QImage
    return QImage((uchar*) img.data, img.cols, img.rows, img.step, QImage::Format_RGB888);
}
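A minimal usage sketch for the conversion above (the file name and the QLabel* named label are assumptions, not part of the original example). Because the returned QImage wraps img.data rather than copying it, the cv::Mat must stay alive while the QImage is in use; calling QImage::copy() detaches the pixel data:
cv::Mat frame = cv::imread("frame.png");
if (!frame.empty()) {
    // copy() gives the QImage its own buffer, so `frame` may go out of scope afterwards
    QImage qimg = img2qimg(frame).copy();
    label->setPixmap(QPixmap::fromImage(qimg));   // `label` is an existing QLabel*
}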
Example 2: sum
float pixkit::qualityassessment::SNS(const cv::Mat &src1b, int ksize){
    //////////////////////////////////////////////////////////////////////////
    ///// exception
    if(src1b.type() != CV_8UC1){
        CV_Assert(false);
    }
    //////////////////////////////////////////////////////////////////////////
    ///// process
    Mat src1b_bar;
    cv::medianBlur(src1b, src1b_bar, ksize);
    Mat m_diff;
    cv::absdiff(src1b, src1b_bar, m_diff);
    //////////////////////////////////////////////////////////////////////////
    ///// get sns
    return cv::sum(m_diff)[0] / ((double)src1b.rows*src1b.cols*255.) * 100.;
}
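A short usage sketch, assuming the pixkit library is linked and that "halftone.png" is an 8-bit grayscale image on disk (both chosen purely for illustration):
cv::Mat img = cv::imread("halftone.png", cv::IMREAD_GRAYSCALE);   // CV_8UC1, as the type check requires
float score = pixkit::qualityassessment::SNS(img, 3);             // 3x3 median window
std::cout << "SNS = " << score << " %" << std::endl;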
Example 3: contrastAndBrightByTrackbar
// Main routine for adjusting image contrast and brightness
void OpenCV_Function::contrastAndBrightByTrackbar(){
    system("color 5F");
    g_srcImage = cv::imread("girl-t1.jpg");
    g_resultImage = cv::Mat::zeros( g_srcImage.size(), g_srcImage.type());
    cv::namedWindow( WINDOW_NAME1, cv::WINDOW_AUTOSIZE );
    cv::namedWindow( WINDOW_NAME2, cv::WINDOW_AUTOSIZE );
    cv::createTrackbar("Contrast:", WINDOW_NAME2, &g_nContrastValue, 300, contrastAndBright );
    cv::createTrackbar("Brightness:", WINDOW_NAME2, &g_nBrightValue, 200, contrastAndBright );
    contrastAndBright(g_nContrastValue, 0);
    contrastAndBright(g_nBrightValue, 0);
}
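The trackbar callback contrastAndBright() is not shown in this example. A rough sketch of what such a callback typically does, assuming the same globals (g_srcImage, g_resultImage, g_nContrastValue, g_nBrightValue, WINDOW_NAME1/2) and the usual per-pixel formula dst = alpha*src + beta:
static void contrastAndBright(int, void*)
{
    for (int y = 0; y < g_srcImage.rows; y++) {
        for (int x = 0; x < g_srcImage.cols; x++) {
            for (int c = 0; c < 3; c++) {
                g_resultImage.at<cv::Vec3b>(y, x)[c] = cv::saturate_cast<uchar>(
                    (g_nContrastValue * 0.01) * g_srcImage.at<cv::Vec3b>(y, x)[c] + g_nBrightValue);
            }
        }
    }
    cv::imshow(WINDOW_NAME1, g_srcImage);
    cv::imshow(WINDOW_NAME2, g_resultImage);
}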
Example 4: InitShape
//===========================================================================
void Tracker::InitShape(cv::Rect &r, cv::Mat &shape)
{
    assert((shape.rows == _rshape.rows) && (shape.cols == _rshape.cols) &&
           (shape.type() == CV_64F));
    int i, n = _rshape.rows/2;
    double a, b, tx, ty;
    a  = r.width*cos(_simil[1])*_simil[0] + 1;
    b  = r.width*sin(_simil[1])*_simil[0];
    tx = r.x + r.width/2  + r.width *_simil[2];
    ty = r.y + r.height/2 + r.height*_simil[3];
    cv::MatIterator_<double> sx = _rshape.begin<double>();
    cv::MatIterator_<double> sy = _rshape.begin<double>() + n;
    cv::MatIterator_<double> dx = shape.begin<double>();
    cv::MatIterator_<double> dy = shape.begin<double>() + n;
    for(i = 0; i < n; i++, ++sx, ++sy, ++dx, ++dy){
        *dx = a*(*sx) - b*(*sy) + tx;
        *dy = b*(*sx) + a*(*sy) + ty;
    }
    return;
}
Example 5: cvMatToQImage
inline QImage MainWindow::cvMatToQImage( const cv::Mat &inMat )
{
    switch ( inMat.type() )
    {
        // 8-bit, 4 channel
        case CV_8UC4:
        {
            QImage image( inMat.data, inMat.cols, inMat.rows, inMat.step, QImage::Format_RGB32 );
            return image;
        }
        // 8-bit, 3 channel
        case CV_8UC3:
        {
            QImage image( inMat.data, inMat.cols, inMat.rows, inMat.step, QImage::Format_RGB888 );
            return image.rgbSwapped();
        }
        // 8-bit, 1 channel
        case CV_8UC1:
        {
            static QVector<QRgb> sColorTable;
            // only create our color table once
            if ( sColorTable.isEmpty() )
            {
                for ( int i = 0; i < 256; ++i )
                    sColorTable.push_back( qRgb( i, i, i ) );
            }
            QImage image( inMat.data, inMat.cols, inMat.rows, inMat.step, QImage::Format_Indexed8 );
            image.setColorTable( sColorTable );
            return image;
        }
        default:
            //qWarning() << "ASM::cvMatToQImage() - cv::Mat image type not handled in switch:" << inMat.type();
            break;
    }
    return QImage();
}
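One caveat worth keeping in mind: in the CV_8UC3 branch, rgbSwapped() already returns a deep copy, but in the CV_8UC4 and CV_8UC1 branches the returned QImage still points into inMat.data. A sketch of a call site inside MainWindow (grabFrame() and ui->imageLabel are hypothetical names used only for illustration):
cv::Mat frame = grabFrame();                            // hypothetical source of a CV_8UC4 or CV_8UC1 frame
QImage img = cvMatToQImage(frame).copy();               // copy() detaches from frame's buffer
ui->imageLabel->setPixmap(QPixmap::fromImage(img));     // hypothetical QLabel in the UI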
Example 6: remapImpl
template <typename T, template <typename> class Interpolator>
void remapImpl(const cv::Mat& src, const cv::Mat& xmap, const cv::Mat& ymap, cv::Mat& dst, int borderType, cv::Scalar borderVal)
{
    const int cn = src.channels();
    cv::Size dsize = xmap.size();
    dst.create(dsize, src.type());
    for (int y = 0; y < dsize.height; ++y)
    {
        for (int x = 0; x < dsize.width; ++x)
        {
            for (int c = 0; c < cn; ++c)
                dst.at<T>(y, x * cn + c) = Interpolator<T>::getValue(src, ymap.at<float>(y, x), xmap.at<float>(y, x), c, borderType, borderVal);
        }
    }
}
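The Interpolator policy class is not part of this example. Below is a minimal nearest-neighbour sketch that matches the getValue() interface used above; it only handles out-of-range coordinates with a constant border value and ignores borderType for brevity:
template <typename T>
struct NearestInterpolator
{
    static T getValue(const cv::Mat& src, float y, float x, int c,
                      int /*borderType*/, cv::Scalar borderVal)
    {
        int ix = cvRound(x), iy = cvRound(y);
        if (ix < 0 || iy < 0 || ix >= src.cols || iy >= src.rows)
            return cv::saturate_cast<T>(borderVal[c]);   // outside the image: constant border
        return src.at<T>(iy, ix * src.channels() + c);   // same packed-channel addressing as remapImpl
    }
};

// e.g. remapImpl<uchar, NearestInterpolator>(src, xmap, ymap, dst, cv::BORDER_CONSTANT, cv::Scalar());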
Example 7: show
static inline void show(const char* name, const cv::Mat& img)
{
    if(img.type() == CV_32F)
    {
        double min, max;
        cv::minMaxLoc(img, &min, &max);
        cv::Mat display = img / max;
        cv::imshow(name, display);
    }
    else
    {
        cv::imshow(name, img);
    }
}
Example 8: ASSERT
void Video2Log::logImage(cv::Mat &matImgOrig)
{
    cv::Mat matImg;
    naoth::Image repImg;
    ASSERT(matImgOrig.type() == CV_8SC3);
    // always resize to 320x240
    ASSERT(repImg.width() == 320);
    ASSERT(repImg.height() == 240);
    if(matImgOrig.rows != repImg.height() || matImgOrig.cols != repImg.width())
    {
        cv::resize(matImgOrig, matImg, cv::Size(repImg.width(), repImg.height()));
    }
    else
    {
        matImg = matImgOrig;
    }
    ASSERT(matImg.rows == repImg.height());
    ASSERT(matImg.cols == repImg.width());
    // we need YCbCr color space
    cv::cvtColor(matImg, matImg, CV_RGB2YCrCb);
    // convert the classical way
    for(unsigned int x = 0; x < matImg.cols && x < repImg.width(); x++)
    {
        for(unsigned int y = 0; y < matImg.rows && y < repImg.height(); y++)
        {
            cv::Vec3b v = matImg.at<cv::Vec3b>(y, x);
            Pixel p;
            p.y = v[0];
            p.u = v[1];
            p.v = v[2];
            repImg.set(x, y, p);
        }
    }
    // write out
    std::stringstream& stream = mgr.log(frameNumber, "Image");
    naoth::Serializer<naoth::Image>::serialize(repImg, stream);
    frameNumber++;
}
Example 9: energy2RGB
void energy2RGB(const cv::Mat &energyImage, cv::Mat &outputImage) {
    std::vector<cv::Mat> channels;
    cv::Mat o = cv::Mat::ones(cv::Size(energyImage.cols, energyImage.rows),
                              energyImage.type()) * 255;
    cv::Mat energyNormalized;
    normalize(energyImage, energyNormalized, 0, 255, cv::NORM_MINMAX);
    channels.push_back(energyNormalized);
    channels.push_back(o);
    channels.push_back(o);
    cv::Mat resultHSV;
    cv::merge(channels, resultHSV);
    cv::cvtColor(resultHSV, outputImage, CV_HSV2BGR);
}
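A usage sketch that feeds the function a gradient-magnitude map; the file names are assumptions, and the input is kept as CV_8UC1 since the ones matrix and the 8-bit HSV merge above assume a single-channel 8-bit energy image:
cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
cv::Mat grad, energy8u, vis;
cv::Sobel(gray, grad, CV_16S, 1, 0);       // horizontal gradient
cv::convertScaleAbs(grad, energy8u);       // back to CV_8UC1
energy2RGB(energy8u, vis);
cv::imwrite("energy_vis.png", vis);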
Example 10: cannyEdgeDetect
void cannyEdgeDetect()
{
    cv::VideoCapture cap(0); // capture the video from web cam
    if ( !cap.isOpened() )   // if not success, exit program
    {
        std::cout << "Cannot open the web cam" << std::endl;
        return;
    }
    // Load input image
    while(true)
    {
        bool bSuccess = cap.read(src); // read a new frame from video
        if (!bSuccess) // if not success, break loop
        {
            std::cout << "Cannot read a frame from video stream" << std::endl;
            break;
        }
        /// Create a matrix of the same type and size as src (for dst)
        dst.create( src.size(), src.type() );
        /// Convert the image to grayscale
        cv::cvtColor( src, src_gray, CV_BGR2GRAY );
        /// Create a window
        cv::namedWindow( window_name, CV_WINDOW_AUTOSIZE );
        /// Create a Trackbar for user to enter threshold
        cv::createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
        /// Show the image
        CannyThreshold(0, 0);
        if (cv::waitKey(30) == 27) // wait 30 ms for the 'esc' key; if pressed, break the loop
        {
            std::cout << "esc key is pressed by user" << std::endl;
            break;
        }
    }
}
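CannyThreshold() and the globals it works on (src, src_gray, dst, lowThreshold, window_name, plus a detected_edges buffer) are not shown in this example. A minimal sketch modeled on the standard OpenCV Canny tutorial callback:
static void CannyThreshold(int, void*)
{
    cv::blur(src_gray, detected_edges, cv::Size(3, 3));     // reduce noise first
    cv::Canny(detected_edges, detected_edges, lowThreshold,
              lowThreshold * 3, 3);                         // upper threshold = 3x lower, 3x3 Sobel kernel
    dst = cv::Scalar::all(0);
    src.copyTo(dst, detected_edges);                        // keep original pixels only where edges were found
    cv::imshow(window_name, dst);
}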
Example 11: fitt_image
/**
 * @brief Fits (letterboxes) an image into an output canvas of the given size.
 * @details The input is scaled to the largest size that still fits inside
 *          out_width x out_height while preserving its aspect ratio, and is
 *          centred on a black canvas of the output size.
 *
 * @param[in]  orig        input image.
 * @param[out] out         output canvas of size out_width x out_height.
 * @param[in]  out_width   width of the output canvas.
 * @param[in]  out_height  height of the output canvas.
 * @param[out] roi_out     rectangle inside `out` where the resized image was placed.
 *
 * @return the scale factor applied to the input, or -1 if the input is empty.
 */
double fitt_image(cv::Mat& orig, cv::Mat& out, int out_width, int out_height, cv::Rect* roi_out){
    // Test Input
    if(orig.cols <= 0 || orig.rows <= 0) return -1;
    // Create Output
    out = cv::Mat::zeros(cv::Size(out_width, out_height), orig.type());
    // Calc ROI and Resize Image
    if(!roi_out) roi_out = new cv::Rect();
    cv::Mat temp;
    int w, h;
    double output_ratio = (double)out.cols / (double)out.rows;
    double input_ratio = (double)orig.cols / (double)orig.rows;
    double ratio = 1.0;
    if(input_ratio < output_ratio){
        ratio = (float)out.rows / (float)orig.rows;
        w = orig.cols * ratio;
        h = orig.rows * ratio;
        cv::resize(orig, temp, cv::Size(w, h), 0.0, 0.0, cv::INTER_CUBIC);
        roi_out->x = (out.cols - w) / 2.0;
        roi_out->y = 0;
        roi_out->width = w;
        roi_out->height = h;
    }
    else{
        ratio = (float)out.cols / (float)orig.cols;
        w = orig.cols * ratio;
        h = orig.rows * ratio;
        cv::resize(orig, temp, cv::Size(w, h), 0.0, 0.0, cv::INTER_CUBIC);
        roi_out->x = 0;
        roi_out->y = (out.rows - h) / 2.0;
        roi_out->width = w;
        roi_out->height = h;
    }
    // Copy to Destination
    temp.copyTo(out(*roi_out));
    return ratio;
}
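A usage sketch that letterboxes an arbitrary image into a 640x480 canvas (the file name is chosen for illustration). Note that if no cv::Rect is passed, the function allocates one internally and never frees it, so passing your own rectangle is the cleaner way to call it:
cv::Mat input = cv::imread("photo.jpg");
cv::Mat canvas;
cv::Rect roi;
double scale = fitt_image(input, canvas, 640, 480, &roi);
if (scale > 0)
    std::cout << "scaled by " << scale << ", placed at ("
              << roi.x << "," << roi.y << ") " << roi.width << "x" << roi.height << std::endl;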
Example 12: opencv_get_mat_image_format
inline image_format opencv_get_mat_image_format(const cv::Mat &mat)
{
    switch(mat.type()){
        case CV_8UC4:
            return image_format(CL_BGRA, CL_UNORM_INT8);
        case CV_16UC4:
            return image_format(CL_BGRA, CL_UNORM_INT16);
        case CV_32F:
            return image_format(CL_INTENSITY, CL_FLOAT);
        case CV_32FC4:
            return image_format(CL_RGBA, CL_FLOAT);
        case CV_8UC1:
            return image_format(CL_INTENSITY, CL_UNORM_INT8);
    }
    BOOST_THROW_EXCEPTION(opencl_error(CL_IMAGE_FORMAT_NOT_SUPPORTED));
}
Example 13: Mat_to_vector_Mat
//vector_Mat
void Mat_to_vector_Mat(cv::Mat& mat, std::vector<cv::Mat>& v_mat)
{
    v_mat.clear();
    if(mat.type() == CV_32SC2 && mat.cols == 1)
    {
        v_mat.reserve(mat.rows);
        for(int i = 0; i < mat.rows; i++)
        {
            Vec<int, 2> a = mat.at< Vec<int, 2> >(i, 0);
            long long addr = (((long long)a[0]) << 32) | (a[1] & 0xffffffff);
            Mat& m = *( (Mat*) addr );
            v_mat.push_back(m);
        }
    } else {
        LOGD("Mat_to_vector_Mat() FAILED: mat.type() == CV_32SC2 && mat.cols == 1");
    }
}
Example 14: saveMatBinary
void ImageUtil::saveMatBinary(cv::Mat &mat, std::string filename) {
    int header[3] = { mat.rows, mat.cols, mat.type() };
    FILE *out = my_io_openFileWrite1(filename.c_str());
    int64_t n = fwrite(header, sizeof(int), 3, out);
    my_assert_equalInt("written bytes", n, 3);
    size_t size_pixel = 0;
    if (mat.type() == CV_8S || mat.type() == CV_8U)
        size_pixel = 1;
    else if (mat.type() == CV_16S || mat.type() == CV_16U)
        size_pixel = 2;
    else if (mat.type() == CV_32S || mat.type() == CV_32F)
        size_pixel = 4;
    else if (mat.type() == CV_64F)
        size_pixel = 8;
    int length = mat.rows * mat.cols;
    int64_t n2 = fwrite(mat.data, size_pixel, length, out);
    my_assert_equalInt("written bytes", n2, length);
    fclose(out);
}
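A matching loader sketch, written with plain stdio because the my_io_* helpers used above are project-specific and not shown here; the loadMatBinary name is hypothetical. It mirrors the exact layout saveMatBinary writes: a 3-int header followed by rows*cols single-channel elements (the size table above only covers single-channel types):
cv::Mat loadMatBinary(const std::string &filename) {
    FILE *in = fopen(filename.c_str(), "rb");
    if (!in)
        return cv::Mat();
    int header[3] = {0, 0, 0};                               // rows, cols, type
    size_t ok = fread(header, sizeof(int), 3, in);
    cv::Mat mat(header[0], header[1], header[2]);
    if (ok == 3)
        fread(mat.data, mat.elemSize1(), (size_t)mat.rows * mat.cols, in);
    fclose(in);
    return mat;
}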
Example 15: assert
template<typename T>
void ComputationWorker::divideReal(cv::Mat& input, cv::Mat& factor, cv::Mat& out) {
    assert(input.type() == CV_32FC2 || input.type() == CV_64FC2);
    assert(out.type() == CV_32FC2 || out.type() == CV_64FC2);
    assert(factor.type() == CV_32FC1 || factor.type() == CV_64FC1);
    out.forEach<Point_<T>>([&input, &factor](Point_<T> &p, const int pos[]) -> void {
        T fac = factor.at<T>(pos[0], pos[1]);
        Point_<T> inp = input.at<Point_<T>>(pos[0], pos[1]);
        p.x = inp.x / fac;
        p.y = inp.y / fac;
    });
}
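An instantiation sketch for the float case (the ComputationWorker instance named worker and the matrix sizes are assumptions): each two-channel element of a CV_32FC2 matrix is divided by the corresponding CV_32FC1 factor.
cv::Mat complexImg(256, 256, CV_32FC2, cv::Scalar(1.0f, 2.0f));   // e.g. a complex-valued FFT result
cv::Mat factor(256, 256, CV_32FC1, cv::Scalar(2.0f));
cv::Mat result(complexImg.size(), complexImg.type());             // output must already have the right type
worker.divideReal<float>(complexImg, factor, result);             // every element becomes (0.5, 1.0)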