This article collects typical usage examples of the C++ method cv::Mat::empty. If you have been wondering what Mat::empty does in C++, how to use it, or what calling it looks like in practice, the curated examples below should help; you can also explore further usage examples of the enclosing class, cv::Mat.
The following presents 15 code examples of Mat::empty, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
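As a quick orientation before the collected examples, here is a minimal, self-contained sketch of the most common Mat::empty pattern: checking whether cv::imread actually loaded an image (the file name is a placeholder):

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // imread returns an empty Mat when the file cannot be read,
    // so empty() is the standard way to validate the load.
    cv::Mat image = cv::imread("input.png");
    if (image.empty())
    {
        std::cerr << "Could not load input.png" << std::endl;
        return 1;
    }
    std::cout << "Loaded a " << image.cols << "x" << image.rows << " image" << std::endl;
    return 0;
}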
Example 1: weakClassifiersForTemplate
void TemplateMatchCandidates::weakClassifiersForTemplate(
const cv::Mat &templ,
const cv::Mat &templMask,
const std::vector< cv::Rect > &rects,
cv::Mat_<int> &classifiers,
cv::Scalar &mean)
{
const int nChannels = templ.channels();
classifiers.create(nChannels, (int)rects.size());
// Note: cv::mean is used here so that the mask is taken into account.
mean = cv::mean(templ, templMask);
for (int x = 0; x < (int)rects.size(); ++x) {
cv::Scalar blockMean = cv::mean(templ(rects[x]), templMask.empty() ? cv::noArray() : templMask(rects[x]));
for (int y = 0; y < nChannels; ++y) {
classifiers(y, x) = blockMean[y] > mean[y] ? 1 : -1;
}
}
}
Example 2: cameraToWorld
void cameraToWorld(const cv::Mat &R_in, const cv::Mat &T_in, const cv::Mat & in_points_ori,
cv::Mat &points_out) {
cv::Mat_<float> R, T, in_points;
R_in.convertTo(R, CV_32F);
T_in.reshape(1, 1).convertTo(T, CV_32F);
if (!in_points_ori.empty())
{
in_points_ori.reshape(1, in_points_ori.size().area()).convertTo(in_points, CV_32F);
cv::Mat_<float> T_repeat;
cv::repeat(T, in_points.rows, 1, T_repeat);
// Apply the inverse translation/rotation
cv::Mat points = (in_points - T_repeat) * R;
// Reshape to the original size
points_out = points.reshape(3, in_points_ori.rows);
}
else
{
points_out = cv::Mat();
}
}
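For context, here is a hedged usage sketch of the function above (the pose values and the main wrapper are illustrative assumptions, not part of the original source). With an identity rotation, the transform reduces to subtracting the translation from every point:

#include <opencv2/opencv.hpp>
#include <iostream>

// Assumes the cameraToWorld definition above is in scope.
int main()
{
    cv::Mat R = cv::Mat::eye(3, 3, CV_32F);                // identity rotation
    cv::Mat T = (cv::Mat_<float>(3, 1) << 1.f, 2.f, 3.f);  // camera translation
    // A single 3-channel point that coincides with the camera position.
    cv::Mat points(1, 1, CV_32FC3, cv::Scalar(1.f, 2.f, 3.f));
    cv::Mat out;
    cameraToWorld(R, T, points, out);
    std::cout << out.at<cv::Vec3f>(0) << std::endl;         // expect [0, 0, 0]
    return 0;
}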
Example 3: findSquares
// This public function performs the role of
// main() in the original file.
vector<Point> CVSquares::detectedSquareInImage (cv::Mat image, float tol, int threshold, int levels, int acc)
{
vector<vector<Point> > squares;
vector<Point> biggest_square;
if( image.empty() )
{
cout << "CVSquares.m: Couldn't load " << endl;
return biggest_square; // nothing to detect in an empty image
}
tolerance = tol;
thresh = threshold;
N = levels;
accuracy = acc;
findSquares(image, squares);
find_largest_square(squares, biggest_square);
// drawSquares(image, squares);
drawSquare(image, biggest_square);
return biggest_square;
}
Example 4: extractFeatures
bool FeatureAlgorithm::extractFeatures(const cv::Mat& image, Keypoints& kp, Descriptors& desc) const
{
assert(!image.empty());
if (featureEngine)
{
(*featureEngine)(image, cv::noArray(), kp, desc);
}
else
{
detector->detect(image, kp);
if (kp.empty())
return false;
extractor->compute(image, kp, desc);
}
return !kp.empty();
}
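The same detect-then-compute split can be reproduced with any concrete OpenCV detector; below is a minimal sketch using ORB as a stand-in (ORB and the function name are assumptions here, not necessarily what this project wires into detector and extractor):

#include <opencv2/opencv.hpp>
#include <vector>

// Minimal sketch: detect keypoints, bail out early if none, then describe them.
bool extractOrbFeatures(const cv::Mat& image,
                        std::vector<cv::KeyPoint>& kp, cv::Mat& desc)
{
    if (image.empty())
        return false;
    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    orb->detect(image, kp);
    if (kp.empty())
        return false;
    orb->compute(image, kp, desc);
    return !desc.empty();
}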
Example 5: Train
bool Fern::Train(const vector<cv::Mat>& samp_imgs, const cv::Mat& labels, const FernTrainingParams& params)
{
if(samp_imgs.empty() || labels.empty() || samp_imgs.size() != (size_t)labels.rows)
{
std::cerr<<"Invalid training data."<<std::endl;
return false;
}
// init data
double maxlabel, minlabel;
cv::minMaxLoc(labels, &minlabel, &maxlabel);
class_likelihood.resize((int)maxlabel+1);
int max_feat_id = (int)std::pow(2.f, params.num_features);
for(size_t i=0; i<class_likelihood.size(); i++)
{
class_likelihood[i] = NodeStatisticsHist(max_feat_id);
}
// generate random features
features.resize(params.num_features);
// compute node statistics for each feature evaluation
for(size_t id=0; id<samp_imgs.size(); id++)
{
int feat_val = 0;
for(size_t feat_id=0; feat_id<features.size(); feat_id++)
{
double response = features[feat_id].GetFeatureResponse(samp_imgs[id]);
// Shift for every feature so each one maps to a fixed bit,
// then set the bit only when the response is positive.
feat_val <<= 1;
if(response > 0)
feat_val |= 1;
}
// add to hist
class_likelihood[labels.at<int>(id)].AddSample(feat_val, 1);
}
std::cout<<"Finished training fern."<<std::endl;
m_ifTrained = true;
return true;
}
Example 6: generatePointClouds
bool VirtualKinect::generatePointClouds(const cv::Mat& depth, cv::Mat& points)
{
if( depth.empty() || (depth.type() != CV_16UC1 && depth.type() != CV_16SC1) )
return false;
points.create(depth.size(), CV_32FC3);
int cols = depth.cols, rows = depth.rows;
float co = (cols-1)/2.0f, ro = (rows-1)/2.0f;
float scaleFactor = 0.0021f;
int minDistance = -10;
float unitFactor = 0.001f; // unit: meter
for(int r=0; r<rows; r++)
{
cv::Point3f* cloud_ptr = (cv::Point3f*)points.ptr(r);
const ushort* depth_ptr = (const ushort*)depth.ptr(r);
for(int c=0; c<cols; c++)
{
if (depth_ptr[c] > 0)
{
float z = depth_ptr[c];
cv::Point3f src(c,r,z);
//mapProjectiveToRealWorld(src, cloud_ptr[c]);
cloud_ptr[c].x = unitFactor * (c - co) * (z + minDistance) * scaleFactor;
cloud_ptr[c].y = unitFactor * (ro - r) * (z + minDistance) * scaleFactor;
cloud_ptr[c].z = unitFactor * z;
}
else
{
cloud_ptr[c].x = 0;
cloud_ptr[c].y = 0;
cloud_ptr[c].z = 0;
}
}
}
return true;
}
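A hedged sketch of how such a generator is typically fed (the VirtualKinect default constructor, the file name, and an OpenCV 3+ API are assumptions): load a 16-bit depth map without letting imread truncate it to 8 bits, then convert:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // IMREAD_ANYDEPTH keeps the original 16-bit depth values on load.
    cv::Mat depth = cv::imread("depth.png", cv::IMREAD_ANYDEPTH);
    cv::Mat cloud;
    VirtualKinect vk; // assumed default-constructible
    if (vk.generatePointClouds(depth, cloud))
        std::cout << "cloud: " << cloud.cols << "x" << cloud.rows << std::endl;
    return 0;
}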
Example 7: uIsFinite
// Metric constructor + 2d depth
SensorData::SensorData(const cv::Mat & laserScan,
const cv::Mat & image,
const cv::Mat & depthOrRightImage,
float fx,
float fyOrBaseline,
float cx,
float cy,
const Transform & localTransform,
const Transform & pose,
float poseRotVariance,
float poseTransVariance,
int id,
double stamp,
const std::vector<unsigned char> & userData) :
_image(image),
_id(id),
_stamp(stamp),
_depthOrRightImage(depthOrRightImage),
_laserScan(laserScan),
_fx(fx),
_fyOrBaseline(fyOrBaseline),
_cx(cx),
_cy(cy),
_pose(pose),
_localTransform(localTransform),
_poseRotVariance(poseRotVariance),
_poseTransVariance(poseTransVariance),
_userData(userData)
{
UASSERT(_laserScan.empty() || _laserScan.type() == CV_32FC2);
UASSERT(image.type() == CV_8UC1 || // Mono
image.type() == CV_8UC3); // RGB
UASSERT(depthOrRightImage.type() == CV_32FC1 || // Depth in meter
depthOrRightImage.type() == CV_16UC1 || // Depth in millimetre
depthOrRightImage.type() == CV_8U); // Right stereo image
UASSERT(!depthOrRightImage.empty());
UASSERT(!_localTransform.isNull());
UASSERT_MSG(uIsFinite(_poseRotVariance) && _poseRotVariance>0 && uIsFinite(_poseTransVariance) && _poseTransVariance>0, "Rotational and translational variances should not be null! (set to 1 if unknown)");
}
Example 8: retrieve
bool VirtualKinectCapture::retrieve(cv::Mat& frame, int flag)
{
if ((flag == VK_CAP_TYPE_DEPTH || flag == VK_CAP_TYPE_POINT) && !depth_.empty())
{
if (flag == VK_CAP_TYPE_POINT)
{
generatePointClouds(depth_, frame);
}
else
{
depth_.copyTo(frame);
}
}
else if ((flag == VK_CAP_TYPE_IMAGE) && !image_.empty())
{
image_.copyTo(frame);
}
else
frame.release();
return !frame.empty();
}
Example 9: setData
void ObjWidget::setData(
const cv::Mat & image)
{
rectItems_.clear();
graphicsView_->scene()->clear();
graphicsViewInitialized_ = false;
cvImage_ = image.clone();
pixmap_ = QPixmap::fromImage(cvtCvMat2QImage(cvImage_));
//this->setMinimumSize(image_.size());
if(graphicsViewMode_->isChecked())
{
this->setupGraphicsView();
}
label_->setVisible(image.empty());
}
Example 10: key_callback
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if(action == GLFW_PRESS ){
if(key == GLFW_KEY_W) {
char filename[] = "out.bmp";
SOIL_save_screenshot
(
filename,
SOIL_SAVE_TYPE_BMP,
0, 0, WIDTH, HEIGHT
);
}
#ifdef STATIC_IMAGES
else if(key == GLFW_KEY_N) {
// Capture the next frame from the camera stream
stream1 >> cameraFrame;
if( cameraFrame.empty() ) {
exit(0);
}
updated = true;
}
#endif
}
}
Example 11: on_mouse
void on_mouse(int event, int x, int y, int flags, void *){
if (img.empty()){
return ;
}
if (event == CV_EVENT_LBUTTONDOWN){
first_pt = cv::Point(x,y);//set the start point
}
else if ((event == CV_EVENT_MOUSEMOVE) && (flags & CV_EVENT_FLAG_LBUTTON)){
second_pt =cv::Point(x,y);
img = img0.clone();
//draw a rectangle
cv::rectangle(img,first_pt,second_pt,cv::Scalar(255,0,0),1,8,0);
//cv::Mat img_hdr = img;
cv::imshow("image",img);
}
else if ((event == CV_EVENT_LBUTTONUP)){
inpaint_mask.create(img0.size(),CV_8UC1);
cv::rectangle(inpaint_mask, first_pt, second_pt, cv::Scalar(255),-1,8,0);
}
}
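The mask assembled in the LBUTTONUP branch is exactly the input cv::inpaint expects; here is a hedged sketch of the step that typically follows elsewhere in such a program (the function and window names are assumptions):

#include <opencv2/opencv.hpp>

// Sketch: restore the region the user marked with the rectangle.
void run_inpaint(const cv::Mat& img0, const cv::Mat& inpaint_mask)
{
    cv::Mat inpainted;
    // 3 is the neighborhood radius; INPAINT_TELEA is one of the two
    // algorithms OpenCV offers (the other is INPAINT_NS).
    cv::inpaint(img0, inpaint_mask, inpainted, 3, cv::INPAINT_TELEA);
    cv::imshow("inpainted", inpainted);
}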
Example 12: imageCb
void ImageNodelet::imageCb(const sensor_msgs::ImageConstPtr& msg)
{
image_mutex_.lock();
// We want to scale floating point images so that they display nicely
bool do_dynamic_scaling = (msg->encoding.find("F") != std::string::npos);
// Convert to OpenCV native BGR color
try {
last_image_ = cvtColorForDisplay(cv_bridge::toCvShare(msg), "", do_dynamic_scaling)->image;
}
catch (cv_bridge::Exception& e) {
NODELET_ERROR_THROTTLE(30, "Unable to convert '%s' image for display: '%s'",
msg->encoding.c_str(), e.what());
}
// Must release the mutex before calling cv::imshow, or can deadlock against
// OpenCV's window mutex.
image_mutex_.unlock();
if (!last_image_.empty())
cv::imshow(window_name_, last_image_);
}
Example 13: loadConfig
void MixtureOfGaussianV2BGS::process(const cv::Mat &img_input, cv::Mat &img_output)
{
if(img_input.empty())
return;
loadConfig();
if(firstTime)
saveConfig();
//------------------------------------------------------------------
// BackgroundSubtractorMOG2
// http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
//
// Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
//
// The class implements the Gaussian mixture model background subtraction described in:
// (1) Z.Zivkovic, Improved adaptive Gaussian mixture model for background subtraction, International Conference on Pattern Recognition, UK, August, 2004.
// The code is very fast and also performs shadow detection. The number of Gaussian components is adapted per pixel.
//
// (2) Z.Zivkovic, F. van der Heijden, Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction,
// Pattern Recognition Letters, vol. 27, no. 7, pages 773-780, 2006.
// The algorithm is similar to the standard Stauffer & Grimson algorithm, with additional selection of the number of Gaussian components based on:
// Z.Zivkovic, F.van der Heijden, Recursive unsupervised learning of finite mixture models, IEEE Trans. on Pattern Analysis and Machine Intelligence,
// vol.26, no.5, pages 651-656, 2004.
//------------------------------------------------------------------
mog(img_input, img_foreground, alpha);
if(enableThreshold)
cv::threshold(img_foreground, img_foreground, threshold, 255, cv::THRESH_BINARY);
if(showOutput)
cv::imshow("Gaussian Mixture Model (Zivkovic&Heijden)", img_foreground);
img_foreground.copyTo(img_output);
firstTime = false;
}
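Independent of this wrapper class, the same model is available directly from OpenCV; below is a minimal sketch against the OpenCV 3+ interface (the 2.x-era code above invokes the subtractor as a functor instead; the camera index is an assumption):

#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap(0); // default camera
    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 =
        cv::createBackgroundSubtractorMOG2();
    cv::Mat frame, fgmask;
    while (cap.read(frame))
    {
        mog2->apply(frame, fgmask); // learn the background and segment
        cv::imshow("foreground", fgmask);
        if (cv::waitKey(30) == 27) // Esc quits
            break;
    }
    return 0;
}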
Example 14: oResult
cv::Mat tp3::convo(const cv::Mat& oImage, const cv::Mat_<float>& oKernel) {
CV_Assert(!oImage.empty() && oImage.type()==CV_8UC3 && oImage.isContinuous());
CV_Assert(!oKernel.empty() && oKernel.cols==oKernel.rows && oKernel.isContinuous());
CV_Assert(oImage.cols>oKernel.cols && oImage.rows>oKernel.rows);
cv::Mat oResult(oImage.size(),CV_32FC3);
for (int row_index = 0; row_index < oImage.rows; ++row_index)
{
for (int col_index = 0; col_index < oImage.cols; ++col_index)
{
float resultBlue = calculate_convolution(oImage, oKernel, blue, row_index, col_index) / 255;
float resultGreen = calculate_convolution(oImage, oKernel, green, row_index, col_index) / 255;
float resultRed = calculate_convolution(oImage, oKernel, red, row_index, col_index) / 255;
cv::Vec3f result = cv::Vec3f(resultBlue, resultGreen, resultRed);
oResult.at<cv::Vec3f>(row_index, col_index) = result;
}
}
return oResult;
}
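A hedged usage sketch for the routine above (the file name is a placeholder): a normalized 3x3 box kernel fed through convo should yield a blurred CV_32FC3 copy of the input, since each output pixel becomes the mean of its 3x3 neighborhood:

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat image = cv::imread("input.png"); // loads as CV_8UC3
    if (image.empty())
        return 1;
    // Normalized box filter: all weights equal, summing to 1.
    cv::Mat_<float> kernel = cv::Mat_<float>::ones(3, 3) / 9.0f;
    cv::Mat blurred = tp3::convo(image, kernel);
    cv::imshow("blurred", blurred); // values already scaled to [0, 1]
    cv::waitKey();
    return 0;
}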
Example 15: drawReferenceSystem
void Drawing::drawReferenceSystem(cv::Mat &image, const cv::Mat &cRo,
const cv::Mat &cto, const cv::Mat &A, const cv::Mat &K,
float length)
{
cv::Mat k;
if(K.empty())
k = cv::Mat::zeros(4,1, cRo.type());
else
k = K;
std::vector<cv::Point3f> oP;
oP.push_back(cv::Point3f(0,0,0));
oP.push_back(cv::Point3f(length,0,0));
oP.push_back(cv::Point3f(0,length,0));
oP.push_back(cv::Point3f(0,0,length));
vector<cv::Point2f> points2d;
cv::projectPoints(cv::Mat(oP), cRo, cto, A, k, points2d);
// draw axis
cv::Scalar bluez, greeny, redx;
if(image.channels() == 3 )
{
bluez = cv::Scalar(255,0,0);
greeny = cv::Scalar(0,255,0);
redx = cv::Scalar(0,0,255);
}
else
{
bluez = cv::Scalar(18,18,18);
greeny = cv::Scalar(182,182,182);
redx = cv::Scalar(120,120,120);
}
cv::line(image, points2d[0], points2d[1], redx, 2);
cv::line(image, points2d[0], points2d[2], greeny, 2);
cv::line(image, points2d[0], points2d[3], bluez, 2);
}