This article collects typical usage examples of the C++ method cv::Mat_::begin. If you are wondering what Mat_::begin does, how to use it, or where to find concrete examples, the curated code samples below may help. You can also read further about the enclosing class, cv::Mat_.
The following 15 code examples of Mat_::begin are presented, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ examples.
Example 1: matToVec
/** get 3D points out of the image */
float matToVec(const cv::Mat_<cv::Vec3f> &src_ref, const cv::Mat_<cv::Vec3f> &src_mod, std::vector<cv::Vec3f>& pts_ref, std::vector<cv::Vec3f>& pts_mod)
{
pts_ref.clear();
pts_mod.clear();
int px_missing = 0;
cv::MatConstIterator_<cv::Vec3f> it_ref = src_ref.begin();
cv::MatConstIterator_<cv::Vec3f> it_mod = src_mod.begin();
for (; it_ref != src_ref.end(); ++it_ref, ++it_mod)
{
if (!cv::checkRange(*it_ref))
continue;
pts_ref.push_back(*it_ref);
if (cv::checkRange(*it_mod))
{
pts_mod.push_back(*it_mod);
}
else
{
pts_mod.push_back(cv::Vec3f(0.0f, 0.0f, 0.0f));
++px_missing;
}
}
float ratio = 0.0f;
if ((src_ref.cols > 0) && (src_ref.rows > 0))
ratio = float(px_missing) / float(src_ref.cols * src_ref.rows);
return ratio;
}
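A minimal driver for matToVec might look as follows; the 3×3 inputs and the deliberately invalid pixel are made up for illustration, and the function above is assumed to be in scope:

#include <opencv2/core.hpp>
#include <iostream>
#include <limits>
#include <vector>

int main()
{
    // Two small 3D-point images; poison one model pixel with NaN
    cv::Mat_<cv::Vec3f> ref(3, 3, cv::Vec3f(0.0f, 0.0f, 1.0f));
    cv::Mat_<cv::Vec3f> mod = ref.clone();
    mod(1, 1) = cv::Vec3f(std::numeric_limits<float>::quiet_NaN(), 0.0f, 0.0f);

    std::vector<cv::Vec3f> pts_ref, pts_mod;
    float ratio = matToVec(ref, mod, pts_ref, pts_mod);
    std::cout << "missing ratio: " << ratio << std::endl; // 1/9 for this input
    return 0;
}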
Example 2: output_HOG_frame
void EmotionDetector::output_HOG_frame(std::ofstream* hog_file, bool good_frame, const cv::Mat_<double>& hog_descriptor, int num_rows, int num_cols)
{
// Using FHOGs, hence 31 channels
int num_channels = 31;
hog_file->write((char*)(&num_cols), 4);
hog_file->write((char*)(&num_rows), 4);
hog_file->write((char*)(&num_channels), 4);
// Not the best way to store a bool, but it makes the file much easier to read back
float good_frame_float;
if (good_frame)
good_frame_float = 1;
else
good_frame_float = -1;
hog_file->write((char*)(&good_frame_float), 4);
cv::MatConstIterator_<double> descriptor_it = hog_descriptor.begin();
for (int y = 0; y < num_cols; ++y)
{
for (int x = 0; x < num_rows; ++x)
{
for (unsigned int o = 0; o < 31; ++o)
{
float hog_data = (float)(*descriptor_it++);
hog_file->write((char*)&hog_data, 4);
}
}
}
}
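For symmetry, here is a sketch of how such a frame could be read back. The reader below is not part of the original code; it simply assumes the writer's layout (three 4-byte ints, one 4-byte float flag, then cols*rows*channels 4-byte floats, little-endian), and the name read_HOG_frame is invented:

#include <opencv2/core.hpp>
#include <fstream>

// Hypothetical inverse of output_HOG_frame; returns false once the stream runs out.
bool read_HOG_frame(std::ifstream& hog_file, bool& good_frame, cv::Mat_<double>& hog_descriptor)
{
    int num_cols = 0, num_rows = 0, num_channels = 0;
    hog_file.read((char*)&num_cols, 4);
    hog_file.read((char*)&num_rows, 4);
    hog_file.read((char*)&num_channels, 4);
    if (!hog_file.good())
        return false;

    float good_frame_float = 0.0f;
    hog_file.read((char*)&good_frame_float, 4);
    good_frame = (good_frame_float > 0);

    // Same row-vector layout the writer iterates over
    hog_descriptor.create(1, num_rows * num_cols * num_channels);
    for (cv::MatIterator_<double> it = hog_descriptor.begin(); it != hog_descriptor.end(); ++it)
    {
        float val = 0.0f;
        hog_file.read((char*)&val, 4);
        *it = (double)val;
    }
    return hog_file.good();
}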
Example 3: cvToCloud
template<typename PointT>
inline void
cvToCloud(const cv::Mat_<cv::Point3f>& points3d, pcl::PointCloud<PointT>& cloud, const cv::Mat& mask = cv::Mat())
{
cloud.clear();
cloud.width = points3d.size().width;
cloud.height = points3d.size().height;
cv::Mat_<cv::Point3f>::const_iterator point_it = points3d.begin(), point_end = points3d.end();
const bool has_mask = !mask.empty();
cv::Mat_<uchar>::const_iterator mask_it;
if (has_mask)
mask_it = mask.begin<uchar>();
for (; point_it != point_end; ++point_it, (has_mask ? ++mask_it : mask_it))
{
if (has_mask && !*mask_it)
continue;
cv::Point3f p = *point_it;
if (p.x != p.x || p.y != p.y || p.z != p.z) // throw out NaNs: any NaN coordinate invalidates the point
continue;
PointT cp;
cp.x = p.x;
cp.y = p.y;
cp.z = p.z;
cloud.push_back(cp);
}
}
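A possible call site, assuming the template parameter PointT is deduced from the cloud argument; the 2×2 point image and mask values are invented:

#include <opencv2/core.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void cvToCloud_example()
{
    cv::Mat_<cv::Point3f> points3d(2, 2, cv::Point3f(0.0f, 0.0f, 1.0f));
    cv::Mat_<uchar> mask = cv::Mat_<uchar>::ones(2, 2);
    mask(0, 0) = 0; // drop one point via the mask

    pcl::PointCloud<pcl::PointXYZ> cloud;
    cvToCloud(points3d, cloud, mask);
    // cloud now holds the three unmasked, finite points
}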
Example 4: cvToCloudXYZRGB
/**
* \brief Convert an OpenCV collection of points to a pcl::PointCloud; the OpenCV mat should contain NaNs for invalid points.
* @param points3d opencv matrix of nx1 3 channel points
* @param cloud output cloud
* @param rgb the colour image, required; used to colour the points (expected in BGR channel order)
* @param mask the mask; if non-empty, it must be the same size as rgb
*/
inline void
cvToCloudXYZRGB(const cv::Mat_<cv::Point3f>& points3d, pcl::PointCloud<pcl::PointXYZRGB>& cloud, const cv::Mat& rgb,
const cv::Mat& mask, bool brg = true)
{
cloud.clear();
cv::Mat_<cv::Point3f>::const_iterator point_it = points3d.begin(), point_end = points3d.end();
cv::Mat_<cv::Vec3b>::const_iterator rgb_it = rgb.begin<cv::Vec3b>();
cv::Mat_<uchar>::const_iterator mask_it;
if(!mask.empty())
mask_it = mask.begin<uchar>();
for (; point_it != point_end; ++point_it, ++rgb_it)
{
if(!mask.empty())
{
++mask_it;
if (!*mask_it)
continue;
}
cv::Point3f p = *point_it;
if (p.x != p.x || p.y != p.y || p.z != p.z) // throw out NaNs: any NaN coordinate invalidates the point
continue;
pcl::PointXYZRGB cp;
cp.x = p.x;
cp.y = p.y;
cp.z = p.z;
cp.r = (*rgb_it)[2]; //expecting in BGR format.
cp.g = (*rgb_it)[1];
cp.b = (*rgb_it)[0];
cloud.push_back(cp);
}
}
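And a possible call site for the RGB variant; note the colour image is indexed as BGR, matching OpenCV's default channel order (the data below is made up):

#include <opencv2/core.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void cvToCloudXYZRGB_example()
{
    cv::Mat_<cv::Point3f> points3d(4, 1, cv::Point3f(0.0f, 0.0f, 1.0f));
    cv::Mat rgb(4, 1, CV_8UC3, cv::Scalar(255, 0, 0)); // solid blue in BGR
    cv::Mat mask; // an empty mask keeps every finite point

    pcl::PointCloud<pcl::PointXYZRGB> cloud;
    cvToCloudXYZRGB(points3d, cloud, rgb, mask);
    // each point ends up with b == 255, r == g == 0
}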
Example 5: Extract_FHOG_descriptor
// Create a row vector Felzenszwalb HOG descriptor from a given image
void Extract_FHOG_descriptor(cv::Mat_<double>& descriptor, const cv::Mat& image, int& num_rows, int& num_cols, int cell_size)
{
dlib::array2d<dlib::matrix<float,31,1> > hog;
if(image.channels() == 1)
{
dlib::cv_image<uchar> dlib_warped_img(image);
dlib::extract_fhog_features(dlib_warped_img, hog, cell_size);
}
else
{
dlib::cv_image<dlib::bgr_pixel> dlib_warped_img(image);
dlib::extract_fhog_features(dlib_warped_img, hog, cell_size);
}
// Convert to a usable format
num_cols = hog.nc();
num_rows = hog.nr();
descriptor = cv::Mat_<double>(1, num_cols * num_rows * 31);
cv::MatIterator_<double> descriptor_it = descriptor.begin();
for(int y = 0; y < num_cols; ++y)
{
for(int x = 0; x < num_rows; ++x)
{
for(unsigned int o = 0; o < 31; ++o)
{
*descriptor_it++ = (double)hog[x][y](o); // x indexes rows, y indexes columns in dlib's array2d
}
}
}
}
Example 6: operator()
int crslic_segmentation::operator()(const cv::Mat& image, cv::Mat_<int>& labels)
{
float directCliqueCost = 0.3;
unsigned int const iterations = 3;
double const diagonalCliqueCost = directCliqueCost / sqrt(2);
bool isColorImage = (image.channels() == 3);
std::vector<FeatureType> features;
if (isColorImage)
features.push_back(Color);
else
features.push_back(Grayvalue);
features.push_back(Compactness);
ContourRelaxation<int> crslic_obj(features);
cv::Mat labels_temp = createBlockInitialization<int>(image.size(), settings.superpixel_size, settings.superpixel_size);
crslic_obj.setCompactnessData(settings.superpixel_compactness);
if (isColorImage)
{
cv::Mat imageYCrCb;
cv::cvtColor(image, imageYCrCb, CV_BGR2YCrCb);
std::vector<cv::Mat> imageYCrCbChannels;
cv::split(imageYCrCb, imageYCrCbChannels);
crslic_obj.setColorData(imageYCrCbChannels[0], imageYCrCbChannels[1], imageYCrCbChannels[2]);
}
else
crslic_obj.setGrayvalueData(image.clone());
crslic_obj.relax(labels_temp, directCliqueCost, diagonalCliqueCost, iterations, labels);
return 1+*(std::max_element(labels.begin(), labels.end()));
}
Example 7: setPixel
// matrix version
void multi_img::setPixel(unsigned int row, unsigned int col,
const cv::Mat_<Value>& values)
{
assert((int)row < height && (int)col < width);
assert(values.rows*values.cols == (int)size());
Pixel &p = pixels[row*width + col];
p.assign(values.begin(), values.end());
for (size_t i = 0; i < size(); ++i)
bands[i](row, col) = p[i];
dirty(row, col) = 0;
}
Example 8: Clamp
//===========================================================================
// Clamping the parameter values to be within 3 standard deviations
void PDM::Clamp(cv::Mat_<float>& local_params, cv::Vec6d& params_global, const FaceModelParameters& parameters)
{
double n_sigmas = 3;
cv::MatConstIterator_<double> e_it = this->eigen_values.begin();
cv::MatIterator_<float> p_it = local_params.begin();
double v;
// go over all parameters
for(; p_it != local_params.end(); ++p_it, ++e_it)
{
// Work out the maximum value
v = n_sigmas*sqrt(*e_it);
// if the value is too extreme, clamp it
if(fabs(*p_it) > v)
{
// Dealing with positive and negative cases
if(*p_it > 0.0)
{
*p_it=v;
}
else
{
*p_it=-v;
}
}
}
// do not let the pose get out of hand
if(parameters.limit_pose)
{
if(params_global[1] > M_PI / 2)
params_global[1] = M_PI/2;
if(params_global[1] < -M_PI / 2)
params_global[1] = -M_PI/2;
if(params_global[2] > M_PI / 2)
params_global[2] = M_PI/2;
if(params_global[2] < -M_PI / 2)
params_global[2] = -M_PI/2;
if(params_global[3] > M_PI / 2)
params_global[3] = M_PI/2;
if(params_global[3] < -M_PI / 2)
params_global[3] = -M_PI/2;
}
}
Example 9: drawTo
void OvershootClusterer::drawTo(cv::Mat_<cv::Vec3b> &out) const
{
out.setTo(0);
out.setTo(Scalar(255,255,255), img > 0);
Mat_<Vec3b>::iterator itOut = out.begin(), itOutEnd = out.end();
Mat_<unsigned char>::const_iterator it = smallestOvershootVisit.begin(),
itEnd = smallestOvershootVisit.end();
for (; itOut != itOutEnd && it != itEnd; ++it, ++itOut)
{
if ((*it) < CLUSTERER_OVERSHOOTS)
{
unsigned char nonRed = (*it)*(255/CLUSTERER_OVERSHOOTS);
*itOut = Vec3b(nonRed,nonRed,255);
}
}
}
Example 10: cvToCloudXYZ
template<typename PointT>
void
cvToCloudXYZ(const cv::Mat_<float>& points3d, pcl::PointCloud<PointT>& cloud)
{
const int width = cloud.width;
const int height = cloud.height;
cv::Mat_<float>::const_iterator begin = points3d.begin();
for (int v = 0; v < height; ++v)
{
for (int u = 0; u < width; ++u)
{
PointT& p = cloud(u, v);
p.x = *(begin++);
p.y = *(begin++);
p.z = *(begin++);
}
}
}
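This variant assumes the cloud has already been sized and that points3d stores x, y, z contiguously (for example, one float triplet per row); a made-up call might be:

#include <opencv2/core.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void cvToCloudXYZ_example()
{
    cv::Mat_<float> points3d = (cv::Mat_<float>(2, 3) << 0.0f, 0.0f, 1.0f,
                                                         1.0f, 0.0f, 2.0f);
    pcl::PointCloud<pcl::PointXYZ> cloud(2, 1); // pre-size: width 2, height 1
    cvToCloudXYZ(points3d, cloud);
    // cloud(0, 0) == (0, 0, 1) and cloud(1, 0) == (1, 0, 2)
}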
Example 11: Visualise_FHOG
void Visualise_FHOG(const cv::Mat_<double>& descriptor, int num_rows, int num_cols, cv::Mat& visualisation)
{
// First convert to dlib format
dlib::array2d<dlib::matrix<float,31,1> > hog(num_rows, num_cols);
cv::MatConstIterator_<double> descriptor_it = descriptor.begin();
for(int y = 0; y < num_cols; ++y)
{
for(int x = 0; x < num_rows; ++x)
{
for(unsigned int o = 0; o < 31; ++o)
{
hog[x][y](o) = *descriptor_it++; // x indexes rows, y indexes columns in dlib's array2d
}
}
}
// Draw the FHOG to OpenCV format
auto fhog_vis = dlib::draw_fhog(hog);
visualisation = dlib::toMat(fhog_vis).clone();
}
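Extract_FHOG_descriptor (Example 5) and Visualise_FHOG use the same serialisation order, so a round trip is a natural smoke test. A sketch, where "face.png" is a placeholder path:

#include <opencv2/opencv.hpp>

void fhog_roundtrip_example()
{
    cv::Mat image = cv::imread("face.png"); // hypothetical input image
    if (image.empty())
        return;

    cv::Mat_<double> descriptor;
    int num_rows = 0, num_cols = 0;
    Extract_FHOG_descriptor(descriptor, image, num_rows, num_cols, 8); // 8-pixel cells

    cv::Mat visualisation;
    Visualise_FHOG(descriptor, num_rows, num_cols, visualisation);
    cv::imshow("fhog", visualisation);
    cv::waitKey(0);
}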
Example 12: histKmeans
float singleeyefitter::cvx::histKmeans(const cv::Mat_<float>& hist, int bin_min, int bin_max, int K, float init_centres[], cv::Mat_<uchar>& labels, cv::TermCriteria termCriteria)
{
using namespace math;
CV_Assert( (hist.rows == 1 || hist.cols == 1) && K > 0 );
labels = cv::Mat_<uchar>::zeros(hist.size());
int nbins = hist.total();
float binWidth = float(bin_max - bin_min)/nbins; // cast avoids integer division
float binStart = bin_min + binWidth/2;
cv::Mat_<float> centres(K, 1, init_centres, sizeof(float)); // wrap the caller's initial centres without copying
int iters = 0;
bool finalRun = false;
while (true)
{
++iters;
cv::Mat_<float> old_centres = centres.clone();
int i_bin;
cv::Mat_<float>::const_iterator i_hist;
cv::Mat_<uchar>::iterator i_labels;
cv::Mat_<float>::iterator i_centres;
uchar label;
float sumDist = 0;
int movedCount = 0;
// Step 1. Assign each element a label
for (i_bin = 0, i_labels = labels.begin(), i_hist = hist.begin();
i_bin < nbins;
++i_bin, ++i_labels, ++i_hist)
{
float bin_val = binStart + i_bin*binWidth;
float minDist = sq(bin_val - centres(*i_labels));
int curLabel = *i_labels;
for (label = 0; label < K; ++label)
{
float dist = sq(bin_val - centres(label));
if (dist < minDist)
{
minDist = dist;
*i_labels = label;
}
}
if (*i_labels != curLabel)
movedCount++;
sumDist += (*i_hist) * std::sqrt(minDist);
}
if (finalRun)
return sumDist;
// Step 2. Recalculate centres
centres.setTo(0.0f); // reset the accumulators; the weighted means are rebuilt from scratch
cv::Mat_<float> counts(K, 1, 0.0f);
for (i_bin = 0, i_labels = labels.begin(), i_hist = hist.begin();
i_bin < nbins;
++i_bin, ++i_labels, ++i_hist)
{
float bin_val = binStart + i_bin*binWidth;
centres(*i_labels) += (*i_hist) * bin_val;
counts(*i_labels) += *i_hist;
}
for (label = 0; label < K; ++label)
{
if (counts(label) == 0)
return std::numeric_limits<float>::infinity();
centres(label) /= counts(label);
}
// Step 3. Detect termination criteria
if (movedCount == 0)
finalRun = true;
else if ((termCriteria.type & cv::TermCriteria::COUNT) && iters >= termCriteria.maxCount)
finalRun = true;
else if (termCriteria.type & cv::TermCriteria::EPS)
{
float max_movement = 0;
for (label = 0; label < K; ++label)
{
max_movement = std::max(max_movement, sq(centres(label) - old_centres(label)));
}
if (sqrt(max_movement) < termCriteria.epsilon)
finalRun = true;
}
}
return std::numeric_limits<float>::infinity();
}
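A sketch of how histKmeans might be invoked on a small grey-level histogram; the bin values and initial centres below are invented:

#include <opencv2/core.hpp>
#include <iostream>

void histKmeans_example()
{
    // A bimodal 8-bin histogram over the range [0, 256)
    float hist_data[8] = {5, 4, 0, 0, 0, 0, 3, 6};
    cv::Mat_<float> hist(1, 8, hist_data);

    float init_centres[2] = {0.0f, 255.0f}; // one centre per expected mode
    cv::Mat_<uchar> labels;
    cv::TermCriteria term(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 50, 1e-3);

    float dist = singleeyefitter::cvx::histKmeans(hist, 0, 256, 2, init_centres, labels, term);
    std::cout << "weighted distance: " << dist << std::endl;
    // labels now assigns each bin to the nearest of the two centres
}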
Example 13: estimate
TagPoseMap estimate(TagCornerMap const& tags, cv::Vec<RealT, 4> const& camDeltaR, cv::Vec<RealT, 3> const& camDeltaX)
{
TagPoseMap objects;
//Pass the latest camera movement difference for prediction (if 3D filtering is enabled)
mEstimatePose3D.setCamDelta(camDeltaR, camDeltaX);
//Predict pose for all known tags with camera movement (if 3D filtering is enabled)
mEstimatePose3D(objects);
//Correct pose prediction with new observations
std::map<
const std::string, //name of the object
std::pair<
std::vector<cv::Point3_<RealT> >, //points in object
std::vector<cv::Point2f> > > //points in frame
objectToPointMapping;
auto configurationIt = mId2Configuration.begin();
auto configurationEnd = mId2Configuration.end();
for (const auto &tag : tags) {
int tagId = tag.first;
const cv::Mat_<cv::Point2f> corners(tag.second);
while (configurationIt != configurationEnd
&& configurationIt->first < tagId)
++configurationIt;
if (configurationIt != configurationEnd) {
if (configurationIt->first == tagId) {
const auto &configuration = configurationIt->second;
if (configuration.second.mKeep) {
mEstimatePose3D(cv::format("tag_%d", tagId),
configuration.second.mLocalcorners,
corners,
objects);
}
auto & pointMapping = objectToPointMapping[configuration.first];
pointMapping.first.insert(
pointMapping.first.end(),
configuration.second.mCorners.begin(),
configuration.second.mCorners.end());
pointMapping.second.insert(
pointMapping.second.end(),
corners.begin(),
corners.end());
} else if (!mOmitOtherTags) {
mEstimatePose3D(cv::format("tag_%d", tagId),
mDefaultTagCorners,
corners,
objects);
}
} else if (!mOmitOtherTags) {
mEstimatePose3D(cv::format("tag_%d", tagId),
mDefaultTagCorners,
corners,
objects);
}
}
for (auto& objectToPoints : objectToPointMapping) {
mEstimatePose3D(objectToPoints.first,
objectToPoints.second.first,
cv::Mat_<cv::Point2f>(objectToPoints.second.second),
objects);
}
return objects;
}
Example 14: estimate
std::map<std::string, cv::Matx44d> estimate(const std::map<int, Quad> &tags) {
std::map<std::string, cv::Matx44d> objects;
std::map<
const std::string, //name of the object
std::pair<
std::vector<cv::Point3f>, //points in object
std::vector<cv::Point2f> > > //points in frame
objectToPointMapping;
auto configurationIt = mId2Configuration.begin();
auto configurationEnd = mId2Configuration.end();
for (const auto &tag : tags) {
int tagId = tag.first;
const cv::Mat_<cv::Point2f> corners(tag.second);
while (configurationIt != configurationEnd
&& configurationIt->first < tagId)
++configurationIt;
if (configurationIt != configurationEnd) {
if (configurationIt->first == tagId) {
const auto &configuration = configurationIt->second;
if (configuration.second.mKeep) {
computeTransformation(cv::format("tag_%d", tagId),
configuration.second.mLocalcorners,
corners,
objects);
}
auto & pointMapping = objectToPointMapping[configuration.first];
pointMapping.first.insert(
pointMapping.first.end(),
configuration.second.mCorners.begin(),
configuration.second.mCorners.end());
pointMapping.second.insert(
pointMapping.second.end(),
corners.begin(),
corners.end());
} else if (!mOmitOtherTags) {
computeTransformation(cv::format("tag_%d", tagId),
mDefaultTagCorners,
corners,
objects);
}
} else if (!mOmitOtherTags) {
computeTransformation(cv::format("tag_%d", tagId),
mDefaultTagCorners,
corners,
objects);
}
}
for (auto& objectToPoints : objectToPointMapping) {
computeTransformation(objectToPoints.first,
objectToPoints.second.first,
cv::Mat_<cv::Point2f>(objectToPoints.second.second),
objects);
}
return mFilter(objects);
}
Example 15: Response
//===========================================================================
void CCNF_neuron::Response(cv::Mat_<float> &im, cv::Mat_<double> &im_dft, cv::Mat &integral_img, cv::Mat &integral_img_sq, cv::Mat_<float> &resp)
{
int h = im.rows - weights.rows + 1;
int w = im.cols - weights.cols + 1;
// the patch area on which we will calculate responses
cv::Mat_<float> I;
if(neuron_type == 3)
{
// Perform normalisation across the whole patch (ignoring the invalid values indicated by <= 0)
cv::Scalar mean;
cv::Scalar std;
// ignore missing values
cv::Mat_<uchar> mask = im > 0;
cv::meanStdDev(im, mean, std, mask);
// if all values are the same, don't divide by 0
if(std[0] != 0)
{
I = (im - mean[0]) / std[0];
}
else
{
I = (im - mean[0]);
}
I.setTo(0, mask == 0);
}
else
{
if(neuron_type == 0)
{
I = im;
}
else
{
printf("ERROR(%s,%d): Unsupported patch type %d!\n", __FILE__,__LINE__,neuron_type);
abort();
}
}
if(resp.empty())
{
resp.create(h, w);
}
// The response from neuron before activation
if(neuron_type == 3)
{
// In case of depth we use per area, rather than per patch normalisation
matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, CV_TM_CCOEFF); // the linear multiplication, efficient calc of response
}
else
{
matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, CV_TM_CCOEFF_NORMED); // the linear multiplication, efficient calc of response
}
cv::MatIterator_<float> p = resp.begin();
cv::MatIterator_<float> q1 = resp.begin(); // response for each pixel
cv::MatIterator_<float> q2 = resp.end();
// the logistic function (sigmoid) applied to the response
while(q1 != q2)
{
*p++ = (2 * alpha) * 1.0 /(1.0 + exp( -(*q1++ * norm_weights + bias )));
}
}