This article collects typical usage examples of the C++ method cv::Mat::row. If you have been wondering what cv::Mat::row does, how to use it, or would like to see it in real code, the curated examples below may help. You can also explore further usage examples of the class the method belongs to, cv::Mat.
A total of 15 code examples of Mat::row are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
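Before diving into the examples, here is a minimal standalone sketch (my own, not taken from the examples below, assuming OpenCV 3.x-style headers) that shows the key property of cv::Mat::row: it returns a lightweight header that shares data with the parent matrix, so writing through it modifies the original, while copyTo performs a deep copy of the row data.

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat m = cv::Mat::zeros(3, 4, CV_32F);  // 3x4 matrix of zeros
    cv::Mat r = m.row(1);                      // header only: shares data with m
    r.setTo(cv::Scalar(7));                    // writes through to m
    m.row(1).copyTo(m.row(0));                 // deep copy of row 1 into row 0
    std::cout << m << std::endl;               // rows 0 and 1 are all 7s, row 2 all 0s
    return 0;
}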
Example 1: sharpen
void sharpen(const cv::Mat& image, cv::Mat& result){
    // NOTE: this variant reads from global::image rather than the image parameter.
    // allocate if necessary
    result.create(global::image.size(), global::image.type());
    std::cout << "Size: " << global::image.size() << std::endl;
    std::cout << "Cols: " << global::image.cols << "\n" << "Rows: " << global::image.rows << std::endl;
    for (int j=1; j<global::image.rows-1; j++){ // for all rows except first and last
        const uchar* previous = global::image.ptr<const uchar>(j-1); // previous row
        const uchar* current = global::image.ptr<const uchar>(j);    // current row
        const uchar* next = global::image.ptr<const uchar>(j+1);     // next row
        uchar* output = result.ptr<uchar>(j);                        // output row
        for (int i=1; i<global::image.cols-1; i++){
            *output = cv::saturate_cast<uchar>(5*current[i]-current[i-1]-current[i+1]-previous[i]-next[i]);
            output++;
        }
    }
    // set unprocessed pixels to 0
    result.row(0).setTo(cv::Scalar(0));
    result.row(result.rows-1).setTo(cv::Scalar(0));
    result.col(0).setTo(cv::Scalar(0));
    result.col(result.cols-1).setTo(cv::Scalar(0));
}
Example 2: ShuffleDataset
/* This function rearranges the dataset for training in a random order. This step is
 * necessary to make training more accurate.
 */
void LetterClassifier::ShuffleDataset(cv::Mat &training_data, cv::Mat &label_mat, int numIter)
{
    /* initialize random seed */
    srand(time(NULL));
    int x = 0, y = 0;
    assert(training_data.cols == label_mat.rows);
    int numData = training_data.cols;
    if (numIter <= 0)
        numIter = numData;
    if (training_data.type() != CV_32FC1)
        training_data.convertTo(training_data, CV_32FC1);
    cv::Mat temp_data_mat(training_data.rows, 1, CV_32FC1);
    cv::Mat temp_label_mat(1, 1, CV_32FC1);
    // Iterate 'numIter' times to rearrange the dataset
    for (int n = 0; n < numIter; n++)
    {
        x = (rand() % numData);
        y = (rand() % numData);
        // swap data
        training_data.col(x).copyTo(temp_data_mat.col(0));
        training_data.col(y).copyTo(training_data.col(x));
        temp_data_mat.col(0).copyTo(training_data.col(y));
        // swap labels
        label_mat.row(x).copyTo(temp_label_mat.row(0));
        label_mat.row(y).copyTo(label_mat.row(x));
        temp_label_mat.row(0).copyTo(label_mat.row(y));
    }
}
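A hedged usage sketch (the surrounding LetterClassifier class is not shown in the source, so the instance and the dimensions below are purely illustrative): the function assumes one training sample per column of training_data and one label per row of label_mat, which is why it asserts training_data.cols == label_mat.rows.

#include <opencv2/core.hpp>

int featureDim = 64, numSamples = 100;                     // illustrative sizes
cv::Mat training_data(featureDim, numSamples, CV_32FC1);   // one sample per column
cv::Mat label_mat(numSamples, 1, CV_32FC1);                // one label per row
cv::randu(training_data, 0.0f, 1.0f);                      // dummy data
cv::randu(label_mat, 0.0f, 26.0f);                         // dummy labels

LetterClassifier classifier;                               // hypothetical instance
classifier.ShuffleDataset(training_data, label_mat, 0);    // 0 -> defaults to numSamples swaps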
Example 3: gaussianCorrelation
// Evaluates a Gaussian kernel with bandwidth SIGMA for all relative shifts between input images X and Y, which must both be MxN. They must also be periodic (i.e., pre-processed with a cosine window).
cv::Mat KCFTracker::gaussianCorrelation(cv::Mat x1, cv::Mat x2)
{
    using namespace FFTTools;
    cv::Mat c = cv::Mat( cv::Size(size_patch[1], size_patch[0]), CV_32F, cv::Scalar(0) );
    // HOG features
    if (_hogfeatures) {
        cv::Mat caux;
        cv::Mat x1aux;
        cv::Mat x2aux;
        for (int i = 0; i < size_patch[2]; i++) {
            x1aux = x1.row(i);   // procedure to deal with cv::Mat multichannel bug
            x1aux = x1aux.reshape(1, size_patch[0]);
            x2aux = x2.row(i).reshape(1, size_patch[0]);
            cv::mulSpectrums(fftd(x1aux), fftd(x2aux), caux, 0, true);
            caux = fftd(caux, true);
            rearrange(caux);
            caux.convertTo(caux, CV_32F);
            c = c + real(caux);
        }
    }
    // Gray features
    else {
        cv::mulSpectrums(fftd(x1), fftd(x2), c, 0, true);
        c = fftd(c, true);
        rearrange(c);
        c = real(c);
    }
    cv::Mat d;
    cv::max(( (cv::sum(x1.mul(x1))[0] + cv::sum(x2.mul(x2))[0]) - 2. * c) / (size_patch[0]*size_patch[1]*size_patch[2]), 0, d);
    cv::Mat k;
    cv::exp((-d / (sigma * sigma)), k);
    return k;
}
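The FFT machinery above evaluates the Gaussian kernel for every cyclic shift at once. As a point of reference, here is a small sketch of my own (not part of KCFTracker) that evaluates the same kernel for a single pair of equally sized, single-channel float patches, with sigma passed in explicitly and the same normalization by the number of elements:

#include <opencv2/core.hpp>
#include <cmath>

// k(x1, x2) = exp( -||x1 - x2||^2 / (sigma^2 * N) ), where N is the number of elements
double gaussianKernelSingle(const cv::Mat &x1, const cv::Mat &x2, double sigma)
{
    CV_Assert(x1.size() == x2.size() && x1.type() == CV_32F && x2.type() == CV_32F);
    double d2 = cv::norm(x1, x2, cv::NORM_L2SQR);             // squared Euclidean distance
    return std::exp(-d2 / (sigma * sigma * x1.total()));      // normalized as in the code above
}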
Example 4: projectToEigenvectors
// Eigenvectors as rows
void projectToEigenvectors( const RLearning::PCA &pcaPos, const cv::Mat &evecsRowsPos,
                            const RLearning::PCA &pcaNeg, const cv::Mat &evecsRowsNeg)
{
    cv::Mat selEvecsRowsPos( 2, evecsRowsPos.cols, evecsRowsPos.type());
    cv::Mat selEvecsRowsNeg( 2, evecsRowsNeg.cols, evecsRowsNeg.type());
    evecsRowsPos.row(0).copyTo( selEvecsRowsPos.row(0));
    evecsRowsNeg.row(0).copyTo( selEvecsRowsNeg.row(0));
    for ( int i = 0; i < 10; ++i)
    {
        evecsRowsPos.row(i+1).copyTo( selEvecsRowsPos.row(1));
        evecsRowsNeg.row(i+1).copyTo( selEvecsRowsNeg.row(1));
        std::ostringstream oss;
        oss << i+1;
        const string rowVals = oss.str();
        cv::Mat posProjColVecs = pcaPos.project( selEvecsRowsPos);
        const string posfname = string("pos_pts_") + rowVals + string(".txt");
        std::ofstream ofs1( posfname.c_str());
        RLearning::writePoints( ofs1, (cv::Mat_<double>)posProjColVecs, true);
        ofs1.close();
        cv::Mat negProjColVecs = pcaNeg.project( selEvecsRowsNeg);
        const string negfname = string("neg_pts_") + rowVals + string(".txt");
        std::ofstream ofs2( negfname.c_str());
        RLearning::writePoints( ofs2, (cv::Mat_<double>)negProjColVecs, true);
        ofs2.close();
    } // end for
} // end projectToEigenvectors
Example 5: ext_pooling
//*
void ext_pooling(cv::Mat pool_fea, cv::Mat &center, cv::Mat &range, int K)
{
    int num = pool_fea.rows;
    //cv::flann::KDTreeIndexParams indexParams;
    //cv::flann::Index fea_tree;
    //fea_tree.build(pool_fea, indexParams);
    cv::Mat labels;
    cv::kmeans(pool_fea, K, labels, cv::TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 1000, 1e-6), 5, cv::KMEANS_PP_CENTERS, center);
    range = cv::Mat::zeros(center.rows, 1, CV_32FC1);
    std::vector<size_t> count(center.rows, 0);
    int *idx = (int *)labels.data;
    for( int i = 0 ; i < pool_fea.rows ; i++, idx++ )
    {
        float cur_dist = cv::norm(pool_fea.row(i), center.row(*idx), cv::NORM_L2);
        //if( range.at<float>(*idx, 0) < cur_dist )
        range.at<float>(*idx, 0) += cur_dist;
        count[*idx]++;
    }
    float *ptr = (float *)range.data;
    for( int i = 0 ; i < range.rows ; i++, ptr++ )
        *ptr /= count[i];
}
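A possible way to call the function above (the sizes and data below are illustrative; the feature matrix must be CV_32F for cv::kmeans, one feature vector per row):

#include <opencv2/core.hpp>

cv::Mat pool_fea(500, 128, CV_32FC1);       // 500 feature vectors of dimension 128
cv::randu(pool_fea, 0.0f, 1.0f);            // dummy data for illustration

cv::Mat center, range;
ext_pooling(pool_fea, center, range, 10);   // cluster into K = 10 centers
// center: 10x128 cluster centers; range: 10x1 mean distance of members to their center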
Example 6: sharpen
void sharpen(const cv::Mat& image, cv::Mat& result)
{
    result.create(image.size(), image.type()); // allocate if necessary
    for (int j = 1; j < image.rows - 1; j++)
    { // for all rows (except first and last)
        const uchar* previous = image.ptr<const uchar>(j - 1); // previous row
        const uchar* current = image.ptr<const uchar>(j);      // current row
        const uchar* next = image.ptr<const uchar>(j + 1);     // next row
        uchar* output = result.ptr<uchar>(j);                  // output row
        for (int i = 1; i < image.cols - 1; i++)
        {
            *output++ = cv::saturate_cast<uchar>(5 * current[i] - current[i - 1] - current[i + 1] -
                                                 previous[i] - next[i]);
            // output[i] =
            //     cv::saturate_cast<uchar>(5*current[i]-current[i-1]-current[i+1]-previous[i]-next[i]);
        }
    }
    // Set the unprocessed pixels to 0
    result.row(0).setTo(cv::Scalar(0));
    result.row(result.rows - 1).setTo(cv::Scalar(0));
    result.col(0).setTo(cv::Scalar(0));
    result.col(result.cols - 1).setTo(cv::Scalar(0));
}
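For reference, a hedged sketch of calling one of these single-channel sharpen variants (file names are placeholders; assumes OpenCV 3.x-style headers and flags):

#include <opencv2/imgcodecs.hpp>

cv::Mat image = cv::imread("input.png", cv::IMREAD_GRAYSCALE);  // single-channel input
cv::Mat result;
if (!image.empty()) {
    sharpen(image, result);
    cv::imwrite("sharpened.png", result);
}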
Example 7: sharpen
void sharpen(const cv::Mat& image, cv::Mat& result)
{
    // allocate if necessary
    result.create(image.size(), image.type());
    for (int j = 1; j < image.rows-1; ++j) { // for all rows
        // except the first and last row
        const uchar* previous = image.ptr<uchar>(j-1);
        const uchar* current = image.ptr<uchar>(j);
        const uchar* next = image.ptr<uchar>(j+1);
        uchar* output = result.ptr<uchar>(j); // output row
        for (int i = 1; i < (image.cols-1) * image.channels(); ++i) {
            // saturate_cast: prevents the arithmetic on the pixels from producing a
            // result outside the permitted range of pixel values (0-255)
            *output++ = cv::saturate_cast<uchar>(5 * current[i] - current[i-1] - current[i+1] -
                                                 previous[i] - next[i]);
        }
    }
    // set the unprocessed pixels to zero
    result.row(0).setTo(cv::Scalar(0));
    result.row(result.rows-1).setTo(cv::Scalar(0));
    result.col(0).setTo(cv::Scalar(0));
    result.col(result.cols-1).setTo(cv::Scalar(0));
}
Example 8: sharpen2
void sharpen2(const cv::Mat& image, cv::Mat& result)
{
    result.create(image.size(), image.type()); // allocate if necessary
    int step = image.step1();
    const uchar* previous = image.data;        // ptr to previous row
    const uchar* current = image.data + step;  // ptr to current row
    const uchar* next = image.data + 2 * step; // ptr to next row
    uchar* output = result.data + step;        // ptr to output row
    for (int j = 1; j < image.rows - 1; j++)
    { // for each row (except first and last)
        for (int i = 1; i < image.cols - 1; i++)
        { // for each column (except first and last)
            output[i] = cv::saturate_cast<uchar>(5 * current[i] - current[i - 1] - current[i + 1] -
                                                 previous[i] - next[i]);
        }
        previous += step;
        current += step;
        next += step;
        output += step;
    }
    // Set the unprocessed pixels to 0
    result.row(0).setTo(cv::Scalar(0));
    result.row(result.rows - 1).setTo(cv::Scalar(0));
    result.col(0).setTo(cv::Scalar(0));
    result.col(result.cols - 1).setTo(cv::Scalar(0));
}
Example 9: sharpen
void Processor::sharpen(const cv::Mat &image, cv::Mat &result) {
    startTimer();
    // allocate if necessary
    result.create(image.rows, image.cols, image.type());
    for (int j = 1; j < image.rows - 1; j++) { // for all rows
        // (except first and last)
        // Note: these row pointers are declared but not used below; the .at<cv::Vec3b>() accessor is used instead.
        const uchar* previous = image.ptr<const uchar>(j - 1); // previous row
        const uchar* current = image.ptr<const uchar>(j);      // current row
        const uchar* next = image.ptr<const uchar>(j + 1);     // next row
        uchar* output = result.ptr<uchar>(j);                  // output row
        for (int i = 1; i < image.cols - 1; i++) {
            for (int k = 0; k < image.channels(); k++) {
                result.at<cv::Vec3b>(j, i)[k] = cv::saturate_cast<uchar>(
                    5 * image.at<cv::Vec3b>(j, i)[k]
                    - image.at<cv::Vec3b>(j, i - 1)[k]
                    - image.at<cv::Vec3b>(j, i + 1)[k]
                    - image.at<cv::Vec3b>(j - 1, i)[k]
                    - image.at<cv::Vec3b>(j + 1, i)[k]);
            }
        }
    }
    // Set the unprocessed pixels to 0
    result.row(0).setTo(cv::Scalar(0));
    result.row(result.rows - 1).setTo(cv::Scalar(0));
    result.col(0).setTo(cv::Scalar(0));
    result.col(result.cols - 1).setTo(cv::Scalar(0));
    stopTimer("Sharpen");
}
Example 10: sharpen_OLD
// How to do sharpening without explicitly using a convolution filter and cv::filter2D
void RFeatures::sharpen_OLD( const cv::Mat &img, cv::Mat &out)
{
    out.create( img.size(), img.type()); // Allocate if necessary
    int channels = img.channels();
    int nc = img.cols * channels;
    for ( int j = 1; j < img.rows-1; ++j) // All rows except first and last
    {
        const uchar* previous = img.ptr<const uchar>(j-1); // Previous row
        const uchar* current = img.ptr<const uchar>(j);    // Current row
        const uchar* next = img.ptr<const uchar>(j+1);     // Next row
        uchar* output = out.ptr<uchar>(j);                 // Output row
        for ( int i = channels; i < nc - channels; ++i) // All columns except first and last
        {
            // Keep the intermediate value in an int so saturate_cast can actually clamp it
            int v = 5*current[i] - current[i-channels] - current[i+channels] - previous[i] - next[i];
            *output++ = cv::saturate_cast<uchar>(v);
        } // end for
    } // end for
    // Set the unprocessed pixels to 0
    cv::Scalar s(0);
    if (img.channels() == 3)
        s = cv::Scalar(0,0,0);
    out.row(0).setTo( s);
    out.row(out.rows-1).setTo( s);
    out.col(0).setTo( s);
    out.col(out.cols-1).setTo( s);
} // end sharpen_OLD
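Since the comment above contrasts this with cv::filter2D, here is the kernel-based equivalent for comparison (a sketch of my own; the function name sharpen2D is mine, and border handling differs slightly from the hand-rolled loop, which zeroes the border rows and columns):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

void sharpen2D(const cv::Mat &image, cv::Mat &result)
{
    // Same 3x3 sharpening kernel the explicit loops implement: 5*center minus the 4-neighbours.
    cv::Mat kernel = (cv::Mat_<float>(3, 3) <<  0, -1,  0,
                                               -1,  5, -1,
                                                0, -1,  0);
    cv::filter2D(image, result, image.depth(), kernel);
}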
Example 11: convert_to_ml
/*
* Function to convert the training data to be used by the SVM classifier
*/
void convert_to_ml(const std::vector<cv::Mat> &train_samples, cv::Mat& trainData )
{
    const int rows = (int)train_samples.size();
    const int cols = (int)std::max( train_samples[0].cols, train_samples[0].rows );
    cv::Mat tmp(1, cols, CV_32FC1);
    trainData = cv::Mat(rows, cols, CV_32FC1 );
    std::vector<cv::Mat>::const_iterator itr = train_samples.begin();
    std::vector<cv::Mat>::const_iterator end = train_samples.end();
    for( int i = 0 ; itr != end ; ++itr, ++i ) {
        CV_Assert( itr->cols == 1 || itr->rows == 1 );
        if( itr->cols == 1 ) {
            transpose( *(itr), tmp );
            tmp.copyTo( trainData.row( i ) );
        }
        else if( itr->rows == 1 ) {
            itr->copyTo( trainData.row( i ) );
        }
    }
}
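A hedged sketch of how the converted matrix might then feed an SVM (assuming the OpenCV 3.x cv::ml module; the function name train_example and the label values are mine, not from the source):

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
#include <vector>

void train_example(const std::vector<cv::Mat> &train_samples, const std::vector<int> &labels)
{
    cv::Mat trainData;
    convert_to_ml(train_samples, trainData);              // one sample per row, CV_32FC1

    cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
    svm->setType(cv::ml::SVM::C_SVC);
    svm->setKernel(cv::ml::SVM::LINEAR);
    svm->train(trainData, cv::ml::ROW_SAMPLE, cv::Mat(labels, true)); // labels as a column of ints
}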
Example 12: sharpen
void sharpen(cv::Mat &image, cv::Mat &out)
{
    out.create(image.size(), image.type());
    for (int j=1; j < image.rows-1; j++)
    {
        const uchar* previous = image.ptr<const uchar>(j-1);
        const uchar* current = image.ptr<const uchar>(j);
        const uchar* next = image.ptr<const uchar>(j+1);
        uchar* output = out.ptr<uchar>(j);
        for (int i=1; i < image.cols-1; i++)
        {
            *output++ = cv::saturate_cast<uchar>(
                5*current[i] - current[i-1]
                - current[i+1] - previous[i] - next[i]);
        }
    }
    out.row(0).setTo(cv::Scalar(0));
    out.row(out.rows-1).setTo(cv::Scalar(0));
    out.col(0).setTo(cv::Scalar(0));
    out.col(out.cols-1).setTo(cv::Scalar(0));
}
Example 13: _hammingmatch_lowetest_slow
void _hammingmatch_lowetest_slow(const cv::Mat &_des1, const cv::Mat &_des2, MatchList &_good, const float ratio){
    // compute and compare norms row by row
    int _n1 = _des1.rows;
    int _n2 = _des2.rows;
    int _idx, _mindist1, _mindist2;
    cv::DMatch _mtch;
    for ( int _i = 0; _i < _n1; ++_i ){
        _idx = -1; _mindist1 = 10000; _mindist2 = 1000000; // some arbitrary large values for initialization
        // compute best idx for row _i of _des1
        for ( int _j = 0; _j < _n2; ++_j ){
            // calculate the Hamming distance over the first 4 bytes as a cheap pre-filter
            int _val = cv::normHamming(_des1.row(_i).data, _des2.row(_j).data, 4);
            if ( _val > 8 ) { continue; } // skip to make it fast
            // full Hamming distance over the whole descriptor
            _val = cv::normHamming(_des1.row(_i).data, _des2.row(_j).data, _des2.cols);
            if ( _val < _mindist2 ){
                if ( _val < _mindist1 ){
                    _mindist2 = _mindist1;
                    _mindist1 = _val;
                    _idx = _j;
                } else _mindist2 = _val;
            }
        }
        // ratio test
        if ( (_idx != -1) && (_mindist1 < (_mindist2 * ratio )) ){
            // all is okay
            printf("distance %d: %d\n", _i, _mindist1);
            _mtch.distance = _mindist1;
            _mtch.queryIdx = _i;   // the first
            _mtch.trainIdx = _idx; // the second
            _good.push_back(_mtch);
        }
    }
}
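A possible call site (illustrative only; assumes MatchList is a typedef for std::vector<cv::DMatch>, OpenCV 3.x-style ORB, and placeholder file names). ORB produces the CV_8U binary descriptors this Hamming matcher expects:

#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp>

cv::Mat img1 = cv::imread("frame1.png", cv::IMREAD_GRAYSCALE);
cv::Mat img2 = cv::imread("frame2.png", cv::IMREAD_GRAYSCALE);

cv::Ptr<cv::ORB> orb = cv::ORB::create();
std::vector<cv::KeyPoint> kps1, kps2;
cv::Mat des1, des2;                                    // binary descriptors, one per row
orb->detectAndCompute(img1, cv::noArray(), kps1, des1);
orb->detectAndCompute(img2, cv::noArray(), kps2, des2);

MatchList good;                                        // assumed: std::vector<cv::DMatch>
_hammingmatch_lowetest_slow(des1, des2, good, 0.75f);  // 0.75 is a common Lowe-ratio choice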
Example 14: calAffineMatrix
/*
 * Calculate the affine matrices that transform image1 to
 * image2 and image2 to image1.
 */
void ImageGraph::calAffineMatrix(ImageNode img1, ImageNode img2, cv::Mat &one2two, cv::Mat &two2one) {
    cv::BruteForceMatcher< cv::L2<float> > matcher;
    std::vector< cv::DMatch > matches;
    matcher.match( img1.descriptors, img2.descriptors, matches );
    double max_dist = 0;
    for (int i = 0; i < matches.size(); i++) {
        if (matches[i].distance > max_dist) {
            max_dist = matches[i].distance;
        }
    }
    std::vector<cv::DMatch> goodmatches;
    std::vector<cv::Point2f> goodpoints1;
    std::vector<cv::Point2f> goodpoints2;
    for (int i = 0; i < matches.size(); i++) {
        if (matches[i].distance <= max_dist) {
            goodmatches.push_back(matches[i]);
            goodpoints1.push_back(img1.keypoints[matches[i].queryIdx].pt);
            goodpoints2.push_back(img2.keypoints[matches[i].trainIdx].pt);
        }
    }
    // cv::Mat outp;
    // cv::drawMatches(img1.img, img1.keypoints, img2.img, img2.keypoints, goodmatches, outp);
    // cv::namedWindow("aaa");
    // cv::imshow("aaa", outp);
    // cvWaitKey(0);
    cv::Mat t2o = cv::estimateRigidTransform(goodpoints1, goodpoints2, true);
    // std::cout << t2o.rows << std::endl;
    assert(t2o.cols == 3); // estimateRigidTransform returns a 2x3 matrix (empty on failure)
    one2two = cv::Mat::zeros(3, 3, CV_64F);
    // std::cout << goodpoints1.size() << " " << goodpoints2.size() << std::endl;
    // std::cout << "t2o: " << std::endl;
    // std::cout << t2o << std::endl;
    cv::Mat l = (cv::Mat_<double>(1,3) << 0, 0, 1);
    t2o.row(0).copyTo(one2two.row(0));
    t2o.row(1).copyTo(one2two.row(1));
    l.copyTo(one2two.row(2));
    cv::Mat o2t = cv::estimateRigidTransform(goodpoints2, goodpoints1, true);
    assert(o2t.cols == 3);
    two2one = cv::Mat::zeros(3, 3, CV_64F);
    o2t.row(0).copyTo(two2one.row(0));
    o2t.row(1).copyTo(two2one.row(1));
    l.copyTo(two2one.row(2));
    std::cout << "two2one:" << std::endl;
    std::cout << two2one << std::endl;
    std::cout << "one2two" << std::endl;
    std::cout << one2two << std::endl;
    //std::cout << "asdf " << two2one << std::endl;
    // std::cout << "row 0: " << t2o.row(0) << std::endl;
    // std::cout << "row 1: " << t2o.row(1) << std::endl;
    // std::cout << "row 2: " << l << std::endl;
    // std::cout << "xxx " << t2o << std::endl;
    // std::cout << "yyy " << one2two << std::endl;
}
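The copyTo calls above simply pad the 2x3 result of cv::estimateRigidTransform with the row [0 0 1] to obtain an invertible 3x3 homogeneous matrix. A small sketch of my own showing how such a matrix would be applied to a point (assumes one2two was produced by a previous call to calAffineMatrix):

// Map a point from image1 into image2 using the 3x3 homogeneous matrix.
cv::Mat p = (cv::Mat_<double>(3, 1) << 10.0, 20.0, 1.0);   // homogeneous point (x, y, 1)
cv::Mat q = one2two * p;                                   // third component stays 1 for an affine matrix
cv::Point2f mapped((float)q.at<double>(0), (float)q.at<double>(1));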
Example 15: performBlendY
void AlphaBlender::performBlendY(const cv::Mat& image1, const cv::Mat& image2, cv::Mat& outputImage){
    // Note: outputImage must already be allocated with the same size and type as the inputs,
    // because cv::addWeighted writes into the row view outputImage.row(i) in place.
    double alpha = 1, beta = 0;
    for (int i = 0; i < image1.rows; i++){
        beta = (double)i / (image1.rows - 1);   // blend weight increases from top (0) to bottom (1)
        alpha = 1 - beta;
        cv::addWeighted(image1.row(i), alpha, image2.row(i), beta, 0, outputImage.row(i));
    }
}
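A hedged usage sketch (the AlphaBlender class definition is not shown in the source, so the instance and file names below are illustrative): the two inputs must match in size and type, and the output is pre-allocated as noted above.

#include <opencv2/imgcodecs.hpp>

cv::Mat image1 = cv::imread("a.jpg");            // placeholder file names
cv::Mat image2 = cv::imread("b.jpg");            // must match image1 in size and type
cv::Mat output(image1.size(), image1.type());    // pre-allocate the destination

AlphaBlender blender;                            // hypothetical instance
blender.performBlendY(image1, image2, output);   // top row = image1, bottom row = image2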