This article collects typical usage examples of the C++ method cv::Mat::rowRange. If you have been wondering what exactly Mat::rowRange does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the cv::Mat class, to which this method belongs.
A total of 15 code examples of Mat::rowRange are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help recommend better C++ code examples.
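Before the examples, here is a minimal standalone sketch (not taken from the projects below) illustrating the basic behaviour of cv::Mat::rowRange: it returns a header for the half-open row interval [startrow, endrow) that shares data with the original matrix, so writes through the returned view also change the parent matrix; call clone() when an independent copy is needed.
#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // 4x2 integer matrix with rows (1,2), (3,4), (5,6), (7,8)
    cv::Mat m = (cv::Mat_<int>(4, 2) << 1, 2, 3, 4, 5, 6, 7, 8);

    // rowRange(0, 2) selects rows 0 and 1 as a view; no pixel data is copied
    cv::Mat top = m.rowRange(0, 2);
    top.setTo(0);                                  // also zeroes the first two rows of m

    // clone() detaches the data, so modifying the copy leaves m untouched
    cv::Mat bottom = m.rowRange(2, m.rows).clone();
    bottom.setTo(-1);

    std::cout << m << std::endl;                   // rows 0-1 are now 0, rows 2-3 are unchanged
    return 0;
}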
Example 1: dissym_div
// The image is split into two parts at the separation axis x; each part is at most loc+1 rows long.
// Returns the dissimilarity score.
float ReidDescriptor::dissym_div(int x, cv::Mat img, cv::Mat MSK, int loc, float alpha)
{
    int H = img.rows;
    int W = img.cols;
    int chs = img.channels();
    cv::Mat imgUP = img.rowRange(0, x + 1);      // rows [0, x]
    cv::Mat imgDOWN = img.rowRange(x, img.rows);
    cv::Mat MSK_U = MSK.rowRange(0, x + 1);
    cv::Mat MSK_D = MSK.rowRange(x, MSK.rows);
    int dimLoc = min(min(x + 1, MSK_D.rows), loc + 1);
    if (dimLoc != 0)
    {
        cv::Mat imgUPloc = img.rowRange(x - dimLoc + 1, x + 1); // rows [x-dimLoc+1, x]
        cv::Mat imgDWloc;
        cv::flip(imgDOWN.rowRange(0, dimLoc), imgDWloc, 0);
        cv::Mat temp;
        cv::pow(imgUPloc - imgDWloc, 2, temp);
        float ans = alpha * (1 - sqrt(sum(temp.reshape(1))[0]) / dimLoc) +
                    (1 - alpha) * (abs(sum(MSK_U)[0] - sum(MSK_D)[0])) / max(MSK_U.rows * MSK_U.cols, MSK_D.rows * MSK_D.cols);
        return ans;
    }
    else
    {
        return 1;
    }
}
Example 2: rotate_band
void data_transformer_util::rotate_band(
    cv::Mat image,
    int shift_x_to_left,
    int shift_y_to_top)
{
    int actual_shift_x = (shift_x_to_left % image.cols);
    if (actual_shift_x < 0)
        actual_shift_x += image.cols;
    int actual_shift_y = (shift_y_to_top % image.rows);
    if (actual_shift_y < 0)
        actual_shift_y += image.rows;
    if ((actual_shift_x == 0) && (actual_shift_y == 0))
        return;
    cv::Mat cloned_image = image.clone();
    if (actual_shift_y == 0)
    {
        cloned_image.colRange(actual_shift_x, image.cols).copyTo(image.colRange(0, image.cols - actual_shift_x));
        cloned_image.colRange(0, actual_shift_x).copyTo(image.colRange(image.cols - actual_shift_x, image.cols));
    }
    else if (actual_shift_x == 0)
    {
        cloned_image.rowRange(actual_shift_y, image.rows).copyTo(image.rowRange(0, image.rows - actual_shift_y));
        cloned_image.rowRange(0, actual_shift_y).copyTo(image.rowRange(image.rows - actual_shift_y, image.rows));
    }
    else
    {
        cloned_image.colRange(actual_shift_x, image.cols).rowRange(actual_shift_y, image.rows).copyTo(image.colRange(0, image.cols - actual_shift_x).rowRange(0, image.rows - actual_shift_y));
        cloned_image.colRange(0, actual_shift_x).rowRange(actual_shift_y, image.rows).copyTo(image.colRange(image.cols - actual_shift_x, image.cols).rowRange(0, image.rows - actual_shift_y));
        cloned_image.colRange(actual_shift_x, image.cols).rowRange(0, actual_shift_y).copyTo(image.colRange(0, image.cols - actual_shift_x).rowRange(image.rows - actual_shift_y, image.rows));
        cloned_image.colRange(0, actual_shift_x).rowRange(0, actual_shift_y).copyTo(image.colRange(image.cols - actual_shift_x, image.cols).rowRange(image.rows - actual_shift_y, image.rows));
    }
}
Example 3: getWhsvFeature
cv::Mat ReidDescriptor::getWhsvFeature(cv::Mat img, cv::Mat MSK)
{
    int offset = img.rows / 5;
    vector<cv::Mat> sub(5);
    // Divide the image into 5x1 cells
    for (int i = 0; i < 4; i++) {
        sub[i] = img.rowRange(offset * i, offset * (i + 1));
    }
    sub[4] = img.rowRange(offset * 4, img.rows);
    // Debug this
    cv::Mat conc;
    cv::Mat temp;
    for (int i = 0; i < 5; i++) {
        cv::Mat HSV = HSVVector(sub[i]);
        if (i == 0) {
            conc = HSV;
        } else {
            vconcat(conc, HSV, conc);
        }
    }
    return conc;
    //return cv::Mat::zeros(2,2,CV_8U);
}
Example 4: _removeRowsNonContinuous
void Mat::_removeRowsNonContinuous(cv::Mat &m,
    const std::vector<unsigned int> &rows)
{
    // always preserve the order of the rest of rows
    // remove rows in descending order, grouping when possible
    int end_row = m.rows;
    int i_idx = (int)rows.size() - 1;
    while (i_idx >= 0)
    {
        int j_idx = i_idx - 1;
        while (j_idx >= 0 && ((int)(rows[i_idx] - rows[j_idx]) == i_idx - j_idx))
        {
            j_idx--;
        }
        //data.erase(data.begin() + indices[j_idx + 1],
        //  data.begin() + indices[i_idx] + 1);
        // ==
        //std::copy( m.ptr<T>(rows[i_idx]+1), m.ptr<T>(end_row),
        //  m.ptr<T>(rows[j_idx + 1]) );
        m.rowRange(rows[j_idx+1], rows[j_idx+1] + end_row - rows[i_idx] - 1) =
            m.rowRange(rows[i_idx]+1, end_row) * 1;
        end_row -= rows[i_idx] - rows[j_idx+1] + 1;
        i_idx = j_idx;
    }
    // remove last rows
    m.resize(end_row);
}
Example 5: Whsv_estimation
// Given the map kernel, compute the histogram of each image part and concatenate them.
cv::Mat ReidDescriptor::Whsv_estimation(cv::Mat img, vector<int> NBINs, cv::Mat MAP_KRNL, int HDanti, int TLanti)
{
    cv::Mat img_hsv = img.clone();
    // Matlab ranges: 0:1, 0:1, 0:1 ; OpenCV ranges: 0:180, 0:255, 0:255
    cvtColor(img, img_hsv, CV_BGR2HSV);
    vector<cv::Mat> img_split;
    split(img_hsv, img_split);
    img_split[2].convertTo(img_split[2], CV_8UC1, 255);
    equalizeHist(img_split[2], img_split[2]);
    img_split[0].convertTo(img_split[0], CV_32FC1, 1.0 / 180);
    img_split[1].convertTo(img_split[1], CV_32FC1, 1.0 / 255);
    img_split[2].convertTo(img_split[2], CV_32FC1, 1.0 / 255);
    merge(img_split, img_hsv);
    cv::Mat UP = img_hsv.rowRange(HDanti + 1, TLanti + 1);
    vector<cv::Mat> UP_split; split(UP, UP_split);
    cv::Mat UPW = MAP_KRNL.rowRange(HDanti + 1, TLanti + 1);
    cv::Mat DOWN = img_hsv.rowRange(TLanti + 1, img_hsv.rows);
    vector<cv::Mat> DOWN_split; split(DOWN, DOWN_split);
    cv::Mat DOWNW = MAP_KRNL.rowRange(TLanti + 1, img_hsv.rows);
    UPW = UPW.reshape(1, 1);
    DOWNW = DOWNW.reshape(1, 1);
    cv::Mat tmph0(0, 0, CV_32FC1); cv::Mat tmph2(0, 0, CV_32FC1);
    cv::Mat tmpup0(0, 0, CV_32FC1); cv::Mat tmpup2(0, 0, CV_32FC1);
    cv::Mat tmpdown0(0, 0, CV_32FC1); cv::Mat tmpdown2(0, 0, CV_32FC1);
    for (int ch = 0; ch < 3; ch++) {
        tmph2.push_back(cv::Mat(cv::Mat::zeros(NBINs[ch], 1, CV_32F)));
        cv::Mat rasterUP = UP_split[ch];
        tmpup2.push_back(whistcY(rasterUP.reshape(1, 1), UPW, NBINs[ch]));
        cv::Mat rasterDOWN = DOWN_split[ch];
        tmpdown2.push_back(whistcY(rasterDOWN.reshape(1, 1), DOWNW, NBINs[ch]));
    }
    cv::Mat ans = tmph2;
    ans.push_back(tmpup2);
    ans.push_back(tmpdown2);
    for (int row = 0; row < ans.rows; row++) {
        for (int col = 0; col < ans.cols; col++) {
#ifdef linux
            if (isnan(ans.at<float>(row, col)) == true) {
                ans.at<float>(row, col) = 0;
            }
#endif
#ifdef _WIN32
            if (_isnan(ans.at<float>(row, col)) != 0) {
                ans.at<float>(row, col) = 0;
            }
#endif
        }
    }
    return ans;
}
Example 6: extractVU_method2
// AVERAGE: 16.46 fps after 600 frames
void extractVU_method2(const cv::Mat &srcNV21, cv::Mat &imageYUV) {
    Mat Y, U, V;
    size_t height = 2 * srcNV21.rows / 3;
    // Luma
    Y = srcNV21.rowRange(0, height);
    // Chroma U
    if (U.empty())
        U.create(cv::Size(srcNV21.cols / 2, height / 2), CV_8UC1);
    // Chroma V
    if (V.empty())
        V.create(cv::Size(srcNV21.cols / 2, height / 2), CV_8UC1);
    Mat image = srcNV21.rowRange(height, srcNV21.rows); // interleaved VU plane
    size_t nRows = image.rows; // number of lines
    size_t nCols = image.cols; // number of columns
    /// Treat as a 1D array if continuous
    if (image.isContinuous()) {
        nCols = nCols * nRows;
        nRows = 1; // it is now a single row
    }
    uchar* colorV = reinterpret_cast<uchar*>(V.data);
    uchar* colorU = reinterpret_cast<uchar*>(U.data);
    for (size_t j = 0; j < nRows; j++) {
        /// Pointer to the start of row j (not reset to the first row on every pass)
        const uchar* data = image.ptr<uchar>(j);
        for (size_t i = 0; i < nCols; i += 2) {
            // NV21 stores V and U interleaved: assign each pair to V and U
            *colorV++ = *data++; // [0,255]
            *colorU++ = *data++; // [0,255]
        }
    }
    std::vector<cv::Mat> channels(4);
    cv::Mat Yscaled;
    cv::resize(Y, Yscaled, cv::Size(srcNV21.cols / 2, height / 2));
    channels[0] = Yscaled;
    channels[1] = U;
    channels[2] = V;
    channels[3] = Mat::zeros(cv::Size(srcNV21.cols / 2, height / 2), CV_8UC1) + 255;
    cv::merge(channels, imageYUV);
}
Example 7: circshift
void circshift(cv::Mat &A, int shift_row, int shift_col) {
    int row = A.rows, col = A.cols;
    shift_row = (row + (shift_row % row)) % row;
    shift_col = (col + (shift_col % col)) % col;
    cv::Mat temp = A.clone();
    if (shift_row) {
        temp.rowRange(row - shift_row, row).copyTo(A.rowRange(0, shift_row));
        temp.rowRange(0, row - shift_row).copyTo(A.rowRange(shift_row, row));
    }
    if (shift_col) {
        temp.colRange(col - shift_col, col).copyTo(A.colRange(0, shift_col));
        temp.colRange(0, col - shift_col).copyTo(A.colRange(shift_col, col));
    }
}
Example 8: cutFeatures
bool VisualFeatureExtraction::cutFeatures(cv::vector<cv::KeyPoint> &kpts,
        cv::Mat &features, unsigned short maxFeats) const {
    // store hash values in a map
    std::map<size_t, unsigned int> keyp_hashes;
    cv::vector<cv::KeyPoint>::iterator itKeyp;
    cv::Mat sorted_features;
    unsigned int iLine = 0;
    for (itKeyp = kpts.begin(); itKeyp < kpts.end(); itKeyp++, iLine++)
        keyp_hashes[(*itKeyp).hash()] = iLine;
    // sort values according to the response
    std::sort(kpts.begin(), kpts.end(), greater_than_response());
    // create a new descriptor matrix with the sorted keypoints
    sorted_features.create(0, features.cols, features.type());
    sorted_features.reserve(features.rows);
    for (itKeyp = kpts.begin(); itKeyp < kpts.end(); itKeyp++)
        sorted_features.push_back(features.row(keyp_hashes[(*itKeyp).hash()]));
    features = sorted_features.clone();
    // select the first maxFeats features
    if (kpts.size() > maxFeats) {
        vector<KeyPoint> cutKpts(kpts.begin(), kpts.begin() + maxFeats);
        kpts = cutKpts;
        features = features.rowRange(0, maxFeats).clone();
    }
    return 0;
}
Example 9: mergePoints
void mergePoints(const std::vector<cv::Mat> &in_descriptors, const std::vector<cv::Mat> &in_points,
        cv::Mat &out_descriptors, cv::Mat &out_points) {
    // Figure out the number of points
    size_t n_points = 0, n_images = in_descriptors.size();
    for (size_t image_id = 0; image_id < n_images; ++image_id)
        n_points += in_descriptors[image_id].rows;
    if (n_points == 0)
        return;
    // Fill the descriptors and 3d points
    out_descriptors = cv::Mat(n_points, in_descriptors[0].cols, in_descriptors[0].depth());
    out_points = cv::Mat(1, n_points, CV_32FC3);
    size_t row_index = 0;
    for (size_t image_id = 0; image_id < n_images; ++image_id) {
        // Copy the descriptors
        const cv::Mat & descriptors = in_descriptors[image_id];
        int n_points = descriptors.rows;
        cv::Mat sub_descriptors = out_descriptors.rowRange(row_index, row_index + n_points);
        descriptors.copyTo(sub_descriptors);
        // Copy the 3d points
        const cv::Mat & points = in_points[image_id];
        cv::Mat sub_points = out_points.colRange(row_index, row_index + n_points);
        points.copyTo(sub_points);
        row_index += n_points;
    }
}
Example 10: cvFindFundamentalMat
CV_IMPL int cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
                                  CvMat* fmatrix, int method,
                                  double param1, double param2, CvMat* _mask )
{
    cv::Mat m1 = cv::cvarrToMat(points1), m2 = cv::cvarrToMat(points2);
    if( m1.channels() == 1 && (m1.rows == 2 || m1.rows == 3) && m1.cols > 3 )
        cv::transpose(m1, m1);
    if( m2.channels() == 1 && (m2.rows == 2 || m2.rows == 3) && m2.cols > 3 )
        cv::transpose(m2, m2);
    const cv::Mat FM = cv::cvarrToMat(fmatrix), mask = cv::cvarrToMat(_mask);
    cv::Mat FM0 = cv::findFundamentalMat(m1, m2, method, param1, param2,
        _mask ? cv::_OutputArray(mask) : cv::_OutputArray());
    if( FM0.empty() )
    {
        cv::Mat FM0z = cv::cvarrToMat(fmatrix);
        FM0z.setTo(cv::Scalar::all(0));
        return 0;
    }
    CV_Assert( FM0.cols == 3 && FM0.rows % 3 == 0 && FM.cols == 3 && FM.rows % 3 == 0 && FM.channels() == 1 );
    cv::Mat FM1 = FM.rowRange(0, MIN(FM0.rows, FM.rows));
    FM0.rowRange(0, FM1.rows).convertTo(FM1, FM1.type());
    return FM1.rows / 3;
}
Example 11: inv
cv::Mat Transformations::inv(const cv::Mat &aTb)
{
    // inv(T) = [R^t | -R^t p]
    const cv::Mat R = aTb.rowRange(0,3).colRange(0,3);
    const cv::Mat t = aTb.rowRange(0,3).colRange(3,4);
    cv::Mat Rt = R.t();
    cv::Mat t2 = -Rt*t;
    cv::Mat ret;
    if(aTb.type() == CV_32F)
    {
        ret = (cv::Mat_<float>(4,4) <<
            Rt.at<float>(0,0), Rt.at<float>(0,1), Rt.at<float>(0,2), t2.at<float>(0,0),
            Rt.at<float>(1,0), Rt.at<float>(1,1), Rt.at<float>(1,2), t2.at<float>(1,0),
            Rt.at<float>(2,0), Rt.at<float>(2,1), Rt.at<float>(2,2), t2.at<float>(2,0),
            0, 0, 0, 1);
    }
    else
    {
        ret = (cv::Mat_<double>(4,4) <<
            Rt.at<double>(0,0), Rt.at<double>(0,1), Rt.at<double>(0,2), t2.at<double>(0,0),
            Rt.at<double>(1,0), Rt.at<double>(1,1), Rt.at<double>(1,2), t2.at<double>(1,0),
            Rt.at<double>(2,0), Rt.at<double>(2,1), Rt.at<double>(2,2), t2.at<double>(2,0),
            0, 0, 0, 1);
    }
    return ret;
}
Example 12: sym_dissimilar_MSKH
// The image is split into two parts at the separation line x (row x belongs to both parts);
// each part is at most loc+1 rows long.
float ReidDescriptor::sym_dissimilar_MSKH(int x, cv::Mat img, cv::Mat MSK, int loc, float alpha)
{
    int H = img.rows;
    int W = img.cols;
    int chs = img.channels();
    cv::Mat imgUP = img.rowRange(0, x + 1);      // rows [0, x]
    cv::Mat imgDOWN = img.rowRange(x, img.rows);
    cv::Mat MSK_U = MSK.rowRange(0, x + 1);
    cv::Mat MSK_D = MSK.rowRange(x, MSK.rows);
    int localderU = max(x - loc, 0);
    int localderD = min(loc + 1, MSK_D.rows);
    float ans = -abs(sum(MSK_U.rowRange(localderU, x + 1))[0] - sum(MSK_D.rowRange(0, localderD))[0]);
    return ans;
}
Example 13: alignRigid
// out: aligned modelShape
// in: Rect, ocv with tl x, tl y, w, h (?) and calcs center
// directly modifies modelShape
// could move to parent-class
// assumes mean -0.5, 0.5 and just places inside FB
cv::Mat alignRigid(cv::Mat modelShape, cv::Rect faceBox) const {
    // we assume we get passed a col-vec. For convenience, we keep it.
    if (modelShape.cols != 1) {
        throw std::runtime_error("The supplied model shape does not have one column (i.e. it doesn't seem to be a column-vector).");
        // We could also check if it's a row-vector and if yes, transpose.
    }
    Mat xCoords = modelShape.rowRange(0, modelShape.rows / 2);
    Mat yCoords = modelShape.rowRange(modelShape.rows / 2, modelShape.rows);
    // b) Align the model to the current face-box. (rigid, only centering of the mean). x_0
    // Initial estimate x_0: Center the mean face at the [-0.5, 0.5] x [-0.5, 0.5] square (assuming the face-box is that square)
    // More precise: Take the mean as it is (assume it is in a space [-0.5, 0.5] x [-0.5, 0.5]), and just place it in the face-box as
    // if the box is [-0.5, 0.5] x [-0.5, 0.5]. (i.e. the mean coordinates get upscaled)
    xCoords = (xCoords + 0.5f) * faceBox.width + faceBox.x;
    yCoords = (yCoords + 0.5f) * faceBox.height + faceBox.y;
    /*
    // Old algorithm Zhenhua:
    // scale the model:
    double minX, maxX, minY, maxY;
    cv::minMaxLoc(xCoords, &minX, &maxX);
    cv::minMaxLoc(yCoords, &minY, &maxY);
    float faceboxScaleFactor = 1.25f; // 1.25f: value of Zhenhua Matlab FD. Mine: 1.35f
    float modelWidth = maxX - minX;
    float modelHeight = maxY - minY;
    // scale it:
    modelShape = modelShape * (faceBox.width / modelWidth + faceBox.height / modelHeight) / (2.0f * faceboxScaleFactor);
    // translate the model:
    Scalar meanX = cv::mean(xCoords);
    double meanXd = meanX[0];
    Scalar meanY = cv::mean(yCoords);
    double meanYd = meanY[0];
    // move it:
    xCoords += faceBox.x + faceBox.width / 2.0f - meanXd;
    yCoords += faceBox.y + faceBox.height / 1.8f - meanYd; // we use another value for y because we don't want to center the model right in the middle of the face-box
    */
    return modelShape;
};
Example 14: convolution
void convolution(cv::Mat &inputImg, cv::Mat &outputImg, const cv::Mat &kernel, float scalar) {
    cv::Size imgSize = inputImg.size();
    outputImg = cv::Mat(imgSize, CV_8UC1);
    // jj indexes rows (height), ii indexes columns (width); the one-pixel border is skipped
    for (int jj = 1; jj < imgSize.height - 1; jj++) {
        for (int ii = 1; ii < imgSize.width - 1; ii++) {
            // 3x3 neighbourhood centred on (row jj, column ii)
            auto submat = inputImg.rowRange(jj - 1, jj + 2).colRange(ii - 1, ii + 2);
            auto p = single_pixel_convolution(submat, kernel);
            outputImg.at<uchar>(jj, ii) = static_cast<uchar>(p * scalar);
        }
    }
}
Example 15: drawPose
void drawPose(cv::Mat& img, const cv::Mat& rot, float lineL)
{
    int loc[2] = {70, 70};
    int thickness = 2;
    int lineType = 8;
    cv::Mat P = (cv::Mat_<float>(3,4) <<
        0, lineL, 0, 0,
        0, 0, -lineL, 0,
        0, 0, 0, -lineL);
    P = rot.rowRange(0,2)*P;
    P.row(0) += loc[0];
    P.row(1) += loc[1];
    cv::Point p0(P.at<float>(0,0), P.at<float>(1,0));
    line(img, p0, cv::Point(P.at<float>(0,1), P.at<float>(1,1)), cv::Scalar(255, 0, 0), thickness, lineType);
    line(img, p0, cv::Point(P.at<float>(0,2), P.at<float>(1,2)), cv::Scalar(0, 255, 0), thickness, lineType);
    line(img, p0, cv::Point(P.at<float>(0,3), P.at<float>(1,3)), cv::Scalar(0, 0, 255), thickness, lineType);
}