本文整理汇总了C++中cv::Mat_::clone方法的典型用法代码示例。如果您正苦于以下问题:C++ Mat_::clone方法的具体用法?C++ Mat_::clone怎么用?C++ Mat_::clone使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cv::Mat_
的用法示例。
在下文中一共展示了Mat_::clone方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: preprocessImage
// Prepare an image/mask pair for grey-world illuminant estimation: copy the
// inputs, invalidate saturated and border pixels, then optionally smooth or
// derivative-filter the image.
// @param image      input image (not modified)
// @param mask       input mask; ignored (replaced by an all-zero mask) if its
//                   size does not match the image
// @param inputImage output working copy of the image
// @param inputMask  output working mask with saturated/border pixels marked
void GrayWorldEstimator::preprocessImage(const cv::Mat_<cv::Vec3d>& image, const cv::Mat_<unsigned char>& mask, cv::Mat_<cv::Vec3d> &inputImage, cv::Mat_<unsigned char> &inputMask) const
{
    inputImage = image.clone();
    inputMask = mask.clone();

    // A mask whose size disagrees with the image is unusable; start from an
    // all-zero (nothing masked) mask instead.
    if ((image.rows != mask.rows) || (image.cols != mask.cols)) {
        inputMask = cv::Mat_<unsigned char>(inputImage.rows, inputImage.cols, (unsigned char)0);
    }

    Mask::maskSaturatedPixels(inputImage, inputMask, 1);

    // Grow the masked regions by one pixel so direct neighbours of saturated
    // pixels are excluded too.
    cv::Mat_<unsigned char> element = cv::Mat_<unsigned char>::ones(3, 3);
    cv::dilate(inputMask, inputMask, element);

    // Odd kernel size covering roughly +/- 3 sigma ("| 1" forces oddness).
    // Fix: computed once as int (cvRound already returns int) instead of the
    // original double that was implicitly narrowed when passed to cv::Size and
    // redundantly recomputed (shadowed) inside the if below.
    const int kernelsize = cvRound(m_sigma * 3 * 2 + 1) | 1;
    Mask::maskBorderPixels(inputImage, inputMask, (kernelsize + 1) / 2);

    if (m_sigma > 0) {
        if (m_n == 0) {
            cv::GaussianBlur(inputImage, inputImage, cv::Size(kernelsize, kernelsize), m_sigma, m_sigma);
        } else if (m_n > 0) {
            inputImage = Derivative::normDerivativeFilter(inputImage, m_n, m_sigma);
        }
    }
}
示例2: UpdateRunningMedian
// Maintain a per-dimension running median of a stream of descriptors using a
// fixed-range histogram (num_bins bins spanning [min_val, max_val]) for every
// descriptor dimension. The histogram has one row per descriptor column.
// @param histogram  per-dimension bin counts; created on first call
// @param hist_count number of descriptors accumulated so far (incremented here
//                   when update is true)
// @param median     per-dimension median estimate, recomputed in place
// @param descriptor current descriptor sample (one value per dimension)
// @param update     when true, fold this descriptor into the histogram
// @param num_bins   number of histogram bins per dimension
// @param min_val    lower bound of the histogrammed value range
// @param max_val    upper bound of the histogrammed value range
void FaceAnalyser::UpdateRunningMedian(cv::Mat_<unsigned int>& histogram, int& hist_count, cv::Mat_<double>& median, const cv::Mat_<double>& descriptor, bool update, int num_bins, double min_val, double max_val)
{
// Width of the histogrammed range; guarded so a swapped min/max still yields
// a positive bin width.
double length = max_val - min_val;
if(length < 0)
length = -length;
// The median update
// Lazily allocate the histogram on the first descriptor seen: one row per
// descriptor dimension, all counts zero. The first sample doubles as the
// initial median estimate.
if(histogram.empty())
{
histogram = Mat_<unsigned int>(descriptor.cols, num_bins, (unsigned int)0);
median = descriptor.clone();
}
if(update)
{
// Find the bins corresponding to the current descriptor
Mat_<double> converted_descriptor = (descriptor - min_val)*((double)num_bins)/(length);
// Capping the top and bottom values
converted_descriptor.setTo(Scalar(num_bins-1), converted_descriptor > num_bins - 1);
converted_descriptor.setTo(Scalar(0), converted_descriptor < 0);
// Only count the median till a certain number of frame seen?
// Increment the bin hit by each dimension of this sample.
for(int i = 0; i < histogram.rows; ++i)
{
int index = (int)converted_descriptor.at<double>(i);
histogram.at<unsigned int>(i, index)++;
}
// Update the histogram count
hist_count++;
}
// With a single sample the median is the sample itself.
if(hist_count == 1)
{
median = descriptor.clone();
}
else
{
// Recompute the median
int cutoff_point = (hist_count + 1)/2;
// For each dimension
// Walk the bins until the cumulative count passes the cutoff; the median
// is reported as the centre of that bin mapped back to [min_val, max_val].
for(int i = 0; i < histogram.rows; ++i)
{
int cummulative_sum = 0;
for(int j = 0; j < histogram.cols; ++j)
{
cummulative_sum += histogram.at<unsigned int>(i, j);
if(cummulative_sum > cutoff_point)
{
median.at<double>(i) = min_val + j * (length/num_bins) + (0.5*(length)/num_bins);
break;
}
}
}
}
}
示例3: preprocessImage
// Prepare an image/mask pair for estimation: deep-copy the inputs and mask out
// pixels that are too bright or too dark.
// @param image       input image (untouched)
// @param mask        input mask; replaced by an all-zero mask (with a warning)
//                    when its size disagrees with the image
// @param outputImage working copy of the image
// @param outputMask  working mask with saturated/dark pixels flagged
void Tan::preprocessImage(const cv::Mat_<cv::Vec3d>& image, const cv::Mat_<unsigned char>& mask, cv::Mat_<cv::Vec3d> &outputImage, cv::Mat_<unsigned char> &outputMask) const
{
    outputImage = image.clone();

    const bool maskMatchesImage = (image.rows == mask.rows) && (image.cols == mask.cols);
    if (maskMatchesImage) {
        outputMask = mask.clone();
    } else {
        std::cerr << "No mask!" << std::endl;
        outputMask = cv::Mat_<unsigned char>::zeros(outputImage.rows, outputImage.cols);
    }

    // Exclude pixels outside the usable intensity range from estimation.
    illumestimators::Mask::maskSaturatedPixels(outputImage, outputMask, config.max_intensity);
    illumestimators::Mask::maskDarkPixels(outputImage, outputMask, config.min_intensity);
}
示例4: AlignShapesWithScale
//=============================================================================
// Basically Kabsch's algorithm but also allows the collection of points to be different in scale from each other
// Returns A = s * R, the 2x2 scaled rotation that best maps the mean-normalised
// src points onto the mean-normalised dst points (points are rows of [x, y]).
cv::Matx22f AlignShapesWithScale(cv::Mat_<float>& src, cv::Mat_<float> dst)
{
    int n = src.rows;

    // First we mean normalise both src and dst.
    float mean_src_x = cv::mean(src.col(0))[0];
    float mean_src_y = cv::mean(src.col(1))[0];
    float mean_dst_x = cv::mean(dst.col(0))[0];
    float mean_dst_y = cv::mean(dst.col(1))[0];

    cv::Mat_<float> src_mean_normed = src.clone();
    src_mean_normed.col(0) = src_mean_normed.col(0) - mean_src_x;
    src_mean_normed.col(1) = src_mean_normed.col(1) - mean_src_y;

    cv::Mat_<float> dst_mean_normed = dst.clone();
    dst_mean_normed.col(0) = dst_mean_normed.col(0) - mean_dst_x;
    dst_mean_normed.col(1) = dst_mean_normed.col(1) - mean_dst_y;

    // RMS scale of each point set about its centroid.
    cv::Mat src_sq;
    cv::pow(src_mean_normed, 2, src_sq);
    cv::Mat dst_sq;
    cv::pow(dst_mean_normed, 2, dst_sq);
    float s_src = sqrt(cv::sum(src_sq)[0] / n);
    float s_dst = sqrt(cv::sum(dst_sq)[0] / n);
    src_mean_normed = src_mean_normed / s_src;
    dst_mean_normed = dst_mean_normed / s_dst;

    // Relative scale between the two shapes.
    float s = s_dst / s_src;

    // Optimal rotation between the unit-scale shapes (Kabsch).
    cv::Matx22f R = AlignShapesKabsch2D(src_mean_normed, dst_mean_normed);

    cv::Matx22f A;
    cv::Mat(s * R).copyTo(A);

    // Fix: removed dead code that computed the aligned points and the
    // translation offsets (t_x, t_y) but never used or returned them.
    return A;
}
示例5: midpoint
// Midpoint filter: each output pixel becomes (min + max) / 2 of its
// filter_size x filter_size neighbourhood, with circular (wrap-around)
// handling at the image borders. The input image is not modified.
// @param img         input single-channel float image
// @param filter_size side length of the square neighbourhood
// @return            filtered image of the same size
cv::Mat_<float> midpoint(cv::Mat_<float> &img, unsigned int filter_size)
{
    cv::Mat_<float> img_filter = img.clone();
    std::vector<float> window(filter_size * filter_size, 0.0f);
    const int rows = img.rows;
    const int cols = img.cols;
    const int half = static_cast<int>(filter_size) / 2;

    for (int y = 0; y < cols; y++)
    {
        for (int x = 0; x < rows; x++)
        {
            // Gather the wrapped neighbourhood. Fix: indices are computed in
            // signed arithmetic with a double modulo, which wraps correctly
            // for any filter size; the original's unsigned "+ rows" trick only
            // worked while filter_size/2 did not exceed x + rows. The unused
            // local percentile_i was also dropped.
            std::size_t k = 0;
            for (int fy = 0; fy < static_cast<int>(filter_size); ++fy)
            {
                for (int fx = 0; fx < static_cast<int>(filter_size); ++fx)
                {
                    const int tx = ((x - half + fx) % rows + rows) % rows;
                    const int ty = ((y - half + fy) % cols + cols) % cols;
                    window[k++] = img(tx, ty);
                }
            }
            // Midpoint of the window: min/max scan replaces the original full
            // sort (O(k) instead of O(k log k), same result).
            const auto mm = std::minmax_element(window.begin(), window.end());
            img_filter(x, y) = (*mm.first + *mm.second) / 2;
        }
    }
    return img_filter;
}
示例6: OMP
/** Orthogonal matching pursuit.
 * Greedily selects L dictionary atoms, re-fitting the coefficients by least
 * squares after each selection.
 * x: input signal, N * 1
 * D: dictionary, N * M
 * L: number of non-zero elements in output
 * coeff: coefficient of each atom in the dictionary, M * 1
 */
void OMP(const cv::Mat_<double>& x, const cv::Mat_<double>& D, int L, cv::Mat_<double>& coeff){
    int dim = x.rows;
    int atom_num = D.cols;
    coeff = Mat::zeros(atom_num, 1, CV_64FC1);
    Mat_<double> residual = x.clone();
    Mat_<double> selected_index(L, 1);
    Mat_<double> a;
    // Fix: removed the leftover debug print ("here ok 1") that ran on every
    // iteration of the selection loop.
    for (int i = 0; i < L; i++){
        // Pick the atom most correlated (in absolute value) with the residual.
        Mat_<double> dot_p = D.t() * residual;
        Point max_index;
        minMaxLoc(abs(dot_p), NULL, NULL, NULL, &max_index);
        selected_index(i) = max_index.y;
        // Gather the atoms selected so far into one matrix.
        Mat_<double> temp(dim, i + 1);
        for (int j = 0; j < i + 1; j++){
            D.col(selected_index(j)).copyTo(temp.col(j));
        }
        // Least-squares coefficients via SVD pseudo-inverse, then update the
        // residual to what the selected atoms cannot explain.
        Mat_<double> invert_temp;
        invert(temp, invert_temp, CV_SVD);
        a = invert_temp * x;
        residual = x - temp * a;
    }
    // Scatter the final coefficients back to their dictionary positions.
    for (int i = 0; i < L; i++){
        coeff(selected_index(i)) = a(i);
    }
}
示例7: dftshift
// Compute the (shifted) magnitude spectrum of the 2-D DFT of img, zero-padded
// to optimal sizes of at least twice each dimension.
// NOTE: img is padded in place for the transform and cropped back to its
// original size before returning.
// @param img input image; restored to its original ROI on return
// @param log when true, return log(1 + magnitude) for display scaling
// @return    magnitude spectrum (at the padded size), zero frequency centred
cv::Mat_<float> get_DFT_image(cv::Mat_<float> &img, bool log)
{
// Optimal DFT sizes for at least 2x the input dimensions.
int rows = cv::getOptimalDFTSize(2 * img.rows);
int cols = cv::getOptimalDFTSize(2 * img.cols);
// Remember the original size so img can be cropped back afterwards.
int imgRows = img.rows;
int imgCols = img.cols;
cv::copyMakeBorder(img, img, 0, rows - img.rows, 0, cols - img.cols, cv::BORDER_CONSTANT, cv::Scalar(0));
// Pack the padded image as the real plane; the imaginary plane is all zeros.
cv::Mat_<float> imgs[] = {img.clone(), cv::Mat_<float>(img.rows, img.cols, 0.0f)};
cv::Mat_<cv::Vec2f> img_dft;
cv::merge(imgs, 2, img_dft);
cv::dft(img_dft, img_dft);
// Split back into real/imaginary planes and convert to magnitude/phase.
cv::split(img_dft, imgs);
cv::Mat_<float> magnitude, phase;
cv::cartToPolar(imgs[0], imgs[1], magnitude, phase);
// Move the zero-frequency component to the centre of the spectrum.
dftshift(magnitude);
if(log)
{
// log(1 + |F|) compresses the dynamic range for visualisation.
magnitude += 1.0f;
cv::log(magnitude, magnitude);
}
// Undo the padding: restore img to its original size.
img = img(cv::Rect(0,0,imgCols,imgRows));
return magnitude;
}
示例8:
// Map the mean model shape into a detected face box: shift the shape to the
// origin, scale it to the box dimensions, and translate it to the box corner.
// The shape row stores all x coordinates in its first half and all y
// coordinates in its second half.
// @param mean    mean shape (not modified despite the non-const reference)
// @param faceBox detected face rectangle to fit the shape into
// @return        the repositioned shape
cv::Mat_<float> cModel::Reshape_alt(cv::Mat_<float>& mean, cv::Rect& faceBox)
{
    // Work on a double-precision copy; xCoords/yCoords are views into it, so
    // the arithmetic below updates modelShape in place.
    cv::Mat_<double> modelShape = mean.clone();
    cv::Mat_<double> xCoords = modelShape.colRange(0, modelShape.cols / 2);
    cv::Mat_<double> yCoords = modelShape.colRange(modelShape.cols / 2, modelShape.cols);

    // Bounding box of the model shape.
    double minX, maxX, minY, maxY;
    cv::minMaxLoc(xCoords, &minX, &maxX); // min/max of the x coordinates
    cv::minMaxLoc(yCoords, &minY, &maxY); // min/max of the y coordinates
    double modelWidth = maxX - minX;
    double modelHeight = maxY - minY;

    // Move the shape's top-left corner to the origin...
    xCoords -= minX;
    yCoords -= minY;
    // ...scale it to the face box...
    xCoords *= faceBox.width / modelWidth;
    yCoords *= faceBox.height / modelHeight;
    // ...and translate it to the face box position.
    xCoords += faceBox.x;
    yCoords += faceBox.y;

    // Fix: removed the unused local faceboxScaleFactor (read from
    // m_Params->__facebox_scale_factor) that was only referenced from
    // commented-out scaling/centering variants, which were pruned as well.
    return modelShape;
}
示例9:
/**
 * Find a simplified homography representation from rotation and translation.
 * @param intrisinc: [Input] Camera intrinsics matrix
 * @param rot: [Input] Rotation result
 * @param trans: [Input] Translation result
 * @param homo: [Output] Estimated Homography
 */
void AugmentationEnvironment::Rt2H(const cv::Mat_<double> intrisinc, const cv::Mat_<double>& rot, const cv::Mat_<double> &trans, cv::Mat_<double>& homo)
{
    // Replace the third column of the rotation with the translation vector,
    // then project through the intrinsics and normalise by the depth term.
    cv::Mat_<double> Rt = rot.clone();
    for (int row = 0; row < 3; ++row) {
        Rt.at<double>(row, 2) = trans.at<double>(row, 0);
    }
    homo = (intrisinc * Rt) / trans.at<double>(2, 0);
}
示例10:
// Convert a per-pixel foreground probability map into negative log-likelihood
// energies: fgdEnergy = -log(p), bgdEnergy = -log(1 - p).
// @param prob      per-pixel foreground probability in [0, 1]
// @param fgdEnergy output foreground energy
// @param bgdEnergy output background energy
void SegmenterHumanSimple::_prob2energy(const cv::Mat_<float>& prob, cv::Mat_<float>& fgdEnergy, cv::Mat_<float>& bgdEnergy)
{
    // Fix: the original deep-copied prob into both outputs and then
    // immediately overwrote them — cv::log allocates/resizes its destination
    // itself, so the clones were wasted work.
    cv::log(prob, fgdEnergy);
    cv::log(1 - prob, bgdEnergy);
    fgdEnergy = -fgdEnergy;
    bgdEnergy = -bgdEnergy;
}
示例11: train
// Fit a linear model W to (inputs, Y) by least squares and return the RMS
// residual of the fit.
// @param inputs design matrix, one sample per row (bias column appended here)
// @param Y      target values, one row per sample
// @return       root of the mean (over samples) summed squared residual
double LinearRegression::train(const cv::Mat_<double>& inputs, const cv::Mat_<double>& Y) {
    // Append the bias column and solve via the SVD pseudo-inverse.
    cv::Mat_<double> X = inputs.clone();
    ml::addBias(X);
    W = X.inv(cv::DECOMP_SVD) * Y;

    // Residual computation. Fix: the original evaluated (X * W - Y) twice;
    // it is now computed once and squared element-wise.
    cv::Mat_<double> residual = X * W - Y;
    cv::Mat_<double> avg_mat;
    cv::reduce(residual.mul(residual), avg_mat, 1, CV_REDUCE_SUM);
    cv::reduce(avg_mat, avg_mat, 0, CV_REDUCE_AVG);
    return sqrt(avg_mat(0, 0));
}
示例12: throw
// Load the descriptors from dfile, assign them the next class label, and
// append them (with that label) to the training set.
// @param dfile descriptor file to read
// @param label optional out-parameter receiving the class label assigned to
//              this file's descriptors (may be NULL)
// @return      a deep copy of the loaded descriptor matrix
cv::Mat_<float> DescriptorJoiner::loadDescriptors( const string& dfile, int* label) throw (DescriptorLengthException)
{
    const cv::Mat_<float> vecs = RFeatures::readDescriptors( dfile, false);
    const int numVecs = vecs.rows;
    const int lab = (int)_labCounts.size(); // Label for these descriptors
    _labCounts.push_back(numVecs); // Store the number of descriptors for this class label (vector index)

    // Fix: the out-parameter was accepted but never written, so callers could
    // not learn the label assigned to this file. Report it when requested.
    if ( label != NULL)
        *label = lab;

    // Add vecs to _xs
    for ( int i = 0; i < numVecs; ++i)
    {
        _xs.push_back( vecs.row(i));
        _labs.push_back(lab);
    } // end for

    return vecs.clone();
} // end loadDescriptors
示例13:
// Predict with the linear model of the cluster whose centroid is nearest
// (Euclidean distance) to the input.
// NOTE(review): assumes clusterCentroids is non-empty; otherwise the index
// stays -1 and W[-1] is out of bounds — confirm with callers.
// @param x input feature row (bias appended internally)
// @return  prediction of the selected cluster's linear model
cv::Mat_<double> ClusteredLinearRegression::predict(const cv::Mat_<double>& x) {
    // Locate the nearest cluster centroid.
    int nearest = -1;
    float best = std::numeric_limits<float>::max();
    for (size_t c = 0; c < clusterCentroids.size(); ++c) {
        const float d = cv::norm(clusterCentroids[c] - x);
        if (d < best) {
            best = d;
            nearest = (int)c;
        }
    }

    // Apply that cluster's model to the bias-augmented input.
    cv::Mat_<double> xb = x.clone();
    ml::addBias(xb);
    return xb * W[nearest];
}
示例14: ModifyCrossMapArmlengthToFitSubImage
// Clamp the arm lengths stored in a cross/arm support map so that no arm
// reaches outside the (sub-)image: within maxArmLength of each border, an arm
// pointing at that border is limited to the distance to it.
// Channel layout per Vec4b pixel appears to be [0]=left, [1]=up, [2]=right,
// [3]=down arm lengths, inferred from which border each pass clamps —
// NOTE(review): confirm against the cross-map construction code.
// @param crMapIn      input cross map
// @param maxArmLength maximum arm length used when the map was built
// @param crMapOut     output map, a clamped copy of crMapIn
void spm_bp::ModifyCrossMapArmlengthToFitSubImage(const cv::Mat_<cv::Vec4b>& crMapIn, int maxArmLength, cv::Mat_<cv::Vec4b>& crMapOut)
{
int iy, ix, height, width;
height = crMapIn.rows;
width = crMapIn.cols;
crMapOut = crMapIn.clone();
// up
// In the top band, an upward arm cannot exceed the row index iy.
for (iy = 0; iy < min<int>(maxArmLength, height); ++iy) {
for (ix = 0; ix < width; ++ix) {
crMapOut[iy][ix][1] = min<int>(iy, crMapOut[iy][ix][1]);
}
}
// down
// In the bottom band, ky counts the remaining rows below iy; skip negative
// rows when the image is shorter than maxArmLength.
int ky = maxArmLength - 1;
for (iy = height - maxArmLength; iy < height; ++iy) {
if (iy < 0) {
--ky;
continue;
}
for (ix = 0; ix < width; ++ix) {
crMapOut[iy][ix][3] = min<int>(ky, crMapOut[iy][ix][3]);
}
--ky;
}
// left
// In the left band, a leftward arm cannot exceed the column index ix.
for (iy = 0; iy < height; ++iy) {
for (ix = 0; ix < min<int>(width, maxArmLength); ++ix) {
crMapOut[iy][ix][0] = min<int>(ix, crMapOut[iy][ix][0]);
}
}
// right
// In the right band, kx counts the remaining columns right of ix; skip
// negative columns when the image is narrower than maxArmLength.
int kx;
for (iy = 0; iy < height; ++iy) {
kx = maxArmLength - 1;
for (ix = width - maxArmLength; ix < width; ++ix) {
if (ix < 0) {
--kx;
continue;
}
crMapOut[iy][ix][2] = min<int>(kx, crMapOut[iy][ix][2]);
--kx;
}
}
}
示例15: normImage
// Compute the chromaticity image of rgbImage: each pixel's channels are
// divided by the pixel's L2 norm sqrt(r^2 + g^2 + b^2), so every output pixel
// has unit length (direction of the colour, independent of intensity).
// @param rgbImage input 3-channel float image (not modified)
// @return         per-pixel L2-normalised image
cv::Mat_<cv::Vec3f> Utility::getChromacityImage(cv::Mat_<cv::Vec3f>& rgbImage) {
    cv::Size imgSize = rgbImage.size();

    // Per-pixel L2 norm accumulator, built as sqrt(sum of squared channels).
    cv::Mat_<float> normImage = cv::Mat_<float>::zeros(imgSize);
    std::vector<cv::Mat_<float> > rgbPlanes;
    cv::split(rgbImage, rgbPlanes);

    cv::Mat_<float> singlePlane(imgSize);
    for (int i = 0; i < 3; i++) {
        cv::pow(rgbPlanes[i], 2.0, singlePlane);
        cv::add(normImage, singlePlane, normImage);
    }
    cv::sqrt(normImage, normImage);

    // Normalise each channel by the per-pixel norm. NOTE(review): zero-norm
    // (pure black) pixels divide by zero, exactly as in the original.
    for (int i = 0; i < 3; i++) {
        cv::divide(rgbPlanes[i], normImage, rgbPlanes[i]);
    }

    // Fix: cv::merge allocates its destination itself, so the original's
    // rgbImage.clone() used to seed chromacityImage was a wasted deep copy.
    cv::Mat_<cv::Vec3f> chromacityImage;
    cv::merge(&rgbPlanes[0], 3, chromacityImage);
    return chromacityImage;
}