This article collects typical usage examples of the cv::Mat::mul method in C++. If you are wondering what C++ Mat::mul does, how to call it, or what real code that uses it looks like, the curated method examples below may help. You can also explore further usage examples of the enclosing class, cv::Mat.
The following shows 15 code examples of Mat::mul, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
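Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what Mat::mul itself does: a per-element (Hadamard) multiplication of two matrices of the same size and type, with an optional scale factor.

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat a = (cv::Mat_<float>(2, 2) << 1, 2, 3, 4);
    cv::Mat b = (cv::Mat_<float>(2, 2) << 5, 6, 7, 8);
    cv::Mat c = a.mul(b);       // per-element product: [5, 12; 21, 32]
    cv::Mat d = a.mul(b, 0.5);  // optional scale factor: half of the above
    std::cout << c << std::endl << d << std::endl;
    return 0;
}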
Example 1: gaussianCorrelation
// Evaluates a Gaussian kernel with bandwidth SIGMA for all relative shifts between input images X and Y, which must both be MxN. They must also be periodic (i.e., pre-processed with a cosine window).
cv::Mat KCFTracker::gaussianCorrelation(cv::Mat x1, cv::Mat x2)
{
    using namespace FFTTools;
    cv::Mat c = cv::Mat(cv::Size(size_patch[1], size_patch[0]), CV_32F, cv::Scalar(0));
    // HOG features
    if (_hogfeatures) {
        cv::Mat caux;
        cv::Mat x1aux;
        cv::Mat x2aux;
        for (int i = 0; i < size_patch[2]; i++) {
            x1aux = x1.row(i);   // Procedure to deal with cv::Mat multichannel bug
            x1aux = x1aux.reshape(1, size_patch[0]);
            x2aux = x2.row(i).reshape(1, size_patch[0]);
            cv::mulSpectrums(fftd(x1aux), fftd(x2aux), caux, 0, true);
            caux = fftd(caux, true);
            rearrange(caux);
            caux.convertTo(caux, CV_32F);
            c = c + real(caux);
        }
    }
    // Gray features
    else {
        cv::mulSpectrums(fftd(x1), fftd(x2), c, 0, true);
        c = fftd(c, true);
        rearrange(c);
        c = real(c);
    }
    cv::Mat d;
    cv::max(((cv::sum(x1.mul(x1))[0] + cv::sum(x2.mul(x2))[0]) - 2. * c) / (size_patch[0] * size_patch[1] * size_patch[2]), 0, d);
    cv::Mat k;
    cv::exp((-d / (sigma * sigma)), k);
    return k;
}
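For reference, the mul and mulSpectrums calls above implement the KCF kernel-correlation formula, restated here in the code's notation:

k(x_1, x_2) = \exp\left( -\frac{1}{\sigma^2} \max\left( 0, \frac{\lVert x_1 \rVert^2 + \lVert x_2 \rVert^2 - 2\,\mathcal{F}^{-1}\big( \mathcal{F}(x_1) \odot \overline{\mathcal{F}(x_2)} \big)}{N} \right) \right)

where the squared norms come from x.mul(x) followed by cv::sum, and N = size_patch[0] * size_patch[1] * size_patch[2] is the number of elements.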
Example 2:
// Ridgeness operator: F_vv.
// REF [book] >> section 9.1.2 (p. 254) in "Digital and Medical Image Processing", 2005.
// REF [book] >> Figure 9.10 & 9.11 (p. 260) in "Digital and Medical Image Processing", 2005.
cv::Mat ImageFilter::RidgenessOperator::operator()(const cv::Mat& img, const std::size_t apertureSize, const double sigma) const
{
    cv::Mat Fx, Fy, Fxx, Fyy, Fxy;
    ImageFilter::computeDerivativesOfImage(img, apertureSize, sigma, Fx, Fy, Fxx, Fyy, Fxy);
    // Compute Fvv.
    // REF [book] >> p. 255 ~ 256 in "Digital and Medical Image Processing", 2005.
    const cv::Mat Fx2(Fx.mul(Fx)), Fy2(Fy.mul(Fy));
    return (Fy2.mul(Fxx) - 2 * Fx.mul(Fy).mul(Fxy) + Fx2.mul(Fyy)) / (Fx2 + Fy2);
}
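Written out, the chain of mul calls in the return statement is the standard ridgeness measure in gauge coordinates:

F_{vv} = \frac{F_y^2 F_{xx} - 2 F_x F_y F_{xy} + F_x^2 F_{yy}}{F_x^2 + F_y^2}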
Example 3: magnitude
static void magnitude(const cv::Mat& I_X, const cv::Mat& I_Y, cv::Mat& I_mag)
{
    CV_Assert(I_X.type() == CV_64F && I_X.type() == I_Y.type());
    I_mag = I_X.mul(I_X) + I_Y.mul(I_Y);
    int s = I_mag.rows * I_mag.cols;
    double* p = (double*)I_mag.data;
    for (int i = 0; i < s; ++i) {
        *p = cv::sqrt(*p);
        p++;
    }
}
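As an aside, here is a sketch (not part of the original project; the helper name magnitude_simple is made up) of the same computation without the raw-pointer loop, letting OpenCV take the element-wise square root:

#include <opencv2/core.hpp>

static void magnitude_simple(const cv::Mat& I_X, const cv::Mat& I_Y, cv::Mat& I_mag)
{
    CV_Assert(I_X.type() == CV_64F && I_X.type() == I_Y.type());
    // Element-wise sqrt(I_X^2 + I_Y^2); cv::magnitude(I_X, I_Y, I_mag) would also work here.
    cv::sqrt(I_X.mul(I_X) + I_Y.mul(I_Y), I_mag);
}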
Example 4: filterSingleChannel
cv::Mat GuidedFilterMono::filterSingleChannel(const cv::Mat &p) const
{
    cv::Mat mean_p = boxfilter(p, r);
    cv::Mat mean_Ip = boxfilter(I.mul(p), r);
    cv::Mat cov_Ip = mean_Ip - mean_I.mul(mean_p); // this is the covariance of (I, p) in each local patch.
    cv::Mat a = cov_Ip / (var_I + eps);            // Eqn. (5) in the paper;
    cv::Mat b = mean_p - a.mul(mean_I);            // Eqn. (6) in the paper;
    cv::Mat mean_a = boxfilter(a, r);
    cv::Mat mean_b = boxfilter(b, r);
    return mean_a.mul(I) + mean_b;
}
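The mul-based lines implement the guided filter's per-window linear model (the "Eqn. (5)/(6)" referenced in the comments):

a_k = \frac{\mathrm{cov}_k(I, p)}{\mathrm{var}_k(I) + \epsilon}, \qquad b_k = \bar{p}_k - a_k \bar{I}_k, \qquad q_i = \bar{a}_i I_i + \bar{b}_i

where the bars denote box-filtered (local window) means.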
Example 5: computeWeights
void computeWeights(const cv::Mat &imagePatch, const spatiogram &qTarget, const spatiogram &pCurrent,
                    const cv::Mat &w, cv::Mat &weights){
    CV_Assert(qTarget.bins == pCurrent.bins);
    cv::Mat sqco;
    cv::divide(qTarget.cd, pCurrent.cd, sqco);
    cv::sqrt(sqco, sqco);
    cv::Mat rel = w.mul(sqco);
    std::vector<cv::Mat> weightC;
    cv::Mat tc = cv::Mat::zeros(imagePatch.rows, imagePatch.cols, CV_64FC1);
    int n = 256 / qTarget.bins;
    std::vector<double> bins;
    linspace(bins, 0, 256, n);
    int m = pCurrent.bins / imagePatch.channels();
    for (int l = 0; l < imagePatch.channels(); l++){
        for (int j = 0; j < m; j++){
            cv::Mat temp;
            binelements(imagePatch, bins, l, j, temp);
            tc = tc + (rel.at<double>(0, l * m + j)) * temp;
        }
        weightC.push_back(qTarget.C * tc);
    }
    mat3min(weightC, weights);
    //weights=weightC[0];
}
Example 6: Frobenius
double Matrix::Frobenius(cv::Mat A) {
    CV_Assert(!A.empty());
    double frobenius;
    cv::Mat square_A;
    square_A = A.mul(A);
    frobenius = sqrt(cv::sum(square_A)[0]);
    return frobenius;
}
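Side note (an equivalence worth knowing, not from the original source): for a single-channel A the same value can be obtained in one call, since OpenCV's L2 norm of a matrix is the Frobenius norm:

double frobenius = cv::norm(A, cv::NORM_L2);  // == sqrt of the sum of squared elements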
Example 7: bhattacharyya
double Descriptor::bhattacharyya(cv::Mat k, cv::Mat q)
{
    cv::normalize(k, k, 1, 0, cv::NORM_L1);
    cv::normalize(q, q, 1, 0, cv::NORM_L1);
    cv::Mat temp = k.mul(q);
    sqrt(temp, temp);
    return (double)sqrt(1 - cv::sum(temp)[0]);
    // sqrt(1-sum(sqrt(k.*q)))
}
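With k and q L1-normalized so that each sums to 1, the value returned is the Bhattacharyya distance

d_B(k, q) = \sqrt{1 - \sum_i \sqrt{k_i\, q_i}}

which, for histograms normalized this way, should match cv::compareHist(k, q, cv::HISTCMP_BHATTACHARYYA).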
Example 8: coef2object
// Compute 3D object (shape/texture/expression) from weights, given 3DMM basis (MU, PCs, EV). Output size Vx3.
// The input vector "weight" is a column vector of floats.
cv::Mat BaselFaceEstimator::coef2object(cv::Mat weight, cv::Mat MU, cv::Mat PCs, cv::Mat EV){
    int M = weight.rows;
    Mat tmpShape;
    if (M == 0)
        tmpShape = MU.clone();
    else {
        Mat subPC = PCs(Rect(0, 0, M, PCs.rows));
        Mat subEV = EV(Rect(0, 0, 1, M));
        tmpShape = MU + subPC * weight.mul(subEV);
    }
    return tmpShape.reshape(1, tmpShape.rows / 3);
}
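In 3DMM terms, weight.mul(subEV) rescales each coefficient by the corresponding entry of EV before the matrix-vector product with the truncated basis:

S = \mu + P_{1..M}\,(w \odot e_{1..M})

where \mu is MU, P is the PCs basis, w is weight and e holds the first M entries of EV; the resulting 3V x 1 vector is then reshaped to V x 3.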
Example 9: bhattacharyya
// The Bhattacharyya distance between vector k and vector q.
float ReidDescriptor::bhattacharyya(cv::Mat k, cv::Mat q)
{
    cv::normalize(k, k, 1, 0, cv::NORM_L1);
    cv::normalize(q, q, 1, 0, cv::NORM_L1);
    //show the histograms
    //drawHist("hist1", k);
    //drawHist("hist2", q);
    cv::Mat temp = k.mul(q);
    sqrt(temp, temp);
    return (float)sqrt(1 - cv::sum(temp)[0]); // sqrt(1-sum(sqrt(k.*q)))
}
Example 10: si_filter
void Metric::si_filter(cv::Mat& src, cv::Mat& si_filtered, cv::Mat& hfiltered, cv::Mat& vfiltered, int len){
    float filterMask[len];
    float filterA[len][len];
    getFilterMask(len, filterMask);
    for (int i = 0; i < len; i++){
        for (int j = 0; j < len; j++){
            filterA[i][j] = filterMask[i];
        }
    }
    cv::Mat kernel = cv::Mat(len, len, CV_32F, &filterA);
    cv::Mat tmp;
    cv::filter2D(src, hfiltered, -1, kernel);
    cv::filter2D(src, vfiltered, -1, kernel.t());
    cv::Mat h2 = hfiltered.mul(hfiltered);
    cv::Mat v2 = vfiltered.mul(vfiltered);
    cv::add(h2, v2, tmp);
    cv::sqrt(tmp, si_filtered);
}
Example 11: Response
//===========================================================================
void MPatch::Response(cv::Mat &im, cv::Mat &resp)
{
    assert((im.type() == CV_32F) && (resp.type() == CV_64F));
    assert((im.rows >= _h) && (im.cols >= _w));
    int h = im.rows - _h + 1, w = im.cols - _w + 1;
    if (resp.rows != h || resp.cols != w) resp.create(h, w, CV_64F);
    if (res_.rows != h || res_.cols != w) res_.create(h, w, CV_64F);
    if (_p.size() == 1) {
        _p[0].Response(im, resp);
        sum2one(resp);
    }
    else {
        resp = cv::Scalar(1.0);
        for (int i = 0; i < (int)_p.size(); i++) {
            _p[i].Response(im, res_);
            sum2one(res_);
            resp = resp.mul(res_);
        }
        sum2one(resp);
    }
    return;
}
Example 12: apply
void apply(const cv::Mat& A, cv::Mat &B)
{
    Sobel(A, grad_x, ddepth, 1, 0, kernel, scale, delta, cv::BORDER_DEFAULT);
    Sobel(A, grad_y, ddepth, 0, 1, kernel, scale, delta, cv::BORDER_DEFAULT);
    Sobel(grad_x, d_xx, ddepth, 1, 0, kernel, scale, delta, cv::BORDER_DEFAULT);
    Sobel(grad_x, d_xy, ddepth, 0, 1, kernel, scale, delta, cv::BORDER_DEFAULT);
    Sobel(grad_y, d_yy, ddepth, 0, 1, kernel, scale, delta, cv::BORDER_DEFAULT);
    diskr = (((d_xx - d_yy) / 2.0).mul(((d_xx - d_yy) / 2.0)) + d_xy.mul(d_xy));
    sqrt(diskr, root);
    largeC = (d_xx + d_yy) / 2.0 + root;
    smallC = (d_xx + d_yy) / 2.0 - root;
    switch (output)
    {
    case picDx:
        normalize(grad_x, B, 0, 1, CV_MINMAX);
        break;
    case picDy:
        normalize(grad_y, B, 0, 1, CV_MINMAX);
        break;
    case picDxx:
        normalize(d_xx, B, 0, 1, CV_MINMAX);
        break;
    case picDxy:
        normalize(d_xy, B, 0, 1, CV_MINMAX);
        break;
    case picDyy:
        normalize(d_yy, B, 0, 1, CV_MINMAX);
        break;
    case picEVSmall:
        normalize(smallC, B, 0, 1, CV_MINMAX);
        break;
    case picEVLarge:
        normalize(largeC, B, 0, 1, CV_MINMAX);
        break;
    case picDerivInput:
    default:
        B = A;
        break;
    }
}
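The diskr/root lines compute, per pixel, the closed-form eigenvalues of the 2x2 Hessian [d_xx, d_xy; d_xy, d_yy], which largeC and smallC then hold:

\lambda_{1,2} = \frac{d_{xx} + d_{yy}}{2} \pm \sqrt{\left( \frac{d_{xx} - d_{yy}}{2} \right)^2 + d_{xy}^2}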
Example 13: triangulateFromVp
void Triangulator::triangulateFromVp(cv::Mat &vp, cv::Mat &xyz){
    // Solve for xyzw using determinant tensor
    cv::Mat C = determinantTensor;
    std::vector<cv::Mat> xyzw(4);
    for (unsigned int i = 0; i < 4; i++){
        // xyzw[i].create(vp.size(), CV_32F);
        xyzw[i] = C.at<float>(cv::Vec4i(i,0,1,1)) - C.at<float>(cv::Vec4i(i,2,1,1))*uc - C.at<float>(cv::Vec4i(i,0,2,1))*vc -
                  C.at<float>(cv::Vec4i(i,0,1,2))*vp + C.at<float>(cv::Vec4i(i,2,1,2))*vp.mul(uc) + C.at<float>(cv::Vec4i(i,0,2,2))*vp.mul(vc);
    }
    // Convert to non-homogeneous coordinates
    for (unsigned int i = 0; i < 3; i++)
        xyzw[i] /= xyzw[3];
    // Merge
    cv::merge(std::vector<cv::Mat>(xyzw.begin(), xyzw.begin() + 3), xyz);
}
Example 14: interpolate
/**
 * Interpolate D19 pixel values
 */
void CalibrationFilter::interpolate()
{
    // Gaussian interpolation mask
    float coeff[9] = {
        .707, 1.0, .707,
        1.0,  0.0, 1.0,
        .707, 1.0, .707 };
    const cv::Mat mask(3, 3, CV_32F, coeff);
    for (int i = 1; i < mFrame32F_big.rows - 1; ++i)
    {
        for (int j = 1; j < mFrame32F_big.cols - 1; ++j)
        {
            // Check if pixel is dead
            if (mAlive_big.at<unsigned char>(i, j) == 0)
            {
                // Dead pixel: build interpolation matrix
                cv::Mat neighbor(3, 3, CV_32F);
                mAlive_big(cv::Range(i-1, i+2), cv::Range(j-1, j+2)).convertTo(neighbor, CV_32F);
                //QLOG_DEBUG() << TAG << "neighbor sum" << cv::sum( neighbor )[0];
                // Compute interpolated value
                // 'M' is the same as 'mask' but with 0-coefficient over dead pixels
                const cv::Mat M = mask.mul(neighbor, 1.0);
                // 'roi' Region of interest is the 3x3 region centered on the dead pixel
                const cv::Mat roi = mFrame32F_big(cv::Range(i-1, i+2), cv::Range(j-1, j+2));
                double sum = cv::sum(M)[0];
                if (sum >= 1.0)
                {
                    mFrame32F_big.at<float>(i, j) = M.dot(roi) / sum;
                }
                else
                {
                    mFrame32F_big.at<float>(i, j) = 0;
                }
            }
        }
    }
}
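The mask.mul(neighbor, 1.0) step zeroes the Gaussian weights over dead neighbours, so the interpolated value is a weighted average restricted to live pixels:

\hat{v}_{ij} = \frac{\sum_k a_k\, g_k\, v_k}{\sum_k a_k\, g_k}

where, over the 3x3 neighbourhood, g_k are the Gaussian mask coefficients, a_k \in \{0, 1\} the alive flags and v_k the neighbouring pixel values; the code falls back to 0 when the weight sum drops below 1.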
Example 15: WeightedSum
double mylib::WeightedSum(const cv::Mat& A, const cv::Mat& W)
{
    CV_Assert(A.size() == W.size());
    //int a = A.type();
    //int b = W.type();
    double result = 0.0;
    cv::Scalar s;
    vector<Mat> Wv;
    Mat W3;
    for (int i = 0; i < A.channels(); i++)
    {
        Wv.push_back(W);
    }
    cv::merge(Wv, W3);
    W3 = A.mul(W3);
    s = cv::sum(W3);
    for (int i = 0; i < A.channels(); i++)
        result += s[i];
    return result / A.channels();
}
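In effect, W is replicated across the channels of A so that a single mul covers all channels; the returned value is

\mathrm{WeightedSum}(A, W) = \frac{1}{C} \sum_{c=1}^{C} \sum_{i,j} A_c(i, j)\, W(i, j)

where C is the number of channels of A.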