This page collects typical usage examples of the Java method org.opencv.core.Core.add. If you are wondering what Core.add does, how to use it, or want to see it in context, the curated method examples below may help. You can also explore further usage examples of its containing class, org.opencv.core.Core.
The following 15 code examples of Core.add are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: transEstimate
import org.opencv.core.Core; // import the package/class the method depends on
public static Mat transEstimate(Mat img, int patchSz, double[] airlight, double lambda, double fTrans,
        int r, double eps, double gamma) {
    int rows = img.rows();
    int cols = img.cols();
    List<Mat> bgr = new ArrayList<>();
    Core.split(img, bgr);
    int type = bgr.get(0).type();
    // calculate the transmission map
    Mat T = computeTrans(img, patchSz, rows, cols, type, airlight, lambda, fTrans);
    // refine the transmission map
    img.convertTo(img, CvType.CV_8UC1);
    Mat gray = new Mat();
    Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
    gray.convertTo(gray, CvType.CV_32F);
    Core.divide(gray, new Scalar(255.0), gray);
    T = Filters.GuidedImageFilter(gray, T, r, eps);
    // boost details: split T into a smooth base and a detail layer, rescale the details by gamma
    Mat Tsmooth = new Mat();
    Imgproc.GaussianBlur(T, Tsmooth, new Size(81, 81), 40);
    Mat Tdetails = new Mat();
    Core.subtract(T, Tsmooth, Tdetails);
    Core.multiply(Tdetails, new Scalar(gamma), Tdetails);
    Core.add(Tsmooth, Tdetails, T);
    return T;
}
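The last four calls are the interesting part for Core.add: they implement an unsharp-mask-style detail boost, T = Tsmooth + gamma * (T - Tsmooth). A minimal self-contained sketch of just that step on a synthetic single-channel map (the class and helper names here are illustrative, not from the repository):
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;

public class DetailBoostSketch {
    // Boost high-frequency detail of a CV_32F map by gamma.
    static Mat boostDetails(Mat T, double gamma) {
        Mat smooth = new Mat();
        Imgproc.GaussianBlur(T, smooth, new Size(81, 81), 40);
        Mat details = new Mat();
        Core.subtract(T, smooth, details);          // high-frequency residual
        Core.multiply(details, new Scalar(gamma), details);
        Mat out = new Mat();
        Core.add(smooth, details, out);             // recombine: smooth + gamma * details
        return out;
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat T = new Mat(240, 320, CvType.CV_32F);
        Core.randu(T, 0, 1);                        // stand-in transmission map
        Mat boosted = boostDetails(T, 1.5);
        System.out.println("boosted size: " + boosted.size());
    }
}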
Example 2: Saliency
import org.opencv.core.Core; // import the package/class the method depends on
public static Mat Saliency(Mat img) {
    // blur image with a 3x3 or 5x5 Gaussian filter
    Mat gfbgr = new Mat();
    Imgproc.GaussianBlur(img, gfbgr, new Size(3, 3), 3);
    // perform sRGB to CIE Lab color space conversion
    Mat LabIm = new Mat();
    Imgproc.cvtColor(gfbgr, LabIm, Imgproc.COLOR_BGR2Lab);
    // compute Lab average values (note that in the paper this average is taken over the
    // un-blurred original image, but the results are quite similar)
    List<Mat> lab = new ArrayList<>();
    Core.split(LabIm, lab);
    Mat l = lab.get(0);
    l.convertTo(l, CvType.CV_32F);
    Mat a = lab.get(1);
    a.convertTo(a, CvType.CV_32F);
    Mat b = lab.get(2);
    b.convertTo(b, CvType.CV_32F);
    double lm = Core.mean(l).val[0];
    double am = Core.mean(a).val[0];
    double bm = Core.mean(b).val[0];
    // finally compute the saliency map: squared distance to the mean Lab color
    Mat sm = Mat.zeros(l.rows(), l.cols(), l.type());
    Core.subtract(l, new Scalar(lm), l);
    Core.subtract(a, new Scalar(am), a);
    Core.subtract(b, new Scalar(bm), b);
    Core.add(sm, l.mul(l), sm);
    Core.add(sm, a.mul(a), sm);
    Core.add(sm, b.mul(b), sm);
    return sm;
}
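The returned map holds squared distances to the mean Lab color, so its range depends on the image. A hedged usage sketch that rescales it for display; it assumes the method lives on the FeatureWeight class referenced in Example 4, and the file names are placeholders:
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;

public class SaliencyDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat img = Imgcodecs.imread("input.jpg");    // hypothetical input path
        Mat sm = FeatureWeight.Saliency(img);       // assumed host class, per Example 4
        // Rescale to 0..255 for visualization, since sm holds squared distances.
        Core.normalize(sm, sm, 0, 255, Core.NORM_MINMAX);
        sm.convertTo(sm, CvType.CV_8UC1);
        Imgcodecs.imwrite("saliency.png", sm);
    }
}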
Example 3: globalAdaptation
import org.opencv.core.Core; // import the package/class the method depends on
private static List<Mat> globalAdaptation(Mat b, Mat g, Mat r, int rows, int cols) {
    // calculate the world luminance Lw and its maximum
    Mat Lw = new Mat(rows, cols, r.type());
    Core.multiply(r, new Scalar(rParam), r);
    Core.multiply(g, new Scalar(gParam), g);
    Core.multiply(b, new Scalar(bParam), b);
    Core.add(r, g, Lw);
    Core.add(Lw, b, Lw);
    double LwMax = Core.minMaxLoc(Lw).maxVal; // the maximum luminance value
    // calculate the log-average luminance and compute the global adaptation result
    Mat Lw_ = Lw.clone();
    Core.add(Lw_, new Scalar(0.001), Lw_);    // small offset avoids log(0)
    Core.log(Lw_, Lw_);
    double LwAver = Math.exp(Core.sumElems(Lw_).val[0] / (rows * cols));
    Mat Lg = Lw.clone();                      // Lg = log(Lw / LwAver + 1) / log(LwMax / LwAver + 1)
    Core.divide(Lg, new Scalar(LwAver), Lg);
    Core.add(Lg, new Scalar(1.0), Lg);
    Core.log(Lg, Lg);
    Core.divide(Lg, new Scalar(Math.log(LwMax / LwAver + 1.0)), Lg); // Lg is the globally adapted luminance
    List<Mat> list = new ArrayList<>();
    list.add(Lw);
    list.add(Lg);
    return list;
}
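The constants rParam, gParam, and bParam are class fields not shown on this page; the Rec. 709 luma weights (0.2126, 0.7152, 0.0722) would be one plausible choice. The adaptation curve itself can be exercised standalone; a sketch that re-derives Lg from a synthetic luminance map, following the same formula Lg = log(Lw/LwAver + 1) / log(LwMax/LwAver + 1):
import org.opencv.core.*;

public class GlobalAdaptationSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat Lw = new Mat(200, 200, CvType.CV_32F);
        Core.randu(Lw, 0.0, 1.0);                           // stand-in world luminance
        double LwMax = Core.minMaxLoc(Lw).maxVal;
        Mat logLw = Lw.clone();
        Core.add(logLw, new Scalar(0.001), logLw);          // avoid log(0)
        Core.log(logLw, logLw);
        double LwAver = Math.exp(Core.sumElems(logLw).val[0] / (Lw.rows() * Lw.cols()));
        Mat Lg = Lw.clone();
        Core.divide(Lg, new Scalar(LwAver), Lg);
        Core.add(Lg, new Scalar(1.0), Lg);
        Core.log(Lg, Lg);
        Core.divide(Lg, new Scalar(Math.log(LwMax / LwAver + 1.0)), Lg);
        System.out.println("adapted range: " + Core.minMaxLoc(Lg).minVal
                + " .. " + Core.minMaxLoc(Lg).maxVal);
    }
}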
Example 4: calWeight
import org.opencv.core.Core; // import the package/class the method depends on
private static Mat calWeight(Mat img) {
    Mat L = new Mat();
    img.convertTo(img, CvType.CV_8UC1);
    Imgproc.cvtColor(img, L, Imgproc.COLOR_BGR2GRAY);
    L.convertTo(L, CvType.CV_32F);
    Core.divide(L, new Scalar(255.0), L);
    // calculate the luminance weight
    Mat WC = FeatureWeight.LuminanceWeight(img, L);
    WC.convertTo(WC, L.type());
    // calculate the saliency weight
    Mat WS = FeatureWeight.Saliency(img);
    WS.convertTo(WS, L.type());
    // calculate the exposedness weight
    Mat WE = FeatureWeight.Exposedness(L);
    WE.convertTo(WE, L.type());
    // sum the weight maps
    Mat weight = WC.clone();
    Core.add(weight, WS, weight);
    Core.add(weight, WE, weight);
    return weight;
}
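The combined weight map is typically used as a divisor during normalization (see Example 5), so zero-weight pixels can produce 0/0. A small sketch of a guarded normalization; the epsilon is an assumption, not something this repository is shown to use:
import org.opencv.core.*;

public class WeightNormalizeSketch {
    // Normalize two weight maps so they sum to 1 at every pixel.
    static void normalizePair(Mat w1, Mat w2) {
        Mat sumW = new Mat();
        Core.add(w1, w2, sumW);
        Core.add(sumW, new Scalar(1e-6), sumW);  // assumed tiny epsilon guards against 0/0
        Core.divide(w1, sumW, w1);
        Core.divide(w2, sumW, w2);
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat w1 = new Mat(100, 100, CvType.CV_32F);
        Mat w2 = new Mat(100, 100, CvType.CV_32F);
        Core.randu(w1, 0, 1);
        Core.randu(w2, 0, 1);
        normalizePair(w1, w2);
        Mat check = new Mat();
        Core.add(w1, w2, check);                 // every pixel should now be ~1
        System.out.println("mean of w1+w2: " + Core.mean(check).val[0]);
    }
}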
Example 5: enhance
import org.opencv.core.Core; // import the package/class the method depends on
public static Mat enhance(Mat image, int level) {
    // color balance
    Mat img1 = Filters.SimplestColorBalance(image, 5);
    img1.convertTo(img1, CvType.CV_8UC1);
    // perform sRGB to CIE Lab color space conversion
    Mat LabIm1 = new Mat();
    Imgproc.cvtColor(img1, LabIm1, Imgproc.COLOR_BGR2Lab);
    Mat L1 = new Mat();
    Core.extractChannel(LabIm1, L1, 0);
    // apply CLAHE
    Mat[] result = applyCLAHE(LabIm1, L1);
    Mat img2 = result[0];
    Mat L2 = result[1];
    // calculate the normalized weights
    Mat w1 = calWeight(img1, L1);
    Mat w2 = calWeight(img2, L2);
    Mat sumW = new Mat();
    Core.add(w1, w2, sumW);
    Core.divide(w1, sumW, w1);
    Core.divide(w2, sumW, w2);
    // merge image1 and image2 by multi-scale fusion
    return ImgDecompose.fuseTwoImage(w1, img1, w2, img2, level);
}
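A hedged usage sketch of this method; Enhancer is an assumed host class name, the input path is a placeholder, and 5 pyramid levels is an illustrative choice:
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;

public class EnhanceDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat image = Imgcodecs.imread("underwater.jpg");  // hypothetical input
        Mat result = Enhancer.enhance(image, 5);         // assumed class; 5 = pyramid levels
        result.convertTo(result, CvType.CV_8UC3);        // back to displayable 8-bit BGR
        Imgcodecs.imwrite("enhanced.png", result);
    }
}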
Example 6: calWeight
import org.opencv.core.Core; // import the package/class the method depends on
private static Mat calWeight(Mat img, Mat L) {
    L.convertTo(L, CvType.CV_32F);   // convert to float before dividing, so the scale is not truncated
    Core.divide(L, new Scalar(255.0), L);
    // calculate the Laplacian contrast weight
    Mat WL = FeatureWeight.LaplacianContrast(L);
    WL.convertTo(WL, L.type());
    // calculate the local contrast weight
    Mat WC = FeatureWeight.LocalContrast(L);
    WC.convertTo(WC, L.type());
    // calculate the saliency weight
    Mat WS = FeatureWeight.Saliency(img);
    WS.convertTo(WS, L.type());
    // calculate the exposedness weight
    Mat WE = FeatureWeight.Exposedness(L);
    WE.convertTo(WE, L.type());
    // sum the weight maps
    Mat weight = WL.clone();
    Core.add(weight, WC, weight);
    Core.add(weight, WS, weight);
    Core.add(weight, WE, weight);
    return weight;
}
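FeatureWeight.LaplacianContrast is not shown on this page. A common definition of this weight is the absolute response of a Laplacian filter on the luminance channel; the sketch below implements that idea and is an assumption, not necessarily the repository's exact formula:
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;

public class LaplacianContrastSketch {
    // One common form of the Laplacian contrast weight: |Laplacian(L)|.
    static Mat laplacianContrast(Mat L) {
        Mat lap = new Mat();
        Imgproc.Laplacian(L, lap, L.depth());
        Core.absdiff(lap, new Scalar(0.0), lap);  // absolute value via absdiff with zero
        return lap;
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat L = new Mat(120, 160, CvType.CV_32F);
        Core.randu(L, 0, 1);
        Mat WL = laplacianContrast(L);
        System.out.println("weight map size: " + WL.size());
    }
}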
Example 7: calWeight
import org.opencv.core.Core; // import the package/class the method depends on
private static Mat calWeight(Mat img, Mat L) {
    L.convertTo(L, CvType.CV_32F);   // convert to float before dividing, so the scale is not truncated
    Core.divide(L, new Scalar(255.0), L);
    // calculate the Laplacian contrast weight
    Mat WL = WeightCalculate.LaplacianContrast(L);
    WL.convertTo(WL, L.type());
    // calculate the local contrast weight
    Mat WC = WeightCalculate.LocalContrast(L);
    WC.convertTo(WC, L.type());
    // calculate the saliency weight
    Mat WS = WeightCalculate.Saliency(img);
    WS.convertTo(WS, L.type());
    // calculate the exposedness weight
    Mat WE = WeightCalculate.Exposedness(L);
    WE.convertTo(WE, L.type());
    // sum the weight maps
    Mat weight = WL.clone();
    Core.add(weight, WC, weight);
    Core.add(weight, WS, weight);
    Core.add(weight, WE, weight);
    return weight;
}
Example 8: preDehaze
import org.opencv.core.Core; // import the package/class the method depends on
private static Mat preDehaze(Mat img, double a, double nTrans) {
    // nOut = ((blkIm - a) * nTrans + 128 * a) / 128;
    Core.subtract(img, new Scalar(a), img);
    Core.multiply(img, new Scalar(nTrans), img);
    Core.add(img, new Scalar(128.0 * a), img);
    Core.divide(img, new Scalar(128.0), img);
    return img;
}
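Note that preDehaze writes into its argument, so callers that still need the original should pass a clone. A tiny self-contained worked check of the same chain of in-place operations, with illustrative values for a and nTrans:
import org.opencv.core.*;

public class PreDehazeCheck {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        double a = 0.9, nTrans = 0.5;                       // illustrative values
        Mat img = new Mat(2, 2, CvType.CV_32F, new Scalar(100.0));
        // same sequence of in-place ops as preDehaze above
        Core.subtract(img, new Scalar(a), img);
        Core.multiply(img, new Scalar(nTrans), img);
        Core.add(img, new Scalar(128.0 * a), img);
        Core.divide(img, new Scalar(128.0), img);
        // expected per pixel: ((100 - 0.9) * 0.5 + 128 * 0.9) / 128 ≈ 1.287
        System.out.println("out(0,0) = " + img.get(0, 0)[0]);
    }
}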
Example 9: dehaze
import org.opencv.core.Core; // import the package/class the method depends on
private static Mat dehaze(Mat img, Mat T, double airlight) {
    // J = (img - airlight) ./ T + airlight;
    Core.subtract(img, new Scalar(airlight), img);
    Core.divide(img, T, img);
    Core.add(img, new Scalar(airlight), img);
    return img;
}
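Because the formula divides by the transmission map, values of T near zero amplify noise; dehazing implementations commonly clamp T from below (0.1 is a typical floor). A hedged sketch of that guard; the clamp value and helper name are assumptions:
import org.opencv.core.*;

public class DehazeGuardSketch {
    // J = (img - airlight) ./ max(T, t0) + airlight, with t0 guarding tiny transmissions.
    static Mat dehazeClamped(Mat img, Mat T, double airlight, double t0) {
        Mat Tc = new Mat();
        Core.max(T, new Scalar(t0), Tc);          // clamp transmission from below
        Mat J = new Mat();
        Core.subtract(img, new Scalar(airlight), J);
        Core.divide(J, Tc, J);
        Core.add(J, new Scalar(airlight), J);
        return J;
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat img = new Mat(100, 100, CvType.CV_32F, new Scalar(0.6));
        Mat T = new Mat(100, 100, CvType.CV_32F, new Scalar(0.05)); // nearly opaque haze
        Mat J = dehazeClamped(img, T, 0.9, 0.1);
        System.out.println("J(0,0) = " + J.get(0, 0)[0]);           // (0.6-0.9)/0.1 + 0.9 = -2.1
    }
}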
Example 10: pyramidFuse
import org.opencv.core.Core; // import the package/class the method depends on
private static Mat pyramidFuse(Mat w1, Mat w2, Mat img1, Mat img2, int level) {
    // normalize the weights so that w1 + w2 = 1 at every pixel, then rescale by 2
    Mat sumW = new Mat();
    Core.add(w1, w2, sumW);
    Core.divide(w1, sumW, w1);
    Core.multiply(w1, new Scalar(2.0), w1);
    Core.divide(w2, sumW, w2);
    Core.multiply(w2, new Scalar(2.0), w2);
    // pyramid decomposition and reconstruction
    return ImgDecompose.fuseTwoImage(w1, img1, w2, img2, level);
}
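ImgDecompose.fuseTwoImage is not shown, but pyramid fusion conventionally blends Laplacian pyramids of the images weighted by Gaussian pyramids of the weight maps. A minimal sketch of building such a Gaussian pyramid using only core OpenCV (the helper name is illustrative):
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;

public class GaussianPyramidSketch {
    // Build a Gaussian pyramid with `level` layers (layer 0 is the input).
    static Mat[] gaussianPyramid(Mat img, int level) {
        Mat[] pyr = new Mat[level];
        pyr[0] = img.clone();
        for (int i = 1; i < level; i++) {
            pyr[i] = new Mat();
            Imgproc.pyrDown(pyr[i - 1], pyr[i]);   // blur + downsample by 2
        }
        return pyr;
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat w = new Mat(256, 256, CvType.CV_32F, new Scalar(0.5));
        Mat[] pyr = gaussianPyramid(w, 4);
        System.out.println("top of pyramid: " + pyr[3].size()); // 32x32
    }
}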
Example 11: dehazeProcess
import org.opencv.core.Core; // import the package/class the method depends on
private static Mat dehazeProcess(Mat img, Mat trans, double[] airlight) {
    Mat balancedImg = Filters.SimplestColorBalance(img, 5);
    Mat bCnl = new Mat();
    Core.extractChannel(balancedImg, bCnl, 0);
    Mat gCnl = new Mat();
    Core.extractChannel(balancedImg, gCnl, 1);
    Mat rCnl = new Mat();
    Core.extractChannel(balancedImg, rCnl, 2);
    // get the mean value of each channel
    double bMean = Core.mean(bCnl).val[0];
    double gMean = Core.mean(gCnl).val[0];
    double rMean = Core.mean(rCnl).val[0];
    // get the transmission map for each channel
    Mat Tb = trans.clone();
    Core.multiply(Tb, new Scalar(Math.max(bMean, Math.max(gMean, rMean)) / bMean * 0.8), Tb);
    Mat Tg = trans.clone();
    Core.multiply(Tg, new Scalar(Math.max(bMean, Math.max(gMean, rMean)) / gMean * 0.9), Tg);
    Mat Tr = trans.clone();
    Core.multiply(Tr, new Scalar(Math.max(bMean, Math.max(gMean, rMean)) / rMean * 0.8), Tr);
    // dehaze each channel by the formula J = (I - A) ./ T + A
    // blue channel
    Mat bChannel = new Mat();
    Core.subtract(bCnl, new Scalar(airlight[0]), bChannel);
    Core.divide(bChannel, Tb, bChannel);
    Core.add(bChannel, new Scalar(airlight[0]), bChannel);
    // green channel
    Mat gChannel = new Mat();
    Core.subtract(gCnl, new Scalar(airlight[1]), gChannel);
    Core.divide(gChannel, Tg, gChannel);
    Core.add(gChannel, new Scalar(airlight[1]), gChannel);
    // red channel
    Mat rChannel = new Mat();
    Core.subtract(rCnl, new Scalar(airlight[2]), rChannel);
    Core.divide(rChannel, Tr, rChannel);
    Core.add(rChannel, new Scalar(airlight[2]), rChannel);
    Mat dehazed = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), dehazed);
    return dehazed;
}
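The caller supplies the airlight vector; how this repository estimates it is not shown. For illustration only, a crude estimator that takes the per-channel maximum of the image; real estimators (for example, dark-channel based ones) are more robust:
import org.opencv.core.*;

public class AirlightSketch {
    // Crude airlight estimate: per-channel maximum (brightest value) of a BGR image.
    // Illustrative only; not the repository's estimator.
    static double[] estimateAirlight(Mat bgr) {
        double[] airlight = new double[3];
        for (int c = 0; c < 3; c++) {
            Mat cnl = new Mat();
            Core.extractChannel(bgr, cnl, c);
            airlight[c] = Core.minMaxLoc(cnl).maxVal;
        }
        return airlight;
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat img = new Mat(64, 64, CvType.CV_8UC3, new Scalar(200, 180, 160));
        double[] a = estimateAirlight(img);
        System.out.println(a[0] + ", " + a[1] + ", " + a[2]);
    }
}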
Example 12: Saliency
import org.opencv.core.Core; // import the package/class the method depends on
public static Mat Saliency(Mat img) {
    // blur image with a 3x3 or 5x5 Gaussian filter
    Mat gfbgr = new Mat();
    Imgproc.GaussianBlur(img, gfbgr, new Size(3, 3), 3);
    // perform sRGB to CIE Lab color space conversion
    Mat LabIm = new Mat();
    Imgproc.cvtColor(gfbgr, LabIm, Imgproc.COLOR_BGR2Lab);
    // compute Lab average values (note that in the paper this average is taken over the
    // un-blurred original image, but the results are quite similar)
    List<Mat> lab = new ArrayList<Mat>();
    Core.split(LabIm, lab);
    Mat l = lab.get(0);
    l.convertTo(l, CvType.CV_32F);
    Mat a = lab.get(1);
    a.convertTo(a, CvType.CV_32F);
    Mat b = lab.get(2);
    b.convertTo(b, CvType.CV_32F);
    double lm = Core.mean(l).val[0];
    double am = Core.mean(a).val[0];
    double bm = Core.mean(b).val[0];
    // finally compute the saliency map: squared distance to the mean Lab color
    Mat sm = Mat.zeros(l.rows(), l.cols(), l.type());
    Core.subtract(l, new Scalar(lm), l);
    Core.subtract(a, new Scalar(am), a);
    Core.subtract(b, new Scalar(bm), b);
    Core.add(sm, l.mul(l), sm);
    Core.add(sm, a.mul(a), sm);
    Core.add(sm, b.mul(b), sm);
    return sm;
}
Example 13: PyramidReconstruct
import org.opencv.core.Core; // import the package/class the method depends on
public static Mat PyramidReconstruct(Mat[] pyramid) {
    int level = pyramid.length;
    // collapse the pyramid from the top: upsample each level and add it to the one below
    for (int i = level - 1; i > 0; i--) {
        Mat tmpPyr = new Mat();
        Imgproc.resize(pyramid[i], tmpPyr, pyramid[i - 1].size(), 0, 0, Imgproc.INTER_LINEAR);
        Core.add(pyramid[i - 1], tmpPyr, pyramid[i - 1]);
    }
    return pyramid[0];
}
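This collapses a Laplacian pyramid from the top down. For context, a sketch of the matching decomposition, which PyramidReconstruct inverts exactly when the same resize scheme is used; the builder below is an assumption, since the repository's decomposition is not shown:
import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;

public class LaplacianPyramidSketch {
    // Build a Laplacian pyramid; each layer is the residual between a level
    // and the upsampled version of the level below it.
    static Mat[] laplacianPyramid(Mat img, int level) {
        Mat[] pyr = new Mat[level];
        Mat current = img.clone();
        for (int i = 0; i < level - 1; i++) {
            Mat down = new Mat();
            Imgproc.pyrDown(current, down);
            Mat up = new Mat();
            Imgproc.resize(down, up, current.size(), 0, 0, Imgproc.INTER_LINEAR);
            pyr[i] = new Mat();
            Core.subtract(current, up, pyr[i]);    // band-pass residual at this level
            current = down;
        }
        pyr[level - 1] = current;                  // low-frequency top of the pyramid
        return pyr;
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat img = new Mat(128, 128, CvType.CV_32F, new Scalar(0.5));
        Mat[] pyr = laplacianPyramid(img, 4);
        for (Mat levelMat : pyr) System.out.println(levelMat.size());
    }
}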
Example 14: subspaceReconstruct
import org.opencv.core.Core; // import the package/class the method depends on
public static Mat subspaceReconstruct(Mat W, Mat mean, Mat src) {
    int n = src.rows();
    Mat X = new Mat();
    Mat Y = new Mat();
    src.convertTo(Y, W.type());
    // X = Y * W^T (flag 2 is Core.GEMM_2_T, which transposes the second operand)
    Core.gemm(Y, W, 1.0, new Mat(), 0.0, X, Core.GEMM_2_T);
    // add the mean back to every reconstructed sample
    if (!mean.empty()) {
        for (int i = 0; i < n; i++) {
            Mat r_i = X.row(i);
            Core.add(r_i, mean.reshape(1, 1), r_i);
        }
    }
    return X;
}
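So the method computes X = Y * W^T + mean, mapping subspace coordinates back to the original space. The matching forward projection Y = (src - mean) * W, sketched under the same conventions (the method name follows OpenCV's face-module convention but is written here as an assumption):
import org.opencv.core.*;

public class SubspaceProjectSketch {
    // Forward counterpart of subspaceReconstruct: Y = (src - mean) * W.
    static Mat subspaceProject(Mat W, Mat mean, Mat src) {
        Mat X = new Mat();
        src.convertTo(X, W.type());
        if (!mean.empty()) {
            for (int i = 0; i < X.rows(); i++) {
                Core.subtract(X.row(i), mean.reshape(1, 1), X.row(i));
            }
        }
        Mat Y = new Mat();
        Core.gemm(X, W, 1.0, new Mat(), 0.0, Y);   // no transpose: project onto the basis
        return Y;
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat W = new Mat(4, 2, CvType.CV_64F);      // 4-dim data, 2-dim subspace
        Core.randu(W, -1, 1);
        Mat mean = Mat.zeros(1, 4, CvType.CV_64F);
        Mat src = new Mat(3, 4, CvType.CV_64F);
        Core.randu(src, 0, 1);
        Mat Y = subspaceProject(W, mean, src);
        System.out.println("projected: " + Y.rows() + "x" + Y.cols()); // 3x2
    }
}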
Example 15: GuidedImageFilter
import org.opencv.core.Core; // import the package/class the method depends on
/**
 * Guided Image Filter for grayscale images; an O(1)-per-pixel implementation of the guided filter.
 *
 * @param I guidance image (should be a gray-scale/single-channel image)
 * @param p filtering input image (should be a gray-scale/single-channel image)
 * @param r local window radius
 * @param eps regularization parameter
 * @return filtered image
 */
public static Mat GuidedImageFilter(Mat I, Mat p, int r, double eps) {
    I.convertTo(I, CvType.CV_64FC1);
    p.convertTo(p, CvType.CV_64FC1);
    // [hei, wid] = size(I);
    int rows = I.rows();
    int cols = I.cols();
    // N = boxfilter(ones(hei, wid), r); % the size of each local patch; N = (2r+1)^2 except for boundary pixels
    Mat N = new Mat();
    Imgproc.boxFilter(Mat.ones(rows, cols, I.type()), N, -1, new Size(r, r));
    // mean_I = boxfilter(I, r) ./ N;
    Mat mean_I = new Mat();
    Imgproc.boxFilter(I, mean_I, -1, new Size(r, r));
    // mean_p = boxfilter(p, r) ./ N;
    Mat mean_p = new Mat();
    Imgproc.boxFilter(p, mean_p, -1, new Size(r, r));
    // mean_Ip = boxfilter(I.*p, r) ./ N;
    Mat mean_Ip = new Mat();
    Imgproc.boxFilter(I.mul(p), mean_Ip, -1, new Size(r, r));
    // cov_Ip = mean_Ip - mean_I .* mean_p; % this is the covariance of (I, p) in each local patch
    Mat cov_Ip = new Mat();
    Core.subtract(mean_Ip, mean_I.mul(mean_p), cov_Ip);
    // mean_II = boxfilter(I.*I, r) ./ N;
    Mat mean_II = new Mat();
    Imgproc.boxFilter(I.mul(I), mean_II, -1, new Size(r, r));
    // var_I = mean_II - mean_I .* mean_I;
    Mat var_I = new Mat();
    Core.subtract(mean_II, mean_I.mul(mean_I), var_I);
    // a = cov_Ip ./ (var_I + eps); % Eqn. (5) in the paper
    Mat a = new Mat();
    Core.add(var_I, new Scalar(eps), a);
    Core.divide(cov_Ip, a, a);
    // b = mean_p - a .* mean_I; % Eqn. (6) in the paper
    Mat b = new Mat();
    Core.subtract(mean_p, a.mul(mean_I), b);
    // mean_a = boxfilter(a, r) ./ N;
    Mat mean_a = new Mat();
    Imgproc.boxFilter(a, mean_a, -1, new Size(r, r));
    Core.divide(mean_a, N, mean_a);
    // mean_b = boxfilter(b, r) ./ N;
    Mat mean_b = new Mat();
    Imgproc.boxFilter(b, mean_b, -1, new Size(r, r));
    Core.divide(mean_b, N, mean_b);
    // q = mean_a .* I + mean_b; % Eqn. (8) in the paper
    Mat q = new Mat();
    Core.add(mean_a.mul(I), mean_b, q);
    q.convertTo(q, CvType.CV_32F);
    return q;
}
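A typical use is edge-preserving smoothing with the image as its own guidance. A minimal demo, assuming the method sits on the Filters class referenced in Example 1; the convertTo calls inside the filter mutate its arguments, hence the clones, and the radius/eps values are illustrative:
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;

public class GuidedFilterDemo {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat img = Imgcodecs.imread("input.jpg", Imgcodecs.IMREAD_GRAYSCALE); // hypothetical path
        img.convertTo(img, CvType.CV_32F, 1.0 / 255.0);       // scale to [0, 1]
        // Self-guided (I == p) gives edge-preserving smoothing, similar to a fast bilateral.
        Mat q = Filters.GuidedImageFilter(img.clone(), img.clone(), 8, 0.02);
        Core.multiply(q, new Scalar(255.0), q);
        q.convertTo(q, CvType.CV_8UC1);
        Imgcodecs.imwrite("smoothed.png", q);
    }
}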