This article collects typical usage examples of the Java method org.opencv.core.Core.multiply. If you are wondering how Core.multiply is used in practice, or what real-world code that calls it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, org.opencv.core.Core.
The following shows 13 code examples of Core.multiply, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
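Before the examples, here is a minimal, self-contained sketch of the two Core.multiply overloads they rely on: scaling every element of a Mat by a Scalar, and the element-wise product of two Mats with an optional scale factor and output depth. The class name MultiplyDemo and the 3x3 test matrices are illustrative only.

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;

public class MultiplyDemo {
    public static void main(String[] args) {
        // load the native OpenCV library before calling any Core/Imgproc method
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat a = Mat.ones(3, 3, CvType.CV_32F);
        // overload 1: per-element scaling by a Scalar, in place (a = a * 2.5)
        Core.multiply(a, new Scalar(2.5), a);
        // overload 2: element-wise product of two Mats, with scale factor and output depth
        Mat b = Mat.eye(3, 3, CvType.CV_32F);
        Mat dst = new Mat();
        Core.multiply(a, b, dst, 1.0, CvType.CV_32F);
        System.out.println(dst.dump()); // 2.5 on the diagonal, 0 elsewhere
    }
}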
Example 1: transEstimate
import org.opencv.core.Core; // import the package/class this method depends on
public static Mat transEstimate(Mat img, int patchSz, double[] airlight, double lambda, double fTrans,
        int r, double eps, double gamma) {
    int rows = img.rows();
    int cols = img.cols();
    List<Mat> bgr = new ArrayList<>();
    Core.split(img, bgr);
    int type = bgr.get(0).type();
    // calculate the transmission map
    Mat T = computeTrans(img, patchSz, rows, cols, type, airlight, lambda, fTrans);
    // refine the transmission map
    img.convertTo(img, CvType.CV_8UC1);
    Mat gray = new Mat();
    Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
    gray.convertTo(gray, CvType.CV_32F);
    Core.divide(gray, new Scalar(255.0), gray);
    T = Filters.GuidedImageFilter(gray, T, r, eps);
    Mat Tsmooth = new Mat();
    Imgproc.GaussianBlur(T, Tsmooth, new Size(81, 81), 40);
    Mat Tdetails = new Mat();
    Core.subtract(T, Tsmooth, Tdetails);
    Core.multiply(Tdetails, new Scalar(gamma), Tdetails);
    Core.add(Tsmooth, Tdetails, T);
    return T;
}
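In this example Core.multiply drives a detail boost on the refined transmission map: Tdetails = T - Tsmooth is the high-frequency residue left by the heavy Gaussian blur, and scaling it by gamma before adding it back evaluates T = Tsmooth + gamma * (T - Tsmooth).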
Example 2: globalAdaptation
import org.opencv.core.Core; // import the package/class this method depends on
private static List<Mat> globalAdaptation(Mat b, Mat g, Mat r, int rows, int cols) {
    // Calculate Lw & maximum of Lw
    Mat Lw = new Mat(rows, cols, r.type());
    Core.multiply(r, new Scalar(rParam), r);
    Core.multiply(g, new Scalar(gParam), g);
    Core.multiply(b, new Scalar(bParam), b);
    Core.add(r, g, Lw);
    Core.add(Lw, b, Lw);
    double LwMax = Core.minMaxLoc(Lw).maxVal; // the maximum luminance value
    // Calculate log-average luminance and get global adaptation result
    Mat Lw_ = Lw.clone();
    Core.add(Lw_, new Scalar(0.001), Lw_);
    Core.log(Lw_, Lw_);
    double LwAver = Math.exp(Core.sumElems(Lw_).val[0] / (rows * cols));
    Mat Lg = Lw.clone();
    Core.divide(Lg, new Scalar(LwAver), Lg);
    Core.add(Lg, new Scalar(1.0), Lg);
    Core.log(Lg, Lg);
    Core.divide(Lg, new Scalar(Math.log(LwMax / LwAver + 1.0)), Lg); // Lg is the global adaptation
    List<Mat> list = new ArrayList<>();
    list.add(Lw);
    list.add(Lg);
    return list;
}
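The three Core.multiply calls apply per-channel weights (rParam, gParam, bParam) when forming the luminance map Lw. The rest of the method computes the log-average luminance LwAver = exp(mean(log(Lw + 0.001))) and the global tone-mapping curve Lg = log(Lw / LwAver + 1) / log(LwMax / LwAver + 1).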
Example 3: DifferenceOfGaussian
import org.opencv.core.Core; // import the package/class this method depends on
public void DifferenceOfGaussian() {
    Mat grayMat = new Mat();
    Mat blur1 = new Mat();
    Mat blur2 = new Mat();
    // Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);
    Imgproc.GaussianBlur(grayMat, blur1, new Size(15, 15), 5);
    Imgproc.GaussianBlur(grayMat, blur2, new Size(21, 21), 5);
    // Subtracting the two blurred images
    Mat DoG = new Mat();
    Core.absdiff(blur1, blur2, DoG);
    // Inverse binary thresholding
    Core.multiply(DoG, new Scalar(100), DoG);
    Imgproc.threshold(DoG, DoG, 50, 255, Imgproc.THRESH_BINARY_INV);
    // Converting Mat back to Bitmap
    Utils.matToBitmap(DoG, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
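Because the absolute difference of two Gaussian blurs is faint, Core.multiply with Scalar(100) stretches the DoG response so the fixed threshold of 50 can separate edge pixels; THRESH_BINARY_INV then renders them black on a white background before the Mat is converted back to a Bitmap.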
Example 4: ridgeFrequency
import org.opencv.core.Core; // import the package/class this method depends on
/**
 * Calculate ridge frequency.
 */
private double ridgeFrequency(Mat ridgeSegment, Mat segmentMask, Mat ridgeOrientation, Mat frequencies, int blockSize, int windowSize, int minWaveLength, int maxWaveLength) {
    int rows = ridgeSegment.rows();
    int cols = ridgeSegment.cols();
    Mat blockSegment;
    Mat blockOrientation;
    Mat frequency;
    for (int y = 0; y < rows - blockSize; y += blockSize) {
        for (int x = 0; x < cols - blockSize; x += blockSize) {
            blockSegment = ridgeSegment.submat(y, y + blockSize, x, x + blockSize);
            blockOrientation = ridgeOrientation.submat(y, y + blockSize, x, x + blockSize);
            frequency = calculateFrequency(blockSegment, blockOrientation, windowSize, minWaveLength, maxWaveLength);
            frequency.copyTo(frequencies.rowRange(y, y + blockSize).colRange(x, x + blockSize));
        }
    }
    // mask out frequencies calculated for non ridge regions
    Core.multiply(frequencies, segmentMask, frequencies, 1.0, CvType.CV_32FC1);
    // find median frequency over all the valid regions of the image.
    double medianFrequency = medianFrequency(frequencies);
    // the median frequency value used across the whole fingerprint gives a more satisfactory result
    Core.multiply(segmentMask, Scalar.all(medianFrequency), frequencies, 1.0, CvType.CV_32FC1);
    return medianFrequency;
}
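Both Core.multiply calls use the masking pattern: the first zeroes out frequencies in non-ridge blocks by multiplying with the 0/1 segmentMask, and the second writes the single median frequency back into every masked-in location, on the grounds (per the comment) that one global value is more satisfactory than noisy per-block estimates.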
Example 5: enhancement
import org.opencv.core.Core; // import the package/class this method depends on
/**
 * Enhance the image after ridge filter.
 * Apply mask, binary threshold, thinning, ..., etc.
 */
private void enhancement(Mat source, Mat result, int blockSize, int rows, int cols, int padding) {
    Mat MatSnapShotMask = snapShotMask(rows, cols, padding);
    Mat paddedMask = imagePadding(MatSnapShotMask, blockSize);
    if (BuildConfig.DEBUG && !paddedMask.size().equals(source.size())) {
        throw new RuntimeException("Incompatible sizes of image and mask");
    }
    // apply the original mask to get rid of extras
    Core.multiply(source, paddedMask, result, 1.0, CvType.CV_8UC1);
    // apply binary threshold
    Imgproc.threshold(result, result, 0, 255, Imgproc.THRESH_BINARY);
}
Example 6: preDehaze
import org.opencv.core.Core; // import the package/class this method depends on
private static Mat preDehaze(Mat img, double a, double nTrans) {
    // nOut = ( (blkIm - a) * nTrans + 128 * a ) / 128;
    Core.subtract(img, new Scalar(a), img);
    Core.multiply(img, new Scalar(nTrans), img);
    Core.add(img, new Scalar(128.0 * a), img);
    Core.divide(img, new Scalar(128.0), img);
    return img;
}
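The four Core calls evaluate the commented formula one scalar step at a time: subtract a, multiply by nTrans, add 128 * a, then divide by 128. Note that img is modified in place, so the caller's Mat is overwritten as well as returned.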
Example 7: enhanceEachChannel
import org.opencv.core.Core; // import the package/class this method depends on
@SuppressWarnings("unused")
public static Mat enhanceEachChannel(Mat image, int blkSize, int patchSize, double lambda, double eps, int krnlSize) {
    image.convertTo(image, CvType.CV_32F);
    // split image to three channels
    List<Mat> bgr = new ArrayList<>();
    Core.split(image, bgr);
    Mat bChannel = bgr.get(0);
    Mat gChannel = bgr.get(1);
    Mat rChannel = bgr.get(2);
    // obtain air-light
    double[] airlight = AirlightEstimate.estimate(image, blkSize);
    // obtain coarse transmission map and refine it for each channel
    double fTrans = 0.3;
    Mat T = TransmissionEstimate.transEstimateEachChannel(bChannel, patchSize, airlight[0], lambda, fTrans);
    Core.subtract(T, new Scalar(1.0), T);
    Core.multiply(T, new Scalar(-1.0), T);
    Mat Tb = Filters.GuidedImageFilter(bChannel, T, krnlSize, eps);
    T = TransmissionEstimate.transEstimateEachChannel(gChannel, patchSize, airlight[1], lambda, fTrans);
    Core.subtract(T, new Scalar(1.0), T);
    Core.multiply(T, new Scalar(-1.0), T);
    Mat Tg = Filters.GuidedImageFilter(gChannel, T, krnlSize, eps);
    T = TransmissionEstimate.transEstimateEachChannel(rChannel, patchSize, airlight[2], lambda, fTrans);
    Core.subtract(T, new Scalar(1.0), T);
    Core.multiply(T, new Scalar(-1.0), T);
    Mat Tr = Filters.GuidedImageFilter(rChannel, T, krnlSize, eps);
    // dehaze
    bChannel = dehaze(bChannel, Tb, airlight[0]);
    gChannel = dehaze(gChannel, Tg, airlight[1]);
    rChannel = dehaze(rChannel, Tr, airlight[2]);
    Mat outval = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), outval);
    return outval;
}
Example 8: dehaze
import org.opencv.core.Core; // import the package/class this method depends on
private static Mat dehaze(Mat channel, Mat t, double minAtmosLight) {
    Mat t_ = new Mat();
    Core.subtract(t, new Scalar(1.0), t_);
    Core.multiply(t_, new Scalar(-1.0 * minAtmosLight), t_);
    Core.subtract(channel, t_, channel);
    Core.divide(channel, t, channel);
    return channel;
}
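The subtract-then-multiply pair is a recurring idiom on this page for computing (1 - t) * k, since the Java bindings expose no Scalar-minus-Mat overload of Core.subtract: here t_ = (1 - t) * minAtmosLight, so the last two lines evaluate the standard haze-removal formula J = (I - A * (1 - t)) / t, which is algebraically (I - A) / t + A.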
Example 9: pyramidFuse
import org.opencv.core.Core; // import the package/class this method depends on
private static Mat pyramidFuse(Mat w1, Mat w2, Mat img1, Mat img2, int level) {
    // Normalized weight
    Mat sumW = new Mat();
    Core.add(w1, w2, sumW);
    Core.divide(w1, sumW, w1);
    Core.multiply(w1, new Scalar(2.0), w1);
    Core.divide(w2, sumW, w2);
    Core.multiply(w2, new Scalar(2.0), w2);
    // Pyramid decomposition and reconstruct
    return ImgDecompose.fuseTwoImage(w1, img1, w2, img2, level);
}
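Dividing each weight map by sumW makes w1 + w2 equal one at every pixel; the Core.multiply by Scalar(2.0) then rescales the pair to sum to two, presumably to match the scaling expected by ImgDecompose.fuseTwoImage (not shown here).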
Example 10: dehazeProcess
import org.opencv.core.Core; // import the package/class this method depends on
private static Mat dehazeProcess(Mat img, Mat trans, double[] airlight) {
    Mat balancedImg = Filters.SimplestColorBalance(img, 5);
    Mat bCnl = new Mat();
    Core.extractChannel(balancedImg, bCnl, 0);
    Mat gCnl = new Mat();
    Core.extractChannel(balancedImg, gCnl, 1);
    Mat rCnl = new Mat();
    Core.extractChannel(balancedImg, rCnl, 2);
    // get mean value
    double bMean = Core.mean(bCnl).val[0];
    double gMean = Core.mean(gCnl).val[0];
    double rMean = Core.mean(rCnl).val[0];
    // get transmission map for each channel
    Mat Tb = trans.clone();
    Core.multiply(Tb, new Scalar(Math.max(bMean, Math.max(gMean, rMean)) / bMean * 0.8), Tb);
    Mat Tg = trans.clone();
    Core.multiply(Tg, new Scalar(Math.max(bMean, Math.max(gMean, rMean)) / gMean * 0.9), Tg);
    Mat Tr = trans.clone();
    Core.multiply(Tr, new Scalar(Math.max(bMean, Math.max(gMean, rMean)) / rMean * 0.8), Tr);
    // dehaze by formula
    // blue channel
    Mat bChannel = new Mat();
    Core.subtract(bCnl, new Scalar(airlight[0]), bChannel);
    Core.divide(bChannel, Tb, bChannel);
    Core.add(bChannel, new Scalar(airlight[0]), bChannel);
    // green channel
    Mat gChannel = new Mat();
    Core.subtract(gCnl, new Scalar(airlight[1]), gChannel);
    Core.divide(gChannel, Tg, gChannel);
    Core.add(gChannel, new Scalar(airlight[1]), gChannel);
    // red channel
    Mat rChannel = new Mat();
    Core.subtract(rCnl, new Scalar(airlight[2]), rChannel);
    Core.divide(rChannel, Tr, rChannel);
    Core.add(rChannel, new Scalar(airlight[2]), rChannel);
    Mat dehazed = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), dehazed);
    return dehazed;
}
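Here Core.multiply derives a per-channel transmission map by scaling the shared map trans with the ratio of the largest channel mean to each channel's own mean, damped by 0.8 or 0.9, before applying the usual (I - A) / T + A recovery to each channel separately.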
Example 11: process
import org.opencv.core.Core; // import the package/class this method depends on
/**
 * Process an RGBA image. The results can be drawn on or retrieved later.
 * This method does not modify the image.
 *
 * @param rgbaImage An RGBA image matrix
 */
public void process(Mat rgbaImage) {
    Imgproc.pyrDown(rgbaImage, mPyrDownMat);
    Imgproc.pyrDown(mPyrDownMat, mPyrDownMat);
    Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL);
    // Test whether we need two inRange operations (only if the hue crosses over 255)
    if (upperBound.getScalar().val[0] <= 255) {
        Core.inRange(mHsvMat, lowerBound.getScalar(), upperBound.getScalar(), mMask);
    } else {
        // We need two operations - we're going to OR the masks together
        Scalar lower = lowerBound.getScalar().clone();
        Scalar upper = upperBound.getScalar().clone();
        while (upper.val[0] > 255)
            upper.val[0] -= 255;
        double tmp = lower.val[0];
        lower.val[0] = 0;
        // Mask 1 - from 0 to n
        Core.inRange(mHsvMat, lower, upper, mMaskOne);
        // Mask 2 - from 255-n to 255
        lower.val[0] = tmp;
        upper.val[0] = 255;
        Core.inRange(mHsvMat, lower, upper, mMask);
        // OR the two masks
        Core.bitwise_or(mMaskOne, mMask, mMask);
    }
    // Dilate the mask to reduce the amount of detail later processing has to handle
    Imgproc.dilate(mMask, mDilatedMask, new Mat());
    List<MatOfPoint> contourListTemp = new ArrayList<>();
    Imgproc.findContours(mDilatedMask, contourListTemp, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    // Filter contours by area and resize to fit the original image size
    contours.clear();
    for (MatOfPoint c : contourListTemp) {
        Core.multiply(c, new Scalar(4, 4), c);
        contours.add(new Contour(c));
    }
}
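The final Core.multiply operates on point coordinates rather than pixel values: the contours were found on an image downsampled twice by pyrDown (quarter resolution in each dimension), so multiplying each MatOfPoint by Scalar(4, 4) maps the contours back into the original image's coordinate space.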
Example 12: enhance
import org.opencv.core.Core; // import the package/class this method depends on
public static Mat enhance(Mat image, double krnlRatio, double minAtmosLight, double eps) {
    image.convertTo(image, CvType.CV_32F);
    // extract each color channel
    List<Mat> rgb = new ArrayList<>();
    Core.split(image, rgb);
    Mat rChannel = rgb.get(0);
    Mat gChannel = rgb.get(1);
    Mat bChannel = rgb.get(2);
    int rows = rChannel.rows();
    int cols = rChannel.cols();
    // derive the dark channel from original image
    Mat dc = rChannel.clone();
    for (int i = 0; i < image.rows(); i++) {
        for (int j = 0; j < image.cols(); j++) {
            double min = Math.min(rChannel.get(i, j)[0], Math.min(gChannel.get(i, j)[0], bChannel.get(i, j)[0]));
            dc.put(i, j, min);
        }
    }
    // minimum filter
    int krnlSz = Double.valueOf(Math.max(Math.max(rows * krnlRatio, cols * krnlRatio), 3.0)).intValue();
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(krnlSz, krnlSz), new Point(-1, -1));
    Imgproc.erode(dc, dc, kernel);
    // get coarse transmission map
    Mat t = dc.clone();
    Core.subtract(t, new Scalar(255.0), t);
    Core.multiply(t, new Scalar(-1.0), t);
    Core.divide(t, new Scalar(255.0), t);
    // obtain gray scale image
    Mat gray = new Mat();
    Imgproc.cvtColor(image, gray, Imgproc.COLOR_RGB2GRAY);
    Core.divide(gray, new Scalar(255.0), gray);
    // refine transmission map
    int r = krnlSz * 4;
    t = Filters.GuidedImageFilter(gray, t, r, eps);
    // get minimum atmospheric light
    minAtmosLight = Math.min(minAtmosLight, Core.minMaxLoc(dc).maxVal);
    // dehaze each color channel
    rChannel = dehaze(rChannel, t, minAtmosLight);
    gChannel = dehaze(gChannel, t, minAtmosLight);
    bChannel = dehaze(bChannel, t, minAtmosLight);
    // merge three color channels to a image
    Mat outval = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(rChannel, gChannel, bChannel)), outval);
    outval.convertTo(outval, CvType.CV_8UC1);
    return outval;
}
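This is a dark-channel-prior dehaze: dc holds the per-pixel channel minimum, erode applies the minimum filter over the patch, and the subtract/multiply/divide sequence computes the coarse transmission t = (255 - dc) / 255, i.e. 1 - dc / 255, using Core.multiply by Scalar(-1.0) to negate in place for want of a Scalar-minus-Mat call.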
Example 13: InvertImageColor
import org.opencv.core.Core; // import the package/class this method depends on
public static Mat InvertImageColor(Mat img) {
    Mat im = new Mat();
    Core.bitwise_not(normalThresholding(img), im);
    Core.multiply(im, new Scalar(255), im);
    return im;
}