This article collects typical usage examples of the Core.split method from the Java class org.opencv.core.Core. If you are wondering what Core.split does, how to use it, or where to find examples of it in practice, the curated method examples below may help. You can also explore further usage examples of the enclosing class org.opencv.core.Core.
Fifteen code examples of Core.split are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: transEstimate
import org.opencv.core.Core; // import the package/class this method depends on
public static Mat transEstimate(Mat img, int patchSz, double[] airlight, double lambda, double fTrans,
        int r, double eps, double gamma) {
    int rows = img.rows();
    int cols = img.cols();
    List<Mat> bgr = new ArrayList<>();
    Core.split(img, bgr);
    int type = bgr.get(0).type();
    // calculate the transmission map
    Mat T = computeTrans(img, patchSz, rows, cols, type, airlight, lambda, fTrans);
    // refine the transmission map with a guided filter, using the gray image as guide
    img.convertTo(img, CvType.CV_8UC1);
    Mat gray = new Mat();
    Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
    gray.convertTo(gray, CvType.CV_32F);
    Core.divide(gray, new Scalar(255.0), gray);
    T = Filters.GuidedImageFilter(gray, T, r, eps);
    // boost fine details in the refined map: T = Tsmooth + gamma * (T - Tsmooth)
    Mat Tsmooth = new Mat();
    Imgproc.GaussianBlur(T, Tsmooth, new Size(81, 81), 40);
    Mat Tdetails = new Mat();
    Core.subtract(T, Tsmooth, Tdetails);
    Core.multiply(Tdetails, new Scalar(gamma), Tdetails);
    Core.add(Tsmooth, Tdetails, T);
    return T;
}
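A minimal usage sketch, assuming this method lives in the project's transmission-estimation class together with the computeTrans and Filters.GuidedImageFilter helpers it calls (not shown on this page); the file name, air-light values, and parameters below are illustrative only.

Mat hazy = Imgcodecs.imread("hazy.png", Imgcodecs.CV_LOAD_IMAGE_COLOR); // hypothetical input
hazy.convertTo(hazy, CvType.CV_32F);
double[] airlight = {220.0, 225.0, 230.0};           // assumed air-light estimate
Mat T = transEstimate(hazy, 16, airlight, 5.0, 0.5,  // patch size, lambda, initial transmission
        8, 0.02 * 0.02, 1.5);                        // guided-filter radius, eps, detail gain gamma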
Example 2: Saliency
import org.opencv.core.Core; // import the package/class this method depends on
public static Mat Saliency(Mat img) {
    // blur image with a 3x3 or 5x5 Gaussian filter
    Mat gfbgr = new Mat();
    Imgproc.GaussianBlur(img, gfbgr, new Size(3, 3), 3);
    // perform sRGB to CIE Lab color space conversion
    Mat LabIm = new Mat();
    Imgproc.cvtColor(gfbgr, LabIm, Imgproc.COLOR_BGR2Lab);
    // compute Lab average values (note that in the paper this average is taken over the
    // un-blurred original image, but the results are quite similar)
    List<Mat> lab = new ArrayList<>();
    Core.split(LabIm, lab);
    Mat l = lab.get(0);
    l.convertTo(l, CvType.CV_32F);
    Mat a = lab.get(1);
    a.convertTo(a, CvType.CV_32F);
    Mat b = lab.get(2);
    b.convertTo(b, CvType.CV_32F);
    double lm = Core.mean(l).val[0];
    double am = Core.mean(a).val[0];
    double bm = Core.mean(b).val[0];
    // finally compute the saliency map: squared Lab distance to the mean color
    Mat sm = Mat.zeros(l.rows(), l.cols(), l.type());
    Core.subtract(l, new Scalar(lm), l);
    Core.subtract(a, new Scalar(am), a);
    Core.subtract(b, new Scalar(bm), b);
    Core.add(sm, l.mul(l), sm);
    Core.add(sm, a.mul(a), sm);
    Core.add(sm, b.mul(b), sm);
    return sm;
}
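The method scores each pixel by its squared Lab distance from the mean image color (the code appears to follow the frequency-tuned saliency approach of Achanta et al.). A hypothetical display snippet, scaling the CV_32F map into a viewable 8-bit range; the file name is illustrative:

Mat bgr = Imgcodecs.imread("input.png", Imgcodecs.CV_LOAD_IMAGE_COLOR); // hypothetical input
Mat sm = Saliency(bgr);
Core.normalize(sm, sm, 0, 255, Core.NORM_MINMAX); // stretch to [0, 255]
sm.convertTo(sm, CvType.CV_8UC1);                 // 8-bit for display or saving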
Example 3: SimplestColorBalance
import org.opencv.core.Core; // import the package/class this method depends on
/**
 * Simplest Color Balance. Performs color balancing via histogram
 * normalization.
 *
 * @param img input color or gray-scale image
 * @param percent controls the percentage of pixels to clip to white and black (normally, choose 1~10)
 * @return balanced image in CvType.CV_32F
 */
public static Mat SimplestColorBalance(Mat img, int percent) {
    if (percent <= 0)
        percent = 5;
    img.convertTo(img, CvType.CV_32F);
    List<Mat> channels = new ArrayList<>();
    int rows = img.rows(); // number of rows of image
    int cols = img.cols(); // number of columns of image
    int chnls = img.channels(); // number of channels of image
    double halfPercent = percent / 200.0;
    if (chnls == 3) Core.split(img, channels);
    else channels.add(img);
    List<Mat> results = new ArrayList<>();
    for (int i = 0; i < chnls; i++) {
        // find the low and high percentile values (based on the input percentile)
        Mat flat = new Mat();
        channels.get(i).reshape(1, 1).copyTo(flat);
        Core.sort(flat, flat, Core.SORT_ASCENDING);
        double lowVal = flat.get(0, (int) Math.floor(flat.cols() * halfPercent))[0];
        double topVal = flat.get(0, (int) Math.ceil(flat.cols() * (1.0 - halfPercent)))[0];
        // saturate below the low percentile and above the high percentile
        Mat channel = channels.get(i);
        for (int m = 0; m < rows; m++) {
            for (int n = 0; n < cols; n++) {
                if (channel.get(m, n)[0] < lowVal) channel.put(m, n, lowVal);
                if (channel.get(m, n)[0] > topVal) channel.put(m, n, topVal);
            }
        }
        // note: the original normalizes to [0, 127.5], not [0, 255]
        Core.normalize(channel, channel, 0.0, 255.0 / 2, Core.NORM_MINMAX);
        channel.convertTo(channel, CvType.CV_32F);
        results.add(channel);
    }
    Mat outval = new Mat();
    Core.merge(results, outval);
    return outval;
}
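A hypothetical call site (file names illustrative). The method converts its input to CV_32F in place and returns a CV_32F result, so convert back to 8-bit before saving:

Mat img = Imgcodecs.imread("underwater.png", Imgcodecs.CV_LOAD_IMAGE_COLOR); // hypothetical input
Mat balanced = SimplestColorBalance(img, 5);  // halfPercent = 5/200, i.e. clip ~2.5% of pixels at each end
balanced.convertTo(balanced, CvType.CV_8UC3); // back to 8-bit
Imgcodecs.imwrite("balanced.png", balanced);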
Example 4: enhance
import org.opencv.core.Core; // import the package/class this method depends on
public static Mat enhance(Mat image, int blkSize, int patchSize, double lambda, double eps, int krnlSize) {
    image.convertTo(image, CvType.CV_32F);
    // obtain air-light
    double[] airlight = AirlightEstimate.estimate(image, blkSize);
    // obtain coarse transmission map
    double fTrans = 0.5;
    Mat T = TransmissionEstimate.transEstimate(image, patchSize, airlight, lambda, fTrans);
    // refine the transmission map
    Mat gray = new Mat();
    Imgproc.cvtColor(image, gray, Imgproc.COLOR_RGB2GRAY);
    Core.divide(gray, new Scalar(255.0), gray);
    T = Filters.GuidedImageFilter(gray, T, krnlSize, eps);
    // dehaze each channel separately
    List<Mat> bgr = new ArrayList<>();
    Core.split(image, bgr);
    Mat bChannel = dehaze(bgr.get(0), T, airlight[0]);
    //Core.normalize(bChannel, bChannel, 0, 255, Core.NORM_MINMAX);
    Mat gChannel = dehaze(bgr.get(1), T, airlight[1]);
    //Core.normalize(gChannel, gChannel, 0, 255, Core.NORM_MINMAX);
    Mat rChannel = dehaze(bgr.get(2), T, airlight[2]);
    //Core.normalize(rChannel, rChannel, 0, 255, Core.NORM_MINMAX);
    Mat dehazedImg = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), dehazedImg);
    return dehazedImg;
}
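A hypothetical driver, assuming the AirlightEstimate, TransmissionEstimate, Filters, and dehaze helpers come from the same project (they are not shown on this page); the file name and parameter values are illustrative:

Mat hazy = Imgcodecs.imread("hazy.png", Imgcodecs.CV_LOAD_IMAGE_COLOR); // hypothetical input
Mat result = enhance(hazy, 32, 16, 5.0, 0.02 * 0.02, 8); // blkSize, patchSize, lambda, eps, krnlSize
result.convertTo(result, CvType.CV_8UC3);                // back to 8-bit for display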
Example 5: applyCLAHE
import org.opencv.core.Core; // import the package/class this method depends on
private static Mat[] applyCLAHE(Mat img, Mat L) {
    Mat[] result = new Mat[2];
    CLAHE clahe = Imgproc.createCLAHE();
    clahe.setClipLimit(2.0);
    Mat L2 = new Mat();
    clahe.apply(L, L2);
    Mat LabIm2 = new Mat();
    List<Mat> lab = new ArrayList<>();
    Core.split(img, lab);
    // recombine the equalized L with the original a and b channels
    Core.merge(new ArrayList<>(Arrays.asList(L2, lab.get(1), lab.get(2))), LabIm2);
    Mat img2 = new Mat();
    Imgproc.cvtColor(LabIm2, img2, Imgproc.COLOR_Lab2BGR);
    result[0] = img2;
    result[1] = L2;
    return result;
}
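Here img is expected to already be a Lab image and L its lightness channel: the method equalizes L with CLAHE, splices it back in front of the original a and b channels, and converts the result to BGR. A hypothetical call site (file name illustrative):

Mat bgr = Imgcodecs.imread("input.png", Imgcodecs.CV_LOAD_IMAGE_COLOR); // hypothetical input
Mat lab = new Mat();
Imgproc.cvtColor(bgr, lab, Imgproc.COLOR_BGR2Lab);
List<Mat> chans = new ArrayList<>();
Core.split(lab, chans);
Mat[] out = applyCLAHE(lab, chans.get(0)); // out[0]: enhanced BGR image, out[1]: equalized L channel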
Example 6: main
import org.opencv.core.Core; // import the package/class this method depends on
public static void main(String[] args) {
    String imgPath = "src/main/resources/dcp_images/flash/cave-flash.bmp";
    String guidedImgPath = "src/main/resources/dcp_images/flash/cave-noflash.bmp";
    Mat image = Imgcodecs.imread(imgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    new ImShow("image").showImage(image);
    image.convertTo(image, CvType.CV_32F);
    Mat guide = Imgcodecs.imread(guidedImgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    guide.convertTo(guide, CvType.CV_32F);
    List<Mat> img = new ArrayList<>();
    List<Mat> gid = new ArrayList<>();
    Core.split(image, img);
    Core.split(guide, gid);
    int r = 8;
    double eps = 0.02 * 0.02;
    // note: imread returns BGR, so index 0 is actually the blue channel; the q_r/q_g/q_b
    // names are kept from the original, and the merge order below preserves the BGR layout
    Mat q_r = Filters.GuidedImageFilter(img.get(0), gid.get(0), r, eps);
    Mat q_g = Filters.GuidedImageFilter(img.get(1), gid.get(1), r, eps);
    Mat q_b = Filters.GuidedImageFilter(img.get(2), gid.get(2), r, eps);
    Mat q = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(q_r, q_g, q_b)), q);
    q.convertTo(q, CvType.CV_8UC1);
    new ImShow("q").showImage(q);
}
Example 7: main
import org.opencv.core.Core; // import the package/class this method depends on
public static void main(String[] args) {
    String imgPath = "src/main/resources/dcp_images/enhancement/tulips.bmp";
    Mat image = Imgcodecs.imread(imgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    new ImShow("image").showImage(image);
    image.convertTo(image, CvType.CV_32F);
    List<Mat> img = new ArrayList<>();
    Core.split(image, img);
    int r = 16;
    double eps = 0.01;
    // self-guided filtering: each channel serves as its own guide (edge-preserving smoothing)
    Mat q_r = Filters.GuidedImageFilter(img.get(0), img.get(0), r, eps);
    Mat q_g = Filters.GuidedImageFilter(img.get(1), img.get(1), r, eps);
    Mat q_b = Filters.GuidedImageFilter(img.get(2), img.get(2), r, eps);
    Mat q = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(q_r, q_g, q_b)), q);
    q.convertTo(q, CvType.CV_8UC1);
    new ImShow("q").showImage(q);
}
Example 8: applyCLAHE
import org.opencv.core.Core; // import the package/class this method depends on
private static Mat[] applyCLAHE(Mat img, Mat L) {
    Mat[] result = new Mat[2];
    CLAHE clahe = Imgproc.createCLAHE();
    clahe.setClipLimit(2.0);
    Mat L2 = new Mat();
    clahe.apply(L, L2);
    Mat LabIm2 = new Mat();
    List<Mat> lab = new ArrayList<Mat>();
    Core.split(img, lab);
    Core.merge(new ArrayList<Mat>(Arrays.asList(L2, lab.get(1), lab.get(2))), LabIm2);
    Mat img2 = new Mat();
    Imgproc.cvtColor(LabIm2, img2, Imgproc.COLOR_Lab2BGR);
    result[0] = img2;
    result[1] = L2;
    return result;
}
Example 9: leviRedFilter
import org.opencv.core.Core; // import the package/class this method depends on
public void leviRedFilter(Mat input, Mat mask, double threshold) {
    // note: channels is a field of the enclosing class (compare the local variant in Example 10)
    Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2Lab);
    Imgproc.GaussianBlur(input, input, new Size(3, 3), 0);
    Core.split(input, channels);
    // threshold the a* channel: red objects score high on a*
    Imgproc.threshold(channels.get(1), mask, threshold, 255, Imgproc.THRESH_BINARY);
    for (int i = 0; i < channels.size(); i++) {
        channels.get(i).release();
    }
}
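A hypothetical call, assuming the enclosing class declares the channels list used above (for example, private List<Mat> channels = new ArrayList<>();). Since the method converts with COLOR_RGB2Lab, it expects RGB-ordered input, so a BGR image from imread would first need swapping:

Mat frame = Imgcodecs.imread("frame.png", Imgcodecs.CV_LOAD_IMAGE_COLOR); // hypothetical input (BGR)
Imgproc.cvtColor(frame, frame, Imgproc.COLOR_BGR2RGB); // the filter expects RGB
Mat mask = new Mat();
leviRedFilter(frame, mask, 164.0); // 164 matches the RED default in Example 13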
Example 10: leviBlueFilter
import org.opencv.core.Core; // import the package/class this method depends on
public void leviBlueFilter(Mat input, Mat mask) {
    List<Mat> channels = new ArrayList<>();
    Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2Lab);
    Imgproc.GaussianBlur(input, input, new Size(3, 3), 0);
    Core.split(input, channels);
    Imgproc.threshold(channels.get(1), mask, 145, 255, Imgproc.THRESH_BINARY);
    for (int i = 0; i < channels.size(); i++) {
        channels.get(i).release();
    }
}
Example 11: enhanceEachChannel
import org.opencv.core.Core; // import the package/class this method depends on
@SuppressWarnings("unused")
public static Mat enhanceEachChannel(Mat image, int blkSize, int patchSize, double lambda, double eps, int krnlSize) {
    image.convertTo(image, CvType.CV_32F);
    // split image into its three channels
    List<Mat> bgr = new ArrayList<>();
    Core.split(image, bgr);
    Mat bChannel = bgr.get(0);
    Mat gChannel = bgr.get(1);
    Mat rChannel = bgr.get(2);
    // obtain air-light
    double[] airlight = AirlightEstimate.estimate(image, blkSize);
    // obtain a coarse transmission map and refine it for each channel;
    // subtract(T, 1) followed by multiply(T, -1) computes 1 - T
    double fTrans = 0.3;
    Mat T = TransmissionEstimate.transEstimateEachChannel(bChannel, patchSize, airlight[0], lambda, fTrans);
    Core.subtract(T, new Scalar(1.0), T);
    Core.multiply(T, new Scalar(-1.0), T);
    Mat Tb = Filters.GuidedImageFilter(bChannel, T, krnlSize, eps);
    T = TransmissionEstimate.transEstimateEachChannel(gChannel, patchSize, airlight[1], lambda, fTrans);
    Core.subtract(T, new Scalar(1.0), T);
    Core.multiply(T, new Scalar(-1.0), T);
    Mat Tg = Filters.GuidedImageFilter(gChannel, T, krnlSize, eps);
    T = TransmissionEstimate.transEstimateEachChannel(rChannel, patchSize, airlight[2], lambda, fTrans);
    Core.subtract(T, new Scalar(1.0), T);
    Core.multiply(T, new Scalar(-1.0), T);
    Mat Tr = Filters.GuidedImageFilter(rChannel, T, krnlSize, eps);
    // dehaze
    bChannel = dehaze(bChannel, Tb, airlight[0]);
    gChannel = dehaze(gChannel, Tg, airlight[1]);
    rChannel = dehaze(rChannel, Tr, airlight[2]);
    Mat outval = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), outval);
    return outval;
}
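Like Example 4, this relies on project helpers (AirlightEstimate, TransmissionEstimate, Filters, dehaze) that are not shown here, but estimates a separate transmission map per channel. A hypothetical call with illustrative parameters:

Mat hazy = Imgcodecs.imread("hazy.png", Imgcodecs.CV_LOAD_IMAGE_COLOR); // hypothetical input
Mat result = enhanceEachChannel(hazy, 32, 16, 5.0, 0.02 * 0.02, 8); // blkSize, patchSize, lambda, eps, krnlSize
result.convertTo(result, CvType.CV_8UC3);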
Example 12: Saliency
import org.opencv.core.Core; // import the package/class this method depends on
public static Mat Saliency(Mat img) {
    // blur image with a 3x3 or 5x5 Gaussian filter
    Mat gfbgr = new Mat();
    Imgproc.GaussianBlur(img, gfbgr, new Size(3, 3), 3);
    // perform sRGB to CIE Lab color space conversion
    Mat LabIm = new Mat();
    Imgproc.cvtColor(gfbgr, LabIm, Imgproc.COLOR_BGR2Lab);
    // compute Lab average values (note that in the paper this average is taken over the
    // un-blurred original image, but the results are quite similar)
    List<Mat> lab = new ArrayList<Mat>();
    Core.split(LabIm, lab);
    Mat l = lab.get(0);
    l.convertTo(l, CvType.CV_32F);
    Mat a = lab.get(1);
    a.convertTo(a, CvType.CV_32F);
    Mat b = lab.get(2);
    b.convertTo(b, CvType.CV_32F);
    double lm = Core.mean(l).val[0];
    double am = Core.mean(a).val[0];
    double bm = Core.mean(b).val[0];
    // finally compute the saliency map (same computation as Example 2)
    Mat sm = Mat.zeros(l.rows(), l.cols(), l.type());
    Core.subtract(l, new Scalar(lm), l);
    Core.subtract(a, new Scalar(am), a);
    Core.subtract(b, new Scalar(bm), b);
    Core.add(sm, l.mul(l), sm);
    Core.add(sm, a.mul(a), sm);
    Core.add(sm, b.mul(b), sm);
    return sm;
}
Example 13: process
import org.opencv.core.Core; // import the package/class this method depends on
@Override
public void process(Mat input, Mat mask) {
    channels = new ArrayList<>();
    switch (color) {
        case RED:
            if (threshold == -1) {
                threshold = 164;
            }
            // red is isolated on the a* channel of Lab
            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2Lab);
            Imgproc.GaussianBlur(input, input, new Size(3, 3), 0);
            Core.split(input, channels);
            Imgproc.threshold(channels.get(1), mask, threshold, 255, Imgproc.THRESH_BINARY);
            break;
        case BLUE:
            if (threshold == -1) {
                threshold = 145;
            }
            // blue is isolated on the U channel of YUV
            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2YUV);
            Imgproc.GaussianBlur(input, input, new Size(3, 3), 0);
            Core.split(input, channels);
            Imgproc.threshold(channels.get(1), mask, threshold, 255, Imgproc.THRESH_BINARY);
            break;
        case YELLOW:
            if (threshold == -1) {
                threshold = 95;
            }
            // yellow scores low on the U channel, hence the inverted threshold
            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2YUV);
            Imgproc.GaussianBlur(input, input, new Size(3, 3), 0);
            Core.split(input, channels);
            Imgproc.threshold(channels.get(1), mask, threshold, 255, Imgproc.THRESH_BINARY_INV);
            break;
    }
    for (int i = 0; i < channels.size(); i++) {
        channels.get(i).release();
    }
    input.release();
}
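Note that process() releases both the split channels and its input Mat, so callers should pass a disposable copy. A hypothetical driver, assuming the enclosing class holds the color and threshold fields used by the switch above:

Mat frame = Imgcodecs.imread("frame.png", Imgcodecs.CV_LOAD_IMAGE_COLOR); // hypothetical input (BGR)
Imgproc.cvtColor(frame, frame, Imgproc.COLOR_BGR2RGB); // the filter converts from RGB
Mat mask = new Mat();
filter.process(frame, mask); // 'filter' is a hypothetical instance of the enclosing class
// mask is now a binary image selecting the configured color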
Example 14: blkEstimate
import org.opencv.core.Core; // import the package/class this method depends on
public static double blkEstimate(Mat blkIm, double[] airlight, double lambda, double fTrans) {
    double Trans = 0.0;
    double nTrans = Math.floor(1.0 / fTrans * 128);
    double fMinCost = Double.MAX_VALUE;
    int numberOfPixels = blkIm.rows() * blkIm.cols() * blkIm.channels();
    double nCounter = 0.0;
    List<Mat> bgr = new ArrayList<>();
    Core.split(blkIm, bgr);
    while (nCounter < (1.0 - fTrans) * 10) {
        // initial dehazing pass to measure the information loss
        Mat bChannel = bgr.get(0).clone();
        bChannel = preDehaze(bChannel, airlight[0], nTrans);
        Mat gChannel = bgr.get(1).clone();
        gChannel = preDehaze(gChannel, airlight[1], nTrans);
        Mat rChannel = bgr.get(2).clone();
        rChannel = preDehaze(rChannel, airlight[2], nTrans);
        // find the pixels whose values exceed 255 or fall below 0, and
        // accumulate the squared information loss
        double nSumOfLoss = 0.0;
        for (int i = 0; i < bChannel.rows(); i++) {
            for (int j = 0; j < bChannel.cols(); j++) {
                if (bChannel.get(i, j)[0] > 255.0) nSumOfLoss += (bChannel.get(i, j)[0] - 255.0) * (bChannel.get(i, j)[0] - 255.0);
                else if (bChannel.get(i, j)[0] < 0.0) nSumOfLoss += bChannel.get(i, j)[0] * bChannel.get(i, j)[0];
                if (gChannel.get(i, j)[0] > 255.0) nSumOfLoss += (gChannel.get(i, j)[0] - 255.0) * (gChannel.get(i, j)[0] - 255.0);
                else if (gChannel.get(i, j)[0] < 0.0) nSumOfLoss += gChannel.get(i, j)[0] * gChannel.get(i, j)[0];
                if (rChannel.get(i, j)[0] > 255.0) nSumOfLoss += (rChannel.get(i, j)[0] - 255.0) * (rChannel.get(i, j)[0] - 255.0);
                else if (rChannel.get(i, j)[0] < 0.0) nSumOfLoss += rChannel.get(i, j)[0] * rChannel.get(i, j)[0];
            }
        }
        // sum of squared output values
        double nSumOfSquareOuts = Core.sumElems(bChannel.mul(bChannel)).val[0] + Core.sumElems(gChannel.mul(gChannel)).val[0] + Core.sumElems(rChannel.mul(rChannel)).val[0];
        // sum of output values
        double nSumOfOuts = Core.sumElems(bChannel).val[0] + Core.sumElems(gChannel).val[0] + Core.sumElems(rChannel).val[0];
        // mean value of the block image
        double fMean = nSumOfOuts / numberOfPixels;
        // cost function: information-loss penalty minus output variance (contrast)
        double fCost = lambda * nSumOfLoss / numberOfPixels - (nSumOfSquareOuts / numberOfPixels - fMean * fMean);
        // keep the transmission with the minimum cost
        if (nCounter == 0 || fMinCost > fCost) {
            fMinCost = fCost;
            Trans = fTrans;
        }
        fTrans = fTrans + 0.1;
        nTrans = 1.0 / fTrans * 128.0;
        nCounter = nCounter + 1;
    }
    return Trans;
}
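Reading the loop off the code, the cost evaluated for each candidate transmission t over a block of N pixel samples is

    E(t) = \lambda \cdot \frac{L_{loss}(t)}{N} - \left( \frac{1}{N} \sum J_t^2 - \bar{J}_t^2 \right)

where J_t is the block tentatively dehazed with transmission t, the parenthesized term is the variance (contrast) of J_t, and L_{loss}(t) sums the squared overshoot of values clipped outside [0, 255]. The loop scans t = fTrans, fTrans + 0.1, ... and returns the t with the lowest cost, trading contrast gain against clipping loss.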
Example 15: enhance
import org.opencv.core.Core; // import the package/class this method depends on
public static Mat enhance(Mat image, int r, double eps, double eta, double lambda, double krnlRatio) {
    image.convertTo(image, CvType.CV_32F);
    // extract each color channel
    List<Mat> bgr = new ArrayList<>();
    Core.split(image, bgr);
    Mat bChannel = bgr.get(0);
    Mat gChannel = bgr.get(1);
    Mat rChannel = bgr.get(2);
    int m = rChannel.rows();
    int n = rChannel.cols();
    // global adaptation
    List<Mat> list = globalAdaptation(bChannel, gChannel, rChannel, m, n);
    Mat Lw = list.get(0);
    Mat Lg = list.get(1);
    // local adaptation
    Mat Hg = localAdaptation(Lg, m, n, r, eps, krnlRatio);
    Lg.convertTo(Lg, CvType.CV_32F);
    // process
    Mat alpha = new Mat(m, n, rChannel.type());
    Core.divide(Lg, new Scalar(Core.minMaxLoc(Lg).maxVal / eta), alpha);
    //Core.multiply(alpha, new Scalar(eta), alpha);
    Core.add(alpha, new Scalar(1.0), alpha);
    //alpha = adjustment(alpha, 1.25);
    Mat Lg_ = new Mat(m, n, rChannel.type());
    Core.add(Lg, new Scalar(1.0 / 255.0), Lg_);
    Core.log(Lg_, Lg_);
    double beta = Math.exp(Core.sumElems(Lg_).val[0] / (m * n)) * lambda;
    Mat Lout = new Mat(m, n, rChannel.type());
    Core.divide(Lg, Hg, Lout);
    Core.add(Lout, new Scalar(beta), Lout);
    Core.log(Lout, Lout);
    Core.normalize(alpha.mul(Lout), Lout, 0, 255, Core.NORM_MINMAX);
    Mat gain = obtainGain(Lout, Lw, m, n);
    // apply the gain to each channel and rescale to [0, 255]
    Core.divide(rChannel.mul(gain), new Scalar(Core.minMaxLoc(rChannel).maxVal / 255.0), rChannel); // red channel
    Core.divide(gChannel.mul(gain), new Scalar(Core.minMaxLoc(gChannel).maxVal / 255.0), gChannel); // green channel
    Core.divide(bChannel.mul(gain), new Scalar(Core.minMaxLoc(bChannel).maxVal / 255.0), bChannel); // blue channel
    // merge the three color channels into one image
    Mat outval = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), outval);
    outval.convertTo(outval, CvType.CV_8UC1);
    return outval;
}
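A hypothetical call, assuming globalAdaptation, localAdaptation, and obtainGain are helpers from the same project (not shown on this page); the file name and parameter values below are illustrative guesses, not recommended settings:

Mat dark = Imgcodecs.imread("lowlight.png", Imgcodecs.CV_LOAD_IMAGE_COLOR); // hypothetical input
Mat result = enhance(dark, 8, 0.02 * 0.02, 36.0, 0.25, 0.01); // r, eps, eta, lambda, krnlRatio
new ImShow("result").showImage(result); // ImShow helper as used in Examples 6 and 7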