This article collects typical usage examples of the Core.mean method from the Java class org.opencv.core.Core: what Core.mean does, how to call it, and how it is used in real projects. The curated examples below should help, and you can also explore other usages of the containing class org.opencv.core.Core.
Three code examples of Core.mean are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: Saliency
import org.opencv.core.Core; // import the package/class this method depends on
public static Mat Saliency(Mat img) {
    // blur image with a 3x3 or 5x5 Gaussian filter
    Mat gfbgr = new Mat();
    Imgproc.GaussianBlur(img, gfbgr, new Size(3, 3), 3);
    // Perform sRGB to CIE Lab color space conversion
    Mat LabIm = new Mat();
    Imgproc.cvtColor(gfbgr, LabIm, Imgproc.COLOR_BGR2Lab);
    // Compute Lab average values (note that in the paper this average is found from the
    // un-blurred original image, but the results are quite similar)
    List<Mat> lab = new ArrayList<>();
    Core.split(LabIm, lab);
    Mat l = lab.get(0);
    l.convertTo(l, CvType.CV_32F);
    Mat a = lab.get(1);
    a.convertTo(a, CvType.CV_32F);
    Mat b = lab.get(2);
    b.convertTo(b, CvType.CV_32F);
    double lm = Core.mean(l).val[0];
    double am = Core.mean(a).val[0];
    double bm = Core.mean(b).val[0];
    // Finally compute the saliency map
    Mat sm = Mat.zeros(l.rows(), l.cols(), l.type());
    Core.subtract(l, new Scalar(lm), l);
    Core.subtract(a, new Scalar(am), a);
    Core.subtract(b, new Scalar(bm), b);
    Core.add(sm, l.mul(l), sm);
    Core.add(sm, a.mul(a), sm);
    Core.add(sm, b.mul(b), sm);
    return sm;
}
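A minimal usage sketch for this example, assuming the OpenCV native library has already been loaded (for instance via System.loadLibrary(Core.NATIVE_LIBRARY_NAME)); the file names are placeholders and Imgcodecs refers to org.opencv.imgcodecs.Imgcodecs:

Mat img = Imgcodecs.imread("input.jpg");                      // 8-bit BGR image; "input.jpg" is a placeholder path
Mat saliency = Saliency(img);                                 // CV_32F map of squared Lab distances to the channel means
Core.normalize(saliency, saliency, 0, 255, Core.NORM_MINMAX); // stretch values to the 0-255 range
saliency.convertTo(saliency, CvType.CV_8UC1);                 // convert to 8-bit for saving or display
Imgcodecs.imwrite("saliency.png", saliency);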
Example 2: dehazeProcess
import org.opencv.core.Core; // import the package/class this method depends on
private static Mat dehazeProcess(Mat img, Mat trans, double[] airlight) {
    Mat balancedImg = Filters.SimplestColorBalance(img, 5);
    Mat bCnl = new Mat();
    Core.extractChannel(balancedImg, bCnl, 0);
    Mat gCnl = new Mat();
    Core.extractChannel(balancedImg, gCnl, 1);
    Mat rCnl = new Mat();
    Core.extractChannel(balancedImg, rCnl, 2);
    // get the mean value of each channel
    double bMean = Core.mean(bCnl).val[0];
    double gMean = Core.mean(gCnl).val[0];
    double rMean = Core.mean(rCnl).val[0];
    // get a transmission map for each channel, scaled by the channel's mean
    double maxMean = Math.max(bMean, Math.max(gMean, rMean));
    Mat Tb = trans.clone();
    Core.multiply(Tb, new Scalar(maxMean / bMean * 0.8), Tb);
    Mat Tg = trans.clone();
    Core.multiply(Tg, new Scalar(maxMean / gMean * 0.9), Tg);
    Mat Tr = trans.clone();
    Core.multiply(Tr, new Scalar(maxMean / rMean * 0.8), Tr);
    // dehaze each channel by the formula J = (I - A) / t + A
    // blue channel
    Mat bChannel = new Mat();
    Core.subtract(bCnl, new Scalar(airlight[0]), bChannel);
    Core.divide(bChannel, Tb, bChannel);
    Core.add(bChannel, new Scalar(airlight[0]), bChannel);
    // green channel
    Mat gChannel = new Mat();
    Core.subtract(gCnl, new Scalar(airlight[1]), gChannel);
    Core.divide(gChannel, Tg, gChannel);
    Core.add(gChannel, new Scalar(airlight[1]), gChannel);
    // red channel
    Mat rChannel = new Mat();
    Core.subtract(rCnl, new Scalar(airlight[2]), rChannel);
    Core.divide(rChannel, Tr, rChannel);
    Core.add(rChannel, new Scalar(airlight[2]), rChannel);
    Mat dehazed = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), dehazed);
    return dehazed;
}
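The per-channel arithmetic above is the standard haze-removal recovery J = (I - A) / t + A, with a slightly different transmission scaling per channel. A hypothetical call sketch from within the same class (the method is private), assuming a transmission map and an airlight have already been estimated; estimateTransmission and estimateAirlight are placeholder names and are not part of this snippet:

Mat hazy = Imgcodecs.imread("hazy.jpg");            // placeholder input path
Mat trans = estimateTransmission(hazy);             // hypothetical helper: single-channel map, same size as hazy, values in (0, 1]
double[] airlight = estimateAirlight(hazy);         // hypothetical helper: {B, G, R} atmospheric light estimate
Mat dehazed = dehazeProcess(hazy, trans, airlight);
dehazed.convertTo(dehazed, CvType.CV_8UC3);         // the channel math runs in floating point, so convert back to 8-bit
Imgcodecs.imwrite("dehazed.png", dehazed);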
Example 3: unevenLightCompensate
import org.opencv.core.Core; // import the package/class this method depends on
/**
 * Algorithm outline:
 * 1. Compute the average gray level of the source image I and record its rows and cols.
 * 2. Divide the image into N*M blocks of the given size and compute the mean of each block,
 *    giving the block brightness matrix D.
 * 3. Subtract the source image's average gray level from each element of D, giving the
 *    block brightness difference matrix E.
 * 4. Resize E to the size of the source image with bicubic interpolation, giving the
 *    brightness distribution matrix R.
 * 5. The corrected image is result = I - R.
 *
 * @Title: unevenLightCompensate
 * @Description: compensation for uneven illumination
 * @param image the input image, corrected in place (converted to grayscale if it has 3 channels)
 * @param blockSize the block size in pixels
 */
public static void unevenLightCompensate(Mat image, int blockSize) {
    // convert a color image to grayscale (imread loads images in BGR order)
    if (image.channels() == 3) {
        Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2GRAY);
    }
    // 1. average gray level of the whole image
    double average = Core.mean(image).val[0];
    Scalar scalar = new Scalar(average);
    // 2. per-block mean brightness
    int rowsNew = (int) Math.ceil((double) image.rows() / (double) blockSize);
    int colsNew = (int) Math.ceil((double) image.cols() / (double) blockSize);
    Mat blockImage = Mat.zeros(rowsNew, colsNew, CvType.CV_32FC1);
    for (int i = 0; i < rowsNew; i++) {
        for (int j = 0; j < colsNew; j++) {
            int rowmin = i * blockSize;
            int rowmax = Math.min((i + 1) * blockSize, image.rows());
            int colmin = j * blockSize;
            int colmax = Math.min((j + 1) * blockSize, image.cols());
            Range rangeRow = new Range(rowmin, rowmax);
            Range rangeCol = new Range(colmin, colmax);
            Mat imageROI = new Mat(image, rangeRow, rangeCol);
            double temaver = Core.mean(imageROI).val[0];
            blockImage.put(i, j, temaver);
        }
    }
    // 3. brightness difference of each block relative to the global average
    Core.subtract(blockImage, scalar, blockImage);
    // 4. interpolate the block differences up to the full image size (bicubic)
    Mat blockImage2 = new Mat();
    Imgproc.resize(blockImage, blockImage2, image.size(), 0, 0, Imgproc.INTER_CUBIC);
    // 5. subtract the brightness distribution from the image and convert back to 8-bit
    Mat image2 = new Mat();
    image.convertTo(image2, CvType.CV_32FC1);
    Mat dst = new Mat();
    Core.subtract(image2, blockImage2, dst);
    dst.convertTo(image, CvType.CV_8UC1);
}
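A minimal usage sketch, assuming the native library is loaded and with placeholder file names; the method rewrites the Mat in place with the compensated 8-bit grayscale result:

Mat image = Imgcodecs.imread("document.jpg");   // color or grayscale input; converted to grayscale internally
unevenLightCompensate(image, 32);               // a block size of 32 px is an illustrative choice, not a fixed requirement
Imgcodecs.imwrite("compensated.png", image);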