This article collects typical usage examples of the Java method org.opencv.core.Mat.convertTo. If you are unsure what Mat.convertTo does, how to call it, or what it looks like in real code, the curated examples below may help. You can also read more about its enclosing class, org.opencv.core.Mat.
The following shows 15 code examples of Mat.convertTo, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
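Before the examples, a minimal self-contained sketch of the method itself may help: Mat.convertTo(dst, rtype, alpha, beta) computes dst = saturate_cast&lt;rtype&gt;(alpha * src + beta), changing only the element depth (the channel count is preserved); passing -1 as rtype keeps the source depth. The class name and file path in this sketch are placeholders, not part of any example on this page, and the OpenCV native library is assumed to be loaded.

import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;

public class ConvertToDemo { // hypothetical demo class
    public static void main(String[] args) {
        Mat src = Imgcodecs.imread("input.jpg"); // 8-bit BGR image (placeholder path)
        Mat asFloat = new Mat();
        // convert to 32-bit float and rescale values from [0, 255] to [0, 1]
        src.convertTo(asFloat, CvType.CV_32F, 1.0 / 255.0);
        Mat back = new Mat();
        // convert back to 8-bit; values are scaled by 255 and saturated to [0, 255]
        asFloat.convertTo(back, CvType.CV_8U, 255.0);
    }
}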
Example 1: transEstimate
import org.opencv.core.Mat; // import the package/class the method depends on
public static Mat transEstimate(Mat img, int patchSz, double[] airlight, double lambda, double fTrans,
        int r, double eps, double gamma) {
    int rows = img.rows();
    int cols = img.cols();
    List<Mat> bgr = new ArrayList<>();
    Core.split(img, bgr);
    int type = bgr.get(0).type();
    // calculate the transmission map
    Mat T = computeTrans(img, patchSz, rows, cols, type, airlight, lambda, fTrans);
    // refine the transmission map
    img.convertTo(img, CvType.CV_8UC1);
    Mat gray = new Mat();
    Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
    gray.convertTo(gray, CvType.CV_32F);
    Core.divide(gray, new Scalar(255.0), gray);
    T = Filters.GuidedImageFilter(gray, T, r, eps);
    Mat Tsmooth = new Mat();
    Imgproc.GaussianBlur(T, Tsmooth, new Size(81, 81), 40);
    Mat Tdetails = new Mat();
    Core.subtract(T, Tsmooth, Tdetails);
    Core.multiply(Tdetails, new Scalar(gamma), Tdetails);
    Core.add(Tsmooth, Tdetails, T);
    return T;
}
Example 2: Saliency
import org.opencv.core.Mat; // import the package/class the method depends on
public static Mat Saliency(Mat img) {
    // blur the image with a 3x3 or 5x5 Gaussian filter
    Mat gfbgr = new Mat();
    Imgproc.GaussianBlur(img, gfbgr, new Size(3, 3), 3);
    // perform sRGB to CIE Lab color space conversion
    Mat LabIm = new Mat();
    Imgproc.cvtColor(gfbgr, LabIm, Imgproc.COLOR_BGR2Lab);
    // compute Lab average values (note that in the paper this average is found from the
    // un-blurred original image, but the results are quite similar)
    List<Mat> lab = new ArrayList<>();
    Core.split(LabIm, lab);
    Mat l = lab.get(0);
    l.convertTo(l, CvType.CV_32F);
    Mat a = lab.get(1);
    a.convertTo(a, CvType.CV_32F);
    Mat b = lab.get(2);
    b.convertTo(b, CvType.CV_32F);
    double lm = Core.mean(l).val[0];
    double am = Core.mean(a).val[0];
    double bm = Core.mean(b).val[0];
    // finally compute the saliency map
    Mat sm = Mat.zeros(l.rows(), l.cols(), l.type());
    Core.subtract(l, new Scalar(lm), l);
    Core.subtract(a, new Scalar(am), a);
    Core.subtract(b, new Scalar(bm), b);
    Core.add(sm, l.mul(l), sm);
    Core.add(sm, a.mul(a), sm);
    Core.add(sm, b.mul(b), sm);
    return sm;
}
Example 3: LuminanceWeight
import org.opencv.core.Mat; // import the package/class the method depends on
public static Mat LuminanceWeight(Mat img, Mat L) {
    Mat bCnl = new Mat();
    Core.extractChannel(img, bCnl, 0);
    bCnl.convertTo(bCnl, CvType.CV_32F);
    Mat gCnl = new Mat();
    Core.extractChannel(img, gCnl, 1);
    gCnl.convertTo(gCnl, CvType.CV_32F);
    Mat rCnl = new Mat();
    Core.extractChannel(img, rCnl, 2);
    rCnl.convertTo(rCnl, CvType.CV_32F);
    Mat lum = new Mat(L.rows(), L.cols(), L.type());
    for (int i = 0; i < L.rows(); i++) {
        for (int j = 0; j < L.cols(); j++) {
            double data = Math.sqrt((Math.pow(bCnl.get(i, j)[0] / 255.0 - L.get(i, j)[0], 2.0) +
                    Math.pow(gCnl.get(i, j)[0] / 255.0 - L.get(i, j)[0], 2.0) +
                    Math.pow(rCnl.get(i, j)[0] / 255.0 - L.get(i, j)[0], 2.0)) / 3);
            lum.put(i, j, data);
        }
    }
    return lum;
}
Example 4: calWeight
import org.opencv.core.Mat; // import the package/class the method depends on
private static Mat calWeight(Mat img, Mat L) {
    Core.divide(L, new Scalar(255.0), L);
    L.convertTo(L, CvType.CV_32F);
    // calculate the Laplacian contrast weight
    Mat WL = WeightCalculate.LaplacianContrast(L);
    WL.convertTo(WL, L.type());
    // calculate the local contrast weight
    Mat WC = WeightCalculate.LocalContrast(L);
    WC.convertTo(WC, L.type());
    // calculate the saliency weight
    Mat WS = WeightCalculate.Saliency(img);
    WS.convertTo(WS, L.type());
    // calculate the exposedness weight
    Mat WE = WeightCalculate.Exposedness(L);
    WE.convertTo(WE, L.type());
    // sum
    Mat weight = WL.clone();
    Core.add(weight, WC, weight);
    Core.add(weight, WS, weight);
    Core.add(weight, WE, weight);
    return weight;
}
Example 5: enhance
import org.opencv.core.Mat; // import the package/class the method depends on
public static Mat enhance(Mat image, int blkSize, int patchSize, double lambda, double gamma, int r, double eps, int level) {
    image.convertTo(image, CvType.CV_32F);
    // image decomposition
    Mat[] decomposed = ImgDecompose.illuRefDecompose(image);
    Mat AL = decomposed[0];
    Mat RL = decomposed[1];
    // for RL
    RL = Filters.SimplestColorBalance(RL, colorBalanceRatio);
    // calculate the air-light
    double[] airlight = AirlightEstimate.estimate(AL, blkSize);
    // estimate the transmission map
    double fTrans = 0.6;
    Mat trans = TransmissionEstimate.transEstimate(AL, patchSize, airlight, lambda, fTrans, r, eps, gamma);
    AL = dehazeProcess(AL, trans, airlight);
    // calculate the weights
    Mat w1 = calWeight(AL);
    Mat w2 = calWeight(RL);
    // fuse
    return pyramidFuse(w1, w2, AL, RL, level);
}
Example 6: calWeight
import org.opencv.core.Mat; // import the package/class the method depends on
private static Mat calWeight(Mat img) {
    Mat L = new Mat();
    img.convertTo(img, CvType.CV_8UC1);
    Imgproc.cvtColor(img, L, Imgproc.COLOR_BGR2GRAY);
    L.convertTo(L, CvType.CV_32F);
    Core.divide(L, new Scalar(255.0), L);
    // calculate the luminance weight
    Mat WC = FeatureWeight.LuminanceWeight(img, L);
    WC.convertTo(WC, L.type());
    // calculate the saliency weight
    Mat WS = FeatureWeight.Saliency(img);
    WS.convertTo(WS, L.type());
    // calculate the exposedness weight
    Mat WE = FeatureWeight.Exposedness(L);
    WE.convertTo(WE, L.type());
    // sum
    Mat weight = WC.clone();
    Core.add(weight, WS, weight);
    Core.add(weight, WE, weight);
    return weight;
}
Example 7: calWeight
import org.opencv.core.Mat; // import the package/class the method depends on
private static Mat calWeight(Mat img, Mat L) {
    Core.divide(L, new Scalar(255.0), L);
    L.convertTo(L, CvType.CV_32F);
    // calculate the Laplacian contrast weight
    Mat WL = FeatureWeight.LaplacianContrast(L);
    WL.convertTo(WL, L.type());
    // calculate the local contrast weight
    Mat WC = FeatureWeight.LocalContrast(L);
    WC.convertTo(WC, L.type());
    // calculate the saliency weight
    Mat WS = FeatureWeight.Saliency(img);
    WS.convertTo(WS, L.type());
    // calculate the exposedness weight
    Mat WE = FeatureWeight.Exposedness(L);
    WE.convertTo(WE, L.type());
    // sum
    Mat weight = WL.clone();
    Core.add(weight, WC, weight);
    Core.add(weight, WS, weight);
    Core.add(weight, WE, weight);
    return weight;
}
Example 8: main
import org.opencv.core.Mat; // import the package/class the method depends on
public static void main(String[] args) {
    String imgPath = "src/main/resources/dcp_images/flash/cave-flash.bmp";
    String guidedImgPath = "src/main/resources/dcp_images/flash/cave-noflash.bmp";
    Mat image = Imgcodecs.imread(imgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    new ImShow("image").showImage(image);
    image.convertTo(image, CvType.CV_32F);
    Mat guide = Imgcodecs.imread(guidedImgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    guide.convertTo(guide, CvType.CV_32F);
    List<Mat> img = new ArrayList<>();
    List<Mat> gid = new ArrayList<>();
    Core.split(image, img);
    Core.split(guide, gid);
    int r = 8;
    double eps = 0.02 * 0.02;
    Mat q_r = Filters.GuidedImageFilter(img.get(0), gid.get(0), r, eps);
    Mat q_g = Filters.GuidedImageFilter(img.get(1), gid.get(1), r, eps);
    Mat q_b = Filters.GuidedImageFilter(img.get(2), gid.get(2), r, eps);
    Mat q = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(q_r, q_g, q_b)), q);
    q.convertTo(q, CvType.CV_8UC1);
    new ImShow("q").showImage(q);
}
Example 9: enhanceImageBrightness
import org.opencv.core.Mat; // import the package/class the method depends on
public void enhanceImageBrightness() {
    double alpha = 1; // change alpha to 2 for a brighter result
    double beta = 50;
    String fileName = "cat.jpg";
    Mat source = Imgcodecs.imread(fileName);
    Mat destination = new Mat(source.rows(), source.cols(), source.type());
    // dst = saturate(alpha * src + beta); rtype -1 keeps the source depth
    source.convertTo(destination, -1, alpha, beta);
    Imgcodecs.imwrite("brighterCat.jpg", destination);
}
Developer: PacktPublishing; Project: Machine-Learning-End-to-Endguide-for-Java-developers; Lines: 12; Source: OpenCVNonMavenExamples.java
Example 10: unevenLightCompensate
import org.opencv.core.Mat; // import the package/class the method depends on
/**
 * The main idea is:
 * 1. Compute the average gray level of the source image I and record its rows and cols.
 * 2. Split the image into N*M blocks of a given size and compute each block's mean, giving a block brightness matrix D.
 * 3. Subtract the source image's average gray level from each element of D, giving a block brightness difference matrix E.
 * 4. Resize E to the source image size with bicubic interpolation, giving a brightness distribution matrix R.
 * 5. The corrected image is result = I - R.
 * A usage sketch follows the method.
 * @Title: unevenLightCompensate
 * @Description: uneven-illumination compensation
 * @param image
 * @param blockSize
 * void
 * @throws
 */
public static void unevenLightCompensate(Mat image, int blockSize) {
    if (image.channels() == 3) {
        Imgproc.cvtColor(image, image, Imgproc.COLOR_RGB2GRAY); // the original used the literal 7, which is COLOR_RGB2GRAY
    }
    double average = Core.mean(image).val[0];
    Scalar scalar = new Scalar(average);
    int rowsNew = (int) Math.ceil((double) image.rows() / (double) blockSize);
    int colsNew = (int) Math.ceil((double) image.cols() / (double) blockSize);
    Mat blockImage = Mat.zeros(rowsNew, colsNew, CvType.CV_32FC1);
    for (int i = 0; i < rowsNew; i++) {
        for (int j = 0; j < colsNew; j++) {
            int rowmin = i * blockSize;
            int rowmax = (i + 1) * blockSize;
            if (rowmax > image.rows()) rowmax = image.rows();
            int colmin = j * blockSize;
            int colmax = (j + 1) * blockSize;
            if (colmax > image.cols()) colmax = image.cols();
            Range rangeRow = new Range(rowmin, rowmax);
            Range rangeCol = new Range(colmin, colmax);
            Mat imageROI = new Mat(image, rangeRow, rangeCol);
            double temaver = Core.mean(imageROI).val[0];
            blockImage.put(i, j, temaver);
        }
    }
    Core.subtract(blockImage, scalar, blockImage);
    Mat blockImage2 = new Mat();
    Imgproc.resize(blockImage, blockImage2, image.size(), 0, 0, Imgproc.INTER_CUBIC);
    Mat image2 = new Mat();
    image.convertTo(image2, CvType.CV_32FC1);
    Mat dst = new Mat();
    Core.subtract(image2, blockImage2, dst);
    dst.convertTo(image, CvType.CV_8UC1);
}
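A minimal usage sketch for unevenLightCompensate above, to be placed in a main method of the same class; the file paths and the block size of 32 are placeholders, and the OpenCV native library is assumed to be loaded.

Mat img = Imgcodecs.imread("input.jpg", Imgcodecs.CV_LOAD_IMAGE_COLOR);
unevenLightCompensate(img, 32); // corrects img in place; the result is a CV_8UC1 gray image
Imgcodecs.imwrite("compensated.jpg", img);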
Example 11: subspaceReconstruct
import org.opencv.core.Mat; // import the package/class the method depends on
public static Mat subspaceReconstruct(Mat W, Mat mean, Mat src) {
    int n = src.rows();
    int d = src.cols();
    Mat X = new Mat();
    Mat Y = new Mat();
    src.convertTo(Y, W.type());
    // X = Y * W^T (the gemm flag 2 is Core.GEMM_2_T, i.e. transpose the second operand)
    Core.gemm(Y, W, 1.0, new Mat(), 0.0, X, Core.GEMM_2_T);
    if (!mean.empty()) {
        for (int i = 0; i < n; i++) {
            Mat r_i = X.row(i);
            Core.add(r_i, mean.reshape(1, 1), r_i);
        }
    }
    return X;
}
Example 12: gammaAdjust
import org.opencv.core.Mat; // import the package/class the method depends on
/**
 * @Title: gammaAdjust
 * @Description: gamma correction
 * @param grayImg
 * @return
 * Mat
 * @throws
 */
public static Mat gammaAdjust(Mat grayImg) {
    Mat X = new Mat();
    grayImg.convertTo(X, CvType.CV_32FC1);
    Mat I = new Mat();
    float gamma = 1 / 2.2f;
    Core.pow(X, gamma, I);
    Mat result = norm_0_255(I);
    return result;
}
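A minimal usage sketch for gammaAdjust above, to be placed in a main method of the same class; the file paths are placeholders, and norm_0_255 is the rescaling helper the method already references.

Mat gray = Imgcodecs.imread("input.jpg", Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
Mat corrected = gammaAdjust(gray); // pixel values are raised to the power 1/2.2, then rescaled by norm_0_255
Imgcodecs.imwrite("gamma_corrected.jpg", corrected);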
Example 13: main
import org.opencv.core.Mat; // import the package/class the method depends on
public static void main(String[] args) {
    String imgPath = "src/main/resources/haze_images/canon_2.jpg";
    Mat image = Imgcodecs.imread(imgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    new ImShow("org-image").showImage(image);
    Mat result = OptimizedContrastEnhance.enhance(image, blkSize, patchSize, lambda, eps, krnlSize);
    result.convertTo(result, CvType.CV_8UC1);
    new ImShow("dehaze-image").showImage(result);
}
Example 14: main
import org.opencv.core.Mat; // import the package/class the method depends on
public static void main(String[] args) {
    String imgPath = "src/main/resources/underwater_images/underwater_scene.jpg";
    Mat image = Imgcodecs.imread(imgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    new ImShow("original").showImage(image); // show the input image
    Mat fusion = RemoveBackScatter.enhance(image, blkSize, patchSize, lambda, gamma, r, eps, level);
    fusion.convertTo(fusion, CvType.CV_8UC1);
    new ImShow("fusion").showImage(fusion); // show the fusion result
}
Example 15: main
import org.opencv.core.Mat; // import the package/class the method depends on
public static void main(String[] args) {
    String imgPath = "src/main/resources/underwater_images/underwater_scene.jpg";
    Mat image = Imgcodecs.imread(imgPath, Imgcodecs.CV_LOAD_IMAGE_COLOR);
    new ImShow("original").showImage(image);
    Mat fusion = FusionEnhance.enhance(image, level);
    fusion.convertTo(fusion, CvType.CV_8UC1);
    new ImShow("fusion").showImage(fusion);
}