本文整理汇总了Java中org.opencv.core.Mat.clone方法的典型用法代码示例。如果您正苦于以下问题:Java Mat.clone方法的具体用法?Java Mat.clone怎么用?Java Mat.clone使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.opencv.core.Mat
的用法示例。
在下文中一共展示了Mat.clone方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: scaleTemplateMatch
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Matches a template against a scene at progressively smaller template scales
 * and returns the best-scoring match found.
 *
 * @param scene       image to search in
 * @param templ       template to search for (not modified; a clone is scaled)
 * @param method      matching method forwarded to {@code match}
 * @param scaleFactor per-iteration scale multiplier (expected &lt; 1.0 to shrink)
 * @return the best {@code MatchResult}, with {@code scaleFactor} set to the scale
 *         the template had when that result was produced; null if the loop never ran
 */
public MatchResult scaleTemplateMatch(Mat scene, Mat templ, Method method, double scaleFactor){
	int tw = templ.width();
	// The first iteration matches the UNSCALED template, so its scale is 1.0.
	// (The original initialized this to scaleFactor, recording the scale one
	// step too early for every result.)
	double currScaleFactor = 1.0;
	MatchResult bestScore = null;
	// Shrink the template each pass; stop once it drops below 25% of its original width.
	for(Mat img = templ.clone(); img.width() > tw*0.25;
			CvProcessing.resize(img, scaleFactor)){
		MatchResult currResult = match(scene, img, method, img);
		if(bestScore == null || bestScore.maxVal < currResult.maxVal){
			bestScore = currResult;
			bestScore.scaleFactor = currScaleFactor;
		}
		currScaleFactor *= scaleFactor;
	}
	return bestScore;
}
示例2: convertColorMat
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Converts a matrix from one color space to another by applying the chain of
 * cvtColor conversion steps declared by the input color space.
 *
 * @param in       input matrix; returned as-is (no copy) when spaceIn == spaceOut
 * @param spaceIn  input colorspace
 * @param spaceOut output colorspace
 * @return a new matrix in the output colorspace (or {@code in} itself if no conversion needed)
 * @throws IllegalArgumentException if no conversion path exists or a conversion step fails
 */
public static Mat convertColorMat(Mat in, ColorSpace spaceIn, ColorSpace spaceOut) {
	if (spaceIn == spaceOut)
		return in;
	if (!spaceIn.canConvertTo(spaceOut))
		throw new IllegalArgumentException("Cannot convert color to the desired color space.");
	Mat output = in.clone();
	// Conversion steps are encoded as flat triples: (cvtColor code, input dims, output dims).
	// Hoisted: the original re-fetched this array on every loop iteration.
	final int[] steps = spaceIn.getConversionsTo(spaceOut);
	try {
		for (int i = 0; i < steps.length; i += 3) {
			int conversion = steps[i];
			int inputDim = steps[i + 1]; // NOTE(review): read but never used by the cvtColor call — confirm intent
			int outputDim = steps[i + 2];
			Imgproc.cvtColor(output, output, conversion, outputDim);
		}
	} catch (Exception e) {
		// Rethrow with the underlying cause preserved instead of discarding it.
		throw new IllegalArgumentException("Cannot convert color to the desired color space.", e);
	}
	return output;
}
示例3: getContourArea
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Finds the bounding rectangles of all external contours in the image whose
 * area exceeds 300 px. The input matrix is not modified (contour detection
 * runs on a clone).
 */
private static ArrayList<Rect> getContourArea(Mat mat) {
	final double areaThreshold = 300;
	Mat hierarchy = new Mat();
	// findContours modifies its input image, so work on a copy.
	Mat work = mat.clone();
	List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
	Imgproc.findContours(work, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
	ArrayList<Rect> boxes = new ArrayList<Rect>();
	for (MatOfPoint contour : contours) {
		if (Imgproc.contourArea(contour) > areaThreshold) {
			boxes.add(Imgproc.boundingRect(contour));
		}
	}
	return boxes;
}
示例4: globalAdaptation
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Computes the world luminance map Lw (weighted sum of the color channels)
 * and its globally tone-adapted version Lg.
 *
 * NOTE: b, g and r are scaled IN PLACE by bParam/gParam/rParam — this side
 * effect is visible to the caller.
 *
 * @param b    blue channel (modified in place)
 * @param g    green channel (modified in place)
 * @param r    red channel (modified in place)
 * @param rows image height in pixels
 * @param cols image width in pixels
 * @return list of two Mats: [Lw, Lg]
 */
private static List<Mat> globalAdaptation(Mat b, Mat g, Mat r, int rows, int cols) {
	// Calculate Lw & maximum of Lw
	Mat Lw = new Mat(rows, cols, r.type());
	Core.multiply(r, new Scalar(rParam), r);
	Core.multiply(g, new Scalar(gParam), g);
	Core.multiply(b, new Scalar(bParam), b);
	Core.add(r, g, Lw);
	Core.add(Lw, b, Lw);
	double LwMax = Core.minMaxLoc(Lw).maxVal; // the maximum luminance value
	// Calculate log-average luminance and get global adaptation result
	Mat Lw_ = Lw.clone();
	Core.add(Lw_, new Scalar(0.001), Lw_); // small epsilon so log(0) cannot occur
	Core.log(Lw_, Lw_);
	// exp(mean(log(Lw))) == geometric mean of the luminance
	double LwAver = Math.exp(Core.sumElems(Lw_).val[0] / (rows * cols));
	Mat Lg = Lw.clone();
	// Lg = log(Lw/LwAver + 1) / log(LwMax/LwAver + 1)
	Core.divide(Lg, new Scalar(LwAver), Lg);
	Core.add(Lg, new Scalar(1.0), Lg);
	Core.log(Lg, Lg);
	Core.divide(Lg, new Scalar(Math.log(LwMax / LwAver + 1.0)), Lg); // Lg is the global adaptation
	List<Mat> list = new ArrayList<>();
	list.add(Lw);
	list.add(Lg);
	return list;
}
示例5: calWeight
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Builds the combined per-pixel fusion weight for an image: the sum of its
 * luminance, saliency and exposedness feature weights, all computed against
 * a normalized grayscale luminance channel.
 *
 * NOTE: img is converted to CV_8UC1 in place as a side effect.
 */
private static Mat calWeight(Mat img) {
	// Derive a normalized [0,1] float luminance channel from the image.
	Mat luminance = new Mat();
	img.convertTo(img, CvType.CV_8UC1);
	Imgproc.cvtColor(img, luminance, Imgproc.COLOR_BGR2GRAY);
	luminance.convertTo(luminance, CvType.CV_32F);
	Core.divide(luminance, new Scalar(255.0), luminance);
	// Individual feature weights, each converted to the luminance type.
	Mat luminanceWeight = FeatureWeight.LuminanceWeight(img, luminance);
	luminanceWeight.convertTo(luminanceWeight, luminance.type());
	Mat saliencyWeight = FeatureWeight.Saliency(img);
	saliencyWeight.convertTo(saliencyWeight, luminance.type());
	Mat exposednessWeight = FeatureWeight.Exposedness(luminance);
	exposednessWeight.convertTo(exposednessWeight, luminance.type());
	// Total weight is the element-wise sum of the three feature weights.
	Mat totalWeight = luminanceWeight.clone();
	Core.add(totalWeight, saliencyWeight, totalWeight);
	Core.add(totalWeight, exposednessWeight, totalWeight);
	return totalWeight;
}
示例6: calWeight
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Builds the combined per-pixel fusion weight: the sum of the Laplacian
 * contrast, local contrast, saliency and exposedness feature weights.
 *
 * NOTE: L is modified in place (converted to CV_32F and scaled to [0,1]).
 *
 * @param img source image, passed to the saliency weight
 * @param L   luminance channel; normalized in place
 * @return element-wise sum of the four feature weight maps
 */
private static Mat calWeight(Mat img, Mat L) {
	// Convert to float BEFORE normalizing: if L arrives as an 8-bit Mat, the
	// original divide-then-convert order truncates every value to 0 or 1.
	// When L is already CV_32F this reordering changes nothing.
	L.convertTo(L, CvType.CV_32F);
	Core.divide(L, new Scalar(255.0), L);
	// calculate laplacian contrast weight
	Mat WL = FeatureWeight.LaplacianContrast(L);
	WL.convertTo(WL, L.type());
	// calculate Local contrast weight
	Mat WC = FeatureWeight.LocalContrast(L);
	WC.convertTo(WC, L.type());
	// calculate the saliency weight
	Mat WS = FeatureWeight.Saliency(img);
	WS.convertTo(WS, L.type());
	// calculate the exposedness weight
	Mat WE = FeatureWeight.Exposedness(L);
	WE.convertTo(WE, L.type());
	// sum
	Mat weight = WL.clone();
	Core.add(weight, WC, weight);
	Core.add(weight, WS, weight);
	Core.add(weight, WE, weight);
	return weight;
}
示例7: calWeight
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Builds the combined per-pixel fusion weight: the sum of the Laplacian
 * contrast, local contrast, saliency and exposedness feature weights.
 *
 * NOTE: L is modified in place (converted to CV_32F and scaled to [0,1]).
 *
 * @param img source image, passed to the saliency weight
 * @param L   luminance channel; normalized in place
 * @return element-wise sum of the four feature weight maps
 */
private static Mat calWeight(Mat img, Mat L) {
	// Convert to float BEFORE normalizing: if L arrives as an 8-bit Mat, the
	// original divide-then-convert order truncates every value to 0 or 1.
	// When L is already CV_32F this reordering changes nothing.
	L.convertTo(L, CvType.CV_32F);
	Core.divide(L, new Scalar(255.0), L);
	// calculate laplacian contrast weight
	Mat WL = WeightCalculate.LaplacianContrast(L);
	WL.convertTo(WL, L.type());
	// calculate Local contrast weight
	Mat WC = WeightCalculate.LocalContrast(L);
	WC.convertTo(WC, L.type());
	// calculate the saliency weight
	Mat WS = WeightCalculate.Saliency(img);
	WS.convertTo(WS, L.type());
	// calculate the exposedness weight
	Mat WE = WeightCalculate.Exposedness(L);
	WE.convertTo(WE, L.type());
	// sum
	Mat weight = WL.clone();
	Core.add(weight, WC, weight);
	Core.add(weight, WS, weight);
	Core.add(weight, WE, weight);
	return weight;
}
示例8: dobj
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Runs the cascade detector over the source image and returns a copy with
 * every detection outlined by a red 2px rectangle.
 *
 * @param objDetector loaded cascade classifier
 * @param src         image to scan (never modified)
 * @return a new annotated Mat, or {@code src} itself when nothing was detected
 */
private static Mat dobj(CascadeClassifier objDetector, Mat src) {
	final MatOfRect objDetections = new MatOfRect();
	// Detect directly on src (detectMultiScale does not modify its input) and
	// clone only when there is something to draw — the original always cloned
	// up front and converted the detections to an array twice.
	objDetector.detectMultiScale(src, objDetections);
	final Rect[] hits = objDetections.toArray();
	if (hits.length <= 0) {
		return src;
	}
	final Mat dst = src.clone();
	for (final Rect rect : hits) {
		Imgproc.rectangle(dst, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height),
				new Scalar(0, 0, 255), 2);
	}
	return dst;
}
示例9: smoothImage
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Reads cat.jpg, applies a 3x3 box blur 25 times to soften edges, and writes
 * the result to smoothCat.jpg.
 */
public void smoothImage() {
	// Smoothing, also called blurring, makes the edges softer; repeated
	// passes strengthen the effect.
	Mat original = Imgcodecs.imread("cat.jpg");
	Mat blurred = original.clone();
	int passes = 25;
	while (passes-- > 0) {
		// Snapshot the current result before each pass so the blur reads
		// from one Mat and writes into another (mirrors the original).
		Mat snapshot = blurred.clone();
		Imgproc.blur(snapshot, blurred, new Size(3.0, 3.0));
	}
	Imgcodecs.imwrite("smoothCat.jpg", blurred);
}
示例10: readCameraParameters
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Loads the stored calibration results (3x3 camera matrix and 1x5 distortion
 * coefficients) and stores independent copies in this object's fields.
 */
private void readCameraParameters() {
	Mat cameraMatrixBuffer = new Mat(3, 3, CvType.CV_64FC1);
	Mat distortionBuffer = new Mat(1, 5, CvType.CV_64FC1);
	CalibrationResultInitializer.tryLoad(activity, cameraMatrixBuffer, distortionBuffer);
	// Keep deep copies so later changes to the load buffers cannot leak in.
	this.cameraMatrix = cameraMatrixBuffer.clone();
	this.distCoeff = distortionBuffer.clone();
}
开发者ID:PawelTypiak,项目名称:Checkerboard-IMU-Comparator,代码行数:8,代码来源:CheckerboardPatternComputingInitializer.java
示例11: resizeAndConvertOneImage
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Scales IMAGE by scaleSize and converts it to single-channel grayscale,
 * replacing IMAGE with the processed result.
 */
private void resizeAndConvertOneImage() {
	Size targetSize = new Size(IMAGE.cols() * scaleSize, IMAGE.rows() * scaleSize);
	// resize and cvtColor both allocate fresh output Mats, so assigning them
	// directly is safe — the original performed two redundant deep clone()
	// calls on each intermediate result.
	Mat resized = new Mat();
	Imgproc.resize(IMAGE, resized, targetSize);
	Mat gray = new Mat();
	Imgproc.cvtColor(resized, gray, Imgproc.COLOR_BGR2GRAY);
	IMAGE = gray;
}
开发者ID:PawelTypiak,项目名称:Checkerboard-IMU-Comparator,代码行数:9,代码来源:CheckerboardPatternComputingInitializer.java
示例12: LSD
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Detects line segments in the input image and returns a grayscale copy with
 * the detected segments drawn onto it.
 *
 * @param in input image (assumed RGB, converted to gray for detection)
 * @return grayscale image annotated with the detected line segments
 */
public static Mat LSD(Mat in) {
	Mat grayscale = new Mat();
	Imgproc.cvtColor(in, grayscale, Imgproc.COLOR_RGB2GRAY);
	final LineSegmentDetector detector = Imgproc.createLineSegmentDetector();
	final Mat segments = new Mat();
	detector.detect(grayscale, segments);
	// Draw on a copy so the detection input stays pristine.
	final Mat canvas = grayscale.clone();
	detector.drawSegments(canvas, segments);
	return canvas;
}
示例13: dehazeProcess
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Dehazes an image channel by channel using the formula
 * J = (I - A) / t + A, with a per-channel transmission map derived from the
 * shared base map scaled by each channel's mean intensity.
 *
 * @param img      hazy input image (BGR channel order)
 * @param trans    base transmission map shared by all channels (not modified)
 * @param airlight estimated atmospheric light per channel, indexed [B, G, R]
 * @return dehazed BGR image
 */
private static Mat dehazeProcess(Mat img, Mat trans, double[] airlight) {
	Mat balancedImg = Filters.SimplestColorBalance(img, 5);
	Mat bCnl = new Mat();
	Core.extractChannel(balancedImg, bCnl, 0);
	Mat gCnl = new Mat();
	Core.extractChannel(balancedImg, gCnl, 1);
	Mat rCnl = new Mat();
	Core.extractChannel(balancedImg, rCnl, 2);
	// Per-channel mean values drive the transmission scaling.
	double bMean = Core.mean(bCnl).val[0];
	double gMean = Core.mean(gCnl).val[0];
	double rMean = Core.mean(rCnl).val[0];
	// Hoisted: the original recomputed this max expression for every channel.
	double maxMean = Math.max(bMean, Math.max(gMean, rMean));
	// Per-channel transmission maps.
	// NOTE(review): the 0.8 / 0.9 / 0.8 attenuation factors look empirical —
	// confirm against the source paper before changing them.
	Mat Tb = trans.clone();
	Core.multiply(Tb, new Scalar(maxMean / bMean * 0.8), Tb);
	Mat Tg = trans.clone();
	Core.multiply(Tg, new Scalar(maxMean / gMean * 0.9), Tg);
	Mat Tr = trans.clone();
	Core.multiply(Tr, new Scalar(maxMean / rMean * 0.8), Tr);
	// Recover each channel with the shared dehazing formula.
	Mat bChannel = recoverChannel(bCnl, Tb, airlight[0]);
	Mat gChannel = recoverChannel(gCnl, Tg, airlight[1]);
	Mat rChannel = recoverChannel(rCnl, Tr, airlight[2]);
	Mat dehazed = new Mat();
	Core.merge(new ArrayList<>(Arrays.asList(bChannel, gChannel, rChannel)), dehazed);
	return dehazed;
}

/** Applies the dehazing formula (I - A) / t + A to a single channel. */
private static Mat recoverChannel(Mat channel, Mat transmission, double airlight) {
	Mat out = new Mat();
	Core.subtract(channel, new Scalar(airlight), out);
	Core.divide(out, transmission, out);
	Core.add(out, new Scalar(airlight), out);
	return out;
}
示例14: enhance
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Dark-channel-prior image enhancement (dehazing): derives a dark channel,
 * builds and refines a transmission map via guided filtering, then dehazes
 * each color channel.
 *
 * NOTE: {@code image} is converted to CV_32F in place as a side effect.
 *
 * @param image         input image (converted to float in place)
 * @param krnlRatio     kernel size as a fraction of the image dimensions
 * @param minAtmosLight upper bound for the atmospheric light estimate
 * @param eps           regularization epsilon for the guided filter
 * @return enhanced 8-bit image
 */
public static Mat enhance(Mat image, double krnlRatio, double minAtmosLight, double eps) {
	image.convertTo(image, CvType.CV_32F);
	// extract each color channel
	// NOTE(review): get(0) is labeled R, but OpenCV's default channel order is
	// BGR — naming kept as-is; confirm the channel order upstream.
	List<Mat> rgb = new ArrayList<>();
	Core.split(image, rgb);
	Mat rChannel = rgb.get(0);
	Mat gChannel = rgb.get(1);
	Mat bChannel = rgb.get(2);
	int rows = rChannel.rows();
	int cols = rChannel.cols();
	// Derive the dark channel: the per-pixel minimum over the three channels.
	// Core.min replaces the original O(rows*cols) Java get/put loop with two
	// vectorized passes and produces identical values.
	Mat dc = rChannel.clone();
	Core.min(dc, gChannel, dc);
	Core.min(dc, bChannel, dc);
	// minimum filter (erosion with a rectangular kernel, at least 3x3)
	int krnlSz = Double.valueOf(Math.max(Math.max(rows * krnlRatio, cols * krnlRatio), 3.0)).intValue();
	Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(krnlSz, krnlSz), new Point(-1, -1));
	Imgproc.erode(dc, dc, kernel);
	// get coarse transmission map: t = 1 - dc/255
	Mat t = dc.clone();
	Core.subtract(t, new Scalar(255.0), t);
	Core.multiply(t, new Scalar(-1.0), t);
	Core.divide(t, new Scalar(255.0), t);
	// obtain a normalized gray scale image to guide the filter
	Mat gray = new Mat();
	Imgproc.cvtColor(image, gray, Imgproc.COLOR_RGB2GRAY);
	Core.divide(gray, new Scalar(255.0), gray);
	// refine transmission map
	int r = krnlSz * 4;
	t = Filters.GuidedImageFilter(gray, t, r, eps);
	// get minimum atmospheric light
	// NOTE(review): this takes the MAX of the eroded dark channel and caps it
	// at minAtmosLight — unusual but preserved; confirm against the algorithm.
	minAtmosLight = Math.min(minAtmosLight, Core.minMaxLoc(dc).maxVal);
	// dehaze each color channel
	rChannel = dehaze(rChannel, t, minAtmosLight);
	gChannel = dehaze(gChannel, t, minAtmosLight);
	bChannel = dehaze(bChannel, t, minAtmosLight);
	// merge three color channels back into a single image
	Mat outval = new Mat();
	Core.merge(new ArrayList<>(Arrays.asList(rChannel, gChannel, bChannel)), outval);
	outval.convertTo(outval, CvType.CV_8UC1);
	return outval;
}
示例15: copiarImagem
import org.opencv.core.Mat; //导入方法依赖的package包/类
/**
 * Returns a deep copy of the given image ("copiarImagem" = "copy image").
 *
 * @param imagem source matrix; left unmodified
 * @return an independent clone with its own pixel buffer
 */
public static Mat copiarImagem(Mat imagem) {
	return imagem.clone();
}