This article collects typical usage examples of the Java method org.opencv.core.Mat.submat. If you are wondering what Mat.submat does in Java, how to call it, or what real-world usage looks like, the curated code samples below should help. You can also read further about the enclosing class org.opencv.core.Mat.
The following shows 10 code examples of Mat.submat, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
Example 1: averageColor
import org.opencv.core.Mat; // import the package/class the method depends on
/**
 * Gets the average color of the object.
 *
 * @param img      the image matrix, in any color space
 * @param imgSpace the image's color space
 * @return the average color of the region
 */
public Color averageColor(Mat img, ColorSpace imgSpace) {
    // Coerce values to stay within the image dimensions
    double leftX = MathUtil.coerce(0, img.cols() - 1, left());
    double rightX = MathUtil.coerce(0, img.cols() - 1, right());
    double topY = MathUtil.coerce(0, img.rows() - 1, top());
    double bottomY = MathUtil.coerce(0, img.rows() - 1, bottom());
    // Take the rectangular submatrix covering the region
    // TODO a rectangular submatrix isn't perfectly accurate for ellipses or other irregular shapes
    Mat subMat = img.submat((int) topY, (int) bottomY, (int) leftX, (int) rightX);
    // Calculate the per-channel mean and return a new Color instance
    return Color.create(Core.mean(subMat), imgSpace);
}
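Color, ColorSpace and MathUtil above are helpers of the surrounding project. As a minimal, self-contained sketch of the same pattern, the snippet below (file name and region coordinates are made up) reads an image, takes a rectangular view with the four-int submat overload and averages it with Core.mean; note that the end indices are exclusive and that submat returns a view, not a copy.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;
import org.opencv.imgcodecs.Imgcodecs;

public class RegionMeanSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat img = Imgcodecs.imread("input.jpg"); // placeholder path, assumed at least 220x110 pixels
        // submat(rowStart, rowEnd, colStart, colEnd) - the end indices are exclusive
        Mat region = img.submat(10, 110, 20, 220);
        Scalar mean = Core.mean(region); // per-channel average (BGR order for imread)
        System.out.println("average BGR color: " + mean);
    }
}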
Example 2: process
import org.opencv.core.Mat; // import the package/class the method depends on
private void process(Mat f)
{
    // grayscale toggle
    if (grayscaleToggle.isSelected()) Imgproc.cvtColor(f, f, Imgproc.COLOR_BGR2GRAY);
    // add the overlay image to the bottom-right corner of the frame
    if (thenkToggle.isSelected() && thenkMat != null)
    {
        Rect roi = new Rect(f.cols() - thenkMat.cols(), f.rows() - thenkMat.rows(), thenkMat.cols(), thenkMat.rows());
        Mat thenkROI = f.submat(roi);
        Core.addWeighted(thenkROI, 1.0, thenkMat, 1.0, 0.0, thenkROI);
    }
}
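grayscaleToggle, thenkToggle and thenkMat are fields of the surrounding GUI class. The ROI-overlay idea itself only needs two Mats of the same type; a stripped-down sketch on synthetic data could look like this (writing through the submat view modifies the parent frame directly):

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;

public class OverlaySketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat frame = new Mat(480, 640, CvType.CV_8UC3, new Scalar(30, 30, 30));
        Mat logo = new Mat(100, 200, CvType.CV_8UC3, new Scalar(0, 0, 80));
        // view onto the bottom-right corner of the frame; no pixels are copied
        Rect roi = new Rect(frame.cols() - logo.cols(), frame.rows() - logo.rows(),
                logo.cols(), logo.rows());
        Mat corner = frame.submat(roi);
        // blending into the view blends into 'frame' itself
        Core.addWeighted(corner, 1.0, logo, 1.0, 0.0, corner);
    }
}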
Example 3: cropFingerprint
import org.opencv.core.Mat; // import the package/class the method depends on
private Mat cropFingerprint(Mat src) {
    int rowStart = (int) (CameraOverlayView.PADDING * src.rows());
    int rowEnd = (int) ((1 - CameraOverlayView.PADDING) * src.rows());
    int colStart = (int) (CameraOverlayView.PADDING * src.cols());
    int colEnd = (int) ((1 - CameraOverlayView.PADDING) * src.cols());
    Range rowRange = new Range(rowStart, rowEnd);
    Range colRange = new Range(colStart, colEnd);
    return src.submat(rowRange, colRange);
}
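CameraOverlayView.PADDING is an app constant giving the margin as a fraction of the image. The Range-based overload used here behaves exactly like the four-int overload; a self-contained sketch with an assumed 10% padding:

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Range;

public class CropSketch {
    static final double PADDING = 0.1; // assumed margin fraction

    static Mat cropCenter(Mat src) {
        Range rowRange = new Range((int) (PADDING * src.rows()), (int) ((1 - PADDING) * src.rows()));
        Range colRange = new Range((int) (PADDING * src.cols()), (int) ((1 - PADDING) * src.cols()));
        return src.submat(rowRange, colRange); // a view into src, not a copy
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat src = new Mat(400, 300, CvType.CV_8UC1);
        System.out.println(cropCenter(src).size()); // prints 240x320 (width x height)
    }
}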
Example 4: ridgeFrequency
import org.opencv.core.Mat; // import the package/class the method depends on
/**
 * Calculates the ridge frequency of a fingerprint image block by block.
 */
private double ridgeFrequency(Mat ridgeSegment, Mat segmentMask, Mat ridgeOrientation, Mat frequencies, int blockSize, int windowSize, int minWaveLength, int maxWaveLength) {
    int rows = ridgeSegment.rows();
    int cols = ridgeSegment.cols();
    Mat blockSegment;
    Mat blockOrientation;
    Mat frequency;
    for (int y = 0; y < rows - blockSize; y += blockSize) {
        for (int x = 0; x < cols - blockSize; x += blockSize) {
            blockSegment = ridgeSegment.submat(y, y + blockSize, x, x + blockSize);
            blockOrientation = ridgeOrientation.submat(y, y + blockSize, x, x + blockSize);
            frequency = calculateFrequency(blockSegment, blockOrientation, windowSize, minWaveLength, maxWaveLength);
            frequency.copyTo(frequencies.rowRange(y, y + blockSize).colRange(x, x + blockSize));
        }
    }
    // mask out frequencies calculated for non-ridge regions
    Core.multiply(frequencies, segmentMask, frequencies, 1.0, CvType.CV_32FC1);
    // find the median frequency over all valid regions of the image
    double medianFrequency = medianFrequency(frequencies);
    // using the median frequency across the whole fingerprint gives a more satisfactory result
    Core.multiply(segmentMask, Scalar.all(medianFrequency), frequencies, 1.0, CvType.CV_32FC1);
    return medianFrequency;
}
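calculateFrequency and medianFrequency belong to the same fingerprint pipeline and are not shown here. The reusable part is the block pattern: read each block through submat, compute something per block, and write the result back through rowRange/colRange. A small stand-alone sketch that fills an output map with per-block means illustrates just that pattern:

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;

public class BlockMapSketch {
    // fills each blockSize x blockSize cell of the output with the mean of the matching input block
    static Mat blockMeans(Mat src, int blockSize) {
        Mat out = Mat.zeros(src.size(), CvType.CV_32FC1);
        for (int y = 0; y + blockSize <= src.rows(); y += blockSize) {
            for (int x = 0; x + blockSize <= src.cols(); x += blockSize) {
                Mat block = src.submat(y, y + blockSize, x, x + blockSize);
                double mean = Core.mean(block).val[0];
                out.rowRange(y, y + blockSize).colRange(x, x + blockSize).setTo(new Scalar(mean));
            }
        }
        return out;
    }
}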
Example 5: buildTemplate
import org.opencv.core.Mat; // import the package/class the method depends on
/**
 * <p>Builds a template from an eye area that was previously extracted:
 * runs detectMultiScale on this area, then uses minMaxLoc to locate the
 * iris inside the detected eye.</p>
 *
 * @param area        preformatted eye area
 * @param size        minimum iris size
 * @param grayMat     grayscale image
 * @param rgbaMat     color (RGBA) image
 * @param detectorEye Haar cascade classifier
 * @return the built template
 */
@NonNull
private static Mat buildTemplate(Rect area, final int size,
                                 @NonNull Mat grayMat,
                                 @NonNull Mat rgbaMat,
                                 CascadeClassifier detectorEye) {
    Mat template = new Mat();
    Mat graySubMatEye = grayMat.submat(area);
    MatOfRect eyes = new MatOfRect();
    Rect eyeTemplate;
    detectorEye.detectMultiScale(graySubMatEye, eyes, 1.15, 2,
            Objdetect.CASCADE_FIND_BIGGEST_OBJECT
                    | Objdetect.CASCADE_SCALE_IMAGE, new Size(EYE_MIN_SIZE, EYE_MIN_SIZE),
            new Size());
    Rect[] eyesArray = eyes.toArray();
    if (eyesArray.length > 0) {
        Rect e = eyesArray[0];
        // translate the detection back into full-image coordinates
        e.x = area.x + e.x;
        e.y = area.y + e.y;
        // keep only the lower 60% of the eye rectangle, where the iris sits
        Rect eyeRectangle = getEyeArea((int) e.tl().x,
                (int) (e.tl().y + e.height * 0.4),
                e.width,
                (int) (e.height * 0.6));
        graySubMatEye = grayMat.submat(eyeRectangle);
        Mat rgbaMatEye = rgbaMat.submat(eyeRectangle);
        // the darkest point of the eye region is taken as the iris center
        Core.MinMaxLocResult minMaxLoc = Core.minMaxLoc(graySubMatEye);
        FaceDrawerOpenCV.drawIrisCircle(rgbaMatEye, minMaxLoc);
        Point iris = new Point();
        iris.x = minMaxLoc.minLoc.x + eyeRectangle.x;
        iris.y = minMaxLoc.minLoc.y + eyeRectangle.y;
        eyeTemplate = getEyeArea((int) iris.x - size / 2,
                (int) iris.y - size / 2, size, size);
        FaceDrawerOpenCV.drawEyeRectangle(eyeTemplate, rgbaMat);
        template = (grayMat.submat(eyeTemplate)).clone();
    }
    return template;
}
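getEyeArea, FaceDrawerOpenCV and EYE_MIN_SIZE are part of the same face-tracking project. As a rough, hypothetical sketch of how such a method might be driven from the same class (the cascade path, iris size of 24 px and eye-region geometry below are made-up values, not the project's actual ones; CascadeClassifier comes from org.opencv.objdetect):

// hypothetical companion method in the same class;
// faceRect is assumed to come from an earlier face-detection stage
private static Mat templateForFace(Rect faceRect, Mat grayMat, Mat rgbaMat) {
    CascadeClassifier eyeDetector = new CascadeClassifier("haarcascade_eye.xml"); // placeholder path
    // assume the left eye sits in the upper-left quarter of the face rectangle
    Rect leftEyeArea = new Rect(faceRect.x, faceRect.y + faceRect.height / 4,
            faceRect.width / 2, faceRect.height / 4);
    return buildTemplate(leftEyeArea, 24, grayMat, rgbaMat, eyeDetector);
}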
Example 6: transEstimateEachChannel
import org.opencv.core.Mat; // import the package/class the method depends on
public static Mat transEstimateEachChannel(Mat img, int patchSz, double airlight, double lambda, double fTrans) {
    int rows = img.rows();
    int cols = img.cols();
    Mat T = new Mat(rows, cols, img.type());
    for (int i = 0; i < rows; i += patchSz) {
        for (int j = 0; j < cols; j += patchSz) {
            // clamp the last patch at the image border
            int endRow = i + patchSz > rows ? rows : i + patchSz;
            int endCol = j + patchSz > cols ? cols : j + patchSz;
            Mat blkIm = img.submat(i, endRow, j, endCol);
            // estimate one transmission value per patch and fill the patch with it
            double Trans = BlkTransEstimate.blkEstimateEachChannel(blkIm, airlight, lambda, fTrans);
            for (int m = i; m < endRow; m++) {
                for (int n = j; n < endCol; n++) {
                    T.put(m, n, Trans);
                }
            }
        }
    }
    return T;
}
Example 7: computeTrans
import org.opencv.core.Mat; // import the package/class the method depends on
private static Mat computeTrans(Mat img, int patchSz, int rows, int cols, int type, double[] airlight, double lambda, double fTrans) {
    Mat T = new Mat(rows, cols, type);
    for (int i = 0; i < rows; i += patchSz) {
        for (int j = 0; j < cols; j += patchSz) {
            // clamp the last patch at the image border
            int endRow = i + patchSz > rows ? rows : i + patchSz;
            int endCol = j + patchSz > cols ? cols : j + patchSz;
            Mat blkIm = img.submat(i, endRow, j, endCol);
            double Trans = BlkTransEstimate.blkEstimate(blkIm, airlight, lambda, fTrans);
            for (int m = i; m < endRow; m++) {
                for (int n = j; n < endCol; n++) {
                    T.put(m, n, Trans);
                }
            }
        }
    }
    return T;
}
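Examples 6 and 7 share the same clamped block loop. For a single-channel transmission map, the per-pixel put() fill can also be expressed with a submat view and setTo (org.opencv.core.Scalar), which removes the inner double loop; the loop body would then read (a sketch, not the author's code):

// clamp the last patch at the image border
int endRow = Math.min(i + patchSz, rows);
int endCol = Math.min(j + patchSz, cols);
Mat blkIm = img.submat(i, endRow, j, endCol);
double trans = BlkTransEstimate.blkEstimate(blkIm, airlight, lambda, fTrans);
// fill the matching patch of T in one call instead of pixel by pixel
T.submat(i, endRow, j, endCol).setTo(Scalar.all(trans));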
Example 8: ExtractImageBoxes
import org.opencv.core.Mat; // import the package/class the method depends on
public static ArrayList<Mat> ExtractImageBoxes(Mat boxes, Mat img, int method) {
    ArrayList<Mat> imBox = new ArrayList<Mat>();
    for (int i = 0; i < boxes.height(); i++) {
        // crop each box with a small inset to drop the border pixels
        Mat imR = img.submat((int) boxes.get(i, 0)[0] + 2, (int) boxes.get(i, 0)[0] + (int) boxes.get(i, 2)[0] - 4,
                (int) boxes.get(i, 1)[0] + 2, (int) boxes.get(i, 1)[0] + (int) boxes.get(i, 3)[0] - 4);
        // normalize each crop to a 28x28 binary image and flatten it to a single row
        Imgproc.resize(imR, imR, new Size(28, 28), 0, 0, method);
        imR = Processing.binarize(imR, Imgproc.THRESH_BINARY);
        imBox.add(imR.reshape(0, 1));
    }
    return imBox;
}
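Processing.binarize is a project helper (presumably a thin wrapper around Imgproc.threshold). Judging only from the indices above, each row of boxes appears to hold [rowStart, colStart, rowExtent, colExtent]; under that assumption, a single-box input could be built like this (the numbers are made up, img is the source image):

// hypothetical single-box input, assuming each row is [rowStart, colStart, rowExtent, colExtent]
Mat boxes = new Mat(1, 4, CvType.CV_32SC1);
boxes.put(0, 0, 50, 80, 40, 30); // starts at row 50, col 80, spans 40 rows and 30 cols
ArrayList<Mat> crops = ExtractImageBoxes(boxes, img, Imgproc.INTER_AREA);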
Example 9: estimate
import org.opencv.core.Mat; // import the package/class the method depends on
public static double[] estimate(Mat img, int blockSize) {
    int rows = img.rows();
    int cols = img.cols();
    while (rows * cols > blockSize) {
        int midRow = (int) Math.floor(rows / 2.0);
        int midCol = (int) Math.floor(cols / 2.0);
        // divide the image into 4 rectangular regions
        Mat[] subIm = new Mat[4];
        subIm[0] = img.submat(0, midRow, 0, midCol);        // top-left corner
        subIm[1] = img.submat(midRow, rows, 0, midCol);     // bottom-left corner
        subIm[2] = img.submat(0, midRow, midCol, cols);     // top-right corner
        subIm[3] = img.submat(midRow, rows, midCol, cols);  // bottom-right corner
        // for each sub-image, calculate its score (mean - standard deviation)
        double[] score = new double[4];
        score[0] = calculateScore(subIm[0]);
        score[1] = calculateScore(subIm[1]);
        score[2] = calculateScore(subIm[2]);
        score[3] = calculateScore(subIm[3]);
        // recurse into the highest-scoring quadrant
        int index = 0;
        for (int i = 1; i < score.length; i++) {
            if (score[index] < score[i]) index = i;
        }
        img = subIm[index].clone();
        rows = img.rows();
        cols = img.cols();
    }
    // within the selected region, pick the pixel closest to pure white as the air-light
    int index_X = 0;
    int index_Y = 0;
    double pointValue = Double.MAX_VALUE;
    for (int i = 0; i < img.rows(); i++) {
        for (int j = 0; j < img.cols(); j++) {
            double[] data = img.get(i, j);
            double tmpValue = Math.sqrt(Math.pow(data[0] - 255.0, 2.0) + Math.pow(data[1] - 255.0, 2.0) + Math.pow(data[2] - 255.0, 2.0));
            if (pointValue > tmpValue) {
                index_X = i;
                index_Y = j;
                pointValue = tmpValue;
            }
        }
    }
    return img.get(index_X, index_Y);
}
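The distance against (255, 255, 255) assumes an 8-bit, 3-channel BGR input. A minimal driver for this hierarchical air-light search might look like the following; the file name, the block-size threshold, and the class name AirlightEstimator (the snippet above does not show its enclosing class) are all placeholders.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;

public class AirlightSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat hazy = Imgcodecs.imread("hazy.jpg"); // placeholder path, loaded as 8UC3
        // stop subdividing once the candidate region is 1024 pixels or fewer
        double[] airlight = AirlightEstimator.estimate(hazy, 32 * 32); // hypothetical class name
        System.out.printf("air-light (B, G, R) = %.1f, %.1f, %.1f%n",
                airlight[0], airlight[1], airlight[2]);
    }
}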
Example 10: getFadeImg
import org.opencv.core.Mat; // import the package/class the method depends on
private Mat getFadeImg(Mat interimImage) {
    // Mat result = getBlendImg();
    int borderImgHeight = (int) Math.round(interimImage.cols() * .1);
    if ((borderImgHeight % 2) == 1)
        borderImgHeight++;
    Mat blendImg = getBlendImg(interimImage, borderImgHeight);
    // Calculate the cutoff in the interim image.
    // Note: the same formula is used in NativeWrapper.
    float overlapHalf = .1f;
    int patchHeight = (int) Math.round(borderImgHeight * (.5 + overlapHalf));
    int cutOff = Math.round(patchHeight - (float) borderImgHeight / 2);
    int cutPatchHeight = patchHeight - cutOff;
    if (cutOff <= 0 || cutPatchHeight <= 0)
        return interimImage;
    // drop the faded rows at the top and bottom, then patch in the blended strips
    Mat result = interimImage.rowRange(cutOff, interimImage.rows() - cutOff).clone();
    Mat subMatBlendedTop = result.submat(0, patchHeight - cutOff, 0, result.cols());
    blendImg.submat(cutPatchHeight, blendImg.rows(), 0, result.cols()).copyTo(subMatBlendedTop);
    Mat subMatBlendedBottom = result.submat(result.rows() - cutPatchHeight, result.rows(), 0, result.cols());
    blendImg.submat(0, cutPatchHeight, 0, result.cols()).copyTo(subMatBlendedBottom);
    return result;
}
}