This article compiles typical usage examples of the Java class org.opencv.imgproc.Imgproc. If you have been wondering what Imgproc is for, how to use it, or where to find usage examples, the curated class examples below may help.
The Imgproc class belongs to the org.opencv.imgproc package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
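Before working through the examples, here is a minimal, self-contained sketch of typical Imgproc usage: load an image, convert it to grayscale, blur it, and compute an edge map. This is an illustrative assumption rather than one of the examples below; the file names input.jpg and edges.png are placeholders.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class ImgprocQuickStart {
    public static void main(String[] args) {
        // load the native OpenCV library before calling any Imgproc function
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat src = Imgcodecs.imread("input.jpg");                 // placeholder path
        Mat gray = new Mat(), blurred = new Mat(), edges = new Mat();
        Imgproc.cvtColor(src, gray, Imgproc.COLOR_BGR2GRAY);     // BGR -> grayscale
        Imgproc.GaussianBlur(gray, blurred, new Size(5, 5), 0);  // smooth out noise
        Imgproc.Canny(blurred, edges, 50, 150);                  // edge map
        Imgcodecs.imwrite("edges.png", edges);
    }
}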
Example 1: gridDetection
import org.opencv.imgproc.Imgproc; // import the required package/class
public static void gridDetection(Mat img){
    List<MatOfPoint> contours = new ArrayList<>();
    Imgproc.findContours(img, contours, new Mat(), Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE);
    // keep the contour with the largest area
    double maxArea = 0;
    MatOfPoint max_contour = new MatOfPoint();
    Iterator<MatOfPoint> iterator = contours.iterator();
    while (iterator.hasNext()){
        MatOfPoint contour = iterator.next();
        double area = Imgproc.contourArea(contour);
        if(area > maxArea){
            maxArea = area;
            max_contour = contour;
        }
    }
    // approximate the largest contour with a polygon (the outer grid outline)
    double epsilon = 0.1 * Imgproc.arcLength(new MatOfPoint2f(max_contour.toArray()), true);
    MatOfPoint2f approx = new MatOfPoint2f();
    Imgproc.approxPolyDP(new MatOfPoint2f(max_contour.toArray()), approx, epsilon, true);
    RotatedRect rect = Imgproc.minAreaRect(new MatOfPoint2f(max_contour.toArray()));
    Mat grid = Thresholding.orderPoints(approx);
    Thresholding.approx = approx;
    Thresholding.grid = grid;
    Thresholding.rect = rect;
}
Example 2: onCameraFrame
import org.opencv.imgproc.Imgproc; // import the required package/class
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    final int viewMode = mViewMode;
    switch (viewMode) {
        case VIEW_MODE_OPTICAL_FLOW:
            mGray = inputFrame.gray();
            if (features.toArray().length == 0) {
                // seed the tracker with a regular grid of points
                int rowStep = 50, colStep = 100;
                int nRows = mGray.rows() / rowStep, nCols = mGray.cols() / colStep;
                // Log.d(TAG, "\nRows: "+nRows+"\nCols: "+nCols+"\n");
                Point[] points = new Point[nRows * nCols];
                for (int i = 0; i < nRows; i++) {
                    for (int j = 0; j < nCols; j++) {
                        points[i * nCols + j] = new Point(j * colStep, i * rowStep);
                        // Log.d(TAG, "\nRow: "+i*rowStep+"\nCol: "+j*colStep+"\n: ");
                    }
                }
                features.fromArray(points);
                prevFeatures.fromList(features.toList());
                mPrevGray = mGray.clone();
                break;
            }
            nextFeatures.fromArray(prevFeatures.toArray());
            Video.calcOpticalFlowPyrLK(mPrevGray, mGray, prevFeatures, nextFeatures, status, err);
            List<Point> prevList = features.toList(), nextList = nextFeatures.toList();
            Scalar color = new Scalar(255);
            for (int i = 0; i < prevList.size(); i++) {
                // Core.circle(mGray, prevList.get(i), 5, color);
                Imgproc.line(mGray, prevList.get(i), nextList.get(i), color);
            }
            mPrevGray = mGray.clone();
            break;
        case VIEW_MODE_KLT_TRACKER:
            mGray = inputFrame.gray();
            if (features.toArray().length == 0) {
                Imgproc.goodFeaturesToTrack(mGray, features, 10, 0.01, 10);
                Log.d(TAG, features.toList().size() + "");
                prevFeatures.fromList(features.toList());
                mPrevGray = mGray.clone();
                // prevFeatures.fromList(nextFeatures.toList());
                break;
            }
            // OpticalFlow(mPrevGray.getNativeObjAddr(), mGray.getNativeObjAddr(), prevFeatures.getNativeObjAddr(), nextFeatures.getNativeObjAddr());
            Video.calcOpticalFlowPyrLK(mPrevGray, mGray, prevFeatures, nextFeatures, status, err);
            List<Point> drawFeature = nextFeatures.toList();
            // Log.d(TAG, drawFeature.size()+"");
            for (int i = 0; i < drawFeature.size(); i++) {
                Point p = drawFeature.get(i);
                Imgproc.circle(mGray, p, 5, new Scalar(255));
            }
            mPrevGray = mGray.clone();
            prevFeatures.fromList(nextFeatures.toList());
            break;
        default:
            mViewMode = VIEW_MODE_KLT_TRACKER;
    }
    return mGray;
}
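The snippet above depends on several fields that are not shown. The declarations below are a sketch of what they would plausibly look like; the names come from the example, but the types and initial values are assumptions, not the source project's code.

// Assumed field declarations for the onCameraFrame example (illustrative sketch):
private static final int VIEW_MODE_OPTICAL_FLOW = 1;
private static final int VIEW_MODE_KLT_TRACKER = 2;
private static final String TAG = "OpticalFlowActivity";      // placeholder tag
private int mViewMode = VIEW_MODE_KLT_TRACKER;
private Mat mGray, mPrevGray;
private final MatOfPoint features = new MatOfPoint();         // used by goodFeaturesToTrack
private final MatOfPoint2f prevFeatures = new MatOfPoint2f(); // input points for calcOpticalFlowPyrLK
private final MatOfPoint2f nextFeatures = new MatOfPoint2f(); // tracked output points
private final MatOfByte status = new MatOfByte();             // per-point tracking status
private final MatOfFloat err = new MatOfFloat();              // per-point tracking error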
Example 3: adaptativeProcess
import org.opencv.imgproc.Imgproc; // import the required package/class
public static Mat adaptativeProcess(Mat img){
    Mat im = new Mat();
    Imgproc.threshold(img, im, 120, 255, Imgproc.THRESH_TRUNC);
    im = Thresholding.adaptativeThresholding(im);
    Imgproc.medianBlur(im, im, 7);
    Mat threshImg = Thresholding.InvertImageColor(im);
    Thresholding.gridDetection(threshImg);
    // destination corners of the 512x512 output, one (x, y) pair per row
    Mat mat = Mat.zeros(4, 2, CvType.CV_32F);
    mat.put(0, 0, 0);   mat.put(0, 1, 512);
    mat.put(1, 0, 0);   mat.put(1, 1, 0);
    mat.put(2, 0, 512); mat.put(2, 1, 0);
    mat.put(3, 0, 512); mat.put(3, 1, 512);
    mat = Imgproc.getPerspectiveTransform(Thresholding.grid, mat);
    Mat M = new Mat();
    Imgproc.warpPerspective(threshImg, M, mat, new Size(512, 512));
    Imgproc.medianBlur(M, M, 3);
    Imgproc.threshold(M, M, 254, 255, Imgproc.THRESH_BINARY);
    return Thresholding.InvertImageColor(M);
}
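The 4x2 corner matrix above can be easier to read when expressed with MatOfPoint2f. The variant below is a sketch under the assumption that the corner order matches what Thresholding.orderPoints produces; it is not part of the original project.

// Equivalent destination corners expressed with MatOfPoint2f (illustrative sketch):
MatOfPoint2f dstCorners = new MatOfPoint2f(
        new Point(0, 512),    // bottom-left
        new Point(0, 0),      // top-left
        new Point(512, 0),    // top-right
        new Point(512, 512)); // bottom-right
Mat transform = Imgproc.getPerspectiveTransform(Thresholding.grid, dstCorners);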
Example 4: match
import org.opencv.imgproc.Imgproc; // import the required package/class
public MatchResult match(Mat scene, Mat templ, Method method, Mat img) {
    int result_cols = scene.cols() - templ.cols() + 1;
    int result_rows = scene.rows() - templ.rows() + 1;
    Mat result = new Mat(result_rows, result_cols, CV_32FC1);
    Imgproc.matchTemplate(scene, templ, result, method.ordinal());
    //Core.normalize(result, result, 0, 1, 32, -1, new Mat());
    MinMaxLocResult mmr = Core.minMaxLoc(result);
    Point matchLoc;
    double maxVal;
    // for the squared-difference methods the best match is the minimum; otherwise it is the maximum
    if (method.ordinal() == Imgproc.TM_SQDIFF
            || method.ordinal() == Imgproc.TM_SQDIFF_NORMED) {
        matchLoc = mmr.minLoc;
        maxVal = mmr.minVal;
    } else {
        matchLoc = mmr.maxLoc;
        maxVal = mmr.maxVal;
    }
    MatchResult currResult = new MatchResult(matchLoc.x + (templ.cols() / 2), matchLoc.y + (templ.rows() / 2), 0, maxVal);
    return currResult;
}
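This example passes method.ordinal() directly to Imgproc.matchTemplate, which only works if the enum's declaration order matches OpenCV's TM_* constants (TM_SQDIFF = 0 through TM_CCOEFF_NORMED = 5). The enum below is an assumed sketch of such a declaration, not the project's actual Method type.

// Hypothetical Method enum whose declaration order mirrors Imgproc's TM_* constants:
public enum Method {
    TM_SQDIFF,          // ordinal 0 == Imgproc.TM_SQDIFF
    TM_SQDIFF_NORMED,   // ordinal 1 == Imgproc.TM_SQDIFF_NORMED
    TM_CCORR,           // ordinal 2 == Imgproc.TM_CCORR
    TM_CCORR_NORMED,    // ordinal 3 == Imgproc.TM_CCORR_NORMED
    TM_CCOEFF,          // ordinal 4 == Imgproc.TM_CCOEFF
    TM_CCOEFF_NORMED    // ordinal 5 == Imgproc.TM_CCOEFF_NORMED
}

A more robust design is to store the Imgproc constant as a field of each enum value and pass that instead of relying on ordinal().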
Example 5: onCameraFrame
import org.opencv.imgproc.Imgproc; // import the required package/class
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    if (mAbsoluteFaceSize == 0) {
        int height = mGray.rows();
        if (Math.round(height * mRelativeFaceSize) > 0) {
            mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
        }
        mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
    }
    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR) {
        if (mJavaDetector != null)
            mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
    } else if (mDetectorType == NATIVE_DETECTOR) {
        if (mNativeDetector != null)
            mNativeDetector.detect(mGray, faces);
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++) {
        Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
    }
    // rotate the frame by 90 degrees
    Mat rotateMat = Imgproc.getRotationMatrix2D(new Point(mRgba.rows() / 2, mRgba.cols() / 2), 90, 1);
    Imgproc.warpAffine(mRgba, mRgba, rotateMat, mRgba.size());
    // flip around the y-axis
    Core.flip(mRgba, mRgba, 1);
    return mRgba;
}
Example 6: showImage
import org.opencv.imgproc.Imgproc; // import the required package/class
public void showImage(Mat img) {
    if (SizeCustom) {
        Imgproc.resize(img, img, new Size(Height, Width));
    }
    // Highgui.imencode(".jpg", img, matOfByte);
    // byte[] byteArray = matOfByte.toArray();
    BufferedImage bufImage = null;
    try {
        // InputStream in = new ByteArrayInputStream(byteArray);
        // bufImage = ImageIO.read(in);
        bufImage = toBufferedImage(img);
        image.setImage(bufImage);
        Window.pack();
        label.updateUI();
        Window.setVisible(true);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 7: rotate
import org.opencv.imgproc.Imgproc; // import the required package/class
/**
 * Rotate an image by an angle (counterclockwise).
 *
 * @param image Image (matrix) to rotate in place
 * @param angle Angle to rotate by (counterclockwise), from -360 to 360
 */
public static void rotate(Mat image, double angle) {
    // Calculate the bounding size of the rotated image
    double radians = Math.toRadians(angle);
    double sin = Math.abs(Math.sin(radians));
    double cos = Math.abs(Math.cos(radians));
    int newWidth = (int) (image.width() * cos + image.height() * sin);
    int newHeight = (int) (image.width() * sin + image.height() * cos);
    // Rotate about the original image center, then shift the transform so the result fits the new bounds
    Point center = new Point(image.width() / 2.0, image.height() / 2.0);
    Mat rotMatrix = Imgproc.getRotationMatrix2D(center, angle, 1.0); // 1.0 means 100% scale
    rotMatrix.put(0, 2, rotMatrix.get(0, 2)[0] + (newWidth - image.width()) / 2.0);
    rotMatrix.put(1, 2, rotMatrix.get(1, 2)[0] + (newHeight - image.height()) / 2.0);
    Size size = new Size(newWidth, newHeight);
    Imgproc.warpAffine(image, image, rotMatrix, size);
}
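A short usage sketch for the rotate helper above; the file names are placeholder assumptions.

Mat photo = Imgcodecs.imread("photo.jpg");        // placeholder input path
rotate(photo, 30);                                // rotate 30 degrees counterclockwise
Imgcodecs.imwrite("photo_rotated.png", photo);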
Example 8: normalProcess
import org.opencv.imgproc.Imgproc; // import the required package/class
public static Mat normalProcess(Mat img){
    Mat threshImg = Thresholding.InvertImageColor(img);
    Thresholding.gridDetection(threshImg);
    // destination corners of the 512x512 output, one (x, y) pair per row
    Mat mat = Mat.zeros(4, 2, CvType.CV_32F);
    mat.put(0, 0, 0);   mat.put(0, 1, 512);
    mat.put(1, 0, 0);   mat.put(1, 1, 0);
    mat.put(2, 0, 512); mat.put(2, 1, 0);
    mat.put(3, 0, 512); mat.put(3, 1, 512);
    mat = Imgproc.getPerspectiveTransform(Thresholding.grid, mat);
    Mat M = new Mat();
    Imgproc.warpPerspective(threshImg, M, mat, new Size(512, 512));
    return Thresholding.InvertImageColor(M);
}
Example 9: rgba
import org.opencv.imgproc.Imgproc; // import the required package/class
public Mat rgba() {
    Imgproc.cvtColor(mYuvFrameData, mRgba, Imgproc.COLOR_YUV2BGR_NV12, 4);
    if (mRotated != null) mRotated.release();
    // transpose and flip to rotate the frame into the expected orientation
    mRotated = mRgba.t();
    Core.flip(mRotated, mRotated, 1);
    return mRotated;
}
Example 10: initDefaultCommand
import org.opencv.imgproc.Imgproc; // import the required package/class
public void initDefaultCommand() {
    visionThread = new Thread(() -> {
        // Get the UsbCamera from CameraServer
        UsbCamera camera = CameraServer.getInstance().startAutomaticCapture();
        // Set the resolution
        camera.setResolution(640, 480);
        // Get a CvSink. This will capture Mats from the camera
        CvSink cvSink = CameraServer.getInstance().getVideo();
        // Set up a CvSource. This will send images back to the Dashboard
        CvSource outputStream = CameraServer.getInstance().putVideo("Rectangle", 640, 480);
        // Mats are very memory expensive. Let's reuse this Mat.
        Mat mat = new Mat();
        // This cannot be 'true'. The program will never exit if it is. This
        // lets the robot stop this thread when restarting robot code or
        // deploying.
        while (!Thread.interrupted()) {
            // Tell the CvSink to grab a frame from the camera and put it
            // in the source mat. If there is an error, notify the output.
            if (cvSink.grabFrame(mat) == 0) {
                // Send the error to the output.
                outputStream.notifyError(cvSink.getError());
                // Skip the rest of the current iteration
                continue;
            }
            // Put a rectangle on the image
            Imgproc.rectangle(mat, new Point(100, 100), new Point(400, 400),
                    new Scalar(255, 255, 255), 5);
            // Give the output stream a new image to display
            outputStream.putFrame(mat);
        }
    });
    visionThread.setDaemon(true);
    visionThread.start();
}
Example 11: calWeight
import org.opencv.imgproc.Imgproc; // import the required package/class
private static Mat calWeight(Mat img) {
    Mat L = new Mat();
    img.convertTo(img, CvType.CV_8UC1);
    Imgproc.cvtColor(img, L, Imgproc.COLOR_BGR2GRAY);
    L.convertTo(L, CvType.CV_32F);
    Core.divide(L, new Scalar(255.0), L);
    // calculate the Luminance weight
    Mat WC = FeatureWeight.LuminanceWeight(img, L);
    WC.convertTo(WC, L.type());
    // calculate the Saliency weight
    Mat WS = FeatureWeight.Saliency(img);
    WS.convertTo(WS, L.type());
    // calculate the Exposedness weight
    Mat WE = FeatureWeight.Exposedness(L);
    WE.convertTo(WE, L.type());
    // sum the three weight maps
    Mat weight = WC.clone();
    Core.add(weight, WS, weight);
    Core.add(weight, WE, weight);
    return weight;
}
Example 12: Saliency
import org.opencv.imgproc.Imgproc; // import the required package/class
public static Mat Saliency(Mat img) {
    // blur the image with a 3x3 or 5x5 Gaussian filter
    Mat gfbgr = new Mat();
    Imgproc.GaussianBlur(img, gfbgr, new Size(3, 3), 3);
    // perform sRGB to CIE Lab color space conversion
    Mat LabIm = new Mat();
    Imgproc.cvtColor(gfbgr, LabIm, Imgproc.COLOR_BGR2Lab);
    // compute Lab average values (note that in the paper this average is found from the
    // un-blurred original image, but the results are quite similar)
    List<Mat> lab = new ArrayList<>();
    Core.split(LabIm, lab);
    Mat l = lab.get(0);
    l.convertTo(l, CvType.CV_32F);
    Mat a = lab.get(1);
    a.convertTo(a, CvType.CV_32F);
    Mat b = lab.get(2);
    b.convertTo(b, CvType.CV_32F);
    double lm = Core.mean(l).val[0];
    double am = Core.mean(a).val[0];
    double bm = Core.mean(b).val[0];
    // finally compute the saliency map as the squared distance from the mean Lab value
    Mat sm = Mat.zeros(l.rows(), l.cols(), l.type());
    Core.subtract(l, new Scalar(lm), l);
    Core.subtract(a, new Scalar(am), a);
    Core.subtract(b, new Scalar(bm), b);
    Core.add(sm, l.mul(l), sm);
    Core.add(sm, a.mul(a), sm);
    Core.add(sm, b.mul(b), sm);
    return sm;
}
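The returned map is a raw 32-bit float image, so its values are not bounded to a displayable range. A minimal follow-up sketch (an assumption, not part of the original example) that scales it to 0..255 for saving or inspection:

// Scale the saliency map to 0..255 so it can be written out as an 8-bit image.
Mat smNorm = new Mat();
Core.normalize(sm, smNorm, 0, 255, Core.NORM_MINMAX);
smNorm.convertTo(smNorm, CvType.CV_8U);
Imgcodecs.imwrite("saliency.png", smNorm);    // placeholder output path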
Example 13: getInputDataLeNet
import org.opencv.imgproc.Imgproc; // import the required package/class
private float[] getInputDataLeNet(Bitmap bitmap) {
    final int INPUT_LENGTH = 28;
    Mat imageMat = new Mat();
    Mat inputMat = new Mat();
    Utils.bitmapToMat(bitmap, imageMat);
    // convert the image to 28 x 28 grayscale in the range 0~1, where smaller means whiter
    Imgproc.cvtColor(imageMat, imageMat, Imgproc.COLOR_RGBA2GRAY);
    imageMat = centerCropAndScale(imageMat, INPUT_LENGTH);
    imageMat.convertTo(imageMat, CvType.CV_32F, 1. / 255);
    Core.subtract(Mat.ones(imageMat.size(), CvType.CV_32F), imageMat, inputMat);
    float[] inputData = new float[inputMat.width() * inputMat.height()];
    inputMat.get(0, 0, inputData);
    return inputData;
}
Example 14: conv_Mat
import org.opencv.imgproc.Imgproc; // import the required package/class
private Mat conv_Mat(BufferedImage img) {
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat mat = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    mat.put(0, 0, data);
    Mat mat1 = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    Imgproc.cvtColor(mat, mat1, Imgproc.COLOR_RGB2HSV);
    return mat1;
}
Author: javaspecial, Project: Face-detection-and-recognition-desktop-application, Lines of code: 10, Source file: FaceDetectCropTest.java
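Note that the byte buffer of a TYPE_3BYTE_BGR BufferedImage stores pixels in BGR order, so depending on how img was created, Imgproc.COLOR_BGR2HSV may be the conversion that actually matches the data. The variant below is an illustrative sketch under that assumption, not the project's code.

// Variant sketch assuming img is a TYPE_3BYTE_BGR BufferedImage (bytes in BGR order):
private Mat conv_Mat_BGR(BufferedImage img) {
    byte[] data = ((DataBufferByte) img.getRaster().getDataBuffer()).getData();
    Mat mat = new Mat(img.getHeight(), img.getWidth(), CvType.CV_8UC3);
    mat.put(0, 0, data);
    Mat hsv = new Mat();
    Imgproc.cvtColor(mat, hsv, Imgproc.COLOR_BGR2HSV);    // treat the bytes as BGR
    return hsv;
}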
Example 15: run
import org.opencv.imgproc.Imgproc; // import the required package/class
public void run() {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    String base = "C:/Books in Progress/Java for Data Science/Chapter 10/OpenCVExamples/src/resources";
    CascadeClassifier faceDetector =
            new CascadeClassifier(base + "/lbpcascade_frontalface.xml");
    Mat image = Imgcodecs.imread(base + "/images.jpg");
    MatOfRect faceVectors = new MatOfRect();
    faceDetector.detectMultiScale(image, faceVectors);
    out.println(faceVectors.toArray().length + " faces found");
    // draw a green rectangle around each detected face
    for (Rect rect : faceVectors.toArray()) {
        Imgproc.rectangle(image, new Point(rect.x, rect.y),
                new Point(rect.x + rect.width, rect.y + rect.height),
                new Scalar(0, 255, 0));
    }
    Imgcodecs.imwrite("faceDetection.png", image);
}