This article collects typical usage examples of the Java method org.opencv.core.MatOfPoint2f.toArray. If you are unsure what MatOfPoint2f.toArray does or how to use it, the selected code examples below may help. You can also explore further examples for the enclosing class, org.opencv.core.MatOfPoint2f.
Four code examples of MatOfPoint2f.toArray are shown below, ordered by popularity.
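Before the full examples, here is a minimal, self-contained sketch of what toArray does: it copies the rows of the 2-channel float point matrix back into a plain Java Point[]. The class name and point values below are only illustrative.

    import org.opencv.core.Core;
    import org.opencv.core.MatOfPoint2f;
    import org.opencv.core.Point;

    public class ToArrayDemo {
        public static void main(String[] args) {
            // Load the native OpenCV library before using any Mat-based class.
            System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

            // Build a float point matrix (CV_32FC2) from plain Java points...
            MatOfPoint2f pts = new MatOfPoint2f(new Point(1, 2), new Point(3, 4));

            // ...and copy it back into a Point[] with toArray().
            Point[] back = pts.toArray();
            for (Point p : back) {
                System.out.println(p); // prints the coordinates, e.g. {1.0, 2.0}
            }
        }
    }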
Example 1: findRectangle
import org.opencv.core.MatOfPoint2f; // import the package/class this method depends on
public void findRectangle() {
    Imgproc.cvtColor(originalImage, image, Imgproc.COLOR_BGR2GRAY);
    setFilter();
    this.rects.clear();

    // Find contours
    Imgproc.findContours(image, contours, hierarchy, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE, new Point(0, 0));

    // For conversion later on
    MatOfPoint2f approxCurve = new MatOfPoint2f();

    // For each contour found
    for (int i = 0; i < contours.size(); i++) {
        // Convert the contour from MatOfPoint to MatOfPoint2f
        MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(i).toArray());
        // arcLength and approxPolyDP work on the MatOfPoint2f representation
        double approxDistance = Imgproc.arcLength(contour2f, true) * 0.02;
        if (approxDistance > 1) {
            // Approximate the contour with a polygon
            Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
            // Convert back to MatOfPoint
            MatOfPoint points = new MatOfPoint(approxCurve.toArray());
            // Rectangle checks - number of points, area, convexity
            if (points.total() == 4 && Math.abs(Imgproc.contourArea(points)) > 1000 && Imgproc.isContourConvex(points)) {
                double cos = 0;
                double mcos = 0;
                for (int sc = 2; sc < 5; sc++) {
                    // TO-DO Figure a way to check angle
                    // (cos is never updated here, so mcos stays 0 until the angle check is implemented)
                    if (cos > mcos) {
                        mcos = cos;
                    }
                }
                if (mcos < 0.3) {
                    // Get the bounding rect of the contour
                    Rect rect = Imgproc.boundingRect(points);
                    // if (Math.abs(rect.height - rect.width) < 1000) {
                    System.out.println(i + "| x: " + rect.x + " + width(" + rect.width + "), y: " + rect.y + " + height(" + rect.height + ")");
                    rects.add(rect);
                    Core.rectangle(originalImage, rect.tl(), rect.br(), new Scalar(20, 20, 20), -1, 4, 0);
                    Imgproc.drawContours(originalImage, contours, i, new Scalar(0, 255, 0, .8), 2);
                    // Highgui.imwrite("detected_layers" + i + ".png", originalImage);
                    // }
                }
            }
        }
    }
    // Pass raw parameters
    ImageDetection id = new ImageDetection();
    HyperTextBuilder.rects = this.rects;
    HyperTextBuilder.rect_height = this.HEIGHT;
    HyperTextBuilder.rect_width = this.WIDTH;
    id.setData(Utility.matToBufferedImage(originalImage));
}
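The conversion dance in this example — MatOfPoint from findContours, MatOfPoint2f for arcLength/approxPolyDP, then back to MatOfPoint for contourArea and isContourConvex — is common enough that a pair of tiny helpers keeps it readable. A minimal sketch (the helper names are my own, not part of the example); both directions rely only on toArray() and the varargs constructors used above.

    // Hypothetical conversion helpers between the two point-matrix types.
    static MatOfPoint2f toPoint2f(MatOfPoint contour) {
        return new MatOfPoint2f(contour.toArray());  // 32-bit int points -> float points
    }

    static MatOfPoint toPoint(MatOfPoint2f curve) {
        return new MatOfPoint(curve.toArray());      // float points -> 32-bit int points (coordinates are truncated)
    }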
Example 2: drawObjectLocation
import org.opencv.core.MatOfPoint2f; // import the package/class this method depends on
/**
 * Draw the object's location
 *
 * @param output         Image to draw on
 * @param objectAnalysis Object analysis information
 * @param sceneAnalysis  Scene analysis information
 */
public static void drawObjectLocation(Mat output, ObjectAnalysis objectAnalysis, SceneAnalysis sceneAnalysis) {
    List<Point> ptsObject = new ArrayList<>();
    List<Point> ptsScene = new ArrayList<>();
    KeyPoint[] keypointsObject = objectAnalysis.keypoints.toArray();
    KeyPoint[] keypointsScene = sceneAnalysis.keypoints.toArray();
    DMatch[] matches = sceneAnalysis.matches.toArray();
    for (DMatch match : matches) {
        // Get the keypoints from these matches
        ptsObject.add(keypointsObject[match.queryIdx].pt);
        ptsScene.add(keypointsScene[match.trainIdx].pt);
    }
    MatOfPoint2f matObject = new MatOfPoint2f();
    matObject.fromList(ptsObject);
    MatOfPoint2f matScene = new MatOfPoint2f();
    matScene.fromList(ptsScene);

    // Calculate the homography of the object in the scene
    Mat homography = Calib3d.findHomography(matObject, matScene, Calib3d.RANSAC, 5.0f);

    // Create the unscaled array of corners, representing the object size
    Point[] cornersObject = new Point[4];
    cornersObject[0] = new Point(0, 0);
    cornersObject[1] = new Point(objectAnalysis.object.cols(), 0);
    cornersObject[2] = new Point(objectAnalysis.object.cols(), objectAnalysis.object.rows());
    cornersObject[3] = new Point(0, objectAnalysis.object.rows());
    Point[] cornersSceneTemp = new Point[0];
    MatOfPoint2f cornersSceneMatrix = new MatOfPoint2f(cornersSceneTemp);
    MatOfPoint2f cornersObjectMatrix = new MatOfPoint2f(cornersObject);

    // Transform the object coordinates to the scene coordinates using the homography matrix
    Core.perspectiveTransform(cornersObjectMatrix, cornersSceneMatrix, homography);
    //Mat transform = Imgproc.getAffineTransform(cornersObjectMatrix, cornersSceneMatrix);

    // Draw the outline of the object on the scene
    Point[] cornersScene = cornersSceneMatrix.toArray();
    final ColorRGBA lineColor = new ColorRGBA("#00ff00");
    Drawing.drawLine(output, new Point(cornersScene[0].x + objectAnalysis.object.cols(), cornersScene[0].y),
            new Point(cornersScene[1].x + objectAnalysis.object.cols(), cornersScene[1].y), lineColor, 5);
    Drawing.drawLine(output, new Point(cornersScene[1].x + objectAnalysis.object.cols(), cornersScene[1].y),
            new Point(cornersScene[2].x + objectAnalysis.object.cols(), cornersScene[2].y), lineColor, 5);
    Drawing.drawLine(output, new Point(cornersScene[2].x + objectAnalysis.object.cols(), cornersScene[2].y),
            new Point(cornersScene[3].x + objectAnalysis.object.cols(), cornersScene[3].y), lineColor, 5);
    Drawing.drawLine(output, new Point(cornersScene[3].x + objectAnalysis.object.cols(), cornersScene[3].y),
            new Point(cornersScene[0].x + objectAnalysis.object.cols(), cornersScene[0].y), lineColor, 5);
}
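The four Drawing.drawLine calls above trace the same quadrilateral edge by edge; purely as a sketch of an alternative, the loop below draws it with modular indexing, reusing output, cornersSceneMatrix, objectAnalysis, and lineColor from the method body.

    // Equivalent outline drawing, one edge per loop iteration.
    Point[] corners = cornersSceneMatrix.toArray();
    double offsetX = objectAnalysis.object.cols(); // the scene is drawn to the right of the object image
    for (int i = 0; i < corners.length; i++) {
        Point from = new Point(corners[i].x + offsetX, corners[i].y);
        Point to = new Point(corners[(i + 1) % corners.length].x + offsetX,
                corners[(i + 1) % corners.length].y);
        Drawing.drawLine(output, from, to, lineColor, 5);
    }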
Example 3: transform
import org.opencv.core.MatOfPoint2f; // import the package/class this method depends on
public static TransformResult transform(CalibrationResult calibrationResult) {
    MatOfPoint2f src = new MatOfPoint2f(
            new org.opencv.core.Point(calibrationResult.getLeftLowX(), calibrationResult.getLeftLowY()),   // tl
            new org.opencv.core.Point(calibrationResult.getLeftUpX(), calibrationResult.getLeftUpY()),     // tr
            new org.opencv.core.Point(calibrationResult.getRightLowX(), calibrationResult.getRightLowY()), // br
            new org.opencv.core.Point(calibrationResult.getRightUpX(), calibrationResult.getRightUpY())    // bl
    );
    MatOfPoint2f dst = new MatOfPoint2f(
            new org.opencv.core.Point(0, 0),     // tl
            new org.opencv.core.Point(0, 100),   // tr
            new org.opencv.core.Point(500, 0),   // br
            new org.opencv.core.Point(500, 100)  // bl
    );
    TransformResult transformResult = new TransformResult();
    MatOfPoint2f Src = new MatOfPoint2f(
            new org.opencv.core.Point(calibrationResult.getLeftLowX(), calibrationResult.getLeftLowY()),         // tl
            new org.opencv.core.Point(calibrationResult.getLeftUpRightX(), calibrationResult.getLeftUpRightY()), // tr
            new org.opencv.core.Point(calibrationResult.getRightLowX(), calibrationResult.getRightLowY()),       // br
            new org.opencv.core.Point(calibrationResult.getRightUpLeftX(), calibrationResult.getRightUpLeftY())  // bl
    );
    MatOfPoint2f Dst = new MatOfPoint2f(
            new org.opencv.core.Point(calibrationResult.getLeftLowX(), calibrationResult.getLeftLowY()),         // tl
            new org.opencv.core.Point(calibrationResult.getLeftUpRightX(), calibrationResult.getLeftUpRightY()), // tr
            new org.opencv.core.Point(calibrationResult.getRightLowX(), calibrationResult.getRightLowY()),       // br
            new org.opencv.core.Point(calibrationResult.getRightUpLeftX(), calibrationResult.getRightUpLeftY())  // bl
    );
    transformResult.m = Imgproc.getPerspectiveTransform(src, dst);
    Core.perspectiveTransform(Src, Dst, transformResult.m);
    org.opencv.core.Point[] points = Dst.toArray();
    double widthLeft = Math.abs(points[0].x - points[1].x);
    double widthRight = Math.abs(points[2].x - points[3].x);
    transformResult.blackWidth = (widthLeft + widthRight) / 2;
    return transformResult;
}
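A typical follow-up, sketched under the assumption that frame is an already-loaded camera Mat, is to apply the stored matrix transformResult.m to the whole image with the standard OpenCV warp call; the 500x100 size matches the dst rectangle defined above.

    // Warp the frame into the 500x100 destination rectangle (frame is a hypothetical input Mat).
    Mat warped = new Mat();
    Imgproc.warpPerspective(frame, warped, transformResult.m, new Size(500, 100));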
Example 4: Contour
import org.opencv.core.MatOfPoint2f; // import the package/class this method depends on
/**
 * Instantiate a contour from an OpenCV matrix of points (double)
 *
 * @param data OpenCV matrix of points
 */
public Contour(MatOfPoint2f data) {
    this.mat = new MatOfPoint(data.toArray());
}
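A hedged usage sketch, reusing contour2f and approxDistance from Example 1: the constructor leans on toArray() to bridge the float point matrix back to the integer MatOfPoint that most Imgproc contour functions expect.

    // Hypothetical usage of the Contour wrapper shown above.
    MatOfPoint2f approxCurve = new MatOfPoint2f();
    Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true); // contour2f, approxDistance as in Example 1
    Contour contour = new Contour(approxCurve); // internally: new MatOfPoint(approxCurve.toArray())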