This page collects typical usage examples of the Java class org.opencv.core.Core. If you are wondering what the Core class is for, how to use it, or what real-world usage looks like, the selected code examples below may help.
The Core class belongs to the org.opencv.core package. A total of 15 code examples of the Core class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: transEstimate
import org.opencv.core.Core; // import the required package/class
public static Mat transEstimate(Mat img, int patchSz, double[] airlight, double lambda, double fTrans,
        int r, double eps, double gamma) {
    int rows = img.rows();
    int cols = img.cols();
    List<Mat> bgr = new ArrayList<>();
    Core.split(img, bgr);
    int type = bgr.get(0).type();
    // calculate the transmission map
    Mat T = computeTrans(img, patchSz, rows, cols, type, airlight, lambda, fTrans);
    // refine the transmission map
    img.convertTo(img, CvType.CV_8UC1);
    Mat gray = new Mat();
    Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
    gray.convertTo(gray, CvType.CV_32F);
    Core.divide(gray, new Scalar(255.0), gray);
    T = Filters.GuidedImageFilter(gray, T, r, eps);
    Mat Tsmooth = new Mat();
    Imgproc.GaussianBlur(T, Tsmooth, new Size(81, 81), 40);
    Mat Tdetails = new Mat();
    Core.subtract(T, Tsmooth, Tdetails);
    Core.multiply(Tdetails, new Scalar(gamma), Tdetails);
    Core.add(Tsmooth, Tdetails, T);
    return T;
}
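A minimal, hypothetical caller for the method above (it assumes the surrounding class also provides computeTrans and Filters.GuidedImageFilter); the file name, airlight values, and parameter choices are illustrative only:
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
Mat hazy = Imgcodecs.imread("hazy.jpg");                  // 8-bit BGR input (illustrative file name)
hazy.convertTo(hazy, CvType.CV_32F);                      // the method splits float channels
double[] airlight = {220.0, 225.0, 230.0};                // assumed atmospheric light per channel
Mat trans = transEstimate(hazy, 15, airlight, 0.001, 0.3, 40, 1e-3, 0.8);
Core.multiply(trans, new Scalar(255.0), trans);           // scale the [0, 1] map for saving
Imgcodecs.imwrite("transmission.png", trans);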
Example 2: main
import org.opencv.core.Core; // import the required package/class
public static void main(String[] args) throws Exception {
    FlashUtil.setLog(log);
    log.log("Loading settings and properties...", "Dashboard");
    validateBasicHierarcy();
    loadSettings();
    validateBasicSettings();
    printSettings();
    log.log("Done", "Dashboard");
    setupValuePath();
    log.log("FlashLib version: " + FlashUtil.VERSION, "Dashboard");
    log.log("Loading opencv natives: " + Core.NATIVE_LIBRARY_NAME + " ...", "Dashboard");
    loadValueLibrary(Core.NATIVE_LIBRARY_NAME);
    log.log("opencv version: " + Core.VERSION, "Dashboard");
    log.log("Creating shutdown hook...", "Dashboard");
    Runtime.getRuntime().addShutdownHook(new Thread(() -> close()));
    log.log("Done", "Dashboard");
    log.save();
    initStart();
    log.log("Launching FX...", "Dashboard");
    launch();
}
Example 3: measure
import org.opencv.core.Core; // import the required package/class
public void measure() {
    if (!mIsInitialized) {
        init();
        mIsInitialized = true;
    } else {
        mFramesCouner++;
        if (mFramesCouner % STEP == 0) {
            long time = Core.getTickCount();
            double fps = STEP * mFrequency / (time - mprevFrameTime);
            mprevFrameTime = time;
            if (mWidth != 0 && mHeight != 0)
                mStrfps = FPS_FORMAT.format(fps) + " FPS@" + Integer.valueOf(mWidth) + "x" + Integer.valueOf(mHeight);
            else
                mStrfps = FPS_FORMAT.format(fps) + " FPS";
            Log.i(TAG, mStrfps);
        }
    }
}
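The member fields and init() that this FPS meter relies on are not shown in the snippet; the following is a plausible reconstruction (an assumption, not the original code), requiring java.text.DecimalFormat and android.util.Log:
// Assumed state for the measure() method above; a reconstruction, not the original.
private static final int STEP = 20;                      // report every 20 frames (assumed)
private static final DecimalFormat FPS_FORMAT = new DecimalFormat("0.00");
private int mFramesCouner;
private double mFrequency;
private long mprevFrameTime;
private boolean mIsInitialized;
private int mWidth, mHeight;
private String mStrfps;

private void init() {
    mFramesCouner = 0;
    mFrequency = Core.getTickFrequency();                // ticks per second
    mprevFrameTime = Core.getTickCount();
    mStrfps = "";
}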
Example 4: run
import org.opencv.core.Core; // import the required package/class
public void run() {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    String base = "C:/Books in Progress/Java for Data Science/Chapter 10/OpenCVExamples/src/resources";
    CascadeClassifier faceDetector =
            new CascadeClassifier(base + "/lbpcascade_frontalface.xml");
    Mat image = Imgcodecs.imread(base + "/images.jpg");
    MatOfRect faceVectors = new MatOfRect();
    faceDetector.detectMultiScale(image, faceVectors);
    out.println(faceVectors.toArray().length + " faces found");
    for (Rect rect : faceVectors.toArray()) {
        Imgproc.rectangle(image, new Point(rect.x, rect.y),
                new Point(rect.x + rect.width, rect.y + rect.height),
                new Scalar(0, 255, 0));
    }
    Imgcodecs.imwrite("faceDetection.png", image);
}
Author: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines of code: 21, Source file: DetectFaceDemo.java
Example 5: staticFace
import org.opencv.core.Core; // import the required package/class
public static void staticFace(String input, String output) {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    System.out.println("\nRunning FaceDetector");
    final CascadeClassifier faceDetector = new CascadeClassifier(StaticFacialRecognition.class
            .getResource("../../../../../opencv/sources/data/haarcascades_cuda/haarcascade_frontalface_alt.xml")
            .getPath().substring(1));
    final Mat image = Imgcodecs.imread(input);
    // alternatively: StaticFacialRecognition.class.getResource(input).getPath().substring(1)
    final MatOfRect faceDetections = new MatOfRect();
    faceDetector.detectMultiScale(image, faceDetections);
    System.out.println(String.format("Detected %s faces", faceDetections.toArray().length));
    for (final Rect rect : faceDetections.toArray()) {
        Imgproc.rectangle(image, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height),
                new Scalar(0, 255, 0));
    }
    System.out.println(String.format("Writing %s", output));
    Imgcodecs.imwrite(output, image);
    // alternatively: StaticFacialRecognition.class.getResource("").getPath().substring(1) + output
}
Example 6: sharpenImage
import org.opencv.core.Core; // import the required package/class
public void sharpenImage() {
    String fileName = "SharpnessExample2.png";
    fileName = "smoothCat.jpg";
    fileName = "blurredText.jpg";
    fileName = "Blurred Text3.jpg";
    try {
        // Not working that well !!!
        Mat source = Imgcodecs.imread(fileName,
                // Imgcodecs.CV_LOAD_IMAGE_COLOR);
                Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
        Mat destination = new Mat(source.rows(), source.cols(), source.type());
        Imgproc.GaussianBlur(source, destination, new Size(0, 0), 10);
        // The following was used with the cat
        // Core.addWeighted(source, 1.5, destination, -0.75, 0, destination);
        // Core.addWeighted(source, 2.5, destination, -1.5, 0, destination);
        Core.addWeighted(source, 1.5, destination, -0.75, 0, destination);
        Imgcodecs.imwrite("sharpenedCat.jpg", destination);
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
Author: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines of code: 22, Source file: OpenCVNonMavenExamples.java
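The example is an unsharp mask: the image is blurred, and the blur is subtracted from the original with a weight. Below is a small self-contained helper distilled from it; the weighting scheme and parameter names are illustrative, not taken from the book's code:
// Unsharp masking: sharpened = (1 + amount) * src - amount * blurred (illustrative weights)
public static Mat unsharpMask(Mat src, double sigma, double amount) {
    Mat blurred = new Mat();
    Imgproc.GaussianBlur(src, blurred, new Size(0, 0), sigma);   // sigma controls the blur radius
    Mat sharpened = new Mat();
    Core.addWeighted(src, 1 + amount, blurred, -amount, 0, sharpened);
    return sharpened;
}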
Example 7: imagePadding
import org.opencv.core.Core; // import the required package/class
/**
 * Apply padding to the image.
 */
private Mat imagePadding(Mat source, int blockSize) {
    int width = source.width();
    int height = source.height();
    int bottomPadding = 0;
    int rightPadding = 0;
    if (height % blockSize != 0) {
        bottomPadding = blockSize - (height % blockSize);
    }
    if (width % blockSize != 0) {
        rightPadding = blockSize - (width % blockSize);
    }
    Core.copyMakeBorder(source, source, 0, bottomPadding, 0, rightPadding, Core.BORDER_CONSTANT, Scalar.all(0));
    return source;
}
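A hypothetical caller, assuming access to the method above; the file name and block size are illustrative. After padding, both dimensions are multiples of the block size expected by a block-wise filter:
Mat src = Imgcodecs.imread("fingerprint.png", Imgcodecs.IMREAD_GRAYSCALE);   // illustrative file
int blockSize = 16;
Mat padded = imagePadding(src, blockSize);
System.out.println(padded.width() + "x" + padded.height());   // both now multiples of 16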
Example 8: averageColor
import org.opencv.core.Core; // import the required package/class
/**
 * Gets the average color of the object
 *
 * @param img The image matrix, of any color size
 * @param imgSpace The image's color space
 * @return The average color of the region
 */
public Color averageColor(Mat img, ColorSpace imgSpace) {
    //Coerce values to stay within screen dimensions
    double leftX = MathUtil.coerce(0, img.cols() - 1, left());
    double rightX = MathUtil.coerce(0, img.cols() - 1, right());
    double topY = MathUtil.coerce(0, img.rows() - 1, top());
    double bottomY = MathUtil.coerce(0, img.rows() - 1, bottom());
    //Input points into array for calculation
    //TODO rectangular submatrix-based calculation isn't perfectly accurate when you have ellipses or weird shapes
    Mat subMat = img.submat((int) topY, (int) bottomY, (int) leftX, (int) rightX);
    //Calculate average and return new color instance
    return Color.create(Core.mean(subMat), imgSpace);
}
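The OpenCV part of the method above, shown standalone: Core.mean over a rectangular sub-matrix returns the per-channel average of that region (file name and coordinates are illustrative):
Mat img = Imgcodecs.imread("scene.jpg");
Mat region = img.submat(100, 200, 50, 150);   // rowStart, rowEnd, colStart, colEnd
Scalar avg = Core.mean(region);               // per-channel (BGR) means of the region
System.out.println("Average color (BGR): " + avg);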
Example 9: applyCLAHE
import org.opencv.core.Core; // import the required package/class
private static Mat[] applyCLAHE(Mat img, Mat L) {
    Mat[] result = new Mat[2];
    CLAHE clahe = Imgproc.createCLAHE();
    clahe.setClipLimit(2.0);
    Mat L2 = new Mat();
    clahe.apply(L, L2);
    Mat LabIm2 = new Mat();
    List<Mat> lab = new ArrayList<Mat>();
    Core.split(img, lab);
    Core.merge(new ArrayList<Mat>(Arrays.asList(L2, lab.get(1), lab.get(2))), LabIm2);
    Mat img2 = new Mat();
    Imgproc.cvtColor(LabIm2, img2, Imgproc.COLOR_Lab2BGR);
    result[0] = img2;
    result[1] = L2;
    return result;
}
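A plausible caller: the method expects a Lab image plus its L channel, so convert from BGR and split first (file names illustrative):
Mat bgr = Imgcodecs.imread("underexposed.jpg");
Mat lab = new Mat();
Imgproc.cvtColor(bgr, lab, Imgproc.COLOR_BGR2Lab);
List<Mat> channels = new ArrayList<>();
Core.split(lab, channels);
Mat[] result = applyCLAHE(lab, channels.get(0));   // [0] = contrast-enhanced BGR image, [1] = equalized L channel
Imgcodecs.imwrite("equalized.png", result[0]);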
Example 10: process
import org.opencv.core.Core; // import the required package/class
@Override
public void process(Mat input, Mat mask) {
    Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2HSV_FULL);
    Imgproc.GaussianBlur(input, input, new Size(3, 3), 0);
    Scalar lower = new Scalar(perfect.val[0] - range.val[0], perfect.val[1] - range.val[1], perfect.val[2] - range.val[2]);
    Scalar upper = new Scalar(perfect.val[0] + range.val[0], perfect.val[1] + range.val[1], perfect.val[2] + range.val[2]);
    Core.inRange(input, lower, upper, mask);
    input.release();
}
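The same idea in standalone form: threshold an HSV image around a target color with Core.inRange; the bounds and file names below are illustrative:
Mat bgr = Imgcodecs.imread("frame.png");
Mat hsv = new Mat();
Imgproc.cvtColor(bgr, hsv, Imgproc.COLOR_BGR2HSV_FULL);
Mat mask = new Mat();
Core.inRange(hsv, new Scalar(90, 120, 120), new Scalar(130, 255, 255), mask);   // 255 where the pixel is inside the range
Imgcodecs.imwrite("mask.png", mask);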
Example 11: main
import org.opencv.core.Core; // import the required package/class
public static void main(String args[]) {
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    CascadeClassifier faceCascade = new CascadeClassifier(xmlFile);
    if (!faceCascade.load(xmlFile)) {
        System.out.println("Error Loading XML File");
    } else {
        System.out.println("Success Loading XML");
    }
    // invokeLater enables Swing threads to operate;
    // it is not actually going to run this method later
    EventQueue.invokeLater(new Runnable() {
        public void run() {
            try {
                FaceTrackMain frame = new FaceTrackMain();
                frame.setVisible(true);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
}
Example 12: getInputDataLeNet
import org.opencv.core.Core; // import the required package/class
private float[] getInputDataLeNet(Bitmap bitmap) {
    final int INPUT_LENGTH = 28;
    Mat imageMat = new Mat();
    Mat inputMat = new Mat();
    Utils.bitmapToMat(bitmap, imageMat);
    // convert the image to 28 x 28 grayscale in [0, 1], where smaller values mean whiter pixels
    Imgproc.cvtColor(imageMat, imageMat, Imgproc.COLOR_RGBA2GRAY);
    imageMat = centerCropAndScale(imageMat, INPUT_LENGTH);
    imageMat.convertTo(imageMat, CvType.CV_32F, 1. / 255);
    Core.subtract(Mat.ones(imageMat.size(), CvType.CV_32F), imageMat, inputMat);
    float[] inputData = new float[inputMat.width() * inputMat.height()];
    inputMat.get(0, 0, inputData);
    return inputData;
}
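centerCropAndScale() is not part of the snippet; a plausible reconstruction, assuming a square center crop followed by a resize to length x length (hypothetical helper, not the original):
private Mat centerCropAndScale(Mat src, int length) {
    int side = Math.min(src.rows(), src.cols());                 // largest centered square
    int rowStart = (src.rows() - side) / 2;
    int colStart = (src.cols() - side) / 2;
    Mat cropped = src.submat(rowStart, rowStart + side, colStart, colStart + side);
    Mat scaled = new Mat();
    Imgproc.resize(cropped, scaled, new Size(length, length));   // scale down to the network input size
    return scaled;
}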
Example 13: calculateScore
import org.opencv.core.Core; // import the required package/class
private static double calculateScore(Mat im) {
    MatOfDouble mean = new MatOfDouble();
    MatOfDouble std = new MatOfDouble();
    Core.meanStdDev(im, mean, std);
    double[] means = mean.get(0, 0);
    double[] stds = std.get(0, 0);
    double score = 0.0;
    for (int i = 0; i < means.length; i++) score += means[i] - stds[i];
    return score;
}
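A hypothetical caller: the score (mean minus standard deviation) can serve as a rough exposure/contrast heuristic, typically on a grayscale image (file name illustrative):
Mat gray = Imgcodecs.imread("photo.jpg", Imgcodecs.IMREAD_GRAYSCALE);
double score = calculateScore(gray);   // higher when the image is bright with low spread
System.out.println("score = " + score);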
Example 14: drawMatches
import org.opencv.core.Core; // import the required package/class
static Mat drawMatches(Mat img1, MatOfKeyPoint key1, Mat img2, MatOfKeyPoint key2, MatOfDMatch matches, boolean imageOnly) {
    //https://github.com/mustafaakin/image-matcher/tree/master/src/in/mustafaak/imagematcher
    Mat out = new Mat();
    Mat im1 = new Mat();
    Mat im2 = new Mat();
    Imgproc.cvtColor(img1, im1, Imgproc.COLOR_GRAY2RGB);
    Imgproc.cvtColor(img2, im2, Imgproc.COLOR_GRAY2RGB);
    if (imageOnly) {
        MatOfDMatch emptyMatch = new MatOfDMatch();
        MatOfKeyPoint emptyKey1 = new MatOfKeyPoint();
        MatOfKeyPoint emptyKey2 = new MatOfKeyPoint();
        Features2d.drawMatches(im1, emptyKey1, im2, emptyKey2, emptyMatch, out);
    } else {
        Features2d.drawMatches(im1, key1, im2, key2, matches, out);
    }
    //Bitmap bmp = Bitmap.createBitmap(out.cols(), out.rows(), Bitmap.Config.ARGB_8888);
    Imgproc.cvtColor(out, out, Imgproc.COLOR_BGR2RGB);
    Imgproc.putText(out, "Frame", new Point(img1.width() / 2, 30), Core.FONT_HERSHEY_PLAIN, 2, new Scalar(0, 255, 255), 3);
    Imgproc.putText(out, "Match", new Point(img1.width() + img2.width() / 2, 30), Core.FONT_HERSHEY_PLAIN, 2, new Scalar(255, 0, 0), 3);
    return out;
}
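A hypothetical way to produce the keypoint and match inputs for drawMatches() above, using ORB features and a brute-force Hamming matcher (file names illustrative):
Mat img1 = Imgcodecs.imread("frame.png", Imgcodecs.IMREAD_GRAYSCALE);
Mat img2 = Imgcodecs.imread("match.png", Imgcodecs.IMREAD_GRAYSCALE);
ORB orb = ORB.create();
MatOfKeyPoint key1 = new MatOfKeyPoint();
MatOfKeyPoint key2 = new MatOfKeyPoint();
Mat desc1 = new Mat();
Mat desc2 = new Mat();
orb.detectAndCompute(img1, new Mat(), key1, desc1);   // empty Mat = no mask
orb.detectAndCompute(img2, new Mat(), key2, desc2);
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
MatOfDMatch matches = new MatOfDMatch();
matcher.match(desc1, desc2, matches);
Mat visual = drawMatches(img1, key1, img2, key2, matches, false);
Imgcodecs.imwrite("matches.png", visual);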
Example 15: LocalContrast
import org.opencv.core.Core; // import the required package/class
public static Mat LocalContrast(Mat img) {
    double[] h = { 1.0 / 16.0, 4.0 / 16.0, 6.0 / 16.0, 4.0 / 16.0, 1.0 / 16.0 };
    Mat mask = new Mat(h.length, h.length, img.type());
    for (int i = 0; i < h.length; i++) {
        for (int j = 0; j < h.length; j++) {
            mask.put(i, j, h[i] * h[j]);
        }
    }
    Mat localContrast = new Mat();
    Imgproc.filter2D(img, localContrast, img.depth(), mask);
    for (int i = 0; i < localContrast.rows(); i++) {
        for (int j = 0; j < localContrast.cols(); j++) {
            if (localContrast.get(i, j)[0] > Math.PI / 2.75) localContrast.put(i, j, Math.PI / 2.75);
        }
    }
    Core.subtract(img, localContrast, localContrast);
    return localContrast.mul(localContrast);
}
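A hypothetical caller: the method is typically fed a single-channel float image normalized to [0, 1], such as a luminance channel (file name illustrative):
Mat bgr = Imgcodecs.imread("input.jpg");
Mat gray = new Mat();
Imgproc.cvtColor(bgr, gray, Imgproc.COLOR_BGR2GRAY);
gray.convertTo(gray, CvType.CV_32F, 1.0 / 255);
Mat weight = LocalContrast(gray);   // squared local-contrast weight map, same size as the input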