This article collects typical usage examples of the Java method org.opencv.imgproc.Imgproc.equalizeHist. If you are wondering what Imgproc.equalizeHist does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also read more about the enclosing class, org.opencv.imgproc.Imgproc.
Below are 7 code examples of Imgproc.equalizeHist, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
Example 1: enhanceImageContrast
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
public void enhanceImageContrast() {
    // equalizeHist requires an 8-bit single-channel image, so load in grayscale
    Mat source = Imgcodecs.imread("GrayScaleParrot.png",
            Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
    Mat destination = new Mat(source.rows(), source.cols(), source.type());
    // spread the intensity histogram across the full 0-255 range
    Imgproc.equalizeHist(source, destination);
    Imgcodecs.imwrite("enhancedParrot.jpg", destination);
}
Author: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines: 8, Source: OpenCVNonMavenExamples.java
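Note that equalizeHist only accepts single-channel 8-bit input. For a color image, a common approach is to equalize only the luminance plane so the colors are not distorted. A minimal sketch of that technique, assuming the same OpenCV 3.x Java API (the file name and variable names are illustrative):

Mat color = Imgcodecs.imread("parrot.png");
Mat yCrCb = new Mat();
// move to a luma/chroma space so only brightness is touched
Imgproc.cvtColor(color, yCrCb, Imgproc.COLOR_BGR2YCrCb);
List<Mat> channels = new ArrayList<>();
Core.split(yCrCb, channels);
// equalize the Y (luminance) channel in place
Imgproc.equalizeHist(channels.get(0), channels.get(0));
Core.merge(channels, yCrCb);
Imgproc.cvtColor(yCrCb, color, Imgproc.COLOR_YCrCb2BGR);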
Example 2: preprocess
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
static Bitmap preprocess(Mat frame, int width, int height) {
    // convert to grayscale
    Mat frameGrey = new Mat(height, width, CvType.CV_8UC1);
    Imgproc.cvtColor(frame, frameGrey, Imgproc.COLOR_BGR2GRAY, 1);
    // rotate: transpose followed by a flip with code 1 (Core.ROTATE_180 == 1,
    // i.e. a flip around the y-axis) rotates the frame 90 degrees clockwise
    Mat rotatedFrame = new Mat(width, height, frameGrey.type());
    Core.transpose(frameGrey, rotatedFrame);
    Core.flip(rotatedFrame, rotatedFrame, Core.ROTATE_180);
    // resize to match the surface view
    Mat resizedFrame = new Mat(width, height, rotatedFrame.type());
    Imgproc.resize(rotatedFrame, resizedFrame, new Size(width, height));
    // crop to an elliptical region
    Mat ellipseMask = getEllipseMask(width, height);
    Mat frameCropped = new Mat(resizedFrame.rows(), resizedFrame.cols(), resizedFrame.type(), new Scalar(0));
    resizedFrame.copyTo(frameCropped, ellipseMask);
    // histogram equalisation (equalizeHist expects 8-bit single-channel input)
    Mat frameHistEq = new Mat(frameCropped.rows(), frameCropped.cols(), frameCropped.type());
    Imgproc.equalizeHist(frameCropped, frameHistEq);
    // convert back to RGBA
    Mat frameRgba = new Mat(frameHistEq.rows(), frameHistEq.cols(), CvType.CV_8UC4);
    Imgproc.cvtColor(frameHistEq, frameRgba, Imgproc.COLOR_GRAY2RGBA);
    // crop again to restore transparency outside the ellipse
    Mat frameAlpha = new Mat(frameRgba.rows(), frameRgba.cols(), CvType.CV_8UC4, new Scalar(0, 0, 0, 0));
    frameRgba.copyTo(frameAlpha, ellipseMask);
    // convert to bitmap
    Bitmap bmp = Bitmap.createBitmap(frameAlpha.cols(), frameAlpha.rows(), Bitmap.Config.ARGB_4444);
    Utils.matToBitmap(frameAlpha, bmp);
    return bmp;
}
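The getEllipseMask helper is not shown in this snippet. A minimal sketch of what such a helper might look like, assuming it returns an 8-bit single-channel mask with a filled white ellipse centred in the frame (the geometry is an assumption, not the project's actual implementation):

static Mat getEllipseMask(int width, int height) {
    // single-channel mask, black (zero) background
    Mat mask = Mat.zeros(height, width, CvType.CV_8UC1);
    // filled white ellipse centred in the frame; axes chosen for illustration
    Imgproc.ellipse(mask, new Point(width / 2.0, height / 2.0),
            new Size(width * 0.4, height * 0.45), 0, 0, 360,
            new Scalar(255), -1);
    return mask;
}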
Example 3: getOpenCvLines
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
public static List<Line> getOpenCvLines(Mat original, int scale, double minLength) {
    Mat raw = new Mat();
    Imgproc.resize(original.clone(), raw, new Size((int) (original.size().width / scale), (int) (original.size().height / scale)));
    if (raw.channels() > 1) {
        Imgproc.cvtColor(raw, raw, Imgproc.COLOR_RGB2GRAY);
    }
    Imgproc.equalizeHist(raw, raw);
    Imgproc.blur(raw, raw, new Size(3, 3));
    // Line Segment Detection
    Mat linesM1 = new Mat();
    // The original snippet left both detector constructions commented out;
    // one variant is restored here so that detector.detect(...) below compiles.
    LineSegmentDetector detector = Imgproc.createLineSegmentDetector(Imgproc.LSD_REFINE_ADV, 0.6, 0.3, 2.6, 22.5, 0, 0.3, 256);
    // LineSegmentDetector detector = Imgproc.createLineSegmentDetector(Imgproc.LSD_REFINE_STD, 0.5, 0.4, 2.0, 19.5, 0, 0.6, 32);
    // Reference for final glyph detection
    detector.detect(raw, linesM1);
    ArrayList<Line> lines = new ArrayList<Line>();
    for (int x = 0; x < linesM1.rows(); x++) {
        // each row holds one segment as [x1, y1, x2, y2]
        double[] vec = linesM1.get(x, 0);
        Point start = new Point(vec[0], vec[1]);
        Point end = new Point(vec[2], vec[3]);
        Line line = new Line(start, end);
        // scale the segment back up to the original image's coordinates
        line = new Line(new Point((int) line.x1 * scale, (int) line.y1 * scale), new Point((int) line.x2 * scale, (int) line.y2 * scale));
        if (line.length() > minLength) lines.add(line);
    }
    raw.release();
    linesM1.release();
    return lines;
}
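The Line class used above is not part of OpenCV and is not shown in the snippet. A minimal sketch of the members the code relies on (x1/y1/x2/y2 fields, a (Point, Point) constructor, and a length() method); the project's actual class may differ:

class Line {
    double x1, y1, x2, y2;

    Line(Point start, Point end) {
        this.x1 = start.x; this.y1 = start.y;
        this.x2 = end.x;   this.y2 = end.y;
    }

    double length() {
        // Euclidean length of the segment
        return Math.hypot(x2 - x1, y2 - y1);
    }
}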
Example 4: detectAndDisplay
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
/**
 * Method for face detection and tracking.
 *
 * @param frame the frame in which to look for faces
 */
private void detectAndDisplay(Mat frame)
{
    MatOfRect faces = new MatOfRect();
    Mat grayFrame = new Mat();
    // convert the frame to grayscale
    Imgproc.cvtColor(frame, grayFrame, Imgproc.COLOR_BGR2GRAY);
    // equalize the frame histogram to improve the detection result
    Imgproc.equalizeHist(grayFrame, grayFrame);
    // compute the minimum face size (20% of the frame height, in our case)
    if (this.absoluteFaceSize == 0)
    {
        int height = grayFrame.rows();
        if (Math.round(height * 0.2f) > 0)
        {
            this.absoluteFaceSize = Math.round(height * 0.2f);
        }
    }
    // detect faces
    this.faceCascade.detectMultiScale(grayFrame, faces, 1.1, 2, 0 | Objdetect.CASCADE_SCALE_IMAGE,
            new Size(this.absoluteFaceSize, this.absoluteFaceSize), new Size());
    // each rectangle in faces is a face: draw them!
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
    {
        Imgproc.rectangle(frame, facesArray[i].tl(), facesArray[i].br(), new Scalar(7, 255, 90), 4);
        System.out.println(facesArray[i].tl());
        System.out.println(facesArray[i].br());
    }
}
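The faceCascade field is assumed to be a CascadeClassifier loaded before this method runs; that setup is not part of the snippet. A minimal sketch, with the XML path as an assumption (use whichever Haar cascade file ships with your OpenCV installation):

// load a pretrained Haar cascade; the path is illustrative
this.faceCascade = new CascadeClassifier();
if (!this.faceCascade.load("haarcascade_frontalface_alt.xml")) {
    throw new IllegalStateException("Could not load the face cascade file");
}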
Example 5: computeModel
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
public void computeModel(ArrayList<MetaData> photos)
{
    numPhotos = photos.size();
    model.setNumPhotos(numPhotos);
    MatOfKeyPoint[] keypoints = new MatOfKeyPoint[numPhotos];
    Mat[] descriptors = new Mat[numPhotos];
    Mat allDescriptors = new Mat();
    ArrayList<Integer> descriptorLabels = new ArrayList<Integer>();
    // compute keypoints and descriptors
    Mat currentImg = null;
    for (int a = 0; a < numPhotos; a++)
    {
        // load the photo as a single-channel (grayscale) image
        currentImg = Highgui.imread(photos.get(a).getZooName().toString(), 0);
        Imgproc.resize(currentImg, currentImg, new Size(150, 250));
        Imgproc.equalizeHist(currentImg, currentImg);
        Imgproc.threshold(currentImg, currentImg, 127, 255, Imgproc.THRESH_BINARY);
        // the output containers must be allocated before detect/compute fill them
        keypoints[a] = new MatOfKeyPoint();
        descriptors[a] = new Mat();
        featureDetector.detect(currentImg, keypoints[a]);
        descriptorExtractor.compute(currentImg, keypoints[a], descriptors[a]);
        allDescriptors.push_back(descriptors[a]);
        for (int i = 0; i < descriptors[a].rows(); i++)
            descriptorLabels.add(a);
    }
    System.out.println("label size:" + descriptorLabels.size());
    Mat clusterLabels = new Mat();
    Mat centers = new Mat();
    // set up all descriptors, init criteria
    allDescriptors.convertTo(allDescriptors, CvType.CV_32F);
    TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 100, 0.1);
    long before = System.currentTimeMillis();
    // compute clusters
    System.out.print("creating kmeans clusters...");
    Core.kmeans(allDescriptors, k, clusterLabels, criteria, 10, Core.KMEANS_PP_CENTERS, centers);
    System.out.println("done.");
    // map k-means centroid labels to descriptors of all images
    ArrayList<ArrayList<Integer>> clusterImageMap = new ArrayList<ArrayList<Integer>>();
    for (int nk = 0; nk < k + 1; nk++)
        clusterImageMap.add(new ArrayList<Integer>());
    for (int r = 0; r < clusterLabels.rows(); r++)
        clusterImageMap.get((int) clusterLabels.get(r, 0)[0]).add(descriptorLabels.get(r));
    model.setCentroids(centers);
    model.setLabels(clusterLabels);
    model.setClusterImageMap(clusterImageMap);
    model.setKeypoints(keypoints);
    model.setDescriptors(descriptors);
}
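Global histogram equalization, as used here, can over-amplify noise in nearly uniform regions. A hedged sketch of the adaptive alternative OpenCV ships, CLAHE, assuming an 8-bit grayscale Mat named gray (the clip limit and tile grid are illustrative values, not tuned for this dataset):

// contrast-limited adaptive histogram equalization: equalizes 8x8 tiles
// and clips each tile's histogram at 2.0 to limit noise amplification
CLAHE clahe = Imgproc.createCLAHE(2.0, new Size(8, 8));
Mat result = new Mat();
clahe.apply(gray, result);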
Example 6: detectAndDisplay
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
private void detectAndDisplay(final Mat frame, boolean grayIsAlreadySelected) {
    MatOfRect faces = new MatOfRect();
    Mat grayFrame = new Mat();
    if (grayIsAlreadySelected) {
        LOGGER.warn("TODO IT :-)");
    } else {
        // convert the frame to grayscale, into ANOTHER frame
        Imgproc.cvtColor(frame, grayFrame, Imgproc.COLOR_BGR2GRAY);
    }
    // equalize the frame histogram to improve the result
    Imgproc.equalizeHist(grayFrame, grayFrame);
    // compute the minimum face size (20% of the frame height, in our case)
    if (absoluteAreaSize == 0) {
        int height = grayFrame.rows();
        if (Math.round(height * 0.2f) > 0) {
            absoluteAreaSize = Math.round(height * 0.2f);
        }
    }
    // detect faces
    /*
    The detectMultiScale function detects objects of different sizes in the input image.
    The detected objects are returned as a list of rectangles. The parameters are:
      image        Matrix of type CV_8U containing the image in which objects are detected.
      objects      Vector of rectangles where each rectangle contains a detected object.
      scaleFactor  Parameter specifying how much the image size is reduced at each image scale.
      minNeighbors Parameter specifying how many neighbors each candidate rectangle should have to be retained.
      flags        Parameter with the same meaning as in the old cvHaarDetectObjects function. Not used with a new cascade.
      minSize      Minimum possible object size. Objects smaller than this are ignored.
      maxSize      Maximum possible object size. Objects larger than this are ignored.
    So the result of the detection ends up in the objects parameter, in our case faces.
    */
    CASCADE_CLASSIFIER.detectMultiScale(grayFrame, faces, 1.1, 2,
            0 | Objdetect.CASCADE_SCALE_IMAGE, new Size(absoluteAreaSize, absoluteAreaSize), new Size());
    /*
    Each rectangle in faces is a face: draw them!
    Let's put the result in an array of Rects and draw them on the frame, so we can display the detected face area.
    We draw in green with a transparent background: Scalar(0, 255, 0, 255).
    .tl() and .br() stand for top-left and bottom-right; they represent the two opposite vertices.
    The last parameter sets the thickness of the rectangle's border.
    */
    final Rect[] facesArray = faces.toArray();
    countFaces(facesArray.length);
    for (Rect aFacesArray : facesArray)
        Imgproc.rectangle(frame, aFacesArray.tl(), aFacesArray.br(), new Scalar(0, 255, 0, 255), 3);
}
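A hedged sketch of how a method like this is typically driven from a capture loop (the camera index and loop shape are assumptions, not code from this project):

// grab frames from the default camera and run detection on each one
VideoCapture capture = new VideoCapture(0);
Mat frame = new Mat();
while (capture.read(frame)) {
    detectAndDisplay(frame, false);
    // ... hand the annotated frame to the UI here ...
}
capture.release();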
Example 7: grayEqualizeHist
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
/**
 * @Title: grayEqualizeHist
 * @Description: histogram equalization of a grayscale image
 * @param grayImg an 8-bit single-channel (grayscale) image
 * @return Mat the equalized image
 */
public static Mat grayEqualizeHist(Mat grayImg) {
    // Mat gray = new Mat();
    // Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
    Mat heqResult = new Mat(); // holds the equalized result
    Imgproc.equalizeHist(grayImg, heqResult);
    return heqResult;
}
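A short usage sketch, assuming a BGR color image on disk and the OpenCV 3.x Imgcodecs API (file names are placeholders): convert to grayscale first, as the commented-out lines above hint, since equalizeHist rejects multi-channel input.

Mat color = Imgcodecs.imread("input.jpg"); // loaded as BGR
Mat gray = new Mat();
Imgproc.cvtColor(color, gray, Imgproc.COLOR_BGR2GRAY);
Mat equalized = grayEqualizeHist(gray);
Imgcodecs.imwrite("equalized.jpg", equalized);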