本文整理汇总了Java中org.opencv.imgproc.Imgproc.dilate方法的典型用法代码示例。如果您正苦于以下问题:Java Imgproc.dilate方法的具体用法?Java Imgproc.dilate怎么用?Java Imgproc.dilate使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.opencv.imgproc.Imgproc
的用法示例。
在下文中一共展示了Imgproc.dilate方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: skinDetection
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
/**
 * Segment skin-colored regions of {@code src} and return them on a black
 * background.
 *
 * @param src input image; treated as RGB by the HSV conversion below
 * @return a new Mat of the same size/type as {@code src} containing only
 *         the pixels classified as skin (everything else is 0)
 */
public Mat skinDetection(Mat src) {
    // HSV bounds considered 'skin': H in [0, 20], S >= 48, V >= 80.
    Scalar lower = new Scalar(0, 48, 80);
    Scalar upper = new Scalar(20, 255, 255);
    // Convert to HSV. cvtColor (re)allocates its destination, so the old
    // CV_8U / Scalar(3) pre-allocation was dead code and has been dropped.
    Mat hsvFrame = new Mat();
    Imgproc.cvtColor(src, hsvFrame, Imgproc.COLOR_RGB2HSV, 3);
    // Binary mask of pixels inside the skin range (inRange allocates too).
    Mat skinMask = new Mat();
    Core.inRange(hsvFrame, lower, upper, skinMask);
    // currentSkinMask = new Mat(hsvFrame.rows(), hsvFrame.cols(), CvType.CV_8U, new Scalar(3));
    // skinMask.copyTo(currentSkinMask);
    // Erode then dilate with an elliptical kernel to remove small blobs
    // and close small holes in the mask.
    final Size kernelSize = new Size(11, 11);
    final Point anchor = new Point(-1, -1);
    final int iterations = 2;
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, kernelSize);
    Imgproc.erode(skinMask, skinMask, kernel, anchor, iterations);
    Imgproc.dilate(skinMask, skinMask, kernel, anchor, iterations);
    // Blur the mask to soften its edges before applying it to the frame.
    final Size ksize = new Size(3, 3);
    Imgproc.GaussianBlur(skinMask, skinMask, ksize, 0);
    // BUG FIX: with a mask, bitwise_and leaves dst pixels OUTSIDE the mask
    // untouched, so dst must be zero-initialized and match src's type. The
    // original created a single-channel CV_8U Mat filled with 3; the type
    // mismatch forced a reallocation whose unmasked pixels were undefined.
    Mat skin = Mat.zeros(src.size(), src.type());
    Core.bitwise_and(src, src, skin, skinMask);
    return skin;
}
示例2: FindMatch
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
/**
 * Classify a digit image with the trained k-NN model.
 * Note: {@code test_image} is preprocessed (dilated, resized, thresholded)
 * in place.
 */
int FindMatch(Mat test_image) {
    // Thicken strokes with a 3x3 cross kernel so thin digits survive resizing.
    Mat cross = Imgproc.getStructuringElement(Imgproc.CV_SHAPE_CROSS, new Size(3, 3));
    Imgproc.dilate(test_image, test_image, cross);
    // Scale to the sample dimensions used during training.
    Imgproc.resize(test_image, test_image, new Size(width, height));
    // Convert the image to grayscale
    // Imgproc.cvtColor(test_image, test_image, Imgproc.COLOR_RGB2GRAY);
    // Inverted adaptive threshold: white strokes on a black background.
    Imgproc.adaptiveThreshold(test_image, test_image, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C,
            Imgproc.THRESH_BINARY_INV, 15, 2);
    // Flatten every pixel into a single 1 x (rows*cols) float row vector,
    // the feature layout the classifier was trained on.
    Mat test = new Mat(1, test_image.rows() * test_image.cols(), CvType.CV_32FC1);
    int idx = 0;
    for (int row = 0; row < test_image.rows(); row++) {
        for (int col = 0; col < test_image.cols(); col++) {
            test.put(0, idx++, test_image.get(row, col)[0]);
        }
    }
    // k-NN vote over the 10 nearest training samples.
    Mat results = new Mat(1, 1, CvType.CV_8U);
    knn.find_nearest(test, 10, results, new Mat(), new Mat());
    Log.i("Result:", "" + results.get(0,0)[0]);
    return (int)(results.get(0,0)[0]);
}
示例3: dilate
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
/**
 * Dilate the image in place using a morphological transformation with a
 * square kernel.
 *
 * @param img    image matrix; modified in place
 * @param amount dilation radius in pixels; the kernel is
 *               (2*amount+1) x (2*amount+1), so 0 yields a 1x1 kernel
 *               (effectively a no-op)
 */
public static void dilate(Mat img, int amount) {
    // MORPH_RECT is the current name for the legacy CV_SHAPE_RECT constant
    // (both are 0), so the kernel shape is unchanged.
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT,
            new Size(2 * amount + 1, 2 * amount + 1),
            new Point(amount, amount));
    Imgproc.dilate(img, img, kernel);
}
示例4: localAdaptation
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
/**
 * Local-maximum adaptation of a luminance channel: a maximum filter
 * (implemented via dilation) smoothed by a guided image filter.
 */
private static Mat localAdaptation(Mat Lg, int rows, int cols, int r, double eps, double krnlRatio) {
    // Kernel edge length: the larger ratio-scaled dimension, floored at 3.
    int krnlSz = (int) Math.max(3.0, Math.max(rows * krnlRatio, cols * krnlRatio));
    // Dilating with a square kernel extracts each block's local maximum,
    // i.e. acts as a maximum filter (erode would give the minimum filter).
    Mat dilated = new Mat();
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(krnlSz, krnlSz), new Point(-1, -1));
    Imgproc.dilate(Lg, dilated, kernel);
    // Smooth the blocky maximum map with a guided filter steered by Lg.
    return Filters.GuidedImageFilter(Lg, dilated, r, eps);
}
示例5: process
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
/**
 * Process an RGBA image; the resulting contours can be retrieved and drawn
 * later. This method does not modify the input image.
 *
 * @param rgbaImage An RGBA image matrix
 */
public void process(Mat rgbaImage) {
// Downsample twice (1/4 size each axis) to cut per-frame processing cost.
Imgproc.pyrDown(rgbaImage, mPyrDownMat);
Imgproc.pyrDown(mPyrDownMat, mPyrDownMat);
// HSV_FULL maps hue onto the full 0-255 byte range (instead of 0-179).
Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL);
//Test whether we need two inRange operations (only if the hue crosses over 255)
if (upperBound.getScalar().val[0] <= 255) {
// Simple case: the hue interval fits inside [0, 255].
Core.inRange(mHsvMat, lowerBound.getScalar(), upperBound.getScalar(), mMask);
} else {
//We need two operations - we're going to OR the masks together
// Clone so the stored bound objects are not mutated by the wrap-around math.
Scalar lower = lowerBound.getScalar().clone();
Scalar upper = upperBound.getScalar().clone();
// Bring the upper hue back into range by subtracting full turns of 255.
while (upper.val[0] > 255)
upper.val[0] -= 255;
double tmp = lower.val[0];
lower.val[0] = 0;
//Mask 1 - from 0 to n
Core.inRange(mHsvMat, lower, upper, mMaskOne);
//Mask 2 - from 255-n to 255
lower.val[0] = tmp;
upper.val[0] = 255;
Core.inRange(mHsvMat, lower, upper, mMask);
//OR the two masks
Core.bitwise_or(mMaskOne, mMask, mMask);
}
//Dilate (blur) the mask to decrease processing power
Imgproc.dilate(mMask, mDilatedMask, new Mat());
List<MatOfPoint> contourListTemp = new ArrayList<>();
Imgproc.findContours(mDilatedMask, contourListTemp, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
// Filter contours by area and resize to fit the original image size
contours.clear();
for (MatOfPoint c : contourListTemp) {
// Multiply by 4 to undo the two pyrDown halvings of each axis.
Core.multiply(c, new Scalar(4, 4), c);
contours.add(new Contour(c));
}
}
示例6: setFilter
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
/**
 * Prepare the instance field {@code image} for feature extraction:
 * blur, adaptive-threshold, invert, then dilate in place.
 */
private void setFilter() {
    // Gaussian blur to suppress noise before thresholding.
    Imgproc.GaussianBlur(image, image, new Size(5, 5), 0);
    // Adaptive threshold (Gaussian-weighted, 7x7 neighbourhood, C = 1).
    Imgproc.adaptiveThreshold(image, image, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 7, 1);
    // Invert so the features of interest become white.
    Core.bitwise_not(image, image);
    // FIX (semantic misuse, behavior unchanged): the original passed
    // Imgproc.MORPH_DILATE - an *operation* code, value 1 - as the kernel
    // *shape*. Value 1 in the shape enum is MORPH_CROSS, so the effective
    // kernel was always a 3x3 cross; name that constant explicitly.
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3), new Point(1, 1));
    Imgproc.dilate(image, image, kernel);
}
示例7: doBackgroundRemoval
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
/**
 * Perform the operations needed for removing a uniform background.
 *
 * @param frame the current frame
 * @return an image with only foreground objects (background pixels are
 *         left white)
 */
private Mat doBackgroundRemoval(Mat frame) {
// init
Mat hsvImg = new Mat();
List<Mat> hsvPlanes = new ArrayList<>();
Mat thresholdImg = new Mat();
// UI checkbox selects whether dark or bright hues count as background.
int thresh_type = Imgproc.THRESH_BINARY_INV;
if (inverse.isSelected())
thresh_type = Imgproc.THRESH_BINARY;
// threshold the image with the average hue value
hsvImg.create(frame.size(), CvType.CV_8U);
Imgproc.cvtColor(frame, hsvImg, Imgproc.COLOR_BGR2HSV);
Core.split(hsvImg, hsvPlanes);
// get the average hue value of the image (plane 0 = hue)
double threshValue = getHistAverage(hsvImg, hsvPlanes.get(0));
// NOTE(review): maxval 179.0 (max OpenCV hue) instead of the usual 255 -
// the mask still works because copyTo only tests for nonzero, but confirm
// this was intentional before relying on the mask's pixel values.
Imgproc.threshold(hsvPlanes.get(0), thresholdImg, threshValue, 179.0, thresh_type);
Imgproc.blur(thresholdImg, thresholdImg, new Size(5, 5));
// dilate to fill gaps, erode to smooth edges
Imgproc.dilate(thresholdImg, thresholdImg, new Mat(), new Point(-1, -1), 1);
Imgproc.erode(thresholdImg, thresholdImg, new Mat(), new Point(-1, -1), 3);
// Re-threshold to make the blurred/eroded mask strictly binary again.
Imgproc.threshold(thresholdImg, thresholdImg, threshValue, 179.0, Imgproc.THRESH_BINARY);
// create the new image: white canvas, copy only the masked foreground
Mat foreground = new Mat(frame.size(), CvType.CV_8UC3, new Scalar(255, 255, 255));
frame.copyTo(foreground, thresholdImg);
return foreground;
}
示例8: FindMatch
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
/**
 * Classify a digit image with the trained SVM model.
 * Note: {@code test_image} is preprocessed (dilated, resized, grayscaled,
 * thresholded) in place.
 *
 * @param test_image the image to classify
 * @return the predicted class label
 */
int FindMatch(Mat test_image)
{
    // Thicken strokes with a 3x3 cross kernel so thin digits survive resizing.
    Imgproc.dilate(test_image, test_image, Imgproc.getStructuringElement(Imgproc.CV_SHAPE_CROSS, new Size(3,3)));
    // Resize the image to match the sample image size used in training.
    Imgproc.resize(test_image, test_image, new Size(width, height));
    // Convert the image to grayscale.
    Imgproc.cvtColor(test_image, test_image, Imgproc.COLOR_RGB2GRAY);
    // Inverted adaptive threshold: white strokes on a black background.
    Imgproc.adaptiveThreshold(test_image, test_image, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY_INV, 15, 2);
    // Flatten every pixel into a 1 x (rows*cols) float row vector.
    Mat test = new Mat(1, test_image.rows() * test_image.cols(), CvType.CV_32FC1);
    int count = 0;
    for (int i = 0; i < test_image.rows(); i++)
    {
        for (int j = 0; j < test_image.cols(); j++) {
            test.put(0, count, test_image.get(i, j)[0]);
            count++;
        }
    }
    // SVM prediction. (The original also allocated a 1x1 'results' Mat for
    // a commented-out k-NN path; that unused allocation has been removed.)
    return (int) svm.predict(test);
}
示例9: getFingers
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
public static List<Point> getFingers(Mat im, Mat hand, List<MatOfPoint> contourOutput) {
/**
 * @param: im: A YCrCb image with same shape as `hand`
 * @param: hand: A binary image indicating which pixel is part of hand
 * @param: contourOutput:
 * If is not null, contours will be saved for debug
 * @return: A list of points indicating the detected finger tip points
 * This function will not change `in` or `hand`
 * NOTE(review): despite the comment above, `hand` IS dilated in place
 * below, and the dilation happens AFTER the contours are extracted -
 * confirm whether that ordering (and the mutation) is intentional.
 */
// assert im.size().height == Config.IM_HEIGHT;
// assert im.size().height == hand.size().height
// Keep only contours whose area exceeds the configured hand minimum.
List<MatOfPoint> contours = Util.largeContours(hand, Config.HAND_AREA_MIN);
Imgproc.dilate(hand, hand, Mat.ones(new Size(5, 5), CvType.CV_8UC1));
if (contours.isEmpty()) {
return new ArrayList<>();
}
ArrayList<Point> fingerTips = new ArrayList<>();
for (int i = 0; i < contours.size(); ++i) {
// apply polygon approximation (epsilon = 5 px tolerance)
MatOfPoint cnt = contours.get(i);
double epsilon = 5;
MatOfPoint2f approx = new MatOfPoint2f(), cntCvt = new MatOfPoint2f();
// approxPolyDP requires a float-point contour, so convert back and forth.
cnt.convertTo(cntCvt, CvType.CV_32FC2);
Imgproc.approxPolyDP(cntCvt, approx, epsilon, true);
approx.convertTo(cnt, CvType.CV_32S);
// apply polygon approximation
fingerTips.addAll(findFingerTips(approx.toList(), hand));
}
if (contourOutput != null) {
contourOutput.clear();
contourOutput.addAll(contours);
}
return fingerTips;
}
示例10: refresh
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
/**
 * Grab a frame from the capture device, keep only pixels inside the
 * configured HSV range, clean the mask morphologically, and repaint.
 */
public void refresh()
{
    updateColors();
    Mat frame = new Mat(), imageHSV = new Mat(), imgMasked = new Mat(), mask = new Mat();
    cap.read(frame);
    Imgproc.cvtColor(frame, imageHSV, Imgproc.COLOR_BGR2HSV);
    // Mask pixels inside the sliders' HSV range.
    Core.inRange(imageHSV, new Scalar(iLowH, iLowS, iLowV), new Scalar(iHighH, iHighS, iHighV), mask);
    // Build the elliptical kernel once: erode/dilate do not modify it, so
    // the original four identical getStructuringElement calls were redundant.
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(morphScale, morphScale));
    // Two morphological openings (erode then dilate) remove small specks
    // while restoring the size of the surviving blobs.
    Imgproc.erode(mask, mask, kernel);
    Imgproc.dilate(mask, mask, kernel);
    Imgproc.erode(mask, mask, kernel);
    Imgproc.dilate(mask, mask, kernel);
    // Copy only the masked pixels onto a fresh image and repaint it.
    frame.copyTo(imgMasked, mask);
    drawMat = imgMasked;
    repaint();
}
示例11: onActivityResult
import org.opencv.imgproc.Imgproc; //导入方法依赖的package包/类
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent imageReturnedIntent) {
    // Always let the superclass see the result first.
    super.onActivityResult(requestCode, resultCode, imageReturnedIntent);
    switch (requestCode) {
        case SELECT_PHOTO:
            if (resultCode == RESULT_OK && read_external_storage_granted) {
                try {
                    final Uri imageUri = imageReturnedIntent.getData();
                    // BUG FIX: the input stream was never closed (resource
                    // leak). Close it best-effort once the bitmap is decoded.
                    final InputStream imageStream = getContentResolver().openInputStream(imageUri);
                    final Bitmap selectedImage;
                    try {
                        selectedImage = BitmapFactory.decodeStream(imageStream);
                    } finally {
                        try {
                            imageStream.close();
                        } catch (Exception ignored) {
                            // best-effort close; decode result is already held
                        }
                    }
                    src = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC4);
                    Utils.bitmapToMat(selectedImage, src);
                    src_gray = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC1);
                    // Apply the operation selected on the home screen.
                    switch (ACTION_MODE) {
                        case HomeActivity.GAUSSIAN_BLUR:
                            Imgproc.GaussianBlur(src, src, new Size(9, 9), 0);
                            break;
                        case HomeActivity.MEAN_BLUR:
                            Imgproc.blur(src, src, new Size(9, 9));
                            break;
                        case HomeActivity.MEDIAN_BLUR:
                            Imgproc.medianBlur(src, src, 9);
                            break;
                        case HomeActivity.SHARPEN:
                            // 3x3 sharpening kernel: center 5, cross -1.
                            Mat kernel = new Mat(3, 3, CvType.CV_16SC1);
                            //int[] values = {0, -1, 0, -1, 5, -1, 0, -1, 0};
                            Log.d("imageType", CvType.typeToString(src.type()) + "");
                            kernel.put(0, 0, 0, -1, 0, -1, 5, -1, 0, -1, 0);
                            // src_gray is freshly allocated CV_8UC1, so its
                            // depth() is CV_8U - the output stays 8-bit.
                            Imgproc.filter2D(src, src, src_gray.depth(), kernel);
                            break;
                        case HomeActivity.DILATE:
                            // NOTE(review): src is RGBA from bitmapToMat, yet
                            // COLOR_BGR2GRAY is used here - confirm intended.
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Mat kernelDilate = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
                            Imgproc.dilate(src_gray, src_gray, kernelDilate);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.ERODE:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Mat kernelErode = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
                            Imgproc.erode(src_gray, src_gray, kernelErode);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.THRESHOLD:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.ADAPTIVE_THRESHOLD:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.adaptiveThreshold(src_gray, src_gray, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 3, 0);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                    }
                    // Show the original and processed images side by side.
                    Bitmap processedImage = Bitmap.createBitmap(src.cols(), src.rows(), Bitmap.Config.ARGB_8888);
                    Log.i("imageType", CvType.typeToString(src.type()) + "");
                    Utils.matToBitmap(src, processedImage);
                    ivImage.setImageBitmap(selectedImage);
                    ivImageProcessed.setImageBitmap(processedImage);
                    Log.i("process", "process done");
                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                }
            }
            break;
    }
}