This article collects typical usage examples of the Java method org.opencv.imgproc.Imgproc.getStructuringElement. If you are wondering how Imgproc.getStructuringElement is used in Java, what it does, or where to find concrete examples, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.opencv.imgproc.Imgproc.
Below are 8 code examples of Imgproc.getStructuringElement, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
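Before the examples, a minimal sketch (not taken from any of the projects below) of the call itself: getStructuringElement takes a shape constant (Imgproc.MORPH_RECT, MORPH_CROSS or MORPH_ELLIPSE), a kernel Size, and an optional anchor Point, and returns a Mat kernel that is then passed to erode, dilate or morphologyEx.
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
// 5x5 rectangular kernel with the default (centered) anchor
Mat rect = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(5, 5));
// 7x7 elliptical kernel with an explicit anchor
Mat ellipse = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(7, 7), new Point(-1, -1));
// the kernel is then used in a morphology call, e.g. Imgproc.dilate(mask, mask, rect);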
Example 1: skinDetection
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
public Mat skinDetection(Mat src) {
    // define the upper and lower boundaries of the HSV pixel
    // intensities to be considered 'skin'
    Scalar lower = new Scalar(0, 48, 80);
    Scalar upper = new Scalar(20, 255, 255);
    // convert to HSV
    Mat hsvFrame = new Mat(src.rows(), src.cols(), CvType.CV_8U, new Scalar(3));
    Imgproc.cvtColor(src, hsvFrame, Imgproc.COLOR_RGB2HSV, 3);
    // mask the image for skin colors
    Mat skinMask = new Mat(hsvFrame.rows(), hsvFrame.cols(), CvType.CV_8U, new Scalar(3));
    Core.inRange(hsvFrame, lower, upper, skinMask);
    // currentSkinMask = new Mat(hsvFrame.rows(), hsvFrame.cols(), CvType.CV_8U, new Scalar(3));
    // skinMask.copyTo(currentSkinMask);
    // apply a series of erosions and dilations to the mask
    // using an elliptical kernel
    final Size kernelSize = new Size(11, 11);
    final Point anchor = new Point(-1, -1);
    final int iterations = 2;
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, kernelSize);
    Imgproc.erode(skinMask, skinMask, kernel, anchor, iterations);
    Imgproc.dilate(skinMask, skinMask, kernel, anchor, iterations);
    // blur the mask to help remove noise, then apply the
    // mask to the frame
    final Size ksize = new Size(3, 3);
    Mat skin = new Mat(skinMask.rows(), skinMask.cols(), CvType.CV_8U, new Scalar(3));
    Imgproc.GaussianBlur(skinMask, skinMask, ksize, 0);
    Core.bitwise_and(src, src, skin, skinMask);
    return skin;
}
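Side note (not part of the original example): an erode followed by a dilate with the same kernel and iteration count is a morphological opening, so the two calls above could equivalently be written as:
// equivalent opening, assuming the same 11x11 elliptical kernel and 2 iterations
Mat ellipticalKernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(11, 11));
Imgproc.morphologyEx(skinMask, skinMask, Imgproc.MORPH_OPEN, ellipticalKernel, new Point(-1, -1), 2);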
Example 2: erode
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
/**
 * Erode the image using morphological transformations.
 *
 * @param img    Image matrix
 * @param amount Amount to erode; the kernel is (2 * amount + 1) pixels square, so 0 leaves the image unchanged
 */
public static void erode(Mat img, int amount) {
    Mat kernel = Imgproc.getStructuringElement(Imgproc.CV_SHAPE_RECT,
            new Size(2 * amount + 1, 2 * amount + 1),
            new Point(amount, amount));
    Imgproc.erode(img, img, kernel);
}
Example 3: dilate
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
/**
 * Dilate the image using morphological transformations.
 *
 * @param img    Image matrix
 * @param amount Amount to dilate; the kernel is (2 * amount + 1) pixels square, so 0 leaves the image unchanged
 */
public static void dilate(Mat img, int amount) {
    Mat kernel = Imgproc.getStructuringElement(Imgproc.CV_SHAPE_RECT,
            new Size(2 * amount + 1, 2 * amount + 1),
            new Point(amount, amount));
    Imgproc.dilate(img, img, kernel);
}
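A brief usage note (not from the original sources): both helpers build a square rectangular kernel of side 2 * amount + 1 anchored at (amount, amount) and modify the Mat in place, so a hypothetical caller might write:
// 'binaryMask' is an assumed 8-bit single-channel image prepared elsewhere
erode(binaryMask, 2);   // shrink blobs with a 5x5 rectangular kernel
dilate(binaryMask, 2);  // grow them back with a kernel of the same size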
Example 4: localAdaptation
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
private static Mat localAdaptation(Mat Lg, int rows, int cols, int r, double eps, double krnlRatio) {
    int krnlSz = Stream.of(3.0, rows * krnlRatio, cols * krnlRatio).max(Double::compare).orElse(3.0).intValue();
    // maximum filter: dilation extracts the local maximum of an image block, so it acts as the maximum filter;
    // a minimum filter can likewise be obtained with erode
    Mat Lg_ = new Mat();
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(krnlSz, krnlSz), new Point(-1, -1));
    Imgproc.dilate(Lg, Lg_, kernel);
    // guided image filter
    return Filters.GuidedImageFilter(Lg, Lg_, r, eps);
}
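As the comment above notes, the matching minimum filter is simply erosion with the same rectangular kernel; a minimal sketch (not part of the original method):
// local-minimum counterpart of the dilation above, reusing the same kernel
Mat LgMin = new Mat();
Imgproc.erode(Lg, LgMin, kernel);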
Example 5: setFilter
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
private void setFilter() {
    // apply Gaussian blur to remove noise
    Imgproc.GaussianBlur(image, image, new Size(5, 5), 0);
    // threshold
    Imgproc.adaptiveThreshold(image, image, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 7, 1);
    // invert the image
    Core.bitwise_not(image, image);
    // dilate. Note: the original passed Imgproc.MORPH_DILATE here, which is a morphologyEx operation code,
    // not a kernel shape; its value equals MORPH_CROSS, so the explicit shape constant is used instead
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3), new Point(1, 1));
    Imgproc.dilate(image, image, kernel);
}
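For reference, Imgproc.MORPH_DILATE belongs to the operation parameter of Imgproc.morphologyEx rather than to getStructuringElement; a minimal sketch of the same dilation expressed that way (an illustration, not code from the original project):
Mat crossKernel = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3), new Point(1, 1));
Imgproc.morphologyEx(image, image, Imgproc.MORPH_DILATE, crossKernel);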
Example 6: apply_filters
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
public Mat apply_filters(Mat input_mat) {
    Mat blurred = new Mat();
    Mat smoothed = new Mat();
    Mat edges = new Mat();
    // light Gaussian blur to suppress noise before edge detection
    Imgproc.GaussianBlur(input_mat, blurred, new Size(3, 3), 1);
    // bilateralFilter does not support in-place operation, so write to a separate Mat
    Imgproc.bilateralFilter(blurred, smoothed, 11, 17, 17);
    Imgproc.Canny(smoothed, edges, 20, 50);
    // close gaps in the edge map with a large rectangular kernel
    Mat structure = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(45, 45));
    Imgproc.morphologyEx(edges, edges, Imgproc.MORPH_CLOSE, structure);
    return edges;
}
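A common follow-up, shown here only as a hedged sketch of how the returned edge map might be consumed (the call site, plus List, ArrayList and MatOfPoint imports, are assumptions, not part of the original method):
Mat edges = apply_filters(input_mat);
List<MatOfPoint> contours = new ArrayList<>();
Mat hierarchy = new Mat();
Imgproc.findContours(edges, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);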
Example 7: enhance
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
public static Mat enhance(Mat image, double krnlRatio, double minAtmosLight, double eps) {
    image.convertTo(image, CvType.CV_32F);
    // extract each color channel
    List<Mat> rgb = new ArrayList<>();
    Core.split(image, rgb);
    Mat rChannel = rgb.get(0);
    Mat gChannel = rgb.get(1);
    Mat bChannel = rgb.get(2);
    int rows = rChannel.rows();
    int cols = rChannel.cols();
    // derive the dark channel from the original image
    Mat dc = rChannel.clone();
    for (int i = 0; i < image.rows(); i++) {
        for (int j = 0; j < image.cols(); j++) {
            double min = Math.min(rChannel.get(i, j)[0], Math.min(gChannel.get(i, j)[0], bChannel.get(i, j)[0]));
            dc.put(i, j, min);
        }
    }
    // minimum filter
    int krnlSz = Double.valueOf(Math.max(Math.max(rows * krnlRatio, cols * krnlRatio), 3.0)).intValue();
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(krnlSz, krnlSz), new Point(-1, -1));
    Imgproc.erode(dc, dc, kernel);
    // get coarse transmission map
    Mat t = dc.clone();
    Core.subtract(t, new Scalar(255.0), t);
    Core.multiply(t, new Scalar(-1.0), t);
    Core.divide(t, new Scalar(255.0), t);
    // obtain grayscale image
    Mat gray = new Mat();
    Imgproc.cvtColor(image, gray, Imgproc.COLOR_RGB2GRAY);
    Core.divide(gray, new Scalar(255.0), gray);
    // refine transmission map
    int r = krnlSz * 4;
    t = Filters.GuidedImageFilter(gray, t, r, eps);
    // get minimum atmospheric light
    minAtmosLight = Math.min(minAtmosLight, Core.minMaxLoc(dc).maxVal);
    // dehaze each color channel
    rChannel = dehaze(rChannel, t, minAtmosLight);
    gChannel = dehaze(gChannel, t, minAtmosLight);
    bChannel = dehaze(bChannel, t, minAtmosLight);
    // merge the three color channels into one image
    Mat outval = new Mat();
    Core.merge(new ArrayList<>(Arrays.asList(rChannel, gChannel, bChannel)), outval);
    outval.convertTo(outval, CvType.CV_8UC1);
    return outval;
}
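The dehaze() helper is not shown on this page. Purely as a hypothetical sketch, assuming it follows the standard dark-channel-prior recovery J = (I - A) / max(t, t0) + A on the 32-bit float channels, it might look like the following (the helper body and the clamp value 0.1 are assumptions, not the repository's actual code):
private static Mat dehaze(Mat channel, Mat t, double minAtmosLight) {
    // clamp the transmission so dark regions are not divided by near-zero values (t0 = 0.1 assumed)
    Mat tClamped = new Mat();
    Core.max(t, new Scalar(0.1), tClamped);
    // J = (I - A) / t + A, applied element-wise to one channel
    Mat out = new Mat();
    Core.subtract(channel, new Scalar(minAtmosLight), out);
    Core.divide(out, tClamped, out);
    Core.add(out, new Scalar(minAtmosLight), out);
    return out;
}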
Example 8: onActivityResult
import org.opencv.imgproc.Imgproc; // import the package/class this method depends on
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent imageReturnedIntent) {
    // put it there, just in case :)
    super.onActivityResult(requestCode, resultCode, imageReturnedIntent);
    switch (requestCode) {
        case SELECT_PHOTO:
            if (resultCode == RESULT_OK && read_external_storage_granted) {
                try {
                    final Uri imageUri = imageReturnedIntent.getData();
                    final InputStream imageStream = getContentResolver().openInputStream(imageUri);
                    final Bitmap selectedImage = BitmapFactory.decodeStream(imageStream);
                    src = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC4);
                    Utils.bitmapToMat(selectedImage, src);
                    src_gray = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC1);
                    switch (ACTION_MODE) {
                        case HomeActivity.GAUSSIAN_BLUR:
                            Imgproc.GaussianBlur(src, src, new Size(9, 9), 0);
                            break;
                        case HomeActivity.MEAN_BLUR:
                            Imgproc.blur(src, src, new Size(9, 9));
                            break;
                        case HomeActivity.MEDIAN_BLUR:
                            Imgproc.medianBlur(src, src, 9);
                            break;
                        case HomeActivity.SHARPEN:
                            Mat kernel = new Mat(3, 3, CvType.CV_16SC1);
                            //int[] values = {0, -1, 0, -1, 5, -1, 0, -1, 0};
                            Log.d("imageType", CvType.typeToString(src.type()) + "");
                            kernel.put(0, 0, 0, -1, 0, -1, 5, -1, 0, -1, 0);
                            Imgproc.filter2D(src, src, src_gray.depth(), kernel);
                            break;
                        case HomeActivity.DILATE:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Mat kernelDilate = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
                            Imgproc.dilate(src_gray, src_gray, kernelDilate);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.ERODE:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Mat kernelErode = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
                            Imgproc.erode(src_gray, src_gray, kernelErode);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.THRESHOLD:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.ADAPTIVE_THRESHOLD:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.adaptiveThreshold(src_gray, src_gray, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 3, 0);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                    }
                    Bitmap processedImage = Bitmap.createBitmap(src.cols(), src.rows(), Bitmap.Config.ARGB_8888);
                    Log.i("imageType", CvType.typeToString(src.type()) + "");
                    Utils.matToBitmap(src, processedImage);
                    ivImage.setImageBitmap(selectedImage);
                    ivImageProcessed.setImageBitmap(processedImage);
                    Log.i("process", "process done");
                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                }
            }
            break;
    }
}