Java Utils Class Code Examples

This article collects typical usage examples of the Java class org.opencv.android.Utils. If you are wondering what the Utils class is for, how to use it, or what working examples look like, the curated code samples below may help.

The Utils class belongs to the org.opencv.android package. A total of 15 code examples of the Utils class are shown below, sorted by popularity by default.
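Almost every example below follows the same pattern: Utils.bitmapToMat() copies an Android Bitmap into an OpenCV Mat (as 4-channel RGBA data), some Imgproc/Core processing is applied, and Utils.matToBitmap() copies the result back into a Bitmap for display. As a quick orientation, here is a minimal sketch of that round trip; it is not taken from any of the projects below, and the method name and blur parameters are placeholders:

import android.graphics.Bitmap;

import org.opencv.android.Utils;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;

Bitmap processBitmap(Bitmap input) {
    // Bitmap -> Mat: bitmapToMat produces a CV_8UC4 (RGBA) Mat
    Mat rgba = new Mat();
    Utils.bitmapToMat(input, rgba);

    // Any OpenCV processing goes here; a simple blur is used as a stand-in
    Imgproc.GaussianBlur(rgba, rgba, new Size(5, 5), 0);

    // Mat -> Bitmap: the target Bitmap must have the same size as the Mat
    Bitmap output = Bitmap.createBitmap(rgba.cols(), rgba.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(rgba, output);
    return output;
}

Utils.matToBitmap accepts CV_8UC1, CV_8UC3, or CV_8UC4 Mats, which is why several examples below convert a single-channel result straight back to a Bitmap without first converting it to RGBA.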

Example 1: HOGDescriptor

import org.opencv.android.Utils; //import the required package/class
void HOGDescriptor() {
    Mat grayMat = new Mat();
    Mat people = new Mat();

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    HOGDescriptor hog = new HOGDescriptor();
    hog.setSVMDetector(HOGDescriptor.getDefaultPeopleDetector());

    MatOfRect faces = new MatOfRect();
    MatOfDouble weights = new MatOfDouble();

    hog.detectMultiScale(grayMat, faces, weights);
    originalMat.copyTo(people);
    //Draw a rectangle around each detection (the default HOG SVM detects pedestrians, not faces)
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
        Imgproc.rectangle(people, facesArray[i].tl(), facesArray[i].br(), new Scalar(100), 3);

    //Converting Mat back to Bitmap
    Utils.matToBitmap(people, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Developer: johnhany, Project: MOAAP, Lines of code: 25, Source: MainActivity.java

Example 2: HoughLines

import org.opencv.android.Utils; //import the required package/class
void HoughLines() {

        Mat grayMat = new Mat();
        Mat cannyEdges = new Mat();
        Mat lines = new Mat();

        //Converting the image to grayscale
        Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

        Imgproc.Canny(grayMat, cannyEdges, 10, 100);

        Imgproc.HoughLinesP(cannyEdges, lines, 1, Math.PI / 180, 50, 20, 20);

        Mat houghLines = new Mat();
        houghLines.create(cannyEdges.rows(), cannyEdges.cols(), CvType.CV_8UC1);

        //Drawing lines on the image (HoughLinesP returns one detected line per row, so iterate over rows)
        for (int i = 0; i < lines.rows(); i++) {
            double[] points = lines.get(i, 0);
            double x1, y1, x2, y2;

            x1 = points[0];
            y1 = points[1];
            x2 = points[2];
            y2 = points[3];

            Point pt1 = new Point(x1, y1);
            Point pt2 = new Point(x2, y2);

            //Drawing lines on an image
            Imgproc.line(houghLines, pt1, pt2, new Scalar(255, 0, 0), 1);
        }

        //Converting Mat back to Bitmap
        Utils.matToBitmap(houghLines, currentBitmap);
        imageView.setImageBitmap(currentBitmap);

    }
 
Developer: johnhany, Project: MOAAP, Lines of code: 39, Source: MainActivity.java

Example 3: bytesToMat

import org.opencv.android.Utils; //import the required package/class
private Mat bytesToMat(byte[] data) {
    // Scale down the image for performance
    Bitmap bmp = BitmapFactory.decodeByteArray(data, 0, data.length);
    int targetWidth = 1200;
    if (bmp.getWidth() > targetWidth) {
        float scaleDownFactor = (float)targetWidth / bmp.getWidth();
        bmp = Bitmap.createScaledBitmap(bmp,
                (int)(bmp.getWidth()*scaleDownFactor),
                (int)(bmp.getHeight()*scaleDownFactor),
                true);
    }
    // Mat takes (rows, cols) = (height, width); bitmapToMat then re-creates it as a 4-channel RGBA Mat
    Mat BGRImage = new Mat(bmp.getHeight(), bmp.getWidth(), CvType.CV_8UC3);
    Utils.bitmapToMat(bmp, BGRImage);

    return BGRImage;
}
 
Developer: jorenham, Project: fingerblox, Lines of code: 17, Source: ImageProcessing.java

Example 4: Sobel

import org.opencv.android.Utils; //import the required package/class
void Sobel() {
    Mat grayMat = new Mat();
    Mat sobel = new Mat(); //Mat to store the final result

    //Matrices to store gradient and absolute gradient respectively
    Mat grad_x = new Mat();
    Mat abs_grad_x = new Mat();

    Mat grad_y = new Mat();
    Mat abs_grad_y = new Mat();

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    //Calculating gradient in horizontal direction
    Imgproc.Sobel(grayMat, grad_x, CvType.CV_16S, 1, 0, 3, 1, 0);

    //Calculating gradient in vertical direction
    Imgproc.Sobel(grayMat, grad_y, CvType.CV_16S, 0, 1, 3, 1, 0);

    //Calculating the absolute value of the gradients in both directions
    Core.convertScaleAbs(grad_x, abs_grad_x);
    Core.convertScaleAbs(grad_y, abs_grad_y);

    //Calculating the resultant gradient
    Core.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 1, sobel);

    //Converting Mat back to Bitmap
    Utils.matToBitmap(sobel, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Developer: johnhany, Project: MOAAP, Lines of code: 32, Source: MainActivity.java

Example 5: mat2Bitmap

import org.opencv.android.Utils; //import the required package/class
private Bitmap mat2Bitmap(Mat src, int code) {
    Mat rgbaMat = new Mat(src.height(), src.width(), CvType.CV_8UC4); //Mat takes (rows, cols)
    Imgproc.cvtColor(src, rgbaMat, code, 4);
    Bitmap bmp = Bitmap.createBitmap(rgbaMat.cols(), rgbaMat.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(rgbaMat, bmp);
    return bmp;
}
 
Developer: jorenham, Project: fingerblox, Lines of code: 8, Source: ImageProcessing.java

Example 6: onPictureTaken

import org.opencv.android.Utils; //import the required package/class
@Override
public void onPictureTaken(byte[] data, Camera camera) {
    Log.i(TAG, "Saving a bitmap to file");
    // The camera preview was automatically stopped. Start it again.
    mCamera.startPreview();
    mCamera.setPreviewCallback(this);

    // Write the image in a file (in jpeg format)
    try {
        /*FileOutputStream fos = new FileOutputStream(mPictureFileName);

        fos.write(data);
        fos.close();*/

        Bitmap bmp = BitmapFactory.decodeByteArray(data, 0, data.length);
        Mat orig = new Mat(bmp.getHeight(), bmp.getWidth(), CvType.CV_8UC4);
        Bitmap myBitmap32 = bmp.copy(Bitmap.Config.ARGB_8888, true);
        Utils.bitmapToMat(myBitmap32, orig); //bitmapToMat fills the Mat with 4-channel RGBA data
        mImage = new Mat();
        Imgproc.cvtColor(orig, mImage, Imgproc.COLOR_RGBA2GRAY);
        /*Imgproc.cvtColor(orig, orig, Imgproc.COLOR_BGR2RGB,4);
        Mat frame = new Mat(mFrameHeight+mFrameHeight/2,mFrameWidth, CvType.CV_8UC1);
        frame.put(0,0,data);
        //Imgcodecs.imdecode(frame,0);
        Imgproc.cvtColor(frame,mImage,Imgproc.COLOR_YUV2RGBA_NV21);//frame.submat(0, mFrameHeight, 0, mFrameWidth);*/

    } catch (Exception e) {
        Log.e("PictureDemo", "Exception in photoCallback", e);
    }

}
 
Developer: Sanahm, Project: SudoCAM-Ku, Lines of code: 32, Source: CameraView.java

Example 7: getInputDataLeNet

import org.opencv.android.Utils; //import the required package/class
private float[] getInputDataLeNet(Bitmap bitmap) {
    final int INPUT_LENGTH = 28;

    Mat imageMat = new Mat();
    Mat inputMat = new Mat();

    Utils.bitmapToMat(bitmap, imageMat);

    // Convert the image to 28 x 28 grayscale in the range 0~1, inverted so that smaller values are whiter
    Imgproc.cvtColor(imageMat, imageMat, Imgproc.COLOR_RGBA2GRAY);
    imageMat = centerCropAndScale(imageMat, INPUT_LENGTH);
    imageMat.convertTo(imageMat, CvType.CV_32F, 1. / 255);
    Core.subtract(Mat.ones(imageMat.size(), CvType.CV_32F), imageMat, inputMat);

    float[] inputData = new float[inputMat.width() * inputMat.height()];

    inputMat.get(0, 0, inputData);

    return inputData;
}
 
Developer: daquexian, Project: DNNLibrary, Lines of code: 21, Source: MainActivity.java

Example 8: onActivityResult

import org.opencv.android.Utils; //import the required package/class
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent imageReturnedIntent) {
    super.onActivityResult(requestCode, resultCode, imageReturnedIntent);

    switch(requestCode) {
        case SELECT_PHOTO:
            if(resultCode == RESULT_OK){
                try {
                    final Uri imageUri = imageReturnedIntent.getData();
                    final InputStream imageStream = getContentResolver().openInputStream(imageUri);
                    final Bitmap selectedImage = BitmapFactory.decodeStream(imageStream);
                    src = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC4);
                    Utils.bitmapToMat(selectedImage, src);
                    srcSelected = true;
                    bGaussianPyrUp.setEnabled(true);
                    bGaussianPyrDown.setEnabled(true);
                    bLaplacianPyr.setEnabled(true);
                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                }
            }
            break;
    }
}
 
Developer: johnhany, Project: MOAAP, Lines of code: 25, Source: PyramidActivity.java

Example 9: DifferenceOfGaussian

import org.opencv.android.Utils; //import the required package/class
public void DifferenceOfGaussian() {
    Mat grayMat = new Mat();
    Mat blur1 = new Mat();
    Mat blur2 = new Mat();

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    Imgproc.GaussianBlur(grayMat, blur1, new Size(15, 15), 5);
    Imgproc.GaussianBlur(grayMat, blur2, new Size(21, 21), 5);

    //Subtracting the two blurred images
    Mat DoG = new Mat();
    Core.absdiff(blur1, blur2, DoG);

    //Inverse Binary Thresholding
    Core.multiply(DoG, new Scalar(100), DoG);
    Imgproc.threshold(DoG, DoG, 50, 255, Imgproc.THRESH_BINARY_INV);

    //Converting Mat back to Bitmap
    Utils.matToBitmap(DoG, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Developer: johnhany, Project: MOAAP, Lines of code: 24, Source: MainActivity.java

Example 10: Contours

import org.opencv.android.Utils; //import the required package/class
void Contours() {
    Mat grayMat = new Mat();
    Mat cannyEdges = new Mat();
    Mat hierarchy = new Mat();

    List<MatOfPoint> contourList = new ArrayList<MatOfPoint>(); //A list to store all the contours

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    Imgproc.Canny(grayMat, cannyEdges, 10, 100); //run Canny on the grayscale image computed above

    //finding contours
    Imgproc.findContours(cannyEdges, contourList, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

    //Drawing contours on a new image
    Mat contours = new Mat();
    contours.create(cannyEdges.rows(), cannyEdges.cols(), CvType.CV_8UC3);
    Random r = new Random();
    for (int i = 0; i < contourList.size(); i++) {
        Imgproc.drawContours(contours, contourList, i, new Scalar(r.nextInt(255), r.nextInt(255), r.nextInt(255)), -1);
    }
    //Converting Mat back to Bitmap
    Utils.matToBitmap(contours, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Developer: johnhany, Project: MOAAP, Lines of code: 27, Source: MainActivity.java

Example 11: onCameraViewStarted

import org.opencv.android.Utils; //import the required package/class
/**
 * This method is called when the camera view is started. It will allocate and initialize
 * some global resources.
 *
 * @param width specifies the width of the camera view.
 * @param height specifies the height of the camera view.
 */
@Override
public void onCameraViewStarted(int width, int height)
{
    faceRects = new MatOfRect();
    totalProcessingTime = 0;
    framesProcessed = 0;

    overlayImage = new Mat();
    Bitmap overlayBitmap =
            BitmapFactory.decodeResource(activity.getResources(), R.drawable.mustache);
    Utils.bitmapToMat(overlayBitmap, overlayImage);
    //
    // Don't allow the overlay unless the overlay image has all four RGBA channels.
    //
    if (overlayImage.channels() < 4) doOverlayImage = false;
}
 
Developer: trc492, Project: FtcSamples, Lines of code: 24, Source: FtcTestOpenCv.java

Example 12: loadClassifier

import org.opencv.android.Utils; //import the required package/class
private static String loadClassifier(Context context)
{
	if(BuildConfig.DEBUG)
	{
		String fullname = context.getResources().getResourceName(R.raw.haarcascade_frontalface_alt2);
		Log.i(TAG, "fullname: " + fullname);
		String resName = fullname.substring(fullname.lastIndexOf("/") + 1);
		Log.i(TAG, "resName: " + resName);
		
		// Passing "OpenCV_data" yields "/data/data/<PACKAGE_NAME>/app_OpenCV_data"; Context.getDir() adds the "app_" prefix
		File resDir = context.getDir("OpenCV_data", Context.MODE_PRIVATE);
		Log.i(TAG, "resDir: " + resDir.getAbsolutePath());
	}
	
	String path = Utils.exportResource(context, R.raw.haarcascade_frontalface_alt2);
	Utils.exportResource(context, R.raw.haarcascade_mcs_lefteye);
	Utils.exportResource(context, R.raw.haarcascade_mcs_mouth);
	Utils.exportResource(context, R.raw.haarcascade_mcs_righteye);
	String classifier_dir = path.substring(0, path.lastIndexOf('/'));
	Log.d(TAG, "cascade data directory: " + classifier_dir);
	
	return classifier_dir;
}
 
Developer: KAlO2, Project: PerfectShow, Lines of code: 24, Source: Feature.java

Example 13: detectLight

import org.opencv.android.Utils; //import the required package/class
private void detectLight(Bitmap bitmap, double gaussianBlurValue) {
    Mat rgba = new Mat();
    Utils.bitmapToMat(bitmap, rgba);

    Mat grayScaleGaussianBlur = new Mat();
    Imgproc.cvtColor(rgba, grayScaleGaussianBlur, Imgproc.COLOR_BGR2GRAY);
    Imgproc.GaussianBlur(grayScaleGaussianBlur, grayScaleGaussianBlur, new Size(gaussianBlurValue, gaussianBlurValue), 0);

    Core.MinMaxLocResult minMaxLocResultBlur = Core.minMaxLoc(grayScaleGaussianBlur);
    Imgproc.circle(rgba, minMaxLocResultBlur.maxLoc, 30, new Scalar(255), 3);

    // Don't do this at home or at work; it's only here for visualization purposes.
    Bitmap resultBitmap = Bitmap.createBitmap(rgba.cols(), rgba.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(rgba, resultBitmap);
    BitmapHelper.showBitmap(this, resultBitmap, detectLightImageView);

    Bitmap blurryBitmap = Bitmap.createBitmap(grayScaleGaussianBlur.cols(), grayScaleGaussianBlur.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(grayScaleGaussianBlur, blurryBitmap);
    BitmapHelper.showBitmap(this, blurryBitmap, gaussianBlurImageView);

}
 
Developer: zavenco, Project: DigitalImageProcessing, Lines of code: 22, Source: DetectLightActivity.java

Example 14: getPixels

import org.opencv.android.Utils; //import the required package/class
private float[] getPixels(Mat img){
    Bitmap bmp = Bitmap.createBitmap(inputSize, inputSize, Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(img, bmp);
    int[] intValues = new int[inputSize * inputSize];
    bmp.getPixels(intValues, 0, inputSize, 0, 0, inputSize, inputSize);

    float[] floatValues = new float[inputSize * inputSize * channels];
    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3 + 0] = (((float)((val >> 16) & 0xFF)) - imageMean) / imageStd;
        floatValues[i * 3 + 1] = (((float)((val >> 8) & 0xFF)) - imageMean) / imageStd;
        floatValues[i * 3 + 2] = (((float)(val & 0xFF)) - imageMean) / imageStd;
    }

    return floatValues;
}
 
Developer: Qualeams, Project: Android-Face-Recognition-with-Deep-Learning-Library, Lines of code: 17, Source: TensorFlow.java

Example 15: detectDocument

import org.opencv.android.Utils; //import the required package/class
Document detectDocument(Frame frame){
    Size imageSize = new Size(frame.getMetadata().getWidth(), frame.getMetadata().getHeight());
    Mat src = new Mat();
    Utils.bitmapToMat(frame.getBitmap(), src);
    List<MatOfPoint> contours = CVProcessor.findContours(src);
    src.release();

    if(!contours.isEmpty()){
        CVProcessor.Quadrilateral quad = CVProcessor.getQuadrilateral(contours, imageSize);

        if(quad != null){
            quad.points = CVProcessor.getUpscaledPoints(quad.points, CVProcessor.getScaleRatio(imageSize));
            return new Document(frame, quad);
        }
    }

    return null;
}
 
Developer: Credntia, Project: CVScanner, Lines of code: 19, Source: DocumentDetector.java


Note: The org.opencv.android.Utils examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with those authors. Please refer to each project's license before distributing or using the code; do not reproduce this article without permission.