

Java Utils Class Code Examples

This article collects typical usage examples of the Java class org.opencv.android.Utils. If you are wondering what the Utils class does, how to use it, or what real-world usage looks like, the curated code examples below may help.


The Utils class belongs to the org.opencv.android package. A total of 15 Utils code examples are shown below, sorted by popularity by default.
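
Before turning to the individual examples, here is a minimal editor-added sketch (not taken from any of the projects listed below) of the two conversions that almost every example relies on, Utils.bitmapToMat() and Utils.matToBitmap(). It assumes the OpenCV native library has already been loaded, for example via OpenCVLoader.initDebug().

import android.graphics.Bitmap;
import org.opencv.android.Utils;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;

Bitmap blurBitmap(Bitmap input) {
    Mat rgba = new Mat();
    Utils.bitmapToMat(input, rgba); // Bitmap -> RGBA (CV_8UC4) Mat

    Imgproc.GaussianBlur(rgba, rgba, new Size(5, 5), 0);

    // matToBitmap requires the Bitmap and Mat dimensions to match
    Bitmap output = Bitmap.createBitmap(rgba.cols(), rgba.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(rgba, output); // Mat -> Bitmap
    return output;
}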

Example 1: HOGDescriptor

import org.opencv.android.Utils; // import the required package/class
void HOGDescriptor() {
    Mat grayMat = new Mat();
    Mat people = new Mat();

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    HOGDescriptor hog = new HOGDescriptor();
    hog.setSVMDetector(HOGDescriptor.getDefaultPeopleDetector());

    MatOfRect faces = new MatOfRect();
    MatOfDouble weights = new MatOfDouble();

    hog.detectMultiScale(grayMat, faces, weights);
    originalMat.copyTo(people);
    //Draw rectangles around the detected people
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
        Imgproc.rectangle(people, facesArray[i].tl(), facesArray[i].br(), new Scalar(100), 3);

    //Converting Mat back to Bitmap
    Utils.matToBitmap(people, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Developer: johnhany, Project: MOAAP, Lines: 25, Source: MainActivity.java

Example 2: HoughLines

import org.opencv.android.Utils; // import the required package/class
void HoughLines() {
    Mat grayMat = new Mat();
    Mat cannyEdges = new Mat();
    Mat lines = new Mat();

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    Imgproc.Canny(grayMat, cannyEdges, 10, 100);

    Imgproc.HoughLinesP(cannyEdges, lines, 1, Math.PI / 180, 50, 20, 20);

    Mat houghLines = new Mat();
    houghLines.create(cannyEdges.rows(), cannyEdges.cols(), CvType.CV_8UC1);

    //Drawing the detected lines on a new image
    for (int i = 0; i < lines.cols(); i++) {
        double[] points = lines.get(0, i);
        double x1, y1, x2, y2;

        x1 = points[0];
        y1 = points[1];
        x2 = points[2];
        y2 = points[3];

        Point pt1 = new Point(x1, y1);
        Point pt2 = new Point(x2, y2);

        Imgproc.line(houghLines, pt1, pt2, new Scalar(255, 0, 0), 1);
    }

    //Converting Mat back to Bitmap
    Utils.matToBitmap(houghLines, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Developer: johnhany, Project: MOAAP, Lines: 39, Source: MainActivity.java

Example 3: bytesToMat

import org.opencv.android.Utils; // import the required package/class
private Mat bytesToMat(byte[] data) {
    // Scale down the image for performance
    Bitmap bmp = BitmapFactory.decodeByteArray(data, 0, data.length);
    int targetWidth = 1200;
    if (bmp.getWidth() > targetWidth) {
        float scaleDownFactor = (float)targetWidth / bmp.getWidth();
        bmp = Bitmap.createScaledBitmap(bmp,
                (int)(bmp.getWidth()*scaleDownFactor),
                (int)(bmp.getHeight()*scaleDownFactor),
                true);
    }
    Mat BGRImage = new Mat(bmp.getHeight(), bmp.getWidth(), CvType.CV_8UC3); // Mat(rows, cols, type)
    Utils.bitmapToMat(bmp, BGRImage); // bitmapToMat reallocates the Mat as RGBA (CV_8UC4)

    return BGRImage;
}
 
Developer: jorenham, Project: fingerblox, Lines: 17, Source: ImageProcessing.java

Example 4: Sobel

import org.opencv.android.Utils; // import the required package/class
void Sobel() {
    Mat grayMat = new Mat();
    Mat sobel = new Mat(); //Mat to store the final result

    //Matrices to store gradient and absolute gradient respectively
    Mat grad_x = new Mat();
    Mat abs_grad_x = new Mat();

    Mat grad_y = new Mat();
    Mat abs_grad_y = new Mat();

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    //Calculating gradient in horizontal direction
    Imgproc.Sobel(grayMat, grad_x, CvType.CV_16S, 1, 0, 3, 1, 0);

    //Calculating gradient in vertical direction
    Imgproc.Sobel(grayMat, grad_y, CvType.CV_16S, 0, 1, 3, 1, 0);

    //Calculating the absolute value of the gradients in both directions
    Core.convertScaleAbs(grad_x, abs_grad_x);
    Core.convertScaleAbs(grad_y, abs_grad_y);

    //Calculating the resultant gradient
    Core.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 1, sobel);

    //Converting Mat back to Bitmap
    Utils.matToBitmap(sobel, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Developer: johnhany, Project: MOAAP, Lines: 32, Source: MainActivity.java

Example 5: mat2Bitmap

import org.opencv.android.Utils; // import the required package/class
private Bitmap mat2Bitmap(Mat src, int code) {
    Mat rgbaMat = new Mat(src.height(), src.width(), CvType.CV_8UC4); // Mat(rows, cols, type)
    Imgproc.cvtColor(src, rgbaMat, code, 4);
    Bitmap bmp = Bitmap.createBitmap(rgbaMat.cols(), rgbaMat.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(rgbaMat, bmp);
    return bmp;
}
 
Developer: jorenham, Project: fingerblox, Lines: 8, Source: ImageProcessing.java

Example 6: onPictureTaken

import org.opencv.android.Utils; // import the required package/class
@Override
public void onPictureTaken(byte[] data, Camera camera) {
    Log.i(TAG, "Saving a bitmap to file");
    // The camera preview was automatically stopped. Start it again.
    mCamera.startPreview();
    mCamera.setPreviewCallback(this);

    // Write the image in a file (in jpeg format)
    try {
        /*FileOutputStream fos = new FileOutputStream(mPictureFileName);

        fos.write(data);
        fos.close();*/

        Bitmap bmp = BitmapFactory.decodeByteArray(data, 0, data.length);
        Mat orig = new Mat(bmp.getHeight(), bmp.getWidth(), CvType.CV_8UC3);
        Bitmap myBitmap32 = bmp.copy(Bitmap.Config.ARGB_8888, true);
        Utils.bitmapToMat(myBitmap32, orig);
        mImage = new Mat();
        Imgproc.cvtColor(orig, mImage, Imgproc.COLOR_RGB2GRAY);
        /*Imgproc.cvtColor(orig, orig, Imgproc.COLOR_BGR2RGB,4);
        Mat frame = new Mat(mFrameHeight+mFrameHeight/2,mFrameWidth, CvType.CV_8UC1);
        frame.put(0,0,data);
        //Imgcodecs.imdecode(frame,0);
        Imgproc.cvtColor(frame,mImage,Imgproc.COLOR_YUV2RGBA_NV21);//frame.submat(0, mFrameHeight, 0, mFrameWidth);*/

    } catch (Exception e) {
        Log.e("PictureDemo", "Exception in photoCallback", e);
    }

}
 
Developer: Sanahm, Project: SudoCAM-Ku, Lines: 32, Source: CameraView.java

Example 7: getInputDataLeNet

import org.opencv.android.Utils; // import the required package/class
private float[] getInputDataLeNet(Bitmap bitmap) {
    final int INPUT_LENGTH = 28;

    Mat imageMat = new Mat();
    Mat inputMat = new Mat();

    Utils.bitmapToMat(bitmap, imageMat);

    // Convert the image to 28x28 grayscale in the range 0..1, then invert it so that smaller values correspond to whiter pixels
    Imgproc.cvtColor(imageMat, imageMat, Imgproc.COLOR_RGBA2GRAY);
    imageMat = centerCropAndScale(imageMat, INPUT_LENGTH);
    imageMat.convertTo(imageMat, CvType.CV_32F, 1. / 255);
    Core.subtract(Mat.ones(imageMat.size(), CvType.CV_32F), imageMat, inputMat);

    float[] inputData = new float[inputMat.width() * inputMat.height()];

    inputMat.get(0, 0, inputData);

    return inputData;
}
 
Developer: daquexian, Project: DNNLibrary, Lines: 21, Source: MainActivity.java

Example 8: onActivityResult

import org.opencv.android.Utils; // import the required package/class
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent imageReturnedIntent) {
    super.onActivityResult(requestCode, resultCode, imageReturnedIntent);

    switch(requestCode) {
        case SELECT_PHOTO:
            if(resultCode == RESULT_OK){
                try {
                    final Uri imageUri = imageReturnedIntent.getData();
                    final InputStream imageStream = getContentResolver().openInputStream(imageUri);
                    final Bitmap selectedImage = BitmapFactory.decodeStream(imageStream);
                    src = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC4);
                    Utils.bitmapToMat(selectedImage, src);
                    srcSelected = true;
                    bGaussianPyrUp.setEnabled(true);
                    bGaussianPyrDown.setEnabled(true);
                    bLaplacianPyr.setEnabled(true);
                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                }
            }
            break;
    }
}
 
Developer: johnhany, Project: MOAAP, Lines: 25, Source: PyramidActivity.java

Example 9: DifferenceOfGaussian

import org.opencv.android.Utils; // import the required package/class
public void DifferenceOfGaussian() {
    Mat grayMat = new Mat();
    Mat blur1 = new Mat();
    Mat blur2 = new Mat();

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    Imgproc.GaussianBlur(grayMat, blur1, new Size(15, 15), 5);
    Imgproc.GaussianBlur(grayMat, blur2, new Size(21, 21), 5);

    //Subtracting the two blurred images
    Mat DoG = new Mat();
    Core.absdiff(blur1, blur2, DoG);

    //Inverse Binary Thresholding
    Core.multiply(DoG, new Scalar(100), DoG);
    Imgproc.threshold(DoG, DoG, 50, 255, Imgproc.THRESH_BINARY_INV);

    //Converting Mat back to Bitmap
    Utils.matToBitmap(DoG, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Developer: johnhany, Project: MOAAP, Lines: 24, Source: MainActivity.java

Example 10: Contours

import org.opencv.android.Utils; // import the required package/class
void Contours() {
    Mat grayMat = new Mat();
    Mat cannyEdges = new Mat();
    Mat hierarchy = new Mat();

    List<MatOfPoint> contourList = new ArrayList<MatOfPoint>(); //A list to store all the contours

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    Imgproc.Canny(grayMat, cannyEdges, 10, 100); // run Canny on the grayscale image computed above

    //finding contours
    Imgproc.findContours(cannyEdges, contourList, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

    //Drawing contours on a new image
    Mat contours = new Mat();
    contours.create(cannyEdges.rows(), cannyEdges.cols(), CvType.CV_8UC3);
    Random r = new Random();
    for (int i = 0; i < contourList.size(); i++) {
        Imgproc.drawContours(contours, contourList, i, new Scalar(r.nextInt(255), r.nextInt(255), r.nextInt(255)), -1);
    }
    //Converting Mat back to Bitmap
    Utils.matToBitmap(contours, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Developer: johnhany, Project: MOAAP, Lines: 27, Source: MainActivity.java

Example 11: onCameraViewStarted

import org.opencv.android.Utils; // import the required package/class
/**
 * This method is called when the camera view is started. It will allocate and initialize
 * some global resources.
 *
 * @param width specifies the width of the camera view.
 * @param height specifies the height of the camera view.
 */
@Override
public void onCameraViewStarted(int width, int height)
{
    faceRects = new MatOfRect();
    totalProcessingTime = 0;
    framesProcessed = 0;

    overlayImage = new Mat();
    Bitmap overlayBitmap =
            BitmapFactory.decodeResource(activity.getResources(), R.drawable.mustache);
    Utils.bitmapToMat(overlayBitmap, overlayImage);
    //
    // Don't allow overlay unless overlay image has the rgba channels.
    //
    if (overlayImage.channels() < 4) doOverlayImage = false;
}
 
Developer: trc492, Project: FtcSamples, Lines: 24, Source: FtcTestOpenCv.java

Example 12: loadClassifier

import org.opencv.android.Utils; // import the required package/class
private static String loadClassifier(Context context)
{
	if(BuildConfig.DEBUG)
	{
		String fullname = context.getResources().getResourceName(R.raw.haarcascade_frontalface_alt2);
		Log.i(TAG, "fullname: " + fullname);
		String resName = fullname.substring(fullname.lastIndexOf("/") + 1);
		Log.i(TAG, "resName: " + resName);
		
		// Passing "OpenCV_data" yields "/data/data/<PACKAGE_NAME>/app_OpenCV_data"; Context.getDir() prepends an "app_" prefix to the name.
		File resDir = context.getDir("OpenCV_data", Context.MODE_PRIVATE);
		Log.i(TAG, "resDir: " + resDir.getAbsolutePath());
	}
	
	String path = Utils.exportResource(context, R.raw.haarcascade_frontalface_alt2);
	Utils.exportResource(context, R.raw.haarcascade_mcs_lefteye);
	Utils.exportResource(context, R.raw.haarcascade_mcs_mouth);
	Utils.exportResource(context, R.raw.haarcascade_mcs_righteye);
	String classifier_dir = path.substring(0, path.lastIndexOf('/'));
	Log.d(TAG, "cascade data directory: " + classifier_dir);
	
	return classifier_dir;
}
 
Developer: KAlO2, Project: PerfectShow, Lines: 24, Source: Feature.java

Example 13: detectLight

import org.opencv.android.Utils; // import the required package/class
private void detectLight(Bitmap bitmap, double gaussianBlurValue) {
    Mat rgba = new Mat();
    Utils.bitmapToMat(bitmap, rgba);

    Mat grayScaleGaussianBlur = new Mat();
    Imgproc.cvtColor(rgba, grayScaleGaussianBlur, Imgproc.COLOR_BGR2GRAY);
    Imgproc.GaussianBlur(grayScaleGaussianBlur, grayScaleGaussianBlur, new Size(gaussianBlurValue, gaussianBlurValue), 0);

    Core.MinMaxLocResult minMaxLocResultBlur = Core.minMaxLoc(grayScaleGaussianBlur);
    Imgproc.circle(rgba, minMaxLocResultBlur.maxLoc, 30, new Scalar(255), 3);

    // For visualization purposes only; not something to do in production code.
    Bitmap resultBitmap = Bitmap.createBitmap(rgba.cols(), rgba.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(rgba, resultBitmap);
    BitmapHelper.showBitmap(this, resultBitmap, detectLightImageView);

    Bitmap blurryBitmap = Bitmap.createBitmap(grayScaleGaussianBlur.cols(), grayScaleGaussianBlur.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(grayScaleGaussianBlur, blurryBitmap);
    BitmapHelper.showBitmap(this, blurryBitmap, gaussianBlurImageView);

}
 
Developer: zavenco, Project: DigitalImageProcessing, Lines: 22, Source: DetectLightActivity.java

Example 14: getPixels

import org.opencv.android.Utils; // import the required package/class
private float[] getPixels(Mat img){
    Bitmap bmp = Bitmap.createBitmap(inputSize, inputSize, Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(img, bmp);
    int[] intValues = new int[inputSize * inputSize];
    bmp.getPixels(intValues, 0, inputSize, 0, 0, inputSize, inputSize);

    float[] floatValues = new float[inputSize * inputSize * channels];
    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3 + 0] = (((float)((val >> 16) & 0xFF)) - imageMean) / imageStd;
        floatValues[i * 3 + 1] = (((float)((val >> 8) & 0xFF)) - imageMean) / imageStd;
        floatValues[i * 3 + 2] = (((float)(val & 0xFF)) - imageMean) / imageStd;
    }

    return floatValues;
}
 
Developer: Qualeams, Project: Android-Face-Recognition-with-Deep-Learning-Library, Lines: 17, Source: TensorFlow.java

Example 15: detectDocument

import org.opencv.android.Utils; // import the required package/class
Document detectDocument(Frame frame){
    Size imageSize = new Size(frame.getMetadata().getWidth(), frame.getMetadata().getHeight());
    Mat src = new Mat();
    Utils.bitmapToMat(frame.getBitmap(), src);
    List<MatOfPoint> contours = CVProcessor.findContours(src);
    src.release();

    if(!contours.isEmpty()){
        CVProcessor.Quadrilateral quad = CVProcessor.getQuadrilateral(contours, imageSize);

        if(quad != null){
            quad.points = CVProcessor.getUpscaledPoints(quad.points, CVProcessor.getScaleRatio(imageSize));
            return new Document(frame, quad);
        }
    }

    return null;
}
 
Developer: Credntia, Project: CVScanner, Lines: 19, Source: DocumentDetector.java


Note: The org.opencv.android.Utils class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before redistributing or reusing the code, and do not reproduce this article without permission.