This article collects typical usage examples of the Java method org.opencv.android.Utils.matToBitmap. If you are unsure how Utils.matToBitmap is used in practice, the curated code examples here may help; you can also explore further usage examples of its enclosing class, org.opencv.android.Utils.
The sections below present 15 code examples of the Utils.matToBitmap method, ordered by popularity.
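Before the examples, a quick note on the method's contract: Utils.matToBitmap copies an 8-bit single-, three-, or four-channel Mat into an Android Bitmap of the same width and height, and the Bitmap must use the ARGB_8888 or RGB_565 config. The snippet below is a minimal round-trip sketch, not taken from any of the examples; it assumes OpenCV has already been initialized (for instance via OpenCVLoader.initDebug()), and the class, method, and variable names are illustrative only.

import android.graphics.Bitmap;
import org.opencv.android.Utils;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;

class MatBitmapRoundTrip {
    // Blur a Bitmap by converting it to a Mat, processing it, and converting it back.
    static Bitmap blur(Bitmap input) {
        Mat rgba = new Mat();
        Utils.bitmapToMat(input, rgba); // yields an RGBA CV_8UC4 Mat of the same size
        Imgproc.GaussianBlur(rgba, rgba, new Size(5, 5), 0);
        // The target Bitmap must match the Mat's dimensions and use ARGB_8888 (or RGB_565)
        Bitmap output = Bitmap.createBitmap(rgba.cols(), rgba.rows(), Bitmap.Config.ARGB_8888);
        Utils.matToBitmap(rgba, output);
        return output;
    }
}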
Example 1: HOGDescriptor
import org.opencv.android.Utils; // import the package/class required by the method

void HOGDescriptor() {
    Mat grayMat = new Mat();
    Mat people = new Mat();
    // Convert the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);
    HOGDescriptor hog = new HOGDescriptor();
    hog.setSVMDetector(HOGDescriptor.getDefaultPeopleDetector());
    MatOfRect faces = new MatOfRect();
    MatOfDouble weights = new MatOfDouble();
    hog.detectMultiScale(grayMat, faces, weights);
    originalMat.copyTo(people);
    // Draw rectangles around the detections (the default detector finds people, not faces)
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
        Imgproc.rectangle(people, facesArray[i].tl(), facesArray[i].br(), new Scalar(100), 3);
    // Convert the Mat back to a Bitmap
    Utils.matToBitmap(people, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
Example 2: HoughLines
import org.opencv.android.Utils; // import the package/class required by the method

void HoughLines() {
    Mat grayMat = new Mat();
    Mat cannyEdges = new Mat();
    Mat lines = new Mat();
    // Convert the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);
    Imgproc.Canny(grayMat, cannyEdges, 10, 100);
    Imgproc.HoughLinesP(cannyEdges, lines, 1, Math.PI / 180, 50, 20, 20);
    Mat houghLines = new Mat();
    houghLines.create(cannyEdges.rows(), cannyEdges.cols(), CvType.CV_8UC1);
    // Draw the detected lines on a new image
    // (in OpenCV 3.x, HoughLinesP stores one detected line per row of the output Mat)
    for (int i = 0; i < lines.rows(); i++) {
        double[] points = lines.get(i, 0);
        double x1 = points[0];
        double y1 = points[1];
        double x2 = points[2];
        double y2 = points[3];
        Point pt1 = new Point(x1, y1);
        Point pt2 = new Point(x2, y2);
        Imgproc.line(houghLines, pt1, pt2, new Scalar(255, 0, 0), 1);
    }
    // Convert the Mat back to a Bitmap
    Utils.matToBitmap(houghLines, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
Example 3: Sobel
import org.opencv.android.Utils; // import the package/class required by the method

void Sobel() {
    Mat grayMat = new Mat();
    Mat sobel = new Mat(); // Mat to store the final result
    // Matrices to store the gradients and their absolute values
    Mat grad_x = new Mat();
    Mat abs_grad_x = new Mat();
    Mat grad_y = new Mat();
    Mat abs_grad_y = new Mat();
    // Convert the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);
    // Gradient in the horizontal direction
    Imgproc.Sobel(grayMat, grad_x, CvType.CV_16S, 1, 0, 3, 1, 0);
    // Gradient in the vertical direction
    Imgproc.Sobel(grayMat, grad_y, CvType.CV_16S, 0, 1, 3, 1, 0);
    // Absolute value of the gradients in both directions
    Core.convertScaleAbs(grad_x, abs_grad_x);
    Core.convertScaleAbs(grad_y, abs_grad_y);
    // Combine the two gradients into the resulting edge image
    Core.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 1, sobel);
    // Convert the Mat back to a Bitmap
    Utils.matToBitmap(sobel, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
Example 4: mat2Bitmap
import org.opencv.android.Utils; // import the package/class required by the method

private Bitmap mat2Bitmap(Mat src, int code) {
    // The Mat constructor takes (rows, cols); cvtColor resizes the output to match src anyway
    Mat rgbaMat = new Mat(src.rows(), src.cols(), CvType.CV_8UC4);
    Imgproc.cvtColor(src, rgbaMat, code, 4);
    Bitmap bmp = Bitmap.createBitmap(rgbaMat.cols(), rgbaMat.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(rgbaMat, bmp);
    return bmp;
}
Example 5: Contours
import org.opencv.android.Utils; // import the package/class required by the method

void Contours() {
    Mat grayMat = new Mat();
    Mat cannyEdges = new Mat();
    Mat hierarchy = new Mat();
    List<MatOfPoint> contourList = new ArrayList<MatOfPoint>(); // list to store all the contours
    // Convert the image to grayscale and detect edges on it
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);
    Imgproc.Canny(grayMat, cannyEdges, 10, 100);
    // Find the contours
    Imgproc.findContours(cannyEdges, contourList, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    // Draw the contours, each filled with a random colour, on a new image
    Mat contours = new Mat();
    contours.create(cannyEdges.rows(), cannyEdges.cols(), CvType.CV_8UC3);
    Random r = new Random();
    for (int i = 0; i < contourList.size(); i++) {
        Imgproc.drawContours(contours, contourList, i, new Scalar(r.nextInt(255), r.nextInt(255), r.nextInt(255)), -1);
    }
    // Convert the Mat back to a Bitmap
    Utils.matToBitmap(contours, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
Example 6: DifferenceOfGaussian
import org.opencv.android.Utils; // import the package/class required by the method

public void DifferenceOfGaussian() {
    Mat grayMat = new Mat();
    Mat blur1 = new Mat();
    Mat blur2 = new Mat();
    // Convert the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);
    // Blur with two different Gaussian kernel sizes
    Imgproc.GaussianBlur(grayMat, blur1, new Size(15, 15), 5);
    Imgproc.GaussianBlur(grayMat, blur2, new Size(21, 21), 5);
    // Take the absolute difference of the two blurred images
    Mat DoG = new Mat();
    Core.absdiff(blur1, blur2, DoG);
    // Amplify the difference, then apply inverse binary thresholding
    Core.multiply(DoG, new Scalar(100), DoG);
    Imgproc.threshold(DoG, DoG, 50, 255, Imgproc.THRESH_BINARY_INV);
    // Convert the Mat back to a Bitmap
    Utils.matToBitmap(DoG, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
Example 7: detectLight
import org.opencv.android.Utils; // import the package/class required by the method

private void detectLight(Bitmap bitmap, double gaussianBlurValue) {
    Mat rgba = new Mat();
    Utils.bitmapToMat(bitmap, rgba);
    // bitmapToMat produces an RGBA Mat, so convert from RGBA to grayscale and blur it
    Mat grayScaleGaussianBlur = new Mat();
    Imgproc.cvtColor(rgba, grayScaleGaussianBlur, Imgproc.COLOR_RGBA2GRAY);
    Imgproc.GaussianBlur(grayScaleGaussianBlur, grayScaleGaussianBlur, new Size(gaussianBlurValue, gaussianBlurValue), 0);
    // The brightest point of the blurred grayscale image is taken as the detected light
    Core.MinMaxLocResult minMaxLocResultBlur = Core.minMaxLoc(grayScaleGaussianBlur);
    // Drawing directly on the source Mat is for visualization purposes only
    Imgproc.circle(rgba, minMaxLocResultBlur.maxLoc, 30, new Scalar(255), 3);
    Bitmap resultBitmap = Bitmap.createBitmap(rgba.cols(), rgba.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(rgba, resultBitmap);
    BitmapHelper.showBitmap(this, resultBitmap, detectLightImageView);
    Bitmap blurryBitmap = Bitmap.createBitmap(grayScaleGaussianBlur.cols(), grayScaleGaussianBlur.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(grayScaleGaussianBlur, blurryBitmap);
    BitmapHelper.showBitmap(this, blurryBitmap, gaussianBlurImageView);
}
Example 8: drawTemplateOutline
import org.opencv.android.Utils; // import the package/class required by the method

/**
 * Draws the outline of the pattern to the preview screen.
 */
public void drawTemplateOutline() {
    // Load the template outline
    cardType = DataHolder.getInstance().getData();
    if (!cardType.equals("-DETECT-")) getTemplate();
    Bitmap icon = BitmapFactory.decodeResource(this.getResources(), R.drawable.card_frame);
    Mat outline = new Mat(icon.getHeight(), icon.getWidth(), CvType.CV_8U, new Scalar(0, 0, 0));
    Utils.bitmapToMat(icon, outline);
    Imgproc.cvtColor(outline, outline, Imgproc.COLOR_BGRA2RGBA);
    if (showOutline) {
        // Draw a rectangle for every text pattern of the card, mapping the pattern's
        // normalised coordinates onto the mirrored outline image
        for (String key : card.getPatternMap().keySet()) {
            if (card.getPattern(key).getResource().matches("text.*")) {
                Point tl = new Point(Math.abs(outline.cols() - (card.getPattern(key).getTl().y * outline.cols())),
                        card.getPattern(key).getTl().x * outline.rows());
                Point br = new Point(Math.abs(outline.cols() - (card.getPattern(key).getBr().y * outline.cols())),
                        card.getPattern(key).getBr().x * outline.rows());
                Core.rectangle(outline, tl, br, new Scalar(0, 255, 0, 255), 1);
            }
        }
        // Rectangle for the face region
        Core.rectangle(outline,
                new Point(Math.abs(outline.cols() - (facetl.y * outline.cols())), facetl.x * outline.rows()),
                new Point(Math.abs(outline.cols() - (facebr.y * outline.cols())), facebr.x * outline.rows()),
                new Scalar(0, 255, 0, 255), 1);
    }
    Bitmap bimage = Bitmap.createBitmap(outline.cols(), outline.rows(), Bitmap.Config.ARGB_8888);
    Imgproc.cvtColor(outline, outline, Imgproc.COLOR_RGBA2BGRA);
    Utils.matToBitmap(outline, bimage);
    ImageView imgV = (ImageView) findViewById(R.id.frame_det);
    imgV.setImageBitmap(bimage);
}
Example 9: onPostExecute
import org.opencv.android.Utils; // import the package/class required by the method

@Override
protected void onPostExecute(String result) {
    if (result.equals("success")) {
        ProgressWheel pBar = (ProgressWheel) findViewById(R.id.progressBar);
        pBar.setProgress(0);
        pBar.setVisibility(View.GONE);
        ImageView imageView = (ImageView) findViewById(R.id.imgView);
        imageView.setVisibility(View.VISIBLE);
        // Show the processed Mat in the ImageView
        Utils.matToBitmap(showBit, bimage);
        imageView.setImageBitmap(bimage);
        incremented = 0;
        new TextOperation().execute("");
    } else {
        Log.w("Error", "Loading image error");
        theText.put("Error", "Image Loading");
        Intent theResult = new Intent();
        theResult.putExtra("theValidation", theText);
        setResult(Activity.RESULT_OK, theResult);
        finish();
    }
}
Example 10: onFaceCaptured
import org.opencv.android.Utils; // import the package/class required by the method

private void onFaceCaptured(Mat faceMat) {
    capturingImage = false;
    final boolean willRecognizeButtonAppear = capturedMat == null;
    capturedMat = faceMat;
    final Bitmap bmp = Bitmap.createBitmap(faceMat.cols(), faceMat.rows(), Bitmap.Config.RGB_565);
    Utils.matToBitmap(faceMat, bmp);
    FaceRecognitionActivity.this.runOnUiThread(new Runnable() {
        @Override
        public void run() {
            capturedImage.setImageBitmap(bmp);
            captureButton.setBackgroundResource(R.drawable.capturestart);
            captureButton.setText("Start Capturing");
            if (willRecognizeButtonAppear) {
                bringRecognizeButtonAnimatedly();
            }
        }
    });
}
Example 11: add
import org.opencv.android.Utils; // import the package/class required by the method

void add(Mat m, String description) {
    Bitmap bmp = Bitmap.createBitmap(m.width(), m.height(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(m, bmp);
    bmp = Bitmap.createScaledBitmap(bmp, WIDTH, HEIGHT, false);
    FileOutputStream f;
    try {
        // Save the scaled bitmap as a JPEG file
        f = new FileOutputStream(mPath + description + "-" + count + ".jpg", true);
        count++;
        bmp.compress(Bitmap.CompressFormat.JPEG, 100, f);
        f.close();
    } catch (Exception e) {
        Log.e("error", e.getCause() + " " + e.getMessage());
        e.printStackTrace();
    }
}
Example 12: draw
import org.opencv.android.Utils; // import the package/class required by the method

@Override
protected void draw() {
    Utils.matToBitmap(out, this.bmp);
    Canvas canvas = new Canvas(this.bmp);
    int y = bounds.height();
    int c = 1;
    // Draw at most `lines` lines of text onto the bitmap
    for (String line : simpleText.split("\n")) {
        canvas.drawText(line, bounds.width(), y, paint);
        y = y + bounds.height();
        if (c >= lines)
            break;
        c++;
    }
    this.drawer.drawBitmap(this.bmp);
}
Example 13: execute
import org.opencv.android.Utils; // import the package/class required by the method

@Override
protected Bundle execute(Bundle arg0) {
    Mat rgba = (Mat) arg0.get(Constants.SOURCE_MAT_RGB);
    Mat gray = (Mat) arg0.get(Constants.SOURCE_MAT_GRAY);
    long pupilRoiRect = (Long) arg0.get(Constants.PUPIL_ROI_RECT);
    long detectedCircles = (Long) arg0.get(Constants.DETECTED_CIRCLES);
    arg0 = null;
    // Native call that detects the pupil and draws it onto the RGBA frame
    detectPupilAndDraw(rgba.getNativeObjAddr(), gray.getNativeObjAddr(),
            pupilRoiRect, detectedCircles);
    Log.i(RGB2GRAYFilter.TAG, this.getFilterName() + " start");
    Bitmap bitmap = Bitmap.createBitmap(rgba.cols(), rgba.rows(),
            Config.ARGB_8888);
    Utils.matToBitmap(rgba, bitmap);
    Bundle newBundle = new Bundle();
    newBundle.put(Constants.SINK_BITMAP, bitmap);
    Log.i(RGB2GRAYFilter.TAG, this.getFilterName());
    return newBundle;
}
Example 14: preprocess
import org.opencv.android.Utils; // import the package/class required by the method

static Bitmap preprocess(Mat frame, int width, int height) {
    // Convert to grayscale
    Mat frameGrey = new Mat(height, width, CvType.CV_8UC1);
    Imgproc.cvtColor(frame, frameGrey, Imgproc.COLOR_BGR2GRAY, 1);
    // Rotate 90 degrees clockwise: transpose, then flip around the y-axis (flipCode 1)
    Mat rotatedFrame = new Mat(width, height, frameGrey.type());
    Core.transpose(frameGrey, rotatedFrame);
    Core.flip(rotatedFrame, rotatedFrame, 1);
    // Resize to match the surface view
    Mat resizedFrame = new Mat(width, height, rotatedFrame.type());
    Imgproc.resize(rotatedFrame, resizedFrame, new Size(width, height));
    // Crop to an elliptical region
    Mat ellipseMask = getEllipseMask(width, height);
    Mat frameCropped = new Mat(resizedFrame.rows(), resizedFrame.cols(), resizedFrame.type(), new Scalar(0));
    resizedFrame.copyTo(frameCropped, ellipseMask);
    // Histogram equalisation
    Mat frameHistEq = new Mat(frameCropped.rows(), frameCropped.cols(), frameCropped.type());
    Imgproc.equalizeHist(frameCropped, frameHistEq);
    // Convert back to RGBA
    Mat frameRgba = new Mat(frameHistEq.rows(), frameHistEq.cols(), CvType.CV_8UC4);
    Imgproc.cvtColor(frameHistEq, frameRgba, Imgproc.COLOR_GRAY2RGBA);
    // Crop again to restore transparency outside the ellipse
    Mat frameAlpha = new Mat(frameRgba.rows(), frameRgba.cols(), CvType.CV_8UC4, new Scalar(0, 0, 0, 0));
    frameRgba.copyTo(frameAlpha, ellipseMask);
    // Convert to a Bitmap
    Bitmap bmp = Bitmap.createBitmap(frameAlpha.cols(), frameAlpha.rows(), Bitmap.Config.ARGB_4444);
    Utils.matToBitmap(frameAlpha, bmp);
    return bmp;
}
Example 15: rotateCropAndResizePreview
import org.opencv.android.Utils; // import the package/class required by the method

/**
 * Resize, crop and rotate a camera preview frame.
 *
 * @param bytes  preview data in NV21 format
 * @param width  original width
 * @param height original height
 * @param params image processing parameters
 * @return the processed preview frame as a Bitmap
 */
public static Bitmap rotateCropAndResizePreview(byte[] bytes, int width, int height, PreviewResizeParams params)
{
    Size finalSize = new Size(params.newWidth, params.newHeight);
    Rect cropRect = new Rect(params.cropX, params.cropY, params.cropWidth, params.cropHeight);
    Mat rawMat = new Mat(height * 3 / 2, width, CvType.CV_8UC1); // YUV (NV21) data
    rawMat.put(0, 0, bytes);
    Mat rgbMat = new Mat(height, width, CvType.CV_8UC4); // RGBA image
    Imgproc.cvtColor(rawMat, rgbMat, Imgproc.COLOR_YUV2RGBA_NV21);
    // Rotate clockwise
    Mat rotatedMat = rotateFrame(rgbMat, params.rotation);
    // Crop the rect from the image
    Mat croppedMat = new Mat(rotatedMat, cropRect);
    // Resize
    if (finalSize.area() > 0)
        Imgproc.resize(croppedMat, croppedMat, finalSize);
    Bitmap bmp = Bitmap.createBitmap(croppedMat.cols(), croppedMat.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(croppedMat, bmp);
    return bmp;
}