本文整理汇总了Java中org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame方法的典型用法代码示例。如果您正苦于以下问题:Java CameraBridgeViewBase.CvCameraViewFrame方法的具体用法?Java CameraBridgeViewBase.CvCameraViewFrame怎么用?Java CameraBridgeViewBase.CvCameraViewFrame使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.opencv.android.CameraBridgeViewBase的用法示例。
在下文中一共展示了CameraBridgeViewBase.CvCameraViewFrame方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // Grab both the grayscale and colour views of the current camera frame.
    Mat mGray = inputFrame.gray();
    mRgba = inputFrame.rgba();

    // Mirror both mats horizontally when previewing from the front camera.
    if (mIsFrontCamera) {
        Core.flip(mRgba, mRgba, 1);
        Core.flip(mGray, mGray, 1);
    }

    // Run the Haar cascade on the grayscale frame, if the cascade loaded.
    MatOfRect faces = new MatOfRect();
    if (haarCascade != null) {
        haarCascade.detectMultiScale(mGray, faces, 1.1, 2, 2, new Size(200, 200), new Size());
    }

    // Outline every detection on the colour frame that gets displayed.
    for (Rect face : faces.toArray()) {
        Imgproc.rectangle(mRgba, face.tl(), face.br(), new Scalar(100), 3);
    }
    return mRgba;
}
示例2: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();

    // First frame only: derive the minimum face size (20% of the frame
    // height) and push it to the native detector.
    if (absoluteFaceSize == 0) {
        final float relativeFaceSize = 0.2f;
        int rounded = Math.round(mGray.rows() * relativeFaceSize);
        if (rounded > 0) {
            absoluteFaceSize = rounded;
        }
        nativeDetector.setMinFaceSize(absoluteFaceSize);
    }

    MatOfRect faces = new MatOfRect();
    MatOfRect facesFliped = new MatOfRect();
    return getMat(mRgba, mGray, faces, facesFliped);
}
示例3: render
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat render(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // Fetch the RGBA frame once instead of re-requesting it for every use.
    Mat rgba = inputFrame.rgba();

    // Undistort into a scratch Mat using the calibrated camera parameters.
    Mat undistortedFrame = new Mat(rgba.size(), rgba.type());
    Imgproc.undistort(rgba, undistortedFrame,
            mCalibrator.getCameraMatrix(), mCalibrator.getDistortionCoefficients());

    // Build a side-by-side comparison: left half keeps the original image,
    // right half shows the left half of the undistorted result.
    Mat comparisonFrame = rgba;
    undistortedFrame.colRange(new Range(0, mWidth / 2))
            .copyTo(comparisonFrame.colRange(new Range(mWidth / 2, mWidth)));

    // Draw a thin white divider (0.5% of the width) between the two halves.
    List<MatOfPoint> border = new ArrayList<MatOfPoint>();
    final int shift = (int) (mWidth * 0.005);
    border.add(new MatOfPoint(new Point(mWidth / 2 - shift, 0), new Point(mWidth / 2 + shift, 0),
            new Point(mWidth / 2 + shift, mHeight), new Point(mWidth / 2 - shift, mHeight)));
    Imgproc.fillPoly(comparisonFrame, border, new Scalar(255, 255, 255));

    // Label each half so the user can tell which side is which.
    Imgproc.putText(comparisonFrame, mResources.getString(R.string.original),
            new Point(mWidth * 0.1, mHeight * 0.1),
            Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 0));
    Imgproc.putText(comparisonFrame, mResources.getString(R.string.undistorted),
            new Point(mWidth * 0.6, mHeight * 0.1),
            Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 0));
    return comparisonFrame;
}
示例4: render
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat render(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // Fetch the RGBA frame once instead of re-requesting it for every use.
    Mat rgba = inputFrame.rgba();

    // Undistort into a scratch Mat using the calibrated camera parameters.
    Mat undistortedFrame = new Mat(rgba.size(), rgba.type());
    Imgproc.undistort(rgba, undistortedFrame,
            mCalibrator.getCameraMatrix(), mCalibrator.getDistortionCoefficients());

    // Build a side-by-side comparison: left half keeps the original image,
    // right half shows the left half of the undistorted result.
    Mat comparisonFrame = rgba;
    undistortedFrame.colRange(new Range(0, mWidth / 2))
            .copyTo(comparisonFrame.colRange(new Range(mWidth / 2, mWidth)));

    // Draw a thin white divider (0.5% of the width) between the two halves.
    List<MatOfPoint> border = new ArrayList<MatOfPoint>();
    final int shift = (int) (mWidth * 0.005);
    border.add(new MatOfPoint(new Point(mWidth / 2 - shift, 0), new Point(mWidth / 2 + shift, 0),
            new Point(mWidth / 2 + shift, mHeight), new Point(mWidth / 2 - shift, mHeight)));
    // NOTE: this project uses the OpenCV 2.x drawing API (Core.fillPoly /
    // Core.putText); do not switch to Imgproc.* without upgrading OpenCV.
    Core.fillPoly(comparisonFrame, border, new Scalar(255, 255, 255));

    // Label each half so the user can tell which side is which.
    Core.putText(comparisonFrame, mResources.getString(R.string.original),
            new Point(mWidth * 0.1, mHeight * 0.1),
            Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 0));
    Core.putText(comparisonFrame, mResources.getString(R.string.undistorted),
            new Point(mWidth * 0.6, mHeight * 0.1),
            Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 0));
    return comparisonFrame;
}
示例5: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    Mat imgRgba = inputFrame.rgba();

    // Preprocess a copy so the displayed preview Mat is left untouched.
    Mat img = new Mat();
    imgRgba.copyTo(img);
    List<Mat> images = ppF.getCroppedImage(img);
    Rect[] faces = ppF.getFacesForRecognition();

    // Selfie / mirror mode: flip the preview horizontally.
    if (front_camera) {
        Core.flip(imgRgba, imgRgba, 1);
    }

    // Only annotate when detection produced a consistent set of results.
    boolean haveFaces = images != null && !images.isEmpty()
            && faces != null && faces.length > 0
            && images.size() == faces.length;
    if (!haveFaces) {
        // Nothing usable detected in this frame; show it unannotated.
        return imgRgba;
    }

    faces = MatOperation.rotateFaces(imgRgba, faces, ppF.getAngleForRecognition());
    for (Rect face : faces) {
        MatOperation.drawRectangleAndLabelOnPreview(imgRgba, face, "", front_camera);
    }
    return imgRgba;
}
开发者ID:Qualeams,项目名称:Android-Face-Recognition-with-Deep-Learning-Test-Framework,代码行数:24,代码来源:DetectionActivity.java
示例6: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // The camera delivers RGBA; keep both views and hand their native
    // pointers to the JNI processing routine, which edits mRgba in place.
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    long grayAddr = mGray.getNativeObjAddr();
    long rgbaAddr = mRgba.getNativeObjAddr();
    doWithMat(grayAddr, rgbaAddr);
    return mRgba;
}
示例7: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // Pass the raw frame through untouched until initialisation finishes.
    if (!initialized) {
        return inputFrame.rgba();
    }
    // Track the processing frame rate, then run the vision pipeline.
    fps.update();
    return frame(inputFrame.rgba(), inputFrame.gray());
}
示例8: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // Cache the frame's colour and grayscale mats on the host object.
    setMatrices(inputFrame.rgba(), inputFrame.gray());

    // Only run face detection once the model is loaded and we're not paused.
    if (isMachineLearningInitialised && !isStopped) {
        fdInteractor.execute(matrixGray, this);
    }
    return matrixRgba;
}
示例9: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
// Face-detection callback, adapted from OpenCV's face-detection sample.
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();

    // Flip both mats so the image is upright: mirror horizontally for the
    // front camera, rotate 180° (flip both axes) for the rear camera.
    int flipCode = isFrontCamera ? 1 : -1;
    Core.flip(mRgba, mRgba, flipCode);
    Core.flip(mGray, mGray, flipCode);

    // Lazily compute the minimum face size as 20% of the frame height.
    if (mAbsoluteFaceSize == 0) {
        int rounded = Math.round(mGray.rows() * 0.2f);
        if (rounded > 0) {
            mAbsoluteFaceSize = rounded;
        }
    }

    MatOfRect faces = new MatOfRect();
    if (classifier != null) {
        classifier.detectMultiScale(mGray, faces, 1.1, 2, 2,
                new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
    }

    // Outline each detected face in green on the colour preview.
    Scalar faceRectColor = new Scalar(0, 255, 0, 255);
    for (Rect faceRect : faces.toArray()) {
        Imgproc.rectangle(mRgba, faceRect.tl(), faceRect.br(), faceRectColor, 3);
    }
    return mRgba;
}
示例10: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // The frame that is actually displayed. The original code drew the
    // detected circles on the grayscale working Mat and then released it
    // while returning the untouched RGBA frame, so the annotations were
    // never visible; draw on the returned frame instead.
    Mat display = inputFrame.rgba();

    // Blur into a scratch Mat so the camera-owned grayscale buffer is not
    // modified or released — CameraBridgeViewBase manages its lifetime.
    Mat blurred = new Mat();
    Imgproc.blur(inputFrame.gray(), blurred, new Size(7, 7), new Point(2, 2));

    Mat circles = new Mat();
    Imgproc.HoughCircles(blurred, circles, Imgproc.CV_HOUGH_GRADIENT, 2, 100, 100, 90, 0, 1000);
    Log.i(TAG, "size: " + circles.cols() + ", " + circles.rows());

    // Draw up to the first five detected circles: a small filled centre dot
    // plus the circle outline itself.
    if (circles.cols() > 0) {
        for (int x = 0; x < Math.min(circles.cols(), 5); x++) {
            double[] circleVec = circles.get(0, x);
            if (circleVec == null) {
                break;
            }
            Point center = new Point((int) circleVec[0], (int) circleVec[1]);
            int radius = (int) circleVec[2];
            Imgproc.circle(display, center, 3, new Scalar(255, 255, 255), 5);
            Imgproc.circle(display, center, radius, new Scalar(255, 255, 255), 2);
        }
    }

    // Release only the Mats we allocated ourselves.
    circles.release();
    blurred.release();
    return display;
}
示例11: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // Hand the native pointers of both frame views to the JNI pipeline,
    // which processes the colour frame in place.
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    long rgbaAddr = mRgba.getNativeObjAddr();
    long grayAddr = mGray.getNativeObjAddr();
    JniManager.process(rgbaAddr, grayAddr);
    return mRgba;
}
示例12: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();

    // Apply the user-selected flip/transpose to the colour frame natively.
    rotateFlipImage(ImageControllers.getInstance().getFlipType(),
            ImageControllers.getInstance().getMakeTranspose(),
            mRgba.getNativeObjAddr());

    // First frame only: derive the minimum face size from the frame height
    // and propagate it to the native detector.
    if (mAbsoluteFaceSize == 0) {
        int rounded = Math.round(mGray.rows() * mRelativeFaceSize);
        if (rounded > 0) {
            mAbsoluteFaceSize = rounded;
        }
        mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
    }

    // Detect faces with whichever detector backend is currently selected.
    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR) {
        if (mJavaDetector != null) {
            mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
        }
    } else if (mDetectorType == NATIVE_DETECTOR) {
        if (mNativeDetector != null) {
            mNativeDetector.detect(mGray, faces);
        }
        Log.v(LOG_TAG, "native detect");
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }

    // Outline every detection on the preview frame.
    for (Rect face : faces.toArray()) {
        Imgproc.rectangle(mRgba, face.tl(), face.br(), FACE_RECT_COLOR, 3);
    }
    return mRgba;
}
示例13: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // Vision-pipeline skeleton: currently passes the raw frame through.
    Mat raw = inputFrame.rgba();

    // --- Placeholder configuration for the planned pipeline (unused until
    // --- the processing below is implemented). TODO: wire these up.
    Size imageSize = new Size(1280, 720);          // resize images to this
    Boolean debug_show_preprocessed = false;        // show the preprocessed image
    Boolean debug_show_filtered = false;            // show the filtered image
    Boolean debug_draw_stats = false;               // show stats per rectangle (very spammy)
    Boolean debug_draw_center = false;              // draw center line on the screen
    Boolean debug_draw_rects = false;               // draw all found rectangles
    // Weights for scoring candidate rectangles.
    double score_ratio_weight = 0.9;
    double score_distance_x_weight = 1;
    double score_distance_y_weight = 1.2;
    double score_area_weight = 3;

    // NOTE(review): the original code wrapped an EMPTY try block in a catch
    // that reported e.getMessage() to telemetry — the catch was unreachable
    // dead code and has been removed. Re-add a try/catch around the real
    // processing when it is implemented.
    // This is where the magic will happen. inputFrame has all the data for
    // each camera frame.
    telemetry.update();
    return raw;
}
示例14: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // Forward the colour frame to the single-argument overload so listeners
    // can modify the preview in place before it is displayed.
    Mat matRgba = inputFrame.rgba();
    onCameraFrame(matRgba);
    return matRgba;
}
示例15: onCameraFrame
import org.opencv.android.CameraBridgeViewBase; //导入方法依赖的package包/类
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // Read the current Canny threshold from the seek bar on every frame.
    threshold = thresholdBar.getProgress();
    edge = inputFrame.rgba();

    // Apply edge detection only while the toggle is on; the upper threshold
    // follows the conventional 1:3 Canny ratio.
    Switch edgeSwitch = (Switch) findViewById(R.id.edgeSwitch);
    if (edgeSwitch.isChecked()) {
        Canny(edge, edge, threshold, threshold * 3);
    }
    return edge;
}