本文整理匯總了Java中org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame.rgba方法的典型用法代碼示例。如果您正苦於以下問題:Java CvCameraViewFrame.rgba方法的具體用法?Java CvCameraViewFrame.rgba怎麽用?Java CvCameraViewFrame.rgba使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame
的用法示例。
在下文中一共展示了CvCameraViewFrame.rgba方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: runs blob detection on the RGBA preview and overlays
 * the detected contours, a swatch of the selected color, and the HSV
 * spectrum strip. Until a color has been picked, the frame passes through
 * untouched.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the (possibly annotated) RGBA frame to display
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    if (!mIsColorSelected) {
        return mRgba;
    }
    mDetector.process(mRgba);
    List<MatOfPoint> contours = mDetector.getContours();
    Log.e(TAG, "Contours count: " + contours.size());
    Imgproc.drawContours(mRgba, contours, -1, CONTOUR_COLOR);
    // Small corner swatch showing the currently selected blob color.
    Mat colorLabel = mRgba.submat(4, 68, 4, 68);
    colorLabel.setTo(mBlobColorRgba);
    // Spectrum strip rendered immediately to the right of the swatch.
    Mat spectrumLabel = mRgba.submat(4, 4 + mSpectrum.rows(), 70, 70 + mSpectrum.cols());
    mSpectrum.copyTo(spectrumLabel);
    return mRgba;
}
示例2: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: detects vehicles in the gray frame (Java cascade or
 * native detector, depending on mDetectorType), feeds the detections to the
 * tracker, then draws a per-vehicle colored rectangle and an estimated
 * distance label on the RGBA frame.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the annotated RGBA frame
 */
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    MatOfRect objs = new MatOfRect();
    // Adjust minimum size for detected objects relative to frame height (12%).
    int width = mGray.cols();
    int height = mGray.rows();
    relativeObjSize = Math.round(height * 0.12f);
    mNativeDetector.setMinDetectionSize(relativeObjSize);
    if (mDetectorType == JAVA_DETECTOR) {
        javaClassifier.detectMultiScale(mGray, objs, 1.1, 4, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                new Size(relativeObjSize, relativeObjSize), new Size());
    } else {
        mNativeDetector.detect(mGray, objs);
    }
    track_vehicles(objs);
    // Draw the final classification.
    Rect[] objArray = vehicles.toArray(new Rect[0]);
    for (int i = 0; i < objArray.length; i++) {
        // Distance is estimated from the vehicle's width as a fraction of the frame width.
        String distance = String.format("%.2fm", pixels_to_meters((double) objArray[i].width / (double) width));
        // NOTE(review): assumes vids and vehicles stay index-aligned after
        // track_vehicles(); an id list shorter than the vehicle list would throw — confirm.
        Scalar color = colors[vids.get(i) % colors.length];
        Core.rectangle(mRgba, objArray[i].tl(), objArray[i].br(), color, 3);
        Core.putText(mRgba, distance, objArray[i].tl(), Core.FONT_HERSHEY_SIMPLEX, 1.5, color, 4);
    }
    // Removed useless "objs = null;" — nulling a local just before returning has no effect.
    return mRgba;
}
示例3: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: detects faces in the gray frame with either the Java
 * cascade classifier or the native detector, outlines each face on the RGBA
 * frame, then rotates the frame 90 degrees and mirrors it horizontally
 * before returning it for display.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the annotated, rotated, and mirrored RGBA frame
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
mGray = inputFrame.gray();
// One-time setup: derive the absolute minimum face size from the frame height.
if (mAbsoluteFaceSize == 0) {
int height = mGray.rows();
if (Math.round(height * mRelativeFaceSize) > 0) {
mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
}
mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
}
MatOfRect faces = new MatOfRect();
if (mDetectorType == JAVA_DETECTOR) {
if (mJavaDetector != null)
mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
} else if (mDetectorType == NATIVE_DETECTOR) {
if (mNativeDetector != null)
mNativeDetector.detect(mGray, faces);
} else {
Log.e(TAG, "Detection method is not selected!");
}
Rect[] facesArray = faces.toArray();
for (int i = 0; i < facesArray.length; i++) {
Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
}
// Rotate 90 degrees.
// NOTE(review): Point takes (x, y) = (cols/2, rows/2); passing (rows/2, cols/2)
// looks swapped — possibly intentional for fitting a 90-degree rotation into the
// same-size Mat, but confirm against the expected preview orientation.
Mat rotateMat = Imgproc.getRotationMatrix2D(new Point(mRgba.rows() / 2, mRgba.cols() / 2), 90, 1);
Imgproc.warpAffine(mRgba, mRgba, rotateMat, mRgba.size());
// Flip around the y axis (horizontal mirror, e.g. for a front camera).
Core.flip(mRgba, mRgba, 1);
return mRgba;
}
示例4: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: detects faces in the gray frame and outlines each one
 * on the RGBA frame. Only the Java cascade detector is active; the native
 * path and the follow-up activity launch are currently disabled.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the annotated RGBA frame
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    Log.w(TAG, "Got frame");
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    // Lazily compute the minimum face size as a fraction of the frame height.
    if (mAbsoluteFaceSize == 0) {
        int frameHeight = mGray.rows();
        if (Math.round(frameHeight * mRelativeFaceSize) > 0) {
            mAbsoluteFaceSize = Math.round(frameHeight * mRelativeFaceSize);
        }
    }
    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR) {
        if (mJavaDetector != null) {
            mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
        }
    } else if (mDetectorType == NATIVE_DETECTOR) {
        // Native detection is currently disabled:
        // if (mNativeDetector != null)
        //     mNativeDetector.detect(mGray, faces);
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }
    for (Rect face : faces.toArray()) {
        Core.rectangle(mRgba, face.tl(), face.br(), FACE_RECT_COLOR, 3);
    }
    // Disabled: jump to PreviewActivity when at least one face was found.
    // if (facesArray.length > 0) {
    //     Intent i = new Intent(FdActivity.this, PreviewActivity.class);
    //     i.putExtra("num", facesArray.length);
    //     startActivity(i);
    // }
    return mRgba;
}
示例5: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: substitutes the live frame with the next mocked frame
 * and hands it, together with its source file name, to the detector.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the (mocked) frame to display
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    final Mat frame = inputFrame.rgba();
    // Replace the frame contents with the next mocked frame; keep its file name.
    final String filename = mocker.getNextFrame(frame).getName();
    detector.proccessFrame(frame, filename);
    return frame;
}
示例6: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: forwards the RGBA frame to native code via raw Mat
 * pointers and returns the natively-processed result.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the Mat filled in by the native processFrame call
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    Mat source = inputFrame.rgba();
    Mat result = new Mat();
    // The native side reads from `source` and writes its output into `result`.
    processFrame(source.getNativeObjAddr(), result.getNativeObjAddr());
    return result;
}
示例7: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: renders the preview according to the view mode chosen
 * in MainActivity — raw RGBA, Canny edges, Sobel gradients, pixelization,
 * grayscale, or native feature detection.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the frame to display for the current view mode
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame)
{
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    int mode = MainActivity.viewMode;
    if (mode == MainActivity.VIEW_MODE_CANNY) {
        // Edge map, converted back to 4 channels so it can be displayed.
        Imgproc.Canny(mGray, mIntermediateMat, 80, 100);
        Imgproc.cvtColor(mIntermediateMat, mGray, Imgproc.COLOR_GRAY2BGRA, 4);
        return mGray;
    }
    if (mode == MainActivity.VIEW_MODE_SOBEL) {
        Imgproc.Sobel(mGray, mGray, CvType.CV_8U, 1, 1);
        Imgproc.cvtColor(mGray, mGray, Imgproc.COLOR_GRAY2BGRA, 4);
        return mGray;
    }
    if (mode == MainActivity.VIEW_MODE_PIXELIZE) {
        // Downscale then upscale with nearest-neighbor for a mosaic effect.
        Imgproc.resize(mGray, mIntermediateMat, mSize0, 0.1, 0.1, Imgproc.INTER_NEAREST);
        Imgproc.resize(mIntermediateMat, mRgba, mRgba.size(), 0.0, 0.0, Imgproc.INTER_NEAREST);
        return mRgba;
    }
    if (mode == MainActivity.VIEW_MODE_GRAY) {
        return mGray;
    }
    if (mode == MainActivity.VIEW_MODE_FEATURES) {
        // Native feature finder draws directly onto the RGBA frame.
        FindFeatures(mGray.getNativeObjAddr(), mRgba.getNativeObjAddr());
        return mRgba;
    }
    // VIEW_MODE_RGBA, VIEW_MODE_HIST, and any unrecognized mode: raw preview.
    return mRgba;
}
示例8: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: while the activity is in the "state" mode the raw RGBA
 * frame is passed through; otherwise the frame is thresholded, Sudoku boxes
 * are extracted, and the solver's numbers are drawn onto the frame. Any
 * processing failure falls back to the unannotated frame (best effort).
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the (possibly annotated) RGBA frame; also cached in currentFrame
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
Mat img = inputFrame.rgba();
/*if(CameraProcessActivity.this.onTouch){
//CameraProcessActivity.this.picture = img;
CameraProcessActivity.this.onTouch = false;
}*/
// Keep the latest frame available to the rest of the activity.
this.currentFrame = img;
if(CameraProcessActivity.this.state) return img;
else {
// Binarize the gray frame so the grid/digit extraction can work on it.
Mat im = Thresholding.normalThresholding(inputFrame.gray());
try{
// Locate the 8(+1) grid boxes, then overlay the solved digits.
Mat stat = Processing.ExtractBoxes(im,8);
Processing.drawNumbers(img,CameraProcessActivity.this.emptySud,CameraProcessActivity.this.filledSud,stat,Processing.getNumberColor());
this.currentFrame = img;
return img;
}catch (Exception e){
// Best effort: if extraction/drawing fails for this frame, show it unannotated.
return img;
}
}
}
示例9: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
* Called for processing of each camera frame
* @param inputFrame - the delivered frame
* @return mRgba
*/
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
// Retrieve timestamp
// This is where the timestamp for each video frame originates
time = System.currentTimeMillis();
// Release last frame's native buffers before taking new references,
// otherwise the old Mats would linger until GC finalization.
mRgba.release();
mGray.release();
// Get RGBA and Gray versions
mRgba = inputFrame.rgba();
mGray = inputFrame.gray();
// Write frame to video
if (VIDEO) {
encoder.writeFrame(mRgba.dataAddr(), time);
}
// Send the frame to rPPG for processing
// To C++ — raw native Mat pointers are handed across JNI together with the timestamp
rPPG.processFrame(mRgba.getNativeObjAddr(), mGray.getNativeObjAddr(), time);
return mRgba;
}
示例10: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: caches a gray copy of the frame for later corner
 * detection and overlays the most recently detected quadrilateral (corner
 * circles plus connecting edges) on the RGBA preview.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the annotated RGBA frame
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame)
{
    last_frame = inputFrame.gray().clone();
    Mat rgba_img = inputFrame.rgba();
    // Snapshot the shared field once so a concurrent update cannot change it mid-draw.
    Point[] corners = last_corners;
    // Fixed: guard on length — the original indexed corners[0..3] unconditionally
    // and would throw ArrayIndexOutOfBoundsException for any array shorter than 4.
    if (corners != null && corners.length == 4) {
        for (Point p : corners) {
            Core.circle(rgba_img, p, 10, CIRCLE_COLOR);
        }
        // Connect consecutive corners, wrapping from the last back to the first.
        for (int i = 0; i < 4; i++) {
            Core.line(rgba_img, corners[i], corners[(i + 1) % 4], LINE_COLOR);
        }
    }
    return rgba_img;
}
示例11: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback implementing a simple motion trigger: after a 5-frame
 * warm-up, the first gray frame is stored as the background reference; each
 * later frame is differenced against it, and a rise of more than 5 in the
 * mean difference followed by a fall plays a bongo sound once.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the reference frame, the raw RGBA frame, or the difference image
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
if (firstFrame == null && cnt > 5) {
// Warm-up done: capture the background reference (clone — the delivered Mat is reused).
firstFrame = inputFrame.gray().clone();
return firstFrame;
} else if (cnt <= 5) {
// Still warming up: let the camera settle before grabbing the reference.
cnt++;
return inputFrame.rgba();
} else {
// Motion estimate: mean of |current - reference| over the frame.
Core.subtract(inputFrame.gray(), firstFrame, result);
Scalar newMean = Core.mean(result);
double newMeanVal = newMean.val[0];
if (5 < newMeanVal - lastMean) {
// Sharp rise in motion: arm the trigger.
down=true;
} else if(newMeanVal < lastMean) {
if (down) {
// Motion falling after a rise: fire the sound once, then disarm.
Utils.PlaySound(R.raw.bongo_1, context);
down = false;
}
}
Log.d("tamMean","mean " + newMeanVal);
lastMean = newMeanVal;
return result;
}
}
示例12: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: detects faces in the gray frame with mFaceDetector and
 * outlines each detection on the RGBA frame.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the annotated RGBA frame
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    // One-time setup: derive the minimum face size from the frame height.
    if (mAbsoluteFaceSize == 0) {
        int frameHeight = mGray.rows();
        if (Math.round(frameHeight * mRelativeFaceSize) > 0) {
            mAbsoluteFaceSize = Math.round(frameHeight * mRelativeFaceSize);
        }
        mFaceDetector.setMinFaceSize(mAbsoluteFaceSize);
    }
    for (Rect face : mFaceDetector.Detectfaces(mGray)) {
        Core.rectangle(mRgba, face.tl(), face.br(), FACE_RECT_COLOR, 3);
    }
    return mRgba;
}
示例13: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: while a match computation is running, freeze the
 * preview on the last returned frame; otherwise cache a gray copy of the
 * current frame and show the live RGBA image.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the live RGBA frame, or the frozen last frame during matching
 */
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    if (!calculatingMatch) {
        // Clone: the delivered gray Mat is reused by the camera bridge.
        this.lastFrame = inputFrame.gray().clone();
        return inputFrame.rgba();
    }
    return this.lastFrame;
}
示例14: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: runs the Java cascade face detector on the gray frame
 * and draws a rectangle around each detected face on the RGBA frame.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the annotated RGBA frame
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame)
{
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    // Compute the minimum face size once, as a fraction of the frame height.
    if (mAbsoluteFaceSize == 0)
    {
        int frameHeight = mGray.rows();
        if (Math.round(frameHeight * mRelativeFaceSize) > 0)
        {
            mAbsoluteFaceSize = Math.round(frameHeight * mRelativeFaceSize);
        }
    }
    MatOfRect faces = new MatOfRect();
    if (mJavaDetector != null)
    {
        mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
    }
    // Draw rectangles around every detection.
    for (Rect face : faces.toArray())
    {
        Core.rectangle(mRgba, face.tl(), face.br(), FACE_RECT_COLOR, 3);
    }
    return mRgba;
}
示例15: onCameraFrame
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame; //導入方法依賴的package包/類
/**
 * Per-frame callback: every MODE_PROCESS_EVERY_N_FRAME frames, tries to
 * locate a card in the gray frame; the cropped card image is pushed to the
 * snapshot view either always or only on a confirmed rectangle match,
 * depending on MODE_ONLY_DISPLAY_MATCH. The live preview always shows the
 * unmodified RGBA frame.
 *
 * @param inputFrame the camera frame delivered by CameraBridgeViewBase
 * @return the raw RGBA frame for the live preview
 */
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    Mat gray = inputFrame.gray();
    // Fixed-size destination for the rectified card image (223x310).
    Mat imgCard = new Mat(310, 223, gray.type());
    if (mFrameCount % MODE_PROCESS_EVERY_N_FRAME == 0) {
        int res = Card.findCard(gray, imgCard);
        if (MODE_ONLY_DISPLAY_MATCH == 0
                || (MODE_ONLY_DISPLAY_MATCH == 1 && res == Card.RECTANGLE_FOUND)) {
            updateSnapshotFrame(imgCard);
        }
    }
    mFrameCount++;
    // Fixed leak: the original allocated "Mat liveMat = new Mat()" and then
    // immediately overwrote the reference with inputFrame.rgba(), orphaning
    // the native buffer until GC finalization.
    return inputFrame.rgba();
}