This article collects typical usage examples of the Java class org.opencv.video.Video. If you are wondering what the Video class does, how to use it, or what working examples look like, the curated class examples below may help.
The Video class belongs to the org.opencv.video package. Five code examples of the Video class are shown below, sorted by popularity by default.
Example 1: onCameraFrame
import org.opencv.video.Video; // import the required package/class

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    final int viewMode = mViewMode;
    switch (viewMode) {
        case VIEW_MODE_OPTICAL_FLOW:
            mGray = inputFrame.gray();
            if (features.toArray().length == 0) {
                // First frame: seed the tracker with a regular grid of points.
                int rowStep = 50, colStep = 100;
                int nRows = mGray.rows() / rowStep, nCols = mGray.cols() / colStep;
                // Log.d(TAG, "\nRows: " + nRows + "\nCols: " + nCols + "\n");
                Point[] points = new Point[nRows * nCols];
                for (int i = 0; i < nRows; i++) {
                    for (int j = 0; j < nCols; j++) {
                        points[i * nCols + j] = new Point(j * colStep, i * rowStep);
                        // Log.d(TAG, "\nRow: " + i * rowStep + "\nCol: " + j * colStep + "\n: ");
                    }
                }
                features.fromArray(points);
                prevFeatures.fromList(features.toList());
                mPrevGray = mGray.clone();
                break;
            }
            // Track the grid points from the previous frame into the current one.
            nextFeatures.fromArray(prevFeatures.toArray());
            Video.calcOpticalFlowPyrLK(mPrevGray, mGray, prevFeatures, nextFeatures, status, err);
            List<Point> prevList = features.toList(), nextList = nextFeatures.toList();
            Scalar color = new Scalar(255);
            for (int i = 0; i < prevList.size(); i++) {
                // Core.circle(mGray, prevList.get(i), 5, color);
                Imgproc.line(mGray, prevList.get(i), nextList.get(i), color);
            }
            mPrevGray = mGray.clone();
            break;
        case VIEW_MODE_KLT_TRACKER:
            mGray = inputFrame.gray();
            if (features.toArray().length == 0) {
                // First frame: detect up to 10 strong corners to initialize the KLT tracker.
                Imgproc.goodFeaturesToTrack(mGray, features, 10, 0.01, 10);
                Log.d(TAG, features.toList().size() + "");
                prevFeatures.fromList(features.toList());
                mPrevGray = mGray.clone();
                // prevFeatures.fromList(nextFeatures.toList());
                break;
            }
            // OpticalFlow(mPrevGray.getNativeObjAddr(), mGray.getNativeObjAddr(), prevFeatures.getNativeObjAddr(), nextFeatures.getNativeObjAddr());
            Video.calcOpticalFlowPyrLK(mPrevGray, mGray, prevFeatures, nextFeatures, status, err);
            List<Point> drawFeature = nextFeatures.toList();
            // Log.d(TAG, drawFeature.size() + "");
            for (int i = 0; i < drawFeature.size(); i++) {
                Point p = drawFeature.get(i);
                Imgproc.circle(mGray, p, 5, new Scalar(255));
            }
            mPrevGray = mGray.clone();
            prevFeatures.fromList(nextFeatures.toList());
            break;
        default:
            mViewMode = VIEW_MODE_KLT_TRACKER;
    }
    return mGray;
}
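For reference, the core pyramidal Lucas-Kanade call used above can be isolated into a minimal standalone sketch. The snippet below is not part of the original example; prevGray and nextGray stand for two consecutive 8-bit grayscale frames:

import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
import org.opencv.video.Video;

static void trackBetweenFrames(Mat prevGray, Mat nextGray) {
    // Detect up to 100 strong corners in the first frame.
    MatOfPoint corners = new MatOfPoint();
    Imgproc.goodFeaturesToTrack(prevGray, corners, 100, 0.01, 10);
    MatOfPoint2f prevPts = new MatOfPoint2f(corners.toArray());
    MatOfPoint2f nextPts = new MatOfPoint2f();
    MatOfByte status = new MatOfByte();
    MatOfFloat err = new MatOfFloat();
    // Pyramidal Lucas-Kanade: estimate each corner's position in the next frame.
    Video.calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err);
    byte[] found = status.toArray();
    Point[] tracked = nextPts.toArray();
    for (int i = 0; i < found.length; i++) {
        if (found[i] == 1) { // status is 1 where tracking succeeded
            Imgproc.circle(nextGray, tracked[i], 5, new Scalar(255));
        }
    }
}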
Example 2: track
import org.opencv.video.Video; // import the required package/class

/**
 * @return a Pair of the new, FILTERED last and current points, or null if it hasn't managed to track anything.
 */
Pair<Point[], Point[]> track(final Mat lastImg, final Mat currentImg, Point[] lastPoints) {
    final int size = lastPoints.length;
    final MatOfPoint2f currentPointsMat = new MatOfPoint2f();
    final MatOfPoint2f pointsFBMat = new MatOfPoint2f();
    final MatOfByte statusMat = new MatOfByte();
    final MatOfFloat errSimilarityMat = new MatOfFloat();
    final MatOfByte statusFBMat = new MatOfByte();
    final MatOfFloat errSimilarityFBMat = new MatOfFloat();
    // Forward-backward tracking: track forward, then track the result back to the first image.
    Video.calcOpticalFlowPyrLK(lastImg, currentImg, new MatOfPoint2f(lastPoints), currentPointsMat,
            statusMat, errSimilarityMat, WINDOW_SIZE, MAX_LEVEL, termCriteria, 0, LAMBDA);
    Video.calcOpticalFlowPyrLK(currentImg, lastImg, currentPointsMat, pointsFBMat,
            statusFBMat, errSimilarityFBMat, WINDOW_SIZE, MAX_LEVEL, termCriteria, 0, LAMBDA);
    final byte[] status = statusMat.toArray();
    float[] errSimilarity = new float[lastPoints.length];
    // final byte[] statusFB = statusFBMat.toArray();
    final float[] errSimilarityFB = errSimilarityFBMat.toArray();
    // Compute the real FB error (relative to the LAST points, not the current ones).
    final Point[] pointsFB = pointsFBMat.toArray();
    for (int i = 0; i < size; i++) {
        errSimilarityFB[i] = Util.norm(pointsFB[i], lastPoints[i]);
    }
    final Point[] currPoints = currentPointsMat.toArray();
    // Compute the real similarity error.
    errSimilarity = normCrossCorrelation(lastImg, currentImg, lastPoints, currPoints, status);
    // TODO: errSimilarityFB has a problem, != from C++
    // Filter out points with forward-backward error above the median AND points with similarity error above the median.
    return filterPts(lastPoints, currPoints, errSimilarity, errSimilarityFB, status);
}
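The filterPts helper referenced above is not included in this example. A hypothetical reconstruction of its median-based filtering, following the comments in the code (the exact comparisons in the original project may differ; android.util.Pair is assumed), might look like:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.opencv.core.Point;
import android.util.Pair;

// Hypothetical sketch: keep points whose similarity error is at least the median
// and whose forward-backward error is at most the median.
static Pair<Point[], Point[]> filterPtsSketch(Point[] last, Point[] curr,
        float[] errSim, float[] errFB, byte[] status) {
    float medSim = median(errSim);
    float medFB = median(errFB);
    List<Point> lastOut = new ArrayList<>();
    List<Point> currOut = new ArrayList<>();
    for (int i = 0; i < last.length; i++) {
        if (status[i] == 1 && errSim[i] >= medSim && errFB[i] <= medFB) {
            lastOut.add(last[i]);
            currOut.add(curr[i]);
        }
    }
    if (currOut.isEmpty()) return null; // nothing tracked
    return new Pair<>(lastOut.toArray(new Point[0]), currOut.toArray(new Point[0]));
}

static float median(float[] values) {
    float[] sorted = values.clone();
    Arrays.sort(sorted);
    int n = sorted.length;
    return (n % 2 == 1) ? sorted[n / 2] : (sorted[n / 2 - 1] + sorted[n / 2]) / 2f;
}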
Example 3: getMogInstance
import org.opencv.video.Video; // import the required package/class

/**
 * Get an instance of BackgroundSubtractorMOG2 with the desired configuration.
 *
 * @param history the number of frames to consider in the background model.
 * @param shadowThreshold the threshold used to decide whether a pixel is a shadow.
 * @return an instance of BackgroundSubtractorMOG2.
 */
private BackgroundSubtractorMOG2 getMogInstance(int history, double shadowThreshold) {
    BackgroundSubtractorMOG2 instance =
            Video.createBackgroundSubtractorMOG2(history, VAR_THRESHOLD, DETECT_SHADOWS);
    instance.setBackgroundRatio(BACKGROUND_RATIO);
    instance.setVarInit(VAR_INIT);
    instance.setShadowThreshold(shadowThreshold);
    return instance;
}
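A short usage sketch (assumed, not part of the original class): the configured subtractor is applied per frame to obtain a foreground mask.

// Sketch: history=500 and shadowThreshold=0.5 are placeholder values,
// and 'frame' stands for an incoming camera image (Mat).
BackgroundSubtractorMOG2 mog = getMogInstance(500, 0.5);
Mat fgMask = new Mat();
// learningRate = -1 lets OpenCV pick the rate automatically from the history length.
mog.apply(frame, fgMask, -1);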
Example 4: ExtractPath
import org.opencv.video.Video; // import the required package/class

public ExtractPath() {
    super();
    mKeyPointsPrev = new MatOfKeyPoint();
    // Set up feature detection.
    try {
        mFeatureDectector = FeatureDetector.create(FeatureDetector.FAST);
    } catch (UnsatisfiedLinkError err) {
        Log.e(TAG, "Feature detector failed with");
        err.printStackTrace();
    }
    // Set up descriptor extraction and matching.
    mDescExtractor = DescriptorExtractor.create(DescriptorExtractor.BRISK);
    mDescMatcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
    mPrevFrame = new Mat();
    prevKeyPoints = new MatOfKeyPoint();
    RGBFrame = new Mat();
    mForeGroundMask = new Mat();
    mContours = new ArrayList<MatOfPoint>();
    // Create a new BackgroundSubtractorMOG2 with history=50, varThreshold=0, detectShadows=true.
    mBackgroundSub = Video.createBackgroundSubtractorMOG2(50, 0, true);
}
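The constructor only wires up the detector, extractor, matcher, and subtractor. A hedged sketch of how these members might then be used on each frame (the method name and flow are assumed, not taken from the original class):

// Hypothetical per-frame processing using the members initialized above.
void processFrameSketch(Mat rgbFrame) {
    // Subtract the background model; shadow pixels are marked because detectShadows=true.
    mBackgroundSub.apply(rgbFrame, mForeGroundMask);
    // Detect FAST keypoints and compute BRISK descriptors for matching against the previous frame.
    MatOfKeyPoint keyPoints = new MatOfKeyPoint();
    mFeatureDectector.detect(rgbFrame, keyPoints);
    Mat descriptors = new Mat();
    mDescExtractor.compute(rgbFrame, keyPoints, descriptors);
}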
Example 5: execute
import org.opencv.video.Video; // import the required package/class

@Override
public List<CVParticle> execute(List<CVParticle> input) throws Exception {
    List<CVParticle> result = new ArrayList<CVParticle>();
    if (input.size() != 2 || !(input.get(0) instanceof Frame) || !(input.get(1) instanceof Frame))
        return result;
    Frame frame1 = (Frame) input.get(0);
    Frame frame2 = (Frame) input.get(1);
    // Decode both frames and convert them to single-channel grayscale images.
    MatOfByte mob1 = new MatOfByte(frame1.getImageBytes());
    Mat image1 = Highgui.imdecode(mob1, Highgui.CV_LOAD_IMAGE_ANYCOLOR);
    Mat image1Gray = new Mat(image1.size(), CvType.CV_8UC1);
    Imgproc.cvtColor(image1, image1Gray, Imgproc.COLOR_RGB2GRAY);
    MatOfByte mob2 = new MatOfByte(frame2.getImageBytes());
    Mat image2 = Highgui.imdecode(mob2, Highgui.CV_LOAD_IMAGE_ANYCOLOR);
    Mat image2Gray = new Mat(image2.size(), CvType.CV_8UC1);
    Imgproc.cvtColor(image2, image2Gray, Imgproc.COLOR_RGB2GRAY);
    // Dense optical flow: one (dx, dy) float pair per pixel.
    Mat opticalFlow = new Mat(image1Gray.size(), CvType.CV_32FC2);
    Video.calcOpticalFlowFarneback(image1Gray, image2Gray, opticalFlow, 0.5, 1, 1, 1, 7, 1.5, 1);
    int cols = opticalFlow.cols();
    int rows = opticalFlow.rows();
    int maxz = opticalFlow.get(0, 0).length;
    float[] tmp = new float[maxz];
    float[][][] dense = new float[cols][rows][maxz];
    for (int y = 0; y < opticalFlow.rows(); y++) {
        for (int x = 0; x < opticalFlow.cols(); x++) {
            opticalFlow.get(y, x, tmp);
            dense[x][y][0] = tmp[0];
            dense[x][y][1] = tmp[1];
        }
    }
    Feature feature = new Feature(frame1.getStreamId(), frame1.getSequenceNr(), name,
            frame2.getSequenceNr() - frame1.getSequenceNr(), null, dense);
    if (outputFrame) {
        frame1.getFeatures().add(feature);
        result.add(frame1);
    } else {
        result.add(feature);
    }
    return result;
}
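As a follow-up sketch (not part of the original class), the dense two-channel flow produced by calcOpticalFlowFarneback can be summarized, for example into a mean motion vector:

// Sketch: average the per-pixel (dx, dy) displacements of a CV_32FC2 flow field.
static double[] meanFlow(Mat opticalFlow) {
    double sumX = 0, sumY = 0;
    float[] vec = new float[2];
    for (int y = 0; y < opticalFlow.rows(); y++) {
        for (int x = 0; x < opticalFlow.cols(); x++) {
            opticalFlow.get(y, x, vec); // horizontal and vertical displacement at (x, y)
            sumX += vec[0];
            sumY += vec[1];
        }
    }
    double n = (double) opticalFlow.rows() * opticalFlow.cols();
    return new double[]{ sumX / n, sumY / n };
}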