This article collects typical usage examples of the Java method org.opencv.utils.Converters.Mat_to_vector_Mat. If you are unsure how Converters.Mat_to_vector_Mat is used in practice, the curated code examples here should help; you can also explore further usage examples of its containing class, org.opencv.utils.Converters. The method unpacks a Mat produced by the native layer back into a Java-side List<Mat>, and is the counterpart of Converters.vector_Mat_to_Mat, which packs a List<Mat> into a single Mat for the JNI call.
The following shows 15 code examples of the Converters.Mat_to_vector_Mat method, sorted by popularity by default.
Example 1: calibrate
import org.opencv.utils.Converters; //import the class the method depends on
public static double calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria)
{
    // Pack the input lists into single Mats so they can be passed across JNI.
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrate_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
    // Unpack the native output Mats into the caller's lists, then release the temporaries.
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
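A caller-side sketch for the wrapper above (not part of the original example set): the enclosing class is not shown in the snippet, so FisheyeWrapper below is a hypothetical stand-in for it, and objectPoints/imagePoints are assumed to hold one CV_32FC3/CV_32FC2 Mat of pattern points per calibration view.

import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.core.TermCriteria;

public class FisheyeCalibrationSketch {
    public static double calibrateViews(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize) {
        Mat K = new Mat();                       // output 3x3 camera matrix
        Mat D = new Mat();                       // output distortion coefficients
        List<Mat> rvecs = new ArrayList<Mat>();  // per-view rotations, filled via Converters.Mat_to_vector_Mat
        List<Mat> tvecs = new ArrayList<Mat>();  // per-view translations
        TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 100, 1e-6);
        int flags = 0; // combine the calibration flag constants of your build as needed

        // FisheyeWrapper.calibrate refers to the wrapper shown above (hypothetical class name).
        double rms = FisheyeWrapper.calibrate(objectPoints, imagePoints, imageSize, K, D, rvecs, tvecs, flags, criteria);
        System.out.println("RMS error: " + rms + ", views: " + rvecs.size());
        return rms;
    }
}

Note that rvecs and tvecs can be passed in empty; the wrapper fills them through Converters.Mat_to_vector_Mat.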
Example 2: compute
import org.opencv.utils.Converters; //import the class the method depends on
public void compute(List<Mat> images, List<MatOfKeyPoint> keypoints, List<Mat> descriptors)
{
    Mat images_mat = Converters.vector_Mat_to_Mat(images);
    // Keypoints use the vector-of-vector converter; the temporary list backs the packed Mat.
    List<Mat> keypoints_tmplm = new ArrayList<Mat>((keypoints != null) ? keypoints.size() : 0);
    Mat keypoints_mat = Converters.vector_vector_KeyPoint_to_Mat(keypoints, keypoints_tmplm);
    Mat descriptors_mat = new Mat();
    compute_1(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj, descriptors_mat.nativeObj);
    Converters.Mat_to_vector_vector_KeyPoint(keypoints_mat, keypoints);
    keypoints_mat.release();
    Converters.Mat_to_vector_Mat(descriptors_mat, descriptors);
    descriptors_mat.release();
    return;
}
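The batch compute overload above matches Feature2D.compute, so a typical caller pairs it with detect on the same image list. A minimal sketch, assuming ORB as the detector/extractor and placeholder image paths:

import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.features2d.ORB;
import org.opencv.imgcodecs.Imgcodecs;

public class BatchComputeSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Load a couple of grayscale images (paths are placeholders).
        List<Mat> images = new ArrayList<Mat>();
        images.add(Imgcodecs.imread("frame1.png", Imgcodecs.IMREAD_GRAYSCALE));
        images.add(Imgcodecs.imread("frame2.png", Imgcodecs.IMREAD_GRAYSCALE));

        ORB orb = ORB.create();

        // Detect keypoints per image, then compute one descriptor Mat per image.
        List<MatOfKeyPoint> keypoints = new ArrayList<MatOfKeyPoint>();
        orb.detect(images, keypoints);

        List<Mat> descriptors = new ArrayList<Mat>();
        orb.compute(images, keypoints, descriptors);

        System.out.println("Descriptor Mats returned: " + descriptors.size());
    }
}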
Example 3: calibrateCamera
import org.opencv.utils.Converters; //import the class the method depends on
public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrateCamera_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
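A hedged end-to-end sketch of how this wrapper is normally driven, assuming it is the Calib3d.calibrateCamera overload and that calib1.png to calib10.png are chessboard images with a 9x6 inner-corner pattern (adjust both to your setup):

import java.util.ArrayList;
import java.util.List;

import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.MatOfPoint3f;
import org.opencv.core.Point3;
import org.opencv.core.Size;
import org.opencv.imgcodecs.Imgcodecs;

public class ChessboardCalibrationSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        Size patternSize = new Size(9, 6);
        float squareSize = 0.025f; // 25 mm squares, in metres

        // The same planar 3D pattern is reused for every accepted view.
        MatOfPoint3f pattern = new MatOfPoint3f();
        List<Point3> patternPoints = new ArrayList<Point3>();
        for (int y = 0; y < patternSize.height; y++)
            for (int x = 0; x < patternSize.width; x++)
                patternPoints.add(new Point3(x * squareSize, y * squareSize, 0));
        pattern.fromList(patternPoints);

        List<Mat> objectPoints = new ArrayList<Mat>();
        List<Mat> imagePoints = new ArrayList<Mat>();
        Size imageSize = new Size();

        for (int i = 1; i <= 10; i++) {
            Mat gray = Imgcodecs.imread("calib" + i + ".png", Imgcodecs.IMREAD_GRAYSCALE);
            if (gray.empty()) continue;
            imageSize = gray.size();
            MatOfPoint2f corners = new MatOfPoint2f();
            if (Calib3d.findChessboardCorners(gray, patternSize, corners)) {
                objectPoints.add(pattern);
                imagePoints.add(corners);
            }
        }

        Mat cameraMatrix = new Mat();
        Mat distCoeffs = new Mat();
        List<Mat> rvecs = new ArrayList<Mat>();
        List<Mat> tvecs = new ArrayList<Mat>();
        double rms = Calib3d.calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);
        System.out.println("RMS reprojection error: " + rms);
    }
}

The caller passes empty rvecs/tvecs lists; the wrapper repopulates them with one pose Mat per accepted view.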
Example 4: finalize
import org.opencv.utils.Converters; //import the class the method depends on
public void finalize(List<Mat> inputs, List<Mat> outputs)
{
    Mat inputs_mat = Converters.vector_Mat_to_Mat(inputs);
    Mat outputs_mat = new Mat();
    finalize_1(nativeObj, inputs_mat.nativeObj, outputs_mat.nativeObj);
    Converters.Mat_to_vector_Mat(outputs_mat, outputs);
    outputs_mat.release();
    return;
}
Example 5: buildOpticalFlowPyramid
import org.opencv.utils.Converters; //import the class the method depends on
public static int buildOpticalFlowPyramid(Mat img, List<Mat> pyramid, Size winSize, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder, boolean tryReuseInputImage)
{
    Mat pyramid_mat = new Mat();
    int retVal = buildOpticalFlowPyramid_0(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel, withDerivatives, pyrBorder, derivBorder, tryReuseInputImage);
    Converters.Mat_to_vector_Mat(pyramid_mat, pyramid);
    pyramid_mat.release();
    return retVal;
}
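A short usage sketch, assuming the wrapper above is Video.buildOpticalFlowPyramid and that frame.png exists as a grayscale frame; the simpler four-argument overload used below leaves the remaining parameters at their defaults:

import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.video.Video;

public class PyramidSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        Mat gray = Imgcodecs.imread("frame.png", Imgcodecs.IMREAD_GRAYSCALE);
        List<Mat> pyramid = new ArrayList<Mat>(); // filled via Converters.Mat_to_vector_Mat

        // Build an LK pyramid with up to 4 levels (maxLevel = 3) for a 21x21 search window.
        int ret = Video.buildOpticalFlowPyramid(gray, pyramid, new Size(21, 21), 3);
        System.out.println("Return value: " + ret + ", pyramid Mats: " + pyramid.size());
    }
}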
Example 6: getDescriptors
import org.opencv.utils.Converters; //import the class the method depends on
public List<Mat> getDescriptors()
{
    List<Mat> retVal = new ArrayList<Mat>();
    Mat retValMat = new Mat(getDescriptors_0(nativeObj));
    Converters.Mat_to_vector_Mat(retValMat, retVal);
    return retVal;
}
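The snippet above does not show its enclosing class; its shape matches the getDescriptors accessor of the bag-of-words trainer, so the sketch below assumes BOWKMeansTrainer and uses zero/one-filled Mats as stand-ins for real CV_32F descriptors:

import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.features2d.BOWKMeansTrainer;

public class BowTrainerSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        BOWKMeansTrainer trainer = new BOWKMeansTrainer(50); // 50-word vocabulary

        // Stand-in descriptors; real ones would come from a Feature2D extractor (CV_32F rows).
        Mat descriptorsA = Mat.ones(100, 32, CvType.CV_32F);
        Mat descriptorsB = Mat.zeros(80, 32, CvType.CV_32F);
        trainer.add(descriptorsA);
        trainer.add(descriptorsB);

        // getDescriptors() unpacks the stored Mats via Converters.Mat_to_vector_Mat.
        List<Mat> stored = trainer.getDescriptors();
        System.out.println("Descriptor Mats stored in the trainer: " + stored.size());
    }
}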
Example 7: forward
import org.opencv.utils.Converters; //import the class the method depends on
public void forward(List<Mat> inputs, List<Mat> outputs, List<Mat> internals)
{
    Mat inputs_mat = Converters.vector_Mat_to_Mat(inputs);
    Mat outputs_mat = Converters.vector_Mat_to_Mat(outputs);
    Mat internals_mat = Converters.vector_Mat_to_Mat(internals);
    forward_0(nativeObj, inputs_mat.nativeObj, outputs_mat.nativeObj, internals_mat.nativeObj);
    Converters.Mat_to_vector_Mat(outputs_mat, outputs);
    outputs_mat.release();
    Converters.Mat_to_vector_Mat(internals_mat, internals);
    internals_mat.release();
    return;
}
Example 8: calibrateCameraArucoExtended
import org.opencv.utils.Converters; //import the class the method depends on
public static double calibrateCameraArucoExtended(List<Mat> corners, Mat ids, Mat counter, Board board, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags, TermCriteria criteria)
{
    Mat corners_mat = Converters.vector_Mat_to_Mat(corners);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrateCameraArucoExtended_0(corners_mat.nativeObj, ids.nativeObj, counter.nativeObj, board.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Example 9: calibrate
import org.opencv.utils.Converters; //import the class the method depends on
public static double calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrate_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Example 10: calibrate
import org.opencv.utils.Converters; //import the class the method depends on
public static double calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrate_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Example 11: solveP3P
import org.opencv.utils.Converters; //import the class the method depends on
public static int solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags)
{
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    int retVal = solveP3P_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
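A usage sketch, assuming the wrapper above is Calib3d.solveP3P; the three 3D-2D correspondences and the camera matrix are illustrative placeholder numbers only:

import java.util.ArrayList;
import java.util.List;

import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDouble;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.MatOfPoint3f;
import org.opencv.core.Point;
import org.opencv.core.Point3;

public class SolveP3PSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Exactly three correspondences are required by P3P.
        MatOfPoint3f objectPoints = new MatOfPoint3f(
                new Point3(0, 0, 0), new Point3(0.1, 0, 0), new Point3(0, 0.1, 0));
        MatOfPoint2f imagePoints = new MatOfPoint2f(
                new Point(320, 240), new Point(400, 238), new Point(322, 160));

        // Pinhole camera matrix (fx, fy, cx, cy) and zero distortion, as placeholders.
        Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);
        cameraMatrix.put(0, 0, 800);
        cameraMatrix.put(1, 1, 800);
        cameraMatrix.put(0, 2, 320);
        cameraMatrix.put(1, 2, 240);
        MatOfDouble distCoeffs = new MatOfDouble(0, 0, 0, 0);

        List<Mat> rvecs = new ArrayList<Mat>();
        List<Mat> tvecs = new ArrayList<Mat>();
        int solutions = Calib3d.solveP3P(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                rvecs, tvecs, Calib3d.SOLVEPNP_P3P);
        System.out.println("P3P returned " + solutions + " candidate poses");
    }
}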
Example 12: calibrateCameraExtended
import org.opencv.utils.Converters; //import the class the method depends on
public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrateCameraExtended_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj, flags);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Example 13: getTrainDescriptors
import org.opencv.utils.Converters; //import the class the method depends on
public List<Mat> getTrainDescriptors()
{
    List<Mat> retVal = new ArrayList<Mat>();
    Mat retValMat = new Mat(getTrainDescriptors_0(nativeObj));
    Converters.Mat_to_vector_Mat(retValMat, retVal);
    return retVal;
}
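A usage sketch, assuming the accessor above is DescriptorMatcher.getTrainDescriptors; the zero-filled CV_8U Mats stand in for real binary descriptors (e.g. from ORB):

import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.features2d.DescriptorMatcher;

public class TrainDescriptorsSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);

        // Register two descriptor Mats (binary descriptors are CV_8U rows).
        List<Mat> train = new ArrayList<Mat>();
        train.add(Mat.zeros(500, 32, CvType.CV_8U));
        train.add(Mat.zeros(300, 32, CvType.CV_8U));
        matcher.add(train);

        // getTrainDescriptors() rebuilds a List<Mat> with Converters.Mat_to_vector_Mat.
        List<Mat> stored = matcher.getTrainDescriptors();
        System.out.println("Train descriptor Mats: " + stored.size());
    }
}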
Example 14: getCovs
import org.opencv.utils.Converters; //import the class the method depends on
public void getCovs(List<Mat> covs)
{
    Mat covs_mat = new Mat();
    getCovs_0(nativeObj, covs_mat.nativeObj);
    Converters.Mat_to_vector_Mat(covs_mat, covs);
    covs_mat.release();
    return;
}
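A usage sketch, assuming the method above is org.opencv.ml.EM.getCovs; the uniformly random 2-D samples are placeholders for real training data:

import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.ml.EM;

public class EmCovsSketch {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // 300 random 2-D samples in [0, 1), one sample per row.
        Mat samples = new Mat(300, 2, CvType.CV_64F);
        Core.randu(samples, 0, 1);

        EM em = EM.create();
        em.setClustersNumber(3);
        em.trainEM(samples);

        // getCovs fills the list with one covariance Mat per mixture component.
        List<Mat> covs = new ArrayList<Mat>();
        em.getCovs(covs);
        System.out.println("Covariance matrices: " + covs.size());
    }
}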
Example 15: run
import org.opencv.utils.Converters; //import the class the method depends on
public void run(List<Mat> inputs, List<Mat> outputs, List<Mat> internals)
{
    Mat inputs_mat = Converters.vector_Mat_to_Mat(inputs);
    Mat outputs_mat = new Mat();
    Mat internals_mat = Converters.vector_Mat_to_Mat(internals);
    run_0(nativeObj, inputs_mat.nativeObj, outputs_mat.nativeObj, internals_mat.nativeObj);
    Converters.Mat_to_vector_Mat(outputs_mat, outputs);
    outputs_mat.release();
    Converters.Mat_to_vector_Mat(internals_mat, internals);
    internals_mat.release();
    return;
}