This article collects typical usage examples of the C++ method OutputArray::getMatRef. If you are wondering exactly what OutputArray::getMatRef does, how to call it, or how it is used in practice, the curated examples below should help; you can also explore other uses of its containing class, OutputArray.
The following 15 code examples of OutputArray::getMatRef are shown, sorted by popularity by default.
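Before the examples, here is a minimal sketch of what getMatRef does (illustrative code, assuming an OpenCV build; the helper name makeIdentity is hypothetical): when a callee receives an OutputArray backed by a cv::Mat, getMatRef() returns a writable reference to the caller's matrix, so anything written through it is visible to the caller. Note that getMatRef() is only valid when the OutputArray actually wraps a Mat.

#include <opencv2/core.hpp>
using namespace cv;

// Hypothetical helper: write an n x n identity matrix into the caller's output.
static void makeIdentity(OutputArray dst, int n)
{
    dst.create(n, n, CV_64F);   // allocate through the proxy
    Mat& m = dst.getMatRef();   // writable reference to the underlying Mat
    setIdentity(m);             // writes land directly in the caller's matrix
}

int main()
{
    Mat out;
    makeIdentity(out, 3);
    CV_Assert(out.at<double>(0, 0) == 1.0 && out.at<double>(0, 1) == 0.0);
    return 0;
}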
Example 1: computeImpl
void AKAZE::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
    cv::Mat img = image.getMat();
    if (img.type() != CV_8UC1)
        cvtColor(image, img, COLOR_BGR2GRAY);

    Mat img1_32;
    img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);

    cv::Mat& desc = descriptors.getMatRef();

    AKAZEOptions options;
    options.descriptor = static_cast<DESCRIPTOR_TYPE>(descriptor);
    options.descriptor_channels = descriptor_channels;
    options.descriptor_size = descriptor_size;
    options.nsublevels = nsublevels;
    options.dthreshold = dthreshold;
    options.img_width = img.cols;
    options.img_height = img.rows;

    AKAZEFeatures impl(options);
    impl.Create_Nonlinear_Scale_Space(img1_32);
    impl.Compute_Descriptors(keypoints, desc);

    CV_Assert((!desc.rows || desc.cols == descriptorSize()));
    CV_Assert((!desc.rows || (desc.type() == descriptorType())));
}
Example 2: call
void cv::superres::Simple::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
    Mat frame0 = ::getMat(_frame0, buf[0]);
    Mat frame1 = ::getMat(_frame1, buf[1]);

    CV_DbgAssert( frame1.type() == frame0.type() );
    CV_DbgAssert( frame1.size() == frame0.size() );

    Mat input0 = ::convertToType(frame0, CV_8U, 3, buf[2], buf[3]);
    Mat input1 = ::convertToType(frame1, CV_8U, 3, buf[4], buf[5]);

    if (!_flow2.needed() && _flow1.kind() == _InputArray::MAT)
    {
        call(input0, input1, _flow1.getMatRef());
        return;
    }

    call(input0, input1, flow);

    if (!_flow2.needed())
    {
        ::copy(_flow1, flow);
    }
    else
    {
        split(flow, flows);
        ::copy(_flow1, flows[0]);
        ::copy(_flow2, flows[1]);
    }
}
Example 3: getCameras
virtual void
getCameras(OutputArray Rs, OutputArray Ts) {
    const size_t n_views =
        libmv_reconstruction_.reconstruction.AllCameras().size();

    Rs.create(n_views, 1, CV_64F);
    Ts.create(n_views, 1, CV_64F);

    Matx33d R;
    Vec3d t;
    for (size_t i = 0; i < n_views; ++i)
    {
        eigen2cv(libmv_reconstruction_.reconstruction.AllCameras()[i].R, R);
        eigen2cv(libmv_reconstruction_.reconstruction.AllCameras()[i].t, t);
        Mat(R).copyTo(Rs.getMatRef(i));
        Mat(t).copyTo(Ts.getMatRef(i));
    }
}
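Examples 3 and 8 use the indexed overload getMatRef(i): when the OutputArray is backed by a std::vector<Mat>, create(n, 1, type) resizes the vector to n elements and getMatRef(i) returns a writable reference to the i-th one. A minimal sketch of that pattern (emitRotations is an illustrative name, not from the source):

#include <opencv2/core.hpp>
#include <vector>
using namespace cv;

// Hypothetical writer: one 3x3 rotation per view, written element by element.
static void emitRotations(OutputArray Rs, int n_views)
{
    Rs.create(n_views, 1, CV_64F);                    // resizes the backing vector
    for (int i = 0; i < n_views; ++i)
        Mat(Matx33d::eye()).copyTo(Rs.getMatRef(i));  // fill the i-th element
}

int main()
{
    std::vector<Mat> Rs;
    emitRotations(Rs, 4);
    CV_Assert(Rs.size() == 4 && Rs[0].at<double>(2, 2) == 1.0);
    return 0;
}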
Example 4: run
void SparsePyrLkOptFlowEstimatorGpu::run(
        InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
        OutputArray status, OutputArray errors)
{
    frame0_.upload(frame0.getMat());
    frame1_.upload(frame1.getMat());
    points0_.upload(points0.getMat());

    if (errors.needed())
    {
        run(frame0_, frame1_, points0_, points1_, status_, errors_);
        errors_.download(errors.getMatRef());
    }
    else
        run(frame0_, frame1_, points0_, points1_, status_);

    points1_.download(points1.getMatRef());
    status_.download(status.getMatRef());
}
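The pattern in Example 4 (and Example 11 below) is download(x.getMatRef()): GpuMat::download is handed a writable host Mat owned by the caller. A stripped-down sketch of the same round trip (downloadTo is an illustrative helper; actually running it requires a CUDA-capable build and device, which the guard checks):

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>
#include <iostream>
using namespace cv;

// Hypothetical helper mirroring the pattern above: device result -> caller's Mat.
static void downloadTo(const cuda::GpuMat& d_result, OutputArray dst)
{
    if (dst.needed())
        d_result.download(dst.getMatRef());  // writes into the caller's matrix
}

int main()
{
    if (cuda::getCudaEnabledDeviceCount() == 0)
    {
        std::cout << "No CUDA device available; skipping.\n";
        return 0;
    }
    cuda::GpuMat dev;
    dev.upload(Mat::ones(2, 2, CV_8U));

    Mat host;
    downloadTo(dev, host);
    CV_Assert(host.at<uchar>(1, 1) == 1);
    return 0;
}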
Example 5: detectAndCompute
void detectAndCompute(InputArray image, InputArray mask,
                      std::vector<KeyPoint>& keypoints,
                      OutputArray descriptors,
                      bool useProvidedKeypoints)
{
    Mat img = image.getMat();

    if (img_width != img.cols) {
        img_width = img.cols;
        impl.release();
    }

    if (img_height != img.rows) {
        img_height = img.rows;
        impl.release();
    }

    if (impl.empty()) {
        AKAZEOptionsV2 options;
        options.descriptor = descriptor;
        options.descriptor_channels = descriptor_channels;
        options.descriptor_size = descriptor_size;
        options.img_width = img_width;
        options.img_height = img_height;
        options.dthreshold = threshold;
        options.omax = octaves;
        options.nsublevels = sublevels;
        options.diffusivity = diffusivity;
        impl = makePtr<AKAZEFeaturesV2>(options);
    }

    impl->Create_Nonlinear_Scale_Space(img);

    if (!useProvidedKeypoints)
    {
        impl->Feature_Detection(keypoints);
    }

    if (!mask.empty())
    {
        KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
    }

    if( descriptors.needed() )
    {
        Mat& desc = descriptors.getMatRef();
        impl->Compute_Descriptors(keypoints, desc);

        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
    }
}
Example 6: detectAndCompute
void detectAndCompute(InputArray image, InputArray mask,
                      std::vector<KeyPoint>& keypoints,
                      OutputArray descriptors,
                      bool useProvidedKeypoints)
{
    Mat img = image.getMat();
    if (img.type() != CV_8UC1 && img.type() != CV_16UC1)
        cvtColor(image, img, COLOR_BGR2GRAY);

    Mat img1_32;
    if ( img.depth() == CV_32F )
        img1_32 = img;
    else if ( img.depth() == CV_8U )
        img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
    else if ( img.depth() == CV_16U )
        img.convertTo(img1_32, CV_32F, 1.0 / 65535.0, 0);

    CV_Assert( !img1_32.empty() );

    AKAZEOptions options;
    options.descriptor = descriptor;
    options.descriptor_channels = descriptor_channels;
    options.descriptor_size = descriptor_size;
    options.img_width = img.cols;
    options.img_height = img.rows;
    options.dthreshold = threshold;
    options.omax = octaves;
    options.nsublevels = sublevels;
    options.diffusivity = diffusivity;

    AKAZEFeatures impl(options);
    impl.Create_Nonlinear_Scale_Space(img1_32);

    if (!useProvidedKeypoints)
    {
        impl.Feature_Detection(keypoints);
    }

    if (!mask.empty())
    {
        KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
    }

    if( descriptors.needed() )
    {
        Mat& desc = descriptors.getMatRef();
        impl.Compute_Descriptors(keypoints, desc);

        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
    }
}
Example 7: threshold
/*******************************************************************************
* Function:      subtractBGMedian
* Description:   BG subtraction via median filtering
* Arguments:
    inImg       - input image
    bgsImg      - BG subtracted image
    threshVal   - threshold value for converting to binary image
    seLength    - length of structuring elements (unused in this variant)
* Returns:       void
* Comments:
* Revision:
*******************************************************************************/
void
FGExtraction::subtractBGMedian(InputArray src, OutputArray dst, int threshVal, int seLength)
{
    Mat inImg = src.getMat();
    Mat medImg;

    // median filter
    Mat tempImg = inImg.clone();
    medianBlur(tempImg, medImg, 31);
    //showImage("median", medImg);

    Mat bin;
    threshold(medImg, bin, threshVal, 255, THRESH_BINARY);

    dst.getMatRef() = bin;
}
Example 8: getPoints
virtual void
getPoints(OutputArray points3d) {
    const size_t n_points =
        libmv_reconstruction_.reconstruction.AllPoints().size();

    points3d.create(n_points, 1, CV_64F);

    Vec3d point3d;
    for ( size_t i = 0; i < n_points; ++i )
    {
        for ( int j = 0; j < 3; ++j )
            point3d[j] =
                libmv_reconstruction_.reconstruction.AllPoints()[i].X[j];
        Mat(point3d).copyTo(points3d.getMatRef(i));
    }
}
Example 9: reconstruct
void
reconstruct(const std::vector<cv::String> images, OutputArray Ps, OutputArray points3d,
            InputOutputArray K, bool is_projective)
{
    const int nviews = static_cast<int>(images.size());
    CV_Assert( nviews >= 2 );

    Matx33d Ka = K.getMat();
    const int depth = Mat(Ka).depth();

    // Projective reconstruction
    if ( is_projective )
    {
        std::vector<Mat> Rs, Ts;
        reconstruct(images, Rs, Ts, Ka, points3d, is_projective);

        // From Rs and Ts, extract Ps
        const int nviews_est = static_cast<int>(Rs.size());
        Ps.create(nviews_est, 1, depth);

        Matx34d P;
        for (int i = 0; i < nviews_est; ++i)
        {
            projectionFromKRt(Ka, Rs[i], Vec3d(Ts[i]), P);
            Mat(P).copyTo(Ps.getMatRef(i));
        }

        Mat(Ka).copyTo(K.getMat());
    }
    // Affine reconstruction
    else
    {
        // TODO: implement me
        CV_Error(Error::StsNotImplemented, "Affine reconstruction not yet implemented");
    }
}
Example 10: switch
void cv::cuda::createContinuous(int rows, int cols, int type, OutputArray arr)
{
    switch (arr.kind())
    {
    case _InputArray::MAT:
        ::createContinuousImpl(rows, cols, type, arr.getMatRef());
        break;

    case _InputArray::GPU_MAT:
        ::createContinuousImpl(rows, cols, type, arr.getGpuMatRef());
        break;

    case _InputArray::CUDA_MEM:
        ::createContinuousImpl(rows, cols, type, arr.getCudaMemRef());
        break;

    default:
        arr.create(rows, cols, type);
    }
}
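A short usage sketch for the MAT branch above (a sketch, assuming an OpenCV build that ships the cv::cuda host-side wrappers; no GPU is touched on this path): the function guarantees a single continuous allocation, which matters for code that treats the matrix as one flat buffer.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>
using namespace cv;

int main()
{
    // MAT kind: dispatches to the getMatRef() branch and allocates
    // one continuous host buffer.
    Mat m;
    cuda::createContinuous(480, 640, CV_8UC3, m);
    CV_Assert(m.isContinuous() && m.rows == 480 && m.cols == 640);
    return 0;
}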
Example 11: run
void DensePyrLkOptFlowEstimatorGpu::run(
        InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
        OutputArray errors)
{
    frame0_.upload(frame0.getMat());
    frame1_.upload(frame1.getMat());

    optFlowEstimator_.winSize = winSize_;
    optFlowEstimator_.maxLevel = maxLevel_;

    if (errors.needed())
    {
        optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_, &errors_);
        errors_.download(errors.getMatRef());
    }
    else
        optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_);

    flowX_.download(flowX.getMatRef());
    flowY_.download(flowY.getMatRef());
}
Example 12: switch
void cv::cuda::ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr)
{
    switch (arr.kind())
    {
    case _InputArray::MAT:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getMatRef());
        break;

    case _InputArray::CUDA_GPU_MAT:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getGpuMatRef());
        break;

    case _InputArray::CUDA_HOST_MEM:
        ::ensureSizeIsEnoughImpl(rows, cols, type, arr.getHostMemRef());
        break;

    default:
        arr.create(rows, cols, type);
    }
}
Example 13: computeFeatures
// static
void QualityBRISQUE::computeFeatures(InputArray img, OutputArray features)
{
    CV_Assert(features.needed());
    CV_Assert(img.isMat());
    CV_Assert(!img.getMat().empty());

    auto mat = mat_convert(img.getMat());

    const auto vals = ComputeBrisqueFeature(mat);
    cv::Mat valmat( cv::Size( (int)vals.size(), 1 ), CV_32FC1, (void*)vals.data() ); // create row vector, type depends on brisque_calc_element_type

    if (features.isUMat())
        valmat.copyTo(features.getUMatRef());
    else if (features.isMat())
        // how to move data instead?
        // if calling this:
        //   features.getMatRef() = valmat;
        // then shared data is erased when valmat is released, corrupting the data in the OutputArray for the caller
        valmat.copyTo(features.getMatRef());
    else
        CV_Error(cv::Error::StsNotImplemented, "Unsupported output type");
}
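The comment in Example 13 flags a real pitfall: valmat is a header over vals.data() and owns nothing, so features.getMatRef() = valmat would leave the caller sharing a buffer that dies with vals, which is why copyTo is used. (Example 7's dst.getMatRef() = bin is different: bin owns reference-counted data, so the header assignment there is safe.) A minimal sketch of the safe variant, with hypothetical names:

#include <opencv2/core.hpp>
#include <vector>
using namespace cv;

// Hypothetical producer mirroring computeFeatures' situation.
static void fillFeatures(OutputArray features)
{
    std::vector<float> vals = { 1.f, 2.f, 3.f };
    // Header over external data: no reference counting, vals owns the buffer.
    Mat valmat(1, (int)vals.size(), CV_32FC1, vals.data());

    // features.getMatRef() = valmat; would dangle once vals is destroyed.
    valmat.copyTo(features.getMatRef());  // deep copy: caller keeps valid data
}

int main()
{
    Mat f;
    fillFeatures(f);
    CV_Assert(f.at<float>(0, 2) == 3.f);
    return 0;
}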
Example 14: reconstruct
// Reconstruction function for API
void
reconstruct(InputArrayOfArrays points2d, OutputArray Ps, OutputArray points3d, InputOutputArray K,
            bool is_projective)
{
    const int nviews = static_cast<int>(points2d.total());
    CV_Assert( nviews >= 2 );

    // OpenCV data types
    std::vector<Mat> pts2d;
    points2d.getMatVector(pts2d);
    const int depth = pts2d[0].depth();

    Matx33d Ka = K.getMat();

    // Projective reconstruction
    if (is_projective)
    {
        if ( nviews == 2 )
        {
            // Get projection matrices
            Matx33d F;
            Matx34d P, Pp;

            normalizedEightPointSolver(pts2d[0], pts2d[1], F);
            projectionsFromFundamental(F, P, Pp);

            Ps.create(2, 1, depth);
            Mat(P).copyTo(Ps.getMatRef(0));
            Mat(Pp).copyTo(Ps.getMatRef(1));

            // Triangulate and find 3D points using inliers
            triangulatePoints(points2d, Ps, points3d);
        }
        else
        {
            std::vector<Mat> Rs, Ts;
            reconstruct(points2d, Rs, Ts, Ka, points3d, is_projective);

            // From Rs and Ts, extract Ps
            const int nviews_est = static_cast<int>(Rs.size());
            Ps.create(nviews_est, 1, depth);

            Matx34d P;
            for (int i = 0; i < nviews_est; ++i)
            {
                projectionFromKRt(Ka, Rs[i], Vec3d(Ts[i]), P);
                Mat(P).copyTo(Ps.getMatRef(i));
            }

            Mat(Ka).copyTo(K.getMat());
        }
    }
    // Affine reconstruction
    else
    {
        // TODO: implement me
    }
}
Example 15: operator
void KAZE::operator()(InputArray _image, InputArray _mask, vector<KeyPoint>& _keypoints,
                      OutputArray _descriptors, bool useProvidedKeypoints) const
{
    bool do_keypoints = !useProvidedKeypoints;
    bool do_descriptors = _descriptors.needed();

    if( (!do_keypoints && !do_descriptors) || _image.empty() )
        return;

    cv::Mat img1_8, img1_32;

    // Convert to gray-scale image and float image
    if (_image.getMat().channels() == 3)
        cv::cvtColor(_image, img1_8, CV_RGB2GRAY);
    else
        _image.getMat().copyTo(img1_8);

    img1_8.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);

    // Construct KAZE
    toptions opt = options;
    opt.img_width = img1_32.cols;
    opt.img_height = img1_32.rows;

    ::KAZE kazeEvolution(opt);

    // Create nonlinear scale space
    kazeEvolution.Create_Nonlinear_Scale_Space(img1_32);

    // Feature detection
    std::vector<Ipoint> kazePoints;
    if (do_keypoints)
    {
        kazeEvolution.Feature_Detection(kazePoints);
        filterDuplicated(kazePoints);

        if (!_mask.empty())
        {
            filterByPixelsMask(kazePoints, _mask.getMat());
        }

        if (opt.nfeatures > 0)
        {
            filterRetainBest(kazePoints, opt.nfeatures);
        }
    }
    else
    {
        kazePoints.resize(_keypoints.size());

        #pragma omp parallel for
        for (int i = 0; i < (int)kazePoints.size(); i++)
        {
            convertPoint(_keypoints[i], kazePoints[i]);
        }
    }

    // Descriptor calculation
    if (do_descriptors)
    {
        kazeEvolution.Feature_Description(kazePoints);

        cv::Mat& descriptors = _descriptors.getMatRef();
        descriptors.create((int)kazePoints.size(), descriptorSize(), descriptorType());

        for (size_t i = 0; i < kazePoints.size(); i++)
        {
            std::copy(kazePoints[i].descriptor.begin(), kazePoints[i].descriptor.end(),
                      (float*)descriptors.row((int)i).data);
        }
    }

    // Transfer from KAZE::Ipoint to cv::KeyPoint
    if (do_keypoints)
    {
        _keypoints.resize(kazePoints.size());

        #pragma omp parallel for
        for (int i = 0; i < (int)kazePoints.size(); i++)
        {
            convertPoint(kazePoints[i], _keypoints[i]);
        }
    }
}
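Example 15 shows one more idiom: calling create() on the reference returned by getMatRef() and then filling rows in place. A trimmed sketch of that allocate-then-fill pattern (writeDescriptors and the dummy feature vectors are illustrative, not from the source):

#include <opencv2/core.hpp>
#include <algorithm>
#include <vector>
using namespace cv;

// Hypothetical writer: one float row per descriptor.
static void writeDescriptors(OutputArray _descriptors,
                             const std::vector<std::vector<float> >& feats)
{
    Mat& descriptors = _descriptors.getMatRef();
    descriptors.create((int)feats.size(), (int)feats[0].size(), CV_32F);
    for (size_t i = 0; i < feats.size(); ++i)
        std::copy(feats[i].begin(), feats[i].end(),
                  descriptors.ptr<float>((int)i));  // each row of a fresh Mat is contiguous
}

int main()
{
    std::vector<std::vector<float> > feats(2, std::vector<float>(4, 0.5f));
    Mat desc;
    writeDescriptors(desc, feats);
    CV_Assert(desc.rows == 2 && desc.cols == 4 && desc.at<float>(1, 3) == 0.5f);
    return 0;
}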