本文整理汇总了C++中OutputArray::needed方法的典型用法代码示例。如果您正苦于以下问题:C++ OutputArray::needed方法的具体用法?C++ OutputArray::needed怎么用?C++ OutputArray::needed使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类OutputArray的用法示例。
在下文中一共展示了OutputArray::needed方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: call
// Computes dense Farneback optical flow between two frames.
// When only _flow1 is requested and it lives on the CPU, the flow is written
// straight into it; otherwise the 2-channel flow is computed into the member
// buffer and split/copied into the requested output(s).
void cv::superres::Farneback::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
    Mat f0 = ::getMat(_frame0, buf[0]);
    Mat f1 = ::getMat(_frame1, buf[1]);

    CV_DbgAssert( f1.type() == f0.type() );
    CV_DbgAssert( f1.size() == f0.size() );

    Mat in0 = ::convertToType(f0, CV_8U, 1, buf[2], buf[3]);
    Mat in1 = ::convertToType(f1, CV_8U, 1, buf[4], buf[5]);

    const bool wantSecond = _flow2.needed();

    // Fast path: a single CPU output can receive the flow directly.
    if (!wantSecond && _flow1.kind() != _InputArray::GPU_MAT)
    {
        call(in0, in1, _flow1);
        return;
    }

    call(in0, in1, flow);

    if (wantSecond)
    {
        split(flow, flows);
        ::copy(_flow1, flows[0]);
        ::copy(_flow2, flows[1]);
    }
    else
    {
        ::copy(_flow1, flow);
    }
}
示例2: computeProbabilities
// Predicts the mixture component for a single sample.
// Returns Vec2d(likelihood, component index); optionally fills _probs with
// the per-cluster posterior probabilities (1 x nclusters).
Vec2d predict2(InputArray _sample, OutputArray _probs) const
{
    Mat sample = _sample.getMat();
    CV_Assert(isTrained());
    CV_Assert(!sample.empty());

    // Work in double precision regardless of the input type.
    if (sample.type() != CV_64FC1)
    {
        Mat converted;
        sample.convertTo(converted, CV_64FC1);
        sample = converted;
    }
    sample = sample.reshape(1, 1);

    int ptype = CV_64F;
    Mat probs;
    if (_probs.needed())
    {
        if (_probs.fixedType())
            ptype = _probs.type();
        _probs.create(1, nclusters, ptype);
        probs = _probs.getMat();
    }

    return computeProbabilities(sample, probs.empty() ? 0 : &probs, ptype);
}
示例3: predict
// Classifies each row of _samples with the trained linear model
// (sign of sample . weights_ + shift_). Writes +1/-1 labels into _results
// when the caller asks for them; otherwise exactly one sample is expected
// and its label is returned through the local buffer.
float SVMSGDImpl::predict( InputArray _samples, OutputArray _results, int ) const
{
    float firstLabel = 0;

    cv::Mat samples = _samples.getMat();
    const int sampleCount = samples.rows;
    cv::Mat labels;

    CV_Assert( samples.cols == weights_.cols && samples.type() == CV_32FC1);

    if ( _results.needed() )
    {
        _results.create( sampleCount, 1, samples.type() );
        labels = _results.getMat();
    }
    else
    {
        CV_Assert( sampleCount == 1 );
        // No output array: route the single label into firstLabel.
        labels = Mat(1, 1, CV_32FC1, &firstLabel);
    }

    for (int row = 0; row < sampleCount; row++)
    {
        Mat sample = samples.row(row);
        float margin = static_cast<float>(sample.dot(weights_)) + shift_;
        labels.at<float>(row) = (margin >= 0) ? 1.f : -1.f;
    }

    return firstLabel;
}
示例4: applyTransformation
// Applies the stored affine transform to the input point set and updates
// transformCost with the log ratio of the singular values of the (transposed)
// 2x2 linear part. Transformed points are written to outPts when requested.
float AffineTransformerImpl::applyTransformation(InputArray inPts, OutputArray outPts)
{
    Mat src = inPts.getMat();
    CV_Assert((src.channels()==2) && (src.cols>0));

    // Transform the complete point set.
    Mat transformed;
    transform(src, transformed, affineMat);

    // Assemble the output, point by point, when the caller asked for it.
    if (outPts.needed())
    {
        outPts.create(1, transformed.cols, CV_32FC2);
        Mat dst = outPts.getMat();
        for (int col = 0; col < transformed.cols; col++)
            dst.at<Point2f>(0, col) = transformed.at<Point2f>(0, col);
    }

    // Transform cost from the transposed 2x2 linear part of affineMat.
    Mat linearPart(2, 2, CV_32F);
    linearPart.at<float>(0,0) = affineMat.at<float>(0,0);
    linearPart.at<float>(0,1) = affineMat.at<float>(1,0);
    linearPart.at<float>(1,0) = affineMat.at<float>(0,1);
    linearPart.at<float>(1,1) = affineMat.at<float>(1,1);

    SVD decomposition(linearPart, SVD::NO_UV);
    Mat singularValues = decomposition.w;
    // FLT_MIN guards against division by zero for degenerate transforms.
    transformCost = std::log((singularValues.at<float>(0,0)+FLT_MIN) /
                             (singularValues.at<float>(1,0)+FLT_MIN));
    return transformCost;
}
示例5: predict
// Predicts the mixture component for each row of _inputs.
// Returns the component index of the first sample (as float); when _outputs
// is requested it is filled with per-sample posterior probabilities
// (nsamples x nclusters). Without an output array only the first sample is
// evaluated.
float predict(InputArray _inputs, OutputArray _outputs, int) const
{
    bool needprobs = _outputs.needed();
    Mat samples = _inputs.getMat(), probs, probsrow;
    int ptype = CV_64F;
    float firstres = 0.f;
    int i, nsamples = samples.rows;

    if( needprobs )
    {
        if( _outputs.fixedType() )
            ptype = _outputs.type();
        _outputs.create(samples.rows, nclusters, ptype);
        // FIX: bind `probs` to the allocated output. The original left
        // `probs` empty, so `probs.row(i)` below failed whenever the
        // caller requested probabilities.
        probs = _outputs.getMat();
    }
    else
        nsamples = std::min(nsamples, 1); // no output: first sample only

    for( i = 0; i < nsamples; i++ )
    {
        if( needprobs )
            probsrow = probs.row(i); // view into the output row
        Vec2d res = computeProbabilities(samples.row(i), needprobs ? &probsrow : 0, ptype);
        if( i == 0 )
            firstres = (float)res[1]; // res[1] is the component index
    }
    return firstres;
}
示例6: computeReprojectionErrors
// Projects points3D with the given pose/intrinsics and returns the
// reprojection error against points2D (per-point errors optionally appended
// to individual_error). The projected points are stored in _proj_points2D
// when the caller requests them.
double computeReprojectionErrors(InputArray points3D,
                                 InputArray points2D,
                                 InputArray cameraMatrix,
                                 InputArray distCoeffs,
                                 InputArray rvec,
                                 InputArray tvec,
                                 OutputArray _proj_points2D,
                                 vector<double> *individual_error)
{
    // Working buffer shaped/typed like the measured 2D points.
    Mat measured = points2D.getMat();
    Mat projected = _proj_points2D.getMat();
    projected.create(measured.rows, measured.cols, measured.type());

    // Project the 3D points into the image.
    projectPoints(points3D, rvec, tvec, cameraMatrix, distCoeffs, projected);

    // Hand the projections back only if the caller asked for them.
    if (_proj_points2D.needed())
        projected.copyTo(_proj_points2D);

    return calib::norm(measured, projected, individual_error);
}
示例7: calcError
// Computes the model's prediction error over the train or test subset of
// `data`. For classifiers the result is the misclassification rate in
// percent; otherwise it is the mean squared error. Per-sample predictions
// are optionally returned through _resp.
float StatModel::calcError( const Ptr<TrainData>& data, bool testerr, OutputArray _resp ) const
{
CV_TRACE_FUNCTION_SKIP_NESTED();
Mat samples = data->getSamples();
int layout = data->getLayout();
// Choose the evaluation subset: test indices or train indices.
Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
const int* sidx_ptr = sidx.ptr<int>();
int i, n = (int)sidx.total();
bool isclassifier = isClassifier();
Mat responses = data->getResponses();
int responses_type = responses.type();
// An empty index set means "use all samples".
if( n == 0 )
n = data->getNSamples();
if( n == 0 )
return -FLT_MAX; // nothing to evaluate on
Mat resp;
if( _resp.needed() )
resp.create(n, 1, CV_32F);
double err = 0;
for( i = 0; i < n; i++ )
{
// Map the loop index through the sample-index table when present.
int si = sidx_ptr ? sidx_ptr[i] : i;
Mat sample = layout == ROW_SAMPLE ? samples.row(si) : samples.col(si);
float val = predict(sample);
// Ground truth may be stored as int (class labels) or float.
float val0 = (responses_type == CV_32S) ? (float)responses.at<int>(si) : responses.at<float>(si);
if( isclassifier )
err += fabs(val - val0) > FLT_EPSILON; // count misclassifications
else
err += (val - val0)*(val - val0); // accumulate squared error
if( !resp.empty() )
resp.at<float>(i) = val;
/*if( i < 100 )
{
printf("%d. ref %.1f vs pred %.1f\n", i, val0, val);
}*/
}
if( _resp.needed() )
resp.copyTo(_resp);
// Classifier error is reported as a percentage.
return (float)(err / n * (isclassifier ? 100 : 1));
}
示例8: EMD
// Computes the Earth Mover's Distance between two weighted point sets
// (signatures) by delegating to the legacy C implementation cvCalcEMD2.
// When requested, _flow receives the computed flow matrix
// (signature1.rows x signature2.rows, CV_32F).
float cv::EMD( InputArray _signature1, InputArray _signature2,
int distType, InputArray _cost,
float* lowerBound, OutputArray _flow )
{
Mat signature1 = _signature1.getMat(), signature2 = _signature2.getMat();
Mat cost = _cost.getMat(), flow;
// Wrap the Mat headers as CvMat for the legacy C API (no data copy;
// the CvMat headers must not outlive the Mats they alias).
CvMat _csignature1 = signature1;
CvMat _csignature2 = signature2;
CvMat _ccost = cost, _cflow;
if( _flow.needed() )
{
_flow.create(signature1.rows, signature2.rows, CV_32F);
flow = _flow.getMat();
// _cflow aliases the caller-visible flow buffer, so cvCalcEMD2
// writes the flow directly into the output array.
_cflow = flow;
}
return cvCalcEMD2( &_csignature1, &_csignature2, distType, 0, cost.empty() ? 0 : &_ccost,
_flow.needed() ? &_cflow : 0, lowerBound, 0 );
}
示例9: detectAndCompute
// Detects AKAZE (v2) keypoints and optionally computes their descriptors.
// The internal AKAZE pipeline object is cached across calls and rebuilt
// only when the input image dimensions change.
void detectAndCompute(InputArray image, InputArray mask,
                      std::vector<KeyPoint>& keypoints,
                      OutputArray descriptors,
                      bool useProvidedKeypoints)
{
    Mat img = image.getMat();

    // Invalidate the cached pipeline on any size change.
    if (img_width != img.cols)
    {
        img_width = img.cols;
        impl.release();
    }
    if (img_height != img.rows)
    {
        img_height = img.rows;
        impl.release();
    }

    // (Re)build the pipeline with the current parameters if needed.
    if (impl.empty())
    {
        AKAZEOptionsV2 options;
        options.descriptor          = descriptor;
        options.descriptor_channels = descriptor_channels;
        options.descriptor_size     = descriptor_size;
        options.img_width           = img_width;
        options.img_height          = img_height;
        options.dthreshold          = threshold;
        options.omax                = octaves;
        options.nsublevels          = sublevels;
        options.diffusivity         = diffusivity;
        impl = makePtr<AKAZEFeaturesV2>(options);
    }

    impl->Create_Nonlinear_Scale_Space(img);

    if (!useProvidedKeypoints)
        impl->Feature_Detection(keypoints);

    if (!mask.empty())
        KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());

    if (descriptors.needed())
    {
        Mat& desc = descriptors.getMatRef();
        impl->Compute_Descriptors(keypoints, desc);

        // Sanity-check descriptor geometry/type (vacuously true when empty).
        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
    }
}
示例10: detectAndCompute
// Detects AKAZE keypoints and optionally computes their descriptors.
// Accepts 8U, 16U or 32F images; multi-channel input is converted to
// grayscale, then normalized to CV_32F in [0, 1] for the AKAZE pipeline.
void detectAndCompute(InputArray image, InputArray mask,
                      std::vector<KeyPoint>& keypoints,
                      OutputArray descriptors,
                      bool useProvidedKeypoints)
{
    Mat img = image.getMat();
    // FIX: decide the grayscale conversion on channel count, not exact type.
    // The original test (type != CV_8UC1 && type != CV_16UC1) routed
    // single-channel CV_32F images into cvtColor(COLOR_BGR2GRAY), which
    // requires a color input, even though the CV_32F depth is explicitly
    // supported below.
    if (img.channels() > 1)
        cvtColor(image, img, COLOR_BGR2GRAY);

    Mat img1_32;
    if ( img.depth() == CV_32F )
        img1_32 = img;                                  // already normalized float
    else if ( img.depth() == CV_8U )
        img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
    else if ( img.depth() == CV_16U )
        img.convertTo(img1_32, CV_32F, 1.0 / 65535.0, 0);
    CV_Assert( ! img1_32.empty() );  // rejects unsupported depths

    AKAZEOptions options;
    options.descriptor          = descriptor;
    options.descriptor_channels = descriptor_channels;
    options.descriptor_size     = descriptor_size;
    options.img_width           = img.cols;
    options.img_height          = img.rows;
    options.dthreshold          = threshold;
    options.omax                = octaves;
    options.nsublevels          = sublevels;
    options.diffusivity         = diffusivity;

    AKAZEFeatures impl(options);
    impl.Create_Nonlinear_Scale_Space(img1_32);

    if (!useProvidedKeypoints)
    {
        impl.Feature_Detection(keypoints);
    }

    if (!mask.empty())
    {
        KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
    }

    if( descriptors.needed() )
    {
        Mat& desc = descriptors.getMatRef();
        impl.Compute_Descriptors(keypoints, desc);

        // Sanity-check descriptor geometry/type (vacuously true when empty).
        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
    }
}
示例11: d_votes
// Downloads GeneralizedHough detection results from the GPU to host memory.
// d_positions is expected to be a 2-row CV_32FC4 GpuMat: row 0 holds the
// detected positions; row 1 holds per-detection vote data that is
// reinterpreted as CV_32SC3 when the caller asks for votes.
void cv::gpu::GeneralizedHough_GPU::download(const GpuMat& d_positions, OutputArray h_positions_, OutputArray h_votes_)
{
// Nothing detected: clear the outputs and bail out.
if (d_positions.empty())
{
h_positions_.release();
if (h_votes_.needed())
h_votes_.release();
return;
}
CV_Assert(d_positions.rows == 2 && d_positions.type() == CV_32FC4);
h_positions_.create(1, d_positions.cols, CV_32FC4);
Mat h_positions = h_positions_.getMat();
d_positions.row(0).download(h_positions);
if (h_votes_.needed())
{
h_votes_.create(1, d_positions.cols, CV_32SC3);
Mat h_votes = h_votes_.getMat();
// Wrap row 1's device memory as a CV_32SC3 header (no copy): the vote
// triples are stored in the same buffer as the CV_32FC4 positions row.
GpuMat d_votes(1, d_positions.cols, CV_32SC3, const_cast<int3*>(d_positions.ptr<int3>(1)));
d_votes.download(h_votes);
}
}
示例12: findPattern
// Locates the custom pattern in `image` using a two-pass feature match:
// a loose first pass (ratio 0.6) to localize the pattern, then a stricter
// second pass restricted to the detected region. On success, outputs the
// matched image features, their 3D pattern coordinates and, optionally,
// the visualization image, the homography and the detected corners.
bool CustomPattern::findPattern(InputArray image, OutputArray matched_features, OutputArray pattern_points,
                                const double ratio, const double proj_error, const bool refine_position, OutputArray out,
                                OutputArray H, OutputArray pattern_corners)
{
    CV_Assert(!image.empty() && proj_error > 0);

    Mat img = image.getMat();
    vector<Point2f> features;
    vector<Point3f> object_points;
    Mat homography;
    vector<Point2f> corners;

    // First pass: loose ratio just to find the pattern region.
    if (!findPatternPass(img, features, object_points, homography, corners, 0.6, proj_error, refine_position))
        return false; // pattern not found

    // Build a mask covering the detected quadrilateral.
    Mat mask = Mat::zeros(img.size(), CV_8UC1);
    vector<vector<Point> > contour(1);
    vector<Point> int_corners(corners.size());
    for (uint idx = 0; idx < corners.size(); ++idx)
        int_corners[idx] = (Point)corners[idx]; // drawContours needs integer points
    contour[0] = int_corners;
    drawContours(mask, contour, 0, Scalar(255), FILLED);

    // Second pass: caller-supplied ratio, restricted to the masked region.
    Mat output;
    if (!findPatternPass(img, features, object_points, homography, corners,
                         ratio, proj_error, refine_position, mask, output))
        return false; // pattern not found

    Mat(features).copyTo(matched_features);
    Mat(object_points).copyTo(pattern_points);
    if (out.needed()) output.copyTo(out);
    if (H.needed()) homography.copyTo(H);
    if (pattern_corners.needed()) Mat(corners).copyTo(pattern_corners);

    return (!features.empty());
}
示例13: init
// Initializes the pattern template: stores the template image, detects and
// refines keypoints (defaulting to ORB detector/extractor and a Hamming
// matcher when none were set), scales the points by pixel_size, and
// optionally renders the detected keypoints into `output`.
// Returns true iff any keypoints were found.
bool CustomPattern::init(Mat& image, const float pixel_size, OutputArray output)
{
    image.copyTo(img_roi);

    // Template corners in image coordinates, clockwise from the origin.
    obj_corners = std::vector<Point2f>(4);
    obj_corners[0] = Point2f(0, 0);
    obj_corners[1] = Point2f(img_roi.cols, 0);
    obj_corners[2] = Point2f(img_roi.cols, img_roi.rows);
    obj_corners[3] = Point2f(0, img_roi.rows);

    if (!detector) // no detector chosen: fall back to a default ORB
    {
        detector = FeatureDetector::create("ORB");
        detector->set("nFeatures", 2000);
        detector->set("scaleFactor", 1.15);
        detector->set("nLevels", 30);
    }
    detector->detect(img_roi, keypoints);
    if (keypoints.empty())
    {
        initialized = false;
        return initialized;
    }
    refineKeypointsPos(img_roi, keypoints);

    if (!descriptorExtractor) // no extractor chosen: fall back to ORB
        descriptorExtractor = DescriptorExtractor::create("ORB");
    descriptorExtractor->compute(img_roi, keypoints, descriptor);

    if (!descriptorMatcher)
        descriptorMatcher = DescriptorMatcher::create("BruteForce-Hamming(2)");

    // Scale found points by the physical pixel size.
    pxSize = pixel_size;
    scaleFoundPoints(pxSize, keypoints, points3d);

    if (output.needed())
    {
        Mat visualization;
        drawKeypoints(img_roi, keypoints, visualization, CV_RGB(255, 0, 0));
        visualization.copyTo(output);
    }

    initialized = !keypoints.empty();
    return initialized; // initialized iff any keypoints were found
}
示例14: run
// Runs sparse pyramidal Lucas-Kanade optical flow on the GPU: uploads the
// frames and input points, runs the device-side overload, then downloads
// the tracked points, status and (optionally) per-point tracking errors.
void SparsePyrLkOptFlowEstimatorGpu::run(
    InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
    OutputArray status, OutputArray errors)
{
    frame0_.upload(frame0.getMat());
    frame1_.upload(frame1.getMat());
    points0_.upload(points0.getMat());

    if (!errors.needed())
    {
        run(frame0_, frame1_, points0_, points1_, status_);
    }
    else
    {
        run(frame0_, frame1_, points0_, points1_, status_, errors_);
        errors_.download(errors.getMatRef());
    }

    points1_.download(points1.getMatRef());
    status_.download(status.getMatRef());
}
示例15: run
// Runs dense pyramidal Lucas-Kanade optical flow on the GPU and downloads
// the two flow components (and optional per-pixel errors) to host memory.
void DensePyrLkOptFlowEstimatorGpu::run(
    InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
    OutputArray errors)
{
    frame0_.upload(frame0.getMat());
    frame1_.upload(frame1.getMat());

    // Forward the configured window size / pyramid depth to the estimator.
    optFlowEstimator_.winSize = winSize_;
    optFlowEstimator_.maxLevel = maxLevel_;

    if (!errors.needed())
    {
        optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_);
    }
    else
    {
        optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_, &errors_);
        errors_.download(errors.getMatRef());
    }

    flowX_.download(flowX.getMatRef());
    flowY_.download(flowY.getMatRef());
}