This article collects typical usage examples of the C++ method InputArray::getMatVector. If you have been wondering how exactly InputArray::getMatVector is used in C++, or what real calls to it look like, the hand-picked code samples below may help. You can also explore further usage examples of the enclosing class, InputArray.
Eight code examples of InputArray::getMatVector are shown below, sorted by popularity by default.
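Before the examples, here is a minimal, self-contained sketch of the callee side: getMatVector copies the contents of an InputArray (for instance a std::vector&lt;cv::Mat&gt; or std::vector&lt;cv::UMat&gt; passed by the caller) into a std::vector&lt;cv::Mat&gt; that the function can index directly. The helper sumChannels below is hypothetical, used only for illustration, and it assumes all input matrices share the same size and type; it is not part of OpenCV.

#include <opencv2/core.hpp>
#include <vector>

// Hypothetical helper: accepts any array-like input and returns the
// element-wise sum of all matrices it contains (assumes equal size/type).
static cv::Mat sumChannels(cv::InputArray _mats)
{
    std::vector<cv::Mat> mats;
    _mats.getMatVector(mats);          // unwrap the InputArray into concrete Mat headers
    CV_Assert(!mats.empty());

    cv::Mat acc = mats[0].clone();
    for (size_t i = 1; i < mats.size(); ++i)
        cv::add(acc, mats[i], acc);    // accumulate element-wise
    return acc;
}

// Usage:
//   std::vector<cv::Mat> planes = { a, b, c };
//   cv::Mat total = sumChannels(planes);   // the vector binds to InputArray implicitly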
Example 1: calcPosition
void calcPosition( InputArray _tvecs, InputArray _rvecs, InputArray _pts,
                   InputArray _cameraMatrices, InputArray _distortionMatrices,
                   OutputArray _state, OutputArray _covariance )
{
    Ptr< PositionCalculator > p_pc = PositionCalculator::create();

    // Unwrap the translation and rotation vectors; at least two measurements are required.
    std::vector< Mat > tvecs, rvecs;
    _tvecs.getMatVector( tvecs );
    _rvecs.getMatVector( rvecs );
    CV_Assert( tvecs.size() >= 2 );
    CV_Assert( tvecs.size() == rvecs.size() );

    Mat pts = _pts.getMat();
    CV_Assert( tvecs.size() == (size_t)pts.checkVector( 2, CV_32F, true ) );

    // Camera and distortion matrices may be given per measurement (a vector of Mat)
    // or shared by all measurements (a single Mat).
    std::vector< Mat > camera_m, dist_m;
    if ( _cameraMatrices.kind() == _InputArray::STD_VECTOR_MAT )
    {
        _cameraMatrices.getMatVector( camera_m );
        CV_Assert( tvecs.size() == camera_m.size() );
    }
    else
    {
        camera_m.push_back( _cameraMatrices.getMat() );
        CV_Assert( ( camera_m[0].rows == 3 ) && ( camera_m[0].cols == 3 ) );
    }
    if ( _distortionMatrices.kind() == _InputArray::STD_VECTOR_MAT )
    {
        _distortionMatrices.getMatVector( dist_m );
        CV_Assert( tvecs.size() == dist_m.size() );
    }
    else
    {
        dist_m.push_back( _distortionMatrices.getMat() );
        CV_Assert( ( ( dist_m[0].rows == 5 ) && ( dist_m[0].cols == 1 ) ) || dist_m[0].empty() );
    }

    // Feed each measurement to the position calculator, then compute the state estimate.
    Mat camera = camera_m[0];
    Mat dist = dist_m[0];
    for ( size_t i = 0; i < tvecs.size(); ++i )
    {
        if ( camera_m.size() == tvecs.size() )
            camera = camera_m[i];
        if ( dist_m.size() == tvecs.size() )
            dist = dist_m[i];
        p_pc->addMeasurement( tvecs[i], rvecs[i], pts.at< Point2f >( i ), camera, dist );
    }
    p_pc->computeState( _state, _covariance );
}
Example 2: hconcat
void cv::hconcat(InputArray _src, OutputArray dst)
{
    CV_INSTRUMENT_REGION();

    // Unwrap the input into a vector of Mat and forward to the pointer-based overload.
    std::vector<Mat> src;
    _src.getMatVector(src);
    hconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
}
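For the caller-side view of the same mechanism, a std::vector&lt;cv::Mat&gt; binds to the InputArray parameter directly, which is what makes the getMatVector call above possible. A minimal sketch (the tiny matrices and variable names are illustrative only):

#include <opencv2/core.hpp>
#include <vector>

int main()
{
    // Three tiles of the same height, depth and channel count.
    cv::Mat a = cv::Mat::ones(2, 2, CV_8U) * 1;
    cv::Mat b = cv::Mat::ones(2, 2, CV_8U) * 2;
    cv::Mat c = cv::Mat::ones(2, 2, CV_8U) * 3;

    std::vector<cv::Mat> tiles = { a, b, c };
    cv::Mat row;
    cv::hconcat(tiles, row);   // the overload shown above; row becomes 2x6
    return 0;
}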
Example 3: estimateTransform
Stitcher::Status Stitcher::estimateTransform(InputArray images, const vector<vector<Rect> > &rois)
{
    // Store the input images and regions of interest, then match and estimate camera parameters.
    images.getMatVector(imgs_);
    rois_ = rois;

    Status status;
    if ((status = matchImages()) != OK)
        return status;

    estimateCameraParams();
    return OK;
}
Example 4: trainE
bool EM::trainE(InputArray samples,
                InputArray _means0,
                InputArray _covs0,
                InputArray _weights0,
                OutputArray logLikelihoods,
                OutputArray labels,
                OutputArray probs)
{
    Mat samplesMat = samples.getMat();

    // Optional initial means, covariances and weights for starting with the E-step.
    vector<Mat> covs0;
    _covs0.getMatVector(covs0);
    Mat means0 = _means0.getMat(), weights0 = _weights0.getMat();

    setTrainData(START_E_STEP, samplesMat, 0, !_means0.empty() ? &means0 : 0,
                 !_covs0.empty() ? &covs0 : 0, !_weights0.empty() ? &weights0 : 0);
    return doTrain(START_E_STEP, logLikelihoods, labels, probs);
}
Example 5: extractMatVector
static std::vector<Mat> extractMatVector(InputArray in)
{
    if (in.isMat() || in.isUMat())
    {
        // A single matrix becomes a one-element vector.
        return std::vector<Mat>(1, in.getMat());
    }
    else if (in.isMatVector())
    {
        // Already a std::vector<Mat>: reuse the underlying object directly.
        return *static_cast<const std::vector<Mat>*>(in.getObj());
    }
    else if (in.isUMatVector())
    {
        // A vector of UMat is converted element-wise into Mat headers.
        std::vector<Mat> vmat;
        in.getMatVector(vmat);
        return vmat;
    }
    else
    {
        CV_Assert(in.isMat() || in.isMatVector() || in.isUMat() || in.isUMatVector());
        return std::vector<Mat>();
    }
}
Example 6: stitch
bool GPSStitcher::stitch( InputArray images,
                          OutputArray pano,
                          vector<CameraParams> cameras,
                          bool useFeatures){
  /**
   * Use these cameras
   */
  this->cameras_ = cameras;
  images.getMatVector(imgs_);

  cout << "Matching images...\n";
  if (!prepareAndMatchImages(useFeatures)){
    return false;
  }
  cout << "Images matched successfully.\n";

  /**
   * Compose Panorama
   */
  return composePanorama(vector<Mat>(), pano, useFeatures);
}
Example 7: composePanorama
Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano)
{
    LOGLN("Warping images (auxiliary)... ");

    // If new images are supplied, replace the stored ones and rebuild the
    // downscaled copies used for seam estimation.
    vector<Mat> imgs;
    images.getMatVector(imgs);
    if (!imgs.empty())
    {
        CV_Assert(imgs.size() == imgs_.size());

        Mat img;
        seam_est_imgs_.resize(imgs.size());

        for (size_t i = 0; i < imgs.size(); ++i)
        {
            imgs_[i] = imgs[i];
            resize(imgs[i], img, Size(), seam_scale_, seam_scale_);
            seam_est_imgs_[i] = img.clone();
        }

        vector<Mat> seam_est_imgs_subset;
        vector<Mat> imgs_subset;

        for (size_t i = 0; i < indices_.size(); ++i)
        {
            imgs_subset.push_back(imgs_[indices_[i]]);
            seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
        }

        seam_est_imgs_ = seam_est_imgs_subset;
        imgs_ = imgs_subset;
    }

    Mat &pano_ = pano.getMatRef();

    int64 t = getTickCount();

    vector<Point> corners(imgs_.size());
    vector<Mat> masks_warped(imgs_.size());
    vector<Mat> images_warped(imgs_.size());
    vector<Size> sizes(imgs_.size());
    vector<Mat> masks(imgs_.size());

    // Prepare image masks
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        masks[i].create(seam_est_imgs_[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks
    Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        Mat_<float> K;
        cameras_[i].K().convertTo(K, CV_32F);
        K(0,0) *= (float)seam_work_aspect_;
        K(0,2) *= (float)seam_work_aspect_;
        K(1,1) *= (float)seam_work_aspect_;
        K(1,2) *= (float)seam_work_aspect_;

        corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    vector<Mat> images_warped_f(imgs_.size());
    for (size_t i = 0; i < imgs_.size(); ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Find seams
    exposure_comp_->feed(corners, images_warped, masks_warped);
    seam_finder_->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    seam_est_imgs_.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");

    t = getTickCount();

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;

    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;
    bool is_blender_prepared = false;

    double compose_scale = 1;
    bool is_compose_scale_set = false;

    Mat full_img, img;
    for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)
    {
        LOGLN("Compositing image #" << indices_[img_idx] + 1);
//......... the rest of the code is omitted here .........
Example 8: train
virtual void train(InputArray src, InputArray lbls) {
    // Copy the training images into the member vector and take the label matrix as-is.
    src.getMatVector(imgs);
    labels = lbls.getMat();
}