This article collects typical usage examples of the C++ method InputArray::empty. If you are unsure what InputArray::empty does, how to call it, or what real-world uses look like, the curated code samples below may help. You can also explore further usage examples of the enclosing InputArray class.
Fifteen code examples of InputArray::empty are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
Example 1: computeSaliency
bool Saliency::computeSaliency( InputArray image, OutputArray saliencyMap )
{
    if( image.empty() )
        return false;

    return computeSaliencyImpl( image, saliencyMap );
}
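A minimal call-site sketch, assuming the opencv_contrib saliency module this method comes from; StaticSaliencySpectralResidual is just one concrete Saliency subclass, and the file name is illustrative:

#include <opencv2/saliency.hpp>
#include <opencv2/imgcodecs.hpp>

cv::Mat img = cv::imread("scene.png"); // empty if the file is missing
cv::Ptr<cv::saliency::Saliency> sal =
    cv::saliency::StaticSaliencySpectralResidual::create();

cv::Mat saliencyMap;
if (!sal->computeSaliency(img, saliencyMap))
{
    // an empty input image makes computeSaliency return false
}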
Example 2: cacheGValues
void DepthmapDenoiseWeightedHuberImpl::cacheGValues(InputArray _visibleLightImage){
    using namespace cv::cuda::device::dtam_denoise;
    localStream = cv::cuda::StreamAccessor::getStream(cvStream);
    if (!_visibleLightImage.empty()){
        visibleLightImage = _visibleLightImage.getGpuMat();
        cachedG = 0; // a new reference image invalidates the cached g values
    }
    if(cachedG)
        return; // already cached
    if(!alloced)
        allocate(rows, cols);

    // Call the GPU function for caching g's
    loadConstants(rows, cols, 0, 0, 0, 0, 0, 0, 0, 0);
    CV_Assert(_g1.isContinuous());
    float* pp  = (float*)visibleLightImage.data; // TODO: write a color version.
    float* g1p = (float*)_g1.data;
    float* gxp = (float*)_gx.data;
    float* gyp = (float*)_gy.data;
    computeGCaller(pp, g1p, gxp, gyp, cols);
    cachedG = 1;
}
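The !empty() guard makes the light image optional: passing cv::noArray() means "reuse the cached reference image". A hypothetical stripped-down helper (not part of OpenDTAM) showing the same cache-invalidation pattern:

#include <opencv2/core/cuda.hpp>

void setReferenceImage(cv::InputArray img, cv::cuda::GpuMat& cached, bool& gValuesValid)
{
    if (!img.empty()) {           // noArray() keeps the current cache
        cached = img.getGpuMat(); // a new reference image...
        gValuesValid = false;     // ...invalidates the cached g values
    }
}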
Example 3: detectImpl
void AKAZE::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
{
    cv::Mat img = image.getMat();
    if (img.type() != CV_8UC1)
        cvtColor(image, img, COLOR_BGR2GRAY);

    Mat img1_32;
    img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);

    AKAZEOptions options;
    options.descriptor = static_cast<DESCRIPTOR_TYPE>(descriptor);
    options.descriptor_channels = descriptor_channels;
    options.descriptor_size = descriptor_size;
    options.nsublevels = nsublevels;
    options.dthreshold = dtreshhold;
    options.img_width = img.cols;
    options.img_height = img.rows;

    AKAZEFeatures impl(options);
    impl.Create_Nonlinear_Scale_Space(img1_32);
    impl.Feature_Detection(keypoints);

    if (!mask.empty())
    {
        cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
    }
}
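The mask is optional in the public API as well: an empty mask (cv::noArray()) skips the filtering branch. A sketch against the later OpenCV 3+ interface, where AKAZE is created through cv::AKAZE::create() and the mask behaves the same way:

#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>

cv::Mat img = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
std::vector<cv::KeyPoint> kps;

cv::Mat mask = cv::Mat::zeros(img.size(), CV_8UC1);
mask(cv::Rect(0, 0, img.cols / 2, img.rows)) = 255; // keep only the left half

akaze->detect(img, kps, mask);          // mask.empty() == false: keypoints are filtered
akaze->detect(img, kps, cv::noArray()); // empty mask: the filter branch is skipped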
Example 4: Niblack
/*!
Niblack binarization algorithm.
@param src [in] Mat, single channel uchar image.
@param dst [out] Mat, result image.
@param windowSize [in] int, window size for calculation.
@param k [in] float, parameter for local threshold.
@return int, 0x0000 = Success.
*/
int Niblack(InputArray src, OutputArray dst, int windowSize, float k)
{
    if (src.type() != CV_8UC1 || src.empty())
        return 0x0001; /*!< source image type not supported. */

    /*! update window size, which should be odd. */
    if (windowSize < 2)
        return 0x0002; /*!< window size not supported. */
    if (windowSize % 2 == 0)
        windowSize++;

    Mat source, destination;
    Mat sourceUchar = src.getMat();
    sourceUchar.convertTo(source, CV_32FC1);

    /*! calculate mean and standard deviation via
        D(x) = E(x^2) - (E(x))^2 */
    Mat avg, power, avg_power, power_avg;
    Mat standard;
    boxFilter(source, avg, -1, Size(windowSize, windowSize));
    pow(avg, 2, avg_power);
    pow(source, 2, power);
    boxFilter(power, power_avg, -1, Size(windowSize, windowSize));
    sqrt(power_avg - avg_power, standard);

    /*! calculate local threshold */
    Mat threshold = avg + k * standard;

    /*! output result */
    dst.create(sourceUchar.size(), CV_8UC1);
    destination = dst.getMat();
    destination = source > threshold;
    return 0x0000;
}
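A usage sketch for this helper; the window size and k are illustrative (a negative k pulls the threshold below the local mean, a common setting for dark text on a light background):

cv::Mat gray = cv::imread("page.png", cv::IMREAD_GRAYSCALE);
cv::Mat bin;
int rc = Niblack(gray, bin, 25, -0.2f); // 25x25 window, illustrative k
// rc: 0x0000 success, 0x0001 wrong type or empty input, 0x0002 window too small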
Example 5: trainE
bool EM::trainE(InputArray samples,
                InputArray _means0,
                InputArray _covs0,
                InputArray _weights0,
                OutputArray logLikelihoods,
                OutputArray labels,
                OutputArray probs)
{
    Mat samplesMat = samples.getMat();
    vector<Mat> covs0;
    _covs0.getMatVector(covs0);

    Mat means0 = _means0.getMat(), weights0 = _weights0.getMat();

    setTrainData(START_E_STEP, samplesMat, 0, !_means0.empty() ? &means0 : 0,
                 !_covs0.empty() ? &covs0 : 0, !_weights0.empty() ? &weights0 : 0);
    return doTrain(START_E_STEP, logLikelihoods, labels, probs);
}
Example 6: create
bool CustomPattern::create(InputArray pattern, const Size2f boardSize, OutputArray output)
{
    CV_Assert(!pattern.empty() && (boardSize.area() > 0));
    Mat img = pattern.getMat();
    float pixel_size = (boardSize.width > boardSize.height)?   // Choose the longer side for a more accurate calculation
                       float(img.cols) / boardSize.width:      // width is longer
                       float(img.rows) / boardSize.height;     // height is longer
    return init(img, pixel_size, output);
}
Example 7: detectAndCompute
void detectAndCompute(InputArray image, InputArray mask,
                      std::vector<KeyPoint>& keypoints,
                      OutputArray descriptors,
                      bool useProvidedKeypoints)
{
    Mat img = image.getMat();
    if (img_width != img.cols) {
        img_width = img.cols;
        impl.release();
    }
    if (img_height != img.rows) {
        img_height = img.rows;
        impl.release();
    }
    if (impl.empty()) {
        AKAZEOptionsV2 options;
        options.descriptor = descriptor;
        options.descriptor_channels = descriptor_channels;
        options.descriptor_size = descriptor_size;
        options.img_width = img_width;
        options.img_height = img_height;
        options.dthreshold = threshold;
        options.omax = octaves;
        options.nsublevels = sublevels;
        options.diffusivity = diffusivity;
        impl = makePtr<AKAZEFeaturesV2>(options);
    }

    impl->Create_Nonlinear_Scale_Space(img);

    if (!useProvidedKeypoints)
    {
        impl->Feature_Detection(keypoints);
    }

    if (!mask.empty())
    {
        KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
    }

    if( descriptors.needed() )
    {
        Mat& desc = descriptors.getMatRef();
        impl->Compute_Descriptors(keypoints, desc);

        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
    }
}
Example 8: trainM
bool EM::trainM(InputArray samples,
                InputArray _probs0,
                OutputArray logLikelihoods,
                OutputArray labels,
                OutputArray probs)
{
    Mat samplesMat = samples.getMat();
    Mat probs0 = _probs0.getMat();

    setTrainData(START_M_STEP, samplesMat, !_probs0.empty() ? &probs0 : 0, 0, 0, 0);
    return doTrain(START_M_STEP, logLikelihoods, labels, probs);
}
Example 9: detectAndCompute
void detectAndCompute(InputArray image, InputArray mask,
                      std::vector<KeyPoint>& keypoints,
                      OutputArray descriptors,
                      bool useProvidedKeypoints)
{
    Mat img = image.getMat();
    if (img.type() != CV_8UC1 && img.type() != CV_16UC1)
        cvtColor(image, img, COLOR_BGR2GRAY);

    Mat img1_32;
    if ( img.depth() == CV_32F )
        img1_32 = img;
    else if ( img.depth() == CV_8U )
        img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
    else if ( img.depth() == CV_16U )
        img.convertTo(img1_32, CV_32F, 1.0 / 65535.0, 0);

    CV_Assert( ! img1_32.empty() );

    AKAZEOptions options;
    options.descriptor = descriptor;
    options.descriptor_channels = descriptor_channels;
    options.descriptor_size = descriptor_size;
    options.img_width = img.cols;
    options.img_height = img.rows;
    options.dthreshold = threshold;
    options.omax = octaves;
    options.nsublevels = sublevels;
    options.diffusivity = diffusivity;

    AKAZEFeatures impl(options);
    impl.Create_Nonlinear_Scale_Space(img1_32);

    if (!useProvidedKeypoints)
    {
        impl.Feature_Detection(keypoints);
    }

    if (!mask.empty())
    {
        KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
    }

    if( descriptors.needed() )
    {
        Mat& desc = descriptors.getMatRef();
        impl.Compute_Descriptors(keypoints, desc);

        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
    }
}
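Because detection is skipped when useProvidedKeypoints is true, the same entry point doubles as a pure descriptor pass. A sketch against the public Feature2D API (OpenCV 3+):

#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>

cv::Mat img = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
cv::Ptr<cv::AKAZE> f = cv::AKAZE::create();

std::vector<cv::KeyPoint> kps;
f->detect(img, kps); // detection pass (keypoints from any detector would do)

cv::Mat desc;
// useProvidedKeypoints = true: Feature_Detection is skipped, only descriptors are computed
f->detectAndCompute(img, cv::noArray(), kps, desc, true);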
Example 10: compute
void DescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
    if( image.empty() || keypoints.empty() )
    {
        descriptors.release();
        return;
    }

    KeyPointsFilter::runByImageBorder( keypoints, image.size(), 0 );
    KeyPointsFilter::runByKeypointSize( keypoints, std::numeric_limits<float>::epsilon() );

    computeImpl( image, keypoints, descriptors );
}
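The early return defines a small contract: with an empty image or keypoint list, any previous content of descriptors is released rather than left stale. A quick check of that behavior, assuming extractor is a cv::Ptr<cv::DescriptorExtractor> and img a valid image:

std::vector<cv::KeyPoint> kps;      // deliberately left empty
cv::Mat desc(10, 64, CV_32F);       // stale data from an earlier call
extractor->compute(img, kps, desc); // hits the empty() branch above
CV_Assert(desc.empty());            // the old descriptors were released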
Example 11: detectAndCompute
/*
 * Compute the descriptors for a set of keypoints in an image.
 * image        The image.
 * keypoints    The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
 * descriptors  Computed descriptors. Row i is the descriptor for keypoint i.
 */
void Feature2D::compute( InputArray image,
                         std::vector<KeyPoint>& keypoints,
                         OutputArray descriptors )
{
    CV_INSTRUMENT_REGION();

    if( image.empty() )
    {
        descriptors.release();
        return;
    }
    detectAndCompute(image, noArray(), keypoints, descriptors, true);
}
Example 12: integral
void cv::softcascade::SCascade::detect(InputArray _image, InputArray _rois, OutputArray _objects, cv::gpu::Stream& s) const
{
    CV_Assert(fields);

    // only color images and precomputed integrals are supported
    int type = _image.type();
    CV_Assert(type == CV_8UC3 || type == CV_32SC1 || (!_rois.empty()));

    const cv::gpu::GpuMat image = _image.getGpuMat();

    if (_objects.empty()) _objects.create(1, 4096 * sizeof(Detection), CV_8UC1);

    cv::gpu::GpuMat rois = _rois.getGpuMat(), objects = _objects.getGpuMat();

    /// roi
    Fields& flds = *fields;
    int shr = flds.shrinkage;
    flds.mask.create( rois.cols / shr, rois.rows / shr, rois.type());

    device::shrink(rois, flds.mask);
    //cv::gpu::transpose(flds.genRoiTmp, flds.mask, s);

    if (type == CV_8UC3)
    {
        flds.update(image.rows, image.cols, flds.shrinkage);

        if (flds.check((float)minScale, (float)maxScale, scales))
            flds.createLevels(image.rows, image.cols);

        flds.preprocessor->apply(image, flds.shrunk);
        integral(flds.shrunk, flds.hogluv, flds.integralBuffer, s);
    }
    else
    {
        if (s)
            s.enqueueCopy(image, flds.hogluv);
        else
            image.copyTo(flds.hogluv);
    }

    flds.detect(objects, s);

    if ( (flags & NMS_MASK) != NO_REJECT) // bitwise test of the NMS flag
    {
        cv::gpu::GpuMat spr(objects, cv::Rect(0, 0, flds.suppressed.cols, flds.suppressed.rows));
        flds.suppress(objects, s);
        flds.suppressed.copyTo(spr);
    }
}
Example 13: ocl_accumulate
static bool ocl_accumulate( InputArray _src, InputArray _src2, InputOutputArray _dst, double alpha,
                            InputArray _mask, int op_type )
{
    CV_Assert(op_type == ACCUMULATE || op_type == ACCUMULATE_SQUARE ||
              op_type == ACCUMULATE_PRODUCT || op_type == ACCUMULATE_WEIGHTED);

    int stype = _src.type(), cn = CV_MAT_CN(stype);
    int sdepth = CV_MAT_DEPTH(stype), ddepth = _dst.depth();

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0,
        haveMask = !_mask.empty();

    if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
        return false;

    const char * const opMap[4] = { "ACCUMULATE", "ACCUMULATE_SQUARE", "ACCUMULATE_PRODUCT",
                                    "ACCUMULATE_WEIGHTED" };

    ocl::Kernel k("accumulate", ocl::imgproc::accumulate_oclsrc,
                  format("-D %s%s -D srcT=%s -D cn=%d -D dstT=%s%s",
                         opMap[op_type], haveMask ? " -D HAVE_MASK" : "",
                         ocl::typeToStr(sdepth), cn, ocl::typeToStr(ddepth),
                         doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
    if (k.empty())
        return false;

    UMat src = _src.getUMat(), src2 = _src2.getUMat(), dst = _dst.getUMat(), mask = _mask.getUMat();

    ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
        src2arg = ocl::KernelArg::ReadOnlyNoSize(src2),
        dstarg = ocl::KernelArg::ReadWrite(dst),
        maskarg = ocl::KernelArg::ReadOnlyNoSize(mask);

    int argidx = k.set(0, srcarg);
    if (op_type == ACCUMULATE_PRODUCT)
        argidx = k.set(argidx, src2arg);
    argidx = k.set(argidx, dstarg);
    if (op_type == ACCUMULATE_WEIGHTED)
    {
        if (ddepth == CV_32F)
            argidx = k.set(argidx, (float)alpha);
        else
            argidx = k.set(argidx, alpha);
    }
    if (haveMask)
        k.set(argidx, maskarg);

    size_t globalsize[2] = { src.cols, src.rows };
    return k.run(2, globalsize, NULL, false);
}
Example 14: run
void ICP::run(bool withCuda, InputArray initObjSet)
{
    assert(!m_objSet.empty() && !m_modSet.empty());

    double d_pre = 100000, d_now = 100000;
    int iterCnt = 0;

    Mat objSet;
    Transformation tr;
    if (initObjSet.empty())
    {
        objSet = m_objSet.clone();
    }
    else
    {
        objSet = initObjSet.getMat();
    }
/*  plotTwoPoint3DSet(objSet, m_modSet);*/

    do
    {
        d_pre = d_now;

        Mat closestSet;
        Mat lambda(objSet.rows, 1, CV_64FC1);
        RUNANDTIME(global_timer, closestSet =
            getClosestPointsSet(objSet, lambda, KDTREE).clone(),
            OUTPUT && SUBOUTPUT, "compute closest points.");
        Mat tmpObjSet = convertMat(m_objSet);
        Mat tmpModSet = convertMat(closestSet);
        RUNANDTIME(global_timer, tr =
            computeTransformation(tmpObjSet, tmpModSet, lambda),
            OUTPUT && SUBOUTPUT, "compute transformation");
        Mat transformMat = tr.toMatrix();
        RUNANDTIME(global_timer, transformPointCloud(
            m_objSet, objSet, transformMat, withCuda),
            OUTPUT && SUBOUTPUT, "transform points.");
        RUNANDTIME(global_timer,
            d_now = computeError(objSet, closestSet, lambda, withCuda),
            OUTPUT && SUBOUTPUT, "compute error.");

        iterCnt++;
    } while (fabs(d_pre - d_now) > m_epsilon && iterCnt <= m_iterMax);

    m_tr = tr;

/*  waitKey();*/
/*  plotTwoPoint3DSet(objSet, m_modSet);*/
}
Example 15: homographyFromSquarePoints
void IPPE::PoseSolver::solveSquare(float squareLength, InputArray _imagePoints, InputArray _cameraMatrix, InputArray _distCoeffs,
                                   OutputArray _rvec1, OutputArray _tvec1, float& err1, OutputArray _rvec2, OutputArray _tvec2, float& err2)
{
    // allocate outputs:
    _rvec1.create(3, 1, CV_64FC1);
    _tvec1.create(3, 1, CV_64FC1);
    _rvec2.create(3, 1, CV_64FC1);
    _tvec2.create(3, 1, CV_64FC1);

    cv::Mat normalizedInputPoints; // undistorted version of imagePoints
    cv::Mat objectPoints2D;

    // generate the object points:
    generateSquareObjectCorners2D(squareLength, objectPoints2D);

    cv::Mat H; // homography from canonical object points to normalized pixels

    if (_cameraMatrix.empty()) {
        // this means imagePoints are defined in normalized pixel coordinates, so just copy them:
        _imagePoints.copyTo(normalizedInputPoints);
    }
    else {
        // undistort the image points (i.e. put them in normalized pixel coordinates):
        cv::undistortPoints(_imagePoints, normalizedInputPoints, _cameraMatrix, _distCoeffs);
    }

    // compute H
    homographyFromSquarePoints(normalizedInputPoints, squareLength / 2.0f, H);

    // now solve
    cv::Mat Ma, Mb;
    solveCanonicalForm(objectPoints2D, normalizedInputPoints, H, Ma, Mb);

    // sort poses according to reprojection error:
    cv::Mat M1, M2;
    cv::Mat objectPoints3D;
    generateSquareObjectCorners3D(squareLength, objectPoints3D);
    sortPosesByReprojError(objectPoints3D, _imagePoints, _cameraMatrix, _distCoeffs, Ma, Mb, M1, M2, err1, err2);

    // fill outputs
    rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
    rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);

    M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
    M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
}