This page collects typical usage examples of the C++ method GpuMat::empty. If you have been asking yourself how GpuMat::empty is used in practice, or are looking for real-world examples, the curated method examples below may help. You can also explore further usage examples of GpuMat, the class this method belongs to.
The following shows 15 code examples of the GpuMat::empty method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
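Before the numbered examples, here is a minimal sketch of the pattern nearly all of them share: GpuMat::empty() as an early-out guard before any GPU work. The helper name process and the include path reflect the modern cv::cuda module and are assumptions, not code taken from the examples below.

#include <opencv2/core/cuda.hpp>   // cv::cuda::GpuMat

// Minimal sketch: skip GPU work entirely when the matrix owns no device memory.
void process(const cv::cuda::GpuMat& src)
{
    if (src.empty())   // true when no rows/columns have been allocated
        return;        // early out: nothing to launch a kernel on
    // ... CUDA-accelerated processing on src ...
}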
Example 1: cv::cuda::BFMatcher_CUDA::knnMatchSingle
void cv::cuda::BFMatcher_CUDA::knnMatchSingle(const GpuMat& query, const GpuMat& train,
                                              GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k,
                                              const GpuMat& mask, Stream& stream)
{
    // GpuMat::empty() is the early-out guard: nothing to match.
    if (query.empty() || train.empty())
        return;

    using namespace cv::cuda::device::bf_knnmatch;

    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
                             const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
                             cudaStream_t stream);

    // Dispatch tables indexed by descriptor depth; 0 marks unsupported depths.
    static const caller_t callersL1[] =
    {
        matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
        matchL1_gpu<unsigned short>, matchL1_gpu<short>,
        matchL1_gpu<int>, matchL1_gpu<float>
    };
    static const caller_t callersL2[] =
    {
        0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
        0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
        0/*matchL2_gpu<int>*/, matchL2_gpu<float>
    };
    static const caller_t callersHamming[] =
    {
        matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
        matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
        matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
    };

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
    CV_Assert(train.type() == query.type() && train.cols == query.cols);
    CV_Assert(norm == NORM_L1 || norm == NORM_L2 || norm == NORM_HAMMING);

    const caller_t* callers = norm == NORM_L1 ? callersL1 : norm == NORM_L2 ? callersL2 : callersHamming;

    const int nQuery = query.rows;
    const int nTrain = train.rows;

    // k == 2 uses a packed 2-channel layout; the general path also needs allDist as scratch.
    if (k == 2)
    {
        ensureSizeIsEnough(1, nQuery, CV_32SC2, trainIdx);
        ensureSizeIsEnough(1, nQuery, CV_32FC2, distance);
    }
    else
    {
        ensureSizeIsEnough(nQuery, k, CV_32S, trainIdx);
        ensureSizeIsEnough(nQuery, k, CV_32F, distance);
        ensureSizeIsEnough(nQuery, nTrain, CV_32FC1, allDist);
    }

    trainIdx.setTo(Scalar::all(-1), stream);

    caller_t func = callers[query.depth()];
    CV_Assert(func != 0);

    func(query, train, k, mask, trainIdx, distance, allDist, StreamAccessor::getStream(stream));
}
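A hedged caller-side sketch for Example 1. The norm-type constructor argument and the knnMatchDownload companion call are assumptions about this transitional cv::cuda API, not part of the example itself.

// Usage sketch (assumed setup; queryDescriptors/trainDescriptors are host-side CV_32F rows).
cv::cuda::BFMatcher_CUDA matcher(cv::NORM_L2);
cv::cuda::GpuMat d_query, d_train;
d_query.upload(queryDescriptors);
d_train.upload(trainDescriptors);

cv::cuda::GpuMat trainIdx, distance, allDist;
matcher.knnMatchSingle(d_query, d_train, trainIdx, distance, allDist, 2);

std::vector<std::vector<cv::DMatch>> knnMatches;
matcher.knnMatchDownload(trainIdx, distance, knnMatches);   // assumed companion download call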
Example 2: cv::gpu::BruteForceMatcher_GPU_base::knnMatchSingle
void cv::gpu::BruteForceMatcher_GPU_base::knnMatchSingle(const GpuMat& query, const GpuMat& train,
                                                         GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k,
                                                         const GpuMat& mask, Stream& stream)
{
    if (query.empty() || train.empty())
        return;

    using namespace cv::gpu::device::bf_knnmatch;

    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
                             const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
                             cudaStream_t stream);

    // Older cv::gpu variant of Example 1: a single 2-D table indexed by [distType][depth]
    // instead of three per-norm tables; 0 marks unsupported depths.
    static const caller_t callers[3][6] =
    {
        {
            matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
            matchL1_gpu<unsigned short>, matchL1_gpu<short>,
            matchL1_gpu<int>, matchL1_gpu<float>
        },
        {
            0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
            0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
            0/*matchL2_gpu<int>*/, matchL2_gpu<float>
        },
        {
            matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
            matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
            matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
        }
    };

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
    CV_Assert(train.type() == query.type() && train.cols == query.cols);

    const int nQuery = query.rows;
    const int nTrain = train.rows;

    if (k == 2)
    {
        ensureSizeIsEnough(1, nQuery, CV_32SC2, trainIdx);
        ensureSizeIsEnough(1, nQuery, CV_32FC2, distance);
    }
    else
    {
        ensureSizeIsEnough(nQuery, k, CV_32S, trainIdx);
        ensureSizeIsEnough(nQuery, k, CV_32F, distance);
        ensureSizeIsEnough(nQuery, nTrain, CV_32FC1, allDist);
    }

    // Asynchronous initialization when a stream is supplied, blocking otherwise.
    if (stream)
        stream.enqueueMemSet(trainIdx, Scalar::all(-1));
    else
        trainIdx.setTo(Scalar::all(-1));

    caller_t func = callers[distType][query.depth()];
    CV_Assert(func != 0);

    func(query, train, k, mask, trainIdx, distance, allDist, StreamAccessor::getStream(stream));
}
Example 3: cv::gpu::BFMatcher_GPU::radiusMatchSingle
void cv::gpu::BFMatcher_GPU::radiusMatchSingle(const GpuMat& query, const GpuMat& train,
                                               GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
                                               const GpuMat& mask, Stream& stream)
{
    if (query.empty() || train.empty())
        return;

    using namespace cv::gpu::device::bf_radius_match;

    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb& train, float maxDistance, const PtrStepSzb& mask,
                             const PtrStepSzi& trainIdx, const PtrStepSzf& distance, const PtrStepSz<unsigned int>& nMatches,
                             cudaStream_t stream);

    static const caller_t callersL1[] =
    {
        matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
        matchL1_gpu<unsigned short>, matchL1_gpu<short>,
        matchL1_gpu<int>, matchL1_gpu<float>
    };
    static const caller_t callersL2[] =
    {
        0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
        0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
        0/*matchL2_gpu<int>*/, matchL2_gpu<float>
    };
    static const caller_t callersHamming[] =
    {
        matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
        matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
        matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
    };

    const int nQuery = query.rows;
    const int nTrain = train.rows;

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
    CV_Assert(train.type() == query.type() && train.cols == query.cols);
    // GpuMat::empty also distinguishes "allocate for me" from "reuse my preallocated buffers".
    CV_Assert(trainIdx.empty() || (trainIdx.rows == nQuery && trainIdx.size() == distance.size()));
    CV_Assert(norm == NORM_L1 || norm == NORM_L2 || norm == NORM_HAMMING);

    const caller_t* callers = norm == NORM_L1 ? callersL1 : norm == NORM_L2 ? callersL2 : callersHamming;

    ensureSizeIsEnough(1, nQuery, CV_32SC1, nMatches);
    if (trainIdx.empty())
    {
        // Heuristic default capacity: 1% of the train set, but at least 10 matches per query.
        ensureSizeIsEnough(nQuery, std::max((nTrain / 100), 10), CV_32SC1, trainIdx);
        ensureSizeIsEnough(nQuery, std::max((nTrain / 100), 10), CV_32FC1, distance);
    }

    if (stream)
        stream.enqueueMemSet(nMatches, Scalar::all(0));
    else
        nMatches.setTo(Scalar::all(0));

    caller_t func = callers[query.depth()];
    CV_Assert(func != 0);

    func(query, train, maxDistance, mask, trainIdx, distance, nMatches, StreamAccessor::getStream(stream));
}
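A hedged caller-side sketch for Example 3; the constructor argument is an assumption about this API, and the download step is the one shown later in Example 10.

// Usage sketch (assumed setup; d_query/d_train are previously uploaded descriptors).
cv::gpu::BFMatcher_GPU matcher(cv::NORM_HAMMING);
cv::gpu::GpuMat trainIdx, distance, nMatches;
matcher.radiusMatchSingle(d_query, d_train, trainIdx, distance, nMatches, 50.0f);

std::vector< std::vector<cv::DMatch> > matches;
matcher.radiusMatchDownload(trainIdx, distance, nMatches, matches);   // see Example 10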
Example 4: cv::gpu::ORB_GPU::buildScalePyramids
void cv::gpu::ORB_GPU::buildScalePyramids(const GpuMat& image, const GpuMat& mask)
{
    CV_Assert(image.type() == CV_8UC1);
    // An empty mask is valid and means "no mask".
    CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()));

    imagePyr_.resize(nLevels_);
    maskPyr_.resize(nLevels_);

    for (int level = 0; level < nLevels_; ++level)
    {
        float scale = 1.0f / getScale(scaleFactor_, firstLevel_, level);

        Size sz(cvRound(image.cols * scale), cvRound(image.rows * scale));

        ensureSizeIsEnough(sz, image.type(), imagePyr_[level]);
        ensureSizeIsEnough(sz, CV_8UC1, maskPyr_[level]);
        maskPyr_[level].setTo(Scalar::all(255));

        // Compute the resized image
        if (level != firstLevel_)
        {
            if (level < firstLevel_)
            {
                resize(image, imagePyr_[level], sz, 0, 0, INTER_LINEAR);

                if (!mask.empty())
                    resize(mask, maskPyr_[level], sz, 0, 0, INTER_LINEAR);
            }
            else
            {
                resize(imagePyr_[level - 1], imagePyr_[level], sz, 0, 0, INTER_LINEAR);

                if (!mask.empty())
                {
                    resize(maskPyr_[level - 1], maskPyr_[level], sz, 0, 0, INTER_LINEAR);
                    // Interpolated mask values below 255 are zeroed out.
                    threshold(maskPyr_[level], maskPyr_[level], 254, 0, THRESH_TOZERO);
                }
            }
        }
        else
        {
            image.copyTo(imagePyr_[level]);

            if (!mask.empty())
                mask.copyTo(maskPyr_[level]);
        }

        // Filter keypoints by image border
        ensureSizeIsEnough(sz, CV_8UC1, buf_);
        buf_.setTo(Scalar::all(0));
        Rect inner(edgeThreshold_, edgeThreshold_, sz.width - 2 * edgeThreshold_, sz.height - 2 * edgeThreshold_);
        buf_(inner).setTo(Scalar::all(255));

        bitwise_and(maskPyr_[level], buf_, maskPyr_[level]);
    }
}
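The getScale helper is not shown in Example 4. A plausible sketch, assuming the usual geometric pyramid scaling:

// Assumed helper: level k is scaleFactor^(k - firstLevel) times the base resolution.
static float getScale(float scaleFactor, int firstLevel, int level)
{
    return std::pow(scaleFactor, static_cast<float>(level - firstLevel));
}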
Example 5: cv::gpu::BFMatcher_GPU::knnMatch2Collection
void cv::gpu::BFMatcher_GPU::knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection,
                                                 GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
                                                 const GpuMat& maskCollection, Stream& stream)
{
    if (query.empty() || trainCollection.empty())
        return;

    using namespace cv::gpu::device::bf_knnmatch;

    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
                             const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
                             int cc, cudaStream_t stream);

    static const caller_t callersL1[] =
    {
        match2L1_gpu<unsigned char>, 0/*match2L1_gpu<signed char>*/,
        match2L1_gpu<unsigned short>, match2L1_gpu<short>,
        match2L1_gpu<int>, match2L1_gpu<float>
    };
    static const caller_t callersL2[] =
    {
        0/*match2L2_gpu<unsigned char>*/, 0/*match2L2_gpu<signed char>*/,
        0/*match2L2_gpu<unsigned short>*/, 0/*match2L2_gpu<short>*/,
        0/*match2L2_gpu<int>*/, match2L2_gpu<float>
    };
    static const caller_t callersHamming[] =
    {
        match2Hamming_gpu<unsigned char>, 0/*match2Hamming_gpu<signed char>*/,
        match2Hamming_gpu<unsigned short>, 0/*match2Hamming_gpu<short>*/,
        match2Hamming_gpu<int>, 0/*match2Hamming_gpu<float>*/
    };

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
    CV_Assert(norm == NORM_L1 || norm == NORM_L2 || norm == NORM_HAMMING);

    const caller_t* callers = norm == NORM_L1 ? callersL1 : norm == NORM_L2 ? callersL2 : callersHamming;

    const int nQuery = query.rows;

    ensureSizeIsEnough(1, nQuery, CV_32SC2, trainIdx);
    ensureSizeIsEnough(1, nQuery, CV_32SC2, imgIdx);
    ensureSizeIsEnough(1, nQuery, CV_32FC2, distance);

    if (stream)
        stream.enqueueMemSet(trainIdx, Scalar::all(-1));
    else
        trainIdx.setTo(Scalar::all(-1));

    caller_t func = callers[query.depth()];
    CV_Assert(func != 0);

    // Kernels specialize on the device's compute capability (e.g. 20 for sm_20).
    DeviceInfo info;
    int cc = info.majorVersion() * 10 + info.minorVersion();

    func(query, trainCollection, maskCollection, trainIdx, imgIdx, distance, cc, StreamAccessor::getStream(stream));
}
Example 6: _Flow::detect
void _Flow::detect(void)
{
    Frame* pGray;
    Frame* pNextFrame;
    Frame* pPrevFrame;
    GpuMat* pPrev;
    GpuMat* pNext;
    GpuMat GMat;
    GpuMat pGMat[2];

    if (m_pStream == NULL)
        return;

    pGray = m_pStream->getGrayFrame();
    if (pGray->empty())
        return;

    // Only advance when a genuinely new frame has arrived.
    pNextFrame = m_pGrayFrames->getLastFrame();
    if (pGray->getFrameID() <= pNextFrame->getFrameID())
        return;

    m_pGrayFrames->updateFrameIndex();
    pNextFrame = m_pGrayFrames->getLastFrame();
    pPrevFrame = m_pGrayFrames->getPrevFrame();
    pNextFrame->getResizedOf(pGray, m_width, m_height);

    pPrev = pPrevFrame->getGMat();
    pNext = pNextFrame->getGMat();

    // GpuMat::empty guards both frame slots before running Farneback optical flow.
    if (pPrev->empty()) return;
    if (pNext->empty()) return;
    if (pPrev->size() != pNext->size()) return;

    m_pFarn->calc(*pPrev, *pNext, m_GFlowMat);

    // Generate a crude depth map from the flow: (|flow.x| + |flow.y|) * 100, saturated into CV_8U.
    if (m_bDepth == 0)
        return;

    cuda::abs(m_GFlowMat, GMat);
    cuda::split(GMat, pGMat);
    cuda::add(pGMat[0], pGMat[1], GMat);
    cuda::multiply(GMat, Scalar(100), pGMat[1]);
    pGMat[1].convertTo(*(m_pDepth->getGMat()), CV_8UC1);
    m_pDepth->updatedGMat();

    //	m_flowMax = cuda::sum(fGMat)[0] / (fGMat.cols*fGMat.rows);
    //	fInterval = 1.0/m_flowMax;
    //	fInterval *= 50.0;
    //	cuda::min(fGMat,Scalar(m_flowMax),pGMat[0]);
    //	cuda::multiply(pGMat[0],Scalar(fInterval),fGMat);
    //	cv::cuda::cvtColor(depthGMat, idxGMat, CV_GRAY2BGR);
    //	m_pGpuLUT->transform(idxGMat,segGMat);
}
Example 7: cv::cuda::BFMatcher_CUDA::matchDownload
void cv::cuda::BFMatcher_CUDA::matchDownload(const GpuMat& trainIdx, const GpuMat& distance, std::vector<DMatch>& matches)
{
    if (trainIdx.empty() || distance.empty())
        return;

    // Constructing a Mat from a GpuMat performs a blocking device-to-host copy.
    Mat trainIdxCPU(trainIdx);
    Mat distanceCPU(distance);

    matchConvert(trainIdxCPU, distanceCPU, matches);
}
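A hedged alternative to the blocking Mat(GpuMat) copies above, assuming the stream-aware GpuMat::download overload is available in this API version:

// Sketch: stream-ordered download instead of a blocking constructor copy.
cv::Mat trainIdxCPU, distanceCPU;
cv::cuda::Stream stream;
trainIdx.download(trainIdxCPU, stream);
distance.download(distanceCPU, stream);
stream.waitForCompletion();   // host data is valid only after synchronization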
Example 8: cv::gpu::BruteForceMatcher_GPU_base::matchDownload
void cv::gpu::BruteForceMatcher_GPU_base::matchDownload(const GpuMat& trainIdx, const GpuMat& distance, vector<DMatch>& matches)
{
    if (trainIdx.empty() || distance.empty())
        return;

    Mat trainIdxCPU(trainIdx);
    Mat distanceCPU(distance);

    matchConvert(trainIdxCPU, distanceCPU, matches);
}
Example 9: cv::gpu::minMaxLoc (legacy DevMem2D API)
void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
                        const GpuMat& mask, GpuMat& valbuf, GpuMat& locbuf)
{
    using namespace mathfunc::minmaxloc;

    typedef void (*Caller)(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
    typedef void (*MaskedCaller)(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);

    // Callers indexed by [device has atomics][source depth]; double requires atomics support.
    static const Caller callers[2][7] =
    {
        { min_max_loc_multipass_caller<unsigned char>, min_max_loc_multipass_caller<char>,
          min_max_loc_multipass_caller<unsigned short>, min_max_loc_multipass_caller<short>,
          min_max_loc_multipass_caller<int>, min_max_loc_multipass_caller<float>, 0 },
        { min_max_loc_caller<unsigned char>, min_max_loc_caller<char>,
          min_max_loc_caller<unsigned short>, min_max_loc_caller<short>,
          min_max_loc_caller<int>, min_max_loc_caller<float>, min_max_loc_caller<double> }
    };

    static const MaskedCaller masked_callers[2][7] =
    {
        { min_max_loc_mask_multipass_caller<unsigned char>, min_max_loc_mask_multipass_caller<char>,
          min_max_loc_mask_multipass_caller<unsigned short>, min_max_loc_mask_multipass_caller<short>,
          min_max_loc_mask_multipass_caller<int>, min_max_loc_mask_multipass_caller<float>, 0 },
        { min_max_loc_mask_caller<unsigned char>, min_max_loc_mask_caller<char>,
          min_max_loc_mask_caller<unsigned short>, min_max_loc_mask_caller<short>,
          min_max_loc_mask_caller<int>, min_max_loc_mask_caller<float>, min_max_loc_mask_caller<double> }
    };

    CV_Assert(src.channels() == 1);
    CV_Assert(mask.empty() || (mask.type() == CV_8U && src.size() == mask.size()));
    CV_Assert(src.type() != CV_64F || hasNativeDoubleSupport(getDevice()));

    // Redirect null output pointers to locals so the callers can write unconditionally.
    double minVal_; if (!minVal) minVal = &minVal_;
    double maxVal_; if (!maxVal) maxVal = &maxVal_;
    int minLoc_[2];
    int maxLoc_[2];

    Size valbuf_size, locbuf_size;
    get_buf_size_required(src.cols, src.rows, src.elemSize(), valbuf_size.width,
                          valbuf_size.height, locbuf_size.width, locbuf_size.height);
    valbuf.create(valbuf_size, CV_8U);
    locbuf.create(locbuf_size, CV_8U);

    if (mask.empty())
    {
        Caller caller = callers[hasAtomicsSupport(getDevice())][src.type()];
        if (!caller) CV_Error(CV_StsBadArg, "minMaxLoc: unsupported type");
        caller(src, minVal, maxVal, minLoc_, maxLoc_, valbuf, locbuf);
    }
    else
    {
        MaskedCaller caller = masked_callers[hasAtomicsSupport(getDevice())][src.type()];
        if (!caller) CV_Error(CV_StsBadArg, "minMaxLoc: unsupported type");
        caller(src, mask, minVal, maxVal, minLoc_, maxLoc_, valbuf, locbuf);
    }

    if (minLoc) { minLoc->x = minLoc_[0]; minLoc->y = minLoc_[1]; }
    if (maxLoc) { maxLoc->x = maxLoc_[0]; maxLoc->y = maxLoc_[1]; }
}
Example 10: cv::gpu::BFMatcher_GPU::radiusMatchDownload
void cv::gpu::BFMatcher_GPU::radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& distance, const GpuMat& nMatches,
                                                 vector< vector<DMatch> >& matches, bool compactResult)
{
    if (trainIdx.empty() || distance.empty() || nMatches.empty())
        return;

    Mat trainIdxCPU(trainIdx);
    Mat distanceCPU(distance);
    Mat nMatchesCPU(nMatches);

    radiusMatchConvert(trainIdxCPU, distanceCPU, nMatchesCPU, matches, compactResult);
}
Example 11: cv::gpu::BruteForceMatcher_GPU_base::matchCollection
void cv::gpu::BruteForceMatcher_GPU_base::matchCollection(const GpuMat& query, const GpuMat& trainCollection,
                                                          GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
                                                          const GpuMat& masks, Stream& stream)
{
    if (query.empty() || trainCollection.empty())
        return;

    using namespace ::cv::gpu::device::bf_match;

    typedef void (*caller_t)(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,
                             const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,
                             int cc, cudaStream_t stream);

    static const caller_t callers[3][6] =
    {
        {
            matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
            matchL1_gpu<unsigned short>, matchL1_gpu<short>,
            matchL1_gpu<int>, matchL1_gpu<float>
        },
        {
            0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
            0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
            0/*matchL2_gpu<int>*/, matchL2_gpu<float>
        },
        {
            matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
            matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
            matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
        }
    };

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);

    const int nQuery = query.rows;

    ensureSizeIsEnough(1, nQuery, CV_32S, trainIdx);
    ensureSizeIsEnough(1, nQuery, CV_32S, imgIdx);
    ensureSizeIsEnough(1, nQuery, CV_32F, distance);

    caller_t func = callers[distType][query.depth()];
    CV_Assert(func != 0);

    DeviceInfo info;
    int cc = info.majorVersion() * 10 + info.minorVersion();

    func(query, trainCollection, masks, trainIdx, imgIdx, distance, cc, StreamAccessor::getStream(stream));
}
Example 12: cv::gpu::minMaxLoc (PtrStepSz API)
void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
                        const GpuMat& mask, GpuMat& valBuf, GpuMat& locBuf)
{
    typedef void (*func_t)(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);
    static const func_t funcs[] =
    {
        ::minMaxLoc::run<uchar>,
        ::minMaxLoc::run<schar>,
        ::minMaxLoc::run<ushort>,
        ::minMaxLoc::run<short>,
        ::minMaxLoc::run<int>,
        ::minMaxLoc::run<float>,
        ::minMaxLoc::run<double>
    };

    CV_Assert( src.channels() == 1 );
    CV_Assert( mask.empty() || (mask.size() == src.size() && mask.type() == CV_8U) );

    if (src.depth() == CV_64F)
    {
        if (!deviceSupports(NATIVE_DOUBLE))
            CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
    }

    Size valbuf_size, locbuf_size;
    ::minMaxLoc::getBufSize(src.cols, src.rows, src.elemSize(), valbuf_size.width, valbuf_size.height, locbuf_size.width, locbuf_size.height);
    ensureSizeIsEnough(valbuf_size, CV_8U, valBuf);
    ensureSizeIsEnough(locbuf_size, CV_8U, locBuf);

    const func_t func = funcs[src.depth()];

    // Null output pointers fall back to locals; &minLoc->x exposes the Point's
    // contiguous x,y ints as the int* the kernel caller expects.
    double temp1, temp2;
    Point temp3, temp4;
    func(src, mask, minVal ? minVal : &temp1, maxVal ? maxVal : &temp2, minLoc ? &minLoc->x : &temp3.x, maxLoc ? &maxLoc->x : &temp4.x, valBuf, locBuf);
}
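A hedged caller-side sketch for Example 12, matching its signature; the buffer names and upload step are assumptions.

// Usage sketch: the two scratch buffers can be reused across calls.
cv::gpu::GpuMat d_src, valBuf, locBuf;
d_src.upload(hostImage);   // single-channel input

double minVal = 0.0, maxVal = 0.0;
cv::Point minLoc, maxLoc;
cv::gpu::minMaxLoc(d_src, &minVal, &maxVal, &minLoc, &maxLoc, cv::gpu::GpuMat(), valBuf, locBuf);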
Example 13: cv::gpu::normalize
void cv::gpu::normalize(const GpuMat& src, GpuMat& dst, double a, double b, int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf)
{
    double scale = 1, shift = 0;

    if (norm_type == NORM_MINMAX)
    {
        // Affine map of the source range [smin, smax] onto [min(a,b), max(a,b)].
        double smin = 0, smax = 0;
        double dmin = std::min(a, b), dmax = std::max(a, b);
        minMax(src, &smin, &smax, mask, norm_buf);
        scale = (dmax - dmin) * (smax - smin > numeric_limits<double>::epsilon() ? 1.0 / (smax - smin) : 0.0);
        shift = dmin - smin * scale;
    }
    else if (norm_type == NORM_L2 || norm_type == NORM_L1 || norm_type == NORM_INF)
    {
        // Scale so that the chosen norm of dst equals a.
        scale = norm(src, norm_type, mask, norm_buf);
        scale = scale > numeric_limits<double>::epsilon() ? a / scale : 0.0;
        shift = 0;
    }
    else
    {
        CV_Error(CV_StsBadArg, "Unknown/unsupported norm type");
    }

    if (mask.empty())
    {
        src.convertTo(dst, dtype, scale, shift);
    }
    else
    {
        // With a mask, convert into a scratch buffer first, then copy only the masked pixels.
        src.convertTo(cvt_buf, dtype, scale, shift);
        cvt_buf.copyTo(dst, mask);
    }
}
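A hedged caller-side sketch for Example 13, again matching its own signature; variable names are assumptions.

// Usage sketch: min-max normalize an image into [0, 255], reusing scratch buffers.
cv::gpu::GpuMat d_src, d_dst, normBuf, cvtBuf;
d_src.upload(hostImage);
cv::gpu::normalize(d_src, d_dst, 0, 255, cv::NORM_MINMAX, CV_8U,
                   cv::gpu::GpuMat(), normBuf, cvtBuf);   // empty mask = normalize everything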
Example 14: cv::gpu::bitwise_not
void cv::gpu::bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask)
{
    if (mask.empty())
        ::bitwise_not_caller(src, dst, 0);
    else
        ::bitwise_not_caller(src, dst, mask, 0);
}
Example 15: cv::gpu::CascadeClassifier_GPU::detectMultiScale
int cv::gpu::CascadeClassifier_GPU::detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor, int minNeighbors, Size minSize)
{
    CV_Assert( scaleFactor > 1 && image.depth() == CV_8U);
    CV_Assert( !this->empty());

    // Allocate a default-sized result buffer of packed Rects if the caller did not supply one.
    const int defaultObjSearchNum = 100;
    if (objectsBuf.empty())
    {
        objectsBuf.create(1, defaultObjSearchNum, DataType<Rect>::type);
    }

    // Never search below the classifier's native window size.
    NcvSize32u ncvMinSize = impl->getClassifierSize();
    if (ncvMinSize.width < (unsigned)minSize.width && ncvMinSize.height < (unsigned)minSize.height)
    {
        ncvMinSize.width = minSize.width;
        ncvMinSize.height = minSize.height;
    }

    unsigned int numDetections;
    NCVStatus ncvStat = impl->process(image, objectsBuf, (float)scaleFactor, minNeighbors, findLargestObject, visualizeInPlace, ncvMinSize, numDetections);
    if (ncvStat != NCV_SUCCESS)
    {
        CV_Error(CV_GpuApiCallError, "Error in face detection");
    }

    return numDetections;
}
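A hedged caller-side sketch for Example 15, following the download pattern from the old gpu cascade documentation; the cascade file name is an assumption.

// Usage sketch: detect, then download only the first numDetections packed Rects.
cv::gpu::CascadeClassifier_GPU cascade("haarcascade_frontalface_alt.xml");   // path assumed
cv::gpu::GpuMat d_image, objectsBuf;
d_image.upload(grayFrame);   // CV_8U input

int numDetections = cascade.detectMultiScale(d_image, objectsBuf, 1.2, 4, cv::Size(24, 24));

cv::Mat h_objects;
objectsBuf.colRange(0, numDetections).download(h_objects);
const cv::Rect* faces = h_objects.ptr<cv::Rect>();   // numDetections rectangles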