This article collects typical usage examples of the C++ GpuMat class. If you are wondering what the GpuMat class is for, how to use it, or how others use it in practice, the hand-picked class code examples below may help.
The following presents 15 code examples of the GpuMat class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: CV_Error
int cv::gpu::FAST_GPU::getKeyPoints(GpuMat& keypoints)
{
    using namespace cv::gpu::device::fast;

    if (!TargetArchs::builtWith(GLOBAL_ATOMICS) || !DeviceInfo().supports(GLOBAL_ATOMICS))
        CV_Error(CV_StsNotImplemented, "The device doesn't support global atomics");

    if (count_ == 0)
        return 0;

    // keypoints is a ROWS_COUNT x count_ matrix: one row of locations, one row of responses
    ensureSizeIsEnough(ROWS_COUNT, count_, CV_32FC1, keypoints);

    if (nonmaxSupression)
        return nonmaxSupression_gpu(kpLoc_.ptr<short2>(), count_, score_, keypoints.ptr<short2>(LOCATION_ROW), keypoints.ptr<float>(RESPONSE_ROW));

    GpuMat locRow(1, count_, kpLoc_.type(), keypoints.ptr(0));
    kpLoc_.colRange(0, count_).copyTo(locRow);
    keypoints.row(1).setTo(Scalar::all(0));

    return count_;
}
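For orientation, here is a minimal caller sketch showing how getKeyPoints is reached through the public FAST_GPU interface; it assumes OpenCV 2.4 built with CUDA, and the image path is illustrative:

#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    cv::Mat img = cv::imread("scene.png", cv::IMREAD_GRAYSCALE); // hypothetical input
    cv::gpu::GpuMat d_img(img);

    cv::gpu::FAST_GPU fast(20, true);             // threshold 20, non-max suppression on
    cv::gpu::GpuMat d_keypoints;
    fast(d_img, cv::gpu::GpuMat(), d_keypoints);  // runs detection, then getKeyPoints

    std::vector<cv::KeyPoint> keypoints;
    fast.downloadKeypoints(d_keypoints, keypoints);
    return 0;
}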
Example 2: h_keypoints
void cv::gpu::ORB_GPU::downloadKeyPoints(GpuMat &d_keypoints, std::vector<KeyPoint>& keypoints)
{
    if (d_keypoints.empty())
    {
        keypoints.clear();
        return;
    }

    Mat h_keypoints(d_keypoints);   // implicit device-to-host download
    convertKeyPoints(h_keypoints, keypoints);
}
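A brief usage sketch for the surrounding class (OpenCV 2.4 gpu API; the wrapper function is hypothetical): detection fills the device-side keypoint matrix, and downloadKeyPoints brings it back as a std::vector<KeyPoint>:

#include <vector>
#include <opencv2/gpu/gpu.hpp>

void detectOrb(const cv::gpu::GpuMat& d_img, std::vector<cv::KeyPoint>& keypoints) // hypothetical helper
{
    cv::gpu::ORB_GPU orb(500);   // detect up to 500 keypoints
    cv::gpu::GpuMat d_keypoints, d_descriptors;
    orb(d_img, cv::gpu::GpuMat(), d_keypoints, d_descriptors);

    orb.downloadKeyPoints(d_keypoints, keypoints);   // the function shown above
}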
Example 3: void
void cv::gpu::pyrDown(InputArray _src, OutputArray _dst, Stream& stream)
{
    using namespace cv::gpu::cudev::imgproc;

    typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);

    // dispatch table indexed by [depth][channels - 1]; 0 marks unsupported combinations
    static const func_t funcs[6][4] =
    {
        {pyrDown_gpu<uchar>      , 0 /*pyrDown_gpu<uchar2>*/ , pyrDown_gpu<uchar3>      , pyrDown_gpu<uchar4>      },
        {0 /*pyrDown_gpu<schar>*/, 0 /*pyrDown_gpu<schar2>*/ , 0 /*pyrDown_gpu<schar3>*/, 0 /*pyrDown_gpu<schar4>*/},
        {pyrDown_gpu<ushort>     , 0 /*pyrDown_gpu<ushort2>*/, pyrDown_gpu<ushort3>     , pyrDown_gpu<ushort4>     },
        {pyrDown_gpu<short>      , 0 /*pyrDown_gpu<short2>*/ , pyrDown_gpu<short3>      , pyrDown_gpu<short4>      },
        {0 /*pyrDown_gpu<int>*/  , 0 /*pyrDown_gpu<int2>*/   , 0 /*pyrDown_gpu<int3>*/  , 0 /*pyrDown_gpu<int4>*/  },
        {pyrDown_gpu<float>      , 0 /*pyrDown_gpu<float2>*/ , pyrDown_gpu<float3>      , pyrDown_gpu<float4>      }
    };

    GpuMat src = _src.getGpuMat();

    CV_Assert( src.depth() <= CV_32F && src.channels() <= 4 );

    const func_t func = funcs[src.depth()][src.channels() - 1];
    CV_Assert( func != 0 );

    _dst.create((src.rows + 1) / 2, (src.cols + 1) / 2, src.type());
    GpuMat dst = _dst.getGpuMat();

    func(src, dst, StreamAccessor::getStream(stream));
}
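Calling it is a one-liner; a minimal sketch, assuming the declaration supplies a default null stream (the helper name is hypothetical):

#include <opencv2/gpu/gpu.hpp>

void halve(const cv::Mat& host_img)   // hypothetical helper
{
    cv::gpu::GpuMat d_src(host_img), d_half;
    cv::gpu::pyrDown(d_src, d_half);  // Gaussian-smooth, then drop every other row/column
    // d_half.rows == (d_src.rows + 1) / 2 and d_half.cols == (d_src.cols + 1) / 2
}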
Example 4: int
int cv::gpu::countNonZero(const GpuMat& src, GpuMat& buf)
{
    using namespace ::cv::gpu::device::matrix_reductions::countnonzero;

    typedef int (*Caller)(const PtrStepSzb src, PtrStepb buf);

    static Caller multipass_callers[7] =
    {
        countNonZeroMultipassCaller<unsigned char>, countNonZeroMultipassCaller<char>,
        countNonZeroMultipassCaller<unsigned short>, countNonZeroMultipassCaller<short>,
        countNonZeroMultipassCaller<int>, countNonZeroMultipassCaller<float>, 0
    };

    static Caller singlepass_callers[7] =
    {
        countNonZeroCaller<unsigned char>, countNonZeroCaller<char>,
        countNonZeroCaller<unsigned short>, countNonZeroCaller<short>,
        countNonZeroCaller<int>, countNonZeroCaller<float>, countNonZeroCaller<double>
    };

    CV_Assert(src.depth() <= CV_64F);
    CV_Assert(src.channels() == 1);

    if (src.depth() == CV_64F)
    {
        if (!TargetArchs::builtWith(NATIVE_DOUBLE) || !DeviceInfo().supports(NATIVE_DOUBLE))
            CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
    }

    Size buf_size;
    getBufSizeRequired(src.cols, src.rows, buf_size.width, buf_size.height);
    ensureSizeIsEnough(buf_size, CV_8U, buf);

    // prefer the single-pass reduction when the device supports global atomics
    Caller* callers = multipass_callers;
    if (TargetArchs::builtWith(GLOBAL_ATOMICS) && DeviceInfo().supports(GLOBAL_ATOMICS))
        callers = singlepass_callers;

    Caller caller = callers[src.type()];   // single channel, so type() == depth()
    CV_Assert(caller != 0);

    return caller(src, buf);
}
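The buffered overload shown above lets the scratch buffer be reused across calls, avoiding a device allocation each time; a usage sketch (helper name hypothetical):

#include <opencv2/gpu/gpu.hpp>

int countMaskPixels(const cv::gpu::GpuMat& d_mask)   // hypothetical helper, CV_8UC1 input
{
    static cv::gpu::GpuMat buf;                      // grown once by ensureSizeIsEnough, then reused
    return cv::gpu::countNonZero(d_mask, buf);
}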
Example 5: void
void cv::gpu::pyrUp(const GpuMat& src, GpuMat& dst, Stream& stream)
{
    using namespace cv::gpu::device::imgproc;

    typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);

    static const func_t funcs[6][4] =
    {
        {pyrUp_gpu<uchar>      , 0 /*pyrUp_gpu<uchar2>*/ , pyrUp_gpu<uchar3>      , pyrUp_gpu<uchar4>      },
        {0 /*pyrUp_gpu<schar>*/, 0 /*pyrUp_gpu<schar2>*/ , 0 /*pyrUp_gpu<schar3>*/, 0 /*pyrUp_gpu<schar4>*/},
        {pyrUp_gpu<ushort>     , 0 /*pyrUp_gpu<ushort2>*/, pyrUp_gpu<ushort3>     , pyrUp_gpu<ushort4>     },
        {pyrUp_gpu<short>      , 0 /*pyrUp_gpu<short2>*/ , pyrUp_gpu<short3>      , pyrUp_gpu<short4>      },
        {0 /*pyrUp_gpu<int>*/  , 0 /*pyrUp_gpu<int2>*/   , 0 /*pyrUp_gpu<int3>*/  , 0 /*pyrUp_gpu<int4>*/  },
        {pyrUp_gpu<float>      , 0 /*pyrUp_gpu<float2>*/ , pyrUp_gpu<float3>      , pyrUp_gpu<float4>      }
    };

    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);

    const func_t func = funcs[src.depth()][src.channels() - 1];
    CV_Assert(func != 0);

    dst.create(src.rows * 2, src.cols * 2, src.type());

    func(src, dst, StreamAccessor::getStream(stream));
}
Example 6: void
void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
                        const GpuMat& mask, GpuMat& valBuf, GpuMat& locBuf)
{
    typedef void (*func_t)(const PtrStepSzb src, const PtrStepb mask, double* minval, double* maxval, int* minloc, int* maxloc, PtrStepb valbuf, PtrStep<unsigned int> locbuf);

#ifdef OPENCV_TINY_GPU_MODULE
    static const func_t funcs[] =
    {
        ::minMaxLoc::run<uchar>,
        0/*::minMaxLoc::run<schar>*/,
        0/*::minMaxLoc::run<ushort>*/,
        0/*::minMaxLoc::run<short>*/,
        ::minMaxLoc::run<int>,
        ::minMaxLoc::run<float>,
        0/*::minMaxLoc::run<double>*/,
    };
#else
    static const func_t funcs[] =
    {
        ::minMaxLoc::run<uchar>,
        ::minMaxLoc::run<schar>,
        ::minMaxLoc::run<ushort>,
        ::minMaxLoc::run<short>,
        ::minMaxLoc::run<int>,
        ::minMaxLoc::run<float>,
        ::minMaxLoc::run<double>,
    };
#endif

    CV_Assert( src.channels() == 1 );
    CV_Assert( mask.empty() || (mask.size() == src.size() && mask.type() == CV_8U) );

    if (src.depth() == CV_64F)
    {
        if (!deviceSupports(NATIVE_DOUBLE))
            CV_Error(CV_StsUnsupportedFormat, "The device doesn't support double");
    }

    Size valbuf_size, locbuf_size;
    ::minMaxLoc::getBufSize(src.cols, src.rows, src.elemSize(), valbuf_size.width, valbuf_size.height, locbuf_size.width, locbuf_size.height);
    ensureSizeIsEnough(valbuf_size, CV_8U, valBuf);
    ensureSizeIsEnough(locbuf_size, CV_8U, locBuf);

    const func_t func = funcs[src.depth()];
    if (!func)
        CV_Error(CV_StsUnsupportedFormat, "Unsupported combination of source and destination types");

    double temp1, temp2;
    Point temp3, temp4;
    func(src, mask, minVal ? minVal : &temp1, maxVal ? maxVal : &temp2, minLoc ? &minLoc->x : &temp3.x, maxLoc ? &maxLoc->x : &temp4.x, valBuf, locBuf);
}
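A usage sketch for the buffered overload (helper name hypothetical); any of the four output pointers may be NULL when that result is not needed, as the temp1..temp4 fallbacks above show:

#include <opencv2/gpu/gpu.hpp>

void reportRange(const cv::gpu::GpuMat& d_img)   // hypothetical helper, single-channel input
{
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::gpu::GpuMat valBuf, locBuf;              // scratch buffers, reusable across calls
    cv::gpu::minMaxLoc(d_img, &minVal, &maxVal, &minLoc, &maxLoc,
                       cv::gpu::GpuMat(), valBuf, locBuf);
}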
Example 7: process
NCVStatus process(const GpuMat& src, GpuMat& objects, float scaleStep, int minNeighbors,
                  bool findLargestObject, bool visualizeInPlace, cv::Size ncvMinSize,
                  /*out*/unsigned int& numDetections)
{
    calculateMemReqsAndAllocate(src.size());

    // wrap the existing GpuMat memory as NCV matrix/vector views (no copies)
    NCVMemPtr src_beg;
    src_beg.ptr = (void*)src.ptr<Ncv8u>();
    src_beg.memtype = NCVMemoryTypeDevice;

    NCVMemSegment src_seg;
    src_seg.begin = src_beg;
    src_seg.size = src.step * src.rows;

    NCVMatrixReuse<Ncv8u> d_src(src_seg, static_cast<int>(devProp.textureAlignment), src.cols, src.rows, static_cast<int>(src.step), true);
    ncvAssertReturn(d_src.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);

    CV_Assert(objects.rows == 1);

    NCVMemPtr objects_beg;
    objects_beg.ptr = (void*)objects.ptr<NcvRect32u>();
    objects_beg.memtype = NCVMemoryTypeDevice;

    NCVMemSegment objects_seg;
    objects_seg.begin = objects_beg;
    objects_seg.size = objects.step * objects.rows;

    NCVVectorReuse<NcvRect32u> d_rects(objects_seg, objects.cols);
    ncvAssertReturn(d_rects.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);

    NcvSize32u roi;
    roi.width = d_src.width();
    roi.height = d_src.height();

    NcvSize32u winMinSize(ncvMinSize.width, ncvMinSize.height);

    Ncv32u flags = 0;
    flags |= findLargestObject ? NCVPipeObjDet_FindLargestObject : 0;
    flags |= visualizeInPlace ? NCVPipeObjDet_VisualizeInPlace : 0;

    ncvStat = ncvDetectObjectsMultiScale_device(
        d_src, roi, d_rects, numDetections, haar, *h_haarStages,
        *d_haarStages, *d_haarNodes, *d_haarFeatures,
        winMinSize,
        minNeighbors,
        scaleStep, 1,
        flags,
        *gpuAllocator, *cpuAllocator, devProp, 0);
    ncvAssertReturnNcvStat(ncvStat);
    ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);

    return NCV_SUCCESS;
}
Example 8: ensureSizeIsEnough
void cv::gpu::ORB_GPU::computeDescriptors(GpuMat& descriptors)
{
    using namespace cv::gpu::device::orb;

    int nAllkeypoints = 0;

    for (int level = 0; level < nLevels_; ++level)
        nAllkeypoints += keyPointsCount_[level];

    if (nAllkeypoints == 0)
    {
        descriptors.release();
        return;
    }

    ensureSizeIsEnough(nAllkeypoints, descriptorSize(), CV_8UC1, descriptors);

    int offset = 0;

    for (int level = 0; level < nLevels_; ++level)
    {
        if (keyPointsCount_[level] == 0)
            continue;

        GpuMat descRange = descriptors.rowRange(offset, offset + keyPointsCount_[level]);

        if (blurForDescriptor)
        {
            // preprocess the resized image
            ensureSizeIsEnough(imagePyr_[level].size(), imagePyr_[level].type(), buf_);
            blurFilter->apply(imagePyr_[level], buf_, Rect(0, 0, imagePyr_[level].cols, imagePyr_[level].rows));
        }

        computeOrbDescriptor_gpu(blurForDescriptor ? buf_ : imagePyr_[level], keyPointsPyr_[level].ptr<short2>(0), keyPointsPyr_[level].ptr<float>(2),
            keyPointsCount_[level], pattern_.ptr<int>(0), pattern_.ptr<int>(1), descRange, descriptorSize(), WTA_K_, 0);

        offset += keyPointsCount_[level];
    }
}
Example 9: coeffsMat
void cv::gpu::buildWarpPerspectiveMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream)
{
    using namespace cv::gpu::device::imgproc;

    CV_Assert(M.rows == 3 && M.cols == 3);

    xmap.create(dsize, CV_32FC1);
    ymap.create(dsize, CV_32FC1);

    // the kernel needs the dst->src mapping, so invert M unless the caller
    // already passed the inverse transform
    float coeffs[3 * 3];
    Mat coeffsMat(3, 3, CV_32F, (void*)coeffs);

    if (inverse)
        M.convertTo(coeffsMat, coeffsMat.type());
    else
    {
        cv::Mat iM;
        invert(M, iM);
        iM.convertTo(coeffsMat, coeffsMat.type());
    }

    buildWarpPerspectiveMaps_gpu(coeffs, xmap, ymap, StreamAccessor::getStream(stream));
}
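Precomputing the maps pays off when the same homography is applied to many frames, since the inversion and float conversion happen once; a sketch assuming the cv::gpu::remap from the same module (the wrapper is hypothetical):

#include <opencv2/gpu/gpu.hpp>

void warpMany(const cv::gpu::GpuMat& d_src, cv::gpu::GpuMat& d_dst, const cv::Mat& M) // hypothetical
{
    cv::gpu::GpuMat xmap, ymap;
    cv::gpu::buildWarpPerspectiveMaps(M, false /*inverse*/, d_src.size(), xmap, ymap);
    cv::gpu::remap(d_src, d_dst, xmap, ymap, cv::INTER_LINEAR);  // reuse xmap/ymap per frame
}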
Example 10: throw_no_cuda
GpuMat cv::cuda::getInputMat(InputArray _src, Stream& stream)
{
    GpuMat src;

#ifndef HAVE_CUDA
    (void) _src;
    (void) stream;
    throw_no_cuda();
#else
    if (_src.kind() == _InputArray::CUDA_GPU_MAT)
    {
        src = _src.getGpuMat();   // already on the device: no copy
    }
    else if (!_src.empty())
    {
        // host input: borrow a buffer from the stream's pool and upload asynchronously
        BufferPool pool(stream);
        src = pool.getBuffer(_src.size(), _src.type());
        src.upload(_src, stream);
    }
#endif

    return src;
}
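getInputMat is half of a wrapper pattern; a sketch of how a cv::cuda function typically pairs it with the companion helpers getOutputMat and syncOutput from the same private header (the operation itself is a hypothetical stand-in for a real kernel):

#include <opencv2/core/private.cuda.hpp>

void myCudaOp(cv::InputArray _src, cv::OutputArray _dst, cv::cuda::Stream& stream) // hypothetical
{
    using namespace cv::cuda;

    GpuMat src = getInputMat(_src, stream);   // uploads via the stream's BufferPool if needed
    GpuMat dst = getOutputMat(_dst, src.rows, src.cols, src.type(), stream);

    src.copyTo(dst, stream);                  // stand-in for the real kernel launch

    syncOutput(dst, _dst, stream);            // downloads if _dst is a host array
}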
Example 11: void
void cv::gpu::BFMatcher_GPU::matchSingle(const GpuMat& query, const GpuMat& train,
                                         GpuMat& trainIdx, GpuMat& distance,
                                         const GpuMat& mask, Stream& stream)
{
    if (query.empty() || train.empty())
        return;

    using namespace cv::gpu::device::bf_match;

    typedef void (*caller_t)(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
                             const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
                             int cc, cudaStream_t stream);

    static const caller_t callersL1[] =
    {
        matchL1_gpu<unsigned char>, 0/*matchL1_gpu<signed char>*/,
        matchL1_gpu<unsigned short>, matchL1_gpu<short>,
        matchL1_gpu<int>, matchL1_gpu<float>
    };

    static const caller_t callersL2[] =
    {
        0/*matchL2_gpu<unsigned char>*/, 0/*matchL2_gpu<signed char>*/,
        0/*matchL2_gpu<unsigned short>*/, 0/*matchL2_gpu<short>*/,
        0/*matchL2_gpu<int>*/, matchL2_gpu<float>
    };

    static const caller_t callersHamming[] =
    {
        matchHamming_gpu<unsigned char>, 0/*matchHamming_gpu<signed char>*/,
        matchHamming_gpu<unsigned short>, 0/*matchHamming_gpu<short>*/,
        matchHamming_gpu<int>, 0/*matchHamming_gpu<float>*/
    };

    CV_Assert(query.channels() == 1 && query.depth() < CV_64F);
    CV_Assert(train.cols == query.cols && train.type() == query.type());
    CV_Assert(norm == NORM_L1 || norm == NORM_L2 || norm == NORM_HAMMING);

    const caller_t* callers = norm == NORM_L1 ? callersL1 : norm == NORM_L2 ? callersL2 : callersHamming;

    const int nQuery = query.rows;

    ensureSizeIsEnough(1, nQuery, CV_32S, trainIdx);
    ensureSizeIsEnough(1, nQuery, CV_32F, distance);

    caller_t func = callers[query.depth()];
    CV_Assert(func != 0);

    DeviceInfo info;
    int cc = info.majorVersion() * 10 + info.minorVersion();   // compute capability, e.g. 30

    func(query, train, mask, trainIdx, distance, cc, StreamAccessor::getStream(stream));
}
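matchSingle leaves the results on the device; the class's static matchDownload turns them into std::vector<DMatch>. A minimal sketch, assuming the OpenCV 2.4 gpu API (the wrapper is hypothetical):

#include <vector>
#include <opencv2/gpu/gpu.hpp>

void matchDescriptors(const cv::gpu::GpuMat& d_query, const cv::gpu::GpuMat& d_train,
                      std::vector<cv::DMatch>& matches)   // hypothetical helper
{
    cv::gpu::BFMatcher_GPU matcher(cv::NORM_L2);          // norm must suit the descriptor type
    cv::gpu::GpuMat trainIdx, distance;
    matcher.matchSingle(d_query, d_train, trainIdx, distance);

    cv::gpu::BFMatcher_GPU::matchDownload(trainIdx, distance, matches);
}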
Example 12: faceDetect_GPU
int faceDetect_GPU(Mat &image, vector<Mat> &faces, int init)
{
    static CascadeClassifier_GPU cascade_gpu;
    static HANDLE init_mutex;

    // one-time initialization pass: create the mutex and load the cascade
    if (init == 1)
    {
        init_mutex = CreateMutex(NULL, FALSE, NULL);
        cascade_gpu.load(string(CASCADE_PATH));
        return 0;
    }

    Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);
    equalizeHist(gray, gray);

    GpuMat image_gpu(gray);
    GpuMat objbuf;

    WaitForSingleObject(init_mutex, INFINITE);
    double start = GetTickCount();
    int detections_number = cascade_gpu.detectMultiScale(image_gpu, objbuf, 1.1, 3);
    cout << "Face Detect GPU Time : " << GetTickCount() - start << "ms" << endl;
    ReleaseMutex(init_mutex);

    Mat obj_host;
    objbuf.colRange(0, detections_number).download(obj_host);

    Rect* det_faces = obj_host.ptr<Rect>();
    for (int i = 0; i < detections_number; ++i)
    {
        Mat face = gray(det_faces[i]);
        faces.push_back(face);
    }
    return 0;
}
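The init parameter doubles as a one-time setup switch, so under that reading a caller invokes the function twice (the image and path are illustrative):

std::vector<cv::Mat> faces;
cv::Mat frame = cv::imread("frame.jpg");   // hypothetical input
faceDetect_GPU(frame, faces, 1);           // once at startup: create the mutex, load the cascade
faceDetect_GPU(frame, faces, 0);           // per frame: detect and collect grayscale face crops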
Example 13: DeviceInfo
void cv::gpu::Stream::enqueueMemSet(GpuMat& src, Scalar s)
{
    CV_Assert((src.depth() != CV_64F) ||
              (TargetArchs::builtWith(NATIVE_DOUBLE) && DeviceInfo().supports(NATIVE_DOUBLE)));

    // all-zero fill: one cudaMemset2DAsync regardless of type
    if (s[0] == 0.0 && s[1] == 0.0 && s[2] == 0.0 && s[3] == 0.0)
    {
        cudaSafeCall( cudaMemset2DAsync(src.data, src.step, 0, src.cols * src.elemSize(), src.rows, Impl::getStream(impl)) );
        return;
    }

    // 8-bit data with identical channel values can also take the fast memset path
    if (src.depth() == CV_8U)
    {
        int cn = src.channels();

        if (cn == 1 || (cn == 2 && s[0] == s[1]) || (cn == 3 && s[0] == s[1] && s[0] == s[2]) || (cn == 4 && s[0] == s[1] && s[0] == s[2] && s[0] == s[3]))
        {
            int val = saturate_cast<uchar>(s[0]);
            cudaSafeCall( cudaMemset2DAsync(src.data, src.step, val, src.cols * src.elemSize(), src.rows, Impl::getStream(impl)) );
            return;
        }
    }

    // general case: a kernel-based setTo on this stream
    setTo(src, s, Impl::getStream(impl));
}
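A short usage sketch (sizes illustrative): the call only enqueues the fill, so the caller has to synchronize before reading the result:

cv::gpu::Stream stream;
cv::gpu::GpuMat d_img(480, 640, CV_8UC3);
stream.enqueueMemSet(d_img, cv::Scalar::all(0));  // takes the fast cudaMemset2DAsync path above
stream.waitForCompletion();                       // block until the fill has finished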
Example 14: void
Scalar cv::gpu::sqrSum(const GpuMat& src, GpuMat& buf)
{
    using namespace cv::gpu::device::matrix_reductions::sum;

    typedef void (*Caller)(const PtrStepSzb, PtrStepb, double*, int);

    static Caller multipass_callers[] =
    {
        sqrSumMultipassCaller<unsigned char>, sqrSumMultipassCaller<char>,
        sqrSumMultipassCaller<unsigned short>, sqrSumMultipassCaller<short>,
        sqrSumMultipassCaller<int>, sqrSumMultipassCaller<float>
    };

    static Caller singlepass_callers[7] =
    {
        sqrSumCaller<unsigned char>, sqrSumCaller<char>,
        sqrSumCaller<unsigned short>, sqrSumCaller<short>,
        sqrSumCaller<int>, sqrSumCaller<float>
    };

    CV_Assert(src.depth() <= CV_32F);

    Caller* callers = multipass_callers;
    if (TargetArchs::builtWith(GLOBAL_ATOMICS) && DeviceInfo().supports(GLOBAL_ATOMICS))
        callers = singlepass_callers;

    Size buf_size;
    getBufSizeRequired(src.cols, src.rows, src.channels(), buf_size.width, buf_size.height);
    ensureSizeIsEnough(buf_size, CV_8U, buf);

    Caller caller = callers[src.depth()];

    double result[4];
    caller(src, buf, result, src.channels());

    return Scalar(result[0], result[1], result[2], result[3]);
}
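Since sqrSum returns the per-channel sum of squares, an L2 norm falls out directly; a sketch (helper name hypothetical):

#include <cmath>
#include <opencv2/gpu/gpu.hpp>

double l2Norm(const cv::gpu::GpuMat& d_src)   // hypothetical helper, single-channel input
{
    cv::gpu::GpuMat buf;                      // scratch buffer, reusable across calls
    cv::Scalar s = cv::gpu::sqrSum(d_src, buf);
    return std::sqrt(s[0]);                   // ||src||_2 from the sum of squares
}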
Example 15: detect
void _Flow::detect(void)
{
    Frame* pGray;
    Frame* pNextFrame;
    Frame* pPrevFrame;
    GpuMat* pPrev;
    GpuMat* pNext;
    GpuMat GMat;
    GpuMat pGMat[2];

    if (m_pStream == NULL)
        return;

    pGray = m_pStream->getGrayFrame();
    if (pGray->empty())
        return;

    // skip work unless a new frame has arrived
    pNextFrame = m_pGrayFrames->getLastFrame();
    if (pGray->getFrameID() <= pNextFrame->getFrameID())
        return;

    m_pGrayFrames->updateFrameIndex();
    pNextFrame = m_pGrayFrames->getLastFrame();
    pPrevFrame = m_pGrayFrames->getPrevFrame();
    pNextFrame->getResizedOf(pGray, m_width, m_height);

    pPrev = pPrevFrame->getGMat();
    pNext = pNextFrame->getGMat();
    if (pPrev->empty())
        return;
    if (pNext->empty())
        return;
    if (pPrev->size() != pNext->size())
        return;

    m_pFarn->calc(*pPrev, *pNext, m_GFlowMat);

    // Generate depth map from flow
    if (m_bDepth == 0)
        return;

    // |dx| + |dy|, scaled to an 8-bit pseudo-depth image
    cuda::abs(m_GFlowMat, GMat);
    cuda::split(GMat, pGMat);
    cuda::add(pGMat[0], pGMat[1], GMat);
    cuda::multiply(GMat, Scalar(100), pGMat[1]);
    pGMat[1].convertTo(*(m_pDepth->getGMat()), CV_8UC1);
    m_pDepth->updatedGMat();

    // m_flowMax = cuda::sum(fGMat)[0] / (fGMat.cols*fGMat.rows);
    // fInterval = 1.0/m_flowMax;
    // fInterval *= 50.0;
    // cuda::min(fGMat,Scalar(m_flowMax),pGMat[0]);
    // cuda::multiply(pGMat[0],Scalar(fInterval),fGMat);
    // cv::cuda::cvtColor(depthGMat, idxGMat, CV_GRAY2BGR);
    // m_pGpuLUT->transform(idxGMat,segGMat);
}
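m_pFarn and the Frame/stream classes are application-specific and not defined in the snippet; for reference, a minimal sketch of the stock cv::cuda Farneback interface the calc call appears to come from (OpenCV 3.x; the helper is hypothetical and inputs are assumed CV_8UC1):

#include <opencv2/cudaoptflow.hpp>

void denseFlow(const cv::cuda::GpuMat& d_prev, const cv::cuda::GpuMat& d_next,
               cv::cuda::GpuMat& d_flow)   // hypothetical helper
{
    cv::Ptr<cv::cuda::FarnebackOpticalFlow> farn = cv::cuda::FarnebackOpticalFlow::create();
    farn->calc(d_prev, d_next, d_flow);    // d_flow: CV_32FC2, per-pixel (dx, dy)
}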