This article collects typical usage examples of the C++ InputArray::cols method. If you have been wondering how exactly InputArray::cols is used, what it is for, or what calling it looks like in real code, the curated examples below may help. You can also explore further usage examples of its containing class, InputArray.
The following presents 8 code examples of the InputArray::cols method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
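Before the examples, a note on the API: unlike cv::Mat, where rows and cols are plain data members, cv::InputArray (an alias for const cv::_InputArray&) exposes the dimensions through member functions, rows() and cols(). A minimal sketch (not one of the examples below) of a function accepting any array-like input:

#include <opencv2/core.hpp>
#include <iostream>

// Prints the size of whatever array-like object the caller passes in
// (Mat, UMat, std::vector, ...). Note the parentheses: cols() is a
// method on InputArray, while cols is a field on Mat.
static void printSize(cv::InputArray arr)
{
    std::cout << arr.rows() << " x " << arr.cols() << std::endl;
}

int main()
{
    cv::Mat m(3, 4, CV_8UC1);
    printSize(m); // prints "3 x 4"
    return 0;
}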
Example 1: assert
void IPPE::PoseSolver::rot2vec(InputArray _R, OutputArray _r)
{
    assert(_R.type() == CV_64FC1);
    assert(_R.rows() == 3);
    assert(_R.cols() == 3);

    _r.create(3, 1, CV_64FC1);

    cv::Mat R = _R.getMat();
    cv::Mat rvec = _r.getMat();

    double trace = R.at<double>(0, 0) + R.at<double>(1, 1) + R.at<double>(2, 2);
    double w_norm = acos((trace - 1.0) / 2.0);
    double eps = std::numeric_limits<float>::epsilon();

    if (w_norm < eps) // rotation is the identity
    {
        rvec.setTo(0);
    }
    else {
        // Safe to divide here: sin(w_norm) != 0 because w_norm >= eps.
        double d = 1 / (2 * sin(w_norm)) * w_norm;
        double c0 = R.at<double>(2, 1) - R.at<double>(1, 2);
        double c1 = R.at<double>(0, 2) - R.at<double>(2, 0);
        double c2 = R.at<double>(1, 0) - R.at<double>(0, 1);
        rvec.at<double>(0) = d * c0;
        rvec.at<double>(1) = d * c1;
        rvec.at<double>(2) = d * c2;
    }
}
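For reference, the conversion implemented above is the inverse of Rodrigues' rotation formula (the logarithm map of SO(3)); in 1-based matrix indices it reads

    \theta = \arccos\!\left( \frac{\operatorname{tr}(R) - 1}{2} \right), \qquad
    \mathbf{r} = \frac{\theta}{2 \sin \theta}
    \begin{pmatrix} R_{32} - R_{23} \\ R_{13} - R_{31} \\ R_{21} - R_{12} \end{pmatrix},

which is singular at \theta = 0 (handled by the identity branch above) and ill-conditioned as \theta approaches \pi, a case this example does not treat specially.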
Example 2: height
GuidedFilterRefImpl::GuidedFilterRefImpl(InputArray _guide, int _rad, double _eps) :
    height(_guide.rows()), width(_guide.cols()), rad(_rad), chNum(_guide.channels()), eps(_eps)
{
    Mat guide = _guide.getMat();
    CV_Assert(chNum > 0 && chNum <= 3);

    channels = new Mat[chNum];
    exps     = new Mat[chNum];
    A        = new Mat *[chNum];
    vars     = new Mat *[chNum];

    for (int i = 0; i < chNum; ++i)
    {
        A[i]    = new Mat[chNum];
        vars[i] = new Mat[chNum];
    }

    split(guide, channels);

    for (int i = 0; i < chNum; ++i)
    {
        channels[i].convertTo(channels[i], CV_32F);
        meanFilter(channels[i], exps[i]);
    }

    computeCovGuide();
    computeCovGuideInv();
}
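This reference implementation precomputes the per-channel means (exps) and the chNum x chNum covariance structures (A, vars) that the guided filter needs. Callers would normally not construct it directly; a sketch of the equivalent public API, assuming the opencv_contrib ximgproc module is available:

#include <opencv2/core.hpp>
#include <opencv2/ximgproc.hpp>

int main()
{
    cv::Mat guide(64, 64, CV_8UC3, cv::Scalar(0, 0, 0));
    cv::Mat src(64, 64, CV_8UC1, cv::Scalar(0));
    cv::Mat dst;

    // radius and eps play the same roles as _rad and _eps above.
    cv::Ptr<cv::ximgproc::GuidedFilter> gf =
        cv::ximgproc::createGuidedFilter(guide, /*radius=*/8, /*eps=*/100.0);
    gf->filter(src, dst);
    return 0;
}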
Example 3: p
double IPPE::PoseSolver::meanSceneDepth(InputArray _objectPoints, InputArray _rvec, InputArray _tvec)
{
    // Only CV_64FC3 is supported here: the loop below reads Point3d elements.
    assert(_objectPoints.type() == CV_64FC3);

    size_t n = _objectPoints.rows() * _objectPoints.cols();
    Mat R;
    Mat q;
    Rodrigues(_rvec, R);
    double zBar = 0;

    for (size_t i = 0; i < n; i++) {
        cv::Mat p(_objectPoints.getMat().at<Point3d>(i));
        q = R * p + _tvec.getMat();

        double z;
        if (q.depth() == CV_64F) {
            z = q.at<double>(2);
        }
        else {
            z = static_cast<double>(q.at<float>(2));
        }
        zBar += z;

        //if (z <= 0) {
        //    std::cout << "Warning: object point " << i << " projects behind the camera! This should not be allowed. " << std::endl;
        //}
    }
    return zBar / static_cast<double>(n);
}
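In equation form, the function returns the mean depth of the object points in camera coordinates:

    \bar{z} = \frac{1}{n} \sum_{i=1}^{n} \mathbf{e}_3^{\top} \, ( R \mathbf{p}_i + \mathbf{t} ),

where \mathbf{e}_3 selects the z component, R = \mathrm{Rodrigues}(\texttt{rvec}) and \mathbf{t} = \texttt{tvec}. Note how rows() and cols() are multiplied to get the point count n, so the points may be laid out either as a row or as a column of the input array.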
Example 4: result_
static bool convolve_32F(InputArray _image, InputArray _templ, OutputArray _result)
{
    _result.create(_image.rows() - _templ.rows() + 1, _image.cols() - _templ.cols() + 1, CV_32F);

    if (_image.channels() == 1)
        return(convolve_dft(_image, _templ, _result));
    else
    {
        UMat image = _image.getUMat();
        UMat templ = _templ.getUMat();
        // Flatten the channels into extra columns, convolve once, then pull
        // the first channel of each pixel out of the packed result.
        UMat result_(image.rows - templ.rows + 1, (image.cols - templ.cols + 1) * image.channels(), CV_32F);
        bool ok = convolve_dft(image.reshape(1), templ.reshape(1), result_);
        if (!ok)
            return false;
        return extractFirstChannel_32F(result_, _result, _image.channels());
    }
}
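The multi-channel branch relies on reshape(1), which reinterprets an n-channel matrix as a single-channel one with n times as many columns, without copying data. A standalone sketch of that behavior:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat img(4, 5, CV_32FC3, cv::Scalar(1, 2, 3));
    cv::Mat flat = img.reshape(1); // still 4 rows, now 5 * 3 = 15 columns
    std::cout << flat.rows << " x " << flat.cols << std::endl; // prints "4 x 15"
    return 0;
}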
Example 5: kernel
static bool ocl_sepFilter3x3_8UC1(InputArray _src, OutputArray _dst, int ddepth,
                                  InputArray _kernelX, InputArray _kernelY, double delta, int borderType)
{
    const ocl::Device & dev = ocl::Device::getDefault();
    int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);

    if ( !(dev.isIntel() && (type == CV_8UC1) && (ddepth == CV_8U) &&
           (_src.offset() == 0) && (_src.step() % 4 == 0) &&
           (_src.cols() % 16 == 0) && (_src.rows() % 2 == 0)) )
        return false;

    Mat kernelX = _kernelX.getMat().reshape(1, 1);
    if (kernelX.cols % 2 != 1)
        return false;
    Mat kernelY = _kernelY.getMat().reshape(1, 1);
    if (kernelY.cols % 2 != 1)
        return false;

    if (ddepth < 0)
        ddepth = sdepth;

    Size size = _src.size();
    size_t globalsize[2] = { 0, 0 };
    size_t localsize[2] = { 0, 0 };

    globalsize[0] = size.width / 16;
    globalsize[1] = size.height / 2;

    const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
    char build_opts[1024];
    sprintf(build_opts, "-D %s %s%s", borderMap[borderType],
            ocl::kernelToStr(kernelX, CV_32F, "KERNEL_MATRIX_X").c_str(),
            ocl::kernelToStr(kernelY, CV_32F, "KERNEL_MATRIX_Y").c_str());

    ocl::Kernel kernel("sepFilter3x3_8UC1_cols16_rows2", cv::ocl::imgproc::sepFilter3x3_oclsrc, build_opts);
    if (kernel.empty())
        return false;

    UMat src = _src.getUMat();
    _dst.create(size, CV_MAKETYPE(ddepth, cn));
    if (!(_dst.offset() == 0 && _dst.step() % 4 == 0))
        return false;
    UMat dst = _dst.getUMat();

    int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
    idxArg = kernel.set(idxArg, (int)src.step);
    idxArg = kernel.set(idxArg, ocl::KernelArg::PtrWriteOnly(dst));
    idxArg = kernel.set(idxArg, (int)dst.step);
    idxArg = kernel.set(idxArg, (int)dst.rows);
    idxArg = kernel.set(idxArg, (int)dst.cols);
    idxArg = kernel.set(idxArg, static_cast<float>(delta));

    return kernel.run(2, globalsize, (localsize[0] == 0) ? NULL : localsize, false);
}
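Each work-item of the "sepFilter3x3_8UC1_cols16_rows2" kernel covers a 16-column by 2-row tile, hence globalsize = {width/16, height/2} and the divisibility checks at the top. A hypothetical host-side helper (illustrative only, restating the shape-related gates minus the device and offset checks):

#include <opencv2/core.hpp>

// Hypothetical helper, not part of OpenCV: mirrors the conditions under
// which the OpenCL fast path above can run, for a plain (offset-free) Mat.
static bool fitsSepFilter3x3FastPath(const cv::Mat& src)
{
    return src.type() == CV_8UC1
        && src.cols % 16 == 0
        && src.rows % 2 == 0
        && src.step % 4 == 0;
}

int main()
{
    cv::Mat img(480, 640, CV_8UC1);
    return fitsSepFilter3x3FastPath(img) ? 0 : 1; // 640 % 16 == 0 and 480 % 2 == 0
}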
Example 6: UZero
void IPPE::PoseSolver::makeCanonicalObjectPoints(InputArray _objectPoints, OutputArray _canonicalObjPoints, OutputArray _MmodelPoints2Canonical)
{
    int objType = _objectPoints.type();
    assert((objType == CV_64FC3) || (objType == CV_32FC3));

    size_t n = _objectPoints.rows() * _objectPoints.cols();

    _canonicalObjPoints.create(1, static_cast<int>(n), CV_64FC2);
    _MmodelPoints2Canonical.create(4, 4, CV_64FC1);

    cv::Mat objectPoints = _objectPoints.getMat();
    cv::Mat canonicalObjPoints = _canonicalObjPoints.getMat();

    cv::Mat UZero(3, static_cast<int>(n), CV_64FC1);

    double xBar = 0;
    double yBar = 0;
    double zBar = 0;
    bool isOnZPlane = true;
    for (size_t i = 0; i < n; i++) {
        double x, y, z;
        if (objType == CV_32FC3) {
            x = static_cast<double>(objectPoints.at<Vec3f>(i)[0]);
            y = static_cast<double>(objectPoints.at<Vec3f>(i)[1]);
            z = static_cast<double>(objectPoints.at<Vec3f>(i)[2]);
        }
        else {
            x = objectPoints.at<Vec3d>(i)[0];
            y = objectPoints.at<Vec3d>(i)[1];
            z = objectPoints.at<Vec3d>(i)[2];
        }
        // The planarity test applies to both input types.
        if (abs(z) > IPPE_SMALL) {
            isOnZPlane = false;
        }

        xBar += x;
        yBar += y;
        zBar += z;

        UZero.at<double>(0, i) = x;
        UZero.at<double>(1, i) = y;
        UZero.at<double>(2, i) = z;
    }
    xBar = xBar / (double)n;
    yBar = yBar / (double)n;
    zBar = zBar / (double)n;

    for (size_t i = 0; i < n; i++) {
        UZero.at<double>(0, i) -= xBar;
        UZero.at<double>(1, i) -= yBar;
        UZero.at<double>(2, i) -= zBar;
    }

    cv::Mat MCenter(4, 4, CV_64FC1);
    MCenter.setTo(0);
    MCenter.at<double>(0, 0) = 1;
    MCenter.at<double>(1, 1) = 1;
    MCenter.at<double>(2, 2) = 1;
    MCenter.at<double>(3, 3) = 1;
    MCenter.at<double>(0, 3) = -xBar;
    MCenter.at<double>(1, 3) = -yBar;
    MCenter.at<double>(2, 3) = -zBar;

    if (isOnZPlane) {
        // MmodelPoints2Canonical is given by MCenter
        MCenter.copyTo(_MmodelPoints2Canonical);
        for (size_t i = 0; i < n; i++) {
            canonicalObjPoints.at<Vec2d>(i)[0] = UZero.at<double>(0, i);
            canonicalObjPoints.at<Vec2d>(i)[1] = UZero.at<double>(1, i);
        }
    }
    else {
        cv::Mat UZeroAligned(3, static_cast<int>(n), CV_64FC1);
        cv::Mat R; // rotation that rotates objectPoints to the plane z=0

        if (!computeObjextSpaceR3Pts(objectPoints, R))
        {
            // We could not compute R, probably because there is a duplicate point
            // in {objectPoints(0), objectPoints(1), objectPoints(2)}, so we
            // compute it with the SVD (which is slower):
            computeObjextSpaceRSvD(UZero, R);
        }

        UZeroAligned = R * UZero;

        for (size_t i = 0; i < n; i++) {
            canonicalObjPoints.at<Vec2d>(i)[0] = UZeroAligned.at<double>(0, i);
            canonicalObjPoints.at<Vec2d>(i)[1] = UZeroAligned.at<double>(1, i);
            assert(abs(UZeroAligned.at<double>(2, i)) <= IPPE_SMALL);
        }

        cv::Mat MRot(4, 4, CV_64FC1);
        MRot.setTo(0);
        MRot.at<double>(3, 3) = 1;

        R.copyTo(MRot.colRange(0, 3).rowRange(0, 3));
        cv::Mat Mb = MRot * MCenter;
        Mb.copyTo(_MmodelPoints2Canonical);
    }
}
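In summary, the function centers the points at their centroid and, when they are not already coplanar with z = 0, rotates them into that plane. In formulas, each canonical point is

    \mathbf{u}_i = R \, ( \mathbf{p}_i - \bar{\mathbf{p}} ), \qquad
    M_{\mathrm{modelPoints2Canonical}} = M_{\mathrm{rot}} \, M_{\mathrm{center}},

where \bar{\mathbf{p}} is the centroid, R is the identity in the planar branch, and only the (x, y) components of each \mathbf{u}_i are stored in the CV_64FC2 output.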
Example 7: openvx_sobel
static bool openvx_sobel(InputArray _src, OutputArray _dst,
                         int dx, int dy, int ksize,
                         double scale, double delta, int borderType)
{
    if (_src.type() != CV_8UC1 || _dst.type() != CV_16SC1 ||
        ksize != 3 || scale != 1.0 || delta != 0.0 ||
        (dx | dy) != 1 || (dx + dy) != 1 ||
        _src.cols() < ksize || _src.rows() < ksize ||
        ovx::skipSmallImages<VX_KERNEL_SOBEL_3x3>(_src.cols(), _src.rows())
        )
        return false;

    Mat src = _src.getMat();
    Mat dst = _dst.getMat();

    if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
        return false; // process isolated borders only

    vx_enum border;
    switch (borderType & ~BORDER_ISOLATED)
    {
    case BORDER_CONSTANT:
        border = VX_BORDER_CONSTANT;
        break;
    case BORDER_REPLICATE:
        // border = VX_BORDER_REPLICATE;
        // break;
    default:
        return false;
    }

    try
    {
        ivx::Context ctx = ovx::getOpenVXContext();
        //if ((vx_size)ksize > ctx.convolutionMaxDimension())
        //    return false;

        Mat a;
        if (dst.data != src.data)
            a = src;
        else
            src.copyTo(a);

        ivx::Image
            ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
                ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
            ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_S16,
                ivx::Image::createAddressing(dst.cols, dst.rows, 2, (vx_int32)(dst.step)), dst.data);

        // ATTENTION: changing the VX_CONTEXT_IMMEDIATE_BORDER attribute could lead to strange
        // issues in multi-threaded environments, since the OpenVX standard says nothing about
        // thread-safety for now
        ivx::border_t prevBorder = ctx.immediateBorder();
        ctx.setImmediateBorder(border, (vx_uint8)(0));
        if (dx)
            ivx::IVX_CHECK_STATUS(vxuSobel3x3(ctx, ia, ib, NULL));
        else
            ivx::IVX_CHECK_STATUS(vxuSobel3x3(ctx, ia, NULL, ib));
        ctx.setImmediateBorder(prevBorder);
    }
    catch (ivx::RuntimeError & e)
    {
        VX_DbgThrow(e.what());
    }
    catch (ivx::WrapperError & e)
    {
        VX_DbgThrow(e.what());
    }

    return true;
}
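For orientation: this is an OpenVX-accelerated path behind the public cv::Sobel call. A hedged sketch of a call that satisfies its gates (8-bit single-channel source, CV_16S destination, ksize = 3, scale = 1, delta = 0, exactly one of dx/dy equal to 1, and BORDER_CONSTANT); whether this path is actually taken depends on the build having OpenVX support and on the small-image check:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat src(480, 640, CV_8UC1, cv::Scalar(128));
    cv::Mat grad_x;
    // Parameters chosen to match the checks in openvx_sobel above.
    cv::Sobel(src, grad_x, CV_16S, 1, 0, 3, 1.0, 0.0, cv::BORDER_CONSTANT);
    return 0;
}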
Example 8: k
static bool ocl_Laplacian5(InputArray _src, OutputArray _dst,
                           const Mat & kd, const Mat & ks, double scale, double delta,
                           int borderType, int depth, int ddepth)
{
    const size_t tileSizeX = 16;
    const size_t tileSizeYmin = 8;

    const ocl::Device dev = ocl::Device::getDefault();

    int stype = _src.type();
    int sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype), esz = CV_ELEM_SIZE(stype);

    bool doubleSupport = dev.doubleFPConfig() > 0;
    if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
        return false;

    Mat kernelX = kd.reshape(1, 1);
    if (kernelX.cols % 2 != 1)
        return false;
    Mat kernelY = ks.reshape(1, 1);
    if (kernelY.cols % 2 != 1)
        return false;
    CV_Assert(kernelX.cols == kernelY.cols);

    size_t wgs = dev.maxWorkGroupSize();
    size_t lmsz = dev.localMemSize();
    size_t src_step = _src.step(), src_offset = _src.offset();
    const size_t tileSizeYmax = wgs / tileSizeX;

    // workaround for Nvidia: a 3-channel vector type takes 4*elem_size in local memory
    int loc_mem_cn = dev.vendorID() == ocl::Device::VENDOR_NVIDIA && cn == 3 ? 4 : cn;

    if (((src_offset % src_step) % esz == 0) &&
        (
         (borderType == BORDER_CONSTANT || borderType == BORDER_REPLICATE) ||
         ((borderType == BORDER_REFLECT || borderType == BORDER_WRAP || borderType == BORDER_REFLECT_101) &&
          (_src.cols() >= (int) (kernelX.cols + tileSizeX) && _src.rows() >= (int) (kernelY.cols + tileSizeYmax)))
        ) &&
        (tileSizeX * tileSizeYmin <= wgs) &&
        (LAPLACIAN_LOCAL_MEM(tileSizeX, tileSizeYmin, kernelX.cols, loc_mem_cn * 4) <= lmsz)
       )
    {
        Size size = _src.size(), wholeSize;
        Point origin;
        int dtype = CV_MAKE_TYPE(ddepth, cn);
        int wdepth = CV_32F;

        size_t tileSizeY = tileSizeYmax;
        while ((tileSizeX * tileSizeY > wgs) || (LAPLACIAN_LOCAL_MEM(tileSizeX, tileSizeY, kernelX.cols, loc_mem_cn * 4) > lmsz))
        {
            tileSizeY /= 2;
        }
        size_t lt2[2] = { tileSizeX, tileSizeY };
        size_t gt2[2] = { lt2[0] * (1 + (size.width - 1) / lt2[0]), lt2[1] };

        char cvt[2][40];
        const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP",
                                           "BORDER_REFLECT_101" };

        String opts = cv::format("-D BLK_X=%d -D BLK_Y=%d -D RADIUS=%d%s%s"
                                 " -D convertToWT=%s -D convertToDT=%s"
                                 " -D %s -D srcT1=%s -D dstT1=%s -D WT1=%s"
                                 " -D srcT=%s -D dstT=%s -D WT=%s"
                                 " -D CN=%d ",
                                 (int)lt2[0], (int)lt2[1], kernelX.cols / 2,
                                 ocl::kernelToStr(kernelX, wdepth, "KERNEL_MATRIX_X").c_str(),
                                 ocl::kernelToStr(kernelY, wdepth, "KERNEL_MATRIX_Y").c_str(),
                                 ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]),
                                 ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
                                 borderMap[borderType],
                                 ocl::typeToStr(sdepth), ocl::typeToStr(ddepth), ocl::typeToStr(wdepth),
                                 ocl::typeToStr(CV_MAKETYPE(sdepth, cn)),
                                 ocl::typeToStr(CV_MAKETYPE(ddepth, cn)),
                                 ocl::typeToStr(CV_MAKETYPE(wdepth, cn)),
                                 cn);

        ocl::Kernel k("laplacian", ocl::imgproc::laplacian5_oclsrc, opts);
        if (k.empty())
            return false;

        UMat src = _src.getUMat();
        _dst.create(size, dtype);
        UMat dst = _dst.getUMat();

        int src_offset_x = static_cast<int>((src_offset % src_step) / esz);
        int src_offset_y = static_cast<int>(src_offset / src_step);

        src.locateROI(wholeSize, origin);

        k.args(ocl::KernelArg::PtrReadOnly(src), (int)src_step, src_offset_x, src_offset_y,
               wholeSize.height, wholeSize.width, ocl::KernelArg::WriteOnly(dst),
               static_cast<float>(scale), static_cast<float>(delta));

        return k.run(2, gt2, lt2, false);
    }
    int iscale = cvRound(scale), idelta = cvRound(delta);
    bool floatCoeff = std::fabs(delta - idelta) > DBL_EPSILON || std::fabs(scale - iscale) > DBL_EPSILON;
    int wdepth = std::max(depth, floatCoeff ? CV_32F : CV_32S), kercn = 1;

    if (!doubleSupport && wdepth == CV_64F)
        return false;
    //......... (the remainder of this example is omitted by the source page) .........
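The source page truncates this example, but its public entry point is cv::Laplacian with an aperture greater than 3, for which OpenCV builds the kd/ks separable kernel pair passed in above. A hedged usage sketch (assumption: whether this OpenCL branch is dispatched depends on the build, the device, and use of the T-API via UMat):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::UMat src(480, 640, CV_8UC1, cv::Scalar(0)), dst;
    // ksize = 5 yields a separable kernel pair like the kd/ks arguments above.
    cv::Laplacian(src, dst, CV_16S, 5);
    return 0;
}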