This page collects typical usage examples of the C++ method InputArray::type. If you are wondering what InputArray::type does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples of the InputArray class.
The following shows 15 code examples of InputArray::type, sorted by popularity by default. Upvote the examples you like or find useful; your ratings help the system recommend better C++ samples.
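As a primer before the examples: type() returns a single packed integer that encodes both the element depth and the channel count. The sketch below (a hypothetical standalone snippet, not one of the collected examples) decomposes it with the standard macros; typeToString is the same helper Example 5 uses in its error message.

#include <opencv2/core.hpp>
#include <iostream>

// Hypothetical helper: report the packed type of any InputArray.
static void describe(cv::InputArray arr)
{
    int type = arr.type();                             // packed depth + channel count
    std::cout << cv::typeToString(type)                // e.g. "CV_8UC3"
              << "  depth="    << CV_MAT_DEPTH(type)   // CV_8U == 0, CV_64F == 6
              << "  channels=" << CV_MAT_CN(type) << std::endl;
}

int main()
{
    describe(cv::Mat(480, 640, CV_8UC3));   // CV_8UC3  depth=0  channels=3
    describe(cv::Mat(1, 10, CV_64FC1));     // CV_64FC1 depth=6  channels=1
    return 0;
}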
Example 1: if
void cv::cuda::divide(InputArray _src1, InputArray _src2, OutputArray _dst, double scale, int dtype, Stream& stream)
{
    if (_src1.type() == CV_8UC4 && _src2.type() == CV_32FC1)
    {
        GpuMat src1 = _src1.getGpuMat();
        GpuMat src2 = _src2.getGpuMat();
        CV_Assert( src1.size() == src2.size() );
        _dst.create(src1.size(), src1.type());
        GpuMat dst = _dst.getGpuMat();
        divMat_8uc4_32f(src1, src2, dst, stream);
    }
    else if (_src1.type() == CV_16SC4 && _src2.type() == CV_32FC1)
    {
        GpuMat src1 = _src1.getGpuMat();
        GpuMat src2 = _src2.getGpuMat();
        CV_Assert( src1.size() == src2.size() );
        _dst.create(src1.size(), src1.type());
        GpuMat dst = _dst.getGpuMat();
        divMat_16sc4_32f(src1, src2, dst, stream);
    }
    else
    {
        arithm_op(_src1, _src2, _dst, GpuMat(), scale, dtype, stream, divMat, divScalar);
    }
}
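A minimal call-site sketch for the function above, assuming an OpenCV build with the CUDA modules; the CV_8UC4/CV_32FC1 pair routes into the specialized divMat_8uc4_32f branch rather than the generic arithm_op path.

#include <opencv2/core.hpp>
#include <opencv2/cudaarithm.hpp>

int main()
{
    // Divide an 8UC4 image by a per-pixel 32FC1 weight map on the GPU.
    cv::cuda::GpuMat src1, src2, dst;
    src1.upload(cv::Mat(480, 640, CV_8UC4, cv::Scalar::all(128)));
    src2.upload(cv::Mat(480, 640, CV_32FC1, cv::Scalar::all(2.0)));
    cv::cuda::divide(src1, src2, dst);   // scale = 1, dtype = -1, default stream
    return 0;
}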
Example 2:
void cv::viz::vtkTrajectorySource::SetTrajectory(InputArray _traj)
{
    CV_Assert(_traj.kind() == _InputArray::STD_VECTOR || _traj.kind() == _InputArray::MAT);
    CV_Assert(_traj.type() == CV_32FC(16) || _traj.type() == CV_64FC(16));

    Mat traj;
    _traj.getMat().convertTo(traj, CV_64F);
    const Affine3d* dpath = traj.ptr<Affine3d>();
    size_t total = traj.total();

    points = vtkSmartPointer<vtkPoints>::New();
    points->SetDataType(VTK_DOUBLE);
    points->SetNumberOfPoints((vtkIdType)total);

    tensors = vtkSmartPointer<vtkDoubleArray>::New();
    tensors->SetNumberOfComponents(9);
    tensors->SetNumberOfTuples((vtkIdType)total);

    for (size_t i = 0; i < total; ++i, ++dpath)
    {
        Matx33d R = dpath->rotation().t();      // transposed because of column-major order
        tensors->SetTuple((vtkIdType)i, R.val);
        Vec3d p = dpath->translation();
        points->SetPoint((vtkIdType)i, p.val);
    }
}
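How a caller typically satisfies those asserts: a std::vector of cv::Affine3d is presented to InputArray as a CV_64FC(16) array (16 doubles per 4x4 pose matrix). A sketch, assuming an OpenCV build with the viz module, that feeds such a trajectory to the public WTrajectory widget, which is backed by vtkTrajectorySource:

#include <opencv2/viz.hpp>
#include <vector>

int main()
{
    std::vector<cv::Affine3d> path;   // seen by InputArray as CV_64FC(16)
    for (int i = 0; i < 100; ++i)
        path.push_back(cv::Affine3d(cv::Vec3d(0, 0, 0.01 * i),    // rotation vector
                                    cv::Vec3d(0.05 * i, 0, 0)));  // translation
    cv::viz::Viz3d window("trajectory");
    window.showWidget("path", cv::viz::WTrajectory(path));
    window.spin();
    return 0;
}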
Example 3: p
double IPPE::PoseSolver::meanSceneDepth(InputArray _objectPoints, InputArray _rvec, InputArray _tvec)
{
    assert(_objectPoints.type() == CV_64FC3);   // at<Point3d> below requires 3-channel double

    size_t n = _objectPoints.rows() * _objectPoints.cols();
    Mat R;
    Mat q;
    Rodrigues(_rvec, R);

    double zBar = 0;
    for (size_t i = 0; i < n; i++) {
        cv::Mat p(_objectPoints.getMat().at<Point3d>(i));
        q = R * p + _tvec.getMat();

        double z;
        if (q.depth() == CV_64FC1) {
            z = q.at<double>(2);
        }
        else {
            z = static_cast<double>(q.at<float>(2));
        }
        zBar += z;
        //if (z <= 0) {
        //    std::cout << "Warning: object point " << i << " projects behind the camera! This should not be allowed. " << std::endl;
        //}
    }
    return zBar / static_cast<double>(n);
}
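A hypothetical call site, assuming the IPPE sources above are available and meanSceneDepth is publicly accessible; a cv::Mat built from a std::vector<cv::Point3d> is CV_64FC3, which satisfies the assert:

#include <opencv2/core.hpp>
#include <vector>

double sceneDepth(const std::vector<cv::Point3d>& objectPoints,
                  const cv::Vec3d& rvec, const cv::Vec3d& tvec)
{
    IPPE::PoseSolver solver;
    // Mean z-coordinate of the object points in camera coordinates.
    return solver.meanSceneDepth(cv::Mat(objectPoints), rvec, tvec);
}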
Example 4: blendLinear
void cv::blendLinear( InputArray _src1, InputArray _src2, InputArray _weights1, InputArray _weights2, OutputArray _dst )
{
    int type = _src1.type(), depth = CV_MAT_DEPTH(type);
    Size size = _src1.size();

    CV_Assert(depth == CV_8U || depth == CV_32F);
    CV_Assert(size == _src2.size() && size == _weights1.size() && size == _weights2.size());
    CV_Assert(type == _src2.type() && _weights1.type() == CV_32FC1 && _weights2.type() == CV_32FC1);

    _dst.create(size, type);

    CV_OCL_RUN(_dst.isUMat(),
               ocl_blendLinear(_src1, _src2, _weights1, _weights2, _dst))

    Mat src1 = _src1.getMat(), src2 = _src2.getMat(), weights1 = _weights1.getMat(),
        weights2 = _weights2.getMat(), dst = _dst.getMat();

    if (depth == CV_8U)
    {
        BlendLinearInvoker<uchar> invoker(src1, src2, weights1, weights2, dst);
        parallel_for_(Range(0, src1.rows), invoker, dst.total()/(double)(1<<16));
    }
    else if (depth == CV_32F)
    {
        BlendLinearInvoker<float> invoker(src1, src2, weights1, weights2, dst);
        parallel_for_(Range(0, src1.rows), invoker, dst.total()/(double)(1<<16));
    }
}
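A minimal usage sketch: a left-to-right cross-fade between two images, with two CV_32FC1 weight maps of matching size as the asserts above require.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat a(240, 320, CV_8UC3, cv::Scalar(255, 0, 0));
    cv::Mat b(240, 320, CV_8UC3, cv::Scalar(0, 0, 255));
    cv::Mat w1(a.size(), CV_32FC1), w2;
    for (int x = 0; x < w1.cols; ++x)                       // 1.0 at the left edge,
        w1.col(x).setTo(1.0f - float(x) / (w1.cols - 1));   // 0.0 at the right
    w2 = 1.0f - w1;
    cv::Mat dst;
    cv::blendLinear(a, b, w1, w2, dst);
    return 0;
}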
Example 5: drawKeypoints
void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                    const Scalar& _color, DrawMatchesFlags flags )
{
    CV_INSTRUMENT_REGION();

    if( !(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
    {
        if (image.type() == CV_8UC3 || image.type() == CV_8UC4)
        {
            image.copyTo(outImage);
        }
        else if( image.type() == CV_8UC1 )
        {
            cvtColor( image, outImage, COLOR_GRAY2BGR );
        }
        else
        {
            CV_Error( Error::StsBadArg, "Incorrect type of input image: " + typeToString(image.type()) );
        }
    }

    RNG& rng = theRNG();
    bool isRandColor = _color == Scalar::all(-1);

    CV_Assert( !outImage.empty() );
    std::vector<KeyPoint>::const_iterator it = keypoints.begin(),
                                          end = keypoints.end();
    for( ; it != end; ++it )
    {
        Scalar color = isRandColor ? Scalar( rng(256), rng(256), rng(256), 255 ) : _color;
        _drawKeypoint( outImage, *it, color, flags );
    }
}
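Typical usage through the public API: detect keypoints, then render them; passing Scalar::all(-1) takes the isRandColor branch above, so each keypoint gets a random color.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat img = cv::imread("scene.jpg");   // illustrative path
    std::vector<cv::KeyPoint> kps;
    cv::ORB::create()->detect(img, kps);
    cv::Mat out;
    cv::drawKeypoints(img, kps, out, cv::Scalar::all(-1),
                      cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    cv::imwrite("keypoints.jpg", out);
    return 0;
}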
Example 6: if
void cv::cuda::multiply(InputArray _src1, InputArray _src2, OutputArray _dst, double scale, int dtype, Stream& stream)
{
    if (_src1.type() == CV_8UC4 && _src2.type() == CV_32FC1)
    {
        GpuMat src1 = getInputMat(_src1, stream);
        GpuMat src2 = getInputMat(_src2, stream);
        CV_Assert( src1.size() == src2.size() );
        GpuMat dst = getOutputMat(_dst, src1.size(), src1.type(), stream);
        mulMat_8uc4_32f(src1, src2, dst, stream);
        syncOutput(dst, _dst, stream);
    }
    else if (_src1.type() == CV_16SC4 && _src2.type() == CV_32FC1)
    {
        GpuMat src1 = getInputMat(_src1, stream);
        GpuMat src2 = getInputMat(_src2, stream);
        CV_Assert( src1.size() == src2.size() );
        GpuMat dst = getOutputMat(_dst, src1.size(), src1.type(), stream);
        mulMat_16sc4_32f(src1, src2, dst, stream);
        syncOutput(dst, _dst, stream);
    }
    else
    {
        arithm_op(_src1, _src2, _dst, GpuMat(), scale, dtype, stream, mulMat, mulScalar);
    }
}
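Note the contrast with Example 1: this variant goes through the getInputMat/getOutputMat/syncOutput helpers instead of calling getGpuMat() and _dst.create() directly. In OpenCV's CUDA modules these helpers can also stage host arrays through the stream's buffer pool and copy the result back in syncOutput, so the same code path serves both GpuMat and Mat arguments.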
Example 7: matte
int matte(InputArray src, OutputArray dst, Point firstPoint, Point secondPoint, float sigmaX,
          float sigmaY, Size ksize)
{
    CV_Assert((src.type() == CV_8UC3) || (src.type() == CV_32FC3));
    CV_Assert((sigmaX > 0.0f) || (sigmaY > 0.0f));

    Mat srcImg = src.getMat();
    CV_Assert(!(srcImg.empty()));
    if (srcImg.type() != CV_32FC3)
    {
        srcImg.convertTo(srcImg, CV_32FC3, 1.0f/255.0f);
    }

    int xsize = firstPoint.x - secondPoint.x;
    int ysize = firstPoint.y - secondPoint.y;
    Point topLeft = topleftFind(firstPoint, secondPoint, xsize, ysize);

    const Scalar black = Scalar(0.0f, 0.0f, 0.0f);
    const Scalar white = Scalar(1.0f, 1.0f, 1.0f);
    Mat mask(srcImg.rows, srcImg.cols, CV_32FC1, black);
    ellipse(mask, Point((topLeft.x + xsize/2), (topLeft.y - ysize/2)),
            Size(xsize/2, ysize/2), 0, 0, 360, white, -1);
    GaussianBlur(mask, mask, ksize, sigmaX, sigmaY);

    vector<Mat> ch_img;
    split(srcImg, ch_img);
    ch_img[0] = ch_img[0].mul(mask) + 1.0f - mask;
    ch_img[1] = ch_img[1].mul(mask) + 1.0f - mask;
    ch_img[2] = ch_img[2].mul(mask) + 1.0f - mask;
    merge(ch_img, dst);
    return 0;
}
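A hypothetical call site for matte() (assumes the function and its topleftFind helper above are linked in). The output is CV_32FC3 in [0, 1], so it is scaled back to 8-bit before writing:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat src = cv::imread("portrait.jpg"), dst, out8u;   // illustrative path
    matte(src, dst, cv::Point(50, 60), cv::Point(250, 300),
          15.0f, 15.0f, cv::Size(31, 31));
    dst.convertTo(out8u, CV_8UC3, 255.0);
    cv::imwrite("matte.jpg", out8u);
    return 0;
}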
Example 8: assert
void IPPE::PoseSolver::homographyFromSquarePoints(InputArray _targetPoints, double halfLength, OutputArray H_)
{
    assert((_targetPoints.type() == CV_32FC2) || (_targetPoints.type() == CV_64FC2));

    cv::Mat pts = _targetPoints.getMat();
    H_.create(3, 3, CV_64FC1);
    Mat H = H_.getMat();

    double p1x, p1y;
    double p2x, p2y;
    double p3x, p3y;
    double p4x, p4y;

    if (_targetPoints.type() == CV_32FC2) {
        p1x = -pts.at<Vec2f>(0)(0);
        p1y = -pts.at<Vec2f>(0)(1);
        p2x = -pts.at<Vec2f>(1)(0);
        p2y = -pts.at<Vec2f>(1)(1);
        p3x = -pts.at<Vec2f>(2)(0);
        p3y = -pts.at<Vec2f>(2)(1);
        p4x = -pts.at<Vec2f>(3)(0);
        p4y = -pts.at<Vec2f>(3)(1);
    }
    else {
        p1x = -pts.at<Vec2d>(0)(0);
        p1y = -pts.at<Vec2d>(0)(1);
        p2x = -pts.at<Vec2d>(1)(0);
        p2y = -pts.at<Vec2d>(1)(1);
        p3x = -pts.at<Vec2d>(2)(0);
        p3y = -pts.at<Vec2d>(2)(1);
        p4x = -pts.at<Vec2d>(3)(0);
        p4y = -pts.at<Vec2d>(3)(1);
    }

    // analytic solution:
    double detsInv = -1 / (halfLength * (p1x * p2y - p2x * p1y - p1x * p4y + p2x * p3y - p3x * p2y + p4x * p1y + p3x * p4y - p4x * p3y));

    H.at<double>(0, 0) = detsInv * (p1x * p3x * p2y - p2x * p3x * p1y - p1x * p4x * p2y + p2x * p4x * p1y - p1x * p3x * p4y + p1x * p4x * p3y + p2x * p3x * p4y - p2x * p4x * p3y);
    H.at<double>(0, 1) = detsInv * (p1x * p2x * p3y - p1x * p3x * p2y - p1x * p2x * p4y + p2x * p4x * p1y + p1x * p3x * p4y - p3x * p4x * p1y - p2x * p4x * p3y + p3x * p4x * p2y);
    H.at<double>(0, 2) = detsInv * halfLength * (p1x * p2x * p3y - p2x * p3x * p1y - p1x * p2x * p4y + p1x * p4x * p2y - p1x * p4x * p3y + p3x * p4x * p1y + p2x * p3x * p4y - p3x * p4x * p2y);
    H.at<double>(1, 0) = detsInv * (p1x * p2y * p3y - p2x * p1y * p3y - p1x * p2y * p4y + p2x * p1y * p4y - p3x * p1y * p4y + p4x * p1y * p3y + p3x * p2y * p4y - p4x * p2y * p3y);
    H.at<double>(1, 1) = detsInv * (p2x * p1y * p3y - p3x * p1y * p2y - p1x * p2y * p4y + p4x * p1y * p2y + p1x * p3y * p4y - p4x * p1y * p3y - p2x * p3y * p4y + p3x * p2y * p4y);
    H.at<double>(1, 2) = detsInv * halfLength * (p1x * p2y * p3y - p3x * p1y * p2y - p2x * p1y * p4y + p4x * p1y * p2y - p1x * p3y * p4y + p3x * p1y * p4y + p2x * p3y * p4y - p4x * p2y * p3y);
    H.at<double>(2, 0) = -detsInv * (p1x * p3y - p3x * p1y - p1x * p4y - p2x * p3y + p3x * p2y + p4x * p1y + p2x * p4y - p4x * p2y);
    H.at<double>(2, 1) = detsInv * (p1x * p2y - p2x * p1y - p1x * p3y + p3x * p1y + p2x * p4y - p4x * p2y - p3x * p4y + p4x * p3y);
    H.at<double>(2, 2) = 1.0;
}
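A hypothetical wrapper, again assuming the IPPE sources: map the canonical square of half-side halfLength to four detected corners. A std::vector<cv::Point2d> is seen as CV_64FC2, which satisfies the assert:

#include <opencv2/core.hpp>
#include <vector>

cv::Mat squareHomography(const std::vector<cv::Point2d>& corners, double halfLength)
{
    IPPE::PoseSolver solver;
    cv::Mat H;
    solver.homographyFromSquarePoints(cv::Mat(corners), halfLength, H);
    return H;   // 3x3 CV_64FC1
}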
Example 9: process
void NullOutlierRejector::process(
        Size /*frameSize*/, InputArray points0, InputArray points1, OutputArray mask)
{
    CV_Assert(points0.type() == points1.type());
    CV_Assert(points0.getMat().checkVector(2) == points1.getMat().checkVector(2));

    int npoints = points0.getMat().checkVector(2);
    mask.create(1, npoints, CV_8U);
    Mat mask_ = mask.getMat();
    mask_.setTo(1);
}
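A short sketch of what this rejector does in practice: it accepts every correspondence, so the returned mask comes back filled with ones (the class lives in the videostab module).

#include <opencv2/videostab.hpp>
#include <vector>

int main()
{
    std::vector<cv::Point2f> p0(10), p1(10);
    cv::Mat mask;
    cv::videostab::NullOutlierRejector rejector;
    rejector.process(cv::Size(640, 480), p0, p1, mask);   // mask: 1x10 CV_8U, all 1
    return 0;
}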
Example 10: addNoise
void addNoise(InputArray src_, OutputArray dest_, const double sigma, const double sprate, const int seed)
{
    if (seed != 0) cv::theRNG().state = seed;

    if (dest_.empty() || dest_.size() != src_.size() || dest_.type() != src_.type())
        dest_.create(src_.size(), src_.type());

    Mat src = src_.getMat();
    Mat dest = dest_.getMat();
    if (src.channels() == 1)
    {
        addNoiseMono(src, dest, sigma);
        if (sprate != 0) addNoiseSoltPepperMono(dest, dest, sprate, seed);
        // fall through so the RNG state is restored below
    }
    else
    {
        vector<Mat> s(src.channels());
        vector<Mat> d(src.channels());
        split(src, s);
        for (int i = 0; i < src.channels(); i++)
        {
            addNoiseMono(s[i], d[i], sigma);
            if (sprate != 0) addNoiseSoltPepperMono(d[i], d[i], sprate, seed);
        }
        cv::merge(d, dest);
    }

    if (seed != 0) cv::theRNG().state = cv::getTickCount();
}
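A hypothetical call site (assumes addNoise() and its *Mono helpers above are linked in): Gaussian noise with sigma 10 plus 1% salt-and-pepper, reproducible through the fixed seed.

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png"), noisy;   // illustrative path
    addNoise(src, noisy, 10.0, 0.01, 1234);
    cv::imwrite("noisy.png", noisy);
    return 0;
}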
Example 11: elbp
static void elbp(InputArray src, OutputArray dst, int radius, int neighbors)
{
    int type = src.type();
    switch (type)
    {
    case CV_8SC1:  elbp_<char>(src, dst, radius, neighbors); break;
    case CV_8UC1:  elbp_<unsigned char>(src, dst, radius, neighbors); break;
    case CV_16SC1: elbp_<short>(src, dst, radius, neighbors); break;
    case CV_16UC1: elbp_<unsigned short>(src, dst, radius, neighbors); break;
    case CV_32SC1: elbp_<int>(src, dst, radius, neighbors); break;
    case CV_32FC1: elbp_<float>(src, dst, radius, neighbors); break;
    case CV_64FC1: elbp_<double>(src, dst, radius, neighbors); break;
    default:
        String error_msg = format("Using Original Local Binary Patterns for feature extraction only works on single-channel images (given %d). Please pass the image data as a grayscale image!", type);
        CV_Error(CV_StsNotImplemented, error_msg);
        break;
    }
}
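Since elbp is declared static, a caller has to sit in the same translation unit; a hypothetical sketch with radius 1 and 8 sampling points on a grayscale image (any single-channel depth from the switch works):

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat gray = cv::imread("face.png", cv::IMREAD_GRAYSCALE);   // illustrative path
    cv::Mat lbp;
    elbp(gray, lbp, 1, 8);
    return 0;
}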
Example 12: Niblack
/*!
Niblack binarization algorithm.
@param src [in] Mat, single channel uchar image.
@param dst [out] Mat, result image.
@param windowSize [in] int, window size for calculation.
@param k [in] float, parameter for the local threshold.
@return int, 0x0000 = Success.
*/
int Niblack(InputArray src, OutputArray dst, int windowSize, float k)
{
    if (src.type() != CV_8UC1 || src.empty())
        return 0x0001; /*!< source image type not supported. */

    /*! update window size, which should be odd. */
    if (windowSize < 2)
        return 0x0002; /*!< window size not supported. */
    if (windowSize % 2 == 0)
        windowSize++;

    Mat source, destination;
    Mat sourceUchar = src.getMat();
    sourceUchar.convertTo(source, CV_32FC1);

    /*! calculate mean and standard deviation via
        D(x) = E(x^2) - (Ex)^2 */
    Mat avg, power, avg_power, power_avg;
    Mat standard;
    boxFilter(source, avg, -1, Size(windowSize, windowSize));
    pow(avg, 2, avg_power);
    pow(source, 2, power);
    boxFilter(power, power_avg, -1, Size(windowSize, windowSize));
    sqrt(power_avg - avg_power, standard);

    /*! calculate local threshold */
    Mat threshold = avg + k * standard;

    /*! output result (write through the dst buffer instead of
        rebinding the local header) */
    dst.create(sourceUchar.size(), CV_8UC1);
    destination = dst.getMat();
    compare(source, threshold, destination, CMP_GT);
    return 0x0000;
}
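A hypothetical call site for Niblack(): binarize a grayscale scan with a 25x25 window and k = -0.2, a common choice for dark text on a light background.

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat gray = cv::imread("scan.png", cv::IMREAD_GRAYSCALE), bin;   // illustrative path
    if (Niblack(gray, bin, 25, -0.2f) == 0x0000)
        cv::imwrite("binarized.png", bin);
    return 0;
}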
Example 13: repeat
void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
{
    CV_Assert( _src.dims() <= 2 );
    CV_Assert( ny > 0 && nx > 0 );

    Size ssize = _src.size();
    _dst.create(ssize.height*ny, ssize.width*nx, _src.type());

    CV_OCL_RUN(_dst.isUMat(),
               ocl_repeat(_src, ny, nx, _dst))

    Mat src = _src.getMat(), dst = _dst.getMat();
    Size dsize = dst.size();
    int esz = (int)src.elemSize();
    int x, y;
    ssize.width *= esz; dsize.width *= esz;

    for( y = 0; y < ssize.height; y++ )
    {
        for( x = 0; x < dsize.width; x += ssize.width )
            memcpy( dst.data + y*dst.step + x, src.data + y*src.step, ssize.width );
    }

    for( ; y < dsize.height; y++ )
        memcpy( dst.data + y*dst.step, dst.data + (y - ssize.height)*dst.step, dsize.width );
}
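A minimal usage sketch: tile a 2x2 patch 3 times vertically and 4 times horizontally. The same call shape works for the function above and for the stock cv::repeat.

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat patch = (cv::Mat_<uchar>(2, 2) << 1, 2, 3, 4);
    cv::Mat tiled;
    cv::repeat(patch, 3, 4, tiled);   // result is 6x8
    std::cout << tiled << std::endl;
    return 0;
}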
Example 14: detect
void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
{
    CV_INSTRUMENT_REGION()

    std::vector<Point2f> corners;

    if (_image.isUMat())
    {
        UMat ugrayImage;
        if( _image.type() != CV_8U )
            cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
        else
            ugrayImage = _image.getUMat();

        goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
                             blockSize, useHarrisDetector, k );
    }
    else
    {
        Mat image = _image.getMat(), grayImage = image;
        if( image.type() != CV_8U )
            cvtColor( image, grayImage, COLOR_BGR2GRAY );

        goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
                             blockSize, useHarrisDetector, k );
    }

    keypoints.resize(corners.size());
    std::vector<Point2f>::const_iterator corner_it = corners.begin();
    std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
    for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
        *keypoint_it = KeyPoint( *corner_it, (float)blockSize );
}
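This detect() override matches OpenCV's GFTTDetector implementation (nfeatures, qualityLevel, and the other parameters are class members); a typical caller goes through the public factory rather than invoking it directly:

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat img = cv::imread("scene.jpg");   // illustrative path
    std::vector<cv::KeyPoint> keypoints;
    cv::Ptr<cv::GFTTDetector> gftt =
        cv::GFTTDetector::create(500, 0.01, 10);   // maxCorners, qualityLevel, minDistance
    gftt->detect(img, keypoints);                  // mask defaults to empty
    return 0;
}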
Example 15: ocl_integral
static bool ocl_integral( InputArray _src, OutputArray _sum, int sdepth )
{
    if ( _src.type() != CV_8UC1 || _src.step() % vlen != 0 || _src.offset() % vlen != 0 ||
         !(sdepth == CV_32S || sdepth == CV_32F) )
        return false;

    ocl::Kernel k1("integral_sum_cols", ocl::imgproc::integral_sum_oclsrc,
                   format("-D sdepth=%d", sdepth));
    if (k1.empty())
        return false;

    Size size = _src.size(), t_size = Size(((size.height + vlen - 1) / vlen) * vlen, size.width),
         ssize(size.width + 1, size.height + 1);
    _sum.create(ssize, sdepth);
    UMat src = _src.getUMat(), t_sum(t_size, sdepth), sum = _sum.getUMat();
    t_sum = t_sum(Range::all(), Range(0, size.height));

    int offset = (int)src.offset / vlen, pre_invalid = (int)src.offset % vlen;
    int vcols = (pre_invalid + src.cols + vlen - 1) / vlen;
    int sum_offset = (int)sum.offset / vlen;

    k1.args(ocl::KernelArg::PtrReadOnly(src), ocl::KernelArg::PtrWriteOnly(t_sum),
            offset, pre_invalid, src.rows, src.cols, (int)src.step, (int)t_sum.step);
    size_t gt = ((vcols + 1) / 2) * 256, lt = 256;
    if (!k1.run(1, &gt, &lt, false))
        return false;

    ocl::Kernel k2("integral_sum_rows", ocl::imgproc::integral_sum_oclsrc,
                   format("-D sdepth=%d", sdepth));
    k2.args(ocl::KernelArg::PtrReadWrite(t_sum), ocl::KernelArg::PtrWriteOnly(sum),
            t_sum.rows, t_sum.cols, (int)t_sum.step, (int)sum.step, sum_offset);

    size_t gt2 = t_sum.cols * 32, lt2 = 256;
    return k2.run(1, &gt2, &lt2, false);
}
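The public entry point is cv::integral; passing UMats lets the CV_OCL_RUN dispatch try OpenCL kernels like the one above when an OpenCL device is available. A minimal sketch:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::UMat src(480, 640, CV_8UC1, cv::Scalar(1)), sum;
    cv::integral(src, sum, CV_32S);   // sum is 481x641 CV_32SC1
    return 0;
}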