This article collects typical usage examples of the C++ InputArray class. If you have been wondering what the C++ InputArray class is for, how it is used, or what real InputArray code looks like, the selected class examples here may help.
Shown below are 15 code examples of the InputArray class, ordered by popularity by default.
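Before the examples, here is a minimal sketch of the pattern most of them follow: a function takes cv::InputArray / cv::OutputArray so callers can pass cv::Mat, cv::UMat, std::vector and similar types, and converts them to concrete matrices internally. The function name and the doubling operation are made up for illustration.
#include <opencv2/core.hpp>

// Hypothetical helper, not taken from the examples below.
void scaleByTwo(cv::InputArray src, cv::OutputArray dst)
{
    cv::Mat in = src.getMat();            // view whatever was passed as a Mat
    dst.create(in.size(), in.type());     // allocate the output if necessary
    cv::Mat out = dst.getMat();
    in.convertTo(out, in.type(), 2.0);    // example operation: multiply every pixel by 2
}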
Example 1: imwrite
bool imwrite( const String& filename, InputArray _img,
const std::vector<int>& params )
{
Mat img = _img.getMat();
return imwrite_(filename, img, params, false);
}
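For reference, a direct call to the public cv::imwrite API might look like the sketch below; the file name, image contents and JPEG quality are made up.
#include <vector>
#include <opencv2/imgcodecs.hpp>

static void imwriteDemo()   // hypothetical driver
{
    cv::Mat img(480, 640, CV_8UC3, cv::Scalar(0, 128, 255));    // dummy image
    std::vector<int> params = { cv::IMWRITE_JPEG_QUALITY, 95 };
    bool ok = cv::imwrite("out.jpg", img, params);               // returns false on failure
    (void)ok;
}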
Example 2: process
void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
{
std::vector<Mat> images;
src.getMatVector(images);
Mat times = _times.getMat();
CV_Assert(images.size() == times.total());
checkImageDimensions(images);
CV_Assert(images[0].depth() == CV_8U);
int channels = images[0].channels();
Size size = images[0].size();
int CV_32FCC = CV_MAKETYPE(CV_32F, channels);
dst.create(images[0].size(), CV_32FCC);
Mat result = dst.getMat();
Mat response = input_response.getMat();
if(response.empty()) {
response = linearResponse(channels);
response.at<Vec3f>(0) = response.at<Vec3f>(1);
}
log(response, response);
CV_Assert(response.rows == LDR_SIZE && response.cols == 1 &&
response.channels() == channels);
Mat exp_values(times);
log(exp_values, exp_values);
result = Mat::zeros(size, CV_32FCC);
std::vector<Mat> result_split;
split(result, result_split);
Mat weight_sum = Mat::zeros(size, CV_32F);
for(size_t i = 0; i < images.size(); i++) {
std::vector<Mat> splitted;
split(images[i], splitted);
Mat w = Mat::zeros(size, CV_32F);
for(int c = 0; c < channels; c++) {
LUT(splitted[c], weights, splitted[c]);
w += splitted[c];
}
w /= channels;
Mat response_img;
LUT(images[i], response, response_img);
split(response_img, splitted);
for(int c = 0; c < channels; c++) {
result_split[c] += w.mul(splitted[c] - exp_values.at<float>((int)i));
}
weight_sum += w;
}
weight_sum = 1.0f / weight_sum;
for(int c = 0; c < channels; c++) {
result_split[c] = result_split[c].mul(weight_sum);
}
merge(result_split, result);
exp(result, result);
}
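This process() appears to implement a Debevec-style HDR merge (a weighted average of log exposures). Through OpenCV's public photo module the same functionality is normally reached as sketched below; the exposure images are assumed to be loaded elsewhere and the exposure times are made up.
#include <vector>
#include <opencv2/photo.hpp>

static void mergeDemo(const std::vector<cv::Mat>& exposures)    // CV_8UC3 images, assumed loaded
{
    std::vector<float> times = { 1/30.0f, 1/8.0f, 1/2.0f };     // made-up exposure times
    cv::Mat response, hdr;                                       // empty response -> linear response
    cv::Ptr<cv::MergeDebevec> merge = cv::createMergeDebevec();
    merge->process(exposures, hdr, times, response);             // hdr becomes CV_32FC3
}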
Example 3: openvx_sobel
static bool openvx_sobel(InputArray _src, OutputArray _dst,
int dx, int dy, int ksize,
double scale, double delta, int borderType)
{
if (_src.type() != CV_8UC1 || _dst.type() != CV_16SC1 ||
ksize != 3 || scale != 1.0 || delta != 0.0 ||
(dx | dy) != 1 || (dx + dy) != 1 ||
_src.cols() < ksize || _src.rows() < ksize ||
ovx::skipSmallImages<VX_KERNEL_SOBEL_3x3>(_src.cols(), _src.rows())
)
return false;
Mat src = _src.getMat();
Mat dst = _dst.getMat();
if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
return false; //Process isolated borders only
vx_enum border;
switch (borderType & ~BORDER_ISOLATED)
{
case BORDER_CONSTANT:
border = VX_BORDER_CONSTANT;
break;
case BORDER_REPLICATE:
// border = VX_BORDER_REPLICATE;
// break;
default:
return false;
}
try
{
ivx::Context ctx = ovx::getOpenVXContext();
//if ((vx_size)ksize > ctx.convolutionMaxDimension())
// return false;
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_S16,
ivx::Image::createAddressing(dst.cols, dst.rows, 2, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
if(dx)
ivx::IVX_CHECK_STATUS(vxuSobel3x3(ctx, ia, ib, NULL));
else
ivx::IVX_CHECK_STATUS(vxuSobel3x3(ctx, ia, NULL, ib));
ctx.setImmediateBorder(prevBorder);
}
catch (ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
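The OpenVX path above only covers 8-bit single-channel input, CV_16S output and a fixed 3x3 kernel; the public call it is meant to accelerate is sketched below, with a made-up file name.
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

static void sobelDemo()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);  // made-up file name
    cv::Mat gx;
    cv::Sobel(gray, gx, CV_16S, 1, 0, 3);                          // dx = 1, dy = 0, 3x3 kernel
}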
Example 4: Laplacian
void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
double scale, double delta, int borderType )
{
int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
if (ddepth < 0)
ddepth = sdepth;
_dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );
#if defined HAVE_IPP && !defined HAVE_IPP_ICV_ONLY
if ((ksize == 3 || ksize == 5) && ((borderType & BORDER_ISOLATED) != 0 || !_src.isSubmatrix()) &&
((stype == CV_8UC1 && ddepth == CV_16S) || (ddepth == CV_32F && stype == CV_32FC1)))
{
int iscale = saturate_cast<int>(scale), idelta = saturate_cast<int>(delta);
bool floatScale = std::fabs(scale - iscale) > DBL_EPSILON, needScale = iscale != 1;
bool floatDelta = std::fabs(delta - idelta) > DBL_EPSILON, needDelta = delta != 0;
int borderTypeNI = borderType & ~BORDER_ISOLATED;
Mat src = _src.getMat(), dst = _dst.getMat();
if (src.data != dst.data)
{
Ipp32s bufsize;
IppStatus status = (IppStatus)-1;
IppiSize roisize = { src.cols, src.rows };
IppiMaskSize masksize = ksize == 3 ? ippMskSize3x3 : ippMskSize5x5;
IppiBorderType borderTypeIpp = ippiGetBorderType(borderTypeNI);
#define IPP_FILTER_LAPLACIAN(ippsrctype, ippdsttype, ippfavor) \
do \
{ \
if (borderTypeIpp >= 0 && ippiFilterLaplacianGetBufferSize_##ippfavor##_C1R(roisize, masksize, &bufsize) >= 0) \
{ \
Ipp8u * buffer = ippsMalloc_8u(bufsize); \
status = ippiFilterLaplacianBorder_##ippfavor##_C1R((const ippsrctype *)src.data, (int)src.step, (ippdsttype *)dst.data, \
(int)dst.step, roisize, masksize, borderTypeIpp, 0, buffer); \
ippsFree(buffer); \
} \
} while ((void)0, 0)
CV_SUPPRESS_DEPRECATED_START
if (sdepth == CV_8U && ddepth == CV_16S && !floatScale && !floatDelta)
{
IPP_FILTER_LAPLACIAN(Ipp8u, Ipp16s, 8u16s);
if (needScale && status >= 0)
status = ippiMulC_16s_C1IRSfs((Ipp16s)iscale, (Ipp16s *)dst.data, (int)dst.step, roisize, 0);
if (needDelta && status >= 0)
status = ippiAddC_16s_C1IRSfs((Ipp16s)idelta, (Ipp16s *)dst.data, (int)dst.step, roisize, 0);
}
else if (sdepth == CV_32F && ddepth == CV_32F)
{
IPP_FILTER_LAPLACIAN(Ipp32f, Ipp32f, 32f);
if (needScale && status >= 0)
status = ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, roisize);
if (needDelta && status >= 0)
status = ippiAddC_32f_C1IR((Ipp32f)delta, (Ipp32f *)dst.data, (int)dst.step, roisize);
}
CV_SUPPRESS_DEPRECATED_END
if (status >= 0)
return;
}
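The example above is cut off before the non-IPP fallback; for reference, a plain call to the public cv::Laplacian API is sketched below (made-up file name).
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

static void laplacianDemo()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);  // made-up file name
    cv::Mat lap;
    cv::Laplacian(gray, lap, CV_16S, 3);   // CV_16S output avoids overflow with 8-bit input
}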
Example 5: recoverPose
int recoverPose( InputArray E, InputArray _points1, InputArray _points2, InputArray _cameraMatrix,
OutputArray _R, OutputArray _t, InputOutputArray _mask)
{
Mat points1, points2, cameraMatrix;
_points1.getMat().convertTo(points1, CV_64F);
_points2.getMat().convertTo(points2, CV_64F);
_cameraMatrix.getMat().convertTo(cameraMatrix, CV_64F);
int npoints = points1.checkVector(2);
CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
points1.type() == points2.type());
CV_Assert(cameraMatrix.rows == 3 && cameraMatrix.cols == 3 && cameraMatrix.channels() == 1);
if (points1.channels() > 1)
{
points1 = points1.reshape(1, npoints);
points2 = points2.reshape(1, npoints);
}
double fx = cameraMatrix.at<double>(0,0);
double fy = cameraMatrix.at<double>(1,1);
double cx = cameraMatrix.at<double>(0,2);
double cy = cameraMatrix.at<double>(1,2);
points1.col(0) = (points1.col(0) - cx) / fx;
points2.col(0) = (points2.col(0) - cx) / fx;
points1.col(1) = (points1.col(1) - cy) / fy;
points2.col(1) = (points2.col(1) - cy) / fy;
points1 = points1.t();
points2 = points2.t();
Mat R1, R2, t;
decomposeEssentialMat(E, R1, R2, t);
Mat P0 = Mat::eye(3, 4, R1.type());
Mat P1(3, 4, R1.type()), P2(3, 4, R1.type()), P3(3, 4, R1.type()), P4(3, 4, R1.type());
P1(Range::all(), Range(0, 3)) = R1 * 1.0; P1.col(3) = t * 1.0;
P2(Range::all(), Range(0, 3)) = R2 * 1.0; P2.col(3) = t * 1.0;
P3(Range::all(), Range(0, 3)) = R1 * 1.0; P3.col(3) = -t * 1.0;
P4(Range::all(), Range(0, 3)) = R2 * 1.0; P4.col(3) = -t * 1.0;
// Do the cheirality check.
// Notice here a threshold dist is used to filter
// out far away points (i.e. infinite points) since
// their depth may vary between positive and negative.
double dist = 50.0;
Mat Q;
triangulatePoints(P0, P1, points1, points2, Q);
Mat mask1 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask1 = (Q.row(2) < dist) & mask1;
Q = P1 * Q;
mask1 = (Q.row(2) > 0) & mask1;
mask1 = (Q.row(2) < dist) & mask1;
triangulatePoints(P0, P2, points1, points2, Q);
Mat mask2 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask2 = (Q.row(2) < dist) & mask2;
Q = P2 * Q;
mask2 = (Q.row(2) > 0) & mask2;
mask2 = (Q.row(2) < dist) & mask2;
triangulatePoints(P0, P3, points1, points2, Q);
Mat mask3 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask3 = (Q.row(2) < dist) & mask3;
Q = P3 * Q;
mask3 = (Q.row(2) > 0) & mask3;
mask3 = (Q.row(2) < dist) & mask3;
triangulatePoints(P0, P4, points1, points2, Q);
Mat mask4 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask4 = (Q.row(2) < dist) & mask4;
Q = P4 * Q;
mask4 = (Q.row(2) > 0) & mask4;
mask4 = (Q.row(2) < dist) & mask4;
mask1 = mask1.t();
mask2 = mask2.t();
mask3 = mask3.t();
mask4 = mask4.t();
// If _mask is given, then use it to filter outliers.
if (!_mask.empty())
//......... the rest of the code is omitted here .........
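recoverPose is usually paired with findEssentialMat; a hedged usage sketch, assuming matched point sets and the camera matrix are already available, is:
#include <vector>
#include <opencv2/calib3d.hpp>

static int poseDemo(const std::vector<cv::Point2f>& pts1,
                    const std::vector<cv::Point2f>& pts2,
                    const cv::Mat& K)                       // 3x3 camera matrix, assumed known
{
    cv::Mat R, t, mask;
    cv::Mat E = cv::findEssentialMat(pts1, pts2, K, cv::RANSAC, 0.999, 1.0, mask);
    return cv::recoverPose(E, pts1, pts2, K, R, t, mask);   // number of inliers passing the check
}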
Example 6: ocl_Canny
static bool ocl_Canny(InputArray _src, const UMat& dx_, const UMat& dy_, OutputArray _dst, float low_thresh, float high_thresh,
int aperture_size, bool L2gradient, int cn, const Size & size)
{
CV_INSTRUMENT_REGION_OPENCL()
UMat map;
const ocl::Device &dev = ocl::Device::getDefault();
int max_wg_size = (int)dev.maxWorkGroupSize();
int lSizeX = 32;
int lSizeY = max_wg_size / 32;
if (lSizeY == 0)
{
lSizeX = 16;
lSizeY = max_wg_size / 16;
}
if (lSizeY == 0)
{
lSizeY = 1;
}
if (aperture_size == 7)
{
low_thresh = low_thresh / 16.0f;
high_thresh = high_thresh / 16.0f;
}
if (L2gradient)
{
low_thresh = std::min(32767.0f, low_thresh);
high_thresh = std::min(32767.0f, high_thresh);
if (low_thresh > 0)
low_thresh *= low_thresh;
if (high_thresh > 0)
high_thresh *= high_thresh;
}
int low = cvFloor(low_thresh), high = cvFloor(high_thresh);
if (!useCustomDeriv &&
aperture_size == 3 && !_src.isSubmatrix())
{
/*
stage1_with_sobel:
Sobel operator
Calc magnitudes
Non maxima suppression
Double thresholding
*/
char cvt[40];
ocl::Kernel with_sobel("stage1_with_sobel", ocl::imgproc::canny_oclsrc,
format("-D WITH_SOBEL -D cn=%d -D TYPE=%s -D convert_floatN=%s -D floatN=%s -D GRP_SIZEX=%d -D GRP_SIZEY=%d%s",
cn, ocl::memopTypeToStr(_src.depth()),
ocl::convertTypeStr(_src.depth(), CV_32F, cn, cvt),
ocl::typeToStr(CV_MAKE_TYPE(CV_32F, cn)),
lSizeX, lSizeY,
L2gradient ? " -D L2GRAD" : ""));
if (with_sobel.empty())
return false;
UMat src = _src.getUMat();
map.create(size, CV_32S);
with_sobel.args(ocl::KernelArg::ReadOnly(src),
ocl::KernelArg::WriteOnlyNoSize(map),
(float) low, (float) high);
size_t globalsize[2] = { (size_t)size.width, (size_t)size.height },
localsize[2] = { (size_t)lSizeX, (size_t)lSizeY };
if (!with_sobel.run(2, globalsize, localsize, false))
return false;
}
else
{
/*
stage1_without_sobel:
Calc magnitudes
Non maxima suppression
Double thresholding
*/
double scale = 1.0;
if (aperture_size == 7)
{
scale = 1 / 16.0;
}
UMat dx, dy;
if (!useCustomDeriv)
{
Sobel(_src, dx, CV_16S, 1, 0, aperture_size, scale, 0, BORDER_REPLICATE);
Sobel(_src, dy, CV_16S, 0, 1, aperture_size, scale, 0, BORDER_REPLICATE);
}
else
{
dx = dx_;
dy = dy_;
}
//......... the rest of the code is omitted here .........
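The OpenCL kernel above is an internal stage of cv::Canny; from user code the whole pipeline is a single call, as in this sketch with made-up thresholds and file name:
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

static void cannyDemo()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);  // made-up file name
    cv::Mat edges;
    cv::Canny(gray, edges, 50, 150, 3, false);   // low/high thresholds, 3x3 aperture, L1 gradient
}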
Example 7: CamShift
cv::RotatedRect cv::CamShift( InputArray _probImage, Rect& window,
TermCriteria criteria )
{
CV_INSTRUMENT_REGION()
const int TOLERANCE = 10;
Size size;
Mat mat;
UMat umat;
bool isUMat = _probImage.isUMat();
if (isUMat)
umat = _probImage.getUMat(), size = umat.size();
else
mat = _probImage.getMat(), size = mat.size();
meanShift( _probImage, window, criteria );
window.x -= TOLERANCE;
if( window.x < 0 )
window.x = 0;
window.y -= TOLERANCE;
if( window.y < 0 )
window.y = 0;
window.width += 2 * TOLERANCE;
if( window.x + window.width > size.width )
window.width = size.width - window.x;
window.height += 2 * TOLERANCE;
if( window.y + window.height > size.height )
window.height = size.height - window.y;
// Calculating moments in new center mass
Moments m = isUMat ? moments(umat(window)) : moments(mat(window));
double m00 = m.m00, m10 = m.m10, m01 = m.m01;
double mu11 = m.mu11, mu20 = m.mu20, mu02 = m.mu02;
if( fabs(m00) < DBL_EPSILON )
return RotatedRect();
double inv_m00 = 1. / m00;
int xc = cvRound( m10 * inv_m00 + window.x );
int yc = cvRound( m01 * inv_m00 + window.y );
double a = mu20 * inv_m00, b = mu11 * inv_m00, c = mu02 * inv_m00;
// Calculating width & height
double square = std::sqrt( 4 * b * b + (a - c) * (a - c) );
// Calculating orientation
double theta = atan2( 2 * b, a - c + square );
// Calculating width & length of figure
double cs = cos( theta );
double sn = sin( theta );
double rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02;
double rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02;
double length = std::sqrt( rotate_a * inv_m00 ) * 4;
double width = std::sqrt( rotate_c * inv_m00 ) * 4;
// In the case when theta is 0 or 1.57... the length & width may be exchanged
if( length < width )
{
std::swap( length, width );
std::swap( cs, sn );
theta = CV_PI*0.5 - theta;
}
// Saving results
int _xc = cvRound( xc );
int _yc = cvRound( yc );
int t0 = cvRound( fabs( length * cs ));
int t1 = cvRound( fabs( width * sn ));
t0 = MAX( t0, t1 ) + 2;
window.width = MIN( t0, (size.width - _xc) * 2 );
t0 = cvRound( fabs( length * sn ));
t1 = cvRound( fabs( width * cs ));
t0 = MAX( t0, t1 ) + 2;
window.height = MIN( t0, (size.height - _yc) * 2 );
window.x = MAX( 0, _xc - window.width / 2 );
window.y = MAX( 0, _yc - window.height / 2 );
window.width = MIN( size.width - window.x, window.width );
window.height = MIN( size.height - window.y, window.height );
RotatedRect box;
box.size.height = (float)length;
box.size.width = (float)width;
box.angle = (float)((CV_PI*0.5+theta)*180./CV_PI);
while(box.angle < 0)
box.angle += 360;
while(box.angle >= 360)
//......... the rest of the code is omitted here .........
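CamShift expects a back-projection (probability) image, typically produced by calcBackProject; a minimal sketch with a made-up initial window is:
#include <opencv2/video/tracking.hpp>

static cv::RotatedRect camshiftDemo(const cv::Mat& backproj)   // assumed CV_8U back-projection
{
    cv::Rect window(100, 100, 80, 80);                          // made-up initial search window
    return cv::CamShift(backproj, window,
        cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1.0));
}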
示例8: float
/*******************************************************************************
* Function: getOtsuThreshold
* Description: computes the threshold using Otsu's method
* Arguments:
lowerVal - lower bound of pixel value
upperVal - upper bound of pixel value
u1Ptr - pointer to receive the mean of class 1
roiMask - ROI binary mask
* Returns: int - Otsu threshold
* Comments:
* Revision:
*******************************************************************************/
inline int
FGExtraction::getOtsuThreshold(int lowerVal, int upperVal, int* u1Ptr, InputArray roiMask)
{
Mat _roiMask = roiMask.getMat();
int channels[] = {0};
int nbins = 256;
const int histSize[] = {nbins};
float range[] = {0, 255};
const float* ranges[] = {range};
Mat hist;
cv::calcHist(&_inImg, 1, channels, roiMask, hist, 1, histSize, ranges);
Mat_<float> hist_(hist);
float size = float(sum(hist)[0]);
float w1, w2, u1, u2;
float max = -1;
int index = 1;
float u1max = -1;
float histMax = 0;
int mode = 0;
float count = 0;
for (int i = lowerVal+1; i < upperVal; ++i){
if(hist_(i,0) > histMax) {
histMax = hist_(i,0);
mode = i;
}
w1 = 0;
for (int j = lowerVal+1; j <= i; ++j){
w1 = w1 + hist_(j-1,0);
}
w1 = w1 / size;
w2 = 1 - w1;
u1 = 0;
count = 0;
for (int j = lowerVal; j <= i-1; ++j){
u1 = u1 + j*hist_(j,0);
count += hist_(j,0);
}
u1 /= count;
u2 = 0;
count = 0;
for (int j = i; j <= upperVal; ++j){
u2 = u2 + j*hist_(j, 0);
count += hist_(j, 0);
}
u2 /= count;
if (w1 * w2 * (u1-u2) * (u1-u2) > max){
max = w1 * w2 * (u1-u2) * (u1-u2);
index = i;
u1max = u1;
}
else{
max = max;
index = index;
}
}
//cout << "mode = " << mode << endl;
//cout << "u1 = " << u1max << "; index = " << index << "; ";
*u1Ptr = (int)(u1max + 0.5);
return index;
}
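This helper restricts the Otsu search to a value range and an ROI; when no such restriction is needed, OpenCV's built-in Otsu thresholding can be used instead, as in this sketch:
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

static void otsuDemo()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);   // made-up file name
    cv::Mat binary;
    double t = cv::threshold(gray, binary, 0, 255,
                             cv::THRESH_BINARY | cv::THRESH_OTSU);  // returns the chosen threshold
    (void)t;
}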
Example 9: Update
void SAGMMBuilder::Update(InputArray frame, OutputArray mask)
{
Mat Image = frame.getMat();
Mat Foreground(Image.size(),CV_8U,Scalar::all(0));
Mat FilteredImage;
// Initialize temporal-spatial filter.
if (frame_counter < filter->getTemporalWindow()) {
if (frame_counter == 0) {
// Initialize in zero three channels of img kernel.
filter->initializeFirstImage(Image);
}
//Apply filter and puts result in FilteredImage.
//Note this filter also keeps internal copy of filter result.
filter->SpatioTemporalPreprocessing(Image, FilteredImage);
Foreground.copyTo(mask);
frame_counter += 1;
if ( frame_counter == filter->getTemporalWindow() ) {
// Initialize model
model->initializeModel(FilteredImage);
model->getBackground(Background);
}
return;
}
//Applies spatial and temporal filter
//note this filter return a Mat CV_32FC3 type.
filter->SpatioTemporalPreprocessing(Image, FilteredImage);
//Global illumination changing factor 'g' between reference image ir and current image ic.
double globalIlluminationFactor = factor->getIlluminationFactor(FilteredImage,Background);
//Calling background subtraction algorithm.
model->operator()(FilteredImage, Foreground, update_bg_model ? -1 : 0, globalIlluminationFactor);
// background to calculate illumination next iteration.
model->getBackground(Background);
// Applying morphological filter, Erode the image
Mat Eroded;
if (ApplyMorphologicalFilter) {
Mat Element(2,2,CV_8U,Scalar(1));
//erode(Mask,Eroded,Mat());
erode(Foreground,Eroded,Element);
Eroded.copyTo(mask);
}
else {
// return mask
Foreground.copyTo(mask);
}
frame_counter += 1;
}
Example 10: computeRawCornerMat
void FeatureShiCorner::computeRawCornerMat( InputArray _image, OutputArray _corner )
{
// TODO check: _corner must be CV_32SC1
const Mat image = _image.getMat();
const int height = image.rows;
const int width = image.cols;
const int radius = 1;
Mat derX( height, width, CV_32SC1, Scalar( 0 ) );
Mat derY( height, width, CV_32SC1, Scalar( 0 ) );
Mat Mx2( height, width, CV_32SC1, Scalar( 0 ) );
Mat My2( height, width, CV_32SC1, Scalar( 0 ) );
Mat Mxy( height, width, CV_32SC1, Scalar( 0 ) );
applyFilter< uchar, int32_t >( _image, derX, &filter_derX[0][0], 3, 1, 0, true );
applyFilter< uchar, int32_t >( _image, derY, &filter_derY[0][0], 1, 3, 0, true );
int normDivisor = 0;
const int * pGauss = &FeatureShiCorner::filter_gauss[0][0];
int const * pGaussE = pGauss + 9;
for(; pGauss != pGaussE; pGauss++ )
{
normDivisor += abs( *pGauss );
}
int32_t maxVal = 0;
for( int y = 0; y < height; y++ )
{
for( int x = 0; x < width; x++ )
{
for( int dy = -radius; dy <= radius; dy++ )
{
for( int dx = -radius; dx <= radius; dx++ )
{
int fx = x + dx;
if( (fx < 0) || (fx >= width) ) { continue; }
int fy = y + dy;
if( (fy < 0) || (fy >= height) ) { continue; }
int f = FeatureShiCorner::filter_gauss[(radius + dx)][(radius + dy)];
Mx2.at< int32_t >( y, x ) += int32_t( f * pow( derX.at< int32_t >( fy, fx ), 2 ) );
My2.at< int32_t >( y, x ) += int32_t( f * pow( derY.at< int32_t >( fy, fx ), 2 ) );
Mxy.at< int32_t >( y, x ) += int32_t( f * derX.at< int32_t >( fy, fx ) * derY.at< int >( fy, fx ) );
}
}
Mx2.at< int32_t >( y, x ) /= normDivisor;
My2.at< int32_t >( y, x ) /= normDivisor;
Mxy.at< int32_t >( y, x ) /= normDivisor;
maxVal = max( Mx2.at< int32_t >( y, x ), maxVal );
maxVal = max( My2.at< int32_t >( y, x ), maxVal );
maxVal = max( Mxy.at< int32_t >( y, x ), maxVal );
}
}
Mat corners = _corner.getMat();
const auto it_cE = corners.end< int32_t >();
auto it_cS = corners.begin< int32_t >();
auto it_Mx2S = Mx2.begin< int32_t >();
auto it_My2S = My2.begin< int32_t >();
auto it_MxyS = Mxy.begin< int32_t >();
// reduce to high values if necessary
// maxval: 0..1 * 255^2, maxval^2 should not overflow for the next step
// reduce to sqrt( 2^31-1 (signed int) ) -> 46340
const int maxValC = 46340;
if( maxVal > maxValC )
{
cout << "maxVal > maxValC | maxVal: " << maxVal << endl;
const double scaleFac = maxValC / (double) maxVal; // scaleFac = 0.xxxx
while( it_cS != it_cE )
{
// scale each value by scaleFac (casting scaleFac itself to int32_t would truncate it to 0)
*it_cS = int32_t( *it_cS * scaleFac );
*it_Mx2S = int32_t( *it_Mx2S * scaleFac );
*it_My2S = int32_t( *it_My2S * scaleFac );
*it_MxyS = int32_t( *it_MxyS * scaleFac );
it_cS++;
it_Mx2S++;
it_My2S++;
it_MxyS++;
}
// reset iterators
it_cS = corners.begin< int32_t >();
it_Mx2S = Mx2.begin< int32_t >();
it_My2S = My2.begin< int32_t >();
it_MxyS = Mxy.begin< int32_t >();
}
maxVal = 0;
// calc eigenvalues
int32_t trc, det;
double ev_sqrt, trc_halve, eigVal1, eigVal2;
//......... the rest of the code is omitted here .........
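The response computed above appears to be the Shi-Tomasi corner measure; OpenCV's built-in counterpart is cv::goodFeaturesToTrack, sketched here with made-up parameters:
#include <vector>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

static void shiTomasiDemo()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);   // made-up file name
    std::vector<cv::Point2f> corners;
    cv::goodFeaturesToTrack(gray, corners, 100, 0.01, 10);          // Shi-Tomasi measure by default
}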
Example 11: guiAlphaBlend
void guiAlphaBlend(InputArray src1_, InputArray src2_)
{
Mat src1 = src1_.getMat();
Mat src2 = src2_.getMat();
Mat s1,s2;
if(src1.depth()==CV_8U || src1.depth()==CV_32F)
{
if(src1.channels()==1)cvtColor(src1,s1,CV_GRAY2BGR);
else s1 = src1;
if(src2.channels()==1)cvtColor(src2,s2,CV_GRAY2BGR);
else s2 = src2;
}
else
{
Mat ss1,ss2;
src1.convertTo(ss1,CV_32F);
src2.convertTo(ss2,CV_32F);
if(src1.channels()==1)cvtColor(ss1,s1,CV_GRAY2BGR);
else s1 = ss1.clone();
if(src2.channels()==1)cvtColor(ss2,s2,CV_GRAY2BGR);
else s2 = ss2.clone();
}
namedWindow("alphaBlend");
int a = 0;
createTrackbar("a","alphaBlend",&a,100);
int key = 0;
Mat show;
while(key!='q')
{
addWeighted(s1,1.0-a/100.0,s2,a/100.0,0.0,show);
if(show.depth()==CV_8U)
imshow("alphaBlend",show);
else
{
double minv,maxv;
minMaxLoc(show, &minv, &maxv);
Mat s;
if(maxv<=255)
show.convertTo(s,CV_8U);
else
show.convertTo(s,CV_8U,255/maxv);
imshow("alphaBlend",s);
}
key = waitKey(1);
if(key=='f')
{
a = (a > 0) ? 0 : 100;
setTrackbarPos("a","alphaBlend",a);
}
if(key=='i')
{
showMatInfo(src1,"========src1========");
cout<<endl;
showMatInfo(src2,"========src2========");
}
}
destroyWindow("alphaBlend");
}
Example 12: ToUMat
inline UMat ToUMat(InputArray src)
{
UMat dst;
src.getMat().copyTo(dst);
return dst;
}
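Since _InputArray also exposes getUMat(), the round trip through a temporary Mat can be avoided; a possible alternative, not part of the original code, is:
// Hypothetical variant of the helper above.
inline cv::UMat ToUMatDirect(cv::InputArray src)
{
    return src.getUMat().clone();   // clone so the returned UMat owns its own data
}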
Example 13: checkNormRelative
static inline double checkNormRelative(InputArray m1, InputArray m2, InputArray mask = noArray())
{
return cvtest::norm(m1.getMat(), m2.getMat(), cv::NORM_INF, mask) /
std::max((double)std::numeric_limits<float>::epsilon(),
(double)std::max(cvtest::norm(m1.getMat(), cv::NORM_INF), cvtest::norm(m2.getMat(), cv::NORM_INF)));
}
Example 14: imdecode
Mat imdecode( InputArray _buf, int flags )
{
Mat buf = _buf.getMat(), img;
imdecode_( buf, flags, LOAD_MAT, &img );
return img;
}
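imdecode reads an image from an in-memory buffer rather than from a file; a small usage sketch, with a made-up file name standing in for data received over the network, is:
#include <fstream>
#include <iterator>
#include <vector>
#include <opencv2/imgcodecs.hpp>

static cv::Mat imdecodeDemo()
{
    std::ifstream f("photo.jpg", std::ios::binary);                  // made-up file name
    std::vector<uchar> buf((std::istreambuf_iterator<char>(f)),
                            std::istreambuf_iterator<char>());
    return cv::imdecode(buf, cv::IMREAD_COLOR);                      // empty Mat when decoding fails
}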
Example 15: FAST_t
void FAST_t(InputArray _img, std::vector<KeyPoint>& keypoints, int threshold, bool nonmax_suppression)
{
Mat img = _img.getMat();
const int K = patternSize/2, N = patternSize + K + 1;
#if CV_SSE2
const int quarterPatternSize = patternSize/4;
(void)quarterPatternSize;
#endif
int i, j, k, pixel[25];
makeOffsets(pixel, (int)img.step, patternSize);
keypoints.clear();
threshold = std::min(std::max(threshold, 0), 255);
#if CV_SSE2
__m128i delta = _mm_set1_epi8(-128), t = _mm_set1_epi8((char)threshold), K16 = _mm_set1_epi8((char)K);
(void)K16;
(void)delta;
(void)t;
#endif
uchar threshold_tab[512];
for( i = -255; i <= 255; i++ )
threshold_tab[i+255] = (uchar)(i < -threshold ? 1 : i > threshold ? 2 : 0);
AutoBuffer<uchar> _buf((img.cols+16)*3*(sizeof(int) + sizeof(uchar)) + 128);
uchar* buf[3];
buf[0] = _buf; buf[1] = buf[0] + img.cols; buf[2] = buf[1] + img.cols;
int* cpbuf[3];
cpbuf[0] = (int*)alignPtr(buf[2] + img.cols, sizeof(int)) + 1;
cpbuf[1] = cpbuf[0] + img.cols + 1;
cpbuf[2] = cpbuf[1] + img.cols + 1;
memset(buf[0], 0, img.cols*3);
for(i = 3; i < img.rows-2; i++)
{
const uchar* ptr = img.ptr<uchar>(i) + 3;
uchar* curr = buf[(i - 3)%3];
int* cornerpos = cpbuf[(i - 3)%3];
memset(curr, 0, img.cols);
int ncorners = 0;
if( i < img.rows - 3 )
{
j = 3;
#if CV_SSE2
if( patternSize == 16 )
{
for(; j < img.cols - 16 - 3; j += 16, ptr += 16)
{
__m128i m0, m1;
__m128i v0 = _mm_loadu_si128((const __m128i*)ptr);
__m128i v1 = _mm_xor_si128(_mm_subs_epu8(v0, t), delta);
v0 = _mm_xor_si128(_mm_adds_epu8(v0, t), delta);
__m128i x0 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[0])), delta);
__m128i x1 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[quarterPatternSize])), delta);
__m128i x2 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[2*quarterPatternSize])), delta);
__m128i x3 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[3*quarterPatternSize])), delta);
m0 = _mm_and_si128(_mm_cmpgt_epi8(x0, v0), _mm_cmpgt_epi8(x1, v0));
m1 = _mm_and_si128(_mm_cmpgt_epi8(v1, x0), _mm_cmpgt_epi8(v1, x1));
m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x1, v0), _mm_cmpgt_epi8(x2, v0)));
m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x1), _mm_cmpgt_epi8(v1, x2)));
m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x2, v0), _mm_cmpgt_epi8(x3, v0)));
m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x2), _mm_cmpgt_epi8(v1, x3)));
m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x3, v0), _mm_cmpgt_epi8(x0, v0)));
m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x3), _mm_cmpgt_epi8(v1, x0)));
m0 = _mm_or_si128(m0, m1);
int mask = _mm_movemask_epi8(m0);
if( mask == 0 )
continue;
if( (mask & 255) == 0 )
{
j -= 8;
ptr -= 8;
continue;
}
__m128i c0 = _mm_setzero_si128(), c1 = c0, max0 = c0, max1 = c0;
for( k = 0; k < N; k++ )
{
__m128i x = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(ptr + pixel[k])), delta);
m0 = _mm_cmpgt_epi8(x, v0);
m1 = _mm_cmpgt_epi8(v1, x);
c0 = _mm_and_si128(_mm_sub_epi8(c0, m0), m0);
c1 = _mm_and_si128(_mm_sub_epi8(c1, m1), m1);
max0 = _mm_max_epu8(max0, c0);
max1 = _mm_max_epu8(max1, c1);
}
max0 = _mm_max_epu8(max0, max1);
int m = _mm_movemask_epi8(_mm_cmpgt_epi8(max0, K16));
for( k = 0; m > 0 && k < 16; k++, m >>= 1 )
if(m & 1)
{
cornerpos[ncorners++] = j+k;
if(nonmax_suppression)
//......... the rest of the code is omitted here .........
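FAST_t appears to be the templated worker behind the public FAST detector; application code normally calls cv::FAST directly, as in this sketch:
#include <vector>
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>

static void fastDemo()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);   // made-up file name
    std::vector<cv::KeyPoint> keypoints;
    cv::FAST(gray, keypoints, 20, true);                            // threshold = 20, non-max suppression on
}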