This article collects typical usage examples of the C++ method cv::OutputArray::getMat. If you have been wondering what cv::OutputArray::getMat does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore further usage examples of its containing class, cv::OutputArray.
The following 15 code examples of OutputArray::getMat are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
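All fifteen examples share the same idiom: the function first calls create() on the cv::OutputArray to allocate (or validate) the destination, then calls getMat() to obtain a cv::Mat header through which it writes the result. A minimal self-contained sketch of that pattern (the function name and fill logic are illustrative, not taken from any example below):

#include <opencv2/core.hpp>

// Illustrative only: allocate the output, then write through the Mat header.
void fillWithGradient(cv::OutputArray _dst, cv::Size size) {
    _dst.create(size, CV_8UC1);    // allocates the underlying buffer if needed
    cv::Mat dst = _dst.getMat();   // a header referencing that buffer, not a copy
    for (int y = 0; y < dst.rows; ++y)
        dst.row(y).setTo(cv::Scalar(y % 256));
}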
Example 1: compute_derivative_kernels
/**
 * @brief Compute derivative kernels for sizes different than 3
 * @param _kx Horizontal kernel values
 * @param _ky Vertical kernel values
 * @param dx Derivative order in X-direction (horizontal)
 * @param dy Derivative order in Y-direction (vertical)
 * @param scale Scale factor, which determines the derivative kernel size
 */
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale) {
    int ksize = 3 + 2 * (scale - 1);
    // The standard Scharr kernel
    if (scale == 1) {
        getDerivKernels(_kx, _ky, dx, dy, 0, true, CV_32F);
        return;
    }
    _kx.create(ksize, 1, CV_32F, -1, true);
    _ky.create(ksize, 1, CV_32F, -1, true);
    Mat kx = _kx.getMat();
    Mat ky = _ky.getMat();
    float w = 10.0f / 3.0f;
    float norm = 1.0f / (2.0f*scale*(w + 2.0f));
    for (int k = 0; k < 2; k++) {
        Mat* kernel = k == 0 ? &kx : &ky;
        int order = k == 0 ? dx : dy;
        std::vector<float> kerI(ksize, 0.0f);
        if (order == 0) {
            kerI[0] = norm, kerI[ksize / 2] = w*norm, kerI[ksize - 1] = norm;
        }
        else if (order == 1) {
            kerI[0] = -1, kerI[ksize / 2] = 0, kerI[ksize - 1] = 1;
        }
        Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]);
        temp.copyTo(*kernel);
    }
}
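A minimal usage sketch for Example 1, assuming the function above is in scope and the OpenCV imgproc headers are included; the file name and scale value are illustrative:

cv::Mat kx, ky;
compute_derivative_kernels(kx, ky, 1, 0, 2);                  // first x-derivative at scale 2 -> 5-tap kernels
cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE);  // hypothetical input image
cv::Mat dxImg;
img.convertTo(img, CV_32F, 1.0 / 255.0);
cv::sepFilter2D(img, dxImg, CV_32F, kx, ky);                  // separable filtering with the custom kernels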
Example 2: stereoRectify
void stereo::stereoRectify(cv::InputArray _K1, cv::InputArray _K2, cv::InputArray _R, cv::InputArray _T,
                           cv::OutputArray _R1, cv::OutputArray _R2, cv::OutputArray _P1, cv::OutputArray _P2)
{
    Mat K1 = _K1.getMat(), K2 = _K2.getMat(), R = _R.getMat(), T = _T.getMat();
    _R1.create(3, 3, CV_32F);
    _R2.create(3, 3, CV_32F);
    Mat R1 = _R1.getMat();
    Mat R2 = _R2.getMat();
    _P1.create(3, 4, CV_32F);
    _P2.create(3, 4, CV_32F);
    Mat P1 = _P1.getMat();
    Mat P2 = _P2.getMat();
    if(K1.type()!=CV_32F)
        K1.convertTo(K1,CV_32F);
    if(K2.type()!=CV_32F)
        K2.convertTo(K2,CV_32F);
    if(R.type()!=CV_32F)
        R.convertTo(R,CV_32F);
    if(T.type()!=CV_32F)
        T.convertTo(T,CV_32F);
    if(T.rows != 3)
        T = T.t();
    // R and T give the transformation from the first camera to the second;
    // invert it to get the transformation from the second camera to the first.
    Mat R_inv = R.t();
    Mat T_inv = -R.t()*T;
    Mat e1, e2, e3;
    e1 = T_inv.t() / norm(T_inv);
    /*Mat z = (Mat_<float>(1, 3) << 0.0,0.0,-1.0);
    e2 = e1.cross(z);
    e2 = e2 / norm(e2);*/
    e2 = (Mat_<float>(1,3) << T_inv.at<float>(1)*-1, T_inv.at<float>(0), 0.0 );
    e2 = e2 / (sqrt(e2.at<float>(0)*e2.at<float>(0) + e2.at<float>(1)*e2.at<float>(1)));
    e3 = e1.cross(e2);
    e3 = e3 / norm(e3);
    e1.copyTo(R1.row(0));
    e2.copyTo(R1.row(1));
    e3.copyTo(R1.row(2));
    R2 = R_inv * R1;
    P1.setTo(Scalar(0));
    R1.copyTo(P1.colRange(0, 3));
    P1 = K1 * P1;
    P2.setTo(Scalar(0));
    R2.copyTo(P2.colRange(0, 3));
    P2 = K2 * P2;
}
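A usage sketch for Example 2 with hypothetical calibration values (identity rotation and a 10 cm horizontal baseline); real code would use calibrated intrinsics:

cv::Mat K1 = (cv::Mat_<float>(3, 3) << 700, 0, 320, 0, 700, 240, 0, 0, 1); // hypothetical intrinsics
cv::Mat K2 = K1.clone();
cv::Mat R = cv::Mat::eye(3, 3, CV_32F);             // second camera parallel to the first
cv::Mat T = (cv::Mat_<float>(3, 1) << -0.1f, 0, 0); // 10 cm baseline along x
cv::Mat R1, R2, P1, P2;
stereo::stereoRectify(K1, K2, R, T, R1, R2, P1, P2);
// R1/R2 rotate the cameras so epipolar lines become horizontal; P1/P2 are the new projection matrices.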
Example 3: getAverage
/**
 * Get the average image at the given altitude.
 * If the altitude is not located exactly at one of the altitude bins,
 * the average image is interpolated between any overlapping bins.
 * @param _dst the output image (zeros at the desired resolution)
 * @param alt the altitude the image was taken at
 * @param pitch the pitch of the vehicle
 * @param roll the roll of the vehicle
 */
void getAverage(cv::OutputArray _dst, double alt, double pitch, double roll) {
    Mat dst = _dst.getMat();
    int width = dst.size().width;
    int height = dst.size().height;
    Mat D = Mat::zeros(height, width, CV_32F);
    double width_m = width * cameras.pixel_sep;
    double height_m = height * cameras.pixel_sep;
    interp::distance_map(D, alt, pitch, roll, width_m, height_m, cameras.focal_length);
    // now discretize into slices
    int i = 0;
    Mat W = Mat::zeros(D.size(), CV_32F);
    while(cv::countNonZero(W) == 0) {
        interp::dist_weight(D, W, alt_step, i++);
    }
    while(cv::countNonZero(W) > 0) {
        Slice<int>* slice = getSlice(i);
        Mat sAverage;
        // get the slice average
        boost::mutex* mutex = slice->get_mutex();
        { // protect slice with mutex to prevent interleaved read/write operations
            boost::lock_guard<boost::mutex> lock(*mutex);
            sAverage = slice->getLightfield()->getAverage();
        }
        dst += sAverage.mul(W); // multiply by slice weight
        interp::dist_weight(D, W, alt_step, i++);
    }
}
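The interpolation described in the comment block of Example 3 reduces to a per-pixel weighted sum of slice averages. A standalone sketch of that blend with fabricated data (the sizes, values, and weights are illustrative only):

cv::Mat A0(480, 640, CV_32F, cv::Scalar(0.4f));  // average image of the lower altitude bin (fake data)
cv::Mat A1(480, 640, CV_32F, cv::Scalar(0.6f));  // average image of the upper altitude bin (fake data)
cv::Mat W0(480, 640, CV_32F, cv::Scalar(0.25f)); // per-pixel weight of the lower bin
cv::Mat W1;
cv::subtract(cv::Scalar::all(1.0), W0, W1);      // complementary weight so W0 + W1 == 1 per pixel
cv::Mat dst = A0.mul(W0) + A1.mul(W1);           // same accumulation as dst += sAverage.mul(W) above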
Example 4: stereo_disparity_normal
void stereo_disparity_normal(cv::InputArray left_image, cv::InputArray right_image, cv::OutputArray disp_,
                             int max_dis_level, int scale, float sigma) {
    cv::Mat imL = left_image.getMat();
    cv::Mat imR = right_image.getMat();
    CV_Assert(imL.size() == imR.size());
    CV_Assert(imL.type() == CV_8UC3 && imR.type() == CV_8UC3);
    cv::Size imageSize = imL.size();
    disp_.create(imageSize, CV_8U);
    cv::Mat disp = disp_.getMat();
    CDisparityHelper dispHelper;
    // step 1: cost initialization
    cv::Mat costVol = dispHelper.GetMatchingCost(imL, imR, max_dis_level);
    // step 2: cost aggregation
    CSegmentTree stree;
    CColorWeight cWeight(imL);
    stree.BuildSegmentTree(imL.size(), sigma, TAU, cWeight);
    stree.Filter(costVol, max_dis_level);
    // step 3: disparity computation
    cv::Mat disparity = dispHelper.GetDisparity_WTA((float*)costVol.data,
        imageSize.width, imageSize.height, max_dis_level);
    MeanFilter(disparity, disparity, 3);
    disparity *= scale;
    disparity.copyTo(disp);
}
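A usage sketch for Example 4, assuming the function and its helper classes (CDisparityHelper, CSegmentTree) are linked in; the file names and parameter values are illustrative:

cv::Mat left = cv::imread("left.png");   // hypothetical rectified stereo pair
cv::Mat right = cv::imread("right.png");
cv::Mat disp;
stereo_disparity_normal(left, right, disp, 64, 4, 0.1f); // 64 levels, x4 for display, sigma assumed
cv::imwrite("disparity.png", disp);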
Example 5: glAssert
void EdgeDetector_<ParallelUtils::eGLSL>::getLatestEdgeMask(cv::OutputArray _oLastEdgeMask) {
    _oLastEdgeMask.create(m_oFrameSize,CV_8UC1);
    cv::Mat oLastEdgeMask = _oLastEdgeMask.getMat();
    if(!GLImageProcAlgo::m_bFetchingOutput)
        glAssert(GLImageProcAlgo::setOutputFetching(true))
    GLImageProcAlgo::fetchLastOutput(oLastEdgeMask);
}
Example 6: textureFlattening
void textureFlattening(cv::InputArray _src,
                       cv::InputArray _mask,
                       cv::OutputArray _dst,
                       double low_threshold,
                       double high_threshold,
                       int kernel_size)
{
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();
    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if(mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;
    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);
    Cloning obj;
    obj.texture_flatten(src, cs_mask, gray, low_threshold, high_threshold, kernel_size, blend);
}
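This wrapper mirrors the textureFlattening API of OpenCV's photo module. A usage sketch with a hypothetical rectangular mask; the threshold values follow those commonly seen in OpenCV samples and are assumptions here:

cv::Mat src = cv::imread("face.jpg");  // hypothetical input
cv::Mat mask = cv::Mat::zeros(src.size(), CV_8UC3);
cv::rectangle(mask, cv::Rect(60, 60, 120, 120), cv::Scalar(255, 255, 255), cv::FILLED);
cv::Mat result;
textureFlattening(src, mask, result, 30, 45, 3); // Canny thresholds 30/45, 3x3 Sobel kernel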
Example 7: illuminationChange
void illuminationChange(cv::InputArray _src,
                        cv::InputArray _mask,
                        cv::OutputArray _dst,
                        float a,
                        float b)
{
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();
    float alpha = a;
    float beta = b;
    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if(mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;
    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);
    Cloning obj;
    obj.illum_change(src, cs_mask, gray, blend, alpha, beta);
}
Example 8: colorChange
void colorChange(cv::InputArray _src,
                 cv::InputArray _mask,
                 cv::OutputArray _dst,
                 float r,
                 float g,
                 float b)
{
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();
    float red = r;
    float green = g;
    float blue = b;
    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if(mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;
    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);
    Cloning obj;
    obj.local_color_change(src, cs_mask, gray, blend, red, green, blue);
}
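Examples 7 and 8 have the same structure as Example 6 and differ only in the parameters forwarded to the Cloning helper. A combined usage sketch, reusing the hypothetical src and mask from the sketch after Example 6; the parameter values are illustrative:

cv::Mat lit, recolored;
illuminationChange(src, mask, lit, 0.2f, 0.4f);      // alpha/beta, documented range roughly 0..2
colorChange(src, mask, recolored, 1.5f, 0.5f, 0.5f); // boost red, damp green/blue inside the mask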
Example 9: operator()
void BackgroundSubtractorMedian::operator()(cv::InputArray _image, cv::OutputArray _fgmask, double learningRate)
{
    framecount++;
    cv::Mat image = _image.getMat();
    if (image.channels() > 1) {
        cvtColor(image, image, CV_BGR2GRAY);
    }
    if (image.cols == 0 || image.rows == 0) {
        return;
    }
    _fgmask.create(image.size(), CV_8U);
    cv::Mat fgmask = _fgmask.getMat();
    if (!init)
    {
        init = true;
        bgmodel = cv::Mat(image.size(), CV_8U);
    }
    //printf("(%d,%d)(%d) ",image.cols,image.rows,image.type());
    //printf("(%d,%d)(%d)\n",bgmodel.cols,bgmodel.rows,bgmodel.type());
    // nudge the model one gray level toward the current frame (approximate median)
    cv::Mat cmpArr = cv::Mat(image.size(), CV_8U);
    cv::compare(image, bgmodel, cmpArr, CV_CMP_GT);
    cv::bitwise_and(cmpArr, 1, cmpArr);
    cv::add(bgmodel, cmpArr, bgmodel);
    cmpArr = cv::Mat(image.size(), CV_8U);
    cv::compare(image, bgmodel, cmpArr, CV_CMP_LT);
    cv::bitwise_and(cmpArr, 1, cmpArr);
    cv::subtract(bgmodel, cmpArr, bgmodel);
    cv::absdiff(image, bgmodel, fgmask);
    cv::threshold(fgmask, fgmask, fg_threshold, 255, CV_THRESH_TOZERO);
    cv::medianBlur(fgmask, fgmask, median_filter_level);
}
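Example 9 maintains an approximate running median: the background model is nudged one gray level up wherever the frame is brighter and one level down wherever it is darker, which converges toward the per-pixel temporal median. A hypothetical driver loop (the default constructor and member defaults are assumptions):

BackgroundSubtractorMedian bgsub; // assumed default constructor
cv::VideoCapture cap(0);
cv::Mat frame, fgmask;
while (cap.read(frame)) {
    bgsub(frame, fgmask, 0);      // learningRate is accepted but unused above
    cv::imshow("foreground", fgmask);
    if (cv::waitKey(30) == 27) break; // Esc quits
}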
Example 10: boostColor
int boostColor(cv::InputArray src, cv::OutputArray dst, float intensity)
{
    const int MAX_INTENSITY = 255;
    Mat srcImg = src.getMat();
    CV_Assert(srcImg.channels() == 3);
    CV_Assert(intensity >= 0.0f && intensity <= 1.0f);
    if (srcImg.type() != CV_8UC3)
    {
        srcImg.convertTo(srcImg, CV_8UC3);
    }
    Mat srcHls;
    cvtColor(srcImg, srcHls, CV_BGR2HLS);
    int intensityInt = intensity * MAX_INTENSITY;
    srcHls += Scalar(0, 0, intensityInt);   // third HLS channel is saturation
    cvtColor(srcHls, dst, CV_HLS2BGR);
    dst.getMat().convertTo(dst, srcImg.type());
    return 0;
}
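A usage sketch for Example 10; note that the Scalar added after the HLS conversion raises the third channel, i.e. saturation:

cv::Mat img = cv::imread("photo.jpg"); // hypothetical input
cv::Mat saturated;
boostColor(img, saturated, 0.3f);      // raise saturation by 30% of the 8-bit range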
Example 11: getLatestForegroundMask
void IBackgroundSubtractor_GLSL::getLatestForegroundMask(cv::OutputArray _oLastFGMask) {
    _oLastFGMask.create(m_oImgSize,CV_8UC1);
    cv::Mat oLastFGMask = _oLastFGMask.getMat();
    glAssert(GLImageProcAlgo::m_bFetchingOutput || GLImageProcAlgo::setOutputFetching(true))
    if(GLImageProcAlgo::m_nInternalFrameIdx>0)
        GLImageProcAlgo::fetchLastOutput(oLastFGMask);
    else
        oLastFGMask = cv::Scalar_<uchar>(0);
}
Example 12: sadTemplate
void sadTemplate(cv::InputArray tar, cv::InputArray tmp, cv::OutputArray res, int *minx, int *miny){
    // obtain Mat headers for the input arguments
    cv::Mat tarM = tar.getMat();
    cv::Mat tmpM = tmp.getMat();
    cv::Mat resM = res.getMat();
    // the position with the smallest SAD is the best match
    int minsad = std::numeric_limits<int>::max();
    int sad = 0;            // SAD of the current window
    int diff;               // per-pixel difference before accumulating into sad
    int tarx = 0, tary = 0; // coordinates of the best match (initialized in case no window fits)
    for(int y = 0; y < tarM.rows - tmpM.rows; y++){
        for(int x = 0; x < tarM.cols - tmpM.cols; x++){
            sad = 0; // reset before evaluating the next window
            // scan the template window
            for(int yt = 0; yt < tmpM.rows; yt++){
                for(int xt = 0; xt < tmpM.cols; xt++){
                    diff = (int)(tarM.at<uchar>(y+yt,x+xt) - tmpM.at<uchar>(yt,xt));
                    if(diff < 0){ // take the absolute value
                        diff = -diff;
                    }
                    sad += diff;
                    // sequential similarity detection: abort this window once the
                    // partial SAD already exceeds the current minimum
                    if(sad > minsad){
                        yt = tmpM.rows;
                        break;
                    }
                }
            }
            // if this window has the smallest SAD so far, remember it
            if(sad < minsad){
                minsad = sad; // update the minimum
                tarx = x;     // store the matched coordinates
                tary = y;
            }
        }
    }
    // write the result image: black at the match position, white elsewhere
    for(int y = 0; y < resM.rows; y++){
        for(int x = 0; x < resM.cols; x++){
            if(x == tarx && y == tary){
                resM.at<uchar>(y,x) = (uchar)0;
            }else{
                resM.at<uchar>(y,x) = (uchar)255;
            }
        }
    }
    std::cout << "min SAD = " << minsad << std::endl;
    std::cout << "match at [" << tarx << ", " << tary << "]" << std::endl;
    *minx = tarx;
    *miny = tary;
}
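Unlike most examples on this page, sadTemplate never calls res.create(), so the caller must pre-allocate the result image before the getMat() call. A usage sketch with hypothetical file names:

cv::Mat target = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);  // hypothetical images
cv::Mat tmpl = cv::imread("template.png", cv::IMREAD_GRAYSCALE);
cv::Mat result(target.size(), CV_8U); // pre-allocated: the function writes through getMat()
int mx = 0, my = 0;
sadTemplate(target, tmpl, result, &mx, &my);
std::cout << "best match at (" << mx << ", " << my << ")\n";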
Example 13:
bool OpenNI2Grabber::grabFrame(cv::OutputArray _color) {
    if (_color.kind() != cv::_InputArray::MAT)
        BOOST_THROW_EXCEPTION(GrabberException("Grabbing only into cv::Mat"));
    _color.create(p->color_image_resolution.height, p->color_image_resolution.width, CV_8UC3);
    cv::Mat color = _color.getMat();
    return p->grabFrame(color);
}
Example 14: if
void BackgroundSubtractor_<ParallelUtils::eGLSL>::getLatestForegroundMask(cv::OutputArray _oLastFGMask) {
    _oLastFGMask.create(m_oImgSize,CV_8UC1);
    cv::Mat oLastFGMask = _oLastFGMask.getMat();
    if(!GLImageProcAlgo::m_bFetchingOutput)
        glAssert(GLImageProcAlgo::setOutputFetching(true))
    else if(m_nFrameIdx>0)
        GLImageProcAlgo::fetchLastOutput(oLastFGMask);
    else
        oLastFGMask = cv::Scalar_<uchar>(0);
}
Example 15: getLatestEdgeMask
void IEdgeDetector_GLSL::getLatestEdgeMask(cv::OutputArray _oLastEdgeMask) {
    lvAssert_(GLImageProcAlgo::m_bGLInitialized,"algo must be initialized first");
    _oLastEdgeMask.create(GLImageProcAlgo::m_oFrameSize,CV_8UC1);
    cv::Mat oLastEdgeMask = _oLastEdgeMask.getMat();
    lvAssert_(GLImageProcAlgo::m_bFetchingOutput || GLImageProcAlgo::setOutputFetching(true),"algo not initialized with mat output support")
    if(GLImageProcAlgo::m_nInternalFrameIdx>0)
        GLImageProcAlgo::fetchLastOutput(oLastEdgeMask);
    else
        oLastEdgeMask = cv::Scalar_<uchar>(0);
}