This article collects typical usage examples of the C++ method cv::InputArray::getMat. If you are struggling with questions such as: What is InputArray::getMat used for? How do I call it? Where can I find examples of InputArray::getMat? Then the curated code examples here may help. You can also explore further usage examples of the containing class, cv::InputArray.
Below are 15 code examples of the InputArray::getMat method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
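Before the examples, here is a minimal sketch of the pattern they all share (scaleBy is a made-up placeholder name, not from any example below): a function accepts cv::InputArray so callers can pass cv::Mat, cv::UMat, std::vector and so on, and getMat() returns a cv::Mat header over the caller's data without copying pixels.

#include <opencv2/core.hpp>

void scaleBy(cv::InputArray _src, cv::OutputArray _dst, double factor)
{
    cv::Mat src = _src.getMat();            // lightweight header, no pixel copy
    _dst.create(src.size(), src.type());    // allocate the output if needed
    cv::Mat dst = _dst.getMat();
    src.convertTo(dst, src.type(), factor); // dst = src * factor
}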
Example 1: write
void Regression::write(cv::InputArray array)
{
    write() << "kind" << array.kind();
    write() << "type" << array.type();
    if (isVector(array))
    {
        int total = (int)array.total();
        int idx = regRNG.uniform(0, total);
        write() << "len" << total;
        write() << "idx" << idx;

        cv::Mat m = array.getMat(idx);

        if (m.total() * m.channels() < 26) // 5x5 or smaller
            write() << "val" << m;
        else
            write(m);
    }
    else
    {
        if (array.total() * array.channels() < 26) // 5x5 or smaller
            write() << "val" << array.getMat();
        else
            write(array.getMat());
    }
}
Example 2: illuminationChange
void illuminationChange(cv::InputArray _src,
                        cv::InputArray _mask,
                        cv::OutputArray _dst,
                        float a,
                        float b)
{
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();

    float alpha = a;
    float beta = b;

    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if (mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;

    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);

    Cloning obj;
    obj.illum_change(src, cs_mask, gray, blend, alpha, beta);
}
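This example appears to come from OpenCV's seamless-cloning code in the photo module. A hedged usage sketch, assuming photo.jpg and a single-channel mask mask.png exist on disk (the alpha/beta values 0.2 and 0.4 are plausible illustrative choices, not taken from the example above):

#include <opencv2/imgcodecs.hpp>

cv::Mat src = cv::imread("photo.jpg");                       // BGR source
cv::Mat mask = cv::imread("mask.png", cv::IMREAD_GRAYSCALE); // region to relight
cv::Mat result;
illuminationChange(src, mask, result, 0.2f, 0.4f);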
Example 3: stereo_disparity_normal
void stereo_disparity_normal(cv::InputArray left_image, cv::InputArray right_image, cv::OutputArray disp_,
                             int max_dis_level, int scale, float sigma) {
    cv::Mat imL = left_image.getMat();
    cv::Mat imR = right_image.getMat();

    CV_Assert(imL.size() == imR.size());
    CV_Assert(imL.type() == CV_8UC3 && imR.type() == CV_8UC3);

    cv::Size imageSize = imL.size();
    disp_.create(imageSize, CV_8U);
    cv::Mat disp = disp_.getMat();

    CDisparityHelper dispHelper;

    // step 1: cost initialization
    cv::Mat costVol = dispHelper.GetMatchingCost(imL, imR, max_dis_level);

    // step 2: cost aggregation
    CSegmentTree stree;
    CColorWeight cWeight(imL);
    stree.BuildSegmentTree(imL.size(), sigma, TAU, cWeight);
    stree.Filter(costVol, max_dis_level);

    // step 3: disparity computation
    cv::Mat disparity = dispHelper.GetDisparity_WTA((float*)costVol.data,
        imageSize.width, imageSize.height, max_dis_level);

    MeanFilter(disparity, disparity, 3);
    disparity *= scale;
    disparity.copyTo(disp);
}
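A hedged usage sketch; the helper types above (CDisparityHelper, CSegmentTree, TAU, MeanFilter) belong to the segment-tree stereo project this example was taken from, and the parameter values below (64 disparity levels, scale 4, sigma 0.1) are illustrative assumptions:

cv::Mat left = cv::imread("left.png");   // rectified CV_8UC3 pair
cv::Mat right = cv::imread("right.png");
cv::Mat disparity;
stereo_disparity_normal(left, right, disparity, 64, 4, 0.1f);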
Example 4: colorChange
void colorChange(cv::InputArray _src,
                 cv::InputArray _mask,
                 cv::OutputArray _dst,
                 float r,
                 float g,
                 float b)
{
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();

    float red = r;
    float green = g;
    float blue = b;

    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if (mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;

    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);

    Cloning obj;
    obj.local_color_change(src, cs_mask, gray, blend, red, green, blue);
}
Example 5: textureFlattening
void textureFlattening(cv::InputArray _src,
                       cv::InputArray _mask,
                       cv::OutputArray _dst,
                       double low_threshold,
                       double high_threshold,
                       int kernel_size)
{
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();

    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if (mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;

    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);

    Cloning obj;
    obj.texture_flatten(src, cs_mask, gray, low_threshold, high_threshold, kernel_size, blend);
}
Example 6: globalMatting
void globalMatting(cv::InputArray _image, cv::InputArray _trimap, cv::OutputArray _foreground, cv::OutputArray _alpha, cv::OutputArray _conf)
{
    cv::Mat image = _image.getMat();
    cv::Mat trimap = _trimap.getMat();

    if (image.empty())
        CV_Error(CV_StsBadArg, "image is empty");
    if (image.type() != CV_8UC3)
        CV_Error(CV_StsBadArg, "image must have CV_8UC3 type");

    if (trimap.empty())
        CV_Error(CV_StsBadArg, "trimap is empty");
    if (trimap.type() != CV_8UC1)
        CV_Error(CV_StsBadArg, "trimap must have CV_8UC1 type");

    if (image.size() != trimap.size())
        CV_Error(CV_StsBadArg, "image and trimap must have the same size");

    cv::Mat &foreground = _foreground.getMatRef();
    cv::Mat &alpha = _alpha.getMatRef();
    cv::Mat tempConf;

    globalMattingHelper(image, trimap, foreground, alpha, tempConf);

    if (_conf.needed())
        tempConf.copyTo(_conf);
}
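A usage sketch with assumed file names; the trimap is the usual three-level map (0 = background, 255 = foreground, anything else = unknown), and the confidence output can be skipped by passing cv::noArray():

cv::Mat image = cv::imread("input.png");                         // CV_8UC3
cv::Mat trimap = cv::imread("trimap.png", cv::IMREAD_GRAYSCALE); // CV_8UC1
cv::Mat foreground, alpha;
globalMatting(image, trimap, foreground, alpha, cv::noArray());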
Example 7:
FilterCall::FilterCall(cv::InputArray in, cv::InputArray out,
                       impl::CallMetaData data, QString type,
                       QString description, QString requestedView)
    : Call{ data, std::move(type),
            std::move(description), std::move(requestedView) },
      input_{ in.getMat().clone() }, output_{ out.getMat().clone() }
{
}
Example 8: sadTemplate
void sadTemplate(cv::InputArray tar, cv::InputArray tmp, cv::OutputArray res, int *minx, int *miny){
    // receive the input arguments as Mat
    cv::Mat tarM = tar.getMat();
    cv::Mat tmpM = tmp.getMat();
    cv::Mat resM = res.getMat();

    // the position with the minimum SAD is the best match
    int minsad = std::numeric_limits<int>::max();
    int sad = 0;             // SAD of the current window
    int diff;                // working variable before adding to sad
    int tarx = 0, tary = 0;  // target x/y coordinates (initialized in case no window is evaluated)

    for(int y = 0; y < tarM.rows - tmpM.rows; y++){
        for(int x = 0; x < tarM.cols - tmpM.cols; x++){
            sad = 0; // reset before evaluating the next window

            // scan the template
            for(int yt = 0; yt < tmpM.rows; yt++){
                for(int xt = 0; xt < tmpM.cols; xt++){
                    diff = (int)(tarM.at<uchar>(y+yt, x+xt) - tmpM.at<uchar>(yt, xt));
                    if(diff < 0){ // take the absolute value
                        diff = -diff;
                    }
                    sad += diff;

                    // sequential similarity detection (SSDA): abort once sad exceeds the current minimum
                    if(sad > minsad){
                        yt = tmpM.rows;
                        break;
                    }
                }
            }

            // if this window's SAD is the smallest so far
            if(sad < minsad){
                minsad = sad; // update the minimum
                // store the target coordinates
                tarx = x;
                tary = y;
            }
        }
    }

    // write the result image: black at the match position, white elsewhere
    for(int y = 0; y < resM.rows; y++){
        for(int x = 0; x < resM.cols; x++){
            if(x == tarx && y == tary){
                resM.at<uchar>(y, x) = (uchar)0;
            }else{
                resM.at<uchar>(y, x) = (uchar)255;
            }
        }
    }

    std::cout << "minimum SAD = " << minsad << std::endl;
    std::cout << "match position = [" << tarx << ", " << tary << "]" << std::endl;

    *minx = tarx;
    *miny = tary;
}
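Note that res is read with res.getMat() but never allocated inside the function, so the caller must preallocate it to the target image's size. A hedged usage sketch with assumed grayscale file names:

cv::Mat target = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
cv::Mat templ = cv::imread("patch.png", cv::IMREAD_GRAYSCALE);
cv::Mat result(target.size(), CV_8UC1); // must be preallocated
int mx = 0, my = 0;
sadTemplate(target, templ, result, &mx, &my);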
Example 9: stereoMatching
void stereo::stereoMatching(cv::InputArray _recImage1, cv::InputArray _recImage2, cv::OutputArray _disparityMap, int minDisparity, int numDisparities, int SADWindowSize, int P1, int P2)
{
    Mat img1 = _recImage1.getMat();
    Mat img2 = _recImage2.getMat();

    _disparityMap.create(img1.size(), CV_16S);
    Mat dis = _disparityMap.getMat();

    StereoSGBM matcher(minDisparity, numDisparities, SADWindowSize, P1, P2);
    matcher(img1, img2, dis);
    dis = dis / 16.0;
}
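The StereoSGBM constructor-and-functor usage here is the OpenCV 2.x API. A rough OpenCV 3.x+ equivalent is sketched below (the same applies to Example 14 further down):

cv::Ptr<cv::StereoSGBM> matcher =
    cv::StereoSGBM::create(minDisparity, numDisparities, SADWindowSize, P1, P2);
matcher->compute(img1, img2, dis);
dis = dis / 16.0; // SGBM returns fixed-point disparities scaled by 16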
Example 10: testFunction
void testFunction(cv::InputArray ip, cv::InputArray op) {
    cv::Mat img = ip.getMat();
    cv::Mat obj = op.getMat();
    printMatrix(img);
    printMatrix(obj);
    // std::cerr << img.checkVector() << std::endl;
}
Example 11: assert
void IPPE::PoseSolver::solveGeneric(cv::InputArray _objectPoints, cv::InputArray _normalizedInputPoints,
                                    cv::OutputArray _Ma, cv::OutputArray _Mb)
{
    // argument checking:
    size_t n = _objectPoints.rows() * _objectPoints.cols(); // number of points
    int objType = _objectPoints.type();
    int type_input = _normalizedInputPoints.type();

    assert((objType == CV_32FC3) || (objType == CV_64FC3));
    assert((type_input == CV_32FC2) || (type_input == CV_64FC2));
    assert((_objectPoints.rows() == 1) || (_objectPoints.cols() == 1));
    assert((_objectPoints.rows() >= 4) || (_objectPoints.cols() >= 4));
    assert((_normalizedInputPoints.rows() == 1) || (_normalizedInputPoints.cols() == 1));
    assert(static_cast<size_t>(_normalizedInputPoints.rows() * _normalizedInputPoints.cols()) == n);

    cv::Mat normalizedInputPoints;
    if (type_input == CV_32FC2) {
        _normalizedInputPoints.getMat().convertTo(normalizedInputPoints, CV_64FC2);
    }
    else {
        normalizedInputPoints = _normalizedInputPoints.getMat();
    }

    cv::Mat objectInputPoints;
    if (objType == CV_32FC3) {
        _objectPoints.getMat().convertTo(objectInputPoints, CV_64FC3);
    }
    else {
        objectInputPoints = _objectPoints.getMat();
    }

    cv::Mat canonicalObjPoints;
    cv::Mat MmodelPoints2Canonical;

    // transform object points to the canonical position (zero centred and on the plane z=0):
    makeCanonicalObjectPoints(objectInputPoints, canonicalObjPoints, MmodelPoints2Canonical);

    // compute the homography mapping the model's points to normalizedInputPoints
    cv::Mat H;
    HomographyHO::homographyHO(canonicalObjPoints, _normalizedInputPoints, H);

    // now solve
    cv::Mat MaCanon, MbCanon;
    solveCanonicalForm(canonicalObjPoints, normalizedInputPoints, H, MaCanon, MbCanon);

    // transform computed poses to account for canonical transform:
    cv::Mat Ma = MaCanon * MmodelPoints2Canonical;
    cv::Mat Mb = MbCanon * MmodelPoints2Canonical;

    // output poses:
    Ma.copyTo(_Ma);
    Mb.copyTo(_Mb);
}
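A heavily hedged call sketch, assuming IPPE::PoseSolver is default-constructible and this overload is public in the build being used. The point values are illustrative only: a unit square observed in normalized image coordinates (pixel points already mapped through the inverse camera matrix). Ma and Mb receive the two pose hypotheses of planar IPPE:

std::vector<cv::Point3d> objPts = { {-0.5, -0.5, 0}, {0.5, -0.5, 0},
                                    {0.5, 0.5, 0},   {-0.5, 0.5, 0} };
std::vector<cv::Point2d> normPts = { {-0.40, -0.40}, {0.41, -0.39},
                                     {0.40, 0.41},   {-0.39, 0.40} };
cv::Mat Ma, Mb;
IPPE::PoseSolver solver;
solver.solveGeneric(objPts, normPts, Ma, Mb);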
Example 12: stereoRectify
void stereo::stereoRectify(cv::InputArray _K1, cv::InputArray _K2, cv::InputArray _R, cv::InputArray _T,
                           cv::OutputArray _R1, cv::OutputArray _R2, cv::OutputArray _P1, cv::OutputArray _P2)
{
    Mat K1 = _K1.getMat(), K2 = _K2.getMat(), R = _R.getMat(), T = _T.getMat();

    _R1.create(3, 3, CV_32F);
    _R2.create(3, 3, CV_32F);
    Mat R1 = _R1.getMat();
    Mat R2 = _R2.getMat();

    _P1.create(3, 4, CV_32F);
    _P2.create(3, 4, CV_32F);
    Mat P1 = _P1.getMat();
    Mat P2 = _P2.getMat();

    if (K1.type() != CV_32F)
        K1.convertTo(K1, CV_32F);
    if (K2.type() != CV_32F)
        K2.convertTo(K2, CV_32F);
    if (R.type() != CV_32F)
        R.convertTo(R, CV_32F);
    if (T.type() != CV_32F)
        T.convertTo(T, CV_32F);
    if (T.rows != 3)
        T = T.t();

    // R and T are the transformation from the first to the second camera;
    // get the transformation from the second to the first camera
    Mat R_inv = R.t();
    Mat T_inv = -R.t() * T;

    Mat e1, e2, e3;
    e1 = T_inv.t() / norm(T_inv);
    /*Mat z = (Mat_<float>(1, 3) << 0.0,0.0,-1.0);
    e2 = e1.cross(z);
    e2 = e2 / norm(e2);*/
    e2 = (Mat_<float>(1, 3) << T_inv.at<float>(1) * -1, T_inv.at<float>(0), 0.0);
    e2 = e2 / (sqrt(e2.at<float>(0) * e2.at<float>(0) + e2.at<float>(1) * e2.at<float>(1)));
    e3 = e1.cross(e2);
    e3 = e3 / norm(e3);

    e1.copyTo(R1.row(0));
    e2.copyTo(R1.row(1));
    e3.copyTo(R1.row(2));
    R2 = R_inv * R1;

    P1.setTo(Scalar(0));
    R1.copyTo(P1.colRange(0, 3));
    P1 = K1 * P1;

    P2.setTo(Scalar(0));
    R2.copyTo(P2.colRange(0, 3));
    P2 = K2 * P2;
}
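A hedged usage sketch with synthetic inputs; the intrinsics and baseline below are made-up values, and R/T follow the convention stated in the comment above (transformation from the first camera to the second):

cv::Mat K1 = (cv::Mat_<float>(3, 3) << 700, 0, 320, 0, 700, 240, 0, 0, 1);
cv::Mat K2 = K1.clone();
cv::Mat R = cv::Mat::eye(3, 3, CV_32F);             // cameras parallel
cv::Mat T = (cv::Mat_<float>(3, 1) << -0.1f, 0, 0); // 10 cm horizontal baseline
cv::Mat R1, R2, P1, P2;
stereo::stereoRectify(K1, K2, R, T, R1, R2, P1, P2);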
Example 13: estimate_rigid_transform
cv::Mat flutter::estimate_rigid_transform(cv::InputArray src1, cv::InputArray src2,
                                          double ransac_good_ratio, double ransac_threshold)
{
    Mat M(2, 3, CV_64F), A = src1.getMat(), B = src2.getMat();
    CvMat matA = A, matB = B, matM = M;

    int err = estimate_rigid_transform_detail(&matA, &matB, &matM,
                                              ransac_good_ratio, ransac_threshold);
    if (err == 1) {
        return M;
    } else {
        return Mat();
    }
}
Example 14: StereoMatching
void StereoMatch::StereoMatching(cv::InputArray rec_image1, cv::InputArray rec_image2,
    cv::OutputArray disparity_map, int min_disparity, int num_disparities, int SAD_window_size,
    int P1, int P2)
{
    cv::Mat img1 = rec_image1.getMat();
    cv::Mat img2 = rec_image2.getMat();

    disparity_map.create(img1.size(), CV_16S);
    cv::Mat dis = disparity_map.getMat();

    cv::StereoSGBM matcher(min_disparity, num_disparities, SAD_window_size, P1, P2);
    matcher(img1, img2, dis);
    dis = dis / 16.0;
}
Example 15: setAnimation
void Picture::setAnimation(const AnimationEnum &animationEnum,
                           const cv::InputArray &startImg, const cv::InputArray &endImg)
{
    delete animation;

    Mat startImage = startImg.getMat();
    Mat endImage = endImg.getMat();
    if (!startImg.empty() && startImg.size() != this->size())
        cv::resize(startImg, startImage, this->size());
    if (!endImg.empty() && endImg.size() != this->size())
        cv::resize(endImg, endImage, this->size());

    animation = AnimationFactory::createAnimation(animationEnum, startImage, endImage);
}