本文整理汇总了C++中Mat::inv方法的典型用法代码示例。如果您正苦于以下问题:C++ Mat::inv方法的具体用法?C++ Mat::inv怎么用?C++ Mat::inv使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Mat
的用法示例。
在下文中一共展示了Mat::inv方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: doTranslation
void doTranslation(){
cout<<"doTranslation begin "<<endl;
//Mat m = (Mat_<float>(3,3) << 1.4572068353989741e+003, 0, 3.1950000000000000e+002,0, 1.4572068353989741e+003, 2.3950000000000000e+002,0, 0, 1);
cv::Mat tempMat, tempMat2;
double s, zConst = 0;
rotationMatrix.convertTo(rotationMatrix,CV_64FC1);
cameraMatrix.convertTo(cameraMatrix,CV_64FC1);
tempMat = rotationMatrix.inv() * cameraMatrix.inv() * uvPoint;
tempMat2 = rotationMatrix.inv() * tvec;
s = zConst + tempMat2.at<double>(2,0);
s /= tempMat.at<double>(2,0);
cout<<"s"<<s<<endl ;
cv::Mat wcPoint = rotationMatrix.inv() * (s * cameraMatrix.inv() * uvPoint - tvec);
Point3f realPoint(wcPoint.at<double>(0, 0), wcPoint.at<double>(1, 0), wcPoint.at<double>(2, 0));
cout<<"image point "<<endl<<uvPoint<< endl<< "to real point "<<endl <<wcPoint<<endl<<endl ;
/*static vector<Point2d> imagePoints_hand ;
static vector<double> timeStampList_hand ;
static vector<Point3f> realWorldPoints_hand ;
static vector<int> frameNumList_hand ;*/
imagePoints_hand.push_back(Point2d(uvPoint.at<double>(0,0),uvPoint.at<double>(1,0)));
realWorldPoints_hand.push_back(realPoint);
frameNumList_hand.push_back(frameNum_hand);
}
示例2: searchAngle
double searchAngle(const Mat &src, const Mat &dst, double st, double ed, double step, char dir, double &mpsnr) {
    // Scans candidate rotation angles from st DOWN to ed (assumes st >= ed)
    // in decrements of `step`, warps dst by the induced homography
    // H = K * R^-1 * K^-1, and keeps the angle with the best PSNR against src.
    // dir == 0 rotates about the first quaternion axis, dir == 1 about the
    // second. Returns the best angle; mpsnr receives the matching PSNR.
    int captureWidth = src.cols, captureHeight = src.rows;
    const double fuvX = 744.5, fuvY = 744.5;  // focal lengths in pixels
    double cx = captureWidth / 2 - 0.5, cy = captureHeight / 2 - 0.5;
    Mat K = Mat::zeros(3, 3, CV_64F);
    K.at<double>(0, 0) = fuvX;
    K.at<double>(1, 1) = fuvY;
    K.at<double>(0, 2) = cx;
    K.at<double>(1, 2) = cy;
    K.at<double>(2, 2) = 1;
    // Loop-invariant: the original recomputed K.inv() on every iteration.
    const Mat Kinv = K.inv();
    double bestPsnr = -100, angle = 0;
    for (double x = st; x >= ed; x -= step) {
        Mat R = VideoStabilization::rotationMat(angle2Quaternion(x * (dir == 0), x * (dir == 1), 0));
        Mat H = K * R.inv() * Kinv, src2, dst2;
        warpPerspective(dst, dst2, H, Size(src.cols, src.rows), INTER_LINEAR + WARP_INVERSE_MAP);
        // Mask src by the warped image so PSNR only compares covered pixels.
        src.copyTo(src2, dst2);
        double psnr = getPSNR(src2, dst2);
        if (psnr > bestPsnr) {
            bestPsnr = psnr;
            angle = x;
        }
    }
    mpsnr = bestPsnr;
    return angle;
}
示例3: rectify
void rectify(Mat& K1, Mat& R1, Mat& t1,
             Mat& K2, Mat& R2, Mat& t2,
             Size size,
             Mat& T1, Mat& T2) {
    // Epipolar rectification (Fusiello-style): builds one shared rotation
    // whose x axis is the baseline between the two optical centres, then
    // shifts the new principal points so each image centre stays put and the
    // two images share the same vertical displacement.
    // Outputs: T1/T2, the 3x3 rectifying image transforms.
    Mat_<double> Kn1(3, 3), Kn2(3, 3);
    Mat_<double> c1(3, 1), c2(3, 1),
                 v1(3, 1), v2(3, 1), v3(3, 1);
    K1.copyTo(Kn1);
    K2.copyTo(Kn2);
    // optical centers: c = -R^T * t
    c1 = - R1.t() * t1;
    c2 = - R2.t() * t2;
    // new x axis: along the baseline
    v1 = c2 - c1;
    // new y axis: orthogonal to the old left z axis and the new x
    v2 = Mat(R1.row(2).t()).cross(v1);
    // new z axis
    v3 = v1.cross(v2);
    Mat_<double> m_R(3, 3);
    v1 = v1 / norm(v1);
    v2 = v2 / norm(v2);
    v3 = v3 / norm(v3);
    m_R(0, 0) = v1(0), m_R(0, 1) = v1(1), m_R(0, 2) = v1(2);
    m_R(1, 0) = v2(0), m_R(1, 1) = v2(1), m_R(1, 2) = v2(2);
    m_R(2, 0) = v3(0), m_R(2, 1) = v3(1), m_R(2, 2) = v3(2);
    // Provisional transforms: map old pixels into the rectified frame.
    T1 = (Kn1 * m_R) * (R1.t() * K1.inv());
    T2 = (Kn2 * m_R) * (R2.t() * K2.inv());
    // Image center displacement: see where each centre lands and shift the
    // new principal points to compensate.
    Mat_<double> o1(3, 1), o2(3, 1);
    o1(0) = 0.5 * size.width, o1(1) = 0.5 * size.height, o1(2) = 1.0;
    o2(0) = 0.5 * size.width, o2(1) = 0.5 * size.height, o2(2) = 1.0;
    Mat_<double> on1(3, 1), on2(3, 1), d1(2, 1), d2(2, 1);
    on1 = T1 * o1;
    on2 = T2 * o2;
    d1(0) = o1(0) - on1(0) / on1(2);
    d1(1) = o1(1) - on1(1) / on1(2);
    // BUG FIX: image 2's displacement must be de-homogenized by its OWN
    // third coordinate on2(2); the original divided by on1(2) (copy-paste).
    d2(0) = o2(0) - on2(0) / on2(2);
    d2(1) = o2(1) - on2(1) / on2(2);
    // Equal vertical shift keeps corresponding rows aligned across images.
    d1(1) = d2(1);
    Kn1(0, 2) = Kn1(0, 2) + d1(0);
    Kn1(1, 2) = Kn1(1, 2) + d1(1);
    Kn2(0, 2) = Kn2(0, 2) + d2(0);
    Kn2(1, 2) = Kn2(1, 2) + d2(1);
    // Final transforms with the recentred intrinsics.
    T1 = (Kn1 * m_R) * (R1.t() * K1.inv());
    T2 = (Kn2 * m_R) * (R2.t() * K2.inv());
}
示例4: doTranslation
void doTranslation() {
cout<<"doTranslation begin "<<endl;
//Mat m = (Mat_<float>(3,3) << 1.4572068353989741e+003, 0, 3.1950000000000000e+002,0, 1.4572068353989741e+003, 2.3950000000000000e+002,0, 0, 1);
cv::Mat tempMat, tempMat2;
double s, zConst = 0;
rotationMatrix.convertTo(rotationMatrix,CV_64FC1);
cameraMatrix.convertTo(cameraMatrix,CV_64FC1);
tempMat = rotationMatrix.inv() * cameraMatrix.inv() * uvPoint;
tempMat2 = rotationMatrix.inv() * tvec;
s = zConst + tempMat2.at<double>(2,0);
s /= tempMat.at<double>(2,0);
/* double m11,m12,m14,m21,m22,m24,m31,m32,m34 ;
double u,v;
double ay,ax,by,bx,c ;
m11 = rotationMatrix.at<double>(0,0);
m12 = rotationMatrix.at<double>(0,1);
m21 = rotationMatrix.at<double>(1,0);
m22 = rotationMatrix.at<double>(1,1);
m31 = rotationMatrix.at<double>(2,0);
m32 = rotationMatrix.at<double>(2,1);
m14 = tvec.at<double>(0,0);
m24 = tvec.at<double>(1,0);
m34 = tvec.at<double>(2,0);
u = uvPoint.at<double>(0,0);
v = uvPoint.at<double>(1,0);
c= m12*m21-m11*m22 ;
ay = m21*u-m11*v ;
by = m14*m21 - m11*m24 ;
ax = m22*u-m12*v ;
bx = m14*m22-m12*m24 ;
s = ((m31*bx-m32*by)/c+m34)/(1+(m31*ax-m32*ay)/c);*/
cout<<"s"<<s<<endl ;
cv::Mat wcPoint = rotationMatrix.inv() * (s * cameraMatrix.inv() * uvPoint - tvec);
Point3f realPoint(wcPoint.at<double>(0, 0), wcPoint.at<double>(1, 0), wcPoint.at<double>(2, 0));
cout<<"image point "<<endl<<uvPoint<< endl<< "to real point "<<endl <<wcPoint<<endl<<endl ;
}
示例5: adaptXYZToNewWhitePoint
// Adapts the XYZ white point stored at pixel (0,0) of XYZWhitePoint to the
// white point measured in BGRWhitePoint, via the sRGB chromaticity matrix.
// NOTE(review): `product`, `dotProduct` and `sRGBInverseCompanding` are
// project helpers not visible in this file — comments on those calls are
// assumptions to be confirmed against their definitions.
void adaptXYZToNewWhitePoint(Mat& XYZWhitePoint, const Mat& BGRWhitePoint){
    // Chromaticity coordinates of the sRGB primaries (columns R, G, B).
    // NOTE(review): the third row holds the Rec.709 luminance coefficients
    // rather than z = 1 - x - y — confirm this is the intended convention.
    Mat sRGBChromCoord = (Mat_<float>(3, 3) << 0.6400, 0.3000, 0.1500,
        0.3300, 0.6000, 0.0600,
        0.2126, 0.7152, 0.0722);
    Mat invsRGBChromCoord = sRGBChromCoord.inv();
    // Reference white in XYZ. NOTE(review): (0.9642, 1.0, 0.8249) matches D50,
    // not D65, despite the variable name — verify which illuminant is meant.
    Mat XYZD65 = (Mat_<float>(1, 3) << 0.9642, 1.0000, 0.8249);
    // Only element (0,0) of this buffer is ever read or written below.
    Mat xyWhitePoint = Mat(BGRWhitePoint.rows, BGRWhitePoint.cols, CV_32FC3);
    // Presumably projects the reference white through the inverse primaries
    // matrix, one channel k at a time — TODO confirm against product().
    for (int k = 0; k < 3; k++){
        xyWhitePoint.at<Vec3f>(0, 0).val[k] = product(XYZD65, invsRGBChromCoord, k);
    }
    // Diagonal scaling matrix built from the computed white point channels.
    Mat T = (Mat_<float>(3, 3) << xyWhitePoint.at<Vec3f>(0, 0).val[0], 0, 0,
        0, xyWhitePoint.at<Vec3f>(0, 0).val[1], 0,
        0, 0, xyWhitePoint.at<Vec3f>(0, 0).val[2]);
    Mat tmpMat = Mat(3, 3, CV_32F);
    // Presumably tmpMat = sRGBChromCoord * T — TODO confirm dotProduct().
    dotProduct(sRGBChromCoord, T, tmpMat);
    // Linearize the BGR white point (undo sRGB gamma companding).
    Mat linBGRWhitePoint = Mat(BGRWhitePoint.rows, BGRWhitePoint.cols, CV_32FC3);
    for (int i = 0; i < 3; i++){
        linBGRWhitePoint.at<Vec3f>(0, 0).val[i] =
            sRGBInverseCompanding(BGRWhitePoint.at<Vec3f>(0, 0).val[i]);
    }
    // Matrix-vector product; the channel order is reversed (val[2]..val[0])
    // because the stored point is BGR while the matrix expects RGB.
    for (int k = 0; k < 3; k++){
        XYZWhitePoint.at<Vec3f>(0, 0).val[k] =
            linBGRWhitePoint.at<Vec3f>(0, 0).val[2] * tmpMat.at<float>(k, 0) +
            linBGRWhitePoint.at<Vec3f>(0, 0).val[1] * tmpMat.at<float>(k, 1) +
            linBGRWhitePoint.at<Vec3f>(0, 0).val[0] * tmpMat.at<float>(k, 2);
    }
}
示例6: computeBGRGrayWhitePoint
bool computeBGRGrayWhitePoint(const Mat& bgrImg, Mat& whitePoint,
        Mat& prevYUVWhitePoint){
    // Estimates the scene white point from the near-gray pixels of bgrImg
    // (gray-world assumption in YUV space). On progress, writes the BGR white
    // point (clipped to [0,1]) into whitePoint(0,0), saves the YUV estimate
    // into prevYUVWhitePoint(0,0), and returns false. Returns true when the
    // estimate has converged (outputs left untouched).
    float sumElementsByChannel[2] = {0, 0};
    int cntElm = 0;
    // BGR->YUV analysis matrix; its inverse maps the estimate back to RGB.
    Mat matrix = (Mat_<float>(3, 3) << 0.299, 0.587, 0.114,
        -0.229, -0.587, 0.886,
        0.701, -0.587, -0.114);
    Mat invMatrix = matrix.inv();
    Mat curYUVWhitePoint = Mat(1, 1, CV_32FC3);
    Mat yuvImg = Mat(bgrImg.rows, bgrImg.cols, CV_32FC3);
    convertBGRtoYUV(bgrImg, yuvImg);
    // Accumulate U/V over pixels whose chroma-to-luma ratio marks them gray.
    for (int i = 0; i < yuvImg.rows; i++){
        for (int j = 0; j < yuvImg.cols; j++){
            float y = yuvImg.at<Vec3f>(i, j).val[0];
            float u = yuvImg.at<Vec3f>(i, j).val[1];
            float v = yuvImg.at<Vec3f>(i, j).val[2];
            // NOTE(review): `abs` must resolve to a floating-point overload —
            // confirm <cmath> overloads are in scope, otherwise C's int abs()
            // would silently truncate.
            float ratio = (abs(u) + abs(v)) / y;
            if (ratio < GREY_THRESHOLD){
                sumElementsByChannel[0] += u;
                sumElementsByChannel[1] += v;
                cntElm++;
            }
        }
    }
    // FIX: without any near-gray pixel the averages below divided by zero and
    // poisoned prevYUVWhitePoint/whitePoint with NaNs on every later frame.
    if (cntElm == 0){
        return false;
    }
    curYUVWhitePoint.at<Vec3f>(0, 0).val[0] = 1;
    curYUVWhitePoint.at<Vec3f>(0, 0).val[1] = sumElementsByChannel[0] / cntElm;
    curYUVWhitePoint.at<Vec3f>(0, 0).val[2] = sumElementsByChannel[1] / cntElm;
    // Termination criteria: converged when the estimate barely moved since
    // the previous call, or the chroma is already essentially neutral.
    float curY = curYUVWhitePoint.at<Vec3f>(0, 0).val[0];
    float curU = curYUVWhitePoint.at<Vec3f>(0, 0).val[1];
    float curV = curYUVWhitePoint.at<Vec3f>(0, 0).val[2];
    float prevU = prevYUVWhitePoint.at<Vec3f>(0, 0).val[1];
    float prevV = prevYUVWhitePoint.at<Vec3f>(0, 0).val[2];
    float norm = sqrt(pow(curU - prevU, 2) + pow(curV - prevV, 2));
    if (!std::isnan(prevU) && (norm < pow(10, -6) ||
            std::max(abs(curU), abs(curV)) < 0.001)){
        return true;
    }
    prevYUVWhitePoint.at<Vec3f>(0, 0).val[0] = curY;
    prevYUVWhitePoint.at<Vec3f>(0, 0).val[1] = curU;
    prevYUVWhitePoint.at<Vec3f>(0, 0).val[2] = curV;
    // Convert the YUV white point back to BGR (hence the reversed 2 - i
    // channel index) and clip into the valid [0,1] range.
    for (int i = 0; i < 3; i++){
        whitePoint.at<Vec3f>(0, 0).val[2 - i] = invMatrix.at<float>(i, 0) * curY +
            invMatrix.at<float>(i, 1) * curU +
            invMatrix.at<float>(i, 2) * curV;
        whitePoint.at<Vec3f>(0, 0).val[2 - i] =
            clipImg(whitePoint.at<Vec3f>(0, 0).val[2 - i], 0, 1);
    }
    return false;
}
示例7: Error
/// If H is not orientation preserving at the point, the error is infinite.
/// For this test, it is assumed that det(H)>0.
/// \param H The homography matrix.
/// \param index The correspondence index
/// \param side In which image is the error measured? (output: 1 = image 2,
///             0 = image 1; only meaningful when symError_ is set)
/// \return The square reprojection error.
double HomographyModel::Error(const Mat &H, size_t index, int* side) const {
    double err = std::numeric_limits<double>::infinity();
    if(side) *side=1;
    libNumerics::vector<double> x(3);
    // Transfer error in image 2: map x1 through H and compare against x2.
    x(0) = x1_(0,index);
    x(1) = x1_(1,index);
    x(2) = 1.0;
    x = H * x;
    // x(2) <= 0 means H is not orientation preserving here: err stays infinite.
    if(x(2)>0) {
        x /= x(2);
        err = sqr(x2_(0,index)-x(0)) + sqr(x2_(1,index)-x(1));
    }
    // Transfer error in image 1 (symmetric error)
    if(symError_) { // ... but only if requested
        x(0) = x2_(0,index);
        x(1) = x2_(1,index);
        x(2) = 1.0;
        // NOTE(review): H.inv() is recomputed on every call; callers scoring
        // many correspondences may want to cache it — confirm before changing.
        x = H.inv() * x;
        if(x(2)>0) {
            x /= x(2);
            double err1 = sqr(x1_(0,index)-x(0)) + sqr(x1_(1,index)-x(1));
            if(err1>err) { // Keep worse error
                err=err1;
                if(side) *side=0;
            }
        }
    }
    return err;
}
示例8: transformPts
// ----------------------------------------------------------------
// Applies the inverse of transform Tau to the four frame corner
// points, warping them about the frame centre.
// ----------------------------------------------------------------
void transformPts(vector<Point2f>& src_pts,Mat& Tau,vector<Point2f>& dst_pts)
{
    // Copy the corners (the original also computed an unused width/height).
    vector<Point2f> pts(src_pts.begin(), src_pts.end());
    // Frame centre: midpoint of the diagonal corners 0 and 2.
    Point2f center = (pts[0] + pts[2]) * 0.5;
    // Pack the centre-relative corners into the 4x1 two-channel matrix
    // layout that perspectiveTransform expects.
    Mat tmp(4, 1, CV_64FC2);
    for (int i = 0; i < 4; i++)
    {
        tmp.at<Vec2d>(i)[0] = pts[i].x - center.x;
        tmp.at<Vec2d>(i)[1] = pts[i].y - center.y;
    }
    Mat Tau_inv = Tau.inv();
    perspectiveTransform(tmp, tmp, Tau_inv);
    // Shift the transformed corners back to absolute coordinates.
    for (int i = 0; i < 4; i++)
    {
        pts[i].x = tmp.at<Vec2d>(i)[0] + center.x;
        pts[i].y = tmp.at<Vec2d>(i)[1] + center.y;
    }
    dst_pts.assign(pts.begin(), pts.end());
}
示例9: inv
PoseRT PoseRT::inv() const
{
    // Inverts the pose by inverting its 4x4 projective (rigid-transform)
    // matrix; DECOMP_SVD keeps the inversion stable for ill-conditioned input.
    const Mat inverted = getProjectiveMatrix().inv(DECOMP_SVD);
    return PoseRT(inverted);
}
示例10: fixPictures
// Stabilizes img_1/img_2 against each other: estimates a refined homography
// between them, then warps one picture onto the other. Returns the homography
// that was actually applied (possibly the inverse of the estimated one).
cv::Mat AntiShake::fixPictures(Mat &img_1, Mat &img_2, int loops, double final_pic_size,
double maxDetDiff, int featurePoints, int coreSize, double absoluteRelation, int matches_type) {
    // Firstly we calculate the Homography matrix and refine it in the FeedbackController function:
    Mat H = calcHomographyFeedbackController(img_1, img_2, loops, final_pic_size, featurePoints, coreSize,
    absoluteRelation, matches_type);
    double det = determinant(H);
    cout << "STEP 11 final homography = " << endl << H << endl << " determinant = " << det << endl;
    // Secondly, let's transform the picture according to the calculated (resultant) matrix.
    //TODO
    /*
    * Create white Image with the same size as img_1 and img_2
    * apply Homography on white one
    * Count histogramas and pixels with value == 0 (black pixels)
    * if value > 0, crop on both img_1 and img_2
    *
    * */
    // det >= 1 applies H in the 1->2 direction; otherwise the inverse is
    // applied 2->1. NOTE(review): presumably this picks the direction that
    // avoids shrinking the warped image — confirm against applyHomography.
    if (det >= 1.0) {
        applyHomography(H, img_1, img_2);
        cout << "STEP 12 fixed original pictures 1->2" << endl;
        return H;
    } else {
        Mat H2 = H.inv();
        H.release();
        applyHomography(H2, img_2, img_1);
        cout << "STEP 12 fixed original pictures 2->1 " << endl;
        return H2;
    }
}
示例11: iviFundamentalMatrix
// -----------------------------------------------------------------------
/// \brief Builds the fundamental matrix from the two cameras' parameters.
///
/// @param mLeftIntrinsic: left camera intrinsic matrix (3x3)
/// @param mLeftExtrinsic: left camera extrinsic matrix (4x4)
/// @param mRightIntrinsic: right camera intrinsic matrix (3x3)
/// @param mRightExtrinsic: right camera extrinsic matrix (4x4)
/// @return the 3x3 fundamental matrix
// -----------------------------------------------------------------------
Mat iviFundamentalMatrix(const Mat& mLeftIntrinsic,
                         const Mat& mLeftExtrinsic,
                         const Mat& mRightIntrinsic,
                         const Mat& mRightExtrinsic) {
    // 3x4 canonical projection [I | 0]; P = K * [I|0] * E for each camera.
    Mat tmp = (Mat_<double>(3,4) <<
        1.0, 0.0, 0.0, 0.0,
        0.0, 1.0, 0.0, 0.0,
        0.0, 0.0, 1.0, 0.0
    );
    Mat P1 = mLeftIntrinsic * tmp * mLeftExtrinsic;
    Mat P2 = mRightIntrinsic * tmp * mRightExtrinsic;
    // Inverse extrinsic = camera-to-world transform of the left camera.
    Mat O = mLeftExtrinsic.inv();
    // Flat indices 3, 7, 11, 15 on a 4x4 CV_64F matrix select the last
    // column: the homogeneous optical centre of the left camera.
    Mat O1 = (Mat_<double>(4,1) <<
        O.at<double>(3),
        O.at<double>(7),
        O.at<double>(11),
        O.at<double>(15)
    );
    // Homography induced between the images: P2 * P1^+ (pseudo-inverse via
    // SVD, since P1 is 3x4 and not invertible in the ordinary sense).
    Mat Hpi = P2 * P1.inv(DECOMP_SVD);
    // F = [e2]_x * Hpi, where e2 = P2 * O1 is the epipole in the right image.
    // (The original initialized a Mat::eye result that was dead code.)
    return iviVectorProductMatrix(P2 * O1) * Hpi;
}
示例12: setCameraParams
void ProjectorBase::setCameraParams(InputArray _K, InputArray _R, InputArray _T)
{
    // Validates and caches the camera intrinsics K, rotation R and
    // translation T (all CV_32F), plus the derived products R*K^-1 and
    // K*R^-1 used by the projection routines.
    Mat K = _K.getMat(), R = _R.getMat(), T = _T.getMat();
    CV_Assert(K.size() == Size(3, 3) && K.type() == CV_32F);
    CV_Assert(R.size() == Size(3, 3) && R.type() == CV_32F);
    CV_Assert((T.size() == Size(1, 3) || T.size() == Size(3, 1)) && T.type() == CV_32F);
    Mat_<float> K_(K);
    Mat_<float> Rinv = R.t();           // rotation inverse == transpose
    Mat_<float> R_Kinv = R * K.inv();
    Mat_<float> K_Rinv = K * Rinv;
    // Flatten each 3x3 matrix row-major into its cached float[9] array.
    for (int i = 0; i < 9; ++i)
    {
        const int row = i / 3, col = i % 3;
        k[i]      = K_(row, col);
        rinv[i]   = Rinv(row, col);
        r_kinv[i] = R_Kinv(row, col);
        k_rinv[i] = K_Rinv(row, col);
    }
    Mat_<float> T_(T.reshape(0, 3));    // accept both 1x3 and 3x1 shapes
    t[0] = T_(0,0); t[1] = T_(1,0); t[2] = T_(2,0);
}
示例13: setCameraParams
void ProjectorBase::setCameraParams(const Mat &K, const Mat &R, const Mat &T)
{
    // Validates and caches camera parameters (all CV_32F) together with the
    // derived matrices R*K^-1 and K*R^-1 needed when projecting points.
    CV_Assert(K.size() == Size(3, 3) && K.type() == CV_32F);
    CV_Assert(R.size() == Size(3, 3) && R.type() == CV_32F);
    CV_Assert((T.size() == Size(1, 3) || T.size() == Size(3, 1)) && T.type() == CV_32F);
    Mat_<float> K_(K);
    Mat_<float> Rinv = R.t();           // inverse of a rotation is its transpose
    Mat_<float> R_Kinv = R * K.inv();
    Mat_<float> K_Rinv = K * Rinv;
    // Copy every 3x3 matrix into its row-major float[9] cache.
    for (int row = 0; row < 3; ++row)
    {
        for (int col = 0; col < 3; ++col)
        {
            const int idx = row * 3 + col;
            k[idx]      = K_(row, col);
            rinv[idx]   = Rinv(row, col);
            r_kinv[idx] = R_Kinv(row, col);
            k_rinv[idx] = K_Rinv(row, col);
        }
    }
    Mat_<float> T_(T.reshape(0, 3));    // normalize T to a 3x1 column
    t[0] = T_(0,0); t[1] = T_(1,0); t[2] = T_(2,0);
}
示例14: denormalize
void denormalize(Mat &H, const Mat &Hn, const Mat &T, const Mat &Tp)
{
    // De-normalizes a homography estimated in normalized coordinates:
    // H = T^-1 * Hn * Tp, scaled so the bottom-right entry is 1.
    // The original called H.create(...) first, but Mat assignment rebinds the
    // header anyway, so the freshly created buffer was immediately discarded.
    H = T.inv() * Hn * Tp;
    // NOTE(review): this at<float> access assumes the matrices are CV_32F —
    // confirm callers never pass CV_64F, where it would read the wrong bytes.
    H /= H.at<float>(H.rows - 1, H.cols - 1);
}
示例15: draw_image_in_quad
Mat draw_image_in_quad(Mat& image, vector<Point>& quad, const Mat& imposed, Mat homography = Mat())
{
    // Warps `imposed` into the quadrilateral `quad` of `image` by inverse
    // mapping: for every destination pixel inside the quad, look up the
    // source pixel via H^-1. If `homography` is empty, it is computed from
    // the corner correspondence. Returns the homography actually used so
    // callers can cache it.
    vector<Point> src_points;
    src_points.push_back(Point(0, 0));
    src_points.push_back(Point(imposed.cols, 0));
    src_points.push_back(Point(imposed.cols, imposed.rows));
    src_points.push_back(Point(0, imposed.rows));
    if (homography.rows == 0 || homography.cols == 0)
    {
        homography = findHomography(src_points, quad);
    }
    Mat homography_inv = homography.inv();
    Rect r = boundingRect(quad);
    // Hoisted out of the pixel loop: reuse one homogeneous-point buffer
    // instead of allocating a fresh 3x1 Mat for every pixel (the original
    // called Mat::zeros per pixel). The w-coordinate is always 1.
    Mat p = Mat::zeros(3, 1, CV_64F);
    p.at<double>(2, 0) = 1;
    for (int i = 0; i < r.height; ++i)
    {
        for (int j = 0; j < r.width; ++j)
        {
            int x = j + r.x;
            int y = i + r.y;
            // Paint only pixels inside (or on the boundary of) the quad.
            if (pointPolygonTest(quad, Point(x, y), false) > -0.5)
            {
                // Inverse mapping: H * src == dest  =>  src ~= H^-1 * dest.
                p.at<double>(0, 0) = x;
                p.at<double>(1, 0) = y;
                Mat q = homography_inv * p;
                double wx = q.at<double>(0, 0);
                double wy = q.at<double>(1, 0);
                double w = q.at<double>(2, 0);
                assert(w != 0.0);
                // De-homogenize and round to the nearest source pixel.
                int qx = (int)(round(wx / w));
                int qy = (int)(round(wy / w));
                if (qx >= 0 && qy >= 0 && qx < imposed.cols && qy < imposed.rows)
                {
                    image.at<Vec3b>(y, x) = imposed.at<Vec3b>(qy, qx);
                }
                // Out-of-range lookups are silently skipped.
            }
        }
    }
    return homography;
}