This article collects typical usage examples of the C++ method cv::Mat::t. If you have been wondering what exactly C++ Mat::t does, how to use it, or what real examples of Mat::t look like, the hand-picked method code examples below may help. You can also explore further usage examples of the containing class, cv::Mat.
Below, 15 code examples of the Mat::t method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your rating helps the system recommend better C++ code examples.
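Before diving into the examples: cv::Mat::t() returns a transpose expression (a cv::MatExpr) that is evaluated lazily, typically when it is assigned to a cv::Mat or used inside a larger matrix expression. Here is a minimal, self-contained sketch; the matrix contents and the names A and At are purely illustrative and not taken from any of the examples below.

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat A = (cv::Mat_<float>(2, 3) << 1, 2, 3,
                                          4, 5, 6);
    // t() returns a cv::MatExpr; assigning it to a cv::Mat evaluates the transpose.
    cv::Mat At = A.t();
    std::cout << "A = "   << std::endl << A  << std::endl;   // 2x3
    std::cout << "A^T = " << std::endl << At << std::endl;   // 3x2
    return 0;
}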
Example 1: execute
void RotateImage::execute()
{
    const cv::Mat src = mIn;
    int angle = mAngle;
    cv::Mat dest;
    if(angle == 90)
    {
        // transpose + horizontal flip == rotate 90° clockwise
        dest = src.t();
        cv::flip(dest, dest, 1);
    }
    else if(angle == 180)
    {
        cv::flip(src, dest, -1);
    }
    else if(angle == 270)
    {
        // transpose + vertical flip == rotate 90° counter-clockwise
        dest = src.t();
        cv::flip(dest, dest, 0);
    }
    else
    {
        dest = src;
    }
    mOut.send(dest);
}
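As a side note (not part of the example above): with OpenCV 3.0 or newer the same three rotations can be expressed with cv::rotate. A sketch reusing the src and dest names from the example:

cv::Mat dest;
cv::rotate(src, dest, cv::ROTATE_90_CLOCKWISE);         // angle == 90
cv::rotate(src, dest, cv::ROTATE_180);                  // angle == 180
cv::rotate(src, dest, cv::ROTATE_90_COUNTERCLOCKWISE);  // angle == 270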
Example 2: concatMatsHorizontal
//
// concatMatsHorizontal
//
cv::Mat concatMatsHorizontal (const cv::Mat &mat_left, const cv::Mat &mat_right)
{
    CV_Assert (mat_left.rows == mat_right.rows);
    // push_back appends rows, so transpose both inputs, stack them vertically,
    // then transpose back to obtain a horizontal concatenation.
    cv::Mat dst_t = mat_left.t ();
    cv::Mat mat_right_t = mat_right.t ();
    dst_t.push_back (mat_right_t);
    return dst_t.t ();
}
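The transpose-and-push_back trick above is equivalent to horizontal concatenation. For reference, OpenCV also provides a direct helper; a sketch with hypothetical inputs img_left and img_right (equal row count and type):

cv::Mat wide;
cv::hconcat(img_left, img_right, wide);  // same result as concatMatsHorizontal(img_left, img_right)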
Example 3: vecDist
float vecDist(cv::Mat vec) {
    assert((vec.rows == 1) || (vec.cols == 1));
    assert(vec.type() == CV_64F);
    cv::Mat results;
    // vec * vec^T (row vector) or vec^T * vec (column vector) yields a 1x1 matrix
    // holding the squared Euclidean norm.
    if (vec.rows == 1)
        results = vec * vec.t();
    else
        results = vec.t() * vec;
    return (float) std::sqrt(results.at<double>(0));
}
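For comparison (not part of the original example), the same Euclidean length can be obtained without any transposition via cv::norm, which accepts both row and column vectors:

double len = cv::norm(vec, cv::NORM_L2);  // equals vecDist(vec) up to float rounding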
Example 4: m
/**
 * Convert cv::Mat to MxArray
 * @param mat cv::Mat object
 * @param classid classid of mxArray, e.g., mxDOUBLE_CLASS. When mxUNKNOWN_CLASS
 *                is specified, classid will be automatically determined from
 *                the type of cv::Mat. default: mxUNKNOWN_CLASS
 * @param transpose Optional transposition of the return value so that rows and
 *                  columns of the 2D Mat are mapped to the 2nd and 1st
 *                  dimensions in MxArray, respectively. This does not apply to
 *                  the N-D array conversion. default: true
 * @return MxArray object
 *
 * Convert a cv::Mat object to an MxArray. When the cv::Mat object is 2D, the
 * width, height, and channels are mapped to the first, second, and third
 * dimensions of the MxArray unless the transpose flag is set to false. When the
 * cv::Mat object is N-D, (dim 1, dim 2, ..., dim N, channels) are mapped to
 * (dim 2, dim 1, ..., dim N, dim N+1), respectively.
 *
 * Example:
 * @code
 * cv::Mat x(120, 90, CV_8UC3, Scalar(0));
 * mxArray* plhs[0] = MxArray(x);
 * @endcode
 */
MxArray::MxArray(const cv::Mat& mat, mxClassID classid, bool transpose)
{
    if (mat.empty()) {
        p_ = mxCreateNumericArray(0, 0, mxDOUBLE_CLASS, mxREAL);
        if (!p_)
            mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
        return;
    }
#if CV_MINOR_VERSION >= 2
    const cv::Mat& rm = (mat.dims == 2 && transpose) ? cv::Mat(mat.t()) : mat;
#else
    const cv::Mat& rm = (transpose) ? cv::Mat(mat.t()) : mat;
#endif
    // Create a new mxArray
    int nchannels = rm.channels();
#if CV_MINOR_VERSION >= 2
    const int* dims_ = rm.size;
    vector<mwSize> d(dims_, dims_ + rm.dims);
#else
    int dims_[] = {rm.rows, rm.cols};
    vector<mwSize> d(dims_, dims_ + 2);
#endif
    d.push_back(nchannels);
    classid = (classid == mxUNKNOWN_CLASS) ? ClassIDOf[rm.depth()] : classid;
    std::swap(d[0], d[1]);
    p_ = (classid == mxLOGICAL_CLASS) ?
        mxCreateLogicalArray(d.size(), &d[0]) :
        mxCreateNumericArray(d.size(), &d[0], classid, mxREAL);
    if (!p_)
        mexErrMsgIdAndTxt("mexopencv:error", "Allocation error");
    // Copy each channel
    std::vector<cv::Mat> mv;
    cv::split(rm, mv);
    std::vector<mwSize> si(d.size(), 0);          // subscript index
    int type = CV_MAKETYPE(DepthOf[classid], 1);  // destination type
    for (int i = 0; i < nchannels; ++i) {
        si[si.size() - 1] = i;                    // last dim is a channel index
        void *ptr = reinterpret_cast<void*>(
            reinterpret_cast<size_t>(mxGetData(p_)) +
            mxGetElementSize(p_) * subs(si));
#if CV_MINOR_VERSION >= 2
        cv::Mat m(rm.dims, dims_, type, ptr);
#else
        cv::Mat m(dims_[0], dims_[1], type, ptr);
#endif
        mv[i].convertTo(m, type);                 // Write to mxArray through m
    }
}
Example 5: compute_covariance_matrix
void ofxIcp::compute_covariance_matrix(const vector<cv::Point3d> & points_1, const cv::Mat & centroid_1, const vector<cv::Point3d> & points_2, const cv::Mat & centroid_2, cv::Mat & output){
    for(int i = 0; i < points_1.size(); i++){
        // deviation of each point from its centroid
        cv::Mat mat2 = centroid_1.t() - cv::Mat(points_1[i], false).reshape(1);
        cv::Mat mat1 = centroid_2.t() - cv::Mat(points_2[i], false).reshape(1);
        // accumulate the outer products of corresponding deviations
        cv::Mat mat3 = mat1*mat2.t();
        if(i == 0){
            output = mat3;
        }else{
            output += mat3;
        }
    }
}
Example 6: triangulateFromUpVp
void Triangulator::triangulateFromUpVp(cv::Mat &up, cv::Mat &vp, cv::Mat &xyz){
    std::cerr << "WARNING! NOT FULLY IMPLEMENTED!" << std::endl;
    int N = up.rows * up.cols;
    cv::Mat projPointsCam(2, N, CV_32F);
    uc.reshape(0,1).copyTo(projPointsCam.row(0));
    vc.reshape(0,1).copyTo(projPointsCam.row(1));
    cv::Mat projPointsProj(2, N, CV_32F);
    up.reshape(0,1).copyTo(projPointsProj.row(0));
    vp.reshape(0,1).copyTo(projPointsProj.row(1));
    // camera projection matrix: Pc = Kc [I | 0]
    cv::Mat Pc(3, 4, CV_32F, cv::Scalar(0.0));
    cv::Mat(calibration.Kc).copyTo(Pc(cv::Range(0,3), cv::Range(0,3)));
    // projector projection matrix: Pp = Kp [Rp | Tp]
    cv::Mat Pp(3, 4, CV_32F), temp(3, 4, CV_32F);
    cv::Mat(calibration.Rp).copyTo(temp(cv::Range(0,3), cv::Range(0,3)));
    cv::Mat(calibration.Tp).copyTo(temp(cv::Range(0,3), cv::Range(3,4)));
    Pp = cv::Mat(calibration.Kp) * temp;
    cv::Mat xyzw;
    cv::triangulatePoints(Pc, Pp, projPointsCam, projPointsProj, xyzw);
    // dehomogenize: divide each homogeneous point by its w component
    xyz.create(3, N, CV_32F);
    for(int i = 0; i < N; i++){
        xyz.at<float>(0,i) = xyzw.at<float>(0,i) / xyzw.at<float>(3,i);
        xyz.at<float>(1,i) = xyzw.at<float>(1,i) / xyzw.at<float>(3,i);
        xyz.at<float>(2,i) = xyzw.at<float>(2,i) / xyzw.at<float>(3,i);
    }
    // reshape the 3xN point list into a 3-channel image with the original layout
    xyz = xyz.t();
    xyz = xyz.reshape(3, up.rows);
}
Example 7: inv
cv::Mat Transformations::inv(const cv::Mat &aTb)
{
    // inv(T) = [R^t | -R^t p]
    const cv::Mat R = aTb.rowRange(0,3).colRange(0,3);
    const cv::Mat t = aTb.rowRange(0,3).colRange(3,4);
    cv::Mat Rt = R.t();
    cv::Mat t2 = -Rt * t;
    cv::Mat ret;
    if(aTb.type() == CV_32F)
    {
        ret = (cv::Mat_<float>(4,4) <<
               Rt.at<float>(0,0), Rt.at<float>(0,1), Rt.at<float>(0,2), t2.at<float>(0,0),
               Rt.at<float>(1,0), Rt.at<float>(1,1), Rt.at<float>(1,2), t2.at<float>(1,0),
               Rt.at<float>(2,0), Rt.at<float>(2,1), Rt.at<float>(2,2), t2.at<float>(2,0),
               0, 0, 0, 1);
    }
    else
    {
        ret = (cv::Mat_<double>(4,4) <<
               Rt.at<double>(0,0), Rt.at<double>(0,1), Rt.at<double>(0,2), t2.at<double>(0,0),
               Rt.at<double>(1,0), Rt.at<double>(1,1), Rt.at<double>(1,2), t2.at<double>(1,0),
               Rt.at<double>(2,0), Rt.at<double>(2,1), Rt.at<double>(2,2), t2.at<double>(2,0),
               0, 0, 0, 1);
    }
    return ret;
}
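The code above implements the standard closed-form inverse of a 4x4 rigid-body transform. In the notation of the comment inside the function (R is the top-left 3x3 rotation block, t the translation column of aTb):

\[
T = \begin{bmatrix} R & t \\ 0 & 1 \end{bmatrix}
\qquad\Longrightarrow\qquad
T^{-1} = \begin{bmatrix} R^\top & -R^\top t \\ 0 & 1 \end{bmatrix}
\]

Example 13 below constructs exactly the same inverse, but fills the blocks with copyTo instead of the comma initializer.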
Example 8: motionEstimationEssentialMat
bool motionEstimationEssentialMat (const std::vector<cv::Point2f>& points_1,
                                   const std::vector<cv::Point2f>& points_2,
                                   const cv::Mat& F,
                                   const cv::Mat& K,
                                   cv::Mat& T, cv::Mat& R)
{
    // calculate essential mat
    cv::Mat E = K.t() * F * K; // according to HZ (9.12)

    // decompose the right solution for R and T values and save it to P; get the point cloud of triangulated points
    cv::Mat P;
    std::vector<cv::Point3f> pointCloud;
    bool goodPFound = getRightProjectionMat(E, P, K, points_1, points_2, pointCloud);
    if (!goodPFound) {
        cout << "NO MOVEMENT: no perspective Mat Found" << endl;
        return false;
    }

    cv::Mat T_temp, R_temp;
    decomposeProjectionMat(P, T_temp, R_temp);

    //delete y motion
    //T_temp.at<float>(1)=0;

    T = T_temp;
    R = R_temp;
    return true;
}
Example 9: SetPose
void KeyFrame::SetPose(const cv::Mat &Rcw, const cv::Mat &tcw)
{
    boost::mutex::scoped_lock lock(mMutexPose);
    Rcw.copyTo(Tcw.rowRange(0,3).colRange(0,3));
    tcw.copyTo(Tcw.col(3).rowRange(0,3));
    Ow = -Rcw.t()*tcw;
}
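Assuming the usual visual-SLAM convention (not stated explicitly in this snippet) that Tcw maps world coordinates into the camera frame, the last line computes the camera center in world coordinates:

\[
O_w = -R_{cw}^\top \, t_{cw}
\]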
Example 10:
EstimateCameraPose::EstimateCameraPose(std::vector<cv::Point2f> points1, std::vector<cv::Point2f> points2, cv::Mat K)
{
    matchedpoints1 = points1;
    matchedpoints2 = points2;
    intrinsic = K;
    fundamental = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, Mask);
    // essential matrix from the fundamental matrix and the intrinsics, E = K^T * F * K (HZ eq. 9.12)
    E = K.t() * fundamental * K;
}
Example 11: hidden_activations_for
/* "pattern": row input vector, size <1>x<number visible units>
 * OUT "activation": row vector, size <1>x<number hidden units>
 * "mc_steps": number of markov chain monte carlo steps before sampling. DEFAULT: 1
 */
void RBMbb::hidden_activations_for(const cv::Mat& pattern, cv::Mat& activations, bool probabilities, int mc_steps)
{
    cv::Mat visible_state = pattern.t();
    for(int step = 0; step < mc_steps; step++)
    {
        if(step == mc_steps-1 && probabilities)
        {
            visible_to_hidden_probabilities_logistic(visible_state, activations);
            break;
        }
        binary_sample_hidden_state(visible_state, activations);
        binary_sample_visible_state(activations, visible_state);
    }
    activations = activations.t();
}
Example 12: reconstruct
/* "pattern": row input vector, size <1>x<number visible units>
 * OUT "reconstruction": row vector, size <1>x<number visible units>
 * "mc_steps": number of markov chain monte carlo steps before sampling. DEFAULT: 1
 */
void RBMbb::reconstruct(const cv::Mat& pattern, cv::Mat& reconstruction, int mc_steps)
{
    reconstruction = pattern.t();
    cv::Mat hidden_state;
    for(int step = 0; step < mc_steps; step++)
    {
        binary_sample_hidden_state(reconstruction, hidden_state);
        if(step == mc_steps-1)
        {
            hidden_to_visible_probabilities_logistic(hidden_state, reconstruction);
            break;
        }
        binary_sample_visible_state(hidden_state, reconstruction);
    }
    reconstruction = reconstruction.t();
}
Example 13: invRtTransformMatrix
cv::Mat invRtTransformMatrix(const cv::Mat& rot, const cv::Mat& trans) {
    // inverse of a rigid transform: [R | t]^-1 = [R^T | -R^T t]
    cv::Mat result = cv::Mat::eye(4, 4, trans.depth());
    cv::Mat invR = rot.t();
    cv::Mat invT = -invR * trans;
    invR.copyTo(result(cv::Range(0, 3), cv::Range(0, 3)));
    invT.copyTo(result(cv::Range(0, 3), cv::Range(3, 4)));
    return result;
}
Example 14: rotate
void rotate (cv::Mat& src, cv::Mat& dst) {
    if(angle >= 360) {
        angle -= 360;
    }
    cout << "angle = " << angle << endl;
    if(angle == 90) {
        // transpose + horizontal flip == rotate 90° clockwise
        flip(src.t(), dst, 1);
    }
    else if(angle == 180) {
        flip(src, dst, -1);
    }
    else if(angle == 270) {
        // transpose + vertical flip == rotate 90° counter-clockwise
        flip(src.t(), dst, 0);
    }
    // for any other angle dst is left unchanged
}
Example 15: compute_rigid_transformation
void ofxIcp::compute_rigid_transformation(const vector<cv::Point3d> & points_1, const cv::Mat & centroid_1, const vector<cv::Point3d> & points_2, const cv::Mat & centroid_2, cv::Mat & rotation, cv::Mat & translation){
    cv::Mat covariance_matrix;
    compute_covariance_matrix(points_1, centroid_1, points_2, centroid_2, covariance_matrix);
    compute_rotation_matrix(covariance_matrix, rotation);
    // the translation aligns the rotated first centroid with the second centroid
    cv::Mat rot_centroid_1_mat = rotation * centroid_1.t();
    translation = centroid_2 - rot_centroid_1_mat.t();
}
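This is the closing step of the classic centroid-based rigid alignment: after the rotation R has been estimated from the covariance matrix (compute_rotation_matrix is not shown here; such routines typically use an SVD of that matrix), the translation follows directly from the two centroids. With \(\bar{p}_1\) and \(\bar{p}_2\) denoting the centroids (stored as row vectors in this code, hence the transposes):

\[
t = \bar{p}_2 - R\,\bar{p}_1
\]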