This page collects typical usage examples of the C++ method cv::InputArray::type. If you have been wondering what cv::InputArray::type does, how to call it, or how it looks in real code, the hand-picked examples below may help. You can also explore further usage examples of the containing class, cv::InputArray.
The following shows 11 code examples of InputArray::type, sorted by popularity by default.
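Before diving in, a minimal sketch of what type() reports: it packs the element depth and the channel count of whatever container backs the InputArray into a single int, which the CV_MAT_DEPTH and CV_MAT_CN macros take apart again.

#include <opencv2/core.hpp>
#include <iostream>

void describe(cv::InputArray arr)
{
    int t = arr.type();                        // e.g. CV_32FC3
    std::cout << "depth=" << CV_MAT_DEPTH(t)   // CV_8U, CV_32F, ...
              << " channels=" << CV_MAT_CN(t) << std::endl;
}

int main()
{
    cv::Mat m(4, 4, CV_32FC3);
    std::vector<cv::Point2f> v(8);
    describe(m); // depth=5 channels=3  (CV_32F == 5)
    describe(v); // depth=5 channels=2  (a vector<Point2f> is seen as CV_32FC2)
}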
Example 1:
RadiometricResponse::RadiometricResponse(cv::InputArray _response, ChannelOrder order) : order_(order) {
    if (_response.size().width != 256 || _response.size().height != 1)
        BOOST_THROW_EXCEPTION(RadiometricResponseException("Radiometric response should have 1 x 256 size")
                              << RadiometricResponseException::Size(_response.size()));
    if (_response.type() != CV_32FC3)
        BOOST_THROW_EXCEPTION(RadiometricResponseException("Radiometric response values should be 3-channel float")
                              << RadiometricResponseException::Type(_response.type()));
    response_ = _response.getMat();
    cv::log(response_, log_response_);
    cv::split(response_, response_channels_);
}
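This constructor is a compact validate-then-cache pattern: reject anything that is not a 1 x 256 CV_32FC3 response curve, then precompute the log response and the per-channel split. A hypothetical usage sketch; ChannelOrder::BGR is an assumed enumerator, since the real enum is not shown in this excerpt:

cv::Mat response(1, 256, CV_32FC3);
for (int i = 0; i < 256; ++i) {
    float v = (i + 1) / 256.0f;  // strictly positive, so cv::log stays finite
    response.at<cv::Vec3f>(0, i) = cv::Vec3f(v, v, v);
}
RadiometricResponse rr(response, ChannelOrder::BGR); // assumed enumerator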
Example 2: assert
void IPPE::PoseSolver::solveGeneric(cv::InputArray _objectPoints, cv::InputArray _normalizedInputPoints,
                                    cv::OutputArray _Ma, cv::OutputArray _Mb)
{
    //argument checking:
    size_t n = _objectPoints.rows() * _objectPoints.cols(); //number of points
    int objType = _objectPoints.type();
    int type_input = _normalizedInputPoints.type();

    assert((objType == CV_32FC3) || (objType == CV_64FC3));
    assert((type_input == CV_32FC2) || (type_input == CV_64FC2));
    assert((_objectPoints.rows() == 1) || (_objectPoints.cols() == 1));
    assert((_objectPoints.rows() >= 4) || (_objectPoints.cols() >= 4));
    assert((_normalizedInputPoints.rows() == 1) || (_normalizedInputPoints.cols() == 1));
    assert(static_cast<size_t>(_objectPoints.rows() * _objectPoints.cols()) == n);

    //promote both point sets to double precision:
    cv::Mat normalizedInputPoints;
    if (type_input == CV_32FC2) {
        _normalizedInputPoints.getMat().convertTo(normalizedInputPoints, CV_64FC2);
    }
    else {
        normalizedInputPoints = _normalizedInputPoints.getMat();
    }

    cv::Mat objectInputPoints;
    if (objType == CV_32FC3) { //keyed on the object points' own type
        _objectPoints.getMat().convertTo(objectInputPoints, CV_64FC3);
    }
    else {
        objectInputPoints = _objectPoints.getMat();
    }

    cv::Mat canonicalObjPoints;
    cv::Mat MmodelPoints2Canonical;

    //transform object points to the canonical position (zero centred and on the plane z=0):
    makeCanonicalObjectPoints(objectInputPoints, canonicalObjPoints, MmodelPoints2Canonical);

    //compute the homography mapping the model's points to normalizedInputPoints
    cv::Mat H;
    HomographyHO::homographyHO(canonicalObjPoints, _normalizedInputPoints, H);

    //now solve
    cv::Mat MaCanon, MbCanon;
    solveCanonicalForm(canonicalObjPoints, normalizedInputPoints, H, MaCanon, MbCanon);

    //transform computed poses to account for canonical transform:
    cv::Mat Ma = MaCanon * MmodelPoints2Canonical;
    cv::Mat Mb = MbCanon * MmodelPoints2Canonical;

    //output poses:
    Ma.copyTo(_Ma);
    Mb.copyTo(_Mb);
}
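A hypothetical call sketch, assuming IPPE::PoseSolver is default-constructible (its constructor is not shown here). The object points must be planar and at least four, and the image points are expected in normalized camera coordinates; Ma and Mb receive the two candidate pose hypotheses:

std::vector<cv::Point3d> objectPoints = {
    {-0.5, -0.5, 0.0}, {0.5, -0.5, 0.0}, {0.5, 0.5, 0.0}, {-0.5, 0.5, 0.0}
};
std::vector<cv::Point2d> normalizedPoints = {
    {-0.10, -0.10}, {0.11, -0.09}, {0.10, 0.12}, {-0.09, 0.10}
};
cv::Mat Ma, Mb;            // the two candidate pose matrices
IPPE::PoseSolver solver;   // assumed default-constructible
solver.solveGeneric(objectPoints, normalizedPoints, Ma, Mb);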
Example 3: showWindow
void showWindow(const string &winName, cv::InputArray mat)
{
    Mat temp(mat.size(), mat.type());
    mat.getMat().copyTo(temp);
    namedWindow(winName, WINDOW_NORMAL); // Create a window for display.
    imshow(winName, temp);               // Show our image inside it.
}
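A usage sketch (the image path is a placeholder):

cv::Mat img = cv::imread("lena.png"); // hypothetical file path
if (!img.empty()) {
    showWindow("demo", img);
    cv::waitKey(0); // imshow needs an event-loop pump to actually render
}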
Example 4: write
void Regression::write(cv::InputArray array)
{
    write() << "kind" << array.kind();
    write() << "type" << array.type();
    if (isVector(array))
    {
        int total = (int)array.total();
        int idx = regRNG.uniform(0, total);
        write() << "len" << total;
        write() << "idx" << idx;

        cv::Mat m = array.getMat(idx);

        if (m.total() * m.channels() < 26) //5x5 or smaller
            write() << "val" << m;
        else
            write(m);
    }
    else
    {
        if (array.total() * array.channels() < 26) //5x5 or smaller
            write() << "val" << array.getMat();
        else
            write(array.getMat());
    }
}
Example 5: warmify
void warmify(cv::InputArray src, cv::OutputArray dst, uchar delta)
{
    CV_Assert(src.type() == CV_8UC3);
    Mat imgSrc = src.getMat();
    CV_Assert(imgSrc.data);

    dst.create(src.size(), CV_8UC3);
    Mat imgDst = dst.getMat();

    imgDst = imgSrc + Scalar(0, delta, delta);
}
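The Scalar adds delta to channels 1 and 2, which in OpenCV's default BGR ordering are green and red, so the image shifts toward warmer tones. A usage sketch with placeholder file names:

cv::Mat input = cv::imread("photo.jpg"); // hypothetical input, loaded as 8-bit BGR
cv::Mat warmed;
warmify(input, warmed, 30);              // push green and red up by 30
cv::imwrite("photo_warm.jpg", warmed);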
Example 6: apply
void FilterBase::apply(cv::InputArray _src, cv::OutputArray _dst, const int &ddepth){
    int stype = _src.type();
    int dcn = _src.channels();
    int depth = CV_MAT_DEPTH(stype);

    if (0 <= ddepth)
        depth = ddepth;

    Mat src, dst;
    src = _src.getMat();

    Size sz = src.size();
    _dst.create(sz, CV_MAKETYPE(depth, dcn));
    dst = _dst.getMat();

    //rows are the image height, cols the width:
    int imageWidth = src.cols;
    int imageHeight = src.rows;

    //the kernel loop below assumes a 3-channel source and a CV_64FC3 destination:
    Mat srcChannels[3];
    split(src, srcChannels);

    int margineWidth = kernel.cols / 2;
    int margineHeight = kernel.rows / 2;
    double kernelElemCount = (double)(kernel.cols * kernel.rows);

    for(int ch = 0; ch < dcn; ++ch){
        for(int y = 0; y < imageHeight; ++y){
            Vec3d *ptr = dst.ptr<Vec3d>(y);
            for(int x = 0; x < imageWidth; ++x){
                if (isEdge(x, y, imageWidth, imageHeight, margineWidth, margineHeight)){
                    ptr[x][ch] = calcKernelOutputAtEdge(srcChannels[ch],
                                                        kernel, x, y,
                                                        imageWidth, imageHeight,
                                                        margineWidth, margineHeight);
                }else{
                    ptr[x][ch] = calcKernelOutput(srcChannels[ch],
                                                  kernel, x, y,
                                                  margineWidth, margineHeight,
                                                  kernelElemCount);
                }
            }
        }
    }
}
Example 7: normalizeDataIsotropic
void HomographyHO::normalizeDataIsotropic(cv::InputArray _Data, cv::OutputArray _DataN, cv::OutputArray _T, cv::OutputArray _Ti)
{
    cv::Mat Data = _Data.getMat();
    int numPoints = Data.rows * Data.cols;

    assert((Data.rows == 1) || (Data.cols == 1));
    assert((Data.channels() == 2) || (Data.channels() == 3));
    assert(numPoints >= 4);

    int dataType = _Data.type();
    assert((dataType == CV_64FC2) || (dataType == CV_64FC3) || (dataType == CV_32FC2) || (dataType == CV_32FC3));

    _DataN.create(2, numPoints, CV_64FC1);
    _T.create(3, 3, CV_64FC1);
    _Ti.create(3, 3, CV_64FC1);

    cv::Mat DataN = _DataN.getMat();
    cv::Mat T = _T.getMat();
    cv::Mat Ti = _Ti.getMat();

    _T.setTo(0);
    _Ti.setTo(0);

    double xm, ym;
    int numChannels = Data.channels();
    xm = 0;
    ym = 0;

    //compute the centroid of the points:
    for (int i = 0; i < numPoints; i++) {
        if (numChannels == 2) {
            if (dataType == CV_32FC2) {
                xm = xm + Data.at<Vec2f>(i)[0];
                ym = ym + Data.at<Vec2f>(i)[1];
            }
            else {
                xm = xm + Data.at<Vec2d>(i)[0];
                ym = ym + Data.at<Vec2d>(i)[1];
            }
        }
        else {
            if (dataType == CV_32FC3) {
                xm = xm + Data.at<Vec3f>(i)[0];
                ym = ym + Data.at<Vec3f>(i)[1];
            }
            else {
                xm = xm + Data.at<Vec3d>(i)[0];
                ym = ym + Data.at<Vec3d>(i)[1];
            }
        }
    }
    xm = xm / (double)numPoints;
    ym = ym / (double)numPoints;

    //center the points and accumulate their squared distances:
    double kappa = 0;
    double xh, yh;
    for (int i = 0; i < numPoints; i++) {
        if (numChannels == 2) {
            if (dataType == CV_32FC2) {
                xh = Data.at<Vec2f>(i)[0] - xm;
                yh = Data.at<Vec2f>(i)[1] - ym;
            }
            else {
                xh = Data.at<Vec2d>(i)[0] - xm;
                yh = Data.at<Vec2d>(i)[1] - ym;
            }
        }
        else {
            if (dataType == CV_32FC3) {
                xh = Data.at<Vec3f>(i)[0] - xm;
                yh = Data.at<Vec3f>(i)[1] - ym;
            }
            else {
                xh = Data.at<Vec3d>(i)[0] - xm;
                yh = Data.at<Vec3d>(i)[1] - ym;
            }
        }

        DataN.at<double>(0, i) = xh;
        DataN.at<double>(1, i) = yh;
        kappa = kappa + xh * xh + yh * yh;
    }

    //scale so that the RMS point distance from the origin is sqrt(2):
    double beta = sqrt(2 * numPoints / kappa);
    DataN = DataN * beta;

    T.at<double>(0, 0) = 1.0 / beta;
    T.at<double>(1, 1) = 1.0 / beta;
    T.at<double>(0, 2) = xm;
    T.at<double>(1, 2) = ym;
    T.at<double>(2, 2) = 1;

    Ti.at<double>(0, 0) = beta;
    Ti.at<double>(1, 1) = beta;
    Ti.at<double>(0, 2) = -beta * xm;
    Ti.at<double>(1, 2) = -beta * ym;
//......... some code omitted here .........
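For reference, a sketch of the math this function implements (standard isotropic, Hartley-style normalization). With centroid $(\bar{x}, \bar{y})$ and scale

$$\bar{x} = \frac{1}{n}\sum_{i=1}^{n} x_i,\qquad \bar{y} = \frac{1}{n}\sum_{i=1}^{n} y_i,\qquad \beta = \sqrt{\frac{2n}{\sum_{i=1}^{n}\left((x_i-\bar{x})^2+(y_i-\bar{y})^2\right)}},$$

the normalized points $\hat{x}_i = \beta\,(x_i - \bar{x})$, $\hat{y}_i = \beta\,(y_i - \bar{y})$ have root-mean-square distance $\sqrt{2}$ from the origin. The matrices assembled above are

$$T = \begin{pmatrix} 1/\beta & 0 & \bar{x}\\ 0 & 1/\beta & \bar{y}\\ 0 & 0 & 1 \end{pmatrix},\qquad T^{-1} = \begin{pmatrix} \beta & 0 & -\beta\bar{x}\\ 0 & \beta & -\beta\bar{y}\\ 0 & 0 & 1 \end{pmatrix},$$

where T maps normalized coordinates back to the input frame; the omitted tail presumably sets Ti(2, 2) = 1.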
Example 8: unprojectPointsFisheye
void unprojectPointsFisheye( cv::InputArray distorted, cv::OutputArray undistorted, cv::InputArray K, cv::InputArray D, cv::InputArray R, cv::InputArray P)
{
    // will support only 2-channel data now for points
    CV_Assert(distorted.type() == CV_32FC2 || distorted.type() == CV_64FC2);
    undistorted.create(distorted.size(), CV_MAKETYPE(distorted.depth(), 3));

    CV_Assert(P.empty() || P.size() == cv::Size(3, 3) || P.size() == cv::Size(4, 3));
    CV_Assert(R.empty() || R.size() == cv::Size(3, 3) || R.total() * R.channels() == 3);
    CV_Assert(D.total() == 4 && K.size() == cv::Size(3, 3) && (K.depth() == CV_32F || K.depth() == CV_64F));

    cv::Vec2d f, c;
    if (K.depth() == CV_32F)
    {
        cv::Matx33f camMat = K.getMat();
        f = cv::Vec2f(camMat(0, 0), camMat(1, 1));
        c = cv::Vec2f(camMat(0, 2), camMat(1, 2));
    }
    else
    {
        cv::Matx33d camMat = K.getMat();
        f = cv::Vec2d(camMat(0, 0), camMat(1, 1));
        c = cv::Vec2d(camMat(0, 2), camMat(1, 2));
    }

    cv::Vec4d k = D.depth() == CV_32F ? (cv::Vec4d)*D.getMat().ptr<cv::Vec4f>(): *D.getMat().ptr<cv::Vec4d>();

    cv::Matx33d RR = cv::Matx33d::eye();
    if (!R.empty() && R.total() * R.channels() == 3)
    {
        cv::Vec3d rvec;
        R.getMat().convertTo(rvec, CV_64F);
        RR = cv::Affine3d(rvec).rotation();
    }
    else if (!R.empty() && R.size() == cv::Size(3, 3))
        R.getMat().convertTo(RR, CV_64F);

    if(!P.empty())
    {
        cv::Matx33d PP;
        P.getMat().colRange(0, 3).convertTo(PP, CV_64F);
        RR = PP * RR;
    }

    // start undistorting
    const cv::Vec2f* srcf = distorted.getMat().ptr<cv::Vec2f>();
    const cv::Vec2d* srcd = distorted.getMat().ptr<cv::Vec2d>();
    cv::Vec3f* dstf = undistorted.getMat().ptr<cv::Vec3f>();
    cv::Vec3d* dstd = undistorted.getMat().ptr<cv::Vec3d>();

    size_t n = distorted.total();
    int sdepth = distorted.depth();

    for(size_t i = 0; i < n; i++ )
    {
        cv::Vec2d pi = sdepth == CV_32F ? (cv::Vec2d)srcf[i] : srcd[i];  // image point
        cv::Vec2d pw((pi[0] - c[0])/f[0], (pi[1] - c[1])/f[1]);          // world point

        double theta_d = sqrt(pw[0]*pw[0] + pw[1]*pw[1]);
        double theta = theta_d;
        if (theta_d > 1e-8)
        {
            // compensate distortion iteratively
            for(int j = 0; j < 10; j++ )
            {
                double theta2 = theta*theta, theta4 = theta2*theta2, theta6 = theta4*theta2, theta8 = theta6*theta2;
                theta = theta_d / (1 + k[0] * theta2 + k[1] * theta4 + k[2] * theta6 + k[3] * theta8);
            }
        }

        double z = std::cos(theta);
        double r = std::sin(theta);

        cv::Vec3d pu = cv::Vec3d(r*pw[0], r*pw[1], z); //undistorted point

        // reproject
        cv::Vec3d pr = RR * pu; // rotated point optionally multiplied by new camera matrix
        cv::Vec3d fi;           // final
        normalize(pr, fi);

        if( sdepth == CV_32F )
            dstf[i] = fi;
        else
            dstd[i] = fi;
    }
}
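A call sketch with made-up intrinsics; the K and D values below are placeholders, and R and P are left empty so only the fisheye model is inverted. Each input pixel yields one unit-norm 3D bearing vector:

cv::Matx33d K(300.0, 0.0, 320.0,
              0.0, 300.0, 240.0,
              0.0, 0.0, 1.0);
cv::Vec4d D(0.01, -0.002, 0.0, 0.0); // k1..k4 fisheye coefficients
std::vector<cv::Point2d> pixels = { {100.0, 120.0}, {400.0, 300.0} };
cv::Mat rays; // one CV_64FC3 unit vector per input pixel
unprojectPointsFisheye(pixels, rays, K, D, cv::noArray(), cv::noArray());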
Example 9: write_one
bool VideoWriter_IntelMFX::write_one(cv::InputArray bgr)
{
    mfxStatus res;
    mfxFrameSurface1 *workSurface = 0;
    mfxSyncPoint sync;

    if (!bgr.empty() && (bgr.dims() != 2 || bgr.type() != CV_8UC3 || bgr.size() != frameSize))
    {
        MSG(cerr << "MFX: invalid frame passed to encoder: "
                 << "dims/depth/cn=" << bgr.dims() << "/" << bgr.depth() << "/" << bgr.channels()
                 << ", size=" << bgr.size() << endl);
        return false;
    }

    if (!bgr.empty())
    {
        workSurface = pool->getFreeSurface();
        if (!workSurface)
        {
            // not enough surfaces
            MSG(cerr << "MFX: Failed to get free surface" << endl);
            return false;
        }

        const int rows = workSurface->Info.Height;
        const int cols = workSurface->Info.Width;
        Mat Y(rows, cols, CV_8UC1, workSurface->Data.Y, workSurface->Data.Pitch);
        Mat UV(rows / 2, cols, CV_8UC1, workSurface->Data.UV, workSurface->Data.Pitch);
        to_nv12(bgr, Y, UV);
        CV_Assert(Y.ptr() == workSurface->Data.Y);
        CV_Assert(UV.ptr() == workSurface->Data.UV);
    }

    while (true)
    {
        outSurface = 0;
        DBG(cout << "Calling with surface: " << workSurface << endl);
        res = encoder->EncodeFrameAsync(NULL, workSurface, &bs->stream, &sync);
        if (res == MFX_ERR_NONE)
        {
            res = session->SyncOperation(sync, 1000); // 1 sec, TODO: provide interface to modify timeout
            if (res == MFX_ERR_NONE)
            {
                // ready to write
                if (!bs->write())
                {
                    MSG(cerr << "MFX: Failed to write bitstream" << endl);
                    return false;
                }
                else
                {
                    DBG(cout << "Write bitstream" << endl);
                    return true;
                }
            }
            else
            {
                MSG(cerr << "MFX: Sync error: " << res << endl);
                return false;
            }
        }
        else if (res == MFX_ERR_MORE_DATA)
        {
            DBG(cout << "ERR_MORE_DATA" << endl);
            return false;
        }
        else if (res == MFX_WRN_DEVICE_BUSY)
        {
            DBG(cout << "Waiting for device" << endl);
            sleep(1);
            continue;
        }
        else
        {
            MSG(cerr << "MFX: Bad status: " << res << endl);
            return false;
        }
    }
}
Example 10: verify
void Regression::verify(cv::FileNode node, cv::InputArray array, double eps, ERROR_TYPE err)
{
    int expected_kind = (int)node["kind"];
    int expected_type = (int)node["type"];
    ASSERT_EQ(expected_kind, array.kind()) << " Argument \"" << node.name() << "\" has unexpected kind";
    ASSERT_EQ(expected_type, array.type()) << " Argument \"" << node.name() << "\" has unexpected type";

    cv::FileNode valnode = node["val"];
    if (isVector(array))
    {
        int expected_length = (int)node["len"];
        ASSERT_EQ(expected_length, (int)array.total()) << " Vector \"" << node.name() << "\" has unexpected length";
        int idx = node["idx"];

        cv::Mat actual = array.getMat(idx);

        if (valnode.isNone())
        {
            ASSERT_LE((size_t)26, actual.total() * (size_t)actual.channels())
                << " \"" << node.name() << "[" << idx << "]\" has unexpected number of elements";
            verify(node, actual, eps, cv::format("%s[%d]", node.name().c_str(), idx), err);
        }
        else
        {
            cv::Mat expected;
            valnode >> expected;

            if(expected.empty())
            {
                ASSERT_TRUE(actual.empty())
                    << " expected empty " << node.name() << "[" << idx << "]";
            }
            else
            {
                ASSERT_EQ(expected.size(), actual.size())
                    << " " << node.name() << "[" << idx << "] has unexpected size";

                cv::Mat diff;
                cv::absdiff(expected, actual, diff);

                if (err == ERROR_ABSOLUTE)
                {
                    if (!cv::checkRange(diff, true, 0, 0, eps))
                    {
                        if(expected.total() * expected.channels() < 12)
                            std::cout << " Expected: " << std::endl << expected << std::endl
                                      << " Actual:" << std::endl << actual << std::endl;

                        double max;
                        cv::minMaxIdx(diff.reshape(1), 0, &max);

                        FAIL() << " Absolute difference (=" << max << ") between argument \""
                               << node.name() << "[" << idx << "]\" and expected value is greater than " << eps;
                    }
                }
                else if (err == ERROR_RELATIVE)
                {
                    double maxv, maxa;
                    int violations = countViolations(expected, actual, diff, eps, &maxv, &maxa);
                    if (violations > 0)
                    {
                        FAIL() << " Relative difference (" << maxv << " of " << maxa << " allowed) between argument \""
                               << node.name() << "[" << idx << "]\" and expected value is greater than " << eps << " in " << violations << " points";
                    }
                }
            }
        }
    }
    else
    {
        if (valnode.isNone())
//......... some code omitted here .........
Example 11: disparityFitPlane
void disparityFitPlane(cv::InputArray disparity, cv::InputArray image, cv::OutputArray dest, int slicRegionSize, float slicRegularization, float slicMinRegionRatio, int slicMaxIteration, int ransacNumofSample, float ransacThreshold)
{
    //disparityFitTest(ransacNumofSample, ransacThreshold);
    //cv::FileStorage pointxml("planePoint.xml", cv::FileStorage::WRITE); int err = 0;
    Mat segment;
    SLIC(image, segment, slicRegionSize, slicRegularization, slicMinRegionRatio, slicMaxIteration);

    vector<vector<Point3f>> points;
    SLICSegment2Vector3D_<float>(segment, disparity, 0, points);

    Mat disp32f = Mat::zeros(dest.size(), CV_32F);
    for (size_t i = 0; i < points.size(); ++i)
    {
        if (points[i].size() < 3)
        {
            //too few points to fit a plane; zero out the disparity instead:
            if (!points[i].empty())
            {
                for (size_t j = 0; j < points[i].size(); ++j)
                {
                    points[i][j].z = 0.f;
                }
            }
        }
        else
        {
            Point3f abc;
            fitPlaneRANSAC(points[i], abc, ransacNumofSample, ransacThreshold, 1);

            //for refinement (if necessary)
            int v = countArrowablePointDistanceZ(points[i], abc, ransacThreshold);
            /*double rate = (double)v / points[i].size() * 100;
            int itermax = 1;
            for (int n = 0; n < itermax; n++)
            {
                if (rate < 30)
                {
                    //pointxml << format("point%03d", err++) << points[i];
                    fitPlaneRANSAC(points[i], abc, ransacNumofSample, ransacThreshold, 1);
                    v = countArrowablePointDistanceZ(points[i], abc, ransacThreshold);
                    rate = (double)v / points[i].size() * 100;
                }
            }*/

            //evaluate the fitted plane z = a*x + b*y + c at each pixel:
            for (size_t j = 0; j < points[i].size(); ++j)
            {
                points[i][j].z = points[i][j].x * abc.x + points[i][j].y * abc.y + abc.z;
            }
        }
    }
    SLICVector3D2Signal(points, image.size(), disp32f);

    if (disparity.depth() == CV_32F)
    {
        disp32f.copyTo(dest);
    }
    else if (disparity.depth() == CV_8U || disparity.depth() == CV_16U || disparity.depth() == CV_16S || disparity.depth() == CV_32S)
    {
        disp32f.convertTo(dest, disparity.type(), 1.0, 0.5); // +0.5 rounds to nearest on integer types
    }
    else
    {
        disp32f.convertTo(dest, disparity.type());
    }
}