This article collects and summarizes typical usage examples of the C++ method cv::InputArray::copyTo. If you are wondering what exactly InputArray::copyTo does, how to call it, or what it looks like in real code, the curated code examples below may help. You can also explore further usage examples of the containing class, cv::InputArray.
Four code examples of the InputArray::copyTo method are shown below; by default they are sorted by popularity. You can upvote the examples you like or find useful, and your feedback helps the system recommend better C++ code examples.
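Before the examples, here is a minimal, self-contained sketch of the pattern they all rely on: a function takes a cv::InputArray, uses copyTo to obtain its own deep copy, and writes the result back through a cv::OutputArray. It is not taken from any of the projects below; the function name processCopy and the brightness offset are purely illustrative.
#include <opencv2/core.hpp>

// Illustrative helper (hypothetical name): copy the caller's data, modify the
// local copy, and hand the result back without touching the input.
void processCopy(cv::InputArray src, cv::OutputArray dst)
{
    cv::Mat work;
    src.copyTo(work);                    // deep copy: works for Mat, UMat, std::vector, ...
    work.convertTo(work, -1, 1.0, 10.0); // example edit on the local copy (add 10 to every pixel)
    work.copyTo(dst);                    // allocate and fill the output through the OutputArray
}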
Example 1: solveGeneric
void IPPE::PoseSolver::solveGeneric(cv::InputArray _objectPoints, cv::InputArray _imagePoints, cv::InputArray _cameraMatrix, cv::InputArray _distCoeffs,
    cv::OutputArray _rvec1, cv::OutputArray _tvec1, float& err1, cv::OutputArray _rvec2, cv::OutputArray _tvec2, float& err2)
{
    cv::Mat normalizedImagePoints; // undistorted version of imagePoints

    if (_cameraMatrix.empty()) {
        // there is no camera matrix and the image points are already given in normalized pixel coordinates
        _imagePoints.copyTo(normalizedImagePoints);
    }
    else {
        // undistort the image points (i.e. put them in normalized pixel coordinates):
        cv::undistortPoints(_imagePoints, normalizedImagePoints, _cameraMatrix, _distCoeffs);
    }

    // solve:
    cv::Mat Ma, Mb;
    solveGeneric(_objectPoints, normalizedImagePoints, Ma, Mb);

    // the two poses computed by IPPE (sorted):
    cv::Mat M1, M2;

    // sort poses by reprojection error:
    sortPosesByReprojError(_objectPoints, _imagePoints, _cameraMatrix, _distCoeffs, Ma, Mb, M1, M2, err1, err2);

    // fill the outputs:
    rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
    rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);

    M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
    M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
}
Example 2: ProcessFrame
void FaceDetectorModule::ProcessFrame(cv::InputArray in, cv::OutputArray out){
    if(IsEnabled()){
        float scale = imageScale;
        float unscale = 1.0/scale;

        cv::UMat latestStep;
        in.copyTo(latestStep);
        cv::resize(latestStep, latestStep, cv::Size(), scale, scale, INTER_NEAREST);
        cvtColor( latestStep, latestStep, CV_BGR2GRAY );
        equalizeHist( latestStep, latestStep );

        cv::UMat outCopy;
        //outCopy = cv::Mat(in.cols(), in.rows(), in.type());
        out.copyTo(outCopy);

        std::vector<cv::Rect> faces;

        //-- Detect faces
        face_cascade.detectMultiScale( latestStep, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );

        //http://docs.opencv.org/3.0.0/db/d28/tutorial_cascade_classifier.html
        for( size_t i = 0; i < faces.size(); i++ )
        {
            cv::Point center( (faces[i].x + faces[i].width*0.5) * unscale, (faces[i].y + faces[i].height*0.5) * unscale);
            ellipse( outCopy, center, cv::Size( (faces[i].width*0.5) * unscale, (faces[i].height*0.5) * unscale), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );

            /*
            cv::UMat faceROI = latestStep( faces[i] );
            std::vector<cv::Rect> eyes;

            //-- In each face, detect eyes
            eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, cv::Size(30, 30) );
            for( size_t j = 0; j < eyes.size(); j++ )
            {
                cv::Point eye_center( (faces[i].x + eyes[j].x + eyes[j].width/2) * unscale, (faces[i].y + eyes[j].y + eyes[j].height/2) * unscale );
                int radius = cvRound( (eyes[j].width + eyes[j].height) * 0.25 * unscale );
                circle( outCopy, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
            }
            */
        }

        outCopy.copyTo(out);
    }
    else{
        //in.copyTo(out);
    }
}
Example 3: evalReprojError
void IPPE::PoseSolver::sortPosesByReprojError(cv::InputArray _objectPoints, cv::InputArray _imagePoints, cv::InputArray _cameraMatrix, cv::InputArray _distCoeffs, cv::InputArray _Ma, cv::InputArray _Mb, cv::OutputArray _M1, cv::OutputArray _M2, float& err1, float& err2)
{
    float erra, errb;
    evalReprojError(_objectPoints, _imagePoints, _cameraMatrix, _distCoeffs, _Ma, erra);
    evalReprojError(_objectPoints, _imagePoints, _cameraMatrix, _distCoeffs, _Mb, errb);

    if (erra < errb) {
        err1 = erra;
        _Ma.copyTo(_M1);

        err2 = errb;
        _Mb.copyTo(_M2);
    }
    else {
        err1 = errb;
        _Mb.copyTo(_M1);

        err2 = erra;
        _Ma.copyTo(_M2);
    }
}
Example 4: findColouredCirclesInFrame
std::vector<std::vector<struct ColouredCircle> > findColouredCirclesInFrame(cv::InputArray frame, cv::InputArray background) {
    cv::UMat foreground;
    cv::UMat processed;

    cv::Ptr<cv::BackgroundSubtractorMOG2> bs = cv::createBackgroundSubtractorMOG2(1, 254, false); // HEURISTIC: 254 = parameter for background subtractor
    bs->apply(background, foreground, 1);
    bs->apply(frame, foreground, 0);

    frame.copyTo(processed, foreground);
    cv::medianBlur(processed, processed, 9); // HEURISTIC: 9 = amount of blur applied before colour conversion
    cv::cvtColor(processed, processed, cv::COLOR_BGR2HSV);

    std::array<cv::UMat, 5> masks; // order: ball, blue, yellow, pink, green
    cv::inRange(processed, cv::Scalar(0, 100, 100), cv::Scalar(10, 255, 255), masks[0]);   // HEURISTIC: range of HSV values
    cv::inRange(processed, cv::Scalar(76, 90, 90), cv::Scalar(146, 255, 255), masks[1]);   // HEURISTIC: range of HSV values
    cv::inRange(processed, cv::Scalar(24, 200, 200), cv::Scalar(44, 255, 255), masks[2]);  // HEURISTIC: range of HSV values
    cv::inRange(processed, cv::Scalar(165, 90, 90), cv::Scalar(180, 255, 255), masks[3]);  // HEURISTIC: range of HSV values
    cv::inRange(processed, cv::Scalar(50, 200, 200), cv::Scalar(70, 255, 255), masks[4]);  // HEURISTIC: range of HSV values

    std::array<std::vector<std::vector<cv::Point> >, 5> contours; // order: ball, blue, yellow, pink, green
    // TODO: potentially can be vectorized
    for(size_t i = 0; i < masks.size(); i++) {
        cv::findContours(masks[i], contours[i], cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);
    }

    std::vector<std::vector<struct ColouredCircle> > circles;
    // TODO: potentially can be vectorized
    for(size_t i = 0; i < contours.size(); i++) {
        std::vector<struct ColouredCircle> c;
        findCircles(contours[i], c, 1, 5); // HEURISTIC: deviation of circle and ellipse center and radius
        for(size_t j = 0; j < c.size(); j++) {
            c[j].colour = int(i);
        }
        circles.push_back(c);
    }

    return circles;
}