This article collects typical usage examples of the C++ method GpuMat::download. If you are wondering what GpuMat::download does, how to call it, or what real-world usage looks like, the curated code samples below may help. You can also explore further usage examples of the GpuMat class.
The following shows 14 code examples of GpuMat::download, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
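Before going through the examples, here is a minimal sketch of the usual upload / process / download round trip; the file name and the threshold call are illustrative assumptions, not taken from the examples below.

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/cudaarithm.hpp>

int main()
{
    // Read an image on the host (placeholder file name).
    cv::Mat host = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    // Upload to device memory, run a GPU operation, then copy the result back.
    cv::cuda::GpuMat d_src, d_dst;
    d_src.upload(host);
    cv::cuda::threshold(d_src, d_dst, 128, 255, cv::THRESH_BINARY);
    cv::Mat result;
    d_dst.download(result);            // blocking device-to-host copy
    // d_dst.download(result, stream); // asynchronous variant, see Example 2
    return 0;
}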
Example 1: compute
void StereoSingleGpu::compute(const Mat& leftFrame, const Mat& rightFrame, Mat& disparity)
{
    cuda::setDevice(deviceId_);
    d_leftFrame.upload(leftFrame);
    d_rightFrame.upload(rightFrame);
    d_alg->compute(d_leftFrame, d_rightFrame, d_disparity);
    d_disparity.download(disparity); // copy the computed disparity map back to host memory
}
Example 2: syncOutput
void cv::cuda::syncOutput(const GpuMat& dst, OutputArray _dst, Stream& stream)
{
#ifndef HAVE_CUDA
    (void) dst;
    (void) _dst;
    (void) stream;
    throw_no_cuda();
#else
    if (_dst.kind() != _InputArray::CUDA_GPU_MAT)
    {
        if (stream)
            dst.download(_dst, stream);
        else
            dst.download(_dst);
    }
#endif
}
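Example 2 illustrates the two download overloads: the blocking one and the stream-based asynchronous one. Below is a minimal sketch of the asynchronous pattern using pinned host memory; the arithmetic call and the use of HostMem are illustrative assumptions, not part of the example above.

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaarithm.hpp>

void asyncDownloadSketch(const cv::cuda::GpuMat& d_src)
{
    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_dst;
    // Queue some GPU work on the stream (placeholder operation).
    cv::cuda::multiply(d_src, cv::Scalar::all(2), d_dst, 1.0, -1, stream);
    // Page-locked host memory lets the copy overlap with other work.
    cv::cuda::HostMem pinned(d_src.size(), d_src.type(), cv::cuda::HostMem::PAGE_LOCKED);
    cv::Mat result = pinned.createMatHeader();
    d_dst.download(result, stream);   // enqueued on the stream; does not block the CPU
    stream.waitForCompletion();       // result is only valid after this point
}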
Example 3: main
int main( int argc, const char** argv )
{
VideoCapture cap;
Rect trackWindow;
struct timeval timea, timeb, timeS, timeE;
long totalTime = 0, matchTime = 0, convertTime = 0, loadTime = 0;
int nFrames = 0;
cap.open("/home/ubuntu/Aerial/photos/SoccerGoal2_464.mp4"); //open smaller video file (recommended for Jetson)
// cap.open("/home/scott/Aerial//aerial_navigation/photos/SoccerGoal2.mp4"); //open regular video file (desktop)
cerr << cap.get(CV_CAP_PROP_FRAME_WIDTH) << endl;
cerr << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
vector<string> screenshots;
//smaller training images (Jetson)
screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh1_464.png");
screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh2_464.png");
screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh3_464.png");
screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh4_464.png");
screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh5_464.png");
screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh6_464.png");
screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh7_464.png");
screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh8_464.png");
//regular training images (Jetson)
// screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh1.png");
// screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh2.png");
// screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh3.png");
// screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh4.png");
// screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh5.png");
// screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh6.png");
// screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh7.png");
// screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh8.png");
if( !cap.isOpened() ) //make sure video file could be opened
{
cout << "***Could not initialize capturing...***\n";
return -1;
}
//define the shape to be used for erode and dilate. Change size to increase or decrease the amount eroded and dilated
Mat element = getStructuringElement(element_shape, Size(3, 3), Point(-1, -1) );
//Initialize kalman filter
KalmanFilter KF(4, 2, 0);
Mat_<float> measurement(2,1); measurement.setTo(Scalar(0));
Point pt(0, 0);
//initialize display window
namedWindow( "TrackingWicket", 0 );
setMouseCallback( "TrackingWicket", onMouse, 0 );
Rect bb; //rectangle for used for masking image to decrease template match search time
//state variables
bool paused = false;
bool debug = true;
cap >> frame0; //load the first frame
paused = true; //paused for training
vector<int> index(8); //indexes of the training images
Point2f ctr_point, kal_point;
ctr_point = pt; //point for the measured center of matched image
kal_point = pt; //point for the corrected Kalman filter estimate
//Gather training images
for(int i = 0; i < screenshots.size(); i++){
sh = imread(screenshots[i]); //read in trained image file
for(;;){
gpu_frame0.upload(sh); //upload image to gpu memory
proccess_frame(element, thresh); //process the frame prior to selection
gpu_gray.download(image); //download processed image so it can be displayed
if(trackObject < 0) { //part of image has been selected so get the trained image
mask_coll[i] = GpuMat(gpu_gray.size(), CV_8UC1, Scalar::all(0)); //initialize a mask
mask_coll[i](selection).setTo(Scalar::all(255)); //set the mask to be the selected area
gpu::bitwise_and(gpu_gray, mask_coll[i], train_coll[i]); //set the image to be only the parts in the mask
train_coll[i] = train_coll[i](selection); //set the trained image to be just the size of the selection. I'm not sure that this process is the best way
selections[i] = selection; //save the selection value for later use
index[i] = i; // save the index value
trackObject = 0; //set track object to 0 so we don't repeat this process until we have selected an object
selectObject = 0; //reset the selection object state to no object
break;
}
if( selectObject && selection.width > 0 && selection.height > 0 ) //if selecting an object show the area being selected
{
Mat mask(image, selection);
bitwise_not(mask, mask);
}
imshow("TrackingWicket", image); //display the image
waitKey(10);
}
break;
}
//loop over frames in video feed (breaks at end of file)
//.........part of the code omitted here.........
Example 4: download
static void download(const GpuMat& d_mat, vector<uchar>& vec)
{
    vec.resize(d_mat.cols);
    Mat mat(1, d_mat.cols, CV_8UC1, (void*)&vec[0]);
    d_mat.download(mat);
}
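In Example 4, the download target is a Mat header wrapped around the std::vector's own storage, so GpuMat::download writes the device data straight into the vector without an extra host-side copy; the vector must be resized to the expected element count before the header is created.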
Example 5: flags
cv::Mat::Mat(const GpuMat& m) : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows)
{
m.download(*this);
}
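Example 5 shows that constructing a cv::Mat directly from a GpuMat is just a convenience wrapper: the constructor performs a blocking download of the device data into the newly created host matrix.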
Example 6: findObjectByContour
void _ObjectDetector::findObjectByContour(void)
{
int i;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Rect boundRect;
//DEMO
if(m_bOneImg==1)
{
boundRect.height = m_Mat.size().height - 50;
boundRect.width = boundRect.height;
boundRect.x = (m_Mat.size().width - boundRect.width)*0.5;
boundRect.y = (m_Mat.size().height - boundRect.height)*0.5;
m_pClassMgr->addObject(get_time_usec(),&m_Mat,&boundRect,NULL);
return;
}
return;
// m_pContourFrame->switchFrame();
GpuMat* pThr = m_pContourFrame->getGMat();
m_pCanny->detect(*m_pGray, *pThr);
// Detect edges using Threshold
// cuda::threshold(*m_pGray, *pThr, 200, 255, THRESH_BINARY);
pThr->download(m_contourMat);
// Find contours
findContours(m_contourMat, contours, hierarchy, CV_RETR_TREE,
CV_CHAIN_APPROX_SIMPLE);
// findContours(m_frame, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
// Approximate contours to polygons + get bounding rects
vector<vector<Point> > contours_poly(contours.size());
for (i = 0; i < contours.size(); i++)
{
approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
boundRect = boundingRect(Mat(contours_poly[i]));
if (boundRect.area() < 5000)
continue;
int extraW = boundRect.width * 0.15;
int extraH = boundRect.height * 0.15;
boundRect.x -= extraW;
boundRect.y -= extraH;
if (boundRect.x < 0)
boundRect.x = 0;
if (boundRect.y < 0)
boundRect.y = 0;
boundRect.width += extraW + extraW;
boundRect.height += extraH + extraH;
int overW = m_Mat.cols - boundRect.x - boundRect.width;
int overH = m_Mat.rows - boundRect.y - boundRect.height;
if (overW < 0)
boundRect.width += overW;
if (overH < 0)
boundRect.height += overH;
m_pClassMgr->addObject(get_time_usec(),&m_Mat,&boundRect,&contours_poly[i]);
}
}
Example 7: recognize
int Transformer::recognize(const GpuMat& img){
    Mat img_host;
    img.download(img_host);
    return ocr->predict(img_host);
}
Example 8: cannySegmentation
Mat visionUtils::cannySegmentation(Mat img0, int minPixelSize, bool displayFaces)
{
// Segments items in gray image (img0)
// minPixelSize=
// -1, returns largest region only
// pixels, threshold for removing smaller regions, with less than minPixelSize pixels
// 0, returns all detected segments
// LB: Zero pad image to remove edge effects when getting regions....
int padPixels=20;
// Rect border added at start...
Rect tempRect;
tempRect.x=padPixels;
tempRect.y=padPixels;
tempRect.width=img0.cols;
tempRect.height=img0.rows;
Mat img1 = Mat::zeros(img0.rows+(padPixels*2), img0.cols+(padPixels*2), CV_8UC1);
img0.copyTo(img1(tempRect));
if (useGPU)// converted to GPU -> NOT tested to speed up here!
{
GpuMat imgGPU;
imgGPU.upload(img1);
#if CV_MAJOR_VERSION == 2
gpu::Canny(imgGPU, imgGPU, 100, 200, 3); //100, 200, 3);
#elif CV_MAJOR_VERSION == 3
cv::Ptr<cv::cuda::CannyEdgeDetector> canny = cv::cuda::createCannyEdgeDetector(100, 200, 3);
canny->detect(imgGPU, imgGPU);
#endif
imgGPU.download(img1);
}
else
{
Canny(img1, img1, 100, 200, 3); //100, 200, 3);
}
// find the contours
vector< vector<Point> > contours;
findContours(img1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
// Mask for segmented regions
Mat mask = Mat::zeros(img1.rows, img1.cols, CV_8UC1);
vector<double> areas(contours.size());
if (minPixelSize==-1)
{ // Case of taking largest region
for(int i = 0; i < (int)contours.size(); i++)
areas[i] = contourArea(Mat(contours[i]));
double max;
Point maxPosition;
cv::minMaxLoc(Mat(areas),0,&max,0,&maxPosition);
drawContours(mask, contours, maxPosition.y, Scalar(1), CV_FILLED);
}
else
{ // Case for using minimum pixel size
for (int i = 0; i < (int)contours.size(); i++)
{
if (contourArea(Mat(contours[i]))>minPixelSize)
drawContours(mask, contours, i, Scalar(1), CV_FILLED);
}
}
// normalize so imwrite(...)/imshow(...) shows the mask correctly!
cv::normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);
Mat returnMask;
returnMask=mask(tempRect);
// show the images
if (displayFaces) imshow("Canny: Img in", img0);
if (displayFaces) imshow("Canny: Mask", returnMask);
if (displayFaces) imshow("Canny: Output", img1);
return returnMask;
}
Example 9: App_main
int App_main( int argc, char** argv )
{
int numImg=50;
#if !defined WIN32 && !defined _WIN32 && !defined WINCE && defined __linux__ && !defined ANDROID
pthread_setname_np(pthread_self(),"App_main");
#endif
char filename[500];
Mat image, cameraMatrix, R, T;
vector<Mat> images,Rs,Ts;
Mat ret;//a place to return downloaded images to
double reconstructionScale=5/5.;
for(int i=0;i<numImg;i++){
Mat tmp;
sprintf(filename,"../../Trajectory_30_seconds/scene_%03d.png",i);
convertAhandaPovRayToStandard("../../Trajectory_30_seconds",
i,
cameraMatrix,
R,
T);
Mat image;
cout<<"Opening: "<< filename << endl;
imread(filename, -1).convertTo(image,CV_32FC3,1.0/65535.0);
resize(image,image,Size(),reconstructionScale,reconstructionScale);
images.push_back(image.clone());
Rs.push_back(R.clone());
Ts.push_back(T.clone());
}
cv::cuda::CudaMem cret(images[0].rows,images[0].cols,CV_32FC1);
ret=cret.createMatHeader();
//Setup camera matrix
double sx=reconstructionScale;
double sy=reconstructionScale;
cameraMatrix+=(Mat)(Mat_<double>(3,3) << 0.0,0.0,0.5,
0.0,0.0,0.5,
0.0,0.0,0.0);
cameraMatrix=cameraMatrix.mul((Mat)(Mat_<double>(3,3) << sx,0.0,sx,
0.0,sy ,sy,
0.0,0.0,1.0));
cameraMatrix-=(Mat)(Mat_<double>(3,3) << 0.0,0.0,0.5,
0.0,0.0,0.5,
0.0,0.0,0);
int layers=32;
int imagesPerCV=2;
CostVolume cv(images[0],(FrameID)0,layers,0.010,0.0,Rs[0],Ts[0],cameraMatrix);
int imageNum=0;
cv::cuda::Stream s;
for (int imageNum=0;imageNum<numImg;imageNum++){
T=Ts[imageNum];
R=Rs[imageNum];
image=images[imageNum];
if(cv.count<imagesPerCV){
cv.updateCost(image, R, T);
}
else{
//Attach optimizer
Ptr<DepthmapDenoiseWeightedHuber> dp = createDepthmapDenoiseWeightedHuber(cv.baseImageGray,cv.cvStream);
DepthmapDenoiseWeightedHuber& denoiser=*dp;
Optimizer optimizer(cv);
optimizer.initOptimization();
GpuMat a(cv.loInd.size(),cv.loInd.type());
cv.loInd.copyTo(a,cv.cvStream);
GpuMat d;
denoiser.cacheGValues();
ret=image*0;
pfShow("A function", ret, 0, cv::Vec2d(0, layers));
pfShow("D function", ret, 0, cv::Vec2d(0, layers));
pfShow("A function loose", ret, 0, cv::Vec2d(0, layers));
pfShow("Predicted Image",ret,0,Vec2d(0,1));
pfShow("Actual Image",ret);
pfShow("A", ret, 0, cv::Vec2d(0, layers));
// waitKey(0);
// gpause();
gpause();
bool doneOptimizing; int Acount=0; int QDcount=0;
do{
// cout<<"Theta: "<< optimizer.getTheta()<<endl;
//
if(Acount==0)
gpause();
a.download(ret);
pfShow("A function", ret, 0, cv::Vec2d(0, layers));
// optimizer.epsilon*=optimizer.thetaStep;
for (int i = 0; i < 10; i++) {
//.........part of the code omitted here.........
Example 10: skinDetect
Mat visionUtils::skinDetect(Mat captureframe, Mat3b *skinDetectHSV, Mat *skinMask, std::vector<int> adaptiveHSV, int minPixelSize, int imgBlurPixels, int imgMorphPixels, int singleRegionChoice, bool displayFaces)
{
if (adaptiveHSV.size()!=6 || adaptiveHSV.empty())
{
adaptiveHSV.clear();
adaptiveHSV.push_back(5);
adaptiveHSV.push_back(38);
adaptiveHSV.push_back(51);
adaptiveHSV.push_back(17);
adaptiveHSV.push_back(250);
adaptiveHSV.push_back(242);
}
//int step = 0;
Mat3b frameTemp;
Mat3b frame;
// Forcing resize to 640x480 -> all thresholds / pixel filters configured for this size.....
// Note returned to original size at end...
Size s = captureframe.size();
cv::resize(captureframe,captureframe,Size(640,480));
if (useGPU)
{
GpuMat imgGPU, imgGPUHSV;
imgGPU.upload(captureframe);
cv::cvtColor(imgGPU, imgGPUHSV, CV_BGR2HSV);
GaussianBlur(imgGPUHSV, imgGPUHSV, Size(imgBlurPixels,imgBlurPixels), 1, 1);
imgGPUHSV.download(frameTemp);
}
else
{
cv::cvtColor(captureframe, frameTemp, CV_BGR2HSV);
GaussianBlur(frameTemp, frameTemp, Size(imgBlurPixels,imgBlurPixels), 1, 1);
}
// Potential FASTER VERSION using inRange
Mat frameThreshold = Mat::zeros(frameTemp.rows,frameTemp.cols, CV_8UC1);
Mat hsvMin = (Mat_<int>(1,3) << adaptiveHSV[0], adaptiveHSV[1],adaptiveHSV[2] );
Mat hsvMax = (Mat_<int>(1,3) << adaptiveHSV[3], adaptiveHSV[4],adaptiveHSV[5] );
inRange(frameTemp,hsvMin ,hsvMax, frameThreshold);
frameTemp.copyTo(frame,frameThreshold);
/* BGR CONVERSION AND THRESHOLD */
Mat1b frame_gray;
// send HSV to skinDetectHSV for return
*skinDetectHSV=frame.clone();
cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);
// Adaptive thresholding technique
// 1. Threshold data to find main areas of skin
adaptiveThreshold(frame_gray,frame_gray,255,ADAPTIVE_THRESH_GAUSSIAN_C,THRESH_BINARY_INV,9,1);
if (useGPU)
{
GpuMat imgGPU;
imgGPU.upload(frame_gray);
// 2. Fill in thresholded areas
#if CV_MAJOR_VERSION == 2
gpu::morphologyEx(imgGPU, imgGPU, CV_MOP_CLOSE, Mat1b(imgMorphPixels,imgMorphPixels,1), Point(-1, -1), 2);
gpu::GaussianBlur(imgGPU, imgGPU, Size(imgBlurPixels,imgBlurPixels), 1, 1);
#elif CV_MAJOR_VERSION == 3
//TODO: Check if that's correct
Mat element = getStructuringElement(MORPH_RECT, Size(imgMorphPixels, imgMorphPixels), Point(-1, -1));
Ptr<cuda::Filter> closeFilter = cuda::createMorphologyFilter(MORPH_CLOSE, imgGPU.type(), element, Point(-1, -1), 2);
closeFilter->apply(imgGPU, imgGPU);
cv::Ptr<cv::cuda::Filter> gaussianFilter = cv::cuda::createGaussianFilter(imgGPU.type(), imgGPU.type(), Size(imgMorphPixels, imgMorphPixels), 1, 1);
gaussianFilter->apply(imgGPU, imgGPU);
#endif
imgGPU.download(frame_gray);
}
else
{
// 2. Fill in thresholded areas
morphologyEx(frame_gray, frame_gray, CV_MOP_CLOSE, Mat1b(imgMorphPixels,imgMorphPixels,1), Point(-1, -1), 2);
GaussianBlur(frame_gray, frame_gray, Size(imgBlurPixels,imgBlurPixels), 1, 1);
// Select single largest region from image, if singleRegionChoice is selected (1)
}
if (singleRegionChoice)
{
*skinMask = cannySegmentation(frame_gray, -1, displayFaces);
}
else // Detect each separate block and remove blobs smaller than a few pixels
{
*skinMask = cannySegmentation(frame_gray, minPixelSize, displayFaces);
}
// Just return skin
Mat frame_skin;
captureframe.copyTo(frame_skin,*skinMask); // Copy captureframe data to frame_skin, using mask from frame_ttt
//.........part of the code omitted here.........
Example 11: directRotation
double CKinFuTracker::directRotation(const CKeyFrame::tp_ptr pRefeFrame_, const CKeyFrame::tp_ptr pLiveFrame_, SO3Group<double>* pR_rl_)
{
Intr sCamIntr_ = pRefeFrame_->_pRGBCamera->getIntrinsics(2);
Matrix3d K = Matrix3d::Identity();
//note that camera parameters are
K(0, 0) = sCamIntr_.fx;
K(1, 1) = sCamIntr_.fy;
K(0, 2) = sCamIntr_.cx;
K(1, 2) = sCamIntr_.cy;
SO3Group<double> CurR_rl_ = *pR_rl_;
SO3Group<double> PrevR_rl_ = *pR_rl_;
SO3Group<double> MinR_rl_ = *pR_rl_;
Matrix3d R_rl_Kinv = PrevR_rl_.matrix() *K.inverse();
Matrix3d H_rl = K * R_rl_Kinv;
//get R,T of previous
Matrix3d H_rl_t = H_rl.transpose();
Matrix3d R_rl_Kinv_t = R_rl_Kinv.transpose();
const Matd33& devH_rl = pcl::device::device_cast<pcl::device::Matd33> (H_rl_t);
const Matd33& devR_rl_Kinv = pcl::device::device_cast<pcl::device::Matd33> (R_rl_Kinv_t);
double dMinEnergy = numeric_limits<double>::max();
double dPrevEnergy = numeric_limits<double>::max();
dPrevEnergy = energy_direct_radiance_rotation(sCamIntr_, devR_rl_Kinv, devH_rl, _n_rad_origin_2_ref, _n_rad_live[2], _err_live[2]);
dMinEnergy = dPrevEnergy;
//cout << setprecision(15) << dMinEnergy << endl;
for (short sIter = 0; sIter < 5; ++sIter) {
//get R and T
GpuMat gSumBuf = btl::device::direct_rotation(sCamIntr_, devR_rl_Kinv, devH_rl, _n_rad_origin_2_ref, _n_rad_live[2], _err_live[2]);
Mat Buf; gSumBuf.download(Buf);
SO3Group<double> R_rl = btl::utility::extractRFromBuffer<double>((double*)Buf.data);
//cout << Tran_nc.matrix() << endl;
CurR_rl_ = R_rl *PrevR_rl_;
R_rl_Kinv = CurR_rl_.matrix()*K.inverse();
H_rl = K * R_rl_Kinv;
H_rl_t = H_rl.transpose();
R_rl_Kinv_t = R_rl_Kinv.transpose();
double dCurEnergy = energy_direct_radiance_rotation(sCamIntr_, devR_rl_Kinv, devH_rl, _n_rad_origin_2_ref, _n_rad_live[2], _err_live[2]);
//cout << sIter << ": " << dPrevEnergy << " " << dCurEnergy << endl;
if (dCurEnergy < dMinEnergy){
dMinEnergy = dCurEnergy;
MinR_rl_ = CurR_rl_;
}
if (dMinEnergy / dCurEnergy < 0.25){ //diverges
//cout << "Diverge Warning:" << endl;
dCurEnergy = dMinEnergy;
CurR_rl_ = MinR_rl_;
break;
}
PrevR_rl_ = CurR_rl_;
if (fabs(dPrevEnergy / dCurEnergy - 1) < 0.01f){ //converges
//cout << "Converges" << endl;
dCurEnergy = dMinEnergy;
CurR_rl_ = MinR_rl_;
break;
}
dPrevEnergy = dCurEnergy;
}
*pR_rl_ = CurR_rl_;
return dMinEnergy;
}
Example 12: dvoICPIC
double CKinFuTracker::dvoICPIC(const CKeyFrame::tp_ptr pRefeFrame_, CKeyFrame::tp_ptr pLiveFrame_, const short asICPIterations_[], SE3Group<double>* pT_rl_, Eigen::Vector4i* pActualIter_) const
{
SE3Group<double> PrevT_rl = *pT_rl_;
SE3Group<double> NewT_rl = *pT_rl_;
//get R,T of previous
Matrix3d R_rl_t_tmp = PrevT_rl.so3().inverse().matrix();
const Matd33& devR_rl = pcl::device::device_cast<pcl::device::Matd33> (R_rl_t_tmp); //implicit inverse
Vector3d t_rl = PrevT_rl.translation();
const double3& devT_rl = pcl::device::device_cast<double3> (t_rl);
//from low resolution to high
double dCurEnergy = numeric_limits<double>::max();
for (short sPyrLevel = pLiveFrame_->pyrHeight() - 1; sPyrLevel >= 0; sPyrLevel--){
// for each pyramid level we have a min energy and corresponding best R t
if (asICPIterations_[sPyrLevel] > 0){
dCurEnergy = btl::device::dvo_icp_energy(pLiveFrame_->_pRGBCamera->getIntrinsics(sPyrLevel),
devR_rl, devT_rl,
*pRefeFrame_->_agPyrPts[sPyrLevel], *pRefeFrame_->_agPyrNls[sPyrLevel], _n_rad_ref[sPyrLevel],
*pLiveFrame_->_agPyrPts[sPyrLevel], *pLiveFrame_->_agPyrNls[sPyrLevel], _n_rad_live[sPyrLevel],
*pLiveFrame_->_agPyrDepths[sPyrLevel], _err_live[sPyrLevel], *pLiveFrame_->_pry_mask[sPyrLevel]);
//PRINT(dMinEnergy);
}
SE3Group<double> MinT_rl = NewT_rl;
double dMin = dCurEnergy;
double dPrevEnergy = dCurEnergy;
for (short sIter = 0; sIter < asICPIterations_[sPyrLevel]; ++sIter) {
//get R and T
GpuMat cvgmSumBuf = btl::device::dvo_icp(pLiveFrame_->_pRGBCamera->getIntrinsics(sPyrLevel),
devR_rl, devT_rl,
*pRefeFrame_->_agPyrPts[sPyrLevel], *pRefeFrame_->_agPyrNls[sPyrLevel], _n_rad_ref[sPyrLevel],
*pLiveFrame_->_agPyrPts[sPyrLevel], *pLiveFrame_->_agPyrNls[sPyrLevel], _n_rad_live[sPyrLevel],
*pLiveFrame_->_agPyrDepths[sPyrLevel], _err_live[sPyrLevel], *pLiveFrame_->_pry_mask[sPyrLevel]);
Mat Buf; cvgmSumBuf.download(Buf);
SE3Group<double> Tran_nc = btl::utility::extractRTFromBuffer<double>((double*)Buf.data);
NewT_rl = Tran_nc * PrevT_rl;
R_rl_t_tmp = NewT_rl.so3().inverse().matrix();
t_rl = NewT_rl.translation();
dCurEnergy = btl::device::dvo_icp_energy(pLiveFrame_->_pRGBCamera->getIntrinsics(sPyrLevel),
devR_rl, devT_rl,
*pRefeFrame_->_agPyrPts[sPyrLevel], *pRefeFrame_->_agPyrNls[sPyrLevel], _n_rad_ref[sPyrLevel],
*pLiveFrame_->_agPyrPts[sPyrLevel], *pLiveFrame_->_agPyrNls[sPyrLevel], _n_rad_live[sPyrLevel],
*pLiveFrame_->_agPyrDepths[sPyrLevel], _err_live[sPyrLevel], *pLiveFrame_->_pry_mask[sPyrLevel]);
//cout << sIter << ": " << dPrevEnergy << " " << dCurEnergy << endl;
if (dCurEnergy < dMin){
dMin = dCurEnergy;
MinT_rl = NewT_rl;
}
if (dMin / dCurEnergy > 1.125){ //diverges
//cout << "Diverge Warning:" << endl;
//cout <<"New "<< NewT_rl.matrix() << endl;
//cout <<"Prev" <<PrevT_rl.matrix() << endl;
NewT_rl = MinT_rl;
dCurEnergy = dMin;
break;
}
PrevT_rl = NewT_rl;
if (fabs(dPrevEnergy / dCurEnergy - 1) < 1e-6f){ //converges
//cout << "Converges" << endl;
dCurEnergy = dMin;
NewT_rl = MinT_rl;
break;
}
dPrevEnergy = dCurEnergy;
}//for each iteration
}//for pyrlevel
*pT_rl_ = NewT_rl;
SE3Group<double> T_rw(pRefeFrame_->_R_cw, pRefeFrame_->_Tw);
T_rw = NewT_rl.inverse()*T_rw;
pLiveFrame_->_R_cw = T_rw.so3();
pLiveFrame_->_Tw = T_rw.translation();
return dCurEnergy;
}
Example 13: enqueueDownload
inline
void Stream::enqueueDownload(const GpuMat& src, OutputArray dst)
{
    src.download(dst, *this);
}
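Example 13 shows that the legacy Stream::enqueueDownload wrapper simply forwards to the stream-aware GpuMat::download overload, so both spellings enqueue the same asynchronous device-to-host copy.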
Example 14: showImage
void showImage(GpuMat& img) {
    cv::Mat m;
    img.download(m);
    showImage(m);
}