

C++ GpuMat::upload Method Code Examples

This article collects typical usage examples of the C++ GpuMat::upload method, which copies a host-side Mat into GPU device memory. If you are asking: how exactly is GpuMat::upload used, and what does a real call look like? Then the curated examples below may help. You can also explore the other usage examples of the containing class, GpuMat.


The following presents 13 code examples of the GpuMat::upload method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
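Before the examples, a minimal sketch of the basic round trip (host Mat -> device GpuMat -> host Mat). This is not taken from any of the projects below; it assumes an OpenCV 3.x build with CUDA support and the standard header layout:

#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>

void gpuRoundTrip(const cv::Mat& host_in, cv::Mat& host_out)
{
    cv::cuda::GpuMat d_img;
    d_img.upload(host_in);    // blocking host -> device copy; (re)allocates device memory as needed
    // ... run cv::cuda operations on d_img here ...
    d_img.download(host_out); // blocking device -> host copy
}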

Example 1: compute

void StereoSingleGpu::compute(const Mat& leftFrame, const Mat& rightFrame, Mat& disparity)
{
    cuda::setDevice(deviceId_);
    d_leftFrame.upload(leftFrame);
    d_rightFrame.upload(rightFrame);
    d_alg->compute(d_leftFrame, d_rightFrame, d_disparity);
    d_disparity.download(disparity);
}
Developer: 112000; Project: opencv; Lines: 8; Source: stereo_multi.cpp

Example 2: getGPUMat

GpuMat* ImageImPro_OpenCvImpl::getGPUMat(){
    Mat* ptrMat = this->getMat();
    GpuMat* ptrGpuMat = new GpuMat();
    ptrGpuMat->upload(*ptrMat);
    delete ptrMat;
    return ptrGpuMat;
}
Developer: saul-calderonramirez; Project: ImProc_suite; Lines: 7; Source: imageimpro_opencvimpl.cpp
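getGPUMat above returns a raw owning pointer that the caller must remember to delete. A hedged alternative sketch that makes the ownership explicit (assumes C++14 for std::make_unique; makeGpuMat is an illustrative name, not part of the original class):

#include <memory>
#include <opencv2/core/cuda.hpp>

std::unique_ptr<cv::cuda::GpuMat> makeGpuMat(const cv::Mat& host)
{
    auto d_mat = std::make_unique<cv::cuda::GpuMat>();
    d_mat->upload(host); // host -> device copy
    return d_mat;        // ownership transfers to the caller; no manual delete
}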

Example 3: SetMean

/* Load the mean file in binaryproto format. */
void Classifier::SetMean(const string& mean_file)
{
    BlobProto blob_proto;
    ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);

    /* Convert from BlobProto to Blob<float> */
    Blob<float> mean_blob;
    mean_blob.FromProto(blob_proto);
    CHECK_EQ(mean_blob.channels(), num_channels_)
        << "Number of channels of mean file doesn't match input layer.";

    /* The format of the mean file is planar 32-bit float BGR or grayscale. */
    std::vector<Mat> channels;
    float* data = mean_blob.mutable_cpu_data();
    for (int i = 0; i < num_channels_; ++i)
    {
        /* Extract an individual channel. */
        Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data);
        channels.push_back(channel);
        data += mean_blob.height() * mean_blob.width();
    }

    /* Merge the separate channels into a single image. */
    Mat packed_mean;
    merge(channels, packed_mean);

    /* Compute the global mean pixel value and create a mean image
     * filled with this value. */
    Scalar channel_mean = mean(packed_mean);
    Mat host_mean = Mat(input_geometry_, packed_mean.type(), channel_mean);
    mean_.upload(host_mean);
}
Developer: NVIDIA; Project: gpu-rest-engine; Lines: 33; Source: classification.cpp
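Elsewhere in this classifier (not shown here), the uploaded mean_ is typically subtracted from each network input on the GPU. A hedged sketch of that preprocessing step, assuming opencv2/cudaarithm.hpp is available and d_sample is an assumed name for the input GpuMat:

cv::cuda::GpuMat d_float, d_centered;
d_sample.convertTo(d_float, CV_32FC3);          // match the float type of the mean image
cv::cuda::subtract(d_float, mean_, d_centered); // per-pixel mean subtraction on the device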

Example 4: trainCollectionCPU

void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection,
    const vector<GpuMat>& masks)
{
    if (empty())
        return;

    if (masks.empty())
    {
        Mat trainCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(DevMem2Db)));

        DevMem2Db* trainCollectionCPU_ptr = trainCollectionCPU.ptr<DevMem2Db>();

        for (size_t i = 0, size = trainDescCollection.size(); i < size; ++i, ++trainCollectionCPU_ptr)
            *trainCollectionCPU_ptr = trainDescCollection[i];

        trainCollection.upload(trainCollectionCPU);
        maskCollection.release();
    }
    else
    {
        CV_Assert(masks.size() == trainDescCollection.size());

        Mat trainCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(DevMem2Db)));
        Mat maskCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(PtrStepb)));

        DevMem2Db* trainCollectionCPU_ptr = trainCollectionCPU.ptr<DevMem2Db>();
        PtrStepb* maskCollectionCPU_ptr = maskCollectionCPU.ptr<PtrStepb>();

        for (size_t i = 0, size = trainDescCollection.size(); i < size; ++i, ++trainCollectionCPU_ptr, ++maskCollectionCPU_ptr)
        {
            const GpuMat& train = trainDescCollection[i];
            const GpuMat& mask = masks[i];

            CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.cols == train.rows));

            *trainCollectionCPU_ptr = train;
            *maskCollectionCPU_ptr = mask;
        }

        trainCollection.upload(trainCollectionCPU);
        maskCollection.upload(maskCollectionCPU);
    }
}
Developer: Ashwini7; Project: smart-python-programs; Lines: 43; Source: brute_force_matcher.cpp

Example 5: LevelsInit

        LevelsInit()
        {
            nValues3[0] = nValues3[1] = nValues3[2] = 256;
            for (int i = 0; i < 256; ++i)
                pLevels[i] = i;


#if (CUDA_VERSION <= 4020)
            pLevels3[0] = pLevels3[1] = pLevels3[2] = pLevels;
#else
            d_pLevels.upload(Mat(1, 256, CV_32S, pLevels));
            pLevels3[0] = pLevels3[1] = pLevels3[2] = d_pLevels.ptr<Npp32s>();
#endif
        }
Developer: LiliMeng; Project: opencv-2; Lines: 14; Source: arithm.cpp

Example 6: switch

GpuMat cv::superres::arrGetGpuMat(InputArray arr, GpuMat& buf)
{
    switch (arr.kind())
    {
    case _InputArray::GPU_MAT:
        return arr.getGpuMat();

    case _InputArray::OPENGL_BUFFER:
        arr.getOGlBuffer().copyTo(buf);
        return buf;

    default:
        buf.upload(arr.getMat());
        return buf;
    }
}
Developer: 112000; Project: opencv; Lines: 16; Source: input_array_utility.cpp

Example 7: if

GpuMat cv::cuda::getInputMat(InputArray _src, Stream& stream)
{
    GpuMat src;

#ifndef HAVE_CUDA
    (void) _src;
    (void) stream;
    throw_no_cuda();
#else
    if (_src.kind() == _InputArray::CUDA_GPU_MAT)
    {
        src = _src.getGpuMat();
    }
    else if (!_src.empty())
    {
        BufferPool pool(stream);
        src = pool.getBuffer(_src.size(), _src.type());
        src.upload(_src, stream);
    }
#endif

    return src;
}
Developer: kylefleming; Project: opencv; Lines: 23; Source: cuda_gpu_mat.cpp
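Example 7 draws its temporary GpuMat from a per-stream BufferPool instead of allocating directly. A hedged usage sketch of that pattern (OpenCV 3.x; note that cv::cuda::setBufferPoolUsage must be called before the first Stream is created):

#include <opencv2/core/cuda.hpp>

void poolUpload(const cv::Mat& host)
{
    cv::cuda::setBufferPoolUsage(true);     // opt in to stream-attached memory pools
    cv::cuda::Stream stream;
    cv::cuda::BufferPool pool(stream);
    cv::cuda::GpuMat d_buf = pool.getBuffer(host.size(), host.type());
    d_buf.upload(host, stream);             // asynchronous copy on the stream
    stream.waitForCompletion();             // synchronize before reading d_buf
}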

Example 8: main

int main( int argc, const char** argv )
{

	VideoCapture cap;
	Rect trackWindow;

	struct timeval timea, timeb, timeS, timeE;
	long totalTime = 0, matchTime = 0, convertTime = 0, loadTime = 0;
	int nFrames = 0;

	cap.open("/home/ubuntu/Aerial/photos/SoccerGoal2_464.mp4"); //open smaller video file (recommended for Jetson)
//	cap.open("/home/scott/Aerial//aerial_navigation/photos/SoccerGoal2.mp4"); //open regular video file (desktop)

	cerr << cap.get(CV_CAP_PROP_FRAME_WIDTH) << endl;
	cerr << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
	vector<string> screenshots;
	//smaller training images (Jetson)
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh1_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh2_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh3_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh4_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh5_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh6_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh7_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh8_464.png");
	//regular training images (desktop)
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh1.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh2.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh3.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh4.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh5.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh6.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh7.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh8.png");


	if( !cap.isOpened() ) //make sure video file could be opened
	{
		cout << "***Could not initialize capturing...***\n";
		return -1;
	}
	//define the shape to be used for erode and dilate. Change size to increase or decrease the amount eroded and dilated
	Mat element = getStructuringElement(element_shape, Size(3, 3), Point(-1, -1) );

	//Initialize kalman filter
	KalmanFilter KF(4, 2, 0);
	Mat_<float> measurement(2,1); measurement.setTo(Scalar(0));
	Point pt(0, 0);

	//initialize display window
	namedWindow( "TrackingWicket", 0 );
	setMouseCallback( "TrackingWicket", onMouse, 0 );

	Rect bb; //rectangle used for masking the image to decrease template match search time

	//state variables
	bool paused = false;
	bool debug = true;

	cap >> frame0; //load the first frame
	paused = true; //paused for training
	vector<int> index(8); //indexes of the training images
	Point2f ctr_point, kal_point;

	ctr_point = pt; //point for the measured center of matched image
	kal_point = pt; //point for the corrected Kalman filter estimate

	//Gather training images
	for(int i = 0; i < screenshots.size(); i++){
		sh = imread(screenshots[i]); //read in trained image file

		for(;;){

			gpu_frame0.upload(sh); //upload image to gpu memory
			proccess_frame(element, thresh); //process the frame prior to selection

			gpu_gray.download(image); //download processed image so it can be displayed
			if(trackObject < 0) { //part of image has been selected so get the trained image

				mask_coll[i] = GpuMat(gpu_gray.size(), CV_8UC1, Scalar::all(0)); //initialize a mask
				mask_coll[i](selection).setTo(Scalar::all(255)); //set the mask to be the selected area
				gpu::bitwise_and(gpu_gray, mask_coll[i], train_coll[i]); //set the image to be only the parts in the mask
				train_coll[i] = train_coll[i](selection); //set the trained image to be just the size of the selection. I'm not sure that this process is the best way
				selections[i] = selection; //save the selection value for later use
				index[i] = i; // save the index value
				trackObject = 0; //set track object to 0 so we don't repeat this process until we have selected an object
				selectObject = 0; //reset the selection object state to no object
				break;
			}
			if( selectObject && selection.width > 0 && selection.height > 0 ) //if selecting an object show the area being selected
			{
				Mat mask(image, selection);
				bitwise_not(mask, mask);
			}
			imshow("TrackingWicket", image); //display the image
			waitKey(10);
		}
		break;
	}
	//loop over frames in video feed (breaks at end of file)
//......... part of the code omitted here .........
Developer: OSURoboticsClub; Project: aerial_navigation; Lines: 101; Source: WicketTracker.cpp

Example 9: tmpMat


//......... part of the code omitted here .........
    CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()));

    ensureSizeIsEnough(image.size(), CV_32F, eig_);

    if (useHarrisDetector)
        cornerHarris(image, eig_, Dx_, Dy_, buf_, blockSize, 3, harrisK);
    else
        cornerMinEigenVal(image, eig_, Dx_, Dy_, buf_, blockSize, 3);

    double maxVal = 0;
    minMax(eig_, 0, &maxVal, GpuMat(), minMaxbuf_);

    ensureSizeIsEnough(1, std::max(1000, static_cast<int>(image.size().area() * 0.05)), CV_32FC2, tmpCorners_);

    int total = findCorners_gpu(eig_, static_cast<float>(maxVal * qualityLevel), mask, tmpCorners_.ptr<float2>(), tmpCorners_.cols);

    if (total == 0)
    {
        corners.release();
        return;
    }

    sortCorners_gpu(eig_, tmpCorners_.ptr<float2>(), total);

    if (minDistance < 1)
        tmpCorners_.colRange(0, maxCorners > 0 ? std::min(maxCorners, total) : total).copyTo(corners);
    else
    {
        vector<Point2f> tmp(total);
        Mat tmpMat(1, total, CV_32FC2, (void*)&tmp[0]);
        tmpCorners_.colRange(0, total).download(tmpMat);

        vector<Point2f> tmp2;
        tmp2.reserve(total);

        const int cell_size = cvRound(minDistance);
        const int grid_width = (image.cols + cell_size - 1) / cell_size;
        const int grid_height = (image.rows + cell_size - 1) / cell_size;

        std::vector< std::vector<Point2f> > grid(grid_width * grid_height);

        for (int i = 0; i < total; ++i)
        {
            Point2f p = tmp[i];

            bool good = true;

            int x_cell = static_cast<int>(p.x / cell_size);
            int y_cell = static_cast<int>(p.y / cell_size);

            int x1 = x_cell - 1;
            int y1 = y_cell - 1;
            int x2 = x_cell + 1;
            int y2 = y_cell + 1;

            // boundary check
            x1 = std::max(0, x1);
            y1 = std::max(0, y1);
            x2 = std::min(grid_width - 1, x2);
            y2 = std::min(grid_height - 1, y2);

            for (int yy = y1; yy <= y2; yy++)
            {
                for (int xx = x1; xx <= x2; xx++)
                {
                    vector<Point2f>& m = grid[yy * grid_width + xx];

                    if (!m.empty())
                    {
                        for(size_t j = 0; j < m.size(); j++)
                        {
                            float dx = p.x - m[j].x;
                            float dy = p.y - m[j].y;

                            if (dx * dx + dy * dy < minDistance * minDistance)
                            {
                                good = false;
                                goto break_out;
                            }
                        }
                    }
                }
            }

break_out:

            if(good)
            {
                grid[y_cell * grid_width + x_cell].push_back(p);

                tmp2.push_back(p);

                if (maxCorners > 0 && tmp2.size() == static_cast<size_t>(maxCorners))
                    break;
            }
        }

        corners.upload(Mat(1, static_cast<int>(tmp2.size()), CV_32FC2, &tmp2[0]));
    }
}
Developer: chenleic; Project: Opencv; Lines: 101; Source: gftt.cpp

Example 10: cannySegmentation

Mat visionUtils::cannySegmentation(Mat img0, int minPixelSize, bool displayFaces)
{
    // Segments items in a gray image (img0)
    // minPixelSize:
    //   -1 -> return only the largest region
    //   >0 -> threshold; regions with fewer than minPixelSize pixels are removed
    //    0 -> return all detected segments


    // LB: Zero pad image to remove edge effects when getting regions....
    int padPixels=20;
    // Rect border added at start...
    Rect tempRect;
    tempRect.x=padPixels;
    tempRect.y=padPixels;
    tempRect.width=img0.cols;
    tempRect.height=img0.rows;

    Mat img1 = Mat::zeros(img0.rows+(padPixels*2), img0.cols+(padPixels*2), CV_8UC1);
    img0.copyTo(img1(tempRect));


    if (useGPU) // GPU path -> not benchmarked here, may not actually be faster!
    {
        GpuMat imgGPU;
        imgGPU.upload(img1);
#if CV_MAJOR_VERSION == 2
        gpu::Canny(imgGPU, imgGPU, 100, 200, 3); //100, 200, 3);
#elif CV_MAJOR_VERSION == 3
        cv::Ptr<cv::cuda::CannyEdgeDetector> canny = cv::cuda::createCannyEdgeDetector(100, 200, 3);
        canny->detect(imgGPU, imgGPU);
#endif
        imgGPU.download(img1);
    }
    else
    {
        Canny(img1, img1, 100, 200, 3); //100, 200, 3);
    }


    // find the contours
    vector< vector<Point> > contours;
    findContours(img1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    // Mask for segmented regions
    Mat mask = Mat::zeros(img1.rows, img1.cols, CV_8UC1);

    vector<double> areas(contours.size());

    if (minPixelSize==-1)
    {   // Case of taking largest region
        for(int i = 0; i < (int)contours.size(); i++)
            areas[i] = contourArea(Mat(contours[i]));
        double max;
        Point maxPosition;
        cv::minMaxLoc(Mat(areas),0,&max,0,&maxPosition);
        drawContours(mask, contours, maxPosition.y, Scalar(1), CV_FILLED);
    }
    else
    {   // Case for using minimum pixel size
        for (int i = 0; i < (int)contours.size(); i++)
        {
            if (contourArea(Mat(contours[i]))>minPixelSize)
                drawContours(mask, contours, i, Scalar(1), CV_FILLED);
        }
    }
    // normalize so imwrite(...)/imshow(...) shows the mask correctly!
    cv::normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);

    Mat returnMask;
    returnMask=mask(tempRect);

    // show the images
    if (displayFaces)   imshow("Canny: Img in", img0);
    if (displayFaces)   imshow("Canny: Mask", returnMask);
    if (displayFaces)   imshow("Canny: Output", img1);

    return returnMask;
}
Developer: towardthesea; Project: wysiwyd; Lines: 79; Source: visionUtils.cpp
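A hedged call-site sketch for cannySegmentation. It assumes a visionUtils instance can be constructed directly and that the input is single-channel; the file name is purely illustrative:

cv::Mat gray = cv::imread("hand.png", 0);             // illustrative input, loaded as grayscale
visionUtils vu;                                       // assumed default-constructible
cv::Mat mask = vu.cannySegmentation(gray, 50, false); // drop regions smaller than 50 pixels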

Example 11: skinDetect

Mat visionUtils::skinDetect(Mat captureframe, Mat3b *skinDetectHSV, Mat *skinMask, std::vector<int> adaptiveHSV, int minPixelSize, int imgBlurPixels, int imgMorphPixels, int singleRegionChoice, bool displayFaces)
{

    if (adaptiveHSV.size()!=6 || adaptiveHSV.empty())
    {
        adaptiveHSV.clear();
        adaptiveHSV.push_back(5);
        adaptiveHSV.push_back(38);
        adaptiveHSV.push_back(51);
        adaptiveHSV.push_back(17);
        adaptiveHSV.push_back(250);
        adaptiveHSV.push_back(242);
    }


    //int step = 0;
    Mat3b frameTemp;
    Mat3b frame;
    // Force a resize to 640x480 -> all thresholds / pixel filters are configured for this size
    // Note: the image is returned to its original size at the end...
    Size s = captureframe.size();
    cv::resize(captureframe,captureframe,Size(640,480));



    if (useGPU)
    {
        GpuMat imgGPU, imgGPUHSV;
        imgGPU.upload(captureframe);
        cv::cvtColor(imgGPU, imgGPUHSV, CV_BGR2HSV);
        GaussianBlur(imgGPUHSV, imgGPUHSV, Size(imgBlurPixels,imgBlurPixels), 1, 1);
        imgGPUHSV.download(frameTemp);
    }
    else
    {
        cv::cvtColor(captureframe, frameTemp, CV_BGR2HSV);
        GaussianBlur(frameTemp, frameTemp, Size(imgBlurPixels,imgBlurPixels), 1, 1);
    }

    // Potential FASTER VERSION using inRange
    Mat frameThreshold = Mat::zeros(frameTemp.rows,frameTemp.cols, CV_8UC1);
    Mat hsvMin = (Mat_<int>(1,3) << adaptiveHSV[0], adaptiveHSV[1],adaptiveHSV[2] );
    Mat hsvMax = (Mat_<int>(1,3) << adaptiveHSV[3], adaptiveHSV[4],adaptiveHSV[5] );
    inRange(frameTemp,hsvMin ,hsvMax, frameThreshold);
    frameTemp.copyTo(frame,frameThreshold);

    /* BGR CONVERSION AND THRESHOLD */
    Mat1b frame_gray;

    // send HSV to skinDetectHSV for return
    *skinDetectHSV=frame.clone();

    cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);


    // Adaptive thresholding technique
    // 1. Threshold data to find main areas of skin
    adaptiveThreshold(frame_gray,frame_gray,255,ADAPTIVE_THRESH_GAUSSIAN_C,THRESH_BINARY_INV,9,1);


    if (useGPU)
    {
        GpuMat imgGPU;
        imgGPU.upload(frame_gray);
        // 2. Fill in thresholded areas
#if CV_MAJOR_VERSION == 2
        gpu::morphologyEx(imgGPU, imgGPU, CV_MOP_CLOSE, Mat1b(imgMorphPixels,imgMorphPixels,1), Point(-1, -1), 2);
        gpu::GaussianBlur(imgGPU, imgGPU, Size(imgBlurPixels,imgBlurPixels), 1, 1);
#elif CV_MAJOR_VERSION == 3
        //TODO: Check if that's correct
        Mat element = getStructuringElement(MORPH_RECT, Size(imgMorphPixels, imgMorphPixels), Point(-1, -1));
        Ptr<cuda::Filter> closeFilter = cuda::createMorphologyFilter(MORPH_CLOSE, imgGPU.type(), element, Point(-1, -1), 2);
        closeFilter->apply(imgGPU, imgGPU);
        cv::Ptr<cv::cuda::Filter> gaussianFilter = cv::cuda::createGaussianFilter(imgGPU.type(), imgGPU.type(), Size(imgMorphPixels, imgMorphPixels), 1, 1);
        gaussianFilter->apply(imgGPU, imgGPU);
#endif

        imgGPU.download(frame_gray);
    }
    else
    {
        // 2. Fill in thresholded areas
        morphologyEx(frame_gray, frame_gray, CV_MOP_CLOSE, Mat1b(imgMorphPixels,imgMorphPixels,1), Point(-1, -1), 2);
        GaussianBlur(frame_gray, frame_gray, Size(imgBlurPixels,imgBlurPixels), 1, 1);
        // Select single largest region from image, if singleRegionChoice is selected (1)
    }


    if (singleRegionChoice)
    {
        *skinMask = cannySegmentation(frame_gray, -1, displayFaces);
    }
    else // Detect each separate block and remove blobs smaller than a few pixels
    {
        *skinMask = cannySegmentation(frame_gray, minPixelSize, displayFaces);
    }

    // Just return skin
    Mat frame_skin;
    captureframe.copyTo(frame_skin,*skinMask);  // Copy captureframe data to frame_skin, using the computed skin mask
//......... part of the code omitted here .........
Developer: towardthesea; Project: wysiwyd; Lines: 101; Source: visionUtils.cpp

Example 12: enqueueUpload

inline
void Stream::enqueueUpload(InputArray src, GpuMat& dst)
{
    dst.upload(src, *this);
}
Developer: Amorming; Project: opencv; Lines: 5; Source: gpu.inl.hpp
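enqueueUpload simply forwards to the stream-aware overload GpuMat::upload(src, stream). A hedged sketch of the asynchronous pattern (OpenCV 3.x); the copy can only overlap with host work when the source is page-locked memory such as cv::cuda::HostMem:

cv::cuda::Stream stream;
cv::cuda::HostMem pinned(480, 640, CV_8UC3); // page-locked host buffer
cv::Mat host = pinned.createMatHeader();     // fill this header with data
cv::cuda::GpuMat d_img;
d_img.upload(host, stream);                  // enqueued on the stream; returns immediately
stream.waitForCompletion();                  // block until the copy has finished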

Example 13: host_levels

void cv::gpu::evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel)
{
    Mat host_levels(1, nLevels, CV_32SC1);
    nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );
    levels.upload(host_levels);
}
Developer: AaronPlay; Project: opencv; Lines: 6; Source: histogram.cpp
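A hedged usage sketch for evenLevels, matching the OpenCV 2.x cv::gpu API shown above; the resulting levels can then be passed to the GPU histogram routines:

cv::gpu::GpuMat d_levels;
cv::gpu::evenLevels(d_levels, 256, 0, 256); // 1x256 CV_32SC1 row of evenly spaced levels on the device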


Note: The GpuMat::upload method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, refer to the corresponding project's License; please do not reproduce without permission.