

C++ Mat::create Method Code Examples

This article collects typical usage examples of the C++ Mat::create method. If you have been wondering how exactly Mat::create is used, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples of the Mat class that this method belongs to.


A total of 15 code examples of the Mat::create method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
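
Before diving into the examples, here is a minimal standalone sketch of the typical Mat::create call pattern (the 640x480 size and the CV_8UC3 pixel type are arbitrary values chosen for illustration):

#include <opencv2/core/core.hpp>

int main()
{
    cv::Mat img;
    // create() (re)allocates the underlying buffer only when the requested
    // rows/cols/type differ from the current ones; otherwise it is a no-op.
    img.create(480, 640, CV_8UC3);           // rows, cols, type
    img.create(cv::Size(640, 480), CV_8UC3); // equivalent Size-based overload

    // Typical pattern seen throughout the examples below:
    // size an output buffer to match an input image before filling it.
    cv::Mat dst;
    dst.create(img.size(), img.type());
    return 0;
}

This lazy-allocation behavior is why many of the examples below call create() on their output Mat unconditionally before writing to it.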

Example 1: _prepareImgAndDrawKeypoints

static void _prepareImgAndDrawKeypoints( const Mat& img1, const vector<KeyPoint>& keypoints1,
                                         const Mat& img2, const vector<KeyPoint>& keypoints2,
                                         Mat& outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, int flags )
{
    Size size( img1.cols + img2.cols, MAX(img1.rows, img2.rows) );
    if( flags & DrawMatchesFlags::DRAW_OVER_OUTIMG )
    {
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( CV_StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
        outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
    }
    else
    {
        outImg.create( size, CV_MAKETYPE(img1.depth(), 3) );
        outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );

        if( img1.type() == CV_8U )
            cvtColor( img1, outImg1, CV_GRAY2BGR );
        else
            img1.copyTo( outImg1 );

        if( img2.type() == CV_8U )
            cvtColor( img2, outImg2, CV_GRAY2BGR );
        else
            img2.copyTo( outImg2 );
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
        drawKeypoints( outImg1, keypoints1, outImg1, singlePointColor, flags + DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
        drawKeypoints( outImg2, keypoints2, outImg2, singlePointColor, flags + DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
Developer ID: RebUT, Project: REBUT, Lines of code: 40, Source file: draw.cpp

Example 2: operator()

void PatchGenerator::operator ()(const Mat& image, const Mat& T,
                                 Mat& patch, Size patchSize, RNG& rng) const
{
    patch.create( patchSize, image.type() );
    if( backgroundMin != backgroundMax )
    {
        rng.fill(patch, RNG::UNIFORM, Scalar::all(backgroundMin), Scalar::all(backgroundMax));
        warpAffine(image, patch, T, patchSize, INTER_LINEAR, BORDER_TRANSPARENT);
    }
    else
        warpAffine(image, patch, T, patchSize, INTER_LINEAR, BORDER_CONSTANT, Scalar::all(backgroundMin));

    int ksize = randomBlur ? (unsigned)rng % 9 - 5 : 0;
    if( ksize > 0 )
    {
        ksize = ksize*2 + 1;
        GaussianBlur(patch, patch, Size(ksize, ksize), 0, 0);
    }

    if( noiseRange > 0 )
    {
        AutoBuffer<uchar> _noiseBuf( patchSize.width*patchSize.height*image.elemSize() );
        Mat noise(patchSize, image.type(), (uchar*)_noiseBuf);
        int delta = image.depth() == CV_8U ? 128 : image.depth() == CV_16U ? 32768 : 0;
        rng.fill(noise, RNG::NORMAL, Scalar::all(delta), Scalar::all(noiseRange));
        if( backgroundMin != backgroundMax )
            addWeighted(patch, 1, noise, 1, -delta, patch);
        else
        {
            for( int i = 0; i < patchSize.height; i++ )
            {
                uchar* prow = patch.ptr<uchar>(i);
                const uchar* nrow =  noise.ptr<uchar>(i);
                for( int j = 0; j < patchSize.width; j++ )
                    if( prow[j] != backgroundMin )
                        prow[j] = saturate_cast<uchar>(prow[j] + nrow[j] - delta);
            }
        }
    }
}
Developer ID: BenJamesbabala, Project: OpenTracking, Lines of code: 40, Source file: PatchGenerator.cpp

Example 3: main

int main(int argc, char *argv[])
{
    float alpha = 0;
    int   beta  = 0;
    Mat   image;
    Mat   result;

    if (argc<2) {
        cout<<"Usage:./contrast [image_name]"<<endl;
        return -1;
    }

    image = imread(argv[1]);
    if (!image.data) {
        cout<<"Read image error."<<endl;
        return -1;
    }

    cout<<"Enter alpha value[1-3]:"<<endl;
    cin>>alpha;
    cout<<"Enter beta value[0-100]:"<<endl;
    cin>>beta;    

    result.create(image.size(), image.type()); 

    int n = image.rows * image.cols * image.channels();
    uchar *p = image.data;
    uchar *q = result.data;
    for (int i=0; i<n; i++) {
        *q++ = saturate_cast<uchar>(alpha * (*p++) + beta);     
    }

    namedWindow("origin", CV_WINDOW_AUTOSIZE);
    imshow("origin", image);
    namedWindow("result", CV_WINDOW_AUTOSIZE);
    imshow("result", result);

    waitKey(0);

    return 0;
}
Developer ID: JuannyWang, Project: opencv_study, Lines of code: 40, Source file: contrast.cpp

Example 4: pow_fmath

void pow_fmath(const Mat& src, const float a, Mat & dest)
{
    if (dest.empty())dest.create(src.size(), CV_32F);

    int width = src.cols;
    int height = src.rows;

    int size = src.size().area();
    int i = 0;

    const float* s = src.ptr<float>(0);
    float* d = dest.ptr<float>(0);
    const __m128 ma = _mm_set1_ps(a);
    for (i = 0; i <= size - 4; i += 4)
    {
        _mm_store_ps(d + i, _mm_pow_ps(_mm_load_ps(s + i), ma));
    }
    for (; i < size; i++)
    {
        d[i] = cv::pow(s[i], a);
    }
}
Developer ID: ArtisticCoding, Project: OpenCP, Lines of code: 22, Source file: arithmetic.cpp

Example 5: h_filter

void AdaptiveManifoldFilterN::h_filter(const Mat1f& src, Mat& dst, float sigma)
{
    CV_DbgAssert(src.depth() == CV_32F);

    const float a = exp(-sqrt(2.0f) / sigma);

    dst.create(src.size(), CV_32FC1);

    for (int y = 0; y < src.rows; ++y)
    {
        const float* src_row = src[y];
        float* dst_row = dst.ptr<float>(y);

        dst_row[0] = src_row[0];
        for (int x = 1; x < src.cols; ++x)
        {
            dst_row[x] = src_row[x] + a * (dst_row[x - 1] - src_row[x]);
        }
        for (int x = src.cols - 2; x >= 0; --x)
        {
            dst_row[x] = dst_row[x] + a * (dst_row[x + 1] - dst_row[x]);
        }
    }

    for (int y = 1; y < src.rows; ++y)
    {
        float* dst_cur_row = dst.ptr<float>(y);
        float* dst_prev_row = dst.ptr<float>(y-1);

        rf_vert_row_pass(dst_cur_row, dst_prev_row, a, src.cols);
    }
    for (int y = src.rows - 2; y >= 0; --y)
    {
        float* dst_cur_row = dst.ptr<float>(y);
        float* dst_prev_row = dst.ptr<float>(y+1);

        rf_vert_row_pass(dst_cur_row, dst_prev_row, a, src.cols);
    }
}
Developer ID: 23pointsNorth, Project: opencv_contrib, Lines of code: 39, Source file: adaptive_manifold_filter_n.cpp

Example 6: vIdle

void vIdle() {
    if (TheCaptureFlag) {
        // capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve(TheInputImage);
        TheUndInputImage.create(TheInputImage.size(), CV_8UC3);
        // by default, OpenCV works in BGR, so we must convert to RGB because OpenGL on Windows prefers it
        cv::cvtColor(TheInputImage, TheInputImage, CV_BGR2RGB);
        // remove distortion from the image
        cv::undistort(TheInputImage, TheUndInputImage, TheCameraParams.CameraMatrix,
                      TheCameraParams.Distorsion);
        // detect markers
        MDetector.detect(TheUndInputImage, TheMarkers);
        // Detection of the board
        TheBoardDetected.second = TheBoardDetector.detect(
            TheMarkers, TheBoardConfig, TheBoardDetected.first, TheCameraParams, TheMarkerSize);
        // check the speed by calculating the mean speed of all iterations
        // resize the image to the size of the GL window
        cv::resize(TheUndInputImage, TheResizedImage, TheGlWindowSize);
    }
    glutPostRedisplay();
}
Developer ID: paroj, Project: aruco, Lines of code: 22, Source file: aruco_test_board_gl.cpp

Example 7: matLoad_oneChannel

void matLoad_oneChannel(string fileName, Mat &data)
{
	ifstream infileBin;
	infileBin.open(fileName, ios::binary);
	int row = 0;
	int col = 0;

	infileBin.read((char *)&row, sizeof(row));
	infileBin.read((char *)&col, sizeof(col));
	data.create(row, col, CV_32FC1);
	for (int h = 0; h < row; h++)
	{
		for (int w = 0; w < col; w++)
		{
			float tmp = 0;
			infileBin.read((char *)&tmp, sizeof(tmp));
			data.at<float>(h, w) = tmp;
		}
	}

	infileBin.close();
}
Developer ID: trantorznh, Project: maxPooing_denseSampling_in_fine-grained-classification, Lines of code: 22, Source file: supportFunc.cpp

Example 8: meanShiftFiltering_

static void meanShiftFiltering_(const Mat &src_roi, Mat &dst_roi, int sp, int sr, cv::TermCriteria crit)
{
    if( src_roi.empty() )
        CV_Error( Error::StsBadArg, "The input image is empty" );

    if( src_roi.depth() != CV_8U || src_roi.channels() != 4 )
        CV_Error( Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );

    dst_roi.create(src_roi.size(), src_roi.type());

    CV_Assert( (src_roi.cols == dst_roi.cols) && (src_roi.rows == dst_roi.rows) );
    CV_Assert( !(dst_roi.step & 0x3) );

    if( !(crit.type & cv::TermCriteria::MAX_ITER) )
        crit.maxCount = 5;
    int maxIter = std::min(std::max(crit.maxCount, 1), 100);
    float eps;
    if( !(crit.type & cv::TermCriteria::EPS) )
        eps = 1.f;
    eps = (float)std::max(crit.epsilon, 0.0);

    int tab[512];
    for(int i = 0; i < 512; i++)
        tab[i] = (i - 255) * (i - 255);
    uchar *sptr = src_roi.data;
    uchar *dptr = dst_roi.data;
    int sstep = (int)src_roi.step;
    int dstep = (int)dst_roi.step;
    cv::Size size = src_roi.size();

    for(int i = 0; i < size.height; i++, sptr += sstep - (size.width << 2),
        dptr += dstep - (size.width << 2))
    {
        for(int j = 0; j < size.width; j++, sptr += 4, dptr += 4)
        {
            do_meanShift(j, i, sptr, dptr, sstep, size, sp, sr, maxIter, eps, tab);
        }
    }
}
Developer ID: AlgoFl4sh, Project: opencv, Lines of code: 39, Source file: perf_imgproc.cpp

Example 9: update

void FireVibe::update(const Mat &image, Mat &foreImage)
{
    CV_Assert(image.type() == imageType && image.cols == imageWidth && image.rows == imageHeight);

    foreImage.create(imageHeight, imageWidth, CV_8UC1);

    // The outermost ring of pixels in the frame is treated as background by default.
    // Both the foreground detection and the background-model update of this algorithm involve neighborhood operations,
    // and the neighborhoods of the outermost pixels are incomplete, so handling them would require extra checks.
    // In practice the frame is never that small, and the labels of the outermost pixels have no noticeable effect on the final result,
    // so this ring of pixels is simply skipped.
    foreImage.row(0).setTo(0);
    foreImage.row(imageHeight - 1).setTo(0);
    foreImage.col(0).setTo(0);
    foreImage.col(imageWidth - 1).setTo(0);

    // process according to the actual image type
    if (imageChannels == 3)
        proc8UC3(image, foreImage);
    else if (imageChannels == 1)
        proc8UC1(image, foreImage);
}
Developer ID: zhousoft, Project: FireDetection, Lines of code: 22, Source file: firevibe.cpp

Example 10: dibujaPatron

void dibujaPatron(Mat *interferograma, int dim,int width, int height, string nombre){
    Mat imagen;
    imagen.create(height,width,CV_32FC3);
    imagen.setTo(Scalar::all(0));
    for (int i=0;i<dim;i++){
        for (int x=0;x<imagen.cols;x++){
            int j=x*interferograma[i].cols/imagen.cols;
            int y=50+100*(i)+50*(get2D32F(interferograma[i],j,0,0));

            //cout<<"x: " << j*2*Xmax[i]/(interf[i].rows-1) << " y: "<<y-250 <<endl;
            set2D32F(imagen,x,y,i,1);
            set2D32F(imagen,x,100*i,i,1);
            set2D32F(imagen,x,50+100*i,i,1);
            set2D32F(imagen,x,100+100*i,i,1);
        }
        for (int y=0;y<50;y++){
            set2D32F(imagen,(int)(imagen.cols/2),50+100*i+y-25,i,1);
        }

    }
    imshow( nombre, imagen);
}
Developer ID: nvelozsavino, Project: tesis-nico, Lines of code: 22, Source file: generales.cpp

Example 11: normal

/**
  * Logic that performs the plain match_template.
  * Called from within normal(Mat).
  **/
void MatchTemplate::normal()
{
    /// Create the result matrix
    Mat result;
    int result_cols =  mOriginalImage.cols - mTemplateImg.cols + 1;
    int result_rows = mOriginalImage.rows - mTemplateImg.rows + 1;
    result.create( result_rows, result_cols, CV_32FC1 );

    /// @LuisAlonso, match template timing starts here

    mBenchmark->startTimer();
    mBenchmark->startTickCPU();

    /// Run the match template
    matchTemplate( mOriginalImage, mTemplateImg, result, MATCH_METHOD );

    mBenchmark->markLapTimer();
    mBenchmark->markLapTickCPU();

    /// @LuisAlonso, and it ends here

    normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );

    /// Localizing the best match with minMaxLoc
    double minVal; double maxVal; Point minLoc; Point maxLoc; Point matchLoc;
    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );

    /// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
    if( MATCH_METHOD  == CV_TM_SQDIFF || MATCH_METHOD == CV_TM_SQDIFF_NORMED )
    { matchLoc = minLoc; }
    else
    { matchLoc = maxLoc; }

    /// Show me what you got
    rectangle( result, matchLoc, Point( matchLoc.x + mTemplateImg.cols , matchLoc.y + mTemplateImg.rows ), Scalar::all(0), 2, 8, 0 );

    /// Notify whoever is connected
    emit onMatchTemplateFinished(result);
}
Developer ID: fabianz66, Project: efectos-mondrian-pollock, Lines of code: 43, Source file: MatchTemplate.cpp

Example 12: laplacianFiltering

void laplacianFiltering(const Mat& input, const Mat& laplacianMask, float scale, Mat& output, Mat& scaledLaplacian){
    Mat input_tmp,tmp;
    float* input_data;
    float* output_data;
    float* scaleLap_data;
    double t = (double)getTickCount();
    Size inputSize;
    spatialFiltering(input, laplacianMask, scaledLaplacian); // get scaledLaplacian (not yet multiplied by scale)
    // Check whether the input data is float or uchar
    if(input.type() == CV_8UC(1)){
        input.convertTo(input_tmp, CV_32FC(1), 1/255.0);
    }
    else if(input.type() == CV_32FC(1)){
        input_tmp = input;
    }
    else{
        printf("Error Type in laplacianFiltering!!!\n");
        exit(0);
    }
    inputSize = input.size();
    output.create(inputSize, CV_32FC(1));
    scaleLap_data = (float*)scaledLaplacian.data;
    input_data = (float*)input_tmp.data;
    output_data = (float*)output.data;
    // start applying the scaled Laplacian sharpening
    for(int p=0; p<inputSize.width*inputSize.height; p++){
        float buf;
        scaleLap_data[p] = scaleLap_data[p]*scale; // multiply by scale
        buf = input_data[p] + scaleLap_data[p]; // add to the original input
        if(buf>1) // if value > 1
            buf = 1;
        else if(buf<0)// if value <0
            buf = 0;
        output_data[p] = buf;
    }
    t = (double)getTickCount()-t;
    printf("Laplacian total consume %gms\n", t*1000/getTickFrequency());// get processing time
    return;
}
Developer ID: fuenwang, Project: DIP_LAB, Lines of code: 39, Source file: lab2.cpp

Example 13: main

//-----------------------------------[ main() function ]--------------------------------------------
//		Description: entry point of the console application; the program starts here
//-------------------------------------------------------------------------------------------------
int main( )
{
	// read video from the camera
	VideoCapture cap(0);

	// check whether the capture was opened successfully
	if(!cap.isOpened()) {
		cout << "Capture could not be opened succesfully" << endl;
		return -1;
	}

	namedWindow("Video");
	namedWindow("Backprojection");

	setMouseCallback("Video", on_mouse);

	while(char(waitKey(1)) != 'q' && cap.isOpened()) {
		cap >> frame;
		if(!selected) mask.create(frame.rows+2, frame.cols+2, CV_8UC1);
		// Check if the video is over
		if(frame.empty()) {
			cout << "Video over" << endl;
			break;
		}
		cvtColor(frame, frame_hsv, CV_BGR2HSV);

		// backproject on the HSV image
		Mat frame_backprojected = Mat::zeros(frame.size(), CV_8UC1);        
		if(selected) {
			int channels[] = {0, 1};
			calcBackProject(&frame_hsv, 1, channels, hist, frame_backprojected, ranges);
		}

		imshow("Video", frame);
		imshow("Backprojection", frame_backprojected);
	}

	return 0;
}
Developer ID: BigCreatation, Project: OpenCV3-Intro-Book-Src, Lines of code: 42, Source file: 18_calcBackProject2.cpp

Example 14: GetDepthDataAsArgb

/// <summary>
/// Converts from Kinect depth frame data into a ARGB OpenCV image matrix
/// User must pre-allocate space for matrix.
/// </summary>
/// <param name="pImage">pointer in which to return the OpenCV image matrix</param>
/// <returns>S_OK if successful, an error code otherwise</returns>
HRESULT OpenCVFrameHelper::GetDepthDataAsArgb(Mat* pImage) const
{
    DWORD depthWidth, depthHeight;
    NuiImageResolutionToSize(m_depthResolution, depthWidth, depthHeight);

    // Get the depth image
    Mat depthImage;
    depthImage.create(depthHeight, depthWidth, DEPTH_TYPE);
    HRESULT hr = GetDepthData(&depthImage);
    if (!SUCCEEDED(hr)) {
        return hr;
    }

    for (UINT y = 0; y < depthHeight; ++y)
    {
        // Get row pointers for Mats
        const USHORT* pDepthRow = depthImage.ptr<USHORT>(y);
        Vec4b* pDepthRgbRow = pImage->ptr<Vec4b>(y);

        for (UINT x = 0; x < depthWidth; ++x)
        {
            USHORT raw_depth = pDepthRow[x];

            // If depth value is valid, convert and copy it
            if (raw_depth != 65535)
            {
                UINT8 redPixel, greenPixel, bluePixel;
                DepthShortToRgb(raw_depth, &redPixel, &greenPixel, &bluePixel);
                pDepthRgbRow[x] = Vec4b(redPixel, greenPixel, bluePixel, 1);
            }
            else
            {
                pDepthRgbRow[x] = 0;
            }
        }
    }

    return S_OK;
}
Developer ID: Sizmar, Project: AGVC, Lines of code: 45, Source file: OpenCVFrameHelper.cpp

Example 15: getGradientInterleave

static void getGradientInterleave(const Mat& image,Mat & grad){
    //Image gradients for alignment
    //Note that these gradients have theoretical problems under the sudden 
    // changes model of images. It might be wise to blur the images before 
    // alignment, to avoid sudden changes, but that makes occlusion more 
    // problematic.
    grad.create(2,image.rows*image.cols,CV_32FC1);
    Mat gray;
    if (image.type()==CV_32FC1) {
        gray=image;
    }else {
        cvtColor(image, gray, CV_BGR2GRAY);
        gray.convertTo(gray,CV_32FC1);
    }
    Mat gradX(image.rows,image.cols,CV_32FC1);
    Scharr( gray, gradX, CV_32FC1, 1, 0, 1.0/26.0, 0, BORDER_REPLICATE );
    Mat gradY(image.rows,image.cols,CV_32FC1);
    Scharr( gray, gradY, CV_32FC1, 0, 1, 1.0/26.0, 0, BORDER_REPLICATE);
    Mat src [2]={gradY,gradX};
    merge(src,2,grad);
    
}
Developer ID: amiltonwong, Project: OpenDTAM, Lines of code: 22, Source file: Align_part.cpp


Note: The Mat::create examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not republish without permission.