

C++ cvSetData Function Code Examples

This article collects typical usage examples of the C++ function cvSetData. If you have been wondering what exactly cvSetData does, how to call it, or what real-world usage looks like, the hand-picked code samples below should help.


Fifteen code examples of the cvSetData function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better C++ code samples.
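Before the examples, a quick orientation may help: cvSetData(arr, data, step) attaches an externally owned buffer to an existing CvMat or IplImage header without copying it, so the header can be created and released independently of the pixel data. The following minimal sketch illustrates that pattern; the 640x480 size, the std::vector buffer, and the include path are illustrative assumptions, not taken from any example below.

#include <opencv2/core/core_c.h>   // legacy C API header (older builds use <cv.h>)
#include <vector>

int main()
{
    // A caller-owned BGR buffer, e.g. filled by a capture device (hypothetical here).
    // cvSetData does not copy it, so it must outlive the header.
    const int width = 640, height = 480;
    std::vector<unsigned char> pixels(width * height * 3);

    // Allocate only the header, then point it at the external memory.
    IplImage* header = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 3);
    cvSetData(header, pixels.data(), width * 3);   // step = bytes per row

    // ... use `header` with any C-API routine (cvCvtColor, cvFilter2D, ...) ...

    // Release the header only; the pixel buffer is still owned by `pixels`.
    cvReleaseImageHeader(&header);
    return 0;
}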

Example 1: while

void BleWindowsCaptureSource::run()
{
    // TODO: allow selecting which screen to capture
    // QGuiApplication::screens();
    while (!m_stop) {
        QElapsedTimer elapsedTimer;
        elapsedTimer.start();

        QScreen *screen = QGuiApplication::primaryScreen();

        if (screen) {
            QPixmap pixmap = screen->grabWindow(m_wid, m_x, m_y, m_width, m_height);
#if 1
            // TODO: draw the cursor onto the captured image
            QRect desktopRect = QRect(QPoint(0, 0), screen->size());
            if (desktopRect.contains(QCursor::pos())) {
                drawCursor(&pixmap);
            }
#endif
            QImage image = pixmap.toImage();

            m_modifyMutex.lock();           // Start lock

            BleImage be;
            be.width = image.width();
            be.height = image.height();

            int imageSize = be.width * be.height * 3;
            be.data = new char[imageSize];

            IplImage *oriImage = cvCreateImageHeader(cvSize(image.width(), image.height()), IPL_DEPTH_8U, 4);
            cvSetData(oriImage, image.bits(), image.bytesPerLine());

            IplImage *dstImage = cvCreateImageHeader(cvSize(image.width(), image.height()), IPL_DEPTH_8U, 3);
            cvSetData(dstImage, be.data, be.width * 3);

            cvCvtColor(oriImage, dstImage, CV_BGRA2BGR);

            be.dataSize = imageSize;
            be.format = BleImage_Format_BGR24;

            m_image = be;

            cvReleaseImageHeader(&oriImage);
            cvReleaseImageHeader(&dstImage);

            m_modifyMutex.unlock();        // End unlock
        }

        int elapsedMs = elapsedTimer.elapsed();
        int needSleepMs = m_interval - elapsedMs;
        if (needSleepMs < 0) {
            needSleepMs = 0;
        }
        msleep(needSleepMs);
    }

    log_trace("BleWindowsCaptureSource exit normally.");
}
Developer: JaydenChou, Project: Bull-Live-Encoder, Lines of code: 59, Source: BleWindowsCaptureSource.cpp

Example 2: clifGrayscaleIntegral

CLIFIntegralResult
clifGrayscaleIntegral(const IplImage* source,
                      CLIFEnvironmentData* data,
                      const cl_bool use_opencl)
{
    CLIFIntegralResult ret;
    
    if(!use_opencl) {
        IplImage* grayscale = cvCreateImage(cvSize(source->width, source->height), IPL_DEPTH_8U, 1);
        cvCvtColor(source, grayscale, CV_BGR2GRAY);
        ret.image = cvCreateMat(source->height + 1, source->width + 1, CV_32SC1);
        ret.square_image = cvCreateMat(source->height + 1, source->width + 1, CV_64FC1);
        cvIntegral(grayscale, ret.image, ret.square_image);
        cvReleaseImage(&grayscale);
        
        return ret;
    }
    
    cl_int error = CL_SUCCESS;
    
    // Init buffer
    error = clEnqueueWriteBuffer(data->environment.queue, data->bgr_to_gray_data.buffers[0], CL_FALSE, 0, source->widthStep * source->height, source->imageData, 0, NULL, NULL);
    clCheckOrExit(error);
    
    // Run kernel
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[0], 2, NULL, data->bgr_to_gray_data.global_size, data->bgr_to_gray_data.local_size, 0, NULL, NULL);
    clCheckOrExit(error);
    
    // Pass the grayscale output buffer as the next kernel's argument
    error = clSetKernelArg(data->environment.kernels[1], 0, sizeof(cl_mem), &(data->bgr_to_gray_data.buffers[1]));
    clCheckOrExit(error);
    
    // Run sum rows kernel
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[1], 1, NULL, &(data->integral_image_data.global_size[0]), &(data->integral_image_data.local_size[0]), 0, NULL, NULL);
    clCheckOrExit(error);
    
    // Run sum cols kernel
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[2], 1, NULL, &(data->integral_image_data.global_size[1]), &(data->integral_image_data.local_size[1]), 0, NULL, NULL);
    clCheckOrExit(error);
    
    // Read result
    cl_uint* result = (cl_uint*)clEnqueueMapBuffer(data->environment.queue, data->integral_image_data.buffers[3], CL_TRUE, CL_MAP_READ, 0, (source->width + 1) * (source->height + 1) * sizeof(cl_uint), 0, NULL, NULL, &error);
    clCheckOrExit(error);
    
    cl_ulong* square_result = (cl_ulong*)clEnqueueMapBuffer(data->environment.queue, data->integral_image_data.buffers[4], CL_TRUE, CL_MAP_READ, 0, (source->width + 1) * (source->height + 1) * sizeof(cl_ulong), 0, NULL, NULL, &error);
    clCheckOrExit(error);
    
    data->integral_image_data.ptr = result;
    
    // Return
    ret.image = cvCreateMatHeader(source->height + 1, source->width + 1, CV_32SC1);
    cvSetData(ret.image, result, source->width + 1);
    ret.square_image = cvCreateMatHeader(source->height + 1, source->width + 1, CV_64FC1);
    cvSetData(ret.square_image, square_result, source->width + 1);
    return ret;
}
Developer: GabrieleCocco, Project: CLFaceDetection, Lines of code: 56, Source: clif.cpp

Example 3: Q_UNUSED

void BleImageProcess::paintEvent(QPaintEvent *event)
{
    Q_UNUSED(event);

    QPainter p(this);

    p.setRenderHint(QPainter::SmoothPixmapTransform);

    // background
    p.fillRect(rect(), QBrush(QColor(48, 48, 48)));

    // draw each source element
    for (int i = 0; i < m_sources.size(); ++i) {
        const SourcePair & pair = m_sources.at(i);
        BleSourceAbstract *s = pair.source;

        // TODO: the image data may be accessed by another thread
        BleImage image = s->getImage();

        if (image.dataSize <= 0) continue;

        QImage qimage;
        if (image.format == BleImage_Format_BGR24) {
            IplImage *oriImage = cvCreateImageHeader(cvSize(image.width, image.height), IPL_DEPTH_8U, 3);
            cvSetData(oriImage, image.data, image.width*3);

            IplImage *dstImage = cvCreateImageHeader(cvSize(image.width, image.height), IPL_DEPTH_8U, 3);
            cvSetData(dstImage, image.data, image.width*3);

            cvCvtColor(oriImage, dstImage, CV_BGR2RGB);

            cvReleaseImageHeader(&oriImage);
            cvReleaseImageHeader(&dstImage);
        }
        qimage = QImage((uchar*)image.data, image.width, image.height, QImage::Format_RGB888);

        p.drawPixmap(pair.rect, QPixmap::fromImage(qimage));
        // p.drawImage(pair.rect, qimage);
    }

    if (m_activePair && m_activePair->rect.isValid()) {
        QPen pen(Qt::SolidLine);

        pen.setColor(Qt::white);
        pen.setWidth(2);
        pen.setStyle(Qt::DotLine);
        p.setPen(pen);
        p.drawRect(m_activePair->rect);

        QRect topLeftRect(m_activePair->rect.x(), m_activePair->rect.y(), 8, 8);
        p.fillRect(topLeftRect, QBrush(Qt::red));

        QRect bottomRightRect(m_activePair->rect.bottomRight().x(), m_activePair->rect.bottomRight().y(), -8, -8);
        p.fillRect(bottomRightRect, QBrush(Qt::red));
    }
}
Developer: JaydenChou, Project: Bull-Live-Encoder, Lines of code: 56, Source: BleImageProcess.cpp

Example 4: cvCreateMat

void ofxImageROI::projectToTarget(ofPoint *dstPoints)
{
	CvPoint2D32f cvsrc[4];
	CvPoint2D32f cvdst[4];

	cvsrc[0].x = cornerPoints[0].x;
	cvsrc[0].y = cornerPoints[0].y;
	cvsrc[1].x = cornerPoints[1].x;
	cvsrc[1].y = cornerPoints[1].y;
	cvsrc[2].x = cornerPoints[2].x;
	cvsrc[2].y = cornerPoints[2].y;
	cvsrc[3].x = cornerPoints[3].x;
	cvsrc[3].y = cornerPoints[3].y;

	cvdst[0].x = dstPoints[0].x;
	cvdst[0].y = dstPoints[0].y;
	cvdst[1].x = dstPoints[1].x;
	cvdst[1].y = dstPoints[1].y;
	cvdst[2].x = dstPoints[2].x;
	cvdst[2].y = dstPoints[2].y;
	cvdst[3].x = dstPoints[3].x;
	cvdst[3].y = dstPoints[3].y;

	CvMat* src_mat = cvCreateMat(4, 2, CV_32FC1);
	CvMat* dst_mat = cvCreateMat(4, 2, CV_32FC1);

	cvSetData(src_mat, cvsrc, sizeof(CvPoint2D32f));
	cvSetData(dst_mat, cvdst, sizeof(CvPoint2D32f));

	CvMat * translate = cvCreateMat(3,3,CV_32FC1); // allocate the homography matrix
    cvFindHomography(src_mat, dst_mat, translate); // compute the homography

    // reallocate when the image size has changed
    if (this->bAllocated() == true && inputImg.bAllocated == false) {
        projectedImage.allocate(this->width, this->height, OF_IMAGE_COLOR);
        inputImg.allocate(this->width, this->height);
        outputImg.allocate(this->width, this->height);
    }

    inputImg.setFromPixels(this->getPixelsRef()); // load the image
    cvWarpPerspective(inputImg.getCvImage(), outputImg.getCvImage(), translate, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(255)); // perspective warp

    // convert the output into an ofImage
    projectedImage.setFromPixels(outputImg.getPixels(), outputImg.getWidth(), outputImg.getHeight(), OF_IMAGE_COLOR);

    cvReleaseMat(&translate);
    cvReleaseMat(&src_mat);
    cvReleaseMat(&dst_mat);
}
Developer: tanakatsu, Project: of_pmtool, Lines of code: 49, Source: ofxImageROI.cpp

Example 5: icvCreateSpillTree

static CvSpillTree*
icvCreateSpillTree( const CvMat* raw_data,
		    const int naive,
		    const double rho,
		    const double tau )
{
  int n = raw_data->rows;
  int d = raw_data->cols;

  CvSpillTree* tr = (CvSpillTree*)cvAlloc( sizeof(CvSpillTree) );
  tr->root = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) );
  memset(tr->root, 0, sizeof(CvSpillTreeNode));
  tr->refmat = (CvMat**)cvAlloc( sizeof(CvMat*)*n );
  tr->total = n;
  tr->naive = naive;
  tr->rho = rho;
  tr->tau = tau;
  tr->type = raw_data->type;

  // tie a linked list to the root node
  tr->root->lc = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) );
  memset(tr->root->lc, 0, sizeof(CvSpillTreeNode));
  tr->root->lc->center = cvCreateMatHeader( 1, d, tr->type );
  cvSetData( tr->root->lc->center, _dispatch_mat_ptr(raw_data, 0), raw_data->step );
  tr->refmat[0] = tr->root->lc->center;
  tr->root->lc->lc = NULL;
  tr->root->lc->leaf = true;
  tr->root->lc->i = 0;
  CvSpillTreeNode* node = tr->root->lc;
  for ( int i = 1; i < n; i++ )
    {
      CvSpillTreeNode* newnode = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) );
      memset(newnode, 0, sizeof(CvSpillTreeNode));
      newnode->center = cvCreateMatHeader( 1, d, tr->type );
      cvSetData( newnode->center, _dispatch_mat_ptr(raw_data, i*d), raw_data->step );
      tr->refmat[i] = newnode->center;
      newnode->lc = node;
      newnode->i = i;
      newnode->leaf = true;
      newnode->rc = NULL;
      node->rc = newnode;
      node = newnode;
    }
  tr->root->rc = node;
  tr->root->cc = n;
  icvDFSInitSpillTreeNode( tr, d, tr->root );
  return tr;
}
Developer: 09beezahmad, Project: opencv, Lines of code: 48, Source: spilltree.cpp

Example 6: showInputLayer2DImage

void showInputLayer2DImage(LayerStack* stack){
	//printf("show image\n");
	int scale = 10;
	CvMat* image;
	int width = sqrt(stack->layerSizes[0]);
	image =cvCreateMat(width*scale,width*scale,CV_8UC1);
	//upscaled =cvCreateMat(width*scale,width*scale,CV_8UC1);
	unsigned char* data = malloc((width*width*scale*scale)*sizeof(unsigned char));
	assert(data!=NULL);
	for(int i=0;i<width*scale;i++){
		for(int j=0;j<width*scale;j++){
			//printf("to %d from %d\n",i*width*scale+j,(i/scale)*width+j/scale);
			data[i*width*scale+j] = 255-(stack->layers[0][(i/scale)*width+j/scale]*255);
			//printf("neuron: %.3f, pixel: %d \n",stack->stack[0]->neurons[i],data[i]);
		}
	}
	cvSetData(image,data,width*scale);

	

	
	cvNamedWindow("NeuroOutput",CV_WINDOW_AUTOSIZE);
	cvShowImage("NeuroOutput",image);
	cvWaitKey(0);
	
	cvReleaseMat(&image);
	free(data);
	//cvReleaseMat(&upscaled);
}
Developer: MarcGroef, Project: DNN, Lines of code: 29, Source: dnn_show.c

Example 7: switch

IplImage* CvCaptureCAM_Aravis::retrieveFrame(int)
{
    if(framebuffer) {
        int depth = 0, channels = 0;
        switch(pixelFormat) {
            case ARV_PIXEL_FORMAT_MONO_8:
                depth = IPL_DEPTH_8U;
                channels = 1;
                break;
            case ARV_PIXEL_FORMAT_MONO_12:
                depth = IPL_DEPTH_16U;
                channels = 1;
                break;
        }
        if(depth && channels) {
            IplImage src;
            cvInitImageHeader( &src, cvSize( width, height ), depth, channels, IPL_ORIGIN_TL, 4 );

            cvSetData( &src, framebuffer, src.widthStep );
            if( !frame ||
                 frame->width != src.width ||
                 frame->height != src.height ||
                 frame->depth != src.depth ||
                 frame->nChannels != src.nChannels) {

                cvReleaseImage( &frame );
                frame = cvCreateImage( cvGetSize(&src), src.depth, channels );
            }
            cvCopy(&src, frame);

            return frame;
        }
    }
    return NULL;
}
Developer: acss, Project: opencv, Lines of code: 35, Source: cap_aravis.cpp

Example 8: cvSetData

void ICVConsumer::consume (IConsumer::info consume_info,
	ssi_size_t stream_in_num,
	ssi_stream_t stream_in[]) {

	cvSetData (_image_in, stream_in[0].ptr, _stride_in);
	consume (stream_in[0].sr, _image_in);
}
Developer: hcmlab, Project: mobileSSI, Lines of code: 7, Source: ICVConsumer.cpp

Example 9: cvInitMatHeader

// initialize CvMat header, allocated by the user
CV_IMPL CvMat*
cvInitMatHeader( CvMat* arr, int height, int width,
                 int type, void* data, int step )
{
    CV_FUNCNAME( "cvInitMatHeader" );
    
    __BEGIN__;

    if( !arr )
        CV_ERROR_FROM_CODE( CV_StsNullPtr );

    if( (unsigned)(CV_ARR_CN(type) - 1) >= CV_CN_MAX )
        CV_ERROR_FROM_CODE( CV_BadDepth );

    if( (unsigned)CV_ARR_DEPTH(type) > CV_DEPTH_MAX )
        CV_ERROR_FROM_CODE( CV_BadNumChannels );

    if( height <= 0 || width <= 0 )
        CV_ERROR_FROM_CODE( CV_StsBadSize );
 
    arr->type = (type & CV_ARR_TYPE_MASK) | CV_ARR_MAGIC_VAL;
    arr->height = height;
    arr->width = width;

    CV_CALL( cvSetData( arr, data, step ));

    __END__;

    return arr;
}
Developer: mikanradojevic, Project: sdkpub, Lines of code: 31, Source: cvarray.cpp

Example 10: cvInitImageHeader

IplImage* CvCaptureAVI_VFW::retrieveFrame(int)
{
    if( avistream && bmih )
    {
        bool isColor = bmih->biBitCount == 24;
        int nChannels = (isColor) ? 3 : 1;
        IplImage src;
        cvInitImageHeader( &src, cvSize( bmih->biWidth, bmih->biHeight ),
                           IPL_DEPTH_8U, nChannels, IPL_ORIGIN_BL, 4 );

        char* dataPtr = (char*)(bmih + 1);

        // Only account for the color map size if we are an 8-bit image and the color map is used
        if (!isColor)
        {
            static int RGBQUAD_SIZE_PER_BYTE = sizeof(RGBQUAD)/sizeof(BYTE);
            int offsetFromColormapToData = (int)bmih->biClrUsed*RGBQUAD_SIZE_PER_BYTE;
            dataPtr += offsetFromColormapToData;
        }

        cvSetData( &src, dataPtr, src.widthStep );

        if( !frame || frame->width != src.width || frame->height != src.height )
        {
            cvReleaseImage( &frame );
            frame = cvCreateImage( cvGetSize(&src), 8, nChannels );
        }

        cvFlip( &src, frame, 0 );
        return frame;
    }

    return 0;
}
Developer: 12rohanb, Project: opencv, Lines of code: 34, Source: cap_vfw.cpp

Example 11: cvCreateImageHeader

/****************** Shrink the image to 640*512 for easier display ***********************/
void CMainFrame::Resize()
 {
	CvSize cvSize;
	cvSize.width = Width;
 	cvSize.height = Height;
 	
	// Build an OpenCV IplImage header and initialize it with the frame data captured by the camera
	IplImage *iplImage = cvCreateImageHeader(cvSize,IPL_DEPTH_8U,3);
 	cvSetData(iplImage,m_pImageBuffer,Width*3);  
	
    CvSize my_cvSize;           // size of the resized image
	my_cvSize.width = Width/2;
 	my_cvSize.height = Height/2;

	IplImage *iplgraytemp = cvCreateImage(my_cvSize,IPL_DEPTH_8U,3); // buffer for the resized image

	cvResize(iplImage,iplgraytemp,CV_INTER_NN);
    	
	// Extract the pixel data from the IplImage structure
    memcpy(DisplayBuffer,(BYTE*)iplgraytemp->imageData,Height/2*Width/2*3);

 	// Release the allocated image
	cvReleaseImage(&iplgraytemp);
	

 }
Developer: lzhang57, Project: 3D_scanner, Lines of code: 27, Source: MainFrm.cpp

Example 12: icvRetrieveImage

static IplImage*
icvRetrieveImage( void* obj )
{
    IplImage* img = 0;

    if( CV_IS_IMAGE(obj) )
        img = (IplImage*)obj;
    else if( CV_IS_MAT(obj) )
    {
        CvMat* m = (CvMat*)obj;
        img = cvCreateImageHeader( cvSize(m->cols,m->rows),
                        CV_MAT_DEPTH(m->type), CV_MAT_CN(m->type) );
        cvSetData( img, m->data.ptr, m->step );
        img->imageDataOrigin = (char*)m->refcount;
        m->data.ptr = 0; m->step = 0;
        cvReleaseMat( &m );
    }
    else if( obj )
    {
        cvRelease( &obj );
        CV_Error( CV_StsUnsupportedFormat, "The object is neither an image, nor a matrix" );
    }

    return img;
}
Developer: 93sam, Project: opencv, Lines of code: 25, Source: image.cpp

Example 13: cvCreateImageHeader

PyObject *wrapped_People_detectAllFaces(PyObject *self, PyObject *args)
{
  wrapped_People_t *pp = (wrapped_People_t*)self;

  // IplImage *image, const char* haar_classifier_filename, double threshold, IplImage *disparity_image, CvStereoCamModel *cam_model, bool do_draw)
  IplImage *input;
  char *haar_filename;
  double threshold;
  PyObject *disparity_image, *cam_model;
  int do_draw;

  {
    char *imgdata;
    int imgdata_size, x, y;
    if (!PyArg_ParseTuple(args, "s#iisdOOi", &imgdata, &imgdata_size, &x, &y, &haar_filename, &threshold, &disparity_image, &cam_model, &do_draw))
      return NULL;
    input = cvCreateImageHeader(cvSize(x, y), IPL_DEPTH_8U, 1);
    cvSetData(input, imgdata, x);
  }

  vector<CvRect> faces_vector = pp->c.detectAllFaces(input, haar_filename, threshold, NULL, NULL, do_draw);
  PyObject *r = PyList_New(faces_vector.size());
  for (size_t i = 0; i < faces_vector.size(); i++)
    PyList_SetItem(r, i, Py_BuildValue("iiii",
                    faces_vector[i].x,
                    faces_vector[i].y,
                    faces_vector[i].width,
                    faces_vector[i].height));
  return r;
}
Developer: janfrs, Project: kwc-ros-pkg, Lines of code: 30, Source: py.cpp

Example 14: MAX

void ofxCvBrightnessContrast::setBrightnessAndContrast(ofxCvImage& img, float brightnessAmount, float contrastAmount){
	brightnessVal = MAX(-127, MIN(127, brightnessAmount));
	contrastVal = MAX(-127, MIN(127, contrastAmount));
	
	unsigned char data[ 256 ];
	CvMat * matrix;
	double delta, a, b;
	
	matrix = cvCreateMatHeader( 1, 256, CV_8UC1 );
    cvSetData( matrix, data, 0 );
	
	if ( contrastVal>0 ) {
        delta = (127.0f*contrastVal) / 128.0f;
        a = 255.0f / ( 255.0f-(delta*2.0f) );
        b = a * (brightnessVal-delta);
    }
    else {
		delta = (-128.0f*contrastVal) / 128.0f;
		a = ( 256.0f-(delta*2.0f) ) / 255.0f;
		b = ( a*brightnessVal )+delta;
    }
	
	for( int i=0; i<256; i++ ) {
		int value = cvRound( (a*i)+b );
		data[i]	= (unsigned char) min( max(0,value), 255 );
	}
	
    cvLUT( img.getCvImage(), img.getCvImage(), matrix );
	cvReleaseMat( &matrix );
	
}
Developer: robksawyer, Project: ofxAddons, Lines of code: 31, Source: ofxCvBrightnessContrast.cpp

Example 15: VRFrame

void Filters::lowPass(VRFrame* frame, int size)
{
    IplImage* imgDst = 0;
    IplImage* imgAux = 0;
    IplImage* imgNew = 0;

    VRFrame* frameAux;

    Log::writeLog("%s :: param: frame[%x] size[%d]", __FUNCTION__, frame, size);

    // Adjust the size of the mask matrix.
    if (size > 9)
        size = 9;

    int cols_i = size;
    int rows_i = size;
    int total_size = 0;
    CvMat *filter = 0;

    total_size=(int)pow((double)size,2);

    // Mask used to perform the convolution.
    ///double convMask[total_size];
    double * convMask = new double[total_size];

    // Create an image with the same parameters as the original.
    frameAux = new VRFrame(frame);

    imgDst = VRFrame::imgAlloc(frameAux);
    imgAux = VRFrame::imgAlloc(frameAux);
    imgNew = VRFrame::imgAlloc(frameAux);

    // Build the mask with the size passed as a parameter.
    for (int i=0; i<total_size; i++)	
        convMask[i] = (double)1/(double)total_size;

    imgAux->imageData = frameAux->data->imageData;
    imgAux->widthStep = frameAux->data->width;

    imgDst->imageData = imgAux->imageData;
    imgDst->widthStep = imgAux->width;

    filter = cvCreateMatHeader(rows_i, cols_i, CV_64FC1);

    cvSetData(filter, convMask, cols_i*8);

    cvFilter2D(imgAux, imgDst, filter, cvPoint(-1,-1));

    VRFrame::imgCopy(imgDst, imgNew);

    frame->setImage(imgNew);

    // Free the temporaries
    VRFrame::imgDealloc(imgAux);
    VRFrame::imgDealloc(imgDst);

    delete[] convMask;
    delete frameAux;

}
Developer: plsang, Project: visualrhythm, Lines of code: 60, Source: Filters.cpp


Note: The cvSetData function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For redistribution and use, please refer to the license of the corresponding project; do not reproduce without permission.