

C++ cvLoadImage Function Code Examples

This article collects typical usage examples of the cvLoadImage function in C++. If you are wondering how cvLoadImage is used in practice, or looking for concrete C++ examples of it in real code, the hand-picked snippets below should help.


A total of 15 cvLoadImage code examples are shown below, drawn from open-source projects and sorted by popularity by default.
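
Before the project-level examples, here is a minimal stand-alone sketch of the basic call pattern. It is only an illustration: it assumes a legacy OpenCV build (1.x/2.x) whose C API still provides cvLoadImage (the C API was removed in OpenCV 4), and both the header path and the file name "lena.jpg" are placeholders to adapt to your installation.

#include <opencv/highgui.h>   /* header path varies by OpenCV version (assumption) */
#include <stdio.h>

int main(void)
{
    /* The second argument selects the load mode:
     *   CV_LOAD_IMAGE_COLOR     - force a 3-channel BGR image
     *   CV_LOAD_IMAGE_GRAYSCALE - force a single-channel image
     *   CV_LOAD_IMAGE_UNCHANGED - keep the file's own depth and channel count */
    IplImage* img = cvLoadImage("lena.jpg", CV_LOAD_IMAGE_COLOR);
    if (!img) {                        /* cvLoadImage returns NULL on failure */
        fprintf(stderr, "Could not load lena.jpg\n");
        return 1;
    }
    printf("Loaded %dx%d image with %d channel(s)\n",
           img->width, img->height, img->nChannels);
    cvReleaseImage(&img);              /* the caller owns the image and must release it */
    return 0;
}

Most of the examples below follow this same pattern: call cvLoadImage, check the returned pointer for NULL, work with the IplImage, and release it with cvReleaseImage.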

Example 1: main

int main( int argc, char** argv )
{
    if (argc == 1) {
        std::cerr << "Usage: ./transform_image -i source.pgm -o warped.pgm [-t warp.xfm] [mode]\n"
                  << "  Rotation angle:           -a 60\n"
                  << "  Scale factor:             -s 1.5\n"
                  << "  Use homography from file: -h warp.xfm\n";
        return 0;
    }
    
    char *image_file = NULL, *transform_file = NULL, *homography_file = NULL;
    std::string out_file = "warped.pgm";
    float angle = 0; //degrees
    float scaling = 0;
    Mode mode = None;
    
    int arg = 0;
    while (++arg < argc) {
        if (! strcmp(argv[arg], "-i"))
            image_file = argv[++arg];
        if (! strcmp(argv[arg], "-o"))
            out_file = argv[++arg];
        if (! strcmp(argv[arg], "-t"))
            transform_file = argv[++arg];
        if (! strcmp(argv[arg], "-a")) {
            angle = atof(argv[++arg]);
            mode = Rotate;
        }
        if (! strcmp(argv[arg], "-s")) {
            scaling = atof(argv[++arg]);
            mode = Scale;
        }
        if (! strcmp(argv[arg], "-h")) {
            homography_file = argv[++arg];
            mode = Transform;
        }
    }
    assert(image_file);
    assert(mode != None);

    IplImage* loaded = cvLoadImage(image_file, CV_LOAD_IMAGE_GRAYSCALE);
    assert(loaded);
    int W = loaded->width;
    int H = loaded->height;

    CvMat* transform = NULL;
    IplImage* warped = NULL;
    int flags = CV_INTER_CUBIC | CV_WARP_FILL_OUTLIERS;

    if (mode == Rotate) {
        transform = cvCreateMat(2, 3, CV_32FC1);
        CvSize warped_size = FullImageRotation(W, H, angle, transform);
        warped = cvCreateImage(warped_size, IPL_DEPTH_8U, 1);
        cvWarpAffine(loaded, warped, transform, flags);
    }
    else if (mode == Scale) {
        transform = cvCreateMat(2, 3, CV_32FC1);
        cvZero(transform);
        float* data = transform->data.fl;
        *data = scaling;
        data[transform->step/sizeof(float) + 1] = scaling;
        CvSize warped_size = cvSize(W*scaling + 0.5, H*scaling + 0.5);
        warped = cvCreateImage(warped_size, IPL_DEPTH_8U, 1);
        cvWarpAffine(loaded, warped, transform, flags);
    }
    else if (mode == Transform) {
        transform = cvCreateMat(3, 3, CV_32FC1);
        ReadTransform(homography_file, transform);
        warped = cvCreateImage(cvSize(W, H), IPL_DEPTH_8U, 1);
        cvWarpPerspective(loaded, warped, transform, flags);
    }
    
    cvSaveImage(out_file.c_str(), warped);

    if (transform_file)
        WriteTransform(transform_file, transform);
    
    return 0;
}
Author: janfrs, Project: kwc-ros-pkg, Lines: 79, Source: transform_image.cpp

Example 2: main

int main(int argc, char *argv[])
{
	int height, width, step, channels;
	unsigned char *data;
	char *window = "Object Detection";
	int i, j, k;

	//If we do not have an input image
	if(argc < 2)
	{
		printf("Usage: object-detection <image-file>\n");
		exit(0);
	}

	//Load image from input
	IplImage *img = 0;
	IplImage *src = 0;
	src =  cvLoadImage(argv[1], LOAD_RGB);
	//CvCapture *capture = cvCaptureFromCAM(0);

	if(!src)
	{
		printf("Could not load image file: %s\n", argv[1]);
		exit(0);
	}

	
	/*if(!cvGrabFrame(capture))
	{
		printf("Could not capture device\n");
		exit(0);
	}*/

	//img = cvRetrieveFrame(capture,1);
	
	//Get the image data
	height    = src->height;
	width     = src->width;
	step      = src->widthStep;
	channels  = src->nChannels;
	data      = (unsigned char *)src->imageData;
	

	img = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	
	cvCvtColor(src, img, CV_BGR2GRAY);

	height    = img->height;
	width     = img->width;
	step      = img->widthStep;
	channels  = img->nChannels;
	data      = (unsigned char *)img->imageData;
	
	//Information about the image
	printf("Processing a %dx%d image with %d channels\n", height, width, channels); 

	//Set up basic window
	cvNamedWindow(window, CV_WINDOW_AUTOSIZE);
	cvMoveWindow(window, 100, 100);
	
	//unsigned char *test = (unsigned char *)malloc(sizeof(unsigned char) * width * height);
	IplImage *b = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	IplImage *g = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	IplImage *r = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);

	cvSplit(src, b, g, r, 0);	
	steeringKernel(b);
	steeringKernel(g);
	steeringKernel(r);

	cvMerge(b, g, r, 0, src);

	//steeringKernel(img);

	//memcpy(data, test, sizeof(unsigned char) * width * height);
	//Invert the image
	//bilateralKernel(src);

	//pca(src);
	//Display the image on the windowi
	cvShowImage(window, src);
    //cvShowImage("b", b);
	//cvShowImage("g", g);
	cvSaveImage("object-detection-output.jpg", img, 0);

	//Wait key to signal exit  
	cvWaitKey(0);

	//Releases the image
	cvReleaseImage(&img);
	//cvReleaseCapture(&capture);
	return 0;
}
Author: dayanta, Project: object-detection, Lines: 93, Source: working-pca.c

Example 3: main

int main(int argc, char** argv)
{
	CvMemStorage* mstrg = cvCreateMemStorage();
	CvSeq* contours = 0; 
	CvSeq* contours2 = 0; 

	const char* filename = 0;
	IplImage* rawImage = 0, *yuvImage = 0, *borde = 0; //yuvImage is for codebook method
	IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
	CvCapture* capture = 0;		

	int c, n, nframes = 0;
	int nframesToLearnBG = 300;

	model = cvCreateBGCodeBookModel();

	//Set color thresholds to default values
	model->modMin[0] = 3;
	model->modMin[1] = model->modMin[2] = 3;
	model->modMax[0] = 10;
	model->modMax[1] = model->modMax[2] = 10;
	model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;

	bool pause = false;
	bool singlestep = false;

	printf("Capturando de la camara...\n");
	capture = cvCaptureFromCAM( 0 );

	if( !capture )
	{
		printf( "No se pudo inicializar la captura de video\n\n" );
		return -1;
	}

	while (true)
	{

		rawImage = cvQueryFrame( capture );
		++nframes;
		if(!rawImage) 
			break;


		//First time:
		if( nframes == 1 && rawImage )
		{
			borde = cvLoadImage("Borde.png",0);

			// CODEBOOK METHOD ALLOCATION
			yuvImage = cvCloneImage(rawImage);

			int w = yuvImage->width;
			cvSetImageROI(yuvImage, cvRect(w-250,0,250,250));
			IplImage *tmp = cvCreateImage(cvGetSize(yuvImage),yuvImage->depth,yuvImage->nChannels);
			cvCopy(yuvImage, tmp, NULL);
			cvResetImageROI(yuvImage);
			yuvImage = cvCloneImage(tmp);

			ImaskCodeBook = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );
			ImaskCodeBookCC = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );

			cvSet(ImaskCodeBook,cvScalar(255));

			cvNamedWindow("CapturaCam",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "ForegroundCodeBook",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "CodeBook_ConnectComp",CV_WINDOW_AUTOSIZE);

			printf (">>Aprendiendo fondo\n");
		}

		// If we've got an rawImage and are good to go:                
		if( rawImage )
		{
			cvFlip(rawImage, NULL, 1);
			int w = rawImage->width;

			cvFindContours(borde,mstrg,&contours,sizeof(CvContour),CV_RETR_EXTERNAL);

			// Draw the contour (ROI boundary)
			cvLine(rawImage, cv::Point (w-250,0), cv::Point (w-250,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			cvLine(rawImage, cv::Point (w-250,250), cv::Point (w,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			//
			if(nframes - 1 < nframesToLearnBG)
			{
				char buffer [33];
				_itoa (nframesToLearnBG - nframes,buffer,10);
				CvFont font2;
				cvInitFont(&font2, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 3, CV_AA);
				cvPutText(rawImage, buffer, cvPoint(50, 50), &font2, cvScalar(0, 0, 255, 0));
			}

			cvSetImageROI(rawImage, cvRect(w-250,0,250,250));
			IplImage *temp = cvCreateImage(cvGetSize(rawImage),rawImage->depth,rawImage->nChannels);

			cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );
			// YUV for the codebook method

			// Build the background model
			if(nframes-1 < nframesToLearnBG  )
//......... (remaining code omitted) .........
Author: rovim, Project: LSMRecognition, Lines: 101, Source: convexhull2.cpp

Example 4: main

/*
 * Parameters: complete paths to the two images to be compared
 * The file format must be supported by your OpenCV build
 */
int main(int argc, char** argv)
{
	if(argc!=3)
		return -1;
	
	// default settings
	double C1 = 6.5025, C2 = 58.5225;

	IplImage
		*img1=NULL, *img2=NULL, *img1_img2=NULL,
		*img1_temp=NULL, *img2_temp=NULL,
		*img1_sq=NULL, *img2_sq=NULL,
		*mu1=NULL, *mu2=NULL,
		*mu1_sq=NULL, *mu2_sq=NULL, *mu1_mu2=NULL,
		*sigma1_sq=NULL, *sigma2_sq=NULL, *sigma12=NULL,
		*ssim_map=NULL, *temp1=NULL, *temp2=NULL, *temp3=NULL;
	

	/***************************** INITS **********************************/
	img1_temp = cvLoadImage(argv[1]);
	img2_temp = cvLoadImage(argv[2]);

	if(img1_temp==NULL || img2_temp==NULL)
		return -1;

	int x=img1_temp->width, y=img1_temp->height;
	int nChan=img1_temp->nChannels, d=IPL_DEPTH_32F;
	CvSize size = cvSize(x, y);

	img1 = cvCreateImage( size, d, nChan);
	img2 = cvCreateImage( size, d, nChan);

	cvConvert(img1_temp, img1);
	cvConvert(img2_temp, img2);
	cvReleaseImage(&img1_temp);
	cvReleaseImage(&img2_temp);

	
	img1_sq = cvCreateImage( size, d, nChan);
	img2_sq = cvCreateImage( size, d, nChan);
	img1_img2 = cvCreateImage( size, d, nChan);
	
	cvPow( img1, img1_sq, 2 );
	cvPow( img2, img2_sq, 2 );
	cvMul( img1, img2, img1_img2, 1 );

	mu1 = cvCreateImage( size, d, nChan);
	mu2 = cvCreateImage( size, d, nChan);

	mu1_sq = cvCreateImage( size, d, nChan);
	mu2_sq = cvCreateImage( size, d, nChan);
	mu1_mu2 = cvCreateImage( size, d, nChan);
	

	sigma1_sq = cvCreateImage( size, d, nChan);
	sigma2_sq = cvCreateImage( size, d, nChan);
	sigma12 = cvCreateImage( size, d, nChan);

	temp1 = cvCreateImage( size, d, nChan);
	temp2 = cvCreateImage( size, d, nChan);
	temp3 = cvCreateImage( size, d, nChan);

	ssim_map = cvCreateImage( size, d, nChan);
	/*************************** END INITS **********************************/


	//////////////////////////////////////////////////////////////////////////
	// PRELIMINARY COMPUTING
	cvSmooth( img1, mu1, CV_GAUSSIAN, 11, 11, 1.5 );
	cvSmooth( img2, mu2, CV_GAUSSIAN, 11, 11, 1.5 );
	
	cvPow( mu1, mu1_sq, 2 );
	cvPow( mu2, mu2_sq, 2 );
	cvMul( mu1, mu2, mu1_mu2, 1 );


	cvSmooth( img1_sq, sigma1_sq, CV_GAUSSIAN, 11, 11, 1.5 );
	cvAddWeighted( sigma1_sq, 1, mu1_sq, -1, 0, sigma1_sq );
	
	cvSmooth( img2_sq, sigma2_sq, CV_GAUSSIAN, 11, 11, 1.5 );
	cvAddWeighted( sigma2_sq, 1, mu2_sq, -1, 0, sigma2_sq );

	cvSmooth( img1_img2, sigma12, CV_GAUSSIAN, 11, 11, 1.5 );
	cvAddWeighted( sigma12, 1, mu1_mu2, -1, 0, sigma12 );
	

	//////////////////////////////////////////////////////////////////////////
	// FORMULA

	// (2*mu1_mu2 + C1)
	cvScale( mu1_mu2, temp1, 2 );
	cvAddS( temp1, cvScalarAll(C1), temp1 );

	// (2*sigma12 + C2)
	cvScale( sigma12, temp2, 2 );
	cvAddS( temp2, cvScalarAll(C2), temp2 );
//......... (remaining code omitted) .........
Author: Anjan369, Project: ssim, Lines: 101, Source: SSIM.cpp

Example 5: zoomImg

void zoomImg(safeQueue *sq, struct bio_job *job)
{
    /* Search tmp folder */

    char *uri = job->name+strlen(SERVICE_ZOOM) + 1;
    sds dstpath = zoomePathInTmpDir(uri);
    //job->result = ufileMakeHttpReplyFromFile(dstpath);
    job->result = ufileMmapHttpReply(dstpath);
    printf("After Read File %.2lf \n", (double)(clock()));
    if(job->result) {
        sdsfree(dstpath);
        safeQueuePush(sq,job); /* the current job will be freed by master */
        return;
    }

    int width = 0, height = 0;
    sds fn = NULL;
    sds srcpath = NULL;
    IplImage* src = NULL;
    IplImage* dst = NULL;
    IplImage* toencode = NULL;
    CvMat* enImg = NULL;
    int notpushed = 1;
    int iscrop = 1;
    int p[3];
    p[0] = CV_IMWRITE_JPEG_QUALITY;
    p[1] = IMG_DEFAULT_QUALITY;
    p[2] = 0;
    uchar *buf = NULL;
    size_t len = 0;
    uri_parse_state state = img_parse_uri(uri,&fn,&width,&height, &iscrop, &p[1]);
    if(state == parse_error) goto clean;
    // initializations
    srcpath = bioPathInSrcDir(fn);    
    printf("Before Load Image %.2lf \n", (double)(clock()));
    src = cvLoadImage(srcpath, CV_LOAD_IMAGE_COLOR);
    printf("After Load Image %.2lf \n", (double)(clock()));
    /* validate that everything initialized properly */
    if(!src)
    {
        ulog(CCACHE_VERBOSE,"can't load image file: %s\n",srcpath);
        goto clean;
    }

    int src_width = src->width;
    int src_height = src->height;
    int roi_src_width = src_width;
    int roi_src_height = src_height;


    if(width&&height) {
        /* Preserve origial ratio */
        /* NOTICE: dangerous type conversion */
        roi_src_width = src_height*width/height;
        roi_src_height = src_width*height/width;
        if(roi_src_width>src_width) roi_src_width = src_width;
        if(roi_src_height>src_height) roi_src_height = src_height;
    }
    else if(!width&&height) {
        width = src_width;
    }
    else if(width&&!height) {
        height = src_height;
    }
    else {
        toencode = src;
    }

    if(!toencode) {
        if(iscrop) {
            int x = (src_width - roi_src_width)/2;
            int y = (src_height - roi_src_height)/2;
            // Say what the source region is
            cvSetImageROI( src, cvRect(x,y,roi_src_width,roi_src_height));
        }

        dst = cvCreateImage(cvSize(width,height), src->depth, src->nChannels);
        if(!dst) goto clean;

        cvResize(src,dst,CV_INTER_CUBIC);
        printf("After Resize Image %.2lf \n", (double)(clock()));


        if(iscrop) {
            cvResetImageROI( src );
        }

        toencode = dst;
    }


    enImg = cvEncodeImage(IMG_ENCODE_DEFAULT, toencode, p );

    printf("After Encode Image %.2lf \n", (double)(clock()));

    buf = enImg->data.ptr;
    len = enImg->rows*enImg->cols;
    job->result = ufileMakeHttpReplyFromBuffer(buf,len);
    job->type |= BIO_WRITE_FILE; /* Remind master of new written file  */
    safeQueuePush(sq,job);    
//......... (remaining code omitted) .........
Author: truongminh, Project: ccache, Lines: 101, Source: zoom.c

Example 6: printf

/*!
    \fn CvBinGabAdaFeatureSelect::svmlearning(const char* path, int nofeatures, CvSVM * svm)
 */
void CvBinGabAdaFeatureSelect::svmlearning(const char* path, int nofeatures, CvSVM * svm)
{
  if( db_type == XM2VTS )
  {
    printf("Training an SVM classifier  ................\n");
    CvXm2vts *xm2vts = (CvXm2vts*)database;
    int nTrainingExample = 200*4;
    CvMat* trainData = cvCreateMat(nTrainingExample, nofeatures, CV_32FC1);
    CvMat* response = cvCreateMat(nTrainingExample, 1, CV_32FC1);
    
    for (int i = 0; i < nofeatures; i++)
    {
      /* load feature value */
      CvGaborFeature *feature;
      feature = new_pool->getfeature(i);
      printf("Getting the %d feature ............\n", i+1);
      
      char *filename = new char[50];
      //training validation
      double l, t;
      int fal = 0;
      for(int sub = 1; sub <= 200; sub++)
      {
        if (((CvXm2vts*)database)->getGender( sub )) t = 1.0;
        else t = 2.0;
        
        for(int pic = 1; pic <= 4; pic++)
        {
          sprintf(filename, "%s/%d_%d.bmp", path, sub, pic);
          IplImage *img = cvLoadImage( filename, CV_LOAD_IMAGE_ANYCOLOR );
          IplImage *grayimg = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
          if ( img->nChannels == 1 )  cvCopy( img, grayimg, NULL );
          else if (img->nChannels == 3)   cvCvtColor( img, grayimg, CV_RGB2GRAY );
          
          double vfeature = feature->val( img );
          cvSetReal2D( trainData, ((sub-1)*4+(pic-1)), i, vfeature );
          cvSetReal1D( response, ((sub-1)*4+(pic-1)), t );
          cvReleaseImage(&img);
          cvReleaseImage(&grayimg);
        }
      }
      delete [] filename;
    }
    
    
    printf("building the svm classifier .........................\n");
    CvTermCriteria term_crit = cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 200, 0.8);
  /*Type of SVM, one of the following types:
    CvSVM::C_SVC - n-class classification (n>=2), allows imperfect separation of classes with penalty multiplier C for outliers.
    CvSVM::NU_SVC - n-class classification with possible imperfect separation. Parameter nu (in the range 0..1, the larger the value, the smoother the decision boundary) is used instead of C.
    CvSVM::ONE_CLASS - one-class SVM. All the training data are from the same class, SVM builds a boundary that separates the class from the rest of the feature space.
    CvSVM::EPS_SVR - regression. The distance between feature vectors from the training set and the fitting hyperplane must be less than p. For outliers the penalty multiplier C is used.
    CvSVM::NU_SVR - regression; nu is used instead of p. */
    int _svm_type = CvSVM::NU_SVC;
  /*The kernel type, one of the following types:
    CvSVM::LINEAR - no mapping is done, linear discrimination (or regression) is done in the original feature space. It is the fastest option. d(x,y) = x•y == (x,y)
    CvSVM::POLY - polynomial kernel: d(x,y) = (gamma*(x•y)+coef0)degree
    CvSVM::RBF - radial-basis-function kernel; a good choice in most cases: d(x,y) = exp(-gamma*|x-y|2)
    CvSVM::SIGMOID - sigmoid function is used as a kernel: d(x,y) = tanh(gamma*(x•y)+coef0) */
    
    int _kernel_type = CvSVM::POLY;
    
    double _degree = 3.0;
    double _gamma = 1.0;
    double _coef0 = 0.0;
    double _C = 1.0;
    double _nu = 1.0;
    double _p = 1.0;
    
    CvSVMParams  params( CvSVM::C_SVC, CvSVM::POLY, _degree, _gamma, _coef0, _C, _nu, _p,
                         0, term_crit );
    
    svm->train( trainData, response, 0, 0, params );
    
    svm->save( "svm.xml", "svm" );
    cvReleaseMat(&response);
    cvReleaseMat(&trainData);
  }
}
Author: Slipperboy, Project: gaborboosting, Lines: 82, Source: cvbingabadafeatureselect.cpp

Example 7: main

void main()
{
	windage::Logger logger(&std::cout);

	IplImage* grabImage;
	IplImage* inputImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 4);
	IplImage* resizeImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* grayImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* resultImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3);

	FleaCamera* capture = new FleaCamera();
	capture->open();
	capture->start();
//	CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);
	cvNamedWindow("result");

	// create and initialize tracker
	//IMPORTANT
	windage::Frameworks::PlanarObjectTracking tracking;

	windage::Calibration* calibration;
	windage::Algorithms::FeatureDetector* detector;
	windage::Algorithms::SearchTree* searchtree;
	windage::Algorithms::OpticalFlow* opticalflow;
	windage::Algorithms::HomographyEstimator* estimator;
	windage::Algorithms::OutlierChecker* checker;
	windage::Algorithms::HomographyRefiner* refiner;

	calibration = new windage::Calibration();
	detector = new windage::Algorithms::WSURFdetector();
	searchtree = new windage::Algorithms::KDtree();
	opticalflow = new windage::Algorithms::OpticalFlow();
	estimator = new windage::Algorithms::RANSACestimator();
	checker = new windage::Algorithms::OutlierChecker();
	refiner = new windage::Algorithms::LMmethod();

	calibration->Initialize(INTRINSIC[0], INTRINSIC[1], INTRINSIC[2], INTRINSIC[3], INTRINSIC[4], INTRINSIC[5], INTRINSIC[6], INTRINSIC[7]);
	searchtree->SetRatio(0.7);
	opticalflow->Initialize(WIDTH, HEIGHT, cvSize(15, 15), 3);
	estimator->SetReprojectionError(REPROJECTION_ERROR);
	checker->SetReprojectionError(REPROJECTION_ERROR * 3);
	refiner->SetMaxIteration(10);

	tracking.AttatchCalibration(calibration);
	tracking.AttatchDetetor(detector);
	tracking.AttatchMatcher(searchtree);
	tracking.AttatchTracker(opticalflow);
	tracking.AttatchEstimator(estimator);
	tracking.AttatchChecker(checker);
	tracking.AttatchRefiner(refiner);
//	tracking.AttatchFilter(filter);

	tracking.SetDitectionRatio(1);
	tracking.Initialize(WIDTH, HEIGHT, (double)WIDTH, (double)HEIGHT);

	int keypointCount = 0;
	int matchingCount = 0;
	double threshold = detector->GetThreshold();
	double processingTime = 0.0;

	bool trained = false;

#if USE_TEMPLATE_IMAEG
	IplImage* sampleImage = cvLoadImage(TEMPLATE_IMAGE, 0);
	detector->SetThreshold(threshold);
	tracking.AttatchReferenceImage(sampleImage);
	tracking.TrainingReference(SCALE_FACTOR, SCALE_STEP);
	detector->SetThreshold(threshold);
	trained = true;
#endif

	char message[100];
	bool flip = false;
	bool processing = true;
	while(processing)
	{
		// capture image
		capture->update();
		grabImage = capture->GetIPLImage();
//		inputImage = cvRetrieveFrame(capture);
		cvResize(grabImage, inputImage);
		cvCvtColor(inputImage, resultImage, CV_BGRA2BGR);
		cvCvtColor(resultImage, grayImage, CV_BGR2GRAY);
		if(flip)
			cvFlip(inputImage, inputImage);

		logger.updateTickCount();

		// track object
		if(trained)
		{
			//IMPORTANT
			tracking.UpdateCamerapose(grayImage);

			// adaptive threshold
#if USE_ADAPTIVE_THRESHOLD
			int localcount = detector->GetKeypointsCount();
			if(keypointCount != localcount)
			{
				if(localcount > FEATURE_COUNT)
//......... (remaining code omitted) .........
Author: Barbakas, Project: windage, Lines: 101, Source: main.cpp

Example 8: main

int main( int argc, char** argv )
{
	init();
	
	/* data structure for the image */
	IplImage *img = 0;

	char filename[]="test1.jpg";

	/* load the image,
	use CV_LOAD_IMAGE_GRAYSCALE to load the image in grayscale */
	img = cvLoadImage(filename, CV_LOAD_IMAGE_COLOR );
	
	/* always check */
	if( img == 0 ) {
		fprintf( stderr, "Cannot load file %s!\n", filename);
		return 1;
	}

	/* create a window */ 
	cvNamedWindow( "image", CV_WINDOW_AUTOSIZE );
	/* display the image */  
	cvShowImage( "image", img );
	
	
	
	
	
	IplImage *colored_image=cvCreateImage(cvSize(img->width,img->height),8,3);
	BayerToColor(img,colored_image,GBRG);
	
	/* create a window */ 
	cvNamedWindow( "Colored image", CV_WINDOW_AUTOSIZE );
	/* display the image */  
	cvShowImage( "Colored image", colored_image );
	
	
	
	IplImage *bw_image=cvCreateImage(cvSize(img->width,img->height),8,1);
	
	BayerToGray(img,bw_image,GBRG);
	
	/* create a window */ 
	cvNamedWindow( "BW image", CV_WINDOW_AUTOSIZE );
	/* display the image */  
	cvShowImage( "BW image", bw_image );




	/* wait until user press a key */
	cvWaitKey(0);

	/* free memory */
	cvDestroyWindow( "image" );
	cvDestroyWindow( "Colored image" );
	cvDestroyWindow( "BW image");

	cvReleaseImage( &img );
	cvReleaseImage( &colored_image );
	cvReleaseImage( &bw_image );
	return 0;
}
Author: erebuswolf, Project: IGVC-Code, Lines: 63, Source: Colorize.cpp

Example 9: opencvProcess

bool _stdcall opencvProcess(LPWSTR csInputPath, LPWSTR csOutputPath)
{
	char inputPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csInputPath, -1, inputPath, SIZE, NULL, NULL);//wchar_t * to char
	char outputPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csOutputPath, -1, outputPath, SIZE, NULL, NULL);//wchar_t * to char *

	//load image
	img = cvLoadImage(inputPath, 1);
	if(!img)
		return false;
	else 
	{
		CvSize size = cvGetSize(img); 

		int xScreen = GetSystemMetrics(SM_CXSCREEN);
		int yScreen = GetSystemMetrics(SM_CYSCREEN);
		
		while(size.width + 100 > xScreen || size.height + 100 > yScreen)
		{
			size.width /= 1.4;
			size.height /= 1.4;
		}//end while
	
		size.height += 90;

		cvNamedWindow(windowName, 0);
		cvResizeWindow(windowName, size.width, size.height); 
		cvMoveWindow(windowName, (xScreen-size.width)/2, (yScreen-size.height)/2 ); 
		
		dst = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
	
		LookupTableMatrix = cvCreateMatHeader(1,256,CV_8UC1);
		cvSetData(LookupTableMatrix, LookupTableData, 0);

		cvNamedWindow(windowName, 1);

		//read Brightness Contrast from file
		char bcPath[SIZE] = "";
		sprintf(bcPath, "%s\\InstaFilter\\Brightness and Contrast.if", getenv("temp"));
		FILE *bc = fopen(bcPath, "rb");
		if(!bc) return false;
		//read
		char data[SIZE];
		fgets(data, SIZE, bc);
		char *token = strtok(data, " ");
		BrightnessPosition += atoi(token);
		token = strtok(NULL, " ");
		ContrastPosition += atoi(token);
		fclose(bc);

		cvCreateTrackbar("亮度", windowName, &BrightnessPosition, 200, onTrackbar);
		cvCreateTrackbar("對比", windowName, &ContrastPosition, 200, onTrackbar);

		onTrackbar(0);

		cvWaitKey(0);
			
		//release
		cvSaveImage(outputPath, dst);
		cvReleaseImage(&img);
		cvReleaseImage(&dst);
		cvDestroyAllWindows();

		return true;
	}//end else
	return false;
}//end opencvProcess
Author: QbsuranAlang, Project: InstaFilter, Lines: 68, Source: Brightness and Contrast.cpp

Example 10: main

int main()
{
    IplImage* img = cvLoadImage("goal_arena.bmp");
    CvSize imgSize = cvGetSize(img);
    IplImage* detected = cvCreateImage(imgSize, 8, 1);
 
    IplImage* imgBlue = cvCreateImage(imgSize, 8, 1);
    IplImage* imgGreen = cvCreateImage(imgSize, 8, 1);
    IplImage* imgRed = cvCreateImage(imgSize, 8, 1);

    cvSplit(img, imgBlue, imgGreen, imgRed, NULL);
    cvAnd(imgGreen, imgBlue, detected);
    cvAnd(detected, imgRed, detected);
    cvErode(detected, detected);
    cvDilate(detected, detected);    // Opening
 
    // cvThreshold(detected, detected, 100, 250, CV_THRESH_BINARY);
    CvMat* lines = cvCreateMat(100, 1, CV_32FC2);
    cvHoughLines2(detected, lines, CV_HOUGH_STANDARD, 1, 0.001, 100);
    // CvMat* lines = cvCreateMat(100, 1, CV_32FC2);
    // cvHoughLines2(detected, lines, CV_HOUGH_STANDARD, 1, 0.001, 100);

    CvPoint left1 = cvPoint(0, 0);
    CvPoint left2 = cvPoint(0, 0);
    CvPoint right1 = cvPoint(0, 0);
    CvPoint right2 = cvPoint(0, 0);
    CvPoint top1 = cvPoint(0, 0);
    CvPoint top2 = cvPoint(0, 0);
    CvPoint bottom1 = cvPoint(0, 0);
    CvPoint bottom2 = cvPoint(0, 0);
 
    int numLines = lines->rows;
    int numTop = 0;
    int numBottom = 0;
    int numLeft = 0;
    int numRight = 0;

    for(int i=0;i<numLines;i++)
    {
    	CvScalar dat = cvGet1D(lines, i);
        double rho = dat.val[0];
        double theta = dat.val[1];
        if(theta==0.0)
            continue;
        double degrees = theta*180.0/(3.1412);
 
        CvPoint pt1 = cvPoint(0, rho/sin(theta));
        CvPoint pt2 = cvPoint(img->width, (-img->width/tan(theta)) + rho/sin(theta));
         if(abs(rho)<50.0)
        {
        	if(degrees>45.0 && degrees<135.0)
            {
            	numTop++;
 
                // The line is roughly horizontal and near the top
                top1.x+=pt1.x;
                top1.y+=pt1.y;
 
                top2.x+=pt2.x;
                top2.y+=pt2.y;
            }

            else
            {
                numLeft++;
 
                // The line is vertical and near the left
                left1.x+=pt1.x;
                left1.y+=pt1.y;
 
                left2.x+=pt2.x;
                left2.y+=pt2.y;
            }
        }

        else
        {
            // We're in the right portion
            if(degrees>45.0 && degrees<135.0)
            {
                numBottom++;
 
                //The line is horizontal and near the bottom
                bottom1.x+=pt1.x;
                bottom1.y+=pt1.y;
 
                bottom2.x+=pt2.x;
                bottom2.y+=pt2.y;
            }
            else
            {
                numRight++;
 
                // The line is vertical and near the right
                right1.x+=pt1.x;
                right1.y+=pt1.y;
 
                right2.x+=pt2.x;
                right2.y+=pt2.y;
            }
//......... (remaining code omitted) .........
Author: p-kar, Project: vision, Lines: 101, Source: get_field.cpp

Example 11: main

int main(int argc, char *argv[])
{
	if (argc != 6) {
		printf("\nERROR: too few parameters\n");
		help();
		return -1;
	}
	help();
	//INPUT PARAMETERS:
	int board_w = atoi(argv[1]);
	int board_h = atoi(argv[2]);
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
	CvMat *distortion = (CvMat *) cvLoad(argv[4]);
	IplImage *image = 0, *gray_image = 0;
	if ((image = cvLoadImage(argv[5])) == 0) {
		printf("Error: Couldn't load %s\n", argv[5]);
		return -1;
	}
	gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	//UNDISTORT OUR IMAGE
	IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy);

	//GET THE CHECKERBOARD ON THE PLANE
	cvNamedWindow("Checkers");
	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(image,
										board_sz,
										corners,
										&corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);
	if (!found) {
		printf
			("Couldn't aquire checkerboard on %s, only found %d of %d corners\n",
			 argv[5], corner_count, board_n);
		return -1;
	}
	//Get Subpixel accuracy on those corners
	cvFindCornerSubPix(gray_image, corners, corner_count,
					   cvSize(11, 11), cvSize(-1, -1),
					   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
									  0.1));

	//GET THE IMAGE AND OBJECT POINTS:
	//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
	//That means corners are at: corners[r*board_w + c]
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;
	objPts[0].y = 0;
	objPts[1].x = board_w - 1;
	objPts[1].y = 0;
	objPts[2].x = 0;
	objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;
	objPts[3].y = board_h - 1;
	imgPts[0] = corners[0];
	imgPts[1] = corners[board_w - 1];
	imgPts[2] = corners[(board_h - 1) * board_w];
	imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

	//DRAW THE POINTS in order: B,G,R,YELLOW
	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	//DRAW THE FOUND CHECKERBOARD
	cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
	cvShowImage("Checkers", image);

	//FIND THE HOMOGRAPHY
	CvMat *H = cvCreateMat(3, 3, CV_32F);
	CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
	float Z = 25;
	int key = 0;
	IplImage *birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");
	while (key != 27) {			//escape key stops
		CV_MAT_ELEM(*H, float, 2, 2) = Z;
//     cvInvert(H,H_invt); //If you want to invert the homography directly
//     cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
		//USE HOMOGRAPHY TO REMAP THE VIEW
		cvWarpPerspective(image, birds_image, H,
						  CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
						  CV_WARP_FILL_OUTLIERS);
		cvShowImage("Birds_Eye", birds_image);
		key = cvWaitKey();
		if (key == 'u')
//......... (remaining code omitted) .........
Author: emcute0319, Project: LearningOpenCVCode, Lines: 101, Source: ch12_ex12_1.cpp

Example 12: main

int main(int argc, char* argv[]) {
	char fname[100]="output";


	if (argc < 2) return usage(argv[0]);

	int processArgumentOne, processArgumentTwo; speed_up = 33;

	printf("Choose processing mode:\n");
	printf("1: Hough Transform\n");
	printf("2: Least Squares Means Linear Regression\n");
	printf("3: Singular Value Decomposition\n");
	printf("4: Factored Singular Value Decomposition\n");
	scanf("%d", &processArgumentOne);

	sprintf(fname,"output%d.csv", processArgumentOne);
	oFile=fopen(fname,"w");

	//if(*processArgument != 1 || *processArgument != 2 || *processArgument != 3) { printf("Enter a valid option\n"); return -1; }

	printf("Choose input type:\n");
	printf("1: Process input as movie file\n");
	printf("2: Process input as image in sequential series\n");
	scanf("%d", &processArgumentTwo);

	if(processArgumentTwo == 1) { gCapture = cvCreateFileCapture(argv[1]); frame = cvQueryFrame(gCapture); }
	else if (processArgumentTwo == 2) frame = cvLoadImage(argv[1]);


	cvShowImage(argv[1], red);
	initializeImages();
	cvSplit(frame, blue, green, red, NULL);
	subFrameX = .1 * width;
	subFrameY = .1 * height;
	cvNamedWindow("Initial", CV_WINDOW_AUTOSIZE);
	cvShowImage("Initial", frame);
	cvSetMouseCallback("Initial", myMouseCallback, NULL);
	cvWaitKey(0);
	cvDestroyWindow("Initial");
	cvNamedWindow("Frame Analysis", CV_WINDOW_NORMAL);
	cvSetMouseCallback("Frame Analysis", onTileSelect, NULL);
	stream = fopen("output","w");

	printf("Starting...\n");
	gettimeofday(&tStart, 0);

	int processTypeInput = processArgumentOne;
	int inputFileType = processArgumentTwo;

	switch(processTypeInput) {
		case 1:
			runHoughTransform(inputFileType);
			break;
		case 2:
			runLSMRegression(inputFileType);
			break;
		case 3:
			runSVD(inputFileType);
			break;
		case 4:
			runFactoredSVD(inputFileType);
			break;
		default:
			printf("Quit without processing\n");
			break;
	}

	gettimeofday(&tComplete, 0);
	totalRunTime = (tComplete.tv_sec - tStart.tv_sec);
	//printf("Total Processing Time: %f\n", totalProcessingTime);
	printf("Total Runtime: %f seconds\n", totalRunTime);

	cvReleaseCapture(&gCapture);
	cvDestroyWindow(argv[0]);
	return(0);
}
Author: freeman94, Project: vision, Lines: 76, Source: intersect.c

Example 13: main


//......... (earlier part of the code omitted) .........
	}

	// Create output directory.
	if (save)
	{
		catcierge_make_path("%s", output_path);
	}

	args.super.type = MATCHER_TEMPLATE;
	hargs.super.type = MATCHER_HAAR;

	if (catcierge_matcher_init(&matcher,
		(!strcmp(matcher_str, "template")
		? (catcierge_matcher_args_t *)&args
		: (catcierge_matcher_args_t *)&hargs)))
	{
		fprintf(stderr, "Failed to init %s matcher.\n", matcher_str);
			return -1;
	}

	matcher->debug = debug;
	if (!matcher->is_obstructed)
		matcher->is_obstructed = catcierge_is_frame_obstructed;
	//catcierge_set_binary_thresholds(&ctx, 90, 200);

	// If we should preload the images or not
	// (Don't let file IO screw with benchmark)
	if (preload)
	{
		for (i = 0; i < (int)img_count; i++)
		{
			printf("Preload image %s\n", img_paths[i]);

			if (!(imgs[i] = cvLoadImage(img_paths[i], 1)))
			{
				fprintf(stderr, "Failed to load match image: %s\n", img_paths[i]);
				ret = -1;
				goto fail;
			}
		}
	}

	start = clock();

	if (test_matchable)
	{
		for (i = 0; i < (int)img_count; i++)
		{
			// This tests if an image frame is clear or not (matchable).
			int frame_obstructed;

			if ((frame_obstructed = matcher->is_obstructed(matcher, imgs[i])) < 0)
			{
				fprintf(stderr, "Failed to detect check for matchability frame\n");
				return -1;
			}

			printf("%s: Frame obstructed = %d\n",
				img_paths[i], frame_obstructed);

			if (show)
			{
				cvShowImage("image", imgs[i]);
				cvWaitKey(0);
			}
		}
Author: johannjacobsohn, Project: catcierge, Lines: 67, Source: catcierge_tester.c

Example 14: _img

inline
IplImageWrapper::IplImageWrapper(std::string fileName)
	: _img(cvLoadImage(fileName.c_str())), _nRefs(new std::size_t(1)), _mask()
{
//std::cout << "IplImageWrapper::IplImageWrapper() -- address: " << _img << " nRefs: " << (*_nRefs) << std::endl;
}
Author: Xuelu91, Project: hog3d, Lines: 6, Source: IplImageWrapper.hpp

Example 15: testfaceLib_pThread

int testfaceLib_pThread ( const char* str_video, int trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster )
{
    FILE* fp_imaginfo = fopen( "imaginfo.txt", "w" );

	bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	int  sampleRate =1;
	
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	int  prob_estimate[7];
	char sState[256];
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster =  true;

	CxlibFaceAnalyzer faceAnalyzer(viewAngle, (EnumTrackerType)trackerType, blink, smile, gender, age, recog, sampleRate, str_facesetxml, recognizerType, bEnableAutoCluster); 

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256] = "";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);

	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{   // smile icon
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{   // gender/age/smile icons
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}

	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );
	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType== TRA_HAAR ? "haar" : (recognizerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play    = true;
	mouse_faceparam.ret_online_collecting = 0;

	static const int MAX_FACES = 16; 
	if(! quiet)
	{
		mouse_faceparam.play    = true;
		mouse_faceparam.updated = false;
		mouse_faceparam.face_num  = faceAnalyzer.getMaxFaceNum();
		mouse_faceparam.rects     = faceAnalyzer.getFaceRects();
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= faceAnalyzer.getBigCutFace();
		mouse_faceparam.typeRecognizer = 0;
		mouse_faceparam.faceRecognizer = &faceAnalyzer;
		mouse_faceparam.ret_online_collecting = 0;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
		faceAnalyzer.setMouseParam(&mouse_faceparam);
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
//......... (remaining code omitted) .........
Author: ruyiweicas, Project: FaceSmileAgeSex_Detection, Lines: 101, Source: testfacelib.cpp


Note: The cvLoadImage examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; consult each project's License before redistributing or reusing the code. Please do not republish this compilation without permission.