

C++ VideoCapture::open Method Code Examples

This article collects typical usage examples of the C++ VideoCapture::open method. If you are wondering what VideoCapture::open does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the VideoCapture class, to which this method belongs.


The following presents 15 code examples of the VideoCapture::open method, sorted by popularity by default.
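
Before the examples, a minimal self-contained sketch may help frame them (the file name "video.mp4" is hypothetical): open() accepts either a file path/stream URL or an integer camera index, returns false on failure, and should always be followed by an isOpened() check before reading frames.

#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    cv::VideoCapture cap;

    // open() is overloaded: a string opens a file or stream URL,
    // an integer opens a camera by device index.
    if (!cap.open("video.mp4"))   // hypothetical file name; fall back to camera 0
        cap.open(0);

    if (!cap.isOpened()) {        // always verify before reading frames
        std::cerr << "Could not open any capture source" << std::endl;
        return 1;
    }

    cv::Mat frame;
    while (cap.read(frame)) {     // read() returns false at end of stream
        cv::imshow("frame", frame);
        if (cv::waitKey(30) == 27) break;  // ESC quits
    }
    return 0;
}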

Example 1: colorKeyingHSV

void colorKeyingHSV(const string& videoPath){
	// Load the video
	VideoCapture video;
	video.open(videoPath);
	int width = video.get(CV_CAP_PROP_FRAME_WIDTH);
	int height = video.get(CV_CAP_PROP_FRAME_HEIGHT);

	namedWindow("Video");
	namedWindow("Hue");
	createTrackbar("Lower", "Hue", 0, 180);
	setTrackbarPos("Lower", "Hue", lowerHue);
	createTrackbar("Upper", "Hue", 0, 180);
	setTrackbarPos("Upper", "Hue", upperHue);

	namedWindow("Saturation");
	createTrackbar("Select", "Saturation", 0, 255);
	setTrackbarPos("Select", "Saturation", threshSaturation);
	namedWindow("Maske");

	Mat hueFrame(height, width, CV_8UC1);
	Mat saturationFrame(height, width, CV_8UC1);
	Mat mask(height, width, CV_8UC1);

	int frameNumber = 0;
	while(true){
		Mat videoFrame;
		if (video.read(videoFrame) == false){
			break;
		}

		// Convert to the HSV color space
		Mat hsvFrame;
		cvtColor(videoFrame, hsvFrame, CV_BGR2HSV);	

		// Read the current thresholds from the trackbars
		int threshSaturation = getTrackbarPos("Select", "Saturation");
		int lowerThreshHue = getTrackbarPos("Lower", "Hue");
		int upperThreshHue = getTrackbarPos("Upper", "Hue");

		// Analyze the pixels
		int sumx = 0;
		int sumy = 0;
		int countWhites = 0;
		for(int x = 0; x < videoFrame.cols; x++){
			for(int y = 0; y < videoFrame.rows; y++){
				Vec3b hsvPixel = hsvFrame.at<Vec3b>(y,x);
				int hue = hsvPixel[0];
				int saturation = hsvPixel[1];

				// Masking and centroid accumulation
				if (saturation > threshSaturation && hue > lowerThreshHue && hue < upperThreshHue){
					mask.at<uchar>(y,x) = 255;
					sumx += x;
					sumy += y;
					countWhites++;
				}
				else{
					mask.at<uchar>(y,x) = 0;
				}

				// the following steps are not strictly necessary; they only serve as visualization
				if (hue > lowerThreshHue && hue < upperThreshHue){
					hueFrame.at<uchar>(y,x) = 255;
				}
				else{
					hueFrame.at<uchar>(y,x) = 0;
				}
				if (saturation > threshSaturation){
					saturationFrame.at<uchar>(y,x) = 255;
				}
				else{
					saturationFrame.at<uchar>(y,x) = 0;
				}
			}
		}
	
		// Compute the centroid
		if (countWhites > 0){
			Point center(sumx/countWhites, sumy/countWhites);
			cross(videoFrame, center, crossLength, colorGreen);
		}
		
		imshow("Hue", hueFrame);
		imshow("Saturation", saturationFrame);
		imshow("Maske", mask);
		imshow("Video", videoFrame);
		waitKey(100);
	}
}
Author: SebastianBone, Project: avprg, Lines: 89, Source: colorkeying.cpp
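
Note that Example 1 uses the legacy OpenCV 2.x C constants (CV_CAP_PROP_FRAME_WIDTH, CV_BGR2HSV). In OpenCV 3/4 the same calls take namespaced enum values; a minimal sketch of the equivalent setup:

#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>

void openWithModernConstants(const std::string& videoPath) {
    cv::VideoCapture video;
    video.open(videoPath);

    int width  = static_cast<int>(video.get(cv::CAP_PROP_FRAME_WIDTH));   // replaces CV_CAP_PROP_FRAME_WIDTH
    int height = static_cast<int>(video.get(cv::CAP_PROP_FRAME_HEIGHT));
    std::cout << "Video size: " << width << "x" << height << std::endl;

    cv::Mat frame, hsvFrame;
    while (video.read(frame)) {
        cv::cvtColor(frame, hsvFrame, cv::COLOR_BGR2HSV);  // replaces CV_BGR2HSV
        // ... per-pixel processing as in Example 1 ...
    }
}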

Example 2: main

int main(int argc, char * argv[]){
	int patcharray[6]={15,20,25,30,35};
	int minwind[3]={5,10,15};
	FILE *pfilezp;//=fopen("Record.txt","w");
	FILE *objectf;
	FILE *tablef;
	FILE *patchf;
	time_t start,end;
	double wholecost;
	struct tm *ptr;
	int retry;
	int startFrame=0;
	bool nopoint=true;//whether to display points
	bool drawDec=false;//whether to draw the detection boxes
	bool cameraAgain=false;
	bool breaknow=false;//flag used to break out of the outer loop
	bool play=false;//whether to switch to play mode
	char *test[]={
		"-p parameters.yml -s car.mpg -b car.txt",
		"-p ../parameters.yml -s ../datasets/01_david/david.mpg -b ../datasets/01_david/init.txt",
		"-p ../parameters.yml -s ../datasets/02_jumping/jumping.mpg -b ../datasets/02_jumping/init.txt",
		"-p ../parameters.yml -s ../datasets/03_pedestrian1/pedestrian1.mpg -b ../datasets/03_pedestrian1/init.txt",
		"-p ../parameters.yml -s ../datasets/04_pedestrian2/pedestrian2.mpg -b ../datasets/04_pedestrian2/init.txt",
		"-p ../parameters.yml -s ../datasets/05_pedestrian3/pedestrian3.mpg -b ../datasets/05_pedestrian3/init.txt",
		"-p ../parameters.yml -s ../datasets/06_car/car.mpg -b ../datasets/06_car/init.txt",
		"-p ../parameters.yml -s ../datasets/07_motocross/motocross.mpg -b ../datasets/07_motocross/init.txt",
		//"-p ../parameters.yml -s ../datasets/08_volkswagen/volkswagen.mpg -b ../datasets/08_volkswagen/init.txt",
		"-p ../parameters.yml -s ../datasets/09_carchase/carchase.mpg -b ../datasets/09_carchase/init.txt",
		"-p ../parameters.yml -s ../datasets/10_panda/panda.mpg -b ../datasets/10_panda/init.txt",
		"-p ../parameters.yml -s ../datasets/11_test/test2.avi"};
	char *testt[]={"-p parameters.yml -im data"};//,"-p parameters.yml -s car.mpg -b init1.txt",
		//"-p parameters.yml -s test.avi",
	//	"-p parameters.yml -s motocross.mpg -b init2.txt"};
	for(int i=0;i<1;i++){
		for (int flag=0;flag<1;flag++)
		//for (int pi=0;pi<15;pi++)		
		{
			RNG RNG( int64 seed=-1 );
			double costsum[7]={0.0,0.0,0.0,0.0,0.0,0.0,0.0};
			if(flag==1)
				int tempp=1;
			isImage=false;
			breaknow=false;
			retry=-1;
			patchf=fopen("patchgpu.txt", "at");
			pfilezp=fopen("Record.txt","at");
			tablef=fopen("tableout.txt","at");
			objectf=fopen("objectf.txt", "at");			
			drawing_box = false;
			gotBB = false;
			tl = true;
			rep = false;
			fromfile=false;
			start=time(NULL); ptr=localtime(&start);
			printf(asctime(ptr));
			fprintf(pfilezp,asctime(ptr));
			wholecost = (double)getTickCount();
			VideoCapture capture;
			//CvCapture* capture;
			capture.open(1);
			//capture = cvCaptureFromCAM( CV_CAP_ANY);
			FileStorage fs;
			//Read options
			string s = test[flag];
			string del = " ";
			char test2[10][100];
			test2[4][0]='0';//Strangely, test2 keeps its values from the previous iteration here, even though it is declared inside the loop and should be a fresh local variable each time.
			vector<string> strs = splitEx(s, del);
			for ( unsigned int i = 0; i < strs.size(); i++)
			{  
				//  cout << strs[i].c_str() << endl;
				//	test2[i]=strs[i].c_str();
				strcpy(test2[i],strs[i].c_str());
				//cout<<test2[i]<<endl;
			}
			//int tp=strs.size();
			char *p[10];
			char **test3;//not entirely clear what is going on here...
			for(int i=0;i<10;i++)
				p[i]=test2[i];
			test3=p; 	

			read_options(10,test3,capture,fs);

//			video = string(argv[1]);//target video; in the experiments the input arguments are just these three lines
//			capture.open(video);
//			readBB(argv[2]);//target bounding box


			// read_options(argc,argv,capture,fs);
			if(startFrame>0)//the 'r' key was pressed, so the bounding box must be re-selected manually
			{				
				box = Rect( 0, 0, 0, 0 );
				gotBB=false;
			}
			//   read_options(argc,argv,capture,fs);
			//Init camera
			if (!capture.isOpened()&&!isImage)//the video cannot be opened and the input is not an image sequence
			{
				cout << "capture device failed to open!" << endl;
//......... (rest of the code omitted) .........
Author: ILoveFree2, Project: TLD-GPU-zlingh, Lines: 101, Source: run_tld.cpp
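
The test2/test3 buffer juggling in Example 2, which the author's own comments flag as confusing, builds an argv-style array from a single option string. A cleaner sketch of the same idea (makeArgv is a hypothetical helper, not part of the project) keeps the token storage alive next to the pointers:

#include <sstream>
#include <string>
#include <vector>

// Hypothetical helper: split a command line into argv-style pointers.
// The storage vector must outlive the returned pointer vector.
std::vector<char*> makeArgv(const std::string& cmd, std::vector<std::string>& storage) {
    std::istringstream iss(cmd);
    std::string token;
    while (iss >> token)
        storage.push_back(token);

    std::vector<char*> argv;
    for (std::string& s : storage)
        argv.push_back(&s[0]);  // writable buffer, valid while storage lives
    return argv;
}

// Usage in place of the test2/test3 block:
//   std::vector<std::string> storage;
//   std::vector<char*> argv = makeArgv(test[flag], storage);
//   read_options((int)argv.size(), argv.data(), capture, fs);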

Example 3: main

int main(int argc, char** argv)
{
    CommandLineParser parser(argc, argv, keys);
    parser.about("This sample demonstrates the use of the HoG descriptor.");
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    int camera = parser.get<int>("camera");
    string file = parser.get<string>("video");
    if (!parser.check())
    {
        parser.printErrors();
        return 1;
    }

    VideoCapture cap;
    if (file.empty())
        cap.open(camera);
    else
        cap.open(file.c_str());
    if (!cap.isOpened())
    {
        cout << "Can not open video stream: '" << (file.empty() ? "<camera>" : file) << "'" << endl;
        return 2;
    }

    cout << "Press 'q' or <ESC> to quit." << endl;
    cout << "Press <space> to toggle between Default and Daimler detector" << endl;
    Detector detector;
    Mat frame;
    for (;;)
    {
        cap >> frame;
        if (frame.empty())
        {
            cout << "Finished reading: empty frame" << endl;
            break;
        }
        int64 t = getTickCount();
        vector<Rect> found = detector.detect(frame);
        t = getTickCount() - t;

        // show the window
        {
            ostringstream buf;
            buf << "Mode: " << detector.modeName() << " ||| "
                << "FPS: " << fixed << setprecision(1) << (getTickFrequency() / (double)t);
            putText(frame, buf.str(), Point(10, 30), FONT_HERSHEY_PLAIN, 2.0, Scalar(0, 0, 255), 2, LINE_AA);
        }
        for (vector<Rect>::iterator i = found.begin(); i != found.end(); ++i)
        {
            Rect &r = *i;
            detector.adjustRect(r);
            rectangle(frame, r.tl(), r.br(), cv::Scalar(0, 255, 0), 2);
        }
        imshow("People detector", frame);

        // interact with user
        const char key = (char)waitKey(30);
        if (key == 27 || key == 'q') // ESC or 'q'
        {
            cout << "Exit requested" << endl;
            break;
        }
        else if (key == ' ')
        {
            detector.toggleMode();
        }
    }
    return 0;
}
Author: ArkaJU, Project: opencv, Lines: 73, Source: peopledetect.cpp

Example 4: main

int main(int argc, char** argv)
{
  VideoCapture cap;
  HandDetection *hd = nullptr;

  std::srand(std::time(0));
  if(!cap.open(0))
    return 0;
  
  HandGesture hg;
  Mat pierre = imread("./pierre.png");
  Mat feuille = imread("./feuille.png");
  Mat ciseaux = imread("./ciseaux.png");

  for(;;) {
      Mat frame;
      cap >> frame;


      std::stringstream ss;

      if(hd == nullptr) {
	hd = new HandDetection(frame.rows, frame.cols);
	std::cout << "created HandDetection!" << std::endl;
      }

      hg = hd->detect(frame);
      Mat cpuImage = Mat::zeros( feuille.size(), CV_8UC3 );
      Mat playerImage = Mat::zeros( feuille.size(), CV_8UC3 );

      std::string player, cpu;
      switch(hg) {
      case PIERRE : player = "player = pierre"; playerImage = pierre; cpu = "cpu = feuille"; cpuImage = feuille; break;
      case FEUILLE : player = "player = feuille"; playerImage = feuille; cpu = "cpu = ciseaux"; cpuImage = ciseaux; break;
      case CISEAUX : player = "player = ciseaux"; playerImage = ciseaux; cpu = "cpu = pierre"; cpuImage = pierre; break;
      case ERROR : player = "player = nothing detected"; break;
      }
      putText(frame, cpu, Point(5, frame.rows * 0.98), FONT_HERSHEY_PLAIN, 1,  Scalar(0,0,255,255));
      putText(frame, player, Point(frame.cols - player.size() * 9, frame.rows * 0.98), FONT_HERSHEY_PLAIN, 1,  Scalar(0,0,255,255));

      for (int i = 0; i < feuille.rows; i++) {
	for (int j = 0; j < feuille.cols; j++) {
	  frame.at<Vec3b>(i, j) = cpuImage.at<Vec3b>(i, j);
	}
      }

      for (int i = 0; i < feuille.rows; i++) {
	for (int j = 0; j < feuille.cols; j++) {
	  frame.at<Vec3b>(i, j + frame.cols - playerImage.cols) = playerImage.at<Vec3b>(i, j);
	}
      }

      if(hd->handForUi.rows > 0) {
	cv::resize(hd->handForUi, hd->handForUi, feuille.size());

	for (int i = 0; i < feuille.rows; i++) {
	  for (int j = 0; j < feuille.cols; j++) {
	    Vec3b v = frame.at<Vec3b>(i + frame.rows * 0.3, j + frame.cols - playerImage.cols);
	    v[0] = hd->handForUi.at<uchar>(i, j);
	    v[1] = hd->handForUi.at<uchar>(i, j);
	    v[2] = hd->handForUi.at<uchar>(i, j);
	    frame.at<Vec3b>(i + frame.rows * 0.3, j + frame.cols - playerImage.cols) = v;
	  }
	}
      }


      putText(frame, "CPU", Point(feuille.rows * 0.3, feuille.cols * 1.1), FONT_HERSHEY_PLAIN, 2,  Scalar(0,0,255,255));
      putText(frame, "PLAYER", Point(frame.rows * 1.07, feuille.cols * 1.1), FONT_HERSHEY_PLAIN, 2,  Scalar(0,0,255,255));
    
      imshow("Game Frame", frame);
      	  
      if( waitKey(1) == 27 ) break;
    }
  return 0;
}
Author: znoraka, Project: master2, Lines: 76, Source: main.cpp
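
The nested per-pixel copy loops in Example 4 can be expressed as ROI copies, which is both shorter and faster. A sketch, assuming the overlay fits entirely inside the frame (blit is a hypothetical helper):

#include <opencv2/opencv.hpp>

// Blit an overlay into a frame at (x, y) via a region of interest.
void blit(cv::Mat& frame, const cv::Mat& overlay, int x, int y) {
    cv::Rect roi(x, y, overlay.cols, overlay.rows);
    overlay.copyTo(frame(roi));  // copies into the ROI header sharing frame's data
}

// Equivalent to the two copy loops above:
//   blit(frame, cpuImage, 0, 0);
//   blit(frame, playerImage, frame.cols - playerImage.cols, 0);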

Example 5: main

int main(int argc, const char * argv[])
{
    
    ft_data ftdata;
    if (argc<3) {
        cout<<argv[0]<<" user_profile_dir camera_profile.yaml";
        return 0;
    }

    fs::path baseDirPath(argv[1]);
    ASM_Gaze_Tracker poseTracker(baseDirPath / "trackermodel.yaml", fs::path(argv[2]));
    
    
    vector<Point3f> faceCrdRefVecs;
    faceCrdRefVecs.push_back(Point3f(0,0,0));
    faceCrdRefVecs.push_back(Point3f(50,0,0));
    faceCrdRefVecs.push_back(Point3f(0,50,0));
    faceCrdRefVecs.push_back(Point3f(0,0,50));
    
    VideoCapture cam;
    cam.open(0);
    if(!cam.isOpened()){
        return 0;
    }
    Mat rvec, tvec;
    Mat im;
    captureImage(cam,im);
    

    while(true){
        bool success = captureImage(cam, im, true);
        if (success == false) {
            break;
        }
        
        bool succeeded = poseTracker.featureTracking(im);
        if (succeeded)
            poseTracker.estimateFacePose();

        
        
        
        Mat frontim,flipback;
        flip(im,flipback,1);
        
        vector<Point2f> reprjCrdRefPts;
        vector<Point2f> reprjFeaturePts;
        poseTracker.projectPoints(poseTracker.facialPointsIn3D, reprjFeaturePts);
        poseTracker.projectPoints(faceCrdRefVecs, reprjCrdRefPts);
        line(im, reprjCrdRefPts[0], reprjCrdRefPts[1], Scalar(255,0,0),2);
        line(im, reprjCrdRefPts[0], reprjCrdRefPts[2], Scalar(0,255,0),2);
        line(im, reprjCrdRefPts[0], reprjCrdRefPts[3], Scalar(0,0,255),2);
        drawPoints(im, reprjFeaturePts);
        drawStringAtTopLeftCorner(im, "distance to camera:" + boost::lexical_cast<string>(poseTracker.distanceToCamera()));
        imshow("head pose",im);
        
        vector<Point2f> transformedPoints = poseTracker.tracker.points;
        fliplr(transformedPoints, im.size());
        Mat part;
        
        Mat hM = findHomography(poseTracker.facialPointsIn2D ,transformedPoints, 0);
        warpPerspective(flipback(boundingRect(transformedPoints)), frontim, hM, im.size());
        imshow("front", im);

        
        int c = waitKey(1)%256;
        if(c == 'q')break;
        
    }
    
}
Author: fu4k6pingu, Project: eye-tracking, Lines: 71, Source: HeadPoseEstimation.cpp

Example 6: main

int main(){

    // Open connection to Kinect
    cout << "Connecting to Kinect" << endl;
    
    VideoCapture kinect;
    kinect.open(CV_CAP_OPENNI);

    if ( !kinect.isOpened() ){
        cout << "Can't connect to Kinect" << endl;
        return 1;
    }
    
    
    // Registration - Loads calibration data to align depth map with visual camera
    if( kinect.get( CV_CAP_PROP_OPENNI_REGISTRATION ) == 0 ) 
        kinect.set( CV_CAP_PROP_OPENNI_REGISTRATION, 1 );

    // Initialize variables
    
    // Pixel Frames
    Mat depth_map;
    Mat depth_img;
    Mat point_cld;
    Mat point_img;
    Mat rgb_img;
    Mat hsv_img;
    Mat thres_img;
    Mat thres2_img;
    Mat prev_thres_img;
    Mat valid_mask;
    
    // Stored Positions
    vector<Vec3f> circles, circles2, circles3;
    Mat pos3;
    Mat vel3;
    vector<Point> mes2;
    vector<Point> pos2;
    vector<Point> vel2;
        
    // Get Properties
    int width  = kinect.get( CV_CAP_PROP_FRAME_WIDTH );
    int height = kinect.get( CV_CAP_PROP_FRAME_HEIGHT);
    int fps = kinect.get( CV_CAP_PROP_FPS );
    cout << "Resolution    " << width << "x" << height << "    FPS    " << fps << endl;
    
    // Visual Ball Tracking        
    Point center;
    Point predicted;
    Point corrected;
    Point direction;
    
    int radius;
    string color;
    
    // Give ball color thresholds to detect
    /* // Pink 
    color = "Pink";
    Scalar hsv_min  = Scalar(0, 50, 170, 0);
    Scalar hsv_max  = Scalar(10, 180, 256, 0);
    Scalar hsv_min2 = Scalar(170, 50, 170, 0);
    Scalar hsv_max2 = Scalar(256, 180, 256, 0);
    /*/ 
    //* // Green
    color = "Green";
    Scalar hsv_min  = Scalar(40, 50, 70, 0);
    Scalar hsv_max  = Scalar(80, 256, 256, 0);
    /*/ // Yellow
    color = "Yellow";
    Scalar hsv_min  = Scalar(20, 30, 50, 0);
    Scalar hsv_max  = Scalar(60, 256, 256, 0);
    /* 
    color = "Purple";
    Scalar hsv_min  = Scalar(115, 30, 50, 0);
    Scalar hsv_max  = Scalar(160, 256, 256, 0);
    //*/
    Scalar hsv_min2 = hsv_min;
    Scalar hsv_max2 = hsv_max;
    //*/
    cout << "Detecting " << color << " Ball" << endl;
    
    prev_thres_img = Mat::zeros(height, width, CV_8UC1);
    int frames_between_valid = 1;
    pos3.push_back(Vec3f(0,0,0));
    vel3.push_back(Vec3f(0,0,0));
        
    // Filter Parameters
    int erodeAmt = 1;
    int dilateAmt = 1;
    double alpha = 0.9;
    
    // Kalman Filter Computer Vision Tracking
    /*
    KalmanFilter KF3(6, 3, 0);
    KF3.transitionMatrix = *(Mat_<float>(6,6) << 1,0,0,1,0,0,
                                                0,1,0,0,1,0,
                                                0,0,1,0,0,1,
                                                0,0,0,1,0,0,
                                                0,0,0,0,1,0,
                                                0,0,0,0,0,1 );
//......... (rest of the code omitted) .........
Author: ksun, Project: CS225A, Lines: 101, Source: main_ball_tracking.cpp
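
Example 6 opens the Kinect through the CV_CAP_OPENNI backend, but the actual retrieval of the depth and color streams falls in the omitted part. With this backend the usual pattern is one grab() followed by retrieve() calls with a channel constant; a sketch using the same legacy 2.x constants:

#include <opencv2/opencv.hpp>

void readKinectFrames() {
    cv::VideoCapture kinect;
    kinect.open(CV_CAP_OPENNI);

    cv::Mat depthMap, bgrImage;
    // One grab() fetches all synchronized streams; retrieve() selects a channel.
    if (kinect.grab()) {
        kinect.retrieve(depthMap, CV_CAP_OPENNI_DEPTH_MAP);  // CV_16UC1, depth in mm
        kinect.retrieve(bgrImage, CV_CAP_OPENNI_BGR_IMAGE);
    }
}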

Example 7: main

int main(int argc, char** argv)
{
	if (argc != 3) {
		help(argv);
		return 1;
	}

	// Verify the input values
	//VideoCapture cap(argv[1]); // open the passed video

	VideoCapture cap;

	// Futile attempt to try different codecs
	//cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('D', 'I', 'V', '4'));
	//cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('D', 'A', 'V', 'C'));
	//cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('3', 'I', 'V', '2'));
	//cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('3', 'I', 'V', 'X'));
	//cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('A', 'V', 'C', '1'));
	cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('H', '2', '6', '4'));
	cap.open(argv[1]);
	
	if (!cap.isOpened()) {		// check if we succeeded
		cout << "\nCan not open video file '" << argv[1] << "'" << endl;
		return -1;
	} else {
		cout << "Video " << argv[1] << endl;
		cout << " width  =" << cap.get(CV_CAP_PROP_FRAME_WIDTH) << endl;
		cout << " height =" << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
		cout <<	" nframes=" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl;
		cout << " fps    =" << cap.get(CV_CAP_PROP_FPS) << endl;
	}

	// Load the trail of locations
	location_train locations;

	if (locations.load(argv[2]) != location_train::error_code::no_error) {
		cout << "Cannot load the location file '" << argv[2] << "'" << endl;
		return -1;
	}

	// do the simple sanity check
	if (locations.getCount() != cap.get(CV_CAP_PROP_FRAME_COUNT)) {
		cout << "Data points don't match." << endl;
		cout << " n frames   =" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl;
		cout << " n locations=" << locations.getCount() << endl;
		return -1;
	}

	location_train::point_t ul{ 0,0 };
	location_train::point_t lr{ (unsigned long)cap.get(CV_CAP_PROP_FRAME_WIDTH),(unsigned long)cap.get(CV_CAP_PROP_FRAME_HEIGHT) };

	if (locations.verify(ul, lr) != location_train::error_code::no_error) {
		cout << "Data points don't fit into video space." << endl;
		return -1;
	}

	// Set up the detector with default parameters.
	SimpleBlobDetector detector;

	auto loc_index = 0;
	auto fps = cap.get(CV_CAP_PROP_FPS);

	// Process frame by frame
	for (;;)
	{
		Mat frame;
		cap >> frame; // get a new frame from the file
		if (frame.empty()) // end of stream
			break;
		double frame_time = loc_index / fps;

		// Detect blobs.
		std::vector<KeyPoint> keypoints;
		detector.detect(frame, keypoints);
		
		// No need to check the range since we already verified that the number of locations
		// is the same as the number of frames
		auto location = locations[loc_index];
		loc_index++;

		if (keypoints.size() == 0) {
			cout << "Error: No objects found at time: " << frame_time << endl;
		}
		bool located = false;
		for ( auto key : keypoints ) {
			// The found blob should be at least 3x3
			if (key.size > 3) {
				if (inPoint(key.pt, key.size, location)) {
					located = true;
					break;
				}
			}
		}
		if (!located) {
			cout << "Error: No objects at time: " << frame_time << " located at the expected position" << endl;
		}
	}

	// the video file will be deinitialized automatically in VideoCapture destructor
	return 0;
}
Author: ilya1725, Project: ProjectBits, Lines: 99, Source: Source.cpp
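
As the comment in Example 7 already admits, setting CV_CAP_PROP_FOURCC on a capture before open() is typically a no-op: there is no backend yet to receive the property, and for reading, the codec is determined by the file itself. Querying the FOURCC after open() does work; a sketch that decodes the packed value (printFourcc is a hypothetical helper):

#include <opencv2/opencv.hpp>
#include <iostream>

void printFourcc(const char* path) {
    cv::VideoCapture cap(path);
    int fourcc = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
    // The four characters are packed into one int, low byte first.
    char code[5] = {
        static_cast<char>(fourcc & 255),
        static_cast<char>((fourcc >> 8) & 255),
        static_cast<char>((fourcc >> 16) & 255),
        static_cast<char>((fourcc >> 24) & 255),
        '\0'
    };
    std::cout << "FOURCC: " << code << std::endl;
}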

Example 8: main

/*decision navTree(blob blobPosition, blob blobPosition2, Mat finalImage, bool findBlob1, bool findBlob2) {
    decision navDecision;
    if (findBlob1) {                            //if first sample has not been retrieved
        if (blobPosition.maxArea<MAX_AREA) {    //if first sample is not close enough to be retrieved
            navDecision.lookBlob1=true;
            if (blobPosition2.blobDetect && findBlob2) {     //if the second sample is also on screen and we are looking for it
                if (blobPosition2.maxArea<MAX_AREA) {        //if the second sample is not close enough to be retrieved
                    navDecision.lookBlob2=true;
                    if (blobPosition.maxArea>=blobPosition2.maxArea) {  //Navigate towards largest/closest sample
                        navDecision.lspeed=blobPosition.lturn*MAX_SPEED;
                        navDecision.rspeed=blobPosition.rturn*MAX_SPEED;
                        cv::circle(finalImage,cv::Point(blobPosition.xPos,blobPosition.yPos),10,cv::Scalar(0,0,0));
                    } else {
                        navDecision.lspeed=blobPosition2.lturn*MAX_SPEED;
                        navDecision.rspeed=blobPosition2.rturn*MAX_SPEED;
                        cv::circle(finalImage,cv::Point(blobPosition2.xPos,blobPosition2.yPos),10,cv::Scalar(0,0,0));
                    }
                } else {
                    cout<<"Blob 2 was found\n";
                    navDecision.lookBlob2=false;
                }
            } else {                            //if the second sample isn't on screen, then navigate towards first sample
                if (findBlob2) {
                    navDecision.lookBlob2=true;
                } else {
                    navDecision.lookBlob2=false;
                }
                navDecision.lspeed=blobPosition.lturn*MAX_SPEED;
                navDecision.rspeed=blobPosition.rturn*MAX_SPEED;
                cv::circle(finalImage,cv::Point(blobPosition.xPos,blobPosition.yPos),10,cv::Scalar(0,0,0));
            }
        } else {                                //if first sample is close enough to be retrieved
            cout<<"Blob 1 was found\n";         //then stop looking for first sample
            navDecision.lookBlob1=false;
        }
    } else if (findBlob2) {                     //if we found the first sample. but are still looking for the second sample
        if (blobPosition2.maxArea<MAX_AREA) {
            if (blobPosition.blobDetect&&findBlob1) {
                if (blobPosition2.maxArea>blobPosition2.maxArea) {
                    lspeed=blobPosition2.lturn*MAX_SPEED;
                    rspeed=blobPosition2.rturn*MAX_SPEED;
                    cv::circle(finalImage,cv::Point(blobPosition2.xPos,blobPosition2.yPos),10,cv::Scalar(0,0,255));
                } else {
                    lspeed=blobPosition.lturn*MAX_SPEED;
                    rspeed=blobPosition.rturn*MAX_SPEED;
                    cv::circle(finalImage,cv::Point(blobPosition.xPos,blobPosition.yPos),10,cv::Scalar(0,0,0));
                }
            } else {
                lspeed=blobPosition2.lturn*MAX_SPEED;
                rspeed=blobPosition2.rturn*MAX_SPEED;
                cv::circle(finalImage,cv::Point(blobPosition2.xPos,blobPosition2.yPos),10,cv::Scalar(0,0,255));
            }
        } else {
            cout<<"Blob 2 was found\n";
            findBlob2=false;
        }
    }
}*/
int main( int argc, char** argv )
{
    initRoboteq();//Refer to nasaroboteq.c
    VideoCapture cap;
    blob blobPosition, blobPosition2;
    Mat frame, temp, temp2;
    Mat bgrImage, hsvImage, hsvOutputImage, hsvOutputImage2, finalImage;
    static int lspeed, rspeed;
    int x,y;
    int mArea;
    int width;
    bool useMorphOps=true;
    bool findBlob1 = true;
    bool findBlob2 = true;

    cap.open(0); //0:default web cam   1:external web cam
    trackbarInit();

    int fin=1;
    while(fin>=1)
    {
        cap>>frame;
        frame.copyTo(bgrImage);
        width=frame.cols;
        cvtColor(bgrImage, hsvImage, COLOR_BGR2HSV);
        inRange(hsvImage,Scalar(h_low,s_low,v_low), Scalar(h_high,s_high,v_high), hsvOutputImage);
        inRange(hsvImage,Scalar(h_low2,s_low2,v_low2), Scalar(h_high2,s_high2,v_high2), hsvOutputImage2);
        //imshow("Before Morph", hsvOutputImage2);
        if(useMorphOps) {
                morphOps(hsvOutputImage);
                morphOps(hsvOutputImage2);
        }
        finalImage=hsvOutputImage2|hsvOutputImage;
        hsvOutputImage.copyTo(temp);
        hsvOutputImage2.copyTo(temp2);
        blobPosition=blobCenter(temp, width);
        blobPosition2=blobCenter(temp2, width);



        if (findBlob1||findBlob2) {                     //if all samples have not been retrieved
            if (findBlob1) {                            //if first sample has not been retrieved
//......... (rest of the code omitted) .........
Author: szwally, Project: UM-Dearborn-Mars-Rover-Challenge, Lines: 101, Source: goTowardsBlob.cpp
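
Example 8 builds two inRange() masks and merges them with operator| because a target color can span two hue intervals; red in particular wraps around the ends of OpenCV's 0-180 hue axis. A standalone sketch of that pattern (the threshold values are illustrative):

#include <opencv2/opencv.hpp>

// Red hues wrap around 0/180 in OpenCV's HSV, so two masks are OR-ed together.
cv::Mat redMask(const cv::Mat& bgr) {
    cv::Mat hsv, lo, hi, mask;
    cv::cvtColor(bgr, hsv, CV_BGR2HSV);
    cv::inRange(hsv, cv::Scalar(0, 50, 50),   cv::Scalar(10, 255, 255),  lo);
    cv::inRange(hsv, cv::Scalar(170, 50, 50), cv::Scalar(180, 255, 255), hi);
    cv::bitwise_or(lo, hi, mask);
    return mask;
}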

Example 9: main

int main(int argc, char *argv[])
{
	ros::init(argc, argv, "verify_tracking_node");
	ros::NodeHandle n;
	std::string port;
	ros::param::param<std::string>("~port", port, "/dev/ttyACM0");
	int baud;
	ros::param::param<int>("~baud", baud, 57600);
	ros::Rate loop_rate(10);

	ros::Publisher servox_pub = n.advertise<std_msgs::Char>("servox_chatter", 1000);
	ros::Publisher servoy_pub = n.advertise<std_msgs::Char>("servoy_chatter", 1000);
	ros::Publisher motor_pub = n.advertise<std_msgs::Char>("motor_chatter", 1000);

	const int person_number = 3;
	cv_result_t cv_result = CV_OK;
	cv_handle_t handle_detect = NULL;
	cv_handle_t handle_track = NULL;
	cv_handle_t handle_verify = NULL;
	cv_feature_t *p_feature_new_1[person_number];

	int main_return = -1;
	int verify_flag = 0;
	int face_detect_flag = 0;
	VideoCapture capture;
	capture.open(0);         // open the camera
	if (!capture.isOpened()) {
		fprintf(stderr, "Liveness can not open camera!\n");
		return -1;
	}
	int frame_width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
	int frame_height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    int frame_half_width = frame_width >> 1;
	int frame_half_height = frame_height >> 1;
	//printf("width %d height %d \n", frame_width, frame_height);
	Point expect(frame_half_width , frame_half_height);
	handle_verify = cv_verify_create_handle("data/verify.tar");
	if(!handle_verify){
		fprintf(stderr, "failed to init verify handle \n");
		goto RETURN;
	}
	handle_track = cv_face_create_tracker(NULL, CV_FACE_SKIP_BELOW_THRESHOLD);
	if (!handle_track) {
		fprintf(stderr, "fail to init track handle\n");
		goto RETURN;
	}
	handle_detect = cv_face_create_detector(NULL, CV_FACE_SKIP_BELOW_THRESHOLD | CV_DETECT_ENABLE_ALIGN);
	if (!handle_detect) {
		fprintf(stderr, "fail to init detect handle\n");
		//goto RETURN;
		return -1;
	}
	create_verify_feature_db(handle_detect, handle_verify, person_number, p_feature_new_1, frame_width, frame_height);
	while(1)
	{
		verify_flag = verify_from_camera(handle_detect, handle_verify, capture, p_feature_new_1, person_number, frame_width, frame_height);
		if(verify_flag != 1)
			continue;
		face_track(handle_track, capture, expect, frame_width, frame_height, servox_pub, servoy_pub, motor_pub);
	}
	for(int i = 1; i < person_number; i++)
	{
		cv_verify_release_feature(p_feature_new_1[i]);
	}
	// destroy verify handle
RETURN:
	// release the memory of face
	cv_verify_destroy_handle(handle_verify);
	// destroy detect handle
	cv_face_destroy_detector(handle_detect);

	fprintf(stderr, "test finish!\n");
}
Author: Yvaine, Project: speech-robot, Lines: 73, Source: verify_tracking_node2.cpp

Example 10: mainPop

int mainPop()
{
    VideoCapture cap;
    //cap.open(0); 
    cap.open("pool.avi"); 
	//cap.open("vid1.mp4"); 
    if( !cap.isOpened() )
    {

        puts("***Could not initialize capturing...***\n");
        return 0;
    }
    namedWindow( "Capture ", CV_WINDOW_AUTOSIZE);
    namedWindow( "Foreground ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Edges ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Canny ", CV_WINDOW_AUTOSIZE );
    Mat frame,foreground,image,edges,canny,mogcanny,mask;
	calpha=calpha_slider=166;
	cbeta=cbeta_slider=171;
    BackgroundSubtractorMOG2 mog,mogc;
	cap>>frame;
	mask=Mat::zeros(frame.size(),CV_8UC1);
	vector<vector<Point> > maskc;
    for(;;)
    {
        cap>>frame;  
		for(int i=0;i<10;i++)
		{
			if(frame.empty())
				cap>>frame;
		}
        if( frame.empty() )
                break;
        image=frame.clone();
		GaussianBlur(image,image,Size(3, 3), 2, 2 );//faster than a median blur, but lower quality
        mog(image,foreground,-1);
		createTrackbar( "A", "Capture ", &calpha_slider, calpha_slider_max, on_calpha_trackbar );
		createTrackbar( "B", "Capture ", &cbeta_slider, cbeta_slider_max, on_cbeta_trackbar );
		Canny(image,canny,calpha,cbeta);
		mogc.set("backgroundRatio",0.01);
		//mogc.set("nmixtures",1);
		mogc(canny,mogcanny,0.01);
		//bitwise_not(mogcanny,mogcanny,mogcanny);
		int minsize = 5;
		
		/*if(!maskc.empty())
		{
			for( int i = 0; i< maskc.size(); i++ )
			{
				Scalar color = Scalar( 255,255,255 );
				Mat aux = Mat::zeros(foreground.size(),CV_8UC1);
				drawContours( aux, maskc, i, color, 3, 8, noArray(), 0, Point());
				int nzt = countNonZero(aux);
				bitwise_and(mogcanny,aux,aux);
				int nz=countNonZero(aux);
				double per = nz/double(nzt);
				if(per<0.05)
				{
					maskc.erase(maskc.begin()+i);
				}
			}
		}*/
		//maskc.clear();
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
		mask = Mat::zeros( foreground.size(), CV_8UC1 );
		/*findContours(mask,contours,hierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		mask = Mat::zeros( foreground.size(), CV_8UC1 );
		for( int i = 0; i< contours.size(); i++ )
		{
			Scalar color = Scalar( 255,255,255 );
			Mat aux = Mat::zeros(foreground.size(),CV_8UC1);
			drawContours( aux, contours, i, color, 3, 8, hierarchy, 0, Point());
			int nzt = countNonZero(aux);
			bitwise_and(mogcanny,aux,aux);
			int nz=countNonZero(aux);
			double per = nz/double(nzt);
			if(per>0.05)
			{
				drawContours( mask, contours, i, (255,255,255), -1, 8, hierarchy, 0, Point());
			}
		}*/
		findContours(foreground,contours,hierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		Mat drawing = Mat::zeros( foreground.size(), CV_8UC1 );
		
		for( int i = 0; i< contours.size(); i++ )
		{
			Scalar color = Scalar( 255,255,255 );
			Mat aux = Mat::zeros(foreground.size(),CV_8UC1);
			if(contours[i].size()>minsize)
			{
				drawContours( aux, contours, i, color, 3, 8, hierarchy, 0, Point());
				int nzt = countNonZero(aux);
				bitwise_and(mogcanny,aux,aux);
				int nz=countNonZero(aux);
				double per = nz/double(nzt);
				if(per>0.01)
				{
					drawContours( mask, contours, i, color, 3, 8, hierarchy, 0, Point());
					//maskc.push_back(contours[i]);
//......... (rest of the code omitted) .........
Author: Skythunder, Project: ARPool, Lines: 101, Source: BGContours.cpp
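
Examples 10, 12, and 13 construct BackgroundSubtractorMOG2 directly, which is the OpenCV 2.4 API. In OpenCV 3/4 the subtractor is obtained from a factory function and invoked through apply(); a minimal sketch:

#include <opencv2/opencv.hpp>

void subtractBackground(cv::VideoCapture& cap) {
    // Factory function replaces direct construction in OpenCV 3/4.
    cv::Ptr<cv::BackgroundSubtractorMOG2> mog = cv::createBackgroundSubtractorMOG2();

    cv::Mat frame, foreground;
    while (cap.read(frame)) {
        mog->apply(frame, foreground, -1);  // -1 = automatically chosen learning rate
    }
}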

Example 11: main

int main(int argc, const char* argv[])
{
    const char* keys =
        "{ h help     | false           | print help message }"
        "{ l left     |                 | specify left image }"
        "{ r right    |                 | specify right image }"
        "{ o output   | tvl1_output.jpg | specify output save path }"
        "{ c camera   | 0               | enable camera capturing }"
        "{ m cpu_mode | false           | run without OpenCL }"
        "{ v video    |                 | use video as input }";

    CommandLineParser cmd(argc, argv, keys);

    if (cmd.has("help"))
    {
        cout << "Usage: tvl1_optical_flow [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }

    string fname0 = cmd.get<string>("l");
    string fname1 = cmd.get<string>("r");
    string vdofile = cmd.get<string>("v");
    string outpath = cmd.get<string>("o");
    bool useCPU = cmd.get<bool>("m");
    bool useCamera = cmd.get<bool>("c");
    int inputName = cmd.get<int>("c");

    UMat frame0, frame1;
    imread(fname0, cv::IMREAD_GRAYSCALE).copyTo(frame0);
    imread(fname1, cv::IMREAD_GRAYSCALE).copyTo(frame1);
    cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();

    UMat flow;
    Mat show_flow;
    vector<UMat> flow_vec;
    if (frame0.empty() || frame1.empty())
        useCamera = true;

    if (useCamera)
    {
        VideoCapture capture;
        UMat frame, frameCopy;
        UMat frame0Gray, frame1Gray;
        UMat ptr0, ptr1;

        if(vdofile.empty())
            capture.open( inputName );
        else
            capture.open(vdofile.c_str());

        if(!capture.isOpened())
        {
            if(vdofile.empty())
                cout << "Capture from CAM " << inputName << " didn't work" << endl;
            else
                cout << "Capture from file " << vdofile << " failed" <<endl;
            goto nocamera;
        }

        cout << "In capture ..." << endl;
        for(int i = 0;; i++)
        {
            if( !capture.read(frame) )
                break;

            if (i == 0)
            {
                frame.copyTo( frame0 );
                cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
            }
            else
            {
                if (i%2 == 1)
                {
                    frame.copyTo(frame1);
                    cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);
                    ptr0 = frame0Gray;
                    ptr1 = frame1Gray;
                }
                else
                {
                    frame.copyTo(frame0);
                    cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
                    ptr0 = frame1Gray;
                    ptr1 = frame0Gray;
                }

                alg->calc(ptr0, ptr1, flow);
                split(flow, flow_vec);

                if (i%2 == 1)
                    frame1.copyTo(frameCopy);
                else
                    frame0.copyTo(frameCopy);
                getFlowField(flow_vec[0].getMat(ACCESS_READ), flow_vec[1].getMat(ACCESS_READ), show_flow);
                imshow("tvl1 optical flow field", show_flow);
            }

//......... (rest of the code omitted) .........
Author: 13983441921, Project: opencv, Lines: 101, Source: tvl1_optical_flow.cpp
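
Example 11 works on UMat, i.e. OpenCV's transparent API, which dispatches to OpenCL automatically when a device is available. The cpu_mode flag presumably turns that dispatch off in the omitted part of the sample; the standard switch is shown in this sketch:

#include <opencv2/core/ocl.hpp>

// Force UMat operations to run on the CPU instead of OpenCL.
void disableOpenCL() {
    cv::ocl::setUseOpenCL(false);
}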

Example 12: mainD2

int mainD2()
{
    VideoCapture cap;
    //cap.open(0); 
    cap.open("pool.avi"); 
	//cap.open("vid1.mp4"); 
    if( !cap.isOpened() )
    {

        puts("***Could not initialize capturing...***\n");
        return 0;
    }
    namedWindow( "Capture ", CV_WINDOW_AUTOSIZE);
    namedWindow( "Foreground ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Edges ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Canny ", CV_WINDOW_AUTOSIZE );
    Mat frame,foreground,image,edges,canny,mask;
	IplImage *iplframe;
	calpha=calpha_slider=166;
	cbeta=cbeta_slider=171;
    BackgroundSubtractorMOG2 mog;
    int fps=cap.get(CV_CAP_PROP_FPS);
    if(fps<=0)
        fps=10;
    else
        fps=1000/fps;
	cap>>frame;
	//mask=Mat::zeros(frame.size(),CV_8UC1);
	/*Mat cmask;
	Canny(frame,cmask,calpha,cbeta);*/
    for(;;)
    {
        cap>>frame;  
		for(int i=0;i<10;i++)
		{
			if(frame.empty())
				cap>>frame;
		}
        if( frame.empty() )
                break;
        image=frame.clone();
		GaussianBlur(image,image,Size(3, 3), 2, 2 );//faster than a median blur, but lower quality
        mog(image,foreground,-1);
		createTrackbar( "A", "Capture ", &calpha_slider, calpha_slider_max, on_calpha_trackbar );
		createTrackbar( "B", "Capture ", &cbeta_slider, cbeta_slider_max, on_cbeta_trackbar );
		Canny(image,canny,calpha,cbeta);
		//Canny(foreground,edges,calpha,cbeta);
		//canny=edges.clone();
		Rect rect = Rect(42, 42, 435, 205);
		//edges = edges(rect);
		//canny = canny(rect);
		//fin=fin(rect);
		//foreground = foreground(rect);
		
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
		findContours(foreground,contours,hierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		Mat drawing = Mat::zeros( foreground.size(), CV_8UC1 );
		int momsize=0;
		int minsize = 5;
		for( int i = 0; i< contours.size(); i++ )
		{
			Scalar color = Scalar( 255,255,255 );
			if(contours[i].size()>minsize)
				drawContours( drawing, contours, i, color, 3, 8, hierarchy, 0, Point());
				momsize++;
		}
		
		Mat band=Mat::zeros(foreground.size(),CV_8UC1);
		//subtract(mask,cmask,mask);
		bitwise_and(canny,mask,mask);
		bitwise_and(canny,drawing,band);
		bitwise_or(mask,band,mask);
		/*Mat band2=Mat::zeros(band.size(), CV_8UC1);
		bitwise_and(canny,drawing,band2);
		bitwise_and(band,band2,band);*/
		
		//band.copyTo(mask);
		//bitwise_and(canny,mask,mask);
		/*
		/// Get the moments
		vector<Moments> mu(momsize);
		int j=0;
		for( int i = 0; i < contours.size(); i++ )
		{ 
			if(contours[i].size()<maxsize&&contours[i].size()>minsize)
				mu[j] = moments( contours[i], false ); 
				j++;
		}

		///  Get the mass centers:
		vector<Point2f> mc( momsize );
		for( int i = 0; i < momsize; i++ )
		{ 
			mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); 
		}
		//draw
		for( int i = 0; i < momsize; i++ )
		{ 
			circle(drawing,mc[i],10,Scalar(255,0,0),-1);
//......... (rest of the code omitted) .........
Author: Skythunder, Project: ARPool, Lines: 101, Source: BGContours.cpp

Example 13: mainD3

int mainD3()
{
    VideoCapture cap;
    //cap.open(0); 
    cap.open("pool.avi"); 
	//cap.open("vid1.mp4"); 
    if( !cap.isOpened() )
    {

        puts("***Could not initialize capturing...***\n");
        return 0;
    }
    namedWindow( "Capture ", CV_WINDOW_AUTOSIZE);
    namedWindow( "Foreground ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Edges ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Canny ", CV_WINDOW_AUTOSIZE );
    Mat frame,foreground,image,edges,canny,mask;
	IplImage *iplframe;
	calpha=calpha_slider=166;
	cbeta=cbeta_slider=171;
    BackgroundSubtractorMOG2 mog;
    int fps=cap.get(CV_CAP_PROP_FPS);
    if(fps<=0)
        fps=10;
    else
        fps=1000/fps;
	cap>>frame;
	mask=Mat::zeros(frame.size(),CV_8UC1);
	/*Mat cmask;
	Canny(frame,cmask,calpha,cbeta);*/
    for(;;)
    {
        cap>>frame;  
		for(int i=0;i<10;i++)
		{
			if(frame.empty())
				cap>>frame;
		}
        if( frame.empty() )
                break;
        image=frame.clone();
		GaussianBlur(image,image,Size(3, 3), 2, 2 );//faster than a median blur, but lower quality
        mog(image,foreground,-1);
		createTrackbar( "A", "Capture ", &calpha_slider, calpha_slider_max, on_calpha_trackbar );
		createTrackbar( "B", "Capture ", &cbeta_slider, cbeta_slider_max, on_cbeta_trackbar );
		Canny(image,canny,calpha,cbeta);
		//Canny(foreground,edges,calpha,cbeta);
		//canny=edges.clone();
		Rect rect = Rect(42, 42, 435, 205);
		//edges = edges(rect);
		//canny = canny(rect);
		//fin=fin(rect);
		//foreground = foreground(rect);
		
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
		findContours(foreground,contours,hierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		Mat drawing = Mat::zeros( foreground.size(), CV_8UC1 );
		int momsize=0;
		int minsize = 5;
		for( int i = 0; i< contours.size(); i++ )
		{
			Scalar color = Scalar( 255,255,255 );
			if(contours[i].size()>minsize)
				drawContours( drawing, contours, i, color, 3, 8, hierarchy, 0, Point());
				momsize++;
		}
		vector<vector<Point> > ccontours;
		vector<Vec4i> chierarchy;
		findContours(canny,ccontours,chierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		Scalar color = Scalar( 255,255,255 );
		vector<vector<Point> > mcontours;
		vector<Vec4i> mhierarchy;
		findContours(mask,mcontours,mhierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		for( int i = 0; i< mcontours.size(); i++ )
		{
			int in=0;
			int tot = mcontours[i].size(); 
			for(int j=0;j<tot;j++)
			{
				Point p = mcontours[i][j];
				if(canny.at<uchar>(p.y,p.x) > 0)
					in++;
			}
			double f = in/double(tot);
			if(f>0.5&&ccontours[i].size()>minsize)
				drawContours( mask, ccontours, i, color, 3, 8, hierarchy, 0, Point());
			//compare the canny edges with the mask and reset the mask
		}
		Mat auxmask = Mat::zeros(foreground.size(),CV_8UC1);
		for( int i = 0; i< ccontours.size(); i++ )
		{
			int in=0;
			int tot = ccontours[i].size(); 
			for(int j=0;j<tot;j++)
			{
				Point p = ccontours[i][j];
				if(drawing.at<uchar>(p.y,p.x) > 0)
					in++;
			}
//......... (rest of the code omitted) .........
Author: Skythunder, Project: ARPool, Lines: 101, Source: BGContours.cpp

Example 14: main

int main( int argc, const char** argv )
{
    VideoCapture cap;
    Rect trackWindow;
    int hsize = 16;
    float hranges[] = {0,180};
    const float* phranges = hranges;
    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        help();
        return 0;
    }
    int camNum = parser.get<int>(0);
    cap.open(camNum);

    if( !cap.isOpened() )
    {
        help();
        cout << "***Could not initialize capturing...***\n";
        cout << "Current parameter's value: \n";
        parser.printMessage();
        return -1;
    }


    Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),    // Acquire input size
                  (int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));

    VideoWriter videoStream;
    videoStream.open("./VirtualPiano.mp4", -1, cap.get(CV_CAP_PROP_FPS), S, true);
    if (!videoStream.isOpened())
    {
        cout  << "Could not open the output video." << endl;
        return -1;
    }
    cout << hot_keys;
    //namedWindow( "Histogram", 0 );
    namedWindow( "VirtualPiano", 0 );
    resizeWindow( "VirtualPiano", WINDOW_WIDTH, WINDOW_HEIGHT);
    setMouseCallback( "VirtualPiano", onMouse, 0 );
    //createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
    //createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
    //createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );

    Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
    RotatedRect trackBox;
    bool paused = false;

    for(;;)
    {
        if( !paused )
        {
            cap >> frame;
            if( frame.empty() )
                break;
        }

        frame.copyTo(image);
        Mat flippedImage;
        flip(image, flippedImage, 1);
        image = flippedImage;
        if( !paused )
        {
            cvtColor(image, hsv, COLOR_BGR2HSV);

            if( trackObject )
            {
                int _vmin = vmin, _vmax = vmax;

                inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
                        Scalar(180, 256, MAX(_vmin, _vmax)), mask);
                int ch[] = {0, 0};
                hue.create(hsv.size(), hsv.depth());
                mixChannels(&hsv, 1, &hue, 1, ch, 1);

                if( trackObject < 0 )
                {
                    Mat roi(hue, selection), maskroi(mask, selection);
                    calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                    normalize(hist, hist, 0, 255, NORM_MINMAX);

                    trackWindow = selection;
                    trackObject = 1;

                    histimg = Scalar::all(0);
                    int binW = histimg.cols / hsize;
                    Mat buf(1, hsize, CV_8UC3);
                    for( int i = 0; i < hsize; i++ )
                        buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
                    cvtColor(buf, buf, COLOR_HSV2BGR);

                    for( int i = 0; i < hsize; i++ )
                    {
                        int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
                        rectangle( histimg, Point(i*binW,histimg.rows),
                                   Point((i+1)*binW,histimg.rows - val),
                                   Scalar(buf.at<Vec3b>(i)), -1, 8 );
                    }
                }
//......... (rest of the code omitted) .........
Author: quirell, Project: handrecognition, Lines: 101, Source: virtual_piano.cpp
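
The tracking step of Example 14 falls inside the omitted part. The canonical continuation, following OpenCV's camshiftdemo sample and reusing the variables declared above (hue, hist, backproj, mask, trackWindow, trackBox, phranges), would look roughly like this fragment:

// Back-project the hue histogram and run CamShift on the result.
calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
backproj &= mask;
trackBox = CamShift(backproj, trackWindow,
                    TermCriteria(TermCriteria::EPS | TermCriteria::COUNT, 10, 1));
ellipse(image, trackBox, Scalar(0, 0, 255), 3, LINE_AA);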

Example 15: main

int main( int argc, char** argv )
{
    Mat img_template_cpu = imread( argv[1],IMREAD_GRAYSCALE);
    gpu::GpuMat img_template;
    img_template.upload(img_template_cpu);

    //Detect keypoints and compute descriptors of the template
    gpu::SURF_GPU surf;
    gpu::GpuMat keypoints_template, descriptors_template;

    surf(img_template,gpu::GpuMat(),keypoints_template, descriptors_template);

    //Matcher variables
    gpu::BFMatcher_GPU matcher(NORM_L2);   

    //VideoCapture from the webcam
    gpu::GpuMat img_frame;
    gpu::GpuMat img_frame_gray;
    Mat img_frame_aux;
    VideoCapture cap;
    cap.open(0);
    if (!cap.isOpened()){
        cerr << "cannot open camera" << endl;
        return -1;
    }
    int nFrames = 0;
    uint64 totalTime = 0;
    //main loop
    for(;;){
        int64 start = getTickCount();
        cap >> img_frame_aux;
        if (img_frame_aux.empty())
            break;
        img_frame.upload(img_frame_aux);
        cvtColor(img_frame,img_frame_gray, CV_BGR2GRAY);

        //Step 1: Detect keypoints and compute descriptors
        gpu::GpuMat keypoints_frame, descriptors_frame;
        surf(img_frame_gray,gpu::GpuMat(),keypoints_frame, descriptors_frame);

        //Step 2: Match descriptors
        vector<vector<DMatch> >matches;
        matcher.knnMatch(descriptors_template,descriptors_frame,matches,2);

        //Step 3: Filter results
        vector<DMatch> good_matches;
        float ratioT = 0.7;
        for(int i = 0; i < (int) matches.size(); i++)
        {
            if((matches[i][0].distance < ratioT*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }
        // Step 4: Download results
        vector<KeyPoint> keypoints1, keypoints2;
        vector<float> descriptors1, descriptors2;
        surf.downloadKeypoints(keypoints_template, keypoints1);
        surf.downloadKeypoints(keypoints_frame, keypoints2);
        surf.downloadDescriptors(descriptors_template, descriptors1);
        surf.downloadDescriptors(descriptors_frame, descriptors2);

        //Draw the results
        Mat img_result_matches;
        drawMatches(img_template_cpu, keypoints1, img_frame_aux, keypoints2, good_matches, img_result_matches);
        imshow("Matching a template", img_result_matches);

        int64 time_elapsed = getTickCount() - start;
        double fps = getTickFrequency() / time_elapsed;
        totalTime += time_elapsed;
        nFrames++;
        cout << "FPS : " << fps << endl;

        int key = waitKey(30);
        if (key == 27)
            break;
    }
    double meanFps = getTickFrequency() / (totalTime / nFrames);
    cout << "Mean FPS: " << meanFps << endl;

    return 0;
}
Author: xenron, Project: sandbox-vision-opencv, Lines: 82, Source: matchTemplateGPU.cpp
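
Example 15 targets the OpenCV 2.4 gpu:: module. In OpenCV 3+ that module became cv::cuda::, and GPU SURF moved to the opencv_contrib xfeatures2d package; assuming a CUDA-enabled contrib build, the equivalent setup looks roughly like this sketch:

#include <opencv2/core/cuda.hpp>
#include <opencv2/xfeatures2d/cuda.hpp>

void detectOnGpu(const cv::cuda::GpuMat& imgGray) {
    // SURF_CUDA replaces gpu::SURF_GPU; the call pattern is unchanged.
    cv::cuda::SURF_CUDA surf;
    cv::cuda::GpuMat keypoints, descriptors;
    surf(imgGray, cv::cuda::GpuMat(), keypoints, descriptors);
}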


Note: The VideoCapture::open examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, follow the corresponding project's license; do not reproduce without permission.