

C++ CV_RGB Function Code Examples

This article collects and summarizes typical usage examples of the C++ CV_RGB function. If you are struggling with questions such as what exactly CV_RGB does, how to call it, or what real-world uses of CV_RGB look like, the curated code examples below may help.


The following presents 15 code examples of the CV_RGB function, ordered by popularity by default.
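Before diving into the examples, it helps to know what CV_RGB actually is. In OpenCV's legacy C API it is a macro that packs red, green, and blue values into a CvScalar in the BGR channel order OpenCV uses internally: CV_RGB(r, g, b) expands to cvScalar(b, g, r, 0). Below is a minimal, self-contained sketch of typical usage; the image size and window name are arbitrary illustrative choices.

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main()
{
    // A blank 3-channel, 8-bit image, filled with white.
    IplImage *img = cvCreateImage(cvSize(300, 200), IPL_DEPTH_8U, 3);
    cvSet(img, CV_RGB(255, 255, 255), NULL);

    // CV_RGB(r, g, b) reorders the channels into OpenCV's internal BGR
    // layout, so this circle really is drawn in red.
    cvCircle(img, cvPoint(150, 100), 40, CV_RGB(255, 0, 0), CV_FILLED, 8, 0);

    cvNamedWindow("CV_RGB demo", CV_WINDOW_AUTOSIZE);
    cvShowImage("CV_RGB demo", img);
    cvWaitKey(0);

    cvReleaseImage(&img);
    cvDestroyWindow("CV_RGB demo");
    return 0;
}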

Example 1: show

static void show()
{
    if(!exist_image || !exist_scan){
        return;
    }

    IplImage* image_view = cvCreateImage(cvGetSize(&image), image.depth, image.nChannels);
    cvCopy(&image, image_view);

    float min_d, max_d;
    min_d = max_d = scan_image.distance.at(0);
    for (int i = 1; i < IMAGE_WIDTH * IMAGE_HEIGHT; i++) {
        float di = scan_image.distance.at(i);
        max_d = di > max_d ? di : max_d;
        min_d = di < min_d ? di : min_d;
    }
    float wid_d = max_d - min_d;

    /*
     * Plot depth points on an image
     */
    CvPoint pt;
    int height, width;
    for(int i = 0; i < (int)scan_image.distance.size(); i++) {
        height = (int)(i % IMAGE_HEIGHT);
        width = (int)(i / IMAGE_HEIGHT);
        if(scan_image.distance.at(i) != 0.0) {
            pt.x = width;
            pt.y = height;
            // NOTE: the channel order of `colormap` is assumed to be (r, g, b)
            // here; OpenCV Mats are BGR by default, in which case color[0]
            // would be blue, so verify how colormap was built.
            int colorid = wid_d ? ( (scan_image.distance.at(i) - min_d) * 255 / wid_d ) : 128;
            cv::Vec3b color = colormap.at<cv::Vec3b>(colorid);
            int r = color[0];
            int g = color[1];
            int b = color[2];
            cvCircle(image_view, pt, 2, CV_RGB(r, g, b), CV_FILLED, 8, 0);
        }
    }


    drawRects(image_view,
              car_fused_objects.obj,
              cvScalar(255.0, 255.0, 0, 0),
              (image_view->height) * 0.3);

    drawRects(image_view,
              pedestrian_fused_objects.obj,
              cvScalar(0.0, 255.0, 0, 0),
              (image_view->height) * 0.3);

    /* put distance text on the image */
    putDistance(image_view,
                car_fused_objects.obj,
                (image_view->height) * 0.3,
                car_fused_objects.type.c_str());
    putDistance(image_view,
                pedestrian_fused_objects.obj,
                (image_view->height) * 0.3,
                pedestrian_fused_objects.type.c_str());

    /*
     * Show image
     */
    cvShowImage(window_name, image_view);
    cvWaitKey(2);
    cvReleaseImage(&image_view);
}
Developer: Keerecles, Project: Autoware, Lines: 65, Source: scan_image_d_viewer.cpp
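One detail worth noting in Example 1: it mixes CV_RGB(r, g, b) with raw cvScalar(...) calls, and the two use opposite channel orders. Legacy OpenCV scalars are BGR, so cvScalar(255.0, 255.0, 0, 0) is cyan (blue and green at full intensity), the same color as CV_RGB(0, 255, 255), not CV_RGB(255, 255, 0). A tiny runnable check:

#include <opencv/cv.h>
#include <stdio.h>

int main()
{
    // Both calls produce the scalar {255, 255, 0, 0} in BGR channel order.
    CvScalar viaScalar = cvScalar(255.0, 255.0, 0, 0);  // blue=255, green=255 -> cyan
    CvScalar viaMacro  = CV_RGB(0, 255, 255);           // expands to cvScalar(255, 255, 0, 0)
    printf("%s\n", (viaScalar.val[0] == viaMacro.val[0] &&
                    viaScalar.val[1] == viaMacro.val[1] &&
                    viaScalar.val[2] == viaMacro.val[2]) ? "equal" : "different");
    return 0;
}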

Example 2: imageCallback

void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
	//bridge that will transform the message (image) from ROS code back to "image" code
  sensor_msgs::CvBridge bridge;
  fprintf(stderr, "\n callback function \n");
  //publish data (obstacle waypoints) back to the boat
  //ros::NodeHandle n;
  //std_msgs::Float32 xWaypoint_msg;         // X coordinate obstacle message
  //std_msgs::Float32 yWaypoint_msg;         // Y coordinate obstacle message
  //publish the waypoint data             
  //ros::Publisher waypoint_info_pub = n.advertise<std_msgs::Float32>("waypoint_info", 1000);
  //ros::Publisher Ywaypoint_info_pub = n.advertise<std_msgs::Float32>("waypoint_info", 1000);
  //std::stringstream ss;
  
  /***********************************************************************/
    //live image coming streamed straight from the boat's camera
    IplImage* boatFront = bridge.imgMsgToCv(msg, "bgr8");
    //The boat takes flipped images, so you need to flip them back to normal
    cvFlip(boatFront, boatFront, 0);
    IplImage* backUpImage = cvCloneImage(boatFront);
    boatFront->origin = IPL_ORIGIN_TL;   //sets image origin to top left corner
    int X = boatFront->height;
    int Y = boatFront->width;
    //cout << "height " << X << endl;
    //cout << "width " << Y << endl;    
    
    /*********************Image Filtering variables****************************/
    //these images are used for segmenting objects from the overall background 
    //create a one channel image to convert from RGB to GRAY
    IplImage* grayImage = cvCreateImage(cvGetSize(boatFront),IPL_DEPTH_8U,1);
    //convert grayImage to binary (final step after converting from GRAY)
    IplImage* bwImage = cvCreateImage(cvGetSize(grayImage),IPL_DEPTH_8U,1);
    //variables used for the flood fill segmentation
    CvPoint seed_point = cvPoint(boatFront->height/1.45,0);       //not sure how this variable works
    CvScalar color = CV_RGB(250,0,0);
    CvMemStorage* grayStorage = NULL;     //memory storage for contour sequence
    CvSeq* contours = 0;
    // get blobs and filter them using their area
    //IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL);
    //IplImage* original, *originalThr;
    //IplImage* segmentated = cvCreateImage(cvGetSize(boatFront), 8, 1);
    //unsigned int blobNumber = 0;
    //IplImage* labelImg = cvCreateImage(cvGetSize(boatFront), IPL_DEPTH_LABEL, 1);
    CvMoments moment;
    
    /***********************************************************************/
    //boat's edge distance from the camera. This is used for visual calibration
    //to know the distance from the boat to the nearest obstacles.
    //With respect to the mounted camera, distance is 21 inches (0.5334 m) side to side
    //and 15 inches (0.381 m).
    //float boatFrontDistance = 0.381;    //distance in meters
    //float boatSideDistance = 0.5334;    //distance in meters
    // These variables tell the distance from the center bottom of the image
    // (the camera) to the square surrounding a the obstacle
    float xObstacleDistance = 0.0;
    float yObstacleDistance = 0.0;
    float obstacleDistance = 0.0;
    float obstacleHeading = 0.0;
    
    int pixelsNumber = 50;  //number of pixels for an n x n matrix and # of neighbors
    const int arraySize = pixelsNumber;
    const int threeArraySize = pixelsNumber;
    //if n gets changed, then the algorithm might have to be
    //recalibrated. Try to keep it constant
    //these variables are used for the k nearest neighbors
    //int accuracy;
    //responses for each of the classifications
    float responseWaterH, responseWaterS, responseWaterV; 
    float responseGroundH, responseGroundS, responseGroundV;
    float responseSkyH, responseSkyS, responseSkyV;
    float averageHue = 0.0;
    float averageSat = 0.0;
    float averageVal = 0.0;
    CvMat* trainClasses = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
    //used with the classifier 
    CvMat* trainClassesH = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    CvMat* trainClassesS = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    CvMat* trainClassesV = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    //CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
    //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
    //used with the classifier 
    /*CvMat* nearestWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* nearestSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    //Distance
    CvMat* distanceWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
    CvMat* distanceSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
//......... some code omitted here .........
Developer: bubgum, Project: crw-cmu, Lines: 101, Source: my_subscriber-14-BETA.cpp

Example 3: cvtColor

void face_detector::face_marker_lbp(Mat *frame)
{
    static vector<Rect> faces;
    static Mat frame_gray;
    cvtColor(*frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);
    static String face_cascade_name = "lbpcascade_frontalface.xml";
    static String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
    static CascadeClassifier face_cascade;
    static CascadeClassifier eyes_cascade;
    static bool loaded = false;
    if (!loaded) {
        if(!face_cascade.load(face_cascade_name)) {
            fprintf(stderr, "Could not load face classifier cascade!");
            exit(1);
        }
        if(!eyes_cascade.load( eyes_cascade_name)) {
            fprintf(stderr, "Could not load eye classifier cascade!");
            exit(1);
        }
    }
    loaded = true;
    static Cgt_Rect _area;
    static Rect largestFace;
    static Mat smallImgROI;
    static Rect eyeArea;
    static Cgt_Eye _iris_point;
    static Rect leftEyeRect(0,0,0,0), rightEyeRect(0,0,0,0);
    static vector<Rect> eyes;
    static Point left_top, right_bottom;

    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0, Size(40, 40));
    if(faces.empty() == false) {
        // pick the largest face
        largestFace.width=0;
        largestFace.height=0;
        for (vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++) {
            if ((r->width * r->height) > (largestFace.width * largestFace.height))
                largestFace = *r;
        }

        // assign the largest face region to _area
        _area.left  = largestFace.x;
        _area.right  = largestFace.x + largestFace.width;
        _area.top  = largestFace.y;
        _area.bottom = largestFace.y + largestFace.height;

        /////////// detect eyes within the largest face ////////////

        eyeArea = largestFace;
        eyeArea.height = eyeArea.height/1.2; // detect eyes only in the upper part of the face to reduce the error rate; parameter tweaked because using only the upper half sometimes fails to detect them
        smallImgROI = (*frame)(eyeArea);
        eyes_cascade.detectMultiScale(smallImgROI, eyes, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
        if(eyes.size()>=2) { // at least two eyes must be detected
            vector<Rect>::const_iterator nr = eyes.begin();
            leftEyeRect = *nr;
            nr++;
            rightEyeRect = *nr;
        } else {
            //fprintf(stderr, "At least two eyes must be detected!\n");
            return;
        }

        _iris_point.xleft = cvRound(largestFace.x + leftEyeRect.x + leftEyeRect.width*0.5);   // x coordinate of the left eye center
        _iris_point.yleft = cvRound(largestFace.y + leftEyeRect.y + leftEyeRect.height*0.5);  // y coordinate of the left eye center
        _iris_point.xright = cvRound(largestFace.x + rightEyeRect.x + rightEyeRect.width*0.5);  // x coordinate of the right eye center
        _iris_point.yright = cvRound(largestFace.y + rightEyeRect.y + rightEyeRect.height*0.5); // y coordinate of the right eye center
        // post-validation of the eyes:
        // the left eye must not be to the right of the right eye
//        if(iris_point.xleft >= iris_point.xright )
//            fprintf(stderr, "11111111111"), nRetCode = false;
        // eyes must not lie on the border (the coordinates are initialized to 0, so fewer than two detected eyes also counts as a failed detection)
        if ((_iris_point.xleft==0) || (_iris_point.yleft==0) ||(_iris_point.xright==0) || (_iris_point.yright==0) )
            return;
        // the two eyes must not be tilted too much vertically (also filters some false detections)
        if (abs(_iris_point.yright-_iris_point.yleft) > (largestFace.width/3) )
            return;
        // the horizontal eye spacing must not be less than 1/4 of the face width (also filters some false detections)
        if (abs(_iris_point.xright-_iris_point.xleft) < (largestFace.width/4) )
            return;

        // draw the detected face box and eye markers, for verification and debugging
        left_top.x = _area.left;
        left_top.y = _area.top;
        right_bottom.x = _area.right;
        right_bottom.y = _area.bottom;
        rectangle(*frame, left_top, right_bottom, CV_RGB(0,255,0), 2, 8, 0);
        left_top.x = _iris_point.xleft;
        left_top.y = _iris_point.yleft;
        right_bottom.x = left_top.x;
        right_bottom.y = left_top.y;
        rectangle(*frame, left_top, right_bottom, CV_RGB(90, 255, 0), 2, 8, 0);
        left_top.x = _iris_point.xright;
        left_top.y = _iris_point.yright;
        right_bottom.x = left_top.x;
        right_bottom.y = left_top.y;
        rectangle(*frame, left_top, right_bottom, CV_RGB(0, 255, 0), 2, 8, 0);
    }
}
Developer: innoink, Project: SiftFaceRec, Lines: 99, Source: face_detector.cpp

Example 4: faceDetection

Mat faceDetection(Mat &videoFrame, double scaleFactor, int scaledWidth, bool eyesDetect, bool mouthDetect, CascadeClassifier &mFaceDetector, CascadeClassifier &mMouthDetector, CascadeClassifier &mEyeDetector, QString personName ) {
    Mat defaultFace;
    Mat grayscaleFace;
    Mat equalizedFace;
    Mat resizedFace;
    Mat savedFace;

const double CHANGE_IMAGE = 0.5; //0.3;      // Change in face from last time
const double CHANGE_TIME = 0.5;//1.0;       // Time change old - new

/*grayscale image*/
    videoFrame.copyTo( defaultFace );

     if (defaultFace.channels() == 3) {
         cvtColor(defaultFace, grayscaleFace, CV_BGR2GRAY);
     }
     else if (defaultFace.channels() == 4) {
         cvtColor(defaultFace, grayscaleFace, CV_BGRA2GRAY);
     }
     else {
         grayscaleFace = defaultFace;
     }

/*shrunk image*/
     float scale = defaultFace.cols / (float)scaledWidth;
     if (defaultFace.cols > scaledWidth) {
         int scaledHeight = cvRound(defaultFace.rows / scale);
         resize(grayscaleFace, resizedFace, Size(scaledWidth, scaledHeight));
     }
     else {
         resizedFace = grayscaleFace;
     }

/*face detection*/
     vector< cv::Rect > faceVec;
     mFaceDetector.detectMultiScale( resizedFace, faceVec, scaleFactor );


     for( size_t i=0; i<faceVec.size(); i++ ) {
          cv::rectangle( defaultFace, faceVec[i], CV_RGB(255,200,0), 2 );
          cv::Mat face = resizedFace( faceVec[i] );
          // EyesDetection
          if( eyesDetect == 1 ) {
                vector< cv::Rect > eyeVec;
                mEyeDetector.detectMultiScale(face, eyeVec);
                for( size_t j=0; j<eyeVec.size(); j++ ) {
                    cv::Rect rect = eyeVec[j];
                    rect.x += faceVec[i].x;
                    rect.y += faceVec[i].y;
                    //saveFace(face);
                    cv::rectangle(resizedFace, rect, CV_RGB(0,255,0), 2 );
                }
            }
         // MouthDetection
         /* if a mouth is detected in the lower half of the face, the face is equalized, resized to a standard size, and saved, unless it is the same as the face saved last time. */
         if( mouthDetect == 1 ) {
                vector< cv::Rect > mouthVec;
                Rect halfRect = faceVec[i];
                halfRect.height /= 2;
                halfRect.y += halfRect.height;
                Mat halfFace = videoFrame( halfRect );
                mMouthDetector.detectMultiScale( halfFace, mouthVec, 3 );
                for( size_t j=0; j<mouthVec.size(); j++ ) {
                    cv::Rect rect = mouthVec[j];
                    rect.x += halfRect.x;
                    rect.y += halfRect.y;

                    Mat equalizedImg;
                    equalizeHist(face, equalizedImg);

                    Mat face_resized;
                    resize(equalizedImg, face_resized, Size(faceSize, faceSize), 1.0, 1.0, INTER_CUBIC);

                    double imageDiff = 10000000000.0;
                    if (oldFace.data) {
                        imageDiff = norm(face_resized, oldFace, NORM_L2)/(double)(face_resized.rows * face_resized.cols);
                    }



                    double current_time = (double)getTickCount();
                    double timeDiff_seconds = (current_time - oldTime)/getTickFrequency();
                    cout << "imageDiff " << imageDiff << "  and timeDiff " << timeDiff_seconds <<  "  and oldtime " << oldTime << "  and currentTime " <<current_time << "\n";

                    if ((imageDiff > CHANGE_IMAGE) && (timeDiff_seconds > CHANGE_TIME)) {

                            saveFace(face_resized, personName);
                            qDebug() << "Photo was saved";
                            cv::rectangle( defaultFace, rect, CV_RGB(255,255,255), 2 );

                            Mat displayedFaceRegion = defaultFace( faceVec[i]);
                            displayedFaceRegion += CV_RGB(90,90,90);

                            oldFace = face_resized.clone();
                            oldTime = current_time;

                }
            }
         }

//......... some code omitted here .........
Developer: xmikun00, Project: Videoteror, Lines: 101, Source: facedetection.cpp

Example 5: gst_motiondetect_transform_ip

static GstFlowReturn
gst_motiondetect_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
{
  GstMessage *m = NULL;
  StbtMotionDetect *filter = GST_MOTIONDETECT (trans);
  if ((!filter) || (!buf)) {
    return GST_FLOW_OK;
  }

  GST_OBJECT_LOCK(filter);
  if (filter->enabled && filter->state != MOTION_DETECT_STATE_INITIALISING) {
    IplImage *referenceImageGrayTmp = NULL;
    static int frameNo = 1;

    filter->cvCurrentImage->imageData = (char *) GST_BUFFER_DATA (buf);
    cvCvtColor( filter->cvCurrentImage,
        filter->cvCurrentImageGray, CV_BGR2GRAY );

    if (filter->debugDirectory) {
      gst_motiondetect_log_image (filter->cvCurrentImageGray, 
          filter->debugDirectory, frameNo, "source.png");
    }

    if (filter->state == MOTION_DETECT_STATE_REFERENCE_IMAGE_ACQUIRED) {
      gboolean result;
      
      result = gst_motiondetect_apply(
          filter->cvReferenceImageGray, filter->cvCurrentImageGray,
          filter->cvMaskImage, filter->noiseThreshold);

      if (filter->debugDirectory) {
        if (result) {
          gst_motiondetect_log_image (filter->cvReferenceImageGray, 
              filter->debugDirectory, frameNo,
              "absdiff_not_masked_motion.png");
        } else {
          gst_motiondetect_log_image (filter->cvReferenceImageGray, 
              filter->debugDirectory, frameNo,
              "absdiff_not_masked_no_motion.png");
        }
        gst_motiondetect_log_image (filter->cvMaskImage,
              filter->debugDirectory, frameNo, "mask.png");
      }

      GstStructure *s = gst_structure_new ("motiondetect",
          "has_motion", G_TYPE_BOOLEAN, result,
          "masked", G_TYPE_BOOLEAN, (filter->mask != NULL),
          "mask_path", G_TYPE_STRING, filter->mask, NULL);
      m = gst_message_new_element (GST_OBJECT (filter), s);

      if (filter->display) {
        buf = gst_buffer_make_writable (buf);
        cvSubS (filter->cvCurrentImage, CV_RGB(100, 100, 100),
            filter->cvCurrentImage, filter->cvInvertedMaskImage);
        if (result) {
          cvAddS (filter->cvCurrentImage, CV_RGB(50, 0, 0),
              filter->cvCurrentImage, filter->cvMaskImage);
        }
      }
    }

    referenceImageGrayTmp = filter->cvReferenceImageGray;
    filter->cvReferenceImageGray = filter->cvCurrentImageGray;
    filter->cvCurrentImageGray = referenceImageGrayTmp;
    filter->state = MOTION_DETECT_STATE_REFERENCE_IMAGE_ACQUIRED;
    ++frameNo;
  }
  GST_OBJECT_UNLOCK(filter);

  if (m) {
    gst_element_post_message (GST_ELEMENT (filter), m);
  }

  return GST_FLOW_OK;
}
Developer: ekelly30, Project: stb-tester, Lines: 75, Source: gstmotiondetect.c

Example 6: main

/*
 * -d 0,1,2,3,4 =>/dev/video0,video1,...
 * -f static file
 * 
 */
int main (int argc, char * argv[]) {
	//Two boolean variables
	char quit = 0; //Exit main program loop?
	char grab_frame = 1; //Do we grab a new frame from the camera?
	
	int thresh1=DEFAULT_TRACKBAR_VAL, thresh2=DEFAULT_TRACKBAR_VAL; //These two variables will hold trackbar positions.

	if(argc==1){
		camID=0;
		printf("set default camera 0\n");
	}else if(option(argc,argv) < 0 ){
		printf( "Wrong args!!!\n");
		exit(EXIT_FAILURE);
	}

	//These are pointers to IPL images, which will hold the result of our calculations
	IplImage *small_image = NULL; /*cvCreateImage(cvSize(IMG_WIDTH,IMG_HEIGHT),IPL_DEPTH_8U,3)*/; //size, depth, channels (RGB = 3)
	IplImage *small_grey_image = NULL;/*cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1)*/; //1 channel for greyscale
	IplImage *edge_image = NULL; /*cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1)*/; //We use cvGetSize to make sure the images are the same size. 
	
	//CvMemStorage and CvSeq are structures used for dynamic data collection. CvMemStorage contains pointers to the actual
	//allocated memory, but CvSeq is used to access this data. Here, it will hold the list of image contours.
	CvMemStorage *storage = cvCreateMemStorage(0);
	CvSeq *contours = 0;
	
	CvCapture *camera=NULL;
	if(camID>=0){
		camera = cvCreateCameraCapture(camID); //This function tries to connect to the first (0th) camera it finds and creates a structure to refer to it.
		if(!camera){ //cvCreateCameraCapture failed, most likely there is no camera connected.
			printf("Could not find a camera to capture from...\n"); //Notify the user...
			return -1; //And quit with an error.
		}
	}
	cvNamedWindow(MAIN_WINNAME, CV_WINDOW_AUTOSIZE); //Here we create a window and give it a name. The second argument tells the window to not automatically adjust its size.
	
	//We add two trackbars (sliders) to the window. These will be used to set the parameters for the Canny edge detection.
	cvCreateTrackbar("Thresh1", MAIN_WINNAME, &thresh1, 256, 0);
	cvCreateTrackbar("Thresh2", MAIN_WINNAME, &thresh2, 256, 0);
	
	//Set the trackbar position to the default value. 
	cvSetTrackbarPos("Thresh1", MAIN_WINNAME, DEFAULT_TRACKBAR_VAL); //Trackbar name, window name, position
	cvSetTrackbarPos("Thresh2", MAIN_WINNAME, DEFAULT_TRACKBAR_VAL);
	
	//Now set the mouse callback function. We need to pass the location of the contours so that it will be able to access this information.
	cvSetMouseCallback(MAIN_WINNAME, onMouse, &contours); //Window name, function pointer, user-defined parameters.
	
	IplImage *frame=NULL; //This will point to the IPL image we will retrieve from the camera.
	IplImage *frame_org=NULL;
	if(camID < 0){
		frame_org = cvLoadImage(orgFile, CV_LOAD_IMAGE_UNCHANGED);
		frame = cvLoadImage(inFile, CV_LOAD_IMAGE_UNCHANGED);
	}
	//This is the main program loop. We exit the loop when the user sets quit to true by pressing the "esc" key
	while(!quit){
		int c =0;
		if(camID >= 0){
			c=cvWaitKey(30); //Wait 30 ms for the user to press a key.
			
			//Respond to key pressed.
			switch(c){
				case 32: //Space
					grab_frame = !grab_frame; //Reverse the value of grab_frame. That way, the user can toggle by pressing the space bar.
					break;
				case 27: //Esc: quit application when user presses the 'esc' key.
					quit = 1; //Get out of loop
					break;
			};

			//If we don't have to grab a frame, we're done for this pass.
			if(!grab_frame)continue;	

			//Grab a frame from the camera.
			frame = cvQueryFrame(camera);
			
			if(!frame) continue; //Couldn't get an image, try again next time.
		}
		
		//In computer vision, it's always better to work with the smallest images possible, for faster performance.
		//cvResize will use inter-linear interpolation to fit frame into small_image.
		if(small_image==NULL){
			small_image = cvCreateImage(cvSize(frame->width,frame->height),IPL_DEPTH_8U,3); //size, depth, channels (RGB = 3)
			printf("w=%d, h=%d\n", frame->width,frame->height);
		}
		if(small_grey_image==NULL) small_grey_image=cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1);
		if(edge_image==NULL) edge_image = cvCreateImage(cvGetSize(small_image), IPL_DEPTH_8U, 1);

		cvResize(frame, small_image, CV_INTER_LINEAR);
		
		//Many computer vision algorithms do not use colour information. Here, we convert from RGB to greyscale before processing further.
		cvCvtColor(small_image, small_grey_image, CV_RGB2GRAY);

		//We then detect edges in the image using the Canny algorithm. This will return a binary image, one where the pixel values will be 255 for 
		//pixels that are edges and 0 otherwise. This is unlike other edge detection algorithms like Sobel, which compute greyscale levels.
		cvCanny(small_grey_image, edge_image, (double)thresh1, (double)thresh2, 3); //We use the threshold values from the trackbars and set the window size to 3
		
//......... some code omitted here .........
Developer: biotrump, Project: mip, Lines: 101, Source: findContour.c

Example 7: main

int main( int argc, char** argv )
{
    const char *pstrWindowsSrcTitle = "Original picture";
    const char *pstrWindowsOutLineTitle = "Outline picture";

    const int IMAGE_WIDTH = 400;
    const int IMAGE_HEIGHT = 200;

    // create the image
    IplImage *pSrcImage = cvCreateImage(cvSize(IMAGE_WIDTH, IMAGE_HEIGHT), IPL_DEPTH_8U, 3);

    // fill with white
    cvRectangle(pSrcImage, cvPoint(0, 0), cvPoint(pSrcImage->width, pSrcImage->height), CV_RGB(255, 255, 255), CV_FILLED);

    // draw circles
    CvPoint ptCircleCenter = cvPoint(IMAGE_WIDTH / 4, IMAGE_HEIGHT / 2);
    int nRadius = 80;
    cvCircle(pSrcImage, ptCircleCenter, nRadius, CV_RGB(255, 255, 0), CV_FILLED);
    ptCircleCenter = cvPoint(IMAGE_WIDTH / 4, IMAGE_HEIGHT / 2);
    nRadius = 30;
    cvCircle(pSrcImage, ptCircleCenter, nRadius, CV_RGB(255, 255, 255), CV_FILLED);

    // draw rectangles
    CvPoint ptLeftTop = cvPoint(IMAGE_WIDTH / 2 + 20, 20);
    CvPoint ptRightBottom = cvPoint(IMAGE_WIDTH - 20, IMAGE_HEIGHT - 20);
    cvRectangle(pSrcImage, ptLeftTop, ptRightBottom, CV_RGB(0, 255, 255), CV_FILLED);
    ptLeftTop = cvPoint(IMAGE_WIDTH / 2 + 60, 40);
    ptRightBottom = cvPoint(IMAGE_WIDTH - 60, IMAGE_HEIGHT - 40);
    cvRectangle(pSrcImage, ptLeftTop, ptRightBottom, CV_RGB(255, 255, 255), CV_FILLED);

    // show the original image
    cvNamedWindow(pstrWindowsSrcTitle, CV_WINDOW_AUTOSIZE);
    cvShowImage(pstrWindowsSrcTitle, pSrcImage);

    // convert to grayscale
    IplImage *pGrayImage =  cvCreateImage(cvGetSize(pSrcImage), IPL_DEPTH_8U, 1);
    cvCvtColor(pSrcImage, pGrayImage, CV_BGR2GRAY);

    // convert to a binary image
    IplImage *pBinaryImage = cvCreateImage(cvGetSize(pGrayImage), IPL_DEPTH_8U, 1);
    cvThreshold(pGrayImage, pBinaryImage, 250, 255, CV_THRESH_BINARY);


    // find the contours (cvFindContours returns the number of contours detected)
    CvMemStorage *pcvMStorage = cvCreateMemStorage();
    CvSeq *pcvSeq = NULL;
    cvFindContours(pBinaryImage, pcvMStorage, &pcvSeq, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

    // draw the contour image
    IplImage *pOutlineImage = cvCreateImage(cvGetSize(pSrcImage), IPL_DEPTH_8U, 3);
    int nLevels = 5;

    // fill with white
    cvRectangle(pOutlineImage, cvPoint(0, 0), cvPoint(pOutlineImage->width, pOutlineImage->height), CV_RGB(255, 255, 255), CV_FILLED);
    cvDrawContours(pOutlineImage, pcvSeq, CV_RGB(255,0,0), CV_RGB(0,255,0), nLevels, 2);

    // show the contour image
    cvNamedWindow(pstrWindowsOutLineTitle, CV_WINDOW_AUTOSIZE);
    cvShowImage(pstrWindowsOutLineTitle, pOutlineImage);

    cvWaitKey(0);

    cvReleaseMemStorage(&pcvMStorage);

    cvDestroyWindow(pstrWindowsSrcTitle);
    cvDestroyWindow(pstrWindowsOutLineTitle);
    cvReleaseImage(&pSrcImage);
    cvReleaseImage(&pGrayImage);
    cvReleaseImage(&pBinaryImage);
    cvReleaseImage(&pOutlineImage);

    return 0;
}
Developer: kyyang28, Project: opencv, Lines: 73, Source: main.cpp

Example 8: meanShift


//......... some code omitted here .........
            return;
            for(i=0;i<points2.size();i++)
            {
                bmp.push_back( points2[i]);
                tempBd.push_back(bdescriptors.row(queryIdxs[i]));
                tempbk.push_back(keypoints2[trainIdxs[i]]);
                tempBd.push_back(descriptors2.row(trainIdxs[i]));
                tempbk.push_back(keypoints2[trainIdxs[i]]);
            }
        }
        else
        {
            H12 = findHomography( Mat(points1), Mat(points2), CV_RANSAC, RANSAC_THREHOLD );

            if( !H12.empty() )
            {
                Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);

                for( size_t i1 = 0; i1 < points1.size(); i1++ )
                {
                    double diff = norm(points2[i1] - points1t.at<Point2f>((int)i1,0));
                    if(diff  <= 20)
                    {
                        matchesMask[i1]=1;
                        bmp.push_back( points2[i1]);
                        tempBd.push_back(bdescriptors.row(queryIdxs[i1]));
                        tempbk.push_back(keypoints2[trainIdxs[i1]]);
                        tempBd.push_back(descriptors2.row(trainIdxs[i1]));
                        tempbk.push_back(keypoints2[trainIdxs[i1]]);

                    }
                }

                drawMatches( img1ROI, bkeypoints, img2ROI, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
             #if DRAW_RICH_KEYPOINTS_MODE
                             , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
             #endif
                             );

            }
        }
        imshow("bm",drawImg);

        //============edit part ====
        shiftPoints(bmp, box);
        vector<int> bflag(bmp.size(),0);

        for(i=0;i<bmp.size();i++)
            bflag[i]=0;

        vector<int> ft(matchedPoints2.size(),0);

        for(i=0;i<matchedPoints2.size();i++)
        {
            ft[i]=0;
            for(j=0; j< bmp.size(); j++)
            {
                double diff = norm (matchedPoints2[i] - bmp[j]);
                // cout << diff << endl;
                if(diff < 0.5)
                {
                    bflag[j]=1;
                    ft[i]=1;
                    break;
                }
            }
Developer: mdqyy, Project: surfmst, Lines: 67, Source: mean.cpp

Example 9: doIteration


//......... some code omitted here .........
        mDesc1.push_back(descriptors1.row(queryIdxs[i]));
        mDesc2.push_back(descriptors2.row(trainIdxs[i]));
    }


    vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
    vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
    vector<char> matchesMask( filteredMatches.size(), 0 );//,  matchesMask2( filteredMatches.size(), 1 );;

    Mat drawImg;// drawImg2;

    cout << "points2.size \t" << points2.size() << endl;
    cout <<"HELLO \t" << endl;

    if( RANSAC_THREHOLD >= 0 )
    {
        if (points2.size() < 4 )
        {
            cout << "matchedPoints1 less than 4, hence prev ROI is retained" << endl;

            for(size_t i1=0;i1<points2.size();i1++)
            {
                matchesMask[i1] = 1;
                matchedPoints1.push_back( points1[i1]);
                matchedPoints2.push_back( points2[i1]);

                matchedDesc1.push_back(descriptors1.row(queryIdxs[i1]));
                matchedDesc2.push_back(descriptors2.row(trainIdxs[i1]));

                tempkey.push_back(keypoints2[trainIdxs[i1]]);
                MP1.push_back(points2[i1]);
            }
        }
        else
        {
            H12 = findHomography( Mat(points1), Mat(points2), CV_RANSAC, RANSAC_THREHOLD );

            if( !H12.empty() )
            {

                Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);

                vector<Point2f> points2Shift(points2.size());
                points2Shift = points2;
                shiftPoints(points2Shift, box);
                Point2f boxCenter;
                boxCenter.x = box.x + box.width/2;
                boxCenter.y = box.y + box.height/2;

                for( size_t i1 = 0; i1 < points1.size(); i1++ )
                {
                    double descDiff = pow(norm(mDesc1.row(i1) - mDesc2.row(i1)) , 2);
                    //  if(descDiff < 0.08)
                    {
                        double diff = norm(points2[i1] - points1t.at<Point2f>((int)i1,0));
                        if(diff  <= 30)
                        {
                          //  cout << diff << endl;
                            matchesMask[i1] = 1;
                            matchedPoints1.push_back( points1[i1]);
                            matchedPoints2.push_back( points2[i1]);

                            matchedDesc1.push_back(descriptors1.row(queryIdxs[i1]));
                            matchedDesc2.push_back(descriptors2.row(trainIdxs[i1]));

                            tempkey.push_back(keypoints2[trainIdxs[i1]]);
                            MP1.push_back(points2[i1]);
                        }
                    }

                }
            }
            //              drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg2, CV_RGB(255, 255, 0), CV_RGB(255,255, 255), matchesMask2
            //              #if DRAW_RICH_KEYPOINTS_MODE
            //                                   , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
            //              #endif
            //                                 );
            drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
             #if DRAW_RICH_KEYPOINTS_MODE
                         , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
             #endif
                         );

            cout << endl;
            imshow( "doiter", drawImg );

//            Mat newimg = img1.clone();
//             KeyPoint::convert(keypoints1, points1);
//            for(size_t i=0;i<points1.size();i++)
//                 circle(newimg, points1[i], 2, Scalar(255,0,255),2);

//             imshow( "doimg", newimg );
//            points1.clear();
// waitKey(0);

        }
    }
    // waitKey(0);

}
Developer: mdqyy, Project: surfmst, Lines: 101, Source: mean.cpp

Example 10: cvRenderTracks

void cvRenderTracks(CvTracks const tracks, IplImage *imgSource, IplImage *imgDest, unsigned short mode, CvFont *font)
{
  CV_FUNCNAME("cvRenderTracks");
  __CV_BEGIN__;

  CV_ASSERT(imgDest && (imgDest->depth == IPL_DEPTH_8U) && (imgDest->nChannels == 3));

  if ((mode & CV_TRACK_RENDER_ID) && (!font))
  {
    if (!defaultFont)
    {
      font = defaultFont = new CvFont;
      cvInitFont(font, CV_FONT_HERSHEY_DUPLEX, 0.5, 0.5, 0, 1);
      // Other fonts:
      //   CV_FONT_HERSHEY_SIMPLEX, CV_FONT_HERSHEY_PLAIN,
      //   CV_FONT_HERSHEY_DUPLEX, CV_FONT_HERSHEY_COMPLEX,
      //   CV_FONT_HERSHEY_TRIPLEX, CV_FONT_HERSHEY_COMPLEX_SMALL,
      //   CV_FONT_HERSHEY_SCRIPT_SIMPLEX, CV_FONT_HERSHEY_SCRIPT_COMPLEX
    }
    else
      font = defaultFont;
  }

  if (mode)
  {
    for (CvTracks::const_iterator it = tracks.begin(); it != tracks.end(); ++it)
    {
      if (mode & CV_TRACK_RENDER_ID)
        if (!it->second->inactive)
        {
          stringstream buffer;
          buffer << it->first;
          cvPutText(imgDest, buffer.str().c_str(), cvPoint((int)it->second->centroid.x, (int)it->second->centroid.y), font, CV_RGB(255., 255., 0.));
        }

      if (mode & CV_TRACK_RENDER_BOUNDING_BOX)
        if (it->second->inactive)
          cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx - 1, it->second->maxy - 1), CV_RGB(0., 0., 50.));
        else
          cvRectangle(imgDest, cvPoint(it->second->minx, it->second->miny), cvPoint(it->second->maxx - 1, it->second->maxy - 1), CV_RGB(0., 255., 0.));

      if (mode & CV_TRACK_RENDER_TO_LOG)
      {
        clog << "Track " << it->second->id << endl;
        if (it->second->inactive)
          clog << " - Inactive for " << it->second->inactive << " frames" << endl;
        else
          clog << " - Associated with blob " << it->second->label << endl;
        clog << " - Lifetime " << it->second->lifetime << endl;
        clog << " - Active " << it->second->active << endl;
        clog << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
        clog << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
        clog << endl;
      }

      if (mode & CV_TRACK_RENDER_TO_STD)
      {
        cout << "Track " << it->second->id << endl;
        if (it->second->inactive)
          cout << " - Inactive for " << it->second->inactive << " frames" << endl;
        else
          cout << " - Associated with blobs " << it->second->label << endl;
        cout << " - Lifetime " << it->second->lifetime << endl;
        cout << " - Active " << it->second->active << endl;
        cout << " - Bounding box: (" << it->second->minx << ", " << it->second->miny << ") - (" << it->second->maxx << ", " << it->second->maxy << ")" << endl;
        cout << " - Centroid: (" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
        cout << endl;
      }
    }
  }

  __CV_END__;
}
Developer: seylum, Project: vcounter, Lines: 73, Source: cvtrack.cpp
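For context, this function comes from the cvBlob library, where it is the last step of a label-filter-track-render pipeline. Below is a sketch of how it is typically driven, assuming cvblob's companion functions (cvLabel, cvFilterByArea, cvUpdateTracks, cvReleaseBlobs) with their usual signatures; the area and distance thresholds are illustrative.

#include <cvblob.h>
using namespace cvb;

// `binary` is a single-channel thresholded frame, `frame` the color image to
// annotate; `tracks` persists across frames so track identities are maintained.
void renderTracksDemo(IplImage *binary, IplImage *frame, CvTracks &tracks)
{
    CvBlobs blobs;
    IplImage *labelImg = cvCreateImage(cvGetSize(binary), IPL_DEPTH_LABEL, 1);
    cvLabel(binary, labelImg, blobs);        // connected-component labeling
    cvFilterByArea(blobs, 500, 1000000);     // drop blobs outside this area range
    cvUpdateTracks(blobs, tracks, 200., 5);  // associate blobs with existing tracks
    cvRenderTracks(tracks, frame, frame,
                   CV_TRACK_RENDER_ID | CV_TRACK_RENDER_BOUNDING_BOX);
    cvReleaseImage(&labelImg);
    cvReleaseBlobs(blobs);
}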

Example 11: printf

void ImageViewer::displayMatches(QPainter& painter)const
{
	QPoint pt1, pt2;

	if (siftObj1.keypoints==NULL){
		printf("ERROR : Keypoints NULL\n");
		exit(-1);
	}
		
	if (dispMatch && lastComparison.tab_match!=NULL && !siftObj1.IsEmpty() ){
		// Display matches
		for (int i=0;i<lastComparison.nb_match;i++) {
			pt1.setX(ROUND(lastComparison.tab_match[i].x1));
			pt1.setY(ROUND(lastComparison.tab_match[i].y1));
			pt2.setX(ROUND(lastComparison.tab_match[i].x2));
			pt2.setY(ROUND(lastComparison.tab_match[i].y2 + siftObj1.im->height));
			
			painter.setBrush(Qt::white);
			if (lastComparison.tab_match[i].id==0)
				painter.setPen(Qt::red); //red for discarded matches
			else painter.setPen(Qt::green); //green
			
			painter.drawLine(pt1, pt2);
			painter.drawEllipse(pt1, 3, 3);
			painter.drawEllipse(pt2, 3, 3);
		}
	}
	
	#ifdef AAA
		
	//IplImage * im,* imcol;
	QSize s;
	//QPoint pt1, pt2;
	//CvScalar color;
	int i,j,im2null=0;
	Keypoint *k1 = siftObj1->keypoints;
	Keypoint *k2 = siftObj2->keypoints;
		/*Affine transform of the image border*/
		
		if (param.size_m()>0) {
			Matrice p1(2,1), p2(2,1), p3(2,1), p4(2,1), transl(2,1);
			transl.set_val(0,0,0);
			transl.set_val(1,0,im1->height);
			p1.set_val(0,0,0);
			p1.set_val(1,0,0);
			p2.set_val(0,0,im1->width);
			p2.set_val(1,0,0);
			p3.set_val(0,0,im1->width);
			p3.set_val(1,0,im1->height);
			p4.set_val(0,0,0);
			p4.set_val(1,0,im1->height);
			
			p1=Transform(p1,param)+transl;
			p2=Transform(p2,param)+transl;
			p3=Transform(p3,param)+transl;
			p4=Transform(p4,param)+transl;
			
			color=CV_RGB(0,128,255); //light blue
			pt1.x=ROUND(p1.get_val(0,0));
			pt1.y=ROUND(p1.get_val(1,0));
			pt2.x=ROUND(p2.get_val(0,0));
			pt2.y=ROUND(p2.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			pt1.x=ROUND(p2.get_val(0,0));
			pt1.y=ROUND(p2.get_val(1,0));
			pt2.x=ROUND(p3.get_val(0,0));
			pt2.y=ROUND(p3.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			pt1.x=ROUND(p3.get_val(0,0));
			pt1.y=ROUND(p3.get_val(1,0));
			pt2.x=ROUND(p4.get_val(0,0));
			pt2.y=ROUND(p4.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			pt1.x=ROUND(p4.get_val(0,0));
			pt1.y=ROUND(p4.get_val(1,0));
			pt2.x=ROUND(p1.get_val(0,0));
			pt2.y=ROUND(p1.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			
			/* Draw the border of the object */
			CvMemStorage *storage= cvCreateMemStorage (0); /* Memory used by openCV */
			int header_size = sizeof( CvContour );
			CvSeq *contours;
			
			IplImage* imthres = cvCreateImage(cvSize(im1->width,im1->height),IPL_DEPTH_8U, 1 );
			cvCopy( im1, imthres, 0 );
			
			/* First find the contour of a thresholded image*/
			
			cvThreshold(imthres, imthres, border_threshold, 255, CV_THRESH_BINARY );
			cvFindContours ( imthres, storage, &contours, header_size, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
			
			/* For each contour found*/
			
			while ( contours != NULL) {
				double area=fabs(cvContourArea(contours,CV_WHOLE_SEQ)); // compute area
				if ( area > 20) {
					for (int i=0;i<contours->total;i++) {
						
						/* Compute transform of contour*/
//......... some code omitted here .........
Developer: lwinkler, Project: hlfe, Lines: 101, Source: imageviewer.cpp

Example 12: computeVectors

static void computeVectors( IplImage* img, IplImage* dst, short wROI, short hROI){
	if(DEBUG){
		std::cout << "-- VECTOR COMPUTING" << std::endl;		
	}
	double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
	CvSize size = cvSize(img->width,img->height); // get current frame size 640x480
	int i, idx1 = last, idx2;
	CvSeq* seq;
	CvRect comp_rect;
	CvRect roi;
	double count;
	double angle;
	CvPoint center;
	double magnitude;
	CvScalar color;

	//--SURF CORNERS--
	if(DEBUG){
		std::cout << "--- SURF CORNERS" << std::endl;
	}
	color = CV_RGB(0,255,0);
	CvMemStorage* storage2 = cvCreateMemStorage(0);
	CvSURFParams params = cvSURFParams(SURF_THRESHOLD, 1);
	CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
	cvExtractSURF( dst, 0, &imageKeypoints, &imageDescriptors, storage2, params );
	if(DEBUG){
		printf("Image Descriptors: %d\n", imageDescriptors->total);
	}
	for( int j = 0; j < imageKeypoints->total; j++ ){
		CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, j );
		center.x = cvRound(r->pt.x);
		center.y = cvRound(r->pt.y);
		if(DEBUG){
			printf("j: %d \t", j);
			printf("total: %d \t", imageKeypoints->total);
			printf("hessian value: %f \t", r->hessian);
			printf("x: %d \t", center.x);
			printf("y: %d \n", center.y);
		}
		// add the point inside the region of interest
		cvCircle( dst, center, cvRound(r->hessian*0.02), color, 3, CV_AA, 0 );
		// fill the matrix with the vectors
		relevancePointToVector(center.x, center.y, wROI, hROI, 5);
	}
	//--SURF CORNERS


	// calculate motion gradient orientation and valid orientation mask
	cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
	
	// Compute Motion on 4x4 Cuadrants
	if(DEBUG){
		std::cout << "--- MOTION CUADRANTS" << std::endl;
	}
	i = 25;
	color = CV_RGB(255,0,0);
	magnitude = 30;
	for (int r = 0; r < size.height; r += hROI){
		for (int c = 0; c < size.width; c += wROI){
			comp_rect.x = c;
			comp_rect.y = r;
			comp_rect.width = (c + wROI > size.width) ? (size.width - c) : wROI;
			comp_rect.height = (r + hROI > size.height) ? (size.height - r) : hROI;

			cvSetImageROI( mhi, comp_rect );
			cvSetImageROI( orient, comp_rect );
			cvSetImageROI( mask, comp_rect );
			cvSetImageROI( silh, comp_rect );
			cvSetImageROI( img, comp_rect );

			// Process Motion
			angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
			angle = 360.0 - angle;  // adjust for images with top-left origin
			count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
			roi = cvGetImageROI(mhi);
			center = cvPoint( (comp_rect.x + comp_rect.width/2),
					  (comp_rect.y + comp_rect.height/2) );
			cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
			cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
			cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );	
			
			if(DEBUG){
				std::cout << "Motion " << i << " -> x: " << roi.x << " y: " << roi.y << " count: " << count << " angle: " << angle << std::endl; // print the roi
			}
			cvResetImageROI( mhi );
			cvResetImageROI( orient );
			cvResetImageROI( mask );
			cvResetImageROI( silh );
			cvResetImageROI(img);
			relevanceDirectionToVector(i, angle);
			++i;
		}
	}

	// Compute Global Motion
	if(DEBUG){
		std::cout << "--- MOTION GLOBAL" << std::endl;
	}
	comp_rect = cvRect( 0, 0, size.width, size.height );
	color = CV_RGB(255,255,255);
//......... some code omitted here .........
Developer: sabs231, Project: hand-gesture-recon, Lines: 101, Source: trainAndClassify.cpp

Example 13: while

void ObjectTester::RunVideoDemo()
{
	GenericObjectDetector detector;

	visualsearch::io::OpenCVCameraIO cam;
	if( !cam.InitCamera() )
		return;
	//KinectDataMan kinectDM;

	//if( !kinectDM.InitKinect() )
		//return;

	bool doRank = true;
	// start fetching stream data
	while(1)
	{
		Mat cimg, dmap;
		//kinectDM.GetColorDepth(cimg, dmap);
		cam.QueryNextFrame(visualsearch::io::STREAM_COLOR, cimg);

		// resize image
		Size newsz;
		ToolFactory::compute_downsample_ratio(Size(cimg.cols, cimg.rows), 300, newsz);
		resize(cimg, cimg, newsz);
		//resize(dmap, dmap, newsz);
		//normalize(dmap, dmap, 0, 255, NORM_MINMAX);

		// get objects
		vector<ImgWin> objwins, salwins;
		if( !detector.ProposeObjects(cimg, dmap, objwins, salwins, doRank) )
			continue;

		//////////////////////////////////////////////////////////////////////////
		// draw best k windows
		int topK = MIN(6, objwins.size());
		int objimgsz = newsz.height / topK;
		int canvas_h = newsz.height;
		int canvas_w = newsz.width + 10 + objimgsz*2;
		Mat canvas(canvas_h, canvas_w, CV_8UC3);
		canvas.setTo(Vec3b(0,0,0));
		// get top windows
		vector<Mat> detimgs(topK);
		vector<Mat> salimgs(topK);
		for (int i=0; i<topK; i++)
		{
			cimg(objwins[i]).copyTo(detimgs[i]);
			resize(detimgs[i], detimgs[i], Size(objimgsz, objimgsz));
			cimg(salwins[i]).copyTo(salimgs[i]);
			resize(salimgs[i], salimgs[i], Size(objimgsz, objimgsz));
		}

		// draw boxes on input
		Scalar objcolor(0, 255, 0);
		Scalar salcolor(0, 0, 255);
		for(int i=0; i<topK; i++)
		{
			//rectangle(cimg, objwins[i], objcolor);
			rectangle(cimg, salwins[i], salcolor);
		}
		circle(cimg, Point(cimg.cols/2, cimg.rows/2), 2, CV_RGB(255,0,0));
		// copy input image
		cimg.copyTo(canvas(Rect(0, 0, cimg.cols, cimg.rows)));

		// copy subimg
		for (int i=0; i<detimgs.size(); i++)
		{
			Rect objbox(cimg.cols+10, i*objimgsz, objimgsz, objimgsz);
			detimgs[i].copyTo(canvas(objbox));
			Rect salbox(cimg.cols+10+objimgsz, i*objimgsz, objimgsz, objimgsz);
			salimgs[i].copyTo(canvas(salbox));
		}

		resize(canvas, canvas, Size(canvas.cols*2, canvas.rows*2));
		//if(doRank)
			//putText(canvas, "Use Ranking", Point(50, 50), CV_FONT_HERSHEY_COMPLEX, 1, CV_RGB(255, 0, 0));
		imshow("object proposals", canvas);
		if( waitKey(10) == 'q' )
			break;
	}
}
Developer: flyfj, Project: Saliency, Lines: 80, Source: ObjectTester.cpp

Example 14: putDistance

static void putDistance(IplImage *Image,
                        std::vector<cv_tracker::image_rect_ranged> objects,
                        int threshold_height,
                        const char* objectLabel)
{
  char distance_string[32];
  CvFont dfont;
  float hscale	    = 0.7f;
  float vscale	    = 0.7f;
  float italicscale = 0.0f;
  int	thickness   = 1;

  CvFont      dfont_label;
  float       hscale_label = 0.5f;
  float       vscale_label = 0.5f;
  CvSize      text_size;
  int         baseline     = 0;

  cvInitFont(&dfont_label, CV_FONT_HERSHEY_COMPLEX, hscale_label, vscale_label, italicscale, thickness, CV_AA);
  cvGetTextSize(objectLabel,
                &dfont_label,
                &text_size,
                &baseline);

  for (unsigned int i=0; i<objects.size(); i++)
    {
      if (objects.at(i).rect.y > threshold_height) // temporal way to avoid drawing detections in the sky
        {
          if (!isNearlyNODATA(objects.at(i).range))
            {
              /* put label */
              CvPoint labelOrg = cvPoint(objects.at(i).rect.x - OBJ_RECT_THICKNESS,
                                         objects.at(i).rect.y - baseline - OBJ_RECT_THICKNESS);

              cvRectangle(Image,
                          cvPoint(labelOrg.x + 0, labelOrg.y + baseline),
                          cvPoint(labelOrg.x + text_size.width, labelOrg.y - text_size.height),
                          CV_RGB(0, 0, 0), // label background is black
                          -1, 8, 0
                          );
              cvPutText(Image,
                        objectLabel,
                        labelOrg,
                        &dfont_label,
                        CV_RGB(255, 255, 255) // label text color is white
                        );

              /* put distance data */
              cvRectangle(Image,
                          cv::Point(objects.at(i).rect.x + (objects.at(i).rect.width/2) - (((int)log10(objects.at(i).range/100)+1) * 5 + 45),
                                    objects.at(i).rect.y + objects.at(i).rect.height + 5),
                          cv::Point(objects.at(i).rect.x + (objects.at(i).rect.width/2) + (((int)log10(objects.at(i).range/100)+1) * 8 + 38),
                                    objects.at(i).rect.y + objects.at(i).rect.height + 30),
                          cv::Scalar(255,255,255),
                          -1);

              cvInitFont (&dfont,
                          CV_FONT_HERSHEY_COMPLEX,
                          hscale,
                          vscale,
                          italicscale,
                          thickness,
                          CV_AA);

              sprintf(distance_string, "%.2f m", objects.at(i).range / 100); //unit of length is meter
              cvPutText(Image,
                        distance_string,
                        cvPoint(objects.at(i).rect.x + (objects.at(i).rect.width/2) - (((int)log10(objects.at(i).range/100)+1) * 5 + 40),
                                objects.at(i).rect.y + objects.at(i).rect.height + 25),
                        &dfont,
                        CV_RGB(255, 0, 0));
            }
        }
    }
}
Developer: Keerecles, Project: Autoware, Lines: 75, Source: scan_image_d_viewer.cpp

Example 15: strcpy

/*!
  Initialize the display size, position and title.

  \param width, height : Width and height of the window.
  \param x, y : The window is set at position x,y (column index, row index).
  \param title : Window title.

*/
void
vpDisplayOpenCV::init(unsigned int width, unsigned int height,
                      int x, int y,
                      const char *title)
{
  this->width  = width;
  this->height = height;
  this->windowXPosition = x;
  this->windowYPosition = y;
  int flags = CV_WINDOW_AUTOSIZE;
  if (title != NULL)
  {
    if (this->title != NULL)
    {
      delete [] this->title ;
      this->title = NULL ;
    }
    this->title = new char[strlen(title) + 1] ;
    strcpy(this->title, title) ;
  }
  else{
    if (this->title != NULL)
    {
      delete [] this->title ;
      this->title = NULL ;
    }
    this->title = new char[50] ;
    sprintf(this->title,"Unnamed ViSP display <%02d>",count) ;
  }
  count++;
  /* Create the window*/
  window = cvNamedWindow( this->title, flags );
  cvMoveWindow( this->title, x, y );
  move = false;
  lbuttondown = false;
  mbuttondown = false;
  rbuttondown = false;
  lbuttonup = false;
  mbuttonup = false;
  rbuttonup = false;
  cvSetMouseCallback( this->title, on_mouse, this );
  /* Create background pixmap */
//   background = cvCreateImage(cvSize((int)width,(int)height),IPL_DEPTH_8U,3);
//
//   cvShowImage( this->title,background);

  col = new CvScalar[vpColor::id_unknown] ;

  /* Create color */
  vpColor pcolor; // Predefined colors
  pcolor = vpColor::lightBlue;
  col[vpColor::id_lightBlue]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::blue;
  col[vpColor::id_blue]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkBlue;
  col[vpColor::id_darkBlue]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::lightRed;
  col[vpColor::id_lightRed]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::red;
  col[vpColor::id_red]    = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkRed;
  col[vpColor::id_darkRed]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::lightGreen;
  col[vpColor::id_lightGreen]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::green;
  col[vpColor::id_green]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkGreen;
  col[vpColor::id_darkGreen]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::yellow;
  col[vpColor::id_yellow] = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::cyan;
  col[vpColor::id_cyan]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::orange;
  col[vpColor::id_orange] = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::purple;
  col[vpColor::id_purple] = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::white;
  col[vpColor::id_white]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::black;
  col[vpColor::id_black]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::lightGray;
  col[vpColor::id_lightGray]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::gray;
  col[vpColor::id_gray]  = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;
  pcolor = vpColor::darkGray;
  col[vpColor::id_darkGray]   = CV_RGB(pcolor.R, pcolor.G, pcolor.B) ;

  font = new CvFont;
  cvInitFont( font, CV_FONT_HERSHEY_PLAIN, 0.70f,0.70f);

  CvSize fontSize;
  int baseline;
//......... some code omitted here .........
Developer: GVallicrosa, Project: Armed-turtlebot, Lines: 101, Source: vpDisplayOpenCV.cpp


Note: The CV_RGB function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various programmers; copyright of the source code remains with the original authors. Refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.