

C++ cv::Scalar Method Code Examples

This article collects typical usage examples of the C++ cv::Scalar method. If you are unsure what cv::Scalar does, how to call it, or want to see it used in real code, the curated examples below may help. You can also explore further usage examples of other methods in the cv namespace.


A total of 14 cv::Scalar code examples are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better C++ code examples.

Example 1: callback

void callback(int, void *)
{
  vector<Vec4i> lines;
  HoughLinesP(sent_to_callback, lines, 1, CV_PI / 180, lineThresh, minLineLength, maxLineGap);

  frame.copyTo(imgLines);
  imgLines = Scalar(0, 0, 0);
  vector<double> angles(lines.size());

  lineCount = lines.size();
  int j = 0;
  for (size_t i = 0; i < lines.size(); i++)
  {
    Vec4i l = lines[i];
    line(imgLines, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 255, 0), 1, CV_AA);
    if ((l[2] == l[0]) || (l[1] == l[3])) continue;
    angles[j] = atan(static_cast<double>(l[2] - l[0]) / (l[1] - l[3]));
    j++;
  }

  imshow("LINES", imgLines + frame);

  // If there are many lines, one or two stray lines won't noticeably affect
  // the mean; but if only a few lines were detected, the mode is taken
  // instead so that stray lines don't skew the result.

  angles.resize(j);  // keep only the entries that were actually filled above

  if (lines.size() > 0 && lines.size() < 10)
    finalAngle = computeMode(angles);
  else if (lines.size() > 0)
    finalAngle = computeMean(angles);
}
Author: harshv834, Project: auv, Lines of code: 32, Source file: line_angle.cpp
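
The snippet above calls two helpers, computeMode and computeMean, that are not shown in this example. A minimal sketch of what they might look like follows; both implementations and the 5-degree binning are assumptions made for illustration, not code from the auv project.

#include <algorithm>
#include <cmath>
#include <map>
#include <numeric>
#include <vector>

// Hypothetical mean: average of all angle samples.
double computeMean(const std::vector<double> &angles)
{
  if (angles.empty()) return 0.0;
  return std::accumulate(angles.begin(), angles.end(), 0.0) / angles.size();
}

// Hypothetical mode: bucket angles into 5-degree bins and return the centre of
// the most populated bin, so one or two stray lines cannot dominate the result.
double computeMode(const std::vector<double> &angles)
{
  if (angles.empty()) return 0.0;
  constexpr double kPi = 3.14159265358979323846;
  const double binWidth = kPi / 36.0;  // 5 degrees in radians
  std::map<int, int> histogram;
  for (double a : angles)
    ++histogram[static_cast<int>(std::floor(a / binWidth))];
  const auto best = std::max_element(
      histogram.begin(), histogram.end(),
      [](const std::pair<const int, int> &l, const std::pair<const int, int> &r) {
        return l.second < r.second;
      });
  return (best->first + 0.5) * binWidth;
}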

Example 2: display

int display(Mat im, CMT & cmt)
{
    //Visualize the output
    //It is ok to draw on im itself, as CMT only uses the grayscale image
    for(size_t i = 0; i < cmt.points_active.size(); i++)
    {
        circle(im, cmt.points_active[i], 2, Scalar(0, 255, 0));
    }

    Scalar color;
    if (cmt.confidence < 0.3) {
      color = Scalar(0, 0, 255);
    } else if (cmt.confidence < 0.4) {
      color = Scalar(0, 255, 255);
    } else {
      color = Scalar(255, 0, 0);
    }
    Point2f vertices[4];
    cmt.bb_rot.points(vertices);
    for (int i = 0; i < 4; i++)
    {
        line(im, vertices[i], vertices[(i+1)%4], color);
    }

    imshow(WIN_NAME, im);

    return waitKey(5);
}
Author: mvines, Project: CppMT, Lines of code: 28, Source file: main.cpp

Example 3: center

TEST(hole_filling_test, elliptical_hole_on_repeated_texture_should_give_good_result)
{
    Mat img = imread("test_images/brick_pavement.jpg");
    convert_for_computation(img, 0.5f);

    // Add some hole
    Mat hole_mask = Mat::zeros(img.size(), CV_8U);

    Point center(100, 110);
    Size axis(20, 5);
    float angle = 20;
    ellipse(hole_mask, center, axis, angle, 0, 360, Scalar(255), -1);
    int patch_size = 7;
    HoleFilling hf(img, hole_mask, patch_size);

    // Dump image with hole as black region.
    Mat img_with_hole_bgr;
    cvtColor(img, img_with_hole_bgr, CV_Lab2BGR);
    img_with_hole_bgr.setTo(Scalar(0,0,0), hole_mask);
    imwrite("brick_pavement_hole.exr", img_with_hole_bgr);

    // Dump reconstructed image
    Mat filled = hf.run();
    cvtColor(filled, filled, CV_Lab2BGR);
    imwrite("brick_pavement_hole_filled.exr", filled);


    // The reconstructed image should be close to the original one, in this very simple case.
    Mat img_bgr;
    cvtColor(img, img_bgr, CV_Lab2BGR);
    double ssd = norm(img_bgr, filled, cv::NORM_L2SQR);
    EXPECT_LT(ssd, 0.2);
}
Author: panmari, Project: patchMatch, Lines of code: 33, Source file: HoleFillingTest.cpp

Example 4: hf

TEST(hole_filling_test, rectangular_hole_on_repeated_texture_should_give_good_result)
{
    Mat img = imread("test_images/brick_pavement.jpg");
    convert_for_computation(img, 0.5f);

    // Add some hole
    Mat hole_mask = Mat::zeros(img.size(), CV_8U);

    hole_mask(Rect(72, 65, 5, 20)) = 255;
    int patch_size = 7;
    HoleFilling hf(img, hole_mask, patch_size);

    // Dump image with hole as black region.
    Mat img_with_hole_bgr;
    cvtColor(img, img_with_hole_bgr, CV_Lab2BGR);
    img_with_hole_bgr.setTo(Scalar(0,0,0), hole_mask);
    imwrite("brick_pavement_hole.exr", img_with_hole_bgr);

    // Dump reconstructed image
    Mat filled = hf.run();
    cvtColor(filled, filled, CV_Lab2BGR);
    imwrite("brick_pavement_hole_filled.exr", filled);


    // The reconstructed image should be close to the original one, in this very simple case.
    Mat img_bgr;
    cvtColor(img, img_bgr, CV_Lab2BGR);
    double ssd = norm(img_bgr, filled, cv::NORM_L2SQR);
    EXPECT_LT(ssd, 0.2);
}
Author: panmari, Project: patchMatch, Lines of code: 30, Source file: HoleFillingTest.cpp

Example 5: onMouse

static void onMouse(int event, int x, int y, int flags, void *param)
{
    Mat im_draw;
    im_select.copyTo(im_draw);

    if(event == CV_EVENT_LBUTTONUP && !tl_set)
    {
        tl = Point(x,y);
        tl_set = true;
    }

    else if(event == CV_EVENT_LBUTTONUP && tl_set)
    {
        br = Point(x,y);
        br_set = true;
        screenLog(im_draw, "Initializing...");
    }

    if (!tl_set) screenLog(im_draw, "Click on the top left corner of the object");
    else
    {
        rectangle(im_draw, tl, Point(x, y), Scalar(255,0,0));

        if (!br_set) screenLog(im_draw, "Click on the bottom right corner of the object");
    }

    imshow(win_name_, im_draw);
}
Author: AloshkaD, Project: CppMT, Lines of code: 28, Source file: gui.cpp

Example 6: display

int display(Mat im, CMT & cmt)
{
    //Visualize the output
    //It is ok to draw on im itself, as CMT only uses the grayscale image
    for(size_t i = 0; i < cmt.points_active.size(); i++)
    {
        circle(im, cmt.points_active[i], 2, Scalar(255,0,0));
    }

    Point2f vertices[4];
    cmt.bb_rot.points(vertices);
    for (int i = 0; i < 4; i++)
    {
        line(im, vertices[i], vertices[(i+1)%4], Scalar(255,0,0));
    }

    imshow(WIN_NAME, im);

    return waitKey(CV_UPDATE_TIME);
}
Author: andrewramsay, Project: cmt-uvc-realsense, Lines of code: 20, Source file: main.cpp

Example 7: Display

void FingerTracker::Display(Mat frame, Candidate candidate) const
{
	if (candidate.m_found)
	{
		rectangle(frame, candidate.m_windowRect.tl(), candidate.m_windowRect.br(), Scalar(0, 255, 255));
		rectangle(frame, candidate.m_fingerPosition - Point(2,2), candidate.m_fingerPosition + Point(2,2), Scalar(255, 0, 0), 2);
	}
#if ENABLE_LINE_DRAWING
	Point prev(-1,-1);
	for (auto point : m_points)
	{
		if (prev.x != -1)
		{
			line(frame, prev, point, Scalar(0, 255, 0));
		}
		prev = point;
	}

#endif
	imshow("Camera", frame);
}
Author: brejski, Project: FingerTracking, Lines of code: 21, Source file: FingerTracker.cpp

Example 8: screenLog

void screenLog(Mat im_draw, const string text)
{
    int font = cv::FONT_HERSHEY_SIMPLEX;
    float font_scale = 0.5;
    int thickness = 1;
    int baseline;

    Size text_size = cv::getTextSize(text, font, font_scale, thickness, &baseline);

    Point bl_text = Point(0,text_size.height);
    Point bl_rect = bl_text;

    bl_rect.y += baseline;

    Point tr_rect = bl_rect;
    tr_rect.x = im_draw.cols; //+= text_size.width;
    tr_rect.y -= text_size.height + baseline;

    rectangle(im_draw, bl_rect, tr_rect, Scalar(0,0,0), -1);

    putText(im_draw, text, bl_text, font, font_scale, Scalar(255,255,255));
}
Author: AloshkaD, Project: CppMT, Lines of code: 22, Source file: gui.cpp

Example 9: Setup

void FingerTracker::Setup()
{
	VideoCapture capture(0);
	if (!capture.isOpened())
	{
		throw std::runtime_error("Could not start camera capture");
	}

	int windowSize = 25;
	int Xpos = 200;
	int Ypos = 50;
	int update = 0;
	int buttonClicked = 0;
	namedWindow("RGB", CV_WINDOW_AUTOSIZE);
	createTrackbar("X", "RGB",  &Xpos, 320, TrackBarCallback, (void*)&update);
	createTrackbar("Y", "RGB",  &Ypos, 240, TrackBarCallback, (void*)&update);
	createTrackbar("Size", "RGB",  &windowSize, 100, TrackBarCallback, (void*)&update);
	setMouseCallback("RGB", MouseCallback, (void*)&buttonClicked);
	Mat fingerWindowBackground, fingerWindowBackgroundGray;

	m_calibrationData.reset(new CalibrationData());
	bool ticking = false;
	std::chrono::system_clock::time_point start = std::chrono::system_clock::now();
    while (true)
	{  		
		Mat frame, frameHSV;

		if (capture.read(frame))
		{
			flip(frame, frame, 1);

			pyrDown(frame, frame, Size(frame.cols / 2, frame.rows / 2));

			Rect fingerWindow(Point(Xpos, Ypos), Size(windowSize, windowSize*3));
			if (Xpos + windowSize >= frame.cols || Ypos + windowSize*3 >= frame.rows)
			{
				windowSize = 20;
				Xpos = 200;
				Ypos = 50;
				update = 0;
			}
			else if (buttonClicked == 1)
			{
				frame(fingerWindow).copyTo(fingerWindowBackground);
				cvtColor(fingerWindowBackground, fingerWindowBackgroundGray, CV_BGR2GRAY);
				buttonClicked  = 0;
				update = 0;
				cvDestroyAllWindows();
			}

			if (fingerWindowBackgroundGray.rows && !m_calibrationData->m_ready)
			{
				Mat diff, thd;
				absdiff(frame(fingerWindow), fingerWindowBackground, diff);
				std::vector<Mat> ch;
				split(diff, ch);
				threshold(ch[0], ch[0], m_calibrationDiffThreshold, 255, 0);
				threshold(ch[1], ch[1], m_calibrationDiffThreshold, 255, 0);
				threshold(ch[2], ch[2], m_calibrationDiffThreshold, 255, 0);
				thd = ch[0];
				add(thd, ch[1], thd);
				add(thd, ch[2], thd);
				medianBlur(thd, thd, 5);

				Mat top, middle, bottom;
				Rect r1 = Rect(0, 0, thd.cols, thd.rows/3);
				Rect r2 = Rect(0, thd.rows / 3 + 1, thd.cols, thd.rows/3);
				Rect r3 = Rect(0, thd.rows * 2 / 3 + 1, thd.cols, thd.rows -  thd.rows * 2 / 3 - 1);
				top = thd(r1);
				middle = thd(r2);
				bottom = thd(r3);			

				auto percentageTop = countNonZero(top) * 100.0 / top.size().area();
				auto percentageMiddle = countNonZero(middle) * 100.0 / middle.size().area();
				auto percentageBottom = countNonZero(bottom) * 100.0 / bottom.size().area();

				bool topReady = false;
				bool middleReady = false;
				bool bottomReady = false;

				Scalar c1, c2, c3;
				if (percentageTop > m_calibrationTopLowerThd && percentageTop < m_calibrationTopUpperThd)
				{
					topReady = true;
					c1 = Scalar(0, 255, 255);
				}
				else
				{
					c1 = Scalar(0, 0, 255);
				}

				if (percentageMiddle > m_calibrationMiddleLowerThd && percentageMiddle < m_calibrationMiddleUppperThd)
				{
					middleReady = true;					
					c2 = Scalar(0, 255, 255);
				}
				else
				{
					c2 = Scalar(0, 0, 255);
				}
//......... part of the code omitted here .........
Author: brejski, Project: FingerTracking, Lines of code: 101, Source file: FingerTracker.cpp

Example 10: Process

void FingerTracker::Process(Mat frame)
{
#if ENABLE_DEBUG_WINDOWS
	Mat img_display;
	frame.copyTo(img_display);
#endif
	
	//	Process only Region of Interest i.e. region around current finger position
	Rect roi = Rect(		
		Point(std::max(m_currentCandidate.m_windowRect.tl().x - m_roiSpanX, 0), std::max(m_currentCandidate.m_windowRect.tl().y - m_roiSpanY, 0)), 		
		Point(std::min(m_currentCandidate.m_windowRect.tl().x + m_roiSpanX + m_calibrationData->m_fingerPatch.cols, frame.cols), 
			  std::min(m_currentCandidate.m_windowRect.tl().y + m_roiSpanY + m_calibrationData->m_fingerPatch.rows, frame.rows)));

	Mat frameRoi;
	frame(roi).copyTo(frameRoi);

	//================TEMPLATE MATCHING
	int result_cols =  frameRoi.cols - m_calibrationData->m_fingerPatch.cols + 1;
	int result_rows = frameRoi.rows - m_calibrationData->m_fingerPatch.rows + 1;
	assert(result_cols > 0 && result_rows > 0);

	Mat scoreMap;
	scoreMap.create(result_rows, result_cols, CV_32FC1); // Mat::create expects (rows, cols, type)

	//	Compare current frame roi region to known candidate
	//	Using OpenCV matchTemplate with method 3 (CV_TM_CCORR_NORMED, normalized cross-correlation)
	matchTemplate(frameRoi, m_calibrationData->m_fingerPatch, scoreMap, 3);

	//================HISTOGRAM BACK PROJECTION
	MatND backProjection;
	Mat frameHSV;
	cvtColor(frameRoi, frameHSV, CV_BGR2HSV);

	calcBackProject(&frameHSV, 1, m_calibrationData->m_channels, m_calibrationData->m_hist, backProjection, (const float**)(m_calibrationData->m_ranges), 1, true);

	Mat backProjectionThresholded;
	threshold(backProjection, backProjectionThresholded, m_backProjectionThreshold, 255, 0);
	erode(backProjectionThresholded, backProjectionThresholded, getStructuringElement(MORPH_RECT, Size(2 * m_erosionSize + 1, 2 * m_erosionSize + 1), Point(m_erosionSize, m_erosionSize)));
	dilate(backProjectionThresholded, backProjectionThresholded, getStructuringElement(MORPH_RECT, Size(2 * m_dilationSize + 1, 2 * m_dilationSize + 1), Point(m_dilationSize, m_dilationSize)));

	Mat backProjectionThresholdedShifted;
	Rect shifted(Rect(m_calibrationData->m_fingerPatch.cols - 1, m_calibrationData->m_fingerPatch.rows - 1, scoreMap.cols, scoreMap.rows));
	backProjectionThresholded(shifted).copyTo(backProjectionThresholdedShifted);

	Mat maskedOutScoreMap(scoreMap.size(), CV_8U);
	scoreMap.copyTo(maskedOutScoreMap, backProjectionThresholdedShifted);

	//====================Localizing the best match with minMaxLoc
	double minVal; double maxVal; Point minLoc; Point maxLoc;
	Point matchLoc;
	minMaxLoc(maskedOutScoreMap, &minVal, &maxVal, &minLoc, &maxLoc, Mat());
	matchLoc = maxLoc + roi.tl();

	m_currentCandidate.m_confidence = static_cast<float>(maxVal);

	if (maxVal > m_candidateDetecionConfidenceThreshold)
	{
		m_currentCandidate.m_found = true;
		m_currentCandidate.m_windowRect = Rect(matchLoc, Point(matchLoc.x + m_calibrationData->m_fingerPatch.cols , matchLoc.y + m_calibrationData->m_fingerPatch.rows));		

		//================Find finger position
		Mat fingerWindowThresholded;
		backProjectionThresholded(Rect(maxLoc, Point(maxLoc.x + m_calibrationData->m_fingerPatch.cols , maxLoc.y + m_calibrationData->m_fingerPatch.rows))).copyTo(fingerWindowThresholded);
		
		m_currentCandidate.m_fingerPosition = GetFingerTopPosition(fingerWindowThresholded) + matchLoc;

#if ENABLE_DEBUG_WINDOWS
		rectangle(img_display, m_currentCandidate.m_windowRect.tl(), m_currentCandidate.m_windowRect.br(), Scalar(255,0,0), 2, 8, 0 );
		rectangle(scoreMap, m_currentCandidate.m_windowRect.tl(), m_currentCandidate.m_windowRect.br(), Scalar::all(0), 2, 8, 0 );
		rectangle(img_display, m_currentCandidate.m_fingerPosition, m_currentCandidate.m_fingerPosition + Point(5,5), Scalar(255,0,0));
#endif
	}
	else
	{
		m_currentCandidate.m_found = false;
	}

#if ENABLE_DEBUG_WINDOWS
	std::stringstream ss;
	ss << maxVal;
	putText(img_display, ss.str(), Point(50, 50), 0, 0.5, Scalar(255,255,255), 1);

	imshow("Overlays", img_display);
	imshow("Results", scoreMap);
	imshow("ResultMasked", maskedOutScoreMap);
#endif
}
Author: brejski, Project: FingerTracking, Lines of code: 87, Source file: FingerTracker.cpp

Example 11: srcGray

/*
 * Class:     io_github_melvincabatuan_fullbodydetection_MainActivity
 * Method:    predict
 * Signature: (Landroid/graphics/Bitmap;[B)V
 */
JNIEXPORT void JNICALL Java_io_github_melvincabatuan_fullbodydetection_MainActivity_predict
  (JNIEnv * pEnv, jobject clazz, jobject pTarget, jbyteArray pSource){

   AndroidBitmapInfo bitmapInfo;
   uint32_t* bitmapContent; // Links to Bitmap content

   if(AndroidBitmap_getInfo(pEnv, pTarget, &bitmapInfo) < 0) abort();
   if(bitmapInfo.format != ANDROID_BITMAP_FORMAT_RGBA_8888) abort();
   if(AndroidBitmap_lockPixels(pEnv, pTarget, (void**)&bitmapContent) < 0) abort();

   /// Access source array data... OK
   jbyte* source = (jbyte*)pEnv->GetPrimitiveArrayCritical(pSource, 0);
   if (source == NULL) abort();

   /// cv::Mat for YUV420sp source and output BGRA 
    Mat srcGray(bitmapInfo.height, bitmapInfo.width, CV_8UC1, (unsigned char *)source);
    Mat mbgra(bitmapInfo.height, bitmapInfo.width, CV_8UC4, (unsigned char *)bitmapContent);

/***********************************************************************************************/
    /// Native Image Processing HERE... 
    if(DEBUG){
      LOGI("Starting native image processing...");
    }

    if (full_body_cascade.empty()){
       t = (double)getTickCount();
       sprintf( full_body_cascade_path, "%s/%s", getenv("ASSETDIR"), "haarcascade_fullbody.xml");       
    
      /* Load the full body cascade */
       if( !full_body_cascade.load(full_body_cascade_path) ){ 
           LOGE("Error loading full body cascade"); 
           abort(); 
       };

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
       LOGI("Loading full body cascade took %lf milliseconds.", t);
     }
    }
            
 
     std::vector<Rect> fbody;


       //-- Detect full body
       t = (double)getTickCount();
 
       /// Detection took cat_face_cascade.detectMultiScale() time = 655.334471 ms
      // cat_face_cascade.detectMultiScale( srcGray, faces, 1.1, 2 , 0 , Size(30, 30) ); // Scaling factor = 1.1;  minNeighbors = 2 ; flags = 0; minimumSize = 30,30

      // cat_face_cascade.detectMultiScale() time = 120.117185 ms
      // cat_face_cascade.detectMultiScale( srcGray, faces, 1.2, 3 , 0 , Size(64, 64));

 
      
      full_body_cascade.detectMultiScale( srcGray, fbody, 1.2, 2 , 0 , Size(14, 28));  // Size(double width, double height) 

      // scalingFactor parameters determine how much the classifier will be scaled up after each run.
      // minNeighbors parameter specifies how many positive neighbors a positive face rectangle should have to be considered a possible match; 
      // when a potential face rectangle is moved a pixel and does not trigger the classifier any more, it is most likely that it’s a false positive. 
      // Face rectangles with fewer positive neighbors than minNeighbors are rejected. 
      // If minNeighbors is set to zero, all potential face rectangles are returned. 
      // The flags parameter is from the OpenCV 1.x API and should always be 0. 
      // minimumSize specifies the smallest face rectangle we’re looking for. 

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("full_body_cascade.detectMultiScale() time = %lf milliseconds.", t);
      }


       // Iterate through all detected bodies and draw an ellipse around each
       t = (double)getTickCount();

       for( size_t i = 0; i < fbody.size(); i++ )
       {
          Point center(fbody[i].x + fbody[i].width / 2, fbody[i].y + fbody[i].height / 2);
          ellipse(srcGray, center, Size(fbody[i].width / 2, fbody[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
       }//endfor
  
       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("Drawing ellipses around detected bodies took %lf milliseconds.", t);
       }

       /// Display to Android
       cvtColor(srcGray, mbgra, CV_GRAY2BGRA);


      if(DEBUG){
        LOGI("Successfully finished native image processing...");
      }
   
/************************************************************************************************/ 
   
//......... part of the code omitted here .........
Author: DeLaSalleUniversity-Manila, Project: FullBodyDetection, Lines of code: 101, Source file: ImageProcessing.cpp
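
The comment block in this example summarizes what the detectMultiScale parameters mean. As a hedged illustration, a standalone desktop-side sketch using the same parameter values could look like the following; the helper name, the cascade file path, and the histogram-equalization step are assumptions, not part of the Android example above.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include <vector>

// Hypothetical desktop-side wrapper illustrating the detectMultiScale
// parameters discussed above; cascade path and helper name are assumptions.
std::vector<cv::Rect> detectBodies(const cv::Mat &bgrFrame)
{
    static cv::CascadeClassifier cascade("haarcascade_fullbody.xml");

    cv::Mat gray;
    cv::cvtColor(bgrFrame, gray, cv::COLOR_BGR2GRAY);
    cv::equalizeHist(gray, gray);  // optional contrast normalization

    std::vector<cv::Rect> bodies;
    // scaleFactor = 1.2: image pyramid shrinks by 20% per level.
    // minNeighbors = 2: a detection needs at least 2 overlapping neighbours.
    // flags = 0: legacy parameter from the OpenCV 1.x API.
    // minSize = 14x28: smallest body rectangle considered.
    cascade.detectMultiScale(gray, bodies, 1.2, 2, 0, cv::Size(14, 28));
    return bodies;
}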

Example 12: main


//......... part of the code omitted here .........
	catch (const std::runtime_error& error) {
		appLogger.error(error.what());
		return EXIT_FAILURE;
	}
	
	//const string windowName = "win";

	// Create the output directory if it doesn't exist yet
	if (!boost::filesystem::exists(outputPath)) {
		boost::filesystem::create_directory(outputPath);
	}
	

	std::chrono::time_point<std::chrono::system_clock> start, end;
	Mat img;
	morphablemodel::OpenCVCameraEstimation epnpCameraEstimation(morphableModel); // todo: this can all go to only init once
	morphablemodel::AffineCameraEstimation affineCameraEstimation(morphableModel);
	vector<imageio::ModelLandmark> landmarks;


	labeledImageSource->next();
	start = std::chrono::system_clock::now();
	appLogger.info("Starting to process " + labeledImageSource->getName().string());
	img = labeledImageSource->getImage();

	LandmarkCollection lms = labeledImageSource->getLandmarks();
	vector<shared_ptr<Landmark>> lmsv = lms.getLandmarks();
	landmarks.clear();
	Mat landmarksImage = img.clone(); // blue rect = the used landmarks
	for (const auto& lm : lmsv) {
		lm->draw(landmarksImage);
		//if (lm->getName() == "right.eye.corner_outer" || lm->getName() == "right.eye.corner_inner" || lm->getName() == "left.eye.corner_outer" || lm->getName() == "left.eye.corner_inner" || lm->getName() == "center.nose.tip" || lm->getName() == "right.lips.corner" || lm->getName() == "left.lips.corner") {
		landmarks.emplace_back(imageio::ModelLandmark(lm->getName(), lm->getPosition2D()));
		cv::rectangle(landmarksImage, cv::Point(cvRound(lm->getX() - 2.0f), cvRound(lm->getY() - 2.0f)), cv::Point(cvRound(lm->getX() + 2.0f), cvRound(lm->getY() + 2.0f)), cv::Scalar(255, 0, 0));
		//}
	}

	// Start affine camera estimation (Aldrian paper)
	Mat affineCamLandmarksProjectionImage = landmarksImage.clone(); // the affine LMs are currently not used (don't know how to render without z-vals)
	Mat affineCam = affineCameraEstimation.estimate(landmarks);
	for (const auto& lm : landmarks) {
		Vec3f tmp = morphableModel.getShapeModel().getMeanAtPoint(lm.getName());
		Mat p(4, 1, CV_32FC1);
		p.at<float>(0, 0) = tmp[0];
		p.at<float>(1, 0) = tmp[1];
		p.at<float>(2, 0) = tmp[2];
		p.at<float>(3, 0) = 1;
		Mat p2d = affineCam * p;
		Point2f pp(p2d.at<float>(0, 0), p2d.at<float>(1, 0)); // Todo: check
		cv::circle(affineCamLandmarksProjectionImage, pp, 4.0f, Scalar(0.0f, 255.0f, 0.0f));
	}
	// End Affine est.

	// Estimate the shape coefficients
	vector<float> fittedCoeffs;
	{
		// $\hat{V} \in R^{3N\times m-1}$, subselect the rows of the eigenvector matrix $V$ associated with the $N$ feature points
		// And we insert a row of zeros after every third row, resulting in matrix $\hat{V}_h \in R^{4N\times m-1}$:
		Mat V_hat_h = Mat::zeros(4 * landmarks.size(), morphableModel.getShapeModel().getNumberOfPrincipalComponents(), CV_32FC1);
		int rowIndex = 0;
		for (const auto& lm : landmarks) {
			Mat basisRows = morphableModel.getShapeModel().getPcaBasis(lm.getName()); // getPcaBasis should return the not-normalized basis I think
			basisRows.copyTo(V_hat_h.rowRange(rowIndex, rowIndex + 3));
			rowIndex += 4; // replace 3 rows and skip the 4th one, it has all zeros
		}
		// Form a block diagonal matrix $P \in R^{3N\times 4N}$ in which the camera matrix C (P_Affine, affineCam) is placed on the diagonal:
Author: fulrbuaa, Project: FeatureDetection, Lines of code: 67, Source file: fitter2.cpp

Example 13: main


//......... part of the code omitted here .........
	} catch (const boost::property_tree::ptree_error& error) {
		appLogger.error(error.what());
		return EXIT_FAILURE;
	}
	catch (const std::runtime_error& error) {
		appLogger.error(error.what());
		return EXIT_FAILURE;
	}

	// Create the output directory if it doesn't exist yet
	if (!boost::filesystem::exists(outputPath)) {
		boost::filesystem::create_directory(outputPath);
	}
	
	std::chrono::time_point<std::chrono::system_clock> start, end;
	Mat img;
	vector<imageio::ModelLandmark> landmarks;
	float lambda = 15.0f;

	LandmarkMapper landmarkMapper(landmarkMappings);

	labeledImageSource->next();
	start = std::chrono::system_clock::now();
	appLogger.info("Starting to process " + labeledImageSource->getName().string());
	img = labeledImageSource->getImage();

	LandmarkCollection lms = labeledImageSource->getLandmarks();
	LandmarkCollection didLms = landmarkMapper.convert(lms);
	landmarks.clear();
	Mat landmarksImage = img.clone(); // blue rect = the used landmarks
	for (const auto& lm : didLms.getLandmarks()) {
		lm->draw(landmarksImage);
		landmarks.emplace_back(imageio::ModelLandmark(lm->getName(), lm->getPosition2D()));
		cv::rectangle(landmarksImage, cv::Point(cvRound(lm->getX() - 2.0f), cvRound(lm->getY() - 2.0f)), cv::Point(cvRound(lm->getX() + 2.0f), cvRound(lm->getY() + 2.0f)), cv::Scalar(255, 0, 0));
	}

	// Start affine camera estimation (Aldrian paper)
	Mat affineCamLandmarksProjectionImage = landmarksImage.clone(); // the affine LMs are currently not used (don't know how to render without z-vals)
	
	// Convert the landmarks to clip-space
	vector<imageio::ModelLandmark> landmarksClipSpace;
	for (const auto& lm : landmarks) {
		cv::Vec2f clipCoords = render::utils::screenToClipSpace(lm.getPosition2D(), img.cols, img.rows);
		imageio::ModelLandmark lmcs(lm.getName(), Vec3f(clipCoords[0], clipCoords[1], 0.0f), lm.isVisible());
		landmarksClipSpace.push_back(lmcs);
	}
	
	Mat affineCam = fitting::estimateAffineCamera(landmarksClipSpace, morphableModel);

	// Render the mean-face landmarks projected using the estimated camera:
	for (const auto& lm : landmarks) {
		Vec3f modelPoint = morphableModel.getShapeModel().getMeanAtPoint(lm.getName());
		cv::Vec2f screenPoint = fitting::projectAffine(modelPoint, affineCam, img.cols, img.rows);
		cv::circle(affineCamLandmarksProjectionImage, Point2f(screenPoint), 4.0f, Scalar(0.0f, 255.0f, 0.0f));
	}

	// Estimate the shape coefficients:
	// Detector variances: Should not be in pixels. Should be normalised by the IED. Normalise by the image dimensions is not a good idea either, it has nothing to do with it. See comment in fitShapeToLandmarksLinear().
	// Let's just use the hopefully reasonably set default value for now (around 3 pixels)
	vector<float> fittedCoeffs = fitting::fitShapeToLandmarksLinear(morphableModel, affineCam, landmarksClipSpace, lambda);

	// Obtain the full mesh and render it using the estimated camera:
	Mesh mesh = morphableModel.drawSample(fittedCoeffs, vector<float>()); // takes standard-normal (not-normalised) coefficients

	render::SoftwareRenderer softwareRenderer(img.cols, img.rows);
	Mat fullAffineCam = fitting::calculateAffineZDirection(affineCam);
Author: Amos-zq, Project: FeatureDetection, Lines of code: 67, Source file: fittingRenderer.cpp

Example 14: main


//......... part of the code omitted here .........

    CvCapture *capture = cvCaptureFromFile(video_filename.c_str());

    int current_frame = cvGetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES);
    int total_frames = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
    int frame_width = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
    int frame_height = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);


    double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);

    cerr << "Video File Name: " << video_filename << endl;
    cerr << "Frames Per Second: " << fps << endl;
    cerr << "Frame Count: " << total_frames << endl;
    cerr << "Video Dimensions: " << frame_width << "x" << frame_height << endl;


    long start_time = time(NULL);

    vector<KeyPoint> target_keypoints;
    Mat target_descriptors;

    read_descriptors_and_keypoints(feature_filename, target_descriptors, target_keypoints);

    cout << "target_keypoints.size(): " << target_keypoints.size() << endl;

    current_frame = 0;

    while (current_frame < total_frames) {
        if (current_frame % 100 == 0) {
            cout << "FPS: " << current_frame/((double)time(NULL) - (double)start_time) << ", " << current_frame << "/" << total_frames << " frames. " << endl;
//            break;
        }

        Mat frame(cvarrToMat(cvQueryFrame(capture)));
        current_frame = cvGetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES);

        vector<KeyPoint> frame_keypoints, matched_keypoints;
        Mat frame_descriptors, matched_descriptors;

        get_keypoints_and_descriptors(frame, frame_descriptors, frame_keypoints);

        float min_min_distance = 128.0;
        float max_min_distance = 0.0;
        float min_max_distance = 128.0;
        float max_max_distance = 0.0;

        for (int i = 0; i < target_descriptors.rows; i++) {
            // Get the minimum and maximum Euclidean distance from this target descriptor to the descriptors in the current frame
            float min_euclidian_distance = 128.0;
            float max_euclidian_distance = 0.0;

            for (int j = 0; j < frame_descriptors.rows; j++) {
                float euclidian_distance = 0;

                for (int k = 0; k < target_descriptors.cols; k++) {
                    float tmp = target_descriptors.at<float>(i,k) - frame_descriptors.at<float>(j,k);
                    euclidian_distance += tmp * tmp;   // accumulate into the per-pair distance, not the running minimum
                }   
                euclidian_distance = sqrt(euclidian_distance);

                if (euclidian_distance < min_euclidian_distance) min_euclidian_distance = euclidian_distance;
                if (euclidian_distance > max_euclidian_distance) max_euclidian_distance = euclidian_distance;
            }   

            cout << "\tmin_euclidian_distance[" << i << "]: " << min_euclidian_distance << ", max_euclidian_distance: " << max_euclidian_distance << endl;

            if (min_min_distance > min_euclidian_distance) min_min_distance = min_euclidian_distance;
            if (max_min_distance < min_euclidian_distance) max_min_distance = min_euclidian_distance;
            if (min_max_distance > max_euclidian_distance) min_max_distance = max_euclidian_distance;
            if (max_max_distance < max_euclidian_distance) max_max_distance = max_euclidian_distance;

            if (min_euclidian_distance <= 1.6) {
                int x = target_keypoints[i].pt.x;
                int y = target_keypoints[i].pt.y;

                matched_keypoints.push_back( target_keypoints[i] );
                matched_descriptors.push_back( target_descriptors.row(i) );

                cout << "matched keypoint with x: " << x << ", y: " << y << endl;
            }   
        }   

		// Code to draw the points.
        Mat frame_with_target_keypoints;
        drawKeypoints(frame, matched_keypoints, frame_with_target_keypoints, Scalar::all(-1), DrawMatchesFlags::DEFAULT);

        rectangle(frame_with_target_keypoints,  Point(remove_rect_1_x1, remove_rect_1_y1), Point(remove_rect_1_x2, remove_rect_1_y2), Scalar(0, 0, 255), 1, 8, 0);
        rectangle(frame_with_target_keypoints,  Point(remove_rect_2_x1, remove_rect_2_y1), Point(remove_rect_2_x2, remove_rect_2_y2), Scalar(0, 0, 255), 1, 8, 0);

		imshow("SURF - Frame", frame_with_target_keypoints);
		if(cvWaitKey(15) == 27) break;
	}

    cvDestroyWindow("SURF");

    cvReleaseCapture(&capture);

    return 0;
}
Author: Kazz47, Project: wildlife_at_home, Lines of code: 101, Source file: wildlife_feature_matcher.cpp
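
The nested loop in this example accumulates squared differences by hand. Assuming both descriptor matrices are CV_32F rows of equal width, the same per-pair distance could also be obtained with cv::norm, as in this illustrative sketch (the helper name is an assumption):

#include <opencv2/core.hpp>

// Hypothetical equivalent of the hand-rolled inner loop above: cv::norm with
// NORM_L2 computes the Euclidean distance between two descriptor rows.
float descriptorDistance(const cv::Mat &target_descriptors, int i,
                         const cv::Mat &frame_descriptors, int j)
{
    return static_cast<float>(
        cv::norm(target_descriptors.row(i), frame_descriptors.row(j), cv::NORM_L2));
}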


Note: The cv::Scalar method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.