

C++ FaceTracker Class Code Examples

This article collects typical usage examples of the C++ FaceTracker class. If you have been wondering how the FaceTracker class is used in C++, what it is for, or what real-world code that uses it looks like, the curated examples below may help.


The following presents 15 code examples of the FaceTracker class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code samples.

Example 1: main

int main(){
  FaceTracker F;
  vector<string> class_name;
  vector<int> class_label;
  vector<Rect> face_pos;
  Mat frame, prev_img, buffer;
  vector<vector<Point2f> > features;
  string classifier = "fisher";

  VideoCapture cap;
  cap.open("/home/rajeev/Dropbox/vios_team_use/face_recognition/videos/bbt_s04e18_hd.avi");
  namedWindow("Features", 1);
  while (true) {
    cap >> frame;
    if (frame.empty())
      break;
    // Track faces with KLT; updates the class labels, face boxes and feature points.
    F.klt_track_face(class_name, class_label, face_pos, frame, prev_img, features, classifier);
    frame.copyTo(prev_img);
    frame.copyTo(buffer);
    // Draw the tracked feature points onto the display buffer.
    for (size_t i = 0; i < features.size(); i++) {
      for (size_t j = 0; j < features[i].size(); j++) {
        circle(buffer, features[i][j], 2, CV_RGB(0, 255, 0), 1, 8, 0);
      }
    }
    imshow("Features", buffer);  // show the annotated buffer (the original showed the bare frame)
    if (waitKey(20) == 27)  // Esc exits
      break;
  }
  return 0;
}
Developer ID: ESE519, Project: Authoring_Tools_4_product_tagging, Lines of code: 30, Source: main2.cpp

Example 2: get_facial_points

int get_facial_points(Mat& face, vector<Point_<double> >& points)
{
    FaceTracker *tracker = LoadFaceTracker(DefaultFaceTrackerModelPathname().c_str());
    FaceTrackerParams *tracker_params = LoadFaceTrackerParams(DefaultFaceTrackerParamsPathname().c_str());

    Mat frame_gray;
    cvtColor(face, frame_gray, CV_RGB2GRAY);

    int result = tracker->NewFrame(frame_gray, tracker_params);

    Pose pose;
    int success = 0;

    if (result >= 1) {
        points = tracker->getShape();
        pose = tracker->getPose();
        success = 1;
    }

    // Free both allocations on every path; the original returned early on
    // failure and leaked the tracker and its parameters.
    delete tracker;
    delete tracker_params;

    return success;
}
Developer ID: schue, Project: face-analysis-sdk, Lines of code: 25, Source: main.cpp
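A note on the pattern above: LoadFaceTracker and LoadFaceTrackerParams hand back heap-allocated objects that the caller must delete, which makes early returns easy to get wrong. A minimal RAII sketch, assuming the factory functions allocate with new, as the delete calls in these examples suggest:

#include <memory>

// Hypothetical RAII variant of the loading pattern; not part of the SDK samples.
std::unique_ptr<FaceTracker> tracker(
    LoadFaceTracker(DefaultFaceTrackerModelPathname().c_str()));
std::unique_ptr<FaceTrackerParams> tracker_params(
    LoadFaceTrackerParams(DefaultFaceTrackerParamsPathname().c_str()));

int result = tracker->NewFrame(frame_gray, tracker_params.get());
// ... read tracker->getShape() / tracker->getPose() as above ...
// Both objects are released automatically on every return path.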

Example 3: toggleMute

void CameraToolBox::toggleMute() {
    delete _doubleClickTimer;
    _doubleClickTimer = NULL;

    FaceTracker* faceTracker = Application::getInstance()->getSelectedFaceTracker();
    if (faceTracker) {
        faceTracker->toggleMute();
    }
}
Developer ID: bakarih, Project: hifi, Lines of code: 9, Source: CameraToolBox.cpp

Example 4: FaceTrackingStaticThread

DWORD WINAPI KinectWindow::FaceTrackingStaticThread(PVOID lpParam)
{
    KinectWindow* context = static_cast<KinectWindow*>(lpParam);
    if (context)
    {
        FaceTracker* pFaceTracker = NULL;
        context->GetFaceTraker(&pFaceTracker);

        return pFaceTracker->FaceTrackingThread();
    }
    return 0;
}
Developer ID: tangguang, Project: KinectV1Explorer_GuangTang, Lines of code: 12, Source: KinectWindow.cpp
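For context, a static trampoline like FaceTrackingStaticThread is normally passed to the Win32 CreateThread call with the owning object as the thread parameter. A minimal sketch, assuming it is launched from a KinectWindow member function (hThread is a hypothetical local, not shown in the project):

// Inside a KinectWindow member function.
HANDLE hThread = CreateThread(
    NULL,                                    // default security attributes
    0,                                       // default stack size
    KinectWindow::FaceTrackingStaticThread,  // static entry point
    this,                                    // delivered as lpParam above
    0,                                       // run immediately
    NULL);                                   // thread id not needed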

Example 5: run_image_mode

int
run_image_mode(const Configuration &cfg,
	       const CommandLineArgument<std::string> &image_argument,
	       const CommandLineArgument<std::string> &landmarks_argument)
{  
  FaceTracker * tracker = LoadFaceTracker(cfg.model_pathname.c_str());
  FaceTrackerParams *tracker_params  = LoadFaceTrackerParams(cfg.params_pathname.c_str());

  cv::Mat image;
  cv::Mat_<uint8_t> gray_image = load_grayscale_image(image_argument->c_str(), &image);

  int result = tracker->NewFrame(gray_image, tracker_params);

  std::vector<cv::Point_<double> > shape;
  std::vector<cv::Point3_<double> > shape3;
  Pose pose;
  
  if (result >= cfg.tracking_threshold) {
    shape = tracker->getShape();
    shape3 = tracker->get3DShape();
    pose = tracker->getPose();
  }

  if (!have_argument_p(landmarks_argument)) {
    display_data(cfg, image, shape, pose); 
  } else if (shape.size() > 0) {
    if (cfg.save_3d_points)
      save_points3(landmarks_argument->c_str(), shape3);
    else
      save_points(landmarks_argument->c_str(), shape);
  }
 
  delete tracker;
  delete tracker_params; 
  
  return 0;
}
Developer ID: 23119841, Project: face-analysis-sdk, Lines of code: 37, Source: main.cpp

Example 6: calculateMouthShapes

void Head::simulate(float deltaTime, bool isMine, bool billboard) {
    //  Update audio trailing average for rendering facial animations
    const float AUDIO_AVERAGING_SECS = 0.05f;
    const float AUDIO_LONG_TERM_AVERAGING_SECS = 30.0f;
    _averageLoudness = glm::mix(_averageLoudness, _audioLoudness, glm::min(deltaTime / AUDIO_AVERAGING_SECS, 1.0f));

    if (_longTermAverageLoudness == -1.0f) {
        _longTermAverageLoudness = _averageLoudness;
    } else {
        _longTermAverageLoudness = glm::mix(_longTermAverageLoudness, _averageLoudness, glm::min(deltaTime / AUDIO_LONG_TERM_AVERAGING_SECS, 1.0f));
    }

    if (isMine) {
        MyAvatar* myAvatar = static_cast<MyAvatar*>(_owningAvatar);
        
        // Only use face trackers when not playing back a recording.
        if (!myAvatar->isPlaying()) {
            FaceTracker* faceTracker = Application::getInstance()->getActiveFaceTracker();
            _isFaceTrackerConnected = faceTracker != NULL && !faceTracker->isMuted();
            if (_isFaceTrackerConnected) {
                _blendshapeCoefficients = faceTracker->getBlendshapeCoefficients();

                if (typeid(*faceTracker) == typeid(DdeFaceTracker)) {

                    if (Menu::getInstance()->isOptionChecked(MenuOption::UseAudioForMouth)) {
                        calculateMouthShapes();

                        const int JAW_OPEN_BLENDSHAPE = 21;
                        const int MMMM_BLENDSHAPE = 34;
                        const int FUNNEL_BLENDSHAPE = 40;
                        const int SMILE_LEFT_BLENDSHAPE = 28;
                        const int SMILE_RIGHT_BLENDSHAPE = 29;
                        _blendshapeCoefficients[JAW_OPEN_BLENDSHAPE] += _audioJawOpen;
                        _blendshapeCoefficients[SMILE_LEFT_BLENDSHAPE] += _mouth4;
                        _blendshapeCoefficients[SMILE_RIGHT_BLENDSHAPE] += _mouth4;
                        _blendshapeCoefficients[MMMM_BLENDSHAPE] += _mouth2;
                        _blendshapeCoefficients[FUNNEL_BLENDSHAPE] += _mouth3;
                    }

                    applyEyelidOffset(getFinalOrientationInWorldFrame());
                }
            }

            auto eyeTracker = DependencyManager::get<EyeTracker>();
            _isEyeTrackerConnected = eyeTracker->isTracking();
        }

        if (!myAvatar->getStandingHMDSensorMode()) {
            //  Twist the upper body to follow the rotation of the head, but only do this with my avatar,
            //  since everyone else will see the full joint rotations for other people.  
            const float BODY_FOLLOW_HEAD_YAW_RATE = 0.1f;
            const float BODY_FOLLOW_HEAD_FACTOR = 0.66f;
            float currentTwist = getTorsoTwist();
            setTorsoTwist(currentTwist + (getFinalYaw() * BODY_FOLLOW_HEAD_FACTOR - currentTwist) * BODY_FOLLOW_HEAD_YAW_RATE);
        }
    }
   
    if (!(_isFaceTrackerConnected || billboard)) {

        if (!_isEyeTrackerConnected) {
            // Update eye saccades
            const float AVERAGE_MICROSACCADE_INTERVAL = 1.0f;
            const float AVERAGE_SACCADE_INTERVAL = 6.0f;
            const float MICROSACCADE_MAGNITUDE = 0.002f;
            const float SACCADE_MAGNITUDE = 0.04f;
            const float NOMINAL_FRAME_RATE = 60.0f;

            if (randFloat() < deltaTime / AVERAGE_MICROSACCADE_INTERVAL) {
                _saccadeTarget = MICROSACCADE_MAGNITUDE * randVector();
            } else if (randFloat() < deltaTime / AVERAGE_SACCADE_INTERVAL) {
                _saccadeTarget = SACCADE_MAGNITUDE * randVector();
            }
            _saccade += (_saccadeTarget - _saccade) * pow(0.5f, NOMINAL_FRAME_RATE * deltaTime);
        } else {
            _saccade = glm::vec3();
        }

        //  Detect transition from talking to not; force blink after that and a delay
        bool forceBlink = false;
        const float TALKING_LOUDNESS = 100.0f;
        const float BLINK_AFTER_TALKING = 0.25f;
        if ((_averageLoudness - _longTermAverageLoudness) > TALKING_LOUDNESS) {
            _timeWithoutTalking = 0.0f;
        
        } else if (_timeWithoutTalking < BLINK_AFTER_TALKING && (_timeWithoutTalking += deltaTime) >= BLINK_AFTER_TALKING) {
            forceBlink = true;
        }
                                 
        //  Update audio attack data for facial animation (eyebrows and mouth)
        const float AUDIO_ATTACK_AVERAGING_RATE = 0.9f;
        _audioAttack = AUDIO_ATTACK_AVERAGING_RATE * _audioAttack + (1.0f - AUDIO_ATTACK_AVERAGING_RATE) * fabs((_audioLoudness - _longTermAverageLoudness) - _lastLoudness);
        _lastLoudness = (_audioLoudness - _longTermAverageLoudness);
        
        const float BROW_LIFT_THRESHOLD = 100.0f;
        if (_audioAttack > BROW_LIFT_THRESHOLD) {
            _browAudioLift += sqrtf(_audioAttack) * 0.01f;
        }
        _browAudioLift = glm::clamp(_browAudioLift *= 0.7f, 0.0f, 1.0f);
        
        const float BLINK_SPEED = 10.0f;
//......... remainder of the code omitted .........
Developer ID: hoster123, Project: hifi, Lines of code: 101, Source: Head.cpp

Example 7: if

/// <summary>
/// Process Kinect window menu commands
/// </summary>
/// <param name="commandId">ID of the menu item</param>
/// <param name="param">Parameter passed in along with the command ID</param>
/// <param name="previouslyChecked">Check status of menu item before command is issued</param>
void KinectSettings::ProcessMenuCommand(WORD commandId, WORD param, bool previouslyChecked)
{
	DWORD ExitCode;
	FaceTracker* pFaceTracker;
	InbedAPPs* pFallDetect;
	DepthInbedAPPs* pDepthInbedApps;
	LegRaisExcer* pLegRaisExer;
	HandRaisExcer* pHandRaisExer;
	
	m_pKinectWindow->GetFaceTraker(&pFaceTracker);
	m_pPrimaryView->GetFallDetect(&pFallDetect);
	m_pDepthStream->GetDepthInbedAPPs(&pDepthInbedApps);
	m_pPrimaryView->GetLegRaisExcer(&pLegRaisExer);
	m_pPrimaryView->GetHandRaisExcer(&pHandRaisExer);

    if (ID_COLORSTREAM_PAUSE == commandId)
    {
        // Pause color stream
        if (m_pColorStream)
        {
            m_pColorStream->PauseStream(!previouslyChecked);
        }
    }
    else if (ID_COLORSTREAM_RESOLUTION_START <= commandId && ID_COLORSTREAM_RESOLUTION_END >= commandId)
    {
        // Set color stream format and resolution
        if (!m_pColorStream)
        {
            return;
        }

        switch (commandId)
        {
        case ID_RESOLUTION_RGBRESOLUTION640X480FPS30:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_640x480);
            break;

        case ID_RESOLUTION_RGBRESOLUTION1280X960FPS12:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_1280x960);
            break;

        case ID_RESOLUTION_YUVRESOLUTION640X480FPS15:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR_YUV);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_640x480);
            break;

        case ID_RESOLUTION_INFRAREDRESOLUTION640X480FPS30:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR_INFRARED);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_640x480);
            break;

        case ID_RESOLUTION_RAWBAYERRESOLUTION640X480FPS30:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR_RAW_BAYER);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_640x480);
            break;

        case ID_RESOLUTION_RAWBAYERRESOLUTION1280X960FPS12:
            m_pColorStream->SetImageType(NUI_IMAGE_TYPE_COLOR_RAW_BAYER);
            m_pColorStream->SetImageResolution(NUI_IMAGE_RESOLUTION_1280x960);
            break;

        default:
            return;
        }

        m_pColorStream->OpenStream();
    }
    else if (ID_DEPTHSTREAM_PAUSE == commandId)
    {
        // Pause depth stream
        if(m_pDepthStream)
        {
            m_pDepthStream->PauseStream(!previouslyChecked);
        }
    }
    else if (ID_DEPTHSTREAM_RANGEMODE_START <= commandId && ID_DEPTHSTREAM_RANGEMODE_END >= commandId)
    {
        // Set depth stream range mode
        bool nearMode = false;
        switch (commandId)
        {
        case ID_RANGEMODE_DEFAULT:
            nearMode = false;
            break;

        case ID_RANGEMODE_NEAR:
            nearMode = true;
            break;

        default:
            return;
        }
//.........这里部分代码省略.........
Developer ID: tangguang, Project: KinectV1Explorer_GuangTang, Lines of code: 101, Source: KinectSettings.cpp

Example 8: main

int main()
{
	try
	{
		FaceTracker faceTracker;
		faceTracker.Initialize();
		faceTracker.Start(true);
		FatigueDetection fatigueDetection;
		
		int simpleCounter = 0;

		int* IDPtr = faceTracker.GetIDs();

		//std::cout << *IDPtr << std::endl;

		for (;;) {
			IDPtr = faceTracker.GetIDs();
			if (simpleCounter % 15 == 0) {
				/*fatigueDetection.DetectFatigue(faceTracker.GetPose());
				fatigueDetection.DetectYawn(faceTracker.GetFaceFeatures());*/

				std::cout << '#' << simpleCounter / 15;
				
				// The tracker reports up to six IDs.
				for (int personNum = 0; personNum < 6; personNum++) {
					if (*(IDPtr + personNum) != -1) {
						std::cout << "\t" << *(IDPtr + personNum) << '\t';
					}
				}

				std::cout << std::endl;

				if (faceTracker.FaceFound()) {
					std::cout << faceTracker.GetFaceFeatures();
					std::cout << "Is Yawning? : " << fatigueDetection.DetectYawn(faceTracker.GetFaceFeatures()) << std::endl;

				}
				
				else {
					std::cout << "FACE NOT FOUND! >:(" << std::endl;
					for (int personNum = 0; personNum < 6; personNum++) {
						IDPtr[personNum] = -1;
					}
				}
				std::cout << std::endl;
			}
			simpleCounter++;

			if (cv::waitKey(33) == 'a')
			{
				break;
			}
		}
	}
	catch (FaceTrackerException& error)
	{
		std::cout << error.what() << std::endl;
	}
	return EXIT_SUCCESS;
}
Developer ID: scanavan, Project: PoseEstimation, Lines of code: 62, Source: main.cpp

Example 9: main

/** @function main */
int main( int argc, const char** argv )
{
	// check arguments
//	if(argc < 2)
//	{
//		printf("ERROR: Please enter window position x & y\n");
//		exit(0);
//	}
//	
//	int windowX = atoi(argv[1]); // usually 5
//	int windowY = atoi(argv[2]); // usually 14
	int windowX = 1274; // usually 5
	int windowY = 280; // usually 14
	
	CvCapture* capture;
	IplImage* frame;
	FaceTracker* faceTracker = new FaceTracker("/opt/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml");
	capture = cvCaptureFromCAM(2);
	
	if(capture)
	{
		cvNamedWindow ("img", CV_WINDOW_AUTOSIZE);
		cvMoveWindow("img", windowX, windowY);
		
		//Set the background to black
		image = cvLoadImage( "../../data/grandTetons_very_small.jpg", CV_LOAD_IMAGE_UNCHANGED) ; 
		
		//background = cvCreateImage(cvSize(1024, 768),image->depth,image->nChannels);
		background = cvCreateImage(cvSize(resX, resY),image->depth,image->nChannels);
		
		bgMax = Point(background->width-1,background->height-1);
		cvRectangle(background, Point(0,0), Point(background->width,background->height), cvScalar(0, 0, 0), CV_FILLED, 8, 0);
		
		setupParticles();
		
		Face face;
		face.x = background->width/2;
		face.y = background->height/2;
		face.radius = 1;
		
		while( true )
		{
			
			frame = cvQueryFrame( capture );
			faceTracker->findFace(frame);
			
			if (faceTracker->numFaces) {
						
				// scale face position in relation to background width				
				int xPerc = frame->width - faceTracker->face.x; // invert to flip
				
				xPerc = (float)xPerc * ((float)image->width/(float)frame->width);
				xPerc += xOffset;
				
				
				int yPerc = faceTracker->face.y;
				
				yPerc = (float)yPerc * ((float)image->height/(float)frame->height);
				yPerc += yOffset;
				
				
				int rPerc = faceTracker->face.radius;				// scale face position in relation to background width				
				rPerc = (float)rPerc * ((float)image->width/(float)frame->width);
				
				//printf("x %d y %d r %d\n", xPerc, yPerc, rPerc);

				face.x = xPerc;
				face.y = yPerc;
				face.radius = rPerc;
				
				face.radius *= 1.8;
				
				cvRectangle(background, Point(0,0), Point(background->width,background->height), cvScalar(0, 0, 0), CV_FILLED, 8, 0);
				update(&face);
				
				// put video behind image
				cvFlip(frame, NULL, 1);		// flip image so it mirrors the user
				cvSetImageROI(background, cvRect(xOffset, yOffset, image->width-1, image->height));
				cvResize(frame, background, CV_INTER_LINEAR);
				cvResetImageROI(background);
			}
			else {
				face.x = 0;
				face.y = 0;
				face.radius = 1;
				cvRectangle(background, Point(0,0), Point(background->width,background->height), cvScalar(0, 0, 0), CV_FILLED, 8, 0);
				update(&face);
			}

			draw(background);
				
			cvShowImage("img", background);
			
			int c = waitKey(1);
			switch (c) {
				case 'c':
				case 'C':
					cursorMode = ( cursorMode + 1 > 1 ) ? 0 : 1 ; 
//......... remainder of the code omitted .........
Developer ID: voidnoise, Project: kswipe, Lines of code: 101, Source: main.cpp

Example 10: toggleCameraMute

void AvatarInputs::toggleCameraMute() {
    FaceTracker* faceTracker = Application::getInstance()->getSelectedFaceTracker();
    if (faceTracker) {
        faceTracker->toggleMute();
    }
}
Developer ID: DaveDubUK, Project: hifi, Lines of code: 6, Source: AvatarInputs.cpp

Example 11: run_video_mode

int
run_video_mode(const Configuration &cfg,
	       const CommandLineArgument<std::string> &image_argument,
	       const CommandLineArgument<std::string> &landmarks_argument)
{
  FaceTracker *tracker = LoadFaceTracker(cfg.model_pathname.c_str());
  FaceTrackerParams *tracker_params = LoadFaceTrackerParams(cfg.params_pathname.c_str());

  assert(tracker);
  assert(tracker_params);

  cv::VideoCapture input(image_argument->c_str());
  if (!input.isOpened())
    throw make_runtime_error("Unable to open video file '%s'", image_argument->c_str());

  cv::Mat image;

  std::vector<char> pathname_buffer;
  pathname_buffer.resize(1000);

  input >> image;
  int frame_number = 1;

  while ((image.rows > 0) && (image.cols > 0)) {
    if (cfg.verbose) {
      printf(" Frame number %d\r", frame_number);
      fflush(stdout);
    }

    cv::Mat_<uint8_t> gray_image;
    if (image.type() == cv::DataType<cv::Vec<uint8_t,3> >::type)
      cv::cvtColor(image, gray_image, CV_BGR2GRAY);
    else if (image.type() == cv::DataType<uint8_t>::type)
      gray_image = image;
    else
      throw make_runtime_error("Do not know how to convert video frame to a grayscale image.");

    int result = tracker->Track(gray_image, tracker_params);

    std::vector<cv::Point_<double> > shape;
    std::vector<cv::Point3_<double> > shape3D;
    Pose pose;

    if (result >= cfg.tracking_threshold) {
      shape = tracker->getShape();
      shape3D = tracker->get3DShape();
      pose = tracker->getPose();
    } else {
      tracker->Reset();
    }

    if (!have_argument_p(landmarks_argument)) {
      display_data(cfg, image, shape, pose);
    } else if (shape.size() > 0) {
      snprintf(pathname_buffer.data(), pathname_buffer.size(), landmarks_argument->c_str(), frame_number);

      if (cfg.save_3d_points)	
	save_points3(pathname_buffer.data(), shape3D);
      else
	save_points(pathname_buffer.data(), shape);

      if (cfg.verbose)
	display_data(cfg, image, shape, pose);
    } else if (cfg.verbose) {
      display_data(cfg, image, shape, pose);
    }

    input >> image;
    frame_number++;
  }

  delete tracker;
  delete tracker_params; 

  return 0;
}
Developer ID: 23119841, Project: face-analysis-sdk, Lines of code: 76, Source: main.cpp
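Note that in video mode the landmarks argument acts as a printf-style template: the snprintf call above substitutes the current frame number into it, so a pattern along the lines of landmarks_%06d.pts (a hypothetical example) yields one landmark file per frame.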

Example 12: run_lists_mode

// Helpers
int
run_lists_mode(const Configuration &cfg,
	       const CommandLineArgument<std::string> &image_argument,
	       const CommandLineArgument<std::string> &landmarks_argument)
{
  FaceTracker * tracker = LoadFaceTracker(cfg.model_pathname.c_str());
  FaceTrackerParams *tracker_params  = LoadFaceTrackerParams(cfg.params_pathname.c_str());

  std::list<std::string> image_pathnames = read_list(image_argument->c_str());
  std::list<std::string> landmark_pathnames;
  if (have_argument_p(landmarks_argument)) {
    landmark_pathnames = read_list(landmarks_argument->c_str());
    if (landmark_pathnames.size() != image_pathnames.size())
      throw make_runtime_error("Number of pathnames in list '%s' does not match the number in '%s'",
			       image_argument->c_str(), landmarks_argument->c_str());
  }

  std::list<std::string>::const_iterator image_it     = image_pathnames.begin();
  std::list<std::string>::const_iterator landmarks_it = landmark_pathnames.begin();
  const int number_of_images = image_pathnames.size();
  int current_image_index = 1;

  for (; image_it != image_pathnames.end(); image_it++) {
    if (cfg.verbose) {
      printf(" Image %d/%d\r", current_image_index, number_of_images);    
      fflush(stdout);
    }
    current_image_index++;

    cv::Mat image;
    cv::Mat_<uint8_t> gray_image = load_grayscale_image(image_it->c_str(), &image);
    int result = tracker->NewFrame(gray_image, tracker_params);

    std::vector<cv::Point_<double> > shape;
    std::vector<cv::Point3_<double> > shape3D;
    Pose pose;
    if (result >= cfg.tracking_threshold) {
      shape = tracker->getShape();
      shape3D = tracker->get3DShape();
      pose = tracker->getPose();
    } else {
      tracker->Reset();
    }

    if (!have_argument_p(landmarks_argument)) {
      display_data(cfg, image, shape, pose);
    } else if (shape.size() > 0) {
      if (cfg.save_3d_points)	
	save_points3(landmarks_it->c_str(), shape3D);
      else
	save_points(landmarks_it->c_str(), shape);

      if (cfg.verbose)
	display_data(cfg, image, shape, pose);
    } else if (cfg.verbose) {
      display_data(cfg, image, shape, pose);
    }

    if (have_argument_p(landmarks_argument))
      landmarks_it++;
  }  

  delete tracker;
  delete tracker_params; 
  
  return 0;
}
Developer ID: 23119841, Project: face-analysis-sdk, Lines of code: 68, Source: main.cpp

Example 13: main

// NOTE: `tracker` is a file-scope FaceTracker instance in the original source;
// its declaration is not part of this snippet.
int main(int, char**)
{
    VideoCapture cap(0); // open the default camera
    if(!cap.isOpened())  return -1; // check if we succeeded

    Mat frame;
    namedWindow("tracking",1);

    while(true)
    {
        cap >> frame;

        tracker.track(frame);

        for(auto face_center : tracker.faces_centers){
             ellipse( frame, face_center, Size( 5, 5), 0, 0, 360, Scalar( 255, 255, 0 ), 4, 8, 0 ); // could be a box/rectangle ~ the size of the face
        }

        imshow("tracking", frame);
        if(waitKey(30) >= 0) break;	

    }

    return 0;
}
Developer ID: alexis-jacq, Project: camera_tracker, Lines of code: 25, Source: capture.cpp

Example 14: process

// ------------------------------
void Moustachizer::process(Mat frame) {
	
	//circle(frame, Point(300,300), 300, Scalar(255,0,0), 3);
	Mat grayFrame = frame.clone();
	cvtColor(frame, grayFrame, CV_RGB2GRAY);
	equalizeHist(grayFrame, grayFrame);
	imshow("grayFrame", grayFrame);
	faceTracker.search( grayFrame );
	
	
	
	for(int i=0; i<faceTracker.faces.size(); i++)
	{
		Face face = faceTracker.faces[i];
		face.draw( frame );
		
		float scale =  (float)face.boundingBox.width / stache.size().width;
		
		Mat stache_resized;
		Mat mask_resized;
		resize(stache, stache_resized, Size(), scale, scale);
		resize(mask, mask_resized, Size(), scale, scale);
		
		float xpos = face.boundingBox.x;
		float ypos = face.boundingBox.y + (face.boundingBox.height * .60);
		Rect pos = Rect(xpos, ypos, stache_resized.size().width, stache_resized.size().height);
		
		/*
		 Rect frame = Rect(0, 0, input.size().width, input.size().height);
		 Rect intersection = pos & frame;
		 Mat fg = stache_resized(Rect(0,0,intersection.width,intersection.height));
		 Mat bg = input(Rect(xpos,ypos,intersection.width,intersection.height));
		 */
		
		Mat bg = frame(pos);
		stache_resized.copyTo(bg, mask_resized);	
	}
	
	//cvtColor(input, input, CV_GRAY2RGB);
	imshow("preview", frame);
	
	cvWaitKey(1);
}
Developer ID: drjou, Project: Unlogo, Lines of code: 44, Source: moustachizer.cpp

Example 15: init

// ------------------------------
int Moustachizer::init(const char* argstr) {

	faceTracker.init();
	
	const char* fileName = "images/moustache4.jpg";
	stache = imread(fileName, 1);
	
	// OpenCV can't load 4 channel images, which is a huge pain
	// so I am pulling out the Value channel from the moustache image
	// to use as a mask for drawing the moustache into the main frame.
	Mat hsvimg;
	cvtColor(stache, hsvimg, CV_RGB2HSV);
	vector<Mat> hsvchannels;
	split(hsvimg, hsvchannels);	
	bitwise_not(hsvchannels[2], mask); 
	erode(mask, mask, Mat(), Point(-1,-1), 4);
	dilate(mask, mask, Mat(), Point(-1,-1), 2);
	
	return 0;
}
Developer ID: drjou, Project: Unlogo, Lines of code: 21, Source: moustachizer.cpp
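The Value-channel workaround above reflects older OpenCV builds; current versions can load a transparent PNG directly with cv::IMREAD_UNCHANGED and use its alpha plane as the copy mask. A minimal sketch, assuming the artwork exists as a hypothetical images/moustache4.png with an alpha channel:

#include <opencv2/opencv.hpp>

// Load BGRA and split the alpha plane off to use as the mask.
cv::Mat rgba = cv::imread("images/moustache4.png", cv::IMREAD_UNCHANGED);
CV_Assert(rgba.channels() == 4);

std::vector<cv::Mat> planes;
cv::split(rgba, planes);              // B, G, R, A
cv::Mat mask = planes[3];             // 8-bit alpha plane as the mask
cv::Mat stache;
cv::merge(std::vector<cv::Mat>(planes.begin(), planes.begin() + 3), stache);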


Note: The FaceTracker class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright in the source code remains with the original authors, and distribution and use are subject to the corresponding project's license. Please do not reproduce without permission.