C++ Context::WaitOneUpdateAll Method Code Examples

This article collects typical usage examples of the C++ method xn::Context::WaitOneUpdateAll. If you are wondering what Context::WaitOneUpdateAll does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples of the containing class, xn::Context.


Fifteen code examples of the Context::WaitOneUpdateAll method are shown below, sorted by popularity by default.
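Before the examples, here is a minimal, self-contained sketch of the call pattern they all share: initialize a context, create a generator, start generation, then call WaitOneUpdateAll in the main loop and check its return status. It uses only OpenNI 1.x C++ wrapper calls that also appear in the examples below; the 100-frame loop and the printed middle-pixel value are arbitrary choices for illustration.

#include <XnCppWrapper.h>
#include <cstdio>

int main()
{
	xn::Context context;
	xn::DepthGenerator depth;

	// Initialize the context and create a depth generator node
	XnStatus rc = context.Init();
	if (rc != XN_STATUS_OK) { printf("Init failed: %s\n", xnGetStatusString(rc)); return 1; }

	rc = depth.Create(context);
	if (rc != XN_STATUS_OK) { printf("Create depth failed: %s\n", xnGetStatusString(rc)); return 1; }

	rc = context.StartGeneratingAll();
	if (rc != XN_STATUS_OK) { printf("StartGenerating failed: %s\n", xnGetStatusString(rc)); return 1; }

	// Main loop: block until the depth node has new data, then update all nodes
	for (int i = 0; i < 100; ++i)
	{
		rc = context.WaitOneUpdateAll(depth);
		if (rc != XN_STATUS_OK)
		{
			printf("Failed updating data: %s\n", xnGetStatusString(rc));
			continue;
		}

		xn::DepthMetaData depthMD;
		depth.GetMetaData(depthMD);
		printf("Frame %d: middle pixel = %u\n",
		       depthMD.FrameID(), depthMD(depthMD.XRes() / 2, depthMD.YRes() / 2));
	}

	context.Release();
	return 0;
}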

Example 1: glutDisplay

// this function is called each frame
void glutDisplay(void) {
    //send stuff twice every frame
    executeInputLoop();
    executeInputLoop();

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;

	g_DepthGenerator.GetMetaData(depthMD);

	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	glDisable(GL_TEXTURE_2D);

	if (!g_bPause) {
		// Read next available data
		g_Context.WaitOneUpdateAll(g_UserGenerator);
	}

	// Process the data
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);

	// draw the stuff with OpenGL
	DrawDepthMap(depthMD, sceneMD);
	glutSwapBuffers();
}
Developer: Lammmark, Project: tuiframework, Lines: 34, Source: KinectDeviceApp.cpp

Example 2: Loop

void Loop(void)
{
	XnStatus nRetVal = XN_STATUS_OK;


	while (g_notDone)
	{

		if ((nRetVal = g_context.WaitOneUpdateAll(g_depth)) != XN_STATUS_OK)
			//if ((nRetVal = g_context.WaitAndUpdateAll()) != XN_STATUS_OK)
		{
			fprintf(stderr,"Could not update data: %s\n", xnGetStatusString(nRetVal));
			continue;
		}
		if (g_haveDepth)
		{
			const XnDepthPixel* pDepthMap = g_depth.GetDepthMap();
			ProcessDepthFrame(pDepthMap, g_depthWidth, g_depthHeight);
			FindFingertip();
		}

		if (g_haveImage)
		{
			const XnRGB24Pixel* pImageMap = g_image.GetRGB24ImageMap();
			ProcessImageFrame(pImageMap, g_depthWidth, g_depthHeight);
		}



		ShowFrame();

		CheckKeys();
	}
}
Developer: Topographic0cean, Project: src, Lines: 34, Source: PrimesenseTestbed.cpp
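Example 2 above keeps a commented-out alternative call. The difference, per the OpenNI documentation: WaitOneUpdateAll blocks only until the node passed to it (here g_depth) has new data and then updates every node in the context, while WaitAndUpdateAll blocks until all generators have new data. A short sketch of the two call patterns, reusing the g_context and g_depth globals from Example 2 (a fragment, not a standalone program):

	// Wait only for the depth node, then update every node in the context
	XnStatus rc = g_context.WaitOneUpdateAll(g_depth);

	// Alternative from the commented-out line: wait for all generators to have
	// new data before updating, which can block longer if one stream lags
	// rc = g_context.WaitAndUpdateAll();

	if (rc != XN_STATUS_OK)
		fprintf(stderr, "Could not update data: %s\n", xnGetStatusString(rc));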

Example 3: glutDisplay

// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	XnMapOutputMode mode;
	g_DepthGenerator.GetMapOutputMode(mode);
#ifdef USE_GLUT
	glOrtho(0, mode.nXRes, mode.nYRes, 0, -1.0, 1.0);
#elif defined(USE_GLES)
	glOrthof(0, mode.nXRes, mode.nYRes, 0, -1.0, 1.0);
#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitOneUpdateAll(g_DepthGenerator);
		// Update NITE tree
		g_pSessionManager->Update(&g_Context);
#ifdef USE_GLUT
		PrintSessionState(g_SessionState);
#endif
	}

#ifdef USE_GLUT
	glutSwapBuffers();
#endif
}
Developer: avinashb-sd, Project: MaestroRepo, Lines: 36, Source: main.cpp

Example 4: waitKinect

/**
 * @fn waitKinect
 * Wait for the Kinect.
 * This function defines the Kinect's standby state:
 * while no motion has been made, it stays on hold.
 */
bool waitKinect()
{
	if (g_init_kinect == true)
	{
		// Read next available data
		g_Context.WaitOneUpdateAll(g_DepthGenerator);

		// Process the data
		g_pSessionManager->Update(&g_Context);	
	}	
	return g_session_started;
}
Developer: chverma, Project: IPV_SpaceInvaders, Lines: 18, Source: Kinect.cpp

Example 5: updateKinect

/**
 * @fn updateKinect
 * Update the movement read from the Kinect.
 */
K_SHIP_MOVE updateKinect()
{
	if (g_session_started == true)
	{
		g_init_kinect = false;
		// Read next available data
		g_Context.WaitOneUpdateAll(g_DepthGenerator);

		// Process the data
		g_pSessionManager->Update(&g_Context);
	}
	
	return g_dir;
}
Developer: chverma, Project: IPV_SpaceInvaders, Lines: 18, Source: Kinect.cpp

Example 6: glutDisplay

// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
#ifdef USE_GLUT
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#elif defined(USE_GLES)
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitOneUpdateAll(g_DepthGenerator);
	}

	if (g_bStep)
	{
		g_bStep = false;
		g_bPause = true;
	}
	
	// Process the data
	//DRAW
	g_DepthGenerator.GetMetaData(depthMD);
	g_SceneAnalyzer.GetMetaData(sceneMD);

	DrawDepthMap(depthMD, sceneMD);
#ifdef USE_GLUT
	if (g_bPrintFrameID)
	{
		DrawFrameID(depthMD.FrameID());
	}
#endif

#ifdef USE_GLUT
	glutSwapBuffers();
#endif
}
Developer: Such1337Hax0r, Project: python_kinect_socketio, Lines: 51, Source: main.cpp

Example 7: glutDisplay

// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
	#ifdef USE_GLUT
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitOneUpdateAll(g_DepthGenerator);
	}

	// Process the data
	//DRAW
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	DrawDepthMap(depthMD, sceneMD, g_nPlayer);

	if (g_nPlayer != 0)
	{
		XnPoint3D com;
		g_UserGenerator.GetCoM(g_nPlayer, com);
		if (com.Z == 0)
		{
			g_nPlayer = 0;
			FindPlayer();
		}
	}

	#ifdef USE_GLUT
	glutSwapBuffers();
	#endif
}
Developer: imclab, Project: Bingsu, Lines: 49, Source: main.cpp

Example 8: glutDisplay

// this function is called each frame
void glutDisplay ()
{
	clock_t t1 = clock();
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	xn::ImageMetaData imageMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
#ifndef USE_GLES
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitOneUpdateAll(g_UserGenerator);
	}

	// Process the data
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	if(Show_Image == FALSE)
		DrawDepthMap(depthMD, sceneMD,COM_tracker,Bounding_Box);
	else
	{
		DrawImageMap(imageMD, depthMD, sceneMD,COM_tracker,Bounding_Box);
	}
#ifndef USE_GLES
	glutSwapBuffers();
#endif
	clock_t t2 = clock();
	std::cout << t2 - t1 << std::endl;
}
Developer: ZewiHugo, Project: UserTracker_modified, Lines: 46, Source: main.cpp

Example 9: xnGetStatusString

reader_result kinect_reader2::get_next() {
	XnUserID aUsers[15];
	XnUInt16 nUsers = 15;
	XnSkeletonJointPosition joint;

	while(true) {
		// Read next available data
		nRetVal = g_Context.WaitOneUpdateAll(g_DepthGenerator);
		if(nRetVal != XN_STATUS_OK) {
			printf("Failed updating data: %s\n", xnGetStatusString(nRetVal));
			continue;
		}

		if(g_UserGenerator.GetNumberOfUsers() > 0) {

			g_UserGenerator.GetUsers(aUsers, nUsers);
			for(int i=0; i< nUsers; i++)
			{
				if(g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i]))
				{
					reader_result new_tuple;

					// player
					new_tuple.push_back(boost::any((int)aUsers[i]));

					// 1 = XN_SKEL_HEAD, ..., 24 = XN_SKEL_RIGHT_FOOT
					for(short j = 1; j <= 24; j++) {
						g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], (XnSkeletonJoint)j, joint);
						new_tuple.push_back(boost::any(boost::lexical_cast<std::string>((float)joint.position.X)));
						new_tuple.push_back(boost::any(boost::lexical_cast<std::string>((float)joint.position.Y)));
						new_tuple.push_back(boost::any(boost::lexical_cast<std::string>((float)joint.position.Z)));
						new_tuple.push_back(boost::any(boost::lexical_cast<std::string>((float)joint.fConfidence)));
					}

					return new_tuple;
				}
			}
		}
	}
}
Developer: CCJY, Project: engine-1, Lines: 40, Source: kinect_reader2.cpp

Example 10: glutDisplay

// this function is called each frame
void glutDisplay (void)
{
    xn::SceneMetaData sceneMD;
    xn::DepthMetaData depthMD;
    xn::ImageMetaData imageMD;

    glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();

    g_DepthGenerator.GetMetaData(depthMD);
    g_ImageGenerator.GetMetaData(imageMD);

#ifndef USE_GLES
    glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
    glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

    glDisable(GL_TEXTURE_2D);

    // Read next available data
    g_Context.WaitOneUpdateAll(g_UserGenerator);

    // Process the data
    g_DepthGenerator.GetMetaData(depthMD);
    g_UserGenerator.GetUserPixels(0, sceneMD);
    g_ImageGenerator.GetMetaData(imageMD);

    // Draw the input fetched from the Kinect
    DrawKinectInput(depthMD, sceneMD, imageMD);

#ifndef USE_GLES
    glutSwapBuffers();
#endif
}
Developer: ami-lab, Project: AmI-Platform, Lines: 40, Source: main.cpp

Example 11: glutDisplay

// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
#ifndef USE_GLES
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitOneUpdateAll(g_UserGenerator);
	}

	// Process the data
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	DrawDepthMap(depthMD, sceneMD);

	processUsers(depthMD.FrameID());
#ifndef USE_GLES
	glutSwapBuffers();
#endif
}
Developer: arnthorm, Project: Fable, Lines: 38, Source: main.cpp

Example 12: draw

void draw() {
	glClear( GL_COLOR_BUFFER_BIT );

	XnMapOutputMode mode;
	g_DepthGenerator.GetMapOutputMode(mode);

	Matrix4f projectionMatrix = Matrix4f();
	projectionMatrix.Ortho(0, mode.nXRes, mode.nYRes, 0, -1.0, 1.0);

	Vector primaryPoint;
	primaryPoint = pointHandler->getPrimaryPoint();
	Vector movement = primaryPoint - position;
	movement.normalize();
	position += movement;
	position.print();

	Matrix4f modelViewMatrix;
	modelViewMatrix.translate(position.getX(), position.getY(), 0);

	Matrix4f modelViewProjectionMatrix;
	modelViewProjectionMatrix = projectionMatrix * modelViewMatrix;
	//glUniformMatrix4fv(matLoc, 1, GL_FALSE, modelViewProjectionMatrix); 
	// Use our shader
	glUseProgram(programObject);

	modelViewProjectionMatrix.uniformMatrix(matLoc);

	// Draw the triangle
	glDrawArrays(GL_LINE_LOOP, 0, 3);

	g_Context.WaitOneUpdateAll(g_DepthGenerator);
	// Update NITE tree
	g_pSessionManager->Update(&g_Context);
	PrintSessionState(g_SessionState);

	SDL_GL_SwapBuffers();
}
Developer: cphoward, Project: AquaKinect, Lines: 37, Source: main.cpp

Example 13: main


//......... part of the code is omitted here .........
        }
        nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
        CHECK_RC(nRetVal, "Register to Pose Detected");
        g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
    }

    g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

    nRetVal = g_Context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");

    //XnUserID aUsers[MAX_NUM_USERS];
    //XnUInt16 nUsers;
    //XnSkeletonJointTransformation torsoJoint;
    //XnSkeletonJointTransformation neckJoint;
    //XnSkeletonJointTransformation headJoint;
    //XnSkeletonJointOrientation headorientation;
   printf("Starting to run\n");
    if(g_bNeedPose)
    {
        printf("Assume calibration pose\n");
    }
    int k = 0;
    float xr;
    float yr;
    float zr;
    float xa;
    float ya;
    float za;
	//while (!xnOSWasKeyboardHit())    /////////////////modify here!
	while (1)  // the super loop begins here!!
    {
		
        g_Context.WaitOneUpdateAll(g_UserGenerator);
        // print the torso information for the first user already tracking
        nUsers=MAX_NUM_USERS;
        g_UserGenerator.GetUsers(aUsers, nUsers);  //generate a user
        for(XnUInt16 i=0; i<nUsers; i++)
        {
            if(g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i])==FALSE)
                continue; // if this user is not being tracked, skip the rest of this loop iteration
                          // (none of the remaining statements in the current iteration are executed)
			/*
  XN_SKEL_HEAD          = 1,    XN_SKEL_NECK            = 2,
  XN_SKEL_TORSO         = 3,    XN_SKEL_WAIST           = 4,
  XN_SKEL_LEFT_COLLAR        = 5,    XN_SKEL_LEFT_SHOULDER        = 6,
  XN_SKEL_LEFT_ELBOW        = 7,  XN_SKEL_LEFT_WRIST          = 8,
  XN_SKEL_LEFT_HAND          = 9,    XN_SKEL_LEFT_FINGERTIP    =10,
  XN_SKEL_RIGHT_COLLAR    =11,    XN_SKEL_RIGHT_SHOULDER    =12,
  XN_SKEL_RIGHT_ELBOW        =13,  XN_SKEL_RIGHT_WRIST          =14,
  XN_SKEL_RIGHT_HAND      =15,    XN_SKEL_RIGHT_FINGERTIP    =16,
  XN_SKEL_LEFT_HIP          =17,    XN_SKEL_LEFT_KNEE            =18,
  XN_SKEL_LEFT_ANKLE        =19,  XN_SKEL_LEFT_FOOT            =20,
  XN_SKEL_RIGHT_HIP          =21,    XN_SKEL_RIGHT_KNEE          =22,
  XN_SKEL_RIGHT_ANKLE        =23,    XN_SKEL_RIGHT_FOOT          =24    
			 * 
			 * */
            g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_TORSO,torsoJoint);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_NECK,neckJoint);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_HEAD,headJoint);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_LEFT_SHOULDER,leftshoulder);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_RIGHT_SHOULDER,rightshoulder);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointOrientation(aUsers[i], XN_SKEL_HEAD, headorientation);
            //---------------------------------------------------
            
            /*
Developer: YanyangChen, Project: KVPJ, Lines: 67, Source: NiSimpleSkeleton.cpp

Example 14: main


//......... part of the code is omitted here .........
        printf("Supplied user generator doesn't support skeleton\n");
        return 1;
    }
    nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
    CHECK_RC(nRetVal, "Register to user callbacks");
    nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
    CHECK_RC(nRetVal, "Register to calibration start");
    nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);
    CHECK_RC(nRetVal, "Register to calibration complete");
    //ensure calibration
    if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
    {
        g_bNeedPose = TRUE;
        if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
        {
            printf("Pose required, but not supported\n");
            return 1;
        }
        nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
        CHECK_RC(nRetVal, "Register to Pose Detected");
        g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
    }
    //set skeleton profile (all joints)
    g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
    //start to generate
    nRetVal = g_Context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");



    printf("%c[%d;%d;%dmPrimeSense Skeleton Benchmark Application\n ", 0x1B, BRIGHT,BLUE,BG_BLACK);
    printf("%c[%d;%d;%dmSet Maximal users to %d ", 0x1B, BRIGHT,BLUE,BG_BLACK,numOfUser);
	printf("%c[%dm\n", 0x1B, 0);
    printf("Starting to run\n");
    if(g_bNeedPose)
    {
        printf("Assume calibration pose\n");
    }

    XnUInt32 epochTime = 0;
    // every 30 frames (1 second) we sample the CPU resource usage
    while (!xnOSWasKeyboardHit())
    {
    	if (Sample)
    	{	//get the beginning sample of CPU resources
    		getrusage(RUSAGE_SELF, &ru);
    		timStart=ru.ru_utime;
    		t=(double)timStart.tv_sec * 1000000.0 + (double)timStart.tv_usec \
    		   + (double)ru.ru_stime.tv_sec*1000000.0+(double)ru.ru_stime.tv_usec;
    		//get the wall clock time
    		gettimeofday(&Wall,NULL);

    		w=(double)Wall.tv_sec * 1000000.0 + (double)Wall.tv_usec;
    		Sample = false;
    	}
    	g_Context.WaitOneUpdateAll(g_UserGenerator);
    	xnFPSMarkFrame(&xnFPS);
        // print the torso information for each tracked user once per second, to keep printf overhead low
        if(TotalFrames % 30 == 0)
        {
			nUsers=MAX_NUM_USERS;
			g_UserGenerator.GetUsers(aUsers, nUsers);
			int numTracked=0;
			int userToPrint=-1;
			for(XnUInt16 i=0; i<nUsers; i++)
			{
				if(g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i])==FALSE)
					continue;

				g_UserGenerator.GetSkeletonCap().GetSkeletonJoint(aUsers[i],XN_SKEL_TORSO,torsoJoint);
				printf("User %d Located At distance of %6.2f mm from the sensor\n",aUsers[i],torsoJoint.position.position.Z);

			 }
			//get the finish sample of the CPU resources
			getrusage(RUSAGE_SELF, &ru);
			timEnd=ru.ru_utime;
			t = (double)timEnd.tv_sec * 1000000.0 + (double)timEnd.tv_usec \
				+	(double)ru.ru_stime.tv_sec*1000000.0+(double)ru.ru_stime.tv_usec	- t;
			//get the wall clock
			gettimeofday(&Wall,NULL);

			w = (double)Wall.tv_sec * 1000000.0 + (double)Wall.tv_usec - w;

			XnDouble fps=xnFPSCalc(&xnFPS);
			//print stuff.
			printf("%c[%d;%d;%dmCPU Utilization=%3.2f%%\t", 0x1B, BRIGHT,RED,BG_BLACK,(double)100*t/w);
			printf("%c[%d;%d;%dmFPS=%3.2f ", 0x1B, BRIGHT,RED,BG_BLACK,(double)fps);
			printf("%c[%dm\n", 0x1B, 0);
			Sample= true;

		}
        TotalFrames++;
        
    }
    g_scriptNode.Release();
    g_DepthGenerator.Release();
    g_UserGenerator.Release();
    g_Context.Release();

}
Developer: stozpark, Project: openni, Lines: 101, Source: NiSkeletonBenchmark.cpp

Example 15: main


//......... part of the code is omitted here .........
        g_bNeedPose = TRUE;
        if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)){
            LOG_E("%s", "Pose required, but not supported");
            return 1;
        }
        nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
        CHECK_RC(nRetVal, "Register to Pose Detected");
        g_SkeletonCap.GetCalibrationPose(g_strPose);
    }
    
    g_SkeletonCap.SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
    
    nRetVal = g_Context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");
    
    // Create the image buffer used for display
    XnMapOutputMode mapMode;
    g_ImageGenerator.GetMapOutputMode(mapMode);
    g_rgbImage = cvCreateImage(cvSize(mapMode.nXRes, mapMode.nYRes), IPL_DEPTH_8U, 3);

    LOG_I("%s", "Starting to run");
    if(g_bNeedPose){
        LOG_I("%s", "Assume calibration pose");
    }

    xn::Recorder recorder;
    if( DO_RECORED && !USE_RECORED_DATA ){
        // Create the recorder
        LOG_I("%s", "Setup Recorder");
        nRetVal = recorder.Create(g_Context);
        CHECK_RC(nRetVal, "Create recorder");
        
        // Configure the recording destination
        nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, RECORD_FILE_PATH);
        CHECK_RC(nRetVal, "Set recorder destination file");
        
        // Register the depth and camera inputs as recording targets and start recording
        nRetVal = recorder.AddNodeToRecording(g_DepthGenerator, XN_CODEC_NULL);
        CHECK_RC(nRetVal, "Add depth node to recording");
        nRetVal = recorder.AddNodeToRecording(g_ImageGenerator, XN_CODEC_NULL);
        CHECK_RC(nRetVal, "Add image node to recording");
        
        LOG_I("%s", "Recorder setup done.");
    }

    while (!xnOSWasKeyboardHit())
    {
        g_Context.WaitOneUpdateAll(g_UserGenerator);
        if( DO_RECORED  && !USE_RECORED_DATA ){
            nRetVal = recorder.Record();
            CHECK_RC(nRetVal, "Record");
        }

        // Get the raw image data from the video camera
        xn::ImageMetaData imageMetaData;
        g_ImageGenerator.GetMetaData(imageMetaData);
        // Copy the raw data into the image buffer
        xnOSMemCopy(g_rgbImage->imageData, imageMetaData.RGB24Data(), g_rgbImage->imageSize);
        // Convert RGB to BGR (OpenCV's display order) before showing it
        cvCvtColor(g_rgbImage, g_rgbImage, CV_RGB2BGR);

        // Get the user-labeled pixels from the UserGenerator
        xn::SceneMetaData sceneMetaData;
        g_UserGenerator.GetUserPixels(0, sceneMetaData);
        
        XnUserID allUsers[MAX_NUM_USERS];
        XnUInt16 nUsers = MAX_NUM_USERS;
        g_UserGenerator.GetUsers(allUsers, nUsers);
        for (int i = 0; i < nUsers; i++) {
            
            // Check whether calibration has succeeded for this user
            if (g_SkeletonCap.IsTracking(allUsers[i])) {
                // Draw the skeleton
                DrawSkelton(allUsers[i], i);
            }
        }
        
        // Show the result
        cvShowImage("User View", g_rgbImage);

        // Exit when ESC is pressed
        if (cvWaitKey(10) == 27) {
            break;
        }
    }

    if( !USE_RECORED_DATA ){
        g_scriptNode.Release();
    }
    g_DepthGenerator.Release();
    g_UserGenerator.Release();
    g_Context.Release();

	if (g_rgbImage != NULL) {
		cvReleaseImage(&g_rgbImage);	
	}
	g_Context.Shutdown();

    
}
Developer: sparkgene, Project: recorder_test, Lines: 101, Source: main.cpp


Note: The xn::Context::WaitOneUpdateAll method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.