当前位置: 首页>>代码示例>>C++>>正文


C++ Context::WaitAndUpdateAll方法代码示例

本文整理汇总了C++中xn::Context::WaitAndUpdateAll方法的典型用法代码示例。如果您正苦于以下问题:C++ Context::WaitAndUpdateAll方法的具体用法?C++ Context::WaitAndUpdateAll怎么用?C++ Context::WaitAndUpdateAll使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在xn::Context的用法示例。


在下文中一共展示了Context::WaitAndUpdateAll方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。

示例1: glutDisplay

// GLUT per-frame callback: advances OpenNI (unless paused) and renders the
// depth map with user-pixel labeling.
void glutDisplay (void)
{
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint: orthographic projection sized to the depth
	// map so depth pixels map 1:1 onto window coordinates (Y flipped).
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data (blocks until all generators have a frame).
		g_Context.WaitAndUpdateAll();
	}

	// Process the data. FIX: these lines were indented as if they belonged to
	// the if-block above, but they execute every frame regardless of pause —
	// re-indented to match the actual control flow.
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	DrawDepthMap(depthMD, sceneMD);

	glutSwapBuffers();
	Sleep(5);	// todo: adjust frame pacing
}
开发者ID:ndruger,项目名称:kinect-modeling-tool,代码行数:32,代码来源:main.cpp

示例2: grabFrame

bool CvCapture_OpenNI::grabFrame()
{
    if( !isOpened() )
        return false;

    bool isGrabbed = false;
    if( !approxSyncGrabber.empty() && approxSyncGrabber->isRun() )
    {
        isGrabbed = approxSyncGrabber->grab( depthMetaData, imageMetaData );
    }
    else
    {
        XnStatus status = context.WaitAndUpdateAll();
        if( status != XN_STATUS_OK )
            return false;

        if( depthGenerator.IsValid() )
            depthGenerator.GetMetaData( depthMetaData );
        if( imageGenerator.IsValid() )
            imageGenerator.GetMetaData( imageMetaData );
        isGrabbed = true;
    }

    return isGrabbed;
}
开发者ID:AndreSteenveld,项目名称:opencv,代码行数:25,代码来源:cap_openni.cpp

示例3: glutDisplay

// GLUT per-frame callback: pumps OpenNI/NITE, then draws the boxes and slider.
void glutDisplay (void)
{
	// Fetch the next frame from all generators and let the NITE session
	// manager process the gesture/hand data.
	g_Context.WaitAndUpdateAll();
	g_pSessionManager->Update(&g_Context);

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Orthographic projection covering the whole window.
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	#ifdef USE_GLUT
	glOrtho(0, GL_WIN_SIZE_X, 0, GL_WIN_SIZE_Y, -1.0, 1.0);
	#else
	glOrthof(0, GL_WIN_SIZE_X, 0, GL_WIN_SIZE_Y, -1.0, 1.0);
	#endif

	glDisable(GL_TEXTURE_2D);

	// Render the three MyBox widgets.
	for (int boxIdx = 0; boxIdx < 3; ++boxIdx)
	{
		g_pBox[boxIdx]->Draw();
	}

	// Render the slider control.
	DrawSlider();

	#ifdef USE_GLUT
	glutSwapBuffers();
	#endif
}
开发者ID:sravanthib,项目名称:openni_annotator,代码行数:36,代码来源:main.cpp

示例4: Loop

// Main capture loop: reads depth frames, detects fingertips, and — after a
// stabilization period — either calibrates or streams touches over `sock`.
// Runs until g_notDone is cleared.
void Loop(int sock)
{
	XnStatus nRetVal = XN_STATUS_OK;
	struct timespec last, now;
	double nowtime, starttime;

	clock_gettime(CLOCK_REALTIME, &last);
	double lasttime = (double)(last.tv_sec) + ((double)(last.tv_nsec))/1000000000;
	// BUG FIX: `starttime` was compared against `nowtime` in the stabilization
	// check below without ever being assigned (undefined behavior). Anchor the
	// stabilization window to the moment the loop starts.
	starttime = lasttime;
	int frames = 0;

	while (g_notDone)
	{
		// Block until every OpenNI generator has a fresh frame.
		if ((nRetVal = g_context.WaitAndUpdateAll()) != XN_STATUS_OK)
		{
			fprintf(stderr,"Could not update ir: %s\n", xnGetStatusString(nRetVal));
			continue;
		}

		const XnDepthPixel* pDepthMap = g_depth.GetDepthMap();
		const XnRGB24Pixel* pImage    = NULL;//g_image.GetRGB24ImageMap();
		const XnIRPixel*    pIR       = NULL;//g_ir.GetIRMap();

		ProcessDepthFrame(pDepthMap, g_depthWidth, g_depthHeight);

		FindFingertip();

		frames++;
		clock_gettime(CLOCK_REALTIME, &now);
		nowtime = (double)(now.tv_sec) + ((double)(now.tv_nsec))/1000000000;

		if (g_stabalize) // If we are still stabilizing then don't do anything
		{
			if ((nowtime - starttime) >= STABILIZATION_TIME_SECS)
			{
				g_stabalize = FALSE;
				g_set_background = TRUE;
			}
		}
		else if (g_calibrate)  // Do we need to calibrate?
			Calibrate(sock);
		else
			SendFingers(sock);  // otherwise just send the touches

		/*
		if (nowtime - lasttime >= 1.0)
		{
			printf("%d FPS\n", (int)(frames/(nowtime-lasttime)));
			lasttime = nowtime;
			frames = 0;
			if (sock >= 0 && pDepthMap != 0 )// && pImage != 0 )//&& pIR != 0)
				SendImage(sock,pDepthMap, pImage, pIR, g_depthWidth, g_depthHeight);
		}
		*/
	}
}
开发者ID:Topographic0cean,项目名称:src,代码行数:55,代码来源:ImageFlinger.cpp

示例5: EEThreadProc

/*
 DWORD WINAPI EEThreadProc(LPVOID lpThreadParameter)
 {
	{
		g_Context.WaitAndUpdateAll();
		if (g_bRecord)
		{
			g_pRecorder->Record();
		}
	}
	return 0;
 }
 */
// Thread body: pull one synchronized frame from all OpenNI generators and,
// when recording is enabled, persist it via the global recorder.
int EEThreadProc(void *lpThreadParameter)
{
	g_Context.WaitAndUpdateAll();
	if (g_bRecord)
		g_pRecorder->Record();
	return 0;
}
开发者ID:imclab,项目名称:StickFigureOSC,代码行数:24,代码来源:StickFigure.cpp

示例6: run

    // Main loop: pumps captured OpenNI audio into the Windows waveOut playback
    // queue, cycling through NUMBER_OF_AUDIO_BUFFERS reusable WAVE buffers.
    // Exits when a key is pressed; throws std::runtime_error on OpenNI failure.
    void run()
    {
        int nAudioNextBuffer = 0;

        printf ("Press any key to exit...\n");

        // Discard whatever audio data is currently pending so playback starts
        // from fresh samples.
        audio.WaitAndUpdateData();

        while (!xnOSWasKeyboardHit()) {
            // Wait for and fetch the next chunk of data from all generators.
            XnStatus nRetVal = context.WaitAndUpdateAll();
            if (nRetVal != XN_STATUS_OK) {
                throw std::runtime_error(xnGetStatusString(nRetVal));
            }

            // Grab the next playback buffer; if the device has not finished
            // playing it yet (WHDR_DONE clear), this chunk of audio is dropped.
            WAVEHDR* pHeader = &AudioBuffers[nAudioNextBuffer];
            if ((pHeader->dwFlags & WHDR_DONE) == 0) {
                printf("No audio buffer is available!. Audio buffer will be lost!\n");
                continue;
            }

            // Clean up the WAVE header from its previous use before refilling.
            MMRESULT mmRes = waveOutUnprepareHeader(hWaveOut, pHeader, sizeof(WAVEHDR));
            if ( mmRes != MMSYSERR_NOERROR ) {
                OutputErrorText( mmRes );
            }

            // Copy the captured WAVE data into the buffer.
            pHeader->dwBufferLength = audio.GetDataSize();
            pHeader->dwFlags = 0;
            xnOSMemCopy(pHeader->lpData, audio.GetAudioBuffer(), pHeader->dwBufferLength);

            // Re-prepare the WAVE header for playback.
            mmRes = waveOutPrepareHeader(hWaveOut, pHeader, sizeof(WAVEHDR));
            if ( mmRes != MMSYSERR_NOERROR ) {
                OutputErrorText( mmRes );
                continue;
            }

            // Queue the WAVE data for output.
            mmRes = waveOutWrite(hWaveOut, pHeader, sizeof(WAVEHDR));
            if ( mmRes != MMSYSERR_NOERROR ) {
                OutputErrorText( mmRes );
                continue;
            }

            // Advance to the next buffer in the ring.
            nAudioNextBuffer = (nAudioNextBuffer + 1) % NUMBER_OF_AUDIO_BUFFERS;
        }
    }
开发者ID:DaikiMaekawa,项目名称:openni_sandbox,代码行数:53,代码来源:main.cpp

示例7: gl_onDisplay

// GLUT display callback: advance OpenNI one frame and render the registered
// drawers in order.
void gl_onDisplay()
{
  gContext.WaitAndUpdateAll();
  glClear(GL_COLOR_BUFFER_BIT);

  // Render each drawer slot, skipping any that are unset.
  for (int slot = 0; slot < 2; ++slot) {
    if (gDrawOrder[slot] != NULL)
      gDrawOrder[slot]->draw();
  }

  glutSwapBuffers();
}
开发者ID:phelrine,项目名称:kica,代码行数:13,代码来源:kivi.cpp

示例8: glutDisplay

// this function is called each frame
void glutDisplay (void)
{
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();
	XnMapOutputMode mode;
	g_DepthGenerator.GetMapOutputMode(mode);
	glOrtho(0, mode.nXRes, mode.nYRes, 0, -1.0, 1.0);
	glDisable(GL_TEXTURE_2D);
	g_Context.WaitAndUpdateAll();
	g_pSessionManager->Update(&g_Context);
	PrintSessionState(g_SessionState);
	glutSwapBuffers();
}
开发者ID:AF83,项目名称:ucengine-kinect,代码行数:16,代码来源:main.cpp

示例9: gl_onDisplay

// GLUT display callback: advance OpenNI, draw the depth/skeleton/RGB layers,
// and record the skeleton while a capture is in progress.
void gl_onDisplay()
{
  gContext.WaitAndUpdateAll();
  glClear(GL_COLOR_BUFFER_BIT);

  // Fixed back-to-front draw order: depth, skeleton overlay, RGB image.
  gDepthDrawer->draw();
  gSkeletonDrawer->draw();
  gImageDrawer->draw();

  // While capturing, snapshot the skeleton and label the tracked subject.
  if (capture->isCapturing()) {
    capture->captureSkeleton(gUserGenerator.GetSkeletonCap());
    gl_labelCapturingUser(capture->getCapturingUser());
  }

  glutSwapBuffers();
}
开发者ID:phelrine,项目名称:kica,代码行数:16,代码来源:kica.cpp

示例10: glutDisplay

// GLUT per-frame callback: advances OpenNI (unless paused), renders the depth
// map, and publishes skeletons / polygonal maps on the ROS topics.
void glutDisplay (void)
{
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint: ortho projection matching the depth map.
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data (blocks until generators have a frame).
		g_Context.WaitAndUpdateAll();
	}

	// One timestamp so the skeleton and polygon-map messages agree.
	ros::Time tstamp = ros::Time::now();

	// Process the data. FIX: re-indented — these lines run every frame, they
	// were previously indented as if inside the if-block above.
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	DrawDepthMap(depthMD, sceneMD);

	std::vector<mapping_msgs::PolygonalMap> pmaps;
	body_msgs::Skeletons skels;
	getSkels(pmaps, skels);
	// BUG FIX: %d with a size_t argument is undefined behavior on LP64;
	// cast explicitly to match the format specifier.
	ROS_DEBUG("skels size %d \n", (int)pmaps.size());
	if (pmaps.size())
	{
		skels.header.stamp = tstamp;
		skels.header.seq = depthMD.FrameID();
		skels.header.frame_id = "/openni_depth_optical_frame";
		skel_pub.publish(skels);
		pmaps.front().header.stamp = tstamp;
		pmaps.front().header.seq = depthMD.FrameID();
		pmaps.front().header.frame_id = "/openni_depth_optical_frame";
		pmap_pub.publish(pmaps[0]);
	}

	glutSwapBuffers();
}
开发者ID:ljxin1993,项目名称:GestureRecognition_Kinect-ROS,代码行数:47,代码来源:main.cpp

示例11: main

// Entry point for the scene_tracker ROS node: initializes OpenNI from an XML
// config, sets up depth and user generators, registers user callbacks, then
// publishes transforms at 30 Hz until ROS shuts down.
int main(int argc, char **argv) {
  ros::init(argc, argv, "scene_tracker");
  ros::NodeHandle nh;

  // Load the OpenNI context from the tracker package's XML configuration.
  string configFilename = ros::package::getPath("openni_tracker") + "/openni_tracker.xml";
  XnStatus nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());
  CHECK_RC(nRetVal, "InitFromXml");

  nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
  CHECK_RC(nRetVal, "Find depth generator");

	// Reuse an existing user generator node if the XML defined one;
	// otherwise create it here.
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK) {
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

  XnCallbackHandle hUserCallbacks;
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);

	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");

	ros::Rate r(30);

        
  // Frame id is configurable via the private ~camera_frame_id parameter.
  ros::NodeHandle pnh("~");
  string frame_id("openni_depth_frame");
  pnh.getParam("camera_frame_id", frame_id);
                
	// Main loop: block for each new frame, publish transforms, pace at 30 Hz.
	while (ros::ok()) {
		g_Context.WaitAndUpdateAll();
		publishTransforms(frame_id);
		r.sleep();
	}

	g_Context.Shutdown();
	return 0;
}
开发者ID:Cognitive-Robotics-Lab,项目名称:TurtleRPI,代码行数:39,代码来源:cluster.cpp

示例12: captureOne

// Grab one synchronized RGB + depth frame into the member buffers
// (pRgbData_ as packed RGB triplets, pDepthData_ as 8-bit scaled depth).
// Returns false if OpenNI fails to deliver a frame.
// NOTE(review): assumes 640x480 output and that pRgbData_/pDepthData_ were
// allocated accordingly elsewhere — confirm against the class's setup code.
bool DataCapture::captureOne()
{
    XnStatus rc = context_.WaitAndUpdateAll(); // want this to be WaitOneUpdateAll(RGB image)
    if( rc != XN_STATUS_OK )
    {
        std::cout << "WaitAndUpdateAll: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    // grab image: unpack XnRGB24Pixel structs into a flat byte array
    imageGen_.GetMetaData(imageMd_);
    const XnRGB24Pixel* rgbData = imageMd_.RGB24Data();
    for( unsigned int i = 0; i < 640 * 480; ++i )
    {
        pRgbData_[3*i] = rgbData->nRed;
        pRgbData_[3*i + 1] = rgbData->nGreen;
        pRgbData_[3*i + 2] = rgbData->nBlue;
        ++rgbData;
    }

    // grab depth image: scale raw depth (treated as 0..2047) down to 0..255;
    // integer division truncates, so this loses precision by design
    depthGen_.GetMetaData(depthMd_);
    const uint16_t* pDepthDataU16 = depthMd_.Data();
    for( int i = 0; i < 640 * 480; ++i)
    {
        uint16_t d = pDepthDataU16[i];
        if( d != 0 )
        {
            pDepthData_[i] = (d * 255)/2048;
        }
        else
        {
            pDepthData_[i] = 0; // should be NAN
        }
    }
    return true;
}
开发者ID:cvilas,项目名称:scratch,代码行数:37,代码来源:main.cpp

示例13: main

// Entry point for the kinect ROS node: initializes OpenNI with skeleton
// tracking (including pose-based calibration when the driver requires it)
// and publishes user data on the "kinect_users" topic at 30 Hz.
int main(int argc, char **argv)
{
    ros::init(argc, argv, "kinnect");
    ros::NodeHandle nh;

    // Load the OpenNI context from the package's XML configuration.
    string configFilename = ros::package::getPath("kinect") + "/config/openni.xml";
    XnStatus nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());
    CHECK_RC(nRetVal, "InitFromXml");

	// init kinect user IDs to 0 to indicate they're invalid.
	for (int i = 0; i < NUSERS; i++)
	{
		kinectUsers[i].id = 0;
	}

    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
    CHECK_RC(nRetVal, "Find depth generator");

	// Reuse an existing user generator if the XML defined one, else create it.
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK)
	{
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

	// Skeleton support is mandatory for this node.
	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		ROS_INFO("Supplied user generator doesn't support skeleton");
		return 1;
	}

    XnCallbackHandle hUserCallbacks;
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);

	XnCallbackHandle hCalibrationCallbacks;
	g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);

	// Older NITE versions require the user to strike a calibration pose;
	// if so, pose detection must also be supported.
	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
	{
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
		{
			ROS_INFO("Pose required, but not supported");
			return 1;
		}

		XnCallbackHandle hPoseCallbacks;
		g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);

		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
	}

	// Track all skeleton joints.
	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");

	ros::Rate r(30);

	// Frame id is configurable via the private ~camera_frame_id parameter.
	ros::NodeHandle pnh("~");
	string frame_id("openni_depth_frame");
	pnh.getParam("camera_frame_id", frame_id);
                
	ros::Publisher user_pub = nh.advertise<kinect::User>("kinect_users", 1000);

	// Main loop: block for each new frame, publish user transforms, 30 Hz.
	while (ros::ok()) {
		g_Context.WaitAndUpdateAll();
		publishTransforms(frame_id, user_pub);
		r.sleep();
	}

	g_Context.Shutdown();
	return 0;
}
开发者ID:TjlHope,项目名称:Rutler,代码行数:74,代码来源:user_tracker.cpp

示例14: main

int main(int argc, char **argv){
  ros::init(argc, argv, "pplTracker");
  ros::NodeHandle nh;

  std::string configFilename = ros::package::getPath("people_tracker_denbyk") + "/init/openni_tracker.xml";
  genericUserCalibrationFileName = ros::package::getPath("people_tracker_denbyk") + "/init/GenericUserCalibration.bin";

  ros::Rate loop_rate(1);

  //valore di ritorno Xn
  XnStatus nRetVal;

  //while (ros::ok())
  while (nh.ok())
  {
    //inizializzo contesto openni
    //ROS_INFO(configFilename.c_str(),xnGetStatusString(nRetVal));
    nRetVal = g_Context.InitFromXmlFile(configFilename.c_str());

    //TODO: remove
    nRetVal = XN_STATUS_OK;

    //riprovo tra un po'
    if(nRetVal != XN_STATUS_OK)
    {
      ROS_INFO("InitFromXml failed: %s Retrying in 3 seconds...", xnGetStatusString(nRetVal));
      ros::Duration(3).sleep();

    }
    else
    {
      break;
    }
  }


  //std::string frame_id;

  nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);


  if(nRetVal != XN_STATUS_OK)
  {
    ROS_ERROR("Find depth generator failed: %s", xnGetStatusString(nRetVal));
  }

  //cerca nodo ti tipo user generator e lo salva in g_UserGenerator
  nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);


  if (nRetVal != XN_STATUS_OK)
  {
    //crea lo userGenerator del g_context. SE non riesce probabilmente manca NITE
    nRetVal = g_UserGenerator.Create(g_Context);

    if (nRetVal != XN_STATUS_OK)
    {
      ROS_ERROR("NITE is likely missing: Please install NITE >= 1.5.2.21. Check the readme for download information. Error Info: User generator failed: %s", xnGetStatusString(nRetVal));
      return nRetVal;
    }
  }
  //veriica che lo userGenerator creato supporti SKELETON
  if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
  {
    ROS_INFO("Supplied user generator doesn't support skeleton");
    return EXIT_FAILURE;
  }

  //imposto la modalità dello skeleton, quali giunzioni rilevare e quali no.
  //in questo caso upper è il torso/la testa
  g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_UPPER);

  //setto varie callbacks per entrata, uscita e rientro user nello UserGenerator
  XnCallbackHandle hUserCallbacks;
  g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
  g_UserGenerator.RegisterToUserExit(User_OutOfScene, NULL, hUserCallbacks);
  g_UserGenerator.RegisterToUserReEnter(User_BackIntoScene, NULL, hUserCallbacks);

  //attivo la generazione dei vari generators
  nRetVal = g_Context.StartGeneratingAll();

  if(nRetVal != XN_STATUS_OK)
  {
    ROS_ERROR("StartGenerating failed: %s", xnGetStatusString(nRetVal));
  }

  //recupera un parametro passato al nodo dal server dei parametri ROS.
  //usando la chiave camera_frame_id e lo memorizza nella variabile frame_id
  std::string frame_id("camera_depth_frame");
  nh.getParam("camera_frame_id", frame_id);


  std::cout << "init ok\n";
  //ciclo principale.
  while(nh.ok())
  {
    //aggiorna il contesto e aspetta
    g_Context.WaitAndUpdateAll();
    //pubblica le trasformazioni su frame_id
    publishTransforms(frame_id);
//.........这里部分代码省略.........
开发者ID:denbyk,项目名称:people_tracker_denbyk,代码行数:101,代码来源:OLDPeopleTracker_denbyk.cpp

示例15: glut_display

void glut_display() {
	xn::DepthMetaData pDepthMapMD;
	xn::ImageMetaData pImageMapMD;
	XnUserID pUser[2];
	XnUInt16 nUsers=2;
#ifdef DEBUGOUT
	ofstream datei;
#endif

	glEnable(GL_TEXTURE_2D);

	pUser[0] = 0;
	pUser[1] = 0;

	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();
	/*glFrustum(	-ROOM_X/2+(1500.0/1750.0)*static_cast<int>(headtrans.x),
				ROOM_X/2+(1500.0/1750.0)*static_cast<int>(headtrans.x),
				-ROOM_Y/2-(1500.0/1750.0)*static_cast<int>(headtrans.y),
				ROOM_Y/2-(1500.0/1750.0)*static_cast<int>(headtrans.y),
				1525,
				2525);*/

	float nearplane = 1525;
	float screenaspect = ROOM_X/ROOM_Y;
	glFrustum(	nearplane*(-0.5 * screenaspect + headtrans.x)/headtrans.z,
				nearplane*( 0.5 * screenaspect + headtrans.x)/headtrans.z,
				nearplane*(-0.5 + headtrans.y)/headtrans.z,
				nearplane*( 0.5 + headtrans.y)/headtrans.z,
				nearplane,
				2525);


	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();

	nRetVal = context.WaitAndUpdateAll();
	checkError("Fehler beim Aktualisieren der Daten", nRetVal);

	/* Anzahl der User auslesen und in Objekten speichern */
	user.GetUsers(pUser, nUsers);
	if(pUser[0]!=0 && pUserOld!=1) {
		cout << "User 1 erkannt" << endl;
		pUserOld=1;
	}

	xn::SkeletonCapability pSkeleton = user.GetSkeletonCap();
	XnCallbackHandle hnd;
	pSkeleton.RegisterCalibrationCallbacks(skel_cal_start, skel_cal_end, 0,hnd);
	
	if(calibration){

	}

	pSkeleton.SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
	if(pSkeleton.IsCalibrated(pUser[0])) {
		XnSkeletonJointTransformation head;
		pSkeleton.StartTracking(pUser[0]);
		pSkeleton.GetSkeletonJoint(pUser[0], XN_SKEL_HEAD, head);

		if(head.position.fConfidence && head.orientation.fConfidence) {
/*			headtrans.rotmat[0] = head.orientation.orientation.elements[0];
			headtrans.rotmat[1] = head.orientation.orientation.elements[1];
			headtrans.rotmat[2] = head.orientation.orientation.elements[2];
			headtrans.rotmat[3] = 0;
			
			headtrans.rotmat[4] = head.orientation.orientation.elements[3];
			headtrans.rotmat[5] = head.orientation.orientation.elements[4];
			headtrans.rotmat[6] = head.orientation.orientation.elements[5];
			headtrans.rotmat[7] = 0;
			
			headtrans.rotmat[8] = -head.orientation.orientation.elements[6];
			headtrans.rotmat[9] = -head.orientation.orientation.elements[7];
			headtrans.rotmat[10] =-head.orientation.orientation.elements[8];
			headtrans.rotmat[11] = 0;

			headtrans.rotmat[12] = 0;
			headtrans.rotmat[13] = 0;
			headtrans.rotmat[14] = 0;
			headtrans.rotmat[15] = 1;*/

			headtrans.x = head.position.position.X;
			headtrans.y = head.position.position.Y;
			headtrans.z = head.position.position.Z;
			
			 
#ifdef CONSOLEOUT
			clearScreen();
			cout	<< "Confidence Position: " << head.position.fConfidence
					<< " X: " << head.position.position.X
					<< " Y: " << head.position.position.Y
					<< " Z: " << head.position.position.Z << endl

					<< "Confidence Rotation: " << head.orientation.fConfidence << endl
					<< "\t" << headtrans.rotmat[0]
					<< "\t" << headtrans.rotmat[4]
					<< "\t" << headtrans.rotmat[8] << endl
					<< "\t" << headtrans.rotmat[1]
//.........这里部分代码省略.........
开发者ID:digitalaudiotape,项目名称:Particle-Cloud-Voyeurism,代码行数:101,代码来源:main.cpp


注:本文中的xn::Context::WaitAndUpdateAll方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。