当前位置: 首页>>代码示例>>C++>>正文


C++ UserGenerator::GetUserPixels方法代码示例

本文整理汇总了C++中xn::UserGenerator::GetUserPixels方法的典型用法代码示例。如果您正苦于以下问题:C++ UserGenerator::GetUserPixels方法的具体用法?C++ UserGenerator::GetUserPixels怎么用?C++ UserGenerator::GetUserPixels使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在xn::UserGenerator的用法示例。


在下文中一共展示了UserGenerator::GetUserPixels方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。

示例1: glutDisplay

// this function is called each frame
// GLUT display callback: refreshes depth/user data from OpenNI and redraws.
void glutDisplay (void)
{
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint: orthographic projection sized to the depth map.
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data (blocks until all generators have a new frame)
		g_Context.WaitAndUpdateAll();
	}

	// Process the data. These lines run every frame regardless of pause state;
	// the original indentation misleadingly suggested they belonged to the if above.
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);	// 0 = label map covering all users
	DrawDepthMap(depthMD, sceneMD);

	glutSwapBuffers();
	Sleep(5);	// todo: adjust — crude frame-rate throttle (Windows-only Sleep)
}
开发者ID:ndruger,项目名称:kinect-modeling-tool,代码行数:32,代码来源:main.cpp

示例2: glutDisplay

// this function is called each frame
void glutDisplay(void) {
    //send stuff twice every frame
    executeInputLoop();
    executeInputLoop();

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;

	g_DepthGenerator.GetMetaData(depthMD);

	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	glDisable(GL_TEXTURE_2D);

	if (!g_bPause) {
		// Read next available data
		g_Context.WaitOneUpdateAll(g_UserGenerator);
	}

	// Process the data
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);

	//draw the suff with openGL
	DrawDepthMap(depthMD, sceneMD);
	glutSwapBuffers();
}
开发者ID:Lammmark,项目名称:tuiframework,代码行数:34,代码来源:KinectDeviceApp.cpp

示例3: glutDisplay

// this function is called each frame
// GLUT display callback: updates depth/user data, draws, and re-acquires the
// tracked player if the current one has been lost.
void glutDisplay (void)
{
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
#ifdef USE_GLUT
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
	// OpenGL ES uses the float variant.
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitOneUpdateAll(g_DepthGenerator);
	}

	// Process the data. These lines run every frame, paused or not; the
	// original indentation misleadingly placed them inside the if above.
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	DrawDepthMap(depthMD, sceneMD, g_nPlayer);

	// If the tracked player's centre of mass reports Z == 0, the user has been
	// lost: drop the current player and search for a new one.
	if (g_nPlayer != 0)
	{
		XnPoint3D com;
		g_UserGenerator.GetCoM(g_nPlayer, com);
		if (com.Z == 0)
		{
			g_nPlayer = 0;
			FindPlayer();
		}
	}

#ifdef USE_GLUT
	glutSwapBuffers();
#endif
}
开发者ID:imclab,项目名称:Bingsu,代码行数:49,代码来源:main.cpp

示例4: glutDisplay

// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}
	ros::Time tstamp=ros::Time::now();
		// Process the data
		g_DepthGenerator.GetMetaData(depthMD);
		g_UserGenerator.GetUserPixels(0, sceneMD);
		DrawDepthMap(depthMD, sceneMD);
		std::vector<mapping_msgs::PolygonalMap> pmaps;
		body_msgs::Skeletons skels;
		getSkels(pmaps,skels);
		ROS_DEBUG("skels size %d \n",pmaps.size());
		if(pmaps.size()){

		   skels.header.stamp=tstamp;
		   skels.header.seq = depthMD.FrameID();
		   skels.header.frame_id="/openni_depth_optical_frame";
		   skel_pub.publish(skels);
	      pmaps.front().header.stamp=tstamp;
	      pmaps.front().header.seq = depthMD.FrameID();
	      pmaps.front().header.frame_id="/openni_depth_optical_frame";
		   pmap_pub.publish(pmaps[0]);
		}


	glutSwapBuffers();
}
开发者ID:ljxin1993,项目名称:GestureRecognition_Kinect-ROS,代码行数:47,代码来源:main.cpp

示例5: glutDisplay

// this function is called each frame
// GLUT display callback: refreshes depth/image/user data, draws either the
// depth view or the RGB view, and prints the per-frame processing time.
void glutDisplay ()
{
	clock_t t1 = clock();	// start of per-frame timing

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	xn::ImageMetaData imageMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
#ifndef USE_GLES
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitOneUpdateAll(g_UserGenerator);
	}

	// Process the data (runs every frame, paused or not).
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);

	// Draw either the depth view or the RGB image view, with the
	// centre-of-mass tracker overlay and bounding box.
	if (Show_Image == FALSE)
	{
		DrawDepthMap(depthMD, sceneMD, COM_tracker, Bounding_Box);
	}
	else
	{
		DrawImageMap(imageMD, depthMD, sceneMD, COM_tracker, Bounding_Box);
	}
#ifndef USE_GLES
	glutSwapBuffers();
#endif

	// Report frame processing time (in raw clock ticks, not milliseconds).
	clock_t t2 = clock();
	std::cout << t2 - t1 << std::endl;
}
开发者ID:ZewiHugo,项目名称:UserTracker_modified,代码行数:46,代码来源:main.cpp

示例6: getRGB

// Callback: Finished calibration
// Invoked by OpenNI when skeleton calibration for user `nId` completes.
// On success: saves the calibration, starts skeleton tracking, and snapshots
// the user's appearance (label mask + RGB) for later re-identification.
// On failure: restarts pose detection or requests recalibration, depending on
// whether a calibration pose is required.
void XN_CALLBACK_TYPE
UserCalibration_CalibrationEnd(xn::SkeletonCapability& capability, XnUserID nId, XnBool bSuccess, void* pCookie)
{
	if (bSuccess)
	{
		// Calibration succeeded - save this first calibration and start tracking the user
		ROS_INFO("[bk_skeletal_tracker] Calibration complete, now tracking user %d", nId);
		
		g_bhasCal=true;
		first_calibrated_user_ = nId;
		// Slot 0 stores the saved calibration data for this generator.
		g_UserGenerator.GetSkeletonCap().SaveCalibrationData(nId, 0);
		g_UserGenerator.GetSkeletonCap().StartTracking(nId);
	
		// Save the user's calibration
		
		// Get mask of this user
		xn::SceneMetaData sceneMD;
		cv::Mat label_image;
		g_UserGenerator.GetUserPixels(0, sceneMD);
		getUserLabelImage(sceneMD, label_image);
		// Binarize the label map: non-zero where the pixel belongs to user nId.
		label_image = (label_image == nId);
		
		xn::ImageMetaData imageMD;
		// NOTE(review): the line below is commented out, so imageMD is never
		// populated and getRGB() receives empty metadata — confirm whether this
		// is intentional (e.g. no image generator available) or a latent bug.
		//g_ImageGenerator.GetMetaData(imageMD);
		
		cv::Mat rgb;
		rgb = getRGB(imageMD);
		
		original_cal_.init(rgb, label_image);
		user_cal_ = original_cal_;
	}
	else
	{
		// Calibration failed
		ROS_INFO("[bk_skeletal_tracker] Calibration failed for user %d", nId);
		
		if (g_bNeedPose)
			g_UserGenerator.GetPoseDetectionCap().StartPoseDetection(g_strPose, nId);
		else
			g_UserGenerator.GetSkeletonCap().RequestCalibration(nId, true);
	}
}
开发者ID:Aharobot,项目名称:bk-ros,代码行数:43,代码来源:main.cpp

示例7: publishTransforms

void publishTransforms(const std::string& frame_id, image_transport::Publisher& pub)
{
	XnUserID users[15];
	XnUInt16 users_count = 15;
	g_UserGenerator.GetUsers(users, users_count);

	for (int i = 0; i < users_count; ++i)
	{
		XnUserID user = users[i];
		if (!g_UserGenerator.GetSkeletonCap().IsTracking(user))
			continue;

		publishTransform(user, XN_SKEL_HEAD, frame_id, "head");
		publishTransform(user, XN_SKEL_NECK, frame_id, "neck");
		publishTransform(user, XN_SKEL_TORSO, frame_id, "torso");

		publishTransform(user, XN_SKEL_LEFT_SHOULDER, frame_id, "left_shoulder");
		publishTransform(user, XN_SKEL_LEFT_ELBOW, frame_id, "left_elbow");
		publishTransform(user, XN_SKEL_LEFT_HAND, frame_id, "left_hand");

		publishTransform(user, XN_SKEL_RIGHT_SHOULDER, frame_id, "right_shoulder");
		publishTransform(user, XN_SKEL_RIGHT_ELBOW, frame_id, "right_elbow");
		publishTransform(user, XN_SKEL_RIGHT_HAND, frame_id, "right_hand");

		publishTransform(user, XN_SKEL_LEFT_HIP, frame_id, "left_hip");
		publishTransform(user, XN_SKEL_LEFT_KNEE, frame_id, "left_knee");
		publishTransform(user, XN_SKEL_LEFT_FOOT, frame_id, "left_foot");

		publishTransform(user, XN_SKEL_RIGHT_HIP, frame_id, "right_hip");
		publishTransform(user, XN_SKEL_RIGHT_KNEE, frame_id, "right_knee");
		publishTransform(user, XN_SKEL_RIGHT_FOOT, frame_id, "right_foot");
	}

	// publish segmented image
	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	PublishPeopleImage(depthMD, sceneMD, pub);
}
开发者ID:WalkingMachine,项目名称:Short-Circuit,代码行数:40,代码来源:openni_tracker.cpp

示例8: glutDisplay

// this function is called each frame
void glutDisplay (void)
{
    xn::SceneMetaData sceneMD;
    xn::DepthMetaData depthMD;
    xn::ImageMetaData imageMD;

    glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();

    g_DepthGenerator.GetMetaData(depthMD);
    g_ImageGenerator.GetMetaData(imageMD);

#ifndef USE_GLES
    glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
    glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

    glDisable(GL_TEXTURE_2D);

    // Read next available data
    g_Context.WaitOneUpdateAll(g_UserGenerator);

    // Process the data
    g_DepthGenerator.GetMetaData(depthMD);
    g_UserGenerator.GetUserPixels(0, sceneMD);
    g_ImageGenerator.GetMetaData(imageMD);

    // Draw the input fetched from the Kinect
    DrawKinectInput(depthMD, sceneMD, imageMD);

#ifndef USE_GLES
    glutSwapBuffers();
#endif
}
开发者ID:ami-lab,项目名称:AmI-Platform,代码行数:40,代码来源:main.cpp

示例9: main


//.........这里部分代码省略.........
        g_bNeedPose = TRUE;
        if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)){
            LOG_E("%s", "Pose required, but not supported");
            return 1;
        }
        nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
        CHECK_RC(nRetVal, "Register to Pose Detected");
        g_SkeletonCap.GetCalibrationPose(g_strPose);
    }
    
    g_SkeletonCap.SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
    
    nRetVal = g_Context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");
    
    // 表示用の画像データの作成
    XnMapOutputMode mapMode;
    g_ImageGenerator.GetMapOutputMode(mapMode);
    g_rgbImage = cvCreateImage(cvSize(mapMode.nXRes, mapMode.nYRes), IPL_DEPTH_8U, 3);

    LOG_I("%s", "Starting to run");
    if(g_bNeedPose){
        LOG_I("%s", "Assume calibration pose");
    }

    xn::Recorder recorder;
    if( DO_RECORED && !USE_RECORED_DATA ){
        // レコーダーの作成
        LOG_I("%s", "Setup Recorder");
        nRetVal = recorder.Create(g_Context);
        CHECK_RC(nRetVal, "Create recorder");
        
        // 保存設定
        nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, RECORD_FILE_PATH);
        CHECK_RC(nRetVal, "Set recorder destination file");
        
        // 深度、ビデオカメラ入力を保存対象として記録開始
        nRetVal = recorder.AddNodeToRecording(g_DepthGenerator, XN_CODEC_NULL);
        CHECK_RC(nRetVal, "Add depth node to recording");
        nRetVal = recorder.AddNodeToRecording(g_ImageGenerator, XN_CODEC_NULL);
        CHECK_RC(nRetVal, "Add image node to recording");
        
        LOG_I("%s", "Recorder setup done.");
    }

    while (!xnOSWasKeyboardHit())
    {
        g_Context.WaitOneUpdateAll(g_UserGenerator);
        if( DO_RECORED  && !USE_RECORED_DATA ){
            nRetVal = recorder.Record();
            CHECK_RC(nRetVal, "Record");
        }

        // ビデオカメラ画像の生データを取得
        xn::ImageMetaData imageMetaData;
        g_ImageGenerator.GetMetaData(imageMetaData);
        // メモリコピー
        xnOSMemCopy(g_rgbImage->imageData, imageMetaData.RGB24Data(), g_rgbImage->imageSize);
        // BGRからRGBに変換して表示
        cvCvtColor(g_rgbImage, g_rgbImage, CV_RGB2BGR);

        // UserGeneratorからユーザー識別ピクセルを取得
        xn::SceneMetaData sceneMetaData;
        g_UserGenerator.GetUserPixels(0, sceneMetaData);
        
        XnUserID allUsers[MAX_NUM_USERS];
        XnUInt16 nUsers = MAX_NUM_USERS;
        g_UserGenerator.GetUsers(allUsers, nUsers);
        for (int i = 0; i < nUsers; i++) {
            
            // キャリブレーションに成功しているかどうか
            if (g_SkeletonCap.IsTracking(allUsers[i])) {
                // スケルトンを描画
                DrawSkelton(allUsers[i], i);
            }
        }
        
        // 表示
        cvShowImage("User View", g_rgbImage);

        // ESCもしくはqが押されたら終了させる
        if (cvWaitKey(10) == 27) {
            break;
        }
    }

    if( !USE_RECORED_DATA ){
        g_scriptNode.Release();
    }
    g_DepthGenerator.Release();
    g_UserGenerator.Release();
    g_Context.Release();

	if (g_rgbImage != NULL) {
		cvReleaseImage(&g_rgbImage);	
	}
	g_Context.Shutdown();

    
}
开发者ID:sparkgene,项目名称:recorder_test,代码行数:101,代码来源:main.cpp

示例10: spin

// Main acquisition loop: starts the OpenNI pipeline once, then repeatedly
// reads skeleton/depth/label data and notifies all subscribers.
// If `async` is true, performs a single iteration and returns; otherwise
// loops until a key is pressed.
void KinectDataPub::spin(bool async)
{
  // One-time pipeline start, shared across calls.
  // NOTE(review): not thread-safe if spin() can be entered concurrently
  // from multiple threads — confirm the intended threading model.
  static bool onceThrough = false;

  if (!onceThrough){
    XnStatus nRetVal = XN_STATUS_OK;

    nRetVal = context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");
    onceThrough = true;

    printf("Starting to run\n");
    if (needPose){
      printf("Assume calibration pose\n");
    }
  }

  XnUserID aUsers[MAX_NUM_USERS];
  XnUInt16 nUsers;
  SkeletonInfo skels[MAX_NUM_USERS];
  SensorMsg msg;
  xn::DepthMetaData depthMD;
  xn::SceneMetaData sceneMD;

  while(!xnOSWasKeyboardHit()){
    context.WaitOneUpdateAll(userGen);

    /**
     * nUsers is used both as input and output parameters.
     * When it's used as input parameter, it indicates the size of the user ID
     * array, while, when it's used as output parameter, it means the number
     * of users recognized. Therefore, it needs to be re-initialized in a loop.
     */
    nUsers = MAX_NUM_USERS;
    userGen.GetUsers(aUsers, nUsers);
    memset(&msg, 0, sizeof(SensorMsg));

    for (XnUInt16 i = 0; i < nUsers; ++i){
      if (userGen.GetSkeletonCap().IsTracking(aUsers[i])){
	// get the user's skeleton
	skels[i].user = aUsers[i];
	getSkeleton(skels[i]);
      }
      else
	skels[i].user = 0; // user is not tracked
    }

    depthGen.GetMetaData(depthMD); // depth map
    userGen.GetUserPixels(0, sceneMD); // labels

    msg.nUsers = nUsers; // num of users detected, either tracked or not
    msg.pSkels = skels;
    msg.pDepthMD = &depthMD;
    msg.pSceneMD = &sceneMD;

    // size_t index avoids the signed/unsigned comparison against
    // std::vector::size() that the original int index produced.
    for (size_t i = 0; i < subscribers.size(); ++i)
      subscribers[i]->notify(RAD_SENSOR_MSG, &msg);

    if (async)
      return;
  }
}
开发者ID:Rajesh-Ru,项目名称:act-rec-proj-2013,代码行数:62,代码来源:KinectDataPub.cpp

示例11: mexFunction

/* The matlab mex function */
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
    XnUInt64 *MXadress;
    double *Pos;
    
    int Jdimsc[2];
    Jdimsc[0]=225; Jdimsc[1]=7;
    plhs[0] = mxCreateNumericArray(2, Jdimsc, mxDOUBLE_CLASS, mxREAL);
    Pos = mxGetPr(plhs[0]);
     
    if(nrhs==0)
    {
       printf("Open failed: Give Pointer to Kinect as input\n");
       mexErrMsgTxt("Kinect Error"); 
    }
        
    MXadress = (XnUInt64*)mxGetData(prhs[0]);
    if(MXadress[0]>0){ g_Context = ((xn::Context*) MXadress[0])[0]; }
    if(MXadress[2]>0)
	{ 
		g_DepthGenerator = ((xn::DepthGenerator*) MXadress[2])[0]; 
	}
	else
	{
		mexErrMsgTxt("No Depth Node in Kinect Context"); 
	}
    if(MXadress[4]>0)
	{ 
		g_UserGenerator = ((xn::UserGenerator*) MXadress[4])[0]; 
	}
    else
	{
		mexErrMsgTxt("No User Node in Kinect Context"); 
	}

    XnStatus nRetVal = XN_STATUS_OK;

    XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
    if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
    {
        printf("Supplied user generator doesn't support skeleton\n");
        return;
    }
    g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
    g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);

    if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
    {
        g_bNeedPose = TRUE;
        if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
        {
            printf("Pose required, but not supported\n");
            return;
        }
        g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);
        g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
    }

    g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
 
    char strLabel[50] = "";
    XnUserID aUsers[15];
    XnUInt16 nUsers = 15;
    int r=0;
    xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	
    // Process the data
    g_DepthGenerator.GetMetaData(depthMD);
    g_UserGenerator.GetUserPixels(0, sceneMD);
    g_UserGenerator.GetUsers(aUsers, nUsers);
 	for (int i = 0; i < nUsers; ++i)
 	{
        if (g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i]))
		{
            //printf(strLabel, "%d - Looking for pose", aUsers[i]);
   
            XnSkeletonJointPosition joint[15];
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_HEAD, joint[0]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_NECK, joint[1]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_SHOULDER, joint[2]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_ELBOW, joint[3]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_HAND, joint[4]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_SHOULDER, joint[5]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_ELBOW, joint[6]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_HAND, joint[7]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_TORSO, joint[8]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_HIP, joint[9]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_KNEE, joint[10]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_FOOT, joint[11]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_HIP, joint[12]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_KNEE, joint[13]);
            g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_FOOT, joint[14]);

            XnPoint3D pt[1];
            for(int j=0; j<15; j++)
            {
                Pos[j            +r]=aUsers[i];
                Pos[j+Jdimsc[0]  +r]=joint[j].fConfidence;
//.........这里部分代码省略.........
开发者ID:BIANZiyang,项目名称:RGBD_Abnormal_Detection,代码行数:101,代码来源:mxNiSkeleton.cpp

示例12: glutDisplay

// this function is called each frame
// GLUT display callback: registers the depth view point onto the RGB camera,
// refreshes depth/image/user data, draws the combined view, and re-acquires
// the tracked player if the current one has been lost.
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
	// NOTE(review): SetViewPoint is called again below after the frame update;
	// confirm whether both calls are required or one is redundant.
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
//	g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	xn::ImageMetaData imageMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	#ifdef USE_GLUT
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#else
	// OpenGL ES uses the float variant.
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}

		// Process the data (note: runs every frame, regardless of pause state)
		//DRAW
		// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
		g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
	//	g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

		g_DepthGenerator.GetMetaData(depthMD);
		g_ImageGenerator.GetMetaData(imageMD);
		g_UserGenerator.GetUserPixels(0, sceneMD);


		DrawDepthMap(depthMD, imageMD, sceneMD, g_nPlayer);

		// A centre-of-mass Z of 0 means the tracked player was lost:
		// reset and search for a new player.
		if (g_nPlayer != 0)
		{
			XnPoint3D com;
			g_UserGenerator.GetCoM(g_nPlayer, com);
			if (com.Z == 0)
			{
				g_nPlayer = 0;
				FindPlayer();
			}
		}

	#ifdef USE_GLUT
	glutSwapBuffers();
	#endif
}
开发者ID:sravanthib,项目名称:openni_annotator,代码行数:62,代码来源:main.cpp

示例13: main

int main(int argc, char **argv) {
	ros::init(argc, argv, "PersonTracker");
	ros::NodeHandle nh;
	ros::Rate loop_rate(10);
    ROS_INFO("Initalizing Arm Connection....");
    ros::Publisher leftArmPublisher = nh.advertise<prlite_kinematics::SphereCoordinate>("/armik/n0/position", 1000);
    ros::Publisher rightArmPublisher = nh.advertise<prlite_kinematics::SphereCoordinate>("/armik/n1/position", 1000);
	yourmom = nh.advertise<prlite_kinematics::some_status_thing>("kinect_hack_status", 1000);
    ros::Duration(2.0).sleep();
    prlite_kinematics::SphereCoordinate coord;
	prlite_kinematics::some_status_thing status;
    coord.radius = 35.0;
    coord.phi = 0.0;
    coord.theta = 0.0;
	status.lulz = 0;	//initialized/reset
	yourmom.publish(status);
    leftArmPublisher.publish(coord);
    rightArmPublisher.publish(coord);
	ROS_INFO("Initalizing Kinect + NITE....");
	XnStatus nRetVal = XN_STATUS_OK;
	xn::EnumerationErrors errors;
	nRetVal = g_Context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	
	if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (nRetVal);
	} else if (nRetVal != XN_STATUS_OK) {
		printf("Open failed: %s\n", xnGetStatusString(nRetVal));
		return (nRetVal);
	}

	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
	CHECK_RC(nRetVal, "Find depth generator");
	nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
	if (nRetVal != XN_STATUS_OK)
	{
		nRetVal = g_UserGenerator.Create(g_Context);
		CHECK_RC(nRetVal, "Find user generator");
	}

	XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
	if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
	{
		printf("Supplied user generator doesn't support skeleton\n");
		return 1;
	}
	g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
	g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);

	if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
	{
		g_bNeedPose = TRUE;
		if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
		{
			printf("Pose required, but not supported\n");
			return 1;
		}
		g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);
		g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
	}

	g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);

	nRetVal = g_Context.StartGeneratingAll();
	CHECK_RC(nRetVal, "StartGenerating");
    ROS_INFO("Ready To Go!\n");

	while (ros::ok()) {
		// Update OpenNI
		g_Context.WaitAndUpdateAll();
		xn::SceneMetaData sceneMD;
		xn::DepthMetaData depthMD;
		g_DepthGenerator.GetMetaData(depthMD);
		g_UserGenerator.GetUserPixels(0, sceneMD);
		// Print positions
		XnUserID aUsers[15];
		XnUInt16 nUsers = 15;
		g_UserGenerator.GetUsers(aUsers, nUsers);
		for (int i = 0; i < nUsers; ++i) {
			if(g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i])) {
				// Read joint positions for person (No WRISTs)
				XnSkeletonJointPosition torsoPosition, lShoulderPosition, rShoulderPosition, neckPosition, headPosition, lElbowPosition, rElbowPosition;
                XnSkeletonJointPosition rWristPosition, lWristPosition, rHipPosition, lHipPosition, lKneePosition, rKneePosition;
                XnSkeletonJointPosition lFootPosition, rFootPosition;
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_TORSO, torsoPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_NECK, neckPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_HEAD, headPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_SHOULDER, lShoulderPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_SHOULDER, rShoulderPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_ELBOW, lElbowPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_ELBOW, rElbowPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_HAND, lWristPosition);
				g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_HAND, rWristPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_HIP, lHipPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_HIP, rHipPosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_LEFT_KNEE, lKneePosition);
                g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i], XN_SKEL_RIGHT_KNEE, rKneePosition);
//.........这里部分代码省略.........
开发者ID:rqou,项目名称:prlite-pc,代码行数:101,代码来源:kinect_joints.cpp

示例14: matrixCalc

	void matrixCalc(void *outputs)
	{
		TML::Matrix out1(outputs, 0);
		TML::Matrix out2(outputs, 1);
		TML::Matrix out3(outputs, 2);
		TML::Matrix out4(outputs, 3);
		
		xn::DepthMetaData depthMD;
		xn::SceneMetaData sceneMD;
		xn::ImageMetaData imageMD;
		
		depth.GetMetaData(depthMD);
		user.GetUserPixels(0, sceneMD);
		image.GetMetaData(imageMD);
		
		context.WaitNoneUpdateAll();
		
		t_jit_matrix_info tmi;
		memset(&tmi, 0, sizeof(tmi));
		tmi.dimcount = 2;
		tmi.planecount = 1;
		tmi.dimstride[0] = 4;
		tmi.dimstride[1] = depthMD.XRes()*4;
		int width = tmi.dim[0] = depthMD.XRes();
		int height = tmi.dim[1] = depthMD.YRes();
		tmi.type = _jit_sym_float32;
		
		out1.resizeTo(&tmi);
		
		tmi.planecount = 1;
		tmi.dimstride[0] = 1;
		tmi.dimstride[1] = depthMD.XRes();
		tmi.type = _jit_sym_char;
		out2.resizeTo(&tmi);
		
		tmi.planecount = 4;
		tmi.dimstride[0] = 4;
		tmi.dimstride[1] = depthMD.XRes()*4;
		tmi.type = _jit_sym_char;
		out3.resizeTo(&tmi);
		
		const XnDepthPixel* pDepth = depthMD.Data();
		float *depthData = (float*)out1.data();
		
		//Copy depth data
		int x,y;
		for (y=0; y<height; y++)
		{
			for (x=0; x<width; x++)
			{
				depthData[0] = (float)pDepth[0]/powf(2, 15);
				
				depthData++;
				pDepth++;
			}
		}
		
		//Get the users
		unsigned char *userData = (unsigned char*)out2.data();
		const XnLabel* pLabels = sceneMD.Data();
		for (y=0; y<height; y++)
		{
			for (x=0; x<width; x++)
			{
				userData[0] = pLabels[0];
				
				userData++;
				pLabels++;
			}
		}
		
		//Get the colors
		const XnRGB24Pixel* pPixels = imageMD.RGB24Data();
		unsigned char *pixData = (unsigned char*)out3.data();
		for (y=0; y<height; y++)
		{
			for (x=0; x<width; x++)
			{
				pixData[0] = 0;
				pixData[1] = pPixels[0].nRed;
				pixData[2] = pPixels[0].nGreen;
				pixData[3] = pPixels[0].nBlue;
				
				pixData+=4;
				pPixels++;
			}
		}
		
		//For all the users -- output the joint info...
		XnUserID aUsers[15];
		XnUInt16 nUsers = 15;
		user.GetUsers(aUsers, nUsers);
		
		int rUsers = 0;
		
		xn::SkeletonCapability sc = user.GetSkeletonCap();
		
		int i;
		for (i=0; i<nUsers; i++)
		{
//.........这里部分代码省略.........
开发者ID:kidaa,项目名称:Synthesis,代码行数:101,代码来源:tml.jit.OpenNI.cpp

示例15: glutDisplay

// this function is called each frame
void glutDisplay (void)
{

	if(gScene == NULL) return;
	
	gScene->simulate(1.0f/30.0f);
	glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);

	// 射影マトリックス
	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();
	gluPerspective(100.0f, (float)glutGet(GLUT_WINDOW_WIDTH)/(float)glutGet(GLUT_WINDOW_HEIGHT), 1.0f, 10000.0f); // 視点の位置

	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	gluLookAt(gEye.x, gEye.y, gEye.z, gEye.x + gDir.x, gEye.y + gDir.y, gEye.z + gDir.z, 0.0f, 1.0f, 0.0f);
	
	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	g_DepthGenerator.GetMetaData(depthMD);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}

	for(int i=0;i<2;i++){
		for(int j=0;j<15;j++){
			oldjpoint[i][j]=jpoint[i][j];
		}
	}
	// Process the data
	g_DepthGenerator.GetMetaData(depthMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	CalculateJoint();

	head.x=(jpoint[0][0].X);
	head.y=(jpoint[0][0].Y);
	head.z=(jpoint[0][0].Z);

	neck.x=(jpoint[0][1].X);
	neck.y=(jpoint[0][1].Y);
	neck.z=(jpoint[0][1].Z);
	
	rshoulder.x=(jpoint[0][2].X);
	rshoulder.y=(jpoint[0][2].Y);
	rshoulder.z=(jpoint[0][2].Z);

	relbow.x=(jpoint[0][3].X*2+oldjpoint[0][3].X)/3;
	relbow.y=(jpoint[0][3].Y*2+oldjpoint[0][3].Y)/3;
	relbow.z=(jpoint[0][3].Z*2+oldjpoint[0][3].Z)/3;

	rhand.x=(jpoint[0][4].X*2+oldjpoint[0][4].X)/3;
	rhand.y=(jpoint[0][4].Y*2+oldjpoint[0][4].Y)/3;
	rhand.z=(jpoint[0][4].Z*2+oldjpoint[0][4].Z)/3;

	lshoulder.x=(jpoint[0][5].X*2+oldjpoint[0][5].X)/3;
	lshoulder.y=(jpoint[0][5].Y*2+oldjpoint[0][5].Y)/3;
	lshoulder.z=(jpoint[0][5].Z*2+oldjpoint[0][5].Z)/3;

	lelbow.x=(jpoint[0][6].X*2+oldjpoint[0][6].X)/3;
	lelbow.y=(jpoint[0][6].Y*2+oldjpoint[0][6].Y)/3;
	lelbow.z=(jpoint[0][6].Z*2+oldjpoint[0][6].Z)/3;

	lhand.x=(jpoint[0][7].X*2+oldjpoint[0][7].X)/3;
	lhand.y=(jpoint[0][7].Y*2+oldjpoint[0][7].Y)/3;
	lhand.z=(jpoint[0][7].Z*2+oldjpoint[0][7].Z)/3;

	torso.x=(jpoint[0][8].X*2+oldjpoint[0][8].X)/3;
	torso.y=(jpoint[0][8].Y*2+oldjpoint[0][8].Y)/3;
	torso.z=(jpoint[0][8].Z*2+oldjpoint[0][8].Z)/3;

	rhip.x=(jpoint[0][9].X*2+oldjpoint[0][9].X)/3;
	rhip.y=(jpoint[0][9].Y*2+oldjpoint[0][9].Y)/3;
	rhip.z=(jpoint[0][9].Z*2+oldjpoint[0][9].Z)/3;

	rknee.x=(jpoint[0][10].X*2+oldjpoint[0][10].X)/3;
	rknee.y=(jpoint[0][10].Y*2+oldjpoint[0][10].Y)/3;
	rknee.z=(jpoint[0][10].Z*2+oldjpoint[0][10].Z)/3;

	rfoot.x=(jpoint[0][11].X*2+oldjpoint[0][11].X)/3;
	rfoot.y=(jpoint[0][11].Y*2+oldjpoint[0][11].Y)/3;
	rfoot.z=(jpoint[0][11].Z*2+oldjpoint[0][11].Z)/3;

	lhip.x=(jpoint[0][12].X*2+oldjpoint[0][12].X)/3;
	lhip.y=(jpoint[0][12].Y*2+oldjpoint[0][12].Y)/3;
	lhip.z=(jpoint[0][12].Z*2+oldjpoint[0][12].Z)/3;

	lknee.x=(jpoint[0][13].X*2+oldjpoint[0][13].X)/3;
	lknee.y=(jpoint[0][13].Y*2+oldjpoint[0][13].Y)/3;
	lknee.z=(jpoint[0][13].Z*2+oldjpoint[0][13].Z)/3;

	lfoot.x=(jpoint[0][14].X*2+oldjpoint[0][14].X)/3;
	lfoot.y=(jpoint[0][14].Y*2+oldjpoint[0][14].Y)/3;
	lfoot.z=(jpoint[0][14].Z*2+oldjpoint[0][14].Z)/3;

	printf("%f, %f, %f\n",rightreduction.x, rightreduction.y, rightreduction.z);
	printf("%f, %f, %f\n",leftreduction.x, leftreduction.y, leftreduction.z);
//.........这里部分代码省略.........
开发者ID:kio327,项目名称:PKgame,代码行数:101,代码来源:main.cpp


注:本文中的xn::UserGenerator::GetUserPixels方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。