This article collects typical usage examples of the C++ class xn::DepthGenerator. If you are unsure what DepthGenerator is for, how to use it, or what calling it looks like in practice, the curated class code examples below should help.
A total of 15 DepthGenerator code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: draw
void draw() {
    glClear(GL_COLOR_BUFFER_BIT);

    XnMapOutputMode mode;
    g_DepthGenerator.GetMapOutputMode(mode);

    Matrix4f projectionMatrix = Matrix4f();
    projectionMatrix.Ortho(0, mode.nXRes, mode.nYRes, 0, -1.0, 1.0);

    Vector primaryPoint;
    primaryPoint = pointHandler->getPrimaryPoint();
    Vector movement = primaryPoint - position;
    movement.normalize();
    position += movement;
    position.print();

    Matrix4f modelViewMatrix;
    modelViewMatrix.translate(position.getX(), position.getY(), 0);
    Matrix4f modelViewProjectionMatrix;
    modelViewProjectionMatrix = projectionMatrix * modelViewMatrix;
    //glUniformMatrix4fv(matLoc, 1, GL_FALSE, modelViewProjectionMatrix);

    // Use our shader
    glUseProgram(programObject);
    modelViewProjectionMatrix.uniformMatrix(matLoc);

    // Draw the triangle
    glDrawArrays(GL_LINE_LOOP, 0, 3);

    g_Context.WaitOneUpdateAll(g_DepthGenerator);

    // Update NITE tree
    g_pSessionManager->Update(&g_Context);
    PrintSessionState(g_SessionState);

    SDL_GL_SwapBuffers();
}
Example 2: DrawCircle
void DrawCircle(xn::UserGenerator& userGenerator,
                xn::DepthGenerator& depthGenerator,
                XnUserID player, XnSkeletonJoint eJoint, float radius, XnFloat *color3f)
{
    XnSkeletonJointPosition joint;
    userGenerator.GetSkeletonCap().GetSkeletonJointPosition(player, eJoint, joint);
    if (joint.fConfidence < 0.5) {
        return;
    }

    // Convert the joint position from real-world to projective (screen) coordinates.
    XnPoint3D pt;
    pt = joint.position;
    depthGenerator.ConvertRealWorldToProjective(1, &pt, &pt);

    float cx = pt.X;
    float cy = pt.Y;
    float r = radius;
    int num_segments = 16;

    glColor3f(color3f[0], color3f[1], color3f[2]);
    glBegin(GL_TRIANGLE_FAN);
    glVertex2f(cx, cy);
    for (int i = 0; i <= num_segments; i++)
    {
        float theta = 2.0f * 3.1415926f * float(i) / float(num_segments); // current angle
        float x = r * cosf(theta); // x component
        float y = r * sinf(theta); // y component
        glVertex2f(x + cx, y + cy); // output vertex
    }
    glEnd();
}
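DrawCircle takes the user and depth generators by reference, so a call site simply forwards the global nodes. A rough usage sketch follows (the player id, joint choices and colour are assumptions for illustration, not part of the example above):

// Hypothetical call site: mark both hands of the tracked player in red.
XnFloat red[3] = { 1.0f, 0.0f, 0.0f };
DrawCircle(g_UserGenerator, g_DepthGenerator, playerId, XN_SKEL_LEFT_HAND, 10.0f, red);
DrawCircle(g_UserGenerator, g_DepthGenerator, playerId, XN_SKEL_RIGHT_HAND, 10.0f, red);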
Example 3: getJointPoint
XnPoint3D getJointPoint(XnUserID player, XnSkeletonJoint eJoint) {
    XnPoint3D pt = { 0, 0, 0 };
    if (!g_UserGenerator.GetSkeletonCap().IsCalibrated(player))
    {
        printf("not calibrated!\n");
        return pt;
    }
    if (!g_UserGenerator.GetSkeletonCap().IsTracking(player))
    {
        printf("not tracked!\n");
        return pt;
    }
    XnSkeletonJointPosition joint;
    g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(player, eJoint, joint);
    if (joint.fConfidence < 0.5)
    {
        return pt;
    }
    // Convert the single joint position from real-world to projective coordinates.
    g_DepthGenerator.ConvertRealWorldToProjective(1, &joint.position, &pt);
    return pt;
}
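Because getJointPoint already converts to projective (screen) coordinates, its result can feed 2D drawing directly. A minimal sketch of such a helper (assumed for illustration, not taken from the original project):

// Hypothetical helper: draw a line between two joints of a tracked user.
void DrawJointLine(XnUserID player, XnSkeletonJoint a, XnSkeletonJoint b)
{
    XnPoint3D p1 = getJointPoint(player, a);
    XnPoint3D p2 = getJointPoint(player, b);
    glBegin(GL_LINES);
    glVertex2f(p1.X, p1.Y);
    glVertex2f(p2.X, p2.Y);
    glEnd();
}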
Example 4: DrawDepthMap
//......... part of the code omitted here .........
pDepth++;
pLabels++;
pDestImage+=3;
}
pDestImage += (texWidth - g_nXRes) *3;
}
}
else
{
xnOSMemSet(pDepthTexBuf, 0, 3*2*g_nXRes*g_nYRes);
}
glBindTexture(GL_TEXTURE_2D, depthTexID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, texWidth, texHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, pDepthTexBuf);
// Display the OpenGL texture map
glColor4f(0.75,0.75,0.75,1);
glEnable(GL_TEXTURE_2D);
DrawTexture(dmd.XRes(),dmd.YRes(),0,0);
glDisable(GL_TEXTURE_2D);
char strLabel[50] = "";
XnUserID aUsers[15];
XnUInt16 nUsers = 15;
g_UserGenerator.GetUsers(aUsers, nUsers);
for (int i = 0; i < nUsers; ++i)
{
if (g_bPrintID)
{
XnPoint3D com;
g_UserGenerator.GetCoM(aUsers[i], com);
g_DepthGenerator.ConvertRealWorldToProjective(1, &com, &com);
xnOSMemSet(strLabel, 0, sizeof(strLabel));
if (!g_bPrintState)
{
// Tracking
sprintf(strLabel, "%d", aUsers[i]);
}
else if (g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i]))
{
// Tracking
sprintf(strLabel, "%d - Tracking", aUsers[i]);
}
else if (g_UserGenerator.GetSkeletonCap().IsCalibrating(aUsers[i]))
{
// Calibrating
sprintf(strLabel, "%d - Calibrating [%s]", aUsers[i], GetCalibrationErrorString(m_Errors[aUsers[i]].first));
}
else
{
// Nothing
sprintf(strLabel, "%d - Looking for pose [%s]", aUsers[i], GetPoseErrorString(m_Errors[aUsers[i]].second));
}
glColor4f(1-Colors[i%nColors][0], 1-Colors[i%nColors][1], 1-Colors[i%nColors][2], 1);
glRasterPos2i(com.X, com.Y);
glPrintString(GLUT_BITMAP_HELVETICA_18, strLabel);
}
//Draw skeleton
if (g_bDrawSkeleton && g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i]))
{
glBegin(GL_LINES);
glColor4f(1-Colors[aUsers[i]%nColors][0], 1-Colors[aUsers[i]%nColors][1], 1-Colors[aUsers[i]%nColors][2], 1);
DrawLimb(aUsers[i], XN_SKEL_HEAD, XN_SKEL_NECK);
DrawLimb(aUsers[i], XN_SKEL_NECK, XN_SKEL_LEFT_SHOULDER);
DrawLimb(aUsers[i], XN_SKEL_LEFT_SHOULDER, XN_SKEL_LEFT_ELBOW);
DrawLimb(aUsers[i], XN_SKEL_LEFT_ELBOW, XN_SKEL_LEFT_HAND);
DrawLimb(aUsers[i], XN_SKEL_NECK, XN_SKEL_RIGHT_SHOULDER);
DrawLimb(aUsers[i], XN_SKEL_RIGHT_SHOULDER, XN_SKEL_RIGHT_ELBOW);
DrawLimb(aUsers[i], XN_SKEL_RIGHT_ELBOW, XN_SKEL_RIGHT_HAND);
DrawLimb(aUsers[i], XN_SKEL_LEFT_SHOULDER, XN_SKEL_TORSO);
DrawLimb(aUsers[i], XN_SKEL_RIGHT_SHOULDER, XN_SKEL_TORSO);
DrawLimb(aUsers[i], XN_SKEL_TORSO, XN_SKEL_LEFT_HIP);
DrawLimb(aUsers[i], XN_SKEL_LEFT_HIP, XN_SKEL_LEFT_KNEE);
DrawLimb(aUsers[i], XN_SKEL_LEFT_KNEE, XN_SKEL_LEFT_FOOT);
DrawLimb(aUsers[i], XN_SKEL_TORSO, XN_SKEL_RIGHT_HIP);
DrawLimb(aUsers[i], XN_SKEL_RIGHT_HIP, XN_SKEL_RIGHT_KNEE);
DrawLimb(aUsers[i], XN_SKEL_RIGHT_KNEE, XN_SKEL_RIGHT_FOOT);
DrawLimb(aUsers[i], XN_SKEL_LEFT_HIP, XN_SKEL_RIGHT_HIP);
glEnd();
}
}
}
Example 5: captureDepth
cv::Mat xncv::captureDepth(const xn::DepthGenerator& generator)
{
    xn::DepthMetaData meta;
    generator.GetMetaData(meta);
    // Wrap the generator's 16-bit depth buffer in a cv::Mat header (no copy is made).
    return cv::Mat(meta.YRes(), meta.XRes(), cv::DataType<ushort>::type, (void*)meta.Data());
}
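The returned cv::Mat wraps the generator's internal frame buffer without copying, so it is only valid until the next context update; clone it if the data must outlive that. A minimal usage sketch (the g_Context / g_DepthGenerator globals and the 4096 mm scale are assumptions for illustration):

// Grab one frame and display it as an 8-bit image.
g_Context.WaitOneUpdateAll(g_DepthGenerator);
cv::Mat depth16 = xncv::captureDepth(g_DepthGenerator);
cv::Mat depth8;
depth16.convertTo(depth8, CV_8U, 255.0 / 4096.0); // map roughly 0..4 m to 0..255
cv::imshow("depth", depth8);
cv::waitKey(1);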
Example 6: mexFunction
/* The matlab mex function */
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
XnUInt64 *MXadress;
double *Pos;
mwSize Jdimsc[2];
Jdimsc[0]=225; Jdimsc[1]=7;
plhs[0] = mxCreateNumericArray(2, Jdimsc, mxDOUBLE_CLASS, mxREAL);
Pos = mxGetPr(plhs[0]);
if(nrhs==0)
{
printf("Open failed: Give Pointer to Kinect as input\n");
mexErrMsgTxt("Kinect Error");
}
MXadress = (XnUInt64*)mxGetData(prhs[0]);
if(MXadress[0]>0){ g_Context = ((xn::Context*) MXadress[0])[0]; }
if(MXadress[2]>0)
{
g_DepthGenerator = ((xn::DepthGenerator*) MXadress[2])[0];
}
else
{
mexErrMsgTxt("No Depth Node in Kinect Context");
}
if(MXadress[4]>0)
{
g_UserGenerator = ((xn::UserGenerator*) MXadress[4])[0];
}
else
{
mexErrMsgTxt("No User Node in Kinect Context");
}
XnStatus nRetVal = XN_STATUS_OK;
XnCallbackHandle hUserCallbacks, hCalibrationCallbacks, hPoseCallbacks;
if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
{
printf("Supplied user generator doesn't support skeleton\n");
return;
}
g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
g_UserGenerator.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, NULL, hCalibrationCallbacks);
if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
{
g_bNeedPose = TRUE;
if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
{
printf("Pose required, but not supported\n");
return;
}
g_UserGenerator.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, NULL, hPoseCallbacks);
g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
}
g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
char strLabel[50] = "";
XnUserID aUsers[15];
XnUInt16 nUsers = 15;
int r=0;
xn::SceneMetaData sceneMD;
xn::DepthMetaData depthMD;
// Process the data
g_DepthGenerator.GetMetaData(depthMD);
g_UserGenerator.GetUserPixels(0, sceneMD);
g_UserGenerator.GetUsers(aUsers, nUsers);
for (int i = 0; i < nUsers; ++i)
{
if (g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i]))
{
//printf(strLabel, "%d - Looking for pose", aUsers[i]);
XnSkeletonJointPosition joint[15];
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_HEAD, joint[0]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_NECK, joint[1]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_SHOULDER, joint[2]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_ELBOW, joint[3]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_HAND, joint[4]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_SHOULDER, joint[5]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_ELBOW, joint[6]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_HAND, joint[7]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_TORSO, joint[8]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_HIP, joint[9]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_KNEE, joint[10]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_LEFT_FOOT, joint[11]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_HIP, joint[12]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_KNEE, joint[13]);
g_UserGenerator.GetSkeletonCap().GetSkeletonJointPosition(aUsers[i],XN_SKEL_RIGHT_FOOT, joint[14]);
XnPoint3D pt[1];
for(int j=0; j<15; j++)
{
Pos[j +r]=aUsers[i];
Pos[j+Jdimsc[0] +r]=joint[j].fConfidence;
//......... part of the code omitted here .........
Example 7: initialize
bool initialize(){
std::cout << "initializing kinect... " << endl;
#ifdef USE_MS_SKD
int iSensorCount = 0;
if ( NuiGetSensorCount(&iSensorCount) < 0 )
return false;
// Look at each Kinect sensor
for (int i = 0; i < iSensorCount; ++i)
{
// Create the sensor so we can check status, if we can't create it, move on to the next
if ( NuiCreateSensorByIndex(i, &g_pNuiSensor) < 0 ) continue;
// Get the status of the sensor, and if connected, then we can initialize it
if ( 0 == g_pNuiSensor->NuiStatus() ){
break;
}
// This sensor wasn't OK, so release it since we're not using it
g_pNuiSensor->Release();
g_pNuiSensor = NULL;
}
if (NULL != g_pNuiSensor)
{
// Initialize the Kinect and specify that we'll be using depth
if ( g_pNuiSensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH) >= 0 )
{
// Create an event that will be signaled when depth data is available
g_hNextDepthFrameEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
// Open a depth image stream to receive depth frames
g_pNuiSensor->NuiImageStreamOpen(
NUI_IMAGE_TYPE_DEPTH,
NUI_IMAGE_RESOLUTION_640x480,
0,
2,
g_hNextDepthFrameEvent,
&g_pDepthStreamHandle);
}
}
else
return false;
#endif
#ifdef USE_OPENNI
// Initialize context object
g_RetVal = g_Context.Init();
g_RetVal = g_DepthGenerator.Create(g_Context);
if (g_RetVal != XN_STATUS_OK)
printf("Failed creating DEPTH generator %s\n", xnGetStatusString(g_RetVal));
XnMapOutputMode outputMode;
outputMode.nXRes = g_im_w;
outputMode.nYRes = g_im_h;
outputMode.nFPS = g_fps;
g_RetVal = g_DepthGenerator.SetMapOutputMode(outputMode);
if (g_RetVal != XN_STATUS_OK){
printf("Failed setting the DEPTH output mode %s\n", xnGetStatusString(g_RetVal));
return false;
}
g_RetVal = g_Context.StartGeneratingAll();
if (g_RetVal != XN_STATUS_OK){
printf("Failed starting generating all %s\n", xnGetStatusString(g_RetVal));
return false;
}
#endif
#ifdef USE_LIBFREENECT
depth_mid = (uint16_t*)malloc(640*480*2);
depth_front = (uint16_t*)malloc(640*480*2);
if (freenect_init(&f_ctx, NULL) < 0) {
printf("freenect_init() failed\n");
return false;
}
freenect_set_log_level(f_ctx, FREENECT_LOG_ERROR);
freenect_select_subdevices(f_ctx, (freenect_device_flags)(FREENECT_DEVICE_MOTOR | FREENECT_DEVICE_CAMERA));
int nr_devices = freenect_num_devices (f_ctx);
printf ("Number of devices found: %d\n", nr_devices);
int user_device_number = 0;
if (nr_devices < 1) {
freenect_shutdown(f_ctx);
return false;
}
if (freenect_open_device(f_ctx, &f_dev, user_device_number) < 0) {
printf("Could not open device\n");
freenect_shutdown(f_ctx);
//......... part of the code omitted here .........
Example 8: main
int main() {
const unsigned int nBackgroundTrain = 30;
const unsigned short touchDepthMin = 10;
const unsigned short touchDepthMax = 20;
const unsigned int touchMinArea = 50;
const bool localClientMode = true; // connect to a local client
const double debugFrameMaxDepth = 4000; // maximal distance (in millimeters) for 8 bit debug depth frame quantization
const char* windowName = "Debug";
const Scalar debugColor0(0,0,128);
const Scalar debugColor1(255,0,0);
const Scalar debugColor2(255,255,255);
int xMin = 110;
int xMax = 560;
int yMin = 120;
int yMax = 320;
Mat1s depth(480, 640); // 16 bit depth (in millimeters)
Mat1b depth8(480, 640); // 8 bit depth
Mat3b rgb(480, 640); // 8 bit RGB color image
Mat3b debug(480, 640); // debug visualization
Mat1s foreground(480, 640);
Mat1b foreground8(480, 640);
Mat1b touch(480, 640); // touch mask
Mat1s background(480, 640);
vector<Mat1s> buffer(nBackgroundTrain);
CHECK_RC(initOpenNI("niConfig.xml"), "initOpenNI");
// TUIO server object
TuioServer* tuio;
if (localClientMode) {
tuio = new TuioServer();
} else {
tuio = new TuioServer("192.168.0.1",3333,false);
}
TuioTime time;
// create some sliders
namedWindow(windowName);
createTrackbar("xMin", windowName, &xMin, 640);
createTrackbar("xMax", windowName, &xMax, 640);
createTrackbar("yMin", windowName, &yMin, 480);
createTrackbar("yMax", windowName, &yMax, 480);
// create background model (average depth)
for (unsigned int i=0; i<nBackgroundTrain; i++) {
CHECK_RC(xnContext.WaitAndUpdateAll(), "xnContext.WaitAndUpdateAll()");
depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
buffer[i] = depth;
}
average(buffer, background);
while ( waitKey(1) != 27 ) {
// read available data
xnContext.WaitAndUpdateAll();
// update 16 bit depth matrix
depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
//xnImgeGenertor.GetGrayscale8ImageMap()
// update rgb image
//rgb.data = (uchar*) xnImgeGenertor.GetRGB24ImageMap(); // segmentation fault here
//cvtColor(rgb, rgb, CV_RGB2BGR);
// extract foreground by simple subtraction of very basic background model
foreground = background - depth;
// find touch mask by thresholding (points that are close to background = touch points)
touch = (foreground > touchDepthMin) & (foreground < touchDepthMax);
// extract ROI
Rect roi(xMin, yMin, xMax - xMin, yMax - yMin);
Mat touchRoi = touch(roi);
// find touch points
vector< vector<Point2i> > contours;
vector<Point2f> touchPoints;
findContours(touchRoi, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point2i(xMin, yMin));
for (unsigned int i=0; i<contours.size(); i++) {
Mat contourMat(contours[i]);
// find touch points by area thresholding
if ( contourArea(contourMat) > touchMinArea ) {
Scalar center = mean(contourMat);
Point2i touchPoint(center[0], center[1]);
touchPoints.push_back(touchPoint);
}
}
// send TUIO cursors
time = TuioTime::getSessionTime();
//......... part of the code omitted here .........
Example 9: glutDisplay
// this function is called each frame
void glutDisplay (void)
{
if(gScene == NULL) return;
gScene->simulate(1.0f/30.0f);
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
// projection matrix
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(100.0f, (float)glutGet(GLUT_WINDOW_WIDTH)/(float)glutGet(GLUT_WINDOW_HEIGHT), 1.0f, 10000.0f); // viewpoint position
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(gEye.x, gEye.y, gEye.z, gEye.x + gDir.x, gEye.y + gDir.y, gEye.z + gDir.z, 0.0f, 1.0f, 0.0f);
xn::SceneMetaData sceneMD;
xn::DepthMetaData depthMD;
g_DepthGenerator.GetMetaData(depthMD);
if (!g_bPause)
{
// Read next available data
g_Context.WaitAndUpdateAll();
}
for(int i=0;i<2;i++){
for(int j=0;j<15;j++){
oldjpoint[i][j]=jpoint[i][j];
}
}
// Process the data
g_DepthGenerator.GetMetaData(depthMD);
g_UserGenerator.GetUserPixels(0, sceneMD);
CalculateJoint();
head.x=(jpoint[0][0].X);
head.y=(jpoint[0][0].Y);
head.z=(jpoint[0][0].Z);
neck.x=(jpoint[0][1].X);
neck.y=(jpoint[0][1].Y);
neck.z=(jpoint[0][1].Z);
rshoulder.x=(jpoint[0][2].X);
rshoulder.y=(jpoint[0][2].Y);
rshoulder.z=(jpoint[0][2].Z);
relbow.x=(jpoint[0][3].X*2+oldjpoint[0][3].X)/3;
relbow.y=(jpoint[0][3].Y*2+oldjpoint[0][3].Y)/3;
relbow.z=(jpoint[0][3].Z*2+oldjpoint[0][3].Z)/3;
rhand.x=(jpoint[0][4].X*2+oldjpoint[0][4].X)/3;
rhand.y=(jpoint[0][4].Y*2+oldjpoint[0][4].Y)/3;
rhand.z=(jpoint[0][4].Z*2+oldjpoint[0][4].Z)/3;
lshoulder.x=(jpoint[0][5].X*2+oldjpoint[0][5].X)/3;
lshoulder.y=(jpoint[0][5].Y*2+oldjpoint[0][5].Y)/3;
lshoulder.z=(jpoint[0][5].Z*2+oldjpoint[0][5].Z)/3;
lelbow.x=(jpoint[0][6].X*2+oldjpoint[0][6].X)/3;
lelbow.y=(jpoint[0][6].Y*2+oldjpoint[0][6].Y)/3;
lelbow.z=(jpoint[0][6].Z*2+oldjpoint[0][6].Z)/3;
lhand.x=(jpoint[0][7].X*2+oldjpoint[0][7].X)/3;
lhand.y=(jpoint[0][7].Y*2+oldjpoint[0][7].Y)/3;
lhand.z=(jpoint[0][7].Z*2+oldjpoint[0][7].Z)/3;
torso.x=(jpoint[0][8].X*2+oldjpoint[0][8].X)/3;
torso.y=(jpoint[0][8].Y*2+oldjpoint[0][8].Y)/3;
torso.z=(jpoint[0][8].Z*2+oldjpoint[0][8].Z)/3;
rhip.x=(jpoint[0][9].X*2+oldjpoint[0][9].X)/3;
rhip.y=(jpoint[0][9].Y*2+oldjpoint[0][9].Y)/3;
rhip.z=(jpoint[0][9].Z*2+oldjpoint[0][9].Z)/3;
rknee.x=(jpoint[0][10].X*2+oldjpoint[0][10].X)/3;
rknee.y=(jpoint[0][10].Y*2+oldjpoint[0][10].Y)/3;
rknee.z=(jpoint[0][10].Z*2+oldjpoint[0][10].Z)/3;
rfoot.x=(jpoint[0][11].X*2+oldjpoint[0][11].X)/3;
rfoot.y=(jpoint[0][11].Y*2+oldjpoint[0][11].Y)/3;
rfoot.z=(jpoint[0][11].Z*2+oldjpoint[0][11].Z)/3;
lhip.x=(jpoint[0][12].X*2+oldjpoint[0][12].X)/3;
lhip.y=(jpoint[0][12].Y*2+oldjpoint[0][12].Y)/3;
lhip.z=(jpoint[0][12].Z*2+oldjpoint[0][12].Z)/3;
lknee.x=(jpoint[0][13].X*2+oldjpoint[0][13].X)/3;
lknee.y=(jpoint[0][13].Y*2+oldjpoint[0][13].Y)/3;
lknee.z=(jpoint[0][13].Z*2+oldjpoint[0][13].Z)/3;
lfoot.x=(jpoint[0][14].X*2+oldjpoint[0][14].X)/3;
lfoot.y=(jpoint[0][14].Y*2+oldjpoint[0][14].Y)/3;
lfoot.z=(jpoint[0][14].Z*2+oldjpoint[0][14].Z)/3;
printf("%f, %f, %f\n",rightreduction.x, rightreduction.y, rightreduction.z);
printf("%f, %f, %f\n",leftreduction.x, leftreduction.y, leftreduction.z);
//......... part of the code omitted here .........
Example 10: main
int main() {
const unsigned int nBackgroundTrain = 30;
const unsigned short touchDepthMin = 10;
const unsigned short touchDepthMax = 20;
const unsigned int touchMinArea = 50;
const bool localClientMode = true; // connect to a local client
const double debugFrameMaxDepth = 4000; // maximal distance (in millimeters) for 8 bit debug depth frame quantization
const char* windowName = "Debug";
const char* colorWindowName = "image";
const Scalar debugColor0(0, 0, 128);
const Scalar debugColor1(255, 0, 0);
const Scalar debugColor2(255, 255, 255);
const Scalar debugColor3(0, 255, 255);
const Scalar debugColor4(255, 0, 255);
int xMin = 50;
int xMax = 550;
int yMin = 50;
int yMax = 300;
Mat1s depth(480, 640); // 16 bit depth (in millimeters)
Mat1b depth8(480, 640); // 8 bit depth
Mat3b rgb(480, 640); // 8 bit RGB color image
Mat3b debug(480, 640); // debug visualization
Mat1s foreground(480, 640);
Mat1b foreground8(480, 640);
Mat1b touch(480, 640); // touch mask
Mat1s background(480, 640);
vector<Mat1s> buffer(nBackgroundTrain);
IplImage * image = cvCreateImage(cvSize(640, 480), 8, 3);
IplImage * convertedImage = cvCreateImage(cvSize(640, 480), 8, 3);
initOpenNI("niConfig.xml");
// TUIO server object
TuioServer* tuio;
if (localClientMode) {
tuio = new TuioServer();
} else {
tuio = new TuioServer("192.168.0.2", 3333, false);
}
TuioTime time;
namedWindow(colorWindowName);
createTrackbar("xMin", colorWindowName, &xMin, 640);
createTrackbar("xMax", colorWindowName, &xMax, 640);
createTrackbar("yMin", colorWindowName, &yMin, 480);
createTrackbar("yMax", colorWindowName, &yMax, 480);
// create some sliders
namedWindow(windowName);
createTrackbar("xMin", windowName, &xMin, 640);
createTrackbar("xMax", windowName, &xMax, 640);
createTrackbar("yMin", windowName, &yMin, 480);
createTrackbar("yMax", windowName, &yMax, 480);
Keyboard * piano = new Keyboard();
(*piano).initKeyMap();
system("qjackctl &");
sleep(4);
JackByTheNotes * notesJack = new JackByTheNotes();
notesJack->connect();
sleep(2);
system("sudo jack_connect Piano:Rubinstein system:playback_1 &");
map<double, timeval> keys;
// create background model (average depth)
for (unsigned int i = 0; i < nBackgroundTrain; i++) {
xnContext.WaitAndUpdateAll();
depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
buffer[i] = depth;
}
average(buffer, background);
while (waitKey(1) != 27) {
// read available data
xnContext.WaitAndUpdateAll();
// update 16 bit depth matrix
depth.data = (uchar*) xnDepthGenerator.GetDepthMap();
//xnImgeGenertor.GetGrayscale8ImageMap()
XnRGB24Pixel* xnRgb =
const_cast<XnRGB24Pixel*>(xnImgeGenertor.GetRGB24ImageMap());
// IplImage * image = cvCreateImage(cvSize(640, 480), 8, 3);
// IplImage * convertedImage = cvCreateImage(cvSize(640, 480), 8, 3);
cvSetData (image, xnRgb, 640 * 3);
cvConvertImage(image, convertedImage, CV_CVTIMG_SWAP_RB);
bool color = true;
rgb = convertedImage;
// cvtColor(rgb,rgb,CV_RGB2BGR);
//......... part of the code omitted here .........
Example 11: DrawDepthMap
//......... part of the code omitted here .........
{
pDestImage[0] = 0;
pDestImage[1] = 0;
pDestImage[2] = 0;
}
pDepth++;
pLabels++;
pDestImage+=3;
}
pDestImage += (texWidth - g_nXRes) *3;
}
}
glBindTexture(GL_TEXTURE_2D, depthTexID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, texWidth, texHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, pDepthTexBuf);
// Display the OpenGL texture map
glColor4f(0.75,0.75,0.75,1);
glEnable(GL_TEXTURE_2D);
DrawTexture(dmd.XRes(),dmd.YRes(),0,0);
glDisable(GL_TEXTURE_2D);
char strLabel[20] = "";
XnUserID aUsers[15];
XnUInt16 nUsers = 15;
g_UserGenerator.GetUsers(aUsers, nUsers);
for (int i = 0; i < nUsers; ++i)
{
XnPoint3D com;
g_UserGenerator.GetCoM(aUsers[i], com);
g_DepthGenerator.ConvertRealWorldToProjective(1, &com, &com);
if (aUsers[i] == player)
sprintf(strLabel, "%d (Player)", aUsers[i]);
else
sprintf(strLabel, "%d", aUsers[i]);
glColor4f(1-Colors[i%nColors][0], 1-Colors[i%nColors][1], 1-Colors[i%nColors][2], 1);
glRasterPos2i(com.X, com.Y);
glPrintString(GLUT_BITMAP_HELVETICA_18, strLabel);
}
// Draw skeleton of user
if (player != 0)
{
glBegin(GL_LINES);
glColor4f(1-Colors[player%nColors][0], 1-Colors[player%nColors][1], 1-Colors[player%nColors][2], 1);
// gesture
static int gesture = 0;
static XnPoint3D previousLeftHandPt;
XnPoint3D newLeftHandPt;
newLeftHandPt = getJointPoint(player, XN_SKEL_LEFT_HAND);
if(previousLeftHandPt.X > 0 && previousLeftHandPt.X < 640)
if(previousLeftHandPt.X - newLeftHandPt.X > 60)
gesture = 1;
else if(previousLeftHandPt.X - newLeftHandPt.X < -60)
gesture = 2;
else if(previousLeftHandPt.Y - newLeftHandPt.Y > 60)
gesture = 3;
else if(previousLeftHandPt.Y - newLeftHandPt.Y < -60)
//......... part of the code omitted here .........
Example 12: XnShiftToDepthInit
XnStatus XnFileDevice::UpdateS2DTables(const xn::DepthGenerator& depth)
{
XnStatus nRetVal = XN_STATUS_OK;
XnUInt64 nTemp;
XnDouble dTemp;
// get config
XnShiftToDepthConfig config;
nRetVal = depth.GetIntProperty(XN_STREAM_PROPERTY_ZERO_PLANE_DISTANCE, nTemp);
XN_IS_STATUS_OK(nRetVal);
config.nZeroPlaneDistance = (XnDepthPixel)nTemp;
nRetVal = depth.GetRealProperty(XN_STREAM_PROPERTY_ZERO_PLANE_PIXEL_SIZE, dTemp);
XN_IS_STATUS_OK(nRetVal);
config.fZeroPlanePixelSize = (XnFloat)dTemp;
nRetVal = depth.GetRealProperty(XN_STREAM_PROPERTY_EMITTER_DCMOS_DISTANCE, dTemp);
XN_IS_STATUS_OK(nRetVal);
config.fEmitterDCmosDistance = (XnFloat)dTemp;
nRetVal = depth.GetIntProperty(XN_STREAM_PROPERTY_MAX_SHIFT, nTemp);
XN_IS_STATUS_OK(nRetVal);
config.nDeviceMaxShiftValue = (XnUInt32)nTemp;
config.nDeviceMaxDepthValue = depth.GetDeviceMaxDepth();
nRetVal = depth.GetIntProperty(XN_STREAM_PROPERTY_CONST_SHIFT, nTemp);
XN_IS_STATUS_OK(nRetVal);
config.nConstShift = (XnUInt32)nTemp;
nRetVal = depth.GetIntProperty(XN_STREAM_PROPERTY_PIXEL_SIZE_FACTOR, nTemp);
XN_IS_STATUS_OK(nRetVal);
config.nPixelSizeFactor = (XnUInt32)nTemp;
nRetVal = depth.GetIntProperty(XN_STREAM_PROPERTY_PARAM_COEFF, nTemp);
XN_IS_STATUS_OK(nRetVal);
config.nParamCoeff = (XnUInt32)nTemp;
nRetVal = depth.GetIntProperty(XN_STREAM_PROPERTY_SHIFT_SCALE, nTemp);
XN_IS_STATUS_OK(nRetVal);
config.nShiftScale = (XnUInt32)nTemp;
config.nDepthMinCutOff = 0;
config.nDepthMaxCutOff = (XnDepthPixel)config.nDeviceMaxDepthValue;
if (!m_ShiftToDepth.bIsInitialized)
{
nRetVal = XnShiftToDepthInit(&m_ShiftToDepth, &config);
XN_IS_STATUS_OK(nRetVal);
}
else
{
nRetVal = XnShiftToDepthUpdate(&m_ShiftToDepth, &config);
XN_IS_STATUS_OK(nRetVal);
}
// notify
nRetVal = m_pNotifications->OnNodeGeneralPropChanged(m_pNotificationsCookie, depth.GetName(), XN_STREAM_PROPERTY_S2D_TABLE, m_ShiftToDepth.nShiftsCount * sizeof(XnDepthPixel), m_ShiftToDepth.pShiftToDepthTable);
XN_IS_STATUS_OK(nRetVal);
nRetVal = m_pNotifications->OnNodeGeneralPropChanged(m_pNotificationsCookie, depth.GetName(), XN_STREAM_PROPERTY_D2S_TABLE, m_ShiftToDepth.nDepthsCount * sizeof(XnUInt16), m_ShiftToDepth.pDepthToShiftTable);
XN_IS_STATUS_OK(nRetVal);
return (XN_STATUS_OK);
}
Example 13: main
int main(int argc, char **argv)
{
commInit("192.168.1.255", 54321);
XnStatus nRetVal = XN_STATUS_OK;
xn::EnumerationErrors errors;
const char *fn = NULL;
if (fileExists(SAMPLE_XML_PATH)) fn = SAMPLE_XML_PATH;
else if (fileExists(SAMPLE_XML_PATH_LOCAL)) fn = SAMPLE_XML_PATH_LOCAL;
else {
printf("Could not find '%s' nor '%s'. Aborting.\n" , SAMPLE_XML_PATH, SAMPLE_XML_PATH_LOCAL);
return XN_STATUS_ERROR;
}
printf("Reading config from: '%s'\n", fn);
nRetVal = g_Context.InitFromXmlFile(fn, g_scriptNode, &errors);
if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
{
XnChar strError[1024];
errors.ToString(strError, 1024);
printf("%s\n", strError);
return (nRetVal);
}
else if (nRetVal != XN_STATUS_OK)
{
printf("Open failed: %s\n", xnGetStatusString(nRetVal));
return (nRetVal);
}
nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
CHECK_RC(nRetVal,"No depth");
nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
if (nRetVal != XN_STATUS_OK)
{
nRetVal = g_UserGenerator.Create(g_Context);
CHECK_RC(nRetVal, "Find user generator");
}
XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected;
if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
{
printf("Supplied user generator doesn't support skeleton\n");
return 1;
}
nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
CHECK_RC(nRetVal, "Register to user callbacks");
nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
CHECK_RC(nRetVal, "Register to calibration start");
nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);
CHECK_RC(nRetVal, "Register to calibration complete");
if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
{
g_bNeedPose = TRUE;
if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
{
printf("Pose required, but not supported\n");
return 1;
}
nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
CHECK_RC(nRetVal, "Register to Pose Detected");
g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
}
g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
nRetVal = g_Context.StartGeneratingAll();
CHECK_RC(nRetVal, "StartGenerating");
XnUserID aUsers[MAX_NUM_USERS];
XnUInt16 nUsers;
int j;
printf("Starting to run\n");
if(g_bNeedPose)
{
printf("Assume calibration pose\n");
}
XnUInt32 epochTime = 0;
while (!xnOSWasKeyboardHit())
{
g_Context.WaitOneUpdateAll(g_UserGenerator);
// print the torso information for the first user already tracking
nUsers=MAX_NUM_USERS;
g_UserGenerator.GetUsers(aUsers, nUsers);
int numTracked=0;
int userToPrint=-1;
for(XnUInt16 i=0; i<nUsers; i++)
{
if(g_UserGenerator.GetSkeletonCap().IsTracking(aUsers[i])==FALSE)
continue;
if(getJoints(aUsers[i])){
/**
printf("Left Elbow: %.2f\nLeft Shoulder Roll: %.2f\nLeft Shoulder Pitch: %.2f\nHead Pitch: %.2f\n",
findAngle(jointArr[6], jointArr[4], jointArr[8], 0),
findAngle(jointArr[4], jointArr[10], jointArr[6], 0),
findAngle(jointArr[4], jointArr[10], jointArr[6], 1),
findAngle(jointArr[2], jointArr[1], jointArr[0], 1)
//......... part of the code omitted here .........
Example 14: captureOne
bool DataCapture::captureOne()
{
XnStatus rc = context_.WaitAndUpdateAll(); // want this to be WaitOneUpdateAll(RGB image)
if( rc != XN_STATUS_OK )
{
std::cout << "WaitAndUpdateAll: " << xnGetStatusString(rc) << std::endl;
return false;
}
// grab image
imageGen_.GetMetaData(imageMd_);
const XnRGB24Pixel* rgbData = imageMd_.RGB24Data();
for( unsigned int i = 0; i < 640 * 480; ++i )
{
pRgbData_[3*i] = rgbData->nRed;
pRgbData_[3*i + 1] = rgbData->nGreen;
pRgbData_[3*i + 2] = rgbData->nBlue;
++rgbData;
}
// grab depth image
depthGen_.GetMetaData(depthMd_);
const uint16_t* pDepthDataU16 = depthMd_.Data();
for( int i = 0; i < 640 * 480; ++i)
{
uint16_t d = pDepthDataU16[i];
if( d != 0 )
{
pDepthData_[i] = (d * 255)/2048;
}
else
{
pDepthData_[i] = 0; // should be NAN
}
}
return true;
}
Example 15: main
int main ( int argc, char ** argv )
{
//
// Initializing Calibration Related
//
// ARTagHelper artagHelper ( colorImgWidth, colorImgHeight, ARTAG_CONFIG_FILE, ARTAG_POS_FILE );
ARTagHelper artagHelper ( colorImgWidth, colorImgHeight, ARTAG_CONFIG_A3_FILE, ARTAG_POS_A3_FILE );
ExtrCalibrator extrCalibrator ( 6, KINECT_INTR_FILE, KINECT_DIST_FILE );
// unsigned char * kinectImgBuf = new unsigned char[colorImgWidth * colorImgHeight * 3];
//
// Initializing OpenNI Settings
//
int ctlWndKey = -1;
XnStatus nRetVal = XN_STATUS_OK;
xn::EnumerationErrors errors;
//
// Initialize Context Object
//
nRetVal = g_Context.InitFromXmlFile ( CONFIG_XML_PATH, g_ScriptNode, &errors );
if ( nRetVal == XN_STATUS_NO_NODE_PRESENT )
{
XnChar strError[1024];
errors.ToString ( strError, 1024 );
printf ( "XN_STATUS_NO_NODE_PRESENT:\n%s\n", strError );
system ( "pause" );
return ( nRetVal );
}
else if ( nRetVal != XN_STATUS_OK )
{
printf ( "Open FAILED:\n%s\n", xnGetStatusString ( nRetVal ) );
system ( "pause" );
return ( nRetVal );
}
//
// Handle the Depth Generator Node.
//
nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_DEPTH, g_DepthGen );
if ( nRetVal != XN_STATUS_OK )
{
printf ( "No Depth Node Exists! Please Check your XML.\n" );
return ( nRetVal );
}
//
// Handle the Image Generator node
//
nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_IMAGE, g_ImageGen );
if ( nRetVal != XN_STATUS_OK )
{
printf ( "No Image Node Exists! Please Check your XML.\n" );
return ( nRetVal );
}
// g_DepthGen.GetAlternativeViewPointCap().SetViewPoint( g_ImageGen );
g_DepthGen.GetMetaData ( g_DepthMD );
g_ImageGen.GetMetaData ( g_ImageMD );
assert ( g_ImageMD.PixelFormat() == XN_PIXEL_FORMAT_RGB24 );
assert ( g_DepthMD.PixelFormat() == XN_PIXEL_FORMAT_GRAYSCALE_16_BIT );
//
// Create OpenCV Showing Window and Related Data Structures
//
cv::namedWindow ( IMAGE_WIN_NAME, CV_WINDOW_AUTOSIZE );
cv::namedWindow ( DEPTH_WIN_NAME, CV_WINDOW_AUTOSIZE );
cv::Mat depthImgMat ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_16UC1 );
cv::Mat depthImgShow ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_8UC3 );
cv::Mat colorImgMat ( g_ImageMD.YRes(), g_ImageMD.XRes(), CV_8UC3 );
#define ARTAG_DEBUG
#ifdef ARTAG_DEBUG
cv::setMouseCallback ( IMAGE_WIN_NAME, ClickOnMouse, 0 );
#endif
bool flipColor = true;
//
// Start to Loop
//
while ( ctlWndKey != ESC_KEY_VALUE )
{
//......... part of the code omitted here .........