This article collects typical usage examples of the C++ method xn::ImageGenerator::GetMetaData. If you have been wondering what ImageGenerator::GetMetaData does, how to call it, or what real uses of it look like, the curated examples below may help. You can also explore further usage examples of its enclosing class, xn::ImageGenerator.
The following shows 15 code examples of ImageGenerator::GetMetaData, sorted by popularity by default.
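Before diving into the examples, here is a minimal sketch of the typical call pattern. It assumes an OpenNI XML config (the path "SamplesConfig.xml" is a placeholder) that declares an image node; note that GetMetaData only fills a descriptor pointing at the node's current frame buffer, which is why several examples below follow it with CopyFrom when they need to keep the frame.
#include <XnCppWrapper.h>
#include <cstdio>
int main()
{
xn::Context context;
xn::ScriptNode scriptNode;
xn::ImageGenerator imageGen;
XnStatus rc = context.InitFromXmlFile("SamplesConfig.xml", scriptNode, NULL);
if (rc != XN_STATUS_OK) { printf("Init failed: %s\n", xnGetStatusString(rc)); return 1; }
rc = context.FindExistingNode(XN_NODE_TYPE_IMAGE, imageGen);
if (rc != XN_STATUS_OK) { printf("No image node in the config\n"); return 1; }
for (int i = 0; i < 30; ++i) // grab a few frames
{
context.WaitOneUpdateAll(imageGen); // block until the image node has new data
xn::ImageMetaData imageMD;
imageGen.GetMetaData(imageMD); // descriptor of the current frame
printf("frame %u: %ux%u\n", imageMD.FrameID(), imageMD.XRes(), imageMD.YRes());
}
context.Release();
return 0;
}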
Example 1: update
// Save new data from OpenNI
void NiRecorder::update(const xn::DepthGenerator &dg, const xn::ImageGenerator &ig)
{
// Save latest depth frame
xn::DepthMetaData dmd;
dg.GetMetaData(dmd);
frames[next_to_write].depth_frame.CopyFrom(dmd);
// Save latest image frame
xn::ImageMetaData imd;
ig.GetMetaData(imd);
frames[next_to_write].image_frame.CopyFrom(imd);
// See if buffer is already full
if (buffer_count < buffer_size)
{
buffer_count++;
}
// Make sure the cyclic buffer pointers stay in range
next_to_write++;
if (next_to_write == buffer_size)
{
next_to_write = 0;
}
}
Example 2: grabFrame
bool CvCapture_OpenNI::grabFrame()
{
if( !isOpened() )
return false;
bool isGrabbed = false;
if( !approxSyncGrabber.empty() && approxSyncGrabber->isRun() )
{
isGrabbed = approxSyncGrabber->grab( depthMetaData, imageMetaData );
}
else
{
XnStatus status = context.WaitAndUpdateAll();
if( status != XN_STATUS_OK )
return false;
if( depthGenerator.IsValid() )
depthGenerator.GetMetaData( depthMetaData );
if( imageGenerator.IsValid() )
imageGenerator.GetMetaData( imageMetaData );
isGrabbed = true;
}
return isGrabbed;
}
Example 3: Update
// Save new data from OpenNI
void Update(const xn::DepthGenerator& depthGenerator, const xn::ImageGenerator& imageGenerator)
{
if (m_bDepth)
{
// Save latest depth frame
xn::DepthMetaData dmd;
depthGenerator.GetMetaData(dmd);
m_pFrames[m_nNextWrite].depthFrame.CopyFrom(dmd);
}
if (m_bImage)
{
// Save latest image frame
xn::ImageMetaData imd;
imageGenerator.GetMetaData(imd);
m_pFrames[m_nNextWrite].imageFrame.CopyFrom(imd);
}
// See if buffer is already full
if (m_nBufferCount < m_nBufferSize)
{
m_nBufferCount++;
}
// Make sure the cyclic buffer pointers stay in range
m_nNextWrite++;
if (m_nNextWrite == m_nBufferSize)
{
m_nNextWrite = 0;
}
}
Example 4: glutDisplay
// this function is called each frame
void glutDisplay ()
{
clock_t t1 = clock();
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Setup the OpenGL viewpoint
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
xn::SceneMetaData sceneMD;
xn::DepthMetaData depthMD;
xn::ImageMetaData imageMD;
g_DepthGenerator.GetMetaData(depthMD);
g_ImageGenerator.GetMetaData(imageMD);
#ifndef USE_GLES
glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif
glDisable(GL_TEXTURE_2D);
if (!g_bPause)
{
// Read next available data
g_Context.WaitOneUpdateAll(g_UserGenerator);
}
// Process the data
g_DepthGenerator.GetMetaData(depthMD);
g_ImageGenerator.GetMetaData(imageMD);
g_UserGenerator.GetUserPixels(0, sceneMD);
if(Show_Image == FALSE)
DrawDepthMap(depthMD, sceneMD,COM_tracker,Bounding_Box);
else
{
DrawImageMap(imageMD, depthMD, sceneMD,COM_tracker,Bounding_Box);
}
#ifndef USE_GLES
glutSwapBuffers();
#endif
clock_t t2 = clock();
std::cout << t2 - t1 << std::endl;
}
Example 5: glutDisplay
// this function is called each frame
void glutDisplay (void)
{
xn::SceneMetaData sceneMD;
xn::DepthMetaData depthMD;
xn::ImageMetaData imageMD;
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Setup the OpenGL viewpoint
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
g_DepthGenerator.GetMetaData(depthMD);
g_ImageGenerator.GetMetaData(imageMD);
#ifndef USE_GLES
glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif
glDisable(GL_TEXTURE_2D);
// Read next available data
g_Context.WaitOneUpdateAll(g_UserGenerator);
// Process the data
g_DepthGenerator.GetMetaData(depthMD);
g_UserGenerator.GetUserPixels(0, sceneMD);
g_ImageGenerator.GetMetaData(imageMD);
// Draw the input fetched from the Kinect
DrawKinectInput(depthMD, sceneMD, imageMD);
#ifndef USE_GLES
glutSwapBuffers();
#endif
}
Example 6: captureOne
bool DataCapture::captureOne()
{
XnStatus rc = context_.WaitAndUpdateAll(); // want this to be WaitOneUpdateAll(RGB image)
if( rc != XN_STATUS_OK )
{
std::cout << "WaitAndUpdateAll: " << xnGetStatusString(rc) << std::endl;
return false;
}
// grab image
imageGen_.GetMetaData(imageMd_);
const XnRGB24Pixel* rgbData = imageMd_.RGB24Data();
for( unsigned int i = 0; i < 640 * 480; ++i )
{
pRgbData_[3*i] = rgbData->nRed;
pRgbData_[3*i + 1] = rgbData->nGreen;
pRgbData_[3*i + 2] = rgbData->nBlue;
++rgbData;
}
// grab depth image
depthGen_.GetMetaData(depthMd_);
const uint16_t* pDepthDataU16 = depthMd_.Data();
for( int i = 0; i < 640 * 480; ++i)
{
uint16_t d = pDepthDataU16[i];
if( d != 0 )
{
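// map the 11-bit depth range 0..2047 to 0..255 (assumes d < 2048; larger values overflow 8 bits)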
pDepthData_[i] = (d * 255)/2048;
}
else
{
pDepthData_[i] = 0; // should be NAN
}
}
return true;
}
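The TODO in the opening comment could plausibly be addressed with context_.WaitOneUpdateAll(imageGen_), which blocks only until the image node has a new frame instead of waiting for every node.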
Example 7: main
//......... portions of code omitted .........
g_bNeedPose = TRUE;
if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)){
LOG_E("%s", "Pose required, but not supported");
return 1;
}
nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
CHECK_RC(nRetVal, "Register to Pose Detected");
g_SkeletonCap.GetCalibrationPose(g_strPose);
}
g_SkeletonCap.SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
nRetVal = g_Context.StartGeneratingAll();
CHECK_RC(nRetVal, "StartGenerating");
// Create the image buffer used for display
XnMapOutputMode mapMode;
g_ImageGenerator.GetMapOutputMode(mapMode);
g_rgbImage = cvCreateImage(cvSize(mapMode.nXRes, mapMode.nYRes), IPL_DEPTH_8U, 3);
LOG_I("%s", "Starting to run");
if(g_bNeedPose){
LOG_I("%s", "Assume calibration pose");
}
xn::Recorder recorder;
if( DO_RECORED && !USE_RECORED_DATA ){
// Create the recorder
LOG_I("%s", "Setup Recorder");
nRetVal = recorder.Create(g_Context);
CHECK_RC(nRetVal, "Create recorder");
// Configure the save destination
nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, RECORD_FILE_PATH);
CHECK_RC(nRetVal, "Set recorder destination file");
// Add the depth and video camera inputs as recording targets
nRetVal = recorder.AddNodeToRecording(g_DepthGenerator, XN_CODEC_NULL);
CHECK_RC(nRetVal, "Add depth node to recording");
nRetVal = recorder.AddNodeToRecording(g_ImageGenerator, XN_CODEC_NULL);
CHECK_RC(nRetVal, "Add image node to recording");
LOG_I("%s", "Recorder setup done.");
}
while (!xnOSWasKeyboardHit())
{
g_Context.WaitOneUpdateAll(g_UserGenerator);
if( DO_RECORED && !USE_RECORED_DATA ){
nRetVal = recorder.Record();
CHECK_RC(nRetVal, "Record");
}
// Fetch the raw video camera image
xn::ImageMetaData imageMetaData;
g_ImageGenerator.GetMetaData(imageMetaData);
// Memory copy
xnOSMemCopy(g_rgbImage->imageData, imageMetaData.RGB24Data(), g_rgbImage->imageSize);
// Convert RGB to BGR so OpenCV displays the colors correctly
cvCvtColor(g_rgbImage, g_rgbImage, CV_RGB2BGR);
// Get the user-label pixels from the UserGenerator
xn::SceneMetaData sceneMetaData;
g_UserGenerator.GetUserPixels(0, sceneMetaData);
XnUserID allUsers[MAX_NUM_USERS];
XnUInt16 nUsers = MAX_NUM_USERS;
g_UserGenerator.GetUsers(allUsers, nUsers);
for (int i = 0; i < nUsers; i++) {
// Check whether this user has been calibrated successfully
if (g_SkeletonCap.IsTracking(allUsers[i])) {
// Draw the skeleton
DrawSkelton(allUsers[i], i);
}
}
// Display
cvShowImage("User View", g_rgbImage);
// Exit when ESC is pressed
if (cvWaitKey(10) == 27) {
break;
}
}
if( !USE_RECORED_DATA ){
g_scriptNode.Release();
}
g_DepthGenerator.Release();
g_UserGenerator.Release();
g_Context.Release();
if (g_rgbImage != NULL) {
cvReleaseImage(&g_rgbImage);
}
g_Context.Shutdown();
}
Example 8: captureRGB
cv::Mat xncv::captureRGB(const xn::ImageGenerator& generator)
{
xn::ImageMetaData meta;
generator.GetMetaData(meta);
return cv::Mat(meta.YRes(), meta.XRes(), cv::DataType<cv::Vec3b>::type, (void*)meta.RGB24Data());
}
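Note that the returned cv::Mat merely wraps the generator's internal frame buffer: no pixels are copied, and the contents change on the next context update. A hypothetical caller that wants to keep the frame can deep-copy it:
cv::Mat frame = xncv::captureRGB(generator).clone(); // own the pixels before the next update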
Example 9: main
int main ( int argc, char ** argv )
{
//
// Initializing Calibration Related
//
// ARTagHelper artagHelper ( colorImgWidth, colorImgHeight, ARTAG_CONFIG_FILE, ARTAG_POS_FILE );
ARTagHelper artagHelper ( colorImgWidth, colorImgHeight, ARTAG_CONFIG_A3_FILE, ARTAG_POS_A3_FILE );
ExtrCalibrator extrCalibrator ( 6, KINECT_INTR_FILE, KINECT_DIST_FILE );
// unsigned char * kinectImgBuf = new unsigned char[colorImgWidth * colorImgHeight * 3];
//
// Initializing OpenNI Settings
//
int ctlWndKey = -1;
XnStatus nRetVal = XN_STATUS_OK;
xn::EnumerationErrors errors;
//
// Initialize Context Object
//
nRetVal = g_Context.InitFromXmlFile ( CONFIG_XML_PATH, g_ScriptNode, &errors );
if ( nRetVal == XN_STATUS_NO_NODE_PRESENT )
{
XnChar strError[1024];
errors.ToString ( strError, 1024 );
printf ( "XN_STATUS_NO_NODE_PRESENT:\n%s\n", strError );
system ( "pause" );
return ( nRetVal );
}
else if ( nRetVal != XN_STATUS_OK )
{
printf ( "Open FAILED:\n%s\n", xnGetStatusString ( nRetVal ) );
system ( "pause" );
return ( nRetVal );
}
//
// Handle the Depth Generator Node.
//
nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_DEPTH, g_DepthGen );
if ( nRetVal != XN_STATUS_OK )
{
printf ( "No Depth Node Exists! Please Check your XML.\n" );
return ( nRetVal );
}
//
// Handle the Image Generator node
//
nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_IMAGE, g_ImageGen );
if ( nRetVal != XN_STATUS_OK )
{
printf ( "No Image Node Exists! Please Check your XML.\n" );
return ( nRetVal );
}
// g_DepthGen.GetAlternativeViewPointCap().SetViewPoint( g_ImageGen );
g_DepthGen.GetMetaData ( g_DepthMD );
g_ImageGen.GetMetaData ( g_ImageMD );
assert ( g_ImageMD.PixelFormat() == XN_PIXEL_FORMAT_RGB24 );
assert ( g_DepthMD.PixelFormat() == XN_PIXEL_FORMAT_GRAYSCALE_16_BIT );
//
// Create OpenCV Showing Window and Related Data Structures
//
cv::namedWindow ( IMAGE_WIN_NAME, CV_WINDOW_AUTOSIZE );
cv::namedWindow ( DEPTH_WIN_NAME, CV_WINDOW_AUTOSIZE );
cv::Mat depthImgMat ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_16UC1 );
cv::Mat depthImgShow ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_8UC3 );
cv::Mat colorImgMat ( g_ImageMD.YRes(), g_ImageMD.XRes(), CV_8UC3 );
#define ARTAG_DEBUG
#ifdef ARTAG_DEBUG
cv::setMouseCallback ( IMAGE_WIN_NAME, ClickOnMouse, 0 );
#endif
bool flipColor = true;
//
// Start to Loop
//
while ( ctlWndKey != ESC_KEY_VALUE )
{
//......... portions of code omitted .........
Example 10: glutDisplay
// this function is called each frame
void glutDisplay (void)
{
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Setup the OpenGL viewpoint
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
// g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();
xn::SceneMetaData sceneMD;
xn::DepthMetaData depthMD;
xn::ImageMetaData imageMD;
g_DepthGenerator.GetMetaData(depthMD);
g_ImageGenerator.GetMetaData(imageMD);
#ifdef USE_GLUT
glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif
glDisable(GL_TEXTURE_2D);
if (!g_bPause)
{
// Read next available data
g_Context.WaitAndUpdateAll();
}
// Process the data
//DRAW
// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
// g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();
g_DepthGenerator.GetMetaData(depthMD);
g_ImageGenerator.GetMetaData(imageMD);
g_UserGenerator.GetUserPixels(0, sceneMD);
DrawDepthMap(depthMD, imageMD, sceneMD, g_nPlayer);
if (g_nPlayer != 0)
{
XnPoint3D com;
g_UserGenerator.GetCoM(g_nPlayer, com);
if (com.Z == 0)
{
g_nPlayer = 0;
FindPlayer();
}
}
#ifdef USE_GLUT
glutSwapBuffers();
#endif
}
Example 11: glut_display
void glut_display() {
xn::DepthMetaData pDepthMapMD;
xn::ImageMetaData pImageMapMD;
#ifdef DEBUGOUT
ofstream datei;
#endif
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
gluPerspective(45, WINDOW_SIZE_X/WINDOW_SIZE_Y, 1000, 5000);
// glOrtho(0, WINDOW_SIZE_X, WINDOW_SIZE_Y, 0, -128, 128);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
// glTranslatef(-12.8/640.0, 9.0/480.0, 0);
// glTranslatef(-12.8/630.0, 9.0/480.0,0);
glScalef(scalex, scaley, 1.0);
glTranslatef(transx/630, transy/480, 0.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
rot_angle+=0.7;
// Wait for new data from the depth generator
nRetVal = context.WaitAndUpdateAll();
checkError("Fehler beim Aktualisieren der Daten", nRetVal);
// Read the current depth metadata
depth.GetMetaData(pDepthMapMD);
// Read the current depth map
const XnDepthPixel* pDepthMap = depth.GetDepthMap();
if(maxdepth==-1)
maxdepth = getMaxDepth(pDepthMap);
// Read the current image metadata
image.GetMetaData(pImageMapMD);
// Read the current image
const XnRGB24Pixel* pImageMap = image.GetRGB24ImageMap();
glColor3f(1, 1, 1);
// XnDepthPixel maxdepth = depth.GetDeviceMaxDepth();
const unsigned int xres = pDepthMapMD.XRes();
const unsigned int yres = pDepthMapMD.YRes();
#ifdef DEBUGOUT
datei.open("daniel.txt", ios::out);
#endif
for(unsigned int y=0; y<yres-1; y++) {
for(unsigned int x=0; x<xres; x++) {
aDepthMap[x+y*xres] = static_cast<GLubyte>(static_cast<float>(pDepthMap[x+y*xres])/static_cast<float>(maxdepth)*255);
}
}
/*
glEnable(GL_TEXTURE_2D);
glPushMatrix();
glLoadIdentity();
glTranslatef(-800, 0, -2000);
glBindTexture(GL_TEXTURE_2D, texture_rgb);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, pImageMap);
glBegin(GL_QUADS);
glTexCoord2f(0,1); glVertex3f(0,0,0);
glTexCoord2f(1,1); glVertex3f(640,0,0);
glTexCoord2f(1,0); glVertex3f(640,480,0);
glTexCoord2f(0,0); glVertex3f(0,480,0);
glEnd();
glPopMatrix();
glPushMatrix();
glLoadIdentity();
glTranslatef(-800, 600, -2000);
glBindTexture(GL_TEXTURE_2D, texture_depth);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE8, 640, 480, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, aDepthMap);
glBegin(GL_QUADS);
glTexCoord2f(0,1); glVertex3f(0,0,0);
glTexCoord2f(1,1); glVertex3f(640,0,0);
glTexCoord2f(1,0); glVertex3f(640,480,0);
glTexCoord2f(0,0); glVertex3f(0,480,0);
glEnd();
glPopMatrix();*/
glPushMatrix();
glLoadIdentity();
glTranslatef(-100, -100, -2000);
glRotatef(cx,0,1,0);
glRotatef(cy,1,0,0);
glTranslatef(-320, -240, 1000);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texture_rgb);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 640, 480, 0, GL_RGB, GL_UNSIGNED_BYTE, pImageMap);
glBegin(GL_POINTS);
for(unsigned int y=0; y<yres-1; y++) {
for(unsigned int x=0; x<630; x++) {
if(pDepthMap[x+y*xres]!=0) {
//......... portions of code omitted .........
Example 12: matrixCalc
void matrixCalc(void *outputs)
{
TML::Matrix out1(outputs, 0);
TML::Matrix out2(outputs, 1);
TML::Matrix out3(outputs, 2);
TML::Matrix out4(outputs, 3);
xn::DepthMetaData depthMD;
xn::SceneMetaData sceneMD;
xn::ImageMetaData imageMD;
depth.GetMetaData(depthMD);
user.GetUserPixels(0, sceneMD);
image.GetMetaData(imageMD);
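// WaitNoneUpdateAll refreshes every node that already has new data, without blocking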
context.WaitNoneUpdateAll();
t_jit_matrix_info tmi;
memset(&tmi, 0, sizeof(tmi));
tmi.dimcount = 2;
tmi.planecount = 1;
tmi.dimstride[0] = 4;
tmi.dimstride[1] = depthMD.XRes()*4;
int width = tmi.dim[0] = depthMD.XRes();
int height = tmi.dim[1] = depthMD.YRes();
tmi.type = _jit_sym_float32;
out1.resizeTo(&tmi);
tmi.planecount = 1;
tmi.dimstride[0] = 1;
tmi.dimstride[1] = depthMD.XRes();
tmi.type = _jit_sym_char;
out2.resizeTo(&tmi);
tmi.planecount = 4;
tmi.dimstride[0] = 4;
tmi.dimstride[1] = depthMD.XRes()*4;
tmi.type = _jit_sym_char;
out3.resizeTo(&tmi);
const XnDepthPixel* pDepth = depthMD.Data();
float *depthData = (float*)out1.data();
//Copy depth data
int x,y;
for (y=0; y<height; y++)
{
for (x=0; x<width; x++)
{
depthData[0] = (float)pDepth[0]/powf(2, 15);
depthData++;
pDepth++;
}
}
//Get the users
unsigned char *userData = (unsigned char*)out2.data();
const XnLabel* pLabels = sceneMD.Data();
for (y=0; y<height; y++)
{
for (x=0; x<width; x++)
{
userData[0] = pLabels[0];
userData++;
pLabels++;
}
}
//Get the colors
const XnRGB24Pixel* pPixels = imageMD.RGB24Data();
unsigned char *pixData = (unsigned char*)out3.data();
for (y=0; y<height; y++)
{
for (x=0; x<width; x++)
{
pixData[0] = 0;
pixData[1] = pPixels[0].nRed;
pixData[2] = pPixels[0].nGreen;
pixData[3] = pPixels[0].nBlue;
pixData+=4;
pPixels++;
}
}
//For all the users -- output the joint info...
XnUserID aUsers[15];
XnUInt16 nUsers = 15;
user.GetUsers(aUsers, nUsers);
int rUsers = 0;
xn::SkeletonCapability sc = user.GetSkeletonCap();
int i;
for (i=0; i<nUsers; i++)
{
//......... portions of code omitted .........
Example 13: start_kinect
void start_kinect() {
XnStatus nRetVal = XN_STATUS_OK;
xn::EnumerationErrors errors;
UsersCount = 0;
const char *fn = NULL;
if (fileExists(SAMPLE_XML_PATH)) fn = SAMPLE_XML_PATH;
else if (fileExists(SAMPLE_XML_PATH_LOCAL)) fn = SAMPLE_XML_PATH_LOCAL;
else {
printf("Could not find '%s' nor '%s'. Aborting.\n" , SAMPLE_XML_PATH, SAMPLE_XML_PATH_LOCAL);
//return XN_STATUS_ERROR;
}
printf("Reading config from: '%s'\n", fn);
nRetVal = g_Context.InitFromXmlFile(fn, g_scriptNode, &errors);
if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
{
XnChar strError[1024];
errors.ToString(strError, 1024);
printf("%s\n", strError);
//return (nRetVal);
}
else if (nRetVal != XN_STATUS_OK)
{
printf("Open failed: %s\n", xnGetStatusString(nRetVal));
//return (nRetVal);
}
nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
CHECK_RC(nRetVal,"No depth");
nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
CHECK_RC(nRetVal,"No image");
nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
if (nRetVal != XN_STATUS_OK)
{
nRetVal = g_UserGenerator.Create(g_Context);
CHECK_RC(nRetVal, "Find user generator");
}
XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected;
if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON))
{
printf("Supplied user generator doesn't support skeleton\n");
//return 1;
}
nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
CHECK_RC(nRetVal, "Register to user callbacks");
nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
CHECK_RC(nRetVal, "Register to calibration start");
nRetVal = g_UserGenerator.GetSkeletonCap().RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);
CHECK_RC(nRetVal, "Register to calibration complete");
if (g_UserGenerator.GetSkeletonCap().NeedPoseForCalibration())
{
g_bNeedPose = TRUE;
if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
{
printf("Pose required, but not supported\n");
//return 1;
}
nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
CHECK_RC(nRetVal, "Register to Pose Detected");
g_UserGenerator.GetSkeletonCap().GetCalibrationPose(g_strPose);
}
g_UserGenerator.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
nRetVal = g_Context.StartGeneratingAll();
CHECK_RC(nRetVal, "StartGenerating");
XnUserID aUsers[MAX_NUM_USERS];
XnUInt16 nUsers;
XnSkeletonJointTransformation anyjoint;
printf("Starting to run\n");
if(g_bNeedPose)
{
printf("Assume calibration pose\n");
}
XnUInt32 epochTime = 0;
while (!xnOSWasKeyboardHit())
{
g_Context.WaitOneUpdateAll(g_UserGenerator);
// print the torso information for the first user already being tracked
nUsers=MAX_NUM_USERS;
g_UserGenerator.GetUsers(aUsers, nUsers);
int numTracked=0;
int userToPrint=-1;
WriteLock w_lock(myLock);
pDepthMap = g_depth.GetDepthMap();
pPixelMap = g_image.GetRGB24ImageMap();
g_depth.GetMetaData(g_depthMD);
g_image.GetMetaData(g_imageMD);
pPixelPoint = g_imageMD.RGB24Data();
//......... portions of code omitted .........
Example 14: main
int main ( int argc, char * argv[] )
{
//
// Initialize OpenNI Settings
//
XnStatus nRetVal = XN_STATUS_OK;
xn::ScriptNode scriptNode;
xn::EnumerationErrors errors;
//
// Initialize Context Object
//
nRetVal = g_Context.InitFromXmlFile ( CONFIG_XML_PATH, scriptNode, &errors );
if ( nRetVal == XN_STATUS_NO_NODE_PRESENT ) {
XnChar strError[1024];
errors.ToString(strError, 1024);
printf ( "XN_STATUS_NO_NODE_PRESENT:\n%s\n", strError );
system ( "pause" );
return ( nRetVal );
}
else if ( nRetVal != XN_STATUS_OK ) {
printf ( "Open failed: %s\n", xnGetStatusString(nRetVal) );
system ( "pause" );
return ( nRetVal );
}
//
// Handle Image & Depth Generator Node
//
bool colorFlag = true;
bool depthFlag = true;
nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_DEPTH, g_DepthGen );
if ( nRetVal != XN_STATUS_OK ) {
printf("No depth node exists!\n");
depthFlag = false;
}
nRetVal = g_Context.FindExistingNode ( XN_NODE_TYPE_IMAGE, g_ImageGen );
if ( nRetVal != XN_STATUS_OK ) {
printf("No image node exists!\n");
colorFlag = false;
}
// g_DepthGen.GetAlternativeViewPointCap().SetViewPoint( g_ImageGen );
if ( depthFlag ) {
g_DepthGen.GetMetaData ( g_DepthMD );
assert ( g_DepthMD.PixelFormat() == XN_PIXEL_FORMAT_GRAYSCALE_16_BIT );
}
if ( colorFlag ) {
g_ImageGen.GetMetaData ( g_ImageMD );
assert ( g_ImageMD.PixelFormat() == XN_PIXEL_FORMAT_RGB24 );
}
g_DepthImgShow = cv::Mat ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_8UC1 );
g_DepthImgMat = cv::Mat ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_16UC1 );
g_ColorImgMat = cv::Mat ( g_ImageMD.YRes(), g_ImageMD.XRes(), CV_8UC3 );
//
// Start to Loop
//
bool flipColor = true;
int ctlWndKey = -1;
g_StartTickCount = GetTickCount();
g_HeadTrackingFrameCount = 0;
while ( ctlWndKey != ESC_KEY_VALUE )
{
nRetVal = g_Context.WaitOneUpdateAll ( g_DepthGen );
// nRetVal = g_Context.WaitAnyUpdateAll();
#ifdef HANDLING_IMAGE_DATA
if ( colorFlag )
{
g_ImageGen.GetMetaData ( g_ImageMD );
assert ( g_ImageMD.FullXRes() == g_ImageMD.XRes() );
assert ( g_ImageMD.FullYRes() == g_ImageMD.YRes() );
GlobalUtility::CopyColorRawBufToCvMat8uc3 ( (const XnRGB24Pixel *)(g_ImageMD.Data()), g_ColorImgMat );
if ( ctlWndKey == 's' || ctlWndKey == 'S' ) { // Switch
flipColor = !flipColor;
}
if ( flipColor ) {
cv::cvtColor ( g_ColorImgMat, g_ColorImgMat, CV_RGB2BGR );
}
cv::namedWindow ( IMAGE_WIN_NAME, CV_WINDOW_AUTOSIZE );
cv::imshow ( IMAGE_WIN_NAME, g_ColorImgMat );
}
//......... portions of code omitted .........
Example 15: update
bool OpenNIVideo::update(osg::NodeVisitor* nv) {
//this is the main function of your video plugin
//you can either retrieve images from your video stream/camera/file
//or communicate with a thread to synchronize and get the data out
//the most important thing is to synchronize your data
//and copy the result to the VideoImageStream used in this plugin
//
//0. you can collect some stats, for that you can use a timer
osg::Timer t;
{
//1. mutex lock access to the image video stream
OpenThreads::ScopedLock<OpenThreads::Mutex> _lock(this->getMutex());
osg::notify(osg::DEBUG_INFO)<<"osgART::OpenNIVideo::update() get new image.."<<std::endl;
XnStatus nRetVal = XN_STATUS_OK;
nRetVal=context.WaitAndUpdateAll();
CHECK_RC(nRetVal, "Update Data");
xnFPSMarkFrame(&xnFPS);
depth_generator.GetMetaData(depthMD);
const XnDepthPixel* pDepthMap = depthMD.Data();
// 16-bit depth map (one XnDepthPixel per pixel)
image_generator.GetMetaData(imageMD);
const XnUInt8* pImageMap = imageMD.Data();
// Hybrid mode isn't supported in this sample
if (imageMD.FullXRes() != depthMD.FullXRes() || imageMD.FullYRes() != depthMD.FullYRes())
{
std::cerr<<"The device depth and image resolution must be equal!"<<std::endl;
exit(1);
}
// RGB is the only image format supported.
if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
{
std::cerr<<"The device image format must be RGB24"<<std::endl;
exit(1);
}
const XnDepthPixel* pDepth=pDepthMap;
const XnUInt8* pImage=pImageMap;
XnDepthPixel zMax = depthMD.ZRes();
// scale the 16-bit depth buffer down to 8-bit values
for ( unsigned int i=0; i<(depthMD.XRes() * depthMD.YRes()); ++i )
{
*(_depthBufferByte + i) = 255 * (float(*(pDepth + i)) / float(zMax));
}
memcpy(_videoStreamList[0]->data(),pImage, _videoStreamList[0]->getImageSizeInBytes());
memcpy(_videoStreamList[1]->data(),_depthBufferByte, _videoStreamList[1]->getImageSizeInBytes());
//3. don't forget to call this to notify the rest of the application
//that you have a new video image
_videoStreamList[0]->dirty();
_videoStreamList[1]->dirty();
}
//4. hopefully report some interesting data
if (nv) {
const osg::FrameStamp *framestamp = nv->getFrameStamp();
if (framestamp && _stats.valid())
{
_stats->setAttribute(framestamp->getFrameNumber(),
"Capture time taken", t.time_m());
}
}
// Increase modified count every X ms to ensure tracker updates
if (updateTimer.time_m() > 50) {
_videoStreamList[0]->dirty();
_videoStreamList[1]->dirty();
updateTimer.setStartTick();
}
return true;
}