本文整理汇总了C++中ImageGenerator::GetMetaData方法的典型用法代码示例。如果您正苦于以下问题:C++ ImageGenerator::GetMetaData方法的具体用法?C++ ImageGenerator::GetMetaData怎么用?C++ ImageGenerator::GetMetaData使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ImageGenerator
的用法示例。
在下文中一共展示了ImageGenerator::GetMetaData方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: generateFrame
// -----------------------------------------------------------------------------------------------------
// generateFrame
// Pulls one synchronized depth+RGB frame from the sensor and converts both
// maps into the caller-provided OpenCV images. Returns false when the
// context update fails (no conversion is attempted in that case).
// -----------------------------------------------------------------------------------------------------
bool CameraDevice::generateFrame(IplImage* imgRGB, IplImage* imgDepth)
{
    xnFPSMarkFrame(&g_xnFPS);
    // Block until every generator has delivered a new frame.
    const XnStatus status = g_context.WaitAndUpdateAll();
    if (status != XN_STATUS_OK)
        return false;

    g_depth.GetMetaData(g_depthMD);
    g_image.GetMetaData(g_imageMD);
    const XnDepthPixel* depthBuffer = g_depthMD.Data();
    const XnRGB24Pixel* rgbBuffer = g_image.GetRGB24ImageMap();

    printf("Frame %02d (%dx%d) Depth at middle point: %u. FPS: %f\r",
        g_depthMD.FrameID(),
        g_depthMD.XRes(),
        g_depthMD.YRes(),
        g_depthMD(g_depthMD.XRes()/2, g_depthMD.YRes()/2),
        xnFPSCalc(&g_xnFPS));

    // convert to OpenCV buffers
    convertImageRGB(rgbBuffer, imgRGB);
    convertImageDepth(depthBuffer, imgDepth);
    return true;
}
示例2: kinectUpdate
// Updates to the latest image obtained from the Kinect
int kinectUpdate(void)
{
XnStatus nRetVal = context.WaitAndUpdateAll();
g_image.GetMetaData(g_imageMD);
//nRetVal = context.WaitOneUpdateAll(depth);
depth.GetMetaData(depthMD);
return nRetVal;
}
示例3: GetColorAndDepthImages
// Gets the colour and depth data from the Kinect sensor.
// Returns true when a NEW colour frame (by timestamp) was copied into the
// output images, false when the current frame is no newer than the last
// one delivered. Error reporting is via exceptions: throws the raw
// XnStatus on a failed update and the int 1 when a generator is invalid —
// callers must be prepared to catch both.
bool GetColorAndDepthImages(ColorImage& colorImage, DepthImage& depthImage)
{
XnStatus rc = XN_STATUS_OK;
// Read a new frame, blocking operation
rc = deviceContext.WaitAnyUpdateAll();
if (rc != XN_STATUS_OK)
{
/*LOGE("Read failed: %s\n", xnGetStatusString(rc));*/
// NOTE(review): throwing an XnStatus value is unusual — confirm callers
// actually catch this type and not just int.
throw rc;
}
// Get handles to new data
// static: the metadata wrappers are reused across calls rather than being
// re-constructed on every frame.
static ImageMetaData colorImageMetaData;
static DepthMetaData depthImageMetaData;
colorImageGenerator.GetMetaData(colorImageMetaData);
depthImageGenerator.GetMetaData(depthImageMetaData);
// Validate images
if (!depthImageGenerator.IsValid() || !colorImageGenerator.IsValid())
{
/*LOGE("Error: Color or depth image is invalid.");*/
throw 1;
}
// De-duplicate: skip frames whose timestamp is not newer than the last
// frame we handed out.
if (colorImageMetaData.Timestamp() <= mostRecentRGB)
return false;
// Fetch pointers to data
const XnRGB24Pixel* pColorImage = colorImageMetaData.RGB24Data(); //g_depth.GetRGB24ImageMap()
const XnDepthPixel* pDepthImage = depthImageMetaData.Data();// g_depth.GetDepthMap();
// Copy data over to arrays
// NOTE(review): copies sizeof(destination array) bytes — assumes the
// fixed-size image structs match the sensor's map output mode; confirm.
memcpy(colorImage.data, pColorImage, sizeof(colorImage.data));
memcpy(depthImage.data, pDepthImage, sizeof(depthImage.data));
colorImage.rows = colorImage.maxRows;
colorImage.cols = colorImage.maxCols;
depthImage.rows = depthImage.maxRows;
depthImage.cols = depthImage.maxCols;
mostRecentRGB = colorImageMetaData.Timestamp();
return true;
}
示例4: prepare
// Refreshes the cached metadata and data pointers for each enabled stream
// (depth, scene/user labels, RGB image, IR). Each branch also overwrites the
// shared nXRes/nYRes globals, so the LAST enabled stream wins — presumably
// all streams share one resolution; TODO confirm.
// Returns XN_STATUS_OK (the per-stream calls are not yet error-checked,
// see the TODO below).
XnStatus prepare(char useScene, char useDepth, char useImage, char useIr, char useHistogram)
{
	//TODO handle possible failures! Gotcha!
	if (useDepth)
	{
		mDepthGen.GetMetaData(depthMD);
		nXRes = depthMD.XRes();
		nYRes = depthMD.YRes();
		pDepth = depthMD.Data();
		if (useHistogram)
		{
			calcHist();
			// rewind the pointer
			pDepth = depthMD.Data();
		}
	}
	if (useScene)
	{
		mUserGen.GetUserPixels(0, sceneMD);
		nXRes = sceneMD.XRes();
		nYRes = sceneMD.YRes();
		pLabels = sceneMD.Data();
	}
	if (useImage)
	{
		mImageGen.GetMetaData(imageMD);
		nXRes = imageMD.XRes();
		nYRes = imageMD.YRes();
		pRGB = imageMD.RGB24Data();
		// HISTOGRAM?????
	}
	if (useIr)
	{
		mIrGen.GetMetaData(irMD);
		nXRes = irMD.XRes();
		nYRes = irMD.YRes();
		pIR = irMD.Data();
		// HISTOGRAM????
	}
	// Bug fix: the function is declared to return XnStatus but the original
	// flowed off the end without a return (undefined behaviour in C++).
	return XN_STATUS_OK;
}
示例5: takePhoto
// Dumps the current RGB+depth frame to a sequentially numbered text file
// ("kinect001.txt", "kinect002.txt", ...): a "nx\nny\n\n" header followed by
// one "r g b d" line per pixel, with a blank line between image rows.
void takePhoto() {
	static int index = 1; // persists across calls -> sequential file names
	char fname[256] = {0,};
	// snprintf: bounded write is the safer idiom than the original sprintf.
	snprintf(fname, sizeof(fname), "kinect%03d.txt", index++);
	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);
	int const nx = g_depthMD.XRes();
	int const ny = g_depthMD.YRes();
	// The sample assumes the depth and image maps share one resolution.
	assert(nx == g_imageMD.XRes());
	assert(ny == g_imageMD.YRes());
	const XnDepthPixel* pDepth = g_depthMD.Data();
	const XnUInt8* pImage = g_imageMD.Data();
	// NOTE(review): "wb" + fprintf writes text through a binary stream —
	// presumably deliberate to avoid CRLF translation on Windows; confirm.
	FILE * file = fopen(fname, "wb");
	if (file == NULL) {
		// Bug fix: the original dereferenced a NULL FILE* when fopen failed.
		fprintf(stderr, "takePhoto: cannot open '%s' for writing\n", fname);
		return;
	}
	fprintf(file, "%d\n%d\n\n", nx, ny);
	// di walks the depth map; ri/gi/bi walk the interleaved RGB bytes.
	for (int y = 0, di = 0, ri = 0, gi = 1, bi = 2; y < ny; y++) {
		for (int x = 0; x < nx; x++, di++, ri += 3, gi += 3, bi += 3) {
			int const r = pImage[ri];
			int const g = pImage[gi];
			int const b = pImage[bi];
			int const d = pDepth[di];
			// Sanity-check the 8-bit colour and 16-bit depth ranges.
			assert(r >= 0);
			assert(g >= 0);
			assert(b >= 0);
			assert(d >= 0);
			assert(r <= 0xFF);
			assert(g <= 0xFF);
			assert(b <= 0xFF);
			assert(d <= 0xFFFF);
			fprintf(file, "%3d %3d %3d %5d\n", r, g, b, d);
		}
		fprintf(file, "\n");
	}
	fflush(file);
	fclose(file);
}
示例6: readFrame
void readFrame()
{
if (!g_Depth.IsValid() && !g_Image.IsValid() && !g_IR.IsValid() && !g_Audio.IsValid()) // @@@dded
return;
XnStatus rc = XN_STATUS_OK;
if (g_pPrimary != NULL)
{
rc = g_Context.WaitOneUpdateAll(*g_pPrimary);
}
else
{
rc = g_Context.WaitAnyUpdateAll();
}
if (rc != XN_STATUS_OK)
{
printf("Error: %s\n", xnGetStatusString(rc));
}
if (g_Depth.IsValid())
{
g_Depth.GetMetaData(g_DepthMD);
}
if (g_Image.IsValid())
{
g_Image.GetMetaData(g_ImageMD);
}
if (g_IR.IsValid())
{
g_IR.GetMetaData(g_irMD);
}
if (g_Audio.IsValid())
{
g_Audio.GetMetaData(g_AudioMD);
}
}
示例7: kinectInit
// Set up OpenNI to obtain 8-bit mono images from the Kinect's RGB camera
int kinectInit(void)
{
XnStatus nRetVal = XN_STATUS_OK;
ScriptNode scriptNode;
EnumerationErrors errors;
printf("Reading config from: '%s'\n", SAMPLE_XML_PATH_LOCAL);
nRetVal = context.InitFromXmlFile(SAMPLE_XML_PATH_LOCAL, scriptNode, &errors);
nRetVal = context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
//g_image.SetPixelFormat(XN_PIXEL_FORMAT_GRAYSCALE_8_BIT);
g_image.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);
g_image.GetMetaData(g_imageMD);
nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
depth.GetMetaData(depthMD);
// nRetVal = depth.GetAlternativeViewPointCap().SetViewPoint(g_image);
//nRetVal = depth.GetFrameSyncCap().FrameSyncWith(g_image);
return nRetVal;
}
示例8: captureRGB
void captureRGB(unsigned char* g_ucImageBuffer)
{
ImageMetaData imd;
_image.GetMetaData(imd);
unsigned int nValue = 0;
unsigned int nX = 0;
unsigned int nY = 0;
XnUInt16 g_nXRes = imd.XRes();
XnUInt16 g_nYRes = imd.YRes();
const XnRGB24Pixel * pImageMap = _image.GetRGB24ImageMap();
for (nY=0; nY<g_nYRes; nY++)
{
for (nX=0; nX < g_nXRes; nX++)
{
((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+0] = pImageMap[nY*g_nXRes+nX].nBlue;
((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+1] = pImageMap[nY*g_nXRes+nX].nGreen;
((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+2] = pImageMap[nY*g_nXRes+nX].nRed;
((unsigned char*)g_ucImageBuffer)[(nY*g_nXRes+nX)*4+3] = 0x00;
}
}
}
示例9: glutDisplay
// GLUT display callback: waits for fresh sensor data, then draws the depth
// map (with the user-pixel overlay when a user generator exists) and the
// RGB image in an orthographic pixel-space projection.
void glutDisplay (void){
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Setup the OpenGL viewpoint
glMatrixMode(GL_PROJECTION);
// NOTE(review): this push has no matching glPopMatrix in this function —
// confirm it is popped elsewhere, or the projection stack will eventually
// overflow after repeated frames.
glPushMatrix();
glLoadIdentity();
SceneMetaData sceneMD;
DepthMetaData depthMD;
ImageMetaData imageMD;
// First metadata read: only used to size the glOrtho projection.
g_DepthGenerator.GetMetaData(depthMD);
glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
glDisable(GL_TEXTURE_2D);
//XnStatus rc = g_Context.WaitOneUpdateAll(g_DepthGenerator);
XnStatus rc = g_Context.WaitAnyUpdateAll();
// NOTE(review): argument order differs from the usual CHECK_RC(rc, msg) —
// presumably this sample defines its own macro; confirm.
CHECK_RC("Wait Data",rc);
// Re-read after the update so the freshly arrived frame is drawn.
g_DepthGenerator.GetMetaData(depthMD);
if(g_UserGenerator.IsValid())
g_UserGenerator.GetUserPixels(0, sceneMD);
g_ImageGenerator.GetMetaData(imageMD);
DrawDepthMap(depthMD, sceneMD);
DrawImage(imageMD);
glutSwapBuffers();
}//glutdisplay
示例10: Init_Kinect
// Initialises the OpenNI context from XML, looks up every production node
// this sample needs (depth, image, user, scene, hands, gesture), starts
// skeleton and hand tracking, and stores the X3D event-out nodes used to
// publish results. Returns XN_STATUS_OK on success, an error status
// otherwise (CHECK_RC is assumed to bail out on failure).
XnStatus Init_Kinect(EventOutSFNode* skltn,EventOutSFNode* hnz,EventOutSFNode* flr){
	XnStatus rc=XN_STATUS_OK;
	EnumerationErrors errors;
	// NOTE(review): these locals shadow any identically named globals; they
	// are only used to read FPS and resolution below.
	DepthMetaData g_depthMD;
	ImageMetaData g_imageMD;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	CHECK_RC(rc, "InitFromXml");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	CHECK_RC(rc,"XN_NODE_TYPE_DEPTH");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	CHECK_RC(rc,"XN_NODE_TYPE_IMAGE");
	rc= g_context.FindExistingNode(XN_NODE_TYPE_USER,g_user);
	CHECK_RC(rc,"XN_NODE_TYPE_USER");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_SCENE,g_scene);
	CHECK_RC(rc,"XN_NODE_TYPE_SCENE");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_HANDS,g_hands);
	CHECK_RC(rc,"XN_NODE_TYPE_HANDS");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_GESTURE,g_gesture);
	CHECK_RC(rc,"XN_NODE_TYPE_GESTURE");
	g_depth.GetMetaData(g_depthMD);
	g_fps=g_depthMD.FPS();
	g_image.GetMetaData(g_imageMD);
	rc=init_skeleton();
	CHECK_RC(rc,"INIT SKELETON");
	rc=init_hands();
	CHECK_RC(rc,"INIT HANDS");
	pix_w=g_depthMD.FullXRes();
	pix_h=g_depthMD.FullYRes();
	if(pix_h==0||pix_w==0){return XN_STATUS_ERROR;}
	g_skltn=skltn;
	g_hnz=hnz;
	g_flr=flr;
	// Bug fix: the original tested the global hands *generator* (g_hands)
	// here instead of the just-assigned event-out pointer g_hnz, so a NULL
	// hnz argument slipped through the validation.
	if(NULL==g_skltn||NULL==g_hnz||NULL==g_flr)return XN_STATUS_ERROR;
	isInit=true;
	return rc;
}
示例11: main
// Initialises OpenNI from XML, validates that the depth and image streams
// are compatible (same resolution, RGB24), allocates the texture map and
// hands control to GLUT. Per-frame work happens in glutDisplay.
int main(int argc, char* argv[])
{
	XnStatus rc;
	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return (rc);
	}
	// Bug fix: the original ignored these return codes and then used the
	// (possibly invalid) generators unconditionally.
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	if (rc != XN_STATUS_OK)
	{
		printf("No depth node found: %s\n", xnGetStatusString(rc));
		return (rc);
	}
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	if (rc != XN_STATUS_OK)
	{
		printf("No image node found: %s\n", xnGetStatusString(rc));
		return (rc);
	}
	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);
	// Hybrid mode isn't supported in this sample
	if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes())
	{
		printf ("The device depth and image resolution must be equal!\n");
		return 1;
	}
	// RGB is the only image format supported.
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}
	// Texture map init: round each dimension up to the next multiple of 512.
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512;
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512;
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));
	if (g_pTexMap == NULL)
	{
		// Bug fix: a malloc failure previously went undetected and would
		// crash inside the render loop instead of failing here.
		printf("Failed to allocate the texture map\n");
		return 1;
	}
	// OpenGL init
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
	glutInitWindowSize(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
	glutCreateWindow ("OpenNI Simple Viewer");
	glutFullScreen();
	glutSetCursor(GLUT_CURSOR_NONE);
	glutKeyboardFunc(glutKeyboard);
	glutDisplayFunc(glutDisplay);
	glutIdleFunc(glutIdle);
	glDisable(GL_DEPTH_TEST);
	glEnable(GL_TEXTURE_2D);
	// Per frame code is in glutDisplay
	glutMainLoop();
	return 0;
}
示例12: glutDisplay
//----------------------------------------------------
// Rendering callback (comments translated from Japanese)
//----------------------------------------------------
void glutDisplay (void){
xnFPSMarkFrame(&g_xnFPS); // mark the start of the FPS measurement for this frame
XnStatus rc = XN_STATUS_OK;
// Wait for any node to have updated data (whichever comes first)
rc = g_context.WaitAnyUpdateAll();
if (rc != XN_STATUS_OK){
printf("Read failed: %s\n", xnGetStatusString(rc));
printf("test\n");
return;
}
// Fetch the image / depth / user (scene) data
g_image.GetMetaData(g_imageMD);
g_depth.GetMetaData(g_depthMD);
g_user.GetUserPixels(0, g_sceneMD);
// Clear the colour and depth buffers
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Setup
setDepthHistgram(g_depth, g_depthMD, g_pDepthHist); // compute/build the depth histogram
setTexture(); // texture setup
// Drawing
drawImage(); // draw the image data
// Keep the debug-mode text isolated from the drawing matrices
glMatrixMode(GL_PROJECTION); // select the projection matrix
//glLoadIdentity(); // clear the stack
glMatrixMode(GL_MODELVIEW); // select the modelview matrix
glLoadIdentity();
if(g_debugMode) glDebug(); // debug mode overlay
// Take a screenshot exactly once, then flip the flag back off
if(g_screenShotImageMode){
ostringstream fname;
fname << OUT_IMAGE_PATH ;// output file name
std::string name = fname.str();
g_glScreenShot.screenshot(name.c_str(), 24);
g_screenShotImageMode = !g_screenShotImageMode; // toggle off
}
// Dump the depth data exactly once, then flip the flag back off
if(g_screenShotDepthMode){
ofstream ofs(OUT_DEPTH_PATH);
const XnDepthPixel* pDepth = g_depthMD.Data();
for (XnUInt y = 0; y < KINECT_IMAGE_HEIGHT; y ++){
for (XnUInt x = 0; x < KINECT_IMAGE_WIDTH; x ++, pDepth ++){
// NOTE(review): values below 2000 are written doubled — presumably a
// scaling tweak for visualisation; confirm consumers expect this.
if(*pDepth < 2000){
ofs << (int)((*pDepth) * 2) << ',';
}else{
ofs << (*pDepth) << ',';
}
}
ofs << endl;
}
g_screenShotDepthMode = !g_screenShotDepthMode; // toggle off
}
// Swap the OpenGL display buffers
glutSwapBuffers();
}
示例13: xnInit
//----------------------------------------------------
// OpenNI initialisation (comments translated from Japanese)
//----------------------------------------------------
void xnInit(void){
XnStatus rc;
EnumerationErrors errors;
rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
if (rc == XN_STATUS_NO_NODE_PRESENT){
XnChar strError[1024];
errors.ToString(strError, 1024);
printf("%s\n", strError);
exit(1);
}else if (rc != XN_STATUS_OK){
printf("Open failed: %s\n", xnGetStatusString(rc));
exit(1);
}
//playerInit();
rc = xnFPSInit(&g_xnFPS, 180); // initialise the FPS counter
//CHECK_RC(rc, "FPS Init");
// Look up the depth / image / user generators
rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
errorCheck(rc, "g_depth"); // error check
rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
errorCheck(rc, "g_image");
rc = g_context.FindExistingNode(XN_NODE_TYPE_USER, g_user);
//rc = g_user.Create(g_context);
errorCheck(rc, "g_user");
// Make sure user (skeleton) detection is supported
if (!g_user.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
//throw std::runtime_error("user detection is not supported");
cout << "ユーザー検出をサポートしてません" << endl;
exit(1);
}
// Recorder setup (disabled)
//rc = setRecorder(g_recorder, rc);
// Register the user-detection callbacks
XnCallbackHandle userCallbacks;
g_user.RegisterUserCallbacks(UserDetected, UserLost, NULL, userCallbacks);
// Initial fetch of the depth / image / user data
g_depth.GetMetaData(g_depthMD);
g_image.GetMetaData(g_imageMD);
g_user.GetUserPixels(0, g_sceneMD);
// Hybrid mode isn't supported in this sample
// Bail out if the image and depth resolutions differ
if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes()){
printf ("The device depth and image resolution must be equal!\n");
exit(1);
}
// RGB is the only image format supported.
// Verify the pixel format
if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24){
printf("The device image format must be RGB24\n");
exit(1);
}
// Texture map init
// Round the full-screen texture size up to a multiple of 512
g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes() - 1) / 512) + 1) * 512; // rounded up to a 512 multiple (e.g. 1024)
g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes() - 1) / 512) + 1) * 512; // 512
g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel)); // colour buffer covering the whole screen
// Initialise the point buffers
// NOTE(review): none of these malloc results are checked for NULL.
g_pPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D)); // buffer for 3D points
g_pBackTex = (XnRGB24Pixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnRGB24Pixel)); // buffer for the background image
g_pBackPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D)); // buffer for the background points
g_pBackDepth = (XnDepthPixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnDepthPixel)); // buffer for the background depth
}
示例14: Init
int Init()
{
XnStatus rc;
//Make sure our image types are the same as the OpenNI image types.
assert(sizeof(XnRGB24Pixel) == sizeof(ColorPixel));
assert(sizeof(XnDepthPixel) == sizeof(DepthPixel));
assert(sizeof(XnStatus) == sizeof(int));
// Load OpenNI xml settings
char filePath[255];
int length = Util::Helpers::GetExeDirectory(filePath, sizeof(filePath));
filePath[length] = '\\';
strcpy(&filePath[length+1], SAMPLE_XML_PATH);
EnumerationErrors errors;
rc = deviceContext.InitFromXmlFile(filePath, &errors);
if (rc == XN_STATUS_NO_NODE_PRESENT)
{
//One reason would be if Microsoft SDK is installed beside PrimeSense. Device manager should say PrimeSense instead of Microsoft Kinect.
//XnChar strError[1024];
//errors.ToString(strError, 1024);
//LOGE("%s\n", strError);
return -1;
}
else if (rc != XN_STATUS_OK)
{
fprintf(stderr, "%s\n", xnGetStatusString(rc));
/*LOGE("Open failed: %s\n", xnGetStatusString(rc));*/
return (rc);
}
// Retrieve colour and depth nodes
rc = deviceContext.FindExistingNode(XN_NODE_TYPE_IMAGE, colorImageGenerator);
rc = deviceContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depthImageGenerator);
// Set mirror mode to off
SetMirrorMode(false);
// Get a frame to perform checks on it
ImageMetaData colorImageMetaData;
DepthMetaData depthImageMetaData;
depthImageGenerator.GetMetaData(depthImageMetaData);
colorImageGenerator.GetMetaData(colorImageMetaData);
// Hybrid mode isn't supported in this sample
if (colorImageMetaData.FullXRes() != depthImageMetaData.FullXRes() || colorImageMetaData.FullYRes() != depthImageMetaData.FullYRes())
{
/*LOGE("The device depth and image resolution must be equal!\n");*/
return 1;
}
// RGB is the only image format supported.
if (colorImageMetaData.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
{
/*LOGE("The device image format must be RGB24\n");*/
return 1;
}
// Need to make sure the automatic alignment of colour and depth images is supported.
XnBool isSupported = depthImageGenerator.IsCapabilitySupported("AlternativeViewPoint");
if(!isSupported)
{
/*LOGE("Cannot set AlternativeViewPoint!\n");*/
return 1;
}
// Set it to VGA maps at 30 FPS
/*XnMapOutputMode mapMode;
mapMode.nXRes = XN_VGA_X_RES;
mapMode.nYRes = XN_VGA_Y_RES;
mapMode.nFPS = 60;
rc = g_depth.SetMapOutputMode(mapMode);
if(rc)
{
LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));
return 1;
}
mapMode.nFPS = 30;
rc = g_image.SetMapOutputMode(mapMode);
if(rc)
{
LOGE("Failed to set image map mode: %s\n", xnGetStatusString(rc));
return 1;
}*/
// Set automatic alignment of the colour and depth images.
rc = depthImageGenerator.GetAlternativeViewPointCap().SetViewPoint(colorImageGenerator);
if(rc)
{
/*LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));*/
return 1;
}
return XN_STATUS_OK;
//.........这里部分代码省略.........
示例15: main
int main(int argc, char* argv[])
{
XnStatus nRetVal = XN_STATUS_OK;
nRetVal = xnLogInitFromXmlFile(SAMPLE_XML_PATH);
if (nRetVal != XN_STATUS_OK)
{
printf("Log couldn't be opened: %s. Running without log", xnGetStatusString(nRetVal));
}
if (argc < 3)
{
printf("usage: %s <inputFile> <outputFile>\n", argv[0]);
return -1;
}
const char* strInputFile = argv[1];
const char* strOutputFile = argv[2];
Context context;
nRetVal = context.Init();
CHECK_RC(nRetVal, "Init");
// open input file
Player player;
nRetVal = context.OpenFileRecording("/media/6B58CB581C0AACF6/7.oni", player);
CHECK_RC(nRetVal, "Open input file");
// Get depth node from recording
DepthGenerator depth;
nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
CHECK_RC(nRetVal, "Find depth generator");
// Create mock node based on depth node from recording
MockDepthGenerator mockDepth;
nRetVal = mockDepth.CreateBasedOn(depth);
CHECK_RC(nRetVal, "Create mock depth node");
ImageGenerator image;
nRetVal = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
CHECK_RC(nRetVal, "Find depth generator");
// Create mock node based on depth node from recording
MockImageGenerator mockImage;
nRetVal = mockImage.CreateBasedOn(image);
CHECK_RC(nRetVal, "Create mock depth node");
// create recorder
Recorder recorder;
nRetVal = recorder.Create(context);
CHECK_RC(nRetVal, "Create recorder");
nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, "/home/shaghayegh/up.oni");
CHECK_RC(nRetVal, "Set recorder destination file");
// add depth node to recorder
nRetVal = recorder.AddNodeToRecording(mockDepth);
CHECK_RC(nRetVal, "Add node to recording");
// nRetVal = recorder.AddNodeToRecording(mockImage);
// CHECK_RC(nRetVal, "Add node to recording");
nRetVal = player.SetRepeat(FALSE);
XN_IS_STATUS_OK(nRetVal);
XnUInt32 nNumFrames = 0;
nRetVal = player.GetNumFrames(depth.GetName(), nNumFrames);
CHECK_RC(nRetVal, "Get player number of frames");
DepthMetaData depthMD;
ImageMetaData imageMD;
int frameNum = 0;
String path = "/media/6B58CB581C0AACF6/ebook/Articles/activity_recognition/data1/0512164529/";
while ((nRetVal = depth.WaitAndUpdateData()) != XN_STATUS_EOF)
{
++frameNum;
CHECK_RC(nRetVal, "Read next frame");
// Get depth meta data
depth.GetMetaData(depthMD);
image.GetMetaData(imageMD);
//-----------------------------------------------//
// Transform depth! This is the interesting part //
//-----------------------------------------------//
/* Enable the depth data to be modified. This is done implicitly by depthMD.WritableDepthMap(),
but we're calling it just to be clear. */
nRetVal = depthMD.MakeDataWritable();
CHECK_RC(nRetVal, "Make depth data writable");
// nRetVal = imageMD.MakeDataWritable();
// CHECK_RC(nRetVal, "Make depth data writable");
String ficheroActualRGB;
// ficheroActualRGB = path +"RGB_" + boost::to_string(frameNum) + ".png";
String ficheroActualDepth = path +"Depth_"+ boost::to_string(frameNum) + ".png";
// Mat matFrameImage = imread(ficheroActualRGB, 1);
// resize(matFrameImage, matFrameImage, Size(640, 480), 0, 0, INTER_CUBIC);
Mat matFrameDepth = imread(ficheroActualDepth,1);
resize(matFrameDepth, matFrameDepth, Size(480, 640), 0, 0, INTER_CUBIC);
transformDepthMD(matFrameDepth,depthMD);
// transformImageMD(matFrameImage,imageMD);
// Pass the transformed data to the mock depth generator
nRetVal = mockDepth.SetData(depthMD);
CHECK_RC(nRetVal, "Set mock node new data");
// nRetVal = mockImage.SetData(imageMD);
// CHECK_RC(nRetVal, "Set mock node new data");
/* We need to call recorder.Record explicitly because we're not using WaitAndUpdateAll(). */
nRetVal = recorder.Record();
CHECK_RC(nRetVal, "Record");
//.........这里部分代码省略.........