

C++ Context::FindExistingNode Method Code Examples

This article collects typical usage examples of the C++ Context::FindExistingNode method. If you are asking yourself how Context::FindExistingNode is used in C++, what it is for, or where to find examples of it, the curated code examples below may help. You can also explore further usage examples of the containing class, Context.


A total of 15 code examples of the Context::FindExistingNode method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
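
Before the individual examples, here is a minimal, self-contained sketch of the typical call pattern. It is not taken from any of the projects below: the OpenNI 1.x C++ wrapper, the "SamplesConfig.xml" path, and the CHECK_RC macro (modeled on the one used in the official OpenNI samples) are assumptions for illustration. InitFromXmlFile creates the production nodes declared in the XML file, and FindExistingNode then retrieves one of those already-created nodes by type.

// Minimal usage sketch (assumed setup, see the note above)
#include <XnCppWrapper.h>
#include <cstdio>

// Status-check macro in the style of the OpenNI samples
#define CHECK_RC(rc, what)                                           \
	if ((rc) != XN_STATUS_OK)                                        \
	{                                                                \
		printf("%s failed: %s\n", what, xnGetStatusString(rc));     \
		return (rc);                                                 \
	}

int main()
{
	XnStatus nRetVal = XN_STATUS_OK;
	xn::Context context;
	xn::ScriptNode scriptNode;
	xn::EnumerationErrors errors;

	// Create the production nodes declared in the (hypothetical) XML config file
	nRetVal = context.InitFromXmlFile("SamplesConfig.xml", scriptNode, &errors);
	CHECK_RC(nRetVal, "InitFromXmlFile");

	// Retrieve the depth node that the XML file created
	xn::DepthGenerator depth;
	nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
	CHECK_RC(nRetVal, "Find depth generator");

	// ... use the depth generator here ...

	context.Release();
	return 0;
}

FindExistingNode only succeeds if a node of the requested type already exists in the context (for example, one created by InitFromXmlFile or an explicit Create call); otherwise it returns an error status, which is why every example below checks its return value.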

Example 1: connect

// -----------------------------------------------------------------------------------------------------
//  connect
// -----------------------------------------------------------------------------------------------------
bool CameraDevice::connect()
{
	//Connect to kinect
	printf("Connecting to Kinect... ");
	fflush(stdout);
	XnStatus nRetVal = XN_STATUS_OK;
	EnumerationErrors errors;
	ScriptNode script;
	nRetVal = g_context.InitFromXmlFile(Config::_PathKinectXmlFile.c_str(), script, &errors);
	if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return false;
	}
	else if (nRetVal != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(nRetVal));
		return false;
	}
	printf("OK\n");

	// allocate the point cloud buffer
	g_cloudPointSave.width = NBPIXELS_WIDTH;
    g_cloudPointSave.height = NBPIXELS_HEIGHT;
    g_cloudPointSave.points.resize(NBPIXELS_WIDTH*NBPIXELS_HEIGHT);

	nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	CHECK_RC(nRetVal, "Find depth generator");

	nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	CHECK_RC(nRetVal, "Find image generator");

	nRetVal = xnFPSInit(&g_xnFPS, 180);
	CHECK_RC(nRetVal, "FPS Init");

	g_context.SetGlobalMirror(false); // disable global mirroring (no horizontal flip)

	g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);
	if (g_depth.GetIntProperty ("ShadowValue", g_shadowValue) != XN_STATUS_OK)
		printf ("[OpenNIDriver] Could not read shadow value!");

	if (g_depth.GetIntProperty ("NoSampleValue", g_noSampleValue) != XN_STATUS_OK)
		printf ("[OpenNIDriver] Could not read no sample value!");

    return (nRetVal == XN_STATUS_OK);
}
Author: animecomico, Project: kth-rgbd, Lines: 51, Source: CameraDevice.cpp

Example 2: main

int main()
{
    XnStatus nRetVal = XN_STATUS_OK;

    Context context;
    EnumerationErrors errors;

    nRetVal = context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);

    if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
    {
        XnChar strError[1024];
        errors.ToString(strError, 1024);
        printf("%s\n", strError);
        return (nRetVal);
    }
    else if (nRetVal != XN_STATUS_OK)
    {
        printf("Open failed: %s\n", xnGetStatusString(nRetVal));
        return (nRetVal);
    }

    DepthGenerator depth;
    nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
    CHECK_RC(nRetVal, "Find depth generator");

    XnFPSData xnFPS;
    nRetVal = xnFPSInit(&xnFPS, 180);
    CHECK_RC(nRetVal, "FPS Init");

    DepthMetaData depthMD;

    while (!xnOSWasKeyboardHit())
    {
        nRetVal = context.WaitOneUpdateAll(depth);
        if (nRetVal != XN_STATUS_OK)
        {
            printf("UpdateData failed: %s\n", xnGetStatusString(nRetVal));
            continue;
        }

        xnFPSMarkFrame(&xnFPS);

        depth.GetMetaData(depthMD);
        const XnDepthPixel* pDepthMap = depthMD.Data();

        printf("Frame %d Middle point is: %u. FPS: %f\n", depthMD.FrameID(), depthMD(depthMD.XRes() / 2, depthMD.YRes() / 2), xnFPSCalc(&xnFPS));
    }

    context.Shutdown();

    return 0;
}
Author: nixz, Project: OpenNI, Lines: 53, Source: NiSimpleRead.cpp

Example 3: kinectInit

// Set up OpenNI to obtain images from the Kinect's RGB camera (RGB24; the 8-bit grayscale option is left commented out)
int kinectInit(void)
{
  XnStatus nRetVal = XN_STATUS_OK;
  ScriptNode scriptNode;
  EnumerationErrors errors;

  printf("Reading config from: '%s'\n", SAMPLE_XML_PATH_LOCAL);
  nRetVal = context.InitFromXmlFile(SAMPLE_XML_PATH_LOCAL, scriptNode, &errors);

  nRetVal = context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image); 
  //g_image.SetPixelFormat(XN_PIXEL_FORMAT_GRAYSCALE_8_BIT); 
  g_image.SetPixelFormat(XN_PIXEL_FORMAT_RGB24); 
  g_image.GetMetaData(g_imageMD);

  nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
  depth.GetMetaData(depthMD);

  //  nRetVal = depth.GetAlternativeViewPointCap().SetViewPoint(g_image);
  //nRetVal = depth.GetFrameSyncCap().FrameSyncWith(g_image);

  return nRetVal;
}
Author: mlab-upenn, Project: HAWK-daemons, Lines: 23, Source: kinectd.cpp

Example 4: playerInit

//----------------------------------------------------
// Player initialization
//----------------------------------------------------
void playerInit(void){
	XnStatus rc;

	rc = g_context.Init();
	rc = g_context.OpenFileRecording(PLAYER_RECORDE_PATH, g_player);
	errorCheck(rc, "OpenFileRecording");		// error check

	rc = g_context.FindExistingNode(XN_NODE_TYPE_PLAYER, g_player);
	errorCheck(rc, "FindExistingNode_player");

	// Get the supported format
	cout << "SupportedFormat:" <<
		g_player.GetSupportedFormat() << endl;

	// Get the playback speed
	cout << "PlaybackSpeed:" << g_player.GetPlaybackSpeed() << endl;
}
Author: Yusuke-Shimizu, Project: depthkey, Lines: 20, Source: main.cpp

Example 5: setup

//--------------------------------------------------------------
void testApp::setup(){
	
	XnStatus rc;
	
	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, g_scriptNode, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return ;
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return;
	}
	
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	if (rc != XN_STATUS_OK)
	{
		printf("No depth node exists! Check your XML.");
		return;
	}
	
	g_depth.GetMetaData(g_depthMD);
	
	// Texture map init
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512;
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512;
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));
	std::cout << " w:" << g_depthMD.FullXRes() << " h:" << g_depthMD.FullYRes() << std::endl;
	pixels = (unsigned char*)malloc(640*480*3*sizeof(unsigned char));
	tex.allocate(640, 480, GL_RGB);
}
Author: mmlemon, Project: OpenNI_Advent_oFSample, Lines: 37, Source: testApp.cpp

Example 6: Init_Kinect

XnStatus Init_Kinect(EventOutSFNode* skltn,EventOutSFNode* hnz,EventOutSFNode* flr){
	XnStatus rc=XN_STATUS_OK;
	EnumerationErrors errors;
	DepthMetaData g_depthMD;
	ImageMetaData g_imageMD;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	CHECK_RC(rc, "InitFromXml");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	CHECK_RC(rc,"XN_NODE_TYPE_DEPTH");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	CHECK_RC(rc,"XN_NODE_TYPE_IMAGE");
	rc=  g_context.FindExistingNode(XN_NODE_TYPE_USER,g_user); 
	CHECK_RC(rc,"XN_NODE_TYPE_USER");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_SCENE,g_scene);
	CHECK_RC(rc,"XN_NODE_TYPE_SCENE");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_HANDS,g_hands);
	CHECK_RC(rc,"XN_NODE_TYPE_HANDS");
	rc=g_context.FindExistingNode(XN_NODE_TYPE_GESTURE,g_gesture);
	CHECK_RC(rc,"XN_NODE_TYPE_GESTURE");
	g_depth.GetMetaData(g_depthMD);
	g_fps=g_depthMD.FPS();
	g_image.GetMetaData(g_imageMD);
	rc=init_skeleton();
	CHECK_RC(rc,"INIT SKELETON");
	rc=init_hands();
	CHECK_RC(rc,"INIT HANDS");
	pix_w=g_depthMD.FullXRes();
	pix_h=g_depthMD.FullYRes();
	if(pix_h==0||pix_w==0){return XN_STATUS_ERROR;}
	g_skltn=skltn;
	g_hnz=hnz;
	g_flr=flr;
	if(NULL==g_skltn||NULL==g_hands||NULL==g_flr)return XN_STATUS_ERROR;
	isInit=true;
	return rc;
}
Author: dinglingwei402, Project: TempCode, Lines: 36, Source: ControlKinect1.cpp

Example 7: main

int main()
{
	XnStatus nRetVal = XN_STATUS_OK;

	Context context;
	ScriptNode scriptNode;
	EnumerationErrors errors;

	const char *fn = NULL;
	if	(fileExists(SAMPLE_XML_PATH)) fn = SAMPLE_XML_PATH;
	else if (fileExists(SAMPLE_XML_PATH_LOCAL)) fn = SAMPLE_XML_PATH_LOCAL;
	else {
		printf("Could not find '%s' nor '%s'. Aborting.\n" , SAMPLE_XML_PATH, SAMPLE_XML_PATH_LOCAL);
		return XN_STATUS_ERROR;
	}
	printf("Reading config from: '%s'\n", fn);
	nRetVal = context.InitFromXmlFile(fn, scriptNode, &errors);

	if (nRetVal == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (nRetVal);
	}
	else if (nRetVal != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(nRetVal));
		return (nRetVal);
	}

	DepthGenerator depth;
	nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
	CHECK_RC(nRetVal, "Find depth generator");

	XnFPSData xnFPS;
	nRetVal = xnFPSInit(&xnFPS, 180);
	CHECK_RC(nRetVal, "FPS Init");

	DepthMetaData depthMD;

	while (!xnOSWasKeyboardHit())
	{
		nRetVal = context.WaitOneUpdateAll(depth);
		if (nRetVal != XN_STATUS_OK)
		{
			printf("UpdateData failed: %s\n", xnGetStatusString(nRetVal));
			continue;
		}

		xnFPSMarkFrame(&xnFPS);

		depth.GetMetaData(depthMD);

		printf("Frame %d Middle point is: %u. FPS: %f\n", depthMD.FrameID(), depthMD(depthMD.XRes() / 2, depthMD.YRes() / 2), xnFPSCalc(&xnFPS));
	}

	depth.Release();
	scriptNode.Release();
	context.Release();

	return 0;
}
Author: ABMNYZ, Project: OpenNI, Lines: 63, Source: NiSimpleRead.cpp

Example 8: main

int main(int argc, char* argv[])
{
	XnStatus rc;

	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return (rc);
	}

	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);

	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);

	// Hybrid mode isn't supported in this sample
	if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes())
	{
		printf ("The device depth and image resolution must be equal!\n");
		return 1;
	}

	// RGB is the only image format supported.
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}

	// Texture map init
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes()-1) / 512) + 1) * 512;
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes()-1) / 512) + 1) * 512;
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));

	// OpenGL init
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
	glutInitWindowSize(GL_WIN_SIZE_X, GL_WIN_SIZE_Y);
	glutCreateWindow ("OpenNI Simple Viewer");
	glutFullScreen();
	glutSetCursor(GLUT_CURSOR_NONE);

	glutKeyboardFunc(glutKeyboard);
	glutDisplayFunc(glutDisplay);
	glutIdleFunc(glutIdle);

	glDisable(GL_DEPTH_TEST);
	glEnable(GL_TEXTURE_2D);

	// Per frame code is in glutDisplay
	glutMainLoop();

	return 0;
}
Author: 3david, Project: OpenNI, Lines: 64, Source: NiSimpleViewer.cpp

Example 9: main

int main(int argc, char* argv[])
{
	XnStatus nRetVal = XN_STATUS_OK;
	nRetVal = xnLogInitFromXmlFile(SAMPLE_XML_PATH);
	if (nRetVal != XN_STATUS_OK)
	{
		printf("Log couldn't be opened: %s. Running without log", xnGetStatusString(nRetVal));
	}

	if (argc < 3)
	{
		printf("usage: %s <inputFile> <outputFile>\n", argv[0]);
		return -1;
	}

	const char* strInputFile = argv[1];
	const char* strOutputFile = argv[2];

	Context context;
	nRetVal = context.Init();
	CHECK_RC(nRetVal, "Init");

	// open input file
	Player player;
	nRetVal = context.OpenFileRecording(strInputFile, player);
	CHECK_RC(nRetVal, "Open input file");

	// Get depth node from recording
	DepthGenerator depth;
	nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
	CHECK_RC(nRetVal, "Find depth generator");

	// Create mock node based on depth node from recording
	MockDepthGenerator mockDepth;
	nRetVal = mockDepth.CreateBasedOn(depth);
	CHECK_RC(nRetVal, "Create mock depth node");

	// create recorder
	Recorder recorder;

	nRetVal = recorder.Create(context);
	CHECK_RC(nRetVal, "Create recorder");

	nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, strOutputFile);
	CHECK_RC(nRetVal, "Set recorder destination file");

	// add depth node to recorder
	nRetVal = recorder.AddNodeToRecording(mockDepth);
	CHECK_RC(nRetVal, "Add node to recording");

	nRetVal = player.SetRepeat(FALSE);
	XN_IS_STATUS_OK(nRetVal);

	XnUInt32 nNumFrames = 0;

	nRetVal = player.GetNumFrames(depth.GetName(), nNumFrames);
	CHECK_RC(nRetVal, "Get player number of frames");

	DepthMetaData depthMD;

	while ((nRetVal = depth.WaitAndUpdateData()) != XN_STATUS_EOF)
	{
		CHECK_RC(nRetVal, "Read next frame");
		
		// Get depth meta data
		depth.GetMetaData(depthMD);

		//-----------------------------------------------//
		// Transform depth! This is the interesting part //
		//-----------------------------------------------//
		
		/* Enable the depth data to be modified. This is done implicitly by depthMD.WritableDepthMap(),
		   but we're calling it just to be clear. */
		nRetVal = depthMD.MakeDataWritable();
		CHECK_RC(nRetVal, "Make depth data writable");

		transformDepthMD(depthMD);

		// Pass the transformed data to the mock depth generator
		nRetVal = mockDepth.SetData(depthMD);
		CHECK_RC(nRetVal, "Set mock node new data");

		/* We need to call recorder.Record explicitly because we're not using WaitAndUpdateAll(). */
		nRetVal = recorder.Record();
		CHECK_RC(nRetVal, "Record");

		printf("Recorded: frame %u out of %u\r", depthMD.FrameID(), nNumFrames);
	}

	printf("\n");

	return 0;
}
Author: Clebeson, Project: OpenNI, Lines: 93, Source: NiRecordSynthetic.cpp

Example 10: xnInit

//----------------------------------------------------
// OpenNI-related initialization
//----------------------------------------------------
void xnInit(void){
	XnStatus rc;

	EnumerationErrors errors;
	rc = g_context.InitFromXmlFile(SAMPLE_XML_PATH, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT){
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		exit(1);
	}else if (rc != XN_STATUS_OK){
		printf("Open failed: %s\n", xnGetStatusString(rc));
		exit(1);
	}
	
	//playerInit();

	rc = xnFPSInit(&g_xnFPS, 180);	// initialize the FPS counter
	//CHECK_RC(rc, "FPS Init");

	// Get the depth, image, and user generators
	rc = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	errorCheck(rc, "g_depth");		// error check
	rc = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	errorCheck(rc, "g_image");
	rc = g_context.FindExistingNode(XN_NODE_TYPE_USER, g_user);
	//rc = g_user.Create(g_context);
	errorCheck(rc, "g_user");

	// Check whether user detection (skeleton capability) is supported
	if (!g_user.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
		//throw std::runtime_error("User detection is not supported");
		cout << "User detection is not supported" << endl;
		exit(1);
	}

	// Recorder setup
	//rc = setRecorder(g_recorder, rc);

	// Register user callbacks
	XnCallbackHandle userCallbacks;
	g_user.RegisterUserCallbacks(UserDetected, UserLost, NULL, userCallbacks);

	// Get the depth, image, and user data
	g_depth.GetMetaData(g_depthMD);
	g_image.GetMetaData(g_imageMD);
	g_user.GetUserPixels(0, g_sceneMD);

	// Hybrid mode isn't supported in this sample
	// Error out if the image and depth resolutions differ
	if (g_imageMD.FullXRes() != g_depthMD.FullXRes() || g_imageMD.FullYRes() != g_depthMD.FullYRes()){
		printf ("The device depth and image resolution must be equal!\n");
		exit(1);
	}

	// RGB is the only image format supported.
	// Check the pixel format
	if (g_imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24){
		printf("The device image format must be RGB24\n");
		exit(1);
	}

	// Texture map init
	// Adjust the full-screen texture size
	g_nTexMapX = (((unsigned short)(g_depthMD.FullXRes() - 1) / 512) + 1) * 512;	// round up to a multiple of 512 (1024 here)
	g_nTexMapY = (((unsigned short)(g_depthMD.FullYRes() - 1) / 512) + 1) * 512;	// 512 here
	g_pTexMap = (XnRGB24Pixel*)malloc(g_nTexMapX * g_nTexMapY * sizeof(XnRGB24Pixel));	// allocate enough color storage for the whole screen

	// Initialize the coordinate buffers
	g_pPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D));			// buffer for 3D points
	g_pBackTex = (XnRGB24Pixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnRGB24Pixel));	// buffer for the background image
	g_pBackPoint = (XnPoint3D*)malloc(KINECT_IMAGE_SIZE * sizeof(XnPoint3D));		// buffer for background 3D points
	g_pBackDepth = (XnDepthPixel*)malloc(KINECT_IMAGE_SIZE * sizeof(XnDepthPixel));		// buffer for the background depth map
}
Author: Yusuke-Shimizu, Project: depthkey, Lines: 77, Source: main.cpp

Example 11: main

int main(int argc, char* argv[])
{

	EnumerationErrors errors;


    //rc = context.Init();
    rc = context.InitFromXmlFile(strPathToXML,&errors);
    if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		XnChar strError[1024];
		errors.ToString(strError, 1024);
		printf("%s\n", strError);
		return (rc);
	}
	else if (rc != XN_STATUS_OK)
	{
		printf("Open failed: %s\n", xnGetStatusString(rc));
		return (rc);
	}
	
	/* UNCOMMENT TO GET FILE READING 
    //rc = context.OpenFileRecording(strInputFile);
	//CHECK_RC(rc, "Open input file");

	//rc = context.FindExistingNode(XN_NODE_TYPE_PLAYER, player);
	//CHECK_RC(rc, "Get player node"); */ 

	rc = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
	CHECK_RC(rc, "Find depth generator");

	rc = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
	CHECK_RC(rc, "Find image generator");

    depth.GetMetaData(depthMD);
	image.GetMetaData(imageMD);

    //rc = player.SetRepeat(FALSE);
	XN_IS_STATUS_OK(rc);

    //rc = player.GetNumFrames(image.GetName(), nNumFrames);
	//CHECK_RC(rc, "Get player number of frames");
	//printf("%d\n",nNumFrames);

    //rc = player.GetNumFrames(depth.GetName(), nNumFrames);
	//CHECK_RC(rc, "Get player number of frames");
	//printf("%d\n",nNumFrames);

	// Hybrid mode isn't supported
	if (imageMD.FullXRes() != depthMD.FullXRes() || imageMD.FullYRes() != depthMD.FullYRes())
	{
		printf ("The device depth and image resolution must be equal!\n");
		return 1;
	}

	// RGB is the only image format supported.
	if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		printf("The device image format must be RGB24\n");
		return 1;
	}

    avi = cvCreateVideoWriter(strOutputFile, 0, 30, cvSize(640,480), TRUE);

    depthMetersMat = cvCreateMat(480, 640, CV_16UC1);
    kinectDepthImage = cvCreateImage( cvSize(640,480),16,1 );

    depthMetersMat2 = cvCreateMat(480, 640, CV_16UC1);
    kinectDepthImage2 = cvCreateImage( cvSize(640,480),16,1 );

    colorArr[0] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);
    colorArr[1] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);
    colorArr[2] = cv::Mat(imageMD.YRes(),imageMD.XRes(),CV_8U);

    //prepare_for_face_detection();

    int b;
    int g;
    int r;

	while ((rc = image.WaitAndUpdateData()) != XN_STATUS_EOF && (rc = depth.WaitAndUpdateData()) != XN_STATUS_EOF) {
        if (rc != XN_STATUS_OK) {
            printf("Read failed: %s\n", xnGetStatusString(rc));
            break;
        }
        depth.GetMetaData(depthMD);
        image.GetMetaData(imageMD);

        //XnUInt32 a;
        //a = g_imageMD.FPS;
        printf("%d\n",imageMD.FrameID());
        //a = g_depthMD.DataSize();
        //printf("%d\n",a);

        pDepth = depthMD.Data();
        pImageRow = imageMD.RGB24Data();

        for (unsigned int y=0; y<imageMD.YRes(); y++) {
            pPixel = pImageRow;
            uchar* Bptr = colorArr[0].ptr<uchar>(y);
//......... part of the code omitted here .........
Author: rabbbit, Project: kinect-face-expression-recognition, Lines: 101, Source: NiSimpleViewer.cpp

Example 12: Init

int Init()
{
	
	XnStatus rc;

	//Make sure our image types are the same as the OpenNI image types.
	assert(sizeof(XnRGB24Pixel) == sizeof(ColorPixel));
	assert(sizeof(XnDepthPixel) == sizeof(DepthPixel));
	assert(sizeof(XnStatus) == sizeof(int));

	// Load OpenNI xml settings
	char filePath[255];
	int length = Util::Helpers::GetExeDirectory(filePath, sizeof(filePath));
	filePath[length] = '\\';
	strcpy(&filePath[length+1], SAMPLE_XML_PATH);

	EnumerationErrors errors;
	rc = deviceContext.InitFromXmlFile(filePath, &errors);
	if (rc == XN_STATUS_NO_NODE_PRESENT)
	{
		//One reason would be if Microsoft SDK is installed beside PrimeSense.  Device manager should say PrimeSense instead of Microsoft Kinect.
		
		//XnChar strError[1024];
		//errors.ToString(strError, 1024);
		//LOGE("%s\n", strError);
		return -1;
	}
	else if (rc != XN_STATUS_OK)
	{
		fprintf(stderr, "%s\n", xnGetStatusString(rc));
		/*LOGE("Open failed: %s\n", xnGetStatusString(rc));*/
		return (rc);
	}

	// Retrieve colour and depth nodes
	rc = deviceContext.FindExistingNode(XN_NODE_TYPE_IMAGE, colorImageGenerator);
	rc = deviceContext.FindExistingNode(XN_NODE_TYPE_DEPTH, depthImageGenerator);

	// Set mirror mode to off
	SetMirrorMode(false);

	// Get a frame to perform checks on it
	ImageMetaData colorImageMetaData;
	DepthMetaData depthImageMetaData;
	depthImageGenerator.GetMetaData(depthImageMetaData);
	colorImageGenerator.GetMetaData(colorImageMetaData);

	// Hybrid mode isn't supported in this sample
	if (colorImageMetaData.FullXRes() != depthImageMetaData.FullXRes() || colorImageMetaData.FullYRes() != depthImageMetaData.FullYRes())
	{
		/*LOGE("The device depth and image resolution must be equal!\n");*/
		return 1;
	}

	// RGB is the only image format supported.
	if (colorImageMetaData.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
	{
		/*LOGE("The device image format must be RGB24\n");*/
		return 1;
	}
	
	// Need to make sure the automatic alignment of colour and depth images is supported.
	XnBool isSupported = depthImageGenerator.IsCapabilitySupported("AlternativeViewPoint");
	if(!isSupported)
	{
		/*LOGE("Cannot set AlternativeViewPoint!\n");*/
		return 1;
	}

	
	// Set it to VGA maps at 30 FPS
	/*XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 60;
	rc = g_depth.SetMapOutputMode(mapMode);
	if(rc)
	{
		LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));
		return 1;
	}
	mapMode.nFPS = 30;
	rc = g_image.SetMapOutputMode(mapMode);
	if(rc)
	{
		LOGE("Failed to set image map mode: %s\n", xnGetStatusString(rc));
		return 1;
	}*/


	// Set automatic alignment of the colour and depth images.
	rc = depthImageGenerator.GetAlternativeViewPointCap().SetViewPoint(colorImageGenerator);
	if(rc)
	{
		/*LOGE("Failed to set depth map mode: %s\n", xnGetStatusString(rc));*/
		return 1;
	}


	return XN_STATUS_OK;
//......... part of the code omitted here .........
Author: AaronGenest, Project: KinectArms, Lines: 101, Source: KinectSensor_OpenNi.cpp

Example 13: main

//////////////////// Entry point //////////////////// 
int main(int argc, char* argv[]) 
{
	depthmask_for_mesh = cvCreateImage(MESH_SIZE, IPL_DEPTH_8U, 1);
	markerSize.width = -1; 
	markerSize.height = -1;

  //init OpenNI
  EnumerationErrors errors;
	switch (XnStatus rc = niContext.InitFromXmlFile(KINECT_CONFIG_FILENAME, &errors)) {
		case XN_STATUS_OK:
			break;
		case XN_STATUS_NO_NODE_PRESENT:
			XnChar strError[1024];	errors.ToString(strError, 1024);
			printf("%s\n", strError);
			return rc; break;
		default:
			printf("Open failed: %s\n", xnGetStatusString(rc));
			return rc;
	}

  //set camera parameter
  capture = new Camera(0, CAPTURE_SIZE, CAMERA_PARAMS_FILENAME);
	RegistrationParams = scaleParams(capture->getParameters(), double(REGISTRATION_SIZE.width)/double(CAPTURE_SIZE.width));

  //init parameter for rendering
  osg_init(calcProjection(RegistrationParams, capture->getDistortion(), REGISTRATION_SIZE));

  //for Kinect view
  loadKinectParams(KINECT_PARAMS_FILENAME, &kinectParams, &kinectDistort);
	kinectDistort =0;
	kinectParams->data.db[2]=320.0; 
	kinectParams->data.db[5]=240.0;

	//setting kinect context
	niContext.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth);
	niContext.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image);
	g_depth.GetMirrorCap().SetMirror(false);
	g_depth.GetAlternativeViewPointCap().SetViewPoint(g_image);

	//registration
	kinectReg = new RegistrationOPIRA(new OCVSurf());
	kinectReg->addResizedMarker(MARKER_FILENAME, 400);

	//physics
	m_world = new bt_ARMM_world();
	ground_grid = new float[GRID_SIZE];
	for (int i =0;i < GRID_SIZE; i++) {
		ground_grid[i] = 0; 
	}
#ifdef SIM_PARTICLES
	voxel_grid = new float[1200];
	for (int i =0;i < 1200; i++) {
		voxel_grid[i] = 0;
	}
#endif

	//controls
	KeyboardController *kc = new KeyboardController(m_world);
	XboxController *xc = new XboxController(m_world);

	loadKinectTransform(KINECT_TRANSFORM_FILENAME);

#ifdef USE_ARMM_VRPN
	//----->Server part
	m_Connection = new vrpn_Connection_IP();
	ARMM_server = new ARMM_Communicator(m_Connection	);

	//Open the imager server and set up channel zero to send our data.
	//if ( (ARMM_img_server = new vrpn_Imager_Server("ARMM_Image", m_Connection, MESH_SIZE.width, MESH_SIZE.height)) == NULL) {
	//	fprintf(stderr, "Could not open imager server\n");
	//	return -1;
	//}
	//if ( (channel_id = ARMM_img_server->add_channel("Grid")) == -1) {
	//	fprintf(stderr, "Could not add channel\n");
	//	return -1;
	//}
	ARMM_server->SetObjectsData(&(m_world->Objects_Body));
	ARMM_server->SetHandsData(&(m_world->HandObjectsArray));

  cout << "Created VRPN server." << endl;
	//<-----
#ifdef USE_ARMM_VRPN_RECEIVER 	//----->Receiver part
	ARMM_sever_receiver = new vrpn_Tracker_Remote (ARMM_CLIENT_IP);
	ARMM_sever_receiver->register_change_handler(NULL, handle_object);
#endif 	//<----- 

#endif

#ifdef USE_SKIN_SEGMENTATION	//Skin color look up
	_HandRegion.LoadSkinColorProbTable();
#endif

#ifdef USE_OPTICAL_FLOW
	prev_gray = cvCreateImage(cvSize(OPFLOW_SIZE.width, OPFLOW_SIZE.height), IPL_DEPTH_8U, 1);
	curr_gray = cvCreateImage(cvSize(OPFLOW_SIZE.width, OPFLOW_SIZE.height), IPL_DEPTH_8U, 1);
	flow_capture = new FlowCapture();
	flow_capture->Init();
#endif

//......... part of the code omitted here .........
Author: horsewin, Project: ARMicroMachines, Lines: 101, Source: main.cpp

Example 14: main

int main(int argc, char* argv[])
{
    XnStatus nRetVal = XN_STATUS_OK;
    nRetVal = xnLogInitFromXmlFile(SAMPLE_XML_PATH);
    if (nRetVal != XN_STATUS_OK)
    {
        printf("Log couldn't be opened: %s. Running without log", xnGetStatusString(nRetVal));
    }
    if (argc < 3)
    {
        printf("usage: %s <inputFile> <outputFile>\n", argv[0]);
        return -1;
    }
    const char* strInputFile = argv[1];
    const char* strOutputFile = argv[2];
    Context context;
    nRetVal = context.Init();
    CHECK_RC(nRetVal, "Init");
    // open input file
    Player player;
    nRetVal = context.OpenFileRecording("/media/6B58CB581C0AACF6/7.oni", player);
    CHECK_RC(nRetVal, "Open input file");
    // Get depth node from recording
    DepthGenerator depth;
    nRetVal = context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth);
    CHECK_RC(nRetVal, "Find depth generator");
    // Create mock node based on depth node from recording
    MockDepthGenerator mockDepth;
    nRetVal = mockDepth.CreateBasedOn(depth);
    CHECK_RC(nRetVal, "Create mock depth node");


    ImageGenerator image;
    nRetVal = context.FindExistingNode(XN_NODE_TYPE_IMAGE, image);
    CHECK_RC(nRetVal, "Find depth generator");
    // Create mock node based on depth node from recording
    MockImageGenerator mockImage;
    nRetVal = mockImage.CreateBasedOn(image);
    CHECK_RC(nRetVal, "Create mock depth node");
    // create recorder
    Recorder recorder;
    nRetVal = recorder.Create(context);
    CHECK_RC(nRetVal, "Create recorder");
    nRetVal = recorder.SetDestination(XN_RECORD_MEDIUM_FILE, "/home/shaghayegh/up.oni");
    CHECK_RC(nRetVal, "Set recorder destination file");
    // add depth node to recorder
    nRetVal = recorder.AddNodeToRecording(mockDepth);
    CHECK_RC(nRetVal, "Add node to recording");
    // nRetVal = recorder.AddNodeToRecording(mockImage);
    // CHECK_RC(nRetVal, "Add node to recording");

    nRetVal = player.SetRepeat(FALSE);
    XN_IS_STATUS_OK(nRetVal);
    XnUInt32 nNumFrames = 0;
    nRetVal = player.GetNumFrames(depth.GetName(), nNumFrames);
    CHECK_RC(nRetVal, "Get player number of frames");
    DepthMetaData depthMD;
    ImageMetaData imageMD;
    int frameNum = 0;
    String path = "/media/6B58CB581C0AACF6/ebook/Articles/activity_recognition/data1/0512164529/";
    while ((nRetVal = depth.WaitAndUpdateData()) != XN_STATUS_EOF)
    {
        ++frameNum;
        CHECK_RC(nRetVal, "Read next frame");
        // Get depth meta data
        depth.GetMetaData(depthMD);
        image.GetMetaData(imageMD);

        //-----------------------------------------------//
        // Transform depth! This is the interesting part //
        //-----------------------------------------------//
        /* Enable the depth data to be modified. This is done implicitly by depthMD.WritableDepthMap(),
but we're calling it just to be clear. */
        nRetVal = depthMD.MakeDataWritable();
        CHECK_RC(nRetVal, "Make depth data writable");

        // nRetVal = imageMD.MakeDataWritable();
        // CHECK_RC(nRetVal, "Make depth data writable");

        String ficheroActualRGB;
        // ficheroActualRGB = path  +"RGB_" + boost::to_string(frameNum) + ".png";
        String ficheroActualDepth = path +"Depth_"+ boost::to_string(frameNum) + ".png";

        // Mat matFrameImage = imread(ficheroActualRGB, 1);
        // resize(matFrameImage, matFrameImage, Size(640, 480), 0, 0, INTER_CUBIC);
        Mat matFrameDepth = imread(ficheroActualDepth,1);
        resize(matFrameDepth, matFrameDepth, Size(480, 640), 0, 0, INTER_CUBIC);

        transformDepthMD(matFrameDepth,depthMD);
        // transformImageMD(matFrameImage,imageMD);
//         Pass the transformed data to the mock depth generator
        nRetVal = mockDepth.SetData(depthMD);
        CHECK_RC(nRetVal, "Set mock node new data");

        // nRetVal = mockImage.SetData(imageMD);
        // CHECK_RC(nRetVal, "Set mock node new data");

        /* We need to call recorder.Record explicitly because we're not using WaitAndUpdateAll(). */
        nRetVal = recorder.Record();
        CHECK_RC(nRetVal, "Record");
//......... part of the code omitted here .........
Author: gharghabi, Project: MEng_thesis, Lines: 101, Source: convert_rgbd_to_oni_worksForDepth.cpp

Example 15: initialize

/**
 * Initialize XN functions
 */
void initialize() 
{
    ImageMetaData imageMD;
    XnStatus status;
    int dummy;
    
    srand ( time(NULL) );

    // Initializing context and checking for enumeration errors
    status = g_Context.InitFromXmlFile(XML_CONFIG_FILE, &g_Error);
    checkEnumError(status, g_Error);

    // Finding nodes and checking for errors
    STATUS_CHECK(g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator), "Finding depth node");
    STATUS_CHECK(g_Context.FindExistingNode(XN_NODE_TYPE_SCENE, g_SceneAnalyzer), "Finding scene analyzer");


    //  Note: when the image generation node is handled the program gets
    //  too slow.

    // STATUS_CHECK(g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator), "Finding image node");

    // Set view point of Depth generator to the image generator point of
    // view.
    // STATUS_CHECK(g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator), "Set View Point");

    STATUS_CHECK(g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator), "Finding user node");
    
    //g_ImageGenerator.GetMetaData(imageMD);
    
    // Checking camera pixel format
    //if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24) {
    //    reportError("Camera format not supported...!\n");
    //}

    // Checking user generator capabilities
    if(!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)) {
        reportError("Skeleton capability not supported\n");
    }
    
    if(!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)) {
        reportError("Pose detection capability not supported\n");
    }

    printf("Number of players: ");
    dummy = scanf("%d", &g_MaxPlayers);
    printf("\n");

    //Initialize user detector object
    g_UserDetector = UserDetector(g_UserGenerator, g_DepthGenerator);
    g_UserDetector.registerCallbacks();
    
    g_ZamusDetector = new Zamus(&g_UserDetector);
    g_LinqDetector = new Linq(&g_UserDetector);
    g_BusterDetector = new BusterDetector(g_ZamusDetector, &g_UserDetector);
    g_IceRodDetector = new IceRodDetector(g_LinqDetector, &g_UserDetector);

    // Initialize image render object
    g_SceneRenderer = SceneRenderer(&g_ImageGenerator,
                                    &g_DepthGenerator,
                                    &g_SceneAnalyzer,
                                    &g_UserDetector,
                                    g_ZamusDetector,
                                    g_LinqDetector);

    STATUS_CHECK(g_Context.StartGeneratingAll(), "Context generation");

    g_SFBgame = SuperFiremanBrothers(&g_UserDetector, 
                                     &g_SceneAnalyzer, 
                                     g_ZamusDetector, 
                                     g_LinqDetector, 
                                     g_MaxPlayers
                                    );

}
Author: IAIshFons, Project: Super-Fireman-Brothers-Kinect-Game, Lines: 78, Source: main.cpp


Note: The C++ Context::FindExistingNode method examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not repost without permission.