This article collects typical usage examples of the C++ method xn::DepthMetaData::FullYRes. If you are wondering what DepthMetaData::FullYRes does, how to call it, or what it looks like in real code, the curated examples below should help. You can also read more about the containing class, xn::DepthMetaData.
Five code examples of the DepthMetaData::FullYRes method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
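Before the examples, a quick note on what the method returns: FullXRes()/FullYRes() give the resolution of the entire depth frame, while XRes()/YRes() give the size of the (possibly cropped) data region. The minimal sketch below illustrates the distinction; the function name PrintDepthResolution and the assumption of an already-initialized context with an existing depth node are illustrative, not taken from the examples.

#include <XnCppWrapper.h>
#include <cstdio>

void PrintDepthResolution(xn::Context& context)
{
    xn::DepthGenerator depth;
    if (context.FindExistingNode(XN_NODE_TYPE_DEPTH, depth) != XN_STATUS_OK)
        return;

    xn::DepthMetaData depthMD;
    depth.GetMetaData(depthMD);

    // FullXRes()/FullYRes() describe the whole frame; XRes()/YRes()
    // describe the data region, which is smaller when cropping is enabled.
    printf("full frame: %u x %u, data region: %u x %u\n",
           depthMD.FullXRes(), depthMD.FullYRes(),
           depthMD.XRes(), depthMD.YRes());
}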
Example 1: SetupDepth
bool SetupDepth(xn::Context& g_context)
{
    XnStatus nRetVal = XN_STATUS_OK;

    fprintf(stderr, "Setting up the depth generator\n");
    if ((nRetVal = g_depth.Create(g_context)) != XN_STATUS_OK)
    {
        printf("Could not create depth generator: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }
    /*
    if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not find depth sensor: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }
    */
    XnMapOutputMode mapMode;
    mapMode.nXRes = XN_VGA_X_RES;
    mapMode.nYRes = XN_VGA_Y_RES;
    mapMode.nFPS = 30;
    if ((nRetVal = g_depth.SetMapOutputMode(mapMode)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not set depth mode: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }

    g_depth.GetMetaData(g_depthMD);
    g_depthWidth = g_depthMD.FullXRes();
    g_depthHeight = g_depthMD.FullYRes();

    return TRUE;
}
Example 2: main
//......... part of the code is omitted here .........
    // g_DepthGen.GetAlternativeViewPointCap().SetViewPoint( g_ImageGen );

    g_DepthGen.GetMetaData ( g_DepthMD );
    g_ImageGen.GetMetaData ( g_ImageMD );

    assert ( g_ImageMD.PixelFormat() == XN_PIXEL_FORMAT_RGB24 );
    assert ( g_DepthMD.PixelFormat() == XN_PIXEL_FORMAT_GRAYSCALE_16_BIT );

    //
    // Create OpenCV Showing Window and Related Data Structures
    //
    cv::namedWindow ( IMAGE_WIN_NAME, CV_WINDOW_AUTOSIZE );
    cv::namedWindow ( DEPTH_WIN_NAME, CV_WINDOW_AUTOSIZE );

    cv::Mat depthImgMat ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_16UC1 );
    cv::Mat depthImgShow ( g_DepthMD.YRes(), g_DepthMD.XRes(), CV_8UC3 );
    cv::Mat colorImgMat ( g_ImageMD.YRes(), g_ImageMD.XRes(), CV_8UC3 );

#define ARTAG_DEBUG
#ifdef ARTAG_DEBUG
    cv::setMouseCallback ( IMAGE_WIN_NAME, ClickOnMouse, 0 );
#endif

    bool flipColor = true;

    //
    // Start to Loop
    //
    while ( ctlWndKey != ESC_KEY_VALUE )
    {
        //
        // Try to Get New Frame From Kinect
        //
        nRetVal = g_Context.WaitAnyUpdateAll ();
        g_DepthGen.GetMetaData ( g_DepthMD );
        g_ImageGen.GetMetaData ( g_ImageMD );

        assert ( g_DepthMD.FullXRes() == g_DepthMD.XRes() && g_DepthMD.FullYRes() == g_DepthMD.YRes() );
        assert ( g_ImageMD.FullXRes() == g_ImageMD.XRes() && g_ImageMD.FullYRes() == g_ImageMD.YRes() );

        GlobalUtility::CopyColorRawBufToCvMat8uc3 ( (const XnRGB24Pixel *)(g_ImageMD.Data()), colorImgMat );

#ifdef SHOW_DEPTH_WINDOW
        GlobalUtility::CopyDepthRawBufToCvMat16u ( (const XnDepthPixel *)(g_DepthMD.Data()), depthImgMat );
        // GlobalUtility::ConvertDepthCvMat16uToYellowCvMat ( depthImgMat, depthImgShow );
        GlobalUtility::ConvertDepthCvMat16uToGrayCvMat ( depthImgMat, depthImgShow );
        cv::imshow ( DEPTH_WIN_NAME, depthImgShow );
#endif

        ctlWndKey = cvWaitKey ( 15 );

        if ( ctlWndKey == 'f' || ctlWndKey == 'F' )
        {
            artagHelper.Clear();
            artagHelper.FindMarkerCorners ( (unsigned char *)(g_ImageMD.Data()) );
            artagHelper.PrintMarkerCornersPos2dInCam ();
            extrCalibrator.ExtrCalib ( artagHelper );

            std::cout << "\nKinect Extr Matrix:" << std::endl;
            extrCalibrator.PrintMatrix ( extrCalibrator.GetMatrix ( ExtrCalibrator::EXTR ) );
            std::cout << "Reprojection ERROR = "
                      << extrCalibrator.ComputeReprojectionErr ( artagHelper ) << std::endl
                      // << extrCalibrator.ComputeReprojectionErr ( artagHelper.m_MarkerCornerPosCam2d, artagHelper.m_MarkerCornerPos3d, 24 ) << std::endl
                      << "Valid Marker Number = " << artagHelper.GetValidMarkerNumber() << std::endl
                      << std::endl;

            extrCalibrator.SaveMatrix ( ExtrCalibrator::EXTR, KINECT_EXTR_FILE );
        }
        if ( ctlWndKey == 's' || ctlWndKey == 'S' )
        {
            flipColor = !flipColor;
        }

        if ( flipColor ) {
            cv::cvtColor ( colorImgMat, colorImgMat, CV_RGB2BGR );
        }
        artagHelper.DrawMarkersInCameraImage ( colorImgMat );
        cv::imshow ( IMAGE_WIN_NAME, colorImgMat );
    }

    g_Context.Release ();

    system ( "pause" );
    exit ( EXIT_SUCCESS );
}
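Example 2 asserts that FullXRes()/FullYRes() equal XRes()/YRes(), i.e. that cropping is disabled. When cropping is active the two differ, and the position of the cropped window inside the full frame is reported by XOffset()/YOffset(). The following hedged sketch copies a cropped depth map back into a full-resolution buffer; the helper name CopyCroppedToFull and the std::vector buffer are illustrative and not part of the example above.

#include <XnCppWrapper.h>
#include <cstring>
#include <vector>

void CopyCroppedToFull(const xn::DepthMetaData& dmd,
                       std::vector<XnDepthPixel>& fullFrame)
{
    // Allocate a zeroed buffer for the full FullXRes() x FullYRes() frame.
    fullFrame.assign(dmd.FullXRes() * dmd.FullYRes(), 0);

    const XnDepthPixel* pSrc = dmd.Data();
    for (XnUInt32 y = 0; y < dmd.YRes(); ++y)
    {
        // Place each cropped row at its original position in the full frame.
        XnDepthPixel* pDst = &fullFrame[(y + dmd.YOffset()) * dmd.FullXRes()
                                        + dmd.XOffset()];
        memcpy(pDst, pSrc + y * dmd.XRes(), dmd.XRes() * sizeof(XnDepthPixel));
    }
}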
Example 3: Update
// Save new data from OpenNI
void MovieMng::Update(const xn::DepthMetaData& dmd, const xn::ImageMetaData& imd)
{
    // Check for free buffer space
    if ((m_nBufferSizeFile == 0) || (m_nBufferSizeSend == 0))
    {
        printf("No free buffer space!! ForFile:%d ForSend:%d\n", m_nBufferSizeFile, m_nBufferSizeSend);
        return;
    }

    SingleFrame* pFrames = m_pFrames + m_nNextWrite;

    if (m_nNextWrite == 0)
    {
        m_nImageHeight = imd.FullYRes();
        m_nImageWidth = imd.FullXRes();
        m_nDepthHeight = dmd.FullYRes();
        m_nDepthWidth = dmd.FullXRes();
        printf("Image Height:%d Width:%d Depth Height:%d Width:%d\n",
               m_nImageHeight, m_nImageWidth, m_nDepthHeight, m_nDepthWidth);
    }

#ifdef LOG_WRITE_ENABLE
    fprintf(mmng_fp, "[%s] update. m_nNextWrite:%d\n", __FUNCTION__, m_nNextWrite);
#endif

    if (m_bDepth)
    {
        pFrames->depthFrame.CopyFrom(dmd);
    }
    if (m_bImage)
    {
        pFrames->imageFrame.CopyFrom(imd);
    }

    // Update the buffer management bookkeeping
    if (m_bFileWrite)
    {
        EnterCriticalSection(&csFileWriter);
        m_nDataCountFile++;
        m_nBufferSizeFile--;
    }
    if (m_bSendDepth)
    {
        EnterCriticalSection(&csDepthSender);
        m_nDataCountSend++;
        m_nBufferSizeSend--;
    }

    m_nNextWrite++;
    if (m_nBufferSize == m_nNextWrite)
    {
        m_nNextWrite = 0;
    }

    if (m_bFileWrite)
    {
        LeaveCriticalSection(&csFileWriter);
    }
    if (m_bSendDepth)
    {
        LeaveCriticalSection(&csDepthSender);
    }
}
Author: beckman16 | Project: Real_Time_Head_Pose_Estimation_from_Consumer_Depth_Cameras_Client_Server | Lines of code: 66 | Source file: MovieMng.cpp
Example 4: update
bool OpenNIVideo::update(osg::NodeVisitor* nv) {

    //this is the main function of your video plugin
    //you can either retrieve images from your video stream/camera/file
    //or communicate with a thread to synchronize and get the data out
    //the most important thing is to synchronize your data
    //and copy the result to the VideoImageStream used in this plugin
    //
    //0. you can collect some stats, for that you can use a timer
    osg::Timer t;

    {
        //1. mutex lock access to the image video stream
        OpenThreads::ScopedLock<OpenThreads::Mutex> _lock(this->getMutex());

        osg::notify(osg::DEBUG_INFO)<<"osgART::OpenNIVideo::update() get new image.."<<std::endl;

        XnStatus nRetVal = XN_STATUS_OK;

        nRetVal = context.WaitAndUpdateAll();
        CHECK_RC(nRetVal, "Update Data");

        xnFPSMarkFrame(&xnFPS);

        depth_generator.GetMetaData(depthMD);
        const XnDepthPixel* pDepthMap = depthMD.Data();
        // 16-bit depth map (XnDepthPixel values)

        image_generator.GetMetaData(imageMD);
        const XnUInt8* pImageMap = imageMD.Data();

        // Hybrid mode isn't supported in this sample
        if (imageMD.FullXRes() != depthMD.FullXRes() || imageMD.FullYRes() != depthMD.FullYRes())
        {
            std::cerr<<"The device depth and image resolution must be equal!"<<std::endl;
            exit(1);
        }

        // RGB is the only image format supported.
        if (imageMD.PixelFormat() != XN_PIXEL_FORMAT_RGB24)
        {
            std::cerr<<"The device image format must be RGB24"<<std::endl;
            exit(1);
        }

        const XnDepthPixel* pDepth = pDepthMap;
        const XnUInt8* pImage = pImageMap;

        XnDepthPixel zMax = depthMD.ZRes();
        // scale the 16-bit depth values to 8-bit intensities, normalized by the maximum depth (ZRes)
        for ( unsigned int i=0; i<(depthMD.XRes() * depthMD.YRes()); ++i )
        {
            *(_depthBufferByte + i) = 255 * (float(*(pDepth + i)) / float(zMax));
        }

        memcpy(_videoStreamList[0]->data(), pImage, _videoStreamList[0]->getImageSizeInBytes());
        memcpy(_videoStreamList[1]->data(), _depthBufferByte, _videoStreamList[1]->getImageSizeInBytes());

        //3. don't forget to call this to notify the rest of the application
        //that you have a new video image
        _videoStreamList[0]->dirty();
        _videoStreamList[1]->dirty();
    }

    //4. hopefully report some interesting data
    if (nv) {
        const osg::FrameStamp *framestamp = nv->getFrameStamp();
        if (framestamp && _stats.valid())
        {
            _stats->setAttribute(framestamp->getFrameNumber(),
                "Capture time taken", t.time_m());
        }
    }

    // Increase modified count every 50 ms to ensure tracker updates
    if (updateTimer.time_m() > 50) {
        _videoStreamList[0]->dirty();
        _videoStreamList[1]->dirty();
        updateTimer.setStartTick();
    }

    return true;
}
Example 5: SetupPrimesense
bool SetupPrimesense(void)
{
    XnStatus nRetVal = XN_STATUS_OK;

    if ((nRetVal = g_context.Init()) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not init OpenNI context: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }

    if ((nRetVal = g_depth.Create(g_context)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not create depth generator: %s\n", xnGetStatusString(nRetVal));
        g_haveDepth = FALSE;
    }
    else if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_depth)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not find depth sensor: %s\n", xnGetStatusString(nRetVal));
        g_haveDepth = FALSE;
    }

    if ((nRetVal = g_image.Create(g_context)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not create image generator: %s\n", xnGetStatusString(nRetVal));
        g_haveImage = FALSE;
    }
    else if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not find image sensor: %s\n", xnGetStatusString(nRetVal));
        g_haveImage = FALSE;
    }

    if (!g_haveImage && !g_haveDepth)
    {
        fprintf(stderr, "Could not find either depth or image sources.\n");
        return FALSE;
    }

    XnMapOutputMode mapMode;
    mapMode.nXRes = XN_VGA_X_RES;
    mapMode.nYRes = XN_VGA_Y_RES;
    mapMode.nFPS = 30;

    if (g_haveDepth && ((nRetVal = g_depth.SetMapOutputMode(mapMode)) != XN_STATUS_OK))
    {
        fprintf(stderr, "Could not set depth mode: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }

    if (g_haveDepth)
    {
        g_depth.GetMetaData(g_depthMD);
        g_depthWidth = g_depthMD.FullXRes();
        g_depthHeight = g_depthMD.FullYRes();
    }

    if (g_haveImage && (nRetVal = g_image.SetMapOutputMode(mapMode)) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not set image: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }

    if ((nRetVal = g_context.StartGeneratingAll()) != XN_STATUS_OK)
    {
        fprintf(stderr, "Could not start: %s\n", xnGetStatusString(nRetVal));
        return FALSE;
    }

    return TRUE;
}
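As a hedged usage sketch (not part of the original project): once SetupPrimesense() has returned TRUE, a single depth frame can be pulled and copied into a buffer sized from g_depthWidth/g_depthHeight, which the setup code took from FullXRes()/FullYRes(). The helper name GrabOneDepthFrame is illustrative; it assumes the globals declared alongside SetupPrimesense() (g_context, g_depth, g_depthMD, g_depthWidth, g_depthHeight).

#include <XnCppWrapper.h>
#include <cstring>
#include <vector>

bool GrabOneDepthFrame(std::vector<XnDepthPixel>& depthBuffer)
{
    // Block until the depth node has a new frame.
    if (g_context.WaitOneUpdateAll(g_depth) != XN_STATUS_OK)
        return false;

    g_depth.GetMetaData(g_depthMD);
    depthBuffer.resize(g_depthWidth * g_depthHeight);

    // With cropping disabled, XRes()*YRes() equals FullXRes()*FullYRes(),
    // so DataSize() matches the buffer allocated from g_depthWidth/Height.
    memcpy(&depthBuffer[0], g_depthMD.Data(), g_depthMD.DataSize());
    return true;
}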