This article collects typical usage examples of the C++ method openni::VideoFrameRef::getVideoMode. If you are unsure what VideoFrameRef::getVideoMode does or how to call it, the curated code examples below may help. You can also browse the other methods of the containing class, openni::VideoFrameRef.
Six code examples of VideoFrameRef::getVideoMode are shown below, sorted by popularity by default.
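Before the examples, here is a minimal sketch of where getVideoMode fits in a typical OpenNI2 capture flow. This is an illustrative sketch rather than one of the collected examples; it assumes an OpenNI2-compatible depth sensor is attached and abbreviates most error handling.

#include <OpenNI.h>
#include <cstdio>

int main()
{
    // Bring up OpenNI and open the first available device
    if (openni::OpenNI::initialize() != openni::STATUS_OK)
        return 1;
    openni::Device device;
    if (device.open(openni::ANY_DEVICE) != openni::STATUS_OK)
        return 1;

    openni::VideoStream depth;
    depth.create(device, openni::SENSOR_DEPTH);
    depth.start();

    openni::VideoFrameRef frame;
    depth.readFrame(&frame);

    // getVideoMode() describes the frame actually delivered:
    // resolution, frame rate, and pixel format
    openni::VideoMode mode = frame.getVideoMode();
    printf("%dx%d @ %d fps, pixel format %d\n",
           mode.getResolutionX(), mode.getResolutionY(),
           mode.getFps(), (int)mode.getPixelFormat());

    depth.stop();
    depth.destroy();
    device.close();
    openni::OpenNI::shutdown();
    return 0;
}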
Example 1: setPixels
void ColorStream::setPixels(openni::VideoFrameRef frame)
{
    Stream::setPixels(frame);

    openni::VideoMode m = frame.getVideoMode();
    int w = m.getResolutionX();
    int h = m.getResolutionY();
    int num_pixels = w * h;

    pix.allocate(w, h, 3);

    // Copy RGB888 data into the back buffer, three bytes per pixel
    if (m.getPixelFormat() == openni::PIXEL_FORMAT_RGB888)
    {
        const unsigned char* src = (const unsigned char*)frame.getData();
        unsigned char* dst = pix.getBackBuffer().getPixels();
        for (int i = 0; i < num_pixels; i++)
        {
            dst[0] = src[0];
            dst[1] = src[1];
            dst[2] = src[2];
            src += 3;
            dst += 3;
        }
    }
    pix.swap();
}
Example 2: toCVTImage
static void toCVTImage( Image& dst, const openni::VideoFrameRef& frame )
{
    dst.reallocate( frame.getWidth(), frame.getHeight(), Openni2Helper::toIFormat( frame.getVideoMode().getPixelFormat() ) );

    // RGB frames get a channel-aware copy; everything else is copied raw
    switch( frame.getVideoMode().getPixelFormat() ) {
        case openni::PIXEL_FORMAT_RGB888:
            copyRGB( dst, ( const uint8_t* )frame.getData(), frame.getStrideInBytes() );
            break;
        default:
            copyData( dst, ( const uint8_t* )frame.getData(), frame.getStrideInBytes() );
            break;
    }
}
Example 3: getDepthImage
// Returns a CV_16U depth image (a deep copy of the frame data)
cv::Mat getDepthImage(openni::VideoFrameRef& depth_frame)
{
    if (!depth_frame.isValid())
    {
        return cv::Mat();
    }

    openni::VideoMode video_mode = depth_frame.getVideoMode();
    cv::Mat depth_img = cv::Mat(video_mode.getResolutionY(),
                                video_mode.getResolutionX(),
                                CV_16U, (char*)depth_frame.getData());
    // clone() detaches the result from the frame's internal buffer
    return depth_img.clone();
}
Example 4: getColorImage
cv::Mat getColorImage(openni::VideoFrameRef& color_frame)
{
    if (!color_frame.isValid())
    {
        return cv::Mat();
    }

    openni::VideoMode video_mode = color_frame.getVideoMode();
    cv::Mat color_img = cv::Mat(video_mode.getResolutionY(),
                                video_mode.getResolutionX(),
                                CV_8UC3, (char*)color_frame.getData());
    cv::Mat ret_img;
    // OpenNI delivers RGB; convert to OpenCV's default BGR order
    cv::cvtColor(color_img, ret_img, CV_RGB2BGR);
    return ret_img;
}
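The helpers from Examples 3 and 4 pair naturally in a capture loop. Below is a minimal usage sketch, assuming depthStream and colorStream are hypothetical, already-created and started openni::VideoStream objects; the 8000 mm depth-scaling constant is likewise an assumption chosen only for display.

openni::VideoFrameRef depthFrame, colorFrame;
while (cv::waitKey(1) != 27) // run until Esc is pressed
{
    depthStream.readFrame(&depthFrame);
    colorStream.readFrame(&colorFrame);

    cv::Mat depth = getDepthImage(depthFrame);
    cv::Mat color = getColorImage(colorFrame);
    if (depth.empty() || color.empty())
        continue;

    // Compress the 16-bit depth (assumed up to ~8000 mm) into 8 bits for display
    cv::Mat depthVis;
    depth.convertTo(depthVis, CV_8U, 255.0 / 8000.0);

    cv::imshow("depth", depthVis);
    cv::imshow("color", color);
}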
Example 5: showColorStream
// Convert the color stream into a displayable form
cv::Mat showColorStream( const openni::VideoFrameRef& colorFrame )
{
    cv::Mat colorImage;

    // Color stream
    if ( colorFrame.getVideoMode().getPixelFormat() ==
                                openni::PIXEL_FORMAT_RGB888 ) {
        // Wrap the frame data in an OpenCV Mat
        colorImage = cv::Mat( colorFrame.getHeight(),
                              colorFrame.getWidth(),
                              CV_8UC3, (unsigned char*)colorFrame.getData() );

        // Convert the RGB channel order to OpenCV's BGR
        cv::cvtColor( colorImage, colorImage, CV_RGB2BGR );
    }
    // Xtion IR stream
    else if ( colorFrame.getVideoMode().getPixelFormat() ==
                                openni::PIXEL_FORMAT_GRAY16 ) {
        // The Xtion's IR format is 16-bit grayscale.
        // In practice it only seems to use about 255 levels,
        // so it must be converted down to CV_8U to be visible.
        colorImage = cv::Mat( colorFrame.getHeight(),
                              colorFrame.getWidth(),
                              CV_16UC1, (unsigned short*)colorFrame.getData() );
        colorImage.convertTo( colorImage, CV_8U );
    }
    // Kinect for Windows IR stream
    else {
        // The Kinect's IR format is 8-bit grayscale
        // (the Kinect SDK itself uses 16-bit grayscale)
        colorImage = cv::Mat( colorFrame.getHeight(),
                              colorFrame.getWidth(),
                              CV_8UC1, (unsigned char*)colorFrame.getData() );
    }

    return colorImage;
}
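To exercise the GRAY16 branch above, the frame has to come from an IR stream rather than a color stream. A minimal sketch, assuming device is an already-opened openni::Device, and relying on the original comment's claim that the Xtion's IR stream reports PIXEL_FORMAT_GRAY16:

openni::VideoStream irStream;
irStream.create(device, openni::SENSOR_IR);
irStream.start();

openni::VideoFrameRef irFrame;
irStream.readFrame(&irFrame);

cv::Mat irImage = showColorStream(irFrame); // takes the GRAY16 branch
cv::imshow("ir", irImage);
cv::waitKey(0);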
Example 6: Display
void SampleViewer::Display()
{
    nite::Status rc = m_pHandTracker->readFrame(&handFrame);
    if (rc != nite::STATUS_OK)
    {
        printf("GetNextData failed\n");
        return;
    }

    depthFrame = handFrame.getDepthFrame();

    if (m_pTexMap == NULL)
    {
        // Texture map init: round the frame resolution up to the chunk size
        m_nTexMapX = MIN_CHUNKS_SIZE(depthFrame.getVideoMode().getResolutionX(), TEXTURE_SIZE);
        m_nTexMapY = MIN_CHUNKS_SIZE(depthFrame.getVideoMode().getResolutionY(), TEXTURE_SIZE);
        m_pTexMap = new openni::RGB888Pixel[m_nTexMapX * m_nTexMapY];
    }

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnable(GL_DEPTH_TEST);
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();
    glOrtho(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0, -10000.0, 10000.0);

    if (depthFrame.isValid())
    {
        calculateHistogram(m_pDepthHist, MAX_DEPTH, depthFrame);
    }

    memset(m_pTexMap, 0, m_nTexMapX * m_nTexMapY * sizeof(openni::RGB888Pixel));

    float factor[3] = {1, 1, 1};

    // check if we need to draw depth frame to texture
    float av_x = 0;
    float av_y = 0;
    int counter = 0;
    for (int i = 0; i <= 7; i++)
        note_on[i] = false;

    if (depthFrame.isValid() && g_drawDepth)
    {
        const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)depthFrame.getData();
        const openni::DepthPixel* pDepthRow1 = pDepthRow;
        openni::RGB888Pixel* pTexRow = m_pTexMap + depthFrame.getCropOriginY() * m_nTexMapX;
        int rowSize = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel);

        glPointSize(2);
        glBegin(GL_POINTS);
        for (int y = 0; y < depthFrame.getHeight(); ++y)
        {
            const openni::DepthPixel* pDepth = pDepthRow;
            openni::RGB888Pixel* pTex = pTexRow + depthFrame.getCropOriginX();
            //chord_temp = 0;
            for (int x = 0; x < depthFrame.getWidth(); ++x, ++pDepth, ++pTex)
            {
                if (*pDepth != 0)
                {
                    // Color the texture pixel from the depth histogram
                    factor[0] = Colors[colorCount][0];
                    factor[1] = Colors[colorCount][1];
                    factor[2] = Colors[colorCount][2];
                    int nHistValue = m_pDepthHist[*pDepth];
                    pTex->r = nHistValue * factor[0];
                    pTex->g = nHistValue * factor[1];
                    pTex->b = nHistValue * factor[2];
                    factor[0] = factor[1] = factor[2] = 1;

                    if (*pDepth <= 800)
                    {
                        //glColor3f(1,0,0);
                        glColor3f(float(*pDepth) / 2000, float(*pDepth) / 2000, float(*pDepth) / 2000);
                        // Accumulate pixels closer than 80 cm for an average position
                        av_x = x + av_x;
                        counter++;
                        av_y = y + av_y;
                    }
                    else
                    {
                        glColor3f(float(*pDepth) / 2000, float(*pDepth) / 2000, float(*pDepth) / 2000);
                    }
                    glVertex3f(2 * x, 2 * y, -*pDepth);
                }
            }
            pDepthRow += rowSize;
            pTexRow += m_nTexMapX;
        }
        //......... the rest of the code is omitted here .........