This article collects typical usage examples of the C++ method openni::VideoFrameRef::getHeight. If you are unsure what VideoFrameRef::getHeight does, how to call it, or want to see it used in real code, the curated examples below should help. You can also browse further usage examples of the enclosing class, openni::VideoFrameRef.

The following presents 15 code examples of the VideoFrameRef::getHeight method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
Example 1: paint
void KinectCamera::paint(QPainter *painter)
{
    // If the device is not open yet, start the camera first;
    // otherwise go straight to reading and drawing a frame.
    if (!fig)
        startcamera();

    if (m_streamsource == "depth")
    {
        int iMaxDepth = mDepthStream.getMaxPixelValue();
        mDepthStream.readFrame( &mDepthFrame );

        // Wrap the 16-bit depth data, then scale it down to 8 bit
        const cv::Mat mImageDepth(
            mDepthFrame.getHeight(), mDepthFrame.getWidth(),
            CV_16UC1, (void*)mDepthFrame.getData() );
        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );

        // Grayscale palette for the indexed 8-bit image
        QVector<QRgb> colorTable;
        for (int k = 0; k < 256; ++k)
        {
            colorTable.push_back( qRgb(k, k, k) );
        }

        KinectDepthImage = QImage( (const unsigned char*)mScaledDepth.data,
                                   mDepthFrame.getWidth(), mDepthFrame.getHeight(),
                                   QImage::Format_Indexed8 );
        KinectDepthImage.setColorTable( colorTable );
        painter->drawImage( boundingRect().adjusted(1, 1, -1, -1), KinectDepthImage );
    }
    else
    {
        mColorStream.readFrame( &mColorFrame );
        KinectColorImage = QImage( (const unsigned char*)mColorFrame.getData(),
                                   mColorFrame.getWidth(), mColorFrame.getHeight(),
                                   QImage::Format_RGB888 );
        painter->drawImage( boundingRect().adjusted(1, 1, -1, -1), KinectColorImage );
    }
}
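The sample never shows how paint() gets invoked repeatedly. Below is a minimal sketch of one plausible driver, assuming KinectCamera derives from QQuickPaintedItem; the timer wiring is an assumption, and only fig, startcamera() and paint() come from the sample.

// Hypothetical repaint driver: a QTimer calls update(), which schedules
// another paint(), so every tick reads and draws a fresh frame.
#include <QQuickPaintedItem>
#include <QTimer>

class KinectCamera : public QQuickPaintedItem
{
    Q_OBJECT
public:
    KinectCamera()
    {
        auto *timer = new QTimer(this);
        connect(timer, &QTimer::timeout, this, [this]() { update(); });
        timer->start(33);   // ~30 fps, matching typical depth-sensor rates
    }

    void paint(QPainter *painter) override;   // body shown above
};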
Example 2: calculateHistogram
void calculateHistogram(float* pHistogram, int histogramSize, const openni::VideoFrameRef& frame)
{
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)frame.getData();

    // Calculate the cumulative histogram (the yellow display...)
    memset(pHistogram, 0, histogramSize*sizeof(float));
    int restOfRow = frame.getStrideInBytes() / sizeof(openni::DepthPixel) - frame.getWidth();
    int height = frame.getHeight();
    int width = frame.getWidth();

    unsigned int nNumberOfPoints = 0;
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x, ++pDepth)
        {
            if (*pDepth != 0)
            {
                pHistogram[*pDepth]++;
                nNumberOfPoints++;
            }
        }
        pDepth += restOfRow;
    }

    for (int nIndex = 1; nIndex < histogramSize; nIndex++)
    {
        pHistogram[nIndex] += pHistogram[nIndex-1];
    }

    if (nNumberOfPoints)
    {
        for (int nIndex = 1; nIndex < histogramSize; nIndex++)
        {
            pHistogram[nIndex] = (256 * (1.0f - (pHistogram[nIndex] / nNumberOfPoints)));
        }
    }
}
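A minimal usage sketch for this helper (the depthStream and depthFrame names are assumptions, not part of the sample): size the histogram to the stream's maximum pixel value plus one, fill it once per frame, then use it as a depth-to-brightness lookup table.

#include <vector>

// Hypothetical caller; depthStream is a started openni::VideoStream and
// depthFrame a freshly read, valid openni::VideoFrameRef.
std::vector<float> histogram(depthStream.getMaxPixelValue() + 1);
calculateHistogram(histogram.data(), (int)histogram.size(), depthFrame);

// histogram[d] is now a brightness in [0, 256): near depths come out
// bright, far depths dark, and unmeasured pixels (d == 0) stay black.
const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
unsigned char shade = (unsigned char)histogram[*pDepth];   // first pixel, as an example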
Example 3: calculateHistogram
/*
 * Function to compute the histogram of the depth image
 */
void calculateHistogram(int* pHistogram, int histogramSize, const openni::VideoFrameRef& depthFrame)
{
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
    int* pHistogram_temp = new int[histogramSize];
    int width = depthFrame.getWidth();
    int height = depthFrame.getHeight();

    // Calculate the cumulative histogram (the yellow HandSegmentation...)
    memset(pHistogram, 0, histogramSize*sizeof(int));
    memset(pHistogram_temp, 0, histogramSize*sizeof(int));
    int restOfRow = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel) - width;

    unsigned int nNumberOfPoints = 0;
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x, ++pDepth)
        {
            if (*pDepth != 0 && *pDepth <= MAX_DEPTH)
            {
                pHistogram_temp[*pDepth]++;
                nNumberOfPoints++;
            }
        }
        pDepth += restOfRow;
    }

    if (nNumberOfPoints)
    {
        for (int nIndex = 1; nIndex < histogramSize; nIndex++)
        {
            pHistogram_temp[nIndex] += pHistogram_temp[nIndex-1];
            pHistogram[nIndex] = (int)(256 * (1.0f - ((float)pHistogram_temp[nIndex] / nNumberOfPoints)));
        }
    }

    delete[] pHistogram_temp;   // release the temporary histogram
}
Example 4: convert_depth_map
void convert_depth_map(const openni::VideoFrameRef &in, cv::Mat& out)
{
    const void *data = in.getData();
    int sizes[2] = {in.getHeight(), in.getWidth()};
    cv::Mat s1, s2, s3;
    s1 = cv::Mat(2, sizes, CV_16UC1, (void*)data);
    cv::normalize(s1, s2, 0, 255, CV_MINMAX, CV_8UC1);
    cv::cvtColor(s2, out, CV_GRAY2BGR);
    /*
    const nite::UserId* pLabels = map.getPixels();
    for (int y = 0; y < map.getHeight(); ++y)
    {
        for (int x = 0; x < map.getWidth(); ++x, ++pLabels)
        {
            uint16_t &v = s1.at<uint16_t>(cv::Point(x,y));
            if (!*pLabels)
                v = 0;
        }
    }
    */
    // cv::normalize(s1, out, 0, 255, CV_MINMAX, CV_8UC1);
}
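A short usage sketch for the converter above (device and stream setup are assumed, not shown): read one frame and display the normalized 8-bit result.

// Hypothetical caller; depthStream is an already-started openni::VideoStream.
openni::VideoFrameRef frame;
cv::Mat vis;
if (depthStream.readFrame(&frame) == openni::STATUS_OK)
{
    convert_depth_map(frame, vis);
    cv::imshow("depth", vis);
    cv::waitKey(1);
}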
Example 5: displayFrame
void SampleViewer::displayFrame(const openni::VideoFrameRef& frame)
{
    if (!frame.isValid())
        return;

    const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)frame.getData();
    openni::RGB888Pixel* pTexRow = m_pTexMap + frame.getCropOriginY() * m_nTexMapX;
    int rowSize = frame.getStrideInBytes() / sizeof(openni::DepthPixel);

    for (int y = 0; y < frame.getHeight(); ++y)
    {
        const openni::DepthPixel* pDepth = pDepthRow;
        openni::RGB888Pixel* pTex = pTexRow + frame.getCropOriginX();

        for (int x = 0; x < frame.getWidth(); ++x, ++pDepth, ++pTex)
        {
            if (*pDepth != 0)
            {
                int nHistValue = m_pDepthHist[*pDepth];
                pTex->r = nHistValue;
                pTex->g = nHistValue;
                pTex->b = nHistValue;
            }
        }

        pDepthRow += rowSize;
        pTexRow += m_nTexMapX;
    }
}
Example 6: cvt_oniimage
inline void cvt_oniimage(openni::VideoFrameRef src, image &to, const MemOp &m)
{
    const void* data = src.getData();
    void* datab = const_cast<void*>(data);
    to = image(src.getWidth(), src.getHeight(), src.getStrideInBytes(), datab, m);
    to.set_format(image::FORMAT_DEPTH_16);
}
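Note that this conversion is zero-copy: the image wraps the frame's buffer directly (hence the const_cast), so the VideoFrameRef must stay alive for as long as the resulting image is used.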
Example 7: Calculate
void CGraph::Calculate(float* pHistogram, int histogramSize, const openni::VideoFrameRef& depthFrame)
{
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
    int width = depthFrame.getWidth();
    int height = depthFrame.getHeight();

    memset(pHistogram, 0, histogramSize*sizeof(float));
    int restOfRow = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel) - width;

    unsigned int nNumberOfPoints = 0;
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x, ++pDepth)
        {
            if (*pDepth != 0)
            {
                pHistogram[*pDepth]++;
                nNumberOfPoints++;
            }
        }
        pDepth += restOfRow;
    }

    for (int nIndex = 1; nIndex < histogramSize; nIndex++)
    {
        pHistogram[nIndex] += pHistogram[nIndex - 1];
    }

    if (nNumberOfPoints)
    {
        for (int nIndex = 1; nIndex < histogramSize; nIndex++)
        {
            pHistogram[nIndex] = (256 * (1.0f - (pHistogram[nIndex] / nNumberOfPoints)));
        }
    }
}
Example 8: SetDepthFrame
void GeomDepthCalculator::SetDepthFrame(openni::VideoFrameRef& depthFrame)
{
    int w = depthFrame.getWidth();
    int h = depthFrame.getHeight();
    const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)depthFrame.getData();
    int rowSize = depthFrame.getStrideInBytes();
    rowSize /= sizeof(openni::DepthPixel);

    DepthFrame::FrameData<ushort>* frame = 0;
    if (m_frames.size() < m_maxFrames)
    {
        frame = new DepthFrame::FrameData<ushort>();
    }
    else
    {
        frame = *m_frames.begin();
        m_frames.erase(m_frames.begin());
    }

    frame->copyData(pDepthRow, w, h);
    m_frames.push_back(frame);

    m_avgFrame.createData(w, h);
    _averageFrames();
    m_frame->SetRawData(m_avgFrame.Data(), w, h);
    m_frame->CalculateDepth();
    if (m_calcNormals)
        m_frame->CalculateNormals();
}
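The _averageFrames() helper is not shown in the sample. The standalone sketch below illustrates what it presumably does, averaging the buffered frames per pixel to suppress depth noise; the function is an illustration under that assumption, not the sample's actual code.

#include <cstdint>
#include <vector>

// Average equally sized 16-bit depth buffers pixel by pixel; zero pixels
// (no reading) are skipped so dropouts do not drag the average toward zero.
void averageDepthFrames(const std::vector<const uint16_t*>& frames,
                        int w, int h, uint16_t* out)
{
    for (int i = 0; i < w * h; ++i)
    {
        uint32_t sum = 0, n = 0;
        for (const uint16_t* f : frames)
            if (f[i] != 0) { sum += f[i]; ++n; }
        out[i] = n ? (uint16_t)(sum / n) : 0;
    }
}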
Example 9: copyFrame
// NI_PIXEL and MRPT_DATA are template parameters; the template header was
// presumably dropped when the snippet was extracted, so it is restored here.
template <class NI_PIXEL, class MRPT_DATA>
void copyFrame(openni::VideoFrameRef& frame, MRPT_DATA& dst)
{
    const char* data = (const char*)frame.getData();
    const int stride = frame.getStrideInBytes();
    const int width = frame.getWidth();
    const int height = frame.getHeight();

    resize(dst, width, height);
    for (int y = 0; y < height; ++y, data += stride)
    {
        copyRow<NI_PIXEL, MRPT_DATA>(data, dst, width, y);
    }
}
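resize() and copyRow() are left to the caller in this sample. A hypothetical pair for 16-bit depth frames, using a simple stand-in container rather than a real MRPT type, shows the shape they might take:

#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative stand-in for MRPT_DATA: a flat 16-bit image buffer.
struct DepthBuffer
{
    int width = 0, height = 0;
    std::vector<uint16_t> px;
};

inline void resize(DepthBuffer& dst, int width, int height)
{
    dst.width = width;
    dst.height = height;
    dst.px.resize((size_t)width * height);
}

// Copy one row of raw NI pixels into row y of the destination.
template <class NI_PIXEL, class DST>
void copyRow(const char* src, DST& dst, int width, int y)
{
    std::memcpy(&dst.px[(size_t)y * width], src, width * sizeof(NI_PIXEL));
}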
Example 10: showDepthStream
cv::Mat showDepthStream( const openni::VideoFrameRef& depthFrame )
{
    // Wrap the distance data as a 16-bit image
    cv::Mat depthImage = cv::Mat( depthFrame.getHeight(),
                                  depthFrame.getWidth(),
                                  CV_16UC1, (unsigned short*)depthFrame.getData() );

    // Scale the 0-10000 mm range down to 0-255 (8 bit)
    depthImage.convertTo( depthImage, CV_8U, 255.0 / 10000 );

    return depthImage;
}
Example 11: showColorStream
// Convert the color stream into a displayable form
cv::Mat showColorStream( const openni::VideoFrameRef& colorFrame )
{
    // Wrap the frame as an OpenCV image
    cv::Mat colorImage = cv::Mat( colorFrame.getHeight(),
                                  colorFrame.getWidth(),
                                  CV_8UC3, (unsigned char*)colorFrame.getData() );

    // Convert the RGB byte order to OpenCV's BGR
    cv::cvtColor( colorImage, colorImage, CV_RGB2BGR );

    return colorImage;
}
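A minimal capture loop tying showDepthStream and showColorStream together (OpenNI initialization and stream creation are assumed, not shown):

// Hypothetical main loop; colorStream and depthStream are started VideoStreams.
while (true)
{
    openni::VideoFrameRef colorFrame, depthFrame;
    colorStream.readFrame(&colorFrame);
    depthStream.readFrame(&depthFrame);

    cv::imshow("Color", showColorStream(colorFrame));
    cv::imshow("Depth", showDepthStream(depthFrame));

    if (cv::waitKey(10) == 'q')
        break;
}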
Example 12: toCVTImage
static void toCVTImage( Image& dst, const openni::VideoFrameRef& frame )
{
    dst.reallocate( frame.getWidth(), frame.getHeight(), Openni2Helper::toIFormat( frame.getVideoMode().getPixelFormat() ) );
    switch( frame.getVideoMode().getPixelFormat() ) {
        case openni::PIXEL_FORMAT_RGB888:
            copyRGB( dst, ( const uint8_t* )frame.getData(), frame.getStrideInBytes() );
            break;
        default:
            copyData( dst, ( const uint8_t* )frame.getData(), frame.getStrideInBytes() );
    }
}
Example 13: showColorStream
// Convert the color stream into a displayable form
cv::Mat showColorStream( const openni::VideoFrameRef& colorFrame )
{
    cv::Mat colorImage;

    // Color stream
    if ( colorFrame.getVideoMode().getPixelFormat() ==
             openni::PIXEL_FORMAT_RGB888 ) {
        // Wrap the frame as an OpenCV image
        colorImage = cv::Mat( colorFrame.getHeight(),
                              colorFrame.getWidth(),
                              CV_8UC3, (unsigned char*)colorFrame.getData() );

        // Convert the RGB byte order to OpenCV's BGR
        cv::cvtColor( colorImage, colorImage, CV_RGB2BGR );
    }
    // Xtion IR stream
    else if ( colorFrame.getVideoMode().getPixelFormat() ==
             openni::PIXEL_FORMAT_GRAY16 ) {
        // The Xtion's IR format is 16-bit grayscale, but it appears to use
        // only about 255 levels, so nothing is visible unless it is
        // converted down to CV_8U
        colorImage = cv::Mat( colorFrame.getHeight(),
                              colorFrame.getWidth(),
                              CV_16UC1, (unsigned short*)colorFrame.getData() );
        colorImage.convertTo( colorImage, CV_8U );
    }
    // Kinect for Windows IR stream
    else {
        // The Kinect's IR format here is 8-bit grayscale
        // (the Kinect SDK itself uses 16-bit grayscale)
        colorImage = cv::Mat( colorFrame.getHeight(),
                              colorFrame.getWidth(),
                              CV_8UC1, (unsigned char*)colorFrame.getData() );
    }

    return colorImage;
}
Example 14: convert_pixel_map
void convert_pixel_map(const openni::VideoFrameRef &in, cv::Mat& out)
{
    const void *data = in.getData();
    int sizes[2] = {in.getHeight(), in.getWidth()};
    cv::Mat s1, &s2 = out;
    s1 = cv::Mat(2, sizes, CV_8UC3, (void *)data);
    cv::cvtColor(s1, s2, CV_RGB2BGR);
    /*
    const nite::UserId* pLabels = map.getPixels();
    for (int y = 0; y < map.getHeight(); ++y)
    {
        for (int x = 0; x < map.getWidth(); ++x, ++pLabels)
        {
            cv::Vec3b &v = s2.at<cv::Vec3b>(cv::Point(x,y));
            if (*pLabels == 0)
                v = cv::Vec3b(0,0,0);
        }
    }
    */
}
Example 15: display
void SampleViewer::display()
{
    int changedIndex;
    openni::Status rc = openni::OpenNI::waitForAnyStream(m_streams, 2, &changedIndex);
    if (rc != openni::STATUS_OK)
    {
        printf("Wait failed\n");
        return;
    }

    switch (changedIndex)
    {
    case 0:
        m_depthStream.readFrame(&m_depthFrame); break;
    case 1:
        m_colorStream.readFrame(&m_colorFrame); break;
    default:
        printf("Error in wait\n");
    }

    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();
    glOrtho(0, GL_WIN_SIZE_X, GL_WIN_SIZE_Y, 0, -1.0, 1.0);

    if (m_depthFrame.isValid())
    {
        calculateHistogram(m_pDepthHist, MAX_DEPTH, m_depthFrame);
    }

    memset(m_pTexMap, 0, m_nTexMapX*m_nTexMapY*sizeof(openni::RGB888Pixel));

    // check if we need to draw image frame to texture
    if ((m_eViewState == DISPLAY_MODE_OVERLAY ||
         m_eViewState == DISPLAY_MODE_IMAGE) && m_colorFrame.isValid())
    {
        const openni::RGB888Pixel* pImageRow = (const openni::RGB888Pixel*)m_colorFrame.getData();
        openni::RGB888Pixel* pTexRow = m_pTexMap + m_colorFrame.getCropOriginY() * m_nTexMapX;
        int rowSize = m_colorFrame.getStrideInBytes() / sizeof(openni::RGB888Pixel);

        for (int y = 0; y < m_colorFrame.getHeight(); ++y)
        {
            const openni::RGB888Pixel* pImage = pImageRow;
            openni::RGB888Pixel* pTex = pTexRow + m_colorFrame.getCropOriginX();

            for (int x = 0; x < m_colorFrame.getWidth(); ++x, ++pImage, ++pTex)
            {
                *pTex = *pImage;
            }

            pImageRow += rowSize;
            pTexRow += m_nTexMapX;
        }
    }

    // check if we need to draw depth frame to texture
    if ((m_eViewState == DISPLAY_MODE_OVERLAY ||
         m_eViewState == DISPLAY_MODE_DEPTH) && m_depthFrame.isValid())
    {
        const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)m_depthFrame.getData();
        openni::RGB888Pixel* pTexRow = m_pTexMap + m_depthFrame.getCropOriginY() * m_nTexMapX;
        int rowSize = m_depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel);

        for (int y = 0; y < m_depthFrame.getHeight(); ++y)
        {
            const openni::DepthPixel* pDepth = pDepthRow;
            openni::RGB888Pixel* pTex = pTexRow + m_depthFrame.getCropOriginX();

            for (int x = 0; x < m_depthFrame.getWidth(); ++x, ++pDepth, ++pTex)
            {
                if (*pDepth != 0)
                {
                    int nHistValue = m_pDepthHist[*pDepth];
                    pTex->r = nHistValue;
                    pTex->g = nHistValue;
                    pTex->b = 0;
                }
            }

            pDepthRow += rowSize;
            pTexRow += m_nTexMapX;
        }
    }

    glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_nTexMapX, m_nTexMapY, 0, GL_RGB, GL_UNSIGNED_BYTE, m_pTexMap);

    // Display the OpenGL texture map
    glColor4f(1, 1, 1, 1);

    glBegin(GL_QUADS);

    int nXRes = m_width;
    int nYRes = m_height;

    // upper left
    //......... rest of the code omitted .........