This article collects typical usage examples of the C++ method openni::VideoFrameRef::getData. If you are wondering what VideoFrameRef::getData does, how to call it, or want to see it in real code, the curated examples below should help. You can also read more about its containing class, openni::VideoFrameRef.
Fifteen code examples of VideoFrameRef::getData are shown below, sorted by popularity by default.
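All of the examples assume a VideoFrameRef that was filled by a running VideoStream. For reference, here is a minimal sketch of that setup using the standard OpenNI2 API (error handling omitted; the choice of a depth stream is arbitrary):

#include <OpenNI.h>

int main()
{
    // Initialize OpenNI and open the first available device
    openni::OpenNI::initialize();
    openni::Device device;
    device.open(openni::ANY_DEVICE);

    // Create and start a depth stream, then grab one frame
    openni::VideoStream depth;
    depth.create(device, openni::SENSOR_DEPTH);
    depth.start();

    openni::VideoFrameRef frame;
    depth.readFrame(&frame);

    // getData() returns a read-only pointer into the frame buffer;
    // it stays valid only as long as the VideoFrameRef does
    const openni::DepthPixel* pixels =
        (const openni::DepthPixel*)frame.getData();
    (void)pixels;

    depth.stop();
    depth.destroy();
    device.close();
    openni::OpenNI::shutdown();
    return 0;
}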
Example 1: paint
void KinectCamera::paint(QPainter *painter)
{
    if (!fig)  // device not opened yet: start the camera first
        startcamera();

    if (m_streamsource == "depth")
    {
        int iMaxDepth = mDepthStream.getMaxPixelValue();
        mDepthStream.readFrame( &mDepthFrame );

        // Wrap the 16-bit depth buffer in a cv::Mat (no copy), then scale to 8 bit
        const cv::Mat mImageDepth(
            mDepthFrame.getHeight(), mDepthFrame.getWidth(),
            CV_16UC1, (void*)mDepthFrame.getData() );
        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );

        // Grayscale palette for the indexed 8-bit image
        QVector<QRgb> colorTable;
        for (int k = 0; k < 256; ++k)
            colorTable.push_back( qRgb(k, k, k) );

        KinectDepthImage = QImage((const unsigned char*)mScaledDepth.data,
                                  mDepthFrame.getWidth(), mDepthFrame.getHeight(),
                                  QImage::Format_Indexed8);
        KinectDepthImage.setColorTable(colorTable);
        painter->drawImage(boundingRect().adjusted(1, 1, -1, -1), KinectDepthImage);
    }
    else
    {
        mColorStream.readFrame( &mColorFrame );
        KinectColorImage = QImage((const unsigned char*)mColorFrame.getData(),
                                  mColorFrame.getWidth(), mColorFrame.getHeight(),
                                  QImage::Format_RGB888);
        painter->drawImage(boundingRect().adjusted(1, 1, -1, -1), KinectColorImage);
    }
}
Example 2: toCVTImage
static void toCVTImage( Image& dst, const openni::VideoFrameRef& frame )
{
    // Size the destination to the frame and map the OpenNI pixel format
    dst.reallocate( frame.getWidth(), frame.getHeight(), Openni2Helper::toIFormat( frame.getVideoMode().getPixelFormat() ) );
    switch( frame.getVideoMode().getPixelFormat() ) {
        case openni::PIXEL_FORMAT_RGB888:
            copyRGB( dst, ( const uint8_t* )frame.getData(), frame.getStrideInBytes() );
            break;
        default:
            copyData( dst, ( const uint8_t* )frame.getData(), frame.getStrideInBytes() );
            break;
    }
}
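The copyRGB and copyData helpers belong to the surrounding project and are not shown here. As a rough illustration of what such a stride-aware copy has to do, here is a self-contained sketch; the helper name and signature are assumptions for illustration, not part of the cvt API:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical helper: copy 'height' rows of 'rowBytes' payload each from a
// source whose rows are 'srcStride' bytes apart into a tightly packed buffer.
// OpenNI frames may pad rows, so rowBytes can be smaller than srcStride.
static void copyRows( uint8_t* dst, const uint8_t* src,
                      size_t rowBytes, size_t height, size_t srcStride )
{
    for ( size_t y = 0; y < height; ++y )
        std::memcpy( dst + y * rowBytes, src + y * srcStride, rowBytes );
}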
Example 3: displayFrame
void SampleViewer::displayFrame(const openni::VideoFrameRef& frame)
{
    if (!frame.isValid())
        return;

    const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)frame.getData();
    openni::RGB888Pixel* pTexRow = m_pTexMap + frame.getCropOriginY() * m_nTexMapX;
    int rowSize = frame.getStrideInBytes() / sizeof(openni::DepthPixel);

    for (int y = 0; y < frame.getHeight(); ++y)
    {
        const openni::DepthPixel* pDepth = pDepthRow;
        openni::RGB888Pixel* pTex = pTexRow + frame.getCropOriginX();

        for (int x = 0; x < frame.getWidth(); ++x, ++pDepth, ++pTex)
        {
            if (*pDepth != 0)
            {
                // Map the depth value to a gray level via the precomputed histogram
                int nHistValue = m_pDepthHist[*pDepth];
                pTex->r = nHistValue;
                pTex->g = nHistValue;
                pTex->b = nHistValue;
            }
        }

        pDepthRow += rowSize;
        pTexRow += m_nTexMapX;
    }
}
Example 4: cvt_oniimage
inline void cvt_oniimage(openni::VideoFrameRef src, image &to, const MemOp &m)
{
    // The image constructor wants a mutable pointer, but the frame buffer is read-only
    const void* data = src.getData();
    void* datab = const_cast<void*>(data);
    to = image(src.getWidth(), src.getHeight(), src.getStrideInBytes(), datab, m);
    to.set_format(image::FORMAT_DEPTH_16);
}
Example 5: convert_depth_map
void convert_depth_map(const openni::VideoFrameRef &in, cv::Mat& out)
{
    const void *data = in.getData();
    int sizes[2] = {in.getHeight(), in.getWidth()};

    // Wrap the 16-bit depth buffer, stretch it to 8 bit, then expand to BGR
    cv::Mat s1, s2;
    s1 = cv::Mat(2, sizes, CV_16UC1, (void*)data);
    cv::normalize(s1, s2, 0, 255, CV_MINMAX, CV_8UC1);
    cv::cvtColor(s2, out, CV_GRAY2BGR);
    /*
    const nite::UserId* pLabels = map.getPixels();
    for (int y = 0; y < map.getHeight(); ++y)
    {
        for (int x = 0; x < map.getWidth(); ++x, ++pLabels)
        {
            uint16_t &v = s1.at<uint16_t>(cv::Point(x, y));
            if (!*pLabels)
                v = 0;
        }
    }
    */
    // cv::normalize(s1, out, 0, 255, CV_MINMAX, CV_8UC1);
}
Example 6: calculateHistogram
/*
 * Function to compute the display histogram of a depth image
 */
void calculateHistogram(int* pHistogram, int histogramSize, const openni::VideoFrameRef& depthFrame)
{
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
    int* pHistogram_temp = new int[histogramSize];
    int width = depthFrame.getWidth();
    int height = depthFrame.getHeight();

    // Calculate the cumulative histogram (the yellow HandSegmentation...)
    memset(pHistogram, 0, histogramSize * sizeof(int));
    memset(pHistogram_temp, 0, histogramSize * sizeof(int));

    // Rows may be padded beyond width pixels; skip the padding at each row end
    int restOfRow = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel) - width;

    unsigned int nNumberOfPoints = 0;
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x, ++pDepth)
        {
            if (*pDepth != 0 && *pDepth <= MAX_DEPTH)
            {
                pHistogram_temp[*pDepth]++;
                nNumberOfPoints++;
            }
        }
        pDepth += restOfRow;
    }

    if (nNumberOfPoints)
    {
        for (int nIndex = 1; nIndex < histogramSize; nIndex++)
        {
            pHistogram_temp[nIndex] += pHistogram_temp[nIndex - 1];
            pHistogram[nIndex] = (int)(256 * (1.0f - ((float)pHistogram_temp[nIndex] / nNumberOfPoints)));
        }
    }

    // Release the scratch buffer
    delete[] pHistogram_temp;
}
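This cumulative-histogram mapping recurs in Examples 8 and 9 below: each non-zero depth value d is assigned the gray level 256 * (1 - CDF(d)), where CDF(d) is the fraction of valid pixels at depth d or nearer. Heavily populated depth ranges are therefore stretched across the brightness scale, while sparsely populated ranges are compressed.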
Example 7: setPixels
void ColorStream::setPixels(openni::VideoFrameRef frame)
{
    Stream::setPixels(frame);

    openni::VideoMode m = frame.getVideoMode();
    int w = m.getResolutionX();
    int h = m.getResolutionY();
    int num_pixels = w * h;

    pix.allocate(w, h, 3);

    if (m.getPixelFormat() == openni::PIXEL_FORMAT_RGB888)
    {
        // Straight RGB copy; assumes the frame rows are tightly packed (no stride padding)
        const unsigned char *src = (const unsigned char*)frame.getData();
        unsigned char *dst = pix.getBackBuffer().getPixels();
        for (int i = 0; i < num_pixels; i++)
        {
            dst[0] = src[0];
            dst[1] = src[1];
            dst[2] = src[2];
            src += 3;
            dst += 3;
        }
    }

    pix.swap();
}
Example 8: calculateHistogram
void calculateHistogram(float* pHistogram, int histogramSize, const openni::VideoFrameRef& frame)
{
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)frame.getData();

    // Calculate the cumulative histogram (the yellow display...)
    memset(pHistogram, 0, histogramSize * sizeof(float));
    int restOfRow = frame.getStrideInBytes() / sizeof(openni::DepthPixel) - frame.getWidth();
    int height = frame.getHeight();
    int width = frame.getWidth();

    unsigned int nNumberOfPoints = 0;
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x, ++pDepth)
        {
            if (*pDepth != 0)
            {
                pHistogram[*pDepth]++;
                nNumberOfPoints++;
            }
        }
        pDepth += restOfRow;
    }

    for (int nIndex = 1; nIndex < histogramSize; nIndex++)
    {
        pHistogram[nIndex] += pHistogram[nIndex - 1];
    }

    if (nNumberOfPoints)
    {
        for (int nIndex = 1; nIndex < histogramSize; nIndex++)
        {
            pHistogram[nIndex] = (256 * (1.0f - (pHistogram[nIndex] / nNumberOfPoints)));
        }
    }
}
Example 9: Calculate
void CGraph::Calculate(float* pHistogram, int histogramSize, const openni::VideoFrameRef& depthFrame)
{
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
    int width = depthFrame.getWidth();
    int height = depthFrame.getHeight();

    memset(pHistogram, 0, histogramSize * sizeof(float));
    int restOfRow = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel) - width;

    unsigned int nNumberOfPoints = 0;
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x, ++pDepth)
        {
            if (*pDepth != 0)
            {
                pHistogram[*pDepth]++;
                nNumberOfPoints++;
            }
        }
        pDepth += restOfRow;
    }

    for (int nIndex = 1; nIndex < histogramSize; nIndex++)
    {
        pHistogram[nIndex] += pHistogram[nIndex - 1];
    }

    if (nNumberOfPoints)
    {
        for (int nIndex = 1; nIndex < histogramSize; nIndex++)
        {
            pHistogram[nIndex] = (256 * (1.0f - (pHistogram[nIndex] / nNumberOfPoints)));
        }
    }
}
Example 10: SetDepthFrame
void GeomDepthCalculator::SetDepthFrame(openni::VideoFrameRef& depthFrame)
{
    int w = depthFrame.getWidth();
    int h = depthFrame.getHeight();
    const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)depthFrame.getData();
    int rowSize = depthFrame.getStrideInBytes();
    rowSize /= sizeof(openni::DepthPixel);

    // Recycle the oldest frame buffer once the ring of m_maxFrames is full
    DepthFrame::FrameData<ushort>* frame = 0;
    if (m_frames.size() < m_maxFrames)
    {
        frame = new DepthFrame::FrameData<ushort>();
    }
    else
    {
        frame = *m_frames.begin();
        m_frames.erase(m_frames.begin());
    }

    frame->copyData(pDepthRow, w, h);
    m_frames.push_back(frame);

    // Average the buffered frames to reduce depth noise
    m_avgFrame.createData(w, h);
    _averageFrames();
    m_frame->SetRawData(m_avgFrame.Data(), w, h);
    m_frame->CalculateDepth();
    if (m_calcNormals)
        m_frame->CalculateNormals();
}
Example 11: copyFrame
// Copy a frame row by row, honoring the source stride
template <class NI_PIXEL, class MRPT_DATA>
void copyFrame(openni::VideoFrameRef& frame, MRPT_DATA& dst)
{
    const char* data = (const char*)frame.getData();
    const int stride = frame.getStrideInBytes();
    const int width = frame.getWidth();
    const int height = frame.getHeight();
    resize(dst, width, height);
    for (int y = 0; y < height; ++y, data += stride)
    {
        copyRow<NI_PIXEL, MRPT_DATA>(data, dst, width, y);
    }
}
Example 12: showDepthStream
cv::Mat showDepthStream( const openni::VideoFrameRef& depthFrame )
{
    // Wrap the 16-bit depth data as an image
    cv::Mat depthImage = cv::Mat( depthFrame.getHeight(),
                                  depthFrame.getWidth(),
                                  CV_16UC1, (unsigned short*)depthFrame.getData() );

    // Map the 0-10000 mm range onto 0-255 (8 bit)
    depthImage.convertTo( depthImage, CV_8U, 255.0 / 10000 );
    return depthImage;
}
Example 13: showColorStream
// Convert the color stream into a displayable form
cv::Mat showColorStream( const openni::VideoFrameRef& colorFrame )
{
    // Wrap the frame in an OpenCV Mat
    cv::Mat colorImage = cv::Mat( colorFrame.getHeight(),
                                  colorFrame.getWidth(),
                                  CV_8UC3, (unsigned char*)colorFrame.getData() );

    // Convert the RGB byte order to OpenCV's BGR
    cv::cvtColor( colorImage, colorImage, CV_RGB2BGR );
    return colorImage;
}
Example 14: getDepthImage
// CV_16U
cv::Mat getDepthImage(openni::VideoFrameRef& depth_frame)
{
    if (!depth_frame.isValid())
    {
        return cv::Mat();
    }

    openni::VideoMode video_mode = depth_frame.getVideoMode();
    cv::Mat depth_img = cv::Mat(video_mode.getResolutionY(),
                                video_mode.getResolutionX(),
                                CV_16U, (char*)depth_frame.getData());

    // clone() makes a deep copy, so the Mat outlives the frame buffer
    return depth_img.clone();
}
Example 15: getColorImage
cv::Mat getColorImage(openni::VideoFrameRef& color_frame)
{
    if (!color_frame.isValid())
    {
        return cv::Mat();
    }

    openni::VideoMode video_mode = color_frame.getVideoMode();
    cv::Mat color_img = cv::Mat(video_mode.getResolutionY(),
                                video_mode.getResolutionX(),
                                CV_8UC3, (char*)color_frame.getData());

    // cvtColor writes into a fresh Mat, so no extra clone() is needed here
    cv::Mat ret_img;
    cv::cvtColor(color_img, ret_img, CV_RGB2BGR);
    return ret_img;
}
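As a usage illustration, these last two helpers drop into a simple display loop. This is a sketch, not code from the original project: it assumes depthStream and colorStream were created and started as in the setup snippet at the top of this article.

// Hypothetical display loop for getDepthImage / getColorImage; assumes
// running depthStream and colorStream as in the intro sketch.
openni::VideoFrameRef depth_frame, color_frame;
while (cv::waitKey(1) != 27)  // quit on Esc
{
    depthStream.readFrame(&depth_frame);
    colorStream.readFrame(&color_frame);
    cv::imshow("depth", getDepthImage(depth_frame));  // CV_16U; imshow scales it for display
    cv::imshow("color", getColorImage(color_frame));  // BGR, ready for display
}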