本文整理汇总了C++中openni::VideoFrameRef类的典型用法代码示例。如果您正苦于以下问题:C++ VideoFrameRef类的具体用法?C++ VideoFrameRef怎么用?C++ VideoFrameRef使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了VideoFrameRef类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: cvt_oniimage
// Wraps an OpenNI frame's pixel buffer in an `image` (16-bit depth format)
// without copying the data; `m` controls the image's memory policy.
inline void cvt_oniimage(openni::VideoFrameRef src, image &to, const MemOp &m)
{
    // The image constructor wants a mutable pointer, so strip const from
    // the frame's read-only buffer.
    void* pixels = const_cast<void*>(src.getData());
    to = image(src.getWidth(), src.getHeight(), src.getStrideInBytes(), pixels, m);
    to.set_format(image::FORMAT_DEPTH_16);
}
示例2: convert_depth_map
// Converts a 16-bit OpenNI depth frame into an 8-bit BGR image for display.
// Fix: the original declared an extra cv::Mat (`s3`) that was never used,
// and carried commented-out code referencing an undefined `map` variable.
void convert_depth_map(const openni::VideoFrameRef &in, cv::Mat& out)
{
    const void *data = in.getData();
    int sizes[2] = {in.getHeight(), in.getWidth()};
    // s1 wraps the frame buffer in place (no copy); s2 receives the
    // normalized 8-bit copy.
    cv::Mat s1, s2;
    s1 = cv::Mat(2, sizes, CV_16UC1, (void*)data);
    // Stretch whatever depth range is present onto 0-255.
    cv::normalize(s1, s2, 0, 255, CV_MINMAX, CV_8UC1);
    // Expand single-channel gray to 3-channel BGR for rendering.
    cv::cvtColor(s2, out, CV_GRAY2BGR);
}
示例3: Calculate
// Builds a cumulative depth histogram and converts it into per-depth display
// intensities in [0, 256): depths that occur often (large cumulative count)
// map to darker values, rare/near depths to brighter ones.
void CGraph::Calculate(float* pHistogram, int histogramSize, const openni::VideoFrameRef& depthFrame)
{
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
    int width = depthFrame.getWidth();
    int height = depthFrame.getHeight();
    memset(pHistogram, 0, histogramSize * sizeof(float));
    // Rows may be padded beyond width; skip the padding pixels per row.
    int restOfRow = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel) - width;
    unsigned int nNumberOfPoints = 0;
    // Count occurrences of each non-zero depth value.
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x, ++pDepth)
        {
            // Fix: bounds check added. The original indexed unconditionally,
            // so any depth value >= histogramSize wrote past the end of
            // pHistogram (undefined behavior / memory corruption).
            if (*pDepth != 0 && *pDepth < histogramSize)
            {
                pHistogram[*pDepth]++;
                nNumberOfPoints++;
            }
        }
        pDepth += restOfRow;
    }
    // Accumulate counts into a cumulative distribution...
    for (int nIndex = 1; nIndex < histogramSize; nIndex++)
    {
        pHistogram[nIndex] += pHistogram[nIndex - 1];
    }
    // ...then invert and scale to display intensities.
    if (nNumberOfPoints)
    {
        for (int nIndex = 1; nIndex < histogramSize; nIndex++)
        {
            pHistogram[nIndex] = (256 * (1.0f - (pHistogram[nIndex] / nNumberOfPoints)));
        }
    }
}
示例4: calculateHistogram
/*
 * Function to draw histogram of depth image.
 * Fills pHistogram[d] with a display intensity in [0, 256) derived from the
 * cumulative distribution of depth d across the frame.
 */
void calculateHistogram(int* pHistogram, int histogramSize, const openni::VideoFrameRef& depthFrame)
{
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
    // Scratch buffer for raw/cumulative counts.
    // Fix: the original leaked this allocation on every call (new[] with no
    // matching delete[]).
    int* pHistogram_temp = new int[histogramSize];
    int width = depthFrame.getWidth();
    int height = depthFrame.getHeight();
    // Calculate the accumulative histogram (the yellow HandSegmentation...)
    memset(pHistogram, 0, histogramSize * sizeof(int));
    memset(pHistogram_temp, 0, histogramSize * sizeof(int));
    // Rows may be padded beyond width; skip the tail of each row.
    int restOfRow = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel) - width;
    unsigned int nNumberOfPoints = 0;
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x, ++pDepth)
        {
            // Fix: extra guard (*pDepth < histogramSize) prevents an
            // out-of-bounds write when MAX_DEPTH exceeds the caller's
            // histogram size.
            if (*pDepth != 0 && *pDepth <= MAX_DEPTH && *pDepth < histogramSize)
            {
                pHistogram_temp[*pDepth]++;
                nNumberOfPoints++;
            }
        }
        pDepth += restOfRow;
    }
    if (nNumberOfPoints)
    {
        for (int nIndex = 1; nIndex < histogramSize; nIndex++)
        {
            // Accumulate into a CDF, then invert/scale to an 8-bit intensity.
            pHistogram_temp[nIndex] += pHistogram_temp[nIndex - 1];
            pHistogram[nIndex] = (int)(256 * (1.0f - ((float)pHistogram_temp[nIndex] / nNumberOfPoints)));
        }
    }
    delete[] pHistogram_temp; // fix: release the scratch buffer
}
示例5: calculateHistogram
// Fills pHistogram with display intensities in [0, 256) derived from the
// cumulative distribution of depth values in the frame: common depths map
// to darker values, rare/near depths to brighter ones.
void calculateHistogram(float* pHistogram, int histogramSize, const openni::VideoFrameRef& frame)
{
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)frame.getData();
    // Calculate the accumulative histogram (the yellow display...)
    memset(pHistogram, 0, histogramSize * sizeof(float));
    // Rows may be padded beyond width; skip the padding after each row.
    int restOfRow = frame.getStrideInBytes() / sizeof(openni::DepthPixel) - frame.getWidth();
    int height = frame.getHeight();
    int width = frame.getWidth();
    unsigned int nNumberOfPoints = 0;
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x, ++pDepth)
        {
            // Fix: bounds check added. The original indexed unconditionally,
            // so a depth value >= histogramSize wrote past the end of
            // pHistogram (undefined behavior).
            if (*pDepth != 0 && *pDepth < histogramSize)
            {
                pHistogram[*pDepth]++;
                nNumberOfPoints++;
            }
        }
        pDepth += restOfRow;
    }
    // Accumulate the counts into a CDF.
    for (int nIndex = 1; nIndex < histogramSize; nIndex++)
    {
        pHistogram[nIndex] += pHistogram[nIndex - 1];
    }
    // Invert and scale to display intensities.
    if (nNumberOfPoints)
    {
        for (int nIndex = 1; nIndex < histogramSize; nIndex++)
        {
            pHistogram[nIndex] = (256 * (1.0f - (pHistogram[nIndex] / nNumberOfPoints)));
        }
    }
}
示例6: setPixels
// Copies an RGB888 color frame into the stream's back pixel buffer and
// publishes it with a buffer swap. Non-RGB888 frames only (re)allocate
// the buffer; no pixels are copied.
void ColorStream::setPixels(openni::VideoFrameRef frame)
{
    Stream::setPixels(frame);
    openni::VideoMode mode = frame.getVideoMode();
    int width = mode.getResolutionX();
    int height = mode.getResolutionY();
    // Allocate a 3-channel back buffer at the frame's resolution.
    pix.allocate(width, height, 3);
    if (mode.getPixelFormat() == openni::PIXEL_FORMAT_RGB888)
    {
        // Copy the packed RGB bytes straight across.
        // NOTE(review): this assumes the source buffer is tightly packed
        // (no row padding/stride) — confirm for this device.
        const unsigned char* src = (const unsigned char*)frame.getData();
        unsigned char* dst = pix.getBackBuffer().getPixels();
        int total = width * height * 3;
        for (int i = 0; i < total; ++i)
        {
            dst[i] = src[i];
        }
    }
    pix.swap();
}
示例7: SetDepthFrame
// Pushes a new depth frame into the rolling frame window (recycling the
// oldest slot once the window is full), recomputes the running average,
// and refreshes the derived depth (and, optionally, normal) data.
// Fix: removed a local (`rowSize`) that computed the row stride but was
// never read afterwards.
void GeomDepthCalculator::SetDepthFrame(openni::VideoFrameRef& depthFrame)
{
    int w = depthFrame.getWidth();
    int h = depthFrame.getHeight();
    const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)depthFrame.getData();
    // NOTE(review): copyData receives only width/height, so it presumably
    // assumes a tightly packed buffer — confirm behavior for padded frames.
    DepthFrame::FrameData<ushort>* frame = 0;
    if (m_frames.size() < m_maxFrames)
    {
        // Window still growing: allocate a fresh frame slot.
        frame = new DepthFrame::FrameData<ushort>();
    }
    else
    {
        // Window full: recycle the oldest frame.
        frame = *m_frames.begin();
        m_frames.erase(m_frames.begin());
    }
    frame->copyData(pDepthRow, w, h);
    m_frames.push_back(frame);
    m_avgFrame.createData(w, h);
    _averageFrames();
    m_frame->SetRawData(m_avgFrame.Data(), w, h);
    m_frame->CalculateDepth();
    if (m_calcNormals)
        m_frame->CalculateNormals();
}
示例8: copyFrame
// Copies an OpenNI frame into an MRPT container row by row, honoring the
// frame's byte stride (rows may be padded past width * pixel size).
void copyFrame(openni::VideoFrameRef& frame, MRPT_DATA& dst){
    const int width = frame.getWidth();
    const int height = frame.getHeight();
    const int stride = frame.getStrideInBytes();
    resize(dst, width, height);
    // Advance by the byte stride so padded rows are handled correctly.
    const char* row = (const char*)frame.getData();
    for (int y = 0; y < height; ++y, row += stride){
        copyRow<NI_PIXEL, MRPT_DATA>(row, dst, width, y);
    }
}
示例9: showDepthStream
// Converts a depth frame to an 8-bit grayscale cv::Mat for display.
cv::Mat showDepthStream( const openni::VideoFrameRef& depthFrame )
{
    // Wrap the frame's raw 16-bit depth buffer in a Mat (no copy).
    cv::Mat depthImage( depthFrame.getHeight(), depthFrame.getWidth(),
                        CV_16UC1, (unsigned short*)depthFrame.getData() );
    // Map the 0-10000 mm range onto 0-255 (8 bit).
    depthImage.convertTo( depthImage, CV_8U, 255.0 / 10000 );
    return depthImage;
}
示例10: showColorStream
// Converts an OpenNI color stream frame into a displayable OpenCV image.
cv::Mat showColorStream( const openni::VideoFrameRef& colorFrame )
{
    // Wrap the frame's RGB888 buffer in a Mat (no copy).
    cv::Mat colorImage( colorFrame.getHeight(), colorFrame.getWidth(),
                        CV_8UC3, (unsigned char*)colorFrame.getData() );
    // OpenCV renders in BGR order, so swap the channels in place.
    cv::cvtColor( colorImage, colorImage, CV_RGB2BGR );
    return colorImage;
}
示例11: getColorImage
// Returns the color frame as a BGR cv::Mat, or an empty Mat when the
// frame is invalid.
cv::Mat getColorImage(openni::VideoFrameRef& color_frame)
{
    // An invalid frame yields an empty image.
    if(!color_frame.isValid())
    {
        return cv::Mat();
    }
    openni::VideoMode mode = color_frame.getVideoMode();
    // Wrap the RGB buffer without copying, then convert to OpenCV's BGR
    // order (cvtColor allocates the returned image).
    cv::Mat rgb(mode.getResolutionY(), mode.getResolutionX(),
                CV_8UC3, (char*)color_frame.getData());
    cv::Mat bgr;
    cv::cvtColor(rgb, bgr, CV_RGB2BGR);
    return bgr;
}
示例12: getDepthImage
// CV_16U
// Returns the depth frame as an owning 16-bit cv::Mat, or an empty Mat
// when the frame is invalid.
cv::Mat getDepthImage(openni::VideoFrameRef& depth_frame)
{
    if(!depth_frame.isValid())
    {
        return cv::Mat();
    }
    openni::VideoMode mode = depth_frame.getVideoMode();
    // Wrap the frame buffer, then clone so the result owns its pixels and
    // stays valid after the frame is released.
    cv::Mat view(mode.getResolutionY(), mode.getResolutionX(),
                 CV_16U, (char*)depth_frame.getData());
    return view.clone();
}
示例13: toggleStreamState
// Toggles a sensor stream on/off, creating it lazily on first use.
// On failure (create or start) an error is shown and `isOn` is unchanged.
void toggleStreamState(openni::VideoStream& stream, openni::VideoFrameRef& frame, bool& isOn, openni::SensorType type, const char* name)
{
    // Lazily create the stream against the global device the first time.
    if (!stream.isValid())
    {
        if (stream.create(g_device, type) != openni::STATUS_OK)
        {
            displayError("Failed to create %s stream:\n%s", name, openni::OpenNI::getExtendedError());
            return;
        }
    }
    if (isOn)
    {
        // Turning off: stop the stream and drop the cached frame.
        stream.stop();
        frame.release();
    }
    else if (stream.start() != openni::STATUS_OK)
    {
        // Turning on failed: report and leave isOn untouched.
        displayError("Failed to start %s stream:\n%s", name, openni::OpenNI::getExtendedError());
        return;
    }
    isOn = !isOn;
}
示例14: displayFrame
// Renders one depth frame into the texture map m_pTexMap: each non-zero
// depth value is mapped through the precomputed histogram (m_pDepthHist)
// to a grayscale intensity. Pixels with depth 0 are left untouched.
void SampleViewer::displayFrame(const openni::VideoFrameRef& frame)
{
// Nothing to draw for an invalid frame.
if (!frame.isValid())
return;
const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)frame.getData();
// Start writing at the cropped frame's vertical origin within the texture.
openni::RGB888Pixel* pTexRow = m_pTexMap + frame.getCropOriginY() * m_nTexMapX;
// Stride is in bytes; rowSize is the per-row advance in DepthPixel units
// (it may exceed getWidth() when rows are padded).
int rowSize = frame.getStrideInBytes() / sizeof(openni::DepthPixel);
for (int y = 0; y < frame.getHeight(); ++y)
{
const openni::DepthPixel* pDepth = pDepthRow;
// Offset each output row by the horizontal crop origin.
openni::RGB888Pixel* pTex = pTexRow + frame.getCropOriginX();
for (int x = 0; x < frame.getWidth(); ++x, ++pDepth, ++pTex)
{
if (*pDepth != 0)
{
// Histogram lookup yields a brightness; write it as a gray pixel.
int nHistValue = m_pDepthHist[*pDepth];
pTex->r = nHistValue;
pTex->g = nHistValue;
pTex->b = nHistValue;
}
}
// Advance source by the padded row size, destination by the texture width.
pDepthRow += rowSize;
pTexRow += m_nTexMapX;
}
}
示例15: convert_pixel_map
// Converts an OpenNI RGB888 frame into an OpenCV BGR image.
void convert_pixel_map(const openni::VideoFrameRef &in, cv::Mat& out)
{
    // Wrap the frame's RGB buffer in a Mat without copying.
    int dims[2] = {in.getHeight(), in.getWidth()};
    cv::Mat rgb(2, dims, CV_8UC3, (void *)in.getData());
    // OpenCV expects BGR ordering; cvtColor allocates `out` as needed.
    cv::cvtColor(rgb, out, CV_RGB2BGR);
}