This page collects typical usage examples of the C++ method VideoFrameRef::getData. If you have been wondering how VideoFrameRef::getData is used in practice, the curated examples below may help. You can also explore further usage examples of the VideoFrameRef class, to which this method belongs.
The following lists 15 code examples of VideoFrameRef::getData, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ examples.
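Before diving into the examples, here is a minimal sketch of the OpenNI 2 read loop that all of them assume. The device and stream setup shown here is illustrative boilerplate, not part of any example below:

// Minimal sketch: open a depth stream, read one frame, and access the
// raw pixel buffer via getData(). Assumes a single OpenNI-compatible
// depth sensor is attached.
#include <OpenNI.h>
#include <cstdio>

int main()
{
    using namespace openni;
    if (OpenNI::initialize() != STATUS_OK)
        return 1;

    Device device;
    if (device.open(ANY_DEVICE) != STATUS_OK)
        return 1;

    VideoStream depth;
    depth.create(device, SENSOR_DEPTH);
    depth.start();

    VideoFrameRef frame;
    if (depth.readFrame(&frame) == STATUS_OK && frame.isValid())
    {
        // getData() returns a const void* to the pixel buffer; for a
        // depth stream each pixel is a 16-bit DepthPixel.
        const DepthPixel* pixels = static_cast<const DepthPixel*>(frame.getData());
        int middle = frame.getHeight() / 2 * frame.getWidth() + frame.getWidth() / 2;
        printf("center depth: %u\n", (unsigned)pixels[middle]);
    }

    depth.stop();
    depth.destroy();
    device.close();
    OpenNI::shutdown();
    return 0;
}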
Example 1: cloud_xyz_comp
std::stringstream cloud_xyz_comp(VideoFrameRef frame, VideoStream &stream, bool showstats)
{
    std::stringstream comp_data;
    pcl::PointCloud<pcl::PointXYZ> cloud;
    if (frame.getData() == NULL)
        std::cout << "Empty data\n";
    float px, py, pz;
    DepthPixel *depthp, dp;
    Status status;
    cloud.width = frame.getWidth();
    cloud.height = frame.getHeight();
    cloud.resize(cloud.height * cloud.width);
    depthp = (DepthPixel*)frame.getData();
    for (int y = 0; y < frame.getHeight(); y++)
    {
        for (int x = 0; x < frame.getWidth(); x++)
        {
            dp = *depthp;
            // Project the depth sample at (x, y) to world coordinates.
            status = CoordinateConverter::convertDepthToWorld(stream, x, y, dp, &px, &py, &pz);
            if (!handlestatus(status)) exit(0);
            // Stride is in bytes and each DepthPixel is 2 bytes, so
            // stride/2 converts the row offset to a pixel index.
            int idx = x + (frame.getStrideInBytes() * y) / 2;
            cloud.points[idx].x = px;
            cloud.points[idx].y = py;
            cloud.points[idx].z = pz;
            depthp++;
        }
    }
    // Compress the cloud with a low-resolution, color-free octree profile.
    pcl::PointCloud<pcl::PointXYZ>::ConstPtr const_cloud(new pcl::PointCloud<pcl::PointXYZ>(cloud));
    pcl::octree::compression_Profiles_e comp_profile = pcl::octree::LOW_RES_ONLINE_COMPRESSION_WITHOUT_COLOR;
    pcl::octree::PointCloudCompression<pcl::PointXYZ> encoder(comp_profile, showstats);
    encoder.encodePointCloud(const_cloud, comp_data);
    return comp_data;
}
Example 2: analyzeFrame
void analyzeFrame(const VideoFrameRef& frame)
{
    DepthPixel* pDepth;
    RGB888Pixel* pColor;
    // Index of the middle pixel of the frame.
    int middleIndex = (frame.getHeight() + 1) * frame.getWidth() / 2;
    switch (frame.getVideoMode().getPixelFormat())
    {
    case PIXEL_FORMAT_DEPTH_1_MM:
    case PIXEL_FORMAT_DEPTH_100_UM:
        pDepth = (DepthPixel*)frame.getData();
        printf("[%08llu] %8d\n", (unsigned long long)frame.getTimestamp(),
            pDepth[middleIndex]);
        break;
    case PIXEL_FORMAT_RGB888:
        pColor = (RGB888Pixel*)frame.getData();
        printf("[%08llu] 0x%02x%02x%02x\n", (unsigned long long)frame.getTimestamp(),
            pColor[middleIndex].r & 0xff,
            pColor[middleIndex].g & 0xff,
            pColor[middleIndex].b & 0xff);
        break;
    default:
        printf("Unknown format\n");
    }
}
Example 3: cloud_xyz
pcl::PointCloud<pcl::PointXYZ> cloud_xyz(VideoFrameRef frame, VideoStream &stream)
{
    pcl::PointCloud<pcl::PointXYZ> cloud;
    if (frame.getData() == NULL)
        std::cout << "Empty data\n";
    float px, py, pz;
    DepthPixel *depthp, dp;
    Status status;
    cloud.width = frame.getWidth();
    cloud.height = frame.getHeight();
    cloud.resize(cloud.height * cloud.width);
    depthp = (DepthPixel*)frame.getData();
    for (int y = 0; y < frame.getHeight(); y++)
    {
        for (int x = 0; x < frame.getWidth(); x++)
        {
            dp = *depthp;
            status = CoordinateConverter::convertDepthToWorld(stream, x, y, dp, &px, &py, &pz);
            if (!handlestatus(status)) exit(0);
            // Stride is in bytes and each DepthPixel is 2 bytes, so
            // stride/2 converts the row offset to a pixel index.
            int idx = x + (frame.getStrideInBytes() * y) / 2;
            cloud.points[idx].x = px;
            cloud.points[idx].y = py;
            cloud.points[idx].z = pz;
            depthp++;
        }
    }
    return cloud;
}
Example 4: readFrame
/**
    Reads a frame.
    @return false if no frame can be read or the frame read is not valid; true otherwise.
*/
bool Kinect::readFrame(cv::Mat &frame, CameraMode camMode)
{
    VideoFrameRef irf;
    int hIr, wIr;
    VideoFrameRef colorf;
    int hColor, wColor;
    switch (camMode)
    {
    case (NI_SENSOR_DEPTH):
        rc = depth.readFrame(&irf);
        if (irf.isValid())
        {
            const uint16_t* imgBufIr = (const uint16_t*)irf.getData();
            hIr = irf.getHeight();
            wIr = irf.getWidth();
            frame.create(hIr, wIr, CV_16U);
            memcpy(frame.data, imgBufIr, hIr * wIr * sizeof(uint16_t));
            frame.convertTo(frame, CV_8U);
            return true;
        }
        else
        {
            cout << "ERROR: Frame not valid." << endl;
            return false;
        }
    case (NI_SENSOR_COLOR):
        rc = color.readFrame(&colorf);
        if (colorf.isValid())
        {
            const openni::RGB888Pixel* imgBufColor = (const openni::RGB888Pixel*)colorf.getData();
            hColor = colorf.getHeight();
            wColor = colorf.getWidth();
            frame.create(hColor, wColor, CV_8UC3);
            memcpy(frame.data, imgBufColor, 3 * hColor * wColor * sizeof(uint8_t));
            // OpenNI delivers RGB; swap channels for OpenCV's BGR convention.
            cvtColor(frame, frame, CV_BGR2RGB);
            return true;
        }
        else
        {
            cout << "ERROR: Frame not valid." << endl;
            return false;
        }
    default:
        cout << "ERROR: No frame to be read. Object not initialized." << endl;
        return false;
    }
}
Author: erictol, Project: Kinect-application-for-the-development-of-gross-motor-skills, Lines: 59, Source: Kinect.cpp
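As the doc comment notes, readFrame reports failure through its return value, so callers should branch on it. A minimal, hypothetical usage sketch (the Kinect wrapper's construction and device initialization are assumed to have happened elsewhere in the project):

// Hypothetical caller: grab one depth frame and display it.
Kinect kinect;                      // assumed already initialized
cv::Mat depthMat;
if (kinect.readFrame(depthMat, NI_SENSOR_DEPTH))
{
    cv::imshow("depth", depthMat);  // 8-bit image after convertTo above
    cv::waitKey(1);
}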
Example 5: onNewFrame
void XtionDepthDriverImpl::onNewFrame(VideoStream &stream)
{
    VideoFrameRef ref;
    stream.readFrame(&ref);
    // Wrap the raw frame buffer in the driver's depth-image type.
    _lastCaptured = XtionDepthImage(ref.getData(), ref.getDataSize(),
        ref.getWidth(), ref.getHeight(), 0, this);
}
Example 6: getNextData
Status ClosestPoint::getNextData(IntPoint3D& closestPoint, VideoFrameRef& rawFrame)
{
    Status rc = m_pInternal->m_pDepthStream->readFrame(&rawFrame);
    if (rc != STATUS_OK)
    {
        printf("readFrame failed\n%s\n", OpenNI::getExtendedError());
        return rc;
    }
    const DepthPixel* pDepth = (const DepthPixel*)rawFrame.getData();
    bool found = false;
    closestPoint.Z = 0xffff; // DepthPixel is 16-bit, so start at the maximum
    int width = rawFrame.getWidth();
    int height = rawFrame.getHeight();
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x, ++pDepth)
        {
            // Zero means "no reading"; keep the nearest valid sample.
            if (*pDepth < closestPoint.Z && *pDepth != 0)
            {
                closestPoint.X = x;
                closestPoint.Y = y;
                closestPoint.Z = *pDepth;
                found = true;
            }
        }
    }
    if (!found)
    {
        return STATUS_ERROR;
    }
    return STATUS_OK;
}
Example 7: onNewFrame
virtual void onNewFrame(VideoStream& stream)
{
    ros::Time ts = ros::Time::now();
    VideoFrameRef frame;
    stream.readFrame(&frame);
    sensor_msgs::Image::Ptr img(new sensor_msgs::Image);
    sensor_msgs::CameraInfo::Ptr info(new sensor_msgs::CameraInfo);

    // Nominal focal length of ~1050 px at a 1280-wide mode, scaled to
    // the actual frame width.
    double scale = double(frame.getWidth()) / double(1280);

    info->header.stamp = ts;
    info->header.frame_id = frame_id_;
    info->width = frame.getWidth();
    info->height = frame.getHeight();
    info->K.assign(0);
    info->K[0] = 1050.0 * scale;
    info->K[4] = 1050.0 * scale;
    info->K[2] = frame.getWidth() / 2.0 - 0.5;
    info->K[5] = frame.getHeight() / 2.0 - 0.5;
    info->P.assign(0);
    info->P[0] = 1050.0 * scale;
    info->P[5] = 1050.0 * scale;
    info->P[2] = frame.getWidth() / 2.0 - 0.5;
    info->P[6] = frame.getHeight() / 2.0 - 0.5;

    // Map the OpenNI pixel format to the matching ROS image encoding.
    switch (frame.getVideoMode().getPixelFormat())
    {
    case PIXEL_FORMAT_GRAY8:
        img->encoding = sensor_msgs::image_encodings::MONO8;
        break;
    case PIXEL_FORMAT_GRAY16:
        img->encoding = sensor_msgs::image_encodings::MONO16;
        break;
    case PIXEL_FORMAT_YUV422:
        img->encoding = sensor_msgs::image_encodings::YUV422;
        break;
    case PIXEL_FORMAT_RGB888:
        img->encoding = sensor_msgs::image_encodings::RGB8;
        break;
    case PIXEL_FORMAT_SHIFT_9_2:
    case PIXEL_FORMAT_DEPTH_1_MM:
        img->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
        break;
    default:
        ROS_WARN("Unknown OpenNI pixel format!");
        break;
    }

    img->header.stamp = ts;
    img->header.frame_id = frame_id_;
    img->height = frame.getHeight();
    img->width = frame.getWidth();
    img->step = frame.getStrideInBytes();
    img->data.resize(frame.getDataSize());
    std::copy(static_cast<const uint8_t*>(frame.getData()),
              static_cast<const uint8_t*>(frame.getData()) + frame.getDataSize(),
              img->data.begin());
    publish(img, info);
}
Example 8: copyFrameToImage
void copyFrameToImage(VideoFrameRef frame, DepthImage& image)
{
    // Reallocate only when the frame size changed; otherwise reuse the buffer.
    if (image.height() != frame.getHeight() || image.width() != frame.getWidth())
    {
        image = DepthImage(
            DepthImage::Data(
                static_cast<const uint16_t*>(frame.getData()),
                static_cast<const uint16_t*>(frame.getData()) + frame.getWidth() * frame.getHeight()),
            frame.getWidth(),
            frame.getHeight());
    }
    else
    {
        image.data(DepthImage::Data(
            static_cast<const uint16_t*>(frame.getData()),
            static_cast<const uint16_t*>(frame.getData()) + frame.getWidth() * frame.getHeight()));
    }
}
Example 9: analyzeFrame
void DepthCallback::analyzeFrame(const VideoFrameRef& frame)
{
    Mat image;
    image.create(frame.getHeight(), frame.getWidth(), CV_16UC1);
    const openni::DepthPixel* pImageRow = (const openni::DepthPixel*)frame.getData();
    memcpy(image.data, pImageRow, frame.getStrideInBytes() * frame.getHeight());
    // image *= 16;
    if (createCVWindow)
    {
        imshow("DepthWindow", image);
        waitKey(10);
    }
    // cout << "New depth frame w: " << frame.getWidth() << " h: " << frame.getHeight() << endl;
    if (saveOneFrame || saveFrameSequence)
    {
        char buffer[50];
        sprintf(buffer, "depth%llu.png", (unsigned long long)frame.getTimestamp());
        imwrite(buffer, image);
        saveOneFrame = false;
        std::cout << "DepthCallback :: saved file " << buffer << std::endl;
    }
    if (publishRosMessage)
    {
        cv_bridge::CvImage aBridgeImage;
        aBridgeImage.image = image;
        aBridgeImage.encoding = "mono16";
        cv::flip(aBridgeImage.image, aBridgeImage.image, 1);
        sensor_msgs::ImagePtr rosImage = aBridgeImage.toImageMsg();
        // rosImage.get()->header.frame_id = "/camera_depth_optical_frame";
        rosImage.get()->header.frame_id = string("/") + string(m_CameraNamespace) + string("_depth_optical_frame");
        rosImage.get()->encoding = "16UC1";
        rosImage.get()->header.stamp = ros::Time::now();
        m_RosPublisher.publish(rosImage);

        // Hard-coded intrinsics for the default 640x480 depth mode.
        sensor_msgs::CameraInfo camInfo;
        camInfo.width = frame.getWidth();
        camInfo.height = frame.getHeight();
        camInfo.distortion_model = "plumb_bob";
        camInfo.K = {{570.3422241210938, 0.0, 314.5, 0.0, 570.3422241210938, 235.5, 0.0, 0.0, 1.0}};
        camInfo.R = {{1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0}};
        camInfo.P = {{570.3422241210938, 0.0, 314.5, -21.387834254417157, 0.0, 570.3422241210938, 235.5, 0.0, 0.0, 0.0, 1.0, 0.0}};
        double D[5] = {0.0, 0.0, 0.0, 0.0, 0.0};
        camInfo.D.assign(&D[0], &D[0] + 5);
        camInfo.header.frame_id = string("/") + string(m_CameraNamespace) + string("_depth_optical_frame");
        camInfo.header.stamp = rosImage.get()->header.stamp;
        m_RosCameraInfoPublisher.publish(camInfo);
    }
}
Example 10: consume_color
void KinectHelper::consume_color(VideoFrameRef frame)
{
    /// Get color data from the frame just fetched by the frame listener
    const openni::RGB888Pixel* imageBuffer = (const openni::RGB888Pixel*)frame.getData();
    (*color_back_buffer) = QImage((uchar*)imageBuffer, frame.getWidth(), frame.getHeight(), QImage::Format_RGB888);
    // Swap back/front buffers under the lock so readers see a complete image.
    _mutex.lock();
    qSwap(color_front_buffer, color_back_buffer);
    _mutex.unlock();
}
Example 11: wb_kinect_get_range_image_mm
const short unsigned int *wb_kinect_get_range_image_mm(WbDeviceTag tag) {
    if (kinectenable) {
        int i;
        int changedStreamDummy;
        VideoStream* pStream = &depth;
        rc = OpenNI::waitForAnyStream(&pStream, 1, &changedStreamDummy, SAMPLE_READ_WAIT_TIMEOUT);
        if (rc != STATUS_OK)
        {
            printf("Wait failed! (timeout is %d ms)\n%s\n", SAMPLE_READ_WAIT_TIMEOUT, OpenNI::getExtendedError());
            return NULL;
        }
        rc = depth.readFrame(&frame);
        if (rc != STATUS_OK)
        {
            printf("Read failed!\n%s\n", OpenNI::getExtendedError());
            return NULL;
        }
        if (frame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_1_MM && frame.getVideoMode().getPixelFormat() != PIXEL_FORMAT_DEPTH_100_UM)
        {
            printf("Unexpected frame format\n");
            return NULL;
        }
        DepthPixel* pDepth = (DepthPixel*)frame.getData();
        height_pixels = frame.getHeight();
        width_pixels = frame.getWidth();
        if (width_pixels > 160) {
            // Patch the blind columns at the left edge with the nearest valid column.
            for (i = 0; i < height_pixels; i++) {
                pDepth[i*width_pixels + 0] = pDepth[i*width_pixels + 3];
                pDepth[i*width_pixels + 1] = pDepth[i*width_pixels + 3];
                pDepth[i*width_pixels + 2] = pDepth[i*width_pixels + 3];
            }
        } else {
            for (i = 0; i < height_pixels; i++) {
                pDepth[i*width_pixels + 0] = pDepth[i*width_pixels + 1];
            }
        }
        return pDepth;
    }
    // Kinect not enabled
    fprintf(stderr, "Please enable the Kinect before using wb_kinect_get_range_image()\n");
    return NULL;
}
Example 12: saveDepthImage
// Save a depth frame to an image file
void saveDepthImage(const std::string name, const VideoFrameRef &depthFrame)
{
    int height = depthFrame.getHeight();
    int width = depthFrame.getWidth();
    cv::Mat image = cv::Mat::zeros(height, width, CV_16UC1);
    const DepthPixel *pDepth = (const DepthPixel *)depthFrame.getData();
    // Copy the depth buffer pixel by pixel into the 16-bit Mat.
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            image.at<uint16_t>(i, j) = (uint16_t)(*pDepth);
            pDepth++;
        }
    }
    cv::imwrite(name, image);
}
Example 13: compute_frame_bbox
BBox3 KinectHelper::compute_frame_bbox(VideoFrameRef frame)
{
    // qDebug() << "KinectThread::setBoundingBox(VideoFrameRef frame)";
    /// Get depth data from the frame just fetched
    const openni::DepthPixel* pDepth = (const openni::DepthPixel*)frame.getData();
    float wx, wy, wz;
    BBox3 box;
    box.setNull();
    // Grow the box around the world-space position of every depth sample.
    for (int y = 0; y < frame.getHeight(); ++y) {
        for (int x = 0; x < frame.getWidth(); ++x) {
            DepthPixel d = *pDepth;
            pDepth++;
            CoordinateConverter::convertDepthToWorld(depth, x, y, d, &wx, &wy, &wz);
            box.extend(Vector3(wx, wy, wz));
        }
    }
    this->_bbox = box;
    return box;
}
Example 14: copyInfrared
bool OpenNI2CameraImpl::copyInfrared( OpenNI2Camera::FrameView& frame )
{
    frame.infraredUpdated = false;
    VideoFrameRef src;
    Status rc = m_infraredStream.readFrame( &src );
    if( rc == STATUS_OK )
    {
        // Wrap the raw buffer in a strided 2D view; the stride accounts
        // for any row padding reported by the driver.
        Array2DReadView< uint16_t > srcData( src.getData(),
            { src.getWidth(), src.getHeight() },
            { sizeof( uint16_t ), src.getStrideInBytes() } );
        frame.infraredTimestampNS =
            usToNS( static_cast< int64_t >( src.getTimestamp() ) );
        frame.infraredFrameNumber = src.getFrameIndex();
        frame.infraredUpdated = copy( srcData, frame.infrared );
        return frame.infraredUpdated;
    }
    return false;
}
Example 15: getNextData
Status NUIPoints::getNextData(std::list<cv::Point3f>& nuiPoints, VideoFrameRef& rawFrame)
{
    Status rc = m_pInternal->m_pDepthStream->readFrame(&rawFrame);
    if (rc != STATUS_OK)
    {
        printf("readFrame failed\n%s\n", OpenNI::getExtendedError());
        return rc;
    }
    const DepthPixel* pDepth = (const DepthPixel*)rawFrame.getData();
    cv::Point3f nuiPoint;
    int width = rawFrame.getWidth();
    int height = rawFrame.getHeight();
    // Scan the left sixth of the image (minus the bottom band) for pixels
    // that are close, valid, and lie on a strong depth discontinuity.
    for (int x = 1; x < width / 6.0; ++x) {
        for (int y = 1; y < height - (height / 4.5); ++y)
        {
            if (pDepth[y * width + x] < FAR_LIMIT
                && pDepth[y * width + x] != 0
                && (ZERO_TO_INFINITY(pDepth[(y + 1) * width + (x + 1)]) - pDepth[y * width + x] > DEPTH_TRESHOLD
                    || ZERO_TO_INFINITY(pDepth[(y - 1) * width + (x + 1)]) - pDepth[y * width + x] > DEPTH_TRESHOLD
                    || ZERO_TO_INFINITY(pDepth[(y + 1) * width + (x - 1)]) - pDepth[y * width + x] > DEPTH_TRESHOLD
                    || ZERO_TO_INFINITY(pDepth[(y - 1) * width + (x - 1)]) - pDepth[y * width + x] > DEPTH_TRESHOLD))
            {
                nuiPoint.x = x;
                nuiPoint.y = y;
                nuiPoint.z = pDepth[y * width + x];
                nuiPoints.push_back(nuiPoint);
            }
        }
    }
    if (nuiPoints.empty())
    {
        return STATUS_ERROR;
    }
    return STATUS_OK;
}