This article collects typical usage examples of the C++ method xn::DepthGenerator::ConvertProjectiveToRealWorld. If you are wondering what exactly DepthGenerator::ConvertProjectiveToRealWorld does, how to call it, or want to see it used in real code, the curated samples below should help. You can also explore further usage examples of its enclosing class, xn::DepthGenerator.
The following presents 5 code examples of the DepthGenerator::ConvertProjectiveToRealWorld method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
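Before the examples, here is a minimal, self-contained sketch of the basic call pattern (illustrative only, not taken from the examples below). It assumes an OpenNI 1.x context with an attached depth device, omits error checking, and converts the centre pixel of one depth frame from projective coordinates (pixel x, pixel y, depth in mm) to real-world millimetre coordinates.
#include <XnCppWrapper.h>

int main()
{
    // Assumed setup: an OpenNI 1.x context and a depth-capable device (error checks omitted).
    xn::Context context;
    context.Init();

    xn::DepthGenerator depthGen;
    depthGen.Create(context);
    context.StartGeneratingAll();
    context.WaitOneUpdateAll(depthGen);   // wait for one depth frame

    xn::DepthMetaData dmd;
    depthGen.GetMetaData(dmd);

    // Projective point: pixel coordinates plus raw depth (millimetres) at the frame centre.
    XnUInt32 cx = dmd.XRes() / 2, cy = dmd.YRes() / 2;
    XnPoint3D proj, world;
    proj.X = (XnFloat)cx;
    proj.Y = (XnFloat)cy;
    proj.Z = (XnFloat)dmd.Data()[cy * dmd.XRes() + cx];

    // One-point conversion; world.X/Y/Z are millimetres relative to the sensor.
    depthGen.ConvertProjectiveToRealWorld(1, &proj, &world);

    return 0;   // context cleanup omitted for brevity
}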
Example 1: projectiveToWorld
XnPoint3D xncv::projectiveToWorld(const cv::Point& point, XnFloat z, const xn::DepthGenerator& depth)
{
    XnPoint3D p;
    p.X = static_cast<XnFloat>(point.x);
    p.Y = static_cast<XnFloat>(point.y);
    p.Z = static_cast<XnFloat>(z);
    XnPoint3D p2;
    depth.ConvertProjectiveToRealWorld(1, &p, &p2);
    return p2;
}
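A hypothetical call to this helper might look as follows (the pixel location, the 1500 mm depth value, and the generator name depth are illustrative, not part of the original example):
// Map pixel (320, 240) with a measured depth of 1500 mm into real-world millimetres.
XnPoint3D world = xncv::projectiveToWorld(cv::Point(320, 240), 1500.0f, depth);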
Example 2: retrievePointCloudMap
IplImage* CvCapture_OpenNI::retrievePointCloudMap()
{
    if( !depthMetaData.Data() )
        return 0;

    cv::Mat depth;
    getDepthMapFromMetaData( depthMetaData, depth, noSampleValue, shadowValue );

    const int badPoint = INVALID_PIXEL_VAL;
    const float badCoord = INVALID_COORDINATE_VAL;
    int cols = depthMetaData.XRes(), rows = depthMetaData.YRes();
    cv::Mat pointCloud_XYZ( rows, cols, CV_32FC3, cv::Scalar::all(badPoint) );

    cv::Ptr<XnPoint3D> proj = new XnPoint3D[cols*rows];
    cv::Ptr<XnPoint3D> real = new XnPoint3D[cols*rows];
    for( int y = 0; y < rows; y++ )
    {
        for( int x = 0; x < cols; x++ )
        {
            int ind = y*cols+x;
            proj[ind].X = (float)x;
            proj[ind].Y = (float)y;
            proj[ind].Z = depth.at<unsigned short>(y, x);
        }
    }

    depthGenerator.ConvertProjectiveToRealWorld(cols*rows, proj, real);

    for( int y = 0; y < rows; y++ )
    {
        for( int x = 0; x < cols; x++ )
        {
            // Check for invalid measurements
            if( depth.at<unsigned short>(y, x) == badPoint ) // not valid
                pointCloud_XYZ.at<cv::Point3f>(y,x) = cv::Point3f( badCoord, badCoord, badCoord );
            else
            {
                int ind = y*cols+x;
                pointCloud_XYZ.at<cv::Point3f>(y,x) = cv::Point3f( real[ind].X*0.001f, real[ind].Y*0.001f, real[ind].Z*0.001f); // from mm to meters
            }
        }
    }

    outputMaps[CV_CAP_OPENNI_POINT_CLOUD_MAP].mat = pointCloud_XYZ;
    return outputMaps[CV_CAP_OPENNI_POINT_CLOUD_MAP].getIplImagePtr();
}
Example 3: _tmain
int _tmain(int argc, _TCHAR* argv[])
{
    InitialKinect();
    std::stringstream out_yml_filename;
    cv::Mat Read_Depth;
    cv::Mat c16BitDepth;

    for (int k = 0; k < image_number; k++)
    {
        // Load the stored 16-bit depth frame from its YML file and convert it to float.
        out_yml_filename.str( *(depthImgName+k) );
        cv::FileStorage fsd( out_yml_filename.str(), cv::FileStorage::READ);
        fsd["frameCount"] >> c16BitDepth;
        c16BitDepth.convertTo( Read_Depth, CV_32FC1);

        int idxShift, idx;
        XnPoint3D* pDepthPointSet = new XnPoint3D[ 640 * 480 ];
        pRealWorldCamaraPointSet = new XnPoint3D[ 640 * 480 ];

        // Fill one projective point (pixel x, pixel y, depth in mm) per pixel.
        for ( int j = 0; j < 480; ++j )
        {
            idxShift = j * 640;
            for ( int i = 0; i < 640; ++i)
            {
                idx = idxShift + i;
                pDepthPointSet[idx].X = (int)i;
                pDepthPointSet[idx].Y = (int)j;
                pDepthPointSet[idx].Z = (int)Read_Depth.at<float>(idx); // flat index; assumes Read_Depth is continuous
            }
        }

        // Convert the whole 640x480 frame in one call and write the point cloud to disk.
        mDepthGenerator.ConvertProjectiveToRealWorld( 640 * 480, pDepthPointSet, pRealWorldCamaraPointSet );
        WriteToFile(pRealWorldCamaraPointSet, k);
        cout << "Image " << RealNumber[k] << " Done!!\n";

        fsd.release();
        delete [] pDepthPointSet;
        delete [] pRealWorldCamaraPointSet;
    }
    return 0;
}
Example 4: DumpDepthMap
void DepthMapLogger::DumpDepthMap(const xn::DepthMetaData& dmd, const xn::SceneMetaData& smd)
{
    static char name_str[20], comment_str[255];

    // Don't do anything if the h5 file is not open
    if(!p_h5_file_ || !p_frames_group_) { return; }

    // References to various bits of the HDF5 output
    H5File &file(*p_h5_file_);
    Group &frames_group(*p_frames_group_);

    // This frame's index is the number of frames we've previously saved
    hsize_t this_frame_idx = frames_group.getNumObjs();

    // Create this frame's group
    snprintf(name_str, 20, "frame_%06lld", this_frame_idx);
    snprintf(comment_str, 255, "Data for frame %lld", this_frame_idx);
    Group this_frame_group(frames_group.createGroup(name_str));
    this_frame_group.setComment(".", comment_str);

    // Create attributes for this group
    Attribute idx_attr = this_frame_group.createAttribute("idx", PredType::NATIVE_HSIZE, DataSpace());
    idx_attr.write(PredType::NATIVE_HSIZE, &this_frame_idx);

    // Create this frame's datasets
    DSetCreatPropList creat_props;
    uint16_t fill_value(0);
    creat_props.setFillValue(PredType::NATIVE_UINT16, &fill_value);
    hsize_t rows(static_cast<hsize_t>(dmd.YRes())), cols(static_cast<hsize_t>(dmd.XRes()));
    hsize_t creation_dims[2] = { rows, cols };
    hsize_t max_dims[2] = { rows, cols };
    DataSpace mem_space(2, creation_dims, max_dims);
    DataSet depth_ds(this_frame_group.createDataSet(
        "depth", PredType::NATIVE_UINT16, mem_space, creat_props));
    DataSet label_ds(this_frame_group.createDataSet(
        "label", PredType::NATIVE_UINT16, mem_space, creat_props));

    // Get depth and label buffers
    const uint16_t *p_depths = dmd.Data();
    const uint16_t *p_labels = smd.Data();

    // Write depth data
    depth_ds.write(p_depths, PredType::NATIVE_UINT16);

    // Write label data
    label_ds.write(p_labels, PredType::NATIVE_UINT16);

    // Convert non-zero depth values into 3D point positions
    XnPoint3D *pts = new XnPoint3D[rows*cols];
    uint16_t *pt_labels = new uint16_t[rows*cols];
    size_t n_pts(0);
    for(size_t depth_idx(0); depth_idx < rows*cols; ++depth_idx) {
        // Skip zero depth values
        if(p_depths[depth_idx] == 0) {
            continue;
        }

        // Store projective-values
        pts[n_pts].X = depth_idx % cols;
        pts[n_pts].Y = depth_idx / cols;
        pts[n_pts].Z = p_depths[depth_idx];
        pt_labels[n_pts] = p_labels[depth_idx];
        ++n_pts;
    }
    g_DepthGenerator.ConvertProjectiveToRealWorld(n_pts, pts, pts);

    if (n_pts > 0)
    {
        // Create points dataset
        hsize_t pts_creation_dims[2] = { n_pts, 3 };
        hsize_t pts_max_dims[2] = { n_pts, 3 };
        DataSpace pts_mem_space(2, pts_creation_dims, pts_max_dims);
        DataSet pts_ds(this_frame_group.createDataSet(
            "points", PredType::NATIVE_FLOAT, pts_mem_space, creat_props));

        hsize_t pt_labels_creation_dims[1] = { n_pts };
        hsize_t pt_labels_max_dims[1] = { n_pts };
        DataSpace pt_labels_mem_space(1, pt_labels_creation_dims, pt_labels_max_dims);
        DataSet pt_labels_ds(this_frame_group.createDataSet(
            "point_labels", PredType::NATIVE_UINT16, pt_labels_mem_space, creat_props));

        // Write points data
        pts_ds.write(pts, PredType::NATIVE_FLOAT);
        pt_labels_ds.write(pt_labels, PredType::NATIVE_UINT16);
    }

    // Create groups to store detected users
    Group users_group(this_frame_group.createGroup("users"));

    // Dump each user in turn
    char strLabel[50] = "";
    XnUserID aUsers[15];
    XnUInt16 nUsers = 15;
    g_UserGenerator.GetUsers(aUsers, nUsers);
    for (int i = 0; i < nUsers; ++i)
    {
        // Create a group for this user
        snprintf(name_str, 20, "user_%02d", aUsers[i]);
        Group this_user_group(users_group.createGroup(name_str));
//......... remainder of this example omitted .........
Example 5: mexFunction
/* The matlab mex function */
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
    double *Iout;
    XnUInt64 *MXadress;

    if(nrhs==0)
    {
        printf("Open failed: Give Pointer to Kinect as input\n");
        mexErrMsgTxt("Kinect Error");
    }

    // Recover the OpenNI context and depth generator from the pointer array passed in from MATLAB.
    MXadress = (XnUInt64*)mxGetData(prhs[0]);
    if(MXadress[0]>0){ g_Context = ((xn::Context*) MXadress[0])[0]; }
    if(MXadress[2]>0)
    {
        g_DepthGenerator = ((xn::DepthGenerator*) MXadress[2])[0];
    }
    else
    {
        mexErrMsgTxt("No Depth Node in Kinect Context");
    }

    XnStatus nRetVal = XN_STATUS_OK;
    xn::DepthMetaData depthMD;

    // Process the data
    g_DepthGenerator.GetMetaData(depthMD);
    XnUInt16 g_nXRes = depthMD.FullXRes();
    XnUInt16 g_nYRes = depthMD.FullYRes();
    const XnDepthPixel* pDepth = depthMD.Data();

    int Jdimsc[3];
    Jdimsc[0]=g_nXRes;
    Jdimsc[1]=g_nYRes;
    Jdimsc[2]=3;

    int npixels=Jdimsc[0]*Jdimsc[1];
    int index;
    XnPoint3D *pt_proj =new XnPoint3D[npixels];
    XnPoint3D *pt_world=new XnPoint3D[npixels];

    // Fill one projective point (pixel x, pixel y, raw depth) per pixel.
    for(int y=0; y<Jdimsc[1]; y++)
    {
        for(int x=0; x<Jdimsc[0]; x++)
        {
            index=x+y*Jdimsc[0];
            pt_proj[index].X=(XnFloat)x;
            pt_proj[index].Y=(XnFloat)y;
            pt_proj[index].Z=pDepth[x+y*Jdimsc[0]];
        }
    }

    g_DepthGenerator.ConvertProjectiveToRealWorld( (XnUInt32)npixels, pt_proj, pt_world);

    // Return the result to MATLAB as an XRes-by-YRes-by-3 double array of real-world X/Y/Z.
    plhs[0] = mxCreateNumericArray(3, Jdimsc, mxDOUBLE_CLASS, mxREAL);
    Iout = mxGetPr(plhs[0]);
    for(int y=0; y<Jdimsc[1]; y++)
    {
        for(int x=0; x<Jdimsc[0]; x++)
        {
            index=x+y*Jdimsc[0];
            Iout[index]=pt_world[index].X;
            Iout[index+npixels]=pt_world[index].Y;
            Iout[index+npixels*2]=pt_world[index].Z;
        }
    }

    delete[] pt_proj;
    delete[] pt_world;
}