本文整理汇总了C++中mrpt::utils::CTimeLogger::enter方法的典型用法代码示例。如果您正苦于以下问题:C++ CTimeLogger::enter方法的具体用法?C++ CTimeLogger::enter怎么用?C++ CTimeLogger::enter使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mrpt::utils::CTimeLogger
的用法示例。
在下文中一共展示了CTimeLogger::enter方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: rgb_cb
// freenect video (RGB/IR) callback: decodes the incoming frame into the
// intensity image of the CKinect object's latest CObservation3DRangeScan.
// NOTE(review): this example snippet appears truncated — the function's
// closing brace (and the timestamp update mentioned below) are not shown.
void rgb_cb(freenect_device *dev, void *img_data, uint32_t timestamp)
{
// Recover the CKinect wrapper object stashed as the freenect "user data" pointer:
CKinect *obj = reinterpret_cast<CKinect*>(freenect_get_user(dev));
const freenect_frame_mode frMode = freenect_get_current_video_mode(dev);
// Update of the timestamps at the end:
CObservation3DRangeScan &obs = obj->internal_latest_obs();
// Guard the shared observation against concurrent access:
mrpt::synch::CCriticalSectionLocker lock( &obj->internal_latest_obs_cs() );
#ifdef KINECT_PROFILE_MEM_ALLOC
alloc_tim.enter("depth_rgb loadFromMemoryBuffer");
#endif
obs.hasIntensityImage = true;
if (obj->getVideoChannel()==CKinect::VIDEO_CHANNEL_RGB)
{
// Color image: We asked for Bayer data, so we can decode it ourselves here
// and avoid having to reorder Green<->Red channels, as would be needed with
// the RGB image from freenect.
obs.intensityImageChannel = mrpt::slam::CObservation3DRangeScan::CH_VISIBLE;
obs.intensityImage.resize(frMode.width, frMode.height, CH_RGB, true /* origin=top-left */ );
#if MRPT_HAS_OPENCV
# if MRPT_OPENCV_VERSION_NUM<0x200
// Version for VERY OLD OpenCV versions:
// Wrap the raw Bayer buffer in an IplImage header (no pixel copy):
IplImage *src_img_bayer = cvCreateImageHeader(cvSize(frMode.width,frMode.height),8,1);
src_img_bayer->imageDataOrigin = reinterpret_cast<char*>(img_data);
src_img_bayer->imageData = src_img_bayer->imageDataOrigin;
src_img_bayer->widthStep = frMode.width;
IplImage *dst_img_RGB = obs.intensityImage.getAs<IplImage>();
// Decode Bayer image:
cvCvtColor(src_img_bayer, dst_img_RGB, CV_BayerGB2BGR);
# else
// Version for modern OpenCV:
// cv::Mat views over the source buffer and the destination image (no copies):
const cv::Mat src_img_bayer( frMode.height, frMode.width, CV_8UC1, img_data, frMode.width );
cv::Mat dst_img_RGB= cv::cvarrToMat( obs.intensityImage.getAs<IplImage>(), false /* dont copy buffers */ );
// Decode Bayer image:
cv::cvtColor(src_img_bayer, dst_img_RGB, CV_BayerGB2BGR);
# endif
#else
THROW_EXCEPTION("Need building with OpenCV!")
#endif
}
示例2: depth_cb
// ======== GLOBAL CALLBACK FUNCTIONS ========
// freenect depth callback: stores the raw depth frame, converted through the
// sensor's raw->range look-up table, into the latest CObservation3DRangeScan.
void depth_cb(freenect_device *dev, void *v_depth, uint32_t timestamp)
{
	// Recover the owning CKinect wrapper stored as the freenect user pointer:
	CKinect *kinect = reinterpret_cast<CKinect*>(freenect_get_user(dev));
	const freenect_frame_mode mode = freenect_get_current_video_mode(dev);
	uint16_t *raw_depth = reinterpret_cast<uint16_t *>(v_depth);

	// The timestamp field is updated at the very end:
	CObservation3DRangeScan &obs = kinect->internal_latest_obs();
	// Guard the shared observation against concurrent access:
	mrpt::synch::CCriticalSectionLocker lock( &kinect->internal_latest_obs_cs() );

	obs.hasRangeImage = true;
	obs.range_is_depth = true;

#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.enter("depth_cb alloc");
#endif
	// This method will try to exploit memory pooling if possible:
	obs.rangeImage_setSize(mode.height, mode.width);
#ifdef KINECT_PROFILE_MEM_ALLOC
	alloc_tim.leave("depth_cb alloc");
#endif

	// Look-up table mapping raw sensor readings to range values:
	const CKinect::TDepth2RangeArray &raw2range = kinect->getRawDepth2RangeConversion();
	for (int row = 0; row < mode.height; ++row)
	{
		for (int col = 0; col < mode.width; ++col)
		{
			// Quickly save the depth as it comes from the sensor; it will be
			// transformed later on in getNextObservation():
			const uint16_t raw_val = *raw_depth++;
			obs.rangeImage.coeffRef(row,col) = raw2range[raw_val & KINECT_RANGES_TABLE_MASK];
		}
	}
	kinect->internal_tim_latest_depth() = timestamp;
}
示例3: render
/*---------------------------------------------------------------
render
---------------------------------------------------------------*/
// Renders this viewport into the current OpenGL context.
// render_width/render_height: pixel size of the parent render target; used
// to resolve this viewport's relative/absolute placement fields.
// NOTE(review): this example snippet is truncated (see the trailing
// "omitted" marker); the try block's matching catch is not shown here.
void COpenGLViewport::render( const int render_width, const int render_height ) const
{
#if MRPT_HAS_OPENGL_GLUT
const CRenderizable *it = NULL; // Declared here for usage in the "catch"
try
{
// Change viewport:
// -------------------------------------------
// Placement fields semantics: value>1 -> absolute pixel coordinate;
// value<0 -> offset from the opposite edge; otherwise a [0,1] fraction
// of the render target size.
const GLint vx = m_view_x>1 ? GLint(m_view_x) : ( m_view_x<0 ? GLint(render_width+m_view_x) : GLint( render_width * m_view_x ) );
const GLint vy = m_view_y>1 ? GLint(m_view_y) : ( m_view_y<0 ? GLint(render_height+m_view_y) : GLint( render_height * m_view_y ) );
GLint vw;
if (m_view_width>1) // >1 -> absolute pixels:
vw = GLint(m_view_width);
else if (m_view_width<0)
{ // Negative numbers: Specify the right side coordinates instead of the width:
if (m_view_width>=-1)
vw = GLint( -render_width * m_view_width - vx +1 );
else vw = GLint( -m_view_width - vx + 1 );
}
else // A factor:
{
vw = GLint( render_width * m_view_width );
}
GLint vh;
if (m_view_height>1) // >1 -> absolute pixels:
vh = GLint(m_view_height);
else if (m_view_height<0)
{ // Negative numbers: Specify the right side coordinates instead of the width:
if (m_view_height>=-1)
vh = GLint( -render_height * m_view_height - vy + 1);
else vh = GLint( -m_view_height - vy +1 );
}
else // A factor:
vh = GLint( render_height * m_view_height );
glViewport(vx,vy, vw, vh );
// Clear depth&/color buffers:
// -------------------------------------------
m_lastProjMat.viewport_width = vw;
m_lastProjMat.viewport_height = vh;
// Restrict the clear operations to this viewport's rectangle only:
glScissor(vx,vy,vw,vh);
glEnable(GL_SCISSOR_TEST);
if ( !m_isTransparent )
{ // Clear color & depth buffers:
// Save?
GLdouble old_colors[4];
if (m_custom_backgb_color)
{
// Save the current clear color so it can be restored after clearing:
glGetDoublev(GL_COLOR_CLEAR_VALUE, old_colors );
glClearColor(m_background_color.R,m_background_color.G,m_background_color.B,m_background_color.A);
}
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_ACCUM_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
// Restore old colors:
if (m_custom_backgb_color)
glClearColor(old_colors[0],old_colors[1],old_colors[2],old_colors[3]);
}
else
{ // Clear depth buffer only:
glClear(GL_DEPTH_BUFFER_BIT);
}
glDisable(GL_SCISSOR_TEST);
// If we are in "image mode", rendering is much simpler: just set
// ortho projection and render the image quad:
if (m_isImageView)
{
#if defined(OPENGLVIEWPORT_ENABLE_TIMEPROFILING)
glv_timlog.enter("COpenGLViewport::render imageview");
#endif
// "Image mode" rendering:
// -----------------------------------
if (m_imageview_img) // should be ALWAYS true, but just in case!
{
// Note: The following code is inspired in the implementations:
// - libcvd, by Edward Rosten http://www.edwardrosten.com/cvd/
// - PTAM, by Klein & Murray http://www.robots.ox.ac.uk/~gk/PTAM/
const mrpt::utils::CImage *img = m_imageview_img.pointer();
const int img_w = img->getWidth();
const int img_h = img->getHeight();
if (img_w!=0 && img_h!=0)
{
// Prepare an ortho projection:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// Need to adjust the aspect ratio?
//......... (remainder of this method omitted in this example) .........
示例4: Test_KinectOnlineOffline
//......... (part of the code omitted here) .........
// Fragment of Test_KinectOnlineOffline's main loop: polls the grabbing
// thread for a fresh observation and updates the 3D visualization window.
// NOTE(review): this snippet is truncated at both ends; braces do not
// balance within the visible code.
{
CObservation3DRangeScanPtr newObs = thrPar.new_obs.get();
// Only react when the shared slot holds a valid, not-yet-seen observation:
if (newObs && newObs->timestamp!=INVALID_TIMESTAMP &&
newObs->timestamp!=last_obs_tim )
{
// It IS a new observation:
last_obs_tim = newObs->timestamp;
// Update visualization ---------------------------------------
win3D.get3DSceneAndLock();
// Estimated grabbing rate:
win3D.addTextMessage(-350,-13, format("Timestamp: %s", mrpt::system::dateTimeLocalToString(last_obs_tim).c_str()), TColorf(0.6,0.6,0.6),"mono",10,mrpt::opengl::FILL, 100);
win3D.addTextMessage(-100,-30, format("%.02f Hz", thrPar.Hz ), TColorf(1,1,1),"mono",10,mrpt::opengl::FILL, 101);
// Show intensity image:
if (newObs->hasIntensityImage )
{
viewInt->setImageView(newObs->intensityImage); // This is not "_fast" since the intensity image may be needed later on.
}
win3D.unlockAccess3DScene();
// -------------------------------------------------------
// Create 3D points from RGB+D data
//
// There are several methods to do this.
// Switch the #if's to select among the options:
// See also: http://www.mrpt.org/Generating_3D_point_clouds_from_RGB_D_observations
// -------------------------------------------------------
if (newObs->hasRangeImage)
{
// Profiler shared across iterations to accumulate timing stats:
static mrpt::utils::CTimeLogger logger;
logger.enter("RGBD->3D");
// Pathway: RGB+D --> PCL <PointXYZ> --> XYZ opengl
#if 0
static pcl::PointCloud<pcl::PointXYZ> cloud;
logger.enter("RGBD->3D.projectInto");
newObs->project3DPointsFromDepthImageInto(cloud, false /* without obs.sensorPose */);
logger.leave("RGBD->3D.projectInto");
win3D.get3DSceneAndLock();
logger.enter("RGBD->3D.load in OpenGL");
gl_points->loadFromPointsMap(&cloud);
logger.leave("RGBD->3D.load in OpenGL");
win3D.unlockAccess3DScene();
#endif
// Pathway: RGB+D --> PCL <PointXYZRGB> --> XYZ+RGB opengl
#if 0
static pcl::PointCloud<pcl::PointXYZRGB> cloud;
logger.enter("RGBD->3D.projectInto");
newObs->project3DPointsFromDepthImageInto(cloud, false /* without obs.sensorPose */);
logger.leave("RGBD->3D.projectInto");
win3D.get3DSceneAndLock();
logger.enter("RGBD->3D.load in OpenGL");
gl_points->loadFromPointsMap(&cloud);
logger.leave("RGBD->3D.load in OpenGL");
win3D.unlockAccess3DScene();
#endif
// Pathway: RGB+D --> XYZ+RGB opengl
#if 1
win3D.get3DSceneAndLock();