

C++ NODELET_ERROR Function Code Examples

This article collects typical usage examples of the C++ NODELET_ERROR function. If you have been wondering how NODELET_ERROR is used in practice — what arguments it takes and what real calls look like in context — the curated examples below should help.


A total of 15 NODELET_ERROR code examples are shown below, sorted by popularity by default.
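Before the collected examples, here is a minimal self-contained sketch of the typical context in which NODELET_ERROR appears: a class derived from nodelet::Nodelet, exported as a pluginlib plugin. The class name, namespace, and parameter name below are placeholders chosen for illustration only and are not taken from the projects cited in the examples.

#include <nodelet/nodelet.h>
#include <pluginlib/class_list_macros.h>
#include <ros/ros.h>

namespace demo
{
class MinimalNodelet : public nodelet::Nodelet
{
public:
  virtual void onInit()
  {
    ros::NodeHandle& pnh = getPrivateNodeHandle();
    std::string config_path;
    pnh.param("config", config_path, std::string(""));
    if (config_path.empty())
    {
      // Same printf-style formatting as ROS_ERROR; the output is additionally
      // prefixed with the nodelet name in the nodelet manager's log.
      NODELET_ERROR("~config is not set, nodelet \"%s\" cannot start.",
                    getName().c_str());
      return;
    }
  }
};
}  // namespace demo

PLUGINLIB_EXPORT_CLASS(demo::MinimalNodelet, nodelet::Nodelet)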

Example 1: callback

	void callback(const sensor_msgs::ImageConstPtr& depth)
	{
		if(depth->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1)!=0 &&
		   depth->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1)!=0 &&
		   depth->encoding.compare(sensor_msgs::image_encodings::MONO16)!=0)
		{
			NODELET_ERROR("Input type depth=32FC1,16UC1,MONO16");
			return;
		}

		if(pub_.getNumSubscribers())
		{
			// Only undistort when the image size matches the distortion model size
			if(depth->width == model_.getWidth() && depth->height == model_.getHeight())
			{
				cv_bridge::CvImagePtr imageDepthPtr = cv_bridge::toCvCopy(depth);
				model_.undistort(imageDepthPtr->image);
				pub_.publish(imageDepthPtr->toImageMsg());
			}
			else
			{
				NODELET_ERROR("Input depth image size (%dx%d) and distortion model "
						"size (%dx%d) don't match! Cannot undistort image.",
						depth->width, depth->height,
						model_.getWidth(), model_.getHeight());
			}
		}
	}
Developer: introlab, Project: rtabmap_ros, Lines: 27, Source: undistort_depth.cpp

Example 2: onInit

	virtual void onInit()
	{
		ros::NodeHandle & nh = getNodeHandle();
		ros::NodeHandle & pnh = getPrivateNodeHandle();

		std::string modelPath;
		pnh.param("model", modelPath, modelPath);

		if(modelPath.empty())
		{
			NODELET_ERROR("undistort_depth: \"model\" parameter should be set!");
		}

		model_.load(modelPath);
		if(!model_.isValid())
		{
			NODELET_ERROR("Loaded distortion model from \"%s\" is not valid!", modelPath.c_str());
		}
		else
		{
			image_transport::ImageTransport it(nh);
			sub_ = it.subscribe("depth", 1, &UndistortDepth::callback, this);
			pub_ = it.advertise(uFormat("%s_undistorted", nh.resolveName("depth").c_str()), 1);
		}
	}
Developer: introlab, Project: rtabmap_ros, Lines: 25, Source: undistort_depth.cpp

Example 3: NODELET_DEBUG

  bool PolygonPointsSampler::isValidMessage(
    const jsk_recognition_msgs::PolygonArray::ConstPtr& polygon_msg,
    const jsk_recognition_msgs::ModelCoefficientsArray::ConstPtr& coefficients_msg)
  {
    if (polygon_msg->polygons.size() == 0) {
      NODELET_DEBUG("empty polygons");
      return false;
    }
    if (coefficients_msg->coefficients.size() != polygon_msg->polygons.size()) {
      NODELET_ERROR("The size of coefficients and polygons are not same");
      return false;
    }

    std::string frame_id = polygon_msg->header.frame_id;
    for (size_t i = 0; i < polygon_msg->polygons.size(); i++) {
      if (frame_id != polygon_msg->polygons[i].header.frame_id) {
        NODELET_ERROR("Frame id of polygon is not same: %s, %s",
                      frame_id.c_str(),
                      polygon_msg->polygons[i].header.frame_id.c_str());
        return false;
      }
    }
    for (size_t i = 0; i < coefficients_msg->coefficients.size(); i++) {
      if (frame_id != coefficients_msg->coefficients[i].header.frame_id) {
        NODELET_ERROR("Frame id of coefficient is not same: %s, %s",
                      frame_id.c_str(),
                      coefficients_msg->coefficients[i].header.frame_id.c_str());
        return false;
      }
    }
    return true;
  }
Developer: YoheiKakiuchi, Project: jsk_recognition, Lines: 32, Source: polygon_points_sampler_nodelet.cpp

Example 4: onInit

  virtual void onInit()
  {
    nh_ = getNodeHandle();
    it_ = boost::shared_ptr<image_transport::ImageTransport>(new image_transport::ImageTransport(nh_));
    local_nh_ = ros::NodeHandle("~");

    local_nh_.param("debug_view", debug_view_, false);
    subscriber_count_ = 0;
    prev_stamp_ = ros::Time(0, 0);

    image_transport::SubscriberStatusCallback img_connect_cb    = boost::bind(&FaceDetectionNodelet::img_connectCb, this, _1);
    image_transport::SubscriberStatusCallback img_disconnect_cb = boost::bind(&FaceDetectionNodelet::img_disconnectCb, this, _1);
    ros::SubscriberStatusCallback msg_connect_cb    = boost::bind(&FaceDetectionNodelet::msg_connectCb, this, _1);
    ros::SubscriberStatusCallback msg_disconnect_cb = boost::bind(&FaceDetectionNodelet::msg_disconnectCb, this, _1);
    img_pub_ = image_transport::ImageTransport(local_nh_).advertise("image", 1, img_connect_cb, img_disconnect_cb);
    msg_pub_ = local_nh_.advertise<opencv_apps::FaceArrayStamped>("faces", 1, msg_connect_cb, msg_disconnect_cb);
        
    if( debug_view_ ) {
      subscriber_count_++;
    }
    std::string face_cascade_name, eyes_cascade_name;
    local_nh_.param("face_cascade_name", face_cascade_name, std::string("/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml"));
    local_nh_.param("eyes_cascade_name", eyes_cascade_name, std::string("/usr/share/opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml"));
        
    if( !face_cascade_.load( face_cascade_name ) ){ NODELET_ERROR("--Error loading %s", face_cascade_name.c_str()); };
    if( !eyes_cascade_.load( eyes_cascade_name ) ){ NODELET_ERROR("--Error loading %s", eyes_cascade_name.c_str()); };

    dynamic_reconfigure::Server<face_detection::FaceDetectionConfig>::CallbackType f =
      boost::bind(&FaceDetectionNodelet::reconfigureCallback, this, _1, _2);
    srv.setCallback(f);
  }
Developer: tuloski, Project: SherpaHighLevel, Lines: 31, Source: face_detection_nodelet.cpp

Example 5: NODELET_ERROR

void
jsk_pcl_ros::Filter::input_indices_callback (const PointCloud2::ConstPtr &cloud, const PointIndicesConstPtr &indices)
{
  // No subscribers, no work
  if (pub_output_.getNumSubscribers () <= 0)
    return;

  // If cloud is given, check if it's valid
  if (!isValid (cloud))
  {
    NODELET_ERROR ("[%s::input_indices_callback] Invalid input!", getName ().c_str ());
    return;
  }
  // If indices are given, check if they are valid
  if (indices && !isValid (indices))
  {
    NODELET_ERROR ("[%s::input_indices_callback] Invalid indices!", getName ().c_str ());
    return;
  }

  /// DEBUG
  if (indices)
    NODELET_DEBUG ("[%s::input_indices_callback]\n"
                   "                                 - PointCloud with %d data points (%s), stamp %f, and frame %s on topic %s received.\n"
                   "                                 - PointIndices with %zu values, stamp %f, and frame %s on topic %s received.",
                   getName ().c_str (), 
                   cloud->width * cloud->height, pcl::getFieldsList (*cloud).c_str (), cloud->header.stamp.toSec (), cloud->header.frame_id.c_str (), pnh_->resolveName ("input").c_str (),
                   indices->indices.size (), indices->header.stamp.toSec (), indices->header.frame_id.c_str (), pnh_->resolveName ("indices").c_str ());
  else
    NODELET_DEBUG ("[%s::input_indices_callback] PointCloud with %d data points and frame %s on topic %s received.", getName ().c_str (), cloud->width * cloud->height, cloud->header.frame_id.c_str (), pnh_->resolveName ("input").c_str ());
  ///

  // Check whether the user has given a different input TF frame
  tf_input_orig_frame_ = cloud->header.frame_id;
  PointCloud2::ConstPtr cloud_tf;
  if (!tf_input_frame_.empty () && cloud->header.frame_id != tf_input_frame_)
  {
    NODELET_DEBUG ("[%s::input_indices_callback] Transforming input dataset from %s to %s.", getName ().c_str (), cloud->header.frame_id.c_str (), tf_input_frame_.c_str ());
    // Save the original frame ID
    // Convert the cloud into the different frame
    PointCloud2 cloud_transformed;
    tf_listener_.waitForTransform(cloud->header.frame_id, tf_input_frame_, cloud->header.stamp, ros::Duration(5.0)); 
    if (!pcl_ros::transformPointCloud (tf_input_frame_, *cloud, cloud_transformed, tf_listener_))
    {
      NODELET_ERROR ("[%s::input_indices_callback] Error converting input dataset from %s to %s.", getName ().c_str (), cloud->header.frame_id.c_str (), tf_input_frame_.c_str ());
      return;
    }
    cloud_tf = boost::make_shared<PointCloud2> (cloud_transformed);
  }
  else
    cloud_tf = cloud;

  // Need setInputCloud () here because we have to extract x/y/z
  IndicesPtr vindices;
  if (indices)
    vindices.reset (new std::vector<int> (indices->indices));

  computePublish (cloud_tf, vindices);
}
Developer: HiroyukiMikita, Project: jsk_recognition, Lines: 59, Source: filter.cpp

Example 6: NODELET_ERROR

 void CoreStatus::exit()
 {
   if (service::waitForService ("roah_rsbb_shutdown", 500)) {
     std_srvs::Empty s;
     if (! service::call ("roah_rsbb_shutdown", s)) {
       NODELET_ERROR ("Error calling roah_rsbb_shutdown service");
     }
   }
   else {
     NODELET_ERROR ("Could not find roah_rsbb_shutdown service");
   }
 }
Developer: pmiraldo, Project: at_home_rsbb, Lines: 12, Source: core_status.cpp

Example 7: NODELET_ERROR

  void TiltLaserListener::onInit()
  {
    DiagnosticNodelet::onInit();

    if (pnh_->hasParam("joint_name")) {
      pnh_->getParam("joint_name", joint_name_);
    }
    else {
      NODELET_ERROR("no ~joint_state is specified");
      return;
    }
    pnh_->param("overwrap_angle", overwrap_angle_, 0.0);
    std::string laser_type;
    pnh_->param("laser_type", laser_type, std::string("tilt_half_down"));
    if (laser_type == "tilt_half_up") {
      laser_type_ = TILT_HALF_UP;
    }
    else if (laser_type == "tilt_half_down") {
      laser_type_ = TILT_HALF_DOWN;
    }
    else if (laser_type == "tilt") {
      laser_type_ = TILT;
    }
    else if (laser_type == "infinite_spindle") {
      laser_type_ = INFINITE_SPINDLE;
    }
    else if (laser_type == "infinite_spindle_half") {
      laser_type_ = INFINITE_SPINDLE_HALF;
    }
    else {
      NODELET_ERROR("unknown ~laser_type: %s", laser_type.c_str());
      return;
    }
    pnh_->param("use_laser_assembler", use_laser_assembler_, false);
    if (use_laser_assembler_) {
      assemble_cloud_srv_
        = pnh_->serviceClient<laser_assembler::AssembleScans2>("assemble_scans2");
      cloud_pub_
        = advertise<sensor_msgs::PointCloud2>(*pnh_, "output_cloud", 1);
    }
    prev_angle_ = 0;
    prev_velocity_ = 0;
    start_time_ = ros::Time::now();
    clear_cache_service_ = pnh_->advertiseService(
      "clear_cache", &TiltLaserListener::clearCacheCallback,
      this);
    trigger_pub_ = advertise<jsk_recognition_msgs::TimeRange>(*pnh_, "output", 1);
    
  }
Developer: otsubo, Project: jsk_recognition, Lines: 49, Source: tilt_laser_listener_nodelet.cpp

Example 8: lock

 void CloudOnPlane::predicate(const sensor_msgs::PointCloud2::ConstPtr& cloud_msg,
                              const jsk_recognition_msgs::PolygonArray::ConstPtr& polygon_msg)
 {
   boost::mutex::scoped_lock lock(mutex_);
   vital_checker_->poke();
   // check header
   if (!jsk_recognition_utils::isSameFrameId(*cloud_msg, *polygon_msg)) {
     NODELET_ERROR("frame_id does not match: cloud: %s, polygon: %s",
                       cloud_msg->header.frame_id.c_str(), polygon_msg->header.frame_id.c_str());
     return;
   }
   pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
   pcl::fromROSMsg(*cloud_msg, *cloud);
   // convert jsk_recognition_msgs::PolygonArray to jsk_recognition_utils::Polygon
   std::vector<jsk_recognition_utils::ConvexPolygon::Ptr> polygons;
   for (size_t i = 0; i < polygon_msg->polygons.size(); i++) {
     polygons.push_back(jsk_recognition_utils::ConvexPolygon::fromROSMsgPtr(polygon_msg->polygons[i].polygon));
   }
   
   for (size_t i = 0; i < polygon_msg->polygons.size(); i++) {
     jsk_recognition_utils::ConvexPolygon::Ptr poly = polygons[i];
     for (size_t j = 0; j < cloud->points.size(); j++) {
       Eigen::Vector3f p = cloud->points[j].getVector3fMap();
       if (poly->distanceSmallerThan(p, distance_thr_)) {
         buffer_->addValue(false);
         publishPredicate(cloud_msg->header, !buffer_->isAllTrueFilled());
         return;
       }
     }
   }
   buffer_->addValue(true);
   publishPredicate(cloud_msg->header, !buffer_->isAllTrueFilled());
 }
Developer: YoheiKakiuchi, Project: jsk_recognition, Lines: 33, Source: cloud_on_plane_nodelet.cpp

Example 9: lock

 void DepthCalibration::calibrate(
     const sensor_msgs::Image::ConstPtr& msg,
     const sensor_msgs::CameraInfo::ConstPtr& camera_info)
 {
   boost::mutex::scoped_lock lock(mutex_);
   cv_bridge::CvImagePtr cv_ptr;
   try
   {
     cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::TYPE_32FC1);
   }
   catch (cv_bridge::Exception& e)
   {
     NODELET_ERROR("cv_bridge exception: %s", e.what());
     return;
   }
   cv::Mat image = cv_ptr->image;
   cv::Mat output_image = image.clone();
   double cu = camera_info->P[2];
   double cv = camera_info->P[6];
   for(int v = 0; v < image.rows; v++) {
     for(int u = 0; u < image.cols; u++) {
       float z = image.at<float>(v, u);
       if (isnan(z)) {
         output_image.at<float>(v, u) = z;
       }
       else {
         output_image.at<float>(v, u) = applyModel(z, u, v, cu, cv);
       }
       //NODELET_INFO("z: %f", z);
     }
   }
   sensor_msgs::Image::Ptr ros_image = cv_bridge::CvImage(msg->header, "32FC1", output_image).toImageMsg();
   pub_.publish(ros_image);
 }
Developer: otsubo, Project: jsk_recognition, Lines: 34, Source: depth_calibration_nodelet.cpp

Example 10: NODELET_ERROR

  void PlaneSupportedCuboidEstimator::updateParticlePolygonRelationship(
    ParticleCloud::Ptr particles)
  {
    if (latest_polygon_msg_->polygons.size() == 0) {
      NODELET_ERROR("no valid polygons, skip update relationship");
      return;
    }

    // The order of convexes and polygons_ should be same
    // because it is inside of critical section.
    std::vector<ConvexPolygon::Ptr> convexes(latest_polygon_msg_->polygons.size());
    for (size_t i = 0; i < latest_polygon_msg_->polygons.size(); i++) {
      ConvexPolygon::Ptr polygon = ConvexPolygon::fromROSMsgPtr(latest_polygon_msg_->polygons[i].polygon);
      polygon->decomposeToTriangles();
      convexes[i] = polygon;
    }

#ifdef _OPENMP
#pragma omp parallel for
#endif
    // Second, compute distances and assign polygons.
    for (size_t i = 0; i < particles->points.size(); i++) {
      size_t nearest_index = getNearestPolygon(particles->points[i], convexes);
      //particles->points[i].plane = polygons[nearest_index];
      particles->points[i].plane_index = (int)nearest_index;
    }
  }
Developer: rkoyama1623, Project: jsk_recognition, Lines: 27, Source: plane_supported_cuboid_estimator_nodelet.cpp

Example 11: lock

 void ProjectImagePoint::project(
   const geometry_msgs::PointStamped::ConstPtr& msg)
 {
   vital_checker_->poke();
   boost::mutex::scoped_lock lock(mutex_);
   if (!camera_info_) {
     NODELET_WARN(
       "[ProjectImagePoint::project] camera info is not yet available");
     return;
   }
   image_geometry::PinholeCameraModel model;
   model.fromCameraInfo(camera_info_);
   cv::Point3d ray = model.projectPixelTo3dRay(
     cv::Point2d(msg->point.x, msg->point.y));
   geometry_msgs::Vector3Stamped vector;
    vector.header = msg->header;
    // override the frame so the vector is expressed in the camera frame
    vector.header.frame_id = camera_info_->header.frame_id;
   vector.vector.x = ray.x;
   vector.vector.y = ray.y;
   vector.vector.z = ray.z;
   pub_vector_.publish(vector);
   if (ray.z == 0.0) {
     NODELET_ERROR("Z value of projected ray is 0");
     return;
   }
   double alpha = z_ / ray.z;
   geometry_msgs::PointStamped point;
   point.header = msg->header;
   point.header.frame_id = camera_info_->header.frame_id;
   point.point.x = ray.x * alpha;
   point.point.y = ray.y * alpha;
   point.point.z = ray.z * alpha;
   pub_.publish(point);
   
 }
Developer: YoheiKakiuchi, Project: jsk_recognition, Lines: 35, Source: project_image_point.cpp

Example 12: if

 void LabDecomposer::decompose(
   const sensor_msgs::Image::ConstPtr& image_msg)
 {
   cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(
     image_msg, image_msg->encoding);
   cv::Mat image = cv_ptr->image;
   cv::Mat lab_image;
   std::vector<cv::Mat> lab_planes;
   if (image_msg->encoding == sensor_msgs::image_encodings::BGR8) {
     cv::cvtColor(image, lab_image, CV_BGR2Lab);
   }
   else if (image_msg->encoding == sensor_msgs::image_encodings::RGB8) {
     cv::cvtColor(image, lab_image, CV_RGB2Lab);
   }
   else {
     NODELET_ERROR("unsupported format to Lab: %s", image_msg->encoding.c_str());
     return;
   }
   cv::split(lab_image, lab_planes);
   cv::Mat l = lab_planes[0];
   cv::Mat a = lab_planes[1];
   cv::Mat b = lab_planes[2];
   pub_l_.publish(cv_bridge::CvImage(
                    image_msg->header,
                    sensor_msgs::image_encodings::MONO8,
                    l).toImageMsg());
   pub_a_.publish(cv_bridge::CvImage(
                    image_msg->header,
                    sensor_msgs::image_encodings::MONO8,
                    a).toImageMsg());
   pub_b_.publish(cv_bridge::CvImage(
                    image_msg->header,
                    sensor_msgs::image_encodings::MONO8,
                    b).toImageMsg());
 }
Developer: otsubo, Project: jsk_recognition, Lines: 35, Source: lab_decomposer.cpp

Example 13: NODELET_ERROR

 void NormalDirectionFilter::onInit()
 {
   DiagnosticNodelet::onInit();
   pnh_->param("use_imu", use_imu_, false);
   if (!use_imu_) {
     std::vector<double> direction;
     if (!jsk_topic_tools::readVectorParameter(*pnh_, "direction", direction)) {
       NODELET_ERROR("You need to specify ~direction");
       return;
     }
     jsk_recognition_utils::pointFromVectorToVector<std::vector<double>, Eigen::Vector3f>(
     direction, static_direction_);
   }
   else {
     tf_listener_ = TfListenerSingleton::getInstance();
   }
   
   srv_ = boost::make_shared <dynamic_reconfigure::Server<Config> > (*pnh_);
   dynamic_reconfigure::Server<Config>::CallbackType f =
     boost::bind (
       &NormalDirectionFilter::configCallback, this, _1, _2);
   srv_->setCallback (f);
   pnh_->param("queue_size", queue_size_, 200);
   pub_ = advertise<PCLIndicesMsg>(*pnh_, "output", 1);
   onInitPostProcess();
 }
Developer: knorth55, Project: jsk_recognition, Lines: 26, Source: normal_direction_filter_nodelet.cpp

Example 14: switch

void DriverNodelet::publishRgbImage(const ImageBuffer& image, ros::Time time) const
{
  sensor_msgs::ImagePtr rgb_msg = boost::make_shared<sensor_msgs::Image >();
  rgb_msg->header.stamp = time;
  rgb_msg->header.frame_id = rgb_frame_id_;
  rgb_msg->height = image.metadata.height;
  rgb_msg->width = image.metadata.width;
  switch(image.metadata.video_format) {
    case FREENECT_VIDEO_RGB:
      rgb_msg->encoding = sensor_msgs::image_encodings::RGB8;
      rgb_msg->step = rgb_msg->width * 3;
      break;
    case FREENECT_VIDEO_BAYER:
      rgb_msg->encoding = sensor_msgs::image_encodings::BAYER_GRBG8;
      rgb_msg->step = rgb_msg->width;
      break;
    case FREENECT_VIDEO_YUV_RGB:
      rgb_msg->encoding = sensor_msgs::image_encodings::YUV422;
      rgb_msg->step = rgb_msg->width * 2;
      break;
    default:
      NODELET_ERROR("Unknown RGB image format received from libfreenect");
      // Unknown encoding -- don't publish
      return;
  }
  rgb_msg->data.resize(rgb_msg->height * rgb_msg->step);
  fillImage(image, reinterpret_cast<void*>(&rgb_msg->data[0]));
  
  pub_rgb_.publish(rgb_msg, getRgbCameraInfo(image, time));
  if (enable_rgb_diagnostics_)
      pub_rgb_freq_->tick();
}
Developer: jon-weisz, Project: freenect_stack, Lines: 32, Source: driver.cpp

Example 15: lock

void DriverNodelet::rgbConnectCb()
{
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  bool need_rgb = pub_rgb_.getNumSubscribers() > 0;
  
  if (need_rgb && !device_->isImageStreamRunning())
  {
    // Can't stream IR and RGB at the same time. Give RGB preference.
    if (device_->isIRStreamRunning())
    {
      NODELET_ERROR("Cannot stream RGB and IR at the same time. Streaming RGB only.");
      device_->stopIRStream();
    }
    
    device_->startImageStream();
    startSynchronization();
    time_stamp_ = ros::Time(0,0); // starting an additional stream blocks for a while, could upset watchdog
  }
  else if (!need_rgb && device_->isImageStreamRunning())
  {
    stopSynchronization();
    device_->stopImageStream();

    // Start IR if it's been blocked on RGB subscribers
    bool need_ir = pub_ir_.getNumSubscribers() > 0;
    if (need_ir && !device_->isIRStreamRunning())
    {
      device_->startIRStream();
      time_stamp_ = ros::Time(0,0);
    }
  }
}
Developer: jon-weisz, Project: freenect_stack, Lines: 32, Source: driver.cpp
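
A closing note, not taken from the examples above: besides the printf-style NODELET_ERROR shown throughout this page, nodelet/nodelet.h also defines stream-style and throttled variants that mirror the rosconsole macro family. A brief illustrative sketch (the message text, variables, and throttle period here are arbitrary):

  // Stream-style: arguments are chained with operator<<.
  NODELET_ERROR_STREAM("failed to parse config: " << config_path);
  // Throttled: logs at most once every 5 seconds even if called in a tight loop.
  NODELET_ERROR_THROTTLE(5.0, "sensor timeout, no data for %.1f seconds", elapsed);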


Note: The NODELET_ERROR function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors. For redistribution and use, refer to the corresponding project's license. Do not reproduce without permission.