This article collects typical usage examples of the C++ method image_geometry::PinholeCameraModel::projectPixelTo3dRay. If you are wondering what PinholeCameraModel::projectPixelTo3dRay does in C++, how to call it, or what real uses of it look like, the curated code examples below should help. You can also explore further usage examples of the containing class, image_geometry::PinholeCameraModel.
The following presents 12 code examples of the PinholeCameraModel::projectPixelTo3dRay method, sorted by popularity by default.
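Before the examples, here is a minimal, self-contained sketch of the typical call pattern (the function and variable names are illustrative assumptions, not taken from any example below): the model is filled from a sensor_msgs::CameraInfo message, the pixel is rectified if it comes from a raw image, and projectPixelTo3dRay returns a cv::Point3d ray through that pixel expressed in the camera's optical frame.
#include <image_geometry/pinhole_camera_model.h>
#include <sensor_msgs/CameraInfo.h>
#include <opencv2/core/core.hpp>
// Minimal sketch: back-project one pixel into a viewing ray.
cv::Point3d pixelToRay(const sensor_msgs::CameraInfo& info, const cv::Point2d& raw_pixel)
{
  image_geometry::PinholeCameraModel model;
  model.fromCameraInfo(info);                        // load intrinsics and distortion
  cv::Point2d rect = model.rectifyPoint(raw_pixel);  // only needed for unrectified images
  return model.projectPixelTo3dRay(rect);            // ray in the camera optical frame
}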
Example 1: getCameraRay
void getCameraRay(const image_geometry::PinholeCameraModel& model, const cv::Point2d& pt, cv::Point3d* ray)
{
cv::Point2d rect_point;
rect_point = model.rectifyPoint(pt);
ROS_DEBUG("Rect Point: %f, %f",rect_point.x,rect_point.y);
*ray = model.projectPixelTo3dRay(rect_point);
}
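Example 1 rectifies the pixel before projecting because projectPixelTo3dRay expects coordinates in the rectified image; passing a raw (distorted) pixel would produce a ray that does not actually pass through the observed feature. If the pixel already comes from a rectified image (e.g. an image_rect topic), the rectification step should be skipped.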
Example 2: generateStartPoints
void FindObjectOnPlane::generateStartPoints(
const cv::Point2f& point_2d,
const image_geometry::PinholeCameraModel& model,
const pcl::ModelCoefficients::Ptr& coefficients,
std::vector<cv::Point3f>& search_points_3d,
std::vector<cv::Point2f>& search_points_2d)
{
NODELET_INFO("generateStartPoints");
jsk_recognition_utils::Plane::Ptr plane
(new jsk_recognition_utils::Plane(coefficients->values));
cv::Point3d ray = model.projectPixelTo3dRay(point_2d);
Eigen::Vector3f projected_point = rayPlaneInteersect(ray, plane);
const double resolution_step = 0.01;
const int resolution = 5;
search_points_3d.clear();
search_points_2d.clear();
for (int i = - resolution; i < resolution; ++i) {
for (int j = - resolution; j < resolution; ++j) {
double x_diff = resolution_step * i;
double y_diff = resolution_step * j;
Eigen::Vector3f moved_point = projected_point + Eigen::Vector3f(x_diff, y_diff, 0);
Eigen::Vector3f projected_moved_point;
plane->project(moved_point, projected_moved_point);
cv::Point3f projected_moved_point_cv(projected_moved_point[0],
projected_moved_point[1],
projected_moved_point[2]);
search_points_3d.push_back(cv::Point3f(projected_moved_point_cv));
cv::Point2d p2d = model.project3dToPixel(projected_moved_point_cv);
search_points_2d.push_back(p2d);
}
}
}
Example 3: PointFromPixel
pcl::PointXYZ PointFromPixel(const cv::Point& pixel, const tf::Transform& cameraFrameToWorldFrame, image_geometry::PinholeCameraModel cam) {
cv::Point3d cameraRay = cam.projectPixelTo3dRay(pixel);
tf::Point worldCameraOrigin = cameraFrameToWorldFrame * tf::Vector3(0, 0, 0);
tf::Point worldCameraStep = cameraFrameToWorldFrame * tf::Vector3(cameraRay.x, cameraRay.y, cameraRay.z) - worldCameraOrigin;
double zScale = -worldCameraOrigin.z()/worldCameraStep.z();
tf::Point ret = worldCameraOrigin + zScale * worldCameraStep;
return pcl::PointXYZ(ret.x(), ret.y(), 0);
}
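A note on Example 3: with the camera origin O and the ray direction d both expressed in the world frame, the intersection with the ground plane z = 0 is found by solving O.z + t * d.z = 0, i.e. t = -O.z / d.z (zScale above), and the hit point is O + t * d. The returned pcl::PointXYZ sets z to 0 explicitly, which is only valid if the world frame really has its z axis normal to the ground.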
Example 4: rayPlaneInteersect
cv::Point2d FindObjectOnPlane::getUyEnd(
const cv::Point2d& ux_start,
const cv::Point2d& ux_end,
const image_geometry::PinholeCameraModel& model,
const jsk_recognition_utils::Plane::Ptr& plane)
{
cv::Point3d start_ray = model.projectPixelTo3dRay(ux_start);
cv::Point3d end_ray = model.projectPixelTo3dRay(ux_end);
Eigen::Vector3f ux_start_3d = rayPlaneInteersect(start_ray, plane);
Eigen::Vector3f ux_end_3d = rayPlaneInteersect(end_ray, plane);
Eigen::Vector3f ux_3d = ux_end_3d - ux_start_3d;
Eigen::Vector3f normal = plane->getNormal();
Eigen::Vector3f uy_3d = normal.cross(ux_3d).normalized();
Eigen::Vector3f uy_end_3d = ux_start_3d + uy_3d;
cv::Point2d uy_end = model.project3dToPixel(cv::Point3d(uy_end_3d[0],
uy_end_3d[1],
uy_end_3d[2]));
return uy_end;
}
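In Example 4, the cross product of the plane normal with the in-plane direction ux_3d yields, after normalization, a unit vector uy_3d that lies in the plane and is perpendicular to ux_3d; projecting ux_start_3d + uy_3d back with project3dToPixel therefore gives the pixel one unit step along the plane's local y axis.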
Example 5: project_uv_to_cloud_index
size_t project_uv_to_cloud_index(const pcl::PointCloud<PointT>& cloud, const cv::Point2d& image_point,
const image_geometry::PinholeCameraModel camera_model, double& distance)
{
// Assumes camera is at origin, pointed in the normal direction
size_t pt_pcl_index;
cv::Point3d pt_cv;
pt_cv = camera_model.projectPixelTo3dRay(image_point);
Eigen::Vector3f direction_eig(pt_cv.x, pt_cv.y, pt_cv.z);
Eigen::Vector3f origin_eig(0.0, 0.0, 0.0);
pt_pcl_index = closest_point_index_rayOMP(cloud, direction_eig, origin_eig, distance);
return (pt_pcl_index);
}
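Example 5 assumes the point cloud is already expressed in the camera's optical frame (camera at the origin, looking along +z), so the pixel ray from projectPixelTo3dRay can be compared directly against the cloud; closest_point_index_rayOMP is a helper from the same project, not part of image_geometry, and from its usage here it appears to return the index of the cloud point closest to that ray together with its distance.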
Example 6: projectWithEigen
cv::Mat projectWithEigen()
{
// Transform meshes into camera frame
// For each frame in vector
for (int frame = 0; frame < mMeshFrameIDs.size(); frame++)
{
// Lookup current transform
Eigen::Isometry3 transform;
transform = transforms.at(mMeshFrameIDs[frame]);
// Get copy of mesh for each frame
ap::Mesh* sourceMesh;
ap::Mesh* transformedMesh;
//std::cerr << "Getting frame " << frame << " : " << mMeshFrameIDs[frame] << std::endl;
MeshMap::iterator scene_i = scenes.find(mMeshFrameIDs[frame]);
if (scenes.end() == scene_i) { continue; }
sourceMesh = scene_i->second;
MeshMap::iterator scene_t = transformedScenes.find(mMeshFrameIDs[frame]);
if (transformedScenes.end() == scene_t) { continue; }
transformedMesh = scene_t->second;
// Transform mesh into camera frame
for (int i = 0; i < sourceMesh->vertices.size(); i++)
{
Eigen::Vector3 newVertex = transform * sourceMesh->vertices[i];
//std::cerr << mesh->vertices[i].transpose() << "\t->\t" << newVertex.transpose() << std::endl;
transformedMesh->vertices[i] = newVertex;
}
}
// For each pixel in camera image
cv::Mat robotImage = cv::Mat::zeros(mCameraModel.cameraInfo().height, mCameraModel.cameraInfo().width, CV_32F); // zero-filled so the "val == 0" check below means "no hit yet"
float* pixelPtr = (float*)robotImage.data;
float maxDepth = 0;
for (int v = 0; v < robotImage.rows; v++)
{
for (int u = 0; u < robotImage.cols; u++)
{
// Create a ray through the pixel
int pixelIdx = u + (v * robotImage.cols);
//std::cerr << "Pixel (" << u << "," << v << ")" << std::endl;
cv::Point2d pixel = cv::Point2d(u, v);
cv::Point3d cvRay = mCameraModel.projectPixelTo3dRay(pixel);
// Convert cvRay to ap::Ray
ap::Ray ray;
ray.point = Eigen::Vector3::Zero();
ray.vector.x() = cvRay.x; ray.vector.y() = cvRay.y; ray.vector.z() = cvRay.z;
ray.vector.normalize();
//std::cerr << ray.vector.transpose() << std::endl;
// For each frame in vector
for (int frame = 0; frame < mMeshFrameIDs.size(); frame++)
{
MeshMap::iterator scene_i = transformedScenes.find(mMeshFrameIDs[frame]);
if (transformedScenes.end() == scene_i)
{
continue;
}
ap::Mesh* mesh = scene_i->second;
// For each triangle in mesh
for (int i = 0; i < mesh->faces.size(); i++)
{
// Check for intersection. If finite, set distance
ap::Triangle triangle(mesh->vertices[mesh->faces[i].vertices[0]],
mesh->vertices[mesh->faces[i].vertices[1]],
mesh->vertices[mesh->faces[i].vertices[2]]);
Eigen::Vector3 intersection = ap::intersectRayTriangle(ray, triangle);
if (std::isfinite(intersection.x()))
{
float d = intersection.norm();
float val = pixelPtr[pixelIdx];
if (val == 0 || val > d)
{
pixelPtr[pixelIdx] = d;
}
if (d > maxDepth)
{
maxDepth = d;
}
}
}
}
}
}
// Return the matrix
if (maxDepth == 0) { maxDepth = 1;}
return robotImage/maxDepth;
}
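Example 6 is essentially a small software depth renderer: every pixel ray is intersected against all mesh triangles and the per-pixel minimum hit distance is kept, the same bookkeeping a z-buffer performs; dividing by maxDepth at the end simply rescales the result to [0, 1] for display. Because the ray is explicitly normalized, the stored values are Euclidean distances along the ray rather than z-depths.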
Example 7: image_callback
void FeatureTracker::image_callback(const sensor_msgs::ImageConstPtr& msg, const sensor_msgs::CameraInfoConstPtr& info_msg) {
//need pose data for each picture, need to publish a camera pose
ros::Time acquisition_time = msg->header.stamp;
geometry_msgs::PoseStamped basePose;
geometry_msgs::PoseStamped mapPose;
basePose.pose.orientation.w=1.0;
ros::Duration timeout(3);
basePose.header.frame_id="/base_link";
mapPose.header.frame_id="/map";
try {
tf_listener_.waitForTransform("/camera_1_link", "/map", acquisition_time, timeout);
tf_listener_.transformPose("/map", acquisition_time,basePose,"/camera_1_link",mapPose);
printf("pose #%d %f %f %f\n",pic_number++,mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
}
catch (tf::TransformException& ex) {
ROS_WARN("[map_maker] TF exception:\n%s", ex.what());
printf("[map_maker] TF exception:\n%s", ex.what());
return;
}
cam_model.fromCameraInfo(info_msg);
// printf("callback called\n");
try
{
// if you want to work with color images, change from mono8 to bgr8
if(image_rect==NULL){
image_rect = cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
last_image= cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
pyrA=cvCreateImage(cvSize(last_image->width+8,last_image->height/3.0), IPL_DEPTH_32F, 1);
pyrB=cvCloneImage(pyrA);
// printf("cloned image\n");
}
else{
//save the last image
cvCopy(image_rect,last_image);
cvCopy(bridge.imgMsgToCv(msg, "mono8"),image_rect);
// printf("copied image\n");
}
if(output_image==NULL){
output_image =cvCloneImage(image_rect);
}
if(eigImage==NULL){
eigImage =cvCloneImage(image_rect);
}
if(tempImage==NULL){
tempImage =cvCloneImage(image_rect);
}
}
catch (sensor_msgs::CvBridgeException& e)
{
ROS_ERROR("Could not convert from '%s' to 'mono8'.", msg->encoding.c_str());
return;
}
if(image_rect!=NULL) {
cvCopy(image_rect,output_image);
printf("got image\n");
track_features(mapPose);
//draw features on the image
for(int i=0;i<last_feature_count;i++){
CvPoint center=cvPoint((int)features[i].x,(int)features[i].y);
cvCircle(output_image,center,10,cvScalar(150),2);
char strbuf [10];
int n=sprintf(strbuf,"%d",current_feature_id[ i] );
std::string text=std::string(strbuf,n);
CvFont font;
cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX,1,1);
cvPutText(output_image,text.c_str(),cvPoint(center.x,center.y+20),&font,cvScalar(255));
cv::Point3d tempRay;
cv::Point2d tempPoint=cv::Point2d(features[i]);
cam_model.projectPixelTo3dRay(tempPoint,tempRay);
// printf("%f x %f y %f z\n",tempRay.x,tempRay.y,tempRay.z);
}
// featureList[0].print();
//determine error gradient
int min_features=10;
// printf("ypr %f %f %f\n",yaw,pitch,roll);
cv::Point3d error_sum=calc_error(min_features,0, 0, 0);
printf("total error is : %f\n",error_sum.x);
for(int i=0;i<featureList.size();i++){
//......... part of the code omitted .........
Example 8: track_features
void FeatureTracker::track_features(geometry_msgs::PoseStamped mapPose){
//set the initial number of features to the max number we want to find
int feature_count=num_features;
printf("pose %f %f %f\n",mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
int edge_pixels=5;
//check if there were features from the last image to keep tracking
if(last_feature_count>0){
//if there were call cvCalcOpticalFlowPyrLK();
//find matches between last good features and current image features
// store matches in featuresB
cvCalcOpticalFlowPyrLK(last_image,image_rect,pyrA,pyrB,features,featuresB, last_feature_count,cvSize(win_size,win_size) ,4,last_features_status,track_error, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,.3),0);
}
printf("got image flow\n");
// assign last_feature_id values for matched features and set the non matched spots to -1
//find new features and subpixel them
//I SHOULD ADD THE IMAGE FLOW VALUES AS FEATURES NOW BEFORE FINDING NEW FEATURES
//find all good features
cvGoodFeaturesToTrack(image_rect, eigImage, tempImage, features, &feature_count, quality_level, min_distance, NULL, block_size);
//subpixel good features
cvFindCornerSubPix(image_rect,features,feature_count,cvSize(win_size,win_size),cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
printf("subpixeled image\n");
//for all the features in features B, find their matches in the newly found features
//add all the matches to their correct featuremanager, for the non matching, make a new
//feature manager and add them to it
//for all features by now we need their ray and the robot pose at that location
//draw dots on image where features are
//set the feature ids to a control value
for(int i=0;i<num_features;i++){
current_feature_id[i]=-1;
}
for(int i=0;i<last_feature_count;i++){
//for the previously found features in list b
if(last_features_status[i]>0){
for(int j=0;j<feature_count;j++){
//for every feature found in this image
//determine if the two overlap in a meaningful way
int xdiff=featuresB[i].x-features[j].x;
int ydiff=featuresB[i].y-features[j].y;
//if the pixels are within some margin of eachother
if(sqrt(xdiff*xdiff + ydiff*ydiff)<pixel_tracking_margin){
//if they do set the current id for j to the id of i
current_feature_id[j]=last_feature_id[i];
printf("feature found %d %d",last_feature_id[i],i);
}
}
}
}
printf("assigned IDs image\n");
for(int i=0;i<feature_count;i++){
printf("looping\n");
if(current_feature_id[i]>=0){
printf("prev feature match\n");
//if we matched a previous feature
//add our new feature to the previous features list
cv::Point3d tempRay;
cv::Point2d tempPoint=cv::Point2d(features[i]);
cam_model.projectPixelTo3dRay(tempPoint,tempRay);
if(tempPoint.x> edge_pixels && tempPoint.x < last_image->width- edge_pixels &&
tempPoint.y> edge_pixels && tempPoint.y<last_image->height- edge_pixels){
featureList[current_feature_id[i]].add(RawFeature(mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation), tempPoint,tempRay));
}else{
current_feature_id[i]=-1;
}
}else{
printf("new feature \n");
cv::Point3d tempRay;
cv::Point2d tempPoint=cv::Point2d(features[i]);
cam_model.projectPixelTo3dRay(tempPoint,tempRay);
if(tempPoint.x> edge_pixels && tempPoint.x < last_image->width- edge_pixels &&
tempPoint.y> edge_pixels && tempPoint.y<last_image->height- edge_pixels){
printf("new good feature \n");
//if we didn't
//create a new feature group in the list
current_feature_id[i]=feature_number;
//add the new feature to the feature list
featureList.push_back(FeatureManager());
//......... part of the code omitted .........
Example 9: imageCb
//......... part of the code omitted .........
cv::Point2d right_pt(MySecond[0].x, MySecond[0].y);
cam_model_left.rectifyPoint(left_pt);
Matrix<float, 3, 1> Z;
Matrix<float, 6, 1> RES_KALMAN;
Z<<
MyFirst[0].x,
MySecond[0].x,
(MyFirst[0].y + MySecond[0].y) / 2.0f;
RES_KALMAN = kalman_xyz.getKalman(Z);
cout << "???" << endl;
cout << RES_KALMAN << endl;
// cout << cam_model_left.rectifyPoint(left_pt).x << "," << cam_model_left.rectifyPoint(left_pt).y << "rectified" << endl;
// cout << cam_model_right.rectifyPoint(right_pt).x << "," << cam_model_right.rectifyPoint(right_pt).y << "rectified" << endl;
// cout << "notcal :: " << MyFirst[0].y - MySecond[0].y << endl;
// cout << "calibr :: " << cam_model_left.rectifyPoint(left_pt).y - cam_model_right.rectifyPoint(right_pt).y << endl;
float leftpt_y = MyFirst[0].y;
Matrix<float, 2, 1> X_kalman = kalman_x.getKalman(leftpt_y);
// cout << "KALMAN" << kalman_x.getKalman_1(leftpt_y) << "," << leftpt_y << endl;;
geometry_msgs::Point drone1_msg;
drone1_msg.x = leftpt_y;//get_lpf(&lpf_x,10);
drone1_msg.y = X_kalman(0, 0); //get_lpf(&lpf_z,10);
drone1_msg.z = X_kalman(1, 0); //get_lpf(&lpf_y,5);
pub_drone[0].publish(drone1_msg);
cv::Point3d ptr_left = cam_model_left.projectPixelTo3dRay(left_pt);
cv::Point3d ptr_right = cam_model_right.projectPixelTo3dRay(right_pt);
// getKalman_1();
// cout << ptr_left.y - ptr_right.y << endl;
// cout << "left 3d :: " << ptr_left.x << "," << ptr_left.y << "," << ptr_left.z << endl;
// cout << "right 3d :: " << ptr_right.x << "," << ptr_right.y << "," << ptr_right.z << endl << endl;
ptr_left = cam_model_left.projectPixelTo3dRay(cam_model_left.rectifyPoint(left_pt));
ptr_right = cam_model_right.projectPixelTo3dRay(cam_model_right.rectifyPoint(right_pt));
// cout << ptr_left.y - ptr_right.y << endl;
// cout << "left 3d :: " << ptr_left.x << "," << ptr_left.y << "," << ptr_left.z << endl;
// cout << "right 3d :: " << ptr_right.x << "," << ptr_right.y << "," << ptr_right.z << endl;
}
// cout << MySecond[0].x << "," << MySecond[0].y << endl;
if (Ccount == 1) {
sort(MyFirst.begin(), MyFirst.end(), MyCompare);
sort(MySecond.begin(), MySecond.end(), MyCompare);
Dron_size = MyFirst.size();
for (int i = 0; i < Dron_size; i++) {
double a1, b1, c1;
//......... part of the code omitted .........
Example 10: depthCallback
void depthCallback(const sensor_msgs::ImageConstPtr& original_image, const sensor_msgs::CameraInfoConstPtr& info){
if(need_for_wall==2){
need_for_wall = 0;
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(original_image, enc::TYPE_16UC1);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("tutorialROSOpenCV::main.cpp::cv_bridge exception: %s", e.what());
return;
}
cam_model.fromCameraInfo(info);
Mat depth_image = cv_ptr->image;
float x = 0;
float y = 360;
float wall_height = 0;
geometry_msgs::PointStamped xy_point_stamped, pipe_point_stamped;
walls_detection::Walls walls_all_message;
for(int i = 100; i<600; i+=110){
wall_height = 0;
y = 360;
x = i;
unsigned short wall_depth;
while(wall_height < 0.05 && y > 0){
cv::Point2d uv_point(x,y);
unsigned short wall_depth;
wall_depth = depth_image.at<unsigned short>(y, x);
cv::Point3d xy_point;
xy_point = cam_model.projectPixelTo3dRay(uv_point);
xy_point = xy_point * wall_depth;
xy_point_stamped.header.frame_id = "/camera_rgb_optical_frame";
xy_point_stamped.header.stamp = ros::Time::now();
xy_point_stamped.point.x = 0.001*xy_point.x;
xy_point_stamped.point.y = 0.001*xy_point.y;
xy_point_stamped.point.z = 0.001*xy_point.z;
try {
tf_listener->waitForTransform("/pipe_link", "/camera_rgb_optical_frame", ros::Time::now(), ros::Duration(10.0) );
tf_listener->transformPoint("/pipe_link", xy_point_stamped, pipe_point_stamped);
}
catch (tf::TransformException ex) {
ROS_ERROR("%s",ex.what());
}
wall_height = pipe_point_stamped.point.z;
y = y - 120;
}
std::cout<<"Wall no. "<< i <<" - x: "<< pipe_point_stamped.point.x <<", y: "<< pipe_point_stamped.point.y <<", z: "<< pipe_point_stamped.point.z <<"\n";
if(wall_height < 0.05){
std::cout << "No wall found\n";
pipe_point_stamped.point.x = 0;
pipe_point_stamped.point.y = 0;
pipe_point_stamped.point.z = 0;
}
switch(i){
case 100:
walls_all_message.wall1 = pipe_point_stamped;
break;
case 210:
walls_all_message.wall2 = pipe_point_stamped;
break;
case 320:
walls_all_message.wall3 = pipe_point_stamped;
break;
case 430:
walls_all_message.wall4 = pipe_point_stamped;
break;
case 540:
walls_all_message.wall5 = pipe_point_stamped;
break;
}
}
walls_all.publish(walls_all_message);
}
}
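Examples 10 and 11 both follow the same RGB-D pattern: read the raw 16-bit depth value (millimeters in these examples), scale the pixel's 3D ray by it, convert to meters, and hand the point to TF. Below is a condensed sketch of just that back-projection step, assuming the usual image_geometry convention that the returned ray has z = 1; the function and variable names are illustrative, not taken from the examples.
#include <image_geometry/pinhole_camera_model.h>
#include <opencv2/core/core.hpp>
// Sketch: convert one depth-image pixel to a 3D point in the camera optical frame.
// Assumes a 16-bit depth image whose values are millimeters, as in Examples 10 and 11.
cv::Point3d depthPixelToPoint(const image_geometry::PinholeCameraModel& model,
                              const cv::Mat& depth_image, int u, int v)
{
  unsigned short depth_mm = depth_image.at<unsigned short>(v, u);  // row = v, col = u
  cv::Point3d ray = model.projectPixelTo3dRay(cv::Point2d(u, v));  // ray with z == 1
  return ray * (0.001 * depth_mm);                                 // millimeters -> meters
}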
Example 11: depthCallback
void depthCallback(const sensor_msgs::ImageConstPtr& original_image, const sensor_msgs::CameraInfoConstPtr& info)
{
if(is_robot_running==0 && balls_written==1 && ball_chosen==0){
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(original_image, sensor_msgs::image_encodings::TYPE_16UC1);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("tutorialROSOpenCV::main.cpp::cv_bridge exception: %s", e.what());
return;
}
cam_model.fromCameraInfo(info);
Mat depth_image = cv_ptr->image;
if(balls_written == 1){
for( size_t i = 0; i < circles_all.size(); i++ ){
if (pilki[i]!=0){
cv::Point2d uv_point(circles_all[i][0], circles_all[i][1]);
unsigned short ball_depth;
ball_depth = depth_image.at<unsigned short>(circles_all[i][1], circles_all[i][0])+20;
cv::Point3d xy_point;
xy_point = cam_model.projectPixelTo3dRay(uv_point);
xy_point = xy_point * ball_depth;
geometry_msgs::PointStamped xy_point_stamped, odom_point_stamped;
xy_point_stamped.header.frame_id = "/camera_rgb_optical_frame";
xy_point_stamped.header.stamp = ros::Time::now();
xy_point_stamped.point.x = 0.001*xy_point.x;
xy_point_stamped.point.y = 0.001*xy_point.y;
xy_point_stamped.point.z = 0.001*xy_point.z;
try {
tf_listener->waitForTransform("/base_link", "/camera_rgb_optical_frame", ros::Time::now(), ros::Duration(10.0) );
tf_listener->transformPoint("/base_link", xy_point_stamped, odom_point_stamped);
}
catch (tf::TransformException ex) {
ROS_ERROR("%s",ex.what());
}
if(odom_point_stamped.point.z > 0.05 || odom_point_stamped.point.z < 0.001){
std::cout << "Ball center at height: " << odom_point_stamped.point.z << ".\n";
std::cout << "Condensed balls: this is not a ball, it is too high or too low! \n";
pilki[i]=0;
}
}
}
depth_written = 1;
std::cout << "Computed the height for the condensed balls \n";
int closest_ball = choose_closest_ball();
if(closest_ball == -1){
std::cout << "NO BALL SELECTED \n";
send_no_balls();
}
if(closest_ball != -1){
int wybrana_x = circles_all[closest_ball][0];
int wybrana_y = circles_all[closest_ball][1];
int wybrana_z = circles_all[closest_ball][2];
ball_chosen = 1;
std::cout << "One ball selected \n";
last_circle[0] = wybrana_x;
last_circle[1] = wybrana_y;
last_circle[2] = wybrana_z;
wybrana.x = last_circle[0];
wybrana.y = last_circle[1];
wybrana.z = last_circle[2];
draw_balls = 1;
cv::Point2d uv_point(wybrana.x, wybrana.y);
unsigned short ball_depth;
ball_depth = depth_image.at<unsigned short>(wybrana.y,wybrana.x)+20;
geometry_msgs::Point message_selected;
cv::Point3d xy_point;
xy_point = cam_model.projectPixelTo3dRay(uv_point);
xy_point = xy_point * ball_depth;
geometry_msgs::PointStamped xy_point_stamped, odom_point_stamped;
xy_point_stamped.header.frame_id = "/camera_rgb_optical_frame";
xy_point_stamped.header.stamp = ros::Time::now();
xy_point_stamped.point.x = 0.001*xy_point.x;
xy_point_stamped.point.y = 0.001*xy_point.y;
xy_point_stamped.point.z = 0.001*xy_point.z;
try {
tf_listener->waitForTransform("/odom", "/camera_rgb_optical_frame", ros::Time::now(), ros::Duration(10.0) );
//......... part of the code omitted .........
Example 12: locator
void locator()
{
namedWindow("Tracking");
int hMin, hMax, sMin, sMax, vMin, vMax,area_min;
hMin = 0;
//hMax = 124; // night values/???
hMax = 255;
//sMin = 95;
sMin = 126;
sMax = 255;
//vMin = 139;
vMin = 173;
vMax = 255;
area_min = 100;
Mat smoothed, hsvImg, t_img;
createTrackbar("blob min area","Tracking" ,&area_min ,1000);
createTrackbar("Hue Min", "Tracking", &hMin, 255);
createTrackbar("Hue Max", "Tracking", &hMax, 255);
createTrackbar("Sat Min", "Tracking", &sMin, 255);
createTrackbar("Sat Max", "Tracking", &sMax, 255);
createTrackbar("Val Min", "Tracking", &vMin, 255);
createTrackbar("Val MaX", "Tracking", &vMax, 255);
while(ros::ok())
{
Mat source = imageB;
Mat copy = imageB.clone();
GaussianBlur(source, smoothed, Size(9,9), 4);
cvtColor(smoothed, hsvImg, CV_BGR2HSV);
inRange(hsvImg, Scalar(hMin, sMin, vMin), Scalar(hMax, sMax, vMax), t_img);
CBlobResult blob;
IplImage i_img = t_img;
blob = CBlobResult(&i_img,NULL,0);
int num_blobs = blob.GetNumBlobs();
blob.Filter(blob, B_INCLUDE, CBlobGetArea(), B_INSIDE, area_min, blob_area_absolute_max_);
num_blobs = blob.GetNumBlobs();
std::string reference_frame = "/virtual_table"; // Table frame at ball_radius above the actual table plane
tf::StampedTransform transform;
tf_.waitForTransform(reference_frame, model.tfFrame(), ros::Time(0), ros::Duration(0.5));
tf_.lookupTransform(reference_frame, model.tfFrame(), ros::Time(0), transform);
for(int i =0;i<num_blobs;i++)
{
CBlob* bl = blob.GetBlob(i);
Point2d uv(CBlobGetXCenter()(*bl), CBlobGetYCenter()(*bl));
//Use the width as the height
uv.y = bl->MinY() + (bl->MaxX() - bl->MinX()) * 0.5;
circle(copy,uv,50,Scalar(255,0,0),5);
cv::Point3d xyz;
model.projectPixelTo3dRay(uv, xyz);
// Intersect ray with plane in virtual table frame
//Origin of camera frame wrt virtual table frame
tf::Point P0 = transform.getOrigin();
//Point at end of unit ray wrt virtual table frame
tf::Point P1 = transform * tf::Point(xyz.x, xyz.y, xyz.z);
// Origin of virtual table frame
tf::Point V0 = tf::Point(0.0,0.0,0.0);
// normal to the table plane
tf::Vector3 n(0, 0, 1);
// finding scaling value
double scale = (n.dot(V0-P0))/(n.dot(P1-P0));
tf::Point ball_pos = P0 + (P1-P0)*scale;
cout <<ball_pos.x() << " " << ball_pos.y() << " " << ball_pos.z() <<endl;
}
imshow(WINDOW, copy);
waitKey(3);
imshow("edited", t_img);
waitKey(3);
ros::spinOnce();
}
}
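The geometry in Example 12 is the general ray-plane intersection: with the camera origin P0 and a second point on the ray P1 both expressed in the table frame, and the plane given by a point V0 and normal n, the scale factor is scale = n.dot(V0 - P0) / n.dot(P1 - P0) and the intersection is P0 + scale * (P1 - P0). The "virtual table" frame is placed one ball radius above the physical table, so the intersection lands at the ball's center rather than at its contact point.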