This article collects typical usage examples of the C++ image_transport::Publisher class. If you have been wondering what exactly the Publisher class does, how to use it, or where to find usage examples, the curated class code examples here may help.
Below, 15 code examples of the Publisher class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
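Before the examples, here is a minimal sketch of the typical advertise/publish pattern for image_transport::Publisher (the node name, topic name, and placeholder image below are illustrative assumptions, not taken from any example on this page):

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <std_msgs/Header.h>
#include <opencv2/core/core.hpp>

int main(int argc, char** argv)
{
    ros::init(argc, argv, "minimal_image_publisher");
    ros::NodeHandle nh;
    image_transport::ImageTransport it(nh);
    // advertise() returns an image_transport::Publisher; queue size 1
    image_transport::Publisher pub = it.advertise("camera/image", 1);
    cv::Mat image(480, 640, CV_8UC3, cv::Scalar(0, 0, 0)); // placeholder image
    ros::Rate rate(10);
    while (ros::ok())
    {
        // wrap the cv::Mat and publish it as a sensor_msgs/Image
        sensor_msgs::ImagePtr msg =
            cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg();
        pub.publish(msg);
        ros::spinOnce();
        rate.sleep();
    }
    return 0;
}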
Example 1: main
int main(int argc, char **argv)
{
dirName = argv[1];
toFeatures = pathToData + "/FeatureData/" + dirName;
toPhotos = pathToData + "/RenderedImages/" + dirName;
srand(time(0));
ros::init(argc, argv, "ROS_Publisher");
NodeHandle node;
image_transport::ImageTransport it(node);
std::cout << "Starting image load" << endl;
loadImages();
cout << "Done Loading Images" << endl;
getchar();
mcl_data_subscriber = node.subscribe(mcl_data_publisher_name, 4, MyDataCallback);
time_t temptime = time(0);
std::cout << "Waiting for Handshake from Program .." << std::endl;
while(!handshake_recieved && (time(0) - temptime) < 20)
{
ros::spinOnce();
}
if(handshake_recieved)
std::cout << "Handshake recieved" << std::endl;
else
{
std::cout << "No handshake recieved";
return -1;
}
movement_publisher = node.advertise<std_msgs::String>("ROBOT_MOVEMENT_PUBLISHER", 4);
data_publisher = it.advertise(publish_image_data_under, 4, true);
char key = 'k';
namedWindow("Robot Image");
namedWindow("Top Match");
while(ros::ok() && key != 'q')
{
ros::spinOnce();
int i = rand()%image_names.size();
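// note: the random index i above is never used below; current_image is presumably updated elsewhere (e.g. by MyDataCallback)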
cv_bridge::CvImage out_msg;
ros::Time scan_time = ros::Time::now();
out_msg.header.stamp = scan_time;
out_msg.header.frame_id = "robot_image";
out_msg.encoding = sensor_msgs::image_encodings::BGR8;
out_msg.image = image_list[current_image];
if(!out_msg.image.empty())
data_publisher.publish(out_msg.toImageMsg());
ros::spinOnce();
// std::cout << "hererer" << std::endl;
imshow("Robot Image", image_list[current_image]);
imshow("Top Match", BestGuessImage);
key = cv::waitKey(2);
// if(key == ' ')
// current_image++;
// if(current_image == image_list.size())
// current_image = 0;
//ros::Duration(0.1).sleep();
}
std_msgs::String msg;
stringstream ss;
ss << killflag << "_";
msg.data = ss.str();
movement_publisher.publish(msg);
ros::spinOnce();
ros::spinOnce();
ros::Duration(0.1).sleep();
ros::spinOnce();
ros::shutdown();
}
Example 2: do_work
void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
{
// Work on the image.
try
{
// Convert the image into something opencv can handle.
cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;
// Messages
opencv_apps::RotatedRectArrayStamped rects_msg, ellipses_msg;
rects_msg.header = msg->header;
ellipses_msg.header = msg->header;
// Do the work
cv::Mat src_gray;
/// Convert image to gray and blur it
if ( frame.channels() > 1 ) {
cv::cvtColor( frame, src_gray, cv::COLOR_RGB2GRAY );
} else {
src_gray = frame;
}
cv::blur( src_gray, src_gray, cv::Size(3,3) );
/// Create window
if( debug_view_) {
cv::namedWindow( window_name_, cv::WINDOW_AUTOSIZE );
}
cv::Mat threshold_output;
int max_thresh = 255;
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::RNG rng(12345);
/// Detect edges using Threshold
cv::threshold( src_gray, threshold_output, threshold_, 255, cv::THRESH_BINARY );
/// Find contours
cv::findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
/// Find the rotated rectangles and ellipses for each contour
std::vector<cv::RotatedRect> minRect( contours.size() );
std::vector<cv::RotatedRect> minEllipse( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{ minRect[i] = cv::minAreaRect( cv::Mat(contours[i]) );
if( contours[i].size() > 5 )
{ minEllipse[i] = cv::fitEllipse( cv::Mat(contours[i]) ); }
}
/// Draw contours + rotated rects + ellipses
cv::Mat drawing = cv::Mat::zeros( threshold_output.size(), CV_8UC3 );
for( size_t i = 0; i< contours.size(); i++ )
{
cv::Scalar color = cv::Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
// contour
cv::drawContours( drawing, contours, (int)i, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
// ellipse
cv::ellipse( drawing, minEllipse[i], color, 2, 8 );
// rotated rectangle
cv::Point2f rect_points[4]; minRect[i].points( rect_points );
for( int j = 0; j < 4; j++ )
cv::line( drawing, rect_points[j], rect_points[(j+1)%4], color, 1, 8 );
opencv_apps::RotatedRect rect_msg;
opencv_apps::Point2D rect_center;
opencv_apps::Size rect_size;
rect_center.x = minRect[i].center.x;
rect_center.y = minRect[i].center.y;
rect_size.width = minRect[i].size.width;
rect_size.height = minRect[i].size.height;
rect_msg.center = rect_center;
rect_msg.size = rect_size;
rect_msg.angle = minRect[i].angle;
opencv_apps::RotatedRect ellipse_msg;
opencv_apps::Point2D ellipse_center;
opencv_apps::Size ellipse_size;
ellipse_center.x = minEllipse[i].center.x;
ellipse_center.y = minEllipse[i].center.y;
ellipse_size.width = minEllipse[i].size.width;
ellipse_size.height = minEllipse[i].size.height;
ellipse_msg.center = ellipse_center;
ellipse_msg.size = ellipse_size;
ellipse_msg.angle = minEllipse[i].angle;
rects_msg.rects.push_back(rect_msg);
ellipses_msg.rects.push_back(ellipse_msg);
}
/// Create a Trackbar for user to enter threshold
if( debug_view_) {
if (need_config_update_) {
config_.threshold = threshold_;
srv.updateConfig(config_);
need_config_update_ = false;
}
cv::createTrackbar( "Threshold:", window_name_, &threshold_, max_thresh, trackbarCallback);
cv::imshow( window_name_, drawing );
//.........the remaining code is omitted here.........
Example 3: imageCallback
//This function is called every time a new image is published
void imageCallback(const sensor_msgs::ImageConstPtr& original_image)
{
//Convert from the ROS image message to a CvImage suitable for working with OpenCV for processing
cv_bridge::CvImagePtr cv_ptr;
try
{
//Always copy, returning a mutable CvImage
//OpenCV expects color images to use BGR channel order.
cv_ptr = cv_bridge::toCvCopy(original_image, enc::BGR8);
}
catch (cv_bridge::Exception& e)
{
//if there is an error during conversion, display it
ROS_ERROR("tutorialROSOpenCV::main.cpp::cv_bridge exception: %s", e.what());
return;
}
/* //Invert Image
//Go through all the rows
for(int i=0; i<cv_ptr->image.rows; i++)
{
//Go through all the columns
for(int j=0; j<cv_ptr->image.cols; j++)
{
//Go through all the channels (b, g, r)
for(int k=0; k<cv_ptr->image.channels(); k++)
{
//Invert the image by subtracting image data from 255
cv_ptr->image.data[i*cv_ptr->image.rows*4+j*3 + k] = 255-cv_ptr->image.data[i*cv_ptr->image.rows*4+j*3 + k];
}
}
}
*/
cv::Mat img_mask_blue,img_hsv_blue,combined_Image;
cv::cvtColor(cv_ptr->image,img_hsv_blue,CV_BGR2HSV);
cv::inRange(img_hsv_blue,cv::Scalar(LowerH,LowerS,LowerV),cv::Scalar(UpperH,UpperS,UpperV),img_mask_blue);
cv::Mat img_mask_red,img_hsv_red,img_mask_red_low,img_mask_red_high;
cv::cvtColor(cv_ptr->image,img_hsv_red,CV_BGR2HSV);
//Red wraps around the hue axis; the original call used a lower hue bound of 17 with an upper bound of 10, which always yields an empty mask, so threshold two hue ranges (illustrative values) and OR them together.
cv::inRange(img_hsv_red,cv::Scalar(0, 15, 100), cv::Scalar(10, 255, 255),img_mask_red_low);
cv::inRange(img_hsv_red,cv::Scalar(170, 15, 100), cv::Scalar(179, 255, 255),img_mask_red_high);
cv::bitwise_or(img_mask_red_low,img_mask_red_high,img_mask_red);
cv::addWeighted ( img_mask_red, 1, img_mask_blue, 1, 0.0, combined_Image);
// cv::Mat img_mask_green,img_hsv_green;
/// cv::cvtColor(cv_ptr->image,img_hsv_green,CV_BGR2HSV);
// cv::inRange(img_hsv_green,cv::Scalar(LowerH,LowerS,LowerV),cv::Scalar(UpperH,UpperS,UpperV),img_mask_green);
//Display the image using OpenCV
cv::imshow(WINDOW, combined_Image);
//cv::imshow(WINDOW, img_mask_red);
// cv::imshow(WINDOW, img_mask_green);
//Add some delay in milliseconds. The function only works if there is at least one HighGUI window created and the window is active. If there are several HighGUI windows, any of them can be active.
cv::waitKey(3);
//Calculate the moments of the thresholded image
//Convert the CvImage to a ROS image message and publish it on the "camera/image_processed" topic.
pub.publish(cv_ptr->toImageMsg());
}
Example 4: do_work
void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
{
// Work on the image.
try
{
// Convert the image into something opencv can handle.
cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;
// Messages
opencv_apps::FlowArrayStamped flows_msg;
flows_msg.header = msg->header;
if( debug_view_) {
cv::namedWindow( window_name_, cv::WINDOW_AUTOSIZE );
if (need_config_update_) {
srv.updateConfig(config_);
need_config_update_ = false;
}
}
// Do the work
if ( frame.channels() > 1 ) {
cv::cvtColor( frame, gray, cv::COLOR_BGR2GRAY );
} else {
frame.copyTo(gray);
}
if( prevgray.data )
{
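// Farneback parameters: pyramid scale 0.5, 3 pyramid levels, averaging window 15, 3 iterations, poly_n 5, poly_sigma 1.2, no flags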
cv::calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
cv::cvtColor(prevgray, cflow, cv::COLOR_GRAY2BGR);
//drawOptFlowMap(flow, cflow, 16, 1.5, Scalar(0, 255, 0));
int step = 16;
cv::Scalar color = cv::Scalar(0, 255, 0);
for(int y = 0; y < cflow.rows; y += step)
for(int x = 0; x < cflow.cols; x += step)
{
const cv::Point2f& fxy = flow.at<cv::Point2f>(y, x);
cv::line(cflow, cv::Point(x,y), cv::Point(cvRound(x+fxy.x), cvRound(y+fxy.y)),
color);
cv::circle(cflow, cv::Point(x,y), 2, color, -1);
opencv_apps::Flow flow_msg;
opencv_apps::Point2D point_msg;
opencv_apps::Point2D velocity_msg;
point_msg.x = x;
point_msg.y = y;
velocity_msg.x = fxy.x;
velocity_msg.y = fxy.y;
flow_msg.point = point_msg;
flow_msg.velocity = velocity_msg;
flows_msg.flow.push_back(flow_msg);
}
}
std::swap(prevgray, gray);
//-- Show what you got
if( debug_view_) {
cv::imshow( window_name_, cflow );
int c = cv::waitKey(1);
}
// Publish the image.
sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, "bgr8", cflow).toImageMsg();
img_pub_.publish(out_img);
msg_pub_.publish(flows_msg);
}
catch (cv::Exception &e)
{
NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
}
prev_stamp_ = msg->header.stamp;
}
Example 5: imageCallback
void imageCallback(const sensor_msgs::ImageConstPtr & msg){
#ifdef PRINT_ROS_INFO
ROS_INFO("Got image message.");
#endif
// get the compressed image, and convert it to Opencv format.
cv::Mat img;
try{
img = cv_bridge::toCvShare(msg, "bgr8")->image;
}
catch(cv_bridge::Exception & e){
ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
}
#ifdef PRINT_ROS_INFO
ROS_INFO("Converting image done.");
#endif
//std::cout << "image size = ( " << img.rows << " X " << img.cols << " )." << std::endl;
//printf("image data address 0x%x\n", img.data);
if( startTracking ){
//trackerMutex.lock();
#ifdef PRINT_ROS_INFO
ROS_INFO("Tracker: Reading Frame ... ");
#endif
// update the tracking status, and draw the result.
tracker->readFrame(img);
#ifdef PRINT_ROS_INFO
ROS_INFO("Tracker: Updating status ... ");
#endif
tracker->updateTrackerStatus();
#ifdef PRINT_ROS_INFO
ROS_INFO("Tracker: status updated ... ");
ROS_INFO("Tracker: drawing ... ");
#endif
cv::Mat temp;
img.copyTo(temp);
tracker->drawTrackers(temp);
#ifdef PRINT_ROS_INFO
ROS_INFO("Tracker: Publishing ... ");
#endif
// republish this image.
sensor_msgs::ImagePtr out_msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", temp).toImageMsg(); // renamed to avoid shadowing the callback's msg parameter
pub.publish(out_msg);
// publish to topic -- object_location
cv::Point2i location = tracker->getWeightedAverageLocation();
std::stringstream locationStrStream;
int currentNum = tracker->getSampleNum();
int numWithHighConfidence = tracker->getNumOfSamplesHasProbLargerThan(PROB_THRESHOD);
float highConfidenceSampleRatio;
if( currentNum <= 0){
highConfidenceSampleRatio = 0;
}else{
highConfidenceSampleRatio = numWithHighConfidence * 1.0f / currentNum;
}
std::cout << "High confidence sample ratio = " << highConfidenceSampleRatio << std::endl;
if( location.x < 0 || location.y < 0 || highConfidenceSampleRatio <= HIGH_CONFID_NUM_RATIO_THRESHOLD ){
//locationStrStream << "object_x " << "0" << " , " << "object_y " << "0" << " , ";
locationStrStream << "object_x " << img.cols /2 << ", " << "object_y " << img.rows / 2 << ", ";
// make offsets to the samples:
ROS_INFO("Tracker offset!");
if( lastMovementDirection == TRACKER_UP)
tracker->offsetTracker(TRACKER_DOWN);
else if( lastMovementDirection == TRACKER_DOWN)
tracker->offsetTracker(TRACKER_UP);
else if( lastMovementDirection == TRACKER_LEFT)
tracker->offsetTracker(TRACKER_RIGHT);
else if( lastMovementDirection == TRACKER_RIGHT)
tracker->offsetTracker(TRACKER_LEFT);
}else{
// "x 10, y 10, width 360, height 640"
locationStrStream << "object_x " << location.x << ", " << "object_y " << location.y << ", ";
lastMovementDirection = -1;
}
locationStrStream << "width " << img.cols << ", " << "height " << img.rows << ", ";
//.........the remaining code is omitted here.........
Example 6: imageCb
void imageCb(const sensor_msgs::ImageConstPtr& msg)
{
RNG rng(12345);
cv_bridge::CvImagePtr cv_ptr;
try{
cv_ptr = cv_bridge::toCvCopy(msg, enc::BGR8);
}
catch(cv_bridge::Exception& e){ ROS_ERROR("cv_bridge exception: %s", e.what()); return; }
frame = cv_ptr->image;
cvtColor(frame, frame_hsv, CV_BGR2HSV);
center_mat = Rect(Point(frame.cols/2 - 10, frame.rows/2 - 10), Point(frame.cols/2 + 10, frame.rows/2 + 10));
if(set_A_next_frame){
Mat croppedHSV = frame_hsv(center_mat);
Mat croppedFrame = frame(center_mat);
Scalar meanScalar, stdScalar;
meanStdDev(croppedHSV, meanScalar, stdScalar);
if(onetwoA == 0){
detect_color_A2 = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
meanStdDev(croppedFrame, meanScalar, stdScalar);
color_A2_RGB = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
}
if(onetwoA == 1){
detect_color_A1 = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
meanStdDev(croppedFrame, meanScalar, stdScalar);
color_A1_RGB = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
}
onetwoA = 1 - onetwoA;
recalculate_detection(0);
set_A_next_frame = false;
waitKey(3);
image_pub_.publish(cv_ptr->toImageMsg());
}
if(set_B_next_frame){
Mat croppedHSV = frame_hsv(center_mat);
Mat croppedFrame = frame(center_mat);
Scalar meanScalar, stdScalar;
meanStdDev(croppedHSV, meanScalar, stdScalar);
if(onetwoB == 0){
detect_color_B2 = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
meanStdDev(croppedFrame, meanScalar, stdScalar);
color_B2_RGB = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
}
if(onetwoB == 1){
detect_color_B1 = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
meanStdDev(croppedFrame, meanScalar, stdScalar);
color_B1_RGB = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
}
onetwoB = 1 - onetwoB;
recalculate_detection(1);
set_B_next_frame = false;
waitKey(3);
image_pub_.publish(cv_ptr->toImageMsg());
}
if(!set_A_next_frame && !set_B_next_frame){
Scalar color = Scalar( 0,0,0 );
rectangle(frame,center_mat, color, 2,8,0);
rectangle(frame, Point(5,0), Point(12,7), color_A1_RGB , 4,8,0);
rectangle(frame, Point(17,0), Point(24,7), color_A2_RGB, 4,8,0);
rectangle(frame, Point(40,0), Point(47,7), color_B1_RGB, 4,8,0);
rectangle(frame, Point(52,0), Point(59,7), color_B2_RGB, 4,8,0);
inRange(frame_hsv.clone(), detect_color_A_min, detect_color_A_max, thresh_A);
inRange(frame_hsv.clone(), detect_color_B_min, detect_color_B_max, thresh_B);
findContours(thresh_A, contours_A, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
findContours(thresh_B, contours_B, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
Moments moment;
int x_mass, y_mass, x_center = frame.cols / 2, y_center = frame.rows / 2;
CvPoint mass_center = cvPoint(0,0), img_center = cvPoint(x_center,y_center);
double max_area = 0, current_area;
int max_area_index = -1;
for( size_t i = 0; i< contours_A.size(); i++ )
{
current_area = contourArea(contours_A[i]);
if(max_area < current_area )
{
max_area = current_area;
max_area_index = i;
}
//.........the remaining code is omitted here.........
Example 7: filterDetectedObjects
void filterDetectedObjects(std_msgs::Header currentHeader, Mat& grey, Mat& thresholded, double canny_threshold, int cornerThreshold, list<DetectedObject*>& detectedObjects) {
for (list<DetectedObject*>::const_iterator iterator = detectedObjects.begin(), end = detectedObjects.end(); iterator != end; ++iterator) {
DetectedObject* obj = *iterator;
/*if (obj->getName() == "Thin alu profile") {
Mat* region = extractRotatedRegion(thresholded, obj->getRotatedRect());
cv_bridge::CvImagePtr cv_pextracted(new cv_bridge::CvImage);
sensor_msgs::ImagePtr msg_extracted;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
vector<int> cvxHullIndices;
vector<Vec4i> cvxDefects;
vector<vector<Point> > contours_poly(1);
findContours(*region, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
if (contours.size() <= 0) {
continue;
}
cout << "Contour size: " << contours[0].size() << endl;
//approxPolyDP( Mat(contours[0]), contours_poly[0], approxPolyEpsilon, true );
//convexHull(contours_poly[0], cvxHullIndices, true, true);
//if (cvxHullIndices.size() < 4) {
// continue;
//}
//convexityDefects(contours_poly[0], cvxHullIndices, cvxDefects);
//if (cvxDefects.size() > 2) {
// obj->setName("Big screw");
//}
Mat regionColored;
cvtColor(*region, regionColored, CV_GRAY2BGR);
Scalar color (0, 255, 0);
drawContours(regionColored, contours_poly, 0, color);
cv_pextracted->image = regionColored;
cv_pextracted->header = currentHeader;
cv_pextracted->encoding = sensor_msgs::image_encodings::BGR8;
msg_extracted = cv_pextracted->toImageMsg();
pub_extracted.publish(msg_extracted);
cv_pextracted->image.release();
delete region;
}else */if (obj->getName() == "Small cylinder") {
Mat* region = extractRotatedRegion(grey, obj->getRotatedRect());
Mat regCanny;
GaussianBlur(*region, regCanny, Size(3,3), 10, 10);
Canny(regCanny, regCanny, canny_threshold, canny_threshold);
cv_bridge::CvImagePtr cv_pextracted(new cv_bridge::CvImage);
sensor_msgs::ImagePtr msg_extracted;
//cout << "NonZeroCount: " << countNonZero(regCanny) << endl;
vector<Point2f> corners;
goodFeaturesToTrack(regCanny, corners, 100, 0.3, 10);
Mat cannyColored;
cvtColor(regCanny, cannyColored, CV_GRAY2BGR);
for (size_t i=0; i<corners.size(); i++) {
int x = corners[i].x;
int y = corners[i].y;
cannyColored.at<Vec3b>(y,x)[0] = 0;
cannyColored.at<Vec3b>(y,x)[1] = 0;
cannyColored.at<Vec3b>(y,x)[2] = 255;
}
//cout << "CornersCount: " << corners.size() << endl;
if (corners.size() > cornerThreshold) {
obj->setName("Rough small cylinder");
} else {
obj->setName("Smooth small cylinder");
}
cv_pextracted->image = cannyColored;
cv_pextracted->header = currentHeader;
cv_pextracted->encoding = sensor_msgs::image_encodings::BGR8;
msg_extracted = cv_pextracted->toImageMsg();
pub_extracted.publish(msg_extracted);
cv_pextracted->image.release();
delete region;
}
}
}
Example 8: image_callback
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
if(!cam_info_received) return;
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
inImage = cv_ptr->image;
resultImg = cv_ptr->image.clone();
//detection results will go into "markers"
markers.clear();
//Ok, let's detect
double min_size = boards[0].marker_size;
for (int board_index = 1; board_index < boards.size(); board_index++)
if (min_size > boards[board_index].marker_size) min_size = boards[board_index].marker_size;
mDetector.detect(inImage, markers, camParam, min_size, false);
for (int board_index = 0; board_index < boards.size(); board_index++)
{
Board board_detected;
//Detection of the board
float probDetect = the_board_detector.detect(markers, boards[board_index].config, board_detected, camParam, boards[board_index].marker_size);
if (probDetect > 0.0)
{
tf::Transform transform = ar_sys::getTf(board_detected.Rvec, board_detected.Tvec);
tf::StampedTransform stampedTransform(transform, ros::Time::now(), "apollon_cam_right", boards[board_index].name +"_right");
//_tfBraodcaster.sendTransform(stampedTransform); // from phillip
/*geometry_msgs::PoseStamped poseMsg;
tf::poseTFToMsg(transform, poseMsg.pose);
poseMsg.header.frame_id = msg->header.frame_id;
poseMsg.header.stamp = msg->header.stamp;
pose_pub.publish(poseMsg);*/
geometry_msgs::TransformStamped transformMsg;
tf::transformStampedTFToMsg(stampedTransform, transformMsg);
transform_pub.publish(transformMsg);
/*geometry_msgs::Vector3Stamped positionMsg;
positionMsg.header = transformMsg.header;
positionMsg.vector = transformMsg.transform.translation;
position_pub.publish(positionMsg);*/
if(camParam.isValid())
{
//draw board axis
CvDrawingUtils::draw3dAxis(resultImg, board_detected, camParam);
}
}
}
//for each marker, draw info and its boundaries in the image
for(size_t i=0; draw_markers && i < markers.size(); ++i)
{
markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
}
if(camParam.isValid())
{
//draw a 3d cube in each marker if there is 3d info
for(size_t i=0; i<markers.size(); ++i)
{
if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
}
}
if(image_pub.getNumSubscribers() > 0)
{
//show input with augmented information
cv_bridge::CvImage out_msg;
out_msg.header.frame_id = msg->header.frame_id;
out_msg.header.stamp = msg->header.stamp;
out_msg.encoding = sensor_msgs::image_encodings::RGB8;
out_msg.image = resultImg;
image_pub.publish(out_msg.toImageMsg());
}
if(debug_pub.getNumSubscribers() > 0)
{
//show also the internal image resulting from the threshold operation
cv_bridge::CvImage debug_msg;
debug_msg.header.frame_id = msg->header.frame_id;
debug_msg.header.stamp = msg->header.stamp;
debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
debug_msg.image = mDetector.getThresholdedImage();
debug_pub.publish(debug_msg.toImageMsg());
}
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
//.........the remaining code is omitted here.........
Example 9: do_work
void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
{
// Work on the image.
try
{
// Convert the image into something opencv can handle.
cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;
// Messages
opencv_apps::ContourArrayStamped contours_msg;
contours_msg.header = msg->header;
// Do the work
cv::Mat src_gray;
/// Convert image to gray and blur it
if ( frame.channels() > 1 ) {
cv::cvtColor( frame, src_gray, cv::COLOR_RGB2GRAY );
} else {
src_gray = frame;
}
cv::blur( src_gray, src_gray, cv::Size(3,3) );
/// Create window
if( debug_view_) {
cv::namedWindow( window_name_, cv::WINDOW_AUTOSIZE );
}
cv::Mat threshold_output;
int max_thresh = 255;
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::RNG rng(12345);
/// Detect edges using Threshold
cv::threshold( src_gray, threshold_output, threshold_, 255, cv::THRESH_BINARY );
/// Find contours
cv::findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
/// Find the convex hull object for each contour
std::vector<std::vector<cv::Point> >hull( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{ cv::convexHull( cv::Mat(contours[i]), hull[i], false ); }
/// Draw contours + hull results
cv::Mat drawing = cv::Mat::zeros( threshold_output.size(), CV_8UC3 );
for( size_t i = 0; i< contours.size(); i++ )
{
cv::Scalar color = cv::Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
cv::drawContours( drawing, contours, (int)i, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
cv::drawContours( drawing, hull, (int)i, color, 4, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
opencv_apps::Contour contour_msg;
for ( size_t j = 0; j < hull[i].size(); j++ ) {
opencv_apps::Point2D point_msg;
point_msg.x = hull[i][j].x;
point_msg.y = hull[i][j].y;
contour_msg.points.push_back(point_msg);
}
contours_msg.contours.push_back(contour_msg);
}
/// Create a Trackbar for user to enter threshold
if( debug_view_) {
if (need_config_update_) {
config_.threshold = threshold_;
srv.updateConfig(config_);
need_config_update_ = false;
}
cv::createTrackbar( "Threshold:", window_name_, &threshold_, max_thresh, trackbarCallback);
cv::imshow( window_name_, drawing );
int c = cv::waitKey(1);
}
// Publish the image.
sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, sensor_msgs::image_encodings::BGR8, drawing).toImageMsg();
img_pub_.publish(out_img);
msg_pub_.publish(contours_msg);
}
catch (cv::Exception &e)
{
NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
}
prev_stamp_ = msg->header.stamp;
}
Example 10: imageCb
void imageCb(const sensor_msgs::ImageConstPtr& msg) {
cv_bridge::CvImagePtr cv_ptr;
try {
cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
} catch (cv_bridge::Exception& e) {
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
// look for red pixels; turn all other pixels black, and turn red pixels white
int npix = 0; //count the pixels
int isum = 0; //accumulate the column values of red pixels
int jsum = 0; //accumulate the row values of red pixels
int redval, blueval, greenval, testval;
cv::Vec3b rgbpix;
for (int i = 0; i < cv_ptr->image.cols; i++) {
for (int j = 0; j < cv_ptr->image.rows; j++) {
rgbpix = cv_ptr->image.at<cv::Vec3b>(j, i); //[j][i];
redval = rgbpix[2] + 1;
blueval = rgbpix[0] + 1;
greenval = rgbpix[1] + 1;
testval = redval / (blueval + greenval);
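//note: integer division truncates here, so testval is the integer part of red/(blue+green)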
//if red, paint it white:
if (testval > g_redratio) {
cv_ptr->image.at<cv::Vec3b>(j, i)[0] = 255;
cv_ptr->image.at<cv::Vec3b>(j, i)[1] = 255;
cv_ptr->image.at<cv::Vec3b>(j, i)[2] = 255;
npix++;
isum += i;
jsum += j;
} else { //else paint it black
cv_ptr->image.at<cv::Vec3b>(j, i)[0] = 0;
cv_ptr->image.at<cv::Vec3b>(j, i)[1] = 0;
cv_ptr->image.at<cv::Vec3b>(j, i)[2] = 0;
}
}
}
cout << "npix: " << npix << endl;
//paint in a blue square at the centroid:
int half_box = 5; // choose size of box to paint
int i_centroid, j_centroid;
if (npix > 0) {
i_centroid = isum / npix;
j_centroid = jsum / npix;
cout << "i_avg: " << i_centroid << endl; //i,j centroid of red pixels
cout << "j_avg: " << j_centroid << endl;
for (int i_box = i_centroid - half_box; i_box <= i_centroid + half_box; i_box++) {
for (int j_box = j_centroid - half_box; j_box <= j_centroid + half_box; j_box++) {
if ((i_box >= 0)&&(j_box >= 0)&&(i_box < cv_ptr->image.cols)&&(j_box < cv_ptr->image.rows)) {
cv_ptr->image.at<cv::Vec3b>(j_box, i_box)[0] = 255;
cv_ptr->image.at<cv::Vec3b>(j_box, i_box)[1] = 0;
cv_ptr->image.at<cv::Vec3b>(j_box, i_box)[2] = 0;
}
}
}
}
// Update GUI Window; this will display processed images on the open-cv viewer.
cv::imshow(OPENCV_WINDOW, cv_ptr->image);
cv::waitKey(3);
// Also, publish the processed image as a ROS message on a ROS topic
// can view this stream in ROS with:
//rosrun image_view image_view image:=/image_converter/output_video
image_pub_.publish(cv_ptr->toImageMsg());
}
Example 11: do_work
void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
{
// Work on the image.
try
{
// Convert the image into something opencv can handle.
cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;
// Messages
opencv_apps::LineArrayStamped lines_msg;
lines_msg.header = msg->header;
// Do the work
std::vector<cv::Rect> faces;
cv::Mat src_gray, edges;
if ( frame.channels() > 1 ) {
cv::cvtColor( frame, src_gray, cv::COLOR_BGR2GRAY );
} else {
src_gray = frame;
}
/// Apply Canny edge detector
Canny( src_gray, edges, 50, 200, 3 );
if( debug_view_) {
/// Create Trackbars for Thresholds
char thresh_label[50];
sprintf( thresh_label, "Thres: %d + input", min_threshold_ );
cv::namedWindow( window_name_, cv::WINDOW_AUTOSIZE );
cv::createTrackbar( thresh_label, window_name_, &threshold_, max_threshold_, trackbarCallback);
if (need_config_update_) {
config_.threshold = threshold_;
srv.updateConfig(config_);
need_config_update_ = false;
}
}
/// Initialize
cv::cvtColor( edges, frame, cv::COLOR_GRAY2BGR );
switch (config_.hough_type) {
case hough_lines::HoughLines_Standard_Hough_Transform:
{
std::vector<cv::Vec2f> s_lines;
/// 1. Use Standard Hough Transform
cv::HoughLines( edges, s_lines, 1, CV_PI/180, min_threshold_ + threshold_, 0, 0 );
/// Show the result
for( size_t i = 0; i < s_lines.size(); i++ )
{
float r = s_lines[i][0], t = s_lines[i][1];
double cos_t = cos(t), sin_t = sin(t);
double x0 = r*cos_t, y0 = r*sin_t;
double alpha = 1000;
cv::Point pt1( cvRound(x0 + alpha*(-sin_t)), cvRound(y0 + alpha*cos_t) );
cv::Point pt2( cvRound(x0 - alpha*(-sin_t)), cvRound(y0 - alpha*cos_t) );
#ifndef CV_VERSION_EPOCH
cv::line( frame, pt1, pt2, cv::Scalar(255,0,0), 3, cv::LINE_AA);
#else
cv::line( frame, pt1, pt2, cv::Scalar(255,0,0), 3, CV_AA);
#endif
opencv_apps::Line line_msg;
line_msg.pt1.x = pt1.x;
line_msg.pt1.y = pt1.y;
line_msg.pt2.x = pt2.x;
line_msg.pt2.y = pt2.y;
lines_msg.lines.push_back(line_msg);
}
break;
}
case hough_lines::HoughLines_Probabilistic_Hough_Transform:
default:
{
std::vector<cv::Vec4i> p_lines;
/// 2. Use Probabilistic Hough Transform
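/// (rho = 1 pixel, theta = 1 degree, accumulator threshold = min_threshold_ + threshold_, minLineLength = 30, maxLineGap = 10)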
cv::HoughLinesP( edges, p_lines, 1, CV_PI/180, min_threshold_ + threshold_, 30, 10 );
/// Show the result
for( size_t i = 0; i < p_lines.size(); i++ )
{
cv::Vec4i l = p_lines[i];
#ifndef CV_VERSION_EPOCH
cv::line( frame, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(255,0,0), 3, cv::LINE_AA);
#else
cv::line( frame, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(255,0,0), 3, CV_AA);
#endif
opencv_apps::Line line_msg;
line_msg.pt1.x = l[0];
line_msg.pt1.y = l[1];
line_msg.pt2.x = l[2];
line_msg.pt2.y = l[3];
lines_msg.lines.push_back(line_msg);
}
break;
//.........the remaining code is omitted here.........
Example 12: imageCb
void imageCb(const sensor_msgs::ImageConstPtr& msg)
{
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
static int posX, posY, lastX, lastY; //To hold the X and Y position of the colored object
Mat rgbCameraFrames = cv_ptr->image; //hold raw image
Mat colorTrackingFrames; //hold color filtered frame
Mat resutantFrame; //add raw and scribble frames
CvMoments colorMoment; //Structure to hold moments information and their order
assert(rgbCameraFrames.type() == CV_8UC3);
Mat *scribbleFrame = new Mat(rgbCameraFrames.rows, rgbCameraFrames.cols, CV_8UC3);
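//note: scribbleFrame is heap-allocated with uninitialized pixel data and is never deleted in this callback (a per-frame leak)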
double frame_middle = rgbCameraFrames.cols / 2;//get middle of the frame
//test draw
//cv::circle(cv_ptr->image, cv::Point(frame_middle, 50), 10, CV_RGB(255,0,0));
GaussianBlur(rgbCameraFrames, colorTrackingFrames, Size(11,11), 0, 0); //reduce the noise
inRange(colorTrackingFrames, Scalar(0, 0 , 80), Scalar(60,60,255), colorTrackingFrames);//turn red pixels (in BGR) white and everything else black
colorMoment = moments(colorTrackingFrames);
double moment10 = cvGetSpatialMoment(&colorMoment, 1, 0);//Sum of X coordinates of all white pixels
double moment01 = cvGetSpatialMoment(&colorMoment, 0, 1);//Sum of Y coordinates of all white pixels
double area = cvGetSpatialMoment(&colorMoment, 0, 0); //Sum of all white pixels
lastX = posX;
lastY = posY;
posX = moment10/area;
posY = moment01/area;
geometry_msgs::Twist vel_msg;
double difference = posX - frame_middle;
ROS_INFO("frame_middle: %f, posX: %d, diffrence: %f",frame_middle, posX, difference);
if(posX >= frame_middle * 10 || posX <= 0)
{
ROS_INFO("ROTATE");
vel_msg.angular.z = -1;
}
else if( difference <50 && difference > -50)
{
ROS_INFO("FORWARD");
vel_msg.linear.x = 0.25;
}
else if(difference >= 50)
{
//turn right
ROS_INFO("TURN RIGHT");
vel_msg.angular.z = -0.5;
}
else if(difference <= -50)
{
//turn left
ROS_INFO("TURN LEFT");
vel_msg.angular.z = 0.5;
}
if(posX > 0 && posY > 0 && lastX >0 && lastY > 0)
{
line(*scribbleFrame, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 1);
line(rgbCameraFrames, cvPoint(posX, posY), cvPoint(lastX,lastY),cvScalar(0,255,255), 5);
}
cv::imshow(scribbleWindow, *scribbleFrame);
/*
// Draw an example circle on the video stream
if (cv_ptr->image.rows > 60 && cv_ptr->image.cols > 60)
cv::circle(cv_ptr->image, cv::Point(50, 50), 10, CV_RGB(255,0,0));
*/
// Update GUI Window
cv::imshow(OPENCV_WINDOW, cv_ptr->image);
cv::waitKey(3);
vel_pub_.publish(vel_msg);
// Output modified video stream
image_pub_.publish(cv_ptr->toImageMsg());
}
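For reference, the C-style cvGetSpatialMoment calls in this example map directly onto the members of cv::Moments; a minimal equivalent sketch of the centroid computation (mask here stands in for a binary image such as colorTrackingFrames above, and the variable names are illustrative):

cv::Moments m = cv::moments(mask, /*binaryImage=*/true);
if (m.m00 > 0) // m00 is the white-pixel count for a binary mask
{
    int cx = static_cast<int>(m.m10 / m.m00); // centroid column (posX above)
    int cy = static_cast<int>(m.m01 / m.m00); // centroid row (posY above)
}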
Example 13: detectFiducials
bool detectFiducials(cob_object_detection_msgs::DetectionArray& detection_array, cv::Mat& color_image)
{
int id_start_idx = 2351;
unsigned int marker_array_size = 0;
unsigned int pose_array_size = 0;
// Detect fiducials
std::vector<ipa_Fiducials::t_pose> tags_vec;
std::vector<std::vector<double> >vec_vec7d;
if (m_pi_tag->GetPose(color_image, tags_vec) & ipa_Utils::RET_OK)
{
pose_array_size = tags_vec.size();
// TODO: Average results
for (unsigned int i=0; i<pose_array_size; i++)
{
cob_object_detection_msgs::Detection fiducial_instance;
fiducial_instance.label = "pi-tag"; //tags_vec[i].id;
fiducial_instance.detector = "Fiducial_PI";
fiducial_instance.score = 0;
fiducial_instance.bounding_box_lwh.x = 0;
fiducial_instance.bounding_box_lwh.y = 0;
fiducial_instance.bounding_box_lwh.z = 0;
// TODO: Set Mask
cv::Mat frame(3,4, CV_64FC1);
for (int k=0; k<3; k++)
for (int l=0; l<3; l++)
frame.at<double>(k,l) = tags_vec[i].rot.at<double>(k,l);
frame.at<double>(0,3) = tags_vec[i].trans.at<double>(0,0);
frame.at<double>(1,3) = tags_vec[i].trans.at<double>(1,0);
frame.at<double>(2,3) = tags_vec[i].trans.at<double>(2,0);
std::vector<double> vec7d = FrameToVec7(frame);
vec_vec7d.push_back(vec7d);
// Results are given in CfromO
fiducial_instance.pose.pose.position.x = vec7d[0];
fiducial_instance.pose.pose.position.y = vec7d[1];
fiducial_instance.pose.pose.position.z = vec7d[2];
fiducial_instance.pose.pose.orientation.w = vec7d[3];
fiducial_instance.pose.pose.orientation.x = vec7d[4];
fiducial_instance.pose.pose.orientation.y = vec7d[5];
fiducial_instance.pose.pose.orientation.z = vec7d[6];
fiducial_instance.pose.header.stamp = received_timestamp_;
fiducial_instance.pose.header.frame_id = received_frame_id_;
detection_array.detections.push_back(fiducial_instance);
ROS_INFO("[fiducials] Detected PI-Tag '%s' at x,y,z,rw,rx,ry,rz ( %f, %f, %f, %f, %f, %f, %f ) ",
fiducial_instance.label.c_str(), vec7d[0], vec7d[1], vec7d[2],
vec7d[3], vec7d[4], vec7d[5], vec7d[6]);
}
}
else
{
pose_array_size = 0;
}
// Publish 2d image
if (publish_2d_image_)
{
for (unsigned int i=0; i<pose_array_size; i++)
{
RenderPose(color_image, tags_vec[i].rot, tags_vec[i].trans);
cv_bridge::CvImage cv_ptr;
cv_ptr.image = color_mat_8U3_;
cv_ptr.encoding = CobFiducialsNode::color_image_encoding_;
img2D_pub_.publish(cv_ptr.toImageMsg());
}
}
// Publish tf
if (publish_tf_)
{
for (unsigned int i=0; i<pose_array_size; i++)
{
// Broadcast transform of fiducial
tf::Transform transform;
std::stringstream tf_name;
tf_name << "pi_tag" <<"_" << "0";
transform.setOrigin(tf::Vector3(vec_vec7d[i][0], vec_vec7d[i][1], vec_vec7d[i][2]));
transform.setRotation(tf::Quaternion(vec_vec7d[i][4], vec_vec7d[i][5], vec_vec7d[i][6], vec_vec7d[i][3]));
tf_broadcaster_.sendTransform(tf::StampedTransform(transform, ros::Time::now(), received_frame_id_, tf_name.str()));
}
}
// Publish marker array
if (publish_marker_array_)
{
// 3 arrows for each coordinate system of each detected fiducial
marker_array_size = 3*pose_array_size;
if (marker_array_size >= prev_marker_array_size_)
{
marker_array_msg_.markers.resize(marker_array_size);
}
// publish a coordinate system from arrow markers for each object
for (unsigned int i=0; i<pose_array_size; i++)
{
//.........the remaining code is omitted here.........
Example 14: imageCb
void imageCb(const sensor_msgs::ImageConstPtr& msg)
{
int i,j;
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(msg, enc::BGR8);
}
catch(cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
//sensor_msgs::CvBridge bridge;
//we need this object bridge for facilitating conversion from ros-img to opencv
//IplImage* img = bridge.imgMsgToCv(msg,"rgb8");
//image being converted from ros to opencv using cvbridge
IplImage imgObj = cv_ptr->image;
IplImage* img = &imgObj;
IplImage* gray_out = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );
cvCvtColor(img , gray_out, CV_RGB2GRAY);
const int width = 752;
const int height = 480;
int MaxVal[width][3],a,b; //making width const turns this into a legal fixed-size array instead of a variable-length array
int maks;
int wart;
double X,Y,ZM;
float focalLength = 748.821716;
float baseline = 0.060234;
float distance;
int disparity;
float numerator = focalLength * baseline;
IplImage* mapa = cvCreateImage( cvSize(600, 600), IPL_DEPTH_8U, 1 );
cvCircle(mapa, cvPoint(300,600), 200, cvScalar(50),1,8,0);
cvCircle(mapa, cvPoint(300,600), 400, cvScalar(50),1,8,0);
cvCircle(mapa, cvPoint(300,600), 150, cvScalar(50),1,8,0);
cvCircle(mapa, cvPoint(300,600), 100, cvScalar(50),1,8,0);
//Find the brightest pixel in each column and store it in the MaxVal[] array
for(i=0;i<752;i++)
{
maks = 0;
for(j=60;j<440;j++)
{
wart = (int)cvGetReal2D( gray_out, j, i );
if(wart >= maks)
{
maks=wart;
MaxVal[i][0]=maks;
MaxVal[i][1]=i;
MaxVal[i][2]=j;
}
}
//filters
if ((MaxVal[i-2][0]==0 && MaxVal[i][0]==0 && MaxVal[i-1][0]!=0) )
{
MaxVal[i-1][0]=0;
}
if ( abs(MaxVal[i-3][0]-MaxVal[i-2][0])>40 && abs(MaxVal[i][0]-MaxVal[i-1][0])>40 )
{
MaxVal[i-1][0]=(MaxVal[i][0]+MaxVal[i-1][0])/10;
MaxVal[i-2][0]=(MaxVal[i-3][0]+MaxVal[i-2][0])/10;
}
if ( abs(MaxVal[i-2][0]-MaxVal[i-1][0])>40 && abs(MaxVal[i][0]-MaxVal[i-1][0])>40 )
{
MaxVal[i-1][0]=(MaxVal[i-1][0]+MaxVal[i-2][0])/2;
}
}
//Convert disparities to distances
for(i=0;i<752;i++)
{
distance = 100;
disparity = MaxVal[i][0];
if(disparity != 0)
{
distance = (numerator*255) / (disparity*64);
}
// printf("MaksVal %d %d =%d\t odleglosc %lf\n", i, MaxVal[i][2], MaxVal[i][0], distance);
if(distance < 2.5)
{
X=(i-752/2)*0.006;
ZM=pow(3*3+X*X,0.5);
X=300+(X*distance*1000/(ZM*5));
//.........the remaining code is omitted here.........
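For context, the distance computation in this example is the standard pinhole stereo relation Z = f·B/d; the extra 255/64 factor appears to rescale the stored 8-bit pixel value back into a disparity in pixels. A minimal sketch of the bare relation (the function name and the infinity fallback are illustrative assumptions):

#include <limits>

// Z = f * B / d: depth from focal length (pixels), baseline (meters), disparity (pixels)
float depthFromDisparity(float focalLengthPx, float baselineM, float disparityPx)
{
    if (disparityPx <= 0.0f)
        return std::numeric_limits<float>::infinity(); // no valid match
    return (focalLengthPx * baselineM) / disparityPx;
}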
Example 15: image_cb
void image_cb (const sensor_msgs::ImageConstPtr& image) {
int threshold_value;
ros::param::param<int>("/vision_threshold_binary", threshold_value, 95);
int min_objekt_size;
ros::param::param<int>("/vision_min_objekt_size", min_objekt_size, 4000);
int keep_object_count;
ros::param::param<int>("/vision_keep_object_count", keep_object_count, 4);
int canny_threshold;
ros::param::param<int>("/vision_canny_threshold", canny_threshold, 40);
int corner_threshold;
ros::param::param<int>("/vision_corner_threshold", corner_threshold, 25);
int const max_BINARY_value = 255;
Mat grey, thresholded, filtered;
#ifdef SHOW_IMAGE_COUNT
static long imcount = 0;
cout << "Image "<< ++imcount <<" recieved" << endl;
#endif
cv_bridge::CvImagePtr cv_ptr;
cv_bridge::CvImagePtr cv_pthresholded(new cv_bridge::CvImage());
cv_bridge::CvImagePtr cv_pdebug(new cv_bridge::CvImage());
sensor_msgs::ImagePtr msg_thresholded;
sensor_msgs::ImagePtr msg_debug;
try
{
cv_ptr = cv_bridge::toCvCopy(image, sensor_msgs::image_encodings::BGR8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
Mat frame = cv_ptr->image.clone();
cvtColor( frame, grey, CV_RGB2GRAY );
int dilate_size = 3;
Mat dilate_element = getStructuringElement( MORPH_RECT,
Size( 2*dilate_size + 1, 2*dilate_size+1 ),
Point( dilate_size, dilate_size ) );
threshold( grey, thresholded, threshold_value, max_BINARY_value, THRESH_BINARY_INV ); //Binary Inverted
dilate(thresholded, filtered, dilate_element);
erode(filtered, filtered, dilate_element);
try
{
cv_pthresholded->image = filtered;
cv_pthresholded->header = cv_ptr->header;
cv_pthresholded->encoding = sensor_msgs::image_encodings::MONO8;
msg_thresholded = cv_pthresholded->toImageMsg();
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
pub_thresholded.publish(msg_thresholded);
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(filtered, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Approximate contours to polygons + get bounding rects and circles
vector<vector<Point> > contours_poly( contours.size() );
vector<Rect> boundRect( contours.size() );
vector<RotatedRect> minRect( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
boundRect[i] = boundingRect( Mat(contours_poly[i]) );
minRect[i] = minAreaRect( Mat(contours_poly[i]) );
}
size_t filteredOutBound = 0;
size_t filteredOutRot = 0;
vector<Rect> bigBoundRecs(boundRect.size());
vector<RotatedRect> bigRotRecs(minRect.size());
for( size_t i = 0; i < boundRect.size(); ++i ){
if (boundRect[i].area() > static_cast<size_t>(min_objekt_size)) {
bigBoundRecs[i-filteredOutBound] = boundRect[i];
} else {
++filteredOutBound;
}
}
for( size_t i = 0; i < minRect.size(); ++i ){
if (getRotatedArea(minRect[i]) > min_objekt_size) {
bigRotRecs[i-filteredOutRot] = minRect[i];
} else {
++filteredOutRot;
}
}
/// Filter the x Largest Objects (BoundRects)
long filteredSizeBound;
//.........the remaining code is omitted here.........