本文整理汇总了C++中MarkerDetector::getThresholdedImage方法的典型用法代码示例。如果您正苦于以下问题:C++ MarkerDetector::getThresholdedImage方法的具体用法?C++ MarkerDetector::getThresholdedImage怎么用?C++ MarkerDetector::getThresholdedImage使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类MarkerDetector的用法示例。
在下文中一共展示了MarkerDetector::getThresholdedImage方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: cvTackBarEvents
// Trackbar callback: clamps the slider values to valid ranges, pushes them to
// the detector, re-runs detection and refreshes both display windows.
void cvTackBarEvents(int pos, void*)
{
    (void)pos;  // values are read from the global trackbar variables instead
    // Threshold param 1 is an adaptive-threshold window size: must be odd and >= 3.
    if (iThresParam1 < 3)
        iThresParam1 = 3;
    if (iThresParam1 % 2 != 1)
        iThresParam1++;
    // BUG FIX: the original clamped ThresParam2 *before* overwriting it with
    // iThresParam2 below, so the clamp never had any effect. Clamp the slider
    // value itself so a zero from the trackbar cannot reach the detector.
    if (iThresParam2 < 1)
        iThresParam2 = 1;
    ThresParam1 = iThresParam1;
    ThresParam2 = iThresParam2;
    MDetector.setThresholdParams(ThresParam1, ThresParam2);
    // recompute the detection with the new parameters
    MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters);
    TheInputImage.copyTo(TheInputImageCopy);
    for (unsigned int i = 0; i < TheMarkers.size(); i++)
        TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255), 1);
    // draw a 3d cube on each marker if camera calibration info is available
    if (TheCameraParameters.isValid())
        for (unsigned int i = 0; i < TheMarkers.size(); i++)
            CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
    cv::imshow("in", TheInputImageCopy);
    cv::imshow("thres", MDetector.getThresholdedImage());
}
示例2: cvTackBarEvents
// Trackbar callback for the marker-map demo: validates the slider values,
// updates the detector thresholds, re-runs detection and redraws the
// detection / pose overlay in both windows.
void cvTackBarEvents(int pos, void*)
{
    (void)(pos);
    // Threshold param 1 is an adaptive-threshold window size: odd and >= 3.
    if (iThresParam1 < 3)
        iThresParam1 = 3;
    if (iThresParam1 % 2 != 1)
        iThresParam1++;
    // BUG FIX: the original clamped ThresParam2 *before* overwriting it with
    // iThresParam2 below, so the clamp never had any effect. Clamp the slider
    // value itself instead.
    if (iThresParam2 < 1)
        iThresParam2 = 1;
    ThresParam1 = iThresParam1;
    ThresParam2 = iThresParam2;
    TheMarkerDetector.setThresholdParams(ThresParam1, ThresParam2);
    // detect
    vector<aruco::Marker> detected_markers = TheMarkerDetector.detect(TheInputImage);
    // draw only the detected markers that belong to the marker set
    for (auto idx : TheMarkerMapConfig.getIndices(detected_markers))
        detected_markers[idx].draw(TheInputImageCopy, Scalar(0, 0, 255), 2);
    // estimate and draw the 3d pose if the tracker is configured
    if (TheMSPoseTracker.isValid())
    {
        TheMSPoseTracker.estimatePose(detected_markers);
        aruco::CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheCameraParameters, TheMSPoseTracker.getRvec(),
                                          TheMSPoseTracker.getTvec(), TheMarkerMapConfig[0].getMarkerSize() * 2);
    }
    cv::imshow("in", TheInputImageCopy);
    cv::imshow("thres", TheMarkerDetector.getThresholdedImage());
}
示例3: cvTackBarEvents
// Trackbar callback: re-reads every GUI parameter (thresholds, enclosed-marker
// mode, correction rate, candidate display), reconfigures the detector and
// re-runs detection so the display windows reflect the new settings.
void cvTackBarEvents(int pos, void*)
{
    (void)(pos);
    // Threshold param 1 is an adaptive-threshold window size: odd and >= 3.
    if (iThresParam1 < 3)
        iThresParam1 = 3;
    if (iThresParam1 % 2 != 1)
        iThresParam1++;
    // BUG FIX: the original clamped iThresParam1 here a second time (a
    // copy-paste slip — it is already guaranteed >= 3 above); the lower
    // bound belongs to iThresParam2, which was passed through unclamped.
    if (iThresParam2 < 1)
        iThresParam2 = 1;
    MDetector.setThresholdParams(iThresParam1, iThresParam2);
    if (iEnclosedMarkers)
    {
        // Enclosed-marker mode needs erosion and sub-pixel corner refinement.
        auto params = MDetector.getParams();
        params._doErosion = true;
        params._cornerMethod = aruco::MarkerDetector::SUBPIX;
        MDetector.setParams(params);
    }
    else
    {
        auto params = MDetector.getParams();
        params._doErosion = false;
        params._cornerMethod = aruco::MarkerDetector::LINES;
        MDetector.setParams(params);
    }
    // sets the dictionary to be employed (ARUCO, APRILTAGS, ARTOOLKIT, etc.)
    MDetector.setDictionary(dictionaryString, float(iCorrectionRate) / 10.f);
    // recompute
    MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters);
    TheInputImage.copyTo(TheInputImageCopy);
    if (iShowAllCandidates)
    {
        // also draw the rejected candidates (magenta) for debugging
        auto candidates = MDetector.getCandidates();
        for (auto cand : candidates)
            Marker(cand, -1).draw(TheInputImageCopy, Scalar(255, 0, 255));
    }
    for (unsigned int i = 0; i < TheMarkers.size(); i++)
        TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
    // draw a 3d cube in each marker if there is 3d info
    if (TheCameraParameters.isValid())
        for (unsigned int i = 0; i < TheMarkers.size(); i++)
            CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
    cv::imshow("in", resize(TheInputImageCopy, 1280));
    cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));
}
示例4: image_callback
// ROS image callback: runs ArUco detection on every incoming frame, publishes
// the TF and pose of the selected marker id, and republishes annotated /
// thresholded debug images when there are subscribers.
// Relies on file-level globals (mDetector, camParam, markers, publishers, ...)
// that are configured elsewhere — presumably in the node's main(); not visible here.
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
    // double ticksBefore = cv::getTickCount();
    // static so the TF broadcaster is created once and reused across callbacks
    static tf::TransformBroadcaster br;
    // Do nothing until the camera calibration has arrived.
    if(cam_info_received)
    {
        cv_bridge::CvImagePtr cv_ptr;
        try
        {
            // Convert the ROS image message to an OpenCV RGB8 image (deep copy).
            cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
            inImage = cv_ptr->image;
            //detection results will go into "markers"
            markers.clear();
            //Ok, let's detect
            mDetector.detect(inImage, markers, camParam, marker_size, false);
            //for each marker, draw info and its boundaries in the image
            for(size_t i=0; i<markers.size(); ++i)
            {
                // only publishing the selected marker
                if(markers[i].id == marker_id)
                {
                    tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
                    tf::StampedTransform stampedTransform(transform, ros::Time::now(),
                                                          parent_name, child_name);
                    br.sendTransform(stampedTransform);
                    geometry_msgs::PoseStamped poseMsg;
                    tf::poseTFToMsg(transform, poseMsg.pose);
                    poseMsg.header.frame_id = parent_name;
                    poseMsg.header.stamp = ros::Time::now();
                    pose_pub.publish(poseMsg);
                }
                // but drawing all the detected markers
                markers[i].draw(inImage,Scalar(0,0,255),2);
            }
            //draw a 3d axis on each marker if there is 3d info
            // (marker_size == -1 means the size is unknown, so no pose can be drawn)
            if(camParam.isValid() && marker_size!=-1)
            {
                for(size_t i=0; i<markers.size(); ++i)
                {
                    //CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
                    CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
                }
            }
            // Publish the annotated image only if someone is subscribed.
            if(image_pub.getNumSubscribers() > 0)
            {
                //show input with augmented information
                cv_bridge::CvImage out_msg;
                out_msg.header.stamp = ros::Time::now();
                out_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC3;
                out_msg.image = inImage;
                image_pub.publish(out_msg.toImageMsg());
            }
            if(debug_pub.getNumSubscribers() > 0)
            {
                //show also the internal image resulting from the threshold operation
                cv_bridge::CvImage debug_msg;
                debug_msg.header.stamp = ros::Time::now();
                debug_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
                debug_msg.image = mDetector.getThresholdedImage();
                debug_pub.publish(debug_msg.toImageMsg());
            }
            // ROS_INFO("runtime: %f ms",
            //          1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
        }
        catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }
    }
}
示例5: main
//.........这里部分代码省略.........
if (TheInputVideo.find(":") != string::npos)
{
std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' ');
sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx);
}
cout << "Opening camera index " << vIdx << endl;
TheVideoCapturer.open(vIdx);
waitTime = 10;
}
else
TheVideoCapturer.open(TheInputVideo);
// check video is open
if (!TheVideoCapturer.isOpened())
throw std::runtime_error("Could not open video");
///// CONFIGURE DATA
// read first image to get the dimensions
TheVideoCapturer >> TheInputImage;
if (TheCameraParameters.isValid())
TheCameraParameters.resize(TheInputImage.size());
dictionaryString=cml("-d", "ARUCO");
MDetector.setDictionary(dictionaryString,float(iCorrectionRate)/10. ); // sets the dictionary to be employed (ARUCO,APRILTAGS,ARTOOLKIT,etc)
MDetector.setThresholdParams(7, 7);
MDetector.setThresholdParamRange(2, 0);
// gui requirements : the trackbars to change this parameters
iThresParam1 = static_cast<int>(MDetector.getParams()._thresParam1);
iThresParam2 = static_cast<int>(MDetector.getParams()._thresParam2);
cv::namedWindow("in");
cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
cv::createTrackbar("correction_rate", "in", &iCorrectionRate, 10, cvTackBarEvents);
cv::createTrackbar("EnclosedMarkers", "in", &iEnclosedMarkers, 1, cvTackBarEvents);
cv::createTrackbar("ShowAllCandidates", "in", &iShowAllCandidates, 1, cvTackBarEvents);
// go!
char key = 0;
int index = 0,indexSave=0;
// capture until press ESC or until the end of the video
do
{
TheVideoCapturer.retrieve(TheInputImage);
// copy image
double tick = (double)getTickCount(); // for checking the speed
// Detection of markers in the image passed
TheMarkers = MDetector.detect(TheInputImage, TheCameraParameters, TheMarkerSize);
// chekc the speed by calculating the mean speed of all iterations
AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
AvrgTime.second++;
cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second
<< " milliseconds nmarkers=" << TheMarkers.size() << std::endl;
// print marker info and draw the markers in image
TheInputImage.copyTo(TheInputImageCopy);
if (iShowAllCandidates){
auto candidates=MDetector.getCandidates();
for(auto cand:candidates)
Marker(cand,-1).draw(TheInputImageCopy, Scalar(255, 0, 255));
}
for (unsigned int i = 0; i < TheMarkers.size(); i++)
{
cout << TheMarkers[i] << endl;
TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
}
// draw a 3d cube in each marker if there is 3d info
if (TheCameraParameters.isValid() && TheMarkerSize > 0)
for (unsigned int i = 0; i < TheMarkers.size(); i++)
{
CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
}
// DONE! Easy, right?
// show input with augmented information and the thresholded image
cv::imshow("in", resize(TheInputImageCopy, 1280));
cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));
key = cv::waitKey(waitTime); // wait for key to be pressed
if (key == 's')
waitTime = waitTime == 0 ? 10 : 0;
if (key == 'w'){//writes current input image
string number=std::to_string(indexSave++);
while(number.size()!=3)number+="0";
string imname="arucoimage"+number+".png";
cv::imwrite(imname,TheInputImage);
cout<<"saved "<<imname<<endl;
}
index++; // number of images captured
} while (key != 27 && (TheVideoCapturer.grab()));
}
catch (std::exception& ex)
{
cout << "Exception :" << ex.what() << endl;
}
}
示例6: main
/**
 * Minimal ArUco detection demo.
 *
 * Usage: (in.jpg|in.avi) [cameraParams.yml] [markerSize] [outImage]
 *
 * Loads a still image (or the first frame of a video), detects markers,
 * prints each one to stdout, shows the annotated and thresholded views,
 * and optionally writes the annotated image to disk.
 *
 * Returns 0 on success, 1 on bad usage or unreadable input.
 */
int main(int argc, char** argv)
{
    try
    {
        if (argc < 2)
        {
            cerr << "Usage: (in.jpg|in.avi) [cameraParams.yml] [markerSize] [outImage]" << endl;
            return 1;  // BUG FIX: was exit(0) — a missing argument is an error, not success
        }
        aruco::CameraParameters CamParam;
        MarkerDetector MDetector;
        vector<Marker> Markers;
        float MarkerSize = -1;  // -1 means "unknown": no 3d cube will be drawn
        cv::Mat InImage;
        // try opening the input first as a video; fall back to a still image
        VideoCapture vreader(argv[1]);
        if (vreader.isOpened())
        {
            vreader.grab();
            vreader.retrieve(InImage);
        }
        else
        {
            InImage = cv::imread(argv[1]);
        }
        // at this point we should have the image in InImage; if empty, bail out
        if (InImage.total() == 0)
        {
            cerr << "Could not open input" << endl;
            return 1;  // BUG FIX: was return 0 — failing to open the input is an error
        }
        // read camera parameters if specified
        if (argc >= 3)
        {
            CamParam.readFromXMLFile(argv[2]);
            // resize the parameters to fit the size of the input image
            CamParam.resize(InImage.size());
        }
        // read marker size if specified
        if (argc >= 4)
            MarkerSize = atof(argv[3]);
        cv::namedWindow("in", 1);
        // Ok, let's detect
        MDetector.detect(InImage, Markers, CamParam, MarkerSize);
        // for each marker, print its info and draw its boundaries in the image
        for (unsigned int i = 0; i < Markers.size(); i++)
        {
            cout << Markers[i] << endl;
            Markers[i].draw(InImage, Scalar(0, 0, 255), 2);
        }
        // draw a 3d cube in each marker if there is 3d info
        if (CamParam.isValid() && MarkerSize != -1)
            for (unsigned int i = 0; i < Markers.size(); i++)
            {
                CvDrawingUtils::draw3dCube(InImage, Markers[i], CamParam);
            }
        // show input with augmented information
        cv::imshow("in", InImage);
        // show also the internal image resulting from the threshold operation
        cv::imshow("thres", MDetector.getThresholdedImage());  // fixed window-name typo ("thes")
        cv::waitKey(0);  // wait for a key to be pressed
        if (argc >= 5)
            cv::imwrite(argv[4], InImage);
    }
    catch (std::exception& ex)
    {
        cout << "Exception :" << ex.what() << endl;
    }
    return 0;
}
示例7: image_callback
// ROS image callback: detects ArUco markers in the incoming frame, publishes
// the selected marker's pose transformed into reference_frame (as TF, Pose,
// Transform and Vector3 messages), and republishes annotated / thresholded
// debug images when there are subscribers.
// Relies on file-level globals (mDetector, camParam, frames, publishers, ...)
// configured elsewhere — presumably in the node's main(); not visible here.
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
    // static so the TF broadcaster is created once and reused across callbacks
    static tf::TransformBroadcaster br;
    // Do nothing until the camera calibration has arrived.
    if(cam_info_received)
    {
        cv_bridge::CvImagePtr cv_ptr;
        try
        {
            // Convert the ROS image message to an OpenCV RGB8 image (deep copy).
            cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
            inImage = cv_ptr->image;
            //detection results will go into "markers"
            markers.clear();
            //Ok, let's detect
            mDetector.detect(inImage, markers, camParam, marker_size, false);
            //for each marker, draw info and its boundaries in the image
            for(size_t i=0; i<markers.size(); ++i)
            {
                // only publishing the selected marker
                if(markers[i].id == marker_id)
                {
                    tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
                    tf::StampedTransform cameraToReference;
                    cameraToReference.setIdentity();
                    // look up camera -> reference only when the frames differ
                    if ( reference_frame != camera_frame )
                    {
                        getTransform(reference_frame,
                                     camera_frame,
                                     cameraToReference);
                    }
                    // chain: reference <- camera <- right/left correction <- marker
                    transform =
                        static_cast<tf::Transform>(cameraToReference)
                        * static_cast<tf::Transform>(rightToLeft)
                        * transform;
                    tf::StampedTransform stampedTransform(transform, ros::Time::now(),
                                                          reference_frame, marker_frame);
                    br.sendTransform(stampedTransform);
                    geometry_msgs::PoseStamped poseMsg;
                    tf::poseTFToMsg(transform, poseMsg.pose);
                    poseMsg.header.frame_id = reference_frame;
                    poseMsg.header.stamp = ros::Time::now();
                    pose_pub.publish(poseMsg);
                    // also publish the same pose as transform and position messages
                    geometry_msgs::TransformStamped transformMsg;
                    tf::transformStampedTFToMsg(stampedTransform, transformMsg);
                    transform_pub.publish(transformMsg);
                    geometry_msgs::Vector3Stamped positionMsg;
                    positionMsg.header = transformMsg.header;
                    positionMsg.vector = transformMsg.transform.translation;
                    position_pub.publish(positionMsg);
                }
                // but drawing all the detected markers
                markers[i].draw(inImage,cv::Scalar(0,0,255),2);
            }
            //draw a 3d axis on each marker if there is 3d info
            // (marker_size == -1 means the size is unknown, so no pose can be drawn)
            if(camParam.isValid() && marker_size!=-1)
            {
                for(size_t i=0; i<markers.size(); ++i)
                {
                    CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
                }
            }
            // Publish the annotated image only if someone is subscribed.
            if(image_pub.getNumSubscribers() > 0)
            {
                //show input with augmented information
                cv_bridge::CvImage out_msg;
                out_msg.header.stamp = ros::Time::now();
                out_msg.encoding = sensor_msgs::image_encodings::RGB8;
                out_msg.image = inImage;
                image_pub.publish(out_msg.toImageMsg());
            }
            if(debug_pub.getNumSubscribers() > 0)
            {
                //show also the internal image resulting from the threshold operation
                cv_bridge::CvImage debug_msg;
                debug_msg.header.stamp = ros::Time::now();
                debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
                debug_msg.image = mDetector.getThresholdedImage();
                debug_pub.publish(debug_msg.toImageMsg());
            }
        }
        catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }
    }
}
示例8: main
//.........这里部分代码省略.........
// read camera parameters if passed
if (cml["-c"])
{
TheCameraParameters.readFromXMLFile(cml("-c"));
TheCameraParameters.resize(TheInputImage.size());
}
// prepare the detector
string dict =
TheMarkerMapConfig
.getDictionary(); // see if the dictrionary is already indicated in the configuration file. It should!
if (dict.empty())
dict = "ARUCO";
TheMarkerDetector.setDictionary(
dict); /// DO NOT FORGET THAT!!! Otherwise, the ARUCO dictionary will be used by default!
if (stoi(cml("-corner", "0")) == 0)
TheMarkerDetector.setCornerRefinementMethod(MarkerDetector::LINES);
else
{
MarkerDetector::Params params = TheMarkerDetector.getParams();
params._cornerMethod = MarkerDetector::SUBPIX;
// search corner subpix in a 5x5 widow area
params._subpix_wsize = static_cast<int>((15.f / 2000.f) * float(TheInputImage.cols));
TheMarkerDetector.setParams(params);
}
// prepare the pose tracker if possible
// if the camera parameers are avaiable, and the markerset can be expressed in meters, then go
if (TheMarkerMapConfig.isExpressedInPixels() && TheMarkerSize > 0)
TheMarkerMapConfig = TheMarkerMapConfig.convertToMeters(TheMarkerSize);
cout << "TheCameraParameters.isValid()=" << TheCameraParameters.isValid() << " "
<< TheMarkerMapConfig.isExpressedInMeters() << endl;
if (TheCameraParameters.isValid() && TheMarkerMapConfig.isExpressedInMeters())
TheMSPoseTracker.setParams(TheCameraParameters, TheMarkerMapConfig);
// Create gui
cv::namedWindow("thres", 1);
cv::namedWindow("in", 1);
TheMarkerDetector.getThresholdParams(ThresParam1, ThresParam2);
iThresParam1 = static_cast<int>(ThresParam1);
iThresParam2 = static_cast<int>(ThresParam2);
cv::createTrackbar("ThresParam1", "in", &iThresParam1, 13, cvTackBarEvents);
cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
char key = 0;
int index = 0;
// capture until press ESC or until the end of the video
cout << "Press 's' to start/stop video" << endl;
do
{
TheVideoCapturer.retrieve(TheInputImage);
TheInputImage.copyTo(TheInputImageCopy);
index++; // number of images captured
// Detection of the board
vector<aruco::Marker> detected_markers = TheMarkerDetector.detect(TheInputImage);
// print the markers detected that belongs to the markerset
for (auto idx : TheMarkerMapConfig.getIndices(detected_markers))
detected_markers[idx].draw(TheInputImageCopy, Scalar(0, 0, 255), 2);
// detect 3d info if possible
if (TheMSPoseTracker.isValid())
{
if (TheMSPoseTracker.estimatePose(detected_markers))
{
aruco::CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheCameraParameters,
TheMSPoseTracker.getRvec(), TheMSPoseTracker.getTvec(),
TheMarkerMapConfig[0].getMarkerSize() * 2);
frame_pose_map.insert(make_pair(index, TheMSPoseTracker.getRTMatrix()));
cout << "pose rt=" << TheMSPoseTracker.getRvec() << " " << TheMSPoseTracker.getTvec() << endl;
}
}
// show input with augmented information and the thresholded image
cv::imshow("in", TheInputImageCopy);
cv::imshow("thres", TheMarkerDetector.getThresholdedImage());
key = cv::waitKey(waitTime); // wait for key to be pressed
processKey(key);
} while (key != 27 && TheVideoCapturer.grab());
// save a beatiful pcd file (pcl library) showing the results (you can use pcl_viewer to see it)
if (cml["-pcd"])
{
savePCDFile(cml("-pcd"), TheMarkerMapConfig, frame_pose_map);
}
// save the poses to a file in tum rgbd data format
if (cml["-poses"])
{
savePosesToFile(cml("-poses"), frame_pose_map);
}
}
catch (std::exception& ex)
{
cout << "Exception :" << ex.what() << endl;
}
}
示例9: main
//.........这里部分代码省略.........
for (unsigned int i=0;i<TheMarkers.size();i++) {
cout<<TheMarkers[i].Tvec.at<float>(0,0)<<","<<
TheMarkers[i].Tvec.at<float>(0,1)<<","<<
TheMarkers[i].Tvec.at<float>(0,2)<<",";
// cout<<TheMarkers[i]<<endl;
if (TheMarkers[i].id == 605 && 1 == 0) { // THIS WILL NEVER HAPPEN!!! 1 is not zero.
Mat R33;
cv::Rodrigues(TheMarkers[i].Rvec,R33);
cout << R33 << endl;
cout << TheMarkers[i].id << endl;
for (unsigned int maytr=0;maytr<TheMarkers.size();maytr++) { //take 0 , 1 , 0, inverse transform first, then transform.
if (TheMarkers[maytr].id == 500) {
zeroYzero.at<float>(0,0) = 0;
zeroYzero.at<float>(0,1) = 2;
zeroYzero.at<float>(0,2) = 0;
zeroYzero.at<float>(0,3) = 0;
zeroYzero.at<float>(0,4) = 0;
zeroYzero.at<float>(0,5) = 0;
cout << zeroYzero << endl;
Mat R33for500;
cout << TheMarkers[maytr].id<< "food"<< endl;
cv::Rodrigues(TheMarkers[maytr].Rvec,R33for500);
cout << R33for500 << endl;
//cout << TheMarkers[i].id << endl;
//R33for500 * R33.t() * zeroYzero; //transpose method
//Mat afterDouble;
//cout << TheMarkers[maytr].id<< "water"<< endl;
//afterDouble = R33for500 * (R33.inv() * zeroYzero); //inversion method
//R33for500 * (R33.inv() * zeroYzero); //inversion method
//cout << "shelll"<< endl;
//cout << afterDouble << endl;
//useVecLat.at<float>(0,0) = 0;
//useVecLat.at<float>(0,1) = 0;
//useVecLat.at<float>(0,2) = 0;
//useVecLat.at<float>(0,3) = afterDouble.at<float>(0,0);
//useVecLat.at<float>(0,4) = afterDouble.at<float>(0,1);
//useVecLat.at<float>(0,5) = afterDouble.at<float>(0,2);
//drawVecAtPos(TheInputImageCopy,TheMarkers[maytr],TheCameraParameters,afterDouble); //oneVect);
}
}
}
TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
//time date stuff
std::time_t result = std::time(NULL); //nullptr);
std::cout // << std::asctime(std::localtime(&result))
<< result; // << " seconds since the Epoch\n";
// fileOutt << "time" << result ; //<< endl;
cout<<endl; // <<endl<<endl;
}
//print other rectangles that contains no valid markers
/** for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
aruco::Marker m( MDetector.getCandidates()[i],999);
m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
}*/
//draw a 3d cube in each marker if there is 3d info
if ( TheCameraParameters.isValid())
for (unsigned int i=0;i<TheMarkers.size();i++) {
CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
//Never use this; just reference // CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
draw3dAxisj(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
drawVecAtPos(TheInputImageCopy,TheMarkers[i],TheCameraParameters,oneVect);
drawVecsAtPosTesting(TheInputImageCopy,TheMarkers[i],TheCameraParameters,someVects);
}
//DONE! Easy, right?
//show input with augmented information and the thresholded image
cv::imshow("in",TheInputImageCopy);
cv::imshow("thres",MDetector.getThresholdedImage());
key=cv::waitKey(waitTime);//wait for key to be pressed
}
// fileOutt.release();
} catch (std::exception &ex)
{
cout<<"Exception :"<<ex.what()<<endl;
}
}
示例10:
/**
 * JNI bridge for MarkerDetector::getThresholdedImage.
 *
 * @param env    JNI environment pointer.
 * @param obj    Java wrapper object holding the native detector handle.
 * @param imAddr Native address of a caller-owned cv::Mat that receives the
 *               thresholded image (OpenCV Java convention for passing Mats).
 */
JNIEXPORT void JNICALL Java_wrapper_MarkerDetector_JgetThresholdedImage(JNIEnv* env, jobject obj, jlong imAddr)
{
    // Recover the native detector instance bound to this Java object.
    MarkerDetector* inst = getHandle<MarkerDetector>(env, obj);
    // Idiom fix: named cast instead of a C-style cast for the address round-trip.
    cv::Mat* img = reinterpret_cast<cv::Mat*>(imAddr);
    inst->getThresholdedImage(*img);
}
示例11: image_callback
//.........这里部分代码省略.........
poseMsg.header.stamp = curr_stamp;
double aruco_roll_, aruco_pitch_, aruco_yaw_;
tf::Quaternion aruco_quat_;
tf::quaternionMsgToTF(poseMsg.pose.orientation, aruco_quat_);
tf::Matrix3x3(aruco_quat_).getRPY(aruco_roll_, aruco_pitch_, aruco_yaw_);
//ROS_INFO("April Tag RPY: [%0.3f, %0.3f, %0.3f]", aruco_roll_*180/3.1415926, aruco_pitch_*180/3.1415926, aruco_yaw_*180/3.1415926);
//-------------------------unified coordinate systems of pose----------------------------
aruco_yaw_ = aruco_yaw_*180/3.141592;
printf("Original [x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n", poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);
if (markers[i].id == 26 || markers[i].id == 58)
{
float PI = 3.1415926;
float ang = PI*3/4;
float x_0 = -0.045;//-0.015;
float y_0 = -0.015;//0.045;
poseMsg.pose.position.x = x_0 + poseMsg.pose.position.x;// + poseMsg.pose.position.x * cos(-ang) - poseMsg.pose.position.y * sin(-ang);
poseMsg.pose.position.y = y_0 + poseMsg.pose.position.y;// + poseMsg.pose.position.x * sin(-ang) + poseMsg.pose.position.y * cos(-ang);
//printf("[x, y] = [%0.3f, %0.3f]\n",poseMsg.pose.position.x, poseMsg.pose.position.y);
aruco_yaw_ = aruco_yaw_ + ang*180/PI;
printf("[x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n",poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);
//printf("-----------unify the coordinate system ---------------------\n");
}
//printf("------------------------------------------------------------------\n-----------------aruco_yaw_ = %0.3f\n", aruco_yaw_);
//printf("------------------------------------------------------------------\n");
double temp_x = poseMsg.pose.position.x;
double temp_y = poseMsg.pose.position.y;
poseMsg.pose.position.x = -temp_y;
poseMsg.pose.position.y = -temp_x;
tf::Quaternion quat = tf::createQuaternionFromRPY(aruco_roll_, aruco_pitch_, aruco_yaw_);
poseMsg.pose.orientation.x = quat.x();
poseMsg.pose.orientation.y = quat.y();
poseMsg.pose.orientation.z = quat.z();
poseMsg.pose.orientation.w = quat.w();
pose_pub.publish(poseMsg);
geometry_msgs::TransformStamped transformMsg;
tf::transformStampedTFToMsg(stampedTransform, transformMsg);
transform_pub.publish(transformMsg);
geometry_msgs::Vector3Stamped positionMsg;
positionMsg.header = transformMsg.header;
positionMsg.vector = transformMsg.transform.translation;
position_pub.publish(positionMsg);
}
// but drawing all the detected markers
markers[i].draw(inImage,cv::Scalar(0,0,255),2);
}
//draw a 3d cube in each marker if there is 3d info
if(camParam.isValid() && marker_size!=-1)
{
for(size_t i=0; i<markers.size(); ++i)
{
CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
}
}
if(image_pub.getNumSubscribers() > 0)
{
//show input with augmented information
cv_bridge::CvImage out_msg;
out_msg.header.stamp = curr_stamp;
out_msg.encoding = sensor_msgs::image_encodings::RGB8;
out_msg.image = inImage;
image_pub.publish(out_msg.toImageMsg());
}
if(debug_pub.getNumSubscribers() > 0)
{
//show also the internal image resulting from the threshold operation
cv_bridge::CvImage debug_msg;
debug_msg.header.stamp = curr_stamp;
debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
debug_msg.image = mDetector.getThresholdedImage();
debug_pub.publish(debug_msg.toImageMsg());
}
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
}
}
示例12: image_callback
//.........这里部分代码省略.........
}
// but drawing all the detected markers
markers[i].draw(inImage,Scalar(0,0,255),2);
}
//paint a circle in the center of the image
cv::circle(inImage, cv::Point(inImage.cols/2, inImage.rows/2), 4, cv::Scalar(0,255,0), 1);
if ( markers.size() == 2 )
{
float x[2], y[2], u[2], v[2];
for (unsigned int i = 0; i < 2; ++i)
{
ROS_DEBUG_STREAM("Marker(" << i << ") at camera coordinates = ("
<< markers[i].Tvec.at<float>(0,0) << ", "
<< markers[i].Tvec.at<float>(1,0) << ", "
<< markers[i].Tvec.at<float>(2,0));
//normalized coordinates of the marker
x[i] = markers[i].Tvec.at<float>(0,0)/markers[i].Tvec.at<float>(2,0);
y[i] = markers[i].Tvec.at<float>(1,0)/markers[i].Tvec.at<float>(2,0);
//undistorted pixel
u[i] = x[i]*camParam.CameraMatrix.at<float>(0,0) +
camParam.CameraMatrix.at<float>(0,2);
v[i] = y[i]*camParam.CameraMatrix.at<float>(1,1) +
camParam.CameraMatrix.at<float>(1,2);
}
ROS_DEBUG_STREAM("Mid point between the two markers in the image = ("
<< (x[0]+x[1])/2 << ", " << (y[0]+y[1])/2 << ")");
// //paint a circle in the mid point of the normalized coordinates of both markers
// cv::circle(inImage,
// cv::Point((u[0]+u[1])/2, (v[0]+v[1])/2),
// 3, cv::Scalar(0,0,255), CV_FILLED);
//compute the midpoint in 3D:
float midPoint3D[3]; //3D point
for (unsigned int i = 0; i < 3; ++i )
midPoint3D[i] = ( markers[0].Tvec.at<float>(i,0) +
markers[1].Tvec.at<float>(i,0) ) / 2;
//now project the 3D mid point to normalized coordinates
float midPointNormalized[2];
midPointNormalized[0] = midPoint3D[0]/midPoint3D[2]; //x
midPointNormalized[1] = midPoint3D[1]/midPoint3D[2]; //y
u[0] = midPointNormalized[0]*camParam.CameraMatrix.at<float>(0,0) +
camParam.CameraMatrix.at<float>(0,2);
v[0] = midPointNormalized[1]*camParam.CameraMatrix.at<float>(1,1) +
camParam.CameraMatrix.at<float>(1,2);
ROS_DEBUG_STREAM("3D Mid point between the two markers in undistorted pixel coordinates = ("
<< u[0] << ", " << v[0] << ")");
//paint a circle in the mid point of the normalized coordinates of both markers
cv::circle(inImage,
cv::Point(u[0], v[0]),
3, cv::Scalar(0,0,255), CV_FILLED);
}
//draw a 3d cube in each marker if there is 3d info
if(camParam.isValid() && marker_size!=-1)
{
for(unsigned int i=0; i<markers.size(); ++i)
{
CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
}
}
if(image_pub.getNumSubscribers() > 0)
{
//show input with augmented information
cv_bridge::CvImage out_msg;
out_msg.header.stamp = ros::Time::now();
out_msg.encoding = sensor_msgs::image_encodings::RGB8;
out_msg.image = inImage;
image_pub.publish(out_msg.toImageMsg());
}
if(debug_pub.getNumSubscribers() > 0)
{
//show also the internal image resulting from the threshold operation
cv_bridge::CvImage debug_msg;
debug_msg.header.stamp = ros::Time::now();
debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
debug_msg.image = mDetector.getThresholdedImage();
debug_pub.publish(debug_msg.toImageMsg());
}
ROS_DEBUG("runtime: %f ms",
1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
}
}
示例13: main
//.........这里部分代码省略.........
} else
TheVideoCapturer.open(TheInputVideo);
// check video is open
if (!TheVideoCapturer.isOpened()) {
cerr << "Could not open video" << endl;
return -1;
}
bool isVideoFile = false;
if (TheInputVideo.find(".avi") != std::string::npos || TheInputVideo.find("live") != string::npos)
isVideoFile = true;
// read first image to get the dimensions
TheVideoCapturer >> TheInputImage;
// read camera parameters if passed
if (TheIntrinsicFile != "") {
TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
TheCameraParameters.resize(TheInputImage.size());
}
// Configure other parameters
if (ThePyrDownLevel > 0)
MDetector.pyrDown(ThePyrDownLevel);
// Create gui
cv::namedWindow("thres", 1);
cv::namedWindow("in", 1);
MDetector.setThresholdParams(7, 7);
MDetector.setThresholdParamRange(2, 0);
// MDetector.enableLockedCornersMethod(true);
// MDetector.setCornerRefinementMethod ( MarkerDetector::SUBPIX );
MDetector.getThresholdParams(ThresParam1, ThresParam2);
iThresParam1 = ThresParam1;
iThresParam2 = ThresParam2;
//cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, cvTackBarEvents);
//cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents);
char key = 0;
int index = 0;
// capture until press ESC or until the end of the video
TheVideoCapturer.retrieve(TheInputImage);
cv::Size sz = TheInputImage.size();
MDetector.createCudaBuffers(sz.width, sz.height);
do {
// copy image
index++; // number of images captured
double tick = (double)getTickCount(); // for checking the speed
// Detection of markers in the image passed
MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters, TheMarkerSize);
// chekc the speed by calculating the mean speed of all iterations
AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency();
AvrgTime.second++;
cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second << " milliseconds nmarkers=" << TheMarkers.size() << std::flush;
// print marker info and draw the markers in image
TheInputImage.copyTo(TheInputImageCopy);
for (unsigned int i = 0; i < TheMarkers.size(); i++) {
cout << endl << TheMarkers[i];
TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255), 1);
}
if (TheMarkers.size() != 0)
cout << endl;
// print other rectangles that contains no valid markers
/** for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
aruco::Marker m( MDetector.getCandidates()[i],999);
m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
}*/
// draw a 3d cube in each marker if there is 3d info
if (TheCameraParameters.isValid())
for (unsigned int i = 0; i < TheMarkers.size(); i++) {
CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
}
// DONE! Easy, right?
// show input with augmented information and the thresholded image
cv::imshow("in", TheInputImageCopy);
cv::imshow("thres", MDetector.getThresholdedImage());
//cv::imshow("thres_gpu", MDetector.getThresholdedImageGPU());
key = cv::waitKey(waitTime); // wait for key to be pressed
if (isVideoFile)
TheVideoCapturer.retrieve(TheInputImage);
} while (key != 27 && (TheVideoCapturer.grab() || !isVideoFile));
} catch (std::exception &ex)
{
cout << "Exception :" << ex.what() << endl;
}
}
示例14: image_callback
//.........这里部分代码省略.........
tf::StampedTransform stampedTransform(transform, msg->header.stamp, msg->header.frame_id, board_frame);
if (publish_tf)
br.sendTransform(stampedTransform);
geometry_msgs::PoseStamped poseMsg;
tf::poseTFToMsg(transform, poseMsg.pose);
poseMsg.header.frame_id = msg->header.frame_id;
poseMsg.header.stamp = msg->header.stamp;
pose_pub.publish(poseMsg);
geometry_msgs::TransformStamped transformMsg;
tf::transformStampedTFToMsg(stampedTransform, transformMsg);
transform_pub.publish(transformMsg);
geometry_msgs::Vector3Stamped positionMsg;
positionMsg.header = transformMsg.header;
positionMsg.vector = transformMsg.transform.translation;
position_pub.publish(positionMsg);
std_msgs::Float32 boardSizeMsg;
boardSizeMsg.data=the_board_detected[0].ssize;
boardSize_pub.publish(boardSizeMsg);
}
//ADDED////////////////////////////////////////////////////////////////////////////
if(rvec_pub.getNumSubscribers() > 0 && foundBoard)
{
cv_bridge::CvImage rvecMsg;
rvecMsg.header.frame_id = msg->header.frame_id;
rvecMsg.header.stamp = msg->header.stamp;
rvecMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
rvecMsg.image = the_board_detected[0].Rvec;
rvec_pub.publish(rvecMsg.toImageMsg());
}
if(tvec_pub.getNumSubscribers() > 0 && foundBoard)
{
cv_bridge::CvImage tvecMsg;
tvecMsg.header.frame_id = msg->header.frame_id;
tvecMsg.header.stamp = msg->header.stamp;
tvecMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
tvecMsg.image = the_board_detected[0].Tvec;
tvec_pub.publish(tvecMsg.toImageMsg());
}
///////////////////////////////////////////////////////////////////////////////
//for each marker, draw info and its boundaries in the image
for(size_t i=0; draw_markers && i < markers.size(); ++i)
{
markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
}
if(camParam.isValid() && marker_size != -1)
{
//draw a 3d cube in each marker if there is 3d info
for(size_t i=0; i<markers.size(); ++i)
{
if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
}
//draw board axis
if (probDetect > 0.0)
{
CvDrawingUtils::draw3dAxis(resultImg, the_board_detected, camParam);
}
}
if(image_pub.getNumSubscribers() > 0)
{
//show input with augmented information
cv_bridge::CvImage out_msg;
out_msg.header.frame_id = msg->header.frame_id;
out_msg.header.stamp = msg->header.stamp;
out_msg.encoding = sensor_msgs::image_encodings::RGB8;
out_msg.image = resultImg;
image_pub.publish(out_msg.toImageMsg());
}
if(debug_pub.getNumSubscribers() > 0)
{
//show also the internal image resulting from the threshold operation
cv_bridge::CvImage debug_msg;
debug_msg.header.frame_id = msg->header.frame_id;
debug_msg.header.stamp = msg->header.stamp;
debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
debug_msg.image = mDetector.getThresholdedImage();
debug_pub.publish(debug_msg.toImageMsg());
}
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
}
示例15: main
int main(int argc,char **argv)
{
try
{
if (readArguments (argc,argv)==false) {
return 0;
}
//parse arguments
;
//read from camera or from file
if (TheInputVideo=="live") {
TheVideoCapturer.open(0);
waitTime=10;
}
else TheVideoCapturer.open(TheInputVideo);
//check video is open
if (!TheVideoCapturer.isOpened()) {
cerr<<"Could not open video"<<endl;
return -1;
}
//read first image to get the dimensions
TheVideoCapturer>>TheInputImage;
//read camera parameters if passed
if (TheIntrinsicFile!="") {
TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
TheCameraParameters.resize(TheInputImage.size());
}
//Configure other parameters
if (ThePyrDownLevel>0)
MDetector.pyrDown(ThePyrDownLevel);
//Create gui
cv::namedWindow("thres",1);
cv::namedWindow("in",1);
MDetector.getThresholdParams( ThresParam1,ThresParam2);
MDetector.setCornerRefinementMethod(MarkerDetector::LINES);
iThresParam1=ThresParam1;
iThresParam2=ThresParam2;
cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);
char key=0;
int index=0;
//capture until press ESC or until the end of the video
while ( key!=27 && TheVideoCapturer.grab())
{
TheVideoCapturer.retrieve( TheInputImage);
//copy image
index++; //number of images captured
double tick = (double)getTickCount();//for checking the speed
//Detection of markers in the image passed
MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
//chekc the speed by calculating the mean speed of all iterations
AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
AvrgTime.second++;
cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;
//print marker info and draw the markers in image
TheInputImage.copyTo(TheInputImageCopy);
for (unsigned int i=0;i<TheMarkers.size();i++) {
cout<<TheMarkers[i]<<endl;
TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
}
//print other rectangles that contains no valid markers
/** for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
aruco::Marker m( MDetector.getCandidates()[i],999);
m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
}*/
//draw a 3d cube in each marker if there is 3d info
if ( TheCameraParameters.isValid())
for (unsigned int i=0;i<TheMarkers.size();i++) {
CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
}
//DONE! Easy, right?
cout<<endl<<endl<<endl;
//show input with augmented information and the thresholded image
cv::imshow("in",TheInputImageCopy);
cv::imshow("thres",MDetector.getThresholdedImage());
key=cv::waitKey(waitTime);//wait for key to be pressed
}
} catch (std::exception &ex)
{
cout<<"Exception :"<<ex.what()<<endl;
}
}