本文整理汇总了C++中MarkerDetector类的典型用法代码示例。如果您正苦于以下问题:C++ MarkerDetector类的具体用法?C++ MarkerDetector怎么用?C++ MarkerDetector使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了MarkerDetector类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: Java_cz_email_michalchomo_cardboardkeyboard_MainActivity_FindFeatures
JNIEXPORT void JNICALL
Java_cz_email_michalchomo_cardboardkeyboard_MainActivity_FindFeatures(JNIEnv *env, jobject instance,
jlong matAddrGr,
jlong matAddrRgba) {
    // The jlong arguments carry native cv::Mat addresses handed over from Java.
    Mat &grayFrame = *reinterpret_cast<Mat *>(matAddrGr);
    Mat &rgbaFrame = *reinterpret_cast<Mat *>(matAddrRgba);
    //vector<KeyPoint> v;

    // Run ArUco marker detection on the grayscale frame.
    MarkerDetector detector;
    vector<Marker> found;
    detector.detect(grayFrame, found);

    // Log each detected marker and outline it on the RGBA frame
    // in red (BGR 0,0,255) with a 2-pixel border.
    for (auto &marker : found) {
        cout << marker << endl;
        marker.draw(rgbaFrame, Scalar(0, 0, 255), 2);
    }
    /*Ptr<FeatureDetector> detector = FastFeatureDetector::create(50);
    detector->detect(mGr, v);
    for (unsigned int i = 0; i < v.size(); i++) {
    const KeyPoint &kp = v[i];
    circle(mRgb, Point(kp.pt.x, kp.pt.y), 10, Scalar(255, 0, 0, 255));
    }*/
}
示例2: cvTackBarEvents
// Trackbar callback: re-applies the threshold parameters, re-runs detection
// on the current frame, and refreshes the display windows.
void cvTackBarEvents(int pos, void*)
{
    (void)(pos);
    // The adaptive-threshold window (param 1) must be odd and >= 3.
    if (iThresParam1 < 3)
        iThresParam1 = 3;
    if (iThresParam1 % 2 != 1)
        iThresParam1++;
    // BUGFIX: the original clamped ThresParam2 and then immediately
    // overwrote it with iThresParam2 below, so the lower bound of 1 was
    // never enforced. Clamp the trackbar-backed value itself.
    if (iThresParam2 < 1)
        iThresParam2 = 1;
    ThresParam1 = iThresParam1;
    ThresParam2 = iThresParam2;
    TheMarkerDetector.setThresholdParams(ThresParam1, ThresParam2);
    // detect, print, get pose, and print
    // detect
    vector<aruco::Marker> detected_markers = TheMarkerDetector.detect(TheInputImage);
    // print the markers detected that belongs to the markerset
    for (auto idx : TheMarkerMapConfig.getIndices(detected_markers))
        detected_markers[idx].draw(TheInputImageCopy, Scalar(0, 0, 255), 2);
    // detect 3d info if possible
    if (TheMSPoseTracker.isValid())
    {
        TheMSPoseTracker.estimatePose(detected_markers);
        aruco::CvDrawingUtils::draw3dAxis(TheInputImageCopy, TheCameraParameters, TheMSPoseTracker.getRvec(),
                                          TheMSPoseTracker.getTvec(), TheMarkerMapConfig[0].getMarkerSize() * 2);
    }
    cv::imshow("in", TheInputImageCopy);
    cv::imshow("thres", TheMarkerDetector.getThresholdedImage());
}
示例3: main
// Detects ArUco markers in a single input image, prints them, and shows the
// annotated image. Usage: <program> in.jpg
int main(int argc,char **argv)
{
    try
    {
        if (argc!=2) {
            cerr<<"Usage: in.jpg "<<endl;
            return -1;
        }
        MarkerDetector MDetector;
        vector<Marker> Markers;
        //read the input image
        cv::Mat InImage;
        InImage=cv::imread(argv[1]);
        //Ok, let's detect
        MDetector.detect(InImage,Markers);
        //for each marker, draw info and its boundaries in the image
        for (unsigned int i=0; i<Markers.size(); i++) {
            cout<<Markers[i]<<endl;
            // BUGFIX: the loop body referenced Markers[0] instead of
            // Markers[i], repeatedly printing the first marker's data.
            cout<<Markers[i][0].x<<endl; // x coordinate of this marker's first corner
            // detect() was called without camera parameters, so Tvec may be
            // empty; guard before dereferencing to avoid undefined behavior.
            if (!Markers[i].Tvec.empty())
                cout<<Markers[i].Tvec.at<float>(0,0)<<endl; //x value of Tvec
            Markers[i].draw(InImage,Scalar(0,0,255),2);
        }
        cv::imshow("in",InImage);
        cv::waitKey(0);//wait for key to be pressed
    } catch (std::exception &ex)
    {
        cout<<"Exception :"<<ex.what()<<endl;
    }
    return 0;
}
示例4: Java_wrapper_MarkerDetector_Jwarp
// JNI bridge: forwards a warp request to the native MarkerDetector bound to
// this Java wrapper object. The jobject handles carry raw cv::Mat pointers.
JNIEXPORT jboolean JNICALL Java_wrapper_MarkerDetector_Jwarp(JNIEnv * env, jobject obj, jobject inAddr, jobject outAddr, jdouble width, jdouble height, jobject points){
    // Recover the native detector instance from the Java object's handle field.
    MarkerDetector *detector = getHandle<MarkerDetector>(env, obj);
    cv::Mat *source = (cv::Mat*)inAddr;
    cv::Mat *destination = (cv::Mat*)outAddr;
    // Target size of the warped output patch.
    Size targetSize = Size((double)width, (double)height);
    return detector->warp(source, destination, targetSize, points);
}
示例5: main
// Detects an ArUco marker board in a still image, optionally using camera
// parameters to draw 3D cubes/axes, and optionally saves the annotated image.
// Usage: image boardConfig.yml [cameraParams.yml] [markerSize] [outImage]
int main(int argc,char **argv)
{
    try
    {
        if(argc<3) {
            cerr<<"Usage: image boardConfig.yml [cameraParams.yml] [markerSize] [outImage]"<<endl;
            // BUGFIX: was exit(0) — a usage error must not report success.
            return 1;
        }
        aruco::CameraParameters CamParam;
        MarkerDetector MDetector;
        vector<aruco::Marker> Markers;
        float MarkerSize=-1;
        BoardConfiguration TheBoardConfig;
        BoardDetector TheBoardDetector;
        Board TheBoardDetected;
        // Load the input image and the board description.
        cv::Mat InImage=cv::imread(argv[1]);
        TheBoardConfig.readFromFile(argv[2]);
        if (argc>=4) {
            CamParam.readFromXMLFile(argv[3]);
            //resizes the parameters to fit the size of the input image
            CamParam.resize( InImage.size());
        }
        if (argc>=5)
            MarkerSize=atof(argv[4]);
        cv::namedWindow("in",1);
        MDetector.detect(InImage,Markers);//detect markers without computing R and T information
        //Detection of the board; probDetect is the detection confidence [0,1]
        float probDetect=TheBoardDetector.detect( Markers, TheBoardConfig,TheBoardDetected, CamParam,MarkerSize);
        (void)probDetect; // kept for reference; not used further in this demo
        //for each marker, draw info and its boundaries in the image
        for(unsigned int i=0;i<Markers.size();i++){
            cout<<Markers[i]<<endl;
            Markers[i].draw(InImage,Scalar(0,0,255),2);
        }
        //draw a 3d cube in each marker if there is 3d info
        if ( CamParam.isValid()){
            for(unsigned int i=0;i<Markers.size();i++){
                CvDrawingUtils::draw3dCube(InImage,Markers[i],CamParam);
                CvDrawingUtils::draw3dAxis(InImage,Markers[i],CamParam);
            }
            //draw board axis
            CvDrawingUtils::draw3dAxis(InImage,TheBoardDetected,CamParam);
            cout<<TheBoardDetected.Rvec<<" "<<TheBoardDetected.Tvec<<endl;
        }
        //show input with augmented information
        cv::imshow("in",InImage);
        cv::waitKey(0);//wait for key to be pressed
        if(argc>=6) cv::imwrite(argv[5],InImage);
    }catch(std::exception &ex)
    {
        cout<<"Exception :"<<ex.what()<<endl;
    }
    return 0;
}
示例6: videocallback
// ALVAR per-frame video callback: runs marker detection on the frame and
// registers one colored, scaled drawable per marker for the GlutViewer
// overlay. Relies on file-level globals: init, cam, calibrationFilename,
// marker_size, and the drawable array d[32].
void videocallback(IplImage *image)
{
    static IplImage *rgba;
    // Some capture drivers deliver bottom-up images (origin != 0); flip to
    // top-down for processing and flip back before returning.
    bool flip_image = (image->origin?true:false);
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }
    // One-time setup, gated by the global `init` flag.
    if (init) {
        init = false;
        cout<<"Loading calibration: "<<calibrationFilename.str();
        if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) {
            cout<<" [Ok]"<<endl;
        } else {
            // Calibration file missing/invalid: fall back to a default camera
            // model at the frame's resolution.
            cam.SetRes(image->width, image->height);
            cout<<" [Fail]"<<endl;
        }
        double p[16];
        cam.GetOpenglProjectionMatrix(p,image->width,image->height);
        GlutViewer::SetGlProjectionMatrix(p);
        for (int i=0; i<32; i++) {
            d[i].SetScale(marker_size);
        }
        // NOTE(review): rgba is allocated here but the RGBA conversion below
        // is commented out, so this buffer appears unused — confirm.
        rgba = CvTestbed::Instance().CreateImageWithProto("RGBA", image, 0, 4);
    }
    static MarkerDetector<MarkerData> marker_detector;
    marker_detector.SetMarkerSize(marker_size); // for marker ids larger than 255, set the content resolution accordingly
    //static MarkerDetector<MarkerArtoolkit> marker_detector;
    //marker_detector.SetMarkerSize(2.8, 3, 1.5);
    // Here we try to use RGBA just to make sure that also it works...
    //cvCvtColor(image, rgba, CV_RGB2RGBA);
    marker_detector.Detect(image, &cam, true, true);
    GlutViewer::DrawableClear();
    // One drawable per detected marker, capped at 32 (size of d[]).
    for (size_t i=0; i<marker_detector.markers->size(); i++) {
        if (i >= 32) break;
        Pose p = (*(marker_detector.markers))[i].pose;
        p.GetMatrixGL(d[i].gl_mat);
        // Derive a deterministic per-id color so markers stay distinguishable
        // across frames.
        int id = (*(marker_detector.markers))[i].GetId();
        double r = 1.0 - double(id+1)/32.0;
        double g = 1.0 - double(id*3%32+1)/32.0;
        double b = 1.0 - double(id*7%32+1)/32.0;
        d[i].SetColor(r, g, b);
        GlutViewer::DrawableAdd(&(d[i]));
    }
    // Restore the frame's original orientation for the caller.
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }
}
示例7: imageCallback
// This function is called every time a new image is published.
// ROS image callback: converts the incoming sensor_msgs image to OpenCV,
// runs ArUco marker detection, displays the annotated frame, and republishes
// the (unannotated) converted image on `pub`.
void imageCallback(const sensor_msgs::ImageConstPtr& original_image)
{
    //Convert from the ROS image message to a CvImage suitable for working with OpenCV for processing
    cv_bridge::CvImageConstPtr cv_ptr;
    try
    {
        //Always copy, returning a mutable CvImage
        // NOTE(review): the comment in the original said OpenCV expects BGR,
        // yet the conversion requests RGB8 — aruco drawing colors may be
        // swapped (red/blue); confirm against the publisher's encoding.
        //cv_ptr = cv_bridge::toCvShare(original_image, "");
        cv_ptr = cv_bridge::toCvCopy(original_image, enc::RGB8);
    }
    catch (cv_bridge::Exception& e)
    {
        //if there is an error during conversion, display it
        ROS_ERROR("main.cpp::cv_bridge exception: %s", e.what());
        return;
    }
    try
    {
        // A fresh detector per frame: stateless use, but re-created on every
        // callback (potentially wasteful; kept as-is to preserve behavior).
        MarkerDetector MDetector;
        vector<Marker> Markers;
        //read the input image
        cv::Mat InImage;
        InImage = cv_ptr->image;
        //Ok, let's detect
        MDetector.detect(InImage,Markers);
        //for each marker, draw info and its boundaries in the image
        for (unsigned int i=0;i<Markers.size();i++) {
            //cout<<Markers[i]<<endl;
            Markers[i].draw(InImage,Scalar(0,0,255),2);
        }
        cv::imshow(WINDOW,InImage);
        //Add some delay in milliseconds
        cv::waitKey(3);
    } catch (std::exception &ex)
    {
        ROS_ERROR("main.cpp::aruco exception: %s", ex.what());
    }
    /**
    * The publish() function is how you send messages. The parameter
    * is the message object. The type of this object must agree with the type
    * given as a template parameter to the advertise<>() call, as was done
    * in the constructor in main().
    */
    //Convert the CvImage to a ROS image message and publish it on the "camera/image_processed" topic.
    pub.publish(cv_ptr->toImageMsg());
}
示例8: Java_com_polysfactory_n3_jni_NativeMarkerDetector_nativeFindMarkers
// JNI bridge: runs marker detection on a frame and writes the resulting
// per-marker transformations into the output matrix shared with Java.
// `thiz`/`imageBgra`/`transMat` carry raw native pointers owned by the Java side.
JNIEXPORT void JNICALL Java_com_polysfactory_n3_jni_NativeMarkerDetector_nativeFindMarkers(
        JNIEnv * jenv, jobject, jlong thiz, jlong imageBgra, jlong transMat, jfloat scale) {
    try {
        MarkerDetector* markerDetector = (MarkerDetector*) thiz;
        Mat* image = (Mat*) imageBgra;
        Mat* transMatObj = (Mat*) transMat;
        markerDetector->processFrame(*image, (float) scale);
        std::vector<Transformation> transformations =
                markerDetector->getTransformations();
        // Pack the transformations into the output Mat for the Java caller.
        vector_Transformation_to_Mat(transformations, *transMatObj);
    } catch (...) {
        // BUGFIX: the log/exception text was copy-pasted from a VideoCapture
        // wrapper and named the wrong entry point; report this function.
        LOGD("nativeFindMarkers caught unknown exception");
        jclass je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je,
                "Unknown exception in JNI code {NativeMarkerDetector::nativeFindMarkers()}");
    }
}
示例9: main
int main(int, char **)
{
VideoCapture cap(0);
static MarkerDetector<MarkerData> marker_detector;
marker_detector.SetMarkerSize(15); // for marker ids larger than 255, set the content resolution accordingly
if (cap.isOpened())
{
frame;
namedWindow("portal");
while(true)
{
cap >> frame;
IplImage image = frame;
marker_detector.Detect(&image, &cam, true, true);
if (marker_detector.markers->size() > 0)
cout << "Detected " << marker_detector.markers->size() << endl;
for (size_t i=0; i<marker_detector.markers->size(); i++)
{
if (i >= 32) break;
Pose p = (*(marker_detector.markers))[i].pose;
vector<PointDouble> corners = (*(marker_detector.markers))[i].marker_corners_img;
//cout << "corners size(4) == " << corners.size() << endl;
/*
line(frame, bestSquares[i][0], bestSquares[i][1], Scalar(0,255,0), 2);
line(frame, bestSquares[i][1], bestSquares[i][2], Scalar(0,255,0), 2);
line(frame, bestSquares[i][2], bestSquares[i][3], Scalar(0,255,0), 2);
line(frame, bestSquares[i][3], bestSquares[i][0], Scalar(0,255,0), 2);*/
}
imshow("portal", frame);
if(waitKey(30) >= 0) break;
}
}
示例10: cvTackBarEvents
// Trackbar callback: re-applies threshold/corner/dictionary settings, re-runs
// detection on the current frame, and refreshes the display windows.
void cvTackBarEvents(int pos, void*)
{
    (void)(pos);
    // Adaptive-threshold window (param 1) must be odd and >= 3.
    if (iThresParam1 < 3)
        iThresParam1 = 3;
    if (iThresParam1 % 2 != 1)
        iThresParam1++;
    // BUGFIX: the original re-checked iThresParam1 here (dead code, since it
    // is already clamped to >= 3 above) — a copy-paste slip that left the
    // second threshold parameter unclamped. Clamp iThresParam2 instead.
    if (iThresParam2 < 1)
        iThresParam2 = 1;
    MDetector.setThresholdParams(iThresParam1, iThresParam2);
    // Enclosed-marker mode trades speed for robustness: erosion + subpixel
    // corner refinement instead of the faster line-fit corner method.
    if (iEnclosedMarkers){
        auto params=MDetector.getParams();
        params._doErosion=true;
        params._cornerMethod=aruco::MarkerDetector::SUBPIX;
        MDetector.setParams(params);
    }
    else{
        auto params=MDetector.getParams();
        params._doErosion=false;
        params._cornerMethod=aruco::MarkerDetector::LINES;
        MDetector.setParams(params);
    }
    MDetector.setDictionary(dictionaryString,float(iCorrectionRate)/10. ); // sets the dictionary to be employed (ARUCO,APRILTAGS,ARTOOLKIT,etc)
    // recompute
    MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters);
    TheInputImage.copyTo(TheInputImageCopy);
    // Optionally overlay every candidate region, not just accepted markers.
    if (iShowAllCandidates){
        auto candidates=MDetector.getCandidates();
        for(auto cand:candidates)
            Marker(cand,-1).draw(TheInputImageCopy, Scalar(255, 0, 255));
    }
    for (unsigned int i = 0; i < TheMarkers.size(); i++)
        TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255));
    // draw a 3d cube in each marker if there is 3d info
    if (TheCameraParameters.isValid())
        for (unsigned int i = 0; i < TheMarkers.size(); i++)
            CvDrawingUtils::draw3dCube(TheInputImageCopy, TheMarkers[i], TheCameraParameters);
    cv::imshow("in", resize(TheInputImageCopy, 1280));
    cv::imshow("thres", resize(MDetector.getThresholdedImage(), 1280));
}
示例11: Java_wrapper_MarkerDetector_Jdetect
// JNI bridge: runs marker detection on the cv::Mat behind `img` and appends
// one wrapper.Marker (constructed from a native handle) per detection to the
// Java ArrayList `markers`.
JNIEXPORT void JNICALL Java_wrapper_MarkerDetector_Jdetect(JNIEnv * env, jobject obj, jlong img, jobject markers, jobject camParams, jfloat markerSizeMeters){
    MarkerDetector *inst = getHandle<MarkerDetector>(env, obj);
    // Look up only the JNI classes/methods this function actually invokes
    // (the original also resolved several unused Marker constructors).
    jclass ArrayList_class = env->FindClass( "java/util/ArrayList" );
    jclass Marker_class = env->FindClass("wrapper/Marker");
    jmethodID Add_method = env->GetMethodID(ArrayList_class,"add", "(Ljava/lang/Object;)Z");
    jmethodID Marker_Constructor_method = env->GetMethodID(Marker_class,"<init>","(J)V");
    cv::Mat* inMat = (cv::Mat*)img;
    aruco::CameraParameters *camParam = getHandle<CameraParameters>(env, camParams);
    std::vector <Marker> vMarkers;
    inst->detect(*inMat, vMarkers, *camParam, markerSizeMeters);
    for (size_t i = 0; i < vMarkers.size(); i++) {
        // BUGFIX: the original passed Java the address of an element of the
        // stack-local vector, which dangles as soon as this function returns.
        // Heap-allocate a copy; presumably the Java Marker(long) wrapper owns
        // and frees the native handle — TODO confirm against wrapper.Marker.
        Marker *markPtr = new Marker(vMarkers[i]);
        jobject jMarker = env->NewObject(Marker_class, Marker_Constructor_method, (jlong)(intptr_t) markPtr);
        env->CallBooleanMethod(markers, Add_method, jMarker);
        env->DeleteLocalRef(jMarker);
    }
    env->DeleteLocalRef(ArrayList_class);
    env->DeleteLocalRef(Marker_class);
}
示例12: cvTackBarEvents
// Trackbar callback: re-applies the threshold parameters, re-runs detection
// on the current frame, and refreshes the "in"/"thres" windows.
void cvTackBarEvents(int pos,void*)
{
    // Adaptive-threshold window (param 1) must be odd and >= 3.
    if (iThresParam1<3) iThresParam1=3;
    if (iThresParam1%2!=1) iThresParam1++;
    // BUGFIX: the original clamped ThresParam2 and then immediately
    // overwrote it with iThresParam2, so the lower bound of 1 never took
    // effect. Clamp the trackbar-backed value itself.
    if (iThresParam2<1) iThresParam2=1;
    ThresParam1=iThresParam1;
    ThresParam2=iThresParam2;
    MDetector.setThresholdParams(ThresParam1,ThresParam2);
    //recompute
    MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters);
    TheInputImage.copyTo(TheInputImageCopy);
    for (unsigned int i=0;i<TheMarkers.size();i++) TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
    //print other rectangles that contains no valid markers
    /*for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
    aruco::Marker m( MDetector.getCandidates()[i],999);
    m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
    }*/
    //draw a 3d cube in each marker if there is 3d info
    if (TheCameraParameters.isValid())
        for (unsigned int i=0;i<TheMarkers.size();i++)
            CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
    cv::imshow("in",TheInputImageCopy);
    cv::imshow("thres",MDetector.getThresholdedImage());
}
示例13: GetMultiMarkerPoses
// Updates the bundlePoses of the multi_marker_bundles by detecting markers
// and using all markers in a bundle to infer the master tag's position.
// Relies on file-level globals: marker_detector, cam, n_bundles,
// multi_marker_bundles[], bundlePoses[], max_new_marker_error, max_track_error.
void GetMultiMarkerPoses(IplImage *image) {
    // Main detection pass; only update bundle poses when it succeeds.
    if (marker_detector.Detect(image, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true)){
        for(int i=0; i<n_bundles; i++)
            multi_marker_bundles[i]->Update(marker_detector.markers, cam, bundlePoses[i]);
        // Second pass: detect additional (previously tracked) markers and, for
        // each bundle that yields track markers, refine its pose with them.
        if(marker_detector.DetectAdditional(image, cam, false) > 0){
            for(int i=0; i<n_bundles; i++){
                if ((multi_marker_bundles[i]->SetTrackMarkers(marker_detector, cam, bundlePoses[i], image) > 0))
                    multi_marker_bundles[i]->Update(marker_detector.markers, cam, bundlePoses[i]);
            }
        }
    }
}
示例14: vIdle
// GLUT idle callback: grabs a camera frame, undistorts it, runs marker and
// board detection, then prepares the resized image and synthetic mask used by
// the OpenGL display callback. Operates entirely on file-level globals.
void vIdle() {
    if (TheCaptureFlag) {
        // capture image
        TheVideoCapturer.grab();
        TheVideoCapturer.retrieve(TheInputImage);
        TheUndInputImage.create(TheInputImage.size(), CV_8UC3);
        // By default OpenCV works in BGR, so we must convert to RGB because
        // OpenGL on Windows prefers RGB.
        cv::cvtColor(TheInputImage, TheInputImage, CV_BGR2RGB);
        // remove distortion in image
        cv::undistort(TheInputImage, TheUndInputImage, TheCameraParams.CameraMatrix,
                      TheCameraParams.Distorsion);
        // detect markers
        MDetector.detect(TheUndInputImage, TheMarkers);
        // Detection of the board; .second holds the detection confidence,
        // .first the detected board itself.
        TheBoardDetected.second = TheBoardDetector.detect(
            TheMarkers, TheBoardConfig, TheBoardDetected.first, TheCameraParams, TheMarkerSize);
        // check the speed by calculating the mean speed of all iterations
        // resize the image to the size of the GL window
        cv::resize(TheUndInputImage, TheResizedImage, TheGlWindowSize);
        // create mask: a synthetic mask consisting of a simple rectangle, just
        // to show an example of OpenGL with mask
        TheMask = createSyntheticMask(TheResizedImage); // lets create with the same size of the resized
        // image, i.e. the size of the opengl window
    }
    glutPostRedisplay();
}
示例15: GetMarkerPoses
void GetMarkerPoses(IplImage *image, ARCloud &cloud) {
//Detect and track the markers
if (marker_detector.Detect(image, cam, true, false, max_new_marker_error,
max_track_error, CVSEQ, true))
{
printf("\n--------------------------\n\n");
for (size_t i=0; i<marker_detector.markers->size(); i++)
{
vector<cv::Point, Eigen::aligned_allocator<cv::Point> > pixels;
Marker *m = &((*marker_detector.markers)[i]);
int id = m->GetId();
cout << "******* ID: " << id << endl;
int resol = m->GetRes();
int ori = m->ros_orientation;
PointDouble pt1, pt2, pt3, pt4;
pt4 = m->ros_marker_points_img[0];
pt3 = m->ros_marker_points_img[resol-1];
pt1 = m->ros_marker_points_img[(resol*resol)-resol];
pt2 = m->ros_marker_points_img[(resol*resol)-1];
m->ros_corners_3D[0] = cloud(pt1.x, pt1.y);
m->ros_corners_3D[1] = cloud(pt2.x, pt2.y);
m->ros_corners_3D[2] = cloud(pt3.x, pt3.y);
m->ros_corners_3D[3] = cloud(pt4.x, pt4.y);
if(ori >= 0 && ori < 4){
if(ori != 0){
std::rotate(m->ros_corners_3D.begin(), m->ros_corners_3D.begin() + ori, m->ros_corners_3D.end());
}
}
else
ROS_ERROR("FindMarkerBundles: Bad Orientation: %i for ID: %i", ori, id);
//Get the 3D marker points
BOOST_FOREACH (const PointDouble& p, m->ros_marker_points_img)
pixels.push_back(cv::Point(p.x, p.y));
ARCloud::Ptr selected_points = ata::filterCloud(cloud, pixels);
//Use the kinect data to find a plane and pose for the marker
int ret = PlaneFitPoseImprovement(i, m->ros_corners_3D, selected_points, cloud, m->pose);
}
}