This page collects typical usage examples of the C++ method Pose::getOrientation. If you are wondering how exactly Pose::getOrientation is used in C++, or what calling it looks like in practice, the curated code samples below may help. You can also explore further usage examples of the Pose class, in which this method is defined.
Five code examples of Pose::getOrientation are shown below; by default they are sorted by popularity.
Example 1: getEncodedKey
string MCDMFunction::getEncodedKey(Pose& p, int value)
{
  string key;
  // value = 0 : encode everything
  // value = 1 : encode x, y, orientation; take first
  // value = 2 : encode x, y, orientation; take multiple times
  if (value == 0) {
    key = to_string(p.getX()) + "/" + to_string(p.getY()) + "/" + to_string(p.getOrientation()) + "/" + to_string(p.getRange()) + "/" + to_string(p.getFOV());
  } else if (value == 1) {
    key = to_string(p.getX()) + "/" + to_string(p.getY()) + "/" + to_string(p.getOrientation()) + "/" + "1";
  } else if (value == 2) {
    key = to_string(p.getX()) + "/" + to_string(p.getY()) + "/" + to_string(p.getOrientation()) + "/" + "0";
  }
  return key;
}
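A key built this way can be split back into its fields with a plain split on '/'. The following is only a minimal sketch, assuming the key was produced by the value == 0 branch above; decodeKey is a hypothetical helper, not part of the project.
#include <sstream>
#include <string>
#include <vector>

// Hypothetical helper: split a key such as "10/20/90/15/1.047198"
// back into its fields (x, y, orientation, range, FOV).
std::vector<std::string> decodeKey(const std::string& key)
{
  std::vector<std::string> fields;
  std::stringstream stream(key);
  std::string token;
  while (std::getline(stream, token, '/')) {
    fields.push_back(token);
  }
  return fields;
}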
Example 2: poseToMessage
geometry_msgs::Pose MessageConversions::poseToMessage(const Pose& pose) const {
  geometry_msgs::Pose message;
  const Eigen::Vector3d& position = pose.getPosition();
  message.position.x = position[0];
  message.position.y = position[1];
  message.position.z = position[2];
  const Eigen::Quaterniond& orientation = pose.getOrientation();
  message.orientation.x = orientation.x();
  message.orientation.y = orientation.y();
  message.orientation.z = orientation.z();
  message.orientation.w = orientation.w();
  return message;
}
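The opposite conversion, from a ROS message back to a Pose, is often needed as well. The sketch below is only an illustration: geometry_msgs::Pose, Eigen::Vector3d and Eigen::Quaterniond are standard ROS/Eigen types, but poseFromMessage and the Pose(position, orientation) constructor it relies on are assumptions about this project's Pose class.
#include <Eigen/Geometry>
#include <geometry_msgs/Pose.h>

// Hypothetical inverse of poseToMessage: rebuild a Pose from a ROS message.
Pose poseFromMessage(const geometry_msgs::Pose& message)
{
  Eigen::Vector3d position(message.position.x,
                           message.position.y,
                           message.position.z);
  // Eigen's quaternion constructor takes the scalar part first: (w, x, y, z).
  Eigen::Quaterniond orientation(message.orientation.w,
                                 message.orientation.x,
                                 message.orientation.y,
                                 message.orientation.z);
  return Pose(position, orientation);   // assumed constructor
}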
Example 3: selectNewPose
pair<Pose,double> MCDMFunction::selectNewPose(EvaluationRecords *evaluationRecords)
{
  Pose newTarget;
  double value = 0;
  unordered_map<string,double> evaluation = evaluationRecords->getEvaluations();
  // Keep the pose with the highest evaluation score
  for (unordered_map<string,double>::iterator it = evaluation.begin(); it != evaluation.end(); it++) {
    string tmp = (*it).first;
    Pose p = evaluationRecords->getPoseFromEncoding(tmp);
    if (value <= (*it).second) {
      newTarget = p;
      value = (*it).second;
    }
  }
  pair<Pose,double> result = make_pair(newTarget, value);
  // x and y are swapped in the printout so it matches the image coordinates used for visual debugging
  cout << "New target : " << "x = " << newTarget.getY() << ", y = " << newTarget.getX() << ", orientation = "
       << newTarget.getOrientation() << ", Evaluation: " << value << endl;
  return result;
}
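The loop above is essentially an argmax over the evaluation map. For comparison, here is a minimal sketch of the same selection written with std::max_element; it assumes the same EvaluationRecords interface as in the example and is a hypothetical free function, not the project's method.
#include <algorithm>
#include <string>
#include <unordered_map>
#include <utility>

// Hypothetical variant of the selection above, using std::max_element.
std::pair<Pose, double> selectBestPose(EvaluationRecords* evaluationRecords)
{
  std::unordered_map<std::string, double> evaluation = evaluationRecords->getEvaluations();
  if (evaluation.empty()) {
    return std::make_pair(Pose(), 0.0);    // nothing to select from
  }
  // Pick the entry with the highest evaluation score.
  auto best = std::max_element(evaluation.begin(), evaluation.end(),
      [](const std::pair<const std::string, double>& a,
         const std::pair<const std::string, double>& b) {
        return a.second < b.second;
      });
  std::string key = best->first;
  Pose newTarget = evaluationRecords->getPoseFromEncoding(key);
  return std::make_pair(newTarget, best->second);
}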
Example 4: main
// Input : ./mcdm_online_exploration_ros ./../Maps/map_RiccardoFreiburg_1m2.pgm 100 75 5 0 15 180 0.95 0.12
//         (resolution, x, y, orientation, range, centralAngle, precision, threshold)
int main ( int argc, char **argv )
{
  auto startMCDM = chrono::high_resolution_clock::now();
  ifstream infile;
  infile.open ( argv[1] );                     // path to the map
  double resolution = atof ( argv[2] );        // resolution of the map
  double imgresolution = atof ( argv[10] );    // resolution used for the planningGrid and RFIDGrid
  dummy::Map map = dummy::Map ( infile, resolution, imgresolution );
  RFIDGridmap myGrid ( argv[1], resolution, imgresolution, false );
  cout << "Map dimension: " << map.getNumGridCols() << " : " << map.getNumGridRows() << endl;
  int gridToPathGridScale = map.getGridToPathGridScale();
  // x and y are swapped because the map orientation used inside the program differs from the one outside
  long initX = static_cast<long> ( atoi ( argv[4] ) * imgresolution );  // initial X-position of the robot in the map frame
  long initY = static_cast<long> ( atoi ( argv[3] ) * imgresolution );  // initial Y-position of the robot in the map frame
  std::cout << "initX: " << initX << " initY: " << initY << std::endl;
  int initOrientation = atoi ( argv[5] );      // initial orientation of the robot in the map frame
  double initFov = atoi ( argv[7] );           // initial FOV of the robot sensor, in degrees
  initFov = initFov * PI / 180;                // converted to radians
  int initRange = atoi ( argv[6] );
  double precision = atof ( argv[8] );
  double threshold = atof ( argv[9] );
  // x, y, orientation, range, FOV
  Pose initialPose = Pose ( initX, initY, initOrientation, initRange, initFov );
  Pose invertedInitial = createFromInitialPose ( initX, initY, initOrientation, 180, initRange, initFov );
  Pose eastInitial = createFromInitialPose ( initX, initY, initOrientation, 90, initRange, initFov );
  Pose westInitial = createFromInitialPose ( initX, initY, initOrientation, 270, initRange, initFov );
  Pose target = initialPose;
  Pose previous = initialPose;
  long numConfiguration = 1;
  vector<pair<string, list<Pose>>> graph2;
  NewRay ray;
  ray.setGridToPathGridScale ( gridToPathGridScale );
  MCDMFunction function;
  long sensedCells = 0;
  long newSensedCells = 0;
  long totalFreeCells = map.getTotalFreeCells();
  int count = 0;
  double travelledDistance = 0;
  int numOfTurning = 0;
  unordered_map<string, int> visitedCell;
  vector<string> history;
  history.push_back ( function.getEncodedKey ( target, 1 ) );
  EvaluationRecords record;
  // amount of time the robot stays still to scan the environment (5 seconds, expressed in microseconds)
  unsigned int microseconds = 5 * 1000 * 1000;
  list<Pose> unexploredFrontiers;
  list<Pose> tabuList;
  tabuList.push_back ( target );
  list<Pose> nearCandidates;
  bool btMode = false;
  double totalAngle = 0;
  Astar astar;
  double totalScanTime = 0;
  bool act = true;
  int encodedKeyValue = 0;
  // RFID tag parameters
  double absTagX = std::stod ( argv[12] );     // m
  double absTagY = std::stod ( argv[11] );     // m
  double freq = std::stod ( argv[13] );        // Hz
  double txtPower = std::stod ( argv[14] );    // dB
  std::pair<int, int> relTagCoord;
  do
  {
    // If we are doing "forward" navigation towards cells never visited before
    if ( btMode == false )
    {
      long x = target.getX();
      long y = target.getY();
      int orientation = target.getOrientation();
      int range = target.getRange();
      double FOV = target.getFOV();
      string actualPose = function.getEncodedKey ( target, 0 );
      map.setCurrentPose ( target );
      string encoding = to_string ( target.getX() ) + to_string ( target.getY() );
      visitedCell.emplace ( encoding, 0 );
      // Get the sensing time required for scanning
      target.setScanAngles ( ray.getSensingTime ( map, x, y, orientation, FOV, range ) );
      // Perform a scanning operation
      newSensedCells = sensedCells + ray.performSensingOperation ( map, x, y, orientation, FOV, range, target.getScanAngles().first, target.getScanAngles().second );
      // Calculate the scanning angle
      double scanAngle = target.getScanAngles().second - target.getScanAngles().first;
      // Update the overall scanning time
      totalScanTime += calculateScanTime ( scanAngle * 180 / PI );
      // Calculate the RFID tag position relative to the robot position
      relTagCoord = map.getRelativeTagCoord ( absTagX, absTagY, target.getX(), target.getY() );
      // Calculate the received power and phase
      double rxPower = received_power_friis ( relTagCoord.first, relTagCoord.second, freq, txtPower );
      double phase = phaseDifference ( relTagCoord.first, relTagCoord.second, freq );
      // Update the path-planning and RFID grids
      map.updatePathPlanningGrid ( x, y, range, rxPower - SENSITIVITY );
      myGrid.addEllipse ( rxPower - SENSITIVITY, map.getNumGridCols() - target.getX(), target.getY(), target.getOrientation(), -0.5, 7.0 );
      // Search for new candidate positions
      ray.findCandidatePositions ( map, x, y, orientation, FOV, range );
      vector<pair<long, long>> candidatePosition = ray.getCandidatePositions();
      ray.emptyCandidatePositions();
      //......... the rest of this function is omitted here .........
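The helpers received_power_friis and phaseDifference called in this example are not shown in the excerpt. Purely as an illustration of what such helpers typically compute, the sketch below models the received power with the free-space Friis equation and the wrapped round-trip phase of the backscattered signal; the isotropic antenna gains, the dB convention and the function names are assumptions, not the project's actual implementation.
#include <cmath>

static const double kSpeedOfLight = 299792458.0;   // m/s
static const double kPi = 3.14159265358979323846;

// Assumed free-space Friis model: received power (same dB convention as txPower)
// for a tag at (relX, relY) metres relative to the antenna, with isotropic antennas.
double receivedPowerFriisSketch(double relX, double relY, double freq, double txPower)
{
  double distance = std::sqrt(relX * relX + relY * relY);
  double lambda = kSpeedOfLight / freq;
  return txPower + 20.0 * std::log10(lambda / (4.0 * kPi * distance));
}

// Assumed round-trip phase of the backscattered signal, wrapped to [0, 2*pi).
double phaseDifferenceSketch(double relX, double relY, double freq)
{
  double distance = std::sqrt(relX * relX + relY * relY);
  double lambda = kSpeedOfLight / freq;
  return std::fmod(4.0 * kPi * distance / lambda, 2.0 * kPi);
}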
Example 5: getEncodedKey
string EvaluationRecords::getEncodedKey(Pose& p)
{
  string key = to_string(p.getX()) + "/" + to_string(p.getY()) + "/" + to_string((int)p.getOrientation()) + "/" + to_string(p.getRange()) + "/" + to_string(p.getFOV());
  return key;
}
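As a usage illustration, such a key typically serves as the index of a per-pose score map, which is what getEvaluations() in Example 3 returns. The helper below is hypothetical and only shows the idea:
#include <string>
#include <unordered_map>

// Hypothetical illustration: index per-pose scores by the encoded key,
// so the same Pose always maps to the same map entry.
void recordEvaluation(std::unordered_map<std::string, double>& evaluations,
                      EvaluationRecords& records, Pose& p, double score)
{
  evaluations[records.getEncodedKey(p)] = score;
}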