

C++ Pose Class Code Examples

This article collects and summarizes typical usage examples of the C++ Pose class. If you are wondering what the Pose class does, how to call it, or what real usage looks like, the curated class code examples below may help.


A total of 15 Pose class code examples are shown below, sorted by popularity by default.
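
A note before the examples: each snippet below uses its own project's Pose class, so the available methods vary (getR()/getT(), getTranslation()/getRotation(), pos(), GetMatrixGL(), and so on). Purely for orientation, a minimal, hypothetical pose type of the kind these examples revolve around might look like the following sketch; it is not the API of any specific project listed here.

// Hypothetical minimal pose type (sketch only, not taken from any project below).
#include <Eigen/Geometry>

struct SimplePose {
    Eigen::Quaterniond rotation{Eigen::Quaterniond::Identity()};
    Eigen::Vector3d translation{Eigen::Vector3d::Zero()};

    // Compose two poses: 'other' is applied first, then this pose.
    SimplePose operator*(const SimplePose& other) const {
        SimplePose out;
        out.rotation = rotation * other.rotation;
        out.translation = rotation * other.translation + translation;
        return out;
    }

    // Inverse rigid transform: (R, t)^-1 = (R^T, -R^T t).
    SimplePose inverse() const {
        SimplePose out;
        out.rotation = rotation.conjugate();
        out.translation = -(out.rotation * translation);
        return out;
    }
};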

Example 1: boxFilter

void boxFilter(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud, Pose pose){
	//Transform the cloud
	//convert the transform from our fiducial markers to Eigen types
    Eigen::Matrix<float, 3, 3> R;
    Eigen::Vector3f T;
    cv::cv2eigen(pose.getT(), T);
    cv::cv2eigen(pose.getR(), R);
    //get the inverse transform to bring the point clouds into the
    //same coordinate frame
    Eigen::Affine3f transform;
    transform = Eigen::AngleAxisf(R.transpose());
    transform *= Eigen::Translation3f(-T);
    //transform the cloud in place
    pcl::transformPointCloud(*cloud, *cloud, transform);
	
	//Define the box
	float box = 200.00;
	pcl::PassThrough<pcl::PointXYZRGB> pass_z, pass_x, pass_y;
	//Filters in x
	pass_x.setFilterFieldName("x");
	pass_x.setFilterLimits(-box, box);
	pass_x.setInputCloud(cloud);
	pass_x.filter(*cloud);
	//Filters in y
	pass_y.setFilterFieldName("y");
	pass_y.setFilterLimits(-box, box);
	pass_y.setInputCloud(cloud);
	pass_y.filter(*cloud);
	//Filters in z
	pass_z.setFilterFieldName("z");
	pass_z.setFilterLimits(0,box);
	pass_z.setInputCloud(cloud);
	pass_z.filter(*cloud);	
}
Developer: alantrrs, Project: augmented_dev, Lines: 34, Source file: model_extractor.cpp
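
A side note on Example 1: the two lines that build the Eigen transform compose R^T with a translation by -T, which is the inverse of the marker pose [R | T]. A sketch of an equivalent, arguably more explicit construction, assuming the same R, T and cloud as converted in the example above:

    //Sketch only (assumes R, T and cloud exactly as in the example above):
    //build the forward marker pose as an affine transform, then invert it.
    Eigen::Affine3f markerPose = Eigen::Translation3f(T) * Eigen::AngleAxisf(R);
    Eigen::Affine3f inverse = markerPose.inverse();   // equals R^T followed by translation by -T
    pcl::transformPointCloud(*cloud, *cloud, inverse);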

Example 2: apply

	/** Apply the value produced by blending motions across tracks
	 *
	 * @param pose Pose to which the motion is applied
	 * @author SAM (T&GG, Org.)<[email protected]>
	 * @date 2004/10/05 0:18:09
	 * Copyright (C) 2001,2002,2003,2004 SAM (T&GG, Org.). All rights reserved.
	 */
	void MotionMixer::apply(Pose &pose) const
	{
		for(Pose::StanceIndex::iterator i = pose.begin(); i != pose.end(); ++i) {
			float weight_sum = 0;
			bool ok = false;
			for(MotionTrackVector::const_iterator j = tracks_.begin(); j != tracks_.end(); ++j) {
				if(j->setWork(i->second, i->first.c_str())) {
					weight_sum += j->weight();
					ok = true;
				}
			}
			if(ok) {
				bool first = true;
				for(MotionTrackVector::const_iterator j = tracks_.begin(); j != tracks_.end(); ++j) {
					if(j->work().type != CoordUnion::TYPE_NONE) {
						if(first) {
							i->second = j->work().stance()*(j->weight()/weight_sum);
							first = false;
						}
						else i->second += j->work().stance()*(j->weight()/weight_sum);
					}
				}
			}
		}
	}
Developer: Emulyator, Project: gamecatapult, Lines: 32, Source file: motionmixer.cpp

Example 3: correctPositions

    void Map::correctPositions(PointMatcherService<float> & pointMatcherService) {
        std::vector<Pose> correctedPoses;
        correctedPoses.push_back(Pose::origin());

        for(int i = 1; i < anchorPoints.size(); ++i) {
            auto it = anchorPoints.begin() + i;

            Pose previousOdometryEstimate = (it - 1)->getPosition();
            Pose currentOdometryEstimate = it->getPosition();
            Transform initialGuess = currentOdometryEstimate.transFromPose(previousOdometryEstimate);

            Transform icpResult = pointMatcherService.icp(*it, *(it - 1), initialGuess);

            Transform originToPreviousPose = correctedPoses[i-1].transFromPose(Pose::origin());
            Transform tFromOriginToCurrent = icpResult * originToPreviousPose;

            Pose poseOfCurrent = Pose::origin();
            poseOfCurrent.transform(tFromOriginToCurrent);
            correctedPoses.push_back(poseOfCurrent);
        }

        for(int i = 0; i < anchorPoints.size(); ++i) {
            anchorPoints[i].setPosition(correctedPoses[i]);
        }

    }
Developer: davidlandry93, Project: better_teach, Lines: 26, Source file: teach_repeat_map.cpp

Example 4: updatePoseFromGui

void testApp::updateModel() {
	updatePoseFromGui();
	
	Pose pose = bindPose;
	for(Pose::iterator i = pose.begin(); i != pose.end(); i++) {
		string name = i->first;
		
		float x = gui.exists(name + ".x") ? gui.get(name + ".x").getValue() : 0;
		float y = gui.exists(name + ".y") ? gui.get(name + ".y").getValue() : 0;
		float z = gui.exists(name + ".z") ? gui.get(name + ".z").getValue() : 0;
		
		aiMatrix4x4& bone = i->second;
		
		aiMatrix4x4 cur;
		ofMatrix4x4 mat;
		ofQuaternion quat(x, ofVec3f(1, 0, 0),
											y, ofVec3f(0, 1, 0),
											z, ofVec3f(0, 0, 1));
		quat.get(mat);
		cur = toAi(mat);
		
		bone *= cur;
	}
	model.setPose(pose);
}
Developer: kylemcdonald, Project: DigitalInteraction, Lines: 25, Source file: testApp.cpp

Example 5: frustum

// @param[in] isStereo		Whether scene is in stereo (widens near/far planes to fit both eyes)
void Lens::frustum(Frustumd& f, const Pose& p, double aspect) const {//, bool isStereo) const {

	Vec3d ur, uu, uf;
	p.directionVectors(ur, uu, uf);
	const Vec3d& pos = p.pos();

	double nh = heightAtDepth(near());
	double fh = heightAtDepth(far());

	double nw = nh * aspect;
	double fw = fh * aspect;

//	// This effectively creates a union between the near/far planes of the
//	// left and right eyes. The offsets are computed by using the law
//	// of similar triangles.
//	if(isStereo){
//		nw += fabs(0.5*eyeSep()*(focalLength()-near())/focalLength());
//		fw += fabs(0.5*eyeSep()*(focalLength()- far())/focalLength());
//	}

	Vec3d nc = pos + uf * near();	// center point of near plane
	Vec3d fc = pos + uf * far();	// center point of far plane

	f.ntl = nc + uu * nh - ur * nw;
	f.ntr = nc + uu * nh + ur * nw;
	f.nbl = nc - uu * nh - ur * nw;
	f.nbr = nc - uu * nh + ur * nw;

	f.ftl = fc + uu * fh - ur * fw;
	f.ftr = fc + uu * fh + ur * fw;
	f.fbl = fc - uu * fh - ur * fw;
	f.fbr = fc - uu * fh + ur * fw;

	f.computePlanes();
}
Developer: AlloSphere-Research-Group, Project: AlloSystem, Lines: 36, Source file: al_Lens.cpp

Example 6: assert

MiscData ControllerWrapper::genControls_(Pose s, int &vl, int &vr, double time, bool useTime) {
    assert(ctrlType == TRACKCTRL);
    Pose x = s;
    for(deque<pair<int,int> >::iterator it = uq.begin(); it != uq.end(); it++) {
        x.updateNoDelay(it->first, it->second, timeLC);
    }
    if (isFirstCall) {
        isFirstCall = false;
        gettimeofday(&startTime, NULL);
    }
    double elapsedS;
    if (!useTime) {
        struct timeval nowTime;
        gettimeofday(&nowTime, NULL);
        elapsedS = (nowTime.tv_sec-startTime.tv_sec)+(nowTime.tv_usec-startTime.tv_usec)/1000000.0;        
    } else {
        elapsedS = time;
    }
    // should offset elapsedS by the number of packet delay!!!
    elapsedS += k*timeLCMs/1000.;
    MiscData m = tracker.genControls(x, vl, vr, prevVl, prevVr, elapsedS);
    prevVl = vl; prevVr = vr;
    uq.push_back(make_pair<int,int>((int)vl, (int)vr));
    uq.pop_front();
    return m;
}
Developer: abhinavcoder, Project: motion-simulation, Lines: 26, Source file: controller-wrapper.cpp

Example 7: getPredictedPose

Pose ControllerWrapper::getPredictedPose(Pose s) {
    Pose x = s;
    for(deque<pair<int,int> >::iterator it = uq.begin(); it != uq.end(); it++) {
        x.updateNoDelay(it->first, it->second, timeLC);
    }
    return x;
}
Developer: abhinavcoder, Project: motion-simulation, Lines: 7, Source file: controller-wrapper.cpp

Example 8: Pose

void Joint::rotate(){
	//not sure if this is how it works...
	//local_pose.orientation = local_pose.orientation + rotate;
	//PoseEuler temp = PoseEuler(vertex3(), vertex3(0.0, 0.0, angle));
//	local_transformation = local_transformation * ( temp.getRotation()* angularVelocity);
	
	Pose currentPose = path->update();

	float x, y;
	x = currentPose.position.getx();
	y = currentPose.position.gety();
	angle = -PI/2.0 + (float) (std::atan2(y, x) - std::atan2(1.0, 0.0));


	//REALLY MEANS: angle = angularVelocity * dt + angle0;
	//angle += angularVelocity;
	//std::cout << "x:" << x << ", y:" << y << ", theta:" << angle << std::endl;
	Pose temp = Pose(vertex3(), quaternion(angle, 0.0, 0.0));
	//Pose temp = Pose(vertex3(), quaternion(0.0, angle, 0.0));
	local_transformation = temp.getRotation();

	//local_transformation = ( temp.getRotation() * angularVelocity);

	//if(!isNull()){
	//	for(unsigned int i=0; i<children.size(); i++){
	//		this->children.at(i)->child->rotate(rotate);
	//	}
	//}
}
Developer: lindseydi, Project: AnimationEngine, Lines: 29, Source file: Joint.cpp

Example 9: capture

	void capture(Graphics& gl, const Lens& cam, const Pose& pose, Drawable& draw) {
		validate();
		
		glPushAttrib(GL_ALL_ATTRIB_BITS);
		
		Vec3d pos = pose.pos();
		Vec3d ux, uy, uz; 
		pose.unitVectors(ux, uy, uz);
		mProjection = Matrix4d::perspective(90, 1, cam.near(), cam.far());
		
		glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, mFboId);
		for (int face=0; face<6; face++) {
			glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT+face);
			
			gl.viewport(0, 0, resolution(), resolution());
			gl.clearColor(clearColor());
			gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
			
			switch (face) {
				case 0:
					// GL_TEXTURE_CUBE_MAP_POSITIVE_X   
					mModelView = Matrix4d::lookAt(uz, uy, -ux, pos);
					break;
				case 1:
					// GL_TEXTURE_CUBE_MAP_NEGATIVE_X   
					mModelView = Matrix4d::lookAt(-uz, uy, ux, pos);
					break;
				case 2:
					// GL_TEXTURE_CUBE_MAP_POSITIVE_Y   
					mModelView = Matrix4d::lookAt(ux, -uz, uy, pos);
					break;
				case 3:
					// GL_TEXTURE_CUBE_MAP_NEGATIVE_Y   
					mModelView = Matrix4d::lookAt(ux, uz, -uy, pos);
					break;
				case 4:
					// GL_TEXTURE_CUBE_MAP_POSITIVE_Z   
					mModelView = Matrix4d::lookAt(ux, uy, uz, pos);
					break;
				default:
					// GL_TEXTURE_CUBE_MAP_NEGATIVE_Z   
					mModelView = Matrix4d::lookAt(-ux, uy, -uz, pos);
					break;
			}
			
			// apply camera transform:
			gl.pushMatrix(gl.PROJECTION);
			gl.loadMatrix(mProjection);
			gl.pushMatrix(gl.MODELVIEW);
			gl.loadMatrix(mModelView);
			
			draw.onDraw(gl);
			
			gl.popMatrix(gl.PROJECTION);
			gl.popMatrix(gl.MODELVIEW);
		}
		glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
		
		glPopAttrib();
	}
Developer: AlloSphere-Research-Group, Project: alive, Lines: 60, Source file: al_CubeMapFBO.hpp

Example 10: value

qreal PoseMapping::value(Pose &pose, quint32 from) const
{
	const quint32 to=map(from);
	if(to>=pose.size()) { // >= also guards the one-past-the-end index to==pose.size()
		return 0.0;
	}
	return pose.value(to);
}
Developer: mrdeveloperdude, Project: OctoMY, Lines: 8, Source file: PoseMapping.cpp

Example 11: estimateError

void estimateError(const Pose& P_ref, const Pose& P_test, double *dRa, double *dta)
{
    Vector3d t_test = P_test.translation().normalized();
    Vector3d t_ref = P_ref.translation().normalized();

    *dRa = (P_test * P_ref.inverse()).getAngle(); // delta rotation angle
    *dta = fabs(t_test.dot(t_ref)); // cosine of the angle between translation directions
}
Developer: midjji, Project: ssba2016-scale-estimation, Lines: 8, Source file: main.cpp
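
A note on Example 11: dRa is the rotation error angle, while dta is the absolute cosine of the angle between the two normalized translation directions, not the angle itself. A hypothetical usage sketch, assuming getAngle() returns radians and P_ref/P_test are the two poses being compared:

    #include <algorithm>
    #include <cmath>

    double dRa = 0.0, dta = 0.0;
    estimateError(P_ref, P_test, &dRa, &dta);
    const double kPi = std::acos(-1.0);
    double rotationErrorDeg    = dRa * 180.0 / kPi;                            // rotation error, degrees
    double translationAngleDeg = std::acos(std::min(dta, 1.0)) * 180.0 / kPi;  // angle between translation directions, degrees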

Example 12: run

bool NullLocalization::run(const RobotState     & robotState,
                           const GameState      & gameState,
                           const VisionFeatures & visionFeatures,
                           Pose & pose) {
  pose.setPosition(0, 0);
  pose.setAngle(0);

  return false;
}
Developer: rcahoon, Project: 2010CMRoboBitsGroup3, Lines: 9, Source file: NullLocalization.cpp

Example 13: evaluation

u_int32_t RoadSection::evaluation(const Pose& p)
{
    // kind of elliptic distance function... TODO: improve this function!
    _position_type distance = Point(p.getX(), p.getY() *2).abs();

    if (distance > 0.3)
        return 0;

    return 0.4 - distance; // note: implicitly truncated to 0 by the u_int32_t return type, since 0.4 - distance < 1
}
Developer: KAtana-Karlsruhe, Project: AADC_2015_KAtana, Lines: 10, Source file: RoadSection.cpp

Example 14:

    bool Pose::operator==(const Pose& right) const {
        // invalid poses return false for comparison, even against identical invalid poses, like NaN
        if (!valid || !right.valid) {
            return false;
        }

        // FIXME add margin of error?  Or add an additional withinEpsilon function?
        return translation == right.getTranslation() && rotation == right.getRotation() &&
            velocity == right.getVelocity() && angularVelocity == right.getAngularVelocity();
    }
Developer: AlphaStaxLLC, Project: hifi, Lines: 10, Source file: Pose.cpp

Example 15: videocallback

void videocallback(IplImage *image)
{
    static IplImage *rgba;
    bool flip_image = (image->origin?true:false);
    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }

    if (init) {
        init = false;
        cout<<"Loading calibration: "<<calibrationFilename.str();
        if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) {
            cout<<" [Ok]"<<endl;
        } else {
            cam.SetRes(image->width, image->height);
            cout<<" [Fail]"<<endl;
        }
        double p[16];
        cam.GetOpenglProjectionMatrix(p,image->width,image->height);
        GlutViewer::SetGlProjectionMatrix(p);
        for (int i=0; i<32; i++) {
            d[i].SetScale(marker_size);
        }
        rgba = CvTestbed::Instance().CreateImageWithProto("RGBA", image, 0, 4);
    }
    static MarkerDetector<MarkerData> marker_detector;
    marker_detector.SetMarkerSize(marker_size); // for marker ids larger than 255, set the content resolution accordingly
    //static MarkerDetector<MarkerArtoolkit> marker_detector;
    //marker_detector.SetMarkerSize(2.8, 3, 1.5);

    // Here we try to use RGBA just to make sure that it also works...
    //cvCvtColor(image, rgba, CV_RGB2RGBA);
    marker_detector.Detect(image, &cam, true, true);
    GlutViewer::DrawableClear();
    for (size_t i=0; i<marker_detector.markers->size(); i++) {
        if (i >= 32) break;
        
        Pose p = (*(marker_detector.markers))[i].pose;
        p.GetMatrixGL(d[i].gl_mat);

        int id = (*(marker_detector.markers))[i].GetId();
        double r = 1.0 - double(id+1)/32.0;
        double g = 1.0 - double(id*3%32+1)/32.0;
        double b = 1.0 - double(id*7%32+1)/32.0;
        d[i].SetColor(r, g, b);

        GlutViewer::DrawableAdd(&(d[i]));
    }

    if (flip_image) {
        cvFlip(image);
        image->origin = !image->origin;
    }
}
Developer: campbeb6, Project: RedhawkQuad, Lines: 55, Source file: DetectAlvar.cpp


Note: The Pose class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers, and the source code copyright belongs to the original authors; please refer to each project's License before distributing or using it. Do not reproduce without permission.