This article collects and summarizes typical usage examples of the pybullet.getBasePositionAndOrientation method in Python. If you are wondering how to use pybullet.getBasePositionAndOrientation, or what it does in practice, the curated code examples below may help. You can also look further into other usage examples from the pybullet module.
The following presents 15 code examples of the pybullet.getBasePositionAndOrientation method, ordered by popularity by default.
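Before the examples, a minimal, self-contained sketch of the call itself may help. It loads a robot, steps the simulation, and reads back the base pose; the URDF files and the one-second stepping loop are illustrative assumptions rather than code taken from the examples below.

import pybullet as p
import pybullet_data

# Minimal sketch: load a robot and read back its base pose.
# Assumption: r2d2.urdf and plane.urdf from pybullet_data are used purely for illustration.
p.connect(p.DIRECT)  # headless client; use p.GUI for a visual window
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, -9.81)
p.loadURDF("plane.urdf")
robot = p.loadURDF("r2d2.urdf", basePosition=[0, 0, 0.5])

for _ in range(240):  # simulate roughly one second at the default 240 Hz time step
    p.stepSimulation()

# Returns a (position, orientation) pair: xyz in meters, orientation as an x-y-z-w quaternion.
pos, orn = p.getBasePositionAndOrientation(robot)
print("base position:", pos)
print("base orientation:", orn)
p.disconnect()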
Example 1: _reward
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def _reward(self):
    # The reward is based on the height of the target object
    blockPos, blockOrn = p.getBasePositionAndOrientation(self.blockUid)
    closestPoints = p.getClosestPoints(self.blockUid, self._kuka.kukaUid, 1000, -1, self._kuka.kukaEndEffectorIndex)
    reward = -1000
    numPt = len(closestPoints)
    if (numPt > 0):
        # negative contact distance between the gripper and the block, scaled by 10
        reward = -closestPoints[0][8] * 10
    if (blockPos[2] > 0.2):
        reward = reward + 10000
        print("successfully grasped a block!!!")
    return reward
Example 2: _render
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def _render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    base_pos, orn = self._p.getBasePositionAndOrientation(self._racecar.racecarUniqueId)
    view_matrix = self._p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = self._p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = self._p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 3: _reward
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def _reward(self):
    # The reward is based on the height of the target object
    blockPos, blockOrn = p.getBasePositionAndOrientation(self.blockUid)
    closestPoints = p.getClosestPoints(self.blockUid, self._kuka.kukaUid, 1000, -1, self._kuka.kukaEndEffectorIndex)
    reward = -1000
    numPt = len(closestPoints)
    if (numPt > 0):
        # negative contact distance between the gripper and the block, scaled by 10
        reward = -closestPoints[0][8] * 10
    if (blockPos[2] > 0.2):
        # grasped a block
        reward = reward + 1000
    return reward
Example 4: bullet_base_pose_to_world_pose
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def bullet_base_pose_to_world_pose(self, pose_in_base):
    """
    Convert a pose in the base frame to a pose in the world frame.
    Args:
        pose_in_base: a (pos, orn) tuple.
    Returns:
        pose_in_world: a (pos, orn) tuple.
    """
    pose_in_base = T.pose2mat(pose_in_base)
    base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])
    base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])
    base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
    pose_in_world = T.pose_in_A_to_pose_in_B(
        pose_A=pose_in_base, pose_A_in_B=base_pose_in_world
    )
    return T.mat2pose(pose_in_world)
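The T.pose2mat and T.pose_in_A_to_pose_in_B helpers above come from the host project's transform utilities. As a rough, hedged sketch of what such a conversion does with the output of getBasePositionAndOrientation, the 4x4 homogeneous base pose can also be assembled directly from pybullet's getMatrixFromQuaternion; the function name base_pose_to_matrix is an illustrative assumption.

import numpy as np
import pybullet as p

def base_pose_to_matrix(body_id):
    # Sketch only: equivalent in spirit to T.pose2mat applied to the base pose.
    pos, orn = p.getBasePositionAndOrientation(body_id)
    rot = np.array(p.getMatrixFromQuaternion(orn)).reshape(3, 3)  # row-major 3x3 rotation
    mat = np.eye(4)
    mat[:3, :3] = rot   # rotation block
    mat[:3, 3] = pos    # translation column
    return mat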
Example 5: ik_robot_eef_joint_cartesian_pose
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def ik_robot_eef_joint_cartesian_pose(self):
    """
    Returns the current Cartesian pose of the last joint of the ik robot with respect to the
    base frame, as a (pos, orn) tuple where orn is an x-y-z-w quaternion.
    """
    eef_pos_in_world = np.array(p.getLinkState(self.ik_robot, 6)[0])
    eef_orn_in_world = np.array(p.getLinkState(self.ik_robot, 6)[1])
    eef_pose_in_world = T.pose2mat((eef_pos_in_world, eef_orn_in_world))
    base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])
    base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])
    base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
    world_pose_in_base = T.pose_inv(base_pose_in_world)
    eef_pose_in_base = T.pose_in_A_to_pose_in_B(
        pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base
    )
    return T.mat2pose(eef_pose_in_base)
Example 6: ik_robot_eef_joint_cartesian_pose
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def ik_robot_eef_joint_cartesian_pose(self):
    """
    Returns the current Cartesian pose of the last joint of the ik robot with respect to the
    base frame, as a (pos, orn) tuple where orn is an x-y-z-w quaternion.
    """
    eef_pos_in_world = np.array(p.getLinkState(self.ik_robot, 6)[0])
    eef_orn_in_world = np.array(p.getLinkState(self.ik_robot, 6)[1])
    eef_pose_in_world = T.pose2mat((eef_pos_in_world, eef_orn_in_world))
    base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])
    base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])
    base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
    world_pose_in_base = T.pose_inv(base_pose_in_world)
    eef_pose_in_base = T.pose_in_A_to_pose_in_B(
        pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base
    )
    return T.mat2pose(eef_pose_in_base)
Example 7: frameToWorldMatrix
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def frameToWorldMatrix(self, frame):
    """Gets the frame-to-world matrix transformation for the given frame. The frame can be a
    frame name from the URDF/SDF, or "origin" for the part origin.
    Arguments:
        frame {str} -- frame name
    Returns:
        np.matrix -- a 4x4 matrix
    """
    if frame == 'origin':
        frameToWorldPose = p.getBasePositionAndOrientation(self.robot)
    else:
        frameToWorldPose = p.getLinkState(self.robot, self.frames[frame])
    return self.poseToMatrix(frameToWorldPose)
Example 8: _get_obs
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def _get_obs(self, forces, forces_human):
    torso_pos = np.array(p.getLinkState(self.robot, 15 if self.robot_type == 'pr2' else 0, computeForwardKinematics=True, physicsClientId=self.id)[0])
    tool_pos, tool_orient = p.getBasePositionAndOrientation(self.cup, physicsClientId=self.id)
    robot_joint_states = p.getJointStates(self.robot, jointIndices=self.robot_right_arm_joint_indices, physicsClientId=self.id)
    robot_joint_positions = np.array([x[0] for x in robot_joint_states])
    robot_pos, robot_orient = p.getBasePositionAndOrientation(self.robot, physicsClientId=self.id)
    if self.human_control:
        human_pos = np.array(p.getBasePositionAndOrientation(self.human, physicsClientId=self.id)[0])
        human_joint_states = p.getJointStates(self.human, jointIndices=self.human_controllable_joint_indices, physicsClientId=self.id)
        human_joint_positions = np.array([x[0] for x in human_joint_states])
    head_pos, head_orient = p.getLinkState(self.human, 23, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    robot_obs = np.concatenate([tool_pos-torso_pos, tool_orient, tool_pos-self.target_pos, robot_joint_positions, head_pos-torso_pos, head_orient, forces]).ravel()
    if self.human_control:
        human_obs = np.concatenate([tool_pos-human_pos, tool_orient, tool_pos-self.target_pos, human_joint_positions, head_pos-human_pos, head_orient, forces_human]).ravel()
    else:
        human_obs = []
    return np.concatenate([robot_obs, human_obs]).ravel()
Example 9: _get_obs
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def _get_obs(self, forces, forces_human):
    torso_pos = np.array(p.getLinkState(self.robot, 15 if self.robot_type == 'pr2' else 0, computeForwardKinematics=True, physicsClientId=self.id)[0])
    spoon_pos, spoon_orient = p.getBasePositionAndOrientation(self.spoon, physicsClientId=self.id)
    robot_right_joint_states = p.getJointStates(self.robot, jointIndices=self.robot_right_arm_joint_indices, physicsClientId=self.id)
    robot_right_joint_positions = np.array([x[0] for x in robot_right_joint_states])
    robot_pos, robot_orient = p.getBasePositionAndOrientation(self.robot, physicsClientId=self.id)
    if self.human_control:
        human_pos = np.array(p.getBasePositionAndOrientation(self.human, physicsClientId=self.id)[0])
        human_joint_states = p.getJointStates(self.human, jointIndices=self.human_controllable_joint_indices, physicsClientId=self.id)
        human_joint_positions = np.array([x[0] for x in human_joint_states])
    head_pos, head_orient = p.getLinkState(self.human, 23, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    robot_obs = np.concatenate([spoon_pos-torso_pos, spoon_orient, spoon_pos-self.target_pos, robot_right_joint_positions, head_pos-torso_pos, head_orient, forces]).ravel()
    if self.human_control:
        human_obs = np.concatenate([spoon_pos-human_pos, spoon_orient, spoon_pos-self.target_pos, human_joint_positions, head_pos-human_pos, head_orient, forces_human]).ravel()
    else:
        human_obs = []
    return np.concatenate([robot_obs, human_obs]).ravel()
Example 10: _get_obs
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def _get_obs(self, forces, forces_human):
    torso_pos = np.array(p.getLinkState(self.robot, 15 if self.robot_type == 'pr2' else 0, computeForwardKinematics=True, physicsClientId=self.id)[0])
    state = p.getLinkState(self.tool, 1, computeForwardKinematics=True, physicsClientId=self.id)
    tool_pos = np.array(state[0])
    tool_orient = np.array(state[1])  # Quaternions
    robot_joint_states = p.getJointStates(self.robot, jointIndices=self.robot_left_arm_joint_indices, physicsClientId=self.id)
    robot_joint_positions = np.array([x[0] for x in robot_joint_states])
    robot_pos, robot_orient = p.getBasePositionAndOrientation(self.robot, physicsClientId=self.id)
    if self.human_control:
        human_pos = np.array(p.getBasePositionAndOrientation(self.human, physicsClientId=self.id)[0])
        human_joint_states = p.getJointStates(self.human, jointIndices=self.human_controllable_joint_indices, physicsClientId=self.id)
        human_joint_positions = np.array([x[0] for x in human_joint_states])
    # Human shoulder, elbow, and wrist joint locations
    shoulder_pos, shoulder_orient = p.getLinkState(self.human, 5, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    elbow_pos, elbow_orient = p.getLinkState(self.human, 7, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    wrist_pos, wrist_orient = p.getLinkState(self.human, 9, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    robot_obs = np.concatenate([tool_pos-torso_pos, tool_orient, robot_joint_positions, shoulder_pos-torso_pos, elbow_pos-torso_pos, wrist_pos-torso_pos, forces]).ravel()
    if self.human_control:
        human_obs = np.concatenate([tool_pos-human_pos, tool_orient, human_joint_positions, shoulder_pos-human_pos, elbow_pos-human_pos, wrist_pos-human_pos, forces_human]).ravel()
    else:
        human_obs = []
    return np.concatenate([robot_obs, human_obs]).ravel()
Example 11: _get_obs
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def _get_obs(self, forces, forces_human):
    torso_pos = np.array(p.getLinkState(self.robot, 15 if self.robot_type == 'pr2' else 0, computeForwardKinematics=True, physicsClientId=self.id)[0])
    state = p.getLinkState(self.tool, 1, computeForwardKinematics=True, physicsClientId=self.id)
    tool_pos = np.array(state[0])
    tool_orient = np.array(state[1])  # Quaternions
    robot_joint_states = p.getJointStates(self.robot, jointIndices=self.robot_left_arm_joint_indices, physicsClientId=self.id)
    robot_joint_positions = np.array([x[0] for x in robot_joint_states])
    robot_pos, robot_orient = p.getBasePositionAndOrientation(self.robot, physicsClientId=self.id)
    if self.human_control:
        human_pos = np.array(p.getBasePositionAndOrientation(self.human, physicsClientId=self.id)[0])
        human_joint_states = p.getJointStates(self.human, jointIndices=self.human_controllable_joint_indices, physicsClientId=self.id)
        human_joint_positions = np.array([x[0] for x in human_joint_states])
    # Human shoulder, elbow, and wrist joint locations
    shoulder_pos, shoulder_orient = p.getLinkState(self.human, 5, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    elbow_pos, elbow_orient = p.getLinkState(self.human, 7, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    wrist_pos, wrist_orient = p.getLinkState(self.human, 9, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    robot_obs = np.concatenate([tool_pos-torso_pos, tool_orient, tool_pos-self.target_pos, self.target_pos-torso_pos, robot_joint_positions, shoulder_pos-torso_pos, elbow_pos-torso_pos, wrist_pos-torso_pos, forces]).ravel()
    if self.human_control:
        human_obs = np.concatenate([tool_pos-human_pos, tool_orient, tool_pos-self.target_pos, self.target_pos-human_pos, human_joint_positions, shoulder_pos-human_pos, elbow_pos-human_pos, wrist_pos-human_pos, forces_human]).ravel()
    else:
        human_obs = []
    return np.concatenate([robot_obs, human_obs]).ravel()
Example 12: _get_obs
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def _get_obs(self, forces, forces_human):
    torso_pos = np.array(p.getLinkState(self.robot, 15 if self.robot_type == 'pr2' else 0, computeForwardKinematics=True, physicsClientId=self.id)[0])
    state = p.getLinkState(self.robot, 76 if self.robot_type == 'pr2' else 19 if self.robot_type == 'sawyer' else 48 if self.robot_type == 'baxter' else 8, computeForwardKinematics=True, physicsClientId=self.id)
    tool_pos = np.array(state[0])
    tool_orient = np.array(state[1])  # Quaternions
    robot_joint_states = p.getJointStates(self.robot, jointIndices=self.robot_left_arm_joint_indices, physicsClientId=self.id)
    robot_joint_positions = np.array([x[0] for x in robot_joint_states])
    robot_pos, robot_orient = p.getBasePositionAndOrientation(self.robot, physicsClientId=self.id)
    if self.human_control:
        human_pos = np.array(p.getBasePositionAndOrientation(self.human, physicsClientId=self.id)[0])
        human_joint_states = p.getJointStates(self.human, jointIndices=self.human_controllable_joint_indices, physicsClientId=self.id)
        human_joint_positions = np.array([x[0] for x in human_joint_states])
    # Human shoulder, elbow, and wrist joint locations
    shoulder_pos, shoulder_orient = p.getLinkState(self.human, 15, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    elbow_pos, elbow_orient = p.getLinkState(self.human, 17, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    wrist_pos, wrist_orient = p.getLinkState(self.human, 19, computeForwardKinematics=True, physicsClientId=self.id)[:2]
    robot_obs = np.concatenate([tool_pos-torso_pos, tool_orient, robot_joint_positions, shoulder_pos-torso_pos, elbow_pos-torso_pos, wrist_pos-torso_pos, forces]).ravel()
    if self.human_control:
        human_obs = np.concatenate([tool_pos-human_pos, tool_orient, human_joint_positions, shoulder_pos-human_pos, elbow_pos-human_pos, wrist_pos-human_pos, forces_human]).ravel()
    else:
        human_obs = []
    return np.concatenate([robot_obs, human_obs]).ravel()
Example 13: getState
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def getState(self):
    '''
    Simple tool: take the current simulation and get a state representing
    what the robot will look like.
    '''
    (pos, rot) = pb.getBasePositionAndOrientation(self.handle)
    # TODO(cpaxton): improve forward kinematics efficiency by just using
    # PyBullet to get the position of the grasp frame.
    q, dq = self._getArmPosition()
    return SimulationRobotState(robot=self,
                                base_pos=pos,
                                base_rot=rot,
                                base_linear_v=0,
                                base_angular_v=0,
                                arm=q,
                                arm_v=dq,
                                gripper=self._getGripper(),
                                T=self.fwd(q))
Example 14: render_physics
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def render_physics(self):
    robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=robot_pos,
        distance=self.tracking_camera["distance"],
        yaw=self.tracking_camera["yaw"],
        pitch=self.tracking_camera["pitch"],
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    with Profiler("render physics: Get camera image"):
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_TINY_RENDERER
        )
    rgb_array = np.array(px).reshape((self._render_width, self._render_height, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 15: _flag_reposition
# Module to import: import pybullet [as alias]
# Or: from pybullet import getBasePositionAndOrientation [as alias]
def _flag_reposition(self):
    self.walk_target_x = self.np_random.uniform(low=-self.scene.stadium_halflen,
                                                high=+self.scene.stadium_halflen)
    self.walk_target_y = self.np_random.uniform(low=-self.scene.stadium_halfwidth,
                                                high=+self.scene.stadium_halfwidth)
    more_compact = 0.5  # set to 1.0 for the whole football field
    self.walk_target_x *= more_compact / self.robot.mjcf_scaling
    self.walk_target_y *= more_compact / self.robot.mjcf_scaling
    self.flag = None
    #self.flag = self.scene.cpp_world.debug_sphere(self.walk_target_x, self.walk_target_y, 0.2, 0.2, 0xFF8080)
    self.flag_timeout = 3000 / self.scene.frame_skip
    #print('targetxy', self.flagid, self.walk_target_x, self.walk_target_y, p.getBasePositionAndOrientation(self.flagid))
    #p.resetBasePositionAndOrientation(self.flagid, posObj=[self.walk_target_x, self.walk_target_y, 0.5], ornObj=[0, 0, 0, 0])
    if self.gui:
        if self.lastid:
            p.removeBody(self.lastid)
        self.lastid = p.createMultiBody(baseVisualShapeIndex=self.visualid, baseCollisionShapeIndex=-1,
                                        basePosition=[self.walk_target_x, self.walk_target_y, 0.5])
    self.robot.walk_target_x = self.walk_target_x
    self.robot.walk_target_y = self.walk_target_y
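In the last example, self.visualid is created elsewhere in the environment. As a hedged sketch of how such a visual-only flag can be built and then queried with getBasePositionAndOrientation, the snippet below uses p.createVisualShape and p.createMultiBody; the radius, color, and position values are illustrative assumptions.

import pybullet as p

p.connect(p.DIRECT)
# Visual-only sphere marker: no collision shape is attached (values are illustrative).
visual_id = p.createVisualShape(shapeType=p.GEOM_SPHERE, radius=0.2, rgbaColor=[1, 0.5, 0.5, 1])
flag_id = p.createMultiBody(baseVisualShapeIndex=visual_id,
                            baseCollisionShapeIndex=-1,
                            basePosition=[1.0, 2.0, 0.5])
# Reads back the pose just set: ((1.0, 2.0, 0.5), (0.0, 0.0, 0.0, 1.0))
print(p.getBasePositionAndOrientation(flag_id))
p.disconnect()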