This article collects typical usage examples of the pybullet.resetDebugVisualizerCamera method in Python. If you are wondering what pybullet.resetDebugVisualizerCamera does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the pybullet module.
The following presents 15 code examples of pybullet.resetDebugVisualizerCamera, sorted by popularity by default.
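Before the examples, here is a minimal, self-contained sketch of the call itself. The target position and angle values below are illustrative assumptions, not taken from any of the examples that follow.

import time
import pybullet as p

# resetDebugVisualizerCamera only affects the debug GUI, so connect in GUI mode.
p.connect(p.GUI)

# Place the debug camera 2 m from the target, yawed 50 degrees and pitched 35 degrees downward.
p.resetDebugVisualizerCamera(cameraDistance=2.0,
                             cameraYaw=50,
                             cameraPitch=-35,
                             cameraTargetPosition=[0, 0, 0])

for _ in range(240):
    p.stepSimulation()
    time.sleep(1. / 240.)

p.disconnect()

The call has no return value; to read the current camera state back, use p.getDebugVisualizerCamera(), as several of the examples below do.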
Example 1: step
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def step(self, action):
    yaw = 0
    while True:
        # Slowly orbit the visualizer camera around the person while nudging selected joints.
        yaw += -0.75
        p.resetDebugVisualizerCamera(cameraDistance=1.2, cameraYaw=yaw, cameraPitch=-20, cameraTargetPosition=[0, 0, 1.0], physicsClientId=self.id)
        indices = [4, 5, 6]
        # indices = [14, 15, 16]
        deltas = [0.01, 0.01, -0.01]
        indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        deltas = [0, 0, 0, 0, 0.01, 0.01, -0.01, 0, 0, 0]
        # indices = []
        # deltas = []
        # indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 10
        # deltas = [0, 0, 0, 0, -0.01, 0.01, -0.01, 0, 0, 0]
        for i, d in zip(indices, deltas):
            joint_position = p.getJointState(self.human, jointIndex=i, physicsClientId=self.id)[0]
            if joint_position + d > self.human_lower_limits[i] and joint_position + d < self.human_upper_limits[i]:
                p.resetJointState(self.human, jointIndex=i, targetValue=joint_position+d, targetVelocity=0, physicsClientId=self.id)
        p.stepSimulation(physicsClientId=self.id)
        print('cameraYaw=%.2f, cameraPitch=%.2f, distance=%.2f' % p.getDebugVisualizerCamera(physicsClientId=self.id)[-4:-1])
        self.enforce_realistic_human_joint_limits()
        time.sleep(0.05)
    return [], None, None, None
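Example 1 drives the camera from inside an environment class; the orbiting-camera idea on its own, stripped of the assistive-gym specifics, looks roughly like the sketch below (the loaded URDF and numeric values are assumptions for illustration).

import time
import pybullet as p
import pybullet_data

p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("r2d2.urdf", basePosition=[0, 0, 0.5])

yaw = 0
for _ in range(1000):
    # Decrease the yaw a little every frame so the camera circles the target.
    yaw -= 0.75
    p.resetDebugVisualizerCamera(cameraDistance=1.5, cameraYaw=yaw, cameraPitch=-20,
                                 cameraTargetPosition=[0, 0, 0.5])
    p.stepSimulation()
    time.sleep(1. / 240.)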
Example 2: reset
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def reset(self, robot_base_offset=[0, 0, 0], task='scratch_itch_pr2'):
    self.human, self.wheelchair, self.robot, self.robot_lower_limits, self.robot_upper_limits, self.human_lower_limits, self.human_upper_limits, self.robot_right_arm_joint_indices, self.robot_left_arm_joint_indices, self.gender = self.world_creation.create_new_world(furniture_type=None, static_human_base=True, human_impairment='none', print_joints=False, gender='random')
    p.resetDebugVisualizerCamera(cameraDistance=1.2, cameraYaw=0, cameraPitch=-20, cameraTargetPosition=[0, 0, 1.0], physicsClientId=self.id)

    joints_positions = []
    # self.human_controllable_joint_indices = []
    self.human_controllable_joint_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    # self.human_controllable_joint_indices = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
    self.world_creation.setup_human_joints(self.human, joints_positions, self.human_controllable_joint_indices, use_static_joints=True, human_reactive_force=None)

    p.setGravity(0, 0, 0, physicsClientId=self.id)

    # Enable rendering
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)

    return []
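The trailing configureDebugVisualizer call in Example 2 re-enables rendering, which implies rendering was switched off while the world was being built. A common version of that pattern, sketched here with placeholder URDFs from pybullet_data rather than the assistive-gym assets, wraps asset loading between a disable and an enable so the GUI does not redraw after every load:

import pybullet as p
import pybullet_data

p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())

# Suspend GUI rendering while the scene is assembled.
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
p.loadURDF("plane.urdf")
p.loadURDF("table/table.urdf", basePosition=[0.5, 0, 0])
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)

# Frame the freshly built scene.
p.resetDebugVisualizerCamera(cameraDistance=1.2, cameraYaw=0, cameraPitch=-20,
                             cameraTargetPosition=[0, 0, 1.0])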
Example 3: step_physics
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def step_physics(self, a):
    self.nframe += 1
    if not self.scene.multiplayer:
        self.robot.apply_action(a)
        self.scene.global_step()

    self.rewards = self._rewards(a)
    done = self._termination()
    self.reward += sum(self.rewards)
    self.eps_reward += sum(self.rewards)

    if self.gui:
        pos = self.robot._get_scaled_position()
        orn = self.robot.get_orientation()
        pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
        pos = np.array(pos)
        dist = self.tracking_camera['distance'] / self.robot.mjcf_scaling
        p.resetDebugVisualizerCamera(dist, self.tracking_camera['yaw'], self.tracking_camera['pitch'], pos)

    return {}, sum(self.rewards), done, {}
Example 4: lookAt
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def lookAt(self, target):
    """Control the look-at point of the visualizer camera.

    Arguments:
        target {tuple} -- target as (x, y, z) tuple
    """
    if self.gui:
        params = p.getDebugVisualizerCamera()
        p.resetDebugVisualizerCamera(params[10], params[8], params[9], target)
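Example 4 reads the current camera back with getDebugVisualizerCamera and keeps it while only the target moves: in the returned tuple, index 8 is the yaw, index 9 the pitch, and index 10 the distance. A slightly more explicit way to write the same idea (a sketch, assuming a GUI client is already connected) is:

import pybullet as p

def look_at(target):
    # The last four entries of getDebugVisualizerCamera() are (yaw, pitch, dist, target).
    yaw, pitch, dist, _ = p.getDebugVisualizerCamera()[-4:]
    # Keep the current viewpoint and move only the point the camera looks at.
    p.resetDebugVisualizerCamera(cameraDistance=dist, cameraYaw=yaw,
                                 cameraPitch=pitch, cameraTargetPosition=target)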
Example 5: set_camera
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def set_camera(focal_point, focal_dist, euler, width=500, height=540):
    """ """
    _euler = euler / np.pi * 180
    roll = _euler[0]
    pitch = _euler[1]
    yaw = _euler[2]
    aspect = float(height) / float(width)
    # TODO: Order of pitch and yaw for the camera?
    p.resetDebugVisualizerCamera(
        cameraDistance=focal_dist,
        cameraYaw=pitch,
        cameraPitch=yaw,
        cameraTargetPosition=focal_point)
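The TODO in Example 5 flags an open question: as written, the Euler pitch is passed to cameraYaw and the Euler yaw to cameraPitch. In PyBullet's convention, cameraYaw rotates the camera around the world's vertical axis and cameraPitch sets its elevation, so the conventional mapping would look like the sketch below. This is an assumption about the intended behaviour, not a fix taken from the source project.

import numpy as np
import pybullet as p

def set_camera_conventional(focal_point, focal_dist, euler):
    # euler = (roll, pitch, yaw) in radians; the debug camera has no roll control.
    _euler = np.degrees(euler)
    p.resetDebugVisualizerCamera(
        cameraDistance=focal_dist,
        cameraYaw=_euler[2],    # rotation about the vertical axis
        cameraPitch=_euler[1],  # elevation above or below the target
        cameraTargetPosition=focal_point)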
Example 6: _reset
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def _reset(self):
    assert(self._robot_introduced)
    assert(self._scene_introduced)
    debugmode = 1
    if debugmode:
        print("Episode: steps:{} score:{}".format(self.nframe, self.reward))
        body_xyz = self.robot.body_xyz
        #print("[{}, {}, {}],".format(body_xyz[0], body_xyz[1], body_xyz[2]))
        print("Episode count: {}".format(self.eps_count))
    self.eps_count += 1
    self.nframe = 0
    self.eps_reward = 0

    BaseEnv._reset(self)

    if not self.ground_ids:
        self.parts, self.jdict, self.ordered_joints, self.robot_body = self.robot.addToScene([])
        self.ground_ids = set(self.scene.scene_obj_list)

    ## Todo: (hzyjerry) this part is not working, robot_tracking_id = -1
    for i in range(p.getNumBodies()):
        if (p.getBodyInfo(i)[0].decode() == self.robot_body.get_name()):
            self.robot_tracking_id = i
        #print(p.getBodyInfo(i)[0].decode())
    i = 0

    eye_pos, eye_quat = self.get_eye_pos_orientation()
    pose = [eye_pos, eye_quat]
    observations = self.render_observations(pose)
    pos = self.robot._get_scaled_position()
    orn = self.robot.get_orientation()
    pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
    p.resetDebugVisualizerCamera(self.tracking_camera['distance'], self.tracking_camera['yaw'], self.tracking_camera['pitch'], pos)
    return observations
Example 7: _reset
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def _reset(self):
    assert(self._robot_introduced)
    assert(self._scene_introduced)
    debugmode = 1
    if debugmode:
        print("Episode: steps:{} score:{}".format(self.nframe, self.reward))
        body_xyz = self.robot.body_xyz
        print("[{}, {}, {}],".format(body_xyz[0], body_xyz[1], body_xyz[2]))
        print("Episode count: {}".format(self.eps_count))
    self.eps_count += 1
    self.nframe = 0
    self.eps_reward = 0

    BaseEnv._reset(self)

    if not self.ground_ids:
        self.parts, self.jdict, self.ordered_joints, self.robot_body = self.robot.addToScene([])
        self.ground_ids = set(self.scene.scene_obj_list)

    ## Todo: (hzyjerry) this part is not working, robot_tracking_id = -1
    for i in range(p.getNumBodies()):
        if (p.getBodyInfo(i)[0].decode() == self.robot_body.get_name()):
            self.robot_tracking_id = i
        #print(p.getBodyInfo(i)[0].decode())
    i = 0

    eye_pos, eye_quat = self.get_eye_pos_orientation()
    pose = [eye_pos, eye_quat]
    observations = self.render_observations(pose)
    pos = self.robot._get_scaled_position()
    orn = self.robot.get_orientation()
    pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
    p.resetDebugVisualizerCamera(self.tracking_camera['distance'], self.tracking_camera['yaw'], self.tracking_camera['pitch'], pos)
    return observations
Example 8: move_and_look_at
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def move_and_look_at(self, i, j, k, x, y, z):
    lookat = [x, y, z]
    distance = 10
    yaw = 10
    p.resetDebugVisualizerCamera(distance, yaw, -20, lookat)
Example 9: main
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def main():
    # initialize robot
    robot = skrobot.models.Kuka()
    interface = skrobot.interfaces.PybulletRobotInterface(robot)

    pybullet.resetDebugVisualizerCamera(
        cameraDistance=1.5,
        cameraYaw=45,
        cameraPitch=-45,
        cameraTargetPosition=(0, 0, 0.5),
    )
    print('==> Initialized Kuka Robot on PyBullet')
    for _ in range(100):
        pybullet.stepSimulation()
    time.sleep(3)

    # reset pose
    print('==> Moving to Reset Pose')
    robot.reset_manip_pose()
    interface.angle_vector(robot.angle_vector(), realtime_simulation=True)
    interface.wait_interpolation()
    time.sleep(3)

    # ik
    print('==> Solving Inverse Kinematics')
    target_coords = skrobot.coordinates.Coordinates(
        pos=[0.5, 0, 0]
    ).rotate(np.pi / 2.0, 'y', 'local')
    skrobot.interfaces.pybullet.draw(target_coords)
    robot.inverse_kinematics(
        target_coords,
        link_list=robot.rarm.link_list,
        move_target=robot.rarm_end_coords,
        rotation_axis=True,
        stop=1000,
    )
    interface.angle_vector(robot.angle_vector(), realtime_simulation=True)
    interface.wait_interpolation()

    # wait
    while pybullet.isConnected():
        time.sleep(0.01)
Example 10: __init__
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def __init__(self,
             urdfRoot=pybullet_data.getDataPath(),
             actionRepeat=1,
             isEnableSelfCollision=True,
             renders=False,
             isDiscrete=False,
             maxSteps=1000):
    #print("KukaGymEnv __init__")
    self._isDiscrete = isDiscrete
    self._timeStep = 1. / 240.
    self._urdfRoot = urdfRoot
    self._actionRepeat = actionRepeat
    self._isEnableSelfCollision = isEnableSelfCollision
    self._observation = []
    self._envStepCounter = 0
    self._renders = renders
    self._maxSteps = maxSteps
    self.terminated = 0
    self._cam_dist = 1.3
    self._cam_yaw = 180
    self._cam_pitch = -40

    self._p = p
    if self._renders:
        cid = p.connect(p.SHARED_MEMORY)
        if (cid < 0):
            cid = p.connect(p.GUI)
        p.resetDebugVisualizerCamera(1.3, 180, -41, [0.52, -0.2, -0.33])
    else:
        p.connect(p.DIRECT)
    #timinglog = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "kukaTimings.json")
    self._seed()
    self.reset()
    observationDim = len(self.getExtendedObservation())
    #print("observationDim")
    #print(observationDim)

    observation_high = np.array([largeValObservation] * observationDim)
    if (self._isDiscrete):
        self.action_space = spaces.Discrete(7)
    else:
        action_dim = 3
        self._action_bound = 1
        action_high = np.array([self._action_bound] * action_dim)
        self.action_space = spaces.Box(-action_high, action_high)
    self.observation_space = spaces.Box(-observation_high, observation_high)
    self.viewer = None
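Examples 10 and 11 both use the same connection idiom: try to attach to an already-running shared-memory simulation first (for instance one started from the PyBullet Example Browser), and only start a fresh GUI instance if none is found; the camera is then set once right after connecting. Reduced to its essentials, and with the same numeric values as the examples, the idiom is:

import pybullet as p

# Attach to an existing shared-memory server if one is running, otherwise open a GUI.
cid = p.connect(p.SHARED_MEMORY)
if cid < 0:
    cid = p.connect(p.GUI)

# Set the initial viewpoint once after connecting.
p.resetDebugVisualizerCamera(1.3, 180, -41, [0.52, -0.2, -0.33])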
Example 11: __init__
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def __init__(self,
             urdfRoot=pybullet_data.getDataPath(),
             actionRepeat=1,
             isEnableSelfCollision=True,
             renders=False,
             isDiscrete=False):
    self._timeStep = 1. / 240.
    self._urdfRoot = urdfRoot
    self._actionRepeat = actionRepeat
    self._isEnableSelfCollision = isEnableSelfCollision
    self._observation = []
    self._envStepCounter = 0
    self._renders = renders
    self._width = 341
    self._height = 256
    self._isDiscrete = isDiscrete
    self.terminated = 0

    self._p = p
    if self._renders:
        cid = p.connect(p.SHARED_MEMORY)
        if (cid < 0):
            p.connect(p.GUI)
        p.resetDebugVisualizerCamera(1.3, 180, -41, [0.52, -0.2, -0.33])
    else:
        p.connect(p.DIRECT)
    #timinglog = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "kukaTimings.json")
    self._seed()
    self.reset()
    observationDim = len(self.getExtendedObservation())
    #print("observationDim")
    #print(observationDim)

    observation_high = np.array([np.finfo(np.float32).max] * observationDim)
    if (self._isDiscrete):
        self.action_space = spaces.Discrete(7)
    else:
        action_dim = 3
        self._action_bound = 1
        action_high = np.array([self._action_bound] * action_dim)
        self.action_space = spaces.Box(-action_high, action_high)
    self.observation_space = spaces.Box(low=0, high=255, shape=(self._height, self._width, 4))
    self.viewer = None
Example 12: _step
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def _step(self, a):
    self.nframe += 1

    if not self.scene.multiplayer:  # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
        for i in range(math.ceil(self.config['dt'] / self.config['gibson_dt'])):
            state = self.robot.apply_action(a)
            self.scene.global_step()

    self.rewards = self._rewards(a)
    done = self._termination()
    debugmode = 0
    if (debugmode):
        print("rewards=")
        print(self.rewards)
        print("sum rewards")
        print(sum(self.rewards))

    self.reward += sum(self.rewards)
    self.eps_reward += sum(self.rewards)

    debugmode = 0
    if debugmode:
        print("Eps frame {} reward {}".format(self.nframe, self.reward))
        print("position", self.robot.get_position())

    if self.gui:
        pos = self.robot._get_scaled_position()
        orn = self.robot.get_orientation()
        pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
        pos = np.array(pos)
        dist = self.tracking_camera['distance'] / self.robot.mjcf_scaling
        p.resetDebugVisualizerCamera(dist, self.tracking_camera['yaw'], self.tracking_camera['pitch'], pos)

    eye_pos, eye_quat = self.get_eye_pos_orientation()
    pose = [eye_pos, eye_quat]
    observations = self.render_observations(pose)

    debugmode = 0
    if (debugmode):
        print("Camera env eye position", eye_pos)
        print("episode rewards", sum(self.rewards), "steps", self.nframe)

    episode = None
    if done:
        episode = {'r': self.reward,
                   'l': self.nframe}

    debugmode = 0
    if debugmode:
        print("return episode:", episode)

    return observations, sum(self.rewards), bool(done), dict(eye_pos=eye_pos, eye_quat=eye_quat, episode=episode, x=state[0], y=state[1], yaw=state[2], height=state[3], speed=state[4])
Example 13: _step
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def _step(self, a):
    self.nframe += 1

    if not self.scene.multiplayer:  # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
        self.robot.apply_action(a)
        self.scene.global_step()

    self.rewards = self._rewards(a)
    done = self._termination()
    debugmode = 0
    if (debugmode):
        print("rewards=")
        print(self.rewards)
        print("sum rewards")
        print(sum(self.rewards))

    self.reward += sum(self.rewards)
    self.eps_reward += sum(self.rewards)

    debugmode = 0
    if debugmode:
        print("Eps frame {} reward {}".format(self.nframe, self.reward))
        print("position", self.robot.get_position())

    if self.gui:
        pos = self.robot._get_scaled_position()
        orn = self.robot.get_orientation()
        pos = (pos[0], pos[1], pos[2] + self.tracking_camera['z_offset'])
        pos = np.array(pos)
        dist = self.tracking_camera['distance'] / self.robot.mjcf_scaling
        p.resetDebugVisualizerCamera(dist, self.tracking_camera['yaw'], self.tracking_camera['pitch'], pos)

    eye_pos, eye_quat = self.get_eye_pos_orientation()
    pose = [eye_pos, eye_quat]
    observations = self.render_observations(pose)

    debugmode = 0
    if (debugmode):
        print("Camera env eye position", eye_pos)
        print("episode rewards", sum(self.rewards), "steps", self.nframe)

    episode = None
    if done:
        episode = {'r': self.reward,
                   'l': self.nframe}

    debugmode = 0
    if debugmode:
        print("return episode:", episode)

    return observations, sum(self.rewards), bool(done), dict(eye_pos=eye_pos, eye_quat=eye_quat, episode=episode)
Example 14: __init__
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def __init__(self,
             action_repeat=1,
             use_IK=1,
             control_arm='l',
             control_orientation=0,
             obj_name=get_objects_list()[0],
             obj_pose_rnd_std=0,
             renders=False,
             max_steps=2000):
    self._time_step = 1. / 240.
    self._control_arm = control_arm
    self._use_IK = use_IK
    self._control_orientation = control_orientation
    self._action_repeat = action_repeat
    self._observation = []
    self._hand_pose = []
    self._env_step_counter = 0
    self._renders = renders
    self._max_steps = max_steps
    self._last_frame_time = 0
    self.terminated = 0
    self._target_dist_min = 0.03

    # Initialize PyBullet simulator
    self._p = p
    if self._renders:
        self._physics_client_id = p.connect(p.SHARED_MEMORY)
        if self._physics_client_id < 0:
            self._physics_client_id = p.connect(p.GUI)
        p.resetDebugVisualizerCamera(2.5, 90, -60, [0.0, -0.0, -0.0], physicsClientId=self._physics_client_id)
        p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 0)
    else:
        self._physics_client_id = p.connect(p.DIRECT)

    # Load robot
    self._robot = iCubEnv(self._physics_client_id,
                          use_IK=self._use_IK, control_arm=self._control_arm,
                          control_orientation=self._control_orientation)

    # Load world environment
    self._world = WorldEnv(self._physics_client_id,
                           obj_name=obj_name, obj_pose_rnd_std=obj_pose_rnd_std,
                           workspace_lim=self._robot.get_workspace())

    # limit iCub workspace to table plane
    workspace = self._robot.get_workspace()
    workspace[2][0] = self._world.get_table_height()
    self._robot.set_workspace(workspace)

    # Define spaces
    self.observation_space, self.action_space = self.create_gym_spaces()

    # initialize simulation environment
    self.seed()
    # self.reset()
Example 15: __init__
# Required imports: import pybullet [as alias]
# Or: from pybullet import resetDebugVisualizerCamera [as alias]
def __init__(self,
             numControlledJoints=7,
             use_IK=0,
             action_repeat=1,
             obj_name=get_objects_list()[1],
             renders=False,
             max_steps=1000,
             obj_pose_rnd_std=0.0,
             tg_pose_rnd_std=0.0,
             includeVelObs=True):
    self._timeStep = 1. / 240.
    self.action_dim = []
    self._use_IK = use_IK
    self._action_repeat = action_repeat
    self._observation = []
    self._env_step_counter = 0
    self._renders = renders
    self._max_steps = max_steps
    self.terminated = False
    self._target_pose = [0.0] * 3
    self._target_dist_max = 0.3
    self._target_dist_min = 0.1
    self._tg_pose_rnd_std = tg_pose_rnd_std
    self.includeVelObs = includeVelObs

    if self._renders:
        self._physics_client_id = p.connect(p.SHARED_MEMORY)
        if self._physics_client_id < 0:
            self._physics_client_id = p.connect(p.GUI)
        p.resetDebugVisualizerCamera(2.5, 90, -60, [0.52, -0.2, -0.33], physicsClientId=self._physics_client_id)
    else:
        self._physics_client_id = p.connect(p.DIRECT)

    # Load robot
    self._robot = pandaEnv(self._physics_client_id, use_IK=self._use_IK, joint_action_space=numControlledJoints)

    # Load world environment
    self._world = WorldEnv(self._physics_client_id,
                           obj_name=obj_name, obj_pose_rnd_std=obj_pose_rnd_std,
                           workspace_lim=self._robot.get_workspace())

    # limit robot workspace to table plane
    workspace = self._robot.get_workspace()
    workspace[2][0] = self._world.get_table_height() - 0.2
    self._robot.set_workspace(workspace)

    # Define spaces
    self.observation_space, self.action_space = self.create_gym_spaces()

    self.seed()
    # self.reset()