This article collects typical usage examples of the pybullet.readUserDebugParameter method in Python. If you are unsure how to call pybullet.readUserDebugParameter, or are looking for concrete examples of it in practice, the curated code samples below may help. You can also explore further usage examples from the pybullet module itself.
Four code examples of the pybullet.readUserDebugParameter method are shown below, sorted by popularity by default.
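Before diving into the examples, here is a minimal, self-contained sketch of how readUserDebugParameter pairs with addUserDebugParameter: a slider created with p.addUserDebugParameter returns a parameter id, and each call to p.readUserDebugParameter(id) returns that slider's current value. The slider name, range, and loop below are illustrative and not taken from the examples.

import time
import pybullet as p

# Debug sliders are a GUI feature, so connect with the GUI backend.
p.connect(p.GUI)

# addUserDebugParameter(name, rangeMin, rangeMax, startValue) returns a parameter id.
speed_slider = p.addUserDebugParameter("speed", -1.0, 1.0, 0.0)

for _ in range(100):
    # readUserDebugParameter(id) returns the slider's current value.
    speed = p.readUserDebugParameter(speed_slider)
    print("current speed:", speed)
    time.sleep(0.1)

p.disconnect()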
Example 1: render
# Required module: import pybullet [as alias]
# Or: from pybullet import readUserDebugParameter [as alias]
def render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    camera_target_pos = self.camera_target_pos
    if self.debug:
        self._cam_dist = p.readUserDebugParameter(self.dist_slider)
        self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
        self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
        x = p.readUserDebugParameter(self.x_slider)
        y = p.readUserDebugParameter(self.y_slider)
        z = p.readUserDebugParameter(self.z_slider)
        camera_target_pos = (x, y, z)
        # self._cam_roll = p.readUserDebugParameter(self.roll_slider)
    # TODO: recompute view_matrix and proj_matrix only in debug mode
    view_matrix1 = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=self._cam_roll,
        upAxisIndex=2)
    proj_matrix1 = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px1, _, _) = p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix1,
        projectionMatrix=proj_matrix1, renderer=self.renderer)
    rgb_array1 = np.array(px1)
    if self.multi_view:
        # adding a second camera on the other side of the robot
        view_matrix2 = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(0.316, 0.316, -0.105),
            distance=1.05,
            yaw=32,
            pitch=-13,
            roll=0,
            upAxisIndex=2)
        proj_matrix2 = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        (_, _, px2, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix2,
            projectionMatrix=proj_matrix2, renderer=self.renderer)
        rgb_array2 = np.array(px2)
        rgb_array_res = np.concatenate((rgb_array1[:, :, :3], rgb_array2[:, :, :3]), axis=2)
    else:
        rgb_array_res = rgb_array1[:, :, :3]
    return rgb_array_res
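The render() method above reads sliders (self.dist_slider, self.yaw_slider, and so on) that the environment must have created beforehand, typically in its constructor when debug mode is enabled. A hedged sketch of that setup follows; the helper name, slider labels, and ranges are assumptions for illustration, not part of the original environment.

import pybullet as p

def create_camera_sliders(cam_dist, cam_yaw, cam_pitch, target_pos):
    # One slider per camera parameter; the environment would store the
    # returned ids as self.dist_slider, self.yaw_slider, etc.
    # Requires an active GUI connection (p.connect(p.GUI)).
    dist_slider = p.addUserDebugParameter("cam_dist", 0.1, 5.0, cam_dist)
    yaw_slider = p.addUserDebugParameter("cam_yaw", -180, 180, cam_yaw)
    pitch_slider = p.addUserDebugParameter("cam_pitch", -90, 90, cam_pitch)
    x_slider = p.addUserDebugParameter("target_x", -2.0, 2.0, target_pos[0])
    y_slider = p.addUserDebugParameter("target_y", -2.0, 2.0, target_pos[1])
    z_slider = p.addUserDebugParameter("target_z", -2.0, 2.0, target_pos[2])
    return dist_slider, yaw_slider, pitch_slider, x_slider, y_slider, z_slider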
Example 2: render
# Required module: import pybullet [as alias]
# Or: from pybullet import readUserDebugParameter [as alias]
def render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    camera_target_pos = self.camera_target_pos
    if self.debug:
        self._cam_dist = p.readUserDebugParameter(self.dist_slider)
        self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
        self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
        x = p.readUserDebugParameter(self.x_slider)
        y = p.readUserDebugParameter(self.y_slider)
        z = p.readUserDebugParameter(self.z_slider)
        camera_target_pos = (x, y, z)
    # TODO: recompute view_matrix and proj_matrix only in debug mode
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=self._cam_roll,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px1, _, _) = p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=self.renderer)
    rgb_array = np.array(px1)
    rgb_array_res = rgb_array[:, :, :3]
    # if first person view, then stack the observation from the car camera
    if self.fpv:
        # move camera
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(self.robot_pos[0] - 0.25, self.robot_pos[1], 0.15),
            distance=0.3,
            yaw=self._cam_yaw,
            pitch=-17,
            roll=self._cam_roll,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=90, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        # get and stack image
        (_, _, px1, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=self.renderer)
        rgb_array = np.array(px1)
        rgb_array_res = np.dstack([rgb_array_res, rgb_array[:, :, :3]])
    return rgb_array_res
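Because the first-person-view branch stacks the two RGB frames along the channel axis with np.dstack, the returned observation has shape (RENDER_HEIGHT, RENDER_WIDTH, 6). A downstream consumer can split it back into the two views by slicing channels; a minimal sketch with an assumed 128x128 resolution:

import numpy as np

# Placeholder for the (H, W, 6) array returned by render() when fpv is enabled;
# the 128x128 size is an assumption for illustration.
obs = np.zeros((128, 128, 6), dtype=np.uint8)

static_view = obs[:, :, :3]  # channels 0-2: the default camera
fpv_view = obs[:, :, 3:]     # channels 3-5: the first-person (car) camera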
Example 3: main
# Required module: import pybullet [as alias]
# Or: from pybullet import readUserDebugParameter [as alias]
def main(joint_control, arm, random_policy):
    use_IK = 0 if joint_control else 1
    env = iCubReachGymEnv(renders=True, control_arm=arm, use_IK=use_IK, control_orientation=1, obj_pose_rnd_std=0.05)
    env.reset()
    motorsIds = []
    if not random_policy:
        if use_IK:
            dv = 1
            motorsIds.append(p.addUserDebugParameter("lhPosX", -dv, dv, 0.0))
            motorsIds.append(p.addUserDebugParameter("lhPosY", -dv, dv, 0.0))
            motorsIds.append(p.addUserDebugParameter("lhPosZ", -dv, dv, 0.0))
            motorsIds.append(p.addUserDebugParameter("lhRollx", -dv, dv, 0.0))
            motorsIds.append(p.addUserDebugParameter("lhPitchy", -dv, dv, 0.0))
            motorsIds.append(p.addUserDebugParameter("lhYawz", -dv, dv, 0.0))
        else:
            dv = 1
            joints_idx = env._robot._joints_to_control
            for j in joints_idx:
                info = p.getJointInfo(env._robot.robot_id, j)
                jointName = info[1]
                motorsIds.append(p.addUserDebugParameter(jointName.decode("utf-8"), -dv, dv, 0.0))
    done = False
    for t in range(10000000):
        # env.render()
        if not random_policy:
            action = []
            for motorId in motorsIds:
                action.append(p.readUserDebugParameter(motorId))
        else:
            action = env.action_space.sample()
        state, reward, done, info = env.step(action)
        if t % 100 == 0:
            print("reward ", reward)
        if done:
            print("done ", done)
            env.reset()
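In the joint-control branch above, each slider is named after its joint using p.getJointInfo, whose returned tuple holds the joint name as bytes at index 1 and the joint type at index 2. The same pattern works for any pybullet body; below is a self-contained sketch using a sample URDF from pybullet_data (the robot choice, slider range, and position control are assumptions for illustration, not part of the example above).

import pybullet as p
import pybullet_data

p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
robot_id = p.loadURDF("r2d2.urdf")

# One slider per non-fixed joint, labelled with the joint's name.
sliders = []
for j in range(p.getNumJoints(robot_id)):
    info = p.getJointInfo(robot_id, j)
    joint_name = info[1].decode("utf-8")  # index 1: joint name (bytes)
    joint_type = info[2]                  # index 2: joint type
    if joint_type != p.JOINT_FIXED:
        sliders.append((j, p.addUserDebugParameter(joint_name, -1.0, 1.0, 0.0)))

while True:
    # Drive each joint toward its slider's current value.
    for joint_index, slider_id in sliders:
        target = p.readUserDebugParameter(slider_id)
        p.setJointMotorControl2(robot_id, joint_index, p.POSITION_CONTROL, targetPosition=target)
    p.stepSimulation()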
Example 4: main
# Required module: import pybullet [as alias]
# Or: from pybullet import readUserDebugParameter [as alias]
def main(cart_control, random_policy):
    use_IK = 1 if cart_control else 0
    env = pandaPushGymGoalEnv(renders=True, use_IK=use_IK, obj_pose_rnd_std=0.0)
    env.reset()
    motorsIds = []
    if not random_policy:
        if use_IK:
            dv = 1
            motorsIds.append(p.addUserDebugParameter("lhPosX", -dv, dv, 0.0))
            motorsIds.append(p.addUserDebugParameter("lhPosY", -dv, dv, 0.0))
            motorsIds.append(p.addUserDebugParameter("lhPosZ", -dv, dv, 0.0))
            motorsIds.append(p.addUserDebugParameter("lhRollx", -dv, dv, 0.0))
            motorsIds.append(p.addUserDebugParameter("lhPitchy", -dv, dv, 0.0))
            motorsIds.append(p.addUserDebugParameter("lhYawz", -dv, dv, 0.0))
        else:
            dv = 1
            joint_idxs = tuple(env._robot._joint_name_to_ids.values())
            for j in joint_idxs[:env._robot.joint_action_space]:
                info = p.getJointInfo(env._robot.robot_id, j)
                jointName = info[1]
                motorsIds.append(p.addUserDebugParameter(jointName.decode("utf-8"), -dv, dv, 0.0))
    done = False
    for t in range(10000000):
        # env.render()
        if not random_policy:
            action = []
            for motorId in motorsIds:
                action.append(p.readUserDebugParameter(motorId))
        else:
            action = env.action_space.sample()
        state, reward, done, info = env.step(action)
        if t % 100 == 0:
            print("reward ", reward)
        if done:
            print("done ", done)
            env.reset()