This article collects typical usage examples of the pybullet.computeViewMatrixFromYawPitchRoll method in Python. If you are wondering how pybullet.computeViewMatrixFromYawPitchRoll is used in practice, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the pybullet module.
Below are 14 code examples of the pybullet.computeViewMatrixFromYawPitchRoll method, sorted by popularity by default.
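Before the examples, here is a minimal, self-contained sketch of how computeViewMatrixFromYawPitchRoll is typically combined with computeProjectionMatrixFOV and getCameraImage to render an offscreen frame. The target position, camera angles, and image size below are illustrative values, not taken from any of the examples that follow.

import numpy as np
import pybullet as p

# Connect headless and build the camera matrices.
p.connect(p.DIRECT)
view_matrix = p.computeViewMatrixFromYawPitchRoll(
    cameraTargetPosition=[0, 0, 0],  # point the camera orbits around (illustrative)
    distance=2.0,                    # distance from the target
    yaw=45,                          # rotation around the up axis, in degrees
    pitch=-30,                       # elevation angle, in degrees
    roll=0,
    upAxisIndex=2)                   # 2 means the Z axis points up
proj_matrix = p.computeProjectionMatrixFOV(
    fov=60, aspect=320 / 240, nearVal=0.1, farVal=100.0)

# Render one 320x240 frame and keep only the RGB channels.
width, height, rgba, depth, seg = p.getCameraImage(
    width=320, height=240,
    viewMatrix=view_matrix, projectionMatrix=proj_matrix,
    renderer=p.ER_TINY_RENDERER)
rgb = np.reshape(rgba, (height, width, 4))[:, :, :3]
p.disconnect()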
Example 1: _render
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def _render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    base_pos, orn = self._p.getBasePositionAndOrientation(self._racecar.racecarUniqueId)
    view_matrix = self._p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = self._p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = self._p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 2: _render
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def _render(self, mode, close):
    base_pos = [0, 0, 0]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos = self.robot.body_xyz
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL
    )
    # getCameraImage returns pixels row-major: height rows of width columns.
    rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
    if close:
        return None
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 3: render_physics
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def render_physics(self):
    robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=robot_pos,
        distance=self.tracking_camera["distance"],
        yaw=self.tracking_camera["yaw"],
        pitch=self.tracking_camera["pitch"],
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    with Profiler("render physics: Get camera image"):
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_TINY_RENDERER
        )
    # getCameraImage returns pixels row-major: height rows of width columns.
    rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 4: render_map
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def render_map(self):
    base_pos = [0, 0, -3]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos[0] = self.robot.body_xyz[0]
            base_pos[1] = self.robot.body_xyz[1]
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=35,
        yaw=0,
        pitch=-89,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL
    )
    # getCameraImage returns pixels row-major: height rows of width columns.
    rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 5: _render
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def _render(self, mode, close):
    base_pos = [0, 0, 0]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos = self.robot.body_xyz
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL
    )
    rgb_array = np.array(px)
    if close:
        return None
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 6: render_physics
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def render_physics(self):
    robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=robot_pos,
        distance=self.tracking_camera["distance"],
        yaw=self.tracking_camera["yaw"],
        pitch=self.tracking_camera["pitch"],
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    with Profiler("render physics: Get camera image"):
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_TINY_RENDERER
        )
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 7: render_map
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def render_map(self):
    base_pos = [0, 0, -3]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos[0] = self.robot.body_xyz[0]
            base_pos[1] = self.robot.body_xyz[1]
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=35,
        yaw=0,
        pitch=-89,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL
    )
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 8: getExtendedObservation
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def getExtendedObservation(self):
    #camEyePos = [0.03,0.236,0.54]
    #distance = 1.06
    #pitch=-56
    #yaw = 258
    #roll=0
    #upAxisIndex = 2
    #camInfo = p.getDebugVisualizerCamera()
    #print("width,height")
    #print(camInfo[0])
    #print(camInfo[1])
    #print("viewMatrix")
    #print(camInfo[2])
    #print("projectionMatrix")
    #print(camInfo[3])
    #viewMat = camInfo[2]
    #viewMat = p.computeViewMatrixFromYawPitchRoll(camEyePos,distance,yaw, pitch,roll,upAxisIndex)
    viewMat = [-0.5120397806167603, 0.7171027660369873, -0.47284144163131714, 0.0, -0.8589617609977722, -0.42747554183006287, 0.28186774253845215, 0.0, 0.0, 0.5504802465438843, 0.8348482847213745, 0.0, 0.1925382763147354, -0.24935829639434814, -0.4401884973049164, 1.0]
    #projMatrix = camInfo[3]#[0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
    projMatrix = [0.75, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
    img_arr = p.getCameraImage(width=self._width, height=self._height, viewMatrix=viewMat, projectionMatrix=projMatrix)
    rgb = img_arr[2]
    np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
    self._observation = np_img_arr
    return self._observation
Example 9: _reset
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def _reset(self):
    """Environment reset called at the beginning of an episode.
    """
    # Set the camera settings.
    look = [0.23, 0.2, 0.54]
    distance = 1.
    pitch = -56 + self._cameraRandom*np.random.uniform(-3, 3)
    yaw = 245 + self._cameraRandom*np.random.uniform(-3, 3)
    roll = 0
    self._view_matrix = p.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    fov = 20. + self._cameraRandom*np.random.uniform(-2, 2)
    aspect = self._width / self._height
    near = 0.01
    far = 10
    self._proj_matrix = p.computeProjectionMatrixFOV(
        fov, aspect, near, far)
    self._attempted_grasp = False
    self._env_step = 0
    self.terminated = 0
    p.resetSimulation()
    p.setPhysicsEngineParameter(numSolverIterations=150)
    p.setTimeStep(self._timeStep)
    p.loadURDF(os.path.join(self._urdfRoot, "plane.urdf"), [0, 0, -1])
    p.loadURDF(os.path.join(self._urdfRoot, "table/table.urdf"), 0.5000000, 0.00000, -.820000, 0.000000, 0.000000, 0.0, 1.0)
    p.setGravity(0, 0, -10)
    self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
    self._envStepCounter = 0
    p.stepSimulation()
    # Choose the objects in the bin.
    urdfList = self._get_random_object(
        self._numObjects, self._isTest)
    self._objectUids = self._randomly_place_objects(urdfList)
    self._observation = self._get_observation()
    return np.array(self._observation)
Example 10: foo
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
# Note: the original snippet referenced focal_point, focal_dist, yaw, pitch, roll
# and aspect from an enclosing scope; they are made explicit parameters here so
# the function is self-contained.
def foo(focal_point, focal_dist, yaw, pitch, roll, aspect):
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=focal_point,
        distance=focal_dist,
        yaw=yaw,
        pitch=pitch,
        roll=roll,
        upAxisIndex=2)
    projection_matrix = p.computeProjectionMatrixFOV(
        fov=60,
        aspect=aspect,
        nearVal=0.01,
        farVal=1000.0)
    return view_matrix, projection_matrix
Example 11: _reset_camera
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def _reset_camera(self):
    look = [0., 0., 0.]
    distance = 3.
    pitch = -30 + np.random.uniform(-10, 10)
    yaw = np.random.uniform(-180, 180)
    roll = 0
    self._view_matrix = pybullet.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    fov = 30
    aspect = self._width / self._height
    near = 0.1
    far = 10
    self._proj_matrix = pybullet.computeProjectionMatrixFOV(
        fov, aspect, near, far)
Example 12: __init__
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def __init__(self, name, target,
             distance,
             roll, pitch, yaw,
             up_idx=2,
             image_width=1024 / 8,
             image_height=768 / 8,
             fov=45,
             near_plane=0.1,
             far_plane=10):
    '''
    Create camera matrix for a particular position in the simulation. Task
    definitions should produce these and
    '''
    self.name = name
    self.matrix = np.array(pb.computeViewMatrixFromYawPitchRoll(
        target, distance, yaw=yaw, pitch=pitch, roll=roll, upAxisIndex=up_idx))
    self.image_height = image_height
    self.image_width = image_width
    self.aspect_ratio = self.image_width / self.image_height
    self.fov = fov
    self.near_plane = near_plane
    self.far_plane = far_plane
    self.projection_matrix = np.array(pb.computeProjectionMatrixFOV(
        self.fov,
        self.aspect_ratio,
        self.near_plane,
        self.far_plane))
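The matrices built in the constructor above are meant to be fed to pb.getCameraImage later. A minimal, hypothetical companion method is sketched below; the capture name, the integer casts, and the .tolist() conversions are assumptions for illustration and are not part of the original snippet.

# Hypothetical companion method for the camera class above (not in the original
# snippet): render one frame using the stored view and projection matrices.
def capture(self):
    _, _, rgba, _, _ = pb.getCameraImage(
        width=int(self.image_width),    # 1024 / 8 is a float in Python 3, so cast
        height=int(self.image_height),
        viewMatrix=self.matrix.tolist(),
        projectionMatrix=self.projection_matrix.tolist(),
        renderer=pb.ER_TINY_RENDERER)
    # Drop the alpha channel and return an (H, W, 3) RGB image.
    return np.reshape(rgba, (int(self.image_height), int(self.image_width), 4))[:, :, :3]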
Example 13: render
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    camera_target_pos = self.camera_target_pos
    if self.debug:
        self._cam_dist = p.readUserDebugParameter(self.dist_slider)
        self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
        self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
        x = p.readUserDebugParameter(self.x_slider)
        y = p.readUserDebugParameter(self.y_slider)
        z = p.readUserDebugParameter(self.z_slider)
        camera_target_pos = (x, y, z)
        # self._cam_roll = p.readUserDebugParameter(self.roll_slider)
    # TODO: recompute view_matrix and proj_matrix only in debug mode
    view_matrix1 = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=self._cam_roll,
        upAxisIndex=2)
    proj_matrix1 = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px1, _, _) = p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix1,
        projectionMatrix=proj_matrix1, renderer=self.renderer)
    rgb_array1 = np.array(px1)
    if self.multi_view:
        # adding a second camera on the other side of the robot
        view_matrix2 = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(0.316, 0.316, -0.105),
            distance=1.05,
            yaw=32,
            pitch=-13,
            roll=0,
            upAxisIndex=2)
        proj_matrix2 = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        (_, _, px2, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix2,
            projectionMatrix=proj_matrix2, renderer=self.renderer)
        rgb_array2 = np.array(px2)
        rgb_array_res = np.concatenate((rgb_array1[:, :, :3], rgb_array2[:, :, :3]), axis=2)
    else:
        rgb_array_res = rgb_array1[:, :, :3]
    return rgb_array_res
Example 14: render
# Required import: import pybullet [as alias]
# Or: from pybullet import computeViewMatrixFromYawPitchRoll [as alias]
def render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    camera_target_pos = self.camera_target_pos
    if self.debug:
        self._cam_dist = p.readUserDebugParameter(self.dist_slider)
        self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
        self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
        x = p.readUserDebugParameter(self.x_slider)
        y = p.readUserDebugParameter(self.y_slider)
        z = p.readUserDebugParameter(self.z_slider)
        camera_target_pos = (x, y, z)
    # TODO: recompute view_matrix and proj_matrix only in debug mode
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=self._cam_roll,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px1, _, _) = p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=self.renderer)
    rgb_array = np.array(px1)
    rgb_array_res = rgb_array[:, :, :3]
    # if first person view, then stack the observation from the car camera
    if self.fpv:
        # move camera
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(self.robot_pos[0] - 0.25, self.robot_pos[1], 0.15),
            distance=0.3,
            yaw=self._cam_yaw,
            pitch=-17,
            roll=self._cam_roll,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=90, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        # get and stack image
        (_, _, px1, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=self.renderer)
        rgb_array = np.array(px1)
        rgb_array_res = np.dstack([rgb_array_res, rgb_array[:, :, :3]])
    return rgb_array_res