This article collects typical usage examples of the pybullet.ER_TINY_RENDERER attribute in Python. If you are wondering what pybullet.ER_TINY_RENDERER does, how to use it, or are looking for working examples, the selected code examples below may help. You can also explore further usage examples of the pybullet module this attribute belongs to.
The following shows 5 code examples of the pybullet.ER_TINY_RENDERER attribute, sorted by popularity by default.
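Before the examples, here is a minimal, self-contained sketch (not taken from any of the repositories below) of where pybullet.ER_TINY_RENDERER fits: it selects pybullet's built-in CPU (software) renderer in getCameraImage, which works without an OpenGL context, e.g. in a headless DIRECT connection. The scene setup and camera parameters here are illustrative assumptions.

import pybullet as p
import pybullet_data

# Headless connection: no GUI, so the software renderer is the natural choice.
p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")

view_matrix = p.computeViewMatrix(cameraEyePosition=[0, -2, 1],
                                  cameraTargetPosition=[0, 0, 0],
                                  cameraUpVector=[0, 0, 1])
proj_matrix = p.computeProjectionMatrixFOV(fov=60, aspect=1.0, nearVal=0.1, farVal=10.0)

# ER_TINY_RENDERER renders on the CPU; no GPU/OpenGL context is required.
width, height, rgb, depth, seg = p.getCameraImage(
    128, 128, view_matrix, proj_matrix, renderer=p.ER_TINY_RENDERER)

The same call with renderer=p.ER_BULLET_HARDWARE_OPENGL would use the GPU renderer instead, which is the alternative several of the examples below toggle between.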
Example 1: render
# Required module: import pybullet [as alias]
# Or: from pybullet import ER_TINY_RENDERER [as alias]
def render(self):
    # Don't re-render if the camera hasn't moved since the last call.
    if np.all(self._rendered_pos == self.camera_pos) and np.all(self._rendered_rot == self.camera_rot):
        return self._rendered

    target = self.camera_pos + np.dot(self.camera_rot, [0, 0, 1.0, 1.0])[0:3]
    up = np.dot(self.camera_rot, [0, -1.0, 0, 1.0])[0:3]
    vm = pb.computeViewMatrix(self.camera_pos, target, up)
    i_arr = pb.getCameraImage(self.im_width, self.im_height, vm, self.pm,
                              shadow=0,
                              renderer=pb.ER_TINY_RENDERER)
    # renderer=pb.ER_BULLET_HARDWARE_OPENGL)

    # Record the position of the camera so the cached image can be reused next time.
    self._rendered = i_arr
    self._rendered_pos = self.camera_pos.copy()
    self._rendered_rot = self.camera_rot.copy()
    return i_arr
Example 2: render_physics
# Required module: import pybullet [as alias]
# Or: from pybullet import ER_TINY_RENDERER [as alias]
def render_physics(self):
    robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=robot_pos,
        distance=self.tracking_camera["distance"],
        yaw=self.tracking_camera["yaw"],
        pitch=self.tracking_camera["pitch"],
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    with Profiler("render physics: Get camera image"):
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_TINY_RENDERER
        )
    # getCameraImage returns pixels row-major, i.e. (height, width, channels).
    rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 3: render_physics
# Required module: import pybullet [as alias]
# Or: from pybullet import ER_TINY_RENDERER [as alias]
def render_physics(self):
    robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=robot_pos,
        distance=self.tracking_camera["distance"],
        yaw=self.tracking_camera["yaw"],
        pitch=self.tracking_camera["pitch"],
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    with Profiler("render physics: Get camera image"):
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height,
            viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_TINY_RENDERER
        )
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 4: capture_image
# Required module: import pybullet [as alias]
# Or: from pybullet import ER_TINY_RENDERER [as alias]
def capture_image(self):
    width, height, im, depth, seg = p.getCameraImage(
        64, 64, list(self.view_matrix), list(self.projection_matrix),
        renderer=p.ER_TINY_RENDERER)
    self.depth = depth
    im = np.array(im).reshape([height, width, -1])
    return im[:, :, :3]
Example 5: get_image
# Required module: import pybullet [as alias]
# Or: from pybullet import ER_TINY_RENDERER [as alias]
def get_image(cam_pos, cam_orientation):
    """
    Arguments
        cam_pos: camera position
        cam_orientation: camera orientation in quaternion
    """
    width = 160
    height = 120
    fov = 90
    aspect = width / height
    near = 0.001
    far = 5
    if use_maximal_coordinates:
        # cam_orientation is unreliable when bt_rigid_body is enabled,
        # so look at (0.0, 0.0, 0.0) instead;
        # this does not affect performance
        cam_pos_offset = cam_pos + np.array([0.0, 0.0, 0.3])
        target_pos = np.array([0.0, 0.0, 0.0])
    else:
        # camera pos, look at, camera up direction
        rot_matrix = p.getMatrixFromQuaternion(cam_orientation)
        # offset to base pos
        cam_pos_offset = cam_pos + np.dot(
            np.array(rot_matrix).reshape(3, 3), np.array([0.1, 0.0, 0.3]))
        target_pos = cam_pos_offset + np.dot(
            np.array(rot_matrix).reshape(3, 3), np.array([-1.0, 0.0, 0.0]))
    # compute view matrix
    view_matrix = p.computeViewMatrix(cam_pos_offset, target_pos, [0, 0, 1])
    projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)
    # Get depth values using the OpenGL renderer
    if enable_open_gl_rendering:
        w, h, rgb, depth, seg = p.getCameraImage(
            width,
            height,
            view_matrix,
            projection_matrix,
            shadow=True,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
    else:
        w, h, rgb, depth, seg = p.getCameraImage(
            width,
            height,
            view_matrix,
            projection_matrix,
            shadow=True,
            renderer=p.ER_TINY_RENDERER)
    # depth_buffer = np.reshape(images[3], [width, height])
    # depth = far * near / (far - (far - near) * depth_buffer)
    # seg = np.reshape(images[4], [width, height]) * 1. / 255.
    return rgb
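The commented-out lines at the end of Example 5 hint at how the non-linear depth buffer returned by getCameraImage can be converted into metric depth. A minimal sketch of that conversion, assuming the same near/far values used above (0.001 and 5) and a depth buffer in the usual OpenGL [0, 1] range:

import numpy as np

def depth_buffer_to_metric(depth_buffer, near=0.001, far=5.0):
    # Standard OpenGL depth linearization, as in the commented-out formula above.
    depth_buffer = np.asarray(depth_buffer, dtype=np.float64)
    return far * near / (far - (far - near) * depth_buffer)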