This article collects typical usage examples of the pybullet.computeProjectionMatrixFOV method in Python. If you are wondering what pybullet.computeProjectionMatrixFOV does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore the other methods exposed by the pybullet module.
The sections below show 15 code examples of pybullet.computeProjectionMatrixFOV, sorted by popularity by default.
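Before the collected examples, here is a minimal, self-contained sketch of how computeProjectionMatrixFOV is typically paired with computeViewMatrix and getCameraImage to render an image. The scene, eye position, resolution, and clipping distances are illustrative assumptions, not taken from any of the examples that follow.

import pybullet as p
import pybullet_data

cid = p.connect(p.DIRECT)  # headless physics server
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")

# View matrix: camera one metre from the origin, z axis pointing up.
view_matrix = p.computeViewMatrix(
    cameraEyePosition=[1, 0, 0.5],
    cameraTargetPosition=[0, 0, 0],
    cameraUpVector=[0, 0, 1])

# Projection matrix: field of view angle in degrees, width/height aspect
# ratio, and near/far clipping distances.
proj_matrix = p.computeProjectionMatrixFOV(
    fov=60, aspect=640 / 480, nearVal=0.1, farVal=100.0)

width, height, rgba, depth, seg = p.getCameraImage(
    width=640, height=480,
    viewMatrix=view_matrix,
    projectionMatrix=proj_matrix)

p.disconnect(cid)

The returned rgba buffer has height rows and width columns, which is why several of the examples below reshape it to (height, width, 4) before dropping the alpha channel.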
Example 1: take_picture
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def take_picture(renderer, width=256, height=256, scale=0.001, conn_id=None):
    view_matrix = p.computeViewMatrix(
        [0, 0, -1], [0, 0, 0], [0, -1, 0], physicsClientId=conn_id
    )
    proj_matrix = p.computeProjectionMatrixFOV(
        20, 1, 0.05, 2, physicsClientId=conn_id
    )
    w, h, rgba, depth, mask = p.getCameraImage(
        width=width,
        height=height,
        projectionMatrix=proj_matrix,
        viewMatrix=view_matrix,
        renderer=renderer,
        physicsClientId=conn_id,
    )
    return rgba
Example 2: _render
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def _render(self, mode, close):
    base_pos = [0, 0, 0]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos = self.robot.body_xyz
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL
    )
    # getCameraImage returns pixels row by row: height rows of width columns.
    rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
    if close:
        return None
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 3: render_physics
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def render_physics(self):
    robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=robot_pos,
        distance=self.tracking_camera["distance"],
        yaw=self.tracking_camera["yaw"],
        pitch=self.tracking_camera["pitch"],
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    with Profiler("render physics: Get camera image"):
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_TINY_RENDERER
        )
    # Rows correspond to image height, columns to image width.
    rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 4: render_map
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def render_map(self):
    base_pos = [0, 0, -3]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos[0] = self.robot.body_xyz[0]
            base_pos[1] = self.robot.body_xyz[1]
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=35,
        yaw=0,
        pitch=-89,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL
    )
    rgb_array = np.array(px).reshape((self._render_height, self._render_width, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 5: _render
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def _render(self, mode, close):
    base_pos = [0, 0, 0]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos = self.robot.body_xyz
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL
    )
    rgb_array = np.array(px)
    if close:
        return None
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 6: render_physics
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def render_physics(self):
    robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=robot_pos,
        distance=self.tracking_camera["distance"],
        yaw=self.tracking_camera["yaw"],
        pitch=self.tracking_camera["pitch"],
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    with Profiler("render physics: Get camera image"):
        (_, _, px, _, _) = p.getCameraImage(
            width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix,
            renderer=p.ER_TINY_RENDERER
        )
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 7: render_map
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def render_map(self):
    base_pos = [0, 0, -3]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos[0] = self.robot.body_xyz[0]
            base_pos[1] = self.robot.body_xyz[1]
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=35,
        yaw=0,
        pitch=-89,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL
    )
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
Example 8: _reset
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def _reset(self):
    """Environment reset called at the beginning of an episode."""
    # Set the camera settings.
    look = [0.23, 0.2, 0.54]
    distance = 1.
    pitch = -56 + self._cameraRandom * np.random.uniform(-3, 3)
    yaw = 245 + self._cameraRandom * np.random.uniform(-3, 3)
    roll = 0
    self._view_matrix = p.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    fov = 20. + self._cameraRandom * np.random.uniform(-2, 2)
    aspect = self._width / self._height
    near = 0.01
    far = 10
    self._proj_matrix = p.computeProjectionMatrixFOV(
        fov, aspect, near, far)
    self._attempted_grasp = False
    self._env_step = 0
    self.terminated = 0
    p.resetSimulation()
    p.setPhysicsEngineParameter(numSolverIterations=150)
    p.setTimeStep(self._timeStep)
    p.loadURDF(os.path.join(self._urdfRoot, "plane.urdf"), [0, 0, -1])
    p.loadURDF(os.path.join(self._urdfRoot, "table/table.urdf"),
               0.5000000, 0.00000, -.820000, 0.000000, 0.000000, 0.0, 1.0)
    p.setGravity(0, 0, -10)
    self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
    self._envStepCounter = 0
    p.stepSimulation()
    # Choose the objects in the bin.
    urdfList = self._get_random_object(self._numObjects, self._isTest)
    self._objectUids = self._randomly_place_objects(urdfList)
    self._observation = self._get_observation()
    return np.array(self._observation)
Example 9: foo
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def foo():
    # Note: focal_point, focal_dist, yaw, pitch, roll and aspect must be
    # defined elsewhere; this snippet only builds the two camera matrices.
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=focal_point,
        distance=focal_dist,
        yaw=yaw,
        pitch=pitch,
        roll=roll,
        upAxisIndex=2)
    projection_matrix = p.computeProjectionMatrixFOV(
        fov=60,
        aspect=aspect,
        nearVal=0.01,
        farVal=1000.0)
Example 10: __init__
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def __init__(self, im_width, im_height, fov, near_plane, far_plane, DEBUG=False):
    self.im_width = im_width
    self.im_height = im_height
    self.fov = fov
    self.near_plane = near_plane
    self.far_plane = far_plane
    aspect = self.im_width / self.im_height
    self.pm = pb.computeProjectionMatrixFOV(fov, aspect, near_plane, far_plane)
    self.camera_pos = np.array([0, 0, 0.5])
    self.camera_rot = self._rotation_matrix([0, np.pi, 0])
    self.objects = []
    if DEBUG:
        self.cid = pb.connect(pb.GUI)
    else:
        self.cid = pb.connect(pb.DIRECT)
    pb.setAdditionalSearchPath(pybullet_data.getDataPath())
    pb.setGravity(0, 0, -10)
    self.draw_camera_pos()
    self._rendered = None
    self._rendered_pos = None
    self._rendered_rot = None
Example 11: _reset_camera
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def _reset_camera(self):
    look = [0., 0., 0.]
    distance = 3.
    pitch = -30 + np.random.uniform(-10, 10)
    yaw = np.random.uniform(-180, 180)
    roll = 0
    self._view_matrix = pybullet.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    fov = 30
    aspect = self._width / self._height
    near = 0.1
    far = 10
    self._proj_matrix = pybullet.computeProjectionMatrixFOV(
        fov, aspect, near, far)
Example 12: __init__
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def __init__(self, name, target,
             distance,
             roll, pitch, yaw,
             up_idx=2,
             image_width=1024 / 8,
             image_height=768 / 8,
             fov=45,
             near_plane=0.1,
             far_plane=10):
    '''
    Create camera matrix for a particular position in the simulation. Task
    definitions should produce these and
    '''
    self.name = name
    self.matrix = np.array(pb.computeViewMatrixFromYawPitchRoll(
        target, distance, yaw=yaw, pitch=pitch, roll=roll, upAxisIndex=up_idx))
    self.image_height = image_height
    self.image_width = image_width
    self.aspect_ratio = self.image_width / self.image_height
    self.fov = fov
    self.near_plane = near_plane
    self.far_plane = far_plane
    self.projection_matrix = np.array(pb.computeProjectionMatrixFOV(
        self.fov,
        self.aspect_ratio,
        self.near_plane,
        self.far_plane))
Example 13: __init__
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def __init__(self,
             n_actions,     # Dimension of action vector.
             n_substeps,    # Number of simulation steps to do in every env step.
             observation_type="low_dim",
             done_after=float("inf"),
             use_gui=False,
             frame_memory_len=0):
    self.n_substeps = n_substeps
    self.metadata = {
        'render.modes': ['rgbd_array'],
        'video.frames_per_second': int(np.round(1.0 / self.dt))
    }
    self.numSteps = 0
    if use_gui:
        physics_client = p.connect(p.GUI)
    else:
        physics_client = p.connect(p.DIRECT)
    self.p = PhysClientWrapper(p, physics_client)
    self.p.setAdditionalSearchPath(pybullet_data.getDataPath())
    self.doneAfter = done_after
    self.observation_type = observation_type
    self.seed()
    self.frameMemoryLen = frame_memory_len
    if frame_memory_len:
        self.frameMemory = deque(maxlen=frame_memory_len)
    self.viewMatrix = p.computeViewMatrix([-1.05, 0, 0.68], [0.1, 0, 0],
                                          [-0.5, 0, 1])
    self.projMatrix = p.computeProjectionMatrixFOV(
        fov=45, aspect=4. / 3., nearVal=0.01, farVal=2.5)
    self.light = {
        "diffuse": 0.4,
        "ambient": 0.5,
        "spec": 0.2,
        "dir": [10, 10, 100],
        "col": [1, 1, 1]
    }
    self._env_setup(initial_qpos=None)
    self.action_space = spaces.Box(
        -1, 1, shape=(n_actions, ), dtype='float32')
    self.pixels_space = spaces.Box(
        -np.inf, np.inf, shape=(84, 84, 3), dtype='float32')
    if observation_type == "low_dim":
        self.observation_space = self.low_dim_space
    elif observation_type == "pixels":
        self.observation_space = self.pixels_space
    elif observation_type == "pixels_stacked":
        self.observation_space = spaces.Box(
            -np.inf, np.inf, shape=(84, 84, 12), dtype='float32')
    elif observation_type == "pixels_depth":
        self.observation_space = spaces.Box(
            -np.inf, np.inf, shape=(84, 84), dtype='float32')
    else:
        raise Exception("Unimplemented observation_type")
Example 14: __init__
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def __init__(self,
             display=True,
             data_dir='./data',
             verbose=False,
             key=None,
             camera_params={}):
    self._pe = BulletPhysicsEngine()
    self._display = display
    self._data_dir = data_dir
    self._verbose = verbose
    self._bodies = None
    self._robots = None
    self._time_step = None
    self._ctrl_listeners = []
    self._key_dict = None
    self._key_act_dict = None
    self._modifier_dict = None
    # Camera parameters
    fov = camera_params.get('fov', 60)
    aspect = camera_params.get('aspect', 1)
    near = camera_params.get('near', 0.02)
    far = camera_params.get('far', 1)
    view_matrix = camera_params.get(
        'view_matrix',
        [[0.0, -0.4, 1.4], [0, 0.0, 0], [1, 0, 0]]
    )
    self.view_matrix = p.computeViewMatrix(*view_matrix)
    self.projection_matrix = p.computeProjectionMatrixFOV(
        fov, aspect, near, far)
    self.video_log_key = 0
    # Connect to the simulation
    # TODO(Kuan): If VR
    # p.connect(p.SHARED_MEMORY)
    if self._display:
        p.connect(p.GUI)
    else:
        if key is None:
            p.connect(p.DIRECT)
        else:
            p.connect(p.DIRECT, key=key)
Example 15: render
# Required module: import pybullet [as alias]
# Or: from pybullet import computeProjectionMatrixFOV [as alias]
def render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    camera_target_pos = self.camera_target_pos
    if self.debug:
        self._cam_dist = p.readUserDebugParameter(self.dist_slider)
        self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
        self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
        x = p.readUserDebugParameter(self.x_slider)
        y = p.readUserDebugParameter(self.y_slider)
        z = p.readUserDebugParameter(self.z_slider)
        camera_target_pos = (x, y, z)
        # self._cam_roll = p.readUserDebugParameter(self.roll_slider)
    # TODO: recompute view_matrix and proj_matrix only in debug mode
    view_matrix1 = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=self._cam_roll,
        upAxisIndex=2)
    proj_matrix1 = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px1, _, _) = p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix1,
        projectionMatrix=proj_matrix1, renderer=self.renderer)
    rgb_array1 = np.array(px1)
    if self.multi_view:
        # adding a second camera on the other side of the robot
        view_matrix2 = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(0.316, 0.316, -0.105),
            distance=1.05,
            yaw=32,
            pitch=-13,
            roll=0,
            upAxisIndex=2)
        proj_matrix2 = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        (_, _, px2, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix2,
            projectionMatrix=proj_matrix2, renderer=self.renderer)
        rgb_array2 = np.array(px2)
        rgb_array_res = np.concatenate((rgb_array1[:, :, :3], rgb_array2[:, :, :3]), axis=2)
    else:
        rgb_array_res = rgb_array1[:, :, :3]
    return rgb_array_res