

Python pybullet.computeViewMatrix Method Code Examples

This article collects typical usage examples of the pybullet.computeViewMatrix method in Python. If you are wondering what pybullet.computeViewMatrix does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the pybullet module, where this method lives.


The sections below present 7 code examples of the pybullet.computeViewMatrix method, sorted by popularity by default.
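
Before diving into the project snippets, here is a minimal, self-contained sketch of the typical call chain: computeViewMatrix builds a 4x4 view matrix from an eye position, a target position and an up vector, and it is usually paired with computeProjectionMatrixFOV and getCameraImage. The camera pose and image size below are arbitrary illustration values.

import pybullet as p

# Connect headless, build view/projection matrices and render one frame.
client = p.connect(p.DIRECT)

view_matrix = p.computeViewMatrix(
    cameraEyePosition=[0, -1, 1],      # where the camera sits
    cameraTargetPosition=[0, 0, 0],    # the point it looks at
    cameraUpVector=[0, 0, 1],          # world "up" direction for the camera
    physicsClientId=client)
proj_matrix = p.computeProjectionMatrixFOV(
    fov=60, aspect=1.0, nearVal=0.01, farVal=10,
    physicsClientId=client)

width, height, rgba, depth, seg = p.getCameraImage(
    width=128, height=128,
    viewMatrix=view_matrix,
    projectionMatrix=proj_matrix,
    renderer=p.ER_TINY_RENDERER,
    physicsClientId=client)

p.disconnect(client)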

Example 1: take_picture

# Required module: import pybullet [as alias]
# Or: from pybullet import computeViewMatrix [as alias]
def take_picture(renderer, width=256, height=256, scale=0.001, conn_id=None):
    view_matrix = p.computeViewMatrix(
        [0, 0, -1], [0, 0, 0], [0, -1, 0], physicsClientId=conn_id
    )
    proj_matrix = p.computeProjectionMatrixFOV(
        20, 1, 0.05, 2, physicsClientId=conn_id
    )
    w, h, rgba, depth, mask = p.getCameraImage(
        width=width,
        height=height,
        projectionMatrix=proj_matrix,
        viewMatrix=view_matrix,
        renderer=renderer,
        physicsClientId=conn_id,
    )
    return rgba 
Developer: hassony2, Project: obman_train, Lines: 18, Source: simulate.py

Example 2: render

# Required module: import pybullet [as alias]
# Or: from pybullet import computeViewMatrix [as alias]
def render(self):
        if np.all(self._rendered_pos == self.camera_pos) and np.all(self._rendered_rot == self.camera_rot):
            return self._rendered

        target = self.camera_pos + np.dot(self.camera_rot, [0, 0, 1.0, 1.0])[0:3]
        up = np.dot(self.camera_rot, [0, -1.0, 0, 1.0])[0:3]
        vm = pb.computeViewMatrix(self.camera_pos, target, up)

        i_arr = pb.getCameraImage(self.im_width, self.im_height, vm, self.pm,
                                  shadow=0,
                                  renderer=pb.ER_TINY_RENDERER)
                                  # renderer=pb.ER_BULLET_HARDWARE_OPENGL)

        # Record the position of the camera, and don't re-render if it hasn't moved.
        self._rendered = i_arr
        self._rendered_pos = self.camera_pos.copy()
        self._rendered_rot = self.camera_rot.copy()

        return i_arr 
Developer: dougsm, Project: mvp_grasp, Lines: 21, Source: renderer.py

Example 3: _getCameraImage

# Required module: import pybullet [as alias]
# Or: from pybullet import computeViewMatrix [as alias]
def _getCameraImage(self):
        """
        INTERNAL METHOD, Computes the OpenGL virtual camera image. The
        resolution and the projection matrix have to be computed before calling
        this method, or it will crash

        Returns:
            camera_image - The camera image of the OpenGL virtual camera
        """
        _, _, _, _, pos_world, q_world = pybullet.getLinkState(
            self.robot_model,
            self.camera_link.getParentIndex(),
            computeForwardKinematics=False,
            physicsClientId=self.physics_client)

        rotation = pybullet.getMatrixFromQuaternion(q_world)
        forward_vector = [rotation[0], rotation[3], rotation[6]]
        up_vector = [rotation[2], rotation[5], rotation[8]]

        camera_target = [
            pos_world[0] + forward_vector[0] * 10,
            pos_world[1] + forward_vector[1] * 10,
            pos_world[2] + forward_vector[2] * 10]

        view_matrix = pybullet.computeViewMatrix(
            pos_world,
            camera_target,
            up_vector,
            physicsClientId=self.physics_client)

        with self.resolution_lock:
            camera_image = pybullet.getCameraImage(
                self.resolution.width,
                self.resolution.height,
                view_matrix,
                self.projection_matrix,
                renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,
                flags=pybullet.ER_NO_SEGMENTATION_MASK,
                physicsClientId=self.physics_client)

        return camera_image 
Developer: softbankrobotics-research, Project: qibullet, Lines: 43, Source: camera.py

Example 4: render

# Required module: import pybullet [as alias]
# Or: from pybullet import computeViewMatrix [as alias]
def render(robot):
    pos, rot, _, _, _, _ = p.getLinkState(robot.robot_id, linkIndex=robot.end_eff_idx, computeForwardKinematics=True)
    rot_matrix = p.getMatrixFromQuaternion(rot)
    rot_matrix = np.array(rot_matrix).reshape(3, 3)

    # camera params
    # NOTE: with these intrinsics (cx ~ 312, cy ~ 237) the image is presumably
    # 640 wide by 480 high, so width/height may be swapped in the original code.
    height = 640
    width = 480
    fx, fy = 596.6278076171875, 596.6278076171875
    cx, cy = 311.98663330078125, 236.76170349121094
    near, far = 0.1, 10

    camera_vector = rot_matrix.dot((0, 0, 1))
    up_vector = rot_matrix.dot((0, -1, 0))

    camera_eye_pos = np.array(pos)
    camera_target_position = camera_eye_pos + 0.2 * camera_vector

    view_matrix = p.computeViewMatrix(camera_eye_pos, camera_target_position, up_vector)

    proj_matrix = (2.0 * fx / width, 0.0, 0.0, 0.0,
                   0.0, 2.0 * fy / height, 0.0, 0.0,
                   1.0 - 2.0 * cx / width, 2.0 * cy / height - 1.0, (far + near) / (near - far), -1.0,
                   0.0, 0.0, 2.0 * far * near / (near - far), 0.0)

    p.getCameraImage(width=width, height=height, viewMatrix=view_matrix, projectionMatrix=proj_matrix,
                     renderer=p.ER_BULLET_HARDWARE_OPENGL)  # renderer=self._p.ER_TINY_RENDERER) 
Developer: robotology-playground, Project: pybullet-robot-envs, Lines: 29, Source: helloworld_panda.py
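
The flattened tuple in this example is an OpenGL-style projection matrix built directly from the pinhole intrinsics (fx, fy, cx, cy), laid out in the column-major order that pybullet expects, which is why it reads like the transpose of the usual matrix. Below is a small sketch of the same construction with numpy; the helper name is illustrative, not part of the original project.

import numpy as np

def intrinsics_to_projection(fx, fy, cx, cy, width, height, near, far):
    # 4x4 OpenGL-style projection matrix written in conventional (row-major) form.
    proj = np.array([
        [2.0 * fx / width, 0.0, 1.0 - 2.0 * cx / width, 0.0],
        [0.0, 2.0 * fy / height, 2.0 * cy / height - 1.0, 0.0],
        [0.0, 0.0, (far + near) / (near - far), 2.0 * far * near / (near - far)],
        [0.0, 0.0, -1.0, 0.0],
    ])
    # pybullet/OpenGL expect the 16 values flattened column by column,
    # hence the transpose before flattening.
    return tuple(proj.T.flatten())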

Example 5: __init__

# Required module: import pybullet [as alias]
# Or: from pybullet import computeViewMatrix [as alias]
def __init__(self,
                 n_actions,  # Dimension of action vector.
                 n_substeps,  # Number of simulation steps to do in every env step.
                 observation_type="low_dim",
                 done_after=float("inf"),
                 use_gui=False,
                 frame_memory_len=0):
        self.n_substeps = n_substeps
        self.metadata = {
            'render.modes': ['rgbd_array'],
            'video.frames_per_second': int(np.round(1.0 / self.dt))
        }
        self.numSteps = 0
        if use_gui:
            physics_client = p.connect(p.GUI)
        else:
            physics_client = p.connect(p.DIRECT)
        self.p = PhysClientWrapper(p, physics_client)
        self.p.setAdditionalSearchPath(pybullet_data.getDataPath())
        self.doneAfter = done_after
        self.observation_type = observation_type
        self.seed()
        self.frameMemoryLen = frame_memory_len
        if frame_memory_len:
            self.frameMemory = deque(maxlen=frame_memory_len)

        self.viewMatrix = p.computeViewMatrix([-1.05, 0, 0.68], [0.1, 0, 0],
                                              [-0.5, 0, 1])
        self.projMatrix = p.computeProjectionMatrixFOV(
            fov=45, aspect=4. / 3., nearVal=0.01, farVal=2.5)
        self.light = {
            "diffuse": 0.4,
            "ambient": 0.5,
            "spec": 0.2,
            "dir": [10, 10, 100],
            "col": [1, 1, 1]
        }
        self._env_setup(initial_qpos=None)

        self.action_space = spaces.Box(
            -1, 1, shape=(n_actions, ), dtype='float32')

        self.pixels_space = spaces.Box(
            -np.inf, np.inf, shape=(84, 84, 3), dtype='float32')
        if observation_type == "low_dim":
            self.observation_space = self.low_dim_space
        elif observation_type == "pixels":
            self.observation_space = self.pixels_space
        elif observation_type == "pixels_stacked":
            self.observation_space = spaces.Box(
                -np.inf, np.inf, shape=(84, 84, 12), dtype='float32')
        elif observation_type == "pixels_depth":
            self.observation_space = spaces.Box(
                -np.inf, np.inf, shape=(84, 84), dtype='float32')
        else:
            raise Exception("Unimplemented observation_type") 
Developer: JanMatas, Project: Rainbow_ddpg, Lines: 58, Source: bullet_robot_env.py

Example 6: __init__

# Required module: import pybullet [as alias]
# Or: from pybullet import computeViewMatrix [as alias]
def __init__(self,
                 display=True,
                 data_dir='./data',
                 verbose=False,
                 key=None,
                 camera_params={}):

        self._pe = BulletPhysicsEngine()

        self._display = display
        self._data_dir = data_dir
        self._verbose = verbose

        self._bodies = None
        self._robots = None
        self._time_step = None
        self._ctrl_listeners = []

        self._key_dict = None
        self._key_act_dict = None
        self._modifier_dict = None

        # Camera Parameters
        fov = camera_params.get('fov', 60)
        aspect = camera_params.get('aspect', 1)
        near = camera_params.get('near', 0.02)
        far = camera_params.get('far', 1)
        view_matrix = camera_params.get(
            'view_matrix',
            [[0.0, -0.4, 1.4], [0, 0.0, 0], [1, 0, 0]]
        )


        self.view_matrix = p.computeViewMatrix(*view_matrix)
        self.projection_matrix = p.computeProjectionMatrixFOV(
            fov, aspect, near, far)

        self.video_log_key = 0

        # Connect to the simulation
        # TODO(Kuan): If VR
        #     p.connect(p.SHARED_MEMORY)
        if self._display:
            p.connect(p.GUI)
        else:
            if key is None:
                p.connect(p.DIRECT)
            else:
                p.connect(p.DIRECT, key=key) 
Developer: StanfordVL, Project: NTP-vat-release, Lines: 51, Source: bullet_world.py

Example 7: get_image

# Required module: import pybullet [as alias]
# Or: from pybullet import computeViewMatrix [as alias]
def get_image(cam_pos, cam_orientation):
    """
    Arguments
        cam_pos: camera position
        cam_orientation: camera orientation in quaternion
    """
    width = 160
    height = 120
    fov = 90
    aspect = width / height
    near = 0.001
    far = 5

    if use_maximal_coordinates:
        # cam_orientation has problem when enable bt_rigid_body,
        # looking at 0.0, 0.0, 0.0 instead
        # this does not affect performance
        cam_pos_offset = cam_pos + np.array([0.0, 0.0, 0.3])
        target_pos = np.array([0.0, 0.0, 0.0])
    else:
        # camera pos, look at, camera up direction
        rot_matrix = p.getMatrixFromQuaternion(cam_orientation)
        # offset to base pos
        cam_pos_offset = cam_pos + np.dot(
            np.array(rot_matrix).reshape(3, 3), np.array([0.1, 0.0, 0.3]))
        target_pos = cam_pos_offset + np.dot(
            np.array(rot_matrix).reshape(3, 3), np.array([-1.0, 0.0, 0.0]))
    # compute view matrix
    view_matrix = p.computeViewMatrix(cam_pos_offset, target_pos, [0, 0, 1])
    projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)

    # Get depth values using the OpenGL renderer
    if enable_open_gl_rendering:
        w, h, rgb, depth, seg = p.getCameraImage(
            width,
            height,
            view_matrix,
            projection_matrix,
            shadow=True,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)
    else:
        w, h, rgb, depth, seg = p.getCameraImage(
            width,
            height,
            view_matrix,
            projection_matrix,
            shadow=True,
            renderer=p.ER_TINY_RENDERER)

    # depth_buffer = np.reshape(images[3], [width, height])
    # depth = far * near / (far - (far - near) * depth_buffer)
    # seg = np.reshape(images[4],[width,height])*1./255.
    return rgb 
Developer: HorizonRobotics, Project: SocialRobot, Lines: 55, Source: turtlebot_pybullet.py
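
The commented-out lines at the end of this example hint at how the non-linear depth buffer returned by getCameraImage can be converted to metric depth. Here is a minimal sketch of that conversion, assuming the same near/far values as above; the helper name is illustrative.

import numpy as np

def depth_buffer_to_distance(depth_buffer, near=0.001, far=5.0):
    # getCameraImage returns depth in [0, 1] using the OpenGL depth mapping;
    # invert that mapping to recover distances along the view axis in world units.
    depth_buffer = np.asarray(depth_buffer, dtype=np.float64)
    return far * near / (far - (far - near) * depth_buffer)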


Note: The pybullet.computeViewMatrix examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and the copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.