This article collects typical usage examples of the Python class panda3d.core.OrthographicLens. If you are wondering what OrthographicLens is for and how it is used in practice, the curated examples below may help.
A total of 15 OrthographicLens code examples are shown, sorted by popularity by default.
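Before the collected examples, here is a minimal, self-contained sketch of the pattern that recurs throughout them: replacing the default perspective lens of a Panda3D ShowBase application with an OrthographicLens. The film size and camera placement below are arbitrary illustration values, not taken from any of the examples.

# Minimal sketch: a top-down orthographic view in a plain ShowBase app.
from direct.showbase.ShowBase import ShowBase
from panda3d.core import OrthographicLens

class OrthoDemo(ShowBase):
    def __init__(self):
        ShowBase.__init__(self)
        self.disableMouse()              # stop the default camera controller
        lens = OrthographicLens()
        lens.setFilmSize(20, 15)         # width/height of the view volume in world units
        lens.setNearFar(-100, 100)       # orthographic near/far may be negative
        self.cam.node().setLens(lens)    # replace the default PerspectiveLens
        self.camera.setPos(0, 0, 50)
        self.camera.setHpr(0, -90, 0)    # pitch down to look straight at the ground plane

if __name__ == "__main__":
    OrthoDemo().run()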
Example 1: SkyAOCaptureStage
class SkyAOCaptureStage(RenderStage):

    """ This stage captures the sky ao by rendering the scene from above """

    required_inputs = []
    required_pipes = []

    @property
    def produced_pipes(self):
        return {"SkyAOHeight": self.target_convert.color_tex}

    @property
    def produced_inputs(self):
        return {"SkyAOCapturePosition": self.pta_position}

    def __init__(self, pipeline):
        RenderStage.__init__(self, pipeline)
        self.pta_position = PTALVecBase3f.empty_array(1)
        self.resolution = 512
        self.capture_height = 100.0
        self.max_radius = 100.0

    def create(self):
        self.camera = Camera("SkyAOCaptureCam")
        self.cam_lens = OrthographicLens()
        self.cam_lens.set_film_size(self.max_radius, self.max_radius)
        self.cam_lens.set_near_far(0, self.capture_height)
        self.camera.set_lens(self.cam_lens)
        self.cam_node = Globals.base.render.attach_new_node(self.camera)
        self.cam_node.look_at(0, 0, -1)
        self.cam_node.set_r(0)

        self.target = self.create_target("SkyAOCapture")
        self.target.size = self.resolution
        self.target.add_depth_attachment(bits=16)
        self.target.prepare_render(self.cam_node)

        self.target_convert = self.create_target("ConvertDepth")
        self.target_convert.size = self.resolution
        self.target_convert.add_color_attachment(bits=(16, 0, 0, 0))
        self.target_convert.prepare_buffer()
        self.target_convert.set_shader_inputs(
            DepthTex=self.target.depth_tex,
            position=self.pta_position)

        # Register camera
        self._pipeline.tag_mgr.register_camera("shadow", self.camera)

    def update(self):
        snap_size = self.max_radius / self.resolution
        cam_pos = Globals.base.camera.get_pos(Globals.base.render)
        self.cam_node.set_pos(
            cam_pos.x - cam_pos.x % snap_size,
            cam_pos.y - cam_pos.y % snap_size,
            self.capture_height / 2.0)
        self.pta_position[0] = self.cam_node.get_pos()

    def reload_shaders(self):
        self.target_convert.shader = self.load_plugin_shader("convert_depth.frag.glsl")
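The update() method above snaps the capture camera to the texel grid of the AO map, so the rendered height field does not shimmer as the main camera moves. A stripped-down, hypothetical version of just that snapping step, using the same constants as the stage but independent of the RenderStage machinery:

# Hypothetical standalone version of the texel snapping performed in update() above.
def snap_capture_position(cam_pos, max_radius=100.0, resolution=512, capture_height=100.0):
    snap_size = max_radius / resolution            # world-space size of one AO texel
    return (cam_pos.x - cam_pos.x % snap_size,     # snap X to the texel grid
            cam_pos.y - cam_pos.y % snap_size,     # snap Y to the texel grid
            capture_height / 2.0)                  # keep the camera halfway up the capture volume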
Example 2: __init__
def __init__(self):
    DirectObject.__init__(self)
    self.base = ShowBase()

    resolution = (1024, 768)
    wp = WindowProperties()
    wp.setSize(int(resolution[0]), int(resolution[1]))
    wp.setOrigin(0, 0)
    self.base.win.requestProperties(wp)

    # depth completely doesn't matter for this, since just 2d, and no layers
    self.depth = 0

    self.base.setBackgroundColor(115 / 255, 115 / 255, 115 / 255)

    # set up a 2d camera
    camera = self.base.camList[0]
    lens = OrthographicLens()
    lens.setFilmSize(int(resolution[0]), int(resolution[1]))
    lens.setNearFar(-100, 100)
    camera.node().setLens(lens)
    camera.reparentTo(self.base.render)

    self.accept("escape", sys.exit)

    # spread out some positions
    self.positions = [(-200, 0, -200),
                      (0, 0, -200),
                      (200, 0, -200),
                      (-200, 0, 0),
                      (0, 0, 0),
                      (200, 0, 0),
                      (-200, 0, 200),
                      (0, 0, 200),
                      (200, 0, 200)]

    self.all_smiles()
Example 3: orthographic
def orthographic():
    global PERSPECTIVE
    PERSPECTIVE = False
    sandbox.base.disableMouse()
    sandbox.base.camera.setPos(0, 0, 3000)
    sandbox.base.camera.setHpr(0, -90, 0)
    lens = OrthographicLens()
    lens.setFilmSize(10)
    sandbox.base.cam.node().setLens(lens)
Example 4: PSSMCameraRig
class PSSMCameraRig(object):

    """ PSSM is not really supported in python yet (too slow), so this is a stub,
    supporting only one cascade """

    def __init__(self, num_splits):
        self._split_count = num_splits
        self._mvps = PTALMatrix4f.empty_array(num_splits)
        self._nearfar = PTALVecBase2f.empty_array(num_splits)
        for i in range(num_splits):
            self._nearfar[i] = Vec2(20, 1000)
            mat = Mat4()
            mat.fill(0)
            self._mvps[i] = mat
        self._lens = OrthographicLens()
        self._lens.set_near_far(20, 1000)
        self._lens.set_film_size(100, 100)
        self._camera = Camera("PSSMDummy", self._lens)
        self._cam_node = NodePath(self._camera)
        self._parent = None

    def update(self, cam_node, light_vector):
        cam_pos = cam_node.get_pos()
        self._cam_node.set_pos(cam_pos + light_vector * 500)
        self._cam_node.look_at(cam_pos)
        transform = self._parent.get_transform(self._cam_node).get_mat()
        self._mvps[0] = transform * self._lens.get_projection_mat()

    def get_camera(self, index):  # pylint: disable=W0613
        return self._cam_node

    def reparent_to(self, parent):
        self._cam_node.reparent_to(parent)
        self._parent = parent

    def get_mvp_array(self):
        return self._mvps

    def get_nearfar_array(self):
        return self._nearfar

    # Stubs
    def _stub(self, *args, **kwargs):
        pass

    set_pssm_distance = _stub
    set_sun_distance = _stub
    set_resolution = _stub
    set_use_stable_csm = _stub
    set_logarithmic_factor = _stub
    set_border_bias = _stub
    set_use_fixed_film_size = _stub
    reset_film_size_cache = _stub
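As a rough usage sketch for the stub above (an assumption about how it would be driven, not code taken from the pipeline): the rig is parented to the scene graph once, its PTA arrays are bound as shader inputs, and it is updated every frame from the main camera and the light direction. The shader input names here are illustrative.

# Assumed usage of the PSSMCameraRig stub above; names outside the class are illustrative.
from panda3d.core import Vec3

rig = PSSMCameraRig(num_splits=1)
rig.reparent_to(base.render)
base.render.set_shader_input("pssm_mvps", rig.get_mvp_array())
base.render.set_shader_input("pssm_nearfar", rig.get_nearfar_array())

def update_shadow_rig(task):
    # Light vector pointing from the sun towards the scene
    rig.update(base.camera, Vec3(0.3, 0.3, -1.0).normalized())
    return task.cont

base.taskMgr.add(update_shadow_rig, "update_shadow_rig")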
Example 5: __init__
def __init__(self, map):
    self.node, self.mapData = importTiledMap('data/tiled/maps/' + map)
    self.node.reparentTo(render)
    self.player = Player()

    lens = OrthographicLens()
    lens.setFilmSize(20, 15)
    base.cam.node().setLens(lens)
    base.cam.setPos((0, 0, 50))
    base.cam.setHpr((0, -90, 0))
Example 6: init_camera
def init_camera(self):
    print("-- init camera")
    self.disableMouse()
    lens = OrthographicLens()
    lens.setFilmSize(20, 15)  # Or whatever is appropriate for your scene
    self.cam.node().setLens(lens)
    self.cam.setPos(0, -20, 0)
    self.cam.lookAt(0, 0, 0)
Example 7: create
def create(self):
    # Create voxelize camera
    self.voxelizeCamera = Camera("VoxelizeCamera")
    self.voxelizeCamera.setCameraMask(BitMask32.bit(4))
    self.voxelizeCameraNode = Globals.render.attachNewNode(self.voxelizeCamera)
    self.voxelizeLens = OrthographicLens()
    self.voxelizeLens.setFilmSize(self.voxelGridSize * 2, self.voxelGridSize * 2)
    self.voxelizeLens.setNearFar(0.0, self.voxelGridSize * 2)
    self.voxelizeCamera.setLens(self.voxelizeLens)
    self.voxelizeCamera.setTagStateKey("VoxelizePassShader")
    Globals.render.setTag("VoxelizePassShader", "Default")

    # Create voxelize target
    self.target = RenderTarget("VoxelizePass")
    self.target.setSize(self.voxelGridResolution * self.gridResolutionMultiplier)

    if self.pipeline.settings.useDebugAttachments:
        self.target.addColorTexture()
    else:
        self.target.setColorWrite(False)

    self.target.setCreateOverlayQuad(False)
    self.target.setSource(self.voxelizeCameraNode, Globals.base.win)
    self.target.prepareSceneRender()
    self.target.setActive(False)
Example 8: create
def create(self):
    self.camera = Camera("PSSMDistShadowsESM")
    self.cam_lens = OrthographicLens()
    self.cam_lens.set_film_size(12000, 12000)
    self.cam_lens.set_near_far(10.0, self.sun_distance * 2)
    self.camera.set_lens(self.cam_lens)
    self.cam_node = Globals.base.render.attach_new_node(self.camera)

    self.target = self.create_target("ShadowMap")
    self.target.size = self.resolution
    self.target.add_depth_attachment(bits=32)
    self.target.prepare_render(self.cam_node)

    self.target_convert = self.create_target("ConvertToESM")
    self.target_convert.size = self.resolution
    self.target_convert.add_color_attachment(bits=(32, 0, 0, 0))
    self.target_convert.prepare_buffer()
    self.target_convert.set_shader_input("SourceTex", self.target.depth_tex)

    self.target_blur_v = self.create_target("BlurVert")
    self.target_blur_v.size = self.resolution
    self.target_blur_v.add_color_attachment(bits=(32, 0, 0, 0))
    self.target_blur_v.prepare_buffer()
    self.target_blur_v.set_shader_input("SourceTex", self.target_convert.color_tex)
    self.target_blur_v.set_shader_input("direction", LVecBase2i(1, 0))

    self.target_blur_h = self.create_target("BlurHoriz")
    self.target_blur_h.size = self.resolution
    self.target_blur_h.add_color_attachment(bits=(32, 0, 0, 0))
    self.target_blur_h.prepare_buffer()
    self.target_blur_h.set_shader_input("SourceTex", self.target_blur_v.color_tex)
    self.target_blur_h.set_shader_input("direction", LVecBase2i(0, 1))

    # Register shadow camera
    self._pipeline.tag_mgr.register_camera("shadow", self.camera)
Example 9: create
def create(self):
    # Create the voxel grid used to store the voxels
    self._voxel_grid = Image.create_3d(
        "Voxels", self._voxel_res, self._voxel_res, self._voxel_res,
        Texture.T_float, Texture.F_r11_g11_b10)
    self._voxel_grid.set_clear_color(Vec4(0))

    # Create the camera for voxelization
    self._voxel_cam = Camera("VoxelizeCam")
    self._voxel_cam.set_camera_mask(self._pipeline.tag_mgr.get_voxelize_mask())
    self._voxel_cam_lens = OrthographicLens()
    self._voxel_cam_lens.set_film_size(-self._voxel_ws, self._voxel_ws)
    self._voxel_cam_lens.set_near_far(0.0, 2.0 * self._voxel_ws)
    self._voxel_cam.set_lens(self._voxel_cam_lens)
    self._voxel_cam_np = Globals.base.render.attach_new_node(self._voxel_cam)
    self._pipeline.tag_mgr.register_voxelize_camera(self._voxel_cam)

    # Create the voxelization target
    self._voxel_target = self._create_target("VoxelizeScene")
    self._voxel_target.set_source(source_cam=self._voxel_cam_np, source_win=Globals.base.win)
    self._voxel_target.set_size(self._voxel_res, self._voxel_res)
    self._voxel_target.set_create_overlay_quad(False)
    self._voxel_target.prepare_scene_render()

    # Create the initial state used for rendering voxels
    initial_state = NodePath("VXInitialState")
    initial_state.set_attrib(CullFaceAttrib.make(CullFaceAttrib.M_cull_none), 100000)
    initial_state.set_attrib(DepthTestAttrib.make(DepthTestAttrib.M_none), 100000)
    initial_state.set_attrib(ColorWriteAttrib.make(ColorWriteAttrib.C_off), 100000)
    self._voxel_cam.set_initial_state(initial_state.get_state())

    Globals.base.render.set_shader_input("voxelGridPosition", self._pta_grid_pos)
    Globals.base.render.set_shader_input("voxelGridRes", self._pta_grid_res)
    Globals.base.render.set_shader_input("voxelGridSize", self._pta_grid_size)
    Globals.base.render.set_shader_input("VoxelGridDest", self._voxel_grid.texture)
Example 10: create
def create(self):
    self.camera = Camera("SkyAOCaptureCam")
    self.cam_lens = OrthographicLens()
    self.cam_lens.set_film_size(200, 200)
    self.cam_lens.set_near_far(0.0, 500.0)
    self.camera.set_lens(self.cam_lens)
    self.cam_node = Globals.base.render.attach_new_node(self.camera)
    self.cam_node.look_at(0, 0, -1)
    self.cam_node.set_r(0)

    self.target = self.create_target("SkyAOCapture")
    self.target.size = 1024
    self.target.add_depth_attachment(bits=16)
    self.target.prepare_render(self.cam_node)

    self.target_convert = self.create_target("ConvertDepth")
    self.target_convert.size = 1024
    self.target_convert.add_color_attachment(bits=(16, 0, 0, 0))
    self.target_convert.prepare_buffer()
    self.target_convert.set_shader_input("DepthTex", self.target.depth_tex)
    self.target_convert.set_shader_input("position", self.pta_position)

    # Register camera
    self._pipeline.tag_mgr.register_camera("shadow", self.camera)
Example 11: __init__
def __init__(self):
    base.disableMouse()
    lens = OrthographicLens()
    lens.setFilmSize(34.2007, 25.6505)
    lens.setNear(-10)
    lens.setFar(100)
    base.cam.node().setLens(lens)

    self.container = render.attachNewNode('camContainer')
    base.camera.reparentTo(self.container)
    base.camera.setPos(-40, 0, 23)
    base.camera.lookAt(0, 0, 3)
    self.container.setHpr(45, 0, 0)

    self.zoomed = True
    self.r = False

    # Load sounds
    self.toggle_r_snd = base.loader.loadSfx(GAME + '/sounds/camera_toggle_r.ogg')
    self.rotate_snd = base.loader.loadSfx(GAME + '/sounds/camera_rotate.ogg')

    self.acceptAll()
    self.windowEvent(base.win)
Example 12: __init__
def __init__(self):
    base.disableMouse()
    lens = OrthographicLens()
    lens.setFilmSize(34.2007, 25.6505)
    lens.setNear(-10)
    lens.setFar(100)
    base.cam.node().setLens(lens)

    self.container = render.attachNewNode("camContainer")
    base.camera.reparentTo(self.container)
    base.camera.setPos(-40, 0, 23)
    base.camera.lookAt(0, 0, 3)
    self.container.setHpr(45, 0, 0)

    self.zoomed = True
    self.r = False
    self.phase = None

    # Load sounds
    self.toggle_r_snd = base.loader.loadSfx(GAME + "/sounds/camera_toggle_r.ogg")
    self.rotate_snd = base.loader.loadSfx(GAME + "/sounds/camera_rotate.ogg")

    self.accept("e", self.toggleZoom)
    self.accept("r", self.toggleR)
    self.accept("a", lambda: self.rotate(90))
    self.accept("z", lambda: self.rotate(-90))
    self.accept("window-event", self.windowEvent)
Example 13: renderQuadInto
def renderQuadInto(self, xsize, ysize, colortex=None, cmode=GraphicsOutput.RTMBindOrCopy, auxtex=None):
    buffer = self.createBuffer("filter-stage", xsize, ysize, colortex, cmode, auxtex)
    if buffer is None:
        return None

    cm = CardMaker("filter-stage-quad")
    cm.setFrameFullscreenQuad()
    quad = NodePath(cm.generate())
    quad.setDepthTest(0)
    quad.setDepthWrite(0)
    quad.setColor(Vec4(1, 0.5, 0.5, 1))

    quadcamnode = Camera("filter-quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setFilmOffset(0, 0)
    lens.setNearFar(-1000, 1000)
    quadcamnode.setLens(lens)
    quadcam = quad.attachNewNode(quadcamnode)

    buffer.getDisplayRegion(0).setCamera(quadcam)
    buffer.getDisplayRegion(0).setActive(1)

    return quad, buffer
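The 2x2 film size is the key detail here: CardMaker.setFrameFullscreenQuad() produces a quad spanning -1..1 in both axes, so an OrthographicLens with a 2x2 film covers it exactly, regardless of buffer resolution. A minimal sketch of just that pairing, outside the filter-manager context above:

# Minimal sketch of the fullscreen-quad / 2x2 orthographic lens pairing used above.
from panda3d.core import CardMaker, Camera, NodePath, OrthographicLens

def make_fullscreen_quad_camera():
    cm = CardMaker("fullscreen-quad")
    cm.setFrameFullscreenQuad()          # quad from (-1, -1) to (1, 1)
    quad = NodePath(cm.generate())
    cam_node = Camera("quad-cam")
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)               # film exactly matches the quad's extent
    lens.setNearFar(-1000, 1000)
    cam_node.setLens(lens)
    cam_np = quad.attachNewNode(cam_node)
    return quad, cam_np                  # assign cam_np to a buffer's display region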
Example 14: create
def create(self):
    g_buffer = base.win.makeTextureBuffer(self._name, self._size, self._size)
    g_buffer.setClearColor(self._bg_color)
    self._texture = g_buffer.getTexture()
    g_buffer.setSort(-100)

    camera2d = NodePath(base.makeCamera(g_buffer))
    lens = OrthographicLens()
    lens.setFilmSize(2, 2)
    lens.setNearFar(-1000, 1000)
    camera2d.node().setLens(lens)

    self._render2d = NodePath("{0}-my-render2d".format(self._name))
    self._render2d.setDepthTest(False)
    self._render2d.setDepthWrite(False)
    camera2d.reparentTo(self._render2d)

    return self._render2d, self._texture
Example 15: __init__
def __init__(self, base):
    self.base = base
    self.base.disableMouse()

    x_win = self.base.win.getXSize()
    y_win = self.base.win.getYSize()
    aspect_win = float(x_win) / float(y_win)

    self.active_lens = 1
    self.ortho_lens = OrthographicLens()
    self.ortho_lens.setAspectRatio(aspect_win)
    self.ortho_lens.setNearFar(1.0, 100.0)
    self.persp_lens = PerspectiveLens()
    self.persp_lens.setAspectRatio(aspect_win)
    self.persp_lens.setFov(5.0)
    self.persp_lens.setNearFar(1.0, 100.0)
    self.lenses = [self.persp_lens, self.ortho_lens]
    self.set_lens(lens=self.active_lens)
    self.set_view(Vec3(50.0, 50.0, 50.0))

    self.old_x = None
    self.old_y = None
    self.zoom_speed = 0.05
    self.pan_speed = 0.005
    self.rotate_speed = 0.1

    # these are pointers
    self.keys = self.base.keyboard_reader.keys
    self.key_map = self.base.keyboard_reader.key_map

    # registering camera functions
    self.base.taskMgr.add(hold_caller_multikey(trigger=self.keys, indices=[self.key_map['shift'], self.key_map['mouse2']], values=[1, 1], init_handle=None, loop_handle=self.pan_camera, cleanup_handle=self.mouse_delta_cleanup), 'pan_camera_task')
    self.base.taskMgr.add(hold_caller_multikey(trigger=self.keys, indices=[self.key_map['control'], self.key_map['shift'], self.key_map['mouse2']], values=[0, 0, 1], init_handle=None, loop_handle=self.rotate_camera_fixed_pivot, cleanup_handle=self.mouse_delta_cleanup), 'rotate_camera_task')
    self.base.taskMgr.add(hold_caller_multikey(trigger=self.keys, indices=[self.key_map['control'], self.key_map['mouse2']], values=[1, 1], init_handle=None, loop_handle=self.zoom_camera, cleanup_handle=self.mouse_delta_cleanup), 'zoom_camera_task')

    # register camera presets
    self.base.taskMgr.add(delta_caller(handle=self.set_viewpoint_front, trigger=self.keys, index=self.key_map['1'], value=1), 'camera_preset_front_task')
    self.base.taskMgr.add(delta_caller(handle=self.set_viewpoint_side, trigger=self.keys, index=self.key_map['2'], value=1), 'camera_preset_side_task')
    self.base.taskMgr.add(delta_caller(handle=self.set_viewpoint_top, trigger=self.keys, index=self.key_map['3'], value=1), 'camera_preset_top_task')

    # register switching perspective
    # TODO: currently disabled because perspective camera is stupid and should feel bad
    # self.base.taskMgr.add(delta_caller(handle=self.switch_perspective, trigger=self.keys, index=self.key_map['5'], value=1), 'camera_switch_perspective_task')

    # makes the zoom level of the orthographic camera more reasonable
    self.fixed_zoom_camera(10.0)
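Since an OrthographicLens has no field of view, zooming such a camera has to be done by resizing the film rather than by calling setFov(). A hedged guess at what a helper like fixed_zoom_camera above might boil down to (not taken from the class itself):

# Hypothetical helper: "zoom" an orthographic lens by resizing its film while
# preserving the window's aspect ratio.
def set_ortho_zoom(ortho_lens, view_height, aspect_ratio):
    ortho_lens.setFilmSize(view_height * aspect_ratio, view_height)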