本文整理汇总了Python中neural_renderer.Renderer方法的典型用法代码示例。如果您正苦于以下问题:Python neural_renderer.Renderer方法的具体用法?Python neural_renderer.Renderer怎么用?Python neural_renderer.Renderer使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块 neural_renderer 的用法示例。
在下文中一共展示了neural_renderer.Renderer方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def __init__(self, filename_obj, filename_ref):
    """Load a mesh, a zero-initialized texture parameter, and a reference image."""
    super(Model, self).__init__()
    with self.init_scope():
        # Mesh geometry from the .obj file; add a leading batch axis.
        vertices, faces = neural_renderer.load_obj(filename_obj)
        self.vertices = vertices[None, :, :]
        self.faces = faces[None, :, :]

        # Per-face texture cubes, zero-initialized and registered as the
        # learnable parameter of this model.
        texture_size = 4
        tex_shape = (1, self.faces.shape[1], texture_size, texture_size, texture_size, 3)
        self.textures = chainer.Parameter(np.zeros(tex_shape, 'float32'))

        # Reference image scaled to [0, 1].
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2.
        self.image_ref = scipy.misc.imread(filename_ref).astype('float32') / 255.

        # Orthographic projection, lit only by ambient light.
        renderer = neural_renderer.Renderer()
        renderer.perspective = False
        renderer.light_intensity_directional = 0.0
        renderer.light_intensity_ambient = 1.0
        self.renderer = renderer
示例2: __init__
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def __init__(self, filename_obj, filename_ref):
    """Load a mesh whose vertices are optimized against a grayscale reference image."""
    super(Model, self).__init__()
    with self.init_scope():
        # Vertices are the optimization target; faces stay fixed.
        vertices, faces = neural_renderer.load_obj(filename_obj)
        self.vertices = chainer.Parameter(vertices[None, :, :])
        self.faces = faces[None, :, :]

        # Constant all-ones texture cubes (not optimized).
        texture_size = 2
        self.textures = np.ones(
            (1, self.faces.shape[1], texture_size, texture_size, texture_size, 3), 'float32')

        # Reference image averaged over channels to grayscale in [0, 1].
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2.
        self.image_ref = scipy.misc.imread(filename_ref).astype('float32').mean(-1) / 255.

        # Renderer with default settings.
        self.renderer = neural_renderer.Renderer()
示例3: test_forward_case1
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def test_forward_case1(self):
    """Whether a silhouette by neural renderer matches that by Blender."""
    vertices, faces, _ = utils.load_teapot_batch()

    renderer = neural_renderer.Renderer()
    renderer.image_size = 256
    renderer.anti_aliasing = False

    # Binarize the depth map of the third batch element: pixels at the
    # maximum depth are treated as background.
    depth = renderer.render_depth(vertices, faces).data.get()[2]
    silhouette = depth != depth.max()

    # Blender reference: foreground wherever any channel differs from white.
    ref = scipy.misc.imread('./tests/data/teapot_blender.png').astype('float32')
    ref = (ref.min(-1) != 255).astype('float32')

    chainer.testing.assert_allclose(ref, silhouette)
示例4: test_forward_case1
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def test_forward_case1(self):
    """Rendering a teapot without anti-aliasing."""
    vertices, faces, textures = utils.load_teapot_batch()

    renderer = neural_renderer.Renderer()
    renderer.image_size = 256
    renderer.anti_aliasing = False

    # Render, copy to host memory, and save the third batch element
    # after converting from CHW to HWC layout.
    rendered = renderer.render(vertices, faces, textures).data.get()
    scipy.misc.imsave('./tests/data/test_rasterize1.png', rendered[2].transpose((1, 2, 0)))
示例5: test_forward_case2
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def test_forward_case2(self):
    """Rendering a teapot with anti-aliasing and another viewpoint."""
    vertices, faces, textures = utils.load_teapot_batch()

    # Default renderer (anti-aliasing stays on); only the viewpoint changes.
    renderer = neural_renderer.Renderer()
    renderer.eye = [1, 1, -2.7]

    # Save the third batch element after converting from CHW to HWC layout.
    rendered = renderer.render(vertices, faces, textures).data.get()
    scipy.misc.imsave('./tests/data/test_rasterize2.png', rendered[2].transpose((1, 2, 0)))
示例6: test_forward_case3
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def test_forward_case3(self):
    """Whether a silhouette by neural renderer matches that by Blender."""
    vertices, faces, textures = utils.load_teapot_batch()

    # Ambient-only lighting so the render reduces to a flat silhouette.
    renderer = neural_renderer.Renderer()
    renderer.image_size = 256
    renderer.anti_aliasing = False
    renderer.light_intensity_ambient = 1.0
    renderer.light_intensity_directional = 0.0

    # Average the RGB channels of the third batch element.
    image = renderer.render(vertices, faces, textures).data.get()[2].mean(0)

    # Blender reference: foreground wherever any channel differs from white.
    ref = scipy.misc.imread('./tests/data/teapot_blender.png').astype('float32')
    ref = (ref.min(-1) != 255).astype('float32')
    chainer.testing.assert_allclose(ref, image)
示例7: test_case1
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def test_case1(self):
    """Whether a silhouette by neural renderer matches that by Blender."""
    vertices, faces, _ = utils.load_teapot_batch()

    renderer = neural_renderer.Renderer()
    renderer.image_size = 256
    renderer.anti_aliasing = False

    silhouettes = renderer.render_silhouettes(vertices, faces).data.get()

    # Blender reference: foreground wherever any channel differs from white.
    ref = scipy.misc.imread('./tests/data/teapot_blender.png').astype('float32')
    ref = (ref.min(-1) != 255).astype('float32')
    chainer.testing.assert_allclose(ref, silhouettes[2])
示例8: test_texture
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def test_texture(self):
    """Disabled texture round-trip test (save_obj then reload); kept for reference."""
    pass
    # renderer = neural_renderer.Renderer()
    # renderer.eye = neural_renderer.get_points_from_angles(2, 15, 30)
    # renderer.eye = neural_renderer.get_points_from_angles(2, 15, -90)
    #
    # vertices, faces, textures = neural_renderer.load_obj(
    # './tests/data/4e49873292196f02574b5684eaec43e9/model.obj', load_texture=True, texture_size=16, normalization=False)
    #
    # # vertices, faces, textures = neural_renderer.load_obj('./tests/data/1cde62b063e14777c9152a706245d48/model.obj')
    # neural_renderer.save_obj('./tests/data/tmp.obj', vertices, faces, textures)
    #
    # vertices, faces, textures = neural_renderer.load_obj('./tests/data/tmp.obj', load_texture=True, texture_size=16)
    # vertices = chainer.cuda.to_gpu(vertices)
    # faces = chainer.cuda.to_gpu(faces)
    # textures = chainer.cuda.to_gpu(textures)
    # images = renderer.render(vertices[None, :, :], faces[None, :, :], textures[None, :, :, :, :, :]).data.get()
    # scipy.misc.imsave('./tests/data/car2.png', scipy.misc.toimage(images[0]))
示例9: test_texture
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def test_texture(self):
    """Render two textured .obj models from fixed viewpoints and save the images."""
    renderer = neural_renderer.Renderer()

    cases = [
        ('./tests/data/1cde62b063e14777c9152a706245d48/model.obj',
         {'load_texture': True}, (2, 15, 30), './tests/data/car.png'),
        ('./tests/data/4e49873292196f02574b5684eaec43e9/model.obj',
         {'load_texture': True, 'texture_size': 16}, (2, 15, -90), './tests/data/display.png'),
    ]
    for obj_path, load_kwargs, (distance, elevation, azimuth), out_path in cases:
        vertices, faces, textures = neural_renderer.load_obj(obj_path, **load_kwargs)
        vertices = chainer.cuda.to_gpu(vertices)
        faces = chainer.cuda.to_gpu(faces)
        textures = chainer.cuda.to_gpu(textures)
        renderer.eye = neural_renderer.get_points_from_angles(distance, elevation, azimuth)
        # Add the batch axis, render, and pull the result to host memory.
        images = renderer.render(
            vertices[None, :, :], faces[None, :, :], textures[None, :, :, :, :, :]).data.get()
        scipy.misc.imsave(out_path, scipy.misc.toimage(images[0]))
示例10: backward
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def backward(self, grad_out):
    """Route incoming gradients through the neural-renderer backend.

    Returns gradients for (vertices, faces, textures); faces never
    receive a gradient, and textures only when rendering full images.
    """
    grad_images = grad_out.cpu().numpy()
    if self.mask_only:
        # Silhouette mode: only the vertices get a gradient.
        grad_verts = convert_as(torch.Tensor(self.renderer.backward_mask(grad_images)), grad_out)
        grad_tex = None
    else:
        grad_verts, grad_tex = self.renderer.backward_img(grad_images)
        grad_verts = convert_as(torch.Tensor(grad_verts), grad_out)
        grad_tex = convert_as(torch.Tensor(grad_tex), grad_out)
    # Negate the y component of the vertex gradients — presumably
    # compensating for an image y-axis flip in the forward pass; confirm.
    grad_verts[:, :, 1] *= -1
    return grad_verts, None, grad_tex
########################################################################
############## Wrapper torch module for Neural Renderer ################
########################################################################
示例11: __init__
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def __init__(self,
             args):
    """Build the CircleNet backbone/decoder pair and an orthographic renderer.

    Args:
        args: configuration namespace; reads num_nodes, dec_dim, dec_size,
            image_size, stages, arch, drop and (for 'unet') enc_dim.
    """
    super(CircleNet, self).__init__()
    # NOTE(review): "num_nudes" looks like a typo for "num_nodes"; other
    # code may already read this attribute name, so confirm before renaming.
    self.num_nudes = args.num_nodes
    self.dec_dim = args.dec_dim
    self.dec_size = args.dec_size
    self.image_size = args.image_size
    self.stages = args.stages
    if args.arch == 'resnet':
        kwargs = {'stages': self.stages}
        # Per-stage feature widths — presumably ResNet-50-style; confirm
        # against resnet_encoder.
        res_dims = [256, 512, 1024, 2048]
        self.backbone = resnet_encoder(pretrained=True, **kwargs)
        # Skip-connection widths for the selected stages, deepest first.
        dec_skip_dims = [res_dims[i] for i in self.stages][::-1]
        self.disp = resnet_decoder(dec_skip_dims, 2, self.dec_dim, self.dec_size, drop=args.drop)
    elif args.arch == 'unet':
        self.backbone = unet_encoder(args.enc_dim, drop=args.drop)
        self.disp = unet_decoder(self.backbone.dims, drop=args.drop)
    # Fixed camera parameters for the differentiable renderer.
    self.texture_size = 2
    self.camera_distance = 1
    self.elevation = 0
    self.azimuth = 0
    self.renderer = nr.Renderer(camera_mode='look_at', image_size=self.image_size, light_intensity_ambient=1,
                                light_intensity_directional=1, perspective=False)
示例12: __init__
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def __init__(self,
             img_size=256,
             face_path='models/smpl_faces.npy',
             t_size=1):
    """Wrap neural_renderer with SMPL faces, a default texture, and a default camera.

    Args:
        img_size: square output image resolution.
        face_path: path to a .npy array of triangle vertex indices.
        t_size: per-face texture cube resolution.
    """
    # Orthographic look-at renderer.
    self.renderer = nr.Renderer(
        img_size, camera_mode='look_at', perspective=False)
    self.set_light_dir([1, .5, -1], int_dir=0.3, int_amb=0.7)
    self.set_bgcolor([1, 1, 1.])
    self.img_size = img_size
    # Fix: np.int was an alias of the builtin int and was removed in
    # NumPy 1.24; astype(int) is the documented, behavior-identical
    # replacement.
    self.faces_np = np.load(face_path).astype(int)
    self.faces = to_variable(torch.IntTensor(self.faces_np).cuda())
    # Ensure the faces tensor carries a leading batch axis.
    if self.faces.dim() == 2:
        self.faces = torch.unsqueeze(self.faces, 0)
    # Default color: all-ones texture cubes, one per face.
    default_tex = np.ones((1, self.faces.shape[1], t_size, t_size, t_size,
                           3))
    self.default_tex = to_variable(torch.FloatTensor(default_tex).cuda())
    # Default camera — presumably [scale, tx, ty] for the orthographic
    # projection below; confirm against proj_fn.
    cam = np.hstack([0.9, 0, 0])
    default_cam = to_variable(torch.FloatTensor(cam).cuda())
    self.default_cam = torch.unsqueeze(default_cam, 0)
    # Setup proj fn:
    self.proj_fn = orthographic_proj_withz_idrot
示例13: __init__
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def __init__(self, filename_obj, filename_ref=None):
    """Load a mesh and optional reference silhouette; the camera position is learnable."""
    super(Model, self).__init__()
    with self.init_scope():
        # Fixed mesh geometry with a leading batch axis.
        vertices, faces = neural_renderer.load_obj(filename_obj)
        self.vertices = vertices[None, :, :]
        self.faces = faces[None, :, :]

        # Constant all-ones texture cubes.
        texture_size = 2
        self.textures = np.ones(
            (1, self.faces.shape[1], texture_size, texture_size, texture_size, 3), 'float32')

        # Binary reference silhouette: foreground wherever any channel is non-zero.
        if filename_ref is None:
            self.image_ref = None
        else:
            self.image_ref = (scipy.misc.imread(filename_ref).max(-1) != 0).astype('float32')

        # The camera position is the parameter being optimized.
        self.camera_position = chainer.Parameter(np.array([6, 10, -14], 'float32'))

        # Renderer that looks from the learnable camera position.
        renderer = neural_renderer.Renderer()
        renderer.eye = self.camera_position
        self.renderer = renderer
示例14: test_backward_case1
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def test_backward_case1(self):
    """Check analytic depth gradients against finite differences at one pixel."""
    # A single triangle spanning a range of depths.
    vertices = [
        [-0.9, -0.9, 2.],
        [-0.8, 0.8, 1.],
        [0.8, 0.8, 0.5]]
    faces = [[0, 1, 2]]
    renderer = neural_renderer.Renderer()
    renderer.image_size = 64
    renderer.anti_aliasing = False
    renderer.perspective = False
    # 'none' — presumably skips the camera transform so vertices are used
    # as-is; confirm against the Renderer docs.
    renderer.camera_mode = 'none'
    vertices = cp.array(vertices, 'float32')
    faces = cp.array(faces, 'int32')
    # Tile the single mesh into a minibatch.
    vertices, faces = utils.to_minibatch((vertices, faces))
    vertices = chainer.Variable(vertices)
    images = renderer.render_depth(vertices, faces)
    # Squared-error loss on a single pixel of the first batch element.
    loss = cf.sum(cf.square(images[0, 15, 20] - 1))
    loss.backward()
    grad = vertices.grad.get()
    # Numerical gradient by one-sided finite differences.
    grad2 = np.zeros_like(grad)
    for i in range(3):
        for j in range(3):
            eps = 1e-3
            vertices2 = vertices.data.copy()
            # NOTE(review): this perturbs row j of batch element i rather
            # than vertex (i, j) of one mesh — presumably relying on the
            # minibatch elements being identical copies; confirm against
            # utils.to_minibatch.
            vertices2[i, j] += eps
            images = renderer.render_depth(vertices2, faces)
            loss2 = cf.sum(cf.square(images[0, 15, 20] - 1))
            grad2[i, j] = ((loss2 - loss) / eps).data.get()
    chainer.testing.assert_allclose(grad, grad2, atol=1e-3)
示例15: test_backward_case1
# 需要导入模块: import neural_renderer [as 别名]
# 或者: from neural_renderer import Renderer [as 别名]
def test_backward_case1(self):
    """Backward if non-zero gradient is out of a face."""
    # One triangle at constant depth; the probed pixel lies outside it.
    vertices = [
        [0.8, 0.8, 1.],
        [0.0, -0.5, 1.],
        [0.2, -0.4, 1.]]
    faces = [[0, 1, 2]]
    px, py = 35, 25
    # Expected per-vertex gradients (precomputed reference values).
    grad_ref = [
        [1.6725862, -0.26021874, 0.],
        [1.41986704, -1.64284933, 0.],
        [0., 0., 0.],
    ]

    # Orthographic, aliasing-free renderer with ambient-only lighting.
    renderer = neural_renderer.Renderer()
    renderer.image_size = 64
    renderer.anti_aliasing = False
    renderer.perspective = False
    renderer.light_intensity_ambient = 1.0
    renderer.light_intensity_directional = 0.0

    vertices = cp.array(vertices, 'float32')
    faces = cp.array(faces, 'int32')
    textures = cp.ones((faces.shape[0], 4, 4, 4, 3), 'float32')
    grad_ref = cp.array(grad_ref, 'float32')
    vertices, faces, textures, grad_ref = utils.to_minibatch(
        (vertices, faces, textures, grad_ref))
    vertices = chainer.Variable(vertices)

    # L1 loss on the channel-averaged value of one pixel per batch element.
    images = cf.mean(renderer.render(vertices, faces, textures), axis=1)
    loss = cf.sum(cf.absolute(images[:, py, px] - 1))
    loss.backward()
    chainer.testing.assert_allclose(vertices.grad, grad_ref, rtol=1e-2)