本文整理汇总了Python中neural_renderer.get_points_from_angles方法的典型用法代码示例。如果您正苦于以下问题:Python neural_renderer.get_points_from_angles方法的具体用法?Python neural_renderer.get_points_from_angles怎么用?Python neural_renderer.get_points_from_angles使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块 neural_renderer 的用法示例。
在下文中一共展示了neural_renderer.get_points_from_angles方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_texture
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def test_texture(self):
    """Disabled texture round-trip test (currently a no-op).

    The entire body is commented out, so the test only executes ``pass``.
    The retained code would load a textured .obj (texture_size=16,
    normalization off), re-save it with ``save_obj``, reload it, move the
    arrays to the GPU, render one frame, and write './tests/data/car2.png'.
    """
    pass
    # renderer = neural_renderer.Renderer()
    # renderer.eye = neural_renderer.get_points_from_angles(2, 15, 30)
    # renderer.eye = neural_renderer.get_points_from_angles(2, 15, -90)
    #
    # vertices, faces, textures = neural_renderer.load_obj(
    #     './tests/data/4e49873292196f02574b5684eaec43e9/model.obj', load_texture=True, texture_size=16, normalization=False)
    #
    # # vertices, faces, textures = neural_renderer.load_obj('./tests/data/1cde62b063e14777c9152a706245d48/model.obj')
    # neural_renderer.save_obj('./tests/data/tmp.obj', vertices, faces, textures)
    #
    # vertices, faces, textures = neural_renderer.load_obj('./tests/data/tmp.obj', load_texture=True, texture_size=16)
    # vertices = chainer.cuda.to_gpu(vertices)
    # faces = chainer.cuda.to_gpu(faces)
    # textures = chainer.cuda.to_gpu(textures)
    # images = renderer.render(vertices[None, :, :], faces[None, :, :], textures[None, :, :, :, :, :]).data.get()
    # scipy.misc.imsave('./tests/data/car2.png', scipy.misc.toimage(images[0]))
示例2: test_texture
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def test_texture(self):
    """Render two textured models from fixed viewpoints and write PNGs."""
    renderer = neural_renderer.Renderer()

    # First model: default texture size, camera at (distance=2, elev=15, azim=30).
    verts, tris, tex = neural_renderer.load_obj(
        './tests/data/1cde62b063e14777c9152a706245d48/model.obj', load_texture=True)
    verts, tris, tex = (chainer.cuda.to_gpu(a) for a in (verts, tris, tex))
    renderer.eye = neural_renderer.get_points_from_angles(2, 15, 30)
    frames = renderer.render(verts[None, :, :], tris[None, :, :], tex[None, :, :, :, :, :]).data.get()
    scipy.misc.imsave('./tests/data/car.png', scipy.misc.toimage(frames[0]))

    # Second model: 16^3 texture cubes, camera at (distance=2, elev=15, azim=-90).
    verts, tris, tex = neural_renderer.load_obj(
        './tests/data/4e49873292196f02574b5684eaec43e9/model.obj', load_texture=True, texture_size=16)
    verts, tris, tex = (chainer.cuda.to_gpu(a) for a in (verts, tris, tex))
    renderer.eye = neural_renderer.get_points_from_angles(2, 15, -90)
    frames = renderer.render(verts[None, :, :], tris[None, :, :], tex[None, :, :, :, :, :]).data.get()
    scipy.misc.imsave('./tests/data/display.png', scipy.misc.toimage(frames[0]))
示例3: make_reference_image
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def make_reference_image(filename_ref, filename_obj):
    """Render the model once from a fixed viewpoint and save it as the reference image.

    Camera is placed at distance 2.732, elevation 30, azimuth -15.
    """
    model = Model(filename_obj)
    model.to_gpu()
    model.renderer.eye = neural_renderer.get_points_from_angles(2.732, 30, -15)
    rendered = model.renderer.render(model.vertices, model.faces, cf.tanh(model.textures))
    frame = rendered.data.get()[0]
    scipy.misc.toimage(frame, cmin=0, cmax=1).save(filename_ref)
示例4: __call__
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def __call__(self):
    """Squared-error loss between a render from a random azimuth and the reference image."""
    azimuth = np.random.uniform(0, 360)
    self.renderer.eye = neural_renderer.get_points_from_angles(2.732, 0, azimuth)
    rendered = self.renderer.render(self.vertices, self.faces, cf.tanh(self.textures))
    # Reference is HWC; transpose to CHW and add a batch axis to match the render.
    target = self.image_ref.transpose((2, 0, 1))[None, :, :, :]
    return cf.sum(cf.square(rendered - target))
示例5: run
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def run():
    """Optimize the model's texture against a reference image, then render a turntable GIF."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-io', '--filename_obj', type=str, default='./examples/data/teapot.obj')
    parser.add_argument('-ir', '--filename_ref', type=str, default='./examples/data/example3_ref.png')
    parser.add_argument('-or', '--filename_output', type=str, default='./examples/data/example3_result.gif')
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    out_dir = os.path.dirname(args.filename_output)

    model = Model(args.filename_obj, args.filename_ref)
    model.to_gpu()
    optimizer = chainer.optimizers.Adam(alpha=0.1, beta1=0.5)
    optimizer.setup(model)

    # Optimization: 300 Adam steps on the model's own loss.
    progress = tqdm.tqdm(range(300))
    for _ in progress:
        progress.set_description('Optimizing')
        optimizer.target.cleargrads()
        loss = model()
        loss.backward()
        optimizer.update()

    # Turntable rendering: one frame every 4 degrees of azimuth.
    progress = tqdm.tqdm(range(0, 360, 4))
    for frame_idx, azimuth in enumerate(progress):
        progress.set_description('Drawing')
        model.renderer.eye = neural_renderer.get_points_from_angles(2.732, 0, azimuth)
        rendered = model.renderer.render(model.vertices, model.faces, cf.tanh(model.textures))
        frame = rendered.data.get()[0].transpose((1, 2, 0))
        scipy.misc.toimage(frame, cmin=0, cmax=1).save('%s/_tmp_%04d.png' % (out_dir, frame_idx))
    make_gif(out_dir, args.filename_output)
示例6: __call__
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def __call__(self):
    """Silhouette loss against the reference mask from a fixed viewpoint (azimuth 90)."""
    self.renderer.eye = neural_renderer.get_points_from_angles(2.732, 0, 90)
    silhouette = self.renderer.render_silhouettes(self.vertices, self.faces)
    return cf.sum(cf.square(silhouette - self.image_ref[None, :, :]))
示例7: get_random_batch
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def get_random_batch(self, batch_size):
    """Sample paired random views of random objects.

    For each batch element, picks a class, an object in that class, and two of
    the 24 pre-rendered viewpoints, then returns the two image sets and their
    camera positions.
    """
    # Accumulate indices in Python lists; converted to int32 arrays below.
    ids_a, ids_b, views_a, views_b = [], [], [], []
    for _ in range(batch_size):
        class_id = np.random.choice(self.class_ids)
        object_id = np.random.randint(0, self.num_data[class_id])
        view_a = np.random.randint(0, 24)
        view_b = np.random.randint(0, 24)
        # Dataset layout: 24 consecutive renders per object, offset by class.
        base = (object_id + self.pos[class_id]) * 24
        ids_a.append(base + view_a)
        ids_b.append(base + view_b)
        views_a.append(view_a)
        views_b.append(view_b)
    data_ids_a = np.array(ids_a, 'int32')
    data_ids_b = np.array(ids_b, 'int32')
    viewpoint_ids_a = np.array(views_a, 'int32')
    viewpoint_ids_b = np.array(views_b, 'int32')
    images_a = self.images[data_ids_a].astype('float32') / 255.
    images_b = self.images[data_ids_b].astype('float32') / 255.
    # Fixed distance/elevation; azimuth steps of -15 degrees per viewpoint id.
    distances = np.full(batch_size, self.distance, 'float32')
    elevations = np.full(batch_size, self.elevation, 'float32')
    viewpoints_a = neural_renderer.get_points_from_angles(distances, elevations, -viewpoint_ids_a * 15)
    viewpoints_b = neural_renderer.get_points_from_angles(distances, elevations, -viewpoint_ids_b * 15)
    return images_a, images_b, viewpoints_a, viewpoints_b
示例8: __init__
# Required module: import neural_renderer [as nr]
# Target API: nr.get_points_from_angles
def __init__(self, faces, np_v_template=None, test=False, test_camera=False, high_resolution=False):
    """Build a 'look_at' neural renderer for projecting mesh segmentations.

    Parameters
    ----------
    faces : torch.Tensor
        Face index tensor registered as a buffer. Assumed shape
        [batch, num_faces, 3] from the use of ``self.faces.shape[1]`` — TODO confirm.
    np_v_template : np.ndarray, optional
        Template vertices; only used by the debug paths below.
    test : bool
        Render the template once and dump a PNG to the working directory.
    test_camera : bool
        Sweep camera distance/elevation/azimuth and dump a PNG per pose.
    high_resolution : bool
        Render at 500 px instead of 256 px.

    Fix: ``np.bool``/``np.int`` were removed in NumPy 1.24; the builtins
    ``bool``/``int`` (their former aliases) are used instead.
    """
    super(Renderer_seg, self).__init__()
    image_size = 500 if high_resolution else 256
    # Distance matched to the 10-degree viewing angle set below: 1/tan(pi/18).
    self.camera_distance = 1 / math.tan(math.pi / 18)  # 3.464 # 1.732
    self.elevation = 0
    self.azimuth = 0
    self.register_buffer('faces', faces)
    self.renderer = nr.Renderer(image_size=image_size, camera_mode='look_at',
                                light_intensity_ambient=0.72, light_intensity_directional=0.3,
                                light_color_ambient=[1, 1, 1], light_color_directional=[1, 1, 1],
                                light_direction=[0, 1, 0]).cuda()
    texture_size = 4
    # NOTE(review): batch size 64 is hard-coded here — confirm it matches callers.
    textures = torch.ones(64, self.faces.shape[1], texture_size, texture_size, texture_size, 3, dtype=torch.float32)
    # Uniform light-blue texture. 'light_blue' 193,210,240; 0.65098039, 0.74117647, 0.85882353; 'light_pink': [.9, .7, .7]
    textures[:, :, :, :, :] = torch.from_numpy(np.array([0.7098039, 0.84117647, 0.95882353])).float()
    self.register_buffer('textures', textures.cuda())
    self.renderer.viewing_angle = 10
    self.renderer.eye = nr.get_points_from_angles(self.camera_distance, self.elevation, self.azimuth)
    # self.renderer.camera_direction = [0,0,1]
    if test:
        v_template = torch.from_numpy(np_v_template).float().cuda()
        projected_seg = self.forward(v_template)
        projected_seg = (projected_seg.cpu().numpy()[0] * 255).astype(np.uint8)
        cv2.imwrite('./cut_{}_{}_{}.png'.format(self.azimuth, self.elevation, self.camera_distance), projected_seg)
    if test_camera:
        v_template = torch.from_numpy(np_v_template).float().cuda()
        # The loops deliberately rebind the instance attributes so each render
        # (and each output filename) uses the swept camera parameters.
        for self.camera_distance in range(1, 3):
            for self.elevation in range(0, 90, 30):
                for self.azimuth in range(0, 360, 30):
                    self.renderer.eye = nr.get_points_from_angles(self.camera_distance, self.elevation, self.azimuth)
                    projected_seg = self.forward(v_template)
                    # bool/int builtins replace the removed np.bool/np.int aliases.
                    projected_seg = projected_seg.cpu().numpy()[0].astype(bool).astype(int) * 255
                    cv2.imwrite('./cut_{}_{}_{}.png'.format(self.azimuth, self.elevation, self.camera_distance), projected_seg)
示例9: __call__
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def __call__(self, batch_size):
    """Feature-magnitude loss under a random background and random viewpoints."""
    xp = self.xp
    # One random grey level shared by the whole batch as the background color.
    shade = xp.random.uniform(0., 1.)
    self.renderer.background_color = xp.ones((batch_size, 3), 'float32') * shade
    # Random camera positions: noisy distance, bounded elevation, free azimuth.
    noise = xp.random.normal(size=batch_size).astype('float32')
    self.renderer.eye = neural_renderer.get_points_from_angles(
        distance=xp.ones(batch_size, 'float32') * self.camera_distance + noise * self.camera_distance_noise,
        elevation=xp.random.uniform(self.elevation_min, self.elevation_max, batch_size).astype('float32'),
        azimuth=xp.random.uniform(0, 360, size=batch_size).astype('float32'))
    # Maximize mean squared feature magnitude (negated for gradient descent),
    # regularized by the variance of edge lengths.
    images = self.renderer.render(*self.mesh.get_batch(batch_size))
    features = self.extract_feature(images)
    loss = -cf.sum(cf.square(features)) / features.size
    loss += self.lambda_length * get_var_line_length_loss(self.mesh.vertices, self.mesh.faces)
    return loss
示例10: __call__
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def __call__(self, batch_size):
    """Style-transfer loss from random viewpoints under a randomly rotated light.

    Renders the mesh with random camera positions and a directional light on a
    fixed 30-degree cone around the y-axis, then combines style, content, and
    total-variation losses. Restores the default light direction before
    returning so later renders are unaffected.
    """
    xp = self.xp
    # Random viewpoints: noisy distance, bounded elevation, free azimuth.
    self.renderer.eye = neural_renderer.get_points_from_angles(
        distance=(
            xp.ones(batch_size, 'float32') * self.camera_distance +
            xp.random.normal(size=batch_size).astype('float32') * self.camera_distance_noise),
        elevation=xp.random.uniform(self.elevation_min, self.elevation_max, batch_size).astype('float32'),
        azimuth=xp.random.uniform(0, 360, size=batch_size).astype('float32'))
    # Random lighting direction on a cone tilted 30 degrees from the y-axis.
    # Hoisted: sin/cos of the fixed tilt were previously recomputed per component.
    angles = xp.random.uniform(0, 360, size=batch_size).astype('float32')
    tilt = xp.radians(30).astype('float32')
    sin_tilt = xp.sin(tilt)
    cos_tilt = xp.cos(tilt)
    y = xp.ones(batch_size, 'float32') * cos_tilt
    x = xp.ones(batch_size, 'float32') * sin_tilt * xp.sin(xp.radians(angles))
    z = xp.ones(batch_size, 'float32') * sin_tilt * xp.cos(xp.radians(angles))
    self.renderer.light_direction = xp.concatenate((x[:, None], y[:, None], z[:, None]), axis=1)
    # Render twice: shaded images for style features, silhouettes for masking.
    images = self.renderer.render(*self.mesh.get_batch(batch_size))
    masks = self.renderer.render_silhouettes(*self.mesh.get_batch(batch_size)[:2])
    features = self.extract_style_feature(images, masks)
    loss_style = self.compute_style_loss(features)
    loss_content = self.compute_content_loss()
    loss_tv = self.compute_tv_loss(images, masks)
    loss = self.lambda_style * loss_style + self.lambda_content * loss_content + self.lambda_tv * loss_tv
    # Restore the default lighting direction.
    self.renderer.light_direction = [0, 1, 0]
    return loss
示例11: run
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def run():
    """Load a mesh, render a 360-degree turntable, and assemble the frames into a GIF."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename_input', type=str, default='./examples/data/teapot.obj')
    parser.add_argument('-o', '--filename_output', type=str, default='./examples/data/example1.gif')
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    out_dir = os.path.dirname(args.filename_output)

    # Rendering settings.
    camera_distance = 2.732
    elevation = 30
    texture_size = 2

    # Load the mesh and add a leading batch axis of size one.
    vertices, faces = neural_renderer.load_obj(args.filename_input)
    vertices = vertices[None, :, :]  # [num_vertices, XYZ] -> [1, num_vertices, XYZ]
    faces = faces[None, :, :]  # [num_faces, 3] -> [1, num_faces, 3]
    # Plain white texture cube per face: [1, num_faces, ts, ts, ts, RGB].
    textures = np.ones((1, faces.shape[1], texture_size, texture_size, texture_size, 3), 'float32')

    # Move everything to the selected GPU.
    chainer.cuda.get_device_from_id(args.gpu).use()
    vertices, faces, textures = (chainer.cuda.to_gpu(a) for a in (vertices, faces, textures))

    renderer = neural_renderer.Renderer()
    # One frame every 4 degrees of azimuth.
    progress = tqdm.tqdm(range(0, 360, 4))
    for frame_idx, azimuth in enumerate(progress):
        progress.set_description('Drawing')
        renderer.eye = neural_renderer.get_points_from_angles(camera_distance, elevation, azimuth)
        batch = renderer.render(vertices, faces, textures)  # [1, RGB, H, W]
        frame = batch.data.get()[0].transpose((1, 2, 0))  # [H, W, RGB]
        scipy.misc.toimage(frame, cmin=0, cmax=1).save('%s/_tmp_%04d.png' % (out_dir, frame_idx))

    # Assemble the GIF with ImageMagick, then delete the temporary frames.
    options = '-delay 8 -loop 0 -layers optimize'
    subprocess.call('convert %s %s/_tmp_*.png %s' % (options, out_dir, args.filename_output), shell=True)
    for filename in glob.glob('%s/_tmp_*.png' % out_dir):
        os.remove(filename)
示例12: run
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def run():
    """Reconstruct a mesh from one RGBA image and render it from 16 azimuths."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-eid', '--experiment_id', type=str)
    parser.add_argument('-d', '--directory', type=str, default=DIRECTORY)
    parser.add_argument('-i', '--input_image', type=str)
    parser.add_argument('-oi', '--output_image', type=str)
    parser.add_argument('-oo', '--output_obj', type=str)
    parser.add_argument('-s', '--seed', type=int, default=RANDOM_SEED)
    parser.add_argument('-g', '--gpu', type=int, default=GPU)
    args = parser.parse_args()
    directory_output = os.path.join(args.directory, args.experiment_id)

    # Seed every RNG (Python, NumPy, CuPy) and select the GPU.
    random.seed(args.seed)
    np.random.seed(args.seed)
    cp.random.seed(args.seed)
    chainer.cuda.get_device(args.gpu).use()

    # Load the input; it must be an RGBA image, normalized to [0, 1].
    rgba = skimage.io.imread(args.input_image).astype('float32') / 255
    if rgba.ndim != 3 or rgba.shape[-1] != 4:
        raise Exception('Input must be a RGBA image.')
    batch_in = chainer.cuda.to_gpu(rgba.transpose((2, 0, 1))[None, :, :, :])

    # Restore the trained model weights.
    model = models.Model()
    model.to_gpu()
    chainer.serializers.load_npz(os.path.join(directory_output, 'model.npz'), model)

    # Reconstruct and save the mesh as .obj.
    vertices, faces = model.reconstruct(batch_in)
    neural_renderer.save_obj(args.output_obj, vertices.data.get()[0], faces.get()[0])

    # Render the reconstruction from 16 evenly spaced azimuths and tile the frames.
    ones = chainer.cuda.to_gpu(np.ones((16,), 'float32'))
    distances = 2.732 * ones
    elevations = 30. * ones
    azimuths = chainer.cuda.to_gpu(np.arange(0, 360, 360. / 16.).astype('float32')) * ones
    viewpoints = neural_renderer.get_points_from_angles(distances, elevations, azimuths)
    images_out = model.reconstruct_and_render(chainer.functions.tile(batch_in, (16, 1, 1, 1)), viewpoints)
    tiled = tile_images(images_out.data.get())
    tiled = (tiled * 255).clip(0, 255).astype('uint8')
    skimage.io.imsave(args.output_image, tiled)
示例13: run
# Required module: import neural_renderer
# Target API: neural_renderer.get_points_from_angles
def run():
    """Run 3D DeepDream on a mesh, then render the result as a turntable GIF."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--filename_obj', type=str)
    parser.add_argument('-o', '--filename_output', type=str)
    parser.add_argument('-d', '--output_directory', type=str)
    parser.add_argument('-al', '--adam_lr', type=float, default=0.01)
    parser.add_argument('-ab1', '--adam_beta1', type=float, default=0.9)
    parser.add_argument('-bs', '--batch_size', type=int, default=4)
    parser.add_argument('-ni', '--num_iteration', type=int, default=1000)
    parser.add_argument('-cd', '--camera_distance', type=float, default=2.5)
    parser.add_argument('-ib', '--init_bias', type=str, default='(0,0,0)')
    parser.add_argument('-g', '--gpu', type=int, default=0)
    args = parser.parse_args()
    # Parse the "(x,y,z)" string into a float tuple.
    args.init_bias = tuple(float(v) for v in args.init_bias[1:-1].split(','))

    # Ensure the output directory exists.
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    # Fix seeds and select the GPU.
    chainer.cuda.get_device_from_id(args.gpu).use()
    cp.random.seed(0)
    np.random.seed(0)

    # Build the scene and its optimizer.
    model = deep_dream_3d.DeepDreamModel(
        args.filename_obj,
        camera_distance=args.camera_distance,
        init_bias=args.init_bias)
    model.to_gpu()
    optimizer = neural_renderer.Adam(alpha=args.adam_lr, beta1=args.adam_beta1)
    optimizer.setup(model)

    # Optimization loop.
    progress = tqdm.tqdm(range(args.num_iteration))
    for _ in progress:
        optimizer.target.cleargrads()
        loss = model(args.batch_size)
        loss.backward()
        optimizer.update()
        progress.set_description('Optimizing. Loss %.4f' % loss.data)

    # Turntable rendering over a white background, one frame per 4 degrees.
    model.renderer.background_color = (1, 1, 1)
    progress = tqdm.tqdm(range(0, 360, 4))
    for frame_idx, azimuth in enumerate(progress):
        progress.set_description('Drawing')
        model.renderer.eye = neural_renderer.get_points_from_angles(2.732, 30, azimuth)
        rendered = model.renderer.render(*model.mesh.get_batch(1))
        frame = rendered.data.get()[0].transpose((1, 2, 0))
        scipy.misc.toimage(frame, cmin=0, cmax=1).save('%s/_tmp_%04d.png' % (args.output_directory, frame_idx))
    make_gif(args.output_directory, args.filename_output)