This page collects typical usage examples of chainer.links.VGG16Layers in Python. If you are unsure what links.VGG16Layers does, how to call it, or what it looks like in practice, the curated examples below may help. You can also look further into its containing module, chainer.links.
Three code examples of links.VGG16Layers are shown below, ordered by popularity by default.
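Before the examples, here is a minimal standalone sketch (not taken from any of them) of instantiating chainer.links.VGG16Layers and extracting features with it; the dummy zero image and the choice of the 'fc7' layer are illustrative assumptions.

import numpy as np
import chainer
import chainer.links as L

# The pretrained ImageNet weights are downloaded and converted on first use
# (pretrained_model='auto' is the default).
model = L.VGG16Layers()

# Dummy HWC uint8 image standing in for a real photo.
image = np.zeros((224, 224, 3), dtype=np.uint8)

# extract() resizes and mean-subtracts internally and returns a dict that maps
# layer names to Variables.
with chainer.using_config('train', False), chainer.no_backprop_mode():
    features = model.extract([image], layers=['fc7'])
print(features['fc7'].shape)  # (1, 4096)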
Example 1: __init__

# Required import: from chainer import links  [aliased here as L]
# Or: from chainer.links import VGG16Layers
import chainer
import chainer.links as L

class VGG16FeatureExtractor(chainer.Chain):
    def __init__(self):
        super(VGG16FeatureExtractor, self).__init__()
        with self.init_scope():
            # Pretrained VGG-16 whose activations are used as image features.
            self.cnn = L.VGG16Layers()
        self.cnn_layer_name = 'fc7'
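For context, a hypothetical helper (not part of the original example) showing how the wrapped link and the stored layer name might be used together in a forward pass:

def extract(self, x):
    # x: preprocessed float32 batch of shape (N, 3, 224, 224).
    # Calling the VGG-16 link with a layers list returns a dict of activations.
    activations = self.cnn(x, layers=[self.cnn_layer_name])
    return activations[self.cnn_layer_name]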
Example 2: __init__

# Required import: from chainer import links  [aliased]
# Or: from chainer.links import VGG16Layers
def __init__(self, alpha=(0, 0, 1, 1), beta=(1, 1, 1, 1)):
    # Tuples avoid the mutable-default-argument pitfall of the original lists.
    from chainer.links import VGG16Layers
    print("load model... vgg_chainer")
    # Pretrained VGG-16 kept as the wrapped feature extractor.
    self.model = VGG16Layers()
    self.alpha = list(alpha)
    self.beta = list(beta)
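The alpha and beta arguments look like per-layer weights over four VGG blocks, but how they are applied is not shown on this page. As a hedged sketch (the layer list is an assumption, not taken from the original class), the wrapped model can return several intermediate activations in a single forward pass:

import chainer

def get_layer_outputs(self, x, layer_names=('conv1_2', 'conv2_2', 'conv3_3', 'conv4_3')):
    # One forward pass through VGG-16, returning the requested activations in order.
    with chainer.using_config('train', False):
        outputs = self.model(x, layers=list(layer_names))
    return [outputs[name] for name in layer_names]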
Example 3: __init__

# Required import: from chainer import links  [aliased here as cl]
# Or: from chainer.links import VGG16Layers
import chainer
import chainer.links as cl
import neural_renderer
import scipy.misc  # imread/imresize require SciPy < 1.2 with Pillow installed

class StyleTransferModel(chainer.Chain):
    def __init__(
        self,
        filename_mesh,
        filename_style,
        texture_size=4,
        camera_distance=2.732,
        camera_distance_noise=0.1,
        elevation_min=20,
        elevation_max=40,
        lr_vertices=0.01,
        lr_textures=1.0,
        lambda_style=1,
        lambda_content=2e9,
        lambda_tv=1e7,
        image_size=224,
    ):
        super(StyleTransferModel, self).__init__()
        self.image_size = image_size
        self.camera_distance = camera_distance
        self.camera_distance_noise = camera_distance_noise
        self.elevation_min = elevation_min
        self.elevation_max = elevation_max
        self.lambda_style = lambda_style
        self.lambda_content = lambda_content
        self.lambda_tv = lambda_tv

        # load feature extractor (pretrained VGG-16)
        self.vgg16 = cl.VGG16Layers()

        # load the reference (style) image and bring it to NCHW float32 in [0, 1]
        reference_image = scipy.misc.imread(filename_style)
        reference_image = scipy.misc.imresize(reference_image, (image_size, image_size))
        reference_image = reference_image.astype('float32') / 255.
        reference_image = reference_image[:, :, :3].transpose((2, 0, 1))[None, :, :, :]
        reference_image = self.xp.array(reference_image)

        # precompute the style features of the reference image (no gradients needed)
        with chainer.no_backprop_mode():
            features_ref = [f.data for f in self.extract_style_feature(reference_image)]
        self.features_ref = features_ref
        self.background_color = reference_image.mean((0, 2, 3))

        with self.init_scope():
            # load the .obj mesh with learnable vertices and textures
            self.mesh = neural_renderer.Mesh(filename_mesh, texture_size)
            self.mesh.set_lr(lr_vertices, lr_textures)
            self.vertices_original = self.xp.copy(self.mesh.vertices.data)

            # set up the differentiable renderer
            renderer = neural_renderer.Renderer()
            renderer.image_size = image_size
            renderer.background_color = self.background_color
            self.renderer = renderer
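extract_style_feature is defined elsewhere in the original class and is not reproduced on this page. A common formulation of style features, shown here only as a sketch under that assumption (the layer set and normalization are guesses, not the author's code), computes Gram matrices of a few VGG-16 convolution activations:

import chainer.functions as F

STYLE_LAYERS = ['conv1_2', 'conv2_2', 'conv3_3', 'conv4_3']  # assumed layer set

def gram_matrix(feature):
    # feature: (N, C, H, W) -> (N, C, C) Gram matrix, normalized by C * H * W.
    n, c, h, w = feature.shape
    flat = F.reshape(feature, (n, c, h * w))
    return F.batch_matmul(flat, flat, transb=True) / (c * h * w)

def extract_style_feature_sketch(vgg16, images):
    # images: float32 batch already preprocessed for VGG-16 (BGR, mean-subtracted).
    features = vgg16(images, layers=STYLE_LAYERS)
    return [gram_matrix(features[name]) for name in STYLE_LAYERS]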