This article collects typical usage examples of `lasagne.layers.ElemwiseSumLayer` in Python. If you are wondering what `layers.ElemwiseSumLayer` is for, how to call it, or what real-world usage looks like, the curated code samples below should help. You can also explore the containing module, `lasagne.layers`, for further context.
The following 15 code examples of `layers.ElemwiseSumLayer` are shown, sorted by popularity by default.
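Before the collected examples, here is a minimal, self-contained sketch of what `ElemwiseSumLayer` does: it merges two or more layers of identical output shape by element-wise addition, optionally scaling each input by a coefficient. This is the primitive behind the residual connections in most of the examples below; the layer sizes and coefficients here are illustrative only.

from lasagne.layers import InputLayer, DenseLayer, ElemwiseSumLayer

# Two branches with identical output shapes (batch, 64)
l_in = InputLayer(shape=(None, 32))
l_a = DenseLayer(l_in, num_units=64)
l_b = DenseLayer(l_in, num_units=64)

# Element-wise sum; coeffs optionally scales each input before adding
l_sum = ElemwiseSumLayer([l_a, l_b], coeffs=[1.0, 0.5])
print(l_sum.output_shape)  # (None, 64)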
Example 1: residual_block
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def residual_block(resnet_in, num_styles=None, num_filters=None, filter_size=3, stride=1):
    if num_filters is None:
        num_filters = resnet_in.output_shape[1]
    conv1 = style_conv_block(resnet_in, num_styles, num_filters, filter_size, stride)
    conv2 = style_conv_block(conv1, num_styles, num_filters, filter_size, stride, linear)
    res_block = ElemwiseSumLayer([conv2, resnet_in])
    return res_block
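This snippet assumes that `style_conv_block` (a styled convolution helper defined elsewhere in the same project) and the `linear` nonlinearity are in scope. A plausible import block, with `style_conv_block` treated as project-local, would be:

# Assumed imports for the snippet above; style_conv_block is project-local.
from lasagne.layers import ElemwiseSumLayer
from lasagne.nonlinearities import linear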
Example 2: residual_block
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def residual_block(l, increase_dim=False, projection=True, first=False):
    """
    Create a residual learning building block with two stacked 3x3 conv layers, as in the paper
    'Identity Mappings in Deep Residual Networks', Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027)
    """
    input_num_filters = l.output_shape[1]
    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters
    if first:
        # hacky solution to keep layers correct
        bn_pre_relu = l
    else:
        # contains the BN -> ReLU portion, steps 1 to 2
        bn_pre_conv = BatchNormLayer(l)
        bn_pre_relu = NonlinearityLayer(bn_pre_conv, rectify)
    # contains the weight -> BN -> ReLU portion, steps 3 to 5
    conv_1 = batch_norm(ConvLayer(bn_pre_relu, num_filters=out_num_filters, filter_size=(3, 3), stride=first_stride,
                                  nonlinearity=rectify, pad='same', W=he_norm))
    # contains the last weight portion, step 6
    conv_2 = ConvLayer(conv_1, num_filters=out_num_filters, filter_size=(3, 3), stride=(1, 1), nonlinearity=None,
                       pad='same', W=he_norm)
    # add shortcut connections
    if increase_dim:
        # projection shortcut, as option B in paper
        projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1, 1), stride=(2, 2), nonlinearity=None,
                               pad='same', b=None)
        block = ElemwiseSumLayer([conv_2, projection])
    else:
        block = ElemwiseSumLayer([conv_2, l])
    return block
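The block above relies on several names the source file presumably imports elsewhere. A plausible import set follows, with the caveat that `he_norm` being `lasagne.init.HeNormal` is an inference on my part:

# Assumed imports for residual_block above; he_norm as HeNormal(gain='relu')
# is an inference, not taken from the source.
from lasagne.layers import (BatchNormLayer, NonlinearityLayer, ElemwiseSumLayer,
                            Conv2DLayer as ConvLayer, batch_norm)
from lasagne.nonlinearities import rectify
from lasagne.init import HeNormal

he_norm = HeNormal(gain='relu')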
Example 3: ResLayer
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def ResLayer(incoming, IB):
    return NL(ESL([IB, incoming]), elu)
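Several of the examples below use the short aliases `NL`, `ESL`, and `elu`. The source presumably defines them along these lines (an assumption, since its import section is not shown):

# Assumed alias definitions for the ESL/NL examples.
from lasagne.layers import ElemwiseSumLayer as ESL, NonlinearityLayer as NL
from lasagne.nonlinearities import elu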
Example 4: get_output_for
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def get_output_for(self, input, deterministic=False, **kwargs):
    if deterministic:
        return self.p * input
    else:
        return theano.ifelse.ifelse(
            T.lt(self._srng.uniform((1,), 0, 1)[0], self.p),
            input,
            T.zeros(input.shape)
        )
# def ResDrop(incoming, IB, p):
# return NL(ESL([IfElseDropLayer(IB,survival_p=p),incoming]),elu)
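For context, the `get_output_for` above only works inside a layer class that defines `self.p` (the survival probability) and `self._srng` (a random stream). A minimal reconstruction of such an `IfElseDropLayer`, inferred from the method rather than copied from the source, might be:

# Minimal reconstruction of the surrounding class; the constructor signature
# is an assumption based on the survival_p keyword used in the wrappers below.
from theano.sandbox.rng_mrg import MRG_RandomStreams
import lasagne

class IfElseDropLayer(lasagne.layers.Layer):
    def __init__(self, incoming, survival_p=0.5, **kwargs):
        super(IfElseDropLayer, self).__init__(incoming, **kwargs)
        self.p = survival_p
        self._srng = MRG_RandomStreams(seed=1234)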
Example 5: ResDrop
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def ResDrop(incoming, IB, p):
    return ESL([IfElseDropLayer(IB, survival_p=p), incoming])
Example 6: ResDropNoPre
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def ResDropNoPre(incoming, IB, p):
    return NL(ESL([IfElseDropLayer(IB, survival_p=p), incoming]), elu)
Example 7: ResDrop
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def ResDrop(incoming, IB, p):
    return NL(ESL([IfElseDropLayer(IB, survival_p=p), incoming]), elu)
Example 8: ResLayer
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def ResLayer(incoming, IB):
    return NL(ESL([IB, incoming]), elu)
# If-else Drop Layer, adapted from Christopher Beckham's recipe:
# https://github.com/Lasagne/Recipes/pull/67
Example 9: ResDrop
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def ResDrop(incoming, IB, p):
    return ESL([IfElseDropLayer(IB, survival_p=p), incoming])
# Non-preactivation stochastically-dropped Resnet Wrapper
Example 10: make_block
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def make_block(self, name, input, units):
    self.make_layer(name+'-A', input, units, alpha=0.1)
    # self.make_layer(name+'-B', self.last_layer(), units, alpha=1.0)
    return ElemwiseSumLayer([input, self.last_layer()]) if args.generator_residual else self.last_layer()
Example 11: MDBLOCK
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def MDBLOCK(incoming, num_filters, scales, name, nonlinearity):
    # First pre-activation branch: BN -> nonlinearity -> multiscale dilated conv (MDCL)
    conv1 = MDCL(NL(BN(incoming, name=name+'bnorm0'), nonlinearity),
                 num_filters, scales, name)
    # Second pre-activation branch: BN -> nonlinearity -> MDCL
    conv2 = MDCL(NL(BN(conv1, name=name+'bnorm1'), nonlinearity),
                 num_filters, scales, name+'2')
    # Residual sum with the input, then a final BN -> nonlinearity
    return NL(BN(ESL([incoming, conv2]), name=name+'bnorm2'), nonlinearity)
# Gaussian Sample Layer for VAE from Tencia Lee
Example 12: GL
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def GL(mu, ls):
    return [GSL(z_mu, z_ls) for z_mu, z_ls in zip(mu, ls)]
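`GSL` is the Gaussian sample layer referenced in the comment above (Tencia Lee's VAE recipe). A sketch of such a layer, reconstructed from that widely circulated recipe rather than taken from this source:

# Sketch of a Gaussian sample layer (reparameterization trick); details are
# assumptions based on the well-known Lasagne VAE recipe, not this source.
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import lasagne

class GaussianSampleLayer(lasagne.layers.MergeLayer):
    def __init__(self, mu, logsigma, rng=None, **kwargs):
        self.rng = rng if rng else MRG_RandomStreams(seed=1234)
        super(GaussianSampleLayer, self).__init__([mu, logsigma], **kwargs)

    def get_output_shape_for(self, input_shapes):
        return input_shapes[0]

    def get_output_for(self, inputs, deterministic=False, **kwargs):
        mu, logsigma = inputs
        if deterministic:
            return mu  # use the mean at test time
        return mu + T.exp(logsigma) * self.rng.normal(mu.shape)

GSL = GaussianSampleLayer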
# Convenience function to return a residual layer. It's not really that much more convenient than ESL'ing,
# but I like being able to see when I'm using Residual connections as opposed to Elemwise-sums
Example 13: ResLayer
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def ResLayer(incoming, IB, nonlinearity):
    return NL(ESL([IB, incoming]), nonlinearity)
# Inverse autoregressive flow layer
Example 14: get_output_for
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def get_output_for(self, input, **kwargs):
    if input.ndim > 2:
        input = input.flatten(2)
    activation = T.dot(input, self.W * self.weights_mask)
    if self.b is not None:
        activation = activation + self.b.dimshuffle('x', 0)
    return self.nonlinearity(activation)
# Stripped-down Direct Input masked layer: combine this with ESL and a masked layer to get a true DIML.
# Consider making this a simultaneous subclass of MaskedLayer and ElemwiseSumLayer for cleanliness.
# Adapted from M. Germain.
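The comment above describes combining this direct-input path with `ESL` and a masked hidden layer to form a full direct-input masked layer (DIML). A hypothetical wiring, where `MaskedDenseLayer` and `DirectInputLayer` stand in for the project's own (unshown) classes:

# Hypothetical wiring per the comment above; MaskedDenseLayer and
# DirectInputLayer are stand-ins for the project's own classes.
l_hidden = MaskedDenseLayer(l_in, num_units=500)   # masked hidden path
l_direct = DirectInputLayer(l_in, num_units=500)   # stripped-down direct path
l_diml = ESL([l_hidden, l_direct])                 # element-wise sum of both paths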
Example 15: build_vgg_action_cond_encoder_net
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import ElemwiseSumLayer [as alias]
def build_vgg_action_cond_encoder_net(input_shapes, levels=None, x1_c_dim=16, bilinear_type='share', tanh=False):
    x_shape, u_shape = input_shapes
    assert len(x_shape) == 3
    assert len(u_shape) == 1
    levels = levels or [3]
    levels = sorted(set(levels))

    X_var = T.tensor4('x')
    U_var = T.matrix('u')
    X_next_var = T.tensor4('x_next')

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')
    l_x_next = L.InputLayer(shape=(None,) + x_shape, input_var=X_next_var, name='x_next')

    xlevels_c_dim = OrderedDict()
    for level in range(levels[-1]+1):
        if level == 0:
            xlevels_c_dim[level] = x_shape[0]
        else:
            xlevels_c_dim[level] = x1_c_dim * 2**(level-1)

    # encoding
    l_xlevels = OrderedDict()
    for level in range(levels[-1]+1):
        if level == 0:
            l_xlevel = l_x
        else:
            l_xlevel = LT.VggEncodingLayer(l_xlevels[level-1], xlevels_c_dim[level], name='x%d' % level)
        l_xlevels[level] = l_xlevel

    # bilinear
    l_xlevels_next_pred = OrderedDict()
    for level in levels:
        l_xlevel = l_xlevels[level]
        l_xlevel_diff_pred = LT.create_bilinear_layer(l_xlevel, l_u, level, bilinear_type=bilinear_type, name='x%d_diff_pred' % level)
        l_xlevels_next_pred[level] = L.ElemwiseSumLayer([l_xlevel, l_xlevel_diff_pred],
                                                        name='x%d_next_pred' % level)
        if tanh:
            l_xlevels_next_pred[level].name += '_unconstrained'
            l_xlevels_next_pred[level] = L.NonlinearityLayer(l_xlevels_next_pred[level], nl.tanh,
                                                             name='x%d_next_pred' % level)

    pred_layers = OrderedDict([('x', l_xlevels[0]),
                               ('x_next', l_x_next),
                               ('x0_next', l_x_next),
                               ('x_next_pred', l_xlevels_next_pred[0]),
                               ])
    pred_layers.update([('x%d' % level, l_xlevels[level]) for level in l_xlevels.keys()])
    pred_layers.update([('x%d_next_pred' % level, l_xlevels_next_pred[level]) for level in l_xlevels_next_pred.keys()])
    return pred_layers
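A hedged usage sketch: the shapes below are illustrative, and `levels` includes 0 because the function reads `l_xlevels_next_pred[0]` when assembling `pred_layers`. The function also assumes `theano.tensor as T`, `lasagne.layers as L`, `lasagne.nonlinearities as nl`, `collections.OrderedDict`, and a project-local layer module imported as `LT`.

# Illustrative call; shapes are made up. x_shape is (channels, height, width)
# and u_shape is (action_dim,), per the asserts at the top of the function.
pred_layers = build_vgg_action_cond_encoder_net(
    input_shapes=((3, 32, 32), (4,)),
    levels=[0, 3],
    x1_c_dim=16,
    bilinear_type='share',
    tanh=True,
)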