This article collects typical usage examples of the Python method lasagne.nonlinearities.identity. If you are wondering what nonlinearities.identity does or how to use it, the curated examples below should help; you can also explore the module it belongs to, lasagne.nonlinearities.
The following presents 15 code examples of nonlinearities.identity, drawn from open-source projects and sorted by popularity by default.
Example 1: __init__
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
    self.vertex_shape = incoming_vertex.output_shape
    self.edge_shape = incoming_edge.output_shape
    self.input_shape = incoming_vertex.output_shape
    incomings = [incoming_vertex, incoming_edge]
    self.vertex_incoming_index = 0
    self.edge_incoming_index = 1
    super(GraphConvLayer, self).__init__(incomings, **kwargs)

    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.filter_size = filter_size
    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_filters,), name="b", regularizable=False)
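The None-to-identity fallback above is the standard Lasagne convention. A minimal hedged sketch with a stock DenseLayer, which follows the same rule:

# Sketch only: passing nonlinearity=None stores nonlinearities.identity,
# so the layer computes a purely affine transform.
from lasagne import layers, nonlinearities

l_in = layers.InputLayer((None, 16))
l_lin = layers.DenseLayer(l_in, 8, nonlinearity=None)
assert l_lin.nonlinearity is nonlinearities.identity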
Example 2: __init__
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
             b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
    super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)

    num_inputs = int(np.prod(self.input_shape[1:]))

    self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
    if b_h is None:
        self.b_h = None
    else:
        self.b_h = self.add_param(b_h, (num_inputs,), name="b_h", regularizable=False)
    self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
    if b_t is None:
        self.b_t = None
    else:
        self.b_t = self.add_param(b_t, (num_inputs,), name="b_t", regularizable=False)
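The constructor only allocates the transform (W_h) and transform-gate (W_t) parameters; the forward pass is not part of this excerpt. For context, a highway layer conventionally combines them as in the hedged sketch below (h is the transform, t the gate):

# h = nonlinearity(x.dot(W_h) + b_h)    # transform path
# t = sigmoid(x.dot(W_t) + b_t)         # transform gate
# y = t * h + (1 - t) * x               # gated mix of transform and input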
Example 3: initialization
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def initialization(name):
    initializations = {'sigmoid': init.HeNormal(gain=1.0),
                       'softmax': init.HeNormal(gain=1.0),
                       'elu': init.HeNormal(gain=1.0),
                       'relu': init.HeNormal(gain=math.sqrt(2)),
                       'lrelu': init.HeNormal(gain=math.sqrt(2 / (1 + 0.01 ** 2))),
                       'vlrelu': init.HeNormal(gain=math.sqrt(2 / (1 + 0.33 ** 2))),
                       'rectify': init.HeNormal(gain=math.sqrt(2)),
                       'identity': init.HeNormal(gain=math.sqrt(2))}
    return initializations[name]
#################### BASELINE MODEL #####################
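Example 13 below calls this helper as W=initialization(cfg.NONLINEARITY). A minimal hedged sketch of using it standalone (the layer and input shape here are made up):

import math
from lasagne import init, layers

net = layers.InputLayer((None, 256))
net = layers.DenseLayer(net, 128, W=initialization('relu'))  # HeNormal with gain sqrt(2)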
Example 4: test_workflow
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def test_workflow(self):
    inp = InputLayer(self.x.shape)
    out = DenseLayer(inp, 1, W=NormalSpec(sd=LognormalSpec()), nonlinearity=to.identity)
    out = DenseLayer(out, 1, W=NormalSpec(sd=LognormalSpec()), nonlinearity=to.identity)
    assert out.root is inp
    with out:
        pm.Normal('y', mu=get_output(out),
                  sd=self.sd,
                  observed=self.y)
Example 5: __init__
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def __init__(self, W_in=init.GlorotUniform(), W_hid=init.GlorotUniform(),
             W_cell=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.sigmoid):
    self.W_in = W_in
    self.W_hid = W_hid
    # Don't store a cell weight vector when cell is None
    if W_cell is not None:
        self.W_cell = W_cell
    self.b = b
    # For the nonlinearity, if None is supplied, use identity
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity
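This constructor mirrors lasagne.layers.Gate, which applies the same None-to-identity rule. A hedged usage sketch wiring such a gate into a recurrent layer (shapes made up):

from lasagne import layers

# With nonlinearity=None the gate applies no squashing at all (identity).
l_in = layers.InputLayer((None, 20, 8))  # (batch, time, features)
lstm = layers.LSTMLayer(l_in, num_units=16,
                        ingate=layers.Gate(nonlinearity=None))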
Example 6: nonlinearity
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def nonlinearity(name):
    nonlinearities = {'rectify': nl.rectify,
                      'relu': nl.rectify,
                      'lrelu': nl.LeakyRectify(0.01),
                      'vlrelu': nl.LeakyRectify(0.33),
                      'elu': nl.elu,
                      'softmax': nl.softmax,
                      'sigmoid': nl.sigmoid,
                      'identity': nl.identity}
    return nonlinearities[name]
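A hedged usage sketch of the lookup (assumes the snippet's alias import lasagne.nonlinearities as nl):

act = nonlinearity('lrelu')      # -> nl.LeakyRectify(0.01)
noop = nonlinearity('identity')  # -> nl.identity, i.e. f(x) = x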
Example 7: __init__
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, **kwargs):
    super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)
    self.num_units = num_units

    if num_leading_axes >= len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "leaving no trailing axes for the dot product." %
            (num_leading_axes, len(self.input_shape)))
    elif num_leading_axes < -len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "requesting more trailing axes than there are input "
            "dimensions." % (num_leading_axes, len(self.input_shape)))
    self.num_leading_axes = num_leading_axes

    if any(s is None for s in self.input_shape[num_leading_axes:]):
        raise ValueError(
            "A DenseLayer requires a fixed input shape (except for "
            "the leading axes). Got %r for num_leading_axes=%d." %
            (self.input_shape, self.num_leading_axes))
    num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

    self.W = self.add_param(W, (num_inputs, num_units), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_units,), name="b",
                                regularizable=False)

    if args.regL1 is True:
        self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                 (num_inputs, num_units), name="L1")
    if args.regL2 is True:
        self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                 (num_inputs, num_units), name="L2")
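Note that self.L1 and self.L2 are full (num_inputs, num_units) matrices, i.e. one regularization strength per weight. A hypothetical sketch of how such per-weight penalties could enter a loss (not shown in the excerpt; layer and base_loss are assumed):

import theano.tensor as T

# reg = sum(|W| * L1) + sum(W^2 * L2), with elementwise strengths
reg = T.sum(T.abs_(layer.W) * layer.L1) + T.sum(T.sqr(layer.W) * layer.L2)
loss = base_loss + reg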
Example 8: __init__
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def __init__(self, incoming, filter_size,
             init_std=5., W_logstd=None,
             stride=1, pad=0,
             nonlinearity=None,
             convolution=conv1d_mc0, **kwargs):
    super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
    # Alternative convolution implementations kept from the source:
    # convolution = conv1d_gpucorrmm_mc0
    # convolution = conv.conv1d_mc0
    # convolution = T.nnet.conv2d
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity
    self.filter_size = as_tuple(filter_size, 1)
    self.stride = as_tuple(stride, 1)
    self.convolution = convolution
    # if self.filter_size[0] % 2 == 0:
    #     raise NotImplementedError(
    #         'GaussianConv1dLayer requires odd filter size.')
    if pad == 'valid':
        self.pad = (0,)
    elif pad in ('full', 'same', 'strictsame'):
        self.pad = pad
    else:
        self.pad = as_tuple(pad, 1, int)
    if W_logstd is None:
        init_std = np.asarray(init_std, dtype=floatX)
        W_logstd = init.Constant(np.log(init_std))
    self.num_input_channels = self.input_shape[1]
    # self.num_filters = self.num_input_channels
    self.W_logstd = self.add_param(W_logstd,
                                   (self.num_input_channels,),
                                   name="W_logstd",
                                   regularizable=False)
    self.W = self.make_gaussian_filter()
Example 9: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var')
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    mask_map = layer
    layer = batch_norm(layers.Conv2DLayer(layer, 10, filter_size=(1,1), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 1000, filter_size=(76,76), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 10, filter_size=(76,76), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, nonlinearity=identity)
    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
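A hedged sketch of consuming the returned tuple to compile a forward pass (PS and the Theano/Lasagne imports are assumed from the source project):

import theano

network, input_var, mask_var, output_var = build_autoencoder_network()
# Map input images to the internal mask and the flattened reconstruction.
forward = theano.function([input_var], [mask_var, output_var])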
Example 10: batch_nmsp
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def batch_nmsp(layer, beta=init.Constant(-3.0), **kwargs):
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        layer.nonlinearity = nonlinearities.identity
    if hasattr(layer, 'b') and layer.b is not None:
        del layer.params[layer.b]
        layer.b = None
    layer = BatchNormSparseLayer(layer, beta=beta, **kwargs)
    if nonlinearity is not None:
        from lasagne.layers import NonlinearityLayer
        layer = NonlinearityLayer(layer, nonlinearity)
    return layer
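This follows the same recipe as lasagne.layers.batch_norm: swap the wrapped layer's nonlinearity for identity, drop its bias (the normalization's beta replaces it), insert the normalization layer, then re-apply the original nonlinearity on top. A hedged usage sketch (BatchNormSparseLayer and l_in are project-specific assumptions):

from lasagne import init, layers, nonlinearities

dense = layers.DenseLayer(l_in, 100, nonlinearity=nonlinearities.rectify)
dense = batch_nmsp(dense)  # normalization applied pre-activation, rectify re-attached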
Example 11: __init__
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def __init__(self, incoming, num_filters, num_rot,
             filter_size, stride=(1, 1),
             border_mode="valid", untie_biases=False,
             W=init.GlorotUniform(), b=init.Constant(0.),
             nonlinearity=nonlinearities.rectify,
             convolution=T.nnet.conv2d, **kwargs):
    super(RotConv, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity

    self.num_filters = num_filters
    self.num_rot = num_rot
    self.filter_size = as_tuple(filter_size, 2)
    self.stride = as_tuple(stride, 2)
    self.border_mode = border_mode
    self.untie_biases = untie_biases
    self.convolution = convolution

    if self.border_mode not in ['valid', 'full', 'same']:
        raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)

    self.W = self.add_param(W, self.get_W_shape(), name="W")
    if b is None:
        self.b = None
    else:
        if self.untie_biases:
            biases_shape = (num_filters, self.output_shape[2],
                            self.output_shape[3])
        else:
            biases_shape = (num_filters,)
        self.b = self.add_param(b, biases_shape, name="b",
                                regularizable=False)
Example 12: __init__
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def __init__(self, incoming, nonlinearity=nonlinearities.rectify,
             **kwargs):
    super(NonlinearityLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)
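With nonlinearity=None this layer reduces to an identity passthrough; a one-line hedged sketch (some_layer assumed):

# f(x) = x: useful as a structural placeholder in a layer graph.
noop = NonlinearityLayer(some_layer, nonlinearity=None)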
Example 13: build_resnet_model
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def build_resnet_model():
    log.i('BUILDING RESNET MODEL...')

    # Random Seed
    lasagne_random.set_rng(cfg.getRandomState())

    # Input layer for images
    net = l.InputLayer((None, cfg.IM_DIM, cfg.IM_SIZE[1], cfg.IM_SIZE[0]))

    # First Convolution
    net = l.Conv2DLayer(net,
                        num_filters=cfg.FILTERS[0],
                        filter_size=cfg.KERNEL_SIZES[0],
                        pad='same',
                        W=initialization(cfg.NONLINEARITY),
                        nonlinearity=None)
    log.i(("\tFIRST CONV OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))

    # Residual Stacks
    for i in range(0, len(cfg.FILTERS)):
        net = resblock(net, filters=cfg.FILTERS[i] * cfg.RESNET_K, kernel_size=cfg.KERNEL_SIZES[i], stride=2, num_groups=cfg.NUM_OF_GROUPS[i])
        for _ in range(1, cfg.RESNET_N):
            net = resblock(net, filters=cfg.FILTERS[i] * cfg.RESNET_K, kernel_size=cfg.KERNEL_SIZES[i], num_groups=cfg.NUM_OF_GROUPS[i], preactivated=False)
        log.i(("\tRES STACK", i + 1, "OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))

    # Post Activation
    net = batch_norm(net)
    net = l.NonlinearityLayer(net, nonlinearity=nonlinearity(cfg.NONLINEARITY))

    # Pooling
    net = l.GlobalPoolLayer(net)
    log.i(("\tFINAL POOLING SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))

    # Classification Layer
    net = l.DenseLayer(net, len(cfg.CLASSES), nonlinearity=nonlinearity('identity'), W=initialization('identity'))
    net = l.NonlinearityLayer(net, nonlinearity=nonlinearity('softmax'))
    log.i(("\tFINAL NET OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net))))
    log.i("...DONE!")

    # Model stats
    log.i(("MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"))
    log.i(("MODEL HAS", l.count_params(net), "PARAMS"))
    return net
#################### RASPBERRY PI NET ####################
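Note how the identity entries are used here: the final DenseLayer stays linear (nonlinearity('identity')), and softmax is applied by a separate NonlinearityLayer. A hedged inference sketch (cfg, log, and resblock come from the source project):

import theano
import lasagne.layers as l

net = build_resnet_model()
predict = theano.function([l.get_all_layers(net)[0].input_var],
                          l.get_output(net, deterministic=True))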
Example 14: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var')
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 180, filter_size=(1,1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 120, filter_size=(1,1), nonlinearity=rectify, name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(prely, 120, filter_size=(1,1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=99.9, alpha=0.5, beta=init.Constant(0.5), tight=50.0, name="mask_map")
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1,1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])
    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var
Example 15: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Alternatively: from lasagne.nonlinearities import identity [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var')
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad')
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))

    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify))
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"))
    maskm = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None)
    mask_map = SoftThresPerc(mask_rep, perc=98.4, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map")
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1,1), nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad')
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1,1), nonlinearity=rectify), name="global_feature")
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])
    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)
    return network, input_var, mask_var, output_var