This page collects typical usage examples of the Python function lasagne.nonlinearities.leaky_rectify. If you have been wondering what nonlinearities.leaky_rectify does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further uses of the module it belongs to, lasagne.nonlinearities.
The following presents 15 code examples of nonlinearities.leaky_rectify, ordered by popularity by default.
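For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) of the usual pattern: leaky_rectify is passed as the nonlinearity argument of a Lasagne layer. The layer sizes and variable names are purely illustrative.

import theano.tensor as T
import lasagne
from lasagne.nonlinearities import leaky_rectify

# leaky_rectify computes max(x, 0.01 * x) element-wise (default leakiness 0.01).
input_var = T.matrix('inputs')
net = lasagne.layers.InputLayer(shape=(None, 100), input_var=input_var)
net = lasagne.layers.DenseLayer(net, num_units=50, nonlinearity=leaky_rectify)
output = lasagne.layers.get_output(net)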
Example 1: build_network_from_ae
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = (layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 120, filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = (layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = (layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);
    return network, input_var, target_var;
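The function above only assembles the symbolic graph. One hypothetical way to turn the returned network, input_var and target_var into a training function -- assuming standard theano/lasagne imports and a multi-label binary cross-entropy objective, which matches the sigmoid output and integer-matrix targets -- is sketched below; none of this comes from the source project.

network, input_var, target_var = build_network_from_ae(classn=10)   # classn value is illustrative
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.9)
train_fn = theano.function([input_var, target_var], loss, updates=updates)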
Example 2: build_network_from_ae
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');
    target_var = T.imatrix('targets');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(4,4), stride=1, nonlinearity=leaky_rectify));
    layer = layers.MaxPool2DLayer(layer, pool_size=(3,3), stride=2);
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(20,20), stride=20, mode='average_inc_pad');
    network = layers.DenseLayer(layer, classn, nonlinearity=sigmoid);
    return network, input_var, target_var;
Author: SBU-BMI | Project: u24_lymphocyte | Lines: 28 | Source file: deep_conv_classification_alt51_luad10_luad10in20_brca10x1_heatmap.py
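Example 2 is identical to Example 1 except that every convolution is wrapped in lasagne.layers.batch_norm, a convenience function that strips the layer's bias and nonlinearity, appends a BatchNormLayer, and re-applies the nonlinearity on top. The sketch below shows roughly what one wrapped line expands to; net stands for any incoming layer and the filter count is illustrative.

from lasagne.layers import Conv2DLayer, BatchNormLayer, NonlinearityLayer
# batch_norm(Conv2DLayer(net, 64, (3,3), nonlinearity=leaky_rectify)) is roughly equivalent to:
conv = Conv2DLayer(net, 64, (3, 3), nonlinearity=None, b=None)   # linear convolution, bias removed
bn = BatchNormLayer(conv)                                        # normalize the pre-activations
out = NonlinearityLayer(bn, nonlinearity=leaky_rectify)          # apply leaky_rectify afterwards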
Example 3: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    # convolutional encoder
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    mask_map = layer;  # keep a handle on the last encoder feature map
    # bottleneck: 1x1 reduction followed by a full-map (76x76) convolution
    layer = batch_norm(layers.Conv2DLayer(layer, 10, filter_size=(1,1), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 1000, filter_size=(76,76), stride=1, nonlinearity=leaky_rectify));
    # deconvolutional decoder mirroring the encoder
    layer = batch_norm(layers.Deconv2DLayer(layer, 10, filter_size=(76,76), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, nonlinearity=leaky_rectify));
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, nonlinearity=identity);
    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);
    return network, input_var, mask_var, output_var;
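build_autoencoder_network returns symbolic variables rather than compiled functions. A hypothetical reconstruction objective -- assuming the network is trained to reproduce its flattened input, which is what the final ReshapeLayer suggests -- could be wired up as follows (a sketch only, with standard theano/lasagne imports assumed):

network, input_var, mask_var, output_var = build_autoencoder_network()
flat_input = input_var.flatten(2)                   # (batch, 3*PS*PS), matching ReshapeLayer(([0], -1))
recon_loss = lasagne.objectives.squared_error(output_var, flat_input).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(recon_loss, params, learning_rate=1e-3)
train_fn = theano.function([input_var], recon_loss, updates=updates)
mask_fn = theano.function([input_var], mask_var)    # inspect the intermediate feature map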
Example 4: test_LeakyRectifier
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def test_LeakyRectifier(self):
    nn = MLPR(layers=[N(ly.DenseLayer, units=24, nonlinearity=nl.leaky_rectify),
                      L("Linear")], n_iter=1)
    self._run(nn)
Example 5: conv_params
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def conv_params(num_filters, filter_size=(3, 3), border_mode='same',
                nonlinearity=leaky_rectify, W=init.Orthogonal(gain=1.0),
                b=init.Constant(0.05), untie_biases=True, **kwargs):
    args = {
        'num_filters': num_filters,
        'filter_size': filter_size,
        'border_mode': border_mode,
        'nonlinearity': nonlinearity,
        'W': W,
        'b': b,
        'untie_biases': untie_biases,
    }
    args.update(kwargs)
    return args
Example 6: dense_params
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def dense_params(num_units, nonlinearity=leaky_rectify, **kwargs):
    args = {
        'num_units': num_units,
        'nonlinearity': nonlinearity,
        'W': init.Orthogonal(1.0),
        'b': init.Constant(0.05),
    }
    args.update(kwargs)
    return args
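conv_params and dense_params (Examples 5 and 6) only assemble keyword-argument dictionaries; they are meant to be unpacked into layer constructors. A hypothetical illustration for dense_params, where incoming stands for any existing Lasagne layer:

hidden = lasagne.layers.DenseLayer(incoming, **dense_params(512))
readout = lasagne.layers.DenseLayer(hidden, **dense_params(10, nonlinearity=lasagne.nonlinearities.softmax))
# conv_params(...) is used the same way, with a convolutional layer class that
# accepts the 'border_mode' and 'untie_biases' keywords.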
Example 7: create_student_model
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def create_student_model(input_var):
    # create a small fully-connected network
    network = lasagne.layers.InputLayer((None, 2), input_var)
    network = lasagne.layers.DenseLayer(network, 256, nonlinearity=leaky_rectify)
    network = lasagne.layers.DenseLayer(network, 256, nonlinearity=leaky_rectify)
    network = lasagne.layers.DenseLayer(network, 1, nonlinearity=linear)
    return network
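The student model ends in a single linear output unit, which points to a regression target (for example, matching a teacher network's scalar output in a distillation setup). A hypothetical squared-error training setup, where every name other than create_student_model is illustrative:

input_var = T.matrix('inputs')
target_var = T.matrix('targets')                 # e.g. teacher outputs, shape (batch, 1)
student = create_student_model(input_var)
pred = lasagne.layers.get_output(student)
loss = lasagne.objectives.squared_error(pred, target_var).mean()
params = lasagne.layers.get_all_params(student, trainable=True)
train_fn = theano.function([input_var, target_var], loss,
                           updates=lasagne.updates.adam(loss, params))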
Example 8: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    # convolutional encoder trunk
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    # feature branch and soft-threshold mask branch
    featm = batch_norm(layers.Conv2DLayer(prely, 180, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 120, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm = batch_norm(layers.Conv2DLayer(prely, 120, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=99.9, alpha=0.5, beta=init.Constant(0.5), tight=50.0, name="mask_map");
    # gate the feature map with the mask (project-specific merge layer)
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder");
    # deconvolutional decoder for the masked features
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    # global context branch: pool, encode, then upsample back to full resolution
    glblf = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1,1), nonlinearity=rectify), name="global_feature");
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    # combine the local reconstruction with the global branch
    layer = layers.ElemwiseSumLayer([layer, glblf]);
    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);
    return network, input_var, mask_var, output_var;
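Examples 8 through 15 rely on two project-specific layers, SoftThresPerc and ChInnerProdMerge (defined elsewhere in the u24_lymphocyte code base), so they cannot be run from Lasagne alone. Assuming those layers are importable, a hypothetical inference-only compilation that exposes both the reconstruction and the learned mask would be:

network, input_var, mask_var, output_var = build_autoencoder_network()
forward_fn = theano.function([input_var], [output_var, mask_var])
# reconstruction, mask = forward_fn(batch)   # batch shaped (N, 3, PS, PS)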
Example 9: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=98.4, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map");
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder");
    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1,1), nonlinearity=rectify), name="global_feature");
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    layer = layers.ElemwiseSumLayer([layer, glblf]);
    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);
    return network, input_var, mask_var, output_var;
Example 10: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    featm = batch_norm(layers.Conv2DLayer(prely, 180, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 120, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm = batch_norm(layers.Conv2DLayer(prely, 120, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=99.9, alpha=0.5, beta=init.Constant(0.5), tight=110.0, name="mask_map");
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder");
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    glblf = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1,1), nonlinearity=rectify), name="global_feature");
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    layer = layers.ElemwiseSumLayer([layer, glblf]);
    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);
    return network, input_var, mask_var, output_var;
Example 11: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(1,1), stride=1, pad='same', nonlinearity=leaky_rectify));
    pool1 = layers.MaxPool2DLayer(layer, (2, 2), 2);
    layer = batch_norm(layers.Conv2DLayer(pool1, 240, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(1,1), stride=1, pad='same', nonlinearity=leaky_rectify));
    pool2 = layers.MaxPool2DLayer(layer, (2, 2), 2);
    layer = batch_norm(layers.Conv2DLayer(pool2, 640, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    featm = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=90.0, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map");
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder");
    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(1,1), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.InverseLayer(layer, pool2);
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(1,1), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.InverseLayer(layer, pool1);
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1,1), nonlinearity=rectify), name="global_feature");
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    layer = layers.ElemwiseSumLayer([layer, glblf]);
    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);
    return network, input_var, mask_var, output_var;
Example 12: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    featm = batch_norm(layers.Conv2DLayer(prely, 180, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 120, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=90.0, alpha=0.5, beta=init.Constant(0.1), tight=100.0, name="mask_map");
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder");
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    glblf = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(20,20), stride=20, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1,1), nonlinearity=rectify), name="global_feature");
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = layers.Upscale2DLayer(glblf, scale_factor=20);
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    layer = layers.ElemwiseSumLayer([layer, glblf]);
    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);
    return network, input_var, mask_var, output_var;
Example 13: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 140, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 160, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 180, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 220, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 360, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    featm = batch_norm(layers.Conv2DLayer(prely, 480, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 320, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm = batch_norm(layers.Conv2DLayer(prely, 320, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=1.00, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map");
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder");
    layer = batch_norm(layers.Deconv2DLayer(layer, 480, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 360, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 220, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 180, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 160, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 140, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1,1), nonlinearity=rectify), name="global_feature");
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    layer = layers.ElemwiseSumLayer([layer, glblf]);
    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);
    return network, input_var, mask_var, output_var;
Example 14: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    featm = batch_norm(layers.Conv2DLayer(prely, 180, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 120, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm = batch_norm(layers.Conv2DLayer(prely, 120, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=99.85, alpha=0.5, beta=init.Constant(0.5), tight=100.0, name="mask_map");
    encod = ChInnerProdMerge(feat_map, mask_map, name="encoder");
    layer = batch_norm(layers.Deconv2DLayer(encod, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    glblf = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(20,20), stride=20, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1,1), nonlinearity=rectify), name="global_feature");
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(24,24), stride=20, crop=(2,2), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 16, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 16, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 16, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 8, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 8, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 8, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    layer = layers.ElemwiseSumLayer([layer, glblf]);
    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);
    return network, input_var, mask_var, output_var;
Example 15: build_autoencoder_network
# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import leaky_rectify [as alias]
def build_autoencoder_network():
    input_var = T.tensor4('input_var');
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 80, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    featm = batch_norm(layers.Conv2DLayer(prely, 180, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 120, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm = batch_norm(layers.Conv2DLayer(prely, 120, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=99.9, alpha=0.5, beta=init.Constant(0.5), tight=100.0, name="mask_map");
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder");
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 80, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    glblf = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 3, filter_size=(1,1), nonlinearity=rectify), name="global_feature");
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    layer = layers.ElemwiseSumLayer([layer, glblf]);
    network = ReshapeLayer(layer, ([0], -1));
    layers.set_all_param_values(network, pickle.load(open(filename_model_ae, 'rb')));
    feat_var = lasagne.layers.get_output(feat_map, deterministic=True);
    mask_var = lasagne.layers.get_output(mask_map, deterministic=True);
    outp_var = lasagne.layers.get_output(network, deterministic=True);
    return network, input_var, feat_var, mask_var, outp_var;
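Unlike the earlier builders, Example 15 also loads pretrained weights (from filename_model_ae) and requests deterministic outputs, so the returned variables can be used directly for fixed feature extraction. A hypothetical compilation, assuming standard theano imports:

network, input_var, feat_var, mask_var, outp_var = build_autoencoder_network()
feature_fn = theano.function([input_var], [feat_var, mask_var])
# features, mask = feature_fn(patches)   # patches shaped (N, 3, PS, PS)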