本文整理汇总了Python中blocks.bricks.conv.ConvolutionalSequence.get_dim方法的典型用法代码示例。如果您正苦于以下问题:Python ConvolutionalSequence.get_dim方法的具体用法?Python ConvolutionalSequence.get_dim怎么用?Python ConvolutionalSequence.get_dim使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 blocks.bricks.conv.ConvolutionalSequence 的用法示例。
在下文中一共展示了ConvolutionalSequence.get_dim方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_convolutional_sequence_activation_get_dim
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
def test_convolutional_sequence_activation_get_dim():
    """Check that ConvolutionalSequence.get_dim('output') tracks its layers."""
    # An activation-only sequence leaves the input dimensions untouched.
    act_only = ConvolutionalSequence([Tanh()], num_channels=9,
                                     image_size=(4, 6))
    act_only.allocate()
    assert act_only.get_dim('output') == (9, 4, 6)

    # A convolution followed by an activation changes both the channel
    # count and the spatial size.
    conv_then_act = ConvolutionalSequence(
        [Convolutional(filter_size=(7, 7), num_filters=5,
                       border_mode=(1, 1)),
         Tanh()],
        num_channels=8, image_size=(8, 11))
    conv_then_act.allocate()
    assert conv_then_act.get_dim('output') == (5, 4, 7)
示例2: VGGNet
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
class VGGNet(FeedforwardSequence, Initializable):
    """VGG-style convolutional network with a batch-normalized MLP on top.

    Three blocks of 'half'-padded (3, 3) convolutions (2x32, 3x64 and
    4x128 filters), each block ending in (2, 2) max-pooling, followed by a
    Flattener and a BatchNormalizedMLP.  The MLP input dimension is
    prepended in ``_push_allocation_config`` once the convolutional output
    shape is known.

    Parameters
    ----------
    image_dimension : tuple
        Spatial size (height, width) of the 3-channel input image.
    """

    def __init__(self, image_dimension, **kwargs):
        layers = []
        # (num_filters, number of conv/ReLU pairs) for each block; replaces
        # the original copy-pasted append sequences with identical layers.
        for num_filters, block_depth in ((32, 2), (64, 3), (128, 4)):
            for _ in range(block_depth):
                layers.append(Convolutional((3, 3), num_filters,
                                            border_mode='half'))
                layers.append(Rectifier())
            # Each block ends with a (2, 2) max-pooling.
            layers.append(MaxPooling((2, 2)))
        self.conv_sequence = ConvolutionalSequence(layers, 3,
                                                   image_size=image_dimension)
        flattener = Flattener()
        # First MLP dim (the flattened conv output) is filled in later; see
        # _push_allocation_config.
        self.top_mlp = BatchNormalizedMLP(
            activations=[Rectifier(), Logistic()], dims=[500, 1])
        application_methods = [self.conv_sequence.apply, flattener.apply,
                               self.top_mlp.apply]
        super(VGGNet, self).__init__(application_methods,
                                     biases_init=Constant(0),
                                     weights_init=Uniform(width=.1),
                                     **kwargs)

    def _push_allocation_config(self):
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')
        # Bug fix: Python 2 `print conv_out_dim` statement replaced with the
        # print() function used elsewhere in this file.
        print(conv_out_dim)
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp.dims
示例3: build_model_mnist
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
def build_model_mnist():
    """Build an MNIST classifier: two conv/pool layers plus a linear MLP."""
    # First convolutional layer: 50 filters of (5, 5) with (2, 2) pooling.
    layer0 = ConvolutionalLayer(activation=Rectifier().apply,
                                filter_size=(5, 5),
                                num_filters=50,
                                pooling_size=(2, 2),
                                weights_init=Uniform(width=0.1),
                                biases_init=Uniform(width=0.01),
                                name="layer_0")
    # Second convolutional layer: 20 filters of (3, 3), same pooling.
    layer1 = ConvolutionalLayer(activation=Rectifier().apply,
                                filter_size=(3, 3),
                                num_filters=20,
                                pooling_size=(2, 2),
                                weights_init=Uniform(width=0.1),
                                biases_init=Uniform(width=0.01),
                                name="layer_1")
    convnet = ConvolutionalSequence([layer0, layer1], num_channels=1,
                                    image_size=(28, 28))
    convnet.initialize()
    # The flattened conv output feeds a single linear layer with 10 outputs.
    flat_dim = np.prod(convnet.get_dim('output'))
    mlp = MLP(activations=[Identity()], dims=[flat_dim, 10],
              weights_init=Uniform(width=0.1),
              biases_init=Uniform(width=0.01), name="layer_2")
    mlp.initialize()
    classifier = Classifier(convnet, mlp)
    classifier.initialize()
    return classifier
示例4: create_model_bricks
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
def create_model_bricks():
    """Build and initialize the convnet and its batch-normalized top MLP."""
    # (filter_size, step, num_filters) for each conv/batch-norm/ReLU triple;
    # `None` means the brick's default step.
    conv_specs = [
        ((4, 4), None, 32),
        ((3, 3), (2, 2), 32),
        ((4, 4), None, 64),
        ((3, 3), (2, 2), 64),
        ((3, 3), None, 128),
        ((3, 3), (2, 2), 128),
    ]
    layers = []
    for index, (filter_size, step, num_filters) in enumerate(conv_specs, 1):
        extra = {} if step is None else {'step': step}
        layers.append(Convolutional(filter_size=filter_size,
                                    num_filters=num_filters,
                                    name='conv{}'.format(index), **extra))
        layers.append(
            SpatialBatchNormalization(name='batch_norm{}'.format(index)))
        layers.append(Rectifier())
    convnet = ConvolutionalSequence(
        layers=layers,
        num_channels=3,
        image_size=(64, 64),
        use_bias=False,
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='convnet')
    convnet.initialize()
    # The MLP consumes the flattened conv output and emits 40 attributes.
    mlp = BatchNormalizedMLP(
        activations=[Rectifier(), Logistic()],
        dims=[numpy.prod(convnet.get_dim('output')), 1000, 40],
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='mlp')
    mlp.initialize()
    return convnet, mlp
示例5: net_dvc
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
def net_dvc(image_size=(32, 32)):
    """Build a 3-stage CNN (conv/ReLU/pool) topped by a softmax MLP."""
    filter_counts = [100, 200, 300]
    # Input channels per stage: RGB (3) for the first, the previous stage's
    # filter count afterwards.
    channel_counts = [3] + filter_counts[:-1]
    stacked = []
    for index, (num_filters, num_channels) in enumerate(
            zip(filter_counts, channel_counts)):
        stacked.append(Convolutional(filter_size=(5, 5),
                                     num_filters=num_filters,
                                     num_channels=num_channels,
                                     name="Conv" + str(index)))
        stacked.append(Rectifier())
        stacked.append(MaxPooling((2, 2)))
    cnn = ConvolutionalSequence(stacked, 3, image_size=image_size, name="cnn",
                                weights_init=Uniform(width=.1),
                                biases_init=Constant(0))
    # Push the allocation config so the output shape can be queried below.
    cnn._push_allocation_config()
    flat_dim = np.prod(cnn.get_dim('output'))
    mlp = MLP([Rectifier(), Softmax()], [flat_dim, 500, 2], name="mlp",
              weights_init=Uniform(width=.1),
              biases_init=Constant(0))
    seq = FeedforwardSequence(
        [brick.apply for brick in (cnn, Flattener(), mlp)])
    seq.push_initialization_config()
    seq.initialize()
    return seq
示例6: build_conv_layers
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
def build_conv_layers(self, image=None):
    """Build the conv/pool/ReLU stack and apply it to *image*.

    Parameters
    ----------
    image : TensorVariable, optional
        4D input batch; when None a fresh 'spectrogram' ftensor4 is created.

    Returns
    -------
    tuple
        The symbolic output of the sequence and its output dimensions
        (from ``get_dim('output')``).
    """
    # Fix: the original `else: image = image` branch was a no-op; a plain
    # guard is equivalent and clearer.
    if image is None:
        image = T.ftensor4('spectrogram')
    conv_list = []
    for layer in range(self.layers):
        # self.params[layer] holds, in order: filter size, number of
        # filters, number of channels, pooling size -- assumed from the
        # positional usage below; TODO confirm against the class definition.
        layer_param = self.params[layer]
        conv_layer = Convolutional(layer_param[0], layer_param[1],
                                   layer_param[2])
        pool_layer = MaxPooling(layer_param[3])
        conv_layer.name = "convolution" + str(layer)
        pool_layer.name = "maxpooling" + str(layer)
        conv_list.append(conv_layer)
        conv_list.append(pool_layer)
        conv_list.append(Rectifier())
    conv_seq = ConvolutionalSequence(
        conv_list,
        self.params[0][2],
        image_size=self.image_size,
        weights_init=IsotropicGaussian(std=0.5, mean=0),
        biases_init=Constant(0))
    conv_seq._push_allocation_config()
    conv_seq.initialize()
    out = conv_seq.apply(image)
    return out, conv_seq.get_dim('output')
示例7: LeNet
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
class LeNet(FeedforwardSequence, Initializable):
    """LeNet-style network: a convolutional sequence flattened into an MLP.

    The first top-MLP dimension is filled in during
    ``_push_allocation_config``, once the conv output shape is known.
    """

    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims,
                 conv_step=None, border_mode='valid', **kwargs):
        self.conv_step = (1, 1) if conv_step is None else conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode
        # Interleave conv / activation / pooling bricks per layer.
        conv_bricks = (Convolutional(filter_size=fsize,
                                     num_filters=nfilt,
                                     step=self.conv_step,
                                     border_mode=self.border_mode,
                                     name='conv_{}'.format(idx))
                       for idx, (fsize, nfilt)
                       in enumerate(zip(filter_sizes, feature_maps)))
        pool_bricks = (MaxPooling(psize, name='pool_{}'.format(idx))
                       for idx, psize in enumerate(pooling_sizes))
        self.layers = list(interleave([conv_bricks, conv_activations,
                                       pool_bricks]))
        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)
        # Flattener maps (batch_size, ...) tensors to (batch_size, features)
        # matrices for the MLP.
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)

    @property
    def output_dim(self):
        """Output dimensionality of the network (last top-MLP dim)."""
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        # Size the MLP input from the (now known) conv output shape.
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')
        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
示例8: EncoderMapping
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
class EncoderMapping(Initializable):
    """Convolutional encoder conditioned on an embedding.

    All layers but the last form the pre-encoder; the embedding is
    concatenated to the pre-encoder output along the channel axis before
    the final layer is applied.

    Parameters
    ----------
    layers: :class:`list`
        list of bricks
    num_channels: :class: `int`
        Number of input channels
    image_size: :class:`tuple`
        Image size
    n_emb: :class:`int`
        Dimensionality of the embedding
    use_bias: :class:`bool`
        self explanatory
    """

    def __init__(self, layers, num_channels, image_size, n_emb,
                 use_bias=False, **kwargs):
        self.layers = layers
        self.num_channels = num_channels
        self.image_size = image_size
        self.pre_encoder = ConvolutionalSequence(layers=layers[:-1],
                                                 num_channels=num_channels,
                                                 image_size=image_size,
                                                 use_bias=use_bias,
                                                 name='encoder_conv_mapping')
        self.pre_encoder.allocate()
        # The final layer sees the pre-encoder's channels plus the embedding.
        n_channels = n_emb + self.pre_encoder.get_dim('output')[0]
        self.post_encoder = ConvolutionalSequence(layers=[layers[-1]],
                                                  num_channels=n_channels,
                                                  image_size=(1, 1),
                                                  use_bias=use_bias)
        kwargs.setdefault('children', []).extend(
            [self.pre_encoder, self.post_encoder])
        super(EncoderMapping, self).__init__(**kwargs)

    @application(inputs=['x', 'y'], outputs=['output'])
    def apply(self, x, y):
        "Returns mu and logsigma"
        feature_map = self.pre_encoder.apply(x)
        # Concatenate the embedding along the channel axis, then run the
        # final layer.
        joined = tensor.concatenate([feature_map, y], axis=1)
        return self.post_encoder.apply(joined)
示例9: LeNet
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
class LeNet(FeedforwardSequence, Initializable):
"""LeNet-like convolutional network.
The class implements LeNet, which is a convolutional sequence with
an MLP on top (several fully-connected layers). For details see
[LeCun95]_.
.. [LeCun95] LeCun, Yann, et al.
*Comparison of learning algorithms for handwritten digit
recognition.*,
International conference on artificial neural networks. Vol. 60.
Parameters
----------
conv_activations : list of :class:`.Brick`
Activations for convolutional network.
num_channels : int
Number of channels in the input image.
image_shape : tuple
Input image shape.
filter_sizes : list of tuples
Filter sizes of :class:`.blocks.conv.ConvolutionalLayer`.
feature_maps : list
Number of filters for each of convolutions.
pooling_sizes : list of tuples
Sizes of max pooling for each convolutional layer.
repeat_times : list of int
How many times to repeat each convolutional layer.
top_mlp_activations : list of :class:`.blocks.bricks.Activation`
List of activations for the top MLP.
top_mlp_dims : list
Numbers of hidden units and the output dimension of the top MLP.
stride : int
Step of convolution for the first layer, 1 will be used
for all other layers.
border_mode : str
Border mode of convolution (similar for all layers).
batch_norm : str
"""
def __init__(self, conv_activations, num_channels, image_shape,
             filter_sizes, feature_maps, pooling_sizes, repeat_times,
             top_mlp_activations, top_mlp_dims,
             stride, batch_norm, border_mode='valid', **kwargs):
    # Builds repeat_times[i] convolutions for stage i, each optionally
    # followed by batch normalization and the stage activation, then one
    # max-pooling per stage.  (Nesting reconstructed from semantics; the
    # scraped source lost its indentation -- TODO confirm against upstream.)
    self.stride = stride
    self.num_channels = num_channels
    self.image_shape = image_shape
    self.top_mlp_activations = top_mlp_activations
    self.top_mlp_dims = top_mlp_dims
    self.border_mode = border_mode
    # Construct convolutional layers with corresponding parameters
    self.layers = []
    for i, activation in enumerate(conv_activations):
        for j in range(repeat_times[i]):
            self.layers.append(
                Convolutional(
                    filter_size=filter_sizes[i], num_filters=feature_maps[i],
                    # Only the very first convolution uses `stride`; every
                    # later one steps by (1, 1).
                    step=(1, 1) if i > 0 or j > 0 else (self.stride, self.stride),
                    border_mode=self.border_mode,
                    name='conv_{}_{}'.format(i, j)))
            if batch_norm:
                # broadcastable marks the spatial axes; batch_norm ==
                # 'mean-only' selects mean-only normalization.
                self.layers.append(
                    BatchNormalization(broadcastable=(False, True, True),
                                       conserve_memory=True,
                                       mean_only=batch_norm == 'mean-only',
                                       name='bn_{}_{}'.format(i, j)))
            self.layers.append(activation)
        self.layers.append(MaxPooling(pooling_sizes[i], name='pool_{}'.format(i)))
    self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                               image_size=image_shape)
    # Construct a top MLP
    self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)
    # We need to flatten the output of the last convolutional layer.
    # This brick accepts a tensor of dimension (batch_size, ...) and
    # returns a matrix (batch_size, features)
    self.flattener = Flattener()
    application_methods = [self.conv_sequence.apply, self.flattener.apply,
                           self.top_mlp.apply]
    super(LeNet, self).__init__(application_methods, **kwargs)
@property
def output_dim(self):
    # Output dimensionality of the network: the last top-MLP dimension.
    return self.top_mlp_dims[-1]

@output_dim.setter
def output_dim(self, value):
    self.top_mlp_dims[-1] = value
def _push_allocation_config(self):
    # Push config into the conv sequence first so its output shape is
    # known, then prepend the flattened size to the top-MLP dims.
    self.conv_sequence._push_allocation_config()
    conv_out_dim = self.conv_sequence.get_dim('output')
    self.top_mlp.activations = self.top_mlp_activations
    self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
@application(inputs=['image'])
def apply_5windows(self, image):
#.........这里部分代码省略.........
示例10: create_model_bricks
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
def create_model_bricks():
encoder_convnet = ConvolutionalSequence(
layers=[
Convolutional(
filter_size=(3, 3),
border_mode=(1, 1),
num_filters=32,
name='conv1'),
SpatialBatchNormalization(name='batch_norm1'),
Rectifier(),
Convolutional(
filter_size=(3, 3),
border_mode=(1, 1),
num_filters=32,
name='conv2'),
SpatialBatchNormalization(name='batch_norm2'),
Rectifier(),
Convolutional(
filter_size=(2, 2),
step=(2, 2),
num_filters=32,
name='conv3'),
SpatialBatchNormalization(name='batch_norm3'),
Rectifier(),
Convolutional(
filter_size=(3, 3),
border_mode=(1, 1),
num_filters=64,
name='conv4'),
SpatialBatchNormalization(name='batch_norm4'),
Rectifier(),
Convolutional(
filter_size=(3, 3),
border_mode=(1, 1),
num_filters=64,
name='conv5'),
SpatialBatchNormalization(name='batch_norm5'),
Rectifier(),
Convolutional(
filter_size=(2, 2),
step=(2, 2),
num_filters=64,
name='conv6'),
SpatialBatchNormalization(name='batch_norm6'),
Rectifier(),
Convolutional(
filter_size=(3, 3),
border_mode=(1, 1),
num_filters=128,
name='conv7'),
SpatialBatchNormalization(name='batch_norm7'),
Rectifier(),
Convolutional(
filter_size=(3, 3),
border_mode=(1, 1),
num_filters=128,
name='conv8'),
SpatialBatchNormalization(name='batch_norm8'),
Rectifier(),
Convolutional(
filter_size=(2, 2),
step=(2, 2),
num_filters=128,
name='conv9'),
SpatialBatchNormalization(name='batch_norm9'),
Rectifier(),
Convolutional(
filter_size=(3, 3),
border_mode=(1, 1),
num_filters=256,
name='conv10'),
SpatialBatchNormalization(name='batch_norm10'),
Rectifier(),
Convolutional(
filter_size=(3, 3),
border_mode=(1, 1),
num_filters=256,
name='conv11'),
SpatialBatchNormalization(name='batch_norm11'),
Rectifier(),
Convolutional(
filter_size=(2, 2),
step=(2, 2),
num_filters=256,
name='conv12'),
SpatialBatchNormalization(name='batch_norm12'),
Rectifier(),
],
num_channels=3,
image_size=(64, 64),
use_bias=False,
weights_init=IsotropicGaussian(0.033),
biases_init=Constant(0),
name='encoder_convnet')
encoder_convnet.initialize()
encoder_filters = numpy.prod(encoder_convnet.get_dim('output'))
encoder_mlp = MLP(
dims=[encoder_filters, 1000, 1000],
#.........这里部分代码省略.........
示例11: create_model_bricks
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
#.........这里部分代码省略.........
original_image_size=(g_image_size3, g_image_size3),
num_filters=128,
name="conv6",
),
SpatialBatchNormalization(name="batch_norm6"),
Rectifier(),
]
if depth > 1:
decoder_layers = decoder_layers + [
Convolutional(filter_size=(3, 3), border_mode=(1, 1), num_filters=64, name="conv7"),
SpatialBatchNormalization(name="batch_norm7"),
Rectifier(),
Convolutional(filter_size=(3, 3), border_mode=(1, 1), num_filters=64, name="conv8"),
SpatialBatchNormalization(name="batch_norm8"),
Rectifier(),
ConvolutionalTranspose(
filter_size=(2, 2),
step=(2, 2),
original_image_size=(g_image_size2, g_image_size2),
num_filters=64,
name="conv9",
),
SpatialBatchNormalization(name="batch_norm9"),
Rectifier(),
]
if depth > 0:
decoder_layers = decoder_layers + [
Convolutional(filter_size=(3, 3), border_mode=(1, 1), num_filters=32, name="conv10"),
SpatialBatchNormalization(name="batch_norm10"),
Rectifier(),
Convolutional(filter_size=(3, 3), border_mode=(1, 1), num_filters=32, name="conv11"),
SpatialBatchNormalization(name="batch_norm11"),
Rectifier(),
ConvolutionalTranspose(
filter_size=(2, 2),
step=(2, 2),
original_image_size=(g_image_size, g_image_size),
num_filters=32,
name="conv12",
),
SpatialBatchNormalization(name="batch_norm12"),
Rectifier(),
]
decoder_layers = decoder_layers + [Convolutional(filter_size=(1, 1), num_filters=3, name="conv_out"), Logistic()]
print(
"creating model of depth {} with {} encoder and {} decoder layers".format(
depth, len(encoder_layers), len(decoder_layers)
)
)
encoder_convnet = ConvolutionalSequence(
layers=encoder_layers,
num_channels=3,
image_size=(g_image_size, g_image_size),
use_bias=False,
weights_init=IsotropicGaussian(0.033),
biases_init=Constant(0),
name="encoder_convnet",
)
encoder_convnet.initialize()
encoder_filters = numpy.prod(encoder_convnet.get_dim("output"))
encoder_mlp = MLP(
dims=[encoder_filters, 1000, z_dim],
activations=[
Sequence([BatchNormalization(1000).apply, Rectifier().apply], name="activation1"),
Identity().apply,
],
weights_init=IsotropicGaussian(0.033),
biases_init=Constant(0),
name="encoder_mlp",
)
encoder_mlp.initialize()
decoder_mlp = BatchNormalizedMLP(
activations=[Rectifier(), Rectifier()],
dims=[encoder_mlp.output_dim // 2, 1000, encoder_filters],
weights_init=IsotropicGaussian(0.033),
biases_init=Constant(0),
name="decoder_mlp",
)
decoder_mlp.initialize()
decoder_convnet = ConvolutionalSequence(
layers=decoder_layers,
num_channels=encoder_convnet.get_dim("output")[0],
image_size=encoder_convnet.get_dim("output")[1:],
use_bias=False,
weights_init=IsotropicGaussian(0.033),
biases_init=Constant(0),
name="decoder_convnet",
)
decoder_convnet.initialize()
return encoder_convnet, encoder_mlp, decoder_convnet, decoder_mlp
示例12: build_submodel
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
#.........这里部分代码省略.........
# a "step" argument, but `ConvolutionActivation` takes "conv_step" argument
kwargs['conv_step'] = filter_step
if (pool_size[0] == 0 and pool_size[1] == 0):
layer_conv = ConvolutionalActivation(activation=activation,
filter_size=filter_size,
num_filters=num_filters,
border_mode=border_mode,
name="layer_%d" % index,
**kwargs)
else:
if pool_step is None:
pass
else:
kwargs['pooling_step'] = tuple(pool_step)
layer_conv = ConvolutionalLayer(activation=activation,
filter_size=filter_size,
num_filters=num_filters,
border_mode=border_mode,
pooling_size=pool_size,
name="layer_%d" % index,
**kwargs)
conv_layers.append(layer_conv)
convnet = ConvolutionalSequence(conv_layers, num_channels=num_channels,
image_size=image_size,
weights_init=Uniform(width=0.1),
biases_init=Constant(0.0),
name="conv_section")
convnet.push_allocation_config()
convnet.initialize()
output_dim = np.prod(convnet.get_dim('output'))
output_conv = convnet.apply(output_conv)
output_conv = Flattener().apply(output_conv)
# FULLY CONNECTED
output_mlp = output_conv
full_layers = []
assert len(L_dim_full_layers) == len(L_activation_full)
assert len(L_dim_full_layers) + 1 == len(L_endo_dropout_full_layers)
assert len(L_dim_full_layers) + 1 == len(L_exo_dropout_full_layers)
# reguarding the batch dropout : the dropout is applied on the filter
# which is equivalent to the output dimension
# you have to look at the dropout_rate of the next layer
# that is why we throw away the first value of L_exo_dropout_full_layers
L_exo_dropout_full_layers = L_exo_dropout_full_layers[1:]
pre_dim = output_dim
print "When constructing the model, the output_dim of the conv section is %d." % output_dim
if len(L_dim_full_layers):
for (dim, activation_str,
dropout, index) in zip(L_dim_full_layers,
L_activation_full,
L_exo_dropout_full_layers,
range(len(L_dim_conv_layers),
len(L_dim_conv_layers)+
len(L_dim_full_layers))
):
# TO DO : leaky relu
if activation_str.lower() == 'rectifier':
示例13: LeNet
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
class LeNet(FeedforwardSequence, Initializable):
    """LeNet-like network: convolutional sequence, flattener, top MLP.

    Parameters
    ----------
    conv_activations : list of :class:`.Brick`
        Activations for convolutional network.
    num_channels : int
        Number of channels in the input image.
    image_shape : tuple
        Input image shape.
    filter_sizes : list of tuples
        Filter sizes of :class:`.blocks.conv.ConvolutionalLayer`.
    feature_maps : list
        Number of filters for each of convolutions.
    pooling_sizes : list of tuples
        Sizes of max pooling for each convolutional layer.
    top_mlp_activations : list of :class:`.blocks.bricks.Activation`
        List of activations for the top MLP.
    top_mlp_dims : list
        Numbers of hidden units and the output dimension of the top MLP.
    conv_step : tuples
        Step of convolution (similar for all layers).
    border_mode : str
        Border mode of convolution (similar for all layers).
    """

    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims,
                 conv_step=None, border_mode='valid', **kwargs):
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode
        conv_parameters = zip(filter_sizes, feature_maps)
        # Construct convolutional layers with corresponding parameters.
        # Bug fix: `step=self.conv_step` was stored but never passed to the
        # Convolutional bricks, so a non-default conv_step was silently
        # ignored; the sibling LeNet implementation in this file passes it.
        # (Default (1, 1) matches the brick default, so default behavior is
        # unchanged.)
        self.layers = list(interleave([
            (Convolutional(filter_size=filter_size,
                           num_filters=num_filter,
                           step=self.conv_step,
                           border_mode=self.border_mode,
                           name='conv_{}'.format(i))
             for i, (filter_size, num_filter)
             in enumerate(conv_parameters)),
            conv_activations,
            (MaxPooling(size, name='pool_{}'.format(i))
             for i, size in enumerate(pooling_sizes))]))
        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)
        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)
        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)

    @property
    def output_dim(self):
        """Output dimensionality of the network (last top-MLP dim)."""
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        # Size the MLP input from the (now known) conv output shape.
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')
        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
示例14: create_model_brick
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
def create_model_brick():
    """Assemble the conditional ALI model: encoder, decoder, discriminator.

    Relies on module-level helpers and constants defined elsewhere in this
    file (conv_brick, bn_brick, conv_transpose_brick, LEAK, NLAT, NEMB,
    NUM_CHANNELS, IMAGE_SIZE, NCLASSES, GAUSSIAN_INIT, ZERO_INIT,
    get_log_odds, create_celeba_data_streams).
    """
    # Encoder
    enc_layers = [
        conv_brick(2, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        # 2 * NLAT output channels: the Gaussian conditional's mu and
        # log-sigma (see EncoderMapping.apply's docstring).
        conv_brick(1, 1, 2 * NLAT)]
    encoder_mapping = EncoderMapping(layers=enc_layers,
                                     num_channels=NUM_CHANNELS,
                                     n_emb=NEMB,
                                     image_size=IMAGE_SIZE, weights_init=GAUSSIAN_INIT,
                                     biases_init=ZERO_INIT,
                                     use_bias=False)
    encoder = GaussianConditional(encoder_mapping, name='encoder')
    # Decoder
    dec_layers = [
        conv_transpose_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(2, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS), Logistic()]
    decoder = Decoder(
        layers=dec_layers, num_channels=NLAT + NEMB, image_size=(1, 1), use_bias=False,
        name='decoder_mapping')
    # Discriminator
    layers = [
        conv_brick(2, 1, 64), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK)]
    x_discriminator = ConvolutionalSequence(
        layers=layers, num_channels=NUM_CHANNELS, image_size=IMAGE_SIZE,
        use_bias=False, name='x_discriminator')
    # Push allocation config now so get_dim('output') below is valid.
    x_discriminator.push_allocation_config()
    layers = [
        conv_brick(1, 1, 1024), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1024), LeakyRectifier(leak=LEAK)]
    z_discriminator = ConvolutionalSequence(
        layers=layers, num_channels=NLAT, image_size=(1, 1), use_bias=False,
        name='z_discriminator')
    z_discriminator.push_allocation_config()
    # The joint discriminator consumes the concatenated x and z feature
    # maps plus the embedding.
    layers = [
        conv_brick(1, 1, 2048), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2048), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1)]
    joint_discriminator = ConvolutionalSequence(
        layers=layers,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0] +
                      NEMB),
        image_size=(1, 1),
        name='joint_discriminator')
    discriminator = XZYJointDiscriminator(
        x_discriminator, z_discriminator, joint_discriminator,
        name='discriminator')
    ali = ConditionalALI(encoder, decoder, discriminator,
                         n_cond=NCLASSES, n_emb=NEMB,
                         weights_init=GAUSSIAN_INIT, biases_init=ZERO_INIT,
                         name='ali')
    ali.push_allocation_config()
    # Per-layer bias overrides must happen after the allocation config has
    # been pushed but before initialization.
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder.layers[-2].use_bias = True
    decoder.layers[-2].tied_biases = False
    x_discriminator.layers[0].use_bias = True
    x_discriminator.layers[0].tied_biases = True
    ali.initialize()
    # Set the decoder output bias to the log-odds of the training-set
    # marginals (computed from one CelebA batch).
    raw_marginals, = next(
        create_celeba_data_streams(500, 500)[0].get_epoch_iterator())
    b_value = get_log_odds(raw_marginals)
    decoder.layers[-2].b.set_value(b_value)
    return ali
示例15: build_and_run
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import get_dim [as 别名]
def build_and_run(label, config):
############## CREATE THE NETWORK ###############
#Define the parameters
num_epochs, num_batches, num_channels, image_shape, filter_size, num_filter, pooling_sizes, mlp_hiddens, output_size, batch_size, activation, mlp_activation = config['num_epochs'], config['num_batches'], config['num_channels'], config['image_shape'], config['filter_size'], config['num_filter'], config['pooling_sizes'], config['mlp_hiddens'], config['output_size'], config['batch_size'], config['activation'], config['mlp_activation']
# print(num_epochs, num_channels, image_shape, filter_size, num_filter, pooling_sizes, mlp_hiddens, output_size, batch_size, activation, mlp_activation)
lambda_l1 = 0.000025
lambda_l2 = 0.000025
print("Building model")
#Create the symbolics variable
x = T.tensor4('image_features')
y = T.lmatrix('targets')
#Get the parameters
conv_parameters = zip(filter_size, num_filter)
#Create the convolutions layers
conv_layers = list(interleave([(Convolutional(
filter_size=filter_size,
num_filters=num_filter,
name='conv_{}'.format(i))
for i, (filter_size, num_filter)
in enumerate(conv_parameters)),
(activation),
(MaxPooling(size, name='pool_{}'.format(i)) for i, size in enumerate(pooling_sizes))]))
# (AveragePooling(size, name='pool_{}'.format(i)) for i, size in enumerate(pooling_sizes))]))
#Create the sequence
conv_sequence = ConvolutionalSequence(conv_layers, num_channels, image_size=image_shape, weights_init=Uniform(width=0.2), biases_init=Constant(0.))
#Initialize the convnet
conv_sequence.initialize()
#Add the MLP
top_mlp_dims = [np.prod(conv_sequence.get_dim('output'))] + mlp_hiddens + [output_size]
out = Flattener().apply(conv_sequence.apply(x))
mlp = MLP(mlp_activation, top_mlp_dims, weights_init=Uniform(0, 0.2),
biases_init=Constant(0.))
#Initialisze the MLP
mlp.initialize()
#Get the output
predict = mlp.apply(out)
cost = CategoricalCrossEntropy().apply(y.flatten(), predict).copy(name='cost')
error = MisclassificationRate().apply(y.flatten(), predict)
#Little trick to plot the error rate in two different plots (We can't use two time the same data in the plot for a unknow reason)
error_rate = error.copy(name='error_rate')
error_rate2 = error.copy(name='error_rate2')
########### REGULARIZATION ##################
cg = ComputationGraph([cost])
weights = VariableFilter(roles=[WEIGHT])(cg.variables)
biases = VariableFilter(roles=[BIAS])(cg.variables)
# # l2_penalty_weights = T.sum([i*lambda_l2/len(weights) * (W ** 2).sum() for i,W in enumerate(weights)]) # Gradually increase penalty for layer
l2_penalty = T.sum([lambda_l2 * (W ** 2).sum() for i,W in enumerate(weights+biases)]) # Gradually increase penalty for layer
# # #l2_penalty_bias = T.sum([lambda_l2*(B **2).sum() for B in biases])
# # #l2_penalty = l2_penalty_weights + l2_penalty_bias
l2_penalty.name = 'l2_penalty'
l1_penalty = T.sum([lambda_l1*T.abs_(z).sum() for z in weights+biases])
# l1_penalty_weights = T.sum([i*lambda_l1/len(weights) * T.abs_(W).sum() for i,W in enumerate(weights)]) # Gradually increase penalty for layer
# l1_penalty_biases = T.sum([lambda_l1 * T.abs_(B).sum() for B in biases])
# l1_penalty = l1_penalty_biases + l1_penalty_weights
l1_penalty.name = 'l1_penalty'
costreg = cost + l2_penalty + l1_penalty
costreg.name = 'costreg'
########### DEFINE THE ALGORITHM #############
# algorithm = GradientDescent(cost=cost, parameters=cg.parameters, step_rule=Momentum())
algorithm = GradientDescent(cost=costreg, parameters=cg.parameters, step_rule=Adam())
########### GET THE DATA #####################
istest = 'test' in config.keys()
train_stream, valid_stream, test_stream = get_stream(batch_size,image_shape,test=istest)
########### INITIALIZING EXTENSIONS ##########
checkpoint = Checkpoint('models/best_'+label+'.tar')
checkpoint.add_condition(['after_epoch'],
predicate=OnLogRecord('valid_error_rate_best_so_far'))
#Adding a live plot with the bokeh server
plot = Plot(label,
channels=[['train_error_rate', 'valid_error_rate'],
['valid_cost', 'valid_error_rate2'],
# ['train_costreg','train_grad_norm']], #
['train_costreg','train_total_gradient_norm','train_l2_penalty','train_l1_penalty']],
server_url="http://hades.calculquebec.ca:5042")
grad_norm = aggregation.mean(algorithm.total_gradient_norm)
grad_norm.name = 'grad_norm'
extensions = [Timing(),
FinishAfter(after_n_epochs=num_epochs,
after_n_batches=num_batches),
DataStreamMonitoring([cost, error_rate, error_rate2], valid_stream, prefix="valid"),
TrainingDataMonitoring([costreg, error_rate, error_rate2,
grad_norm,l2_penalty,l1_penalty],
prefix="train", after_epoch=True),
plot,
ProgressBar(),
Printing(),
TrackTheBest('valid_error_rate',min), #Keep best
#.........这里部分代码省略.........