This article collects typical usage examples of the Python method lasagne.layers.FlattenLayer. If you have been wondering what layers.FlattenLayer does, how to use it, and what real usage looks like, the curated code examples below may help. You can also explore the containing module, lasagne.layers, for further details.
The following presents 8 code examples of the layers.FlattenLayer method, sorted by popularity by default.
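Before the examples, it helps to know what the layer actually does: FlattenLayer collapses all trailing dimensions of its input into one, by default producing a 2-D (batch, features) output. A minimal standalone sketch of this behavior (the input shape is an arbitrary illustration):

from lasagne.layers import InputLayer, FlattenLayer

l_in = InputLayer(shape=(None, 16, 4, 8))
l_flat = FlattenLayer(l_in)   # default outdim=2
print(l_flat.output_shape)    # (None, 512), i.e. 16 * 4 * 8 collapsed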
Example 1: create_network
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import FlattenLayer [as alias]
from lasagne.layers import (InputLayer, SliceLayer, FlattenLayer, Conv2DLayer,
                            MaxPool2DLayer, DenseLayer, ConcatLayer, DropoutLayer)
from lasagne.nonlinearities import softmax

def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    # Split the input into a sequence part (first l columns) and a feature part.
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)  # (None, 1, 4, 1024) -> (None, 4096)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network

# random search to initialize the weights
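The returned network is only a layer graph; training requires compiling Theano functions around it. Below is a minimal, hypothetical training setup for this two-class classifier; the variable names, the optimizer choice, and the learning rate are illustrative assumptions, not part of the source:

import theano
import theano.tensor as T
import lasagne

network = create_network()
input_var = lasagne.layers.get_all_layers(network)[0].input_var  # the InputLayer's auto-created variable
target_var = T.ivector('targets')

prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)  # assumed optimizer/LR
train_fn = theano.function([input_var, target_var], loss, updates=updates)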
Example 2: create_network
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import FlattenLayer [as alias]
from lasagne.layers import (InputLayer, SliceLayer, FlattenLayer, Conv2DLayer,
                            MaxPool2DLayer, DenseLayer, ConcatLayer)

def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    # Split the input into a sequence part (first l columns) and a feature part.
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)  # (None, 1, 4, 1024) -> (None, 4096)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    # layer16 = DropoutLayer(layer15, p=0.5)  # dropout disabled in this variant
    layer17 = DenseLayer(layer15, num_units=256)
    network = DenseLayer(layer17, num_units=1, nonlinearity=None)  # linear output for regression
    return network

# random search to initialize the weights
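Unlike Example 1, this variant ends in a single linear unit, making it a regression network (it also skips the dropout layer, which is commented out). A hedged sketch of a matching loss; the variable names and target shape are assumptions:

import theano.tensor as T
import lasagne

network = create_network()
prediction = lasagne.layers.get_output(network)
target_var = T.fmatrix('targets')  # assumed shape: (batch, 1)
loss = lasagne.objectives.squared_error(prediction, target_var).mean()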
Example 3: build_convpool_conv1d
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import FlattenLayer [as alias]
import lasagne
from lasagne.layers import (FlattenLayer, ConcatLayer, ReshapeLayer, DimshuffleLayer,
                            Conv1DLayer, DenseLayer, get_output_shape)

def build_convpool_conv1d(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with a 1D-conv layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    # (build_cnn is defined elsewhere in the same project).
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # At this point convnets has shape [numTimeWin][n_samples, features];
    # we want the shape to be [n_samples, features, numTimeWin].
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    convpool = DimshuffleLayer(convpool, (0, 2, 1))
    # Input to a 1D conv layer should be (batch_size, num_input_channels, input_length).
    convpool = Conv1DLayer(convpool, 64, 3)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
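The reshape/dimshuffle sequence above is easiest to follow on concrete shapes. A standalone sketch of just that part, with plain InputLayers standing in for the flattened CNN outputs (the feature size 1024 is an arbitrary assumption):

from lasagne.layers import InputLayer, ConcatLayer, ReshapeLayer, DimshuffleLayer, Conv1DLayer

n_timewin, n_feats = 7, 1024  # feature size assumed
flats = [InputLayer((None, n_feats)) for _ in range(n_timewin)]  # stand-ins for FlattenLayer outputs
net = ConcatLayer(flats)                            # (None, 7 * 1024)
net = ReshapeLayer(net, ([0], n_timewin, n_feats))  # (None, 7, 1024)
net = DimshuffleLayer(net, (0, 2, 1))               # (None, 1024, 7)
net = Conv1DLayer(net, 64, 3)                       # (None, 64, 5) with 'valid' convolution
print(net.output_shape)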
Example 4: build_convpool_lstm
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import FlattenLayer [as alias]
import lasagne
from lasagne.layers import (FlattenLayer, ConcatLayer, ReshapeLayer, LSTMLayer,
                            SliceLayer, DenseLayer, get_output_shape)

def build_convpool_lstm(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with an LSTM layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip: value to which gradient messages are clipped during the backward pass
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    # (build_cnn is defined elsewhere in the same project).
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # At this point convnets has shape [numTimeWin][n_samples, features];
    # for the LSTM we want the shape [n_samples, numTimeWin, features].
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    # Input to the LSTM should have the shape (batch_size, SEQ_LENGTH, num_features).
    convpool = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
                         nonlinearity=lasagne.nonlinearities.tanh)
    # We only need the final prediction, so we isolate that quantity and feed it
    # to the next layer.
    convpool = SliceLayer(convpool, -1, 1)  # selecting the last prediction
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=256, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
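The SliceLayer call above is the key trick: an integer index (here -1 on axis 1) drops the sequence axis entirely, keeping only the LSTM output at the final time step. A minimal sketch of just that step, with assumed sizes:

from lasagne.layers import InputLayer, LSTMLayer, SliceLayer

seq = InputLayer((None, 7, 1024))   # (batch, SEQ_LENGTH, num_features), sizes assumed
lstm = LSTMLayer(seq, num_units=128)  # full sequence output: (None, 7, 128)
last = SliceLayer(lstm, -1, 1)        # integer index drops the axis: (None, 128)
print(last.output_shape)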
Example 5: mask_loss
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import FlattenLayer [as alias]
def mask_loss(loss, mask):
    # LL and lo come from the surrounding module; see the reconstruction below.
    return loss * lo(LL.FlattenLayer(mask, 1))
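This one-liner relies on two module-level aliases that the excerpt does not show. A hedged reconstruction of what they most likely are, given common Lasagne abbreviations; treat both as assumptions:

import lasagne.layers as LL                   # assumed alias
from lasagne.layers import get_output as lo   # assumed alias

# With these aliases, mask is a Lasagne layer; FlattenLayer(mask, 1) flattens
# its output to a 1-D vector (outdim=1), and the elementwise multiply zeroes
# out the loss at masked positions before any aggregation.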
Example 6: build_model
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import FlattenLayer [as alias]
from lasagne.layers import InputLayer, DropoutLayer, FlattenLayer
# ConvLayer / PoolLayer are aliases in the original module; the standard
# lasagne.layers classes below are a reasonable assumption for them.
from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers import Pool2DLayer as PoolLayer

def build_model():
    net = {}
    net['input'] = InputLayer((None, 3, 32, 32))
    net['conv1'] = ConvLayer(net['input'],
                             num_filters=192,
                             filter_size=5,
                             pad=2,
                             flip_filters=False)
    net['cccp1'] = ConvLayer(
        net['conv1'], num_filters=160, filter_size=1, flip_filters=False)
    net['cccp2'] = ConvLayer(
        net['cccp1'], num_filters=96, filter_size=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['cccp2'],
                             pool_size=3,
                             stride=2,
                             mode='max',
                             ignore_border=False)
    net['drop3'] = DropoutLayer(net['pool1'], p=0.5)
    net['conv2'] = ConvLayer(net['drop3'],
                             num_filters=192,
                             filter_size=5,
                             pad=2,
                             flip_filters=False)
    net['cccp3'] = ConvLayer(
        net['conv2'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp4'] = ConvLayer(
        net['cccp3'], num_filters=192, filter_size=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['cccp4'],
                             pool_size=3,
                             stride=2,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['drop6'] = DropoutLayer(net['pool2'], p=0.5)
    net['conv3'] = ConvLayer(net['drop6'],
                             num_filters=192,
                             filter_size=3,
                             pad=1,
                             flip_filters=False)
    net['cccp5'] = ConvLayer(
        net['conv3'], num_filters=192, filter_size=1, flip_filters=False)
    net['cccp6'] = ConvLayer(
        net['cccp5'], num_filters=10, filter_size=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['cccp6'],
                             pool_size=8,
                             mode='average_exc_pad',
                             ignore_border=False)
    net['output'] = FlattenLayer(net['pool3'])
    return net
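Here FlattenLayer serves as the classifier head of a Network-in-Network-style model: global average pooling leaves a (None, 10, 1, 1) tensor, and flattening turns it into (None, 10) class scores. A minimal sketch of inspecting and using the dict-based model; applying softmax afterwards is an assumption, since the snippet itself returns raw scores:

import theano
import lasagne

net = build_model()
print(net['pool3'].output_shape)   # (None, 10, 1, 1)
print(net['output'].output_shape)  # (None, 10)

scores = lasagne.layers.get_output(net['output'], deterministic=True)
probs = lasagne.nonlinearities.softmax(scores)  # assumed post-hoc softmax
predict_fn = theano.function([net['input'].input_var], probs)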
Example 7: build_convpool_mix
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import FlattenLayer [as alias]
import lasagne
from lasagne.layers import (FlattenLayer, ConcatLayer, ReshapeLayer, DimshuffleLayer,
                            Conv1DLayer, LSTMLayer, SliceLayer, DenseLayer, get_output_shape)

def build_convpool_mix(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with LSTM and 1D-conv layers combined.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip: value to which gradient messages are clipped during the backward pass
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of the last layer
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    # (build_cnn is defined elsewhere in the same project).
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # At this point convnets has shape [numTimeWin][n_samples, features];
    # reshape to [n_samples, numTimeWin, features] for the LSTM branch and
    # dimshuffle to [n_samples, features, numTimeWin] for the 1D-conv branch.
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    reformConvpool = DimshuffleLayer(convpool, (0, 2, 1))
    # Input to a 1D conv layer should be (batch_size, num_input_channels, input_length).
    conv_out = Conv1DLayer(reformConvpool, 64, 3)
    conv_out = FlattenLayer(conv_out)
    # Input to the LSTM should have the shape (batch_size, SEQ_LENGTH, num_features).
    lstm = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
                     nonlinearity=lasagne.nonlinearities.tanh)
    lstm_out = SliceLayer(lstm, -1, 1)  # keep only the last time step
    # Merge 1D-conv and LSTM outputs
    dense_input = ConcatLayer([conv_out, lstm_out])
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(dense_input, p=.5),
                          num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer:
    convpool = DenseLayer(convpool,
                          num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
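ConcatLayer joins the two branch summaries along the feature axis (axis=1 by default) before the dense layers. A shape-only sketch; with the defaults above the 1D-conv branch flattens to 64 * 5 = 320 features:

from lasagne.layers import InputLayer, ConcatLayer

conv_feats = InputLayer((None, 320))  # FlattenLayer(Conv1DLayer(...)) output for n_timewin=7
lstm_feats = InputLayer((None, 128))  # last LSTM time step
merged = ConcatLayer([conv_feats, lstm_feats])
print(merged.output_shape)  # (None, 448)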
Example 8: network
# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import FlattenLayer [as alias]
def network(self):
    # Lazily-built network property (a method on a model class).
    if self._network is not None:
        return self._network
    # Build the computational graph using a dummy input.
    import lasagne
    from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
    from lasagne.layers import ElemwiseSumLayer, NonlinearityLayer, InputLayer, FlattenLayer, DenseLayer
    from lasagne.layers import batch_norm
    from lasagne.nonlinearities import rectify
    self._network_in = InputLayer(shape=(None, self.nb_channels,) + self.image_shape, input_var=None)
    network_out = []
    if self.convnet_blueprint is not None:
        convnet_layers = [self._network_in]
        layer_blueprints = list(map(str.strip, self.convnet_blueprint.split("->")))
        for i, layer_blueprint in enumerate(layer_blueprints, start=1):
            # e.g. "64@3x3(valid) -> 64@3x3(full)"
            nb_filters, rest = layer_blueprint.split("@")
            filter_shape, rest = rest.split("(")
            nb_filters = int(nb_filters)
            filter_shape = tuple(map(int, filter_shape.split("x")))
            pad = rest[:-1]
            preact = ConvLayer(convnet_layers[-1], num_filters=nb_filters, filter_size=filter_shape,
                               stride=(1, 1), nonlinearity=None, pad=pad,
                               W=lasagne.init.HeNormal(gain='relu'),
                               name="layer_{}_conv".format(i))
            if self.use_batch_norm:
                preact = batch_norm(preact)
            layer = NonlinearityLayer(preact, nonlinearity=rectify)
            convnet_layers.append(layer)
        # Branch output: the last conv pre-activation, flattened.
        network_out.append(FlattenLayer(preact))
    if self.fullnet_blueprint is not None:
        fullnet_layers = [FlattenLayer(self._network_in)]
        layer_blueprints = list(map(str.strip, self.fullnet_blueprint.split("->")))
        for i, layer_blueprint in enumerate(layer_blueprints, start=1):
            # e.g. "500 -> 500 -> 784"
            hidden_size = int(layer_blueprint)
            preact = DenseLayer(fullnet_layers[-1], num_units=hidden_size,
                                nonlinearity=None, W=lasagne.init.HeNormal(gain='relu'),
                                name="layer_{}_dense".format(i))
            if self.use_batch_norm:
                preact = batch_norm(preact)
            layer = NonlinearityLayer(preact, nonlinearity=rectify)
            fullnet_layers.append(layer)
        # Branch output: the last dense pre-activation.
        network_out.append(preact)
    self._network = ElemwiseSumLayer(network_out)
    # TODO: sigmoid should be applied here instead of within the loss function.
    print("Nb. of parameters in model: {}".format(lasagne.layers.count_params(self._network, trainable=True)))
    return self._network
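The blueprint strings parsed above follow a compact grammar: convolutional layers are written as nb_filters@HxW(pad) and dense layers as bare unit counts, chained with "->". A standalone check of the conv parsing logic, using a hypothetical blueprint:

blueprint = "64@3x3(valid) -> 64@3x3(full)"  # hypothetical example
for spec in map(str.strip, blueprint.split("->")):
    nb_filters, rest = spec.split("@")
    filter_shape, rest = rest.split("(")
    print(int(nb_filters), tuple(map(int, filter_shape.split("x"))), rest[:-1])
# prints: 64 (3, 3) valid
#         64 (3, 3) full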