This article collects typical usage examples of the Python method lasagne.layers.SliceLayer. If you are asking yourself what layers.SliceLayer does, how to use it, or what it looks like in practice, the curated examples below may help. You can also explore the other members of the lasagne.layers module.
The following presents 8 code examples of layers.SliceLayer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
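Before the examples, here is a minimal sketch (not taken from any of them) of what SliceLayer does: it applies a Python slice, or a single integer index, to one axis of its input, and an integer index drops that axis. The shapes and sizes below are illustrative only.
from lasagne.layers import InputLayer, SliceLayer, get_output_shape

l_in = InputLayer(shape=(None, 1, 4, 2024))
l_left = SliceLayer(l_in, indices=slice(0, 1000), axis=-1)  # first 1000 columns
l_last = SliceLayer(l_in, indices=-1, axis=-1)              # integer index drops the axis

print(get_output_shape(l_left))  # (None, 1, 4, 1000)
print(get_output_shape(l_last))  # (None, 1, 4)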
Example 1: create_network
# Required module: from lasagne import layers [as alias]
# Or: from lasagne.layers import SliceLayer [as alias]
# Additional imports assumed by this snippet:
# from lasagne.layers import (InputLayer, SliceLayer, FlattenLayer, Conv2DLayer,
#                             MaxPool2DLayer, DenseLayer, ConcatLayer, DropoutLayer)
# from lasagne.nonlinearities import softmax
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    # Split the input along the last axis: the first l columns feed the
    # convolutional branch, the remaining 1024 feed a dense branch.
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network
# Random search to initialize the weights
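For context, a hedged sketch of how a network returned by create_network might be compiled into a prediction function; this is standard Lasagne/Theano usage, not part of the original example:
import theano
import lasagne

network = create_network()
input_layer = lasagne.layers.get_all_layers(network)[0]              # the InputLayer comes first
prediction = lasagne.layers.get_output(network, deterministic=True)  # deterministic=True disables dropout
predict_fn = theano.function([input_layer.input_var], prediction)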
Example 2: create_network
# Required module: from lasagne import layers [as alias]
# Or: from lasagne.layers import SliceLayer [as alias]
# Additional imports assumed by this snippet:
# from lasagne.layers import (InputLayer, SliceLayer, FlattenLayer, Conv2DLayer,
#                             MaxPool2DLayer, DenseLayer, ConcatLayer)
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    # Split the input along the last axis, as in Example 1.
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    # layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer15, num_units=256)
    # Single linear output unit: this variant is set up for regression.
    network = DenseLayer(layer17, num_units=1, nonlinearity=None)
    return network
# Random search to initialize the weights
Example 3: _build
# Required module: from lasagne import layers [as alias]
# Or: from lasagne.layers import SliceLayer [as alias]
# Additional imports assumed by this snippet:
# import lasagne
# from lasagne.layers import InputLayer, LSTMLayer, SliceLayer, DenseLayer, get_all_layers, get_output
def _build(self, forget_bias=5.0, grad_clip=10.0):
    """Build the architecture."""
    network = InputLayer(shape=(None, self.seq_length, self.input_size),
                         name='input')
    self.input_var = network.input_var
    # Hidden layers
    tanh = lasagne.nonlinearities.tanh
    gate, constant = lasagne.layers.Gate, lasagne.init.Constant
    for _ in range(self.depth):
        network = LSTMLayer(network, self.width, nonlinearity=tanh,
                            grad_clipping=grad_clip,
                            forgetgate=gate(b=constant(forget_bias)))
    # Retain only the last time step of the LSTM output
    network = SliceLayer(network, -1, 1)
    # Output layers
    sigmoid = lasagne.nonlinearities.sigmoid
    loc_layer = DenseLayer(network, self.num_outputs * 2)
    conf_layer = DenseLayer(network, self.num_outputs,
                            nonlinearity=sigmoid)
    # Gather all layers into the DAPs instance
    self.network = get_all_layers([loc_layer, conf_layer])
    # Theano expressions for the outputs of the DAPs model
    self.loc_var, self.conf_var = get_output([loc_layer, conf_layer],
                                             deterministic=True)
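The SliceLayer(network, -1, 1) call above keeps only the last time step of the LSTM output; Lasagne's LSTMLayer also offers only_return_final=True for the same purpose. A small shape check with hypothetical sizes:
from lasagne.layers import InputLayer, LSTMLayer, SliceLayer, get_output_shape

l_in = InputLayer(shape=(None, 16, 10))  # (batch, seq_length, input_size)
l_lstm = LSTMLayer(l_in, num_units=32)   # -> (batch, seq_length, num_units)
l_last = SliceLayer(l_lstm, -1, 1)       # integer index drops the time axis
print(get_output_shape(l_last))          # (None, 32)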
Example 4: __init__
# Required module: from lasagne import layers [as alias]
# Or: from lasagne.layers import SliceLayer [as alias]
# Assumes: import lasagne; import lasagne.layers as L
def __init__(self, incoming, channel_layer_class, name=None, **channel_layer_kwargs):
    super(ChannelwiseLayer, self).__init__(incoming, name=name)
    self.channel_layer_class = channel_layer_class
    self.channel_incomings = []
    self.channel_outcomings = []
    # Channels live on axis 1 (axis 0 is the batch), so iterate over shape[1].
    for channel in range(lasagne.layers.get_output_shape(incoming)[1]):
        channel_incoming = L.SliceLayer(incoming, indices=slice(channel, channel + 1), axis=1,
                                        name='%s.%s%d' % (name, 'slice', channel) if name is not None else None)
        channel_outcoming = channel_layer_class(channel_incoming,
                                                name='%s.%s%d' % (name, 'op', channel) if name is not None else None,
                                                **channel_layer_kwargs)
        self.channel_incomings.append(channel_incoming)
        self.channel_outcomings.append(channel_outcoming)
    self.outcoming = L.ConcatLayer(self.channel_outcomings, axis=1,
                                   name='%s.%s' % (name, 'concat') if name is not None else None)
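A hypothetical usage sketch, assuming ChannelwiseLayer is defined as above and L is lasagne.layers; MaxPool2DLayer stands in for any per-channel operation:
import lasagne.layers as L

l_in = L.InputLayer(shape=(None, 3, 32, 32))
# Pool each of the 3 channels independently, then concatenate along axis 1.
l_cw = ChannelwiseLayer(l_in, L.MaxPool2DLayer, name='cw', pool_size=(2, 2))
print(L.get_output_shape(l_cw.outcoming))  # (None, 3, 16, 16)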
Example 5: build_convpool_lstm
# Required module: from lasagne import layers [as alias]
# Or: from lasagne.layers import SliceLayer [as alias]
# Additional imports assumed by this snippet:
# import lasagne
# from lasagne.layers import (FlattenLayer, ConcatLayer, ReshapeLayer, LSTMLayer,
#                             SliceLayer, DenseLayer, get_output_shape)
# build_cnn is a helper defined elsewhere in the same project.
def build_convpool_lstm(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with an LSTM layer to integrate time from sequences of EEG images.
    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip: value to which gradients are clipped during the backward pass
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: the output layer of the network
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # At this point convnets is a list of length n_timewin with entries of shape [n_samples, features];
    # we want a single tensor of shape [n_samples, n_timewin, features].
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    # Input to the LSTM should have shape (batch_size, SEQ_LENGTH, num_features)
    convpool = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
                         nonlinearity=lasagne.nonlinearities.tanh)
    # We only need the final prediction; isolate it and feed it to the next layer.
    convpool = SliceLayer(convpool, -1, 1)  # select the last time step
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=256, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
                          num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
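A hedged shape walkthrough of the concat-and-reshape step above, with an assumed feature size of 512: ConcatLayer joins along axis 1 by default, and ReshapeLayer recovers the (batch, time, features) layout the LSTM expects.
from lasagne.layers import InputLayer, ConcatLayer, ReshapeLayer, get_output_shape

n_timewin, feats = 7, 512
flats = [InputLayer(shape=(None, feats)) for _ in range(n_timewin)]
stacked = ConcatLayer(flats)                          # default axis=1 -> (None, 7 * 512)
seq = ReshapeLayer(stacked, ([0], n_timewin, feats))  # -> (None, 7, 512)
print(get_output_shape(seq))                          # (None, 7, 512)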
Example 6: util_slice_layer
# Required module: from lasagne import layers [as alias]
# Or: from lasagne.layers import SliceLayer [as alias]
def util_slice_layer(self, layer, persons_cnt, factor):
    # Split `layer` along axis 2 into `factor` groups of persons_cnt // factor
    # persons each (persons_cnt is assumed to be divisible by factor).
    g_sz = persons_cnt // factor
    layers = []
    for i in range(factor):
        layer_i = SliceLayer(layer, indices=slice(i * g_sz, (i + 1) * g_sz), axis=2)
        layers.append(layer_i)
    return layers
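A hypothetical call, assuming obj is an instance of the class that defines this helper and persons sit on axis 2:
from lasagne.layers import InputLayer, get_output_shape

l_in = InputLayer(shape=(None, 64, 16))        # 16 persons on axis 2
groups = obj.util_slice_layer(l_in, persons_cnt=16, factor=4)
print([get_output_shape(g) for g in groups])   # four entries of (None, 64, 4)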
############################################################################
Example 7: build_convpool_mix
# Required module: from lasagne import layers [as alias]
# Or: from lasagne.layers import SliceLayer [as alias]
# Additional imports assumed by this snippet:
# import lasagne
# from lasagne.layers import (FlattenLayer, ConcatLayer, ReshapeLayer, DimshuffleLayer,
#                             Conv1DLayer, LSTMLayer, SliceLayer, DenseLayer, get_output_shape)
# build_cnn is a helper defined elsewhere in the same project.
def build_convpool_mix(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with combined LSTM and 1D-conv layers.
    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip: value to which gradients are clipped during the backward pass
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: the output layer of the network
    """
    convnets = []
    w_init = None
    # Build n_timewin parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # At this point convnets is a list of length n_timewin with entries of shape [n_samples, features];
    # we want a single tensor of shape [n_samples, n_timewin, features].
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    reformConvpool = DimshuffleLayer(convpool, (0, 2, 1))
    # Input to the 1D conv layer should be (batch_size, num_input_channels, input_length)
    conv_out = Conv1DLayer(reformConvpool, 64, 3)
    conv_out = FlattenLayer(conv_out)
    # Input to the LSTM should have shape (batch_size, SEQ_LENGTH, num_features)
    lstm = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
                     nonlinearity=lasagne.nonlinearities.tanh)
    lstm_out = SliceLayer(lstm, -1, 1)  # keep only the last time step
    # Merge 1D-conv and LSTM outputs
    dense_input = ConcatLayer([conv_out, lstm_out])
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(dense_input, p=.5),
                          num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer:
    convpool = DenseLayer(convpool,
                          num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool
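A hedged shape check of the DimshuffleLayer step above (hypothetical sizes): the (batch, time, features) sequence is transposed so Conv1DLayer sees features as input channels and time as the length axis.
from lasagne.layers import InputLayer, DimshuffleLayer, Conv1DLayer, get_output_shape

seq = InputLayer(shape=(None, 7, 512))   # (batch, time, features)
chan = DimshuffleLayer(seq, (0, 2, 1))   # -> (batch, features, time)
conv = Conv1DLayer(chan, num_filters=64, filter_size=3)
print(get_output_shape(conv))            # (None, 64, 5)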
Example 8: network
# Required module: from lasagne import layers [as alias]
# Or: from lasagne.layers import SliceLayer [as alias]
def network(self):
    if self._network is not None:
        return self._network
    # Build the computational graph using a dummy input.
    import lasagne
    from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
    from lasagne.layers import ElemwiseSumLayer, NonlinearityLayer, InputLayer, FlattenLayer, SliceLayer
    from lasagne.nonlinearities import rectify
    self._network_in = InputLayer(shape=(None, self.nb_channels,) + self.image_shape, input_var=None)
    convnet_layers = [self._network_in]
    convnet_layers_preact = [self._network_in]
    layer_blueprints = list(map(str.strip, self.convnet_blueprint.split("->")))
    for i, layer_blueprint in enumerate(layer_blueprints, start=1):
        # Blueprint grammar, e.g. "64@3x3(valid) -> 64@3x3(full)"
        nb_filters, rest = layer_blueprint.split("@")
        filter_shape, rest = rest.split("(")
        nb_filters = int(nb_filters)
        filter_shape = tuple(map(int, filter_shape.split("x")))
        pad = rest[:-1]
        preact = ConvLayer(convnet_layers[-1], num_filters=nb_filters, filter_size=filter_shape,
                           stride=(1, 1), nonlinearity=None, pad=pad,
                           W=lasagne.init.HeNormal(gain='relu'))
        # Add a shortcut connection from the mirror layer in the first half.
        if i > len(layer_blueprints) // 2 and i != len(layer_blueprints):
            shortcut = convnet_layers_preact[len(layer_blueprints) - i]
            if preact.output_shape[1] != shortcut.output_shape[1]:
                if preact.output_shape[1] == 1:
                    # Keep only the first channel of the shortcut so the sum is valid.
                    shortcut = SliceLayer(shortcut, slice(0, 1), axis=1)
                else:
                    raise NameError("Something is wrong.")
            print("Shortcut from {} to {}".format(len(layer_blueprints) - i, i))
            preact = ElemwiseSumLayer([preact, shortcut])
        convnet_layers_preact.append(preact)
        layer = NonlinearityLayer(preact, nonlinearity=rectify)
        convnet_layers.append(layer)
    self._network = FlattenLayer(preact)
    print("Nb. of parameters in model: {}".format(lasagne.layers.count_params(self._network, trainable=True)))
    return self._network
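A standalone sketch of the blueprint parsing used above, run on the example string from the comment:
blueprint = "64@3x3(valid) -> 64@3x3(full)"
for spec in map(str.strip, blueprint.split("->")):
    nb_filters, rest = spec.split("@")
    filter_shape, rest = rest.split("(")
    pad = rest[:-1]
    print(int(nb_filters), tuple(map(int, filter_shape.split("x"))), pad)
# 64 (3, 3) valid
# 64 (3, 3) full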