This page collects typical usage examples of the Python module lasagne.layers. If you have been wondering how exactly to use lasagne.layers, or are looking for worked examples, the curated code samples below may help. You can also explore further usage examples of its parent package, lasagne.
Fifteen code examples of lasagne.layers follow, sorted by popularity by default.
Example 1: example2
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs: from lasagne.layers import get_all_layers, plus an external get_network_str helper)
def example2():
""" Two branches"""
# Input
l_in = lasagne.layers.InputLayer((100, 1, 20, 20))
# Branch one
l_conv1 = lasagne.layers.Conv2DLayer(l_in, num_filters=32, filter_size=(5, 5))
l_pool1 = lasagne.layers.MaxPool2DLayer(l_conv1, pool_size=(2, 2))
l_dense1 = lasagne.layers.DenseLayer(l_pool1, num_units=20)
# Branch two
l_conv2 = lasagne.layers.Conv2DLayer(l_in, num_filters=32, filter_size=(5, 5))
l_pool2 = lasagne.layers.MaxPool2DLayer(l_conv2, pool_size=(2, 2))
l_dense2 = lasagne.layers.DenseLayer(l_pool2, num_units=20)
# Merge
l_concat = lasagne.layers.ConcatLayer((l_dense1, l_dense2))
# Output
l_out = lasagne.layers.DenseLayer(l_concat, num_units=10)
layers = get_all_layers(l_out)
print(get_network_str(layers, get_network=False, incomings=True, outgoings=True))
return None
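For quick inspection without the external get_network_str helper, lasagne's built-in get_all_layers already returns the graph in topological order; a minimal sketch using the l_out from above:

for layer in lasagne.layers.get_all_layers(l_out):
    print(type(layer).__name__, layer.output_shape)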
Example 2: set_weights
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs: import pickle)
def set_weights(net, model_file):
    '''
    Sets the parameters of the model using the weights stored in model_file.

    Parameters
    ----------
    net: a Lasagne layer
    model_file: string
        path to the file that contains the weights

    Returns
    -------
    None
    '''
    with open(model_file, 'rb') as f:  # pickled arrays must be read in binary mode
        print('Load pretrained weights from %s...' % model_file)
        model = pickle.load(f)
        print('Set the weights...')
        lasagne.layers.set_all_param_values(net, model, trainable=True)
######## Below, there are several helper functions to transform (lists of) images into the right format ######
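A hedged usage sketch for set_weights; the builder function and weights path are hypothetical placeholders, and the pickle must contain a list of arrays matching lasagne.layers.get_all_params(net, trainable=True):

net = build_model()  # hypothetical: returns the network's output Lasagne layer
set_weights(net, 'weights/pretrained.pkl')  # hypothetical path to pickled parameter arrays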
Example 3: instance_norm
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs: from lasagne.nonlinearities import identity, plus InstanceNormLayer and NonlinearityLayer in scope)
def instance_norm(layer, **kwargs):
"""
The equivalent of Lasagne's `batch_norm()` convenience method, but for instance normalization.
Refer: http://lasagne.readthedocs.io/en/latest/modules/layers/normalization.html#lasagne.layers.batch_norm
"""
nonlinearity = getattr(layer, 'nonlinearity', None)
if nonlinearity is not None:
layer.nonlinearity = identity
if hasattr(layer, 'b') and layer.b is not None:
del layer.params[layer.b]
layer.b = None
bn_name = (kwargs.pop('name', None) or
(getattr(layer, 'name', None) and layer.name + '_bn'))
layer = InstanceNormLayer(layer, name=bn_name, **kwargs)
if nonlinearity is not None:
nonlin_name = bn_name and bn_name + '_nonlin'
layer = NonlinearityLayer(layer, nonlinearity, name=nonlin_name)
return layer
# TODO: Add normalization
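Like lasagne's batch_norm convenience function, instance_norm is applied around a layer at construction time: it strips the layer's bias and nonlinearity, inserts the normalization, then re-applies the nonlinearity on top. A minimal usage sketch with arbitrary sizes:

l_in = lasagne.layers.InputLayer((None, 3, 64, 64))
l_conv = instance_norm(lasagne.layers.Conv2DLayer(l_in, num_filters=32, filter_size=3, name='conv1'))
# l_conv is now conv1 (linear, no bias) -> conv1_bn (InstanceNormLayer) -> conv1_bn_nonlin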
Example 4: get_output_for
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs: import theano.tensor as T)
def get_output_for(self, input, **kwargs):
# by default we assume 'cross', consistent with corrmm.
conv_mode = 'conv' if self.flip_filters else 'cross'
from lasagne.layers.dnn import dnn
image = T.alloc(0., input.shape[0], *self.output_shape[1:])
conved = dnn.dnn_conv(img=image,
kerns=self.W,
subsample=self.stride,
border_mode=self.pad,
conv_mode=conv_mode
)
    # Transposed convolution: back-propagate `input` through the forward conv
    # by substituting it as the known gradient of `conved`.
    grad = T.grad(conved.sum(), wrt=image, known_grads={conved: input})
if self.b is None:
activation = grad
elif self.untie_biases:
activation = grad + self.b.dimshuffle('x', 0, 1, 2)
else:
activation = grad + self.b.dimshuffle('x', 0, 'x', 'x')
return self.nonlinearity(activation)
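The trick here: a transposed convolution is the gradient of a forward convolution with respect to its input, so dnn_conv is run on a dummy zero image and T.grad routes the actual incoming tensor through it via known_grads. The same idea with plain Theano ops, as a standalone sketch with arbitrary shapes (no cuDNN required):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d

X = T.tensor4('X')                               # stand-in for the incoming tensor
image = T.alloc(0., 1, 3, 8, 8)                  # dummy image with the deconv's output shape
W = theano.shared(np.random.randn(16, 3, 3, 3).astype(theano.config.floatX))
conved = conv2d(image, W, border_mode='valid')   # forward conv: shape (1, 16, 6, 6)
deconved = T.grad(conved.sum(), wrt=image, known_grads={conved: X})  # transposed conv of X
f = theano.function([X], deconved)
print(f(np.ones((1, 16, 6, 6), dtype=theano.config.floatX)).shape)  # (1, 3, 8, 8)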
Example 5: build_bilinear_net
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs: import lasagne.layers as L, a project-local layer module as LT, theano.tensor as T, and OrderedDict from collections)
def build_bilinear_net(input_shapes, X_var=None, U_var=None, X_diff_var=None, axis=1):
x_shape, u_shape = input_shapes
X_var = X_var or T.tensor4('X')
U_var = U_var or T.matrix('U')
X_diff_var = X_diff_var or T.tensor4('X_diff')
X_next_var = X_var + X_diff_var
l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var)
l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var)
l_x_diff_pred = LT.BilinearLayer([l_x, l_u], axis=axis)
l_x_next_pred = L.ElemwiseMergeLayer([l_x, l_x_diff_pred], T.add)
l_y = L.flatten(l_x)
l_y_diff_pred = L.flatten(l_x_diff_pred)
X_next_pred_var = lasagne.layers.get_output(l_x_next_pred)
loss = ((X_next_var - X_next_pred_var) ** 2).mean(axis=0).sum() / 2.
net_name = 'BilinearNet'
input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
pred_layers = OrderedDict([('y_diff_pred', l_y_diff_pred), ('y', l_y), ('x0_next_pred', l_x_next_pred)])
return net_name, input_vars, pred_layers, loss
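A hedged sketch of wiring the returned pieces into a training step; the shapes and learning rate are assumptions, and running it still requires the project's LT.BilinearLayer to be importable:

net_name, input_vars, pred_layers, loss = build_bilinear_net(((3, 32, 32), (4,)))
params = lasagne.layers.get_all_params(pred_layers['x0_next_pred'], trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-3)
train_fn = theano.function(list(input_vars.values()), loss, updates=updates)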
Example 6: build_BiRNN_CNN
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs: from lasagne import nonlinearities, plus a build_BiRNN helper defined alongside)
def build_BiRNN_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, nonlinearity=nonlinearities.tanh,
precompute_input=True, num_filters=20, dropout=True, in_to_out=False):
# first get some necessary dimensions or parameters
conv_window = 3
_, sent_length, _ = incoming2.output_shape
# dropout before cnn?
if dropout:
incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)
# construct convolution layer
cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
# infer the pool size for pooling (pool size should go through all time step of cnn)
_, _, pool_size = cnn_layer.output_shape
# construct max pool layer
pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
# reshape the layer to match rnn incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))
# finally, concatenate the two incoming layers together.
incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)
return build_BiRNN(incoming, num_units, mask=mask, grad_clipping=grad_clipping, nonlinearity=nonlinearity,
precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
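Why the pool size can simply be read off the layer: with pad='full' a Conv1D output has length L + conv_window - 1, and max-pooling over that entire length leaves exactly one value per filter. A standalone shape check with assumed character-embedding dimensions (the same arithmetic applies to Example 7 below):

l_chars = lasagne.layers.InputLayer((None, 50, 20))  # assumed: 50 channels, sequences of 20 chars
l_cnn = lasagne.layers.Conv1DLayer(l_chars, num_filters=20, filter_size=3, pad='full')
print(l_cnn.output_shape)   # (None, 20, 22) -- 20 + 3 - 1 time steps
l_pool = lasagne.layers.MaxPool1DLayer(l_cnn, pool_size=l_cnn.output_shape[2])
print(l_pool.output_shape)  # (None, 20, 1) -- one feature per filter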
Example 7: build_BiLSTM_CNN
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs a build_BiLSTM helper defined alongside)
def build_BiLSTM_CNN(incoming1, incoming2, num_units, mask=None, grad_clipping=0, precompute_input=True,
peepholes=False, num_filters=20, dropout=True, in_to_out=False):
# first get some necessary dimensions or parameters
conv_window = 3
_, sent_length, _ = incoming2.output_shape
# dropout before cnn?
if dropout:
incoming1 = lasagne.layers.DropoutLayer(incoming1, p=0.5)
# construct convolution layer
cnn_layer = lasagne.layers.Conv1DLayer(incoming1, num_filters=num_filters, filter_size=conv_window, pad='full',
nonlinearity=lasagne.nonlinearities.tanh, name='cnn')
# infer the pool size for pooling (pool size should go through all time step of cnn)
_, _, pool_size = cnn_layer.output_shape
# construct max pool layer
pool_layer = lasagne.layers.MaxPool1DLayer(cnn_layer, pool_size=pool_size)
# reshape the layer to match lstm incoming layer [batch * sent_length, num_filters, 1] --> [batch, sent_length, num_filters]
output_cnn_layer = lasagne.layers.reshape(pool_layer, (-1, sent_length, [1]))
# finally, concatenate the two incoming layers together.
incoming = lasagne.layers.concat([output_cnn_layer, incoming2], axis=2)
return build_BiLSTM(incoming, num_units, mask=mask, grad_clipping=grad_clipping, peepholes=peepholes,
precompute_input=precompute_input, dropout=dropout, in_to_out=in_to_out)
Example 8: build_linear_network
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
def build_linear_network(self, input_width, input_height, output_dim,
num_frames, batch_size):
"""
Build a simple linear learner. Useful for creating
tests that sanity-check the weight update code.
"""
l_in = lasagne.layers.InputLayer(
shape=(None, num_frames, input_width, input_height)
)
l_out = lasagne.layers.DenseLayer(
l_in,
num_units=output_dim,
nonlinearity=None,
W=lasagne.init.Constant(0.0),
b=None
)
return l_out
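A hedged sketch of compiling this learner into a Q-value function; the dimensions are arbitrary, agent stands for whatever object the method is defined on, and it assumes import theano and import theano.tensor as T:

l_out = agent.build_linear_network(input_width=8, input_height=8,
                                   output_dim=4, num_frames=2, batch_size=32)
states = T.tensor4('states')
q_fn = theano.function([states], lasagne.layers.get_output(l_out, states))
# q_fn(batch) returns a (batch, 4) array of Q-values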
Example 9: style_loss
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs: import theano.tensor as T and a parsed args namespace)
def style_loss(self):
"""Returns a list of loss components as Theano expressions. Finds the best style patch for each patch in the
current image using normalized cross-correlation, then computes the mean squared error for all patches.
"""
style_loss = []
if args.style_weight == 0.0:
return style_loss
# Extract the patches from the current image, as well as their magnitude.
result = self.do_extract_patches(zip(self.style_layers, self.model.get_outputs('conv', self.style_layers)))
# Multiple style layers are optimized separately, usually conv3_1 and conv4_1. Semantic data not used here.
# Semantic data is only used for selecting nearest neighbor style patches
# tensor_matches = current_best, i.e. indices of best matching patches in each layer
for l, matches, patches in zip(self.style_layers, self.tensor_matches, result[0::3]):
# Compute the mean squared error between the current patch and the best matching style patch.
# Ignore the last channels (from semantic map) so errors returned are indicative of image only.
# matches = tensor_matches[i] = current_best[i]
loss = T.mean((patches - matches[:,:self.model.channels[l]]) ** 2.0)
if 'laplace' not in l:
style_loss.append(('style', l, args.style_weight * loss))
else:
style_loss.append(('style', l, args.style_lapweight * loss))
return style_loss
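Each returned entry is a (kind, layer_name, expression) tuple, which lets callers weight and log the components separately before summing. A minimal sketch of collapsing them into one scalar, assuming an instance named model:

total_style_loss = sum(expr for _, _, expr in model.style_loss())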
Example 10: __init__
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs: import theano, theano.tensor as T, and a Model base class)
def __init__(self, n_dim, n_out, n_chan=1, n_batch=128, n_superbatch=12800, model='bernoulli',
opt_alg='adam', opt_params={'lr' : 1e-3, 'b1': 0.9, 'b2': 0.99}):
    # save the model type that will be created
    self.model = model
    self.n_sample = 1  # adjustable parameter, though 1 works best in practice
    self.n_batch = n_batch
    self.n_lat = 200
    self.n_dim = n_dim
    self.n_chan = n_chan
Model.__init__(self, n_dim, n_chan, n_out, n_superbatch, opt_alg, opt_params)
# sample generation
Z = T.matrix(dtype=theano.config.floatX) # noise matrix
l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
l_qz_mu, l_qz_logsigma, l_qa_mu, l_qa_logsigma, \
l_qa, l_qz, l_d = self.network
sample = lasagne.layers.get_output(l_px_mu, {l_qz : Z}, deterministic=True)
self.sample = theano.function([Z], sample, on_unused_input='warn')
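A hedged sketch of drawing samples with the compiled function; m stands for an instance of this class, and the batch size is arbitrary:

import numpy as np
Z = np.random.randn(64, m.n_lat).astype(theano.config.floatX)  # n_lat = 200 per __init__
samples = m.sample(Z)  # decoder output p(x|z) for 64 random latent codes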
Example 11: example1
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs the external get_network_str helper from Example 1)
def example1():
""" Sequential network, no branches or cycles"""
l_in = lasagne.layers.InputLayer((100, 20))
l_hidden1 = lasagne.layers.DenseLayer(l_in, num_units=20)
l_hidden1_dropout = lasagne.layers.DropoutLayer(l_hidden1)
l_hidden2 = lasagne.layers.DenseLayer(l_hidden1_dropout, num_units=20)
l_hidden2_dropout = lasagne.layers.DropoutLayer(l_hidden2)
l_out = lasagne.layers.DenseLayer(l_hidden2_dropout, num_units=10)
print(get_network_str(l_out))
return None
Example 12: load_model
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs: import os, a project-local utils module, and a model_dir path)
def load_model(net, layer='fc8'):
model_values = utils.PickleLoad(os.path.join(model_dir, 'caffe_reference_%s.pkl' % layer))
lasagne.layers.set_all_param_values(net[layer], model_values)
Example 13: build_one_side
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs a project-local module named layers providing DenseGraphCovLayer)
def build_one_side(self, X, A, x, a, hidden_list):
"""
:param X: theano param # N times F
:param A: theano param # N times N
:param x: real x, for determining the dimension
:param a: real a, for determining the dimension
:return:
"""
l_x_in = lasagne.layers.InputLayer(shape=(a.shape[0], x.shape[1]), input_var=X)
cur_layer = layers.DenseGraphCovLayer(l_x_in, A, hidden_list[0], nonlinearity=lasagne.nonlinearities.tanh)
for hidden_unit in hidden_list[1:]:
cur_layer = layers.DenseGraphCovLayer(cur_layer, A, hidden_unit, nonlinearity=lasagne.nonlinearities.tanh)
return lasagne.layers.get_output(cur_layer), cur_layer
Example 14: _build
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs: from lasagne.layers import InputLayer, LSTMLayer, SliceLayer, DenseLayer, get_all_layers, get_output)
def _build(self, forget_bias=5.0, grad_clip=10.0):
"""Build architecture
"""
network = InputLayer(shape=(None, self.seq_length, self.input_size),
name='input')
self.input_var = network.input_var
# Hidden layers
tanh = lasagne.nonlinearities.tanh
gate, constant = lasagne.layers.Gate, lasagne.init.Constant
for _ in range(self.depth):
network = LSTMLayer(network, self.width, nonlinearity=tanh,
grad_clipping=grad_clip,
forgetgate=gate(b=constant(forget_bias)))
# Retain last-output state
network = SliceLayer(network, -1, 1)
# Output layer
sigmoid = lasagne.nonlinearities.sigmoid
loc_layer = DenseLayer(network, self.num_outputs * 2)
conf_layer = DenseLayer(network, self.num_outputs,
nonlinearity=sigmoid)
# Grab all layers into DAPs instance
self.network = get_all_layers([loc_layer, conf_layer])
# Get theano expression for outputs of DAPs model
self.loc_var, self.conf_var = get_output([loc_layer, conf_layer],
deterministic=True)
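A hedged sketch of compiling the two deterministic outputs into a single forward pass; daps stands for an instance of this class, and it assumes import theano:

predict_fn = theano.function([daps.input_var], [daps.loc_var, daps.conf_var])
loc, conf = predict_fn(batch)  # batch: float array of shape (batch, seq_length, input_size)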
Example 15: __init__
# Required imports: import lasagne [as alias]
# Or: from lasagne import layers [as alias]
# (this excerpt also needs a relu nonlinearity in scope, e.g. lasagne.nonlinearities.rectify)
def __init__(self, incoming, target_shape, filter_size, stride=(2, 2),
W=lasagne.init.Normal(0.05), b=lasagne.init.Constant(0.), nonlinearity=relu, **kwargs):
super(Deconv2DLayer, self).__init__(incoming, **kwargs)
self.target_shape = target_shape
self.nonlinearity = (lasagne.nonlinearities.identity if nonlinearity is None else nonlinearity)
self.filter_size = lasagne.layers.dnn.as_tuple(filter_size, 2)
self.stride = lasagne.layers.dnn.as_tuple(stride, 2)
self.W_shape = (incoming.output_shape[1], target_shape[1], filter_size[0], filter_size[1])
self.W = self.add_param(W, self.W_shape, name="W")
if b is not None:
self.b = self.add_param(b, (target_shape[1],), name="b")
else:
self.b = None
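This excerpt shows only the constructor; a complete layer would also define the shape and output methods, with get_output_for resembling the gradient-based deconvolution of Example 4 above. A minimal sketch of the shape method, assuming target_shape already includes the batch dimension:

    def get_output_shape_for(self, input_shape):
        return self.target_shape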