This article collects typical usage examples of the lasagne.init method in Python. If you have been wondering how exactly lasagne.init is used, or what example code for it looks like, the curated examples below should help. You can also explore further usage examples from the lasagne module that this method belongs to.
A total of 6 code examples of the lasagne.init method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: mdclW
# Required module: import lasagne [as alias]
# Or: from lasagne import init [as alias]
def mdclW(num_filters, num_channels, filter_size, winit, name, scales):
    # Coefficient Initializer
    sinit = lasagne.init.Constant(1.0 / (1 + len(scales)))
    # Total filter size
    size = filter_size + (filter_size - 1) * (scales[-1] - 1)
    # Multiscale Dilated Filter
    W = T.zeros((num_filters, num_channels, size, size))
    # Undilated Base Filter
    baseW = theano.shared(lasagne.utils.floatX(winit.sample((num_filters, num_channels, filter_size, filter_size))), name=name + '.W')
    for scale in scales[::-1]:  # iterate backwards so that we place the main filter on top
        W = T.set_subtensor(W[:, :, scales[-1] - scale:size - scales[-1] + scale:scale, scales[-1] - scale:size - scales[-1] + scale:scale],
                            baseW * theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name + '.coeff_' + str(scale)).dimshuffle(0, 'x', 'x', 'x'))
    return W
# Subpixel Upsample Layer from (https://arxiv.org/abs/1609.05158)
# This layer uses a set of r^2 set_subtensor calls to reorganize the tensor in a subpixel-layer upscaling style
# as done in the ESPCN Magic Pony paper for super-resolution.
# r is the upscale factor.
# c is the number of output channels.
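The layer described by these comments is not included in the excerpt. Below is a minimal sketch of what such a subpixel upsample layer could look like, assuming the standard Lasagne Layer API; the class name and details are illustrative, not necessarily the exact implementation from the source repository.

import lasagne
import theano.tensor as T

class SubpixelUpsampleLayer(lasagne.layers.Layer):
    def __init__(self, incoming, r, c, **kwargs):
        super(SubpixelUpsampleLayer, self).__init__(incoming, **kwargs)
        self.r = r  # upscale factor
        self.c = c  # number of output channels

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], self.c, self.r * input_shape[2], self.r * input_shape[3])

    def get_output_for(self, input, **kwargs):
        # Scatter each of the r^2 channel groups of the input (assumed to have
        # c * r^2 channels) into its own spatial offset of a zero tensor with
        # the upscaled shape, using one set_subtensor call per offset.
        out = T.zeros((input.shape[0], self.c, self.r * input.shape[2], self.r * input.shape[3]))
        for x in range(self.r):
            for y in range(self.r):
                out = T.set_subtensor(out[:, :, x::self.r, y::self.r],
                                      input[:, self.r * x + y::self.r * self.r, :, :])
        return out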
Example 2: __init__
# Required module: import lasagne [as alias]
# Or: from lasagne import init [as alias]
def __init__(self, incoming, RMAX, DMAX, axes='auto', epsilon=1e-4, alpha=0.1,
             beta=lasagne.init.Constant(0), gamma=lasagne.init.Constant(1),
             mean=lasagne.init.Constant(0), inv_std=lasagne.init.Constant(1), **kwargs):
    super(BatchReNormDNNLayer, self).__init__(
        incoming, axes, epsilon, alpha, beta, gamma, mean, inv_std,
        **kwargs)
    all_but_second_axis = (0,) + tuple(range(2, len(self.input_shape)))
    self.RMAX, self.DMAX = RMAX, DMAX
    if self.axes not in ((0,), all_but_second_axis):
        raise ValueError("BatchNormDNNLayer only supports normalization "
                         "across the first axis, or across all but the "
                         "second axis, got axes=%r" % (axes,))
Example 3: get_output_for
# Required module: import lasagne [as alias]
# Or: from lasagne import init [as alias]
def get_output_for(self, input, **kwargs):
    if input.ndim > 2:
        input = input.flatten(2)
    activation = T.dot(input, self.W * self.weights_mask)
    if self.b is not None:
        activation = activation + self.b.dimshuffle('x', 0)
    return self.nonlinearity(activation)
# Conditioning Masked Layer
# Currently not used.
# class CML(MaskedLayer):
#     def __init__(self, incoming, num_units, mask_generator, use_cond_mask=False,
#                  U=lasagne.init.GlorotUniform(), W=lasagne.init.GlorotUniform(),
#                  b=init.Constant(0.), nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
#         super(CML, self).__init__(incoming, num_units, mask_generator, W,
#                                   b, nonlinearity, **kwargs)
#         self.use_cond_mask = use_cond_mask
#         if use_cond_mask:
#             self.U = self.add_param(spec=U,
#                                     shape=(num_inputs, num_units),
#                                     name='U',
#                                     trainable=True,
#                                     regularizable=False)
#
#     def get_output_for(self, input, **kwargs):
#         lin = self.lin_output = T.dot(input, self.W * self.weights_mask) + self.b
#         if self.use_cond_mask:
#             lin = lin + T.dot(T.ones_like(input), self.U * self.weights_mask)
#         return lin if self._activation is None else self._activation(lin)
# MADE layer, adapted from M. Germain
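Both the get_output_for method above and the commented-out CML class rely on the same idea: an element-wise binary mask is applied to the weight matrix before the dot product, so only the connections allowed by the mask contribute to each output unit. A toy NumPy illustration of that masked affine transform (all names and values here are illustrative):

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(4, 3)                                     # batch of 4 inputs with 3 features
W = rng.randn(3, 5)                                     # dense weights
weights_mask = (rng.rand(3, 5) > 0.5).astype(W.dtype)   # binary connectivity mask
b = np.zeros(5)

activation = x.dot(W * weights_mask) + b                # masked affine transform
output = np.maximum(activation, 0.0)                    # e.g. a rectify nonlinearity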
Example 4: main
# Required module: import lasagne [as alias]
# Or: from lasagne import init [as alias]
def main():
    voc, full_embeddings = sys.argv[1], sys.argv[2]
    voc = set([line.strip() for line in open(voc, 'r')] + ['_UNK'])
    # get all embeddings from full embeddings
    embeddings = dict()
    for i, line in enumerate(open(full_embeddings, 'r')):
        parts = line.rstrip().split()
        word = parts[0]
        if word in voc:
            # print(parts[1:])
            try:
                embeddings[word] = list(map(float, parts[1:]))
            except Exception as e:
                print('cannot parse line %i' % i, file=sys.stderr)
    # estimate dim
    dim = len(list(embeddings.values())[0])
    if dim == 0:
        raise Exception('embedding dim is 0, probably parsing error')
    # init unk
    embeddings['_UNK'] = initializer((dim,))
    # handle missing embeddings
    for word in voc:
        if word not in embeddings:
            print("no embedding for %s, skipping it " % word, file=sys.stderr)
            emb = initializer((dim,))
        else:
            emb = embeddings[word]
        print(word + '\t' + ' '.join(map(str, emb)))
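Note that initializer is not defined inside this excerpt; it comes from elsewhere in the source module. A minimal stand-in consistent with how it is called above, assuming it simply samples a random vector from a lasagne.init initializer (the concrete initializer class used in the original module may differ):

import lasagne

def initializer(shape):
    # Normal works for 1-D shapes; Glorot-style initializers require >= 2 dims.
    return lasagne.init.Normal(std=0.1).sample(shape)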
Example 5: _sample_trained_minibatch_gan
# Required module: import lasagne [as alias]
# Or: from lasagne import init [as alias]
def _sample_trained_minibatch_gan(params_file, n, batch_size, rs):
    import lasagne
    from lasagne.init import Normal
    import lasagne.layers as ll
    import theano as th
    from theano.sandbox.rng_mrg import MRG_RandomStreams
    import theano.tensor as T

    import nn

    theano_rng = MRG_RandomStreams(rs.randint(2 ** 15))
    lasagne.random.set_rng(np.random.RandomState(rs.randint(2 ** 15)))

    noise_dim = (batch_size, 100)
    noise = theano_rng.uniform(size=noise_dim)
    ls = [ll.InputLayer(shape=noise_dim, input_var=noise)]
    ls.append(nn.batch_norm(
        ll.DenseLayer(ls[-1], num_units=4*4*512, W=Normal(0.05),
                      nonlinearity=nn.relu),
        g=None))
    ls.append(ll.ReshapeLayer(ls[-1], (batch_size, 512, 4, 4)))
    ls.append(nn.batch_norm(
        nn.Deconv2DLayer(ls[-1], (batch_size, 256, 8, 8), (5, 5), W=Normal(0.05),
                         nonlinearity=nn.relu),
        g=None))  # 4 -> 8
    ls.append(nn.batch_norm(
        nn.Deconv2DLayer(ls[-1], (batch_size, 128, 16, 16), (5, 5), W=Normal(0.05),
                         nonlinearity=nn.relu),
        g=None))  # 8 -> 16
    ls.append(nn.weight_norm(
        nn.Deconv2DLayer(ls[-1], (batch_size, 3, 32, 32), (5, 5), W=Normal(0.05),
                         nonlinearity=T.tanh),
        train_g=True, init_stdv=0.1))  # 16 -> 32
    gen_dat = ll.get_output(ls[-1])

    with np.load(params_file) as d:
        params = [d['arr_{}'.format(i)] for i in range(9)]
    ll.set_all_param_values(ls[-1], params, trainable=True)

    sample_batch = th.function(inputs=[], outputs=gen_dat)
    samps = []
    while len(samps) < n:
        samps.extend(sample_batch())
    samps = np.array(samps[:n])
    return samps
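A hypothetical invocation is sketched below; the .npz filename is a placeholder, and the nn helper module imported inside the function must be available on the path.

import numpy as np

rs = np.random.RandomState(1234)
samples = _sample_trained_minibatch_gan('gan_params.npz', n=500, batch_size=100, rs=rs)
print(samples.shape)  # (500, 3, 32, 32) for the generator defined above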
Example 6: MDCL
# Required module: import lasagne [as alias]
# Or: from lasagne import init [as alias]
def MDCL(incoming, num_filters, scales, name, dnn=True):
    if dnn:
        from lasagne.layers.dnn import Conv2DDNNLayer as C2D
    # W initialization method--this should also work as Orthogonal('relu'), but I have yet to validate that as thoroughly.
    winit = initmethod(0.02)
    # Initialization method for the coefficients
    sinit = lasagne.init.Constant(1.0 / (1 + len(scales)))
    # Number of incoming channels
    ni = lasagne.layers.get_output_shape(incoming)[1]
    # Weight parameter--the primary parameter for this block
    W = theano.shared(lasagne.utils.floatX(winit.sample((num_filters, lasagne.layers.get_output_shape(incoming)[1], 3, 3))), name=name + 'W')
    # Primary Convolution Layer--No Dilation
    n = C2D(incoming=incoming,
            num_filters=num_filters,
            filter_size=[3, 3],
            stride=[1, 1],
            pad=(1, 1),
            W=W * theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name + '_coeff_base').dimshuffle(0, 'x', 'x', 'x'),  # Note the broadcasting dimshuffle for the num_filter scalars.
            b=None,
            nonlinearity=None,
            name=name + 'base')
    # List of remaining layers. This should probably just all be concatenated into a single list rather than being a separate deal.
    nd = []
    for i, scale in enumerate(scales):
        # I don't think 0 dilation is technically defined (or if it is it's just the regular filter) but I use it here as a convenient keyword to grab the 1x1 mean conv.
        if scale == 0:
            nd.append(C2D(incoming=incoming,
                          num_filters=num_filters,
                          filter_size=[1, 1],
                          stride=[1, 1],
                          pad=(0, 0),
                          W=T.mean(W, axis=[2, 3]).dimshuffle(0, 1, 'x', 'x') * theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name + '_coeff_1x1').dimshuffle(0, 'x', 'x', 'x'),
                          b=None,
                          nonlinearity=None,
                          name=name + str(scale)))
        else:
            # Note the dimshuffles in this layer--these are critical as the current DilatedConv2D implementation uses a backward pass.
            nd.append(lasagne.layers.DilatedConv2DLayer(incoming=lasagne.layers.PadLayer(incoming=incoming, width=(scale, scale)),
                                                        num_filters=num_filters,
                                                        filter_size=[3, 3],
                                                        dilation=(scale, scale),
                                                        W=W.dimshuffle(1, 0, 2, 3) * theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name + '_coeff_' + str(scale)).dimshuffle('x', 0, 'x', 'x'),
                                                        b=None,
                                                        nonlinearity=None,
                                                        name=name + str(scale)))
    return ESL(nd + [n])
# MDC-based Upsample Layer.
# This is a prototype I don't make use of extensively. It's operational but it doesn't seem to improve results yet.
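A hypothetical usage sketch of the MDCL block follows; initmethod and ESL are helpers defined elsewhere in the source module (ESL presumably an elementwise-sum layer that merges the parallel branches), and the input shape and scales chosen here are purely illustrative.

import lasagne

l_in = lasagne.layers.InputLayer((None, 3, 64, 64))
# One undilated 3x3 branch, plus a 1x1 mean branch (scale 0) and dilated branches at scales 2 and 4.
l_mdc = MDCL(l_in, num_filters=64, scales=[0, 2, 4], name='mdc1')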