This page collects typical usage examples of the Python method lib.param. If you have been wondering what lib.param does, how to call it, or where to find working examples of it, the curated code samples below may help. You can also explore further usage examples from the lib module it belongs to.

Below, 15 code examples of lib.param are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
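The lib module itself is not shown on this page. As a point of reference, here is a minimal sketch of what lib.param typically looks like in Theano codebases of this kind (an assumption inferred from the call sites below, not the actual implementation): a registry that creates a named theano.shared variable on first use and returns the cached copy afterwards, so layer functions can be re-applied without duplicating parameters.

import numpy
import theano

_params = {}  # global registry: parameter name -> theano shared variable

def param(name, numpy_value):
    # Create the shared variable the first time a name is seen;
    # return the cached one on every later call.
    if name not in _params:
        _params[name] = theano.shared(numpy_value, name=name)
    return _params[name]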
Example 1: conv1d

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def conv1d(name, input, kernel, stride, n_filters, depth, bias=False,
           batchnorm=False, pad='valid', filter_dilation=(1, 1), run_mode=0):
    # 1D convolution implemented as a 2D conv with width-1 filters.
    W = lib.param(
        name + '.W',
        lasagne.init.HeNormal().sample((n_filters, depth, kernel, 1)).astype('float32')
    )
    out = T.nnet.conv2d(input, W, subsample=(stride, 1), border_mode=pad,
                        filter_dilation=filter_dilation)
    if bias:
        b = lib.param(
            name + '.b',
            np.zeros(n_filters).astype('float32')
        )
        out += b[None, :, None, None]
    if batchnorm:
        out = BatchNorm(name, out, n_filters, mode=1, run_mode=run_mode)
    return out
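A hypothetical usage sketch (shapes and names are assumptions, and lib, lasagne, and Theano must be available): a stride-2, width-3 convolution over inputs laid out as (batch, channels, time, 1), matching the (n_filters, depth, kernel, 1) filter shape above.

import theano.tensor as T

x = T.tensor4('x')  # (batch, 16, time, 1) -- 16 input channels
y = conv1d('conv0', x, kernel=3, stride=2, n_filters=32, depth=16)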
Example 2: create_wavenet_block

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def create_wavenet_block(inp, num_dilation_layer, input_dim, output_dim, name=None):
    assert name is not None
    layer_out = inp
    skip_contrib = []
    skip_weights = lib.param(name + ".parametrized_weights",
                             lib.floatX(numpy.ones((num_dilation_layer,))))
    for i in range(num_dilation_layer):
        layer_out, skip_c = lib.ops.dil_conv_1D(
            layer_out,
            output_dim,
            input_dim if i == 0 else output_dim,
            2,
            dilation=2 ** i,
            non_linearity='gated',
            name=name + ".dilation_{}".format(i + 1)
        )
        skip_c = skip_c * skip_weights[i]
        skip_contrib.append(skip_c)

    skip_out = skip_contrib[-1]
    # Each dilated conv trims the sequence, so offset the earlier skip
    # contributions to align them with the (shortest) final one.
    j = 0
    for i in range(num_dilation_layer - 1):
        j += 2 ** (num_dilation_layer - i - 1)
        skip_out = skip_out + skip_contrib[num_dilation_layer - 2 - i][:, j:]

    return layer_out, skip_out
Example 3: Embedding

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def Embedding(name, n_symbols, output_dim, indices):
    vectors = lib.param(
        name,
        numpy.random.randn(
            n_symbols,
            output_dim
        ).astype(theano.config.floatX)
    )

    # Output keeps the shape of `indices` and appends an output_dim axis.
    output_shape = [
        indices.shape[i]
        for i in xrange(indices.ndim)
    ] + [output_dim]

    return vectors[indices.flatten()].reshape(output_shape)
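A hypothetical usage sketch (vocabulary size and dimensions are assumptions): embedding a batch of integer token ids yields one output_dim vector per index.

import numpy
import theano
import theano.tensor as T

indices = T.imatrix('indices')  # (batch, seq_len) of int32 ids
embedded = Embedding('Embedding.tokens', 256, 16, indices)
f = theano.function([indices], embedded)
print(f(numpy.zeros((2, 5), dtype='int32')).shape)  # -> (2, 5, 16)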
Example 4: __Recurrent

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def __Recurrent(name, hidden_dims, step_fn, inputs, non_sequences=[], h0s=None):
    if not isinstance(inputs, list):
        inputs = [inputs]
    if not isinstance(hidden_dims, list):
        hidden_dims = [hidden_dims]

    if h0s is None:
        h0s = [None] * len(hidden_dims)

    for i in xrange(len(hidden_dims)):
        if h0s[i] is None:
            # Learn the initial hidden state and tile it across the batch.
            h0_unbatched = lib.param(
                name + '.h0_' + str(i),
                numpy.zeros((hidden_dims[i],), dtype=theano.config.floatX)
            )
            num_batches = inputs[0].shape[1]
            h0s[i] = T.alloc(h0_unbatched, num_batches, hidden_dims[i])

        h0s[i] = T.patternbroadcast(h0s[i], [False] * h0s[i].ndim)

    outputs, _ = theano.scan(
        step_fn,
        sequences=inputs,
        outputs_info=h0s,
        non_sequences=non_sequences
    )

    return outputs
Example 5: Recurrence

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def Recurrence(processed_frames, h0, reset):
    """
    processed_frames.shape: (batch size, n frames, DIM)
    h0.shape: (batch size, N_GRUS, DIM)
    reset.shape: ()
    output.shape: (batch size, n frames, DIM)
    """
    # print "warning no recurrence"
    # return T.zeros_like(processed_frames), h0

    # On reset, replace the carried-over h0 with a learned initial state.
    learned_h0 = lib.param(
        'Recurrence.h0',
        numpy.zeros((N_GRUS, DIM), dtype=theano.config.floatX)
    )
    learned_h0 = T.alloc(learned_h0, h0.shape[0], N_GRUS, DIM)
    learned_h0 = T.patternbroadcast(learned_h0, [False] * learned_h0.ndim)
    h0 = theano.ifelse.ifelse(reset, learned_h0, h0)

    gru0 = lib.ops.LowMemGRU('Recurrence.GRU0', DIM, DIM, processed_frames, h0=h0[:, 0])
    grus = [gru0]
    for i in xrange(1, N_GRUS):
        gru = lib.ops.LowMemGRU('Recurrence.GRU' + str(i), DIM, DIM, grus[-1], h0=h0[:, i])
        grus.append(gru)

    last_hidden = T.stack([gru[:, -1] for gru in grus], axis=1)

    return (grus[-1], last_hidden)
Example 6: Embedding

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def Embedding(name, n_symbols, output_dim, indices):
    vectors = lib.param(
        name,
        numpy.random.randn(
            n_symbols,
            output_dim
        ).astype(theano.config.floatX)
    )

    output_shape = [
        indices.shape[i]
        for i in xrange(indices.ndim)
    ] + [output_dim]

    return vectors[indices.flatten()].reshape(output_shape)
Example 7: Recurrent

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def Recurrent(name, hidden_dims, step_fn, inputs, non_sequences=[], h0s=None):
    if not isinstance(inputs, list):
        inputs = [inputs]
    if not isinstance(hidden_dims, list):
        hidden_dims = [hidden_dims]

    if h0s is None:
        h0s = [None] * len(hidden_dims)

    for i in xrange(len(hidden_dims)):
        if h0s[i] is None:
            h0_unbatched = lib.param(
                name + '.h0_' + str(i),
                numpy.zeros((hidden_dims[i],), dtype=theano.config.floatX)
            )
            num_batches = inputs[0].shape[1]
            h0s[i] = T.alloc(h0_unbatched, num_batches, hidden_dims[i])

        h0s[i] = T.patternbroadcast(h0s[i], [False] * h0s[i].ndim)

    outputs, _ = theano.scan(
        step_fn,
        sequences=inputs,
        outputs_info=h0s,
        non_sequences=non_sequences
    )

    return outputs
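A hypothetical usage sketch (the step function is an assumption): theano.scan iterates over the leading axis, so inputs are time-major here, and the learned h0 is tiled across the batch found on axis 1.

import theano.tensor as T

def step(x_t, h_tm1):
    # x_t: (batch, 64) input slice; h_tm1: (batch, 64) previous hidden state
    return T.tanh(x_t + h_tm1)

seq = T.tensor3('seq')  # (seq_len, batch, 64)
hiddens = Recurrent('RNN', hidden_dims=64, step_fn=step, inputs=seq)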
Example 8: Dense

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def Dense(name, input_dim, output_dim, inputs, bias=True, init=None,
          weightnorm=True, hidden_dim=None):
    weight_values = init_weights(input_dim, output_dim, init)

    weight = lib.param(
        name + '.W',
        weight_values
    )

    # 3-D inputs are flattened to 2-D for the matmul, then reshaped back.
    batch_size = None
    if inputs.ndim == 3:
        batch_size = inputs.shape[0]
        inputs = inputs.reshape((-1, input_dim))

    if weightnorm:
        norm_values = numpy.linalg.norm(weight_values, axis=0)
        norms = lib.param(
            name + '.g',
            norm_values
        )
        normed_weight = weight * (norms / weight.norm(2, axis=0)).dimshuffle('x', 0)
        result = T.dot(inputs, normed_weight)
    else:
        result = T.dot(inputs, weight)

    if bias:
        b = lib.param(
            name + '.b',
            numpy.zeros((output_dim,), dtype=theano.config.floatX)
        )
        result += b

    result.name = name + ".output"
    if batch_size is not None:
        return result.reshape((batch_size, hidden_dim, output_dim))
    else:
        return result
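A hypothetical usage sketch (dimensions are assumptions; init_weights is a repo helper not shown here): a weight-normalized 128-to-64 projection on a 2-D batch. Note that 3-D inputs additionally require hidden_dim so the output can be reshaped back.

import theano.tensor as T

x = T.matrix('x')                      # (batch, 128)
h = Dense('Dense.hidden', 128, 64, x)  # (batch, 64), weight-normalized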
Example 9: Embedding

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def Embedding(name, n_symbols, output_dim, indices):
    vectors = lib.param(
        name,
        numpy.random.randn(
            n_symbols,
            output_dim
        ).astype(theano.config.floatX)
    )

    output_shape = tuple(list(indices.shape) + [output_dim])

    return vectors[indices.flatten()].reshape(output_shape)
Example 10: __ConvLSTMStep

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def __ConvLSTMStep(
        name,
        seq_len,
        input_dim,
        hidden_dim,
        current_input,
        last_hidden,
        last_cell,
        dilation_depth=10,
        inp_bias_init=0.,
        forget_bias_init=3.,
        out_bias_init=0.,
        g_bias_init=0.):
    # X_t*(U^i, U^f, U^o, U^g)
    dilations = [2 ** i for i in xrange(dilation_depth)]
    prev_conv = current_input
    last_cell_stack = T.concatenate((last_cell, last_cell), axis=1)
    for i, value in enumerate(dilations):
        # prev_conv = lib.ops.conv1d(name + ".WaveNetConv%d" % (i + 1), prev_conv, 2, 1, hidden_dim, input_dim, True, False, pad=(dilation, 0), filter_dilation=(dilation, 1))[:, :, :current_input.shape[2], :]
        prev_conv, y = lib.ops.WaveNetConv1d("WaveNetBlock-%d" % (i + 1), prev_conv, 2,
                                             hidden_dim, input_dim, bias=True,
                                             batchnorm=False, dilation=value)

    # Compute all four gate pre-activations in one 1x1 convolution.
    prev_conv = T.concatenate((prev_conv, last_hidden), axis=1)
    prev_conv = lib.ops.conv1d(name + ".ConvGates", prev_conv, 1, 1,
                               4 * hidden_dim, 2 * input_dim, True, False)

    # Peephole weights for the input, forget, and output gates.
    W_cell = lib.param(name + '.CellWeights',
                       lasagne.init.HeNormal().sample((3 * hidden_dim, seq_len, 1)))

    inp_forget = T.nnet.sigmoid(prev_conv[:, :2 * hidden_dim] + W_cell[:2 * hidden_dim] * last_cell_stack)
    i_t = inp_forget[:, :hidden_dim]
    f_t = inp_forget[:, hidden_dim:]
    C_t = f_t * last_cell + i_t * T.tanh(prev_conv[:, 2 * hidden_dim:3 * hidden_dim])
    o_t = T.nnet.sigmoid(prev_conv[:, 3 * hidden_dim:] + W_cell[2 * hidden_dim:] * C_t)
    H_t = o_t * T.tanh(C_t)

    return H_t, C_t
Example 11: Conv1D

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def Conv1D(name, input_dim, output_dim, filter_size, inputs, apply_biases=True):
    """
    inputs.shape: (batch size, height, input_dim)
    output.shape: (batch size, height, output_dim)
    * performs valid convs
    """
    def uniform(stdev, size):
        """uniform distribution with the given stdev and size"""
        return numpy.random.uniform(
            low=-stdev * numpy.sqrt(3),
            high=stdev * numpy.sqrt(3),
            size=size
        ).astype(theano.config.floatX)

    filters = lib.param(
        name + '.Filters',
        uniform(
            1. / numpy.sqrt(input_dim * filter_size),
            # output dim, input dim, height, width
            (output_dim, input_dim, filter_size, 1)
        )
    )

    # conv2d takes inputs as (batch size, input channels, height[?], width[?])
    inputs = inputs.reshape((inputs.shape[0], inputs.shape[1], 1, inputs.shape[2]))
    inputs = inputs.dimshuffle(0, 3, 1, 2)

    result = T.nnet.conv2d(inputs, filters, border_mode='valid', filter_flip=False)

    if apply_biases:
        biases = lib.param(
            name + '.Biases',
            numpy.zeros(output_dim, dtype=theano.config.floatX)
        )
        result = result + biases[None, :, None, None]

    result = result.dimshuffle(0, 2, 3, 1)
    return result.reshape((result.shape[0], result.shape[1], result.shape[3]))
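A hypothetical usage sketch (shapes are assumptions): a width-5 valid convolution over sequences of 32-dim features, which trims filter_size - 1 = 4 steps from the height axis.

import theano.tensor as T

seq = T.tensor3('seq')                         # (batch, height, 32)
out = Conv1D('Conv1D.layer0', 32, 64, 5, seq)  # (batch, height - 4, 64)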
Example 12: big_frame_level_rnn

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def big_frame_level_rnn(input_sequences, h0, reset):
    """
    input_sequences.shape: (batch size, n big frames * BIG_FRAME_SIZE)
    h0.shape: (batch size, N_BIG_GRUS, BIG_DIM)
    reset.shape: ()
    output[0].shape: (batch size, n frames, DIM)
    output[1].shape: same as h0.shape
    output[2].shape: (batch size, seq len, Q_LEVELS)
    """
    learned_h0 = lib.param(
        'BigFrameLevel.h0',
        numpy.zeros((N_BIG_GRUS, BIG_DIM), dtype=theano.config.floatX)
    )
    learned_h0 = T.alloc(learned_h0, h0.shape[0], N_BIG_GRUS, BIG_DIM)
    learned_h0 = T.patternbroadcast(learned_h0, [False] * learned_h0.ndim)
    h0 = theano.ifelse.ifelse(reset, learned_h0, h0)

    frames = input_sequences.reshape((
        input_sequences.shape[0],
        input_sequences.shape[1] / BIG_FRAME_SIZE,
        BIG_FRAME_SIZE
    ))

    # Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2]
    # (a reasonable range to pass as inputs to the RNN)
    frames = (frames.astype('float32') / lib.floatX(Q_LEVELS / 2)) - lib.floatX(1)
    frames *= lib.floatX(2)

    gru0 = lib.ops.LowMemGRU('BigFrameLevel.GRU0', BIG_FRAME_SIZE, BIG_DIM, frames, h0=h0[:, 0])
    grus = [gru0]
    for i in xrange(1, N_BIG_GRUS):
        gru = lib.ops.LowMemGRU('BigFrameLevel.GRU' + str(i), BIG_DIM, BIG_DIM, grus[-1], h0=h0[:, i])
        grus.append(gru)

    output = lib.ops.Linear(
        'BigFrameLevel.Output',
        BIG_DIM,
        DIM * BIG_FRAME_SIZE / FRAME_SIZE,
        grus[-1]
    )
    output = output.reshape((output.shape[0], output.shape[1] * BIG_FRAME_SIZE / FRAME_SIZE, DIM))

    last_hidden = T.stack([gru[:, -1] for gru in grus], axis=1)

    independent_preds = lib.ops.Linear(
        'BigFrameLevel.IndependentPreds',
        BIG_DIM,
        Q_LEVELS * BIG_FRAME_SIZE,
        grus[-1]
    )
    independent_preds = independent_preds.reshape((independent_preds.shape[0], independent_preds.shape[1] * BIG_FRAME_SIZE, Q_LEVELS))

    return (output, last_hidden, independent_preds)
Example 13: frame_level_rnn

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def frame_level_rnn(input_sequences, other_input, h0, reset):
    """
    input_sequences.shape: (batch size, n frames * FRAME_SIZE)
    other_input.shape: (batch size, n frames, DIM)
    h0.shape: (batch size, N_GRUS, DIM)
    reset.shape: ()
    output.shape: (batch size, n frames * FRAME_SIZE, DIM)
    """
    learned_h0 = lib.param(
        'FrameLevel.h0',
        numpy.zeros((N_GRUS, DIM), dtype=theano.config.floatX)
    )
    learned_h0 = T.alloc(learned_h0, h0.shape[0], N_GRUS, DIM)
    learned_h0 = T.patternbroadcast(learned_h0, [False] * learned_h0.ndim)
    h0 = theano.ifelse.ifelse(reset, learned_h0, h0)

    frames = input_sequences.reshape((
        input_sequences.shape[0],
        input_sequences.shape[1] / FRAME_SIZE,
        FRAME_SIZE
    ))

    # Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2]
    # (a reasonable range to pass as inputs to the RNN)
    frames = (frames.astype('float32') / lib.floatX(Q_LEVELS / 2)) - lib.floatX(1)
    frames *= lib.floatX(2)

    # Conditioning from the tier above is added to the expanded frame input.
    gru_input = lib.ops.Linear('FrameLevel.InputExpand', FRAME_SIZE, DIM, frames) + other_input

    gru0 = lib.ops.LowMemGRU('FrameLevel.GRU0', DIM, DIM, gru_input, h0=h0[:, 0])
    grus = [gru0]
    for i in xrange(1, N_GRUS):
        gru = lib.ops.LowMemGRU('FrameLevel.GRU' + str(i), DIM, DIM, grus[-1], h0=h0[:, i])
        grus.append(gru)

    output = lib.ops.Linear(
        'FrameLevel.Output',
        DIM,
        FRAME_SIZE * DIM,
        grus[-1],
        initialization='he'
    )
    output = output.reshape((output.shape[0], output.shape[1] * FRAME_SIZE, DIM))

    last_hidden = T.stack([gru[:, -1] for gru in grus], axis=1)

    return (output, last_hidden)
Example 14: frame_level_rnn

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def frame_level_rnn(input_sequences, h0, reset):
    """
    input_sequences.shape: (batch size, n frames * FRAME_SIZE)
    h0.shape: (batch size, N_GRUS, DIM)
    reset.shape: ()
    output.shape: (batch size, n frames * FRAME_SIZE, DIM)
    """
    learned_h0 = lib.param(
        'FrameLevel.h0',
        numpy.zeros((N_GRUS, DIM), dtype=theano.config.floatX)
    )
    learned_h0 = T.alloc(learned_h0, h0.shape[0], N_GRUS, DIM)
    learned_h0 = T.patternbroadcast(learned_h0, [False] * learned_h0.ndim)
    h0 = theano.ifelse.ifelse(reset, learned_h0, h0)

    frames = input_sequences.reshape((
        input_sequences.shape[0],
        input_sequences.shape[1] / FRAME_SIZE,
        FRAME_SIZE
    ))

    # Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2]
    # (a reasonable range to pass as inputs to the RNN)
    frames = (frames.astype('float32') / lib.floatX(Q_LEVELS / 2)) - lib.floatX(1)
    frames *= lib.floatX(2)

    gru0 = lib.ops.LowMemGRU('FrameLevel.GRU0', FRAME_SIZE, DIM, frames, h0=h0[:, 0])
    grus = [gru0]
    for i in xrange(1, N_GRUS):
        gru = lib.ops.LowMemGRU('FrameLevel.GRU' + str(i), DIM, DIM, grus[-1], h0=h0[:, i])
        grus.append(gru)

    output = lib.ops.Linear(
        'FrameLevel.Output',
        DIM,
        FRAME_SIZE * DIM,
        grus[-1],
        initialization='he'
    )
    output = output.reshape((output.shape[0], output.shape[1] * FRAME_SIZE, DIM))

    last_hidden = T.stack([gru[:, -1] for gru in grus], axis=1)

    return (output, last_hidden)
Example 15: sample_level_rnn

# Required import: import lib [as alias]
# Or: from lib import param [as alias]
def sample_level_rnn(input_sequences, h0, reset):
    """
    input_sequences.shape: (batch size, seq len)
    h0.shape: (batch size, N_GRUS, DIM)
    reset.shape: ()
    output.shape: (batch size, seq len, Q_LEVELS)
    """
    learned_h0 = lib.param(
        'SampleLevel.h0',
        numpy.zeros((N_GRUS, DIM), dtype=theano.config.floatX)
    )
    learned_h0 = T.alloc(learned_h0, h0.shape[0], N_GRUS, DIM)
    h0 = theano.ifelse.ifelse(reset, learned_h0, h0)

    # Embedded inputs
    #################
    # FRAME_SIZE = Q_LEVELS
    # frames = lib.ops.Embedding('SampleLevel.Embedding', Q_LEVELS, Q_LEVELS, input_sequences)

    # Real-valued inputs
    ####################
    # 'frames' of size 1
    FRAME_SIZE = 1
    frames = input_sequences.reshape((
        input_sequences.shape[0],
        input_sequences.shape[1],
        1
    ))

    # # Rescale frames from ints in [0, Q_LEVELS) to floats in [-2, 2]
    # # (a reasonable range to pass as inputs to the RNN)
    # frames = (frames.astype('float32') / lib.floatX(Q_LEVELS/2)) - lib.floatX(1)
    # frames *= lib.floatX(2)

    gru0 = lib.ops.LowMemGRU('SampleLevel.GRU0', FRAME_SIZE, DIM, frames, h0=h0[:, 0])
    # gru0 = T.nnet.relu(lib.ops.Linear('SampleLevel.GRU0FF', DIM, DIM, gru0, initialization='he'))
    grus = [gru0]
    for i in xrange(1, N_GRUS):
        gru = lib.ops.LowMemGRU('SampleLevel.GRU' + str(i), DIM, DIM, grus[-1], h0=h0[:, i])
        # gru = T.nnet.relu(lib.ops.Linear('SampleLevel.GRU' + str(i) + 'FF', DIM, DIM, gru, initialization='he'))
        grus.append(gru)

    # We apply the softmax later
    output = lib.ops.Linear(
        'Output',
        N_GRUS * DIM,
        2,
        T.concatenate(grus, axis=2)
    )
    # output = lib.ops.Linear(
    #     'Output',
    #     DIM,
    #     Q_LEVELS,
    #     grus[-1]
    # )

    last_hidden = T.stack([gru[:, -1] for gru in grus], axis=1)

    return (output, last_hidden)