This article collects typical usage examples of the Python method theano.tensor.zeros. If you have been wondering what exactly tensor.zeros does and how to use it, the curated code samples below may help. You can also explore the containing module, theano.tensor, for further usage examples.
The following presents 15 code examples of tensor.zeros, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: build
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def build(self):
    #{{{
    import numpy as np
    self.W = shared((self.input_dim, 4 * self.output_dim),
                    name='{}_W'.format(self.name))
    self.U = shared((self.output_dim, 4 * self.output_dim),
                    name='{}_U'.format(self.name))
    # Bias layout: [input gate, forget gate (special init), cell, output gate]
    self.b = K.variable(np.hstack((np.zeros(self.output_dim),
                                   K.get_value(self.forget_bias_init(
                                       (self.output_dim,))),
                                   np.zeros(self.output_dim),
                                   np.zeros(self.output_dim))),
                        name='{}_b'.format(self.name))
    # self.c_0 = shared((self.output_dim,), name='{}_c_0'.format(self.name))
    # self.h_0 = shared((self.output_dim,), name='{}_h_0'.format(self.name))
    self.c_0 = np.zeros(self.output_dim).astype(theano.config.floatX)
    self.h_0 = np.zeros(self.output_dim).astype(theano.config.floatX)
    self.params = [self.W, self.U,
                   self.b,
                   # self.c_0, self.h_0
                   ]
    #}}}
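The `shared` helper used above is not part of Theano; in this codebase it evidently builds a shared variable from a shape. A minimal sketch of such a factory, assuming a small uniform initializer (the original initialization may differ):

import numpy as np
import theano

def shared(shape, name=None):
    # Hypothetical shape -> theano shared variable factory; the real helper
    # in the source repository may use a different initializer.
    value = np.random.uniform(low=-0.08, high=0.08, size=shape)
    return theano.shared(value=value.astype(theano.config.floatX), name=name)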
Example 2: ctc_update_log_p
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
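Note that the `zeros` argument here is not `T.zeros` itself but a preallocated zero vector that the recursion slices and overwrites at each step. A sketch of how it might be constructed, with illustrative names:

import theano
import theano.tensor as T

log_p_curr = T.vector('log_p_curr')   # per-step label log-probabilities
# One zero per CTC path position, reused across recursion steps.
zeros = T.zeros((log_p_curr.shape[0],), dtype=theano.config.floatX)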
Example 3: with_additional_nodes
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def with_additional_nodes(self, new_node_strengths, new_node_ids, new_node_states=None):
    """
    Helper function to generate a new state with new nodes added.

    Params:
        new_node_strengths: Tensor of shape (n_batch, n_new_nodes)
        new_node_ids: Tensor of shape (n_batch, n_new_nodes, num_node_ids)
        new_node_states: (Optional) Tensor of shape (n_batch, n_new_nodes, node_state_size).
            If not provided, it will be all zeros.

    Returns: A new graph state with the changes
    """
    if new_node_states is None:
        new_node_states = T.zeros([self.n_batch, new_node_strengths.shape[1], self.node_state_size])

    next_node_strengths = T.concatenate([self.node_strengths, new_node_strengths], 1)
    next_node_ids = T.concatenate([self.node_ids, new_node_ids], 1)
    next_node_states = T.concatenate([self.node_states, new_node_states], 1)
    next_n_nodes = next_node_strengths.shape[1]

    # Grow the edge tensor with zero-padding so its node dimensions match.
    next_edge_strengths = pad_to(self.edge_strengths, [self.n_batch, next_n_nodes, next_n_nodes, self.num_edge_types])

    cls = type(self)
    return cls(next_node_strengths, next_node_ids, next_node_states, next_edge_strengths)
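`pad_to` is a helper from the surrounding codebase; a plausible minimal version, specialized to the 4-D case used above, which embeds a tensor into a larger zero tensor of the target shape:

import theano.tensor as T

def pad_to(tensor, shape):
    # Hypothetical reimplementation: place `tensor` in the leading corner
    # of a zero tensor with the (possibly symbolic) target shape.
    padded = T.zeros(shape, dtype=tensor.dtype)
    return T.set_subtensor(
        padded[:tensor.shape[0], :tensor.shape[1],
               :tensor.shape[2], :tensor.shape[3]],
        tensor)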
Example 4: param_init_lstm
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def param_init_lstm(options, params, prefix='lstm', nin=None, dim=None, hiero=False):
    if nin is None:
        nin = options['dim_proj']
    if dim is None:
        dim = options['dim_proj']
    if not hiero:
        # Input-to-hidden weights for the four LSTM gates, concatenated.
        W = numpy.concatenate([norm_weight(nin, dim),
                               norm_weight(nin, dim),
                               norm_weight(nin, dim),
                               norm_weight(nin, dim)], axis=1)
        params[_p(prefix, 'W')] = W
        # Hidden-to-hidden (recurrent) weights, orthogonally initialized.
        U = numpy.concatenate([ortho_weight(dim),
                               ortho_weight(dim),
                               ortho_weight(dim),
                               ortho_weight(dim)], axis=1)
        params[_p(prefix, 'U')] = U
        params[_p(prefix, 'b')] = numpy.zeros((4 * dim,)).astype('float32')
    return params
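This snippet relies on the `norm_weight`, `ortho_weight`, and `_p` helpers common to this family of Theano codebases. Plausible definitions, for context:

import numpy

def _p(prefix, name):
    return '%s_%s' % (prefix, name)

def ortho_weight(ndim):
    # Orthogonal initialization via SVD of a random Gaussian matrix.
    W = numpy.random.randn(ndim, ndim)
    u, _, _ = numpy.linalg.svd(W)
    return u.astype('float32')

def norm_weight(nin, nout, scale=0.01):
    # Scaled Gaussian weights (the original helper may fall back to
    # ortho_weight when nin == nout).
    return (scale * numpy.random.randn(nin, nout)).astype('float32')

# Example: params = param_init_lstm({'dim_proj': 128}, {}, prefix='encoder')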
Example 5: DiagonalBiLSTM
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def DiagonalBiLSTM(name, input_dim, inputs):
    """
    inputs.shape:  (batch size, height, width, input_dim)
    returns shape: (batch size, height, width, DIM)
    """
    forward = DiagonalLSTM(name+'.Forward', input_dim, inputs)
    backward = DiagonalLSTM(name+'.Backward', input_dim, inputs[:,:,::-1,:])[:,:,::-1,:]
    batch_size = inputs.shape[0]

    # Shift the backward map down one row (zero row padded on top), so the
    # backward contribution at row i comes only from rows above i, never
    # from pixels to the right in the current row.
    backward = T.concatenate([
        T.zeros([batch_size, 1, WIDTH, DIM], dtype=theano.config.floatX),
        backward[:, :-1, :, :]
    ], axis=1)

    return forward + backward

# inputs.shape: (batch size, height, width, channels)
Example 6: generate_and_save_samples
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def generate_and_save_samples(tag):
    def save_images(images, filename):
        """
        images.shape: (batch, height, width, channels)
        """
        images = images.reshape((10, 10, 28, 28))
        # rowx, rowy, height, width -> rowy, height, rowx, width
        images = images.transpose(1, 2, 0, 3)
        images = images.reshape((10*28, 10*28))
        # Note: scipy.misc.toimage was removed in SciPy 1.2; this requires
        # an older scipy with Pillow installed.
        scipy.misc.toimage(images, cmin=0.0, cmax=1.0).save('{}_{}.jpg'.format(filename, tag))

    samples = numpy.zeros((100, HEIGHT, WIDTH, 1), dtype='float32')

    # Generate pixel by pixel: each step re-runs the model on the samples
    # so far and keeps only the newly sampled position.
    for i in range(HEIGHT):
        for j in range(WIDTH):
            for k in range(N_CHANNELS):
                next_sample = binarize(sample_fn(samples))
                samples[:, i, j, k] = next_sample[:, i, j, k]

    save_images(samples, 'samples')
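`sample_fn` (the compiled network) and `binarize` are defined elsewhere in the source; a plausible `binarize`, which samples binary pixels from the predicted Bernoulli probabilities:

import numpy

def binarize(probs):
    # Hypothetical stand-in: draw each pixel from Bernoulli(probs).
    return (numpy.random.uniform(size=probs.shape) < probs).astype('float32')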
Example 7: load_embeddings
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def load_embeddings(args):
    lst = []
    vas = []
    with gzip.open(args.embedding, 'rt') as fin:   # 'rt': decode to str under Python 3
        for line in fin:
            parts = line.strip().split()
            w = parts[0]
            e = numpy.array([[float(x) for x in parts[1:]]],
                            dtype=theano.config.floatX)
            lst.append(w)
            vas.append(e)
    # Add a zero vector for unknown words.
    lst.append("## UNK ##")
    vas.append(numpy.zeros(vas[0].shape, dtype=theano.config.floatX))
    vocabx = dict([(y, x) for x, y in enumerate(lst)])
    embeddings = numpy.concatenate(vas)
    assert len(vocabx) == len(embeddings)
    print("{} embeddings loaded, size {}".format(embeddings.shape[0], embeddings.shape[1]))
    return vocabx, embeddings
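A usage sketch, assuming a gzipped text file where each line holds a word followed by its vector components (the path and argument object below are illustrative):

class Args(object):
    embedding = 'word_vectors.txt.gz'   # hypothetical path

vocabx, embeddings = load_embeddings(Args())
print(len(vocabx), embeddings.shape)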
Example 8: __init__
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def __init__(self, input, n_in, n_out, W=None):
    self.input = input
    if W is None:
        self.W = theano.shared(
            value=numpy.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX),
            name='W',
            borrow=True
        )
    else:
        self.W = W
    self.s_y_given_x = T.dot(input, self.W)
    self.p_y_given_x = T.nnet.softmax(self.s_y_given_x)  # + self.b)
    self.pred = T.argmax(self.s_y_given_x, axis=1)
    self.params = [self.W]
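A minimal sketch of using this layer; the enclosing class name is not shown above, so `SoftmaxLayer` here is an assumption:

import numpy
import theano
import theano.tensor as T

x = T.matrix('x')
layer = SoftmaxLayer(x, n_in=50, n_out=10)   # class name assumed
predict = theano.function([x], layer.pred)
print(predict(numpy.random.randn(4, 50).astype(theano.config.floatX)))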
Example 9: calculate
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def calculate(self, input):
    P = self.P
    Q = self.Q
    R = self.R
    decay = self.decay
    # Initial state for each of the five recurrences: all zeros.
    f_0 = T.zeros((input.shape[1], self.n_out), dtype=theano.config.floatX)
    ([f1, s1, f2, s2, f3], updates) = theano.scan(fn=StrConvLayer.loop_one_step,
                                                  sequences=input,
                                                  outputs_info=[f_0, f_0, f_0, f_0, f_0],
                                                  non_sequences=[P, Q, R, decay]
                                                  )
    return f1, s1, f2, s2, f3
# ###
# Dynamic programming to calculate aggregated unigram to trigram representation vectors
# ###
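`StrConvLayer.loop_one_step` is defined elsewhere; in isolation, the zeros-as-initial-state pattern used above looks like this minimal scan (illustrative only):

import theano
import theano.tensor as T

seq = T.matrix('seq')                              # (time, features)
f_0 = T.zeros((seq.shape[1],), dtype=theano.config.floatX)
# Running sum over time, with T.zeros supplying the initial accumulator.
result, _ = theano.scan(fn=lambda x_t, acc: acc + x_t,
                        sequences=seq,
                        outputs_info=f_0)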
Example 10: adagrad_update
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def adagrad_update(self, cost, learning_rate, eps=1e-8):
    params = [p if p != self.slices else self.EMB for p in self.params]
    accumulators = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                              dtype=theano.config.floatX))
                    for p in params]
    gparams = [T.grad(cost, param) for param in self.params]
    self.gparams = gparams
    updates = []
    for param, gparam, acc in zip(self.params, gparams, accumulators):
        if param == self.slices:
            # Sparse update: only touch the accumulator rows and embedding
            # rows actually indexed in this minibatch.
            acc_slices = acc[self.x.flatten()]
            new_acc_slices = acc_slices + gparam**2
            updates.append((acc, T.set_subtensor(acc_slices, new_acc_slices)))
            updates.append((self.EMB, T.inc_subtensor(param,
                            - learning_rate * gparam / T.sqrt(new_acc_slices + eps))))
        else:
            new_acc = acc + gparam**2
            updates.append((acc, new_acc))
            updates.append((param, param - learning_rate * gparam /
                                   T.sqrt(new_acc + eps)))
    return updates
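For reference, this implements the standard Adagrad rule, a <- a + g^2 followed by theta <- theta - lr * g / sqrt(a + eps), with one twist: when the parameter is the embedding slice, both the accumulator and the embedding matrix are updated only at the rows indexed by `self.x`, via `set_subtensor`/`inc_subtensor`, so the full embedding table is never densely written.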
Example 11: init_params
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def init_params(self, inp_size, hidden_size, output_size):
    # First layer
    self.W1_in_act = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, inp_size, hidden_size), name='W1_in_'+self.name))
    self.b1_in_act = add_to_params(self.params, theano.shared(value=np.zeros((hidden_size,), dtype='float32'), name='b1_in_'+self.name))

    # First layer batch norm / layer norm parameters
    self.normop_in_act_h1_gamma = add_to_params(self.params, theano.shared(value=self.normop_gamma_init*np.ones((hidden_size,), dtype='float32'), name='normop_in_act_h1_gamma_'+self.name))
    self.normop_in_act_h1_mean = add_to_params(self.params, theano.shared(value=np.zeros((hidden_size,), dtype='float32'), name='normop_in_act_h1_mean_'+self.name))
    self.normop_in_act_h1_var = add_to_params(self.params, theano.shared(value=(1e-7)*np.ones((hidden_size,), dtype='float32'), name='normop_in_act_h1_var_'+self.name))

    # Output layer
    self.W2_in_act = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, hidden_size, output_size), name='W2_in_'+self.name))
    self.b2_in_act = add_to_params(self.params, theano.shared(value=np.zeros((output_size,), dtype='float32'), name='b2_in_'+self.name))

    # Output layer batch norm / layer norm parameters
    self.normop_in_act_h2_gamma = add_to_params(self.params, theano.shared(value=self.normop_gamma_init*np.ones((output_size,), dtype='float32'), name='normop_in_act_h2_gamma_'+self.name))
    self.normop_in_act_h2_mean = add_to_params(self.params, theano.shared(value=np.zeros((output_size,), dtype='float32'), name='normop_in_act_h2_mean_'+self.name))
    self.normop_in_act_h2_var = add_to_params(self.params, theano.shared(value=(1e-7)*np.ones((output_size,), dtype='float32'), name='normop_in_act_h2_var_'+self.name))
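`add_to_params` and `NormalInit` come from the surrounding project; plausible minimal versions, assuming `rng` is a `numpy.random.RandomState`:

import numpy as np
import theano

def add_to_params(params, new_param):
    # Register a shared variable in the parameter list and return it.
    params.append(new_param)
    return new_param

def NormalInit(rng, n_in, n_out, scale=0.01):
    # Hypothetical Gaussian weight initializer; the original may differ.
    return np.asarray(scale * rng.standard_normal((n_in, n_out)),
                      dtype='float32')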
Example 12: step
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def step(self, word, index, energy_tm1, h_tm1, c_tm1, x):
    #{{{
    # attention (note: the original compared strings with `is`, which is
    # unreliable; `==` is used here instead)
    H = x
    if self.attendedMode == "concat":
        M_X = T.dot(x, self.W_A_X)          # + self.b_A_X
        M_state = T.dot(self.W_A_h, c_tm1)  # + self.b_A_h
        M = T.tanh(M_X + M_state)
        _energy = T.dot(M, self.W_A.T)      # + self.b_A
    elif self.attendedMode == "dot":
        energy = None
        assert 0, "not implemented"
    elif self.attendedMode == "general":
        M_X = T.dot(x, self.W_A_X)          # + self.b_A_X
        M_state = T.dot(self.W_A_h, c_tm1)  # + self.b_A_h
        M = T.tanh(M_X * M_state)
        _energy = T.dot(M, self.W_A.T)      # + self.b_A
    # mask: only positions up to the current index receive attention weight
    mask = T.zeros((1, x.shape[0]), dtype=theano.config.floatX)
    energy = T.nnet.softmax(_energy[:index + 1])
    masked_energy = T.set_subtensor(mask[0, :index + 1], energy.flatten())
    glimpsed = (masked_energy.T * H).sum(axis=0)
    # combine the glimpse with the word input
    if self.wordInput_dim == 0:
        combined = glimpsed
    else:
        combined = K.concatenate([glimpsed, word])
    # original LSTM step
    h_t, c_t = super(AttentionLSTM3, self).step(combined, h_tm1, c_tm1)
    return masked_energy.flatten(), h_t, c_t
    #}}}
Example 13: link
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def link(self, input, words):
    #{{{
    """
    Propagate the input through the network and return the last hidden
    vector. The whole sequence is also accessible via self.h, which has
    shape (sequence_length, batch_size, output_dim).
    """
    # If we use batches, we have to permute the first and second dimension.
    if self.with_batch:
        assert 0, "AttentionLSTM does not implement with_batch"
    else:
        self.input = input
        initial_states = [self.h_0, self.c_0]
        step_function = self.step
        # The extra T.zeros entry in outputs_info carries the attention
        # energies between steps.
        [e, h, c], _ = theano.scan(
            fn=step_function,
            sequences=[words, T.arange(words.shape[0])],
            outputs_info=[T.zeros((input.shape[0],),
                                  dtype=theano.config.floatX)] + initial_states,
            non_sequences=[self.input],
        )
    self.h = h
    self.output = h[-1]
    self.e = e
    self.c = c
    return self.output
    #}}}
#}}}
Example 14: zeros
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def zeros(shape, dtype=None, name=None):
    '''Instantiates an all-zeros variable.
    '''
    if dtype is None:
        dtype = floatx()
    return variable(np.zeros(shape), dtype, name)
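This snippet matches the Keras Theano backend's `zeros`; `variable` and `floatx` are sibling helpers in the same module. Typical usage through the backend API:

from keras import backend as K

v = K.zeros((2, 3))          # shared variable filled with 0.0
print(K.get_value(v))        # -> a (2, 3) numpy array of zeros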
Example 15: temporal_padding
# Required imports: from theano import tensor [as alias]
# Or: from theano.tensor import zeros [as alias]
def temporal_padding(x, padding=1):
    '''Pad the middle dimension of a 3D tensor
    with "padding" zeros left and right.

    Apologies for the inane API, but Theano makes this
    really hard.
    '''
    # TODO: `keras_shape` inference.
    input_shape = x.shape
    output_shape = (input_shape[0],
                    input_shape[1] + 2 * padding,
                    input_shape[2])
    output = T.zeros(output_shape)
    return T.set_subtensor(output[:, padding:x.shape[1] + padding, :], x)
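A quick check of the padding behavior (a minimal sketch, assuming `temporal_padding` as defined above is in scope):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')                       # (batch, time, features)
pad_fn = theano.function([x], temporal_padding(x, padding=1))
out = pad_fn(np.ones((2, 4, 3), dtype=theano.config.floatX))
print(out.shape)                         # (2, 6, 3): one zero step each side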