This article collects typical usage examples of the theano.scan function in Python, for readers wondering what theano.scan does, how to call it, and what real uses of it look like. The curated examples below may help.
The following shows 15 code examples of the scan function, drawn from real projects.
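Before the examples, a quick reminder of the calling convention may be useful: theano.scan takes a step function fn, per-iteration sequences, initial values for recurrent outputs in outputs_info, and fixed non_sequences, and returns a pair (outputs, updates). A minimal sketch of a running sum, not taken from any of the projects below:

import theano
import theano.tensor as T

x = T.vector('x')
# the step function receives the current sequence element first,
# then the previous value of the recurrent output
cumsum, updates = theano.scan(fn=lambda x_t, acc: acc + x_t,
                              sequences=x,
                              outputs_info=T.zeros((), dtype=x.dtype))
f = theano.function([x], cumsum, updates=updates)
# f([1., 2., 3.]) -> array([1., 3., 6.])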
Example 1: output_h_vals
def output_h_vals(self, train=False):
    if 'input_single' in self.inputs_dict:
        input = self.get_input('input_single', train)    # (nb_sample, input_dim)
        X = TU.repeat(input, self.input_length)          # (input_length, nb_sample, input_dim)
        mask = None
    else:
        input = self.get_input('input_sequence', train)  # (nb_sample, input_length, input_dim)
        X = input.dimshuffle((1, 0, 2))                  # (input_length, nb_sample, input_dim)
        mask = self.get_input_mask('input_sequence', train)  # (nb_sample, input_length)
    if mask is not None:
        mask = T.cast(mask, dtype='int8').dimshuffle((1, 0, 'x'))  # (input_length, nb_sample, 1)
    # h_0 = T.zeros((X.shape[1], self.output_dim), X.dtype)  # (nb_samples, output_dim)
    h_0 = self._get_initial_state(X)
    if mask is not None:
        h_vals, _ = theano.scan(self.step,
                                sequences=[mask, X],
                                outputs_info=h_0,
                                non_sequences=[self.W, self.U, self.b],
                                truncate_gradient=self.truncate_gradient,
                                go_backwards=self.go_backwards,
                                strict=True)
    else:
        h_vals, _ = theano.scan(self.step_no_mask,
                                sequences=[X],
                                outputs_info=h_0,
                                non_sequences=[self.W, self.U, self.b],
                                truncate_gradient=self.truncate_gradient,
                                go_backwards=self.go_backwards,
                                strict=True)
    return h_vals  # (input_length, nb_samples, output_dim)
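Both branches pass strict=True, which makes scan raise an error if the step function uses a shared variable that is not listed in non_sequences; that is why self.W, self.U and self.b are passed explicitly. The actual self.step is defined elsewhere in the class; a compatible signature, with a hypothetical tanh recurrence purely for illustration, might look like this:

# hypothetical: scan passes the sequence slices (mask, x) first, then the
# previous output h_prev, then the non_sequences (W, U, b)
def step(self, m, x, h_prev, W, U, b):
    h = T.tanh(T.dot(x, W) + T.dot(h_prev, U) + b)
    return T.switch(m, h, h_prev)  # keep the old state on padded steps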
Example 2: get_output
def get_output(self, train=False):
    self._train_state = train
    # assumes the input dict yields (X, eps) in this order
    X, eps = self.get_input(train).values()
    eps = eps.dimshuffle(1, 0, 2)
    canvas, init_enc, init_dec = self._get_initial_states(X)
    if self.inner_rnn == 'gru':
        outputs, updates = scan(self._step,
                                sequences=eps,
                                outputs_info=[canvas, init_enc, init_dec, None],
                                non_sequences=[X, ] + self.params,
                                # n_steps=self.n_steps,
                                truncate_gradient=self.truncate_gradient)
    elif self.inner_rnn == 'lstm':
        outputs, updates = scan(self._step_lstm,
                                sequences=eps,
                                outputs_info=[0 * canvas, 0 * init_enc, 0 * init_enc,
                                              0 * init_dec, 0 * init_dec, None],
                                non_sequences=[X, ] + self.params,
                                truncate_gradient=self.truncate_gradient)
    kl = outputs[-1].sum(axis=0).mean()
    if train:
        # self.updates = updates
        self.regularizers = [SimpleCost(kl), ]
    if self.return_sequences:
        return [outputs[0].dimshuffle(1, 0, 2, 3, 4), kl]
    else:
        return [outputs[0][-1], kl]
Example 3: apply
def apply(self, src, mask_length, tgt):
    """
    Viterbi algorithm
    """
    result, updates = theano.scan(
        fn=self.train_step,
        sequences=src,
        outputs_info=[self.A_start, None],
        non_sequences=self.A,
        n_steps=mask_length
    )
    # the score of the best path
    best_path_score = result[0][-1].max()
    idx = T.argmax(result[0][-1])
    # backtracking
    res2, _ = theano.scan(
        fn=lambda dps, idx, idx2: [dps[idx], idx],
        sequences=result[1][::-1],
        outputs_info=[idx, idx],
        n_steps=mask_length
    )
    # the path with the best score
    best_path = res2[1]
    # if len(best_path) < seq_len:
    #     best_path.extend((seq_len - len(best_path)) * [2])
    # the score of the tgt path
    tgt_score = self.decode(src, mask_length, tgt)
    # max margin
    max_margin = T.sum(T.neq(tgt[:mask_length], best_path))
    cost = best_path_score + max_margin - tgt_score
    return T.switch(T.lt(cost, T.alloc(numpy.float32(0.))),
                    T.alloc(numpy.float32(0.)),
                    cost), best_path
Example 4: loss_fn_per_context
def loss_fn_per_context(word_position, context):
    # sum up the global vectors of the context
    context_vector = T.sum(W_g[context], axis=0)
    # start with -1, i.e. none of the words disambiguated yet
    start = -1 * T.ones_like(context)
    output_alg, updates = theano.scan(l2C,
                                      sequences=[context, T.arange(4)],
                                      outputs_info=[start, context_vector])
    disambiguated_senses = output_alg[0][-1]
    augmented_context_vector = output_alg[1][-1]
    sense_of_actual_word = disambiguated_senses[word_position]
    # return T.argsort(T.dot(context_vector, W_s[actual_word].T)), T.dot(context_vector, W_s[actual_word].T)
    actual_word = context[word_position]

    # compute the loss used to update the global word vectors, ignoring the word itself
    def score(i):
        return T.switch(T.eq(i, actual_word), 0,
                        T.log(T.nnet.sigmoid(T.dot(W_g[actual_word], W_g[i]))))
    scores, ignore_updates = theano.scan(score, sequences=[context])

    def calc_score(context_word, sense_of_context_word):
        return T.switch(T.eq(context_word, actual_word), 0,
                        T.log(T.nnet.sigmoid(T.dot(W_s[actual_word][sense_of_actual_word],
                                                   W_s[context_word][sense_of_context_word]))))
    sense_scores, ignore_updates_ = theano.scan(calc_score,
                                                sequences=[context, disambiguated_senses])
    loss_this_example = T.sum(scores, axis=0) + T.sum(sense_scores, axis=0)
    return loss_this_example
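Note, on the first scan above: when scan is given several sequences and no n_steps, it iterates as many times as the shortest sequence allows, so the fixed T.arange(4) caps the disambiguation loop at four steps regardless of the context length.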
Example 5: nin
def nin(X, param):
    w1, w2, w3, b1, b2, b3 = param
    X = X.dimshuffle(0, 1, 'x', 2, 3)              # (n,32,1,r,c)
    w1 = w1.dimshuffle(0, 1, 2, 'x', 3, 4)         # (64,32,16,1,3,3)
    w2 = w2.dimshuffle(0, 1, 'x', 2, 'x', 'x')     # (64,32,1,16,1,1)
    w3 = w3.dimshuffle(0, 1, 2, 'x', 'x')          # (64,2,32,1,1)
    b1 = b1.dimshuffle(0, 1, 'x', 2, 'x', 'x')     # (64,32,1,16,1,1)
    b2 = b2.dimshuffle(0, 1, 'x', 2, 'x', 'x')     # (64,32,1,1,1,1)
    b3 = b3.dimshuffle(0, 'x', 1, 'x', 'x')        # (64,1,2,1,1)
    indexi = T.arange(w1.shape[0], dtype='int32')  # (0:64)
    indexi = T.repeat(indexi, w1.shape[1], axis=0)
    indexj = T.arange(w1.shape[1], dtype='int32')  # (0:32)
    indexj = T.tile(indexj, w1.shape[0])
    results, updates = scan(fn=metaOp1,
                            sequences=[indexi, indexj],
                            outputs_info=None,
                            non_sequences=[X, w1, w2, b1, b2],
                            strict=True)           # (64*32,n,1,r,c)
    metaShape1 = results.shape[-4], results.shape[-2], results.shape[-1]
    reshaped1 = results.reshape((w1.shape[0], w1.shape[1]) + metaShape1)  # (64,32,n,r,c)
    permuted1 = T.transpose(reshaped1, axes=(0, 2, 1, 3, 4))              # (64,n,32,r,c)
    indexi = T.arange(w1.shape[0], dtype='int32')  # (0:64)
    results, updates = scan(fn=metaOp2,
                            sequences=[indexi],
                            outputs_info=None,
                            non_sequences=[permuted1, w3, b3],
                            strict=True)           # (64,n,2,r,c)
    permuted2 = T.transpose(results, axes=(1, 0, 2, 3, 4))                # (n,64,2,r,c)
    metaShape2 = permuted2.shape[-2], permuted2.shape[-1]
    reshaped2 = permuted2.reshape((permuted2.shape[0], -1) + metaShape2)  # (n,128,r,c)
    return reshaped2
Example 6: mvNormal_logp
def mvNormal_logp(mu, tau, value):
    """
    Log-likelihood of a multivariate normal distribution, with tau as
    the precision matrix of each observation.

    Inputs:
    -------
    mu    = mean assumed for each observation      (num_obs x dim)
    tau   = precision assumed for each observation (num_obs x dim x dim)
    value = observed values                        (num_obs x dim)

    Output:
    -------
    output = log likelihood, one entry per observation
    """
    k = tau.shape[1]
    delta = value - mu
    # log-determinant term, one scalar per observation
    long_sum1, updates = theano.scan(lambda t: tt.log(1.0 / tt.nlinalg.det(t)),
                                     sequences=[tau], strict=True)
    # quadratic form delta' tau delta; return a scalar per step so the
    # stacked result has shape (num_obs,) rather than (num_obs, 1)
    long_sum2, updates = theano.scan(lambda t, d: d.dot(t).dot(d),
                                     sequences=[tau, delta], strict=True)
    output = k * tt.log(2 * np.pi)
    output += long_sum1
    output += long_sum2
    output *= -1 / 2.0
    return output
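A usage sketch for this logp, under the assumption that it is compiled into a Theano function with symbolic inputs of the documented shapes:

import numpy as np
import theano
import theano.tensor as tt

mu = tt.matrix('mu')        # (num_obs, dim)
tau = tt.tensor3('tau')     # (num_obs, dim, dim) precision matrices
value = tt.matrix('value')  # (num_obs, dim)
logp = theano.function([mu, tau, value], mvNormal_logp(mu, tau, value))

# a single 2-D standard-normal observation at the mean:
floatX = theano.config.floatX
print(logp(np.zeros((1, 2), dtype=floatX),
           np.eye(2, dtype=floatX)[None, :, :],
           np.zeros((1, 2), dtype=floatX)))
# -> [-1.8379], i.e. -log(2*pi)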
Example 7: function
def function(self, input_tensor):
    init_hs = T.zeros((input_tensor.shape[1], self.output_neurons))
    init_cs = T.zeros((input_tensor.shape[1], self.output_neurons))
    lstm_out_1, _ = theano.scan(fn=lambda a, b, c: self.__lstm_wrapper(a, b, c, self.d_forward, go_forwards=True),
                                outputs_info=[init_hs, init_cs],
                                sequences=input_tensor,
                                non_sequences=None)
    lstm_out_2, _ = theano.scan(fn=lambda a, b, c: self.__lstm_wrapper(a, b, c, self.d_backward, go_forwards=False),
                                outputs_info=[init_hs, init_cs],
                                sequences=input_tensor,
                                non_sequences=None)
    lstm_out_3, _ = theano.scan(fn=lambda a, b, c: self.__lstm_wrapper(a, b, c, self.u_forward, go_forwards=True),
                                outputs_info=[init_hs, init_cs],
                                sequences=input_tensor,
                                non_sequences=None,
                                go_backwards=True)
    lstm_out_4, _ = theano.scan(fn=lambda a, b, c: self.__lstm_wrapper(a, b, c, self.u_backward, go_forwards=False),
                                outputs_info=[init_hs, init_cs],
                                sequences=input_tensor,
                                non_sequences=None,
                                go_backwards=True)
    return T.concatenate((lstm_out_1[0],
                          lstm_out_2[0],
                          lstm_out_3[0][::-1],
                          lstm_out_4[0][::-1]), axis=2)
Example 8: fprop
def fprop(self, data):
    if self.use_ground_truth:
        self.input_space.validate(data)
        features, phones = data
        init_h = T.alloc(numpy.cast[theano.config.floatX](0), self.nhid)
        init_out = T.alloc(numpy.cast[theano.config.floatX](0), 1)
        init_out = T.unbroadcast(init_out, 0)
        fn = lambda f, p, h, o: self.fprop_step(f, p, h, o)
        ((h, out), updates) = theano.scan(fn=fn,
                                          sequences=[features, phones],
                                          outputs_info=[dict(initial=init_h, taps=[-1]),
                                                        init_out])
        return out
    else:
        self.input_space.validate(data)
        features, phones = data
        init_in = features[0]
        init_h = T.alloc(numpy.cast[theano.config.floatX](0), self.nhid)
        init_out = T.alloc(numpy.cast[theano.config.floatX](0), 1)
        init_out = T.unbroadcast(init_out, 0)
        fn = lambda t, p, f, h, o: self.fprop_step_prime(t, p, f, h, o)
        ((f, h, out), updates) = theano.scan(fn=fn,
                                             sequences=[features, phones],
                                             outputs_info=[init_in,
                                                           dict(initial=init_h, taps=[-1]),
                                                           init_out])
        return out
Example 9: _build_model
def _build_model(self, input, options, layers, params, go_backwards=False):
    def _step1(x_, t_, layer_):
        layer_ = str(layer_.data)
        v = layers['conv_' + layer_ + '_v'].conv(x_)
        t = layers['conv_' + layer_ + '_t'].conv(t_)
        h = v + t
        return x_, h

    def _step2(h, r_, layer_):
        layer_ = str(layer_.data)
        o = h + params['b_' + layer_].dimshuffle('x', 0, 'x', 'x')
        if layer_ != str(len(options['filter_shape']) - 1):
            r = layers['conv_' + layer_ + '_r'].conv(r_)
            o = tensor.nnet.relu(o + r)
        return o

    rval = input
    if go_backwards:
        rval = rval[::-1]
    for i in range(len(options['filter_shape'])):
        rval, _ = theano.scan(_step1, sequences=[rval],
                              outputs_info=[rval[0], None],
                              non_sequences=[i],
                              name='rnn_layers_k_' + str(i))
        rval = rval[1]
        rval, _ = theano.scan(_step2, sequences=[rval],
                              outputs_info=[rval[-1]],
                              non_sequences=[i],
                              name='rnn_layers_q_' + str(i))
    proj = rval
    return proj
Example 10: For_MMD_Sub_class
def For_MMD_Sub_class(self, target, data, omega, num_FF, Xlabel):
    Num = T.sum(Xlabel, 0)
    D_num = Xlabel.shape[1]
    N = data.shape[0]
    F_times_Omega = T.dot(data, omega)  # minibatch_size * n_rff
    Phi = (self.sf2**0.5 / num_FF**0.5) * T.concatenate([T.cos(F_times_Omega), T.sin(F_times_Omega)], 1)
    # each random-feature vector is a column vector of length 2*n_rff
    Phi_total = T.sum(Phi.T, -1) / N
    # per-domain feature means, a matrix of shape (domain_number, 2*n_rff)
    Phi_each_domain, updates = theano.scan(fn=lambda a, b: T.switch(T.neq(b, 0), Phi.T * a / b, 0),
                                           sequences=[Xlabel.T, Num])
    each_Phi = T.sum(Phi_each_domain, -1)
    # inner product of each domain mean with itself; the result is a D-dimensional vector
    each_domain_sum = T.sum(each_Phi * each_Phi, -1)
    # inner product of the overall mean with itself
    tot_sum = T.dot(Phi_total, Phi_total)
    # cross inner products between the overall mean and each domain mean
    tot_domain_sum, updates = theano.scan(fn=lambda a: a * Phi_total,
                                          sequences=[each_Phi])
    # assemble the MMD
    MMD_central = T.sum(each_domain_sum) + D_num * tot_sum - 2 * T.sum(tot_domain_sum)
    return MMD_central
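A quick sanity check on the algebra in the final line: writing mu_d for each domain's feature mean (rows of each_Phi) and mu for the overall mean (Phi_total), the expression sum_d ||mu_d||^2 + D*||mu||^2 - 2*sum_d <mu_d, mu> is exactly sum_d ||mu_d - mu||^2, i.e. the sum of squared distances in random-feature space between each domain mean and the overall mean.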
Example 11: apply
def apply(self):
    result, updates = theano.scan(
        fn=self.train_step,
        sequences=self.f,
        outputs_info=[self.A_start, None],
        non_sequences=self.A,
        n_steps=self.tgt.shape[0]
    )
    best_path_score = result[0][-1].max()
    idx = T.argmax(result[0][-1])
    res2, _ = theano.scan(
        fn=lambda dps, idx: [dps[idx], idx],
        sequences=result[1][::-1],
        outputs_info=[idx, None]
    )
    best_path = res2[1]
    tgt_score = self.decode()
    max_margin = T.sum(T.neq(self.tgt, best_path))
    self.cost = best_path_score + max_margin - tgt_score
    # if T.lt(self.cost, T.alloc(numpy.int64(0))):
    #     self.cost = T.alloc(numpy.int64(0))
    # return T.argmax(result[-1])
    # self.cost = T.mean(T.nnet.categorical_crossentropy(self.p_y_given_x, tgt))
    # return best_path_score
    # return best_path
    return self.cost
Example 12: call
def call(self, x, mask=None):
    maxlen = x.shape[1]
    hidden0 = x
    # shape: (batch_size, maxlen, hidden_dim)
    pyramid, _ = theano.scan(fn=self.build_pyramid,
                             sequences=T.arange(maxlen - 1),
                             outputs_info=[hidden0],
                             non_sequences=maxlen)
    # shape: (maxlen-1, batch_size, maxlen, hidden_dim)
    hidden0 = K.expand_dims(hidden0, dim=0)
    # shape: (1, batch_size, maxlen, hidden_dim)
    pyramid = K.concatenate([hidden0, pyramid], axis=0)
    # shape: (maxlen, batch_size, maxlen, hidden_dim)
    hierarchy, _ = theano.scan(fn=self.compress_pyramid,
                               sequences=[T.arange(maxlen, 0, -1),
                                          pyramid])
    # shape: (maxlen, batch_size, hidden_dim)
    hierarchy = K.permute_dimensions(hierarchy, (1, 0, 2))
    # shape: (batch_size, maxlen, hidden_dim)
    return hierarchy
Example 13: __dealWithOneDoc
def __dealWithOneDoc(self, DocSentenceCount0, oneDocSentenceCount1,
                     docs, corpusPos, oneDocSentenceWordCount, docW, docB,
                     sentenceW, sentenceB, posW, posB):
    # t = T.and_((shareRandge < oneDocSentenceCount1 + 1), (shareRandge >= DocSentenceCount0)).nonzero()
    oneDocSentenceWordCount = oneDocSentenceWordCount[DocSentenceCount0:oneDocSentenceCount1 + 1]
    sentenceResults0, _ = theano.scan(fn=self.__dealWithSentence,
                                      non_sequences=[docs, sentenceW, sentenceB],
                                      sequences=[dict(input=oneDocSentenceWordCount, taps=[-1, 0])],
                                      strict=True)
    sentenceResults1, _ = theano.scan(fn=self.__dealWithSentence,
                                      non_sequences=[corpusPos, posW, posB],
                                      sequences=[dict(input=oneDocSentenceWordCount, taps=[-1, 0])],
                                      strict=True)
    sentenceResults = T.concatenate([sentenceResults0, sentenceResults1], axis=1)
    # p = printing.Print('docPool')
    # docPool = p(docPool)
    # p = printing.Print('sentenceResults')
    # sentenceResults = p(sentenceResults)
    # p = printing.Print('doc_out')
    # doc_out = p(doc_out)
    doc_out = conv.conv2d(input=sentenceResults, filters=docW)
    docPool = downsample.max_pool_2d(doc_out, (self.__MAXDIM, 1),
                                     mode=self.__pooling_mode, ignore_border=False)
    docOutput = T.tanh(docPool + docB.dimshuffle([0, 'x', 'x']))
    doc_embedding = docOutput.flatten(1)
    return doc_embedding
Example 14: layers
def layers(self, n_layers=1):
    layers = []
    params = []
    layer_output = []
    for i in range(n_layers):
        if i == 0:
            # 100 * 10 * 32
            layer_input = self.x.reshape((self.batch_size, self.n_words, self.n_in)).dimshuffle(1, 0, 2)
            layer = FirstLayer(n_i=self.n_in)
        else:
            layer_input = layer_output[-1][::-1]
            layer = Layer(n_i=self.n_in)
        [h, c], _ = theano.scan(fn=layer.forward,
                                sequences=layer_input,
                                outputs_info=[self.h0, self.c0])
        layers.append(layer)
        params.extend(layer.params)
        layer_output.append(h)
    layer_input = layer_output[-1]
    layer = LastLayer(n_i=self.n_in, n_h=self.n_y)
    y, _ = theano.scan(fn=layer.forward,
                       sequences=layer_input,
                       outputs_info=[None])
    layers.append(layer)
    params.extend(layer.params)
    layer_output.append(y)
    return layers, params, layer_output
Example 15: gibbs_all
def gibbs_all(self, sample, W, vBias, hBias, countSteps, function_mode):
    if function_mode < 3:
        gibbsOne = lambda s: self.list_function_for_gibbs[function_mode](s, W, vBias, hBias)
        chain, updates = theano.scan(fn=gibbsOne,
                                     outputs_info=sample,
                                     n_steps=countSteps)
        return chain, updates
    else:
        if function_mode == MODE_WITH_COIN_EXCEPT_LAST:
            gibbsOne = lambda s: self.list_function_for_gibbs[MODE_WITH_COIN](s, W, vBias, hBias)
            chain, updates = theano.scan(fn=gibbsOne,
                                         outputs_info=sample,
                                         n_steps=countSteps - 1)
            gibbsOne = lambda s: self.list_function_for_gibbs[MODE_WITHOUT_COIN](s, W, vBias, hBias)
            res = gibbsOne(chain[-1])
            res = T.concatenate([chain, [res]])
            return res, updates
        else:
            gibbsOne = lambda s: self.list_function_for_gibbs[MODE_WITHOUT_COIN](s, W, vBias, hBias)
            chain, updates = theano.scan(fn=gibbsOne,
                                         outputs_info=sample,
                                         n_steps=countSteps - 1)
            gibbsOne = lambda s: self.list_function_for_gibbs[MODE_WITH_COIN](s, W, vBias, hBias)
            res = gibbsOne(chain[-1])
            res = T.concatenate([chain, [res]])
            return res, updates