This article collects typical usage examples of the dynet.softmax method in Python. If you have been wondering how to use dynet.softmax, what it does, or what calling it looks like in practice, the curated code samples below may help. You can also explore further usage examples from the dynet module, where the method is defined.
The following shows 11 code examples of dynet.softmax, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
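Before the project examples, here is a minimal, self-contained sketch of the pattern every example below relies on: build a computation graph, produce a vector of raw scores as a dy.Expression, and call dy.softmax to normalize it into a probability distribution. The score values are made up purely for illustration.

import dynet as dy
import numpy as np

dy.renew_cg()                                # start a fresh computation graph
scores = dy.inputVector([1.0, 2.0, 0.5])     # arbitrary unnormalized scores
probs = dy.softmax(scores)                   # normalize into a probability distribution
print(probs.npvalue())                       # entries are positive and sum to 1
print(np.argmax(probs.npvalue()))            # index of the most likely entry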
Example 1: _attend
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def _attend(self, input_vectors, decoder):
w1 = self.att_w1.expr(update=True)
w2 = self.att_w2.expr(update=True)
v = self.att_v.expr(update=True)
attention_weights = []
w2dt = w2 * decoder.s()[-1]
for input_vector in input_vectors:
attention_weight = v * dy.tanh(w1 * input_vector + w2dt)
attention_weights.append(attention_weight)
attention_weights = dy.softmax(dy.concatenate(attention_weights))
output_vectors = dy.esum(
[vector * attention_weight for vector, attention_weight in zip(input_vectors, attention_weights)])
return output_vectors, attention_weights
Example 2: __getitem__
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def __getitem__(self, i):
"""
Return the weighted layers for the current word.
:param i: Word at index i in the sentence.
:return: Embedding for the word
"""
layers = self._get_sentence_layers(i)
normalized_weights = dy.softmax(self.elmo.weights)
y_hat = [
dy.inputTensor(layer) * weight
for layer, weight in zip(layers, normalized_weights)
]
# Sum the layer contents together
return dy.esum(y_hat) * self.elmo.gamma
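What __getitem__ computes is an ELMo-style scalar mix: softmax-normalized weights over the layers, a weighted sum, and a final scaling by gamma. The sketch below reproduces the same computation with toy numpy layers; the layer count, embedding size, and weight values are assumptions chosen purely for illustration.

import dynet as dy
import numpy as np

dy.renew_cg()
layers = [np.random.rand(4) for _ in range(3)]          # three toy layers, embedding size 4
weights = dy.softmax(dy.inputVector([0.1, 0.3, 0.6]))   # learned mixing weights in the real model
gamma = 1.0                                             # learned scalar in the real model
mixed = dy.esum([dy.inputTensor(layer) * w
                 for layer, w in zip(layers, weights)]) * gamma
print(mixed.npvalue())                                  # one mixed embedding for the word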
Example 3: tag_sentence
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def tag_sentence(self, sentence, word_chars):
'''
For use in testing phase.
Tag the sentence and return tags for each attribute, without calculating loss.
'''
observations_set = self.build_tagging_graph(sentence, word_chars)
tag_seqs = {}
for att, observations in observations_set.items():
observations = [ dy.softmax(obs) for obs in observations ]
probs = [ obs.npvalue() for obs in observations ]
tag_seq = []
for prob in probs:
tag_t = np.argmax(prob)
tag_seq.append(tag_t)
tag_seqs[att] = tag_seq
return tag_seqs
Example 4: __call__
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def __call__(self, query, keys, values=None):
if not values:
values = keys
query_t, keys_t, values_t = self.transform_arguments(query,
keys,
values)
scores = dy.transpose(query_t * keys_t)
distribution = dy.softmax(scores)
context_vector = values_t * distribution
return AttentionResult(scores, distribution, context_vector)
Example 5: encode_pt
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def encode_pt(self, X, train=False):
dy.renew_cg()
# Remove dy.parameters(...) for DyNet v.2.1
#w_pos = dy.parameter(self.w_pos)
#b_pos = dy.parameter(self.b_pos)
w_pos = self.w_pos
b_pos = self.b_pos
ipts = []
length = len(X[0])
for i in range(length):
cids = X[0][i]
wid = X[1][i]
tids = X[2][i]
vec_char = self.char_seq_model.transduce([self.UNI[cid] for cid in cids])[-1]
vec_tags = []
for tid in tids:
if tid == 0:
zero = dy.inputVector(np.zeros(self.dim_tag_emb))
vec_tags.append(zero)
else:
vec_tags.append(self.POS[tid])
vec_tag = dy.esum(vec_tags)
if wid == 0:
vec_word = dy.inputVector(np.zeros(self.dim_word))
else:
vec_word = self.WORD[wid]
vec_at_i = dy.concatenate([vec_word, vec_char, vec_tag])
if train is True:
vec_at_i = dy.dropout(vec_at_i, self.dropout_rate)
ipts.append(vec_at_i)
hiddens = self.pos_model.transduce(ipts)
probs = [dy.softmax(w_pos*h+b_pos) for h in hiddens]
return probs
Example 6: softmaxify
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def softmaxify(neg_assocs):
"""
stable softmax function built before dynet's utility was efficient
:param neg_assocs: association scores for negative samples
:return: numpy array of softmaxed scores
"""
#return dy.softmax(dy.concatenate(list(neg_assocs.values()))).value() # can replace 4 lines below
neg_assoc_scores = np.array(list(neg_assocs.values()))
exp_neg_assocs = np.exp(neg_assoc_scores - np.max(neg_assoc_scores))
assoc_scores_sumexp = np.sum(exp_neg_assocs)
return exp_neg_assocs / assoc_scores_sumexp
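The docstring's point is the standard max-subtraction trick: softmax(x) equals softmax(x - max(x)), so subtracting the maximum score before exponentiating avoids overflow without changing the result. A small numpy check of that identity, with arbitrarily chosen large scores:

import numpy as np

x = np.array([1000.0, 1001.0, 999.0])   # np.exp(x) alone would overflow to inf
shifted = np.exp(x - np.max(x))         # largest exponent becomes exp(0) = 1
stable = shifted / shifted.sum()
print(stable)                           # a valid probability distribution, roughly [0.245, 0.665, 0.090]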
Example 7: _attend
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def _attend(self, input_list, decoder_state, last_pos=None):
w1 = self.att_w1.expr(update=True)
w2 = self.att_w2.expr(update=True)
v = self.att_v.expr(update=True)
attention_weights = []
w2dt = w2 * dy.concatenate([decoder_state.s()[-1]])
for input_vector in input_list:
attention_weight = v * dy.tanh(w1 * input_vector + w2dt)
attention_weights.append(attention_weight)
attention_weights = dy.softmax(dy.concatenate(attention_weights))
# force incremental attention if this is runtime
if last_pos is not None:
current_pos = np.argmax(attention_weights.value())
if current_pos < last_pos or current_pos >= last_pos + 2:
current_pos = last_pos + 1
if current_pos >= len(input_list):
current_pos = len(input_list) - 1
output_vectors = input_list[current_pos]
simulated_att = np.zeros((len(input_list)))
simulated_att[current_pos] = 1.0
new_att_vec = dy.inputVector(simulated_att)
return output_vectors, new_att_vec
output_vectors = dy.esum(
[vector * attention_weight for vector, attention_weight in zip(input_list, attention_weights)])
return output_vectors, attention_weights
Example 8: _attend_classic
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def _attend_classic(self, input_list, decoder_state, w1, w2, v):
attention_weights = []
w2dt = w2 * decoder_state
for input_vector in input_list:
attention_weight = v * dy.tanh(w1 * input_vector + w2dt)
attention_weights.append(attention_weight)
attention_weights = dy.softmax(dy.concatenate(attention_weights))
output_vectors = dy.esum(
[vector * attention_weight for vector, attention_weight in zip(input_list, attention_weights)])
return output_vectors, attention_weights
Example 9: tag_sentence
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def tag_sentence(self, sentence):
observations_set = self.build_tagging_graph(sentence)
tag_seqs = {}
for att, observations in observations_set.items():
observations = [ dy.softmax(obs) for obs in observations ]
probs = [ obs.npvalue() for obs in observations ]
tag_seq = []
for prob in probs:
tag_t = np.argmax(prob)
tag_seq.append(tag_t)
tag_seqs[att] = tag_seq
return tag_seqs
Example 10: compute_loss
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def compute_loss(gold_seq,
scores,
index_to_token_maps,
gold_tok_to_id,
noise=0.00000001):
""" Computes the loss of a gold sequence given scores.
Inputs:
gold_seq (list of str): A sequence of gold tokens.
scores (list of dy.Expression): Expressions representing the scores of
potential output tokens for each token in gold_seq.
index_to_token_maps (list of dict str->list of int): Maps from the index in the
sequence to a dictionary mapping from a string to a set of integers.
gold_tok_to_id (lambda (str, str)->list of int): Maps from the gold token
and some lookup function to the indices in the probability distribution
where the gold token occurs.
noise (float, optional): The amount of noise to add to the loss.
Returns:
dy.Expression representing the sum of losses over the sequence.
"""
assert len(gold_seq) == len(scores)
assert len(index_to_token_maps) == len(scores)
losses = []
for i, gold_tok in enumerate(gold_seq):
score = scores[i]
token_map = index_to_token_maps[i]
gold_indices = gold_tok_to_id(gold_tok, token_map)
assert len(gold_indices) > 0
if len(gold_indices) == 1:
losses.append(dy.pickneglogsoftmax(score, gold_indices[0]))
else:
prob_of_tok = dy.zeroes(1)
probdist = dy.softmax(score)
for index in gold_indices:
prob_of_tok += probdist[index]
prob_of_tok += noise
losses.append(-dy.log(prob_of_tok))
return dy.esum(losses)
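A minimal, hypothetical call pattern for compute_loss follows; the toy vocabulary, the score vectors, and the lookup lambda are illustrative assumptions rather than code from the original project.

import dynet as dy

dy.renew_cg()
vocab = ["select", "from", "where"]                       # hypothetical output vocabulary
gold_seq = ["select", "from"]
scores = [dy.inputVector([0.2, 0.5, 0.3]),                # one score Expression per gold token
          dy.inputVector([0.1, 0.7, 0.2])]
index_to_token_maps = [{tok: [i] for i, tok in enumerate(vocab)}] * len(gold_seq)
gold_tok_to_id = lambda tok, token_map: token_map[tok]    # look the gold token up in its map
loss = compute_loss(gold_seq, scores, index_to_token_maps, gold_tok_to_id)
print(loss.value())                                       # summed negative log-likelihood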
Example 11: synthesize
# Required imports: import dynet [as alias]
# Or: from dynet import softmax [as alias]
def synthesize(self, mgc, batch_size, sample=True, temperature=1.0):
synth = []
total_audio_len = mgc.shape[0] * len(self.upsample_w_s)
num_batches = total_audio_len // batch_size  # integer division: number of full batches
if total_audio_len % batch_size != 0:
    num_batches += 1  # one extra, partial batch for the remainder
last_rnn_state = None
last_sample = 127
w_index = 0
last_proc = 0
for iBatch in range(num_batches):
dy.renew_cg()
# bias=dy.inputVector([0]*self.RNN_SIZE)
# gain=dy.inputVector([1.0]*self.RNN_SIZE)
start = batch_size * iBatch
stop = batch_size * (iBatch + 1)
if stop >= total_audio_len:
stop = total_audio_len - 1
upsampled = self._upsample(mgc, start, stop)
rnn = self.rnn.initial_state()
if last_rnn_state is not None:
rnn_state = [dy.inputVector(s) for s in last_rnn_state]
rnn = rnn.set_s(rnn_state)
out_list = []
for index in range(stop - start):
w_index += 1
curr_proc = w_index * 100 // total_audio_len  # integer percentage for the progress display
if curr_proc % 5 == 0 and curr_proc != last_proc:
last_proc = curr_proc
sys.stdout.write(' ' + str(curr_proc))
sys.stdout.flush()
if self.OUTPUT_EMB_SIZE != 1:
rnn_input = dy.concatenate([self.output_lookup[last_sample], upsampled[index]])
else:
rnn_input = dy.concatenate([dy.scalarInput(float(last_sample) / 127.0 - 1.0), upsampled[index]])
rnn = rnn.add_input(rnn_input)
rnn_output = rnn.output() # dy.layer_norm(rnn.output(), gain, bias)
hidden = rnn_output
for w, b in zip(self.mlp_w, self.mlp_b):
hidden = dy.tanh(w.expr(update=True) * hidden + b.expr(update=True))
softmax_output = dy.softmax(
self.softmax_w.expr(update=True) * hidden + self.softmax_b.expr(update=True))
out_list.append(softmax_output)
if sample:
last_sample = self._pick_sample(softmax_output.npvalue(),
temperature=temperature) # np.argmax(softmax_output.npvalue())
else:
last_sample = np.argmax(softmax_output.npvalue())
# last_sample = np.argmax(softmax_output.npvalue())
synth.append(last_sample)
rnn_state = rnn.s()
last_rnn_state = [s.value() for s in rnn_state]
return synth