This article collects typical usage examples of the Python method dynet.pickneglogsoftmax. If you are wondering how exactly to use dynet.pickneglogsoftmax, or what it is good for, the curated examples below may help. You can also explore other usage examples from the dynet module.
Four code examples of dynet.pickneglogsoftmax are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
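Before the examples, a minimal standalone sketch of what the method computes: dy.pickneglogsoftmax(scores, index) returns the negative log-probability of class index under a softmax over scores, i.e. the cross-entropy loss for a single training example, computed in one fused, numerically stable operation. The toy model below is illustrative only and is not taken from any of the examples:

import dynet as dy

m = dy.ParameterCollection()
p_W = m.add_parameters((5, 10))          # 5 classes, 10 input features

dy.renew_cg()
W = dy.parameter(p_W)                    # load the parameter into the graph
x = dy.inputVector([0.1] * 10)
scores = W * x                           # unnormalized class scores (logits)
loss = dy.pickneglogsoftmax(scores, 2)   # == -dy.log(dy.pick(dy.softmax(scores), 2))
print(loss.value())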
Example 1: loss
# Required import: import dynet as dy
# Alternatively: from dynet import pickneglogsoftmax
def loss(self, sentence, word_chars, tags_set):
    '''
    For use in the training phase.
    Tag a sentence (all attributes) and compute the loss based on the
    probability of the expected tags.
    '''
    observations_set = self.build_tagging_graph(sentence, word_chars)
    errors = {}
    for att, tags in tags_set.items():
        err = []
        for obs, tag in zip(observations_set[att], tags):
            # Negative log-likelihood of the gold tag under a softmax over obs.
            err_t = dy.pickneglogsoftmax(obs, tag)
            err.append(err_t)
        errors[att] = dy.esum(err)
    if self.att_props is not None:
        # Scale each attribute's summed loss by its proportion weight.
        for att, err in errors.items():
            # err.dim() returns (shape, batch_size); the first entry of the
            # shape tuple is the vector length. The original indexed only
            # err.dim()[0], which yields the whole shape tuple.
            prop_vec = dy.inputVector([self.att_props[att]] * err.dim()[0][0])
            # Store the weighted loss back into the dict; the original
            # reassigned the loop variable, which discarded the result.
            errors[att] = dy.cmult(err, prop_vec)
    return errors
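Since loss returns a dict of per-attribute loss expressions rather than a single scalar, the caller is responsible for combining them. A minimal sketch of a training step built on it; the trainer choice and the tagger/model names here are assumptions for illustration, not from the original code:

# Hypothetical training step (tagger, its .model, and the trainer are assumed).
trainer = dy.AdamTrainer(tagger.model)
dy.renew_cg()
errors = tagger.loss(sentence, word_chars, tags_set)
total_loss = dy.esum(list(errors.values()))  # combine per-attribute losses
total_loss.backward()
trainer.update()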
Example 2: evaluate
# Required import: import dynet as dy
# Alternatively: from dynet import pickneglogsoftmax
def evaluate(self, inputs, outputs):
    params = M.get_collection()
    optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)
    num_batches = int(len(self.train_dataset) / self.batch_size)
    for epoch in range(self.max_num_training_epochs):
        random.shuffle(self.train_dataset)
        i = 0
        total_loss = 0
        while i < len(self.train_dataset):
            # Start a fresh computation graph for each minibatch.
            dy.renew_cg()
            mbsize = min(self.batch_size, len(self.train_dataset) - i)
            minibatch = self.train_dataset[i:i + mbsize]
            losses = []
            for (label, img) in minibatch:
                x = dy.inputVector(img)
                co.forward({inputs['in']: x})
                logits = outputs['out'].val
                # Cross-entropy loss of the gold label under the logits.
                loss = dy.pickneglogsoftmax(logits, label)
                losses.append(loss)
            mbloss = dy.esum(losses) / mbsize
            mbloss.backward()
            optimizer.update()
            total_loss += mbloss.scalar_value()
            i += mbsize

        val_acc = self.compute_accuracy(inputs, outputs)
        if self.log_output_to_terminal and epoch % self.display_step == 0:
            print("epoch:", '%d' % (epoch + 1), "loss:",
                  "{:.9f}".format(total_loss / num_batches),
                  "validation_accuracy:", "%.5f" % val_acc)

    val_acc = self.compute_accuracy(inputs, outputs)
    return {'val_acc': val_acc}
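DyNet also provides a batched variant, dy.pickneglogsoftmax_batch, which computes one loss per batch element in a single call and is typically faster than building one expression per instance as in the loop above. A standalone sketch, independent of the framework used in this example:

import numpy as np
import dynet as dy

dy.renew_cg()
# 10 class scores for each of 4 batch elements (batch is the trailing dimension).
scores = dy.inputTensor(np.random.randn(10, 4), batched=True)
labels = [3, 1, 7, 0]
losses = dy.pickneglogsoftmax_batch(scores, labels)  # one loss per batch element
mbloss = dy.sum_batches(losses) / len(labels)
print(mbloss.value())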
Example 3: learn
# Required import: import dynet as dy
# Alternatively: from dynet import pickneglogsoftmax
def learn(self, word, transcription):
    output_list, att_list = self._predict(word, gs_phones=transcription)
    for pos, (tp, pp, att) in enumerate(zip(transcription, output_list, att_list)):
        # Negative log-likelihood of the gold phoneme at this position.
        self.losses.append(dy.pickneglogsoftmax(pp, self.encodings.phoneme2int[tp]))
        self.losses.append(self._compute_guided_attention(att, pos, len(word), len(transcription) + 1))
    # The index len(self.encodings.phoneme2int) acts as the end-of-sequence symbol.
    self.losses.append(dy.pickneglogsoftmax(output_list[-1], len(self.encodings.phoneme2int)))
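Note that learn only accumulates loss expressions in self.losses; a surrounding training loop must periodically sum, backpropagate, and clear them. A hedged sketch of such a flush step; the model and trainer names are assumptions, not from the original code:

# Hypothetical flush of the accumulated losses (model/trainer names assumed).
if len(model.losses) > 0:
    total = dy.esum(model.losses)
    total.backward()
    trainer.update()
    model.losses = []
    dy.renew_cg()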
Example 4: compute_loss
# Required import: import dynet as dy
# Alternatively: from dynet import pickneglogsoftmax
def compute_loss(gold_seq,
                 scores,
                 index_to_token_maps,
                 gold_tok_to_id,
                 noise=1e-8):
    """ Computes the loss of a gold sequence given scores.

    Inputs:
        gold_seq (list of str): A sequence of gold tokens.
        scores (list of dy.Expression): Expressions representing the scores of
            potential output tokens for each token in gold_seq.
        index_to_token_maps (list of dict str->list of int): Maps from index in
            the sequence to a dictionary mapping from a string to a set of
            integers.
        gold_tok_to_id (function (str, dict)->list of int): Maps from the gold
            token and its token map to the indices in the probability
            distribution where the gold token occurs.
        noise (float, optional): The amount of noise to add to the loss.

    Returns:
        dy.Expression representing the sum of losses over the sequence.
    """
    assert len(gold_seq) == len(scores)
    assert len(index_to_token_maps) == len(scores)

    losses = []
    for i, gold_tok in enumerate(gold_seq):
        score = scores[i]
        token_map = index_to_token_maps[i]

        gold_indices = gold_tok_to_id(gold_tok, token_map)
        assert len(gold_indices) > 0
        if len(gold_indices) == 1:
            losses.append(dy.pickneglogsoftmax(score, gold_indices[0]))
        else:
            # If the gold token maps to several indices, sum their
            # probabilities and take the negative log of the total.
            prob_of_tok = dy.zeroes(1)  # spelled dy.zeros in recent DyNet releases
            probdist = dy.softmax(score)
            for index in gold_indices:
                prob_of_tok += probdist[index]
            prob_of_tok += noise  # small constant to avoid log(0)
            losses.append(-dy.log(prob_of_tok))
    return dy.esum(losses)
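In the multi-index branch above, probabilities are summed in the raw domain and a small noise constant guards against log(0). The same quantity can be computed more stably in the log domain with dy.log_softmax and dy.logsumexp, which makes the noise term unnecessary. A sketch of that alternative for the len(gold_indices) > 1 branch:

# Log-domain alternative for the multi-index case.
log_probs = dy.log_softmax(score)
picked = [dy.pick(log_probs, index) for index in gold_indices]
# log(sum_i p_i) == logsumexp_i(log p_i)
losses.append(-dy.logsumexp(picked))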