This article collects typical usage examples of the dynet.log method in Python. If you are unsure what dynet.log does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples from the dynet module.
Seven code examples of the dynet.log method are shown below, sorted by popularity by default.
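Before the examples, here is a minimal sketch of what dy.log itself does (written for this page, not taken from the examples; it assumes DyNet is installed and imported under the usual dy alias): it applies the natural logarithm element-wise to an expression.
# Minimal illustration (assumption: DyNet installed, imported as dy)
import dynet as dy
import numpy as np

dy.renew_cg()                               # start a fresh computation graph
x = dy.inputVector([1.0, np.e, np.e ** 2])
y = dy.log(x)                               # element-wise natural logarithm
print(y.npvalue())                          # roughly [0., 1., 2.]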
Example 1: forward
# Required import: import dynet [as alias]
# Or: from dynet import log [as alias]
def forward(self, observations):
    def log_sum_exp(scores):
        npval = scores.npvalue()
        argmax_score = np.argmax(npval)
        max_score_expr = dy.pick(scores, argmax_score)
        max_score_expr_broadcast = dy.concatenate([max_score_expr] * self.tagset_size)
        return max_score_expr + dy.log(dy.sum_dim(dy.transpose(dy.exp(scores - max_score_expr_broadcast)), [1]))

    init_alphas = [-1e10] * self.tagset_size
    init_alphas[t2i[START_TAG]] = 0
    for_expr = dy.inputVector(init_alphas)
    for obs in observations:
        alphas_t = []
        for next_tag in range(self.tagset_size):
            obs_broadcast = dy.concatenate([dy.pick(obs, next_tag)] * self.tagset_size)
            next_tag_expr = for_expr + self.transitions[next_tag] + obs_broadcast
            alphas_t.append(log_sum_exp(next_tag_expr))
        for_expr = dy.concatenate(alphas_t)
    terminal_expr = for_expr + self.transitions[t2i["<STOP>"]]
    alpha = log_sum_exp(terminal_expr)
    return alpha
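The nested log_sum_exp helper implements the usual max-shift trick, log sum_i exp(s_i) = m + log sum_i exp(s_i - m) with m = max_i s_i, so the CRF forward scores never overflow. A small NumPy-only cross-check of that identity (written for this page, not part of the example):
import numpy as np

def log_sum_exp_np(scores):
    m = np.max(scores)                        # shift by the maximum for stability
    return m + np.log(np.sum(np.exp(scores - m)))

s = np.array([1000.0, 1000.5, 999.0])
print(log_sum_exp_np(s))                      # ~1001.10, finite
print(np.log(np.sum(np.exp(s))))              # overflows: exp(1000) is inf in float64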
Example 2: init_logger
# Required import: import dynet [as alias]
# Or: from dynet import log [as alias]
def init_logger():
    if not os.path.exists(root_dir):
        os.mkdir(root_dir)
    log_formatter = logging.Formatter("%(message)s")
    logger = logging.getLogger()
    file_handler = logging.FileHandler("{0}/info.log".format(root_dir), mode='w')
    file_handler.setFormatter(log_formatter)
    logger.addHandler(file_handler)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)
    logger.addHandler(console_handler)
    logger.setLevel(logging.INFO)
    return logger

# ===-----------------------------------------------------------------------===
# Set up logging
# ===-----------------------------------------------------------------------===
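A hypothetical call of the helper above (root_dir is a module-level variable in the original script; the value here is made up):
import logging
import os

root_dir = "results"                  # assumption: any writable directory
logger = init_logger()
logger.info("training started")       # written to the console and to results/info.log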
Example 3: forward
# Required import: import dynet [as alias]
# Or: from dynet import log [as alias]
def forward(self, observations):
    def log_sum_exp(scores):
        npval = scores.npvalue()
        argmax_score = np.argmax(npval)
        max_score_expr = dy.pick(scores, argmax_score)
        max_score_expr_broadcast = dy.concatenate([max_score_expr] * self.dim_output)
        return max_score_expr + dy.log(dy.sum_elems(dy.transpose(dy.exp(scores - max_score_expr_broadcast))))

    init_alphas = [-1e10] * self.dim_output
    init_alphas[self.sp_s] = 0
    for_expr = dy.inputVector(init_alphas)
    for obs in observations:
        alphas_t = []
        for next_tag in range(self.dim_output):
            obs_broadcast = dy.concatenate([dy.pick(obs, next_tag)] * self.dim_output)
            next_tag_expr = for_expr + self.trans[next_tag] + obs_broadcast
            alphas_t.append(log_sum_exp(next_tag_expr))
        for_expr = dy.concatenate(alphas_t)
    terminal_expr = for_expr + self.trans[self.sp_e]
    alpha = log_sum_exp(terminal_expr)
    return alpha
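This is essentially the same CRF forward pass as Example 1; the differences are the attribute names (dim_output, trans, sp_s, sp_e instead of tagset_size, transitions and the tag indices) and the use of dy.sum_elems rather than dy.sum_dim to sum the shifted exponentials.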
Example 4: get_POStagging_loss
# Required import: import dynet [as alias]
# Or: from dynet import log [as alias]
def get_POStagging_loss(self, X, Y):
    losses = []
    probs = self.encode_pt(X, train=True)
    for prob, y in zip(probs, Y):
        losses.append(-dy.log(dy.pick(prob, y)))
    loss = dy.esum(losses)
    return loss
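Assuming encode_pt returns a probability distribution per token (which the -dy.log(dy.pick(...)) pattern implies), each term is the token's negative log-likelihood. When you have raw scores instead, dy.pickneglogsoftmax fuses the softmax, pick and log steps; a small comparison sketch with made-up values:
import dynet as dy

dy.renew_cg()
score = dy.inputVector([0.2, 1.5, -0.3])            # hypothetical unnormalized scores
y = 1                                               # hypothetical gold index
loss_a = -dy.log(dy.pick(dy.softmax(score), y))     # explicit softmax + pick + log
loss_b = dy.pickneglogsoftmax(score, y)             # fused and numerically safer
print(loss_a.value(), loss_b.value())               # the two values agree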
Example 5: _pick_sample
# Required import: import dynet [as alias]
# Or: from dynet import log [as alias]
def _pick_sample(self, probs, temperature=1.0):
    probs = probs / np.sum(probs)
    scaled_prediction = np.log(probs) / temperature
    scaled_prediction = (scaled_prediction -
                         np.logaddexp.reduce(scaled_prediction))
    scaled_prediction = np.exp(scaled_prediction)
    # print np.sum(probs)
    # probs = probs / np.sum(probs)
    return np.random.choice(np.arange(256), p=scaled_prediction)
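The three log-space lines implement temperature scaling: the sampled distribution is proportional to probs ** (1 / temperature), renormalized with logaddexp.reduce so nothing under- or overflows. A NumPy-only sketch of the effect, with a made-up distribution:
import numpy as np

probs = np.array([0.7, 0.2, 0.1])                   # hypothetical distribution
for t in (0.5, 1.0, 2.0):
    scaled = np.log(probs) / t
    scaled = np.exp(scaled - np.logaddexp.reduce(scaled))
    print(t, scaled)                                # t < 1 sharpens, t > 1 flattens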
Example 6: compute_loss
# Required import: import dynet [as alias]
# Or: from dynet import log [as alias]
def compute_loss(gold_seq,
                 scores,
                 index_to_token_maps,
                 gold_tok_to_id,
                 noise=0.00000001):
    """ Computes the loss of a gold sequence given scores.

    Inputs:
        gold_seq (list of str): A sequence of gold tokens.
        scores (list of dy.Expression): Expressions representing the scores of
            potential output tokens for each token in gold_seq.
        index_to_token_maps (list of dict str->list of int): Maps from index in the
            sequence to a dictionary mapping from a string to a set of integers.
        gold_tok_to_id (lambda (str, dict)->list of int): Maps from the gold token
            and the token map at that position to the indices in the probability
            distribution where the gold token occurs.
        noise (float, optional): The amount of noise to add to the loss.

    Returns:
        dy.Expression representing the sum of losses over the sequence.
    """
    assert len(gold_seq) == len(scores)
    assert len(index_to_token_maps) == len(scores)

    losses = []
    for i, gold_tok in enumerate(gold_seq):
        score = scores[i]
        token_map = index_to_token_maps[i]

        gold_indices = gold_tok_to_id(gold_tok, token_map)
        assert len(gold_indices) > 0
        if len(gold_indices) == 1:
            losses.append(dy.pickneglogsoftmax(score, gold_indices[0]))
        else:
            prob_of_tok = dy.zeroes(1)
            probdist = dy.softmax(score)
            for index in gold_indices:
                prob_of_tok += probdist[index]
            prob_of_tok += noise
            losses.append(-dy.log(prob_of_tok))

    return dy.esum(losses)
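A hypothetical call of compute_loss (every value below is invented for illustration; the real token maps and lookup lambda come from the surrounding model code):
import dynet as dy

dy.renew_cg()
gold_seq = ["select", "*"]
scores = [dy.inputVector([0.1, 2.0, -1.0]),          # one score vector per gold token
          dy.inputVector([1.2, 0.3, 0.0])]
index_to_token_maps = [{"select": [1]}, {"*": [0, 2]}]
gold_tok_to_id = lambda tok, token_map: token_map[tok]
loss = compute_loss(gold_seq, scores, index_to_token_maps, gold_tok_to_id)
print(loss.value())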
Example 7: test
# Required import: import dynet [as alias]
# Or: from dynet import log [as alias]
def test(epoch, trees_test, policy, policy_save_test, best_test, best_test_idx):
    if epoch % 10 == 0:
        pass
    metric_total = [0] * 6
    micro_total = [0.] * 3
    wrong_at_total = [0.] * 10
    policy.disable_dropout()
    for i_episode in tqdm(range(len(trees_test))):
        dy.renew_cg()
        policy.re_init()
        # prob_l = []
        T = trees_test[i_episode]
        T_rollout = copy_tree(T, min(args.n_rollout_test, (len(T.terms) - 1) * 2))  # a list of copies of T
        policy.init_history(args.n_rollout_test)
        for i in range(len(T.terms) - 1):
            if i == 0:
                prob, pairs = policy.selection_by_tree(T, mode='test')
                prob = dy.log(prob).npvalue()
                indices = np.argsort(prob)[-args.n_rollout_test:]
                prob_total = prob[indices]
                selected_pairs = [pairs[idx] for idx in indices]
                pair_from_tree_idx = [0] * len(T_rollout)
            else:
                prob_per = []
                pairs_per = []
                for T_idx in range(len(T_rollout)):
                    prob, pairs = policy.selection_by_tree(T_rollout[T_idx], mode='test', idx=T_idx)
                    prob = dy.log(prob) + prob_total[T_idx]
                    prob_per.append(prob.npvalue())
                    pairs_per.append(pairs)
                prob_total, T_rollout, selected_pairs, pair_from_tree_idx = find_top_k(T_rollout, prob_per, pairs_per,
                                                                                       args.n_rollout_test)
            for tree_idx, (tree_i, pair_i, from_idx) in enumerate(zip(T_rollout, selected_pairs, pair_from_tree_idx)):
                pair_i = tuple(pair_i)
                tree_i.update(pair_i)
                policy.update_history(pair_i, from_idx=from_idx, to_idx=tree_idx)
        # best candidate
        metric_total, micro_total, wrong_at_total, wrong_total = T.evaluate(metric_total, micro_total,
                                                                            wrong_at_total, reward_type='print')
        # if args.debug:
        #     for tmp_T in T_rollout:
        #         tmp_total = [0] * 6
        #         print(tmp_T.evaluate(data=tmp_total, reward_type='print'))
        # T.re_init()
    for m_idx in range(5):
        metric_total[m_idx] = round(metric_total[m_idx] / len(trees_test), 3)
    for w_idx in range(len(wrong_at_total)):
        wrong_at_total[w_idx] = round(wrong_at_total[w_idx] / len(trees_test), 3)
    best_test, best_test_idx = update_best(metric_total, best_test, best_test_idx, epoch)
    if epoch % 1 == 0:
        print('[test]epoch {}:{} {} {} {}'.format(epoch, metric_total, micro_total, get_micro_f1(micro_total),
                                                  wrong_at_total), end=' ')
        print('best_test', best_test, best_test_idx)
    return policy_save_test, best_test, best_test_idx
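Note the role of dy.log here: converting the policy's probabilities to log space lets the beam scores of successive merge decisions be accumulated in prob_total by addition rather than multiplication, which avoids underflow as the rollout grows longer.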