This article collects typical usage examples of the Python method chainer.reporter.report. If you have been wondering what reporter.report does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore the module the method belongs to, chainer.reporter, for further usage examples.
The following presents 15 code examples of the reporter.report method, sorted by popularity by default.
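Before the examples, here is a minimal, self-contained sketch of the pattern they all share. It is not taken from any of the repositories below; the class name ReportingClassifier and the predictor argument are made up for illustration. A link calls chainer.reporter.report with a dict of values and itself as the observer; trainer extensions such as LogReport and PrintReport then pick those values up under keys like 'main/loss'.

import chainer
import chainer.functions as F
from chainer import reporter


class ReportingClassifier(chainer.Chain):
    """Hypothetical wrapper link, used only to illustrate reporter.report."""

    def __init__(self, predictor):
        super(ReportingClassifier, self).__init__()
        with self.init_scope():
            self.predictor = predictor

    def __call__(self, x, t):
        y = self.predictor(x)
        loss = F.softmax_cross_entropy(y, t)
        # Report values and associate them with this link; when the link is
        # the 'main' optimizer target, they show up as 'main/loss' and
        # 'main/accuracy' in LogReport/PrintReport output.
        reporter.report({'loss': loss, 'accuracy': F.accuracy(y, t)}, self)
        return loss

Every example below is a variation on this pattern: compute one or more values, then hand them to reporter.report, usually with self as the observer.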
Example 1: forward
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def forward(self, x, t):
    xp = cuda.get_array_module(x)
    y = self.predictor(x)
    log_softmax = F.log_softmax(y)
    # SelectItem is not supported by onnx-chainer.
    # TODO(hamaji): Support it?
    # log_prob = F.select_item(log_softmax, t)
    batch_size = chainer.Variable(xp.array(t.size, xp.float32),
                                  name='batch_size')
    self.extra_inputs = [batch_size]
    # TODO(hamaji): Currently, F.sum with axis=1 cannot be
    # backpropped properly.
    # log_prob = F.sum(log_softmax * t, axis=1)
    # return -F.sum(log_prob, axis=0) / self.batch_size
    log_prob = F.sum(log_softmax * t, axis=(0, 1))
    loss = -log_prob / batch_size
    reporter.report({'loss': loss}, self)
    if self.compute_accuracy:
        acc = accuracy.accuracy(y, xp.argmax(t, axis=1))
        reporter.report({'accuracy': acc}, self)
    loss.name = 'loss'
    return loss
Example 2: __call__
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def __call__(self, xs, ts):
    _, ys, ems = self.forward(xs)
    # PIT loss
    loss, labels = batch_pit_loss(ys, ts)
    reporter.report({'loss_pit': loss}, self)
    report_diarization_error(ys, labels, self)
    # DPCL loss
    loss_dc = F.sum(
        F.stack([dc_loss(em, t) for (em, t) in zip(ems, ts)]))
    n_frames = np.sum([t.shape[0] for t in ts])
    loss_dc = loss_dc / (n_frames ** 2)
    reporter.report({'loss_dc': loss_dc}, self)
    # Multi-objective
    loss = (1 - self.dc_loss_ratio) * loss + self.dc_loss_ratio * loss_dc
    reporter.report({'loss': loss}, self)
    return loss
Example 3: __call__
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def __call__(self, trainer):
    with chainer.no_backprop_mode():
        references = []
        hypotheses = []
        for i in range(0, len(self.test_data), self.batch):
            sources, targets = zip(*self.test_data[i:i + self.batch])
            references.extend([[t.tolist()] for t in targets])
            sources = [
                chainer.dataset.to_device(self.device, x) for x in sources]
            ys = [y.tolist()
                  for y in self.model.translate(sources, self.max_length)]
            hypotheses.extend(ys)
    bleu = bleu_score.corpus_bleu(
        references, hypotheses,
        smoothing_function=bleu_score.SmoothingFunction().method1)
    reporter.report({self.key: bleu})
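A usage note for Example 3: because reporter.report is called without an observer here, the BLEU value is recorded directly under self.key rather than under a link-prefixed name. An extension like this is typically attached to the trainer; the class name CalculateBleu and the constructor arguments below are assumptions made for illustration, not taken from the snippet itself.

# Hypothetical registration of the extension above.
trainer.extend(
    CalculateBleu(
        model, test_data, key='validation/main/bleu',
        batch=64, device=device, max_length=100),
    trigger=(1, 'epoch'))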
Example 4: forward
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def forward(self, imgs, captions):
    """Batch of images to a single loss."""
    imgs = Variable(imgs)
    if self.finetune_feat_extractor:
        img_feats = self.feat_extractor(imgs)
    else:
        # Extract features with the `train` configuration set to `False` in
        # order to basically skip the dropout regularizations. This is how
        # dropout is used during standard inference. Also, since we are not
        # going to optimize the feature extractor, we explicitly set the
        # backpropagation mode to not construct any computational graphs.
        with chainer.using_config('train', False), \
                chainer.no_backprop_mode():
            img_feats = self.feat_extractor(imgs)
    loss = self.lang_model(img_feats, captions)
    # Report the loss so that it can be printed, logged and plotted by
    # other trainer extensions
    reporter.report({'loss': loss}, self)
    return loss
Example 5: __call__
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def __call__(self, trainer):
    print('## Calculate BLEU')
    with chainer.no_backprop_mode():
        with chainer.using_config('train', False):
            references = []
            hypotheses = []
            for i in range(0, len(self.test_data), self.batch):
                sources, targets = zip(*self.test_data[i:i + self.batch])
                references.extend([[t.tolist()] for t in targets])
                sources = [
                    chainer.dataset.to_device(self.device, x) for x in sources]
                ys = [y.tolist()
                      for y in self.model.translate(sources, self.max_length)]
                hypotheses.extend(ys)
    bleu = bleu_score.corpus_bleu(
        references, hypotheses,
        smoothing_function=bleu_score.SmoothingFunction().method1) * 100
    print('BLEU:', bleu)
    reporter.report({self.key: bleu})
Example 6: __call__
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def __call__(self, x, labels):
    x = BatchTransform(self.model.mean)(x)
    x = self.xp.array(x)
    scores = self.model(x)
    B, n_class = scores.shape[:2]
    one_hot_labels = self.xp.zeros((B, n_class), dtype=np.int32)
    for i, label in enumerate(labels):
        one_hot_labels[i, label] = 1
    # sigmoid_cross_entropy normalizes the loss
    # by the size of batch and the number of classes.
    # It works better to remove the normalization factor
    # of the number of classes.
    loss = self.loss_scale * F.sigmoid_cross_entropy(
        scores, one_hot_labels)
    result = calc_accuracy(scores, labels)
    reporter.report({'loss': loss}, self)
    reporter.report({'accuracy': result['accuracy']}, self)
    reporter.report({'n_pred': result['n_pred']}, self)
    reporter.report({'n_pos': result['n_pos']}, self)
    return loss
Example 7: report
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def report(self, loss_ctc_list, loss_att, acc, cer_ctc_list, cer, wer, mtl_loss):
    """Define a chainer reporter function."""
    # loss_ctc_list = [weighted CTC, CTC1, CTC2, ... CTCN]
    # cer_ctc_list = [weighted cer_ctc, cer_ctc_1, cer_ctc_2, ... cer_ctc_N]
    num_encs = len(loss_ctc_list) - 1
    reporter.report({"loss_ctc": loss_ctc_list[0]}, self)
    for i in range(num_encs):
        reporter.report({"loss_ctc{}".format(i + 1): loss_ctc_list[i + 1]}, self)
    reporter.report({"loss_att": loss_att}, self)
    reporter.report({"acc": acc}, self)
    reporter.report({"cer_ctc": cer_ctc_list[0]}, self)
    for i in range(num_encs):
        reporter.report({"cer_ctc{}".format(i + 1): cer_ctc_list[i + 1]}, self)
    reporter.report({"cer": cer}, self)
    reporter.report({"wer": wer}, self)
    logging.info("mtl loss:" + str(mtl_loss))
    reporter.report({"loss": mtl_loss}, self)
Example 8: evaluate
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def evaluate(self):
    val_iter = self.get_iterator("main")
    target = self.get_target("main")
    loss = 0
    count = 0
    for batch in copy.copy(val_iter):
        x, t = convert.concat_examples(batch, device=self.device, padding=(0, -1))
        xp = chainer.backends.cuda.get_array_module(x)
        state = None
        for i in six.moves.range(len(x[0])):
            state, loss_batch = target(state, x[:, i], t[:, i])
            non_zeros = xp.count_nonzero(x[:, i])
            loss += loss_batch.data * non_zeros
            count += int(non_zeros)
    # report validation loss
    observation = {}
    with reporter.report_scope(observation):
        reporter.report({"loss": float(loss / count)}, target)
    return observation
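In Example 8, reporter.report_scope(observation) temporarily redirects the current reporter so that anything reported inside the with-block is written into the local observation dict; returning that dict is what a chainer Evaluator's evaluate() is expected to do, and the trainer then merges it into the global observation for logging.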
Example 9: __call__
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def __call__(self, trainer):
    # set up a reporter
    reporter = reporter_module.Reporter()
    if hasattr(self, 'name'):
        prefix = self.name + '/'
    else:
        prefix = ''
    for name, target in six.iteritems(self.targets):
        reporter.add_observer(prefix + name, target)
        reporter.add_observers(prefix + name,
                               target.namedlinks(skipself=True))
    with reporter:
        with configuration.using_config('train', False):
            result = self.evaluate(trainer)
    reporter_module.report(result)
    return result
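Example 9 essentially mirrors chainer.training.extensions.Evaluator: it builds a fresh Reporter, registers each target link and its sub-links as observers (so values reported inside self.evaluate(trainer) are collected under the target's name), runs the evaluation with the 'train' config disabled, and finally forwards the collected result to the trainer's own reporter via reporter_module.report(result).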
Example 10: __call__
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def __call__(self, x, t):
    h, t1, t2 = self.calc(x)
    cls_loss = functions.softmax_cross_entropy(h, t)
    reporter.report({'cls_loss': cls_loss}, self)
    loss = cls_loss
    # Enforce the transformation as orthogonal matrix
    if self.trans and self.trans_lam1 >= 0:
        trans_loss1 = self.trans_lam1 * calc_trans_loss(t1)
        reporter.report({'trans_loss1': trans_loss1}, self)
        loss = loss + trans_loss1
    if self.trans and self.trans_lam2 >= 0:
        trans_loss2 = self.trans_lam2 * calc_trans_loss(t2)
        reporter.report({'trans_loss2': trans_loss2}, self)
        loss = loss + trans_loss2
    reporter.report({'loss': loss}, self)
    if self.compute_accuracy:
        acc = functions.accuracy(h, t)
        reporter.report({'accuracy': acc}, self)
    return loss
Example 11: __call__
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def __call__(self, xs, ys):
    concat_outputs = self.predict(xs)
    concat_truths = F.concat(ys, axis=0)
    loss = F.softmax_cross_entropy(concat_outputs, concat_truths)
    accuracy = F.accuracy(concat_outputs, concat_truths)
    reporter.report({'loss': loss.data}, self)
    reporter.report({'accuracy': accuracy.data}, self)
    return loss
Example 12: __call__
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def __call__(self, xs, ys):
    concat_outputs = self.forward(xs)
    concat_outputs = F.softmax(concat_outputs, axis=1)
    concat_truths = F.concat(ys, axis=0)
    loss = F.softmax_cross_entropy(concat_outputs, concat_truths)
    accuracy = F.accuracy(concat_outputs, concat_truths)
    reporter.report({'loss': loss.data}, self)
    reporter.report({'accuracy': accuracy.data}, self)
    return loss
Example 13: report_diarization_error
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def report_diarization_error(ys, labels, observer):
    """
    Reports diarization errors using chainer.reporter

    Args:
        ys: B-length list of predictions (Variable)
        labels: B-length list of labels (ndarray)
        observer: target link (chainer.Chain)
    """
    for y, t in zip(ys, labels):
        stats = calc_diarization_error(y.array, t)
        for key in stats:
            reporter.report({key: stats[key]}, observer)
Example 14: calculate_loss
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def calculate_loss(self, input_chain, **args):
    seq_batch = sum(input_chain, [])
    t_out_concat = self.encode(seq_batch)
    seq_batch_mid = [seq[1:-1] for seq in seq_batch]
    seq_mid_concat = F.concat(seq_batch_mid, axis=0)
    n_tok = sum(len(s) for s in seq_batch_mid)
    loss = self.output_and_loss_from_concat(
        t_out_concat, seq_mid_concat,
        normalize=n_tok)
    reporter.report({'perp': self.xp.exp(loss.data)}, self)
    return loss
Example 15: calculate_loss_with_labels
# Required import: from chainer import reporter [as alias]
# Or: from chainer.reporter import report [as alias]
def calculate_loss_with_labels(self, seq_batch_with_labels):
    seq_batch, labels = seq_batch_with_labels
    t_out_concat = self.encode(seq_batch, labels=labels)
    seq_batch_mid = [seq[1:-1] for seq in seq_batch]
    seq_mid_concat = F.concat(seq_batch_mid, axis=0)
    n_tok = sum(len(s) for s in seq_batch_mid)
    loss = self.output_and_loss_from_concat(
        t_out_concat, seq_mid_concat,
        normalize=n_tok)
    reporter.report({'perp': self.xp.exp(loss.data)}, self)
    return loss
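A closing note on Examples 14 and 15: given the normalize=n_tok argument, the returned loss appears to be a per-token average, so self.xp.exp(loss.data) reports the per-token perplexity; passing loss.data (the raw array) rather than the Variable also keeps the reported value from holding on to the computational graph.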