This page collects typical usage examples of the Python method utils.AverageMeter. If you are wondering what utils.AverageMeter does, how to call it, or what real-world uses look like, the curated code samples below should help. You can also explore further usage examples from the utils module this method belongs to.
Below are 15 code examples of utils.AverageMeter, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
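All of the examples assume the conventional AverageMeter interface: an update(val, n=1) method that records a batch value weighted by batch size, a reset() method, and a running avg field (Example 10 additionally reads a local_avg field, a windowed variant not covered here). The exact class lives in each project's own utils module and may differ in detail; as a reference point, here is a minimal sketch of the common pattern, not any one project's implementation:

class AverageMeter(object):
    """Computes and stores the latest value and the running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # val is the (mean) metric for one batch; n is the batch size,
        # so avg becomes the sample-weighted running mean.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

With a meter like this, calling losses.update(loss.item(), input.size(0)) and then reading losses.avg, as the examples below do, yields the per-sample average over everything seen so far.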
Example 1: test
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def test(model, target_test_loader):
    model.eval()
    test_loss = utils.AverageMeter()
    correct = 0
    criterion = torch.nn.CrossEntropyLoss()
    len_target_dataset = len(target_test_loader.dataset)
    with torch.no_grad():
        for data, target in target_test_loader:
            data, target = data.to(DEVICE), target.to(DEVICE)
            s_output = model.predict(data)
            loss = criterion(s_output, target)
            test_loss.update(loss.item())
            pred = torch.max(s_output, 1)[1]
            correct += torch.sum(pred == target)
    print('{} --> {}: max correct: {}, accuracy: {:.2f}%\n'.format(
        source_name, target_name, correct, 100. * correct / len_target_dataset))
Example 2: base_val
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def base_val(base_train_loader, model_E, criterion, epoch):
    losses = AverageMeter()
    top1 = AverageMeter()
    model_E.eval()
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(base_train_loader):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            # compute output
            _, output = model_E(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            model_E.weight_norm()
            if (batch_idx + 1) % 250 == 0:
                print('base_test:', batch_idx + 1, 'loss:', losses.avg, 'acc:', top1.avg)
    return (losses.avg, top1.avg)
Example 3: validate
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def validate(data_loader, model):
    losses = AverageMeter()
    accuracy = AverageMeter()
    model.eval()
    with torch.no_grad():
        for idx, (input_seq, target) in tqdm(enumerate(data_loader), total=len(data_loader)):
            input_seq = input_seq.to(cuda)
            target = target.to(cuda)
            B = input_seq.size(0)
            output, _ = model(input_seq)
            [_, N, D] = output.size()
            output = output.view(B * N, D)
            target = target.repeat(1, N).view(-1)
            loss = criterion(output, target)
            acc = calc_accuracy(output, target)
            losses.update(loss.item(), B)
            accuracy.update(acc.item(), B)
    print('Loss {loss.avg:.4f}\t'
          'Acc: {acc.avg:.4f} \t'.format(loss=losses, acc=accuracy))
    return losses.avg, accuracy.avg
Example 4: train
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def train(source_loader, target_train_loader, target_test_loader, model, optimizer, CFG):
    len_source_loader = len(source_loader)
    len_target_loader = len(target_train_loader)
    for e in range(CFG['epoch']):
        train_loss_clf = utils.AverageMeter()
        train_loss_transfer = utils.AverageMeter()
        train_loss_total = utils.AverageMeter()
        model.train()
        iter_source, iter_target = iter(source_loader), iter(target_train_loader)
        n_batch = min(len_source_loader, len_target_loader)
        criterion = torch.nn.CrossEntropyLoss()
        for i in range(n_batch):
            data_source, label_source = next(iter_source)
            data_target, _ = next(iter_target)
            data_source, label_source = data_source.to(DEVICE), label_source.to(DEVICE)
            data_target = data_target.to(DEVICE)

            optimizer.zero_grad()
            label_source_pred, transfer_loss = model(data_source, data_target)
            clf_loss = criterion(label_source_pred, label_source)
            loss = clf_loss + CFG['lambda'] * transfer_loss
            loss.backward()
            optimizer.step()
            train_loss_clf.update(clf_loss.item())
            train_loss_transfer.update(transfer_loss.item())
            train_loss_total.update(loss.item())
            if i % CFG['log_interval'] == 0:
                print('Train Epoch: [{}/{} ({:02d}%)], cls_Loss: {:.6f}, transfer_loss: {:.6f}, total_Loss: {:.6f}'.format(
                    e + 1, CFG['epoch'], int(100. * i / n_batch),
                    train_loss_clf.avg, train_loss_transfer.avg, train_loss_total.avg))
        log.append([train_loss_clf.avg, train_loss_transfer.avg, train_loss_total.avg])
        np_log = np.array(log, dtype=float)
        np.savetxt('train_log.csv', np_log, delimiter=',', fmt='%.6f')

        # Test
        test(model, target_test_loader)
Example 5: base_train
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def base_train(base_train_loader, model_E, criterion, optimizer, epoch):
    losses = AverageMeter()
    top1 = AverageMeter()
    model_E.train()
    # for param in model_E.parameters():
    #     param.requires_grad = True
    for batch_idx, (input, target) in enumerate(base_train_loader):
        # print(target)
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        # compute output
        _, output = model_E(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        model_E.weight_norm()
        if (batch_idx + 1) % 250 == 0:
            print('base_train:', batch_idx + 1, 'loss:', losses.avg, 'acc:', top1.avg)
    return (losses.avg, top1.avg)
Example 6: train
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def train(args, data_loader, model, global_stats):
    """Run through one epoch of model training with the provided data loader."""
    # Initialize meters + timers
    train_loss = utils.AverageMeter()
    epoch_time = utils.Timer()

    # Run one epoch
    for idx, ex in enumerate(data_loader):
        train_loss.update(*model.update(ex))

        if idx % args.display_iter == 0:
            logger.info('train: Epoch = %d | iter = %d/%d | ' %
                        (global_stats['epoch'], idx, len(data_loader)) +
                        'loss = %.2f | elapsed time = %.2f (s)' %
                        (train_loss.avg, global_stats['timer'].time()))
            train_loss.reset()

    logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %
                (global_stats['epoch'], epoch_time.time()))

    # Checkpoint
    if args.checkpoint:
        model.checkpoint(args.model_file + '.checkpoint',
                         global_stats['epoch'] + 1)

# ------------------------------------------------------------------------------
# Validation loops. Includes both "unofficial" and "official" functions that
# use different metrics and implementations.
# ------------------------------------------------------------------------------
Example 7: validate_unofficial
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def validate_unofficial(args, data_loader, model, global_stats, mode):
    """Run one full unofficial validation.
    Unofficial = doesn't use the SQuAD script.
    """
    eval_time = utils.Timer()
    start_acc = utils.AverageMeter()
    end_acc = utils.AverageMeter()
    exact_match = utils.AverageMeter()

    # Make predictions
    examples = 0
    for ex in data_loader:
        batch_size = ex[0].size(0)
        pred_s, pred_e, _ = model.predict(ex)
        target_s, target_e = ex[-3:-1]

        # We get metrics for independent start/end and joint start/end
        accuracies = eval_accuracies(pred_s, target_s, pred_e, target_e)
        start_acc.update(accuracies[0], batch_size)
        end_acc.update(accuracies[1], batch_size)
        exact_match.update(accuracies[2], batch_size)

        # If getting train accuracies, sample max 10k
        examples += batch_size
        if mode == 'train' and examples >= 1e4:
            break

    logger.info('%s valid unofficial: Epoch = %d | start = %.2f | ' %
                (mode, global_stats['epoch'], start_acc.avg) +
                'end = %.2f | exact = %.2f | examples = %d | ' %
                (end_acc.avg, exact_match.avg, examples) +
                'valid time = %.2f (s)' % eval_time.time())

    return {'exact_match': exact_match.avg}
Example 8: validate_official
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def validate_official(args, data_loader, model, global_stats,
                      offsets, texts, answers):
    """Run one full official validation. Uses exact spans and the same
    exact match/F1 score computation as in the SQuAD script.

    Extra arguments:
        offsets: The character start/end indices for the tokens in each context.
        texts: Map of qid --> raw text of each example's context (matches offsets).
        answers: Map of qid --> list of accepted answers.
    """
    eval_time = utils.Timer()
    f1 = utils.AverageMeter()
    exact_match = utils.AverageMeter()

    # Run through examples
    examples = 0
    for ex in data_loader:
        ex_id, batch_size = ex[-1], ex[0].size(0)
        pred_s, pred_e, _ = model.predict(ex)

        for i in range(batch_size):
            s_offset = offsets[ex_id[i]][pred_s[i][0]][0]
            e_offset = offsets[ex_id[i]][pred_e[i][0]][1]
            prediction = texts[ex_id[i]][s_offset:e_offset]

            # Compute metrics
            ground_truths = answers[ex_id[i]]
            exact_match.update(utils.metric_max_over_ground_truths(
                utils.exact_match_score, prediction, ground_truths))
            f1.update(utils.metric_max_over_ground_truths(
                utils.f1_score, prediction, ground_truths))

        examples += batch_size

    logger.info('dev valid official: Epoch = %d | EM = %.2f | ' %
                (global_stats['epoch'], exact_match.avg * 100) +
                'F1 = %.2f | examples = %d | valid time = %.2f (s)' %
                (f1.avg * 100, examples, eval_time.time()))

    return {'exact_match': exact_match.avg * 100, 'f1': f1.avg * 100}
Example 9: eval_accuracies
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def eval_accuracies(pred_s, target_s, pred_e, target_e):
    """An unofficial evaluation helper.
    Compute exact start/end/complete match accuracies for a batch.
    """
    # Convert 1D tensors to lists of lists (compatibility)
    if torch.is_tensor(target_s):
        target_s = [[e] for e in target_s]
        target_e = [[e] for e in target_e]

    # Compute accuracies from targets
    batch_size = len(pred_s)
    start = utils.AverageMeter()
    end = utils.AverageMeter()
    em = utils.AverageMeter()
    for i in range(batch_size):
        # Start matches
        if pred_s[i] in target_s[i]:
            start.update(1)
        else:
            start.update(0)

        # End matches
        if pred_e[i] in target_e[i]:
            end.update(1)
        else:
            end.update(0)

        # Both start and end match
        if any([1 for _s, _e in zip(target_s[i], target_e[i])
                if _s == torch.from_numpy(pred_s[i]) and _e == torch.from_numpy(pred_e[i])]):
            em.update(1)
        else:
            em.update(0)
    return start.avg * 100, end.avg * 100, em.avg * 100

# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
Example 10: validate
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def validate(data_loader, model, epoch):
    losses = AverageMeter()
    accuracy = AverageMeter()
    accuracy_list = [AverageMeter(), AverageMeter(), AverageMeter()]
    model.eval()
    with torch.no_grad():
        for idx, input_seq in tqdm(enumerate(data_loader), total=len(data_loader)):
            input_seq = input_seq.to(cuda)
            B = input_seq.size(0)
            [score_, mask_] = model(input_seq)
            del input_seq

            if idx == 0:
                target_, (_, B2, NS, NP, SQ) = process_output(mask_)

            # [B, P, SQ, B, N, SQ]
            score_flattened = score_.view(B * NP * SQ, B2 * NS * SQ)
            target_flattened = target_.view(B * NP * SQ, B2 * NS * SQ)
            target_flattened = target_flattened.argmax(dim=1)

            loss = criterion(score_flattened, target_flattened)
            top1, top3, top5 = calc_topk_accuracy(score_flattened, target_flattened, (1, 3, 5))

            losses.update(loss.item(), B)
            accuracy.update(top1.item(), B)

            accuracy_list[0].update(top1.item(), B)
            accuracy_list[1].update(top3.item(), B)
            accuracy_list[2].update(top5.item(), B)

    print('[{0}/{1}] Loss {loss.local_avg:.4f}\t'
          'Acc: top1 {2:.4f}; top3 {3:.4f}; top5 {4:.4f} \t'.format(
              epoch, args.epochs, *[i.avg for i in accuracy_list], loss=losses))
    return losses.local_avg, accuracy.local_avg, [i.local_avg for i in accuracy_list]
Example 11: test
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def test(data_loader, model):
    losses = AverageMeter()
    acc_top1 = AverageMeter()
    acc_top5 = AverageMeter()
    confusion_mat = ConfusionMeter(args.num_class)
    model.eval()
    with torch.no_grad():
        for idx, (input_seq, target) in tqdm(enumerate(data_loader), total=len(data_loader)):
            input_seq = input_seq.to(cuda)
            target = target.to(cuda)
            B = input_seq.size(0)
            input_seq = input_seq.squeeze(0)  # squeeze the '1' batch dim
            output, _ = model(input_seq)
            del input_seq

            # average the softmax scores over the clip dimensions before ranking
            top1, top5 = calc_topk_accuracy(
                torch.mean(torch.mean(nn.functional.softmax(output, 2), 0), 0, keepdim=True),
                target, (1, 5))
            acc_top1.update(top1.item(), B)
            acc_top5.update(top5.item(), B)
            del top1, top5

            output = torch.mean(torch.mean(output, 0), 0, keepdim=True)
            loss = criterion(output, target.squeeze(-1))

            losses.update(loss.item(), B)
            del loss

            _, pred = torch.max(output, 1)
            confusion_mat.update(pred, target.view(-1).byte())

    print('Loss {loss.avg:.4f}\t'
          'Acc top1: {top1.avg:.4f} Acc top5: {top5.avg:.4f} \t'.format(loss=losses, top1=acc_top1, top5=acc_top5))
    confusion_mat.plot_mat(args.test + '.svg')
    write_log(content='Loss {loss.avg:.4f}\t Acc top1: {top1.avg:.4f} Acc top5: {top5.avg:.4f} \t'.format(loss=losses, top1=acc_top1, top5=acc_top5, args=args),
              epoch=num_epoch,
              filename=os.path.join(os.path.dirname(args.test), 'test_log.md'))
    return losses.avg, [acc_top1.avg, acc_top5.avg]
Example 12: test
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def test(test_data, model, device, logger):
    loading_time_meter = AverageMeter()
    batch_time_meter = AverageMeter()
    ce_loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    entropy_meter = AverageMeter()
    n_entropy_meter = AverageMeter()

    model.eval()
    start = time.time()
    with torch.no_grad():
        for labels, tokens, mask in test_data:
            labels = labels.to(device=device, non_blocking=True)
            tokens = tokens.to(device=device, non_blocking=True)
            mask = mask.to(device=device, non_blocking=True)
            loading_time_meter.update(time.time() - start)

            pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \
                model(tokens, mask, labels)
            entropy = entropy.mean()
            normalized_entropy = normalized_entropy.mean()

            accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()
            n = mask.shape[0]
            accuracy_meter.update(accuracy.item(), n)
            ce_loss_meter.update(ce_loss.item(), n)
            entropy_meter.update(entropy.item(), n)
            n_entropy_meter.update(normalized_entropy.item(), n)
            batch_time_meter.update(time.time() - start)
            start = time.time()

    logger.info(f"Test: ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
                f"entropy: {entropy_meter.avg:.4f} n_entropy: {n_entropy_meter.avg:.4f} "
                f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
    logger.info("done")
    return accuracy_meter.avg
Example 13: validate
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def validate(valid_data, model, epoch, device, logger, summary_writer):
    loading_time_meter = AverageMeter()
    batch_time_meter = AverageMeter()
    ce_loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    entropy_meter = AverageMeter()
    n_entropy_meter = AverageMeter()

    model.eval()
    start = time.time()
    with torch.no_grad():
        for labels, tokens, mask in valid_data:
            labels = labels.to(device=device, non_blocking=True)
            tokens = tokens.to(device=device, non_blocking=True)
            mask = mask.to(device=device, non_blocking=True)
            loading_time_meter.update(time.time() - start)

            pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \
                model(tokens, mask, labels)
            entropy = entropy.mean()
            normalized_entropy = normalized_entropy.mean()

            accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()
            n = mask.shape[0]
            accuracy_meter.update(accuracy.item(), n)
            ce_loss_meter.update(ce_loss.item(), n)
            entropy_meter.update(entropy.item(), n)
            n_entropy_meter.update(normalized_entropy.item(), n)
            batch_time_meter.update(time.time() - start)
            start = time.time()

    logger.info(f"Valid: epoch: {epoch} ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
                f"entropy: {entropy_meter.avg:.4f} n_entropy: {n_entropy_meter.avg:.4f} "
                f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
    # global_step is assumed to be a module-level counter maintained by the training loop
    summary_writer["valid"].add_scalar(tag="ce", scalar_value=ce_loss_meter.avg, global_step=global_step)
    summary_writer["valid"].add_scalar(tag="accuracy", scalar_value=accuracy_meter.avg, global_step=global_step)
    summary_writer["valid"].add_scalar(tag="n_entropy", scalar_value=n_entropy_meter.avg, global_step=global_step)
    model.train()
    return accuracy_meter.avg
Example 14: test
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def test(test_data, model, device, logger):
    loading_time_meter = AverageMeter()
    batch_time_meter = AverageMeter()
    ce_loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()

    model.eval()
    start = time.time()
    with torch.no_grad():
        for labels, tokens, trees, mask in test_data:
            labels = labels.to(device=device, non_blocking=True)
            tokens = tokens.to(device=device, non_blocking=True)
            trees = [e.to(device=device, non_blocking=True) for e in trees]
            mask = mask.to(device=device, non_blocking=True)
            loading_time_meter.update(time.time() - start)

            ce_loss, pred_labels = model(tokens, trees, mask, labels)

            accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()
            n = mask.shape[0]
            accuracy_meter.update(accuracy.item(), n)
            ce_loss_meter.update(ce_loss.item(), n)
            model.reset_memory_managers()
            batch_time_meter.update(time.time() - start)
            start = time.time()

    logger.info(f"Test: ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
                f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
    logger.info("done")
    return accuracy_meter.avg
Example 15: test
# Required import: import utils [as alias]
# Or: from utils import AverageMeter [as alias]
def test(test_data, model, device, logger):
    loading_time_meter = AverageMeter()
    batch_time_meter = AverageMeter()
    ce_loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    entropy_meter = AverageMeter()
    n_entropy_meter = AverageMeter()

    model.eval()
    start = time.time()
    with torch.no_grad():
        for batch in test_data:
            tokens, length = batch.text
            labels = batch.label
            mask = length_to_mask(length)
            loading_time_meter.update(time.time() - start)

            pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \
                model(tokens, mask, labels)
            entropy = entropy.mean()
            normalized_entropy = normalized_entropy.mean()

            accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()
            n = mask.shape[0]
            accuracy_meter.update(accuracy.item(), n)
            ce_loss_meter.update(ce_loss.item(), n)
            entropy_meter.update(entropy.item(), n)
            n_entropy_meter.update(normalized_entropy.item(), n)
            batch_time_meter.update(time.time() - start)
            start = time.time()

    logger.info(f"Test: ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
                f"entropy: {entropy_meter.avg:.4f} n_entropy: {n_entropy_meter.avg:.4f} "
                f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
    logger.info("done")
    return accuracy_meter.avg