本文整理汇总了Python中torchnet.meter.ConfusionMeter方法的典型用法代码示例。如果您正苦于以下问题:Python meter.ConfusionMeter方法的具体用法?Python meter.ConfusionMeter怎么用?Python meter.ConfusionMeter使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块torchnet.meter的用法示例。
在下文中一共展示了meter.ConfusionMeter方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from torchnet import meter [as 别名]
# 或者: from torchnet.meter import ConfusionMeter [as 别名]
def __init__(self, faster_rcnn, log_filename=opt.log_filename):
    """Wrap a FasterRCNN network with its training targets, optimizer and meters.

    Args:
        faster_rcnn: the FasterRCNN model to train; supplies the
            loc-normalization constants and builds its own optimizer.
        log_filename: file the Visualizer mirrors log output to
            (defaults to the global ``opt.log_filename``).
    """
    super(FasterRCNNTrainer, self).__init__()
    self.faster_rcnn = faster_rcnn
    # Sigmas used by the smooth-L1 localization losses of the RPN / ROI heads.
    self.rpn_sigma = opt.rpn_sigma
    self.roi_sigma = opt.roi_sigma
    # target creator create gt_bbox gt_label etc as training targets.
    self.anchor_target_creator = AnchorTargetCreator()
    self.proposal_target_creator = ProposalTargetCreator()
    self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
    self.loc_normalize_std = faster_rcnn.loc_normalize_std
    self.optimizer = self.faster_rcnn.get_optimizer()
    # visdom wrapper
    self.vis = Visualizer(env=opt.env, log_to_filename=log_filename)
    # indicators for training status
    self.rpn_cm = ConfusionMeter(2)   # RPN confusion: object vs background
    self.roi_cm = ConfusionMeter(21)  # ROI head confusion: 21 classes — presumably 20 VOC classes + background; verify
    self.meters = {k: AverageValueMeter() for k in LossTuple._fields} # average loss
示例2: evaluate
# 需要导入模块: from torchnet import meter [as 别名]
# 或者: from torchnet.meter import ConfusionMeter [as 别名]
def evaluate(opt, loader, F, P):
    """Compute classification accuracy of the pipeline P(F(x)) over ``loader``.

    Args:
        opt: options namespace; ``opt.num_labels`` is the number of classes.
        loader: iterable yielding (inputs, targets) mini-batches.
        F: feature-extractor module (switched to eval mode here).
        P: label-predictor module (switched to eval mode here).

    Returns:
        float: accuracy in [0, 1]; 0.0 when the loader yields no samples.
    """
    F.eval()
    P.eval()
    it = iter(loader)
    correct = 0
    total = 0
    confusion = ConfusionMeter(opt.num_labels)
    with torch.no_grad():
        for inputs, targets in tqdm(it):
            outputs = P(F(inputs))
            # Predicted class = argmax over the label dimension.
            _, pred = torch.max(outputs, 1)
            confusion.add(pred.data, targets.data)
            total += targets.size(0)
            correct += (pred == targets).sum().item()
    # Guard against an empty loader (was an unconditional division: ZeroDivisionError).
    accuracy = correct / total if total > 0 else 0.0
    log.info('Accuracy on {} samples: {}%'.format(total, 100.0*accuracy))
    log.debug(confusion.conf)
    return accuracy
示例3: evaluate
# 需要导入模块: from torchnet import meter [as 别名]
# 或者: from torchnet.meter import ConfusionMeter [as 别名]
def evaluate(name, loader, F_s, F_d, C):
    """Compute accuracy of classifier C on shared + domain features.

    Args:
        name: label used in the log line (e.g. the domain name).
        loader: iterable yielding (inputs, targets) mini-batches.
        F_s: shared feature extractor.
        F_d: domain feature extractor, or a falsy value for an unlabeled
            domain (zeros are substituted for the domain features).
        C: classifier taking the concatenated feature vector.

    Returns:
        float: accuracy in [0, 1]; 0.0 when the loader yields no samples.
    """
    F_s.eval()
    if F_d:
        F_d.eval()
    C.eval()
    it = iter(loader)
    correct = 0
    total = 0
    confusion = ConfusionMeter(opt.num_labels)
    # Disable autograd during evaluation — consistent with the other
    # evaluate() helper in this file; avoids building graphs we never use.
    with torch.no_grad():
        for inputs, targets in tqdm(it):
            targets = targets.to(opt.device)
            if not F_d:
                # unlabeled domain
                d_features = torch.zeros(len(targets), opt.domain_hidden_size).to(opt.device)
            else:
                d_features = F_d(inputs)
            features = torch.cat((F_s(inputs), d_features), dim=1)
            outputs = C(features)
            _, pred = torch.max(outputs, 1)
            confusion.add(pred.data, targets.data)
            total += targets.size(0)
            correct += (pred == targets).sum().item()
    # Guard against an empty loader (was an unconditional division: ZeroDivisionError).
    acc = correct / total if total > 0 else 0.0
    log.info('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))
    log.debug(confusion.conf)
    return acc
示例4: profile_for_quantization
# 需要导入模块: from torchnet import meter [as 别名]
# 或者: from torchnet.meter import ConfusionMeter [as 别名]
def profile_for_quantization(data_loader, model, criterion, loggers, args):
    """Profile activations for quantization.

    Runs a single forward pass over the first ``args.profile_batches``
    batches of ``data_loader`` (concatenated into one large batch) with the
    model in eval mode, and logs the elapsed time.

    Args:
        data_loader: source of (inputs, target) batches.
        model: network to run; moved inputs are sent to 'cuda'.
        criterion: unused here; kept for signature compatibility with callers.
        loggers: unused here; kept for signature compatibility with callers.
        args: must provide ``profile_batches`` (int); if <= 0, this is a no-op.
    """
    msglogger.info('--- profile for quantization ---')
    # Switch to evaluation mode
    model.eval()
    if args.profile_batches <= 0:
        return
    end = time.time()
    data_iter = iter(data_loader)
    # Collect all batches first and concatenate once: the previous version
    # called torch.cat inside the loop, re-copying the accumulated tensor on
    # every iteration (quadratic in the number of batches).
    batches = [next(data_iter) for _ in range(args.profile_batches)]
    inputs = torch.cat([b for b, _ in batches], dim=0)
    target = torch.cat([t for _, t in batches], dim=0)
    msglogger.info('--- profiling with %d images ---' % inputs.shape[0])
    with torch.no_grad():
        inputs, target = inputs.to('cuda'), target.to('cuda')
        # compute output from model
        output = model(inputs)
    # measure elapsed time
    msglogger.info('==> Profile runtime: %d' % (time.time() - end))
示例5: model_analysis
# 需要导入模块: from torchnet import meter [as 别名]
# 或者: from torchnet.meter import ConfusionMeter [as 别名]
def model_analysis(model, dataloader, params, temperature=1., num_classes=10):
    """
    Generate Confusion Matrix on evaluation set.

    Args:
        model: network under evaluation (switched to eval mode here).
        dataloader: yields (data_batch, labels_batch) pairs.
        params: config object; ``params.cuda`` selects GPU transfer.
        temperature: softmax temperature applied to the logits.
        num_classes: number of classes for the confusion matrix.

    Returns:
        (softmax_scores, predict_correct, confusion) where softmax_scores is
        an (N, num_classes) array, predict_correct an (N, 1) 0/1 array, and
        confusion the integer confusion matrix.
    """
    model.eval()
    confusion_matrix = ConfusionMeter(num_classes)
    softmax_scores = []
    predict_correct = []
    with tqdm(total=len(dataloader)) as t:
        for idx, (data_batch, labels_batch) in enumerate(dataloader):
            if params.cuda:
                # BUG FIX: `.cuda(async=True)` is a SyntaxError on Python >= 3.7
                # ('async' became a keyword); PyTorch renamed the argument to
                # `non_blocking` in 0.4.
                data_batch, labels_batch = data_batch.cuda(non_blocking=True), \
                                        labels_batch.cuda(non_blocking=True)
            data_batch, labels_batch = Variable(data_batch), Variable(labels_batch)
            output_batch = model(data_batch)
            confusion_matrix.add(output_batch.data, labels_batch.data)
            # Temperature-scaled softmax probabilities for calibration analysis.
            softmax_scores_batch = F.softmax(output_batch/temperature, dim=1)
            softmax_scores_batch = softmax_scores_batch.data.cpu().numpy()
            softmax_scores.append(softmax_scores_batch)
            # extract data from torch Variable, move to cpu, convert to numpy arrays
            output_batch = output_batch.data.cpu().numpy()
            labels_batch = labels_batch.data.cpu().numpy()
            predict_correct_batch = (np.argmax(output_batch, axis=1) == labels_batch).astype(int)
            predict_correct.append(np.reshape(predict_correct_batch, (labels_batch.size, 1)))
            t.update()
    softmax_scores = np.vstack(softmax_scores)
    predict_correct = np.vstack(predict_correct)
    return softmax_scores, predict_correct, confusion_matrix.value().astype(int)
示例6: get_metrics
# 需要导入模块: from torchnet import meter [as 别名]
# 或者: from torchnet.meter import ConfusionMeter [as 别名]
def get_metrics(model, criterion, dataloaders, dataset_sizes, phase='valid'):
    '''
    Loops over phase (train or valid) set to determine acc, loss and
    confusion meter of the model.

    Args:
        model: network producing per-image scores; outputs are mean-pooled
            into a single score per batch item group before thresholding.
        criterion: loss callable taking (outputs, labels, phase).
        dataloaders: dict of phase -> DataLoader yielding dicts with
            'label' and 'images' keys.
        dataset_sizes: dict of phase -> number of samples, used to average.
        phase: which split to evaluate ('train' or 'valid').
    '''
    confusion_matrix = meter.ConfusionMeter(2, normalized=True)
    running_loss = 0.0
    running_corrects = 0
    for i, data in enumerate(dataloaders[phase]):
        print(i, end='\r')
        labels = data['label'].type(torch.FloatTensor)
        inputs = data['images'][0]
        # wrap them in Variable
        inputs = Variable(inputs.cuda())
        labels = Variable(labels.cuda())
        # forward
        outputs = model(inputs)
        outputs = torch.mean(outputs)
        loss = criterion(outputs, labels, phase)
        # statistics
        # BUG FIX: `loss.data[0]` raises on PyTorch >= 0.5 (0-dim tensors are
        # no longer indexable); `.item()` is the supported scalar accessor and
        # matches the `.item()` usage elsewhere in this file.
        running_loss += loss.item() * inputs.size(0)
        preds = (outputs.data > 0.5).type(torch.cuda.FloatTensor)
        # `.item()` so running_corrects is a Python number, not a tensor,
        # making the accuracy below true float division.
        running_corrects += torch.sum(preds == labels.data).item()
        confusion_matrix.add(preds, labels.data)
    loss = running_loss / dataset_sizes[phase]
    acc = running_corrects / dataset_sizes[phase]
    print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, loss, acc))
    print('Confusion Meter:\n', confusion_matrix.value())
示例7: evaluate_acc
# 需要导入模块: from torchnet import meter [as 别名]
# 或者: from torchnet.meter import ConfusionMeter [as 别名]
def evaluate_acc(name, loader, vcoab, emb, lang, F_s, F_p, C):
    """Compute tagging/classification accuracy for language ``lang``.

    Args:
        name: label used in the log line.
        loader: yields ((inputs, lengths, mask, chars, char_lengths), targets).
        vcoab: unused here (NOTE(review): likely a typo for ``vocab``;
            renaming would break keyword callers, so it is left as-is).
        emb: embedding module called as emb(lang, inputs, chars, char_lengths).
        lang: language identifier forwarded to ``emb``.
        F_s: shared feature extractor; used only if opt.shared_hidden_size > 0.
        F_p: private (per-language) feature extractor, or falsy for an
            unlabeled language (zero features are substituted).
        C: classifier over (shared_features, lang_features[, lengths]).

    Returns:
        float: fraction ``correct / total``.
    """
    emb.eval()
    if F_s:
        F_s.eval()
    if F_p:
        F_p.eval()
    C.eval()
    it = iter(loader)
    correct = 0
    total = 0
    confusion = ConfusionMeter(opt.num_labels)
    with torch.no_grad():
        for inputs, targets in tqdm(it, ascii=True):
            inputs, lengths, mask, chars, char_lengths = inputs
            embeds = (emb(lang, inputs, chars, char_lengths), lengths)
            shared_features, lang_features = None, None
            if opt.shared_hidden_size > 0:
                shared_features = F_s(embeds)
            if opt.private_hidden_size > 0:
                if not F_p:
                    # unlabeled lang
                    # NOTE(review): zeros shaped (batch, seq_len, private_hidden_size)
                    # — implies targets are 2-D (sequence labels); confirm with callers.
                    lang_features = torch.zeros(targets.size(0),
                        targets.size(1), opt.private_hidden_size).to(opt.device)
                else:
                    if opt.Fp_MoE:
                        # Mixture-of-experts F_p also returns gate outputs (unused here).
                        lang_features, gate_outputs = F_p(embeds)
                    else:
                        lang_features = F_p(embeds)
            if opt.C_MoE:
                # MoE classifier needs lengths and returns (outputs, gate_outputs).
                outputs, _ = C((shared_features, lang_features, lengths))
            else:
                outputs = C((shared_features, lang_features))
            # argmax over the last dim (works for both 2-D and 3-D outputs).
            _, pred = torch.max(outputs, -1)
            confusion.add(pred.detach(), targets.detach())
            # NOTE(review): total counts only the batch dim while `correct` sums
            # element-wise matches; if targets are 2-D this mixes units — verify.
            total += targets.size(0)
            correct += (pred == targets).sum().item()
    acc = correct / total
    log.info('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))
    log.debug(confusion.conf)
    return acc