This article collects typical usage examples of the Python method ignite.metrics.Accuracy. If you have been wondering what metrics.Accuracy does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples in the module ignite.metrics, where this class is defined.
The following presents 15 code examples of metrics.Accuracy, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
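Before the examples, here is a minimal, self-contained sketch of the core Accuracy API, both standalone (update / compute / reset) and attached to an ignite Engine. The toy tensors and the process_function below are illustrative assumptions for this sketch, not taken from any of the examples that follow.

import torch
from ignite.engine import Engine
from ignite.metrics import Accuracy

# Standalone usage: accumulate (y_pred, y) pairs, then compute.
acc = Accuracy()
acc.update((torch.tensor([1, 0, 1, 1]), torch.tensor([1, 0, 0, 1])))
print(acc.compute())  # 0.75
acc.reset()  # clears the accumulated counts

# Attached to an Engine: the engine output must be (or be transformed
# into) a (y_pred, y) pair for the metric to consume.
def process_function(engine, batch):
    y_pred, y = batch  # a real step would run the model here
    return y_pred, y

evaluator = Engine(process_function)
Accuracy().attach(evaluator, "accuracy")
state = evaluator.run([(torch.tensor([1, 0]), torch.tensor([1, 1]))])
print(state.metrics["accuracy"])  # 0.5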
Example 1: test_binary_wrong_inputs
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import pytest
import torch
from ignite.metrics import Accuracy

def test_binary_wrong_inputs():
    acc = Accuracy()
    with pytest.raises(ValueError):
        # y contains values other than 0 and 1
        acc.update((torch.randint(0, 2, size=(10,)).long(), torch.arange(0, 10).long()))
    with pytest.raises(ValueError):
        # y_pred values are not thresholded to 0 and 1
        acc.update((torch.rand(10,), torch.randint(0, 2, size=(10,)).long()))
    with pytest.raises(ValueError):
        # incompatible shapes
        acc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5)).long()))
    with pytest.raises(ValueError):
        # incompatible shapes
        acc.update((torch.randint(0, 2, size=(10, 5, 6)).long(), torch.randint(0, 2, size=(10,)).long()))
    with pytest.raises(ValueError):
        # incompatible shapes
        acc.update((torch.randint(0, 2, size=(10,)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
Example 2: test_multilabel_wrong_inputs
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import pytest
import torch
from ignite.metrics import Accuracy

def test_multilabel_wrong_inputs():
    acc = Accuracy(is_multilabel=True)
    with pytest.raises(ValueError):
        # incompatible shapes for multilabel input
        acc.update((torch.randint(0, 2, size=(10,)), torch.randint(0, 2, size=(10,)).long()))
    with pytest.raises(ValueError):
        # incompatible y_pred (not thresholded to 0 and 1)
        acc.update((torch.rand(10, 5), torch.randint(0, 2, size=(10, 5)).long()))
    with pytest.raises(ValueError):
        # incompatible y (not thresholded to 0 and 1)
        acc.update((torch.randint(0, 5, size=(10, 5, 6)), torch.rand(10)))
    with pytest.raises(ValueError):
        # incompatible binary shapes
        acc.update((torch.randint(0, 2, size=(10, 1)), torch.randint(0, 2, size=(10, 1)).long()))
Example 3: __init__
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
# nn, optim, Dict, Any, Metric, DatasetSplits and RegularizerABC are imported
# in the surrounding project; only the ignite pieces are repeated here.
from ignite.metrics import Accuracy, RunningAverage

def __init__(self,
             model: nn.Module,
             dataset_splits: DatasetSplits,
             loss: nn.Module,
             optimizer: optim.Optimizer,
             metrics: Dict[str, Metric],
             device: str = None,
             num_epochs: int = 1,
             seed: int = None,
             cuda: bool = None,
             loss_accumulation_steps: int = 4,
             scheduler: Any = None,
             regularizer: RegularizerABC = None,
             gradient_clipping: float = 1.0,
             output_transform=None,
             tensorboard_logs: str = None,
             clf_loss_coef: float = 0.1,
             lm_loss_coef: float = 0.9
             ):
    super().__init__(
        model=model,
        dataset_splits=dataset_splits,
        loss=loss,
        optimizer=optimizer,
        metrics=metrics,
        device=device,
        num_epochs=num_epochs,
        seed=seed,
        cuda=cuda,
        loss_accumulation_steps=loss_accumulation_steps,
        scheduler=scheduler,
        regularizer=regularizer,
        gradient_clipping=gradient_clipping,
        output_transform=output_transform,
        tensorboard_logs=tensorboard_logs)
    self.clf_loss_coef = clf_loss_coef
    self.lm_loss_coef = lm_loss_coef
    RunningAverage(Accuracy(output_transform=lambda x: (x[0], x[1]))).attach(self.trainer, 'acc')
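Note: the output_transform argument maps the raw engine output to the (y_pred, y) pair that Accuracy expects; the lambda above assumes the process function returns the logits first and the labels second. Wrapping the metric in RunningAverage then exposes a smoothed 'acc' value on every iteration of self.trainer rather than only at epoch end.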
Example 4: create_supervised_evaluator
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
from ignite.engine import Engine
from ignite.metrics import Accuracy

def create_supervised_evaluator(self):
    if self.device:
        self.model.to(self.device)
    engine = Engine(self.infer_engine)
    Accuracy().attach(engine, "accuracy")
    return engine
Example 5: attach_pbar_and_metrics
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
from ignite.contrib.handlers import ProgressBar
from ignite.metrics import Accuracy, Average

def attach_pbar_and_metrics(trainer, evaluator):
    loss_metric = Average(output_transform=lambda output: output["loss"])
    accuracy_metric = Accuracy(output_transform=lambda output: (output["logit"], output["label"]))
    pbar = ProgressBar()
    loss_metric.attach(trainer, "loss")
    accuracy_metric.attach(trainer, "accuracy")
    accuracy_metric.attach(evaluator, "accuracy")
    pbar.attach(trainer)
Example 6: objective
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import optuna
import torch
import torch.nn.functional as F
from torch.optim import Adam
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy

# Net, get_data_loaders, TRAIN_BATCH_SIZE, VAL_BATCH_SIZE and EPOCHS
# are defined elsewhere in the original script.
def objective(trial):
    # Create a convolutional neural network.
    model = Net(trial)

    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda"
        model.cuda(device)

    optimizer = Adam(model.parameters())
    trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
    evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy()}, device=device)

    # Register a pruning handler to the evaluator.
    pruning_handler = optuna.integration.PyTorchIgnitePruningHandler(trial, "accuracy", trainer)
    evaluator.add_event_handler(Events.COMPLETED, pruning_handler)

    # Load the MNIST dataset.
    train_loader, val_loader = get_data_loaders(TRAIN_BATCH_SIZE, VAL_BATCH_SIZE)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_results(engine):
        evaluator.run(val_loader)
        validation_acc = evaluator.state.metrics["accuracy"]
        print("Epoch: {} Validation accuracy: {:.2f}".format(engine.state.epoch, validation_acc))

    trainer.run(train_loader, max_epochs=EPOCHS)

    evaluator.run(val_loader)
    return evaluator.state.metrics["accuracy"]
Example 7: test_no_update
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import pytest
from ignite.exceptions import NotComputableError
from ignite.metrics import Accuracy

def test_no_update():
    acc = Accuracy()
    # calling compute() before any update() must raise NotComputableError
    with pytest.raises(NotComputableError):
        acc.compute()
Example 8: test__check_shape
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import pytest
import torch
from ignite.metrics import Accuracy

def test__check_shape():
    # _check_shape is the private helper that validates y_pred/y shape compatibility
    acc = Accuracy()
    with pytest.raises(ValueError):
        acc._check_shape((torch.randint(0, 2, size=(10, 1, 5, 12)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
    with pytest.raises(ValueError):
        acc._check_shape((torch.randint(0, 2, size=(10, 1, 6)).long(), torch.randint(0, 2, size=(10, 5, 6)).long()))
    with pytest.raises(ValueError):
        acc._check_shape((torch.randint(0, 2, size=(10, 1)).long(), torch.randint(0, 2, size=(10, 5)).long()))
Example 9: test_binary_input_N
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import pytest
import torch
from sklearn.metrics import accuracy_score
from ignite.metrics import Accuracy

def test_binary_input_N():
    # Binary accuracy on input of shape (N, 1) or (N,)
    def _test():
        acc = Accuracy()

        y_pred = torch.randint(0, 2, size=(10,)).long()
        y = torch.randint(0, 2, size=(10,)).long()
        acc.update((y_pred, y))
        np_y = y.numpy().ravel()
        np_y_pred = y_pred.numpy().ravel()
        assert acc._type == "binary"
        assert isinstance(acc.compute(), float)
        assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())

        # Batched updates
        acc.reset()
        y_pred = torch.randint(0, 2, size=(100,)).long()
        y = torch.randint(0, 2, size=(100,)).long()

        n_iters = 16
        batch_size = y.shape[0] // n_iters + 1
        for i in range(n_iters):
            idx = i * batch_size
            acc.update((y_pred[idx : idx + batch_size], y[idx : idx + batch_size]))

        np_y = y.numpy().ravel()
        np_y_pred = y_pred.numpy().ravel()
        assert acc._type == "binary"
        assert isinstance(acc.compute(), float)
        assert accuracy_score(np_y, np_y_pred) == pytest.approx(acc.compute())

    # check multiple random inputs, as exact random matches are rare
    for _ in range(10):
        _test()
Example 10: test_multiclass_wrong_inputs
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import pytest
import torch
from ignite.metrics import Accuracy

def test_multiclass_wrong_inputs():
    acc = Accuracy()
    with pytest.raises(ValueError):
        # incompatible shapes
        acc.update((torch.rand(10, 5, 4), torch.randint(0, 2, size=(10,)).long()))
    with pytest.raises(ValueError):
        # incompatible shapes
        acc.update((torch.rand(10, 5, 6), torch.randint(0, 5, size=(10, 5)).long()))
    with pytest.raises(ValueError):
        # incompatible shapes
        acc.update((torch.rand(10), torch.randint(0, 5, size=(10, 5, 6)).long()))
Example 11: test_incorrect_type
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import pytest
import torch
from ignite.metrics import Accuracy

def test_incorrect_type():
    acc = Accuracy()

    # Start with binary data
    y_pred = torch.randint(0, 2, size=(4,))
    y = torch.ones(4).long()
    acc.update((y_pred, y))

    # Then feed multiclass data
    y_pred = torch.rand(4, 4)
    y = torch.ones(4).long()
    with pytest.raises(RuntimeError):
        acc.update((y_pred, y))
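Note: Accuracy infers its input type (binary, multiclass or multilabel) from the first update() call and locks it in; a later update with a different type raises a RuntimeError instead of silently mixing incompatible statistics.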
Example 12: training
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.engine import Events, create_supervised_evaluator
from ignite.metrics import Accuracy

# get_dataflow, initialize and create_trainer are defined elsewhere
# in the original script.
def training(local_rank, config):
    # Set up the dataflow and the model, optimizer, criterion and LR scheduler
    train_loader, val_loader = get_dataflow(config)
    model, optimizer, criterion, lr_scheduler = initialize(config)

    # Set up the model trainer and evaluator
    trainer = create_trainer(model, optimizer, criterion, lr_scheduler, config)
    evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy()}, device=idist.device())

    # Run model evaluation every 3 epochs and show the results
    @trainer.on(Events.EPOCH_COMPLETED(every=3))
    def evaluate_model():
        state = evaluator.run(val_loader)
        if idist.get_rank() == 0:
            print(state.metrics)

    # Set up TensorBoard experiment tracking
    if idist.get_rank() == 0:
        tb_logger = common.setup_tb_logging(
            config.get("output_path", "output"), trainer, optimizer, evaluators={"validation": evaluator},
        )

    trainer.run(train_loader, max_epochs=config.get("max_epochs", 3))

    if idist.get_rank() == 0:
        tb_logger.close()

# slide 3 ####################################################################
# Simply run everything on your infrastructure
# --- Single computation device ---
# $ python main.py
#
Example 13: run_inference_test
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import os
from glob import glob

import torch
import monai
from ignite.metrics import Accuracy
# The MONAI import paths below assume the 0.x API that this snippet targets.
from monai.engines import SupervisedEvaluator
from monai.handlers import CheckpointLoader, MeanDice, SegmentationSaver, StatsHandler
from monai.inferers import SlidingWindowInferer
from monai.transforms import (
    Activationsd, AsChannelFirstd, AsDiscreted, Compose,
    KeepLargestConnectedComponentd, LoadNiftid, ScaleIntensityd, ToTensord,
)

def run_inference_test(root_dir, model_file, device=torch.device("cuda:0")):
    images = sorted(glob(os.path.join(root_dir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    val_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]

    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys=["image", "label"]),
            ToTensord(keys=["image", "label"]),
        ]
    )

    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)

    # create a UNet and move it to the target device
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)

    val_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        CheckpointLoader(load_path=f"{model_file}", load_dict={"net": net}),
        SegmentationSaver(
            output_dir=root_dir,
            batch_transform=lambda batch: batch["image_meta_dict"],
            output_transform=lambda output: output["pred"],
        ),
    ]
    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        val_handlers=val_handlers,
    )
    evaluator.run()
    return evaluator.state.best_metric
Example 14: train
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter  # the original project may use tensorboardX instead
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss

# cifar10_experiment, SNIP, apply_prune_mask, device, LOG_INTERVAL and EPOCHS
# come from the surrounding script.
def train():
    writer = SummaryWriter()
    net, optimiser, lr_scheduler, train_loader, val_loader = cifar10_experiment()

    # Pre-training pruning using SNIP
    keep_masks = SNIP(net, 0.05, train_loader, device)  # TODO: shuffle?
    apply_prune_mask(net, keep_masks)

    trainer = create_supervised_trainer(net, optimiser, F.nll_loss, device)
    evaluator = create_supervised_evaluator(net, {
        'accuracy': Accuracy(),
        'nll': Loss(F.nll_loss)
    }, device)

    pbar = ProgressBar()
    pbar.attach(trainer)

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        lr_scheduler.step()
        iter_in_epoch = (engine.state.iteration - 1) % len(train_loader) + 1
        if engine.state.iteration % LOG_INTERVAL == 0:
            # pbar.log_message("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}"
            #                  "".format(engine.state.epoch, iter_in_epoch, len(train_loader), engine.state.output))
            writer.add_scalar("training/loss", engine.state.output,
                              engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_epoch(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        # pbar.log_message("Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
        #                  .format(engine.state.epoch, avg_accuracy, avg_nll))
        writer.add_scalar("validation/loss", avg_nll, engine.state.iteration)
        writer.add_scalar("validation/accuracy", avg_accuracy,
                          engine.state.iteration)

    trainer.run(train_loader, EPOCHS)

    # Let's look at the final weights
    # for name, param in net.named_parameters():
    #     if name.endswith('weight'):
    #         writer.add_histogram(name, param)

    writer.close()
Example 15: run
# Module required: from ignite import metrics [as alias]
# Or: from ignite.metrics import Accuracy [as alias]
import torch
import torch.nn as nn
from torch.optim import SGD
from torch.utils.tensorboard import SummaryWriter
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss

# Net and get_data_loaders are defined elsewhere in the original script.
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir):
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
    model = Net()
    writer = SummaryWriter(log_dir=log_dir)

    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda"

    model.to(device)  # Move the model before creating the optimizer
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    criterion = nn.NLLLoss()
    trainer = create_supervised_trainer(model, optimizer, criterion, device=device)

    val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
    evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)

    @trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
    def log_training_loss(engine):
        print(
            "Epoch[{}] Iteration[{}/{}] Loss: {:.2f}"
            "".format(engine.state.epoch, engine.state.iteration, len(train_loader), engine.state.output)
        )
        writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        print(
            "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
                engine.state.epoch, avg_accuracy, avg_nll
            )
        )
        writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
        writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        print(
            "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
                engine.state.epoch, avg_accuracy, avg_nll
            )
        )
        writer.add_scalar("validation/avg_loss", avg_nll, engine.state.epoch)
        writer.add_scalar("validation/avg_accuracy", avg_accuracy, engine.state.epoch)

    # kick everything off
    trainer.run(train_loader, max_epochs=epochs)

    writer.close()