This page collects typical usage examples of the Python method ignite.engine.create_supervised_evaluator. If you have been wondering what engine.create_supervised_evaluator does, how to call it, or what working code that uses it looks like, the curated examples below should help. You can also explore the containing module, ignite.engine, for further usage examples.
The following 10 code examples of engine.create_supervised_evaluator are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system surface better Python code samples.
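Before diving in, here is a minimal, self-contained sketch of the core pattern (a toy linear model and an in-memory dataset stand in for a real setup):

import torch
from torch.nn import Linear
from ignite.engine import create_supervised_evaluator
from ignite.metrics import MeanSquaredError

# Toy model and data for illustration only.
model = Linear(1, 1)
data = [(torch.tensor([[1.0]]), torch.tensor([[2.0]]))]

# The evaluator runs the model without gradient tracking and aggregates
# the attached metrics over the supplied data.
evaluator = create_supervised_evaluator(model, metrics={"mse": MeanSquaredError()})
state = evaluator.run(data)
print(state.metrics["mse"])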
Example 1: objective

# Required import: from ignite import engine [as alias]
# Or: from ignite.engine import create_supervised_evaluator [as alias]
def objective(trial):
    # Create a convolutional neural network.
    model = Net(trial)

    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda"
        model.cuda(device)

    optimizer = Adam(model.parameters())
    trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
    evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy()}, device=device)

    # Register a pruning handler to the evaluator.
    pruning_handler = optuna.integration.PyTorchIgnitePruningHandler(trial, "accuracy", trainer)
    evaluator.add_event_handler(Events.COMPLETED, pruning_handler)

    # Load MNIST dataset.
    train_loader, val_loader = get_data_loaders(TRAIN_BATCH_SIZE, VAL_BATCH_SIZE)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_results(engine):
        evaluator.run(val_loader)
        validation_acc = evaluator.state.metrics["accuracy"]
        print("Epoch: {} Validation accuracy: {:.2f}".format(engine.state.epoch, validation_acc))

    trainer.run(train_loader, max_epochs=EPOCHS)

    evaluator.run(val_loader)
    return evaluator.state.metrics["accuracy"]
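This objective is consumed by an Optuna study, which prunes unpromising trials via the handler registered above. A minimal driver sketch (the pruner choice and trial count are illustrative):

import optuna

study = optuna.create_study(direction="maximize", pruner=optuna.pruners.MedianPruner())
study.optimize(objective, n_trials=100)
print(study.best_trial.params)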
Example 2: create_evaluators

# Required import: from ignite import engine [as alias]
# Or: from ignite.engine import create_supervised_evaluator [as alias]
def create_evaluators(model, metrics, config):
    model_output_transform = getattr(config, "model_output_transform", lambda x: x)
    evaluator_args = dict(
        model=model,
        metrics=metrics,
        device=config.device,
        non_blocking=True,
        prepare_batch=config.prepare_batch,
        output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y),
    )
    # Two independent evaluators built from the same arguments: one for the
    # training split, one for validation.
    train_evaluator = create_supervised_evaluator(**evaluator_args)
    evaluator = create_supervised_evaluator(**evaluator_args)

    return evaluator, train_evaluator
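For context: the evaluator calls output_transform(x, y, y_pred) on every batch and stores the result in engine.state.output; the default transform returns (y_pred, y), which is the shape the built-in metrics expect. The wrapper above only post-processes y_pred before handing that pair to the metrics. A usage sketch, assuming a config object and a model whose forward returns (logits, aux):

# Hypothetical: strip an auxiliary output so the metrics only see the logits.
config.model_output_transform = lambda y_pred: y_pred[0]
evaluator, train_evaluator = create_evaluators(model, {"accuracy": Accuracy()}, config)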
Example 3: _test_create_supervised_evaluator

# Required import: from ignite import engine [as alias]
# Or: from ignite.engine import create_supervised_evaluator [as alias]
def _test_create_supervised_evaluator(
    model_device: Optional[str] = None, evaluator_device: Optional[str] = None, trace: bool = False
):
    model = Linear(1, 1)

    if model_device:
        model.to(model_device)

    model.weight.data.zero_()
    model.bias.data.zero_()

    if trace:
        example_input = torch.randn(1, 1)
        model = torch.jit.trace(model, example_input)

    evaluator = create_supervised_evaluator(model, device=evaluator_device)

    x = torch.tensor([[1.0], [2.0]])
    y = torch.tensor([[3.0], [5.0]])
    data = [(x, y)]

    # The run only succeeds when the model and evaluator devices are compatible.
    if model_device == evaluator_device or ((model_device == "cpu") ^ (evaluator_device == "cpu")):
        state = evaluator.run(data)

        y_pred, y = state.output

        assert y_pred[0, 0].item() == approx(0.0)
        assert y_pred[1, 0].item() == approx(0.0)
        assert y[0, 0].item() == approx(3.0)
        assert y[1, 0].item() == approx(5.0)

        # Evaluation must not modify the model parameters.
        assert model.weight.data[0, 0].item() == approx(0.0)
        assert model.bias.item() == approx(0.0)
    else:
        with pytest.raises(RuntimeError, match=r"device type"):
            evaluator.run(data)
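The actual test suite would call this parametrized helper from concrete test cases; a sketch of such wrappers (test names are illustrative):

def test_create_supervised_evaluator():
    _test_create_supervised_evaluator()

def test_create_supervised_evaluator_traced_on_cpu():
    _test_create_supervised_evaluator(model_device="cpu", evaluator_device="cpu", trace=True)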
Example 4: test_create_supervised_evaluator_with_metrics

# Required import: from ignite import engine [as alias]
# Or: from ignite.engine import create_supervised_evaluator [as alias]
def test_create_supervised_evaluator_with_metrics():
    model = Linear(1, 1)
    model.weight.data.zero_()
    model.bias.data.zero_()

    evaluator = create_supervised_evaluator(model, metrics={"mse": MeanSquaredError()})

    x = torch.tensor([[1.0], [2.0]])
    y = torch.tensor([[3.0], [4.0]])
    data = [(x, y)]

    state = evaluator.run(data)
    # With zeroed weights every prediction is 0, so MSE = ((0 - 3)^2 + (0 - 4)^2) / 2 = 12.5.
    assert state.metrics["mse"] == 12.5
Example 5: training

# Required import: from ignite import engine [as alias]
# Or: from ignite.engine import create_supervised_evaluator [as alias]
def training(local_rank, config):
    # Setup dataflow and model
    train_loader, val_loader = get_dataflow(config)
    model, optimizer, criterion, lr_scheduler = initialize(config)

    # Setup model trainer and evaluator
    trainer = create_trainer(model, optimizer, criterion, lr_scheduler, config)
    evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy()}, device=idist.device())

    # Run model evaluation every 3 epochs and show results
    @trainer.on(Events.EPOCH_COMPLETED(every=3))
    def evaluate_model():
        state = evaluator.run(val_loader)
        if idist.get_rank() == 0:
            print(state.metrics)

    # Setup tensorboard experiment tracking
    if idist.get_rank() == 0:
        tb_logger = common.setup_tb_logging(
            config.get("output_path", "output"), trainer, optimizer, evaluators={"validation": evaluator},
        )

    trainer.run(train_loader, max_epochs=config.get("max_epochs", 3))

    if idist.get_rank() == 0:
        tb_logger.close()

# slide 3 ####################################################################

# Simply run everything on your infrastructure
# --- Single computation device ---
# $ python main.py
#
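The training(local_rank, config) signature is designed to be launched through ignite.distributed; a minimal driver sketch for a single-process run (backend=None is the non-distributed mode, and the config values here are illustrative):

import ignite.distributed as idist

config = {"max_epochs": 3, "output_path": "output"}
with idist.Parallel(backend=None) as parallel:
    parallel.run(training, config)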
Example 6: train

# Required import: from ignite import engine [as alias]
# Or: from ignite.engine import create_supervised_evaluator [as alias]
def train():
    writer = SummaryWriter()

    net, optimiser, lr_scheduler, train_loader, val_loader = cifar10_experiment()

    # Pre-training pruning using SNIP
    keep_masks = SNIP(net, 0.05, train_loader, device)  # TODO: shuffle?
    apply_prune_mask(net, keep_masks)

    trainer = create_supervised_trainer(net, optimiser, F.nll_loss, device)
    evaluator = create_supervised_evaluator(net, {
        'accuracy': Accuracy(),
        'nll': Loss(F.nll_loss)
    }, device)

    pbar = ProgressBar()
    pbar.attach(trainer)

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        lr_scheduler.step()

        iter_in_epoch = (engine.state.iteration - 1) % len(train_loader) + 1
        if engine.state.iteration % LOG_INTERVAL == 0:
            # pbar.log_message("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}"
            #                  "".format(engine.state.epoch, iter_in_epoch, len(train_loader), engine.state.output))
            writer.add_scalar("training/loss", engine.state.output,
                              engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_epoch(engine):
        evaluator.run(val_loader)

        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']

        # pbar.log_message("Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
        #                  .format(engine.state.epoch, avg_accuracy, avg_nll))

        writer.add_scalar("validation/loss", avg_nll, engine.state.iteration)
        writer.add_scalar("validation/accuracy", avg_accuracy,
                          engine.state.iteration)

    trainer.run(train_loader, EPOCHS)

    # Let's look at the final weights
    # for name, param in net.named_parameters():
    #     if name.endswith('weight'):
    #         writer.add_histogram(name, param)

    writer.close()
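SNIP and apply_prune_mask come from the surrounding repository, not from ignite. As a rough illustration of what applying a keep-mask can involve (a sketch assuming keep_masks aligns one-to-one with the prunable layers, not the repository's actual implementation):

import torch.nn as nn

def apply_prune_mask_sketch(net, keep_masks):
    prunable = [m for m in net.modules() if isinstance(m, (nn.Conv2d, nn.Linear))]
    for layer, mask in zip(prunable, keep_masks):
        # Zero the pruned weights and block their future gradient updates.
        layer.weight.data.mul_(mask)
        layer.weight.register_hook(lambda grad, mask=mask: grad * mask)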
Example 7: run

# Required import: from ignite import engine [as alias]
# Or: from ignite.engine import create_supervised_evaluator [as alias]
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir):
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
    model = Net()
    writer = SummaryWriter(log_dir=log_dir)

    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda"

    model.to(device)  # Move model before creating optimizer
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    criterion = nn.NLLLoss()
    trainer = create_supervised_trainer(model, optimizer, criterion, device=device)

    val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
    evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)

    @trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
    def log_training_loss(engine):
        print(
            "Epoch[{}] Iteration[{}/{}] Loss: {:.2f}"
            "".format(engine.state.epoch, engine.state.iteration, len(train_loader), engine.state.output)
        )
        writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        print(
            "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
                engine.state.epoch, avg_accuracy, avg_nll
            )
        )
        writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
        writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        print(
            "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
                engine.state.epoch, avg_accuracy, avg_nll
            )
        )
        writer.add_scalar("validation/avg_loss", avg_nll, engine.state.epoch)
        writer.add_scalar("validation/avg_accuracy", avg_accuracy, engine.state.epoch)

    # kick everything off
    trainer.run(train_loader, max_epochs=epochs)

    writer.close()
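In the original script these arguments come from the command line; an equivalent direct invocation with illustrative values:

if __name__ == "__main__":
    run(train_batch_size=64, val_batch_size=1000, epochs=10,
        lr=0.01, momentum=0.5, log_interval=10, log_dir="tensorboard_logs")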
Example 8: run

# Required import: from ignite import engine [as alias]
# Or: from ignite.engine import create_supervised_evaluator [as alias]
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
    model = Net()

    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda"

    model.to(device)  # Move model before creating optimizer
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    criterion = nn.NLLLoss()
    trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
    trainer.logger = setup_logger("trainer")

    val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
    evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)
    evaluator.logger = setup_logger("evaluator")

    desc = "ITERATION - loss: {:.2f}"
    pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=desc.format(0))

    @trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
    def log_training_loss(engine):
        pbar.desc = desc.format(engine.state.output)
        pbar.update(log_interval)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        pbar.refresh()
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        tqdm.write(
            "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
                engine.state.epoch, avg_accuracy, avg_nll
            )
        )

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        tqdm.write(
            "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
                engine.state.epoch, avg_accuracy, avg_nll
            )
        )
        pbar.n = pbar.last_print_n = 0

    @trainer.on(Events.EPOCH_COMPLETED | Events.COMPLETED)
    def log_time(engine):
        tqdm.write(
            "{} took {} seconds".format(trainer.last_event_name.name, trainer.state.times[trainer.last_event_name.name])
        )

    trainer.run(train_loader, max_epochs=epochs)
    pbar.close()
Example 9: run

# Required import: from ignite import engine [as alias]
# Or: from ignite.engine import create_supervised_evaluator [as alias]
def run(train_batch_size, val_batch_size, epochs, lr, momentum, display_gpu_info):
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
    model = Net()

    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda"

    model.to(device)  # Move model before creating optimizer
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
    evaluator = create_supervised_evaluator(
        model, metrics={"accuracy": Accuracy(), "nll": Loss(F.nll_loss)}, device=device
    )

    RunningAverage(output_transform=lambda x: x).attach(trainer, "loss")

    if display_gpu_info:
        from ignite.contrib.metrics import GpuInfo

        GpuInfo().attach(trainer, name="gpu")

    pbar = ProgressBar(persist=True)
    pbar.attach(trainer, metric_names="all")

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        pbar.log_message(
            "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
                engine.state.epoch, avg_accuracy, avg_nll
            )
        )

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics["accuracy"]
        avg_nll = metrics["nll"]
        pbar.log_message(
            "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
                engine.state.epoch, avg_accuracy, avg_nll
            )
        )
        pbar.n = pbar.last_print_n = 0

    trainer.run(train_loader, max_epochs=epochs)
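Note that GpuInfo requires the pynvml package and a visible NVIDIA device; a defensive variant of the attachment above (hypothetical guard, for illustration):

if display_gpu_info:
    try:
        from ignite.contrib.metrics import GpuInfo
        GpuInfo().attach(trainer, name="gpu")
    except (ImportError, RuntimeError):
        pass  # pynvml not installed or no NVIDIA GPU visible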
Example 10: main

# Required import: from ignite import engine [as alias]
# Or: from ignite.engine import create_supervised_evaluator [as alias]
def main(dataset_path, batch_size=256, max_epochs=10, opt="O1"):
    assert torch.cuda.is_available()
    assert torch.backends.cudnn.enabled, "NVIDIA/Apex:Amp requires cudnn backend to be enabled."
    torch.backends.cudnn.benchmark = True

    device = "cuda"
    train_loader, test_loader, eval_train_loader = get_train_eval_loaders(dataset_path, batch_size=batch_size)

    model = wide_resnet50_2(num_classes=100).to(device)
    optimizer = SGD(model.parameters(), lr=0.01)
    criterion = CrossEntropyLoss().to(device)

    model, optimizer = amp.initialize(model, optimizer, opt_level=opt)

    def train_step(engine, batch):
        x = convert_tensor(batch[0], device, non_blocking=True)
        y = convert_tensor(batch[1], device, non_blocking=True)

        optimizer.zero_grad()
        y_pred = model(x)
        loss = criterion(y_pred, y)

        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        optimizer.step()

        return loss.item()

    trainer = Engine(train_step)
    timer = Timer(average=True)
    timer.attach(trainer, step=Events.EPOCH_COMPLETED)
    ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})

    metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}
    evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)

    def log_metrics(engine, title):
        for name in metrics:
            print("\t{} {}: {:.2f}".format(title, name, engine.state.metrics[name]))

    @trainer.on(Events.COMPLETED)
    def run_validation(_):
        print("- Mean elapsed time for 1 epoch: {}".format(timer.value()))
        print("- Metrics:")
        with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"):
            evaluator.run(eval_train_loader)
        with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Test"):
            evaluator.run(test_loader)

    trainer.run(train_loader, max_epochs=max_epochs)
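Apex's amp is the legacy mixed-precision API; since PyTorch 1.6 the same step can be written with the built-in torch.cuda.amp instead. A sketch of the equivalent train_step (assuming the same model, optimizer, criterion, and convert_tensor as above):

scaler = torch.cuda.amp.GradScaler()

def train_step_native_amp(engine, batch):
    x = convert_tensor(batch[0], device, non_blocking=True)
    y = convert_tensor(batch[1], device, non_blocking=True)

    optimizer.zero_grad()
    # Run the forward pass in mixed precision, then scale the loss so that
    # small gradients stay representable in float16.
    with torch.cuda.amp.autocast():
        y_pred = model(x)
        loss = criterion(y_pred, y)
    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()
    return loss.item()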