本文整理汇总了Python中tensorboardX.SummaryWriter.add_scalar方法的典型用法代码示例。如果您正苦于以下问题:Python SummaryWriter.add_scalar方法的具体用法?Python SummaryWriter.add_scalar怎么用?Python SummaryWriter.add_scalar使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorboardX.SummaryWriter
的用法示例。
在下文中一共展示了SummaryWriter.add_scalar方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: TensorBoard
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
class TensorBoard(Callback):
    """Callback that logs the model graph, epoch metrics, and parameter
    histograms to TensorBoard via tensorboardX.

    Metrics/histograms are written only every `update_frequency` epochs.
    """
    # TODO: add option to write images; find fix for graph

    def __init__(self, log_dir, update_frequency=10):
        # BUG FIX: the original called super(Callback, self).__init__(),
        # which looks up __init__ *above* Callback in the MRO and therefore
        # skips Callback's own initializer. Name the subclass instead.
        super(TensorBoard, self).__init__()
        self.log_dir = log_dir
        self.writer = None  # created lazily in on_train_begin
        self.update_frequency = update_frequency

    def on_train_begin(self, **_):
        # One timestamped run directory per training session.
        self.writer = SummaryWriter(os.path.join(self.log_dir, datetime.datetime.now().__str__()))
        # Trace the graph with a random input of the model's declared shape.
        rndm_input = torch.autograd.Variable(torch.rand(1, *self.model.input_shape), requires_grad=True).to(self.logger['device'])
        # fwd_pass = self.model(rndm_input)
        self.writer.add_graph(self.model, rndm_input)
        return self

    def on_epoch_end(self, **_):
        if (self.logger['epoch'] % self.update_frequency) == 0:
            epoch_metrics = self.logger['epoch_metrics'][self.logger['epoch']]
            # BUG FIX: dict.iteritems() is Python 2 only and raises
            # AttributeError on Python 3; use items().
            for e_metric, e_metric_dct in epoch_metrics.items():
                for e_metric_split, e_metric_val in e_metric_dct.items():
                    self.writer.add_scalar('{}/{}'.format(e_metric_split, e_metric), e_metric_val, self.logger['epoch'])
            # Histogram of every parameter tensor, grouped by module path.
            for name, param in self.model.named_parameters():
                self.writer.add_histogram(name.replace('.', '/'), param.clone().cpu().data.numpy(), self.logger['epoch'])
        return self

    def on_train_end(self, **_):
        return self.writer.close()
示例2: TensorBoardReporting
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
class TensorBoardReporting(ReportingHook):
    """Log results to tensorboard.
    Writes tensorboard logs to a directory specified in the `mead-settings`
    section for tensorboard. Otherwise it defaults to `runs`.
    """

    def __init__(self, **kwargs):
        super(TensorBoardReporting, self).__init__(**kwargs)
        from tensorboardX import SummaryWriter
        # The base dir is usually the directory the model is saved into.
        root = kwargs.get('base_dir', '.')
        log_root = os.path.expanduser(kwargs.get('log_dir', 'runs'))
        if not os.path.isabs(log_root):
            log_root = os.path.join(root, log_root)
        # Each individual run gets its own subdirectory; the pid suffix keeps
        # concurrent runs from colliding.
        run_name = kwargs.get('run_dir')
        pid = str(os.getpid())
        run_name = pid if run_name is None else '{}-{}'.format(run_name, pid)
        flush_secs = int(kwargs.get('flush_secs', 2))
        self._log = SummaryWriter(os.path.join(log_root, run_name), flush_secs=flush_secs)

    def step(self, metrics, tick, phase, tick_type=None, **kwargs):
        tick_type = ReportingHook._infer_tick_type(phase, tick_type)
        for metric, value in metrics.items():
            self._log.add_scalar("{}/{}/{}".format(phase, tick_type, metric), value, tick)
示例3: __init__
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
class TBVisualizer:
    """Mirrors training progress to tensorboard (images + scalars) and to a
    plain-text loss log under the experiment's checkpoint directory."""

    def __init__(self, opt):
        self._opt = opt
        self._save_path = os.path.join(opt.checkpoints_dir, opt.name)
        self._log_path = os.path.join(self._save_path, 'loss_log2.txt')
        self._tb_path = os.path.join(self._save_path, 'summary.json')
        self._writer = SummaryWriter(self._save_path)
        with open(self._log_path, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)

    def __del__(self):
        self._writer.close()

    def display_current_results(self, visuals, it, is_train, save_visuals=False):
        split = 'Train' if is_train else 'Test'
        for label, image_numpy in visuals.items():
            sum_name = '{}/{}'.format(split, label)
            self._writer.add_image(sum_name, image_numpy, it)
            if save_visuals:
                util.save_image(image_numpy,
                                os.path.join(self._opt.checkpoints_dir, self._opt.name,
                                             'event_imgs', sum_name, '%08d.png' % it))
        # Keep a JSON snapshot of all scalars alongside the event files.
        self._writer.export_scalars_to_json(self._tb_path)

    def plot_scalars(self, scalars, it, is_train):
        split = 'Train' if is_train else 'Test'
        for label, scalar in scalars.items():
            self._writer.add_scalar('{}/{}'.format(split, label), scalar, it)

    def print_current_train_errors(self, epoch, i, iters_per_epoch, errors, t, visuals_were_stored):
        log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")
        visuals_info = "v" if visuals_were_stored else ""
        message = '%s (T%s, epoch: %d, it: %d/%d, t/smpl: %.3fs) ' % (log_time, visuals_info, epoch, i, iters_per_epoch, t)
        message += ''.join('%s:%.3f ' % (k, v) for k, v in errors.items())
        print(message)
        with open(self._log_path, "a") as log_file:
            log_file.write('%s\n' % message)

    def print_current_validate_errors(self, epoch, errors, t):
        log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")
        message = '%s (V, epoch: %d, time_to_val: %ds) ' % (log_time, epoch, t)
        message += ''.join('%s:%.3f ' % (k, v) for k, v in errors.items())
        print(message)
        with open(self._log_path, "a") as log_file:
            log_file.write('%s\n' % message)

    def save_images(self, visuals):
        for label, image_numpy in visuals.items():
            target = os.path.join(self._save_path, "samples", '%s.png' % label)
            util.save_image(image_numpy, target)
示例4: log_to_tensorboard
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
def log_to_tensorboard(self, writer: SummaryWriter, epoch: int) -> None:
    """Log the most recent TD and reward losses as scalars for `epoch`,
    mapping missing (None) or NaN values to 0.0 so the plots stay contiguous."""

    def _sanitize(v: Optional[float]) -> float:
        return 0.0 if v is None or math.isnan(v) else v

    metrics = (
        ("Training/td_loss", self.get_recent_td_loss()),
        ("Training/reward_loss", self.get_recent_reward_loss()),
    )
    for tag, value in metrics:
        writer.add_scalar(tag, _sanitize(value), epoch)
示例5: train
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
def train(self, epoch_to_restore=0):
    """Train the generator with an L1 reconstruction loss on (z, x) pairs.

    Runs indefinitely until interrupted; every `self.nb_epochs_to_save`
    epochs it logs a grid of generations from a fixed batch to tensorboard
    and checkpoints the model. The writer is always closed in `finally`.

    :param epoch_to_restore: if > 0, resume from that saved epoch's weights;
        otherwise apply fresh `weights_init`.
    """
    g = Generator(self.nb_channels_first_layer, self.dim)

    if epoch_to_restore > 0:
        # Resume from an existing checkpoint.
        filename_model = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch_to_restore))
        g.load_state_dict(torch.load(filename_model))
    else:
        g.apply(weights_init)

    g.cuda()
    g.train()

    dataset = EmbeddingsImagesDataset(self.dir_z_train, self.dir_x_train)
    dataloader = DataLoader(dataset, self.batch_size, shuffle=True, num_workers=4, pin_memory=True)
    # Fixed batch of 16 embeddings, reused each save-cycle so the logged
    # image grids are comparable across epochs.
    fixed_dataloader = DataLoader(dataset, 16)
    fixed_batch = next(iter(fixed_dataloader))

    criterion = torch.nn.L1Loss()

    optimizer = optim.Adam(g.parameters())
    writer = SummaryWriter(self.dir_logs)

    try:
        epoch = epoch_to_restore
        while True:
            g.train()
            for _ in range(self.nb_epochs_to_save):
                epoch += 1
                for idx_batch, current_batch in enumerate(tqdm(dataloader)):
                    g.zero_grad()
                    # NOTE(review): Variable is the pre-0.4 PyTorch API;
                    # kept as-is since the surrounding code targets it.
                    x = Variable(current_batch['x']).type(torch.FloatTensor).cuda()
                    z = Variable(current_batch['z']).type(torch.FloatTensor).cuda()
                    g_z = g.forward(z)

                    loss = criterion(g_z, x)
                    loss.backward()
                    optimizer.step()

                # Logs the last batch's loss once per epoch (step = epoch).
                writer.add_scalar('train_loss', loss, epoch)

            # Every nb_epochs_to_save epochs: render the fixed batch in eval
            # mode and checkpoint the current weights.
            z = Variable(fixed_batch['z']).type(torch.FloatTensor).cuda()
            g.eval()
            g_z = g.forward(z)
            images = make_grid(g_z.data[:16], nrow=4, normalize=True)
            writer.add_image('generations', images, epoch)

            filename = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch))
            torch.save(g.state_dict(), filename)
    finally:
        print('[*] Closing Writer.')
        writer.close()
示例6: test_writing_stack
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
def test_writing_stack(self):
    """Nested writer contexts form a stack: each add_scalar goes to the
    innermost active writer only."""
    with TemporaryDirectory() as dir_outer, TemporaryDirectory() as dir_inner:
        outer_writer = SummaryWriter(dir_outer)
        outer_writer.add_scalar = MagicMock()
        inner_writer = SummaryWriter(dir_inner)
        inner_writer.add_scalar = MagicMock()
        with summary_writer_context(outer_writer):
            with summary_writer_context(inner_writer):
                SummaryWriterContext.add_scalar("test2", torch.ones(1))
            # Inner context popped: this one lands on the outer writer.
            SummaryWriterContext.add_scalar("test1", torch.zeros(1))
        outer_writer.add_scalar.assert_called_once_with(
            "test1", torch.zeros(1), global_step=0
        )
        inner_writer.add_scalar.assert_called_once_with(
            "test2", torch.ones(1), global_step=0
        )
示例7: test_swallowing_exception
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
def test_swallowing_exception(self):
    """An exception type registered in `exceptions_to_ignore` is swallowed
    by the context instead of propagating."""
    with TemporaryDirectory() as log_dir:
        mock_writer = SummaryWriter(log_dir)
        mock_writer.add_scalar = MagicMock(side_effect=NotImplementedError("test"))
        mock_writer.exceptions_to_ignore = (NotImplementedError, KeyError)
        with summary_writer_context(mock_writer):
            # Raises NotImplementedError internally; the context swallows it
            # because the type is whitelisted above.
            SummaryWriterContext.add_scalar("test", torch.ones(1))
示例8: test_not_swallowing_exception
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
def test_not_swallowing_exception(self):
    """Exceptions NOT whitelisted via `exceptions_to_ignore` must propagate
    out of the writer context."""
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        writer.add_scalar = MagicMock(side_effect=NotImplementedError("test"))
        # FIX: assertRaisesRegexp is the deprecated Python 2 alias and was
        # removed in Python 3.12; assertRaisesRegex is the supported name
        # (available since Python 3.2), with identical behavior.
        with self.assertRaisesRegex(
            NotImplementedError, "test"
        ), summary_writer_context(writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
示例9: test_writing
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
def test_writing(self):
    """A single add_scalar inside the context is forwarded to the active
    writer with global_step=0."""
    with TemporaryDirectory() as scratch:
        tb_writer = SummaryWriter(scratch)
        tb_writer.add_scalar = MagicMock()
        with summary_writer_context(tb_writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
        tb_writer.add_scalar.assert_called_once_with(
            "test", torch.ones(1), global_step=0
        )
示例10: log_to_tensorboard
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
def log_to_tensorboard(self, writer: SummaryWriter, epoch: int) -> None:
    """Log all reward/value counterfactual-policy-evaluation scores for
    `epoch`, coercing None and NaN to 0.0 before writing."""

    def _clean(v: Optional[float]) -> float:
        return 0.0 if v is None or math.isnan(v) else v

    scores = [
        ("Reward_CPE/Direct Method Reward", self.direct_method.normalized),
        ("Reward_CPE/IPS Reward", self.inverse_propensity.normalized),
        ("Reward_CPE/Doubly Robust Reward", self.doubly_robust.normalized),
        (
            "Value_CPE/Sequential Doubly Robust",
            self.sequential_doubly_robust.normalized,
        ),
        (
            "Value_CPE/Weighted Doubly Robust",
            self.weighted_doubly_robust.normalized,
        ),
        ("Value_CPE/MAGIC Estimator", self.magic.normalized),
    ]
    for tag, score in scores:
        writer.add_scalar(tag, _clean(score), epoch)
示例11: test_global_step
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
def test_global_step(self):
    """increase_global_step() bumps the step recorded with each scalar."""
    with TemporaryDirectory() as scratch:
        tb_writer = SummaryWriter(scratch)
        tb_writer.add_scalar = MagicMock()
        with summary_writer_context(tb_writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
            SummaryWriterContext.increase_global_step()
            SummaryWriterContext.add_scalar("test", torch.zeros(1))
        expected = [
            call("test", torch.ones(1), global_step=0),
            call("test", torch.zeros(1), global_step=1),
        ]
        tb_writer.add_scalar.assert_has_calls(expected)
        self.assertEqual(2, len(tb_writer.add_scalar.mock_calls))
示例12: learn
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
def learn(learning_rate, iterations, x, y, validation=None, stop_early=False, run_comment=''):
    """Fit a single linear layer with BCE-with-logits loss, logging progress
    to tensorboard; optionally stop early when validation loss rises.

    Returns the trained model (rolled back one step if early stopping fired).
    """
    # Define a neural network using high-level modules.
    writer = SummaryWriter(comment=run_comment)
    model = Sequential(
        Linear(len(x[0]), len(y[0]), bias=True)  # n inputs -> 1 output
    )
    loss_fn = BCEWithLogitsLoss(reduction='sum')  # reduction=mean converges slower.
    # TODO: Add an option to twiddle pos_weight, which lets us trade off precision and recall. Maybe also graph using add_pr_curve(), which can show how that tradeoff is going.
    optimizer = Adam(model.parameters(), lr=learning_rate)

    if validation:
        validation_ins, validation_outs = validation
        previous_validation_loss = None

    with progressbar(range(iterations)) as bar:
        for t in bar:
            y_pred = model(x)  # Make predictions.
            loss = loss_fn(y_pred, y)
            writer.add_scalar('loss', loss, t)

            if validation:
                validation_loss = loss_fn(model(validation_ins), validation_outs)
                if stop_early:
                    rose = (previous_validation_loss is not None
                            and previous_validation_loss < validation_loss)
                    if rose:
                        print('Stopping early at iteration {t} because validation error rose.'.format(t=t))
                        # Roll back to the snapshot taken before the rise.
                        model.load_state_dict(previous_model)
                        break
                    else:
                        previous_validation_loss = validation_loss
                        previous_model = model.state_dict()
                writer.add_scalar('validation_loss', validation_loss, t)
                writer.add_scalar('training_accuracy_per_tag', accuracy_per_tag(model, x, y), t)

            optimizer.zero_grad()  # Zero the gradients.
            loss.backward()  # Compute gradients.
            optimizer.step()

            # Horizontal axis is what confidence. Vertical is how many samples were that confidence.
            writer.add_histogram('confidence', confidences(model, x), t)

    writer.close()
    return model
示例13: TensorBoard
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
class TensorBoard(Callback):
    """Callback that mirrors train/val metrics and per-group learning rates
    to a tensorboardX event file under `logdir`."""

    def __init__(self, logdir):
        super().__init__()
        self.logdir = logdir
        self.writer = None  # opened in on_train_begin

    def on_train_begin(self):
        os.makedirs(self.logdir, exist_ok=True)
        self.writer = SummaryWriter(self.logdir)

    def on_epoch_end(self, epoch):
        # Train metrics, then val metrics, then one lr scalar per param group.
        scalars = [('train/{}'.format(k), v)
                   for k, v in self.metrics_collection.train_metrics.items()]
        scalars += [('val/{}'.format(k), v)
                    for k, v in self.metrics_collection.val_metrics.items()]
        scalars += [('group{}/lr'.format(idx), group['lr'])
                    for idx, group in enumerate(self.estimator.optimizer.param_groups)]
        for tag, value in scalars:
            self.writer.add_scalar(tag, float(value), global_step=epoch)

    def on_train_end(self):
        self.writer.close()
示例14: netd
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
# Discriminator update: run every `opt.d_every` iterations.
if (ii + 1) % opt.d_every == 0:
    optimizer_d.zero_grad()
    # Real images should be classified as true.
    output = netd(real_img)
    error_d_real = criterion(output, true_labels)
    error_d_real.backward()
    # Fresh noise -> fake images; detach() stops gradients at the
    # discriminator so the generator is not updated in this step.
    noises.data.copy_(torch.randn(opt.batch_size, opt.nz, 1, 1))
    fake_img = netg(noises).detach()
    fake_output = netd(fake_img)
    error_d_fake = criterion(fake_output, fake_labels)
    error_d_fake.backward()
    error = error_d_real + error_d_fake
    # NOTE(review): tensor.data[0] is the pre-0.4 PyTorch idiom; on modern
    # torch it raises and .item() would be needed — confirm torch version.
    print('error_d:', error.data[0])
    writer.add_scalar('data/error_d', error_d_fake.data[0], ii)
    optimizer_d.step()

# Generator update: run every `opt.g_every` iterations.
if (ii + 1) % opt.g_every == 0:
    optimizer_g.zero_grad()
    noises.data.copy_(torch.randn(opt.batch_size, opt.nz, 1, 1))
    fake_img = netg(noises)
    fake_output = netd(fake_img)
    # The generator wants the discriminator to label its fakes as true.
    error_g = criterion(fake_output, true_labels)
    print('error_g:,', error_g.data[0])
    writer.add_scalar('data/error_g', error_g.data[0], ii)
    error_g.backward()
    # NOTE(review): no optimizer_g.step() in this excerpt — presumably it
    # follows immediately after; confirm against the full script.
示例15: main
# 需要导入模块: from tensorboardX import SummaryWriter [as 别名]
# 或者: from tensorboardX.SummaryWriter import add_scalar [as 别名]
def main():
    """Train the XLSor chest X-ray segmentation network.

    Restores backbone weights from `args.restore_from` (skipping the 'fc'
    head), fine-tunes with SGD under a schedule from `adjust_learning_rate`,
    and periodically logs scalars/images to tensorboard and saves snapshots
    into `args.snapshot_dir`. Relies on module-level `args`, `IMG_MEAN`,
    `torch_ver`, and `start` defined outside this function.
    """
    writer = SummaryWriter(args.snapshot_dir)

    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True

    xlsor = XLSor(num_classes=args.num_classes)
    print(xlsor)

    # Copy every checkpoint tensor except the 'fc' classifier head, so the
    # backbone is restored while the head keeps its fresh initialization.
    saved_state_dict = torch.load(args.restore_from)
    new_params = xlsor.state_dict().copy()
    for i in saved_state_dict:
        i_parts = i.split('.')
        if not i_parts[0] == 'fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
    xlsor.load_state_dict(new_params)

    model = DataParallelModel(xlsor)
    model.train()
    model.float()
    model.cuda()

    criterion = Criterion()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    # max_iters sizes the dataset so one pass over the loader covers the
    # whole training schedule.
    trainloader = data.DataLoader(XRAYDataSet(args.data_dir, args.data_list, max_iters=args.num_steps*args.batch_size, crop_size=input_size,
                                              scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN),
                                  batch_size=args.batch_size, shuffle=True, num_workers=16, pin_memory=True)

    # Only parameters with requires_grad=True are optimized.
    optimizer = optim.SGD([{'params': filter(lambda p: p.requires_grad, xlsor.parameters()), 'lr': args.learning_rate}],
                          lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    optimizer.zero_grad()

    # Upsample predictions back to input resolution for visualization.
    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)

    for i_iter, batch in enumerate(trainloader):
        i_iter += args.start_iters
        images, labels, _, _ = batch
        images = images.cuda()
        labels = labels.float().cuda()
        if torch_ver == "0.3":
            # Legacy PyTorch 0.3 requires explicit Variable wrapping.
            images = Variable(images)
            labels = Variable(labels)

        optimizer.zero_grad()
        lr = adjust_learning_rate(optimizer, i_iter)
        preds = model(images, args.recurrence)

        loss = criterion(preds, labels)
        loss.backward()
        optimizer.step()

        if i_iter % 100 == 0:
            writer.add_scalar('learning_rate', lr, i_iter)
            writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

        if i_iter % 100 == 0:
            # Log de-normalized inputs, ground-truth labels, and thresholded
            # predictions as images.
            images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN)
            if isinstance(preds, list):
                preds = preds[0]
            if isinstance(preds, list):
                # NOTE(review): second unwrap — presumably the multi-GPU
                # gather can return a list of lists; confirm.
                preds = preds[0]
            preds = interp(preds)
            for index, img in enumerate(images_inv):
                writer.add_image('Images/'+str(index), torch.from_numpy(img/255.).permute(2,0,1), i_iter)
                writer.add_image('Labels/'+str(index), labels[index], i_iter)
                writer.add_image('preds/'+str(index), (preds[index]>0.5).float(), i_iter)

        print('iter = {} of {} completed, loss = {}'.format(i_iter, args.num_steps, loss.data.cpu().numpy()))

        if i_iter >= args.num_steps-1:
            # Final snapshot, then stop.
            print('save model ...')
            torch.save(xlsor.state_dict(), osp.join(args.snapshot_dir, 'XLSor_'+str(args.num_steps)+'.pth'))
            break

        if i_iter % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(xlsor.state_dict(), osp.join(args.snapshot_dir, 'XLSor_'+str(i_iter)+'.pth'))

    end = timeit.default_timer()
    # `start` is assumed to be set at module import time — confirm.
    print(end-start, 'seconds')