This article collects typical usage examples of the Python tensorboardX.SummaryWriter class. If you have been wondering what the SummaryWriter class is for, how to use it, or what real-world SummaryWriter code looks like, the curated class examples below may help.
The following shows 15 code examples of the SummaryWriter class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
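Before the examples, here is a minimal sketch of the basic SummaryWriter workflow that all of them build on (the `runs/demo` directory name is just an illustrative choice):

from tensorboardX import SummaryWriter

# Create a writer that stores event files under the given directory.
writer = SummaryWriter('runs/demo')  # illustrative log directory

# Log a scalar value under a tag, once per step.
for step in range(100):
    writer.add_scalar('loss/train', 1.0 / (step + 1), global_step=step)

# Flush and release the event file.
writer.close()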
Example 1: test_add_custom_scalars
def test_add_custom_scalars(self):
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        writer.add_custom_scalars = MagicMock()
        with summary_writer_context(writer):
            SummaryWriterContext.add_custom_scalars_multilinechart(
                ["a", "b"], category="cat", title="title"
            )
            # Raw string so the escaped parentheses are valid regex, not
            # invalid Python string escapes.
            with self.assertRaisesRegexp(
                AssertionError, r"Title \(title\) is already in category \(cat\)"
            ):
                SummaryWriterContext.add_custom_scalars_multilinechart(
                    ["c", "d"], category="cat", title="title"
                )
            SummaryWriterContext.add_custom_scalars_multilinechart(
                ["e", "f"], category="cat", title="title2"
            )
            SummaryWriterContext.add_custom_scalars_multilinechart(
                ["g", "h"], category="cat2", title="title"
            )

        SummaryWriterContext.add_custom_scalars(writer)
        writer.add_custom_scalars.assert_called_once_with(
            {
                "cat": {
                    "title": ["Multiline", ["a", "b"]],
                    "title2": ["Multiline", ["e", "f"]],
                },
                "cat2": {"title": ["Multiline", ["g", "h"]]},
            }
        )
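Example 1 mocks `writer.add_custom_scalars`, which in tensorboardX takes a layout dict mapping categories to chart definitions. A minimal sketch of calling it directly (directory and tag names are illustrative):

from tensorboardX import SummaryWriter

writer = SummaryWriter('runs/layout-demo')  # illustrative
for step in range(10):
    writer.add_scalar('loss/train', 1.0 / (step + 1), step)
    writer.add_scalar('loss/val', 1.5 / (step + 1), step)

# Group both tags into one multiline chart under the "losses" category.
writer.add_custom_scalars(
    {'losses': {'train vs val': ['Multiline', ['loss/train', 'loss/val']]}}
)
writer.close()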
Example 2: __init__
def __init__(self, log_dir, params, up_factor):
    """
    Args:
        log_dir: (str) Path to the folder where the summary files will be
            written. The summary object creates train and val subfolders
            to store the summary files.
        params: (train.utils.Params) The parameters loaded from the
            parameters.json file.
        up_factor: (int) The upscale factor indicating how much the score
            maps need to be upscaled to match the original scale (used
            when superposing the embeddings and score maps onto the input
            images).

    Attributes:
        writer_train: (tensorboardX.writer.SummaryWriter) The tensorboardX
            writer that logs the training information.
        writer_val: (tensorboardX.writer.SummaryWriter) The tensorboardX
            writer that logs the validation information.
        epoch: (int) Stores the current epoch.
        ref_sz: (int) The size in pixels of the reference image.
        srch_sz: (int) The size in pixels of the search image.
        up_factor: (int) The upscale factor. See Args.
    """
    # We use two separate summary writers so that both curves can be
    # plotted on the same chart, as suggested in
    # https://www.quora.com/How-do-you-plot-training-and-validation-loss-on-the-same-graph-using-TensorFlow%E2%80%99s-TensorBoard
    self.writer_train = SummaryWriter(join(log_dir, 'train'))
    self.writer_val = SummaryWriter(join(log_dir, 'val'))
    self.epoch = None
    self.ref_sz = params.reference_sz
    self.srch_sz = params.search_sz
    self.up_factor = up_factor
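The two-writer trick above works because TensorBoard overlays runs found in sibling directories when they log under the same tag. A minimal sketch of the pattern (directory names and dummy values are illustrative):

from os.path import join
from tensorboardX import SummaryWriter

log_dir = 'runs/experiment'  # illustrative
writer_train = SummaryWriter(join(log_dir, 'train'))
writer_val = SummaryWriter(join(log_dir, 'val'))

for epoch in range(10):
    # Same tag in both writers -> both curves appear on one chart.
    writer_train.add_scalar('loss', 1.0 / (epoch + 1), epoch)
    writer_val.add_scalar('loss', 1.2 / (epoch + 1), epoch)

writer_train.close()
writer_val.close()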
Example 3: test_swallowing_exception
def test_swallowing_exception(self):
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        writer.add_scalar = MagicMock(side_effect=NotImplementedError("test"))
        writer.exceptions_to_ignore = (NotImplementedError, KeyError)
        with summary_writer_context(writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
Example 4: TensorBoardReporting
class TensorBoardReporting(ReportingHook):
    """Log results to tensorboard.

    Writes tensorboard logs to a directory specified in the `mead-settings`
    section for tensorboard. Otherwise it defaults to `runs`.
    """
    def __init__(self, **kwargs):
        super(TensorBoardReporting, self).__init__(**kwargs)
        from tensorboardX import SummaryWriter

        # Base dir is often the dir created to save the model into
        base_dir = kwargs.get('base_dir', '.')
        log_dir = os.path.expanduser(kwargs.get('log_dir', 'runs'))
        if not os.path.isabs(log_dir):
            log_dir = os.path.join(base_dir, log_dir)
        # Run dir is the name of an individual run
        run_dir = kwargs.get('run_dir')
        pid = str(os.getpid())
        run_dir = '{}-{}'.format(run_dir, pid) if run_dir is not None else pid
        log_dir = os.path.join(log_dir, run_dir)
        flush_secs = int(kwargs.get('flush_secs', 2))
        self._log = SummaryWriter(log_dir, flush_secs=flush_secs)

    def step(self, metrics, tick, phase, tick_type=None, **kwargs):
        tick_type = ReportingHook._infer_tick_type(phase, tick_type)
        for metric in metrics.keys():
            name = "{}/{}/{}".format(phase, tick_type, metric)
            self._log.add_scalar(name, metrics[metric], tick)
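The `phase/tick_type/metric` naming in `step` exploits TensorBoard's tag hierarchy: the first path segment of a tag becomes a collapsible section in the UI. A minimal sketch of the same idea with plain tensorboardX (names and values are illustrative):

from tensorboardX import SummaryWriter

writer = SummaryWriter('runs/grouping-demo')  # illustrative
for tick in range(5):
    # Both tags land in a single collapsible 'Train' section.
    writer.add_scalar('Train/epoch/loss', 1.0 / (tick + 1), tick)
    writer.add_scalar('Train/epoch/acc', 0.5 + 0.1 * tick, tick)
writer.close()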
Example 5: test_not_swallowing_exception
def test_not_swallowing_exception(self):
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        writer.add_scalar = MagicMock(side_effect=NotImplementedError("test"))
        with self.assertRaisesRegexp(
            NotImplementedError, "test"
        ), summary_writer_context(writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
Example 6: test_writing
def test_writing(self):
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        writer.add_scalar = MagicMock()
        with summary_writer_context(writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
        writer.add_scalar.assert_called_once_with(
            "test", torch.ones(1), global_step=0
        )
Example 7: log_to_tensorboard
def log_to_tensorboard(self, writer: SummaryWriter, epoch: int) -> None:
    def none_to_zero(x: Optional[float]) -> float:
        if x is None or math.isnan(x):
            return 0.0
        return x

    for name, value in [
        ("Training/td_loss", self.get_recent_td_loss()),
        ("Training/reward_loss", self.get_recent_reward_loss()),
    ]:
        writer.add_scalar(name, none_to_zero(value), epoch)
Example 8: one_stage_train
def one_stage_train(myModel, data_reader_trn, my_optimizer,
                    loss_criterion, snapshot_dir, log_dir,
                    i_iter, start_epoch, best_val_accuracy=0, data_reader_eval=None,
                    scheduler=None):
    report_interval = cfg.training_parameters.report_interval
    snapshot_interval = cfg.training_parameters.snapshot_interval
    max_iter = cfg.training_parameters.max_iter

    avg_accuracy = 0
    accuracy_decay = 0.99
    best_epoch = 0
    writer = SummaryWriter(log_dir)
    best_iter = i_iter
    iepoch = start_epoch
    snapshot_timer = Timer('m')
    report_timer = Timer('s')

    while i_iter < max_iter:
        iepoch += 1
        for i, batch in enumerate(data_reader_trn):
            i_iter += 1
            if i_iter > max_iter:
                break

            scheduler.step(i_iter)
            my_optimizer.zero_grad()
            add_graph = False
            scores, total_loss, n_sample = compute_a_batch(batch, myModel, eval_mode=False,
                                                           loss_criterion=loss_criterion,
                                                           add_graph=add_graph, log_dir=log_dir)
            total_loss.backward()
            accuracy = scores / n_sample
            avg_accuracy += (1 - accuracy_decay) * (accuracy - avg_accuracy)
            clip_gradients(myModel, i_iter, writer)
            my_optimizer.step()

            if i_iter % report_interval == 0:
                # total_loss.data[0] is pre-0.4 PyTorch; on newer versions use total_loss.item()
                save_a_report(i_iter, total_loss.data[0], accuracy, avg_accuracy, report_timer,
                              writer, data_reader_eval, myModel, loss_criterion)

            if i_iter % snapshot_interval == 0 or i_iter == max_iter:
                best_val_accuracy, best_epoch, best_iter = save_a_snapshot(snapshot_dir, i_iter, iepoch, myModel,
                                                                           my_optimizer, loss_criterion, best_val_accuracy,
                                                                           best_epoch, best_iter, snapshot_timer,
                                                                           data_reader_eval)

    writer.export_scalars_to_json(os.path.join(log_dir, "all_scalars.json"))
    writer.close()

    print("best_acc:%.6f after epoch: %d/%d at iter %d" % (best_val_accuracy, best_epoch, iepoch, best_iter))
    sys.stdout.flush()
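Note the `export_scalars_to_json` call at the end of Example 8: tensorboardX can dump every scalar logged through a writer to a JSON file for offline analysis. A minimal sketch (path and values are illustrative):

from tensorboardX import SummaryWriter

writer = SummaryWriter('runs/json-demo')  # illustrative
for step in range(3):
    writer.add_scalar('loss', 1.0 / (step + 1), step)
# Dump all scalars logged through this writer so far.
writer.export_scalars_to_json('all_scalars.json')
writer.close()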
Example 9: __init__
class TBVisualizer:
    def __init__(self, opt):
        self._opt = opt
        self._save_path = os.path.join(opt.checkpoints_dir, opt.name)
        self._log_path = os.path.join(self._save_path, 'loss_log2.txt')
        self._tb_path = os.path.join(self._save_path, 'summary.json')
        self._writer = SummaryWriter(self._save_path)

        with open(self._log_path, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)

    def __del__(self):
        self._writer.close()

    def display_current_results(self, visuals, it, is_train, save_visuals=False):
        for label, image_numpy in visuals.items():
            sum_name = '{}/{}'.format('Train' if is_train else 'Test', label)
            self._writer.add_image(sum_name, image_numpy, it)

            if save_visuals:
                util.save_image(image_numpy,
                                os.path.join(self._opt.checkpoints_dir, self._opt.name,
                                             'event_imgs', sum_name, '%08d.png' % it))

        self._writer.export_scalars_to_json(self._tb_path)

    def plot_scalars(self, scalars, it, is_train):
        for label, scalar in scalars.items():
            sum_name = '{}/{}'.format('Train' if is_train else 'Test', label)
            self._writer.add_scalar(sum_name, scalar, it)

    def print_current_train_errors(self, epoch, i, iters_per_epoch, errors, t, visuals_were_stored):
        log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")
        visuals_info = "v" if visuals_were_stored else ""
        message = '%s (T%s, epoch: %d, it: %d/%d, t/smpl: %.3fs) ' % (log_time, visuals_info, epoch, i, iters_per_epoch, t)
        for k, v in errors.items():
            message += '%s:%.3f ' % (k, v)

        print(message)
        with open(self._log_path, "a") as log_file:
            log_file.write('%s\n' % message)

    def print_current_validate_errors(self, epoch, errors, t):
        log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")
        message = '%s (V, epoch: %d, time_to_val: %ds) ' % (log_time, epoch, t)
        for k, v in errors.items():
            message += '%s:%.3f ' % (k, v)

        print(message)
        with open(self._log_path, "a") as log_file:
            log_file.write('%s\n' % message)

    def save_images(self, visuals):
        for label, image_numpy in visuals.items():
            image_name = '%s.png' % label
            save_path = os.path.join(self._save_path, "samples", image_name)
            util.save_image(image_numpy, save_path)
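TBVisualizer feeds numpy images straight to `add_image`. Note that tensorboardX expects images in (C, H, W) layout by default; other layouts need the `dataformats` argument. A minimal sketch (shapes and tags are illustrative):

import numpy as np
from tensorboardX import SummaryWriter

writer = SummaryWriter('runs/image-demo')  # illustrative
chw = np.random.rand(3, 64, 64)            # (C, H, W), the default layout
writer.add_image('Train/sample', chw, 0)
hwc = np.random.rand(64, 64, 3)            # (H, W, C) needs dataformats
writer.add_image('Train/sample_hwc', hwc, 1, dataformats='HWC')
writer.close()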
Example 10: test_global_step
def test_global_step(self):
    with TemporaryDirectory() as tmp_dir:
        writer = SummaryWriter(tmp_dir)
        writer.add_scalar = MagicMock()
        with summary_writer_context(writer):
            SummaryWriterContext.add_scalar("test", torch.ones(1))
            SummaryWriterContext.increase_global_step()
            SummaryWriterContext.add_scalar("test", torch.zeros(1))
        writer.add_scalar.assert_has_calls(
            [
                call("test", torch.ones(1), global_step=0),
                call("test", torch.zeros(1), global_step=1),
            ]
        )
        self.assertEqual(2, len(writer.add_scalar.mock_calls))
Example 11: test_writing_stack
def test_writing_stack(self):
    with TemporaryDirectory() as tmp_dir1, TemporaryDirectory() as tmp_dir2:
        writer1 = SummaryWriter(tmp_dir1)
        writer1.add_scalar = MagicMock()
        writer2 = SummaryWriter(tmp_dir2)
        writer2.add_scalar = MagicMock()
        with summary_writer_context(writer1):
            with summary_writer_context(writer2):
                SummaryWriterContext.add_scalar("test2", torch.ones(1))
            SummaryWriterContext.add_scalar("test1", torch.zeros(1))
        writer1.add_scalar.assert_called_once_with(
            "test1", torch.zeros(1), global_step=0
        )
        writer2.add_scalar.assert_called_once_with(
            "test2", torch.ones(1), global_step=0
        )
Example 12: setup
def setup(self):
    """Sets up the TensorBoard logger."""
    def replace_loggers():
        # Replace all log_* methods with the dummy _nop
        self.log_metrics = self._nop
        self.log_scalar = self._nop
        self.log_activations = self._nop
        self.log_gradients = self._nop

    # No log_dir given, bail out
    if not self.log_dir:
        replace_loggers()
        return

    # Detect tensorboard
    try:
        from tensorboardX import SummaryWriter
    except ImportError:
        replace_loggers()
        return
    else:
        self.available = True

    # Construct full folder path
    self.log_dir = pathlib.Path(self.log_dir).expanduser()
    self.log_dir = self.log_dir / self.subfolder / self.exp_id
    self.log_dir.mkdir(parents=True, exist_ok=True)

    # Set up summary writer
    self.writer = SummaryWriter(self.log_dir)
Example 13: __init__
def __init__(self, opt):
    self.opt = opt

    print('> training arguments:')
    for arg in vars(opt):
        print('>>> {0}: {1}'.format(arg, getattr(opt, arg)))
    # Sample output:
    # >>> model_name: lstm
    # >>> dataset: twitter
    # >>> optimizer: <class 'torch.optim.adam.Adam'>
    # >>> initializer: <function xavier_uniform_ at 0x10858b510>
    # >>> learning_rate: 0.001
    # >>> dropout: 0
    # >>> num_epoch: 20
    # >>> batch_size: 128
    # >>> log_step: 5
    # >>> logdir: tmp_log
    # >>> embed_dim: 100
    # >>> hidden_dim: 200
    # >>> max_seq_len: 80
    # >>> polarities_dim: 3
    # >>> hops: 3
    # >>> device: cpu
    # >>> model_class: <class 'models.lstm.LSTM'>
    # >>> inputs_cols: ['text_raw_indices']

    absa_dataset = ABSADatesetReader(dataset=opt.dataset, embed_dim=opt.embed_dim, max_seq_len=opt.max_seq_len)
    self.train_data_loader = DataLoader(dataset=absa_dataset.train_data, batch_size=opt.batch_size, shuffle=True)
    self.test_data_loader = DataLoader(dataset=absa_dataset.test_data, batch_size=len(absa_dataset.test_data), shuffle=False)
    self.writer = SummaryWriter(log_dir=opt.logdir)
    # embedding_matrix only stores the vector for each word index; it does not store the vocabulary itself
    self.model = opt.model_class(absa_dataset.embedding_matrix, opt).to(opt.device)
    self.reset_parameters()
Example 14: TensorBoard
class TensorBoard(Callback):
    # TODO: add option to write images; find fix for graph
    def __init__(self, log_dir, update_frequency=10):
        super(Callback, self).__init__()
        self.log_dir = log_dir
        self.writer = None
        self.update_frequency = update_frequency

    def on_train_begin(self, **_):
        self.writer = SummaryWriter(os.path.join(self.log_dir, datetime.datetime.now().__str__()))
        rndm_input = torch.autograd.Variable(torch.rand(1, *self.model.input_shape), requires_grad=True).to(self.logger['device'])
        # fwd_pass = self.model(rndm_input)
        self.writer.add_graph(self.model, rndm_input)
        return self

    def on_epoch_end(self, **_):
        if (self.logger['epoch'] % self.update_frequency) == 0:
            epoch_metrics = self.logger['epoch_metrics'][self.logger['epoch']]
            # items(), not the Python 2-only iteritems()
            for e_metric, e_metric_dct in epoch_metrics.items():
                for e_metric_split, e_metric_val in e_metric_dct.items():
                    self.writer.add_scalar('{}/{}'.format(e_metric_split, e_metric), e_metric_val, self.logger['epoch'])
            for name, param in self.model.named_parameters():
                self.writer.add_histogram(name.replace('.', '/'), param.clone().cpu().data.numpy(), self.logger['epoch'])
        return self

    def on_train_end(self, **_):
        return self.writer.close()
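Example 14 uses two less common writer methods, `add_graph` and `add_histogram`. A minimal standalone sketch of both (the tiny linear model is illustrative):

import torch
from tensorboardX import SummaryWriter

model = torch.nn.Linear(4, 2)           # illustrative tiny model
writer = SummaryWriter('runs/graph-demo')

# Trace the model with a dummy input and record its graph.
dummy = torch.rand(1, 4)
writer.add_graph(model, dummy)

# Record a histogram of each parameter tensor per epoch.
for epoch in range(3):
    for name, param in model.named_parameters():
        writer.add_histogram(name.replace('.', '/'), param.detach().cpu().numpy(), epoch)

writer.close()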
Example 15: __init__
def __init__(self, log_file_dir=None):
    self.counter = 0
    self.epi_counter = 0
    self.step_counter = 0
    self.u_stats = dict()
    self.path_info = defaultdict(list)
    self.extra_info = dict()
    self.scores = []
    self.tstart = time.time()
    self.writer = SummaryWriter(log_file_dir)