本文整理汇总了Python中tensorboardX.SummaryWriter.add_scalars方法的典型用法代码示例。如果您正苦于以下问题:Python SummaryWriter.add_scalars方法的具体用法?Python SummaryWriter.add_scalars怎么用?Python SummaryWriter.add_scalars使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorboardX.SummaryWriter的用法示例。
在下文中一共展示了SummaryWriter.add_scalars方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: print
# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_scalars [as alias]
# NOTE(review): fragment of a Faster R-CNN training loop. The enclosing
# function and loop headers are not shown, and the original indentation was
# lost during extraction — this snippet is illustrative, not runnable as-is.
# Count background ROIs as total ROI labels minus foreground ROIs.
bg_cnt = rois_label.data.numel() - fg_cnt
# Console progress log: session/epoch/iteration, averaged loss and current lr.
print("[session %d][epoch %2d][iter %4d/%4d] loss: %.4f, lr: %.2e" \
% (args.session, epoch, step, iters_per_epoch, loss_temp, lr))
print("\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end-start))
print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f" \
% (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
# Mirror the same metrics to TensorBoard when enabled.
if args.use_tfboard:
info = {
'loss': loss_temp,
'loss_rpn_cls': loss_rpn_cls,
'loss_rpn_box': loss_rpn_box,
'loss_rcnn_cls': loss_rcnn_cls,
'loss_rcnn_box': loss_rcnn_box
}
# All loss components under one tag; x-axis is the global step.
logger.add_scalars("logs_s_{}/losses".format(args.session), info, (epoch - 1) * iters_per_epoch + step)
# Reset the running loss accumulator and restart the timing window.
loss_temp = 0
start = time.time()
# Checkpoint: model (unwrap DataParallel when multi-GPU), optimizer state,
# and config needed to resume training.
save_name = os.path.join(output_dir, 'faster_rcnn_{}_{}_{}.pth'.format(args.session, epoch, step))
save_checkpoint({
'session': args.session,
'epoch': epoch + 1,
'model': fasterRCNN.module.state_dict() if args.mGPUs else fasterRCNN.state_dict(),
'optimizer': optimizer.state_dict(),
'pooling_mode': cfg.POOLING_MODE,
'class_agnostic': args.class_agnostic,
}, save_name)
print('save model: {}'.format(save_name))
示例2: main
# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_scalars [as alias]
#......... part of the code is omitted here .........
# NOTE(review): fragment of a person re-identification training `main`
# (AlignedReID-style). The top of the function is omitted and indentation
# was lost during extraction. Meters named *_meter appear to be running
# averages with .val (latest) and .avg (epoch mean) — TODO confirm.
# Per-step log for the local-distance branch (`l*` metrics), built only
# when that loss is enabled.
'ld_ap {:.4f}, ld_an {:.4f}, '
'lL {:.4f}'.format(
l_prec_meter.val, l_m_meter.val,
l_dist_ap_meter.val, l_dist_an_meter.val,
l_loss_meter.val, ))
else:
l_log = ''
if cfg.id_loss_weight > 0:
id_log = (', idL {:.4f}'.format(id_loss_meter.val))
else:
id_log = ''
total_loss_log = ', loss {:.4f}'.format(loss_meter.val)
# Concatenate the per-branch fragments into one console line.
log = time_log + \
g_log + l_log + id_log + \
total_loss_log
print(log)
#############
# Epoch Log #
#############
# Same structure as the step log, but using epoch averages (.avg).
time_log = 'Ep {}, {:.2f}s'.format(ep + 1, time.time() - ep_st, )
if cfg.g_loss_weight > 0:
g_log = (', gp {:.2%}, gm {:.2%}, '
'gd_ap {:.4f}, gd_an {:.4f}, '
'gL {:.4f}'.format(
g_prec_meter.avg, g_m_meter.avg,
g_dist_ap_meter.avg, g_dist_an_meter.avg,
g_loss_meter.avg, ))
else:
g_log = ''
if cfg.l_loss_weight > 0:
l_log = (', lp {:.2%}, lm {:.2%}, '
'ld_ap {:.4f}, ld_an {:.4f}, '
'lL {:.4f}'.format(
l_prec_meter.avg, l_m_meter.avg,
l_dist_ap_meter.avg, l_dist_an_meter.avg,
l_loss_meter.avg, ))
else:
l_log = ''
if cfg.id_loss_weight > 0:
id_log = (', idL {:.4f}'.format(id_loss_meter.avg))
else:
id_log = ''
total_loss_log = ', loss {:.4f}'.format(loss_meter.avg)
log = time_log + \
g_log + l_log + id_log + \
total_loss_log
print(log)
# Log to TensorBoard
if cfg.log_to_file:
# Lazily create the writer on first use so no files are produced
# when logging is disabled.
if writer is None:
writer = SummaryWriter(log_dir=osp.join(cfg.exp_dir, 'tensorboard'))
# One add_scalars call per metric family; each dict entry becomes a
# separate curve under the shared tag, keyed by epoch.
writer.add_scalars(
'loss',
dict(global_loss=g_loss_meter.avg,
local_loss=l_loss_meter.avg,
id_loss=id_loss_meter.avg,
loss=loss_meter.avg, ),
ep)
writer.add_scalars(
'tri_precision',
dict(global_precision=g_prec_meter.avg,
local_precision=l_prec_meter.avg, ),
ep)
writer.add_scalars(
'satisfy_margin',
dict(global_satisfy_margin=g_m_meter.avg,
local_satisfy_margin=l_m_meter.avg, ),
ep)
writer.add_scalars(
'global_dist',
dict(global_dist_ap=g_dist_ap_meter.avg,
global_dist_an=g_dist_an_meter.avg, ),
ep)
writer.add_scalars(
'local_dist',
dict(local_dist_ap=l_dist_ap_meter.avg,
local_dist_an=l_dist_an_meter.avg, ),
ep)
# save ckpt
if cfg.log_to_file:
save_ckpt(modules_optims, ep + 1, 0, cfg.ckpt_file)
########
# Test #
########
test(load_model_weight=False)
示例3: SummaryWorker
# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_scalars [as alias]
# NOTE(review): indentation was lost during extraction and the class is
# truncated at the end (see the omission marker below) — not runnable as-is.
# Background process that drains a queue of training summaries and writes
# them to TensorBoard, keeping logging I/O off the training process.
class SummaryWorker(multiprocessing.Process):
# Read per-summary-type intervals from config; a missing option disables
# that summary type by substituting a timer that never fires.
def __init__(self, env):
super(SummaryWorker, self).__init__()
self.env = env
self.config = env.config
self.queue = multiprocessing.Queue()
try:
self.timer_scalar = utils.train.Timer(env.config.getfloat('summary', 'scalar'))
except configparser.NoOptionError:
self.timer_scalar = lambda: False
try:
self.timer_image = utils.train.Timer(env.config.getfloat('summary', 'image'))
except configparser.NoOptionError:
self.timer_image = lambda: False
try:
self.timer_histogram = utils.train.Timer(env.config.getfloat('summary', 'histogram'))
except configparser.NoOptionError:
self.timer_histogram = lambda: False
# Regex list selecting which parameters get histogram summaries.
with open(os.path.expanduser(os.path.expandvars(env.config.get('summary_histogram', 'parameters'))), 'r') as f:
self.histogram_parameters = utils.RegexList([line.rstrip() for line in f])
self.draw_bbox = utils.visualize.DrawBBox(env.config, env.category)
self.draw_iou = utils.visualize.DrawIou(env.config)
# Producer side: if this summary type's timer fired, snapshot the tensors
# (copy_<name>) and enqueue them for the worker process.
def __call__(self, name, **kwargs):
if getattr(self, 'timer_' + name)():
kwargs = getattr(self, 'copy_' + name)(**kwargs)
self.queue.put((name, kwargs))
# Sentinel (None) tells run() to exit its loop.
def stop(self):
self.queue.put((None, {}))
# Consumer loop: dispatch each queued item to summary_<name>.
def run(self):
self.writer = SummaryWriter(os.path.join(self.env.model_dir, self.env.args.run))
while True:
name, kwargs = self.queue.get()
if name is None:
break
func = getattr(self, 'summary_' + name)
try:
func(**kwargs)
# NOTE(review): bare except deliberately keeps the worker alive on any
# summary failure, but it also swallows KeyboardInterrupt/SystemExit.
except:
traceback.print_exc()
# Detach losses to CPU numpy so they can cross the process boundary.
def copy_scalar(self, **kwargs):
step, loss_total, loss, loss_hparam = (kwargs[key] for key in 'step, loss_total, loss, loss_hparam'.split(', '))
loss_total = loss_total.data.clone().cpu().numpy()
loss = {key: loss[key].data.clone().cpu().numpy() for key in loss}
loss_hparam = {key: loss_hparam[key].data.clone().cpu().numpy() for key in loss_hparam}
return dict(
step=step,
loss_total=loss_total,
loss=loss, loss_hparam=loss_hparam,
)
# Write individual loss components, optional hyperparameter-weighted
# losses (grouped via add_scalars), and the total loss.
def summary_scalar(self, **kwargs):
step, loss_total, loss, loss_hparam = (kwargs[key] for key in 'step, loss_total, loss, loss_hparam'.split(', '))
for key in loss:
self.writer.add_scalar('loss/' + key, loss[key][0], step)
if self.config.getboolean('summary_scalar', 'loss_hparam'):
self.writer.add_scalars('loss_hparam', {key: loss_hparam[key][0] for key in loss_hparam}, step)
self.writer.add_scalar('loss_total', loss_total[0], step)
# Snapshot images, predictions and the positive/negative matching mask
# (rescaled to [0, 1]) for the image summary.
def copy_image(self, **kwargs):
step, height, width, rows, cols, data, pred, debug = (kwargs[key] for key in 'step, height, width, rows, cols, data, pred, debug'.split(', '))
data = {key: data[key].clone().cpu().numpy() for key in 'image, yx_min, yx_max, cls'.split(', ')}
pred = {key: pred[key].data.clone().cpu().numpy() for key in 'yx_min, yx_max, iou, logits'.split(', ') if key in pred}
matching = (debug['positive'].float() - debug['negative'].float() + 1) / 2
matching = matching.data.clone().cpu().numpy()
return dict(
step=step, height=height, width=width, rows=rows, cols=cols,
data=data, pred=pred,
matching=matching,
)
# Render ground-truth and predicted bounding boxes onto image grids.
def summary_image(self, **kwargs):
step, height, width, rows, cols, data, pred, matching = (kwargs[key] for key in 'step, height, width, rows, cols, data, pred, matching'.split(', '))
image = data['image']
# Cap the number of images per summary to keep event files small.
limit = min(self.config.getint('summary_image', 'limit'), image.shape[0])
image = image[:limit, :, :, :]
yx_min, yx_max, iou = (pred[key] for key in 'yx_min, yx_max, iou'.split(', '))
# Map cell coordinates back to pixel coordinates.
scale = [height / rows, width / cols]
yx_min, yx_max = (a * scale for a in (yx_min, yx_max))
if 'logits' in pred:
cls = np.argmax(F.softmax(torch.autograd.Variable(torch.from_numpy(pred['logits'])), -1).data.cpu().numpy(), -1)
else:
# NOTE(review): np.int is removed in NumPy >= 1.24; would need int/np.int64.
cls = np.zeros(iou.shape, np.int)
if self.config.getboolean('summary_image', 'bbox'):
# data
canvas = np.copy(image)
canvas = pybenchmark.profile('bbox/data')(self.draw_bbox_data)(canvas, *(data[key] for key in 'yx_min, yx_max, cls'.split(', ')))
self.writer.add_image('bbox/data', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
# pred
canvas = np.copy(image)
canvas = pybenchmark.profile('bbox/pred')(self.draw_bbox_pred)(canvas, yx_min, yx_max, cls, iou, nms=True)
self.writer.add_image('bbox/pred', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
if self.config.getboolean('summary_image', 'iou'):
# bbox
canvas = np.copy(image)
canvas_data = self.draw_bbox_data(canvas, *(data[key] for key in 'yx_min, yx_max, cls'.split(', ')), colors=['g'])
# data
#......... part of the code is omitted here .........
示例4: main
# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_scalars [as alias]
#......... part of the code is omitted here .........
# NOTE(review): fragment of a mutual-learning ReID `main` (multi-model
# variant of Example 2, adding pm/gdm/ldm mutual-learning losses). Top of
# the function omitted; indentation lost during extraction.
#############
# Epoch Log #
#############
time_log = 'Ep {}, {:.2f}s'.format(ep + 1, time.time() - ep_st, )
# Each enabled loss branch contributes a fragment to the console line;
# disabled branches contribute an empty string.
if cfg.g_loss_weight > 0:
g_log = (', gp {:.2%}, gm {:.2%}, '
'gd_ap {:.4f}, gd_an {:.4f}, '
'gL {:.4f}'.format(
g_prec_meter.avg, g_m_meter.avg,
g_dist_ap_meter.avg, g_dist_an_meter.avg,
g_loss_meter.avg, ))
else:
g_log = ''
if cfg.l_loss_weight > 0:
l_log = (', lp {:.2%}, lm {:.2%}, '
'ld_ap {:.4f}, ld_an {:.4f}, '
'lL {:.4f}'.format(
l_prec_meter.avg, l_m_meter.avg,
l_dist_ap_meter.avg, l_dist_an_meter.avg,
l_loss_meter.avg, ))
else:
l_log = ''
if cfg.id_loss_weight > 0:
id_log = (', idL {:.4f}'.format(id_loss_meter.avg))
else:
id_log = ''
# Mutual-learning losses only apply when training more than one model.
if (cfg.num_models > 1) and (cfg.pm_loss_weight > 0):
pm_log = (', pmL {:.4f}'.format(pm_loss_meter.avg))
else:
pm_log = ''
if (cfg.num_models > 1) and (cfg.gdm_loss_weight > 0):
gdm_log = (', gdmL {:.4f}'.format(gdm_loss_meter.avg))
else:
gdm_log = ''
if (cfg.num_models > 1) \
and cfg.local_dist_own_hard_sample \
and (cfg.ldm_loss_weight > 0):
ldm_log = (', ldmL {:.4f}'.format(ldm_loss_meter.avg))
else:
ldm_log = ''
total_loss_log = ', loss {:.4f}'.format(loss_meter.avg)
log = time_log + \
g_log + l_log + id_log + \
pm_log + gdm_log + ldm_log + \
total_loss_log
print(log)
# Log to TensorBoard
if cfg.log_to_file:
# Lazily create the writer on first use.
if writer is None:
writer = SummaryWriter(log_dir=osp.join(cfg.exp_dir, 'tensorboard'))
# Grouped curves per metric family, keyed by epoch.
writer.add_scalars(
'loss',
dict(global_loss=g_loss_meter.avg,
local_loss=l_loss_meter.avg,
id_loss=id_loss_meter.avg,
pm_loss=pm_loss_meter.avg,
gdm_loss=gdm_loss_meter.avg,
ldm_loss=ldm_loss_meter.avg,
loss=loss_meter.avg, ),
ep)
writer.add_scalars(
'tri_precision',
dict(global_precision=g_prec_meter.avg,
local_precision=l_prec_meter.avg, ),
ep)
writer.add_scalars(
'satisfy_margin',
dict(global_satisfy_margin=g_m_meter.avg,
local_satisfy_margin=l_m_meter.avg, ),
ep)
writer.add_scalars(
'global_dist',
dict(global_dist_ap=g_dist_ap_meter.avg,
global_dist_an=g_dist_an_meter.avg, ),
ep)
writer.add_scalars(
'local_dist',
dict(local_dist_ap=l_dist_ap_meter.avg,
local_dist_an=l_dist_an_meter.avg, ),
ep)
# save ckpt
if cfg.log_to_file:
save_ckpt(modules_optims, ep + 1, 0, cfg.ckpt_file)
########
# Test #
########
test(load_model_weight=False)
示例5: main
# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_scalars [as alias]
#......... part of the code is omitted here .........
# NOTE(review): fragment of an identification-loss ReID training `main`.
# The lines below finish a nested `validate()` definition whose header is
# omitted above; indentation was lost during extraction.
verbose=True)
print()
return mAP, cmc_scores[0]
# Test-only mode: evaluate a stored checkpoint and exit.
if cfg.only_test:
test(load_model_weight=True)
return
############
# Training #
############
start_ep = resume_ep if cfg.resume else 0
for ep in range(start_ep, cfg.total_epochs):
# Adjust Learning Rate
adjust_lr_staircase(
optimizer.param_groups,
[cfg.finetuned_params_lr, cfg.new_params_lr],
ep + 1,
cfg.staircase_decay_at_epochs,
cfg.staircase_decay_multiply_factor)
may_set_mode(modules_optims, 'train')
# For recording loss
loss_meter = AverageMeter()
ep_st = time.time()
step = 0
epoch_done = False
# Batch loop: the data source signals the end of the epoch itself.
while not epoch_done:
step += 1
step_st = time.time()
ims, im_names, labels, mirrored, epoch_done = train_set.next_batch()
ims_var = Variable(TVT(torch.from_numpy(ims).float()))
labels_var = Variable(TVT(torch.from_numpy(labels).long()))
_, logits_list = model_w(ims_var)
# Sum the classification loss over every logits head.
loss = torch.sum(
torch.cat([criterion(logits, labels_var) for logits in logits_list]))
optimizer.zero_grad()
loss.backward()
optimizer.step()
############
# Step Log #
############
loss_meter.update(to_scalar(loss))
if step % cfg.steps_per_log == 0:
log = '\tStep {}/Ep {}, {:.2f}s, loss {:.4f}'.format(
step, ep + 1, time.time() - step_st, loss_meter.val)
print(log)
#############
# Epoch Log #
#############
log = 'Ep {}, {:.2f}s, loss {:.4f}'.format(
ep + 1, time.time() - ep_st, loss_meter.avg)
print(log)
##########################
# Test on Validation Set #
##########################
# Defaults of 0 are logged on epochs where validation is skipped.
mAP, Rank1 = 0, 0
if ((ep + 1) % cfg.epochs_per_val == 0) and (val_set is not None):
mAP, Rank1 = validate()
# Log to TensorBoard
if cfg.log_to_file:
if writer is None:
writer = SummaryWriter(log_dir=osp.join(cfg.exp_dir, 'tensorboard'))
writer.add_scalars(
'val scores',
dict(mAP=mAP,
Rank1=Rank1),
ep)
writer.add_scalars(
'loss',
dict(loss=loss_meter.avg, ),
ep)
# save ckpt
if cfg.log_to_file:
save_ckpt(modules_optims, ep + 1, 0, cfg.ckpt_file)
########
# Test #
########
test(load_model_weight=False)
示例6: train
# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_scalars [as alias]
# Tune a pipeline's hyper-parameters on one protocol subset, streaming
# per-trial loss/params to TensorBoard and persisting the best params.
# NOTE(review): original indentation was lost during extraction.
def train(self, protocol_name, subset='development', n_calls=1):
train_dir = self.TRAIN_DIR.format(
experiment_dir=self.experiment_dir,
protocol=protocol_name,
subset=subset)
mkdir_p(train_dir)
protocol = get_protocol(protocol_name, progress=False,
preprocessors=self.preprocessors_)
tune_db = f'{train_dir}/tune.db'
params_yml = f'{train_dir}/params.yml'
params_yml_lock = f'{train_dir}/params.yml.lock'
# Per-process log dir so concurrent tuners do not clobber each other.
pid = os.getpid()
writer = SummaryWriter(log_dir=f"{train_dir}/{pid}")
progress_bar = tqdm(unit='trial')
progress_bar.set_description('Trial #1 : ...')
progress_bar.update(0)
iterations = self.pipeline_.tune_iter(
tune_db, protocol, subset=subset,
sampler=self.sampler_)
for s, status in enumerate(iterations):
# NOTE(review): this exits when s+1 == n_calls, i.e. the body below
# runs for at most n_calls - 1 statuses — confirm this off-by-one is
# intended (with the default n_calls=1 no status is processed).
if s+1 == n_calls:
break
loss = status['latest']['loss']
writer.add_scalar(f'train/{protocol_name}.{subset}/loss/latest',
loss, global_step=s + 1)
# Grouped curves: one per tuned hyper-parameter.
writer.add_scalars(
f'train/{protocol_name}.{subset}/params/latest',
status['latest']['params'], global_step=s + 1)
# A new best trial: persist its params and log under the /best tags.
if 'new_best' in status:
_ = self.dump(status['new_best'], params_yml, params_yml_lock)
n_trials = status['new_best']['n_trials']
best_loss = status['new_best']['loss']
writer.add_scalar(f'train/{protocol_name}.{subset}/loss/best',
best_loss, global_step=n_trials)
writer.add_scalars(
f'train/{protocol_name}.{subset}/params/best',
status['new_best']['params'], global_step=n_trials)
# progress bar
# NOTE(review): best_loss/n_trials are only bound after the first
# 'new_best' status; a NameError is possible here if the very first
# processed status lacks one — verify tune_iter's contract.
desc = f"Trial #{s+1}"
loss = status['latest']['loss']
# Losses in (-1, 1) are displayed as percentages.
if abs(loss) < 1:
desc += f" = {100 * loss:.3f}%"
desc += f" : Best = {100 * best_loss:.3f}% after {n_trials} trials"
else:
desc += f" = {loss:.3f}"
desc += f" : Best = {best_loss:.3f} after {n_trials} trials"
progress_bar.set_description(desc=desc)
progress_bar.update(1)
# Final summary: dump the overall best params and print them.
best = self.pipeline_.best(tune_db)
content = self.dump(best, params_yml, params_yml_lock)
sep = "=" * max(len(params_yml),
max(len(l) for l in content.split('\n')))
print(f"\n{sep}\n{params_yml}\n{sep}\n{content}{sep}")
print(f"Loss = {best['loss']:g} | {best['n_trials']} trials")
print(f"{sep}")
示例7: range
# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_scalars [as alias]
# NOTE(review): fragment of DeepSpeech training setup (resume-from-checkpoint
# branch vs. fresh-start branch). The snippet starts mid-expression, ends
# mid-call, and indentation was lost during extraction — not runnable as-is.
dim=1)
# Replay stored curves into a visdom line plot.
viz_window = viz.line(
X=x_axis,
Y=y_axis,
opts=opts,
)
# When resuming with TensorBoard enabled, replay the checkpoint's
# per-epoch metrics so the event log has the full history.
if main_proc and args.tensorboard and \
package[
'loss_results'] is not None and start_epoch > 0: # Previous scores to tensorboard logs
for i in range(start_epoch):
values = {
'Avg Train Loss': loss_results[i],
'Avg WER': wer_results[i],
'Avg CER': cer_results[i]
}
# Epochs are logged 1-based under the run id tag.
tensorboard_writer.add_scalars(args.id, values, i + 1)
else:
# Fresh start: build the label alphabet and audio/model config.
with open(args.labels_path) as label_file:
labels = str(''.join(json.load(label_file)))
audio_conf = dict(sample_rate=args.sample_rate,
window_size=args.window_size,
window_stride=args.window_stride,
window=args.window,
noise_dir=args.noise_dir,
noise_prob=args.noise_prob,
noise_levels=(args.noise_min, args.noise_max))
rnn_type = args.rnn_type.lower()
assert rnn_type in supported_rnns, "rnn_type should be either lstm, rnn or gru"
# (truncated in source: the DeepSpeech(...) call continues past this fragment)
model = DeepSpeech(rnn_hidden_size=args.hidden_size,