This article collects typical usage examples of the Python method tqdm.auto.tqdm.write. If you are wondering what tqdm.write does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also read more about the containing class, tqdm.auto.tqdm.
The following 13 code examples of tqdm.write are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
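Before the project-specific examples, a minimal, self-contained sketch of what tqdm.write is for may help: it prints a message above an active progress bar instead of breaking it the way a plain print would. The loop body and messages below are illustrative only and are not taken from any of the examples.

from tqdm.auto import tqdm
import time

# tqdm.write prints above the running bar, so log lines and the bar coexist.
for i in tqdm(range(10), desc='processing'):
    time.sleep(0.1)  # stand-in for real work
    if i % 3 == 0:
        tqdm.write(f'checkpoint reached at step {i}')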
Example 1: run
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def run(self):
    """
    Train model.

    The processes:
        Run each epoch -> Run scheduler -> Should stop early?
    """
    self._model.train()
    timer = Timer()
    for epoch in range(self._start_epoch, self._epochs + 1):
        self._epoch = epoch
        self._run_epoch()
        self._run_scheduler()
        if self._early_stopping.should_stop_early:
            break
    if self._verbose:
        tqdm.write(f'Cost time: {timer.time}s')
Example 2: load_model_state
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def load_model_state(self, f, strict=True):
    """
    Loads the model's state from a file.

    :param f: a file-like object (has to implement write and flush) or a string containing a file name.
    :param strict: Whether the file must contain exactly the same weight keys as the model.
    :return: NamedTuple with two lists (`missing_keys` and `unexpected_keys`).
    """
    model_state = torch.load(f, map_location=torch.device('cpu'))
    if isinstance(self.model, nn.DataParallel):
        model_state = {'module.' + k: v for k, v in model_state.items()}
    invalid_keys = self.model.load_state_dict(model_state, strict)
    self.model.to(self._device)
    return invalid_keys
Example 3: _step_wrapper_fn
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def _step_wrapper_fn(self, hyper_parameters):
    """
    Wraps the user-defined _step method and stores information regarding the current iteration.

    :param hyper_parameters: Dict containing the chosen hyper-parameters for the current iteration.
    :return: Numeric value representing the loss returned from the user-defined _step method.
    """
    self._current_iteration += 1
    tqdm.write('Iteration: {0:d}/{1:d}'.format(self._current_iteration, self._fit_iterations))
    self._print_hyper_parameters(hyper_parameters)
    loss = self._step(hyper_parameters)
    self._points.append((loss, hyper_parameters))
    if self._trials_save_path is not None:
        with open(self._trials_save_path, 'wb') as fw:
            pickle.dump(self._current_trials_object, fw)
    return loss
Example 4: emit
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def emit(self, record):
    try:
        msg = self.format(record)
        tqdm.write(msg)
        self.flush()
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        self.handleError(record)
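The emit method in Example 4 is normally defined inside a logging.Handler subclass so that standard logging output goes through tqdm.write. A minimal sketch of that surrounding context follows; the class name TqdmLoggingHandler and the basicConfig wiring are assumptions for illustration, not part of the original example.

import logging
from tqdm.auto import tqdm

class TqdmLoggingHandler(logging.Handler):
    # Routes standard logging records through tqdm.write (illustrative name).
    def emit(self, record):
        try:
            msg = self.format(record)
            tqdm.write(msg)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            self.handleError(record)

# Attach the handler so log messages no longer corrupt active progress bars.
logging.basicConfig(level=logging.INFO, handlers=[TqdmLoggingHandler()])
logging.getLogger(__name__).info('written via tqdm.write')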
Example 5: on_training_end
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def on_training_end(self, training_context):
    if training_context['_verbose']:
        tqdm.write("Epoch chosen: %d" % self._best_epoch)
    training_context['system'].load_model_state(self._best_state_filepath)
Example 6: save
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def save(self, f):
    """
    Saves the System to a file.

    :param f: a file-like object (has to implement write and flush) or a string containing a file name.
    """
    torch.save({
        'model': self.model,
        'last_activation': self.last_activation
    }, f)
Example 7: load
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def load(f):
    """
    Loads a System from a file. The model will reside in the CPU initially.

    :param f: a file-like object (has to implement write and flush) or a string containing a file name.
    """
    loaded_data = torch.load(f, map_location=torch.device('cpu'))
    return System(loaded_data['model'], loaded_data['last_activation'])
Example 8: save_model_state
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def save_model_state(self, f):
    """
    Saves the model's state to a file.

    :param f: a file-like object (has to implement write and flush) or a string containing a file name.
    """
    if isinstance(self.model, nn.DataParallel):
        model_state = {k[len('module.'):]: v for k, v in self.model.state_dict().items()}
    else:
        model_state = self.model.state_dict()
    torch.save(model_state, f)
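Examples 2, 6, 7 and 8 come from the same System class (pytorch-wrapper) and pair up as save/load counterparts. A hedged usage sketch of the round trip follows; the import path, the tiny model and the file names are assumptions for illustration, not code from the original project.

import torch.nn as nn
from pytorch_wrapper import System  # assumed import path

# Tiny illustrative model; any nn.Module would do here.
system = System(nn.Linear(4, 2), last_activation=nn.Softmax(dim=-1))
system.save('system.pt')                        # Example 6: whole System (model + last_activation)
restored = System.load('system.pt')             # Example 7: loaded on CPU first

system.save_model_state('weights.pt')           # Example 8: state_dict only
keys = restored.load_model_state('weights.pt')  # Example 2: returns missing/unexpected keys
print(keys.missing_keys, keys.unexpected_keys)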
Example 9: _train_evaluation
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def _train_evaluation(self):
    """
    Evaluates the model after each epoch.
    """
    if self.evaluation_data_loaders is not None and self.evaluators is not None:
        if self.training_context['_verbose']:
            auto_tqdm.write('Evaluating...')
            auto_tqdm.write('')
        for callback in self.callbacks:
            callback.on_evaluation_start(self.training_context)
        current_results = {}
        for current_dataset_name in self.evaluation_data_loaders:
            auto_tqdm.write(current_dataset_name)
            current_dataset_results = self.training_context['system'].evaluate(
                self.evaluation_data_loaders[current_dataset_name],
                self.evaluators,
                self.batch_input_key,
                self.training_context['_verbose']
            )
            current_results[current_dataset_name] = current_dataset_results
            for evaluator_name in self.evaluators:
                auto_tqdm.write(str(current_results[current_dataset_name][evaluator_name]))
        self.training_context['_results_history'].append(current_results)
        for callback in self.callbacks:
            callback.on_evaluation_end(self.training_context)
Example 10: _print_hyper_parameters
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def _print_hyper_parameters(hyper_parameters):
    """
    Prints parameters.

    :param hyper_parameters: Dict with the hyper parameters.
    """
    tqdm.write('-' * 80)
    tqdm.write('Hyper-Parameters')
    tqdm.write('-' * 80)
    tqdm.write(pprint.pformat(hyper_parameters))
    tqdm.write('-' * 80)
Example 11: emit
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def emit(self, record: Any) -> None:
    try:
        msg = self.format(record)
        tqdm.write(msg)
        self.flush()
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        self.handleError(record)
Example 12: _run_epoch
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def _run_epoch(self):
    """
    Run each epoch.

    The training steps:
        - Get batch and feed them into model
        - Get outputs. Calculate all losses and sum them up
        - Loss backwards and optimizer steps
        - Evaluation
        - Update and output result
    """
    # Get the total number of batches
    num_batch = len(self._trainloader)
    train_loss = AverageMeter()
    with tqdm(enumerate(self._trainloader), total=num_batch,
              disable=not self._verbose) as pbar:
        for step, (inputs, target) in pbar:
            outputs = self._model(inputs)
            # Calculate all losses and sum them up (works for one or several criterions)
            loss = torch.stack(
                [c(outputs, target) for c in self._criterions]
            ).sum()
            self._backward(loss)
            train_loss.update(loss.item())

            # Set progress bar
            pbar.set_description(f'Epoch {self._epoch}/{self._epochs}')
            pbar.set_postfix(loss=f'{loss.item():.3f}')

            # Run validate
            self._iteration += 1
            if self._iteration % self._validate_interval == 0:
                pbar.update(1)
                if self._verbose:
                    pbar.write(
                        f'[Iter-{self._iteration} '
                        f'Loss-{train_loss.avg:.3f}]:')
                result = self.evaluate(self._validloader)
                if self._verbose:
                    pbar.write(' Validation: ' + ' - '.join(
                        f'{k}: {round(v, 4)}' for k, v in result.items()))
                # Early stopping
                self._early_stopping.update(result)
                if self._early_stopping.should_stop_early:
                    self._save()
                    pbar.write('Ran out of patience. Stop training...')
                    break
                elif self._early_stopping.is_best_so_far:
                    self._save()
Example 13: _train_epoch
# Required import: from tqdm.auto import tqdm [as alias]
# Or: from tqdm.auto.tqdm import write [as alias]
def _train_epoch(self):
    """
    Trains the model for a single epoch.
    """
    self.training_context['_current_epoch'] += 1
    self.training_context['system'].model.train(True)
    for callback in self.callbacks:
        callback.on_epoch_start(self.training_context)
    if self.training_context['_verbose']:
        pre_time = time.time()
        auto_tqdm.write('-' * 80)
        auto_tqdm.write('')
        auto_tqdm.write('Epoch: %d' % (self.training_context['_current_epoch']))
        auto_tqdm.write('')
        auto_tqdm.write('Training...')
        auto_tqdm.write('')
        pbar = auto_tqdm(total=len(self.train_data_loader), ncols=NCOLS)
    cum_loss = 0
    self.training_context['optimizer'].zero_grad()
    for i, batch in enumerate(self.train_data_loader):
        perform_opt_step = (i % self.gradient_accumulation_steps == 0) or (i == (len(self.train_data_loader) - 1))
        cum_loss += self._train_batch(batch, perform_opt_step)
        if self.training_context['_verbose']:
            train_loss = cum_loss / (i + 1)
            pbar.update(1)
            pbar.set_postfix(ordered_dict=OrderedDict([('loss', '%5.4f' % train_loss)]))
    for callback in self.callbacks:
        callback.on_epoch_end(self.training_context)
    if self.training_context['_verbose']:
        pbar.close()
        auto_tqdm.write('Time elapsed: %d' % (time.time() - pre_time))
        auto_tqdm.write('')
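_train_batch is not part of the excerpt in Example 13. As a rough idea of how a gradient-accumulation helper like it usually works with the perform_opt_step flag, here is a hedged sketch; the body, the loss_wrapper attribute and the batch layout are assumptions for illustration, not the library's actual implementation.

def _train_batch(self, batch, perform_opt_step):
    # Forward pass and loss, scaled so accumulated gradients average out.
    output = self.training_context['system'].model(batch['input'])  # assumed batch layout
    loss = self.loss_wrapper.calculate_loss(output, batch) / self.gradient_accumulation_steps
    loss.backward()
    # Step and reset the optimizer only on accumulation boundaries.
    if perform_opt_step:
        self.training_context['optimizer'].step()
        self.training_context['optimizer'].zero_grad()
    return loss.item() * self.gradient_accumulation_steps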