This article collects typical usage examples of the Python method tensorflow.python.training.basic_session_run_hooks.SecondOrStepTimer. If you are wondering what basic_session_run_hooks.SecondOrStepTimer does, how to call it, or how it is used in practice, the curated examples below should help. You may also want to explore the enclosing module, tensorflow.python.training.basic_session_run_hooks, for related utilities.
Nine code examples of basic_session_run_hooks.SecondOrStepTimer are shown below, ordered by popularity.
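Before turning to the examples, a minimal standalone sketch of the timer's small API surface may be useful (the loop and the numbers here are illustrative, and it assumes a TensorFlow version that still ships these TF1-era private training internals):

from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer

# Exactly one of every_secs / every_steps may be set; here: trigger every 100 steps.
timer = SecondOrStepTimer(every_steps=100)

for step in range(1, 501):
    if timer.should_trigger_for_step(step):
        # Returns (elapsed_secs, elapsed_steps); both are None on the first trigger.
        elapsed_secs, elapsed_steps = timer.update_last_triggered_step(step)
        print("Triggered at step", step, elapsed_secs, elapsed_steps)

All of the hooks below follow this same should_trigger_for_step / update_last_triggered_step pattern.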
Example 1: __init__
# Required import: from tensorflow.python.training import basic_session_run_hooks
# Or: from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
def __init__(self,
             save_steps=None,
             save_secs=None,
             output_dir="",
             show_dataflow=True,
             show_memory=False):
  """Initializes a hook that takes periodic profiling snapshots.

  Args:
    save_steps: `int`, save profile traces every N steps. Exactly one of
      `save_secs` and `save_steps` should be set.
    save_secs: `int`, save profile traces every N seconds.
    output_dir: `string`, the directory to save the profile traces to.
      Defaults to the current directory.
    show_dataflow: `bool`, if True, add flow events to the trace connecting
      producers and consumers of tensors.
    show_memory: `bool`, if True, add object snapshot events to the trace
      showing the sizes and lifetimes of tensors.
  """
  self._output_file = os.path.join(output_dir, "timeline-{}.json")
  self._show_dataflow = show_dataflow
  self._show_memory = show_memory
  self._timer = SecondOrStepTimer(every_secs=save_secs,
                                  every_steps=save_steps)
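The constructor above only wires up the timer. For context, here is a hedged sketch (modeled on TensorFlow's own ProfilerHook, simplified, and not taken from this source) of how the remaining hook methods might consult it; self._next_step and self._global_step_tensor are assumed to be initialized in begin(), and timeline is tensorflow.python.client.timeline:

def before_run(self, run_context):
    # Request a full trace only when the timer says this step should trigger.
    self._request_trace = (self._next_step is not None and
                           self._timer.should_trigger_for_step(self._next_step))
    opts = (tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
            if self._request_trace else None)
    return tf.compat.v1.train.SessionRunArgs(
        {"global_step": self._global_step_tensor}, options=opts)

def after_run(self, run_context, run_values):
    global_step = run_values.results["global_step"]
    if self._request_trace:
        self._timer.update_last_triggered_step(global_step)
        # Convert the collected step stats into a Chrome trace file.
        trace = timeline.Timeline(run_values.run_metadata.step_stats)
        with open(self._output_file.format(global_step), "w") as f:
            f.write(trace.generate_chrome_trace_format(
                show_dataflow=self._show_dataflow, show_memory=self._show_memory))
    self._next_step = global_step + 1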
Example 2: __init__
# Required import: from tensorflow.python.training import basic_session_run_hooks
# Or: from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
def __init__(self,
             batch_size,
             every_n_steps=100,
             every_n_secs=None):
  """Initializer for ExamplesPerSecondHook.

  Args:
    batch_size: Total batch size used to calculate examples/second from
      global time.
    every_n_steps: Log stats every n steps.
    every_n_secs: Log stats every n seconds.
  """
  if (every_n_steps is None) == (every_n_secs is None):
    raise ValueError('exactly one of every_n_steps'
                     ' and every_n_secs should be provided.')
  self._timer = basic_session_run_hooks.SecondOrStepTimer(
      every_steps=every_n_steps, every_secs=every_n_secs)
  self._step_train_time = 0
  self._total_steps = 0
  self._batch_size = batch_size
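This constructor appears in several codebases (for example, TensorFlow's model garden); its companion after_run typically computes examples/sec from the timer's elapsed time. A simplified sketch along those lines, assuming before_run() fetches the global step:

def after_run(self, run_context, run_values):
    global_step = run_values.results  # assumed fetched by before_run()
    if self._timer.should_trigger_for_step(global_step):
        elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
            global_step)
        if elapsed_time is not None:
            self._step_train_time += elapsed_time
            self._total_steps += elapsed_steps
            average_ex_per_sec = (self._batch_size * self._total_steps /
                                  self._step_train_time)
            current_ex_per_sec = self._batch_size * elapsed_steps / elapsed_time
            logging.info('Average examples/sec: %g (current: %g), step = %d',
                         average_ex_per_sec, current_ex_per_sec, self._total_steps)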
Example 3: __init__
# Required import: from tensorflow.python.training import basic_session_run_hooks
# Or: from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
def __init__(self, input_fn, estimator, metrics,
             metric_name='loss', every_steps=100,
             max_patience=100, minimize=True):
  self._input_fn = input_fn
  self._estimator = estimator
  self._metrics = metrics
  self._metric_name = metric_name
  self._every_steps = every_steps
  self._max_patience = max_patience
  self._minimize = minimize
  self._timer = basic_session_run_hooks.SecondOrStepTimer(
      every_steps=every_steps,
      every_secs=None)
  self._global_step = None
  self._best_value = None
  self._best_step = None
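These fields suggest a patience-based early-stopping hook, but the source only shows __init__. A hypothetical after_run consistent with these fields might look like the following (entirely illustrative: the evaluation call, the fetch, and the stopping rule are assumptions, not the original code):

def after_run(self, run_context, run_values):
    global_step = run_values.results  # assumed fetched by before_run()
    if not self._timer.should_trigger_for_step(global_step):
        return
    self._timer.update_last_triggered_step(global_step)
    value = self._estimator.evaluate(self._input_fn)[self._metric_name]
    improved = (self._best_value is None or
                (value < self._best_value if self._minimize
                 else value > self._best_value))
    if improved:
        self._best_value, self._best_step = value, global_step
    elif global_step - self._best_step > self._max_patience:
        run_context.request_stop()  # patience exhausted; stop training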
Example 4: __init__
# Required import: from tensorflow.python.training import basic_session_run_hooks
# Or: from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
def __init__(self, batch_size, every_n_steps=100, every_n_secs=None):
  """Initializer for ExamplesPerSecondHook.

  Args:
    batch_size: Total batch size used to calculate examples/second from
      global time.
    every_n_steps: Log stats every n steps.
    every_n_secs: Log stats every n seconds.
  """
  if (every_n_steps is None) == (every_n_secs is None):
    raise ValueError(
        "exactly one of every_n_steps and every_n_secs should be provided."
    )
  self._timer = basic_session_run_hooks.SecondOrStepTimer(
      every_steps=every_n_steps, every_secs=every_n_secs
  )
  self._step_train_time = 0
  self._total_steps = 0
  self._batch_size = batch_size
Example 5: __init__
# Required import: from tensorflow.python.training import basic_session_run_hooks
# Or: from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
def __init__(self,
             checkpoint_dir,
             save_secs=None,
             save_steps=None,
             saver=None,
             checkpoint_basename="model.ckpt",
             scaffold=None,
             listeners=None):
  """Initializes an `AsyncCheckpointSaverHook`.

  Args:
    checkpoint_dir: `str`, base directory for the checkpoint files.
    save_secs: `int`, save every N secs.
    save_steps: `int`, save every N steps.
    saver: `Saver` object, used for saving.
    checkpoint_basename: `str`, base name for the checkpoint files.
    scaffold: `Scaffold`, used to get the saver object.
    listeners: List of `CheckpointSaverListener` subclass instances. Used for
      callbacks that run immediately before or after this hook saves the
      checkpoint.

  Raises:
    ValueError: One of `save_steps` or `save_secs` should be set.
    ValueError: At most one of `saver` or `scaffold` should be set.
  """
  logging.info("Create AsyncCheckpointSaverHook.")
  if saver is not None and scaffold is not None:
    raise ValueError("You cannot provide both saver and scaffold.")
  self._saver = saver
  self._save_thread = None
  self._write_graph_thread = None
  self._checkpoint_dir = checkpoint_dir
  self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
  self._scaffold = scaffold
  self._timer = basic_session_run_hooks.SecondOrStepTimer(
      every_secs=save_secs, every_steps=save_steps)
  self._listeners = listeners or []
  self._steps_per_run = 1
  self._summary_writer = None
  self._global_step_tensor = None
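The constructor alone does not show what makes this saver asynchronous. A simplified, assumed sketch of the save path (the helper name _save_async and the details are illustrative, not the original implementation): checkpoints are written on a background thread so the training loop is not blocked.

import threading

def _save_async(self, session, step):
    if self._save_thread is not None:
        self._save_thread.join()  # never run two saves concurrently
    saver = self._saver or self._scaffold.saver
    self._save_thread = threading.Thread(
        target=saver.save,
        args=(session, self._save_path),
        kwargs={'global_step': step})
    self._save_thread.start()

def after_run(self, run_context, run_values):
    global_step = run_context.session.run(self._global_step_tensor)
    if self._timer.should_trigger_for_step(global_step):
        self._timer.update_last_triggered_step(global_step)
        self._save_async(run_context.session, global_step)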
Example 6: __init__
# Required import: from tensorflow.python.training import basic_session_run_hooks
# Or: from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
def __init__(self, timeline_dir, every_n_iter=None, every_n_secs=None):
  if (every_n_iter is None and every_n_secs is None) or (
      every_n_iter is not None and every_n_secs is not None):
    raise ValueError(
        "Exactly one of every_n_iter and every_n_secs should be provided.")
  self._timeline_dir = timeline_dir
  self._timer = basic_session_run_hooks.SecondOrStepTimer(
      every_secs=every_n_secs, every_steps=every_n_iter)
  self._iter_count = 0
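Since only __init__ is shown, here is a hypothetical usage sketch (the class name TimelineHook, train_op, and the output directory are assumptions) showing how such a hook attaches to a tf.compat.v1.train.MonitoredTrainingSession:

import tensorflow as tf

hook = TimelineHook("/tmp/timelines", every_n_iter=500)  # hypothetical class name
with tf.compat.v1.train.MonitoredTrainingSession(hooks=[hook]) as sess:
    while not sess.should_stop():
        sess.run(train_op)  # train_op assumed defined elsewhere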
Example 7: __init__
# Required import: from tensorflow.python.training import basic_session_run_hooks
# Or: from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
def __init__(self, should_stop_fn, run_every_secs=60, run_every_steps=None):
  if not callable(should_stop_fn):
    raise TypeError('`should_stop_fn` must be callable.')
  self._should_stop_fn = should_stop_fn
  self._timer = tf.compat.v1.train.SecondOrStepTimer(
      every_secs=run_every_secs, every_steps=run_every_steps)
  self._global_step_tensor = None
  self._stop_var = None
  self._stop_op = None
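Note that this example reaches the same class through its public alias, tf.compat.v1.train.SecondOrStepTimer. The polling side of such a stop hook, simplified from the pattern used by tf.estimator's early-stopping hooks (the fetch and the stop signalling here are assumptions), might look like:

def after_run(self, run_context, run_values):
    global_step = run_values.results  # assumed fetched by before_run()
    if self._timer.should_trigger_for_step(global_step):
        self._timer.update_last_triggered_step(global_step)
        if self._should_stop_fn():
            run_context.session.run(self._stop_op)  # signal other workers
            run_context.request_stop()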
Example 8: begin
# Required import: from tensorflow.python.training import basic_session_run_hooks
# Or: from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
def begin(self):
  self._timer = basic_session_run_hooks.SecondOrStepTimer(
      every_secs=self._eval_throttle_secs)
  self._is_first_run = True
Example 9: run_master
# Required import: from tensorflow.python.training import basic_session_run_hooks
# Or: from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
def run_master(self):
  """Runs task master."""

  class NewCheckpointListener(
      basic_session_run_hooks.CheckpointSaverListener):

    def __init__(self, evaluator, eval_throttle_secs):
      self._evaluator = evaluator
      self._eval_throttle_secs = eval_throttle_secs

    def begin(self):
      self._timer = basic_session_run_hooks.SecondOrStepTimer(
          every_secs=self._eval_throttle_secs)

    def after_save(self, session, global_step_value):
      del session  # unused; required by signature.
      if self._timer.should_trigger_for_step(global_step_value):
        self._timer.update_last_triggered_step(global_step_value)
        self._evaluator.evaluate_and_export()
      else:
        logging.info(
            'Skip the current checkpoint eval due to throttle secs '
            '({} secs).'.format(self._eval_throttle_secs))

  # Final export signal: for any eval result with global_step >= train
  # max_steps, the evaluator will send the final export signal. There is a
  # small chance that the Estimator.train stopping logic sees a different
  # global_step value (due to a global step race condition, and because the
  # saver sees a larger value for checkpoint saving), in which case training
  # does not end. When the training does end, a new checkpoint is generated,
  # which triggers the listener again, so the final export could be
  # triggered twice.
  #
  # But here, throttle_secs will skip the next intermediate checkpoint, so
  # the chance of a double final export is very small.
  evaluator = _TrainingExecutor._Evaluator(
      self._estimator, self._eval_spec, self._train_spec.max_steps)

  # When the underlying `Estimator` object saves a new checkpoint, we would
  # like this callback to be called so that evaluation and export can trigger.
  saving_listeners = [
      NewCheckpointListener(evaluator, self._eval_spec.throttle_secs)
  ]
  self._start_distributed_training(saving_listeners=saving_listeners)

  if not evaluator.is_final_export_triggered:
    logging.info('Training has already ended. But the last eval is skipped '
                 'due to eval throttle_secs. Now evaluating the final '
                 'checkpoint.')
    evaluator.evaluate_and_export()
Author: PacktPublishing; Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda; Lines: 53; Source file: training.py
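As a closing note, the throttle_secs that drives the listener in Example 9 comes from the user-facing tf.estimator.EvalSpec. A hedged usage sketch (estimator, train_input_fn, and eval_input_fn are assumed to be defined elsewhere):

import tensorflow as tf

# Evaluate at most once every 10 minutes, however often checkpoints are written.
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=100000)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, throttle_secs=600)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)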