This article collects typical usage examples of ExamplesPerSecondHook from the Python module official.utils.logs.hooks. If you are wondering what hooks.ExamplesPerSecondHook does and how it is used in practice, the curated code examples below may help. You can also explore further usage examples of the containing module, official.utils.logs.hooks.

The following 15 code examples of hooks.ExamplesPerSecondHook are shown, sorted by popularity by default.

Example 1: get_examples_per_second_hook

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def get_examples_per_second_hook(every_n_steps=100,
                                 batch_size=128,
                                 warm_steps=5,
                                 **kwargs):  # pylint: disable=unused-argument
  """Function to get ExamplesPerSecondHook.

  Args:
    every_n_steps: `int`, print current and average examples per second every
      N steps.
    batch_size: `int`, total batch size used to calculate examples/second from
      global time.
    warm_steps: skip this number of steps before logging and running average.
    **kwargs: a dictionary of arguments to ExamplesPerSecondHook.

  Returns:
    Returns an ExamplesPerSecondHook that logs the current and average number
    of examples processed per second during training.
  """
  return hooks.ExamplesPerSecondHook(
      batch_size=batch_size, every_n_steps=every_n_steps,
      warm_steps=warm_steps, metric_logger=logger.get_benchmark_logger())
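
The returned hook is a regular TensorFlow SessionRunHook, so it can be attached to an Estimator train call or a monitored session. The snippet below is a minimal, self-contained sketch (not taken from the original source): it drives a dummy train op that only increments the global step, and it omits metric_logger, as Example 3 below also does, so the hook uses its default logger.

import tensorflow as tf

from official.utils.logs import hooks

with tf.Graph().as_default():
  # A stand-in train op: each run just advances the global step by one.
  global_step = tf.compat.v1.train.get_or_create_global_step()
  train_op = tf.compat.v1.assign_add(global_step, 1)

  eps_hook = hooks.ExamplesPerSecondHook(batch_size=128, every_n_steps=10)
  with tf.compat.v1.train.MonitoredTrainingSession(hooks=[eps_hook]) as sess:
    for _ in range(50):
      sess.run(train_op)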

Example 2: _validate_log_every_n_secs

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def _validate_log_every_n_secs(self, every_n_secs):
  hook = hooks.ExamplesPerSecondHook(
      batch_size=256,
      every_n_steps=None,
      every_n_secs=every_n_secs,
      metric_logger=self._logger)

  with tf.train.MonitoredSession(
      tf.train.ChiefSessionCreator(), [hook]) as mon_sess:
    # Explicitly run global_step after train_op to get the accurate
    # global_step value.
    mon_sess.run(self.train_op)
    mon_sess.run(self.global_step)
    # Nothing should be in the list yet.
    self.assertFalse(self._logger.logged_metric)
    time.sleep(every_n_secs)

    mon_sess.run(self.train_op)
    mon_sess.run(self.global_step)
    self._assert_metrics()
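
The validation helpers in Examples 2 and 15 depend on fixtures built in the test's setUp: a train_op that increments the global step, and a metric logger whose logged_metric list records each reported value. The class below is a hypothetical minimal stand-in for that logger (the real tests use the repo's own mock logger); it is shown only to make the assertions above easier to follow.

class _RecordingLogger(object):
  """Hypothetical stand-in for the metric logger used by the test helpers."""

  def __init__(self):
    self.logged_metric = []

  def log_metric(self, name, value, unit=None, global_step=None, extras=None):
    # The hook is expected to report current and average examples/sec
    # through calls like this one.
    self.logged_metric.append(
        {'name': name, 'value': value, 'global_step': global_step})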

Example 3: get_examples_per_second_hook

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def get_examples_per_second_hook(every_n_steps=100,
                                 batch_size=128,
                                 warm_steps=5,
                                 **kwargs):  # pylint: disable=unused-argument
  """Function to get ExamplesPerSecondHook.

  Args:
    every_n_steps: `int`, print current and average examples per second every
      N steps.
    batch_size: `int`, total batch size used to calculate examples/second from
      global time.
    warm_steps: skip this number of steps before logging and running average.
    **kwargs: a dictionary of arguments to ExamplesPerSecondHook.

  Returns:
    Returns an ExamplesPerSecondHook that logs the current and average number
    of examples processed per second during training.
  """
  return hooks.ExamplesPerSecondHook(every_n_steps=every_n_steps,
                                     batch_size=batch_size,
                                     warm_steps=warm_steps)
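
Unlike Example 1, this variant does not pass a metric_logger, so the hook relies on whatever default logger it creates internally rather than the shared benchmark logger obtained via logger.get_benchmark_logger(); otherwise the two factories behave the same.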

Example 4: benchmark_graph_fp16_8_gpu_ring_tweaked

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def benchmark_graph_fp16_8_gpu_ring_tweaked(self):
  """Benchmarks graph fp16 8 gpus with ring collective tweaked."""
  self._setup()

  FLAGS.num_gpus = 8
  FLAGS.distribution_strategy = 'multi_worker_mirrored'
  FLAGS.all_reduce_alg = 'ring'
  FLAGS.tf_gpu_thread_mode = 'gpu_private'
  FLAGS.intra_op_parallelism_threads = 1
  FLAGS.datasets_num_private_threads = 32
  FLAGS.model_dir = self._get_model_dir(
      folder_name='benchmark_graph_fp16_8_gpu_ring_tweaked')
  FLAGS.batch_size = 256 * 8
  FLAGS.dtype = 'fp16'
  FLAGS.hooks = ['ExamplesPerSecondHook']
  self._run_and_report_benchmark()

Author: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 18, Source: estimator_benchmark.py
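
In this and the following benchmark examples, FLAGS.hooks = ['ExamplesPerSecondHook'] only names the hook; the benchmark harness is expected to resolve that name into an actual hook instance through a factory such as get_train_hooks (see Examples 11 and 12 below).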

Example 5: benchmark_graph_fp16_8_gpu_nccl_tweaked

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def benchmark_graph_fp16_8_gpu_nccl_tweaked(self):
  """Benchmarks graph fp16 8 gpus with nccl collective tweaked."""
  self._setup()

  FLAGS.num_gpus = 8
  FLAGS.distribution_strategy = 'multi_worker_mirrored'
  FLAGS.all_reduce_alg = 'nccl'
  FLAGS.tf_gpu_thread_mode = 'gpu_private'
  FLAGS.intra_op_parallelism_threads = 1
  FLAGS.datasets_num_private_threads = 32
  FLAGS.model_dir = self._get_model_dir(
      folder_name='benchmark_graph_fp16_8_gpu_nccl_tweaked')
  FLAGS.batch_size = 256 * 8
  FLAGS.dtype = 'fp16'
  FLAGS.hooks = ['ExamplesPerSecondHook']
  self._run_and_report_benchmark()

Author: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 18, Source: estimator_benchmark.py

Example 6: benchmark_graph_8_gpu

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def benchmark_graph_8_gpu(self):
  """Benchmark graph mode 8 gpus.

  SOTA is 28.4 BLEU (uncased).
  """
  self._setup()

  FLAGS.num_gpus = 8
  FLAGS.data_dir = self.train_data_dir
  FLAGS.vocab_file = self.vocab_file
  # Sets values directly to avoid validation check.
  FLAGS['bleu_source'].value = self.bleu_source
  FLAGS['bleu_ref'].value = self.bleu_ref
  FLAGS.param_set = 'big'
  FLAGS.batch_size = 3072 * 8
  FLAGS.train_steps = 100000
  FLAGS.steps_between_evals = 5000
  FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
  FLAGS.hooks = ['ExamplesPerSecondHook']
  self._run_and_report_benchmark()

Author: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 21, Source: transformer_estimator_benchmark.py

Example 7: benchmark_graph_8_gpu_static_batch

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def benchmark_graph_8_gpu_static_batch(self):
  """Benchmark graph mode 8 gpus with static batching.

  SOTA is 28.4 BLEU (uncased).
  """
  self._setup()

  FLAGS.num_gpus = 8
  FLAGS.data_dir = self.train_data_dir
  FLAGS.vocab_file = self.vocab_file
  # Sets values directly to avoid validation check.
  FLAGS['bleu_source'].value = self.bleu_source
  FLAGS['bleu_ref'].value = self.bleu_ref
  FLAGS.param_set = 'big'
  FLAGS.batch_size = 3072 * 8
  FLAGS.static_batch = True
  FLAGS.max_length = 64
  FLAGS.train_steps = 100000
  FLAGS.steps_between_evals = 5000
  FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
  FLAGS.hooks = ['ExamplesPerSecondHook']
  self._run_and_report_benchmark()

Author: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 23, Source: transformer_estimator_benchmark.py

Example 8: benchmark_graph_2_gpu

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def benchmark_graph_2_gpu(self):
  """Benchmark graph mode 2 gpus.

  The paper uses 8 GPUs and a much larger effective batch size; this will
  not converge to the 27.3 BLEU (uncased) SOTA.
  """
  self._setup()

  FLAGS.num_gpus = 2
  FLAGS.data_dir = self.train_data_dir
  FLAGS.vocab_file = self.vocab_file
  # Sets values directly to avoid validation check.
  FLAGS['bleu_source'].value = self.bleu_source
  FLAGS['bleu_ref'].value = self.bleu_ref
  FLAGS.param_set = 'base'
  FLAGS.batch_size = 4096 * 2
  FLAGS.train_steps = 100000
  FLAGS.steps_between_evals = 5000
  FLAGS.model_dir = self._get_model_dir('benchmark_graph_2_gpu')
  FLAGS.hooks = ['ExamplesPerSecondHook']
  # These bleu scores are based on test runs at this limited number of steps
  # and batch size, after verifying SOTA at 8xV100s.
  self._run_and_report_benchmark(bleu_min=25.3, bleu_max=26)

Author: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 24, Source: transformer_estimator_benchmark.py

Example 9: benchmark_graph_fp16_8_gpu

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def benchmark_graph_fp16_8_gpu(self):
  """Benchmark 8 gpus with fp16 mixed precision.

  SOTA is 27.3 BLEU (uncased).
  """
  self._setup()

  FLAGS.num_gpus = 8
  FLAGS.dtype = 'fp16'
  FLAGS.data_dir = self.train_data_dir
  FLAGS.vocab_file = self.vocab_file
  # Sets values directly to avoid validation check.
  FLAGS['bleu_source'].value = self.bleu_source
  FLAGS['bleu_ref'].value = self.bleu_ref
  FLAGS.param_set = 'base'
  FLAGS.batch_size = 4096 * 8
  FLAGS.train_steps = 100000
  FLAGS.steps_between_evals = 5000
  FLAGS.model_dir = self._get_model_dir('benchmark_graph_fp16_8_gpu')
  FLAGS.hooks = ['ExamplesPerSecondHook']
  self._run_and_report_benchmark()

Author: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 22, Source: transformer_estimator_benchmark.py

Example 10: __init__

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
  train_data_dir = os.path.join(root_data_dir,
                                TRANSFORMER_EN2DE_DATA_DIR_NAME)
  vocab_file = os.path.join(root_data_dir,
                            TRANSFORMER_EN2DE_DATA_DIR_NAME,
                            'vocab.ende.32768')

  def_flags = {}
  def_flags['param_set'] = 'base'
  def_flags['vocab_file'] = vocab_file
  def_flags['data_dir'] = train_data_dir
  def_flags['train_steps'] = 200
  def_flags['steps_between_evals'] = 200
  def_flags['hooks'] = ['ExamplesPerSecondHook']

  super(TransformerBaseEstimatorBenchmarkReal, self).__init__(
      output_dir=output_dir, default_flags=def_flags)

Author: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 19, Source: transformer_estimator_benchmark.py

Example 11: get_train_hooks

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def get_train_hooks(name_list, use_tpu=False, **kwargs):
  """Factory for getting a list of TensorFlow hooks for training by name.

  Args:
    name_list: a list of strings to name desired hook classes. Allowed:
      LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined
      as keys in HOOKS.
    use_tpu: Boolean of whether computation occurs on a TPU. This will disable
      hooks altogether.
    **kwargs: a dictionary of arguments to the hooks.

  Returns:
    list of instantiated hooks, ready to be used in a classifier.train call.

  Raises:
    ValueError: if an unrecognized name is passed.
  """
  if not name_list:
    return []

  if use_tpu:
    tf.logging.warning("hooks_helper received name_list `{}`, but a TPU is "
                       "specified. No hooks will be used.".format(name_list))
    return []

  train_hooks = []
  for name in name_list:
    hook_name = HOOKS.get(name.strip().lower())
    if hook_name is None:
      raise ValueError('Unrecognized training hook requested: {}'.format(name))
    else:
      train_hooks.append(hook_name(**kwargs))

  return train_hooks

Example 12: get_train_hooks

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def get_train_hooks(name_list, use_tpu=False, **kwargs):
  """Factory for getting a list of TensorFlow hooks for training by name.

  Args:
    name_list: a list of strings to name desired hook classes. Allowed:
      LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined
      as keys in HOOKS.
    use_tpu: Boolean of whether computation occurs on a TPU. This will disable
      hooks altogether.
    **kwargs: a dictionary of arguments to the hooks.

  Returns:
    list of instantiated hooks, ready to be used in a classifier.train call.

  Raises:
    ValueError: if an unrecognized name is passed.
  """
  if not name_list:
    return []

  if use_tpu:
    tf.compat.v1.logging.warning('hooks_helper received name_list `{}`, but a '
                                 'TPU is specified. No hooks will be used.'
                                 .format(name_list))
    return []

  train_hooks = []
  for name in name_list:
    hook_name = HOOKS.get(name.strip().lower())
    if hook_name is None:
      raise ValueError('Unrecognized training hook requested: {}'.format(name))
    else:
      train_hooks.append(hook_name(**kwargs))

  return train_hooks
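
Both versions of get_train_hooks resolve the lower-cased hook names through a module-level HOOKS mapping. The sketch below is a plausible reconstruction of that registry based on the names listed in the docstring; only the ExamplesPerSecondHook entry is grounded in the factories shown in this article, so treat the rest as assumptions.

# Hypothetical registry sketch: keys are the lower-cased hook names accepted by
# get_train_hooks, values are factory functions such as the one in Examples 1
# and 3.
HOOKS = {
    'examplespersecondhook': get_examples_per_second_hook,
    # 'loggingtensorhook': ...,   # additional factories, not shown in this article
    # 'profilerhook': ...,        # additional factories, not shown in this article
}

# The lookup in get_train_hooks then reduces to:
#   HOOKS.get('ExamplesPerSecondHook'.strip().lower())  # -> the factory above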

Example 13: test_raise_in_both_secs_and_steps

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def test_raise_in_both_secs_and_steps(self):
  with self.assertRaises(ValueError):
    hooks.ExamplesPerSecondHook(
        batch_size=256,
        every_n_steps=10,
        every_n_secs=20,
        metric_logger=self._logger)

Example 14: test_raise_in_none_secs_and_steps

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def test_raise_in_none_secs_and_steps(self):
  with self.assertRaises(ValueError):
    hooks.ExamplesPerSecondHook(
        batch_size=256,
        every_n_steps=None,
        every_n_secs=None,
        metric_logger=self._logger)
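
Taken together, Examples 13 and 14 pin down the constructor contract: exactly one of every_n_steps and every_n_secs must be provided. Below is a plausible sketch of that check inside the hook's constructor; the exact wording and placement in the real implementation may differ.

# Hypothetical reconstruction of the validation exercised by Examples 13 and 14:
# reject "both set" and "neither set".
if (every_n_steps is None) == (every_n_secs is None):
  raise ValueError('exactly one of every_n_steps and every_n_secs '
                   'should be provided.')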

Example 15: _validate_log_every_n_steps

# Required import: from official.utils.logs import hooks [as alias]
# Or: from official.utils.logs.hooks import ExamplesPerSecondHook [as alias]
def _validate_log_every_n_steps(self, every_n_steps, warm_steps):
  hook = hooks.ExamplesPerSecondHook(
      batch_size=256,
      every_n_steps=every_n_steps,
      warm_steps=warm_steps,
      metric_logger=self._logger)

  with tf.compat.v1.train.MonitoredSession(
      tf.compat.v1.train.ChiefSessionCreator(), [hook]) as mon_sess:
    for _ in range(every_n_steps):
      # Explicitly run global_step after train_op to get the accurate
      # global_step value.
      mon_sess.run(self.train_op)
      mon_sess.run(self.global_step)
      # Nothing should be in the list yet.
      self.assertFalse(self._logger.logged_metric)

    mon_sess.run(self.train_op)
    global_step_val = mon_sess.run(self.global_step)

    if global_step_val > warm_steps:
      self._assert_metrics()
    else:
      # Nothing should be in the list yet.
      self.assertFalse(self._logger.logged_metric)

    # Add an additional run to verify proper reset when called multiple times.
    prev_log_len = len(self._logger.logged_metric)
    mon_sess.run(self.train_op)
    global_step_val = mon_sess.run(self.global_step)

    if every_n_steps == 1 and global_step_val > warm_steps:
      # Each time, we log two additional metrics. Did exactly 2 get added?
      self.assertEqual(len(self._logger.logged_metric), prev_log_len + 2)
    else:
      # No change in the size of the metric list.
      self.assertEqual(len(self._logger.logged_metric), prev_log_len)
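
The parametrized helpers in Examples 2 and 15 are presumably driven by small test methods. Hypothetical invocations are sketched below; the method names and parameter values are assumptions chosen to be consistent with the helpers' assertions, not taken from the original test file.

# Hypothetical driver tests for the helpers in Examples 2 and 15.
def test_examples_per_sec_every_1_steps(self):
  self._validate_log_every_n_steps(every_n_steps=1, warm_steps=0)

def test_examples_per_sec_every_5_steps_with_warm_up(self):
  self._validate_log_every_n_steps(every_n_steps=5, warm_steps=10)

def test_examples_per_sec_every_1_secs(self):
  self._validate_log_every_n_secs(1)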