本文整理汇总了Python中official.utils.misc.distribution_utils.get_distribution_strategy方法的典型用法代码示例。如果您正苦于以下问题:Python distribution_utils.get_distribution_strategy方法的具体用法?Python distribution_utils.get_distribution_strategy怎么用?Python distribution_utils.get_distribution_strategy使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类official.utils.misc.distribution_utils
的用法示例。
在下文中一共展示了distribution_utils.get_distribution_strategy方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _get_distribution_strategy
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def _get_distribution_strategy(self, ds_type='mirrored'):
  """Builds and returns the requested distribution strategy.

  Args:
    ds_type: String naming the strategy type; one of 'mirrored',
      'multi_worker_mirrored', 'tpu' or 'off'.

  Returns:
    A `tf.distribute.DistributionStrategy` object.
  """
  # An explicit TPU address on the benchmark instance forces a TPU
  # strategy regardless of the requested ds_type.
  if self.tpu or ds_type == 'tpu':
    return distribution_utils.get_distribution_strategy(
        distribution_strategy='tpu', tpu_address=self.tpu)

  if ds_type == 'multi_worker_mirrored':
    # Configures cluster spec for multi-worker distribution strategy.
    _ = distribution_utils.configure_cluster(FLAGS.worker_hosts,
                                             FLAGS.task_index)

  return distribution_utils.get_distribution_strategy(
      distribution_strategy=ds_type,
      num_gpus=self.num_gpus,
      all_reduce_alg=FLAGS.all_reduce_alg)
示例2: __init__
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def __init__(self, strategy_type=None, strategy_config=None):
  """Constructor.

  Args:
    strategy_type: string. One of 'tpu', 'mirrored', 'multi_worker_mirrored'.
      If None. User is responsible to set the strategy before calling
      build_executor(...).
    strategy_config: necessary config for constructing the proper Strategy.
      Check strategy_flags_dict() for examples of the structure.
  """
  # BUG FIX: the docstring previously appeared AFTER the configure_cluster
  # call, so Python treated it as a bare string statement rather than the
  # method's docstring. It now leads the body, as required.
  # Configure the cluster spec (TF_CONFIG) before building the strategy so
  # multi-worker strategies can discover their peers.
  _ = distribution_utils.configure_cluster(
      strategy_config.worker_hosts, strategy_config.task_index)
  self._strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy=strategy_type,
      num_gpus=strategy_config.num_gpus,
      all_reduce_alg=strategy_config.all_reduce_alg,
      num_packs=strategy_config.num_packs,
      tpu_address=strategy_config.tpu)
示例3: get_v1_distribution_strategy
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def get_v1_distribution_strategy(params):
  """Returns the distribution strategy to use.

  Args:
    params: dict of run options; keys read here are "use_tpu", "tpu",
      "tpu_zone", "tpu_gcp_project" and "num_gpus".

  Returns:
    A distribution strategy: a TPUStrategy when params["use_tpu"] is true,
    otherwise whatever distribution_utils.get_distribution_strategy picks
    for the requested GPU count.
  """
  if params["use_tpu"]:
    # Some of the networking libraries are quite chatty.
    for name in ["googleapiclient.discovery", "googleapiclient.discovery_cache",
                 "oauth2client.transport"]:
      logging.getLogger(name).setLevel(logging.ERROR)

    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        tpu=params["tpu"],
        zone=params["tpu_zone"],
        project=params["tpu_gcp_project"],
        coordinator_name="coordinator"
    )

    logging.info("Issuing reset command to TPU to ensure a clean state.")
    tf.Session.reset(tpu_cluster_resolver.get_master())

    # Estimator looks at the master it connects to for MonitoredTrainingSession
    # by reading the `TF_CONFIG` environment variable, and the coordinator
    # is used by StreamingFilesDataset.
    tf_config_env = {
        "session_master": tpu_cluster_resolver.get_master(),
        "eval_session_master": tpu_cluster_resolver.get_master(),
        "coordinator": tpu_cluster_resolver.cluster_spec()
                       .as_dict()["coordinator"]
    }
    os.environ["TF_CONFIG"] = json.dumps(tf_config_env)

    # NOTE(review): steps_per_run=100 batches 100 training steps per host
    # call — presumably tuned for this workload; confirm before changing.
    distribution = tf.distribute.experimental.TPUStrategy(
        tpu_cluster_resolver, steps_per_run=100)

  else:
    distribution = distribution_utils.get_distribution_strategy(
        num_gpus=params["num_gpus"])

  return distribution
示例4: test_one_device_strategy_cpu
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def test_one_device_strategy_cpu(self):
  """Zero GPUs should yield a single-tower strategy pinned to the CPU."""
  ds = distribution_utils.get_distribution_strategy(0)
  self.assertTrue(ds.is_single_tower)
  # assertEquals is a deprecated alias (removed in Python 3.12);
  # use the canonical assertEqual.
  self.assertEqual(ds.num_towers, 1)
  self.assertEqual(len(ds.worker_devices), 1)
  self.assertIn('CPU', ds.worker_devices[0])
示例5: test_one_device_strategy_gpu
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def test_one_device_strategy_gpu(self):
  """One GPU should yield a single-tower strategy pinned to that GPU."""
  ds = distribution_utils.get_distribution_strategy(1)
  self.assertTrue(ds.is_single_tower)
  # assertEquals is a deprecated alias (removed in Python 3.12);
  # use the canonical assertEqual.
  self.assertEqual(ds.num_towers, 1)
  self.assertEqual(len(ds.worker_devices), 1)
  self.assertIn('GPU', ds.worker_devices[0])
示例6: test_mirrored_strategy
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def test_mirrored_strategy(self):
  """Multiple GPUs should yield a mirrored strategy with one tower per GPU."""
  ds = distribution_utils.get_distribution_strategy(5)
  self.assertFalse(ds.is_single_tower)
  # assertEquals is a deprecated alias (removed in Python 3.12);
  # use the canonical assertEqual.
  self.assertEqual(ds.num_towers, 5)
  self.assertEqual(len(ds.worker_devices), 5)
  for device in ds.worker_devices:
    self.assertIn('GPU', device)
示例7: test_one_device_strategy_cpu
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def test_one_device_strategy_cpu(self):
  """num_gpus=0 should produce a single-replica strategy on the CPU."""
  ds = distribution_utils.get_distribution_strategy(num_gpus=0)
  # assertEquals is a deprecated alias (removed in Python 3.12);
  # use the canonical assertEqual.
  self.assertEqual(ds.num_replicas_in_sync, 1)
  self.assertEqual(len(ds.extended.worker_devices), 1)
  self.assertIn('CPU', ds.extended.worker_devices[0])
开发者ID:ShivangShekhar,项目名称:Live-feed-object-device-identification-using-Tensorflow-and-OpenCV,代码行数:7,代码来源:distribution_utils_test.py
示例8: test_one_device_strategy_gpu
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def test_one_device_strategy_gpu(self):
  """num_gpus=1 should produce a single-replica strategy on the GPU."""
  ds = distribution_utils.get_distribution_strategy(num_gpus=1)
  # assertEquals is a deprecated alias (removed in Python 3.12);
  # use the canonical assertEqual.
  self.assertEqual(ds.num_replicas_in_sync, 1)
  self.assertEqual(len(ds.extended.worker_devices), 1)
  self.assertIn('GPU', ds.extended.worker_devices[0])
开发者ID:ShivangShekhar,项目名称:Live-feed-object-device-identification-using-Tensorflow-and-OpenCV,代码行数:7,代码来源:distribution_utils_test.py
示例9: test_mirrored_strategy
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def test_mirrored_strategy(self):
  """num_gpus=5 should mirror across five GPU replicas."""
  ds = distribution_utils.get_distribution_strategy(num_gpus=5)
  # assertEquals is a deprecated alias (removed in Python 3.12);
  # use the canonical assertEqual.
  self.assertEqual(ds.num_replicas_in_sync, 5)
  self.assertEqual(len(ds.extended.worker_devices), 5)
  for device in ds.extended.worker_devices:
    self.assertIn('GPU', device)
开发者ID:ShivangShekhar,项目名称:Live-feed-object-device-identification-using-Tensorflow-and-OpenCV,代码行数:8,代码来源:distribution_utils_test.py
示例10: _run_bert_classifier
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def _run_bert_classifier(self, callbacks=None, use_ds=True):
  """Starts BERT classification task.

  Args:
    callbacks: optional list of custom Keras callbacks forwarded to
      run_customized_training.
    use_ds: bool; when True trains under a mirrored strategy, otherwise
      the strategy is 'off' (no distribution).
  """
  # Input metadata (train/eval sizes etc.) is stored as JSON next to the data.
  with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
    input_meta_data = json.loads(reader.read().decode('utf-8'))
  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
  # Benchmark-level overrides take precedence over the command-line flags.
  epochs = self.num_epochs if self.num_epochs else FLAGS.num_train_epochs
  if self.num_steps_per_epoch:
    steps_per_epoch = self.num_steps_per_epoch
  else:
    train_data_size = input_meta_data['train_data_size']
    steps_per_epoch = int(train_data_size / FLAGS.train_batch_size)
  # Warm up over the first 10% of total training steps.
  warmup_steps = int(epochs * steps_per_epoch * 0.1)
  eval_steps = int(
      math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size))
  strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy='mirrored' if use_ds else 'off',
      num_gpus=self.num_gpus)
  # One training step per host loop iteration.
  steps_per_loop = 1

  run_classifier.run_customized_training(
      strategy,
      bert_config,
      input_meta_data,
      FLAGS.model_dir,
      epochs,
      steps_per_epoch,
      steps_per_loop,
      eval_steps,
      warmup_steps,
      FLAGS.learning_rate,
      FLAGS.init_checkpoint,
      custom_callbacks=callbacks)
开发者ID:ShivangShekhar,项目名称:Live-feed-object-device-identification-using-Tensorflow-and-OpenCV,代码行数:36,代码来源:bert_benchmark.py
示例11: test_one_device_strategy_cpu
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def test_one_device_strategy_cpu(self):
  """Zero GPUs should produce a single-replica strategy on the CPU."""
  ds = distribution_utils.get_distribution_strategy(0)
  # assertEquals is a deprecated alias (removed in Python 3.12);
  # use the canonical assertEqual.
  self.assertEqual(ds.num_replicas_in_sync, 1)
  self.assertEqual(len(ds.extended.worker_devices), 1)
  self.assertIn('CPU', ds.extended.worker_devices[0])
示例12: test_one_device_strategy_gpu
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def test_one_device_strategy_gpu(self):
  """One GPU should produce a single-replica strategy on the GPU."""
  ds = distribution_utils.get_distribution_strategy(1)
  # assertEquals is a deprecated alias (removed in Python 3.12);
  # use the canonical assertEqual.
  self.assertEqual(ds.num_replicas_in_sync, 1)
  self.assertEqual(len(ds.extended.worker_devices), 1)
  self.assertIn('GPU', ds.extended.worker_devices[0])
示例13: test_mirrored_strategy
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def test_mirrored_strategy(self):
  """Five GPUs should mirror across five GPU replicas."""
  ds = distribution_utils.get_distribution_strategy(5)
  # assertEquals is a deprecated alias (removed in Python 3.12);
  # use the canonical assertEqual.
  self.assertEqual(ds.num_replicas_in_sync, 5)
  self.assertEqual(len(ds.extended.worker_devices), 5)
  for device in ds.extended.worker_devices:
    self.assertIn('GPU', device)
示例14: _run_and_report_benchmark
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def _run_and_report_benchmark(self, summary_path: str, report_accuracy: bool):
  """Runs and reports the benchmark given the provided configuration.

  Args:
    summary_path: path to the JSON summary written by the pretraining run.
    report_accuracy: whether accuracy metrics are included in the report.
  """
  strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy='tpu', tpu_address=self.tpu)
  logging.info('Flags: %s', flags_core.get_nondefault_flags_as_str())

  # Time the full pretraining run wall-clock.
  start_time_sec = time.time()
  run_pretraining.run_bert_pretrain(
      strategy=strategy, custom_callbacks=self.timer_callback)
  wall_time_sec = time.time() - start_time_sec

  # The run writes its metrics summary as JSON; load and report it.
  with tf.io.gfile.GFile(summary_path, 'rb') as reader:
    summary = json.loads(reader.read().decode('utf-8'))
  self._report_benchmark(summary, start_time_sec, wall_time_sec,
                         report_accuracy)
示例15: main
# 需要导入模块: from official.utils.misc import distribution_utils [as 别名]
# 或者: from official.utils.misc.distribution_utils import get_distribution_strategy [as 别名]
def main(_):
  """Entry point: exports, trains, predicts and/or evaluates a SQuAD model.

  FLAGS.mode selects the work: 'export_only' exports and returns early;
  otherwise any of 'train', 'predict', 'eval' contained in the mode string
  triggers the corresponding phase under a shared distribution strategy.
  """
  with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
    input_meta_data = json.loads(reader.read().decode('utf-8'))

  # Export-only short-circuits everything else (no strategy needed).
  if FLAGS.mode == 'export_only':
    export_squad(FLAGS.model_export_path, input_meta_data)
    return

  # Configures cluster spec for multi-worker distribution strategy.
  if FLAGS.num_gpus > 0:
    _ = distribution_utils.configure_cluster(FLAGS.worker_hosts,
                                             FLAGS.task_index)
  strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy=FLAGS.distribution_strategy,
      num_gpus=FLAGS.num_gpus,
      all_reduce_alg=FLAGS.all_reduce_alg,
      tpu_address=FLAGS.tpu)
  # Substring checks allow combined modes such as 'train_and_predict'.
  if 'train' in FLAGS.mode:
    train_squad(strategy, input_meta_data, run_eagerly=FLAGS.run_eagerly)
  if 'predict' in FLAGS.mode:
    predict_squad(strategy, input_meta_data)
  if 'eval' in FLAGS.mode:
    eval_metrics = eval_squad(strategy, input_meta_data)
    f1_score = eval_metrics['final_f1']
    logging.info('SQuAD eval F1-score: %f', f1_score)
    summary_dir = os.path.join(FLAGS.model_dir, 'summaries', 'eval')
    summary_writer = tf.summary.create_file_writer(summary_dir)
    with summary_writer.as_default():
      # TODO(lehou): write to the correct step number.
      tf.summary.scalar('F1-score', f1_score, step=0)
      summary_writer.flush()
    # Also write eval_metrics to json file.
    squad_lib_sp.write_to_json_files(
        eval_metrics, os.path.join(summary_dir, 'eval_metrics.json'))
  # NOTE(review): the sleep presumably gives async summary/file writers time
  # to drain before the process exits — confirm before removing.
  time.sleep(60)