This article collects typical usage examples of the Python method tensorflow.contrib.tpu.python.tpu.tpu_estimator.TPUEstimator. If you are unsure what tpu_estimator.TPUEstimator does, how to call it, or what it looks like in real code, the curated examples below may help. You can also read further into the containing module, tensorflow.contrib.tpu.python.tpu.tpu_estimator, for more usage.
The listing below shows 6 code examples of tpu_estimator.TPUEstimator, ordered by popularity by default.
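Before the collected examples, here is a minimal construction sketch to orient readers. It is not taken from any of the examples below: the toy model_fn, the model_dir and the batch sizes are placeholders chosen for illustration, and use_tpu is left False so the sketch also runs without TPU hardware.
import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_estimator


def _toy_model_fn(features, labels, mode, params):
  # Placeholder model_fn showing the TPUEstimatorSpec plumbing; a real model_fn
  # returns mode-dependent specs for TRAIN, EVAL and PREDICT.
  del labels, params  # unused in this sketch
  logits = tf.layers.dense(features['x'], 2)
  loss = tf.reduce_mean(tf.square(logits))
  train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
      loss, global_step=tf.train.get_global_step())
  return tpu_estimator.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op)


run_config = tpu_config.RunConfig(
    model_dir='/tmp/tpu_estimator_example',  # assumed scratch directory
    tpu_config=tpu_config.TPUConfig(iterations_per_loop=100, num_shards=8))

estimator = tpu_estimator.TPUEstimator(
    model_fn=_toy_model_fn,
    config=run_config,
    use_tpu=False,  # set True (and point RunConfig at a TPU master) on real hardware
    train_batch_size=64,
    eval_batch_size=64)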
Example 1: test_create_tpu_estimator_and_inputs
# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_estimator [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_estimator import TPUEstimator [as alias]
def test_create_tpu_estimator_and_inputs(self):
  """Tests that number of train/eval defaults to config values."""
  run_config = tpu_config.RunConfig()
  hparams = model_hparams.create_hparams(
      hparams_overrides='load_pretrained=false')
  pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
  train_steps = 20
  train_and_eval_dict = model_lib.create_estimator_and_inputs(
      run_config,
      hparams,
      pipeline_config_path,
      train_steps=train_steps,
      use_tpu_estimator=True)
  estimator = train_and_eval_dict['estimator']
  train_steps = train_and_eval_dict['train_steps']
  self.assertIsInstance(estimator, tpu_estimator.TPUEstimator)
  self.assertEqual(20, train_steps)
Example 2: test_create_tpu_estimator_and_inputs
# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_estimator [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_estimator import TPUEstimator [as alias]
def test_create_tpu_estimator_and_inputs(self):
  """Tests that number of train/eval defaults to config values."""
  run_config = tpu_config.RunConfig()
  hparams = model_hparams.create_hparams(
      hparams_overrides='load_pretrained=false')
  pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST)
  train_steps = 20
  eval_steps = 10
  train_and_eval_dict = model_lib.create_estimator_and_inputs(
      run_config,
      hparams,
      pipeline_config_path,
      train_steps=train_steps,
      eval_steps=eval_steps,
      use_tpu_estimator=True)
  estimator = train_and_eval_dict['estimator']
  train_steps = train_and_eval_dict['train_steps']
  eval_steps = train_and_eval_dict['eval_steps']
  self.assertIsInstance(estimator, tpu_estimator.TPUEstimator)
  self.assertEqual(20, train_steps)
  self.assertEqual(10, eval_steps)
Example 3: _get_tpu_estimator
# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_estimator [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_estimator import TPUEstimator [as alias]
def _get_tpu_estimator():
  tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
      FLAGS.tpu_name, zone=None, project=None)
  tpu_grpc_url = tpu_cluster_resolver.get_master()

  run_config = contrib_tpu_python_tpu_tpu_config.RunConfig(
      master=tpu_grpc_url,
      evaluation_master=tpu_grpc_url,
      model_dir=FLAGS.work_dir,
      save_checkpoints_steps=max(1000, FLAGS.iterations_per_loop),
      save_summary_steps=FLAGS.summary_steps,
      keep_checkpoint_max=FLAGS.keep_checkpoint_max,
      session_config=tf.ConfigProto(
          allow_soft_placement=True, log_device_placement=True),
      tpu_config=contrib_tpu_python_tpu_tpu_config.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=contrib_tpu_python_tpu_tpu_config.InputPipelineConfig.PER_HOST_V2))

  return contrib_tpu_python_tpu_tpu_estimator.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size * FLAGS.num_tpu_cores,
      eval_batch_size=FLAGS.train_batch_size * FLAGS.num_tpu_cores,
      params=FLAGS.flag_values_dict())
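The estimator returned by _get_tpu_estimator would then be driven through the standard Estimator interface. A hypothetical driver is sketched below; train_input_fn and the step count are placeholders, not part of the original snippet.
# Hypothetical driver; `train_input_fn` is assumed to be an input_fn that
# returns a (features, labels) tf.data pipeline suitable for per-host TPU input.
estimator = _get_tpu_estimator()
estimator.train(input_fn=train_input_fn, max_steps=10000)  # step count chosen arbitrarily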
Example 4: testTrainingPipeline
# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_estimator [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_estimator import TPUEstimator [as alias]
def testTrainingPipeline(self, training_method):
  output_directory = '/tmp/'
  g = tf.Graph()
  with g.as_default():
    dataset = self._retrieve_data(is_training=False, data_dir=False)
    FLAGS.transpose_input = False
    FLAGS.use_tpu = False
    FLAGS.mode = 'train'
    FLAGS.mask_init_method = 'random'
    FLAGS.precision = 'float32'
    FLAGS.train_steps = 1
    FLAGS.train_batch_size = 1
    FLAGS.eval_batch_size = 1
    FLAGS.steps_per_eval = 1
    FLAGS.model_architecture = 'resnet'
    params = {}
    params['output_dir'] = output_directory
    params['training_method'] = training_method
    params['use_tpu'] = False

    set_lr_schedule()

    run_config = tpu_config.RunConfig(
        master=None,
        model_dir=None,
        save_checkpoints_steps=1,
        tpu_config=tpu_config.TPUConfig(iterations_per_loop=1, num_shards=1))

    classifier = tpu_estimator.TPUEstimator(
        use_tpu=False,
        model_fn=resnet_model_fn_w_pruning,
        params=params,
        config=run_config,
        train_batch_size=1,
        eval_batch_size=1)

    classifier.train(input_fn=dataset.input_fn, max_steps=1)
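If the test were extended to cover evaluation, the same estimator object could be reused through the standard Estimator API; the call below is a hypothetical continuation inside the same graph context, not part of the original test.
# Hypothetical continuation: evaluate on the same tiny input pipeline; the
# resulting `metrics` dict would carry the eval results.
metrics = classifier.evaluate(input_fn=dataset.input_fn, steps=1)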
Example 5: _build_estimator
# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_estimator [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_estimator import TPUEstimator [as alias]
def _build_estimator(self, is_training):
  """Returns an Estimator object.

  Args:
    is_training: Boolean, whether or not we're in training mode.

  Returns:
    A tf.estimator.Estimator.
  """
  config = self._config
  save_checkpoints_steps = config.logging.checkpoint.save_checkpoints_steps
  keep_checkpoint_max = self._config.logging.checkpoint.num_to_keep
  if is_training and config.use_tpu:
    iterations = config.tpu.iterations
    num_shards = config.tpu.num_shards
    run_config = tpu_config.RunConfig(
        save_checkpoints_secs=None,
        save_checkpoints_steps=save_checkpoints_steps,
        keep_checkpoint_max=keep_checkpoint_max,
        master=FLAGS.master,
        evaluation_master=FLAGS.master,
        model_dir=self._logdir,
        tpu_config=tpu_config.TPUConfig(
            iterations_per_loop=iterations,
            num_shards=num_shards,
            per_host_input_for_training=num_shards <= 8),
        tf_random_seed=FLAGS.tf_random_seed)

    batch_size = config.data.batch_size
    return tpu_estimator.TPUEstimator(
        model_fn=self._get_model_fn(),
        config=run_config,
        use_tpu=True,
        train_batch_size=batch_size,
        eval_batch_size=batch_size)
  else:
    run_config = tf.estimator.RunConfig().replace(
        model_dir=self._logdir,
        save_checkpoints_steps=save_checkpoints_steps,
        keep_checkpoint_max=keep_checkpoint_max,
        tf_random_seed=FLAGS.tf_random_seed)
    return tf.estimator.Estimator(
        model_fn=self._get_model_fn(),
        config=run_config)
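Because both branches return objects with the same train/evaluate/predict interface, callers of _build_estimator never need to branch on config.use_tpu themselves; the TPU-specific details stay confined to construction time. A hypothetical caller is sketched below; builder, my_input_fn and the step count are placeholders, not from the original class.
# Hypothetical caller; `builder` stands for an instance of the surrounding
# class and `my_input_fn` is a placeholder input_fn.
estimator = builder._build_estimator(is_training=True)
estimator.train(input_fn=my_input_fn, max_steps=10000)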
Example 6: tpu_test_from_params
# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_estimator [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_estimator import TPUEstimator [as alias]
def tpu_test_from_params(params, test_args, use_tpu=False):
  """Main TPU testing interface function, called by test_from_params in tfutils.test.

  See the docstring there for more info.
  """
  # Use this for TPU and estimator logging.
  tf.logging.set_verbosity(tf.logging.INFO)

  # For convenience, use a list of dicts instead of a dict of lists.
  _params = [{key: value[i] for (key, value) in params.items()}
             for i in range(len(params['model_params']))]
  _ttargs = [{key: value[i] for (key, value) in test_args.items()}
             for i in range(len(params['model_params']))]

  param = _params[0]
  ttarg = _ttargs[0]

  # Support only a single model.
  assert len(_params) == 1

  model_params = param['model_params']
  validation_params = param['validation_params']
  save_params = param['save_params']

  # Store a dictionary of estimators, one per validation target, since each
  # target may run a different number of eval steps on the TPU. If a dict of
  # estimators is not feasible, a single estimator could instead run its
  # predict method multiple times on the same data function in test_estimator.
  cls_dict = {}
  for valid_k in validation_params.keys():
    # Set up the estimator function.
    valid_target_parameter = validation_params[valid_k]
    estimator_fn, params_to_pass = create_test_estimator_fn(
        use_tpu=use_tpu,
        model_params=model_params,
        target_params=valid_target_parameter)
    validation_data_params = valid_target_parameter['data_params']
    eval_val_steps = valid_target_parameter['num_steps']
    if use_tpu:
      # Grab the TPU name, GCP project, etc. from the model params.
      m_config = create_test_tpu_config(
          model_dir=save_params.get('cache_dir', ''),
          eval_steps=eval_val_steps,
          tpu_name=model_params.get('tpu_name', None),
          gcp_project=model_params.get('gcp_project', None),
          tpu_zone=model_params.get('tpu_zone', DEFAULT_TPU_ZONE),
          num_shards=model_params.get('num_shards', DEFAULT_NUM_SHARDS),
          iterations_per_loop=model_params.get('iterations_per_loop',
                                               DEFAULT_ITERATIONS_PER_LOOP))
      estimator_classifier = tpu_estimator.TPUEstimator(
          use_tpu=True,
          model_fn=estimator_fn,
          config=m_config,
          train_batch_size=validation_data_params['batch_size'],
          predict_batch_size=validation_data_params['batch_size'],
          params=params_to_pass)
    else:
      estimator_classifier = tf.estimator.Estimator(
          model_fn=estimator_fn, params=params_to_pass)

    cls_dict[valid_k] = estimator_classifier

  return test_estimator(cls_dict=cls_dict, param=param, ttarg=ttarg)
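test_estimator itself is not shown in this listing; a simplified sketch of how the returned dictionary of estimators could be consumed follows. make_input_fn is a placeholder for building an input_fn from each target's data_params, and the real tfutils implementation may differ.
# Simplified, hypothetical consumer of cls_dict; not the real test_estimator.
for valid_k, estimator_classifier in cls_dict.items():
  data_params = param['validation_params'][valid_k]['data_params']
  for prediction in estimator_classifier.predict(
      input_fn=make_input_fn(data_params)):  # make_input_fn is a placeholder
    print(valid_k, prediction)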