本文整理汇总了Python中tensorflow.python.estimator.estimator._load_global_step_from_checkpoint_dir方法的典型用法代码示例。如果您正苦于以下问题:Python estimator._load_global_step_from_checkpoint_dir方法的具体用法?Python estimator._load_global_step_from_checkpoint_dir怎么用?Python estimator._load_global_step_from_checkpoint_dir使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块tensorflow.python.estimator.estimator的用法示例。
在下文中一共展示了estimator._load_global_step_from_checkpoint_dir方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from tensorflow.python.estimator import estimator [as 别名]
# 或者: from tensorflow.python.estimator.estimator import _load_global_step_from_checkpoint_dir [as 别名]
def main(argv):
    """Build TPU/CPU estimators for the bias dataset and render preview audio.

    Resolves the TPU cluster, configures a TPUEstimator pair (TPU for
    train/eval, CPU for predict), then runs prediction and writes each
    generated clip to ./preview as a 16-bit wav file.
    """
    del argv  # Unused.

    cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu,
        zone=FLAGS.tpu_zone,
        project=FLAGS.gcp_project)
    run_config = tf.contrib.tpu.RunConfig(
        cluster=cluster_resolver,
        model_dir=FLAGS.model_dir,
        tpu_config=tf.contrib.tpu.TPUConfig(
            num_shards=FLAGS.num_shards,
            iterations_per_loop=FLAGS.iterations_per_loop))

    # Module-level globals let model_fn and input_fn stay identical across
    # the different dataset/model variants.
    global dataset, model
    dataset = bias_input
    model = bias_model

    # TPU-based estimator used for TRAIN and EVAL.
    # NOTE(review): `est` is constructed but never used in this example.
    est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=FLAGS.use_tpu,
        config=run_config,
        train_batch_size=FLAGS.batch_size,
        eval_batch_size=FLAGS.batch_size)
    # CPU-based estimator used for PREDICT (generating the preview audio).
    cpu_est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=False,
        config=run_config,
        predict_batch_size=_NUM_VIZ_AUDIO)

    current_step = estimator._load_global_step_from_checkpoint_dir(
        FLAGS.model_dir)  # pylint: disable=protected-access
    tf.logging.info('Starting training for %d steps, current step: %d' %
                    (FLAGS.train_steps, current_step))

    # Run generation on CPU and collect all predicted clips.
    predictions = cpu_est.predict(input_fn=noise_input_fn)
    clips = np.array([p['generated_audio'][:, :] for p in predictions])

    preview_dir = './preview'
    if not os.path.isdir(preview_dir):
        os.makedirs(preview_dir)
    for idx, clip in enumerate(clips):
        # Normalize to full int16 range before writing.
        audio = np.int16(clip / np.max(np.abs(clip)) * 32767)
        preview_fp = os.path.join(
            preview_dir,
            '{}_{}_{}.wav'.format(str(idx % 10), str(current_step), str(idx)))
        wavwrite(preview_fp, _FS, audio)
    tf.logging.info('Finished generating images')
示例2: main
# 需要导入模块: from tensorflow.python.estimator import estimator [as 别名]
# 或者: from tensorflow.python.estimator.estimator import _load_global_step_from_checkpoint_dir [as 别名]
def main(argv):
    """Train a TPU model in cycles, optionally evaluating loss after each.

    Configures the dataset/model globals from FLAGS.condition, builds TPU
    (train/eval) and CPU (predict) estimators, then trains in chunks of
    FLAGS.train_steps_per_eval steps up to FLAGS.train_steps, evaluating
    on the test set after each chunk when FLAGS.eval_loss is set.
    """
    del argv  # Unused.

    global is_bias
    global noise_dim
    is_bias = FLAGS.condition == 'bias'
    noise_dim = 100 if is_bias else 90

    cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu,
        zone=FLAGS.tpu_zone,
        project=FLAGS.gcp_project)
    run_config = tf.contrib.tpu.RunConfig(
        cluster=cluster_resolver,
        model_dir=FLAGS.model_dir,
        keep_checkpoint_max=None,  # Keep every checkpoint.
        tpu_config=tf.contrib.tpu.TPUConfig(
            num_shards=FLAGS.num_shards,
            iterations_per_loop=FLAGS.iterations_per_loop))

    # Module-level globals let model_fn and input_fn stay identical across
    # the different dataset/model variants.
    global dataset, model
    dataset = tpu_input
    model = tpu_model

    # TPU-based estimator used for TRAIN and EVAL.
    est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=FLAGS.use_tpu,
        config=run_config,
        train_batch_size=FLAGS.batch_size,
        eval_batch_size=FLAGS.batch_size)
    # CPU-based estimator used for PREDICT (generating images).
    # NOTE(review): `cpu_est` is constructed but never used in this example.
    cpu_est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=False,
        config=run_config,
        predict_batch_size=_NUM_VIZ_AUDIO)

    current_step = estimator._load_global_step_from_checkpoint_dir(
        FLAGS.model_dir)  # pylint: disable=protected-access
    tf.logging.info('Starting training for %d steps, current step: %d' %
                    (FLAGS.train_steps, current_step))
    while current_step < FLAGS.train_steps:
        # Train up to the next evaluation point, capped at the total budget.
        checkpoint_step = min(current_step + FLAGS.train_steps_per_eval,
                              FLAGS.train_steps)
        est.train(input_fn=generate_input_fn(True),
                  max_steps=checkpoint_step)
        current_step = checkpoint_step
        tf.logging.info('Finished training step %d' % current_step)
        if FLAGS.eval_loss:
            # Evaluate loss on the test set.
            metrics = est.evaluate(
                input_fn=generate_input_fn(False),
                steps=dataset.NUM_EVAL_IMAGES // FLAGS.batch_size)
            tf.logging.info('Finished evaluating')
            tf.logging.info(metrics)
示例3: train_and_eval
# 需要导入模块: from tensorflow.python.estimator import estimator [as 别名]
# 或者: from tensorflow.python.estimator.estimator import _load_global_step_from_checkpoint_dir [as 别名]
def train_and_eval(deeplab_estimator, train_dataset, eval_dataset,
                   num_batches_per_epoch):
    """Interleaves training and evaluation.

    Trains `deeplab_estimator` in chunks of FLAGS.steps_per_eval steps
    (checkpointing to --model_dir at each boundary) and runs a full
    evaluation pass after every chunk, until FLAGS.train_steps is reached.
    """
    # pylint: disable=protected-access
    current_step = estimator._load_global_step_from_checkpoint_dir(
        FLAGS.model_dir)
    tf.logging.info(
        'Training for %d steps (%.2f epochs in total). Current step %d.' %
        (FLAGS.train_steps,
         FLAGS.train_steps / num_batches_per_epoch,
         current_step))

    train_start = time.time()
    while current_step < FLAGS.train_steps:
        # Train for up to steps_per_eval steps; a checkpoint is written to
        # --model_dir when training stops at the boundary.
        target_step = min(current_step + FLAGS.steps_per_eval,
                          FLAGS.train_steps)
        train_input_fn = data_pipeline.InputReader(
            train_dataset,
            FLAGS.train_split,
            is_training=True,
            model_variant=FLAGS.model_variant
        )
        deeplab_estimator.train(
            input_fn=train_input_fn,
            max_steps=target_step
        )
        current_step = target_step

        seconds_elapsed = int(time.time() - train_start)
        tf.logging.info('Finished training up to step %d. Elapsed seconds %d.' %
                        (current_step, seconds_elapsed))

        tf.logging.info('Starting to evaluate.')
        eval_input_fn = data_pipeline.InputReader(
            eval_dataset,
            FLAGS.eval_split,
            is_training=False,
            model_variant=FLAGS.model_variant
        )
        eval_results = deeplab_estimator.evaluate(
            input_fn=eval_input_fn,
            steps=eval_dataset.num_samples // FLAGS.eval_batch_size
        )
        tf.logging.info('Eval results: %s' % eval_results)