This article collects typical usage examples of the tensorflow.InvalidArgumentError method in Python. If you have been wondering what tensorflow.InvalidArgumentError does, how to use it, or what it looks like in real code, the curated example below may help. You can also explore further usage examples of the enclosing module, tensorflow.
The following shows a code example of the tensorflow.InvalidArgumentError method, taken from an open-source project.
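As a quick orientation before the example, here is a minimal, hypothetical sketch (not taken from the example below) of how an InvalidArgumentError is raised and caught at session run time. It assumes the TF 1.x graph API (tf.placeholder/tf.Session; on TF 2.x use tf.compat.v1), and it catches tf.errors.InvalidArgumentError, which is where current TensorFlow releases expose the exception class.

import tensorflow as tf

# Build a graph whose reshape can only be validated at run time: 3 input
# values cannot be reshaped into a multiple of 4, so running the op fails.
a = tf.placeholder(tf.float32, shape=[None, 3])
b = tf.reshape(a, [-1, 4])

with tf.Session() as sess:
  try:
    sess.run(b, feed_dict={a: [[1.0, 2.0, 3.0]]})
  except tf.errors.InvalidArgumentError as e:
    print("Caught InvalidArgumentError:", e.message)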
Example 1: run_once
# Required import: import tensorflow [as alias]
# Or: from tensorflow import InvalidArgumentError [as alias]
def run_once(model, losses, weights, saver, summary_writer, summary_op):
  """Evaluates the latest model checkpoint.

  Args:
    model: Instance of SkipThoughtsModel; the model to evaluate.
    losses: Tensor; the target cross entropy losses for the current batch.
    weights: A Tensor of weights corresponding to losses.
    saver: Instance of tf.train.Saver for restoring model Variables.
    summary_writer: Instance of FileWriter.
    summary_op: Op for generating model summaries.
  """
  model_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
  if not model_path:
    tf.logging.info("Skipping evaluation. No checkpoint found in: %s",
                    FLAGS.checkpoint_dir)
    return

  with tf.Session() as sess:
    # Load model from checkpoint.
    tf.logging.info("Loading model from checkpoint: %s", model_path)
    saver.restore(sess, model_path)
    global_step = tf.train.global_step(sess, model.global_step.name)
    tf.logging.info("Successfully loaded %s at global step = %d.",
                    os.path.basename(model_path), global_step)
    if global_step < FLAGS.min_global_step:
      tf.logging.info("Skipping evaluation. Global step = %d < %d", global_step,
                      FLAGS.min_global_step)
      return

    # Start the queue runners.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    num_eval_batches = int(
        math.ceil(FLAGS.num_eval_examples / model.config.batch_size))

    # Run evaluation on the latest checkpoint.
    try:
      evaluate_model(sess, losses, weights, num_eval_batches, global_step,
                     summary_writer, summary_op)
    except tf.InvalidArgumentError:
      tf.logging.error(
          "Evaluation raised InvalidArgumentError (e.g. due to Nans).")
    finally:
      coord.request_stop()
      coord.join(threads, stop_grace_period_secs=10)
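In evaluation scripts that follow this pattern, run_once is typically driven by a small loop that periodically re-checks for a newer checkpoint. The driver below is a hypothetical sketch: the eval_interval_secs flag is an assumption and does not appear in the example above.

import time

def run(model, losses, weights, saver, summary_writer, summary_op):
  # Hypothetical driver loop: evaluate the newest checkpoint, then sleep
  # until the next evaluation interval (FLAGS.eval_interval_secs is assumed).
  while True:
    start = time.time()
    run_once(model, losses, weights, saver, summary_writer, summary_op)
    time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
    if time_to_next_eval > 0:
      time.sleep(time_to_next_eval)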