This article compiles typical usage examples of the tensorflow.logging.info method in Python. If you have been wondering what logging.info does or how to use it, the curated method examples below may help. You can also explore other usages of the tensorflow.logging module.
The following shows 15 code examples of logging.info, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
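Before the examples, here is a minimal, self-contained sketch of the API being demonstrated (TF 1.x; the messages are illustrative and not taken from any example below):

import tensorflow as tf
from tensorflow import logging

# INFO-level messages are suppressed by default; raise the verbosity first.
logging.set_verbosity(tf.logging.INFO)

logging.info("Starting run.")  # plain message
logging.info("Processed %d files in %.1fs.", 12, 3.4)  # printf-style arguments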
Example 1: export_model
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def export_model(self, global_step_val, saver, save_path, session):
  # If the model has already been exported at this step, return.
  if global_step_val == self.last_model_export_step:
    return
  last_checkpoint = saver.save(session, save_path, global_step_val)
  model_dir = "{0}/export/step_{1}".format(self.train_dir, global_step_val)
  logging.info("%s: Exporting the model at step %s to %s.",
               task_as_string(self.task), global_step_val, model_dir)
  self.model_exporter.export_model(
      model_dir=model_dir,
      global_step_val=global_step_val,
      last_checkpoint=last_checkpoint)
Example 2: start_server_if_distributed
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def start_server_if_distributed(self):
  """Starts a server if the execution is distributed."""
  if self.cluster:
    logging.info("%s: Starting trainer within cluster %s.",
                 task_as_string(self.task), self.cluster.as_dict())
    server = start_server(self.cluster, self.task)
    target = server.target
    device_fn = tf.train.replica_device_setter(
        ps_device="/job:ps",
        worker_device="/job:%s/task:%d" % (self.task.type, self.task.index),
        cluster=self.cluster)
  else:
    target = ""
    device_fn = ""
  return (target, device_fn)
Example 3: get_meta_filename
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def get_meta_filename(self, start_new_model, train_dir):
  if start_new_model:
    logging.info("%s: Flag 'start_new_model' is set. Building a new model.",
                 task_as_string(self.task))
    return None
  latest_checkpoint = tf.train.latest_checkpoint(train_dir)
  if not latest_checkpoint:
    logging.info("%s: No checkpoint file found. Building a new model.",
                 task_as_string(self.task))
    return None
  meta_filename = latest_checkpoint + ".meta"
  if not gfile.Exists(meta_filename):
    logging.info("%s: No meta graph file found. Building a new model.",
                 task_as_string(self.task))
    return None
  else:
    return meta_filename
Example 4: get_input_data_tensors
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=4 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True)
Example 5: get_input_evaluation_tensors
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      print(data_pattern, files)  # debug aid before raising
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True)
Example 6: get_input_data_tensors
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True)
Example 7: get_input_data_tensors
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True)
Example 8: get_input_evaluation_tensors
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      print(data_pattern, files)  # debug aid before raising
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,  # queue capacity sized relative to the batch size
        allow_smaller_final_batch=True,
        enqueue_many=True)
Example 9: get_input_data_tensors
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256,
                           num_epochs=None):
  logging.info("Using batch size of " + str(batch_size) + " for training.")
  with tf.name_scope("train_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find training files. data_pattern='" +
                    data_pattern + "'.")
    logging.info("Number of training files: %s.", str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=num_epochs, shuffle=False)
    training_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        training_data,
        batch_size=batch_size,
        capacity=batch_size * 4,  # queue capacity sized relative to the batch size
        allow_smaller_final_batch=True,
        enqueue_many=True)
Example 10: start_server_if_distributed
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def start_server_if_distributed(self):
  """Starts a server if the execution is distributed."""
  if self.cluster:
    logging.info("%s: Starting trainer within cluster %s.",
                 task_as_string(self.task), self.cluster.as_dict())
    server = start_server(self.cluster, self.task)
    target = server.target
    device_fn = tf.train.replica_device_setter(
        ps_device="/job:ps",
        worker_device="/job:%s/task:%d" % (self.task.type, self.task.index),
        cluster=self.cluster)
  else:
    target = ""
    device_fn = ""
  return (target, device_fn)
Example 11: get_meta_filename
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def get_meta_filename(self, start_new_model, train_dir):
  if start_new_model:
    logging.info("%s: Flag 'start_new_model' is set. Building a new model.",
                 task_as_string(self.task))
    return None
  latest_checkpoint = tf.train.latest_checkpoint(train_dir)
  if not latest_checkpoint:
    logging.info("%s: No checkpoint file found. Building a new model.",
                 task_as_string(self.task))
    return None
  meta_filename = latest_checkpoint + ".meta"
  if not gfile.Exists(meta_filename):
    logging.info("%s: No meta graph file found. Building a new model.",
                 task_as_string(self.task))
    return None
  else:
    return meta_filename
Example 12: main
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)
  if not FLAGS.json_prediction_files_pattern:
    raise ValueError(
        "The flag --json_prediction_files_pattern must be specified.")
  if not FLAGS.csv_output_file:
    raise ValueError("The flag --csv_output_file must be specified.")
  logging.info("Looking for prediction files with pattern: %s",
               FLAGS.json_prediction_files_pattern)
  file_paths = gfile.Glob(FLAGS.json_prediction_files_pattern)
  logging.info("Found files: %s", file_paths)
  logging.info("Writing submission file to: %s", FLAGS.csv_output_file)
  with gfile.Open(FLAGS.csv_output_file, "w+") as output_file:
    output_file.write(get_csv_header())
    for file_path in file_paths:
      logging.info("processing file: %s", file_path)
      with gfile.Open(file_path) as input_file:
        for line in input_file:
          json_data = json.loads(line)
          output_file.write(to_csv_row(json_data))
      output_file.flush()
  logging.info("done")
Example 13: get_input_data_tensors
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def get_input_data_tensors(reader, data_pattern, batch_size, num_readers=1):
  """Creates the section of the graph which reads the input data.

  Args:
    reader: A class which parses the input data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  with tf.name_scope("input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find input files. data_pattern='" +
                    data_pattern + "'")
    logging.info("number of input files: " + str(len(files)))
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=1, shuffle=False)
    examples_and_labels = [reader.prepare_reader(filename_queue)
                           for _ in range(num_readers)]
    video_id_batch, video_batch, unused_labels, num_frames_batch = (
        tf.train.batch_join(examples_and_labels,
                            batch_size=batch_size,
                            allow_smaller_final_batch=True,
                            enqueue_many=True))
    return video_id_batch, video_batch, num_frames_batch
Example 14: get_input_evaluation_tensors
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=1024,
                                 num_readers=1):
  """Creates the section of the graph which reads the evaluation data.

  Args:
    reader: A class which parses the training data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = [
        reader.prepare_reader(filename_queue) for _ in range(num_readers)
    ]
    return tf.train.batch_join(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True)
Example 15: remove_training_directory
# Required module: from tensorflow import logging [as alias]
# Or: from tensorflow.logging import info [as alias]
def remove_training_directory(self, train_dir):
  """Removes the training directory."""
  try:
    logging.info(
        "%s: Removing existing train directory.",
        task_as_string(self.task))
    gfile.DeleteRecursively(train_dir)
  except Exception:
    logging.error(
        "%s: Failed to delete directory %s when starting a new model. "
        "Please delete it manually and try again.",
        task_as_string(self.task), train_dir)