This page collects typical usage examples of the Python method tensorflow.python.training.saver.checkpoint_exists. If you have been wondering what saver.checkpoint_exists does, how exactly to use it, or where to find working examples, the curated samples below should help; they also illustrate the broader use of the module it belongs to, tensorflow.python.training.saver.
The following shows 5 code examples of saver.checkpoint_exists, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
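Before the examples, here is a minimal sketch of the call itself, assuming a TF 1.x installation; the checkpoint prefix below is a hypothetical placeholder, not a path from the examples. checkpoint_exists accepts either a concrete V1 checkpoint file path or a V2 checkpoint prefix and returns a bool:

# A minimal sketch, assuming TF 1.x; the prefix is hypothetical.
from tensorflow.python.training import saver as saver_lib

ckpt_prefix = '/tmp/model/model.ckpt-1000'
if saver_lib.checkpoint_exists(ckpt_prefix):
  print('Checkpoint found:', ckpt_prefix)
else:
  print('No checkpoint at:', ckpt_prefix)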
Example 1: _read_latest_config_files
# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import checkpoint_exists [as alias]
def _read_latest_config_files(self, run_path_pairs):
  """Reads and returns the projector config files in every run directory."""
  configs = {}
  config_fpaths = {}
  for run_name, assets_dir in run_path_pairs:
    config = projector_config_pb2.ProjectorConfig()
    config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)
    if file_io.file_exists(config_fpath):
      file_content = file_io.read_file_to_string(config_fpath)
      text_format.Merge(file_content, config)
    has_tensor_files = False
    for embedding in config.embeddings:
      if embedding.tensor_path:
        if not embedding.tensor_name:
          embedding.tensor_name = os.path.basename(embedding.tensor_path)
        has_tensor_files = True
        break
    if not config.model_checkpoint_path:
      # See if you can find a checkpoint file in the logdir.
      logdir = _assets_dir_to_logdir(assets_dir)
      ckpt_path = _find_latest_checkpoint(logdir)
      if not ckpt_path and not has_tensor_files:
        continue
      if ckpt_path:
        config.model_checkpoint_path = ckpt_path
    # Sanity check for the checkpoint file.
    if (config.model_checkpoint_path and
        not checkpoint_exists(config.model_checkpoint_path)):
      logging.warning('Checkpoint file "%s" not found',
                      config.model_checkpoint_path)
      continue
    configs[run_name] = config
    config_fpaths[run_name] = config_fpath
  return configs, config_fpaths
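The core pattern in Example 1 — locate a candidate checkpoint, then verify it with checkpoint_exists before trusting it — also works standalone. A minimal sketch, assuming TF 1.x; the logdir is a hypothetical placeholder:

# Standalone version of the sanity check above; the logdir is hypothetical.
import tensorflow as tf
from tensorflow.python.training import saver as saver_lib

logdir = '/tmp/runs/run1'
ckpt_path = tf.train.latest_checkpoint(logdir)  # may return None
if ckpt_path and saver_lib.checkpoint_exists(ckpt_path):
  print('usable checkpoint:', ckpt_path)
else:
  print('no usable checkpoint in', logdir)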
Example 2: _read_latest_config_files
# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import checkpoint_exists [as alias]
def _read_latest_config_files(self, run_path_pairs):
  """Reads and returns the projector config files in every run directory."""
  configs = {}
  config_fpaths = {}
  for run_name, logdir in run_path_pairs:
    config = ProjectorConfig()
    config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
    if file_io.file_exists(config_fpath):
      file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')
      text_format.Merge(file_content, config)
    has_tensor_files = False
    for embedding in config.embeddings:
      if embedding.tensor_path:
        has_tensor_files = True
        break
    if not config.model_checkpoint_path:
      # See if you can find a checkpoint file in the logdir.
      ckpt_path = latest_checkpoint(logdir)
      if not ckpt_path:
        # Or in the parent of logdir.
        ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))
      if not ckpt_path and not has_tensor_files:
        continue
      if ckpt_path:
        config.model_checkpoint_path = ckpt_path
    # Sanity check for the checkpoint file.
    if (config.model_checkpoint_path and
        not checkpoint_exists(config.model_checkpoint_path)):
      logging.warning('Checkpoint file %s not found',
                      config.model_checkpoint_path)
      continue
    configs[run_name] = config
    config_fpaths[run_name] = config_fpath
  return configs, config_fpaths
Example 3: _read_latest_config_files
# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import checkpoint_exists [as alias]
def _read_latest_config_files(self, run_path_pairs):
  """Reads and returns the projector config files in every run directory."""
  configs = {}
  config_fpaths = {}
  for run_name, logdir in run_path_pairs:
    config = ProjectorConfig()
    config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
    if file_io.file_exists(config_fpath):
      file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')
      text_format.Merge(file_content, config)
    has_tensor_files = False
    for embedding in config.embeddings:
      if embedding.tensor_path:
        has_tensor_files = True
        break
    if not config.model_checkpoint_path:
      # See if you can find a checkpoint file in the logdir.
      ckpt_path = latest_checkpoint(logdir)
      if not ckpt_path:
        # Or in the parent of logdir.
        ckpt_path = latest_checkpoint(os.path.join('../', logdir))
      if not ckpt_path and not has_tensor_files:
        continue
      if ckpt_path:
        config.model_checkpoint_path = ckpt_path
    # Sanity check for the checkpoint file.
    if (config.model_checkpoint_path and
        not checkpoint_exists(config.model_checkpoint_path)):
      logging.warning('Checkpoint file %s not found',
                      config.model_checkpoint_path)
      continue
    configs[run_name] = config
    config_fpaths[run_name] = config_fpath
  return configs, config_fpaths
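Examples 2 and 3 differ only in how the parent of logdir is probed. Note that os.path.join('../', logdir) only climbs to the parent when logdir is a relative path: Python's os.path.join discards everything before an absolute component, so for an absolute logdir it simply retries the same directory. The os.path.join(logdir, os.pardir) spelling in Example 2 is correct in both cases.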
Example 4: freeze_graph_with_def_protos
# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import checkpoint_exists [as alias]
def freeze_graph_with_def_protos(
    input_graph_def,
    input_saver_def,
    input_checkpoint,
    output_node_names,
    restore_op_name,
    filename_tensor_name,
    clear_devices,
    initializer_nodes,
    variable_names_blacklist=''):
  """Converts all variables in a graph and checkpoint into constants."""
  del restore_op_name, filename_tensor_name  # Unused by updated loading code.
  # 'input_checkpoint' may be a prefix if we're using Saver V2 format
  if not saver_lib.checkpoint_exists(input_checkpoint):
    raise ValueError(
        'Input checkpoint "' + input_checkpoint + '" does not exist!')
  if not output_node_names:
    raise ValueError(
        'You must supply the name of a node to --output_node_names.')
  # Remove all the explicit device specifications for this node. This helps to
  # make the graph more portable.
  if clear_devices:
    for node in input_graph_def.node:
      node.device = ''
  _ = importer.import_graph_def(input_graph_def, name='')
  with session.Session() as sess:
    if input_saver_def:
      saver = saver_lib.Saver(saver_def=input_saver_def)
      saver.restore(sess, input_checkpoint)
    else:
      var_list = {}
      reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
      var_to_shape_map = reader.get_variable_to_shape_map()
      for key in var_to_shape_map:
        try:
          tensor = sess.graph.get_tensor_by_name(key + ':0')
        except KeyError:
          # This tensor doesn't exist in the graph (for example it's
          # 'global_step' or a similar housekeeping element) so skip it.
          continue
        var_list[key] = tensor
      saver = saver_lib.Saver(var_list=var_list)
      saver.restore(sess, input_checkpoint)
    if initializer_nodes:
      sess.run(initializer_nodes)
    variable_names_blacklist = (variable_names_blacklist.split(',') if
                                variable_names_blacklist else None)
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,
        input_graph_def,
        output_node_names.split(','),
        variable_names_blacklist=variable_names_blacklist)
  return output_graph_def
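Here is a sketch of how the function above might be driven end to end, assuming TF 1.x, Python 3, and that the helper's own imports (importer, session, saver_lib, pywrap_tensorflow, graph_util) are in scope; the checkpoint path and node names are placeholders invented for this demo:

import os
import tensorflow as tf
from tensorflow.python.training import saver as saver_lib

ckpt_prefix = '/tmp/freeze_demo/model.ckpt'  # hypothetical prefix
os.makedirs('/tmp/freeze_demo', exist_ok=True)

# Build and checkpoint a trivial one-variable graph.
with tf.Graph().as_default() as g:
  weights = tf.Variable([1.0, 2.0], name='weights')
  tf.identity(weights, name='output')
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    tf.train.Saver().save(sess, ckpt_prefix)

# The saved prefix passes the same sanity check the function performs.
assert saver_lib.checkpoint_exists(ckpt_prefix)

frozen = freeze_graph_with_def_protos(
    input_graph_def=g.as_graph_def(),
    input_saver_def=None,
    input_checkpoint=ckpt_prefix,
    output_node_names='output',
    restore_op_name='',       # ignored by this version of the helper
    filename_tensor_name='',  # ignored by this version of the helper
    clear_devices=True,
    initializer_nodes='')
print(len(frozen.node), 'nodes in the frozen graph')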
Example 5: freeze_graph_with_def_protos
# Required module: from tensorflow.python.training import saver [as alias]
# Or: from tensorflow.python.training.saver import checkpoint_exists [as alias]
def freeze_graph_with_def_protos(
    input_graph_def,
    input_saver_def,
    input_checkpoint,
    output_node_names,
    restore_op_name,
    filename_tensor_name,
    clear_devices,
    initializer_nodes,
    variable_names_blacklist=''):
  """Converts all variables in a graph and checkpoint into constants."""
  del restore_op_name, filename_tensor_name  # Unused by updated loading code.
  # 'input_checkpoint' may be a prefix if we're using Saver V2 format
  if not saver_lib.checkpoint_exists(input_checkpoint):
    raise ValueError(
        'Input checkpoint "' + input_checkpoint + '" does not exist!')
  if not output_node_names:
    raise ValueError(
        'You must supply the name of a node to --output_node_names.')
  # Remove all the explicit device specifications for this node. This helps to
  # make the graph more portable.
  if clear_devices:
    for node in input_graph_def.node:
      node.device = ''
  with tf.Graph().as_default():
    tf.import_graph_def(input_graph_def, name='')
    config = tf.ConfigProto(graph_options=tf.GraphOptions())
    with session.Session(config=config) as sess:
      if input_saver_def:
        saver = saver_lib.Saver(saver_def=input_saver_def)
        saver.restore(sess, input_checkpoint)
      else:
        var_list = {}
        reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)
        var_to_shape_map = reader.get_variable_to_shape_map()
        for key in var_to_shape_map:
          try:
            tensor = sess.graph.get_tensor_by_name(key + ':0')
          except KeyError:
            # This tensor doesn't exist in the graph (for example it's
            # 'global_step' or a similar housekeeping element) so skip it.
            continue
          var_list[key] = tensor
        saver = saver_lib.Saver(var_list=var_list)
        saver.restore(sess, input_checkpoint)
      if initializer_nodes:
        sess.run(initializer_nodes)
      variable_names_blacklist = (variable_names_blacklist.split(',') if
                                  variable_names_blacklist else None)
      output_graph_def = graph_util.convert_variables_to_constants(
          sess,
          input_graph_def,
          output_node_names.split(','),
          variable_names_blacklist=variable_names_blacklist)
  return output_graph_def
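Examples 4 and 5 differ mainly in graph isolation: Example 4 imports input_graph_def into whatever graph happens to be the default at call time, while Example 5 stages everything inside a fresh tf.Graph() with an explicit ConfigProto, so freezing cannot collide with nodes already defined in the caller's graph. The checkpoint_exists guard at the top is identical in both: it rejects a bad input_checkpoint prefix before any expensive graph work begins.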