本文整理汇总了Python中tensorflow.python.lib.io.file_io.file_exists方法的典型用法代码示例。如果您正苦于以下问题:Python file_io.file_exists方法的具体用法?Python file_io.file_exists怎么用?Python file_io.file_exists使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块tensorflow.python.lib.io.file_io的用法示例。
在下文中一共展示了file_io.file_exists方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: save_pipeline_config
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def save_pipeline_config(pipeline_config, directory):
  """Serialize a pipeline config proto to `directory`/pipeline.config.

  Args:
    pipeline_config: A pipeline_pb2.TrainEvalPipelineConfig message.
    directory: Model directory that will receive the config file; created
      recursively if it does not yet exist.
  """
  if not file_io.file_exists(directory):
    file_io.recursive_create_dir(directory)
  target_path = os.path.join(directory, "pipeline.config")
  serialized = text_format.MessageToString(pipeline_config)
  with tf.gfile.Open(target_path, "wb") as config_file:
    tf.logging.info("Writing pipeline config file to %s",
                    target_path)
    config_file.write(serialized)
示例2: __init__
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def __init__(self, export_dir):
  """Create a SavedModel builder targeting `export_dir`.

  Args:
    export_dir: Directory the SavedModel will be written to; must not
      already exist.

  Raises:
    AssertionError: if `export_dir` already exists.
  """
  saved_model = saved_model_pb2.SavedModel()
  saved_model.saved_model_schema_version = (
      constants.SAVED_MODEL_SCHEMA_VERSION)
  self._saved_model = saved_model

  self._export_dir = export_dir
  if file_io.file_exists(export_dir):
    raise AssertionError(
        "Export directory already exists. Please specify a different export "
        "directory: %s" % export_dir)
  file_io.recursive_create_dir(self._export_dir)

  # Tracks whether variables/assets have been written yet. The first meta
  # graph added MUST use add_meta_graph_and_variables(); subsequent ones use
  # add_meta_graph(), which does not save weights.
  self._has_saved_variables = False
示例3: maybe_saved_model_directory
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def maybe_saved_model_directory(export_dir):
  """Check whether `export_dir` could contain a SavedModel.

  This performs no loading: a False result means the directory definitely
  holds no SavedModel, while True means one *may* be present (no guarantee
  it can actually be loaded).

  Args:
    export_dir: Absolute string path to a possible export location, e.g.
      '/my/foo/model'.

  Returns:
    True if a SavedModel file (text or binary proto) exists there.
  """
  candidate_paths = (
      os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PBTXT),
      os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB),
  )
  return any(file_io.file_exists(path) for path in candidate_paths)
示例4: _latest_checkpoints_changed
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def _latest_checkpoints_changed(configs, run_path_pairs):
  """Return True if the latest checkpoint of any run differs from its config."""
  for run_name, assets_dir in run_path_pairs:
    if run_name in configs:
      config = configs[run_name]
    else:
      # No cached config for this run: load it from disk when present.
      config = projector_config_pb2.ProjectorConfig()
      config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)
      if file_io.file_exists(config_fpath):
        text_format.Merge(file_io.read_file_to_string(config_fpath), config)

    # Look for the newest checkpoint under this run's logdir.
    ckpt_path = _find_latest_checkpoint(_assets_dir_to_logdir(assets_dir))
    if not ckpt_path:
      continue
    if ckpt_path != config.model_checkpoint_path:
      return True
  return False
示例5: _latest_checkpoints_changed
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def _latest_checkpoints_changed(configs, run_path_pairs):
  """Return True if the latest checkpoint of any run differs from its config."""
  for run_name, logdir in run_path_pairs:
    if run_name in configs:
      config = configs[run_name]
    else:
      # No cached config for this run: load it from disk when present.
      config = ProjectorConfig()
      config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
      if file_io.file_exists(config_fpath):
        contents = file_io.read_file_to_string(config_fpath).decode('utf-8')
        text_format.Merge(contents, config)

    # Search the logdir for a checkpoint; fall back to its parent directory.
    ckpt_path = latest_checkpoint(logdir)
    if not ckpt_path:
      ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))
    if not ckpt_path:
      continue
    if ckpt_path != config.model_checkpoint_path:
      return True
  return False
示例6: get_vocabulary
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def get_vocabulary(preprocess_output_dir, name):
  """Load the vocabulary for csv column `name` as a list of strings.

  Args:
    preprocess_output_dir: Directory expected to contain the file
      CATEGORICAL_ANALYSIS % name.
    name: Name of the csv column.

  Returns:
    List of vocabulary strings (empty lines removed).

  Raises:
    ValueError: if the vocabulary file is missing.
  """
  vocab_file = os.path.join(preprocess_output_dir, CATEGORICAL_ANALYSIS % name)
  if not file_io.file_exists(vocab_file):
    raise ValueError('File %s not found in %s' %
                     (CATEGORICAL_ANALYSIS % name, preprocess_output_dir))
  raw_text = python_portable_string(file_io.read_file_to_string(vocab_file))
  # Filter out empty lines (including any trailing-newline artifact).
  return [line for line in raw_text.split('\n') if line]
示例7: read_vocab
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def read_vocab(args, column_name):
  """Read the vocab file for `column_name` if it exists.

  Args:
    args: Command line flags (provides the analysis directory).
    column_name: Name of the column that has a vocab file.

  Returns:
    List of vocab words, or [] when the vocab file is not found.
  """
  vocab_path = os.path.join(
      args.analysis, feature_transforms.VOCAB_ANALYSIS_FILE % column_name)
  if file_io.file_exists(vocab_path):
    vocab, _ = feature_transforms.read_vocab_file(vocab_path)
    return vocab
  return []
示例8: _write_assets
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def _write_assets(assets_directory, assets_filename):
  """Write an asset file used with SavedModel for half plus two.

  Args:
    assets_directory: Directory the asset should be written to; created
      recursively if missing.
    assets_filename: Name of the file to receive the asset contents.

  Returns:
    The (bytes) path the asset file was written to.
  """
  if not file_io.file_exists(assets_directory):
    file_io.recursive_create_dir(assets_directory)
  asset_path = os.path.join(
      compat.as_bytes(assets_directory), compat.as_bytes(assets_filename))
  file_io.write_string_to_file(asset_path, "asset-file-contents")
  return asset_path
示例9: __init__
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def __init__(self, export_dir):
  """Create a SavedModel builder targeting `export_dir`.

  Args:
    export_dir: Directory the SavedModel will be written to; must not
      already exist.

  Raises:
    AssertionError: if `export_dir` already exists.
  """
  saved_model = saved_model_pb2.SavedModel()
  saved_model.saved_model_schema_version = (
      constants.SAVED_MODEL_SCHEMA_VERSION)
  self._saved_model = saved_model

  self._export_dir = export_dir
  if file_io.file_exists(export_dir):
    raise AssertionError(
        "Export directory already exists. Please specify a different export "
        "directory.")
  file_io.recursive_create_dir(self._export_dir)

  # Tracks whether variables/assets have been written yet. The first meta
  # graph added MUST use add_meta_graph_and_variables(); subsequent ones use
  # add_meta_graph(), which does not save weights.
  self._has_saved_variables = False
示例10: read_metadata
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def read_metadata(path):
  """Load metadata from `path` into a new DatasetMetadata.

  Looks first for a text-format schema proto ('schema.pbtxt'), then falls
  back to the legacy JSON format ('v1-json/schema.json').

  Args:
    path: Directory containing the schema file.

  Returns:
    A dataset_metadata.DatasetMetadata wrapping the parsed schema.

  Raises:
    IOError: if neither schema file exists.
  """
  schema_file = os.path.join(path, 'schema.pbtxt')
  legacy_schema_file = os.path.join(path, 'v1-json', 'schema.json')
  if file_io.file_exists(schema_file):
    # Use a context manager so the file handle is closed deterministically
    # (the original FileIO(...).read() leaked the handle).
    with file_io.FileIO(schema_file, 'r') as f:
      text_proto = f.read()
    schema_proto = text_format.Parse(text_proto, schema_pb2.Schema(),
                                     allow_unknown_extension=True)
  elif file_io.file_exists(legacy_schema_file):
    with file_io.FileIO(legacy_schema_file, 'r') as f:
      schema_json = f.read()
    schema_proto = _parse_schema_json(schema_json)
  else:
    raise IOError(
        'Schema file {} does not exist and neither did legacy format file '
        '{}'.format(schema_file, legacy_schema_file))
  return dataset_metadata.DatasetMetadata(schema_proto)
示例11: _write_assets
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def _write_assets(assets_directory, assets_filename):
  """Write an asset file used with SavedModel for half plus two.

  Args:
    assets_directory: Directory the asset should be written to; created
      recursively if missing.
    assets_filename: Name of the file to receive the asset contents.

  Returns:
    The (bytes) path the asset file was written to.
  """
  if not file_io.file_exists(assets_directory):
    file_io.recursive_create_dir(assets_directory)
  asset_path = os.path.join(
      tf.compat.as_bytes(assets_directory), tf.compat.as_bytes(assets_filename))
  file_io.write_string_to_file(asset_path, "asset-file-contents")
  return asset_path
示例12: parse_schema_file
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def parse_schema_file(schema_path):  # type: (str) -> Schema
    """Read a binary-serialized Schema proto file and return the proto object.

    Raises:
        AssertionError: if `schema_path` does not exist.
    """
    # Explicit check instead of `assert`, which is silently stripped under
    # `python -O`; AssertionError is kept so existing callers still match.
    if not file_io.file_exists(schema_path):
        raise AssertionError("File not found: {}".format(schema_path))
    schema = Schema()
    with file_io.FileIO(schema_path, "rb") as f:
        schema.ParseFromString(f.read())
    return schema
示例13: parse_schema_txt_file
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def parse_schema_txt_file(schema_path):  # type: (str) -> Schema
    """Parse a tf.metadata Schema txt file into its in-memory representation.

    Raises:
        AssertionError: if `schema_path` does not exist.
    """
    # Explicit check instead of `assert`, which is silently stripped under
    # `python -O`; AssertionError is kept so existing callers still match.
    if not file_io.file_exists(schema_path):
        raise AssertionError("File not found: {}".format(schema_path))
    schema = Schema()
    schema_text = file_io.read_file_to_string(schema_path)
    google.protobuf.text_format.Parse(schema_text, schema)
    return schema
示例14: __get_featran_settings_file
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def __get_featran_settings_file(dir_path, settings_filename=None):
    # type: (str, str) -> str
    """Resolve the path of a Featran settings file inside `dir_path`.

    Args:
        dir_path: Directory expected to hold the settings file.
        settings_filename: Optional explicit file name; defaults to the
            single-shard name "part-00000-of-00001.txt".

    Returns:
        The full path to the settings file.

    Raises:
        AssertionError: if the resolved file does not exist.
    """
    filename = settings_filename if settings_filename else "part-00000-of-00001.txt"
    filepath = pjoin(dir_path, filename)
    # Explicit check instead of `assert`, which is silently stripped under
    # `python -O`; AssertionError is kept so existing callers still match.
    if not file_io.file_exists(filepath):
        raise AssertionError("settings file `%s` does not exist" % filepath)
    return filepath
示例15: resolve_schema
# 需要导入模块: from tensorflow.python.lib.io import file_io [as 别名]
# 或者: from tensorflow.python.lib.io.file_io import file_exists [as 别名]
def resolve_schema(dir, default_schema=None):
  """Return `default_schema` if given, else the first schema file in `dir`.

  Checks "_schema.pb" then "_inferred_schema.pb"; implicitly returns None
  when neither exists and no default was supplied.
  """
  if default_schema is not None:
    return default_schema
  for schema_file_name in ("_schema.pb", "_inferred_schema.pb"):
    candidate = os.path.join(dir, schema_file_name)
    if file_io.file_exists(candidate):
      return candidate
  # Falls through: implicit None when no schema file is found.