This page collects typical usage examples of the Python method tensorflow.python.platform.gfile.MakeDirs. If you are unsure what gfile.MakeDirs does or how to use it, the curated examples below may help; you can also explore the enclosing module, tensorflow.python.platform.gfile, for related usage.
The following presents 15 code examples of gfile.MakeDirs, sorted by popularity by default.
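All of the examples share the same core pattern: test for a directory with gfile.Exists (or gfile.IsDirectory) and create it, parents included, with gfile.MakeDirs. As a quick orientation, here is a minimal self-contained sketch of that pattern (the path is illustrative, not taken from any example below):

from tensorflow.python.platform import gfile

work_dir = '/tmp/gfile_makedirs_demo'  # hypothetical path
if not gfile.Exists(work_dir):
  # MakeDirs creates all missing intermediate directories and does not
  # fail if the directory already exists.
  gfile.MakeDirs(work_dir)
print(gfile.IsDirectory(work_dir))  # True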
Example 1: maybe_download
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def maybe_download(filename, work_directory, source_url):
  """Download the data from source url, unless it's already here.

  Args:
    filename: string, name of the file in the directory.
    work_directory: string, path to working directory.
    source_url: url to download from if file doesn't exist.

  Returns:
    Path to resulting file.
  """
  if not gfile.Exists(work_directory):
    gfile.MakeDirs(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not gfile.Exists(filepath):
    temp_file_name, _ = urlretrieve_with_retry(source_url)
    gfile.Copy(temp_file_name, filepath)
    with gfile.GFile(filepath) as f:
      size = f.size()
    print('Successfully downloaded', filename, size, 'bytes.')
  return filepath
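The snippet assumes a urlretrieve_with_retry helper defined elsewhere in the same module. A minimal sketch of such a helper, assuming a plain retry loop around urllib.request.urlretrieve (the original uses a retry decorator, so this is an approximation rather than TensorFlow's implementation):

import time
import urllib.request

def urlretrieve_with_retry(url, filename=None, max_retries=3):
  # Retry transient network failures with exponential backoff; returns the
  # (local_filename, headers) tuple from urlretrieve on success.
  for attempt in range(max_retries):
    try:
      return urllib.request.urlretrieve(url, filename)
    except IOError:
      if attempt == max_retries - 1:
        raise
      time.sleep(2 ** attempt)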
Example 2: testReturnsSingleCheckpointIfOneCheckpointFound
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def testReturnsSingleCheckpointIfOneCheckpointFound(self):
  checkpoint_dir = tempfile.mkdtemp('one_checkpoint_found')
  if not gfile.Exists(checkpoint_dir):
    gfile.MakeDirs(checkpoint_dir)

  global_step = variables.get_or_create_global_step()
  saver = saver_lib.Saver()  # Saves the global step.

  with self.cached_session() as session:
    session.run(variables_lib.global_variables_initializer())
    save_path = os.path.join(checkpoint_dir, 'model.ckpt')
    saver.save(session, save_path, global_step=global_step)

  num_found = 0
  for _ in evaluation.checkpoints_iterator(checkpoint_dir, timeout=0):
    num_found += 1
  self.assertEqual(num_found, 1)
Example 3: testFinalOpsOnEvaluationLoop
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def testFinalOpsOnEvaluationLoop(self):
  value_op, update_op = slim.metrics.streaming_accuracy(
      self._predictions, self._labels)
  init_op = tf.group(tf.global_variables_initializer(),
                     tf.local_variables_initializer())

  # Create checkpoint and log directories.
  chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
  gfile.MakeDirs(chkpt_dir)
  logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
  gfile.MakeDirs(logdir)

  # Save initialized variables to the checkpoint directory.
  saver = tf.train.Saver()
  with self.test_session() as sess:
    init_op.run()
    saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

  # Now, run the evaluation loop:
  accuracy_value = slim.evaluation.evaluation_loop(
      '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
      max_number_of_evaluations=1)
  self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Example 4: testPathsWithParse
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def testPathsWithParse(self):
  base_dir = os.path.join(tf.test.get_temp_dir(), "paths_parse")
  self.assertFalse(gfile.Exists(base_dir))
  for p in xrange(3):
    gfile.MakeDirs(os.path.join(base_dir, "%d" % p))
  # Add a base_directory to ignore.
  gfile.MakeDirs(os.path.join(base_dir, "ignore"))

  # Create a simple parser that pulls the export_version from the directory.
  def parser(path):
    match = re.match("^" + base_dir + "/(\\d+)$", path.path)
    if not match:
      return None
    return path._replace(export_version=int(match.group(1)))

  self.assertEqual(
      gc.get_paths(base_dir, parser=parser),
      [gc.Path(os.path.join(base_dir, "0"), 0),
       gc.Path(os.path.join(base_dir, "1"), 1),
       gc.Path(os.path.join(base_dir, "2"), 2)])
Example 5: maybe_download_and_extract
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def maybe_download_and_extract(filename, data_dir, source_url):
  """Maybe download and extract a file."""
  if not gfile.Exists(data_dir):
    gfile.MakeDirs(data_dir)

  filepath = os.path.join(data_dir, filename)
  if not gfile.Exists(filepath):
    print('Downloading from {}'.format(source_url))
    temp_file_name, _ = urllib.request.urlretrieve(source_url)
    gfile.Copy(temp_file_name, filepath)
    with gfile.GFile(filepath) as f:
      size = f.size()
    print('Successfully downloaded \'{}\' of {} bytes'.format(filename, size))

  if filename.endswith('.zip'):
    print('Extracting {}'.format(filename))
    zipfile.ZipFile(file=filepath, mode='r').extractall(data_dir)
Example 6: maybe_download
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def maybe_download(filepath, source_url):
  """Download the data from source url, unless it's already here.

  Args:
    filepath: string, full path of the target file; its containing
      directory is created if needed.
    source_url: url to download from if file doesn't exist.

  Returns:
    Path to resulting file.
  """
  target_dir = path.dirname(filepath)
  if not gfile.Exists(target_dir):
    gfile.MakeDirs(target_dir)
  if not gfile.Exists(filepath):
    print('Downloading', source_url, 'to', filepath)
    temp_file_name, _ = _urlretrieve_with_retry(source_url)
    gfile.Copy(temp_file_name, filepath)
    with gfile.GFile(filepath) as f:
      size = f.size()
    print('Successfully downloaded', filepath, size, 'bytes.')
  return filepath
Example 7: get_summary_writer
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def get_summary_writer(tensorboard_dir):
  """Creates a directory for writing summaries and returns a writer."""
  tf.logging.info('TensorBoard directory: %s', tensorboard_dir)
  tf.logging.info('Deleting prior data if it exists...')
  try:
    gfile.DeleteRecursively(tensorboard_dir)
  except errors.OpError as err:
    tf.logging.error('Directory did not exist? Error: %s', err)
  tf.logging.info('Deleted! Creating the directory again...')
  gfile.MakeDirs(tensorboard_dir)
  tf.logging.info('Created! Instantiating SummaryWriter...')
  summary_writer = tf.summary.FileWriter(tensorboard_dir)
  return summary_writer
Example 8: __init__
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def __init__(self, logdir, max_queue=10, flush_secs=120,
             filename_suffix=None):
  """Creates an `EventFileWriter` and an event file to write to.

  On construction the summary writer creates a new event file in `logdir`.
  This event file will contain `Event` protocol buffers, which are written
  to disk via the add_event method.

  The other arguments to the constructor control the asynchronous writes to
  the event file:

  * `flush_secs`: How often, in seconds, to flush the added summaries and
    events to disk.
  * `max_queue`: Maximum number of summaries or events pending to be
    written to disk before one of the 'add' calls blocks.

  Args:
    logdir: A string. Directory where event file will be written.
    max_queue: Integer. Size of the queue for pending events and summaries.
    flush_secs: Number. How often, in seconds, to flush the pending events
      and summaries to disk.
    filename_suffix: A string. Every event file's name is suffixed with
      `filename_suffix`.
  """
  self._logdir = logdir
  if not gfile.IsDirectory(self._logdir):
    gfile.MakeDirs(self._logdir)
  self._event_queue = six.moves.queue.Queue(max_queue)
  self._ev_writer = pywrap_tensorflow.EventsWriter(
      compat.as_bytes(os.path.join(self._logdir, "events")))
  self._flush_secs = flush_secs
  self._sentinel_event = self._get_sentinel_event()
  if filename_suffix:
    self._ev_writer.InitWithSuffix(compat.as_bytes(filename_suffix))
  self._closed = False
  self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
                                    self._flush_secs, self._sentinel_event)
  self._worker.start()
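As a hypothetical illustration of the constructor described above (this class lives in TensorFlow's internal summary-writer machinery, so the surrounding import path and exact behavior are assumptions, not a public API):

# Hypothetical usage; assumes EventFileWriter is in scope in this module.
writer = EventFileWriter('/tmp/demo_logs', max_queue=20, flush_secs=30,
                         filename_suffix='.demo')
# Event protos passed to writer.add_event(...) are queued and written to
# the event file asynchronously, flushed at most every `flush_secs` seconds.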
Example 9: _write_plugin_assets
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def _write_plugin_assets(self, graph):
  plugin_assets = plugin_asset.get_all_plugin_assets(graph)
  logdir = self.event_writer.get_logdir()
  for asset_container in plugin_assets:
    plugin_name = asset_container.plugin_name
    plugin_dir = os.path.join(logdir, _PLUGINS_DIR, plugin_name)
    gfile.MakeDirs(plugin_dir)
    assets = asset_container.assets()
    for (asset_name, content) in assets.items():
      asset_path = os.path.join(plugin_dir, asset_name)
      with gfile.Open(asset_path, "w") as f:
        f.write(content)
Example 10: gfile_copy_callback
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def gfile_copy_callback(files_to_copy, export_dir_path):
  """Callback to copy files using `gfile.Copy` to an export directory.

  This method is used as the default `assets_callback` in `Exporter.init` to
  copy assets from the `assets_collection`. It can also be invoked directly
  to copy additional supplementary files into the export directory (in which
  case it is not a callback).

  Args:
    files_to_copy: A dictionary that maps original file paths to desired
      basename in the export directory.
    export_dir_path: Directory to copy the files to.
  """
  logging.info("Write assets into: %s using gfile_copy.", export_dir_path)
  gfile.MakeDirs(export_dir_path)
  for source_filepath, basename in files_to_copy.items():
    new_path = os.path.join(
        compat.as_bytes(export_dir_path), compat.as_bytes(basename))
    logging.info("Copying asset %s to path %s.", source_filepath, new_path)

    if gfile.Exists(new_path):
      # Guard against being restarted while copying assets, and the file
      # existing and being in an unknown state.
      # TODO(b/28676216): Do some file checks before deleting.
      logging.info("Removing file %s.", new_path)
      gfile.Remove(new_path)
    gfile.Copy(source_filepath, new_path)
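Since the docstring notes the callback can also be invoked directly, a hypothetical direct call (all file names and paths here are illustrative) could look like:

# Hypothetical usage: copy two local asset files into an export directory,
# renaming the second one via the path -> basename mapping.
files_to_copy = {
    '/tmp/assets/vocab.txt': 'vocab.txt',
    '/tmp/assets/labels.csv': 'class_labels.csv',
}
gfile_copy_callback(files_to_copy, '/tmp/my_export/assets')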
Example 11: __init__
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def __init__(self, logdir, max_queue=10, flush_secs=120):
  """Creates an `EventFileWriter` and an event file to write to.

  On construction the summary writer creates a new event file in `logdir`.
  This event file will contain `Event` protocol buffers, which are written
  to disk via the add_event method.

  The other arguments to the constructor control the asynchronous writes to
  the event file:

  * `flush_secs`: How often, in seconds, to flush the added summaries and
    events to disk.
  * `max_queue`: Maximum number of summaries or events pending to be
    written to disk before one of the 'add' calls blocks.

  Args:
    logdir: A string. Directory where event file will be written.
    max_queue: Integer. Size of the queue for pending events and summaries.
    flush_secs: Number. How often, in seconds, to flush the pending events
      and summaries to disk.
  """
  self._logdir = logdir
  if not gfile.IsDirectory(self._logdir):
    gfile.MakeDirs(self._logdir)
  self._event_queue = six.moves.queue.Queue(max_queue)
  self._ev_writer = pywrap_tensorflow.EventsWriter(
      compat.as_bytes(os.path.join(self._logdir, "events")))
  self._closed = False
  self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
                                    flush_secs)
  self._worker.start()
Example 12: _create_tfrecord_dataset
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def _create_tfrecord_dataset(tmpdir):
  if not gfile.Exists(tmpdir):
    gfile.MakeDirs(tmpdir)

  data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)

  keys_to_features = {
      'image/encoded':
          parsing_ops.FixedLenFeature(
              shape=(), dtype=dtypes.string, default_value=''),
      'image/format':
          parsing_ops.FixedLenFeature(
              shape=(), dtype=dtypes.string, default_value='jpeg'),
      'image/class/label':
          parsing_ops.FixedLenFeature(
              shape=[1],
              dtype=dtypes.int64,
              default_value=array_ops.zeros(
                  [1], dtype=dtypes.int64)),
  }

  items_to_handlers = {
      'image': tfexample_decoder.Image(),
      'label': tfexample_decoder.Tensor('image/class/label'),
  }

  decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                               items_to_handlers)

  return dataset.Dataset(
      data_sources=data_sources,
      reader=io_ops.TFRecordReader,
      decoder=decoder,
      num_samples=100,
      items_to_descriptions=None)
Example 13: testFinalOpsOnEvaluationLoop
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def testFinalOpsOnEvaluationLoop(self):
  value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
                                                      self._labels)
  init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                   variables.local_variables_initializer())

  # Create checkpoint and log directories.
  chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
  gfile.MakeDirs(chkpt_dir)
  logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
  gfile.MakeDirs(logdir)

  # Save initialized variables to the checkpoint directory.
  saver = saver_lib.Saver()
  with self.test_session() as sess:
    init_op.run()
    saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

  # Now, run the evaluation loop:
  accuracy_value = evaluation.evaluation_loop(
      '',
      chkpt_dir,
      logdir,
      eval_op=update_op,
      final_op=value_op,
      max_number_of_evaluations=1)
  self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
Example 14: main
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def main():
  if not (os.path.exists(train_set_file) and
          os.path.exists(validation_set_file)):
    GetLSPData.main()

  if not gfile.Exists(FLAGS.train_dir):
    gfile.MakeDirs(FLAGS.train_dir)
  train()
Example 15: main
# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MakeDirs [as alias]
def main():
  trainSetFileNames = os.path.join(FLAGS.data_dir, FLAGS.trainLabels_fn)
  testSetFileNames = os.path.join(FLAGS.data_dir, FLAGS.testLabels_fn)
  if not (os.path.exists(trainSetFileNames) and
          os.path.exists(testSetFileNames)):
    GetLSPData.main()

  # if gfile.Exists(FLAGS.train_dir):
  #   gfile.DeleteRecursively(FLAGS.train_dir)
  if not gfile.Exists(FLAGS.train_dir):
    gfile.MakeDirs(FLAGS.train_dir)
  train(trainSetFileNames, testSetFileNames)