

Python gfile.MkDir Method Code Examples

This article collects typical usage examples of the gfile.MkDir method from tensorflow.python.platform.gfile in Python. If you are wondering what gfile.MkDir does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore other usage examples of tensorflow.python.platform.gfile.


The following presents 6 code examples of the gfile.MkDir method, ordered by popularity by default.
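
Before the collected examples, here is a minimal sketch of how gfile.MkDir is typically called. It assumes a TensorFlow 1.x-style installation where tensorflow.python.platform.gfile is available; the directory path is purely illustrative.

import os

from tensorflow.python.platform import gfile

# Hypothetical target directory, for illustration only.
demo_dir = os.path.join("/tmp", "gfile_mkdir_demo")

# gfile.MkDir creates a single directory level and expects the parent to exist;
# use gfile.MakeDirs for recursive creation.
if not gfile.IsDirectory(demo_dir):
  gfile.MkDir(demo_dir)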

Example 1: _prepare_run_debug_urls

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MkDir [as alias]
def _prepare_run_debug_urls(self, fetches, feed_dict):
    """Implementation of abstrat method in superclass.

    See doc of `NonInteractiveDebugWrapperSession.__prepare_run_debug_urls()`
    for details. This implentation creates a run-specific subdirectory under
    self._session_root and stores information regarding run `fetches` and
    `feed_dict.keys()` in the subdirectory.

    Args:
      fetches: Same as the `fetches` argument to `Session.run()`
      feed_dict: Same as the `feed_dict` argument to `Session.run()`

    Returns:
      debug_urls: (`str` or `list` of `str`) file:// debug URLs to be used in
        this `Session.run()` call.
    """

    # Add a UUID to accommodate the possibility of concurrent run() calls.
    run_dir = os.path.join(self._session_root, "run_%d_%s" %
                           (int(time.time() * 1e6), uuid.uuid4().hex))
    gfile.MkDir(run_dir)

    fetches_event = event_pb2.Event()
    fetches_event.log_message.message = repr(fetches)
    fetches_path = os.path.join(
        run_dir,
        debug_data.METADATA_FILE_PREFIX + debug_data.FETCHES_INFO_FILE_TAG)
    with gfile.Open(fetches_path, "wb") as f:
      f.write(fetches_event.SerializeToString())

    feed_keys_event = event_pb2.Event()
    feed_keys_event.log_message.message = (repr(feed_dict.keys()) if feed_dict
                                           else repr(feed_dict))

    feed_keys_path = os.path.join(
        run_dir,
        debug_data.METADATA_FILE_PREFIX + debug_data.FEED_KEYS_INFO_FILE_TAG)
    with gfile.Open(feed_keys_path, "wb") as f:
      f.write(feed_keys_event.SerializeToString())

    return ["file://" + run_dir] 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 43, Source: dumping_wrapper.py
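
For context, below is a rough sketch of how the wrapper containing this method is typically exercised in TensorFlow 1.x. The graph and the dump directory are illustrative assumptions, not part of the example above.

import tensorflow as tf
from tensorflow.python import debug as tf_debug

# Hypothetical graph, for illustration only.
x = tf.constant([1.0, 2.0], name="x")
y = tf.add(x, x, name="y")

sess = tf.Session()
# The wrapper calls _prepare_run_debug_urls() on every run() to create a fresh
# "run_<timestamp>_<suffix>" subdirectory under the session root given here.
sess = tf_debug.DumpingDebugWrapperSession(sess, "/tmp/tfdbg_dumps")
sess.run(y)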

Example 2: testGraphFromMetaGraphBecomesAvailable

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MkDir [as alias]
def testGraphFromMetaGraphBecomesAvailable(self):
    """Test accumulator by writing values and then reading them."""

    directory = os.path.join(self.get_temp_dir(), 'metagraph_test_values_dir')
    if gfile.IsDirectory(directory):
      gfile.DeleteRecursively(directory)
    gfile.MkDir(directory)

    writer = tf.train.SummaryWriter(directory, max_queue=100)

    with tf.Graph().as_default() as graph:
      _ = tf.constant([2.0, 1.0])
    # Add a graph to the summary writer.
    meta_graph_def = saver.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True))
    writer.add_meta_graph(meta_graph_def)

    writer.flush()

    # Verify that we can load those events properly
    acc = ea.EventAccumulator(directory)
    acc.Reload()
    self.assertTagsEqual(
        acc.Tags(),
        {
            ea.IMAGES: [],
            ea.AUDIO: [],
            ea.SCALARS: [],
            ea.HISTOGRAMS: [],
            ea.COMPRESSED_HISTOGRAMS: [],
            ea.GRAPH: True,
            ea.META_GRAPH: True,
            ea.RUN_METADATA: []
        })
    self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
    self.assertProtoEquals(meta_graph_def, acc.MetaGraph()) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 38, Source: event_accumulator_test.py

Example 3: _CreateCleanDirectory

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MkDir [as alias]
def _CreateCleanDirectory(path):
  if gfile.IsDirectory(path):
    gfile.DeleteRecursively(path)
  gfile.MkDir(path) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 6, Source: event_multiplexer_test.py
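
As an assumed usage pattern, a helper like this usually runs in test setup so that each test starts from an empty directory. The test class below is hypothetical and not part of the project above.

import os

import tensorflow as tf

class _ExampleTest(tf.test.TestCase):

  def setUp(self):
    # self.get_temp_dir() guarantees the parent exists, which gfile.MkDir requires.
    self._run_dir = os.path.join(self.get_temp_dir(), 'runs')
    _CreateCleanDirectory(self._run_dir)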

Example 4: testAddRunsFromDirectory

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MkDir [as alias]
def testAddRunsFromDirectory(self):
    x = event_multiplexer.EventMultiplexer()
    tmpdir = self.get_temp_dir()
    join = os.path.join
    fakedir = join(tmpdir, 'fake_accumulator_directory')
    realdir = join(tmpdir, 'real_accumulator_directory')
    self.assertEqual(x.Runs(), {})
    x.AddRunsFromDirectory(fakedir)
    self.assertEqual(x.Runs(), {}, 'loading fakedir had no effect')

    _CreateCleanDirectory(realdir)
    x.AddRunsFromDirectory(realdir)
    self.assertEqual(x.Runs(), {}, 'loading empty directory had no effect')

    path1 = join(realdir, 'path1')
    gfile.MkDir(path1)
    x.AddRunsFromDirectory(realdir)
    self.assertEqual(x.Runs(), {}, 'creating empty subdirectory had no effect')

    _AddEvents(path1)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['path1'], 'loaded run: path1')
    loader1 = x._GetAccumulator('path1')
    self.assertEqual(loader1._path, path1, 'has the correct path')

    path2 = join(realdir, 'path2')
    _AddEvents(path2)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['path1', 'path2'])
    self.assertEqual(
        x._GetAccumulator('path1'), loader1, 'loader1 not regenerated')

    path2_2 = join(path2, 'path2')
    _AddEvents(path2_2)
    x.AddRunsFromDirectory(realdir)
    self.assertItemsEqual(x.Runs(), ['path1', 'path2', 'path2/path2'])
    self.assertEqual(
        x._GetAccumulator('path2/path2')._path, path2_2, 'loader2 path correct') 
Developer: tobegit3hub, Project: deep_image_model, Lines: 40, Source: event_multiplexer_test.py

Example 5: prepare_run_debug_urls

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MkDir [as alias]
def prepare_run_debug_urls(self, fetches, feed_dict):
    """Implementation of abstrat method in superclass.

    See doc of `NonInteractiveDebugWrapperSession.prepare_run_debug_urls()`
    for details. This implentation creates a run-specific subdirectory under
    self._session_root and stores information regarding run `fetches` and
    `feed_dict.keys()` in the subdirectory.

    Args:
      fetches: Same as the `fetches` argument to `Session.run()`
      feed_dict: Same as the `feed_dict` argument to `Session.run()`

    Returns:
      debug_urls: (`str` or `list` of `str`) file:// debug URLs to be used in
        this `Session.run()` call.
    """

    # Use a timestamp plus a per-session run counter to accommodate the
    # possibility of concurrent run() calls.
    self._run_counter_lock.acquire()
    run_dir = os.path.join(self._session_root, "run_%d_%d" %
                           (int(time.time() * 1e6), self._run_counter))
    self._run_counter += 1
    self._run_counter_lock.release()
    gfile.MkDir(run_dir)

    fetches_event = event_pb2.Event()
    fetches_event.log_message.message = repr(fetches)
    fetches_path = os.path.join(
        run_dir,
        debug_data.METADATA_FILE_PREFIX + debug_data.FETCHES_INFO_FILE_TAG)
    with gfile.Open(fetches_path, "wb") as f:
      f.write(fetches_event.SerializeToString())

    feed_keys_event = event_pb2.Event()
    feed_keys_event.log_message.message = (repr(feed_dict.keys()) if feed_dict
                                           else repr(feed_dict))

    feed_keys_path = os.path.join(
        run_dir,
        debug_data.METADATA_FILE_PREFIX + debug_data.FEED_KEYS_INFO_FILE_TAG)
    with gfile.Open(feed_keys_path, "wb") as f:
      f.write(feed_keys_event.SerializeToString())

    return ["file://" + run_dir] 
Developer: ryfeus, Project: lambda-packs, Lines: 46, Source: dumping_wrapper.py

Example 6: prepare_run_debug_urls

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import MkDir [as alias]
def prepare_run_debug_urls(self, fetches, feed_dict):
    """Implementation of abstrat method in superclass.

    See doc of `NonInteractiveDebugWrapperSession.prepare_run_debug_urls()`
    for details. This implementation creates a run-specific subdirectory under
    self._session_root and stores information regarding run `fetches` and
    `feed_dict.keys()` in the subdirectory.

    Args:
      fetches: Same as the `fetches` argument to `Session.run()`
      feed_dict: Same as the `feed_dict` argument to `Session.run()`

    Returns:
      debug_urls: (`str` or `list` of `str`) file:// debug URLs to be used in
        this `Session.run()` call.
    """

    # Use a timestamp plus a per-session run counter to accommodate the
    # possibility of concurrent run() calls.
    self._run_counter_lock.acquire()
    run_dir = os.path.join(self._session_root, "run_%d_%d" %
                           (int(time.time() * 1e6), self._run_counter))
    self._run_counter += 1
    self._run_counter_lock.release()
    gfile.MkDir(run_dir)

    fetches_event = event_pb2.Event()
    fetches_event.log_message.message = repr(fetches)
    fetches_path = os.path.join(
        run_dir,
        debug_data.METADATA_FILE_PREFIX + debug_data.FETCHES_INFO_FILE_TAG)
    with gfile.Open(fetches_path, "wb") as f:
      f.write(fetches_event.SerializeToString())

    feed_keys_event = event_pb2.Event()
    feed_keys_event.log_message.message = (repr(feed_dict.keys()) if feed_dict
                                           else repr(feed_dict))

    feed_keys_path = os.path.join(
        run_dir,
        debug_data.METADATA_FILE_PREFIX + debug_data.FEED_KEYS_INFO_FILE_TAG)
    with gfile.Open(feed_keys_path, "wb") as f:
      f.write(feed_keys_event.SerializeToString())

    return ["file://" + run_dir] 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 46, Source: dumping_wrapper.py


Note: The tensorflow.python.platform.gfile.MkDir examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. When distributing or using the code, please follow the corresponding project's license. Do not reproduce this article without permission.