當前位置: 首頁>>代碼示例>>Python>>正文


Python training_util.write_graph方法代碼示例

本文整理匯總了Python中tensorflow.python.training.training_util.write_graph方法的典型用法代碼示例。如果您正苦於以下問題:Python training_util.write_graph方法的具體用法?Python training_util.write_graph怎麽用?Python training_util.write_graph使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在tensorflow.python.training.training_util的用法示例。


在下文中一共展示了training_util.write_graph方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: before_run

# 需要導入模塊: from tensorflow.python.training import training_util [as 別名]
# 或者: from tensorflow.python.training.training_util import write_graph [as 別名]
def before_run(self, run_context):  # pylint: disable=unused-argument
    """On the first call, dumps the graph/meta-graph; always fetches global step.

    Args:
        run_context: A `SessionRunContext` object (unused).

    Returns:
        A `SessionRunArgs` object requesting the global step tensor.
    """
    if self._timer.last_triggered_step() is None:
      # We do write graph and saver_def at the first call of before_run.
      # We cannot do this in begin, since we let other hooks change the graph
      # and add variables in begin. The graph is finalized after all begin
      # calls.
      graph = ops.get_default_graph()
      # Serialize the graph once and reuse it: as_graph_def(add_shapes=True)
      # can be expensive for large graphs, and the original serialized twice.
      graph_def = graph.as_graph_def(add_shapes=True)
      training_util.write_graph(graph_def, self._checkpoint_dir, "graph.pbtxt")
      # Call _get_saver() exactly once; the original invoked it twice in the
      # conditional expression, which may look up/build the Saver twice.
      saver = self._get_saver()
      saver_def = saver.saver_def if saver else None
      meta_graph_def = meta_graph.create_meta_graph_def(
          graph_def=graph_def,
          saver_def=saver_def)
      self._summary_writer.add_graph(graph)
      self._summary_writer.add_meta_graph(meta_graph_def)

    return SessionRunArgs(self._global_step_tensor)
開發者ID:ryfeus,項目名稱:lambda-packs,代碼行數:20,代碼來源:basic_session_run_hooks.py

示例2: after_create_session

# 需要導入模塊: from tensorflow.python.training import training_util [as 別名]
# 或者: from tensorflow.python.training.training_util import write_graph [as 別名]
def after_create_session(self, session, coord):
    """Writes the graph asynchronously and saves the initial checkpoint.

    Args:
        session: The newly created `tf.Session`.
        coord: A `Coordinator` (unused here).
    """
    global_step = session.run(self._global_step_tensor)

    # We write the graph and saver_def at the first call of
    # after_create_session (the original comment incorrectly said before_run).
    # We cannot do this in begin, since other hooks may still change the graph
    # and add variables in begin; the graph is finalized after all begin calls.
    def _write_graph_fn(self):
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir, "graph.pbtxt")
    # Write the (potentially large) pbtxt off the main thread; end() joins it.
    self._write_graph_thread = threading.Thread(target=_write_graph_fn,
                                                args=[self])
    self._write_graph_thread.start()

    # Call _get_saver() once instead of twice; it may build/look up a Saver.
    saver = self._get_saver()
    saver_def = saver.saver_def if saver else None
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
    self._summary_writer.add_graph(graph)
    self._summary_writer.add_meta_graph(meta_graph_def)
    # The checkpoint saved here is the state at step "global_step".
    self._save(session, global_step)
    self._timer.update_last_triggered_step(global_step)
開發者ID:mlperf,項目名稱:training_results_v0.5,代碼行數:25,代碼來源:async_checkpoint.py

示例3: end

# 需要導入模塊: from tensorflow.python.training import training_util [as 別名]
# 或者: from tensorflow.python.training.training_util import write_graph [as 別名]
def end(self, session):
    """Drains background threads, saves a final checkpoint, ends listeners."""
    # Join outstanding async work in order: checkpoint saver, then graph
    # writer, so nothing is still mutating the checkpoint dir below.
    pending = (
        (self._save_thread,
         "Waiting for any pending checkpoints to finish."),
        (self._write_graph_thread,
         "Waiting for any pending write_graph to finish."),
    )
    for thread, message in pending:
      if thread:
        logging.info(message)
        thread.join()

    last_step = session.run(self._global_step_tensor)

    # A synchronous final save is only required when the timer has not
    # already triggered at this exact step.
    if self._timer.last_triggered_step() != last_step:
      self._save(session, last_step, asynchronous=False)

    for listener in self._listeners:
      listener.end(session, last_step)
開發者ID:mlperf,項目名稱:training_results_v0.5,代碼行數:18,代碼來源:async_checkpoint.py

示例4: before_run

# 需要導入模塊: from tensorflow.python.training import training_util [as 別名]
# 或者: from tensorflow.python.training.training_util import write_graph [as 別名]
def before_run(self, run_context):  # pylint: disable=unused-argument
    """Dumps graph and meta-graph once, then requests the global step."""
    first_call = self._timer.last_triggered_step() is None
    if first_call:
      # The graph dump happens exactly once, on the very first run call.
      graph = ops.get_default_graph()
      training_util.write_graph(
          graph.as_graph_def(add_shapes=True),
          self._checkpoint_dir,
          "graph.pbtxt")
      saver_def = None
      if self._saver:
        saver_def = self._saver.saver_def
      meta_graph_def = meta_graph.create_meta_graph_def(
          graph_def=graph.as_graph_def(add_shapes=True),
          saver_def=saver_def)
      self._summary_writer.add_graph(graph)
      self._summary_writer.add_meta_graph(meta_graph_def)

    return SessionRunArgs(self._global_step_tensor)
開發者ID:tobegit3hub,項目名稱:deep_image_model,代碼行數:18,代碼來源:basic_session_run_hooks.py

示例5: _write_graph

# 需要導入模塊: from tensorflow.python.training import training_util [as 別名]
# 或者: from tensorflow.python.training.training_util import write_graph [as 別名]
def _write_graph(self):
    """Dumps the graph to `logdir` and registers it with the summary writer.

    Must only be invoked on the chief; non-chief callers are a bug.
    """
    assert self._is_chief
    logdir = self._logdir
    if logdir:
      graph_def = self._graph.as_graph_def(add_shapes=True)
      training_util.write_graph(graph_def, logdir, "graph.pbtxt")
    writer = self._summary_writer
    if writer and not self._graph_added_to_summary:
      writer.add_graph(self._graph)
      writer.add_meta_graph(self._meta_graph_def)
      # Remember we already published, so repeated calls stay idempotent.
      self._graph_added_to_summary = True
開發者ID:yuantailing,項目名稱:ctw-baseline,代碼行數:12,代碼來源:supervisor.py

示例6: before_run

# 需要導入模塊: from tensorflow.python.training import training_util [as 別名]
# 或者: from tensorflow.python.training.training_util import write_graph [as 別名]
def before_run(self, run_context):
        """Dumps the graph and model analysis once, on the chief only.

        Called before each call to run().

        Args:
            run_context: A `SessionRunContext` object.

        Returns: A `SessionRunArgs` object containing global_step.
        """
        # The dump must happen here rather than in begin(): other hooks may
        # still add variables during begin(), and the graph is only finalized
        # after every begin() has run.
        if self._first_call and self._is_chief:
            graph = ops.get_default_graph()
            training_util.write_graph(
                graph.as_graph_def(add_shapes=True),
                self._checkpoint_dir,
                "graph.pbtxt")
            # dump model details "model_analysis.txt"
            dump_model_analysis(self._checkpoint_dir)  # dump model configs
            meta_graph_def = meta_graph.create_meta_graph_def(
                graph_def=graph.as_graph_def(add_shapes=True),
                saver_def=self._saver.saver_def)
            writer = self._summary_writer
            if writer is not None:
                writer.add_graph(graph)
                writer.add_meta_graph(meta_graph_def)
            tf.logging.info("CheckpointSaverHook (before_run): dump graph...")
        self._first_call = False
        return tf.train.SessionRunArgs(self._global_step)
開發者ID:zhaocq-nlp,項目名稱:NJUNMT-tf,代碼行數:32,代碼來源:hooks.py


注:本文中的tensorflow.python.training.training_util.write_graph方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。