

Python training_util.write_graph Method Code Examples

This article collects typical usage examples of the training_util.write_graph method from the Python module tensorflow.python.training.training_util. If you are trying to work out how training_util.write_graph is used in practice, the curated code examples below should help. You can also explore further usage examples for the containing module, tensorflow.python.training.training_util.


Six code examples of the training_util.write_graph method are shown below, ordered by popularity.
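
Before looking at the hooks, here is a minimal, self-contained sketch of what the method does, written against the public TF 1.x alias tf.train.write_graph (the graph, directory, and file names below are made-up placeholders, not taken from the examples):

import tensorflow as tf

# Build a tiny TF 1.x graph to serialize.
graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[None, 4], name="x")
    w = tf.Variable(tf.zeros([4, 1]), name="w")
    y = tf.matmul(x, w, name="y")

# write_graph serializes a Graph or GraphDef; as_text=True produces the
# human-readable graph.pbtxt format that the hooks below dump as well.
tf.train.write_graph(graph.as_graph_def(add_shapes=True),
                     "/tmp/write_graph_demo", "graph.pbtxt", as_text=True)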

Example 1: before_run

# Required import: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def before_run(self, run_context):  # pylint: disable=unused-argument
    if self._timer.last_triggered_step() is None:
      # We write the graph and saver_def on the first call of before_run.
      # We cannot do this in begin, because we let other hooks change the graph
      # and add variables in begin; the graph is finalized after all begin calls.
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir,
          "graph.pbtxt")
      saver_def = self._get_saver().saver_def if self._get_saver() else None
      graph = ops.get_default_graph()
      meta_graph_def = meta_graph.create_meta_graph_def(
          graph_def=graph.as_graph_def(add_shapes=True),
          saver_def=saver_def)
      self._summary_writer.add_graph(graph)
      self._summary_writer.add_meta_graph(meta_graph_def)

    return SessionRunArgs(self._global_step_tensor) 
Developer: ryfeus | Project: lambda-packs | Lines: 20 | Source: basic_session_run_hooks.py
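
As a usage sketch (an assumption about typical use, not part of the collected example): a CheckpointSaverHook such as the one above is normally attached to a tf.train.MonitoredTrainingSession, which invokes before_run on every step, so graph.pbtxt gets dumped on the first step. The directory, step count, and train op below are placeholders, and TF 1.x is assumed.

import tensorflow as tf

# Placeholder training setup: a global step and a trivial "train op".
global_step = tf.train.get_or_create_global_step()
train_op = tf.assign_add(global_step, 1)

# The hook writes graph.pbtxt on its first before_run call and saves a
# checkpoint every save_steps steps.
hook = tf.train.CheckpointSaverHook(checkpoint_dir="/tmp/write_graph_demo",
                                    save_steps=100)

with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
    for _ in range(10):
        sess.run(train_op)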

Example 2: after_create_session

# Required import: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def after_create_session(self, session, coord):
    global_step = session.run(self._global_step_tensor)

    # We write the graph and saver_def here, once the session has been created,
    # rather than in begin: other hooks may still change the graph and add
    # variables during begin, and the graph is finalized after all begin calls.
    def _write_graph_fn(self):
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir, "graph.pbtxt")
    self._write_graph_thread = threading.Thread(target=_write_graph_fn,
                                                args=[self])
    self._write_graph_thread.start()

    saver_def = self._get_saver().saver_def if self._get_saver() else None
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
    self._summary_writer.add_graph(graph)
    self._summary_writer.add_meta_graph(meta_graph_def)
    # The checkpoint saved here is the state at step "global_step".
    self._save(session, global_step)
    self._timer.update_last_triggered_step(global_step) 
Developer: mlperf | Project: training_results_v0.5 | Lines: 25 | Source: async_checkpoint.py

Example 3: end

# Required import: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def end(self, session):
    if self._save_thread:
      logging.info("Waiting for any pending checkpoints to finish.")
      self._save_thread.join()
    if self._write_graph_thread:
      logging.info("Waiting for any pending write_graph to finish.")
      self._write_graph_thread.join()

    last_step = session.run(self._global_step_tensor)

    # Save the last checkpoint synchronously if needed.
    if last_step != self._timer.last_triggered_step():
      self._save(session, last_step, asynchronous=False)

    for l in self._listeners:
      l.end(session, last_step) 
Developer: mlperf | Project: training_results_v0.5 | Lines: 18 | Source: async_checkpoint.py

Example 4: before_run

# Required import: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def before_run(self, run_context):  # pylint: disable=unused-argument
    if self._timer.last_triggered_step() is None:
      # Write graph in the first call.
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir,
          "graph.pbtxt")
      saver_def = self._saver.saver_def if self._saver else None
      graph = ops.get_default_graph()
      meta_graph_def = meta_graph.create_meta_graph_def(
          graph_def=graph.as_graph_def(add_shapes=True),
          saver_def=saver_def)
      self._summary_writer.add_graph(graph)
      self._summary_writer.add_meta_graph(meta_graph_def)

    return SessionRunArgs(self._global_step_tensor) 
Developer: tobegit3hub | Project: deep_image_model | Lines: 18 | Source: basic_session_run_hooks.py

Example 5: _write_graph

# Required import: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def _write_graph(self):
    """Writes graph_def to `logdir` and adds it to summary if applicable."""
    assert self._is_chief
    if self._logdir:
      training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                                self._logdir, "graph.pbtxt")
    if self._summary_writer and not self._graph_added_to_summary:
      self._summary_writer.add_graph(self._graph)
      self._summary_writer.add_meta_graph(self._meta_graph_def)
      self._graph_added_to_summary = True 
Developer: yuantailing | Project: ctw-baseline | Lines: 12 | Source: supervisor.py

Example 6: before_run

# Required import: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def before_run(self, run_context):
        """ Dumps graphs and loads checkpoint if there exits.

        Called before each call to run().

        Args:
            run_context: A `SessionRunContext` object.

        Returns: A `SessionRunArgs` object containing global_step.
        """
        # We write the graph and saver_def on the first call of before_run.
        # We cannot do this in begin, because we let other hooks change the graph
        # and add variables in begin; the graph is finalized after all begin calls.
        if self._is_chief and self._first_call:
            training_util.write_graph(
                ops.get_default_graph().as_graph_def(add_shapes=True),
                self._checkpoint_dir,
                "graph.pbtxt")
            # dump model details to "model_analysis.txt"
            dump_model_analysis(self._checkpoint_dir)  # dump model configs
            graph = ops.get_default_graph()
            meta_graph_def = meta_graph.create_meta_graph_def(
                graph_def=graph.as_graph_def(add_shapes=True),
                saver_def=self._saver.saver_def)
            if self._summary_writer is not None:
                self._summary_writer.add_graph(graph)
                self._summary_writer.add_meta_graph(meta_graph_def)
            tf.logging.info("CheckpointSaverHook (before_run): dump graph...")
        self._first_call = False
        return tf.train.SessionRunArgs(self._global_step) 
Developer: zhaocq-nlp | Project: NJUNMT-tf | Lines: 32 | Source: hooks.py
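
All six examples write the graph as a text-format graph.pbtxt. As a hedged complement (not part of the collected examples), here is a sketch of how such a file can be parsed back into a GraphDef and re-imported, assuming TF 1.x and the placeholder path used in the sketch near the top of this page:

import tensorflow as tf
from google.protobuf import text_format

# Parse the human-readable pbtxt back into a GraphDef proto.
with open("/tmp/write_graph_demo/graph.pbtxt") as f:
    graph_def = text_format.Parse(f.read(), tf.GraphDef())

# Import it into a fresh graph and list the recovered node names.
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
print([node.name for node in graph_def.node])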


Note: the tensorflow.python.training.training_util.write_graph examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright over the source code; consult each project's license before redistributing or reusing it. Do not repost without permission.