This article collects typical usage examples of the Python method tensorflow.python.training.training_util.write_graph. If you are wondering what training_util.write_graph does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples from the module that contains the method, tensorflow.python.training.training_util.
Six code examples of training_util.write_graph are shown below, ordered by popularity.
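Before the hook-based examples, here is a minimal standalone sketch of the call itself, assuming TF 1.x graph mode. The constant names and the output directory /tmp/write_graph_demo are placeholders chosen for illustration; the three-argument call mirrors the form used in the examples below (in the public TF 1.x API the same helper is available as tf.train.write_graph).

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.training import training_util

# Build a tiny graph so there is something to serialize.
a = tf.constant(1.0, name="a")
b = tf.constant(2.0, name="b")
c = tf.add(a, b, name="c")

# Write the default graph (with tensor shapes) as a text protobuf to
# /tmp/write_graph_demo/graph.pbtxt.
training_util.write_graph(
    ops.get_default_graph().as_graph_def(add_shapes=True),
    "/tmp/write_graph_demo",
    "graph.pbtxt")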
Example 1: before_run
# Required module: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def before_run(self, run_context):  # pylint: disable=unused-argument
  if self._timer.last_triggered_step() is None:
    # We do write graph and saver_def at the first call of before_run.
    # We cannot do this in begin, since we let other hooks to change graph and
    # add variables in begin. Graph is finalized after all begin calls.
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir,
        "graph.pbtxt")
    saver_def = self._get_saver().saver_def if self._get_saver() else None
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=saver_def)
    self._summary_writer.add_graph(graph)
    self._summary_writer.add_meta_graph(meta_graph_def)
  return SessionRunArgs(self._global_step_tensor)
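Example 1's before_run belongs to a checkpoint-saver hook, so the dump only happens once the hook is attached to a session. Below is a hedged usage sketch of that wiring with the public tf.train.CheckpointSaverHook, which performs the same graph.pbtxt write when it first triggers; the directory /tmp/ckpt_demo, the toy variable, and the step counts are illustrative choices, not part of the example above.

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
w = tf.get_variable("w", shape=[4], initializer=tf.zeros_initializer())
loss = tf.reduce_sum(tf.square(w - 1.0))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
    loss, global_step=global_step)

# The hook writes graph.pbtxt into checkpoint_dir the first time it fires,
# then keeps saving checkpoints every save_steps steps.
saver_hook = tf.train.CheckpointSaverHook("/tmp/ckpt_demo", save_steps=100)

with tf.train.MonitoredTrainingSession(hooks=[saver_hook]) as sess:
    for _ in range(5):
        sess.run(train_op)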
Example 2: after_create_session
# Required module: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def after_create_session(self, session, coord):
  global_step = session.run(self._global_step_tensor)

  # We do write graph and saver_def at the first call of before_run.
  # We cannot do this in begin, since we let other hooks to change graph and
  # add variables in begin. Graph is finalized after all begin calls.
  def _write_graph_fn(self):
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir, "graph.pbtxt")

  self._write_graph_thread = threading.Thread(target=_write_graph_fn,
                                              args=[self])
  self._write_graph_thread.start()

  saver_def = self._get_saver().saver_def if self._get_saver() else None
  graph = ops.get_default_graph()
  meta_graph_def = meta_graph.create_meta_graph_def(
      graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
  self._summary_writer.add_graph(graph)
  self._summary_writer.add_meta_graph(meta_graph_def)

  # The checkpoint saved here is the state at step "global_step".
  self._save(session, global_step)
  self._timer.update_last_triggered_step(global_step)
Example 3: end
# Required module: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def end(self, session):
  if self._save_thread:
    logging.info("Waiting for any pending checkpoints to finish.")
    self._save_thread.join()
  if self._write_graph_thread:
    logging.info("Waiting for any pending write_graph to finish.")
    self._write_graph_thread.join()

  last_step = session.run(self._global_step_tensor)

  # Save the last checkpoint synchronously if needed.
  if last_step != self._timer.last_triggered_step():
    self._save(session, last_step, asynchronous=False)

  for l in self._listeners:
    l.end(session, last_step)
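Examples 2 and 3 belong together: after_create_session offloads the potentially slow graph dump to a background thread, and end joins both the checkpoint thread and the write_graph thread so the process does not exit before graph.pbtxt is actually on disk. A stripped-down sketch of the same pattern outside of a hook, with an assumed output directory, could look like this:

import threading

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.training import training_util

tf.constant(0, name="dummy")  # make sure the default graph is non-empty

def _dump_graph():
    # Same call as in Example 2, just not wrapped in a hook.
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        "/tmp/async_graph_demo", "graph.pbtxt")

write_graph_thread = threading.Thread(target=_dump_graph)
write_graph_thread.start()
# ... training steps would run here ...
write_graph_thread.join()  # wait for the dump before shutting down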
Example 4: before_run
# Required module: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def before_run(self, run_context):  # pylint: disable=unused-argument
  if self._timer.last_triggered_step() is None:
    # Write graph in the first call.
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir,
        "graph.pbtxt")
    saver_def = self._saver.saver_def if self._saver else None
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=saver_def)
    self._summary_writer.add_graph(graph)
    self._summary_writer.add_meta_graph(meta_graph_def)
  return SessionRunArgs(self._global_step_tensor)
Example 5: _write_graph
# Required module: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def _write_graph(self):
  """Writes graph_def to `logdir` and adds it to summary if applicable."""
  assert self._is_chief
  if self._logdir:
    training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
                              self._logdir, "graph.pbtxt")
  if self._summary_writer and not self._graph_added_to_summary:
    self._summary_writer.add_graph(self._graph)
    self._summary_writer.add_meta_graph(self._meta_graph_def)
    self._graph_added_to_summary = True
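Example 5 appears to be the chief-only helper from tf.train.Supervisor: when a logdir is set, the chief writes graph.pbtxt there and registers the graph and meta-graph with the summary writer exactly once. A hedged usage sketch follows; the logdir /tmp/supervisor_demo and the toy variable are assumptions made for illustration.

import tensorflow as tf

w = tf.get_variable("w", shape=[2], initializer=tf.zeros_initializer())
increment = tf.assign_add(w, [1.0, 1.0])

# On the chief, preparing the session with a logdir exercises the
# _write_graph path above and produces /tmp/supervisor_demo/graph.pbtxt.
sv = tf.train.Supervisor(logdir="/tmp/supervisor_demo")
with sv.managed_session() as sess:
    sess.run(increment)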
Example 6: before_run
# Required module: from tensorflow.python.training import training_util [as alias]
# Or: from tensorflow.python.training.training_util import write_graph [as alias]
def before_run(self, run_context):
  """Dumps the graph and loads the checkpoint if one exists.

  Called before each call to run().

  Args:
    run_context: A `SessionRunContext` object.

  Returns: A `SessionRunArgs` object containing global_step.
  """
  # We do write graph and saver_def at the first call of before_run.
  # We cannot do this in begin, since we let other hooks to change graph and
  # add variables in begin. Graph is finalized after all begin calls.
  if self._is_chief and self._first_call:
    training_util.write_graph(
        ops.get_default_graph().as_graph_def(add_shapes=True),
        self._checkpoint_dir,
        "graph.pbtxt")
    # dump model details to "model_analysis.txt"
    dump_model_analysis(self._checkpoint_dir)  # dump model configs
    graph = ops.get_default_graph()
    meta_graph_def = meta_graph.create_meta_graph_def(
        graph_def=graph.as_graph_def(add_shapes=True),
        saver_def=self._saver.saver_def)
    if self._summary_writer is not None:
      self._summary_writer.add_graph(graph)
      self._summary_writer.add_meta_graph(meta_graph_def)
    tf.logging.info("CheckpointSaverHook (before_run): dump graph...")
  self._first_call = False
  return tf.train.SessionRunArgs(self._global_step)
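Compared with the stock hook in Examples 1, 2 and 4, this last variant (apparently a custom CheckpointSaverHook rather than the one shipped with TensorFlow) adds two extra guards: the graph is written only on the chief worker (self._is_chief) and only on the very first call, tracked with an explicit _first_call flag instead of the timer, and a model-analysis report is dumped next to graph.pbtxt via dump_model_analysis.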