This article collects typical usage examples of the Python attribute tensorflow.core.util.event_pb2.SessionLog.START. If you are unsure what SessionLog.START does or how to use it, the curated examples below may help. You can also read more about its containing class, tensorflow.core.util.event_pb2.SessionLog.
Ten code examples of SessionLog.START are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
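Before the full examples, here is a minimal, hedged sketch of the attribute in isolation: building a SessionLog proto with status START and writing it through a TF 1.x summary writer. The log directory path is illustrative, not taken from any example below.

# Minimal sketch (assumes the TF 1.x API; /tmp/demo_logs is an illustrative path)
import tensorflow as tf
from tensorflow.core.util.event_pb2 import SessionLog

writer = tf.summary.FileWriter("/tmp/demo_logs")
# Record that a new session started at global step 0; TensorBoard uses this
# START marker to discard stale events from earlier runs with larger steps.
writer.add_session_log(SessionLog(status=SessionLog.START), global_step=0)
writer.flush()
writer.close()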
Example 1: after_run
# Required import: from tensorflow.core.util.event_pb2 import SessionLog [as alias]
# Or: from tensorflow.core.util.event_pb2.SessionLog import START [as alias]
def after_run(self, run_context, run_values):
  if not self._summary_writer:
    return

  stale_global_step = run_values.results["global_step"]
  global_step = stale_global_step + 1
  if self._next_step is None or self._request_summary:
    global_step = run_context.session.run(self._global_step_tensor)

  if self._next_step is None:
    self._summary_writer.add_session_log(
        SessionLog(status=SessionLog.START), global_step)

  if "summary" in run_values.results:
    self._timer.update_last_triggered_step(global_step)
    for summary in run_values.results["summary"]:
      self._summary_writer.add_summary(summary, global_step)
    self._summary_writer.flush()

  self._next_step = global_step + 1
Example 2: after_run
# Required import: from tensorflow.core.util.event_pb2 import SessionLog [as alias]
# Or: from tensorflow.core.util.event_pb2.SessionLog import START [as alias]
def after_run(self, run_context, run_values):
  _ = run_context
  if not self._summary_writer:
    return

  global_step = run_values.results["global_step"]

  if self._next_step is None:
    self._summary_writer.add_session_log(
        SessionLog(status=SessionLog.START), global_step)

  if self._request_summary:
    self._timer.update_last_triggered_step(global_step)
    if "summary" in run_values.results:
      for summary in run_values.results["summary"]:
        self._summary_writer.add_summary(summary, global_step)

  self._next_step = global_step + 1
Example 3: _CheckForRestartAndMaybePurge
# Required import: from tensorflow.core.util.event_pb2 import SessionLog [as alias]
# Or: from tensorflow.core.util.event_pb2.SessionLog import START [as alias]
def _CheckForRestartAndMaybePurge(self, event):
  """Check and discard expired events using SessionLog.START.

  Check for a SessionLog.START event and purge all previously seen events
  with larger steps, because they are out of date. Because of supervisor
  threading, it is possible that this logic will cause the first few event
  messages to be discarded since supervisor threading does not guarantee
  that the START message is deterministically written first.

  This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which
  can inadvertently discard events due to supervisor threading.

  Args:
    event: The event to use as reference. If the event is a START event, all
      previously seen events with a greater event.step will be purged.
  """
  if event.HasField(
      'session_log') and event.session_log.status == SessionLog.START:
    self._Purge(event, by_tags=False)
Example 4: after_run
# Required import: from tensorflow.core.util.event_pb2 import SessionLog [as alias]
# Or: from tensorflow.core.util.event_pb2.SessionLog import START [as alias]
def after_run(self, run_context, run_values):
  if not self._summary_writer:
    return

  stale_global_step = run_values.results["global_step"]
  global_step = stale_global_step + 1
  if self._next_step is None or self._request_summary:
    global_step = run_context.session.run(self._global_step_tensor)

  if self._next_step is None:
    self._summary_writer.add_session_log(SessionLog(status=SessionLog.START), global_step)

  if "summary" in run_values.results:
    self._timer.update_last_triggered_step(global_step)
    for summary in run_values.results["summary"]:
      self._summary_writer.add_summary(summary, global_step)

  self._next_step = global_step + 1
Example 5: after_run
# Required import: from tensorflow.core.util.event_pb2 import SessionLog [as alias]
# Or: from tensorflow.core.util.event_pb2.SessionLog import START [as alias]
def after_run(self, run_context, run_values):
  _ = run_context
  if not self._summary_writer:
    return

  global_step = run_values.results["global_step"]

  if self._next_step is None:
    self._summary_writer.add_session_log(
        SessionLog(status=SessionLog.START), global_step)

  if self._request_summary:
    self._timer.update_last_triggered_step(global_step)
    if "summary" in run_values.results:
      self._summary_writer.add_summary(run_values.results["summary"],
                                       global_step)

  self._next_step = global_step + 1
Example 6: testSessionLogSummaries
# Required import: from tensorflow.core.util.event_pb2 import SessionLog [as alias]
# Or: from tensorflow.core.util.event_pb2.SessionLog import START [as alias]
def testSessionLogSummaries(self):
  data = [
      {'session_log': SessionLog(status=SessionLog.START), 'step': 0},
      {'session_log': SessionLog(status=SessionLog.CHECKPOINT), 'step': 1},
      {'session_log': SessionLog(status=SessionLog.CHECKPOINT), 'step': 2},
      {'session_log': SessionLog(status=SessionLog.CHECKPOINT), 'step': 3},
      {'session_log': SessionLog(status=SessionLog.STOP), 'step': 4},
      {'session_log': SessionLog(status=SessionLog.START), 'step': 5},
      {'session_log': SessionLog(status=SessionLog.STOP), 'step': 6},
  ]
  self._WriteScalarSummaries(data)
  units = efi.get_inspection_units(self.logdir)
  self.assertEqual(1, len(units))
  printable = efi.get_dict_to_print(units[0].field_to_obs)
  self.assertEqual(printable['sessionlog:start']['steps'], [0, 5])
  self.assertEqual(printable['sessionlog:stop']['steps'], [4, 6])
  self.assertEqual(printable['sessionlog:checkpoint']['num_steps'], 3)
Example 7: after_run
# Required import: from tensorflow.core.util.event_pb2 import SessionLog [as alias]
# Or: from tensorflow.core.util.event_pb2.SessionLog import START [as alias]
def after_run(self, run_context, run_values):
  _ = run_context
  if not self._summary_writer:
    return

  stale_global_step = run_values.results["global_step"]
  global_step = stale_global_step + 1
  if self._next_step is None or self._request_summary:
    global_step = run_context.session.run(self._global_step_tensor)

  if self._next_step is None:
    self._summary_writer.add_session_log(
        SessionLog(status=SessionLog.START), global_step)

  if self._request_summary:
    self._timer.update_last_triggered_step(global_step)
    if "summary" in run_values.results:
      for summary in run_values.results["summary"]:
        self._summary_writer.add_summary(summary, global_step)

  self._next_step = global_step + 1
Author: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 23, Source file: basic_session_run_hooks.py
Example 8: get_field_to_observations_map
# Required import: from tensorflow.core.util.event_pb2 import SessionLog [as alias]
# Or: from tensorflow.core.util.event_pb2.SessionLog import START [as alias]
def get_field_to_observations_map(generator, query_for_tag=''):
  """Return a field to `Observations` dict for the event generator.

  Args:
    generator: A generator over event protos.
    query_for_tag: A string that if specified, only create observations for
      events with this tag name.

  Returns:
    A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list.
  """

  def increment(stat, event, tag=''):
    assert stat in TRACKED_FIELDS
    field_to_obs[stat].append(Observation(step=event.step,
                                          wall_time=event.wall_time,
                                          tag=tag)._asdict())

  field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])

  for event in generator:
    ## Process the event
    if event.HasField('graph_def') and (not query_for_tag):
      increment('graph', event)
    if event.HasField('session_log') and (not query_for_tag):
      status = event.session_log.status
      if status == SessionLog.START:
        increment('sessionlog:start', event)
      elif status == SessionLog.STOP:
        increment('sessionlog:stop', event)
      elif status == SessionLog.CHECKPOINT:
        increment('sessionlog:checkpoint', event)
    elif event.HasField('summary'):
      for value in event.summary.value:
        if query_for_tag and value.tag != query_for_tag:
          continue
        for proto_name, display_name in SUMMARY_TYPE_TO_FIELD.items():
          if value.HasField(proto_name):
            increment(display_name, event, value.tag)
  return field_to_obs
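As a hedged usage sketch, the function above accepts any iterable of Event protos, for example the one produced by tf.train.summary_iterator in TF 1.x. The events-file path below is an illustrative assumption, not taken from the example.

# Hypothetical usage; assumes a TF 1.x events file exists at this path.
events = tf.train.summary_iterator("/tmp/train_logs/events.out.tfevents.12345")
field_to_obs = get_field_to_observations_map(events)
# Each SessionLog.START event contributes one Observation under 'sessionlog:start'.
print(len(field_to_obs['sessionlog:start']))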
Example 9: start_standard_services
# Required import: from tensorflow.core.util.event_pb2 import SessionLog [as alias]
# Or: from tensorflow.core.util.event_pb2.SessionLog import START [as alias]
def start_standard_services(self, sess):
  """Start the standard services for 'sess'.

  This starts services in the background. The services started depend
  on the parameters to the constructor and may include:

    - A Summary thread computing summaries every save_summaries_secs.
    - A Checkpoint thread saving the model every save_model_secs.
    - A StepCounter thread measuring step time.

  Args:
    sess: A Session.

  Returns:
    A list of threads that are running the standard services. You can use
    the Supervisor's Coordinator to join these threads with:
      sv.coord.Join(<list of threads>)

  Raises:
    RuntimeError: If called with a non-chief Supervisor.
    ValueError: If no `logdir` was passed to the constructor, as the
      services need a log directory.
  """
  if not self._is_chief:
    raise RuntimeError("Only chief supervisor can start standard services. "
                       "Because only chief supervisors can write events.")

  if not self._logdir:
    logging.warning("Standard services need a 'logdir' "
                    "passed to the SessionManager")
    return

  if self._global_step is not None and self._summary_writer:
    # Only add the session log if we keep track of global step.
    # TensorBoard cannot use START message for purging expired events
    # if there is no step value.
    current_step = training_util.global_step(sess, self._global_step)
    self._summary_writer.add_session_log(
        SessionLog(status=SessionLog.START),
        current_step)

  threads = []
  if self._save_summaries_secs and self._summary_writer:
    if self._summary_op is not None:
      threads.append(SVSummaryThread(self, sess))
    if self._global_step is not None:
      threads.append(SVStepCounterThread(self, sess))
  if self.saver and self._save_model_secs:
    threads.append(SVTimerCheckpointThread(self, sess))
  for t in threads:
    t.start()
  return threads
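A hedged sketch of how this method is typically reached with the TF 1.x Supervisor API; the logdir path is illustrative. By default prepare_or_wait_for_session starts the standard services itself, so start_standard_services=False is passed here to show the explicit call.

# Illustrative only; assumes a chief Supervisor with a log directory.
sv = tf.train.Supervisor(logdir="/tmp/train_logs")
sess = sv.prepare_or_wait_for_session(start_standard_services=False)
threads = sv.start_standard_services(sess)  # writes the SessionLog.START event
# ... training loop ...
sv.coord.request_stop()
sv.coord.join(threads)
sess.close()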
Example 10: testCloseAndReopen
# Required import: from tensorflow.core.util.event_pb2 import SessionLog [as alias]
# Or: from tensorflow.core.util.event_pb2.SessionLog import START [as alias]
def testCloseAndReopen(self):
  test_dir = self._CleanTestDir("close_and_reopen")
  sw = tf.train.SummaryWriter(test_dir)
  sw.add_session_log(tf.SessionLog(status=SessionLog.START), 1)
  sw.close()
  # Sleep at least one second to make sure we get a new event file name.
  time.sleep(1.2)
  sw.reopen()
  sw.add_session_log(tf.SessionLog(status=SessionLog.START), 2)
  sw.close()

  # We should now have 2 events files.
  event_paths = sorted(glob.glob(os.path.join(test_dir, "event*")))
  self.assertEquals(2, len(event_paths))

  # Check the first file contents.
  rr = tf.train.summary_iterator(event_paths[0])
  # The first event should list the file_version.
  ev = next(rr)
  self._assertRecent(ev.wall_time)
  self.assertEquals("brain.Event:2", ev.file_version)
  # The next event should be the START message.
  ev = next(rr)
  self._assertRecent(ev.wall_time)
  self.assertEquals(1, ev.step)
  self.assertEquals(SessionLog.START, ev.session_log.status)
  # We should be done.
  self.assertRaises(StopIteration, lambda: next(rr))

  # Check the second file contents.
  rr = tf.train.summary_iterator(event_paths[1])
  # The first event should list the file_version.
  ev = next(rr)
  self._assertRecent(ev.wall_time)
  self.assertEquals("brain.Event:2", ev.file_version)
  # The next event should be the START message.
  ev = next(rr)
  self._assertRecent(ev.wall_time)
  self.assertEquals(2, ev.step)
  self.assertEquals(SessionLog.START, ev.session_log.status)
  # We should be done.
  self.assertRaises(StopIteration, lambda: next(rr))