This article collects typical usage examples of the Python function tensorflow.python.ops.summary_ops_v2.always_record_summaries. If you have been wondering exactly what always_record_summaries does and how to use it, the hand-picked code examples below should help.
The following 15 code examples of always_record_summaries are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
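Before the numbered examples, here is a minimal sketch of the pattern most of them share. This is an illustrative sketch only: it assumes a TF 1.x-era build with eager execution enabled (summary_ops_v2 is an internal module, so the import path may differ across versions), and the log directory is a made-up placeholder.

from tensorflow.python.ops import summary_ops_v2

# always_record_summaries() returns a context manager that forces the
# "should record" condition to True, so every summary op executed inside
# the block is actually written, regardless of any step-based policy.
writer = summary_ops_v2.create_file_writer('/tmp/demo_logs')  # placeholder path
with writer.as_default(), summary_ops_v2.always_record_summaries():
  summary_ops_v2.scalar('loss', 0.5, step=1)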
Example 1: _write_custom_summaries
def _write_custom_summaries(self, step, logs=None):
  """Writes metrics out as custom scalar summaries.

  Arguments:
      step: the global step to use for TensorBoard.
      logs: dict. Keys are scalar summary names, values are
          NumPy scalars.
  """
  logs = logs or {}
  if context.executing_eagerly():
    # use v2 summary ops
    with self.writer.as_default(), summary_ops_v2.always_record_summaries():
      for name, value in logs.items():
        if isinstance(value, np.ndarray):
          value = value.item()
        summary_ops_v2.scalar(name, value, step=step)
  else:
    # use FileWriter from v1 summary
    for name, value in logs.items():
      if isinstance(value, np.ndarray):
        value = value.item()
      summary = tf_summary.Summary()
      summary_value = summary.value.add()
      summary_value.simple_value = value
      summary_value.tag = name
      self.writer.add_summary(summary, step)
  self.writer.flush()
Example 2: testWriterInitAndClose
def testWriterInitAndClose(self):
  logdir = self.get_temp_dir()
  get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    self.assertEqual(1, get_total())  # file_version Event
    # Calling init() again while writer is open has no effect
    writer.init()
    self.assertEqual(1, get_total())
    try:
      # Not using .as_default() to avoid implicit flush when exiting
      writer.set_as_default()
      summary_ops.scalar('one', 1.0, step=1)
      self.assertEqual(1, get_total())
      # Calling .close() should do an implicit flush
      writer.close()
      self.assertEqual(2, get_total())
      # Calling init() on a closed writer should start a new file
      time.sleep(1.1)  # Ensure filename has a different timestamp
      writer.init()
      files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
      self.assertEqual(2, len(files))
      get_total = lambda: len(summary_test_util.events_from_file(files[1]))
      self.assertEqual(1, get_total())  # file_version Event
      summary_ops.scalar('two', 2.0, step=2)
      writer.close()
      self.assertEqual(2, get_total())
    finally:
      # Clean up by resetting default writer
      summary_ops.create_file_writer(None).set_as_default()
Example 3: _test_summary_for_replica_zero_only
def _test_summary_for_replica_zero_only(self, d):
  logdir = tempfile.mkdtemp()

  def run_fn():
    """Function executed for each replica."""
    with summary_writer.as_default():
      replica_id = ds_context.get_replica_context().replica_id_in_sync_group
      return summary_ops.write("a", replica_id)

  with self.cached_session() as sess, d.scope(), \
      summary_ops.always_record_summaries():
    # We need global_step because the summary writing op *always* has
    # global_step as input, whether we always or never record summaries.
    global_step = training_util.get_or_create_global_step()
    if not context.executing_eagerly():
      # When executing eagerly, variables are initialized immediately after
      # creation, and their initializer will be None.
      global_step.initializer.run()
    summary_ops.set_step(0)
    summary_writer = summary_ops.create_file_writer(logdir)
    output = d.extended.call_for_each_replica(run_fn)
    unwrapped = d.unwrap(output)
    if not context.executing_eagerly():
      sess.run(summary_writer.init())
      sess.run(unwrapped)
      sess.run(summary_writer.close())

    events = _events_from_logdir(self, logdir)
    # There will be 2 entries: 1 summary file header entry, and 1 entry
    # written by replica 0.
    self.assertLen(events, 2)
    self.assertEqual(events[1].summary.value[0].tag, "a")
    self.assertEqual(events[1].summary.value[0].simple_value, 0.0)
Example 4: testWriterInitAndClose
def testWriterInitAndClose(self):
  logdir = self.get_temp_dir()
  with summary_ops.always_record_summaries():
    writer = summary_ops.create_file_writer(
        logdir, max_queue=100, flush_millis=1000000)
    with writer.as_default():
      summary_ops.scalar('one', 1.0, step=1)

  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
    self.assertEqual(1, get_total())  # file_version Event
    # Running init() again while writer is open has no effect
    sess.run(writer.init())
    self.assertEqual(1, get_total())
    sess.run(summary_ops.all_summary_ops())
    self.assertEqual(1, get_total())
    # Running close() should do an implicit flush
    sess.run(writer.close())
    self.assertEqual(2, get_total())
    # Running init() on a closed writer should start a new file
    time.sleep(1.1)  # Ensure filename has a different timestamp
    sess.run(writer.init())
    sess.run(summary_ops.all_summary_ops())
    sess.run(writer.close())
    files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
    self.assertEqual(2, len(files))
    self.assertEqual(2, len(summary_test_util.events_from_file(files[1])))
Example 5: testIntegerSummaries
def testIntegerSummaries(self):
  step = training_util.create_global_step()
  writer = self.create_db_writer()

  def adder(x, y):
    state_ops.assign_add(step, 1)
    summary_ops.generic('x', x)
    summary_ops.generic('y', y)
    sum_ = x + y
    summary_ops.generic('sum', sum_)
    return sum_

  with summary_ops.always_record_summaries():
    with writer.as_default():
      self.assertEqual(5, adder(int64(2), int64(3)).numpy())

  six.assertCountEqual(
      self, [1, 1, 1],
      get_all(self.db, 'SELECT step FROM Tensors WHERE dtype IS NOT NULL'))
  six.assertCountEqual(self, ['x', 'y', 'sum'],
                       get_all(self.db, 'SELECT tag_name FROM Tags'))
  x_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "x"')
  y_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "y"')
  sum_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "sum"')

  with summary_ops.always_record_summaries():
    with writer.as_default():
      self.assertEqual(9, adder(int64(4), int64(5)).numpy())

  six.assertCountEqual(
      self, [1, 1, 1, 2, 2, 2],
      get_all(self.db, 'SELECT step FROM Tensors WHERE dtype IS NOT NULL'))
  six.assertCountEqual(self, [x_id, y_id, sum_id],
                       get_all(self.db, 'SELECT tag_id FROM Tags'))
  self.assertEqual(2, get_tensor(self.db, x_id, 1))
  self.assertEqual(3, get_tensor(self.db, y_id, 1))
  self.assertEqual(5, get_tensor(self.db, sum_id, 1))
  self.assertEqual(4, get_tensor(self.db, x_id, 2))
  self.assertEqual(5, get_tensor(self.db, y_id, 2))
  self.assertEqual(9, get_tensor(self.db, sum_id, 2))
  six.assertCountEqual(
      self, ['experiment'],
      get_all(self.db, 'SELECT experiment_name FROM Experiments'))
  six.assertCountEqual(self, ['run'],
                       get_all(self.db, 'SELECT run_name FROM Runs'))
  six.assertCountEqual(self, ['user'],
                       get_all(self.db, 'SELECT user_name FROM Users'))
Example 6: testGraphSummary
def testGraphSummary(self):
  training_util.get_or_create_global_step()
  name = 'hi'
  graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),))
  with summary_ops.always_record_summaries():
    with self.create_db_writer().as_default():
      summary_ops.graph(graph)
  six.assertCountEqual(self, [name],
                       get_all(self.db, 'SELECT node_name FROM Nodes'))
Example 7: __init__
def __init__(self, session, logdir, max_queue=10, flush_secs=120,
             filename_suffix=''):
  """Creates an `EventFileWriterV2` and an event file to write to.

  On construction, this calls `tf.contrib.summary.create_file_writer` within
  the graph from `session.graph` to look up a shared summary writer resource
  for `logdir` if one exists, and creates one if not. Creating the summary
  writer resource in turn creates a new event file in `logdir` to be filled
  with `Event` protocol buffers passed to `add_event`. Graph ops to control
  this writer resource are added to `session.graph` during this init call;
  stateful methods on this class will call `session.run()` on these ops.

  Note that because the underlying resource is shared, it is possible that
  other parts of the code using the same session may interact independently
  with the resource, e.g. by flushing or even closing it. It is the caller's
  responsibility to avoid any undesirable sharing in this regard.

  The remaining arguments to the constructor (`flush_secs`, `max_queue`, and
  `filename_suffix`) control the construction of the shared writer resource
  if one is created. If an existing resource is reused, these arguments have
  no effect. See `tf.contrib.summary.create_file_writer` for details.

  Args:
    session: A `tf.compat.v1.Session`. Session that will hold the shared
      writer resource. The writer ops will be added to session.graph during
      this init call.
    logdir: A string. Directory where event file will be written.
    max_queue: Integer. Size of the queue for pending events and summaries.
    flush_secs: Number. How often, in seconds, to flush the
      pending events and summaries to disk.
    filename_suffix: A string. Every event file's name is suffixed with
      `filename_suffix`.
  """
  self._session = session
  self._logdir = logdir
  self._closed = False
  if not gfile.IsDirectory(self._logdir):
    gfile.MakeDirs(self._logdir)

  with self._session.graph.as_default():
    with ops.name_scope('filewriter'):
      file_writer = summary_ops_v2.create_file_writer(
          logdir=self._logdir,
          max_queue=max_queue,
          flush_millis=flush_secs * 1000,
          filename_suffix=filename_suffix)
      with summary_ops_v2.always_record_summaries(), file_writer.as_default():
        self._event_placeholder = array_ops.placeholder_with_default(
            constant_op.constant('unused', dtypes.string),
            shape=[])
        self._add_event_op = summary_ops_v2.import_event(
            self._event_placeholder)
      self._init_op = file_writer.init()
      self._flush_op = file_writer.flush()
      self._close_op = file_writer.close()
  self._session.run(self._init_op)
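For context, a hedged sketch of how this constructor might be driven follows. The `add_event` method is referenced by the docstring above but is not part of this excerpt, so its exact signature is an assumption; the logdir path is a placeholder.

import tensorflow.compat.v1 as tf

with tf.Session() as sess:
  # __init__ above builds the shared writer resource plus its
  # init/flush/close ops in sess.graph and runs the init op.
  writer = EventFileWriterV2(sess, '/tmp/demo_logs', flush_secs=5)
  event = tf.Event(wall_time=1.0, step=1)  # an Event protocol buffer
  # Assumed behavior: add_event() feeds the serialized proto to the
  # placeholder and runs the import_event op built in __init__.
  writer.add_event(event)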
Example 8: testEagerMemory
def testEagerMemory(self):
  training_util.get_or_create_global_step()
  logdir = self.get_temp_dir()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t0').as_default(), summary_ops.always_record_summaries():
    summary_ops.generic('tensor', 1, '')
    summary_ops.scalar('scalar', 2.0)
    summary_ops.histogram('histogram', [1.0])
    summary_ops.image('image', [[[[1.0]]]])
    summary_ops.audio('audio', [[1.0]], 1.0, 1)
Example 9: testSummaryName
def testSummaryName(self):
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(logdir, max_queue=0)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=1)

  with self.cached_session() as sess:
    sess.run(summary_ops.summary_writer_initializer_op())
    sess.run(summary_ops.all_summary_ops())

  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(2, len(events))
  self.assertEqual('scalar', events[1].summary.value[0].tag)
Example 10: testSummaryGlobalStep
def testSummaryGlobalStep(self):
  step = training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t2').as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0, step=step)

  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 2)
  self.assertEqual(events[1].summary.value[0].tag, 'scalar')
Example 11: testMaxQueue
def testMaxQueue(self):
  logs = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logs, max_queue=1, flush_millis=999999,
      name='lol').as_default(), summary_ops.always_record_summaries():
    get_total = lambda: len(summary_test_util.events_from_logdir(logs))
    # Note: First tf.Event is always file_version.
    self.assertEqual(1, get_total())
    summary_ops.scalar('scalar', 2.0, step=1)
    self.assertEqual(1, get_total())
    # Should flush after second summary since max_queue = 1
    summary_ops.scalar('scalar', 2.0, step=2)
    self.assertEqual(3, get_total())
Example 12: testWriteSummaries
def testWriteSummaries(self):
  m = metrics.Mean()
  m([1, 10, 100])
  training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name="t0").as_default(), summary_ops.always_record_summaries():
    m.result()  # As a side effect, this writes summaries.

  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(len(events), 2)
  self.assertEqual(events[1].summary.value[0].simple_value, 37.0)
Example 13: testSummaryGlobalStep
def testSummaryGlobalStep(self):
  training_util.get_or_create_global_step()
  logdir = self.get_temp_dir()
  writer = summary_ops.create_file_writer(logdir, max_queue=0)
  with writer.as_default(), summary_ops.always_record_summaries():
    summary_ops.scalar('scalar', 2.0)

  with self.cached_session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(summary_ops.summary_writer_initializer_op())
    step, _ = sess.run(
        [training_util.get_global_step(), summary_ops.all_summary_ops()])

  events = summary_test_util.events_from_logdir(logdir)
  self.assertEqual(2, len(events))
  self.assertEqual(step, events[1].step)
Example 14: testSummaryOps
def testSummaryOps(self):
  training_util.get_or_create_global_step()
  logdir = tempfile.mkdtemp()
  with summary_ops.create_file_writer(
      logdir, max_queue=0,
      name='t0').as_default(), summary_ops.always_record_summaries():
    summary_ops.generic('tensor', 1, '')
    summary_ops.scalar('scalar', 2.0)
    summary_ops.histogram('histogram', [1.0])
    summary_ops.image('image', [[[[1.0]]]])
    summary_ops.audio('audio', [[1.0]], 1.0, 1)
  # The working condition of the ops is tested in the C++ test, so here we
  # just check that we're calling them correctly.
  self.assertTrue(gfile.Exists(logdir))
Example 15: testDbURIOpen
def testDbURIOpen(self):
  tmpdb_path = os.path.join(self.get_temp_dir(), 'tmpDbURITest.sqlite')
  tmpdb_uri = six.moves.urllib_parse.urljoin("file:", tmpdb_path)
  tmpdb_writer = summary_ops.create_db_writer(
      tmpdb_uri,
      "experimentA",
      "run1",
      "user1")
  with summary_ops.always_record_summaries():
    with tmpdb_writer.as_default():
      summary_ops.scalar('t1', 2.0)

  tmpdb = sqlite3.connect(tmpdb_path)
  num = get_one(tmpdb, 'SELECT count(*) FROM Tags WHERE tag_name = "t1"')
  self.assertEqual(num, 1)
  tmpdb.close()