本文整理汇总了Python中tensorflow.contrib.summary.summary_ops.always_record_summaries函数的典型用法代码示例。如果您正苦于以下问题:Python always_record_summaries函数的具体用法?Python always_record_summaries怎么用?Python always_record_summaries使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了always_record_summaries函数的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testSummaryOps
def testSummaryOps(self):
    """Smoke test: each summary op can be invoked once a writer exists.

    The working condition of the ops is tested in the C++ test, so here we
    only verify that the Python bindings are called correctly and that the
    log directory gets created.
    """
    training_util.get_or_create_global_step()
    log_dir = tempfile.mkdtemp()
    summary_ops.create_summary_file_writer(log_dir, max_queue=0, name='t0')
    summary_ops.always_record_summaries()
    # One invocation of every op kind, each with minimal valid arguments.
    ops_and_args = (
        (summary_ops.generic, ('tensor', 1, '')),
        (summary_ops.scalar, ('scalar', 2.0)),
        (summary_ops.histogram, ('histogram', [1.0])),
        (summary_ops.image, ('image', [[[[1.0]]]])),
        (summary_ops.audio, ('audio', [[1.0]], 1.0, 1)),
    )
    for summary_op, args in ops_and_args:
        summary_op(*args)
    self.assertTrue(gfile.Exists(log_dir))
示例2: testSummaryName
def testSummaryName(self):
    """The tag stored in the event file matches the name passed to scalar()."""
    training_util.get_or_create_global_step()
    log_dir = tempfile.mkdtemp()
    summary_ops.create_summary_file_writer(log_dir, max_queue=0, name='t2')
    summary_ops.always_record_summaries()
    summary_ops.scalar('scalar', 2.0)
    self.assertTrue(gfile.Exists(log_dir))
    event_files = gfile.ListDirectory(log_dir)
    self.assertEqual(len(event_files), 1)
    event_path = os.path.join(log_dir, event_files[0])
    raw_records = list(tf_record.tf_record_iterator(event_path))
    # Record 0 is always the file_version event; record 1 holds our scalar.
    self.assertEqual(len(raw_records), 2)
    parsed = event_pb2.Event()
    parsed.ParseFromString(raw_records[1])
    self.assertEqual(parsed.summary.value[0].tag, 'scalar')
示例3: testGraphSummary
def testGraphSummary(self):
    """A GraphDef written via graph() shows up in the DB's Nodes table."""
    training_util.get_or_create_global_step()
    node_name = 'hi'
    graph_def = graph_pb2.GraphDef(
        node=(node_def_pb2.NodeDef(name=node_name),))
    with summary_ops.always_record_summaries():
        with self.create_db_writer().as_default():
            summary_ops.graph(graph_def)
    six.assertCountEqual(self, [node_name],
                         get_all(self.db, 'SELECT node_name FROM Nodes'))
示例4: testIntegerSummaries
def testIntegerSummaries(self):
    """Integer tensors summarized via the DB writer land in the DB tables.

    Runs `adder` under two separate writer sessions and checks that steps,
    tags, experiment/run/user rows and the tensor values all accumulate
    correctly across sessions.
    """
    step = training_util.create_global_step()

    def adder(x, y):
        # Each call advances the global step by one and records x, y, and
        # their sum as generic summaries.
        state_ops.assign_add(step, 1)
        summary_ops.generic('x', x)
        summary_ops.generic('y', y)
        sum_ = x + y
        summary_ops.generic('sum', sum_)
        return sum_

    # First session: one adder call at step 1.
    with summary_ops.always_record_summaries():
        with self.create_db_writer().as_default():
            self.assertEqual(5, adder(int64(2), int64(3)).numpy())

    # Three tensors (x, y, sum), all recorded at step 1.
    six.assertCountEqual(self, [1, 1, 1],
                         get_all(self.db, 'SELECT step FROM Tensors'))
    six.assertCountEqual(self, ['x', 'y', 'sum'],
                         get_all(self.db, 'SELECT tag_name FROM Tags'))
    x_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "x"')
    y_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "y"')
    sum_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "sum"')

    # Second session: the same tags must be reused (no new Tag rows), with
    # three more tensors recorded at step 2.
    with summary_ops.always_record_summaries():
        with self.create_db_writer().as_default():
            self.assertEqual(9, adder(int64(4), int64(5)).numpy())

    six.assertCountEqual(self, [1, 1, 1, 2, 2, 2],
                         get_all(self.db, 'SELECT step FROM Tensors'))
    six.assertCountEqual(self, [x_id, y_id, sum_id],
                         get_all(self.db, 'SELECT tag_id FROM Tags'))
    # Values from the first call (step 1) and the second call (step 2).
    self.assertEqual(2, get_tensor(self.db, x_id, 1))
    self.assertEqual(3, get_tensor(self.db, y_id, 1))
    self.assertEqual(5, get_tensor(self.db, sum_id, 1))
    self.assertEqual(4, get_tensor(self.db, x_id, 2))
    self.assertEqual(5, get_tensor(self.db, y_id, 2))
    self.assertEqual(9, get_tensor(self.db, sum_id, 2))
    # Metadata rows — presumably set up by create_db_writer; verify against
    # that helper's definition (not visible in this chunk).
    six.assertCountEqual(
        self, ['experiment'],
        get_all(self.db, 'SELECT experiment_name FROM Experiments'))
    six.assertCountEqual(self, ['run'],
                         get_all(self.db, 'SELECT run_name FROM Runs'))
    six.assertCountEqual(self, ['user'],
                         get_all(self.db, 'SELECT user_name FROM Users'))
示例5: testSummaryGlobalStep
def testSummaryGlobalStep(self):
    """scalar() accepts an explicit `step` tensor and still tags correctly."""
    global_step = training_util.get_or_create_global_step()
    log_dir = tempfile.mkdtemp()
    writer = summary_ops.create_file_writer(log_dir, max_queue=0, name='t2')
    with writer.as_default():
        with summary_ops.always_record_summaries():
            summary_ops.scalar('scalar', 2.0, step=global_step)
    events = summary_test_util.events_from_logdir(log_dir)
    # events[0] is the file_version header; events[1] is our scalar.
    self.assertEqual(len(events), 2)
    self.assertEqual(events[1].summary.value[0].tag, 'scalar')
示例6: testMaxQueue
def testMaxQueue(self):
    """Events are buffered and flushed only when the queue hits max_queue."""
    log_dir = tempfile.mkdtemp()
    # Huge flush_millis so only queue size (not time) can trigger a flush.
    writer = summary_ops.create_file_writer(
        log_dir, max_queue=2, flush_millis=999999, name='lol')
    with writer.as_default():
        with summary_ops.always_record_summaries():

            def count_events():
                return len(summary_test_util.events_from_logdir(log_dir))

            # Note: First tf.Event is always file_version.
            self.assertEqual(1, count_events())
            summary_ops.scalar('scalar', 2.0, step=1)
            # One event is pending in the queue; nothing new on disk yet.
            self.assertEqual(1, count_events())
            summary_ops.scalar('scalar', 2.0, step=2)
            # Reaching max_queue=2 flushes both pending events.
            self.assertEqual(3, count_events())
示例7: testWriteSummaries
def testWriteSummaries(self):
    """Mean.result() writes its value as a summary when a writer is active."""
    mean_metric = metrics.Mean()
    mean_metric([1, 10, 100])
    training_util.get_or_create_global_step()
    log_dir = tempfile.mkdtemp()
    writer = summary_ops.create_file_writer(log_dir, max_queue=0, name="t0")
    with writer.as_default():
        with summary_ops.always_record_summaries():
            mean_metric.result()  # As a side-effect will write summaries.
    events = summary_test_util.events_from_logdir(log_dir)
    self.assertEqual(len(events), 2)
    # mean(1, 10, 100) == 111 / 3 == 37.0
    self.assertEqual(events[1].summary.value[0].simple_value, 37.0)
示例8: testDefunSummarys
def testDefunSummarys(self):
    """Summaries emitted inside a defun-compiled function reach the log."""
    training_util.get_or_create_global_step()
    log_dir = tempfile.mkdtemp()
    writer = summary_ops.create_file_writer(log_dir, max_queue=0, name='t1')
    with writer.as_default():
        with summary_ops.always_record_summaries():

            @function.defun
            def write():
                summary_ops.scalar('scalar', 2.0)

            write()
    events = summary_test_util.events_from_logdir(log_dir)
    # events[0] is the file_version header; events[1] is the defun's scalar.
    self.assertEqual(len(events), 2)
    self.assertEqual(events[1].summary.value[0].simple_value, 2.0)
示例9: testWriteSummaries
def testWriteSummaries(self):
    """Mean.result() writes a summary event via create_summary_file_writer."""
    mean_metric = metrics.Mean()
    mean_metric([1, 10, 100])
    training_util.get_or_create_global_step()
    log_dir = tempfile.mkdtemp()
    writer = summary_ops.create_summary_file_writer(
        log_dir, max_queue=0, name="t0")
    with writer.as_default():
        with summary_ops.always_record_summaries():
            mean_metric.result()  # As a side-effect will write summaries.
    self.assertTrue(gfile.Exists(log_dir))
    event_files = gfile.ListDirectory(log_dir)
    self.assertEqual(len(event_files), 1)
    raw_records = list(
        tf_record.tf_record_iterator(os.path.join(log_dir, event_files[0])))
    # Record 0 is the file_version header; record 1 holds the metric value.
    self.assertEqual(len(raw_records), 2)
    parsed = event_pb2.Event()
    parsed.ParseFromString(raw_records[1])
    # mean(1, 10, 100) == 111 / 3 == 37.0
    self.assertEqual(parsed.summary.value[0].simple_value, 37.0)
示例10: testSummaryGraphModeCond
def testSummaryGraphModeCond(self):
    """Summary ops inside a graph-mode cond branch pick up the cond scope.

    Builds a tf.cond whose true branch writes a scalar summary, runs it in a
    graph-mode session, and checks the written tag is prefixed with 'cond/'.
    """
    with ops.Graph().as_default(), self.test_session():
        training_util.get_or_create_global_step()
        logdir = tempfile.mkdtemp()
        with summary_ops.create_file_writer(
            logdir, max_queue=0,
            name='t2').as_default(), summary_ops.always_record_summaries():
            # In graph mode the writer resources must be initialized
            # explicitly, and the step variable initialized before any
            # summary op runs.
            summary_ops.initialize()
            training_util.get_or_create_global_step().initializer.run()

            def f():
                # True branch: writes the summary, then yields the branch
                # output value.
                summary_ops.scalar('scalar', 2.0)
                return constant_op.constant(True)

            pred = array_ops.placeholder(dtypes.bool)
            x = control_flow_ops.cond(pred, f,
                                      lambda: constant_op.constant(False))
            # Take the true branch so the summary op actually executes.
            x.eval(feed_dict={pred: True})
        events = summary_test_util.events_from_logdir(logdir)
        # events[0] is the file_version header; events[1] is our scalar,
        # whose tag carries the cond name scope.
        self.assertEqual(len(events), 2)
        self.assertEqual(events[1].summary.value[0].tag, 'cond/scalar')
示例11: testDefunSummarys
def testDefunSummarys(self):
    """Summaries from a defun-compiled function reach the event file."""
    training_util.get_or_create_global_step()
    log_dir = tempfile.mkdtemp()
    writer = summary_ops.create_summary_file_writer(
        log_dir, max_queue=0, name='t1')
    with writer.as_default():
        with summary_ops.always_record_summaries():

            @function.defun
            def write():
                summary_ops.scalar('scalar', 2.0)

            write()
    self.assertTrue(gfile.Exists(log_dir))
    event_files = gfile.ListDirectory(log_dir)
    self.assertEqual(len(event_files), 1)
    raw_records = list(
        tf_record.tf_record_iterator(os.path.join(log_dir, event_files[0])))
    # Record 0 is the file_version header; record 1 holds the defun's scalar.
    self.assertEqual(len(raw_records), 2)
    parsed = event_pb2.Event()
    parsed.ParseFromString(raw_records[1])
    self.assertEqual(parsed.summary.value[0].simple_value, 2.0)
示例12: f
def f():
    # NOTE(review): `summary_logdir` and `self` are closure variables from an
    # enclosing scope not visible in this chunk -- verify against the caller.
    # While the writer is default and recording is forced on, any summaries
    # emitted by _all_metric_results() are written to `summary_logdir`.
    with summary_ops.create_file_writer(
        summary_logdir).as_default(), summary_ops.always_record_summaries():
        return self._all_metric_results()
示例13: testShouldRecordSummary
def testShouldRecordSummary(self):
    """always_record_summaries() enables recording only within its scope."""
    # Default state: no recording condition is active.
    initially_recording = summary_ops.should_record_summaries()
    self.assertFalse(initially_recording)
    with summary_ops.always_record_summaries():
        # Inside the context, recording is unconditionally on.
        self.assertTrue(summary_ops.should_record_summaries())