This page collects typical usage examples of the EventAccumulator class from the Python module tensorboard.backend.event_processing.event_accumulator. If you are unsure how event_accumulator.EventAccumulator is used in practice, the curated code samples below may help. You can also explore further examples for the containing module, tensorboard.backend.event_processing.event_accumulator.
The following 15 code examples of event_accumulator.EventAccumulator are listed below, sorted by popularity by default.
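Before the examples, here is a minimal self-contained sketch of the typical workflow. The log directory path and the 'loss' tag are placeholders to replace with your own values.
from tensorboard.backend.event_processing import event_accumulator

# Minimal sketch: point an EventAccumulator at a log directory (placeholder path),
# parse the event files, and read back one scalar series.
acc = event_accumulator.EventAccumulator('path/to/logdir')
acc.Reload()                          # nothing is read from disk before this call
print(acc.Tags()['scalars'])          # list the scalar tags found in the logs
for event in acc.Scalars('loss'):     # 'loss' is an assumed tag name
    print(event.step, event.value)    # each ScalarEvent carries wall_time, step and value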
Example 1: load_log_dirs
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def load_log_dirs(self, dirs, **kwargs):
    kwargs.setdefault('right_align', False)
    kwargs.setdefault('window', 0)
    xy_list = []
    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
    for dir in dirs:
        event_acc = EventAccumulator(dir)
        event_acc.Reload()
        # Scalars(tag) returns (wall_time, step, value) tuples; keep steps and values.
        _, x, y = zip(*event_acc.Scalars(kwargs['tag']))
        xy_list.append([x, y])
    if kwargs['right_align']:
        x_max = float('inf')
        for x, y in xy_list:
            x_max = min(x_max, len(y))
        xy_list = [[x[:x_max], y[:x_max]] for x, y in xy_list]
    if kwargs['window']:
        xy_list = [self._window_func(np.asarray(x), np.asarray(y), kwargs['window'], np.mean) for x, y in xy_list]
    return xy_list
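A hedged usage sketch of the method above. The Plotter class, the log directories, and the tag name are all hypothetical placeholders; the method assumes its class also defines _window_func for smoothing.
# Hypothetical caller; 'Plotter' and the paths below are placeholders.
plotter = Plotter()
xy_list = plotter.load_log_dirs(
    ['log/run0', 'log/run1'],      # directories containing TensorBoard event files
    tag='episodic_return',         # scalar tag to extract (assumed name)
    right_align=True,              # truncate all curves to the shortest run
    window=10,                     # smooth values with a 10-point running mean
)
for steps, values in xy_list:
    print(len(steps), values[-1])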
Example 2: main
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
import argparse
import json
import pathlib

from tensorboard.backend.event_processing import event_accumulator


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, required=True)
    parser.add_argument('--output-dir', '-o', type=str)
    args = parser.parse_args()
    # size_guidance={'scalars': 0} keeps every scalar event instead of sampling.
    event_acc = event_accumulator.EventAccumulator(
        args.path, size_guidance={'scalars': 0})
    event_acc.Reload()
    scalars = {}
    for tag in event_acc.Tags()['scalars']:
        events = event_acc.Scalars(tag)
        scalars[tag] = [event.value for event in events]
    if args.output_dir is not None:
        output_dir = pathlib.Path(args.output_dir)
    else:
        output_dir = pathlib.Path(args.path).parent
    output_dir.mkdir(exist_ok=True, parents=True)
    outpath = output_dir / 'all_scalars.json'
    with open(outpath, 'w') as fout:
        json.dump(scalars, fout)
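The exported all_scalars.json maps each scalar tag to its list of values. A quick way to inspect it, with the path as a placeholder:
import json

# Load the file written by main() above; the path is a placeholder.
with open('path/to/all_scalars.json') as fin:
    scalars = json.load(fin)
for tag, values in scalars.items():
    print(tag, len(values))   # tag name and number of recorded values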
Example 3: testTags
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def testTags(self):
    """Tags should be found in EventAccumulator after adding some events."""
    gen = _EventGenerator(self)
    gen.AddScalar("s1")
    gen.AddScalar("s2")
    gen.AddHistogram("hst1")
    gen.AddHistogram("hst2")
    gen.AddImage("im1")
    gen.AddImage("im2")
    gen.AddAudio("snd1")
    gen.AddAudio("snd2")
    acc = ea.EventAccumulator(gen)
    acc.Reload()
    self.assertTagsEqual(
        acc.Tags(),
        {
            ea.IMAGES: ["im1", "im2"],
            ea.AUDIO: ["snd1", "snd2"],
            ea.SCALARS: ["s1", "s2"],
            ea.HISTOGRAMS: ["hst1", "hst2"],
            ea.COMPRESSED_HISTOGRAMS: ["hst1", "hst2"],
        },
    )
Example 4: testReload
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def testReload(self):
    """EventAccumulator contains suitable tags after calling Reload."""
    gen = _EventGenerator(self)
    acc = ea.EventAccumulator(gen)
    acc.Reload()
    self.assertTagsEqual(acc.Tags(), {})
    gen.AddScalar("s1")
    gen.AddScalar("s2")
    gen.AddHistogram("hst1")
    gen.AddHistogram("hst2")
    gen.AddImage("im1")
    gen.AddImage("im2")
    gen.AddAudio("snd1")
    gen.AddAudio("snd2")
    acc.Reload()
    self.assertTagsEqual(
        acc.Tags(),
        {
            ea.IMAGES: ["im1", "im2"],
            ea.AUDIO: ["snd1", "snd2"],
            ea.SCALARS: ["s1", "s2"],
            ea.HISTOGRAMS: ["hst1", "hst2"],
            ea.COMPRESSED_HISTOGRAMS: ["hst1", "hst2"],
        },
    )
Example 5: testKeyError
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def testKeyError(self):
    """KeyError should be raised when accessing non-existing keys."""
    gen = _EventGenerator(self)
    acc = ea.EventAccumulator(gen)
    acc.Reload()
    with self.assertRaises(KeyError):
        acc.Scalars("s1")
    with self.assertRaises(KeyError):
        acc.Scalars("hst1")
    with self.assertRaises(KeyError):
        acc.Scalars("im1")
    with self.assertRaises(KeyError):
        acc.Histograms("s1")
    with self.assertRaises(KeyError):
        acc.Histograms("im1")
    with self.assertRaises(KeyError):
        acc.Images("s1")
    with self.assertRaises(KeyError):
        acc.Images("hst1")
    with self.assertRaises(KeyError):
        acc.Audio("s1")
    with self.assertRaises(KeyError):
        acc.Audio("hst1")
Example 6: testNonValueEvents
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def testNonValueEvents(self):
    """Non-value events in the generator don't cause early exits."""
    gen = _EventGenerator(self)
    acc = ea.EventAccumulator(gen)
    gen.AddScalar("s1", wall_time=1, step=10, value=20)
    gen.AddEvent(
        event_pb2.Event(wall_time=2, step=20, file_version="nots2")
    )
    gen.AddScalar("s3", wall_time=3, step=100, value=1)
    gen.AddHistogram("hst1")
    gen.AddImage("im1")
    gen.AddAudio("snd1")
    acc.Reload()
    self.assertTagsEqual(
        acc.Tags(),
        {
            ea.IMAGES: ["im1"],
            ea.AUDIO: ["snd1"],
            ea.SCALARS: ["s1", "s3"],
            ea.HISTOGRAMS: ["hst1"],
            ea.COMPRESSED_HISTOGRAMS: ["hst1"],
        },
    )
Example 7: testOrphanedDataNotDiscardedIfFlagUnset
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def testOrphanedDataNotDiscardedIfFlagUnset(self):
    """Tests that events are not discarded if purge_orphaned_data is false."""
    gen = _EventGenerator(self)
    acc = ea.EventAccumulator(gen, purge_orphaned_data=False)
    gen.AddEvent(
        event_pb2.Event(wall_time=0, step=0, file_version="brain.Event:1")
    )
    gen.AddScalar("s1", wall_time=1, step=100, value=20)
    gen.AddScalar("s1", wall_time=1, step=200, value=20)
    gen.AddScalar("s1", wall_time=1, step=300, value=20)
    acc.Reload()
    ## Check that number of items are what they should be
    self.assertEqual([x.step for x in acc.Scalars("s1")], [100, 200, 300])
    gen.AddScalar("s1", wall_time=1, step=101, value=20)
    gen.AddScalar("s1", wall_time=1, step=201, value=20)
    gen.AddScalar("s1", wall_time=1, step=301, value=20)
    acc.Reload()
    ## Check that we have NOT discarded 200 and 300 from s1
    self.assertEqual(
        [x.step for x in acc.Scalars("s1")], [100, 200, 300, 101, 201, 301]
    )
Example 8: testSummaryMetadata_FirstMetadataWins
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def testSummaryMetadata_FirstMetadataWins(self):
    logdir = self.get_temp_dir()
    summary_metadata_1 = summary_pb2.SummaryMetadata(
        display_name="current tagee",
        summary_description="no",
        plugin_data=summary_pb2.SummaryMetadata.PluginData(
            plugin_name="outlet", content=b"120v"
        ),
    )
    self._writeMetadata(logdir, summary_metadata_1, nonce="1")
    acc = ea.EventAccumulator(logdir)
    acc.Reload()
    summary_metadata_2 = summary_pb2.SummaryMetadata(
        display_name="tagee of the future",
        summary_description="definitely not",
        plugin_data=summary_pb2.SummaryMetadata.PluginData(
            plugin_name="plug", content=b"110v"
        ),
    )
    self._writeMetadata(logdir, summary_metadata_2, nonce="2")
    acc.Reload()
    self.assertProtoEquals(
        summary_metadata_1, acc.SummaryMetadata("you_are_it")
    )
Example 9: Reload
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def Reload(self):
    """Call `Reload` on every `EventAccumulator`."""
    logger.info("Beginning EventMultiplexer.Reload()")
    self._reload_called = True
    # Build a list so we're safe even if the list of accumulators is modified
    # even while we're reloading.
    with self._accumulators_mutex:
        items = list(self._accumulators.items())
    names_to_delete = set()
    for name, accumulator in items:
        try:
            accumulator.Reload()
        except (OSError, IOError) as e:
            logger.error("Unable to reload accumulator '%s': %s", name, e)
        except directory_watcher.DirectoryDeletedError:
            names_to_delete.add(name)
    with self._accumulators_mutex:
        for name in names_to_delete:
            logger.warning("Deleting accumulator '%s'", name)
            del self._accumulators[name]
    logger.info("Finished with EventMultiplexer.Reload()")
    return self
Example 10: test_study_name
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def test_study_name() -> None:
    dirname = tempfile.mkdtemp()
    metric_name = "target"
    study_name = "test_tensorboard_integration"
    tbcallback = TensorBoardCallback(dirname, metric_name)
    study = optuna.create_study(study_name=study_name)
    study.optimize(_objective_func, n_trials=1, callbacks=[tbcallback])
    event_acc = EventAccumulator(os.path.join(dirname, "trial-0"))
    event_acc.Reload()
    try:
        assert len(event_acc.Tensors("target")) == 1
    except Exception as e:
        raise e
    finally:
        shutil.rmtree(dirname)
Example 11: parse_indicators_single_path_nas
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def parse_indicators_single_path_nas(path, tf_size_guidance):
    event_acc = EventAccumulator(path, tf_size_guidance)
    event_acc.Reload()
    # Show all tags in the log file
    tags = event_acc.Tags()['scalars']
    labels = ['t5x5_', 't50c_', 't100c_']
    inds = []
    for idx in range(20):
        layer_row = []
        for label_ in labels:
            summary_label_ = label_ + str(idx+1)
            decision_ij = event_acc.Scalars(summary_label_)
            layer_row.append(decision_ij[-1].value)
        inds.append(layer_row)
    return inds
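The tf_size_guidance argument passed through here is EventAccumulator's size_guidance dict, which caps how many events of each type are retained (0 means keep everything). A plausible value is sketched below; the event file path is a placeholder.
from tensorboard.backend.event_processing import event_accumulator

# size_guidance caps how many events per category are retained; 0 means "keep all".
tf_size_guidance = {
    event_accumulator.SCALARS: 0,      # keep every scalar event
    event_accumulator.IMAGES: 4,       # keep up to 4 image events per tag
    event_accumulator.HISTOGRAMS: 1,   # keep only one histogram per tag
}
inds = parse_indicators_single_path_nas('path/to/events.out.tfevents', tf_size_guidance)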
Example 12: parse_progress
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def parse_progress(path, tf_size_guidance):
    event_acc = EventAccumulator(path, tf_size_guidance)
    event_acc.Reload()
    # Show all tags in the log file
    tags = event_acc.Tags()['scalars']
    print(tags)
    runtimes_scalar = event_acc.Scalars('runtime_ms')
    runtimes = [runtimes_scalar[i].value for i in range(len(runtimes_scalar))]
    loss_scalar = event_acc.Scalars('loss')
    loss = [loss_scalar[i].value for i in range(len(loss_scalar))]
    assert len(runtimes) == len(loss)
    return runtimes, loss
Example 13: plot_from_summaries
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def plot_from_summaries(summaries_path, title=None, samples_per_update=512, updates_per_log=100):
    acc = EventAccumulator(summaries_path)
    acc.Reload()
    # ScalarEvent is a (wall_time, step, value) tuple, so s[2] is the value.
    rews_mean = np.array([s[2] for s in acc.Scalars('Rewards/Mean')])
    rews_std = np.array([s[2] for s in acc.Scalars('Rewards/Std')])
    x = samples_per_update * updates_per_log * np.arange(0, len(rews_mean))
    if not title:
        title = summaries_path.split('/')[-1].split('_')[0]
    plt.plot(x, rews_mean)
    plt.fill_between(x, rews_mean - rews_std, rews_mean + rews_std, alpha=0.2)
    plt.xlabel('Samples')
    plt.ylabel('Episode Rewards')
    plt.title(title)
    plt.xlim([0, x[-1]+1])
    plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
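A hypothetical invocation of the plotting helper above; the summaries path and title are placeholders, and the figure still has to be shown or saved by the caller:
import matplotlib.pyplot as plt

# Placeholder path; samples_per_update and updates_per_log should match your training setup.
plot_from_summaries('runs/ppo_walker_seed0', title='Walker')
plt.show()            # or plt.savefig('rewards.png')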
Example 14: Reload
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
def Reload(self):
    """Call `Reload` on every `EventAccumulator`."""
    tf.logging.info('Beginning EventMultiplexer.Reload()')
    self._reload_called = True
    # Build a list so we're safe even if the list of accumulators is modified
    # even while we're reloading.
    with self._accumulators_mutex:
        items = list(self._accumulators.items())
    names_to_delete = set()
    for name, accumulator in items:
        try:
            accumulator.Reload()
        except (OSError, IOError) as e:
            tf.logging.error("Unable to reload accumulator '%s': %s", name, e)
        except directory_watcher.DirectoryDeletedError:
            names_to_delete.add(name)
    with self._accumulators_mutex:
        for name in names_to_delete:
            tf.logging.warning("Deleting accumulator '%s'", name)
            del self._accumulators[name]
    tf.logging.info('Finished with EventMultiplexer.Reload()')
    return self
Developer ID: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 26, Source file: event_multiplexer.py
Example 15: main
# Required import: from tensorboard.backend.event_processing import event_accumulator [as alias]
# Or: from tensorboard.backend.event_processing.event_accumulator import EventAccumulator [as alias]
import argparse
import pathlib

import cv2
import numpy as np

from tensorboard.backend.event_processing import event_accumulator


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, required=True)
    parser.add_argument('--output-dir', '-o', type=str)
    args = parser.parse_args()
    # size_guidance={'images': 0} keeps every image event instead of sampling.
    event_acc = event_accumulator.EventAccumulator(args.path,
                                                   size_guidance={'images': 0})
    event_acc.Reload()
    if args.output_dir is not None:
        output_dir = pathlib.Path(args.output_dir)
    else:
        output_dir = pathlib.Path(args.path).parent / 'images'
    output_dir.mkdir(exist_ok=True, parents=True)
    for tag in event_acc.Tags()['images']:
        events = event_acc.Images(tag)
        tag_name = tag.replace('/', '_')
        dirpath = output_dir / tag_name
        dirpath.mkdir(exist_ok=True, parents=True)
        for index, event in enumerate(events):
            # Each ImageEvent carries the encoded image bytes; decode and save them.
            s = np.frombuffer(event.encoded_image_string, dtype=np.uint8)
            image = cv2.imdecode(s, cv2.IMREAD_COLOR)
            outpath = dirpath / f'{index:04}.jpg'
            cv2.imwrite(outpath.as_posix(), image)