This article collects typical usage examples of Python's tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig. If you are wondering what projector.ProjectorConfig does, how to call it, or what real-world code looks like, the curated examples below should help. You can also explore the enclosing module, tensorflow.contrib.tensorboard.plugins.projector, for further usage examples.
The following shows 15 code examples of projector.ProjectorConfig, ordered by popularity.
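Before the individual examples, it may help to see the common pattern they all share: save a checkpoint containing the embedding variable, create a ProjectorConfig, register the tensor (optionally with a metadata file of per-row labels), and write the config with projector.visualize_embeddings. Here is a minimal sketch of that pattern for the TF 1.x API used throughout this page; LOG_DIR, the variable shape, and metadata.tsv are illustrative, not taken from any example below:

import os
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

LOG_DIR = 'logs'  # illustrative log directory
os.makedirs(LOG_DIR, exist_ok=True)
embedding_var = tf.get_variable('embedding', shape=[1000, 128])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # the projector reads the tensor values from a checkpoint in LOG_DIR
    tf.train.Saver().save(sess, os.path.join(LOG_DIR, 'model.ckpt'))

config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
embedding.metadata_path = 'metadata.tsv'  # optional labels, one line per row
# writes projector_config.pbtxt into LOG_DIR for TensorBoard to read at startup
projector.visualize_embeddings(tf.summary.FileWriter(LOG_DIR), config)

After this, running tensorboard --logdir logs should show the tensor under the Projector tab.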
Example 1: main
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def main(argv):
    embeddings = tf.get_variable('W', shape=[10, 100],
                                 initializer=tf.contrib.layers.xavier_initializer())
    init_op = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init_op)
        saver = tf.train.Saver()
        saver.save(session, "model.ckpt", 0)
        summary_writer = tf.summary.FileWriter('.')
        projector_config = projector.ProjectorConfig()
        embedding = projector_config.embeddings.add()
        embedding.tensor_name = embeddings.name
        projector.visualize_embeddings(summary_writer, projector_config)
Example 2: save
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def save(self, filename):
    """Saves a checkpoint of the current model weights.

    Args:
        filename: Checkpoint filename, such as best_model_checkpoint.ckpt.
            This file must exist within model_dir.
    """
    filepath = path.join(self.model_dir, filename)
    self.saver.save(self.session, filepath)

    config = projector.ProjectorConfig()
    if self.model_hparams.share_embedding:
        shared_embedding = config.embeddings.add()
        shared_embedding.tensor_name = "model/encoder/shared_embeddings_matrix"
        shared_embedding.metadata_path = Vocabulary.SHARED_VOCAB_FILENAME
    else:
        encoder_embedding = config.embeddings.add()
        encoder_embedding.tensor_name = "model/encoder/encoder_embeddings_matrix"
        encoder_embedding.metadata_path = Vocabulary.INPUT_VOCAB_FILENAME

        decoder_embedding = config.embeddings.add()
        decoder_embedding.tensor_name = "model/decoder/decoder_embeddings_matrix"
        decoder_embedding.metadata_path = Vocabulary.OUTPUT_VOCAB_FILENAME
    projector.visualize_embeddings(self.summary_writer, config)
Example 3: write_embedding_metadata
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def write_embedding_metadata(writer, word2int):
    metadata_path = os.path.join(hyper.train_dir, 'embedding_meta.tsv')
    # dump the embedding mapping, sorted by integer id
    items = sorted(word2int.items(), key=operator.itemgetter(1))
    with open(metadata_path, 'w') as f:
        for item in items:
            print(item[0], file=f)

    config = projector.ProjectorConfig()
    config.model_checkpoint_dir = hyper.train_dir
    # The line above does not work yet: TF doesn't support model_checkpoint_dir,
    # so create a symlink from train_dir to log_dir instead.
    os.symlink(os.path.join(hyper.train_dir, 'checkpoint'),
               os.path.join(hyper.log_dir, 'checkpoint'))

    embedding = config.embeddings.add()
    embedding.tensor_name = param.get('We').name
    # Link this tensor to its metadata file (e.g. labels).
    embedding.metadata_path = metadata_path
    # Save a configuration file that TensorBoard will read during startup.
    projector.visualize_embeddings(writer, config)
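One caveat about the workaround in this example: os.symlink raises OSError if the link already exists, e.g. on a second call. A guarded variant (a sketch reusing the same hyper paths as above) could look like:

checkpoint_link = os.path.join(hyper.log_dir, 'checkpoint')
if not os.path.islink(checkpoint_link):  # skip if the link was created earlier
    os.symlink(os.path.join(hyper.train_dir, 'checkpoint'), checkpoint_link)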
Example 4: visualize_embeddings
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def visualize_embeddings(self) -> None:
    """Insert visualization of embeddings in TensorBoard.

    Visualize the embeddings of the `EmbeddedFactorSequence` objects specified
    in the `main.visualize_embeddings` config attribute.
    """
    tb_projector = projector.ProjectorConfig()
    for sequence in self.model.visualize_embeddings:
        for i, (vocabulary, emb_matrix) in enumerate(
                zip(sequence.vocabularies, sequence.embedding_matrices)):
            # TODO: use the vocabulary's name parameter once vocabularies have one
            path = self.get_path("seq.{}-{}.tsv".format(sequence.name, i))
            vocabulary.save_wordlist(path)

            embedding = tb_projector.embeddings.add()
            # pylint: disable=unsubscriptable-object
            embedding.tensor_name = emb_matrix.name
            embedding.metadata_path = path
            # pylint: enable=unsubscriptable-object

    summary_writer = tf.summary.FileWriter(self.model.output)
    projector.visualize_embeddings(summary_writer, tb_projector)
Example 5: visualize_sample_embeddings
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def visualize_sample_embeddings(sess, log_dir, words, word2idx, embeddings):
    # `embeddings` is the full embedding variable (from tf.get_variable())
    list_idx = map(lambda word: word2idx[word], words)  # unused: the gather below is commented out
    # sample_embeddings = tf.gather(embeddings, list_idx, name="my_embeddings")
    # sample_embeddings = embeddings[list_idx]
    if not os.path.exists(log_dir):  # create log_dir before writing the metadata into it
        os.mkdir(log_dir)

    config = projector.ProjectorConfig()
    embedding_conf = config.embeddings.add()
    metadata_path = os.path.join(log_dir, 'metadata.tsv')
    with open(metadata_path, "w") as f:
        for word in words:
            f.write(word + "\n")
    embedding_conf.tensor_name = embeddings.name
    embedding_conf.metadata_path = metadata_path

    summary_writer = tf.summary.FileWriter(log_dir, graph=sess.graph)
    projector.visualize_embeddings(summary_writer, config)
    # summary_writer.close()
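The commented-out lines suggest the original intent was to visualize only the sampled rows. For that, the gathered rows must live in their own variable and be saved to a checkpoint. A hypothetical continuation (a sketch reusing the names above; not part of the original code) could be:

sample_embeddings = tf.Variable(tf.gather(embeddings, list(list_idx)),
                                name="sample_embeddings")
sess.run(sample_embeddings.initializer)
tf.train.Saver([sample_embeddings]).save(sess, os.path.join(log_dir, "model.ckpt"))
# ...and point embedding_conf.tensor_name at sample_embeddings.name instead.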
Example 6: _add_emb_vis
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def _add_emb_vis(self, embedding_var):
    """Do setup so that we can view word embedding visualization in TensorBoard,
    as described here: https://www.tensorflow.org/get_started/embedding_viz
    Make the vocab metadata file, then make the projector config file pointing to it."""
    train_dir = os.path.join(FLAGS.log_root, "train")
    vocab_metadata_path = os.path.join(train_dir, "vocab_metadata.tsv")
    self._vocab.write_metadata(vocab_metadata_path)  # write metadata file
    summary_writer = tf.summary.FileWriter(train_dir)

    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embedding_var.name
    embedding.metadata_path = vocab_metadata_path

    chkpt_dir = tf.train.latest_checkpoint(train_dir)
    print('chkpt_dir for embeddings: ', chkpt_dir)
    if chkpt_dir:
        config.model_checkpoint_path = chkpt_dir
    else:
        chkpt_dir = train_dir
    projector.visualize_embeddings(summary_writer, config)
Example 7: __get_tensorboard_writer
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def __get_tensorboard_writer(self, path):
    tensorboard_writer = tf.summary.FileWriter(path, graph=self.graph,
                                               filename_suffix=".bot")
    # configure the projector to add the embedding summaries as well
    conf = projector.ProjectorConfig()
    embedding_field = conf.embeddings.add()
    embedding_content_label = conf.embeddings.add()
    # point the configs at the embedding matrices
    embedding_field.tensor_name = self.field_embedding_matrix.name
    embedding_content_label.tensor_name = self.content_label_embedding_matrix.name
    # add the metadata paths for these embedding summaries
    embedding_field.metadata_path = os.path.join("..", "Metadata/fields.vocab")
    embedding_content_label.metadata_path = os.path.join("..", "Metadata/content_labels.vocab")
    # save the projector configuration file
    projector.visualize_embeddings(tensorboard_writer, conf)
    return tensorboard_writer
Example 8: _visualize_embedding
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def _visualize_embedding(self, model_path, summary_writer):
    """Create the metadata file (and its config file) for TensorBoard's embedding visualization."""
    metadata_path = os.path.join(model_path, self._tensorboard_dir, _EMBEDDING_METADATA_FILE)

    # create the projector config file pointing at the metadata
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = self._nodes['embeddings'].name
    embedding.metadata_path = metadata_path
    projector.visualize_embeddings(summary_writer, config)

    # create the metadata file itself
    with open(metadata_path, 'w', encoding='utf8') as metadata_file:
        metadata_file.write('Character\tID\n')
        for id_, char in enumerate(self._encoder.chars):
            metadata_file.write('{}\t{}\n'.format(char, id_))
Example 9: visualisation
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def visualisation(final_result):
    # Store the final output-layer vectors in a new variable: the PROJECTOR can
    # only visualize TensorFlow variables, so the embedding data must live in one.
    y = tf.Variable(final_result, name=TENSOR_NAME)
    summary_writer = tf.summary.FileWriter(LOG_DIR)

    # generate the log through the PROJECTOR
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = y.name
    # point the embedding at the metadata describing the original data
    embedding.metadata_path = META_FILE
    # specify the sprite image and the size of each thumbnail
    embedding.sprite.image_path = SPRITE_FILE
    embedding.sprite.single_image_dim.extend([28, 28])
    # write the projector config to the log directory
    projector.visualize_embeddings(summary_writer, config)

    # create a session and save the variable to a checkpoint
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(LOG_DIR, "model"), TRAINING_STEPS)
    summary_writer.close()

# The main function first trains the model, then processes the test data, and
# finally writes the output matrix to the log files the PROJECTOR needs.
Example 10: visualize
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def visualize(self, visual_fld, num_visualize):
    """Run 'tensorboard --logdir=visualization' to see the embeddings."""
    # create the list of the num_visualize most common words to visualize
    w2v_utils.most_common_words(visual_fld, num_visualize)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(os.path.dirname('data/checkpoints/checkpoint'))
        # if that checkpoint exists, restore from it
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        final_embed_matrix = sess.run(self.embed_matrix)
        # the embeddings have to be stored in a new variable
        embedding_var = tf.Variable(final_embed_matrix[:num_visualize], name='embedding')
        sess.run(embedding_var.initializer)

        config = projector.ProjectorConfig()
        summary_writer = tf.summary.FileWriter(visual_fld)

        # add the embedding to the config file
        embedding = config.embeddings.add()
        embedding.tensor_name = embedding_var.name
        # link this tensor to its metadata file, in this case the first
        # num_visualize words of the vocab
        embedding.metadata_path = 'vocab_' + str(num_visualize) + '.tsv'

        # save a configuration file that TensorBoard will read during startup
        projector.visualize_embeddings(summary_writer, config)
        saver_embed = tf.train.Saver([embedding_var])
        saver_embed.save(sess, os.path.join(visual_fld, 'model.ckpt'), 1)
Example 11: visualize_embeddings
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def visualize_embeddings(embeddings, experiment_name='default'):
    """Save the embeddings to be visualized using t-SNE on TensorBoard.

    Based on https://medium.com/@vegi/visualizing-higher-dimensional-data-using-t-sne-on-tensorboard-7dbf22682cf2
    """
    tf_embeddings = tf.Variable(np.concatenate(embeddings, 0))

    # generate metadata: one (video_index, frame_index) row per embedding
    metadata = 'video_index\tframe_index\n'
    for video_index in range(len(embeddings)):
        for frame_index in range(embeddings[video_index].shape[0]):
            metadata += '{}\t{}\n'.format(video_index, frame_index)

    os.makedirs('embeddings/{}'.format(experiment_name), exist_ok=True)  # ensure the output directory exists
    metadata_path = 'embeddings/{}/labels.tsv'.format(experiment_name)
    with open(metadata_path, 'w') as metadata_file:
        metadata_file.write(metadata)

    with tf.Session() as sess:
        saver = tf.train.Saver([tf_embeddings])
        sess.run(tf_embeddings.initializer)
        saver.save(sess, 'embeddings/{}/embeddings.ckpt'.format(experiment_name))
        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = tf_embeddings.name
        embedding.metadata_path = metadata_path.split('/')[-1]  # relative to the log dir
        projector.visualize_embeddings(
            tf.summary.FileWriter('embeddings/{}'.format(experiment_name)), config)
Example 12: _add_emb_vis
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def _add_emb_vis(self, embedding_var):
    """Do setup so that we can view word embedding visualization in TensorBoard,
    as described here: https://www.tensorflow.org/get_started/embedding_viz
    Make the vocab metadata file, then make the projector config file pointing to it."""
    train_dir = os.path.join(FLAGS.log_root, "train")
    vocab_metadata_path = os.path.join(train_dir, "vocab_metadata.tsv")
    self._vocab.write_metadata(vocab_metadata_path)  # write metadata file
    summary_writer = tf.summary.FileWriter(train_dir)
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embedding_var.name
    embedding.metadata_path = vocab_metadata_path
    projector.visualize_embeddings(summary_writer, config)
Example 13: add_multiple_embeddings
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def add_multiple_embeddings(log_dir, file_list, name_list):
    """Creates the files necessary for multiple embeddings.

    :param log_dir: destination directory for the model and metadata
        (the one TensorBoard points to)
    :param file_list: list of embedding files
    :param name_list: names of the embedding files
    :return:
    """
    # set up a TensorFlow session
    tf.reset_default_graph()
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    config = projector.ProjectorConfig()
    for i, file in enumerate(file_list):
        tensor_name = name_list[i]
        print('creating the embedding with the name ' + tensor_name)
        create_embeddings(sess, log_dir, embedding_file=file, tensor_name=tensor_name)

        # create a TensorFlow summary writer
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        embedding_conf = config.embeddings.add()
        embedding_conf.tensor_name = tensor_name + ':0'
        embedding_conf.metadata_path = os.path.join(tensor_name + '_' + 'metadata.tsv')
        projector.visualize_embeddings(summary_writer, config)

    # save the model (note: this uses the last tensor_name from the loop)
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(log_dir, tensor_name + '_' + "model.ckpt"))
    print('finished successfully!')
Example 14: visualize_embeddings
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def visualize_embeddings(logdir, var_list, tsv_list):
    assert len(var_list) == len(tsv_list), 'Inconsistent lengths of lists'
    config = projector.ProjectorConfig()
    for v, f in zip(var_list, tsv_list):
        embedding = config.embeddings.add()
        embedding.tensor_name = v.name
        if f is not None:
            _, filename = os.path.split(f)
            meta_tsv = os.path.join(logdir, filename)
            tf.gfile.Copy(f, meta_tsv)
            embedding.metadata_path = filename  # store the path relative to logdir
    # SummaryWriterCache returns a cached FileWriter for logdir
    writer = SummaryWriterCache.get(logdir)
    projector.visualize_embeddings(writer, config)
Example 15: _add_emb_vis
# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import ProjectorConfig [as alias]
def _add_emb_vis(self, embedding_var):
    """Do setup so that we can view word embedding visualization in TensorBoard,
    as described here: https://www.tensorflow.org/get_started/embedding_viz
    Make the vocab metadata file, then make the projector config file pointing to it."""
    train_dir = os.path.join(FLAGS.log_root, "train")
    vocab_metadata_path = os.path.join(train_dir, "vocab_metadata.tsv")
    self._vocab.write_metadata(vocab_metadata_path)  # write metadata file
    summary_writer = tf.summary.FileWriter(train_dir)
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embedding_var.name
    embedding.metadata_path = vocab_metadata_path
    projector.visualize_embeddings(summary_writer, config)
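A closing note: tf.contrib was removed in TensorFlow 2.x, and the projector plugin now lives in the standalone tensorboard package. A minimal sketch of the equivalent TF2-era workflow, assuming tensorboard 2.x (log_dir, the variable shape, and metadata.tsv are illustrative):

import os
import tensorflow as tf
from tensorboard.plugins import projector

log_dir = 'logs/projector'  # illustrative
os.makedirs(log_dir, exist_ok=True)

# the embedding must be saved in a TF2 object-based checkpoint
embedding_var = tf.Variable(tf.random.normal([100, 64]), name='embedding')
checkpoint = tf.train.Checkpoint(embedding=embedding_var)
checkpoint.save(os.path.join(log_dir, 'embedding.ckpt'))

config = projector.ProjectorConfig()
embedding = config.embeddings.add()
# default path of a variable inside a TF2 object-based checkpoint
embedding.tensor_name = 'embedding/.ATTRIBUTES/VARIABLE_VALUE'
embedding.metadata_path = 'metadata.tsv'  # illustrative, relative to log_dir
projector.visualize_embeddings(log_dir, config)  # writes projector_config.pbtxt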