

Python projector.visualize_embeddings Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings, gathered from open-source projects. If you are unsure what projector.visualize_embeddings does or how to call it, the curated examples below should help. You can also explore other usages of tensorflow.contrib.tensorboard.plugins.projector.


The following presents 15 code examples of projector.visualize_embeddings, ordered by popularity by default.
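Before the individual examples, here is a minimal end-to-end sketch of the pattern the examples share: build an embedding variable, write a metadata file, save a checkpoint, and point a ProjectorConfig at the tensor before calling projector.visualize_embeddings. This sketch is not taken from any of the projects below; it assumes TensorFlow 1.x (where tf.contrib is available), and the log_dir path and vocab list are placeholders.

import os
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

log_dir = 'logs'  # hypothetical output directory; point TensorBoard here
vocab = ['apple', 'banana', 'cherry']  # placeholder vocabulary

if not os.path.exists(log_dir):
    os.makedirs(log_dir)

# One embedding row per vocabulary entry; TensorBoard matches rows to metadata lines
embedding_var = tf.get_variable('word_embeddings', shape=[len(vocab), 8])

# Metadata file: one label per line, in the same order as the embedding rows
with open(os.path.join(log_dir, 'metadata.tsv'), 'w') as f:
    for word in vocab:
        f.write(word + '\n')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # The projector reads the tensor values from a checkpoint in log_dir
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(log_dir, 'model.ckpt'))

    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embedding_var.name
    embedding.metadata_path = 'metadata.tsv'  # relative to log_dir

    # Writes projector_config.pbtxt, which TensorBoard reads at startup
    writer = tf.summary.FileWriter(log_dir)
    projector.visualize_embeddings(writer, config)

After running this, tensorboard --logdir logs should show the points under the Projector tab.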

Example 1: main

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
import tensorflow as tf

def main(argv):
    embeddings = tf.get_variable('W', shape=[10, 100], initializer=tf.contrib.layers.xavier_initializer())
    init_op = tf.global_variables_initializer()

    with tf.Session() as session:
        session.run(init_op)

        saver = tf.train.Saver()
        saver.save(session, "model.ckpt", 0)

        summary_writer = tf.summary.FileWriter('.')

        projector_config = projector.ProjectorConfig()

        embedding = projector_config.embeddings.add()
        embedding.tensor_name = embeddings.name

        projector.visualize_embeddings(summary_writer, projector_config) 
Developer: uclnlp, Project: inferbeddings, Lines of code: 20, Source file: visualize-tensorboard.py

Example 2: save

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def save(self, filename):
        """Saves a checkpoint of the current model weights

        Args:
            filename: Checkpoint filename, such as best_model_checkpoint.ckpt.
                This file must exist within model_dir.
        """
        filepath = path.join(self.model_dir, filename)
        self.saver.save(self.session, filepath)

        config = projector.ProjectorConfig()
        if self.model_hparams.share_embedding:
            shared_embedding = config.embeddings.add()
            shared_embedding.tensor_name = "model/encoder/shared_embeddings_matrix"
            shared_embedding.metadata_path = Vocabulary.SHARED_VOCAB_FILENAME
        else:
            encoder_embedding = config.embeddings.add()
            encoder_embedding.tensor_name = "model/encoder/encoder_embeddings_matrix"
            encoder_embedding.metadata_path = Vocabulary.INPUT_VOCAB_FILENAME
            decoder_embedding = config.embeddings.add()
            decoder_embedding.tensor_name = "model/decoder/decoder_embeddings_matrix"
            decoder_embedding.metadata_path = Vocabulary.OUTPUT_VOCAB_FILENAME
        
        projector.visualize_embeddings(self.summary_writer, config) 
Developer: AbrahamSanders, Project: seq2seq-chatbot, Lines of code: 26, Source file: chatbot_model.py

Example 3: write_embedding_metadata

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def write_embedding_metadata(writer, word2int):
    metadata_path = os.path.join(hyper.train_dir, 'embedding_meta.tsv')
    # dump embedding mapping
    items = sorted(word2int.items(), key=operator.itemgetter(1))
    with open(metadata_path, 'w') as f:
        for item in items:
            print(item[0], file=f)

    config = projector.ProjectorConfig()
    config.model_checkpoint_dir = hyper.train_dir
    # The line above does not work yet: TF does not support model_checkpoint_dir,
    # so create a symlink from train_dir's checkpoint file into log_dir instead
    os.symlink(os.path.join(hyper.train_dir, 'checkpoint'), os.path.join(hyper.log_dir, 'checkpoint'))

    embedding = config.embeddings.add()
    embedding.tensor_name = param.get('We').name
    # Link this tensor to its metadata file (e.g. labels).
    embedding.metadata_path = metadata_path
    # Saves a configuration file that TensorBoard will read during startup.
    projector.visualize_embeddings(writer, config) 
Developer: Aetf, Project: tensorflow-tbcnn, Lines of code: 22, Source file: embedding.py

Example 4: visualize_embeddings

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def visualize_embeddings(self) -> None:
        """Insert visualization of embeddings in TensorBoard.

        Visualize the embeddings of `EmbeddedFactorSequence` objects specified
        in the `main.visualize_embeddings` config attribute.
        """
        tb_projector = projector.ProjectorConfig()

        for sequence in self.model.visualize_embeddings:
            for i, (vocabulary, emb_matrix) in enumerate(
                    zip(sequence.vocabularies, sequence.embedding_matrices)):

                # TODO: when vocabularies get a name parameter, use it here
                path = self.get_path("seq.{}-{}.tsv".format(sequence.name, i))
                vocabulary.save_wordlist(path)

                embedding = tb_projector.embeddings.add()
                # pylint: disable=unsubscriptable-object
                embedding.tensor_name = emb_matrix.name
                embedding.metadata_path = path
                # pylint: enable=unsubscriptable-object

        summary_writer = tf.summary.FileWriter(self.model.output)
        projector.visualize_embeddings(summary_writer, tb_projector) 
Developer: ufal, Project: neuralmonkey, Lines of code: 26, Source file: experiment.py

Example 5: visualize_sample_embeddings

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def visualize_sample_embeddings(sess, log_dir, words, word2idx, embeddings):  # embeddings -> tf.get_variable()
    list_idx = [word2idx[word] for word in words]  # indices of the sampled words (currently unused)
    # sample_embeddings = tf.gather(embeddings, list_idx, name="my_embeddings")
    # sample_embeddings = embeddings[list_idx]

    config = projector.ProjectorConfig()
    embedding_conf = config.embeddings.add()

    # create the log directory before writing the metadata file into it
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    metadata_path = os.path.join(log_dir, 'metadata.tsv')
    with open(metadata_path, "w") as f:
        for word in words:
            f.write(word + "\n")

    embedding_conf.tensor_name = embeddings.name
    embedding_conf.metadata_path = metadata_path

    summary_writer = tf.summary.FileWriter(log_dir, graph=sess.graph)
    projector.visualize_embeddings(summary_writer, config)
    # summary_writer.close() 
Developer: Pelhans, Project: ZNLP, Lines of code: 22, Source file: tf_utils.py

Example 6: _add_emb_vis

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def _add_emb_vis(self, embedding_var):
    """Do setup so that we can view word embedding visualization in Tensorboard, as described here:
    https://www.tensorflow.org/get_started/embedding_viz
    Make the vocab metadata file, then make the projector config file pointing to it."""
    train_dir = os.path.join(FLAGS.log_root, "train")
    vocab_metadata_path = os.path.join(train_dir, "vocab_metadata.tsv")
    self._vocab.write_metadata(vocab_metadata_path) # write metadata file
    summary_writer = tf.summary.FileWriter(train_dir)
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embedding_var.name
    embedding.metadata_path = vocab_metadata_path
    ckpt_path = tf.train.latest_checkpoint(train_dir)  # path prefix of the latest checkpoint, or None
    print('checkpoint for embeddings: ', ckpt_path)
    if ckpt_path:
      config.model_checkpoint_path = ckpt_path
    projector.visualize_embeddings(summary_writer, config) 
Developer: armancohan, Project: long-summarization, Lines of code: 21, Source file: model.py

Example 7: __get_tensorboard_writer

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def __get_tensorboard_writer(self, path):
        tensorboard_writer = tf.summary.FileWriter(path, graph=self.graph, filename_suffix=".bot")

        # set the projector's configuration to add the embedding summary also:
        conf = projector.ProjectorConfig()
        embedding_field = conf.embeddings.add()
        embedding_content_label = conf.embeddings.add()

        # set the tensors to these embedding matrices
        embedding_field.tensor_name = self.field_embedding_matrix.name
        embedding_content_label.tensor_name = self.content_label_embedding_matrix.name

        # add the metadata paths to these embedding_summaries:
        embedding_field.metadata_path = os.path.join("..", "Metadata/fields.vocab")
        embedding_content_label.metadata_path = os.path.join("..", "Metadata/content_labels.vocab")

        # save the configuration file for this
        projector.visualize_embeddings(tensorboard_writer, conf)

        # return the so created tensorboard_writer
        return tensorboard_writer

    # define the constructor of the graph 
Developer: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines of code: 25, Source file: Model.py

Example 8: _visualize_embedding

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def _visualize_embedding(self, model_path, summary_writer):
        """Create metadata file (and its config file) for tensorboard's embedding visualization."""
        metadata_path = os.path.join(model_path, self._tensorboard_dir, _EMBEDDING_METADATA_FILE)

        # create the metadata config file
        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = self._nodes['embeddings'].name
        embedding.metadata_path = metadata_path
        projector.visualize_embeddings(summary_writer, config)

        # create metadata file
        with open(metadata_path, 'w', encoding='utf8') as metadata_file:
            metadata_file.write('Character\tID\n')
            for id_, char in enumerate(self._encoder.chars):
                metadata_file.write('{}\t{}\n'.format(char, id_)) 
Developer: kensk8er, Project: chicksexer, Lines of code: 18, Source file: classifier.py

Example 9: visualisation

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def visualisation(final_result):
    # Hold the final output-layer vectors in a new variable: because embeddings
    # are backed by TensorFlow variables, PROJECTOR can only visualize TF variables
    y = tf.Variable(final_result, name=TENSOR_NAME)
    summary_writer = tf.summary.FileWriter(LOG_DIR)

    # Generate the log via PROJECTOR
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = y.name

    # Point the embedding to its original metadata
    embedding.metadata_path = META_FILE

    # Specify the sprite image and the size of each thumbnail
    embedding.sprite.image_path = SPRITE_FILE
    embedding.sprite.single_image_dim.extend([28, 28])

    # Write the projector config to the log
    projector.visualize_embeddings(summary_writer, config)

    # Create a session and save the variable to a checkpoint
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(LOG_DIR, "model"), TRAINING_STEPS)

    summary_writer.close()


# The main function first trains the model, then processes the test data, and
# finally writes the output matrix to the log files required by PROJECTOR 
Developer: wdxtub, Project: deep-learning-note, Lines of code: 33, Source file: mnist_projector_show.py
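
Example 9 is the only one that attaches a sprite image. A sprite is a single image that tiles one thumbnail per embedding row, in row-major order. Below is a minimal sketch of building such a sprite with NumPy, assuming MNIST-style 28x28 grayscale images in an [N, 28, 28] array; create_sprite_image is a hypothetical helper, not part of the original project.

import numpy as np

def create_sprite_image(images):
    """Tile [N, h, w] grayscale images into one square sprite, row-major."""
    n = int(np.ceil(np.sqrt(images.shape[0])))  # thumbnails per side
    h, w = images.shape[1], images.shape[2]
    sprite = np.ones((n * h, n * w), dtype=images.dtype)  # white background
    for idx, image in enumerate(images):
        row, col = divmod(idx, n)
        sprite[row * h:(row + 1) * h, col * w:(col + 1) * w] = image
    return sprite

The resulting array can be saved with, for example, matplotlib.pyplot.imsave(SPRITE_FILE, sprite, cmap='gray'); single_image_dim.extend([28, 28]) then tells the projector how to slice the sprite into per-point thumbnails.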

Example 10: visualize

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def visualize(self, visual_fld, num_visualize):
        """ run "'tensorboard --logdir='visualization'" to see the embeddings """

        # create the list of the num_visualize most common words to visualize
        w2v_utils.most_common_words(visual_fld, num_visualize)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(os.path.dirname('data/checkpoints/checkpoint'))

            # if that checkpoint exists, restore from checkpoint
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)

            final_embed_matrix = sess.run(self.embed_matrix)

            # you have to store embeddings in a new variable
            embedding_var = tf.Variable(final_embed_matrix[:num_visualize], name='embedding')
            sess.run(embedding_var.initializer)

            config = projector.ProjectorConfig()
            summary_writer = tf.summary.FileWriter(visual_fld)

            # add embedding to the config file
            embedding = config.embeddings.add()
            embedding.tensor_name = embedding_var.name

            # link this tensor to its metadata file, in this case the first NUM_VISUALIZE words of vocab
            embedding.metadata_path = 'vocab_' + str(num_visualize) + '.tsv'

            # saves a configuration file that TensorBoard will read during startup.
            projector.visualize_embeddings(summary_writer, config)
            saver_embed = tf.train.Saver([embedding_var])
            saver_embed.save(sess, os.path.join(visual_fld, 'model.ckpt'), 1) 
Developer: wdxtub, Project: deep-learning-note, Lines of code: 37, Source file: 11_w2v_visual.py

Example 11: visualize_embeddings

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def visualize_embeddings(embeddings, experiment_name='default'):
    """Save the embeddings to be visualised using t-sne on TensorBoard
    
    Based on https://medium.com/@vegi/visualizing-higher-dimensional-data-using-t-sne-on-tensorboard-7dbf22682cf2
    """
    tf_embeddings = tf.Variable(np.concatenate(embeddings, 0))

    # Generate metadata
    metadata = 'video_index\tframe_index\n'
    for video_index in range(len(embeddings)):
        for frame_index in range(embeddings[video_index].shape[0]):
            metadata += '{}\t{}\n'.format(video_index, frame_index)

    # ensure the output directory exists before writing into it (assumes: import os)
    os.makedirs('embeddings/{}'.format(experiment_name), exist_ok=True)
    metadata_path = 'embeddings/{}/labels.tsv'.format(experiment_name)
    with open(metadata_path, 'w') as metadata_file:
        metadata_file.write(metadata)

    with tf.Session() as sess:
        saver = tf.train.Saver([tf_embeddings])
        sess.run(tf_embeddings.initializer)
        saver.save(sess, 'embeddings/{}/embeddings.ckpt'.format(experiment_name))
        config = projector.ProjectorConfig()

        embedding = config.embeddings.add()

        embedding.tensor_name = tf_embeddings.name
        embedding.metadata_path = metadata_path.split('/')[-1]

        projector.visualize_embeddings(tf.summary.FileWriter('embeddings/{}'.format(experiment_name)), config) 
Developer: MaxSobolMark, Project: HardRLWithYoutube, Lines of code: 32, Source file: embedding_visualization.py

Example 12: _add_emb_vis

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def _add_emb_vis(self, embedding_var):
    """Do setup so that we can view word embedding visualization in Tensorboard, as described here:
    https://www.tensorflow.org/get_started/embedding_viz
    Make the vocab metadata file, then make the projector config file pointing to it."""
    train_dir = os.path.join(FLAGS.log_root, "train")
    vocab_metadata_path = os.path.join(train_dir, "vocab_metadata.tsv")
    self._vocab.write_metadata(vocab_metadata_path) # write metadata file
    summary_writer = tf.summary.FileWriter(train_dir)
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embedding_var.name
    embedding.metadata_path = vocab_metadata_path
    projector.visualize_embeddings(summary_writer, config) 
Developer: yaserkl, Project: TransferRL, Lines of code: 15, Source file: model.py

Example 13: add_multiple_embeddings

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def add_multiple_embeddings(log_dir, file_list, name_list):
    """ Creates the files necessary for the multiple embeddings

    :param log_dir: destination directory for the model and metadata (the one to which TensorBoard points)
    :param file_list: list of embeddings files
    :param name_list: names of the embeddings files
    :return:
    """
    # setup a TensorFlow session
    tf.reset_default_graph()
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    config = projector.ProjectorConfig()

    for i, file in enumerate(file_list):
        tensor_name = name_list[i]

        print('creating the embedding with the name ' + tensor_name)
        create_embeddings(sess, log_dir, embedding_file=file,
                          tensor_name=tensor_name)
        # create a TensorFlow summary writer
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

        embedding_conf = config.embeddings.add()
        embedding_conf.tensor_name = tensor_name + ':0'
        embedding_conf.metadata_path = os.path.join(tensor_name + '_' + 'metadata.tsv')
        projector.visualize_embeddings(summary_writer, config)

        # save the model
        saver = tf.train.Saver()
        saver.save(sess, os.path.join(log_dir, tensor_name + '_' + "model.ckpt"))

    print('finished successfully!') 
Developer: harkous, Project: embeddingsviz, Lines of code: 35, Source file: embeddings_formatter.py

Example 14: visualize_embeddings

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def visualize_embeddings(logdir, var_list, tsv_list):
  assert len(var_list) == len(tsv_list), 'Inconsistent length of lists'

  config = projector.ProjectorConfig()
  for v, f in zip(var_list, tsv_list):
    embedding = config.embeddings.add()
    embedding.tensor_name = v.name
    if f is not None:
      _, filename = os.path.split(f)
      meta_tsv = os.path.join(logdir, filename)
      tf.gfile.Copy(f, meta_tsv, overwrite=True)  # copy metadata next to the logs
      embedding.metadata_path = filename  # save relative path

  writer = SummaryWriterCache.get(logdir)
  projector.visualize_embeddings(writer, config) 
Developer: JeremyCCHsu, Project: vqvae-speech, Lines of code: 17, Source file: train.py

Example 15: _add_emb_vis

# Required import: from tensorflow.contrib.tensorboard.plugins import projector [as alias]
# Or: from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings [as alias]
def _add_emb_vis(self, embedding_var):
        """Do setup so that we can view word embedding visualization in Tensorboard, as described here:
        https://www.tensorflow.org/get_started/embedding_viz
        Make the vocab metadata file, then make the projector config file pointing to it."""
        train_dir = os.path.join(FLAGS.log_root, "train")
        vocab_metadata_path = os.path.join(train_dir, "vocab_metadata.tsv")
        self._vocab.write_metadata(vocab_metadata_path)  # write metadata file
        summary_writer = tf.summary.FileWriter(train_dir)
        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = embedding_var.name
        embedding.metadata_path = vocab_metadata_path
        projector.visualize_embeddings(summary_writer, config) 
Developer: IBM, Project: MAX-Text-Summarizer, Lines of code: 15, Source file: model.py


Note: The tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce this article without permission.