

Python nn_utils.get_embedding Method Code Examples

This article compiles typical usage examples of the Python nn_utils.get_embedding method. If you are wondering what nn_utils.get_embedding does or how to use it, the curated code examples below may help. You can also explore other usage examples from the nn_utils module.


Four code examples of the nn_utils.get_embedding method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
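For context, here is a minimal sketch of what an embedding-lookup helper with this call signature typically looks like. This is an assumption for illustration only, not the actual nn_utils implementation; in particular, the parameter key "word" and the internal layout of params are hypothetical.

import tensorflow as tf

def get_embedding(word_indices, utility, params):
  # Hypothetical sketch only: look up rows of a learned embedding matrix.
  # params["word"] is assumed to be a [vocab_size, embedding_dims] variable;
  # the real nn_utils may organize its parameters differently.
  return tf.nn.embedding_lookup(params["word"], word_indices)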

Example 1: LSTM_question_embedding

# Required import: import nn_utils [as alias]
# Or: from nn_utils import get_embedding [as alias]
def LSTM_question_embedding(self, sentence, sentence_length):
    # An LSTM processes the input question one token at a time.
    lstm_params = "question_lstm"
    hidden_vectors = []
    sentence = self.batch_question
    question_hidden = tf.zeros(
        [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
    question_c_hidden = tf.zeros(
        [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
    if (self.utility.FLAGS.rnn_dropout > 0.0):
      if (self.mode == "train"):
        rnn_dropout_mask = tf.cast(
            tf.random_uniform(
                tf.shape(question_hidden), minval=0.0, maxval=1.0) <
            self.utility.FLAGS.rnn_dropout,
            self.data_type) / self.utility.FLAGS.rnn_dropout
      else:
        rnn_dropout_mask = tf.ones_like(question_hidden)
    for question_iterator in range(self.question_length):
      curr_word = sentence[:, question_iterator]
      question_vector = nn_utils.apply_dropout(
          nn_utils.get_embedding(curr_word, self.utility, self.params),
          self.utility.FLAGS.dropout, self.mode)
      question_hidden, question_c_hidden = nn_utils.LSTMCell(
          question_vector, question_hidden, question_c_hidden, lstm_params,
          self.params)
      if (self.utility.FLAGS.rnn_dropout > 0.0):
        question_hidden = question_hidden * rnn_dropout_mask
      hidden_vectors.append(tf.expand_dims(question_hidden, 0))
    hidden_vectors = tf.concat(axis=0, values=hidden_vectors)
    return question_hidden, hidden_vectors 
Developer: ringringyi, Project: DOTA_models, Lines of code: 33, Source file: model.py
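The rnn_dropout_mask in Example 1 implements inverted dropout on the recurrent hidden state: during training each unit is kept with probability FLAGS.rnn_dropout (the flag acts as a keep probability here) and rescaled by its reciprocal, while at evaluation time the mask is all ones; the same mask is reused at every timestep of the question. A standalone sketch of that mask construction, with assumed shapes and values, is:

import tensorflow as tf

# Assumed shapes and keep probability for illustration only.
batch_size, embedding_dims, keep_prob = 32, 256, 0.8
hidden = tf.zeros([batch_size, embedding_dims], tf.float32)
# Keep each unit with probability keep_prob and rescale by 1/keep_prob so the
# expected magnitude of the hidden state is unchanged (inverted dropout).
mask = tf.cast(
    tf.random_uniform(tf.shape(hidden), minval=0.0, maxval=1.0) < keep_prob,
    tf.float32) / keep_prob
dropped_hidden = hidden * mask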

Example 2: get_column_hidden_vectors

# Required import: import nn_utils [as alias]
# Or: from nn_utils import get_embedding [as alias]
def get_column_hidden_vectors(self):
    # Bag-of-words vector representations for the column names.
    self.column_hidden_vectors = tf.reduce_sum(
        nn_utils.get_embedding(self.batch_number_column_names, self.utility,
                               self.params), 2)
    self.word_column_hidden_vectors = tf.reduce_sum(
        nn_utils.get_embedding(self.batch_word_column_names, self.utility,
                               self.params), 2) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 10, Source file: model.py
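Both calls in Example 2 follow the same pattern: get_embedding returns one vector per token of each column name, and tf.reduce_sum over axis 2 collapses the token axis into a single bag-of-words vector per column. A sketch of the shape flow, with assumed dimensions, is:

import tensorflow as tf

# Assumed shapes for illustration: 2 tables per batch, 3 columns,
# up to 4 name tokens per column, 8-dimensional embeddings.
token_embeddings = tf.zeros([2, 3, 4, 8])            # output of get_embedding
column_vectors = tf.reduce_sum(token_embeddings, 2)  # shape [2, 3, 8]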

Example 3: create_summary_embeddings

# Required import: import nn_utils [as alias]
# Or: from nn_utils import get_embedding [as alias]
def create_summary_embeddings(self):
    # Embeddings for each text entry in the table, reusing the question module's parameters.
    self.summary_text_entry_embeddings = tf.reduce_sum(
        tf.expand_dims(self.batch_exact_match, 3) * tf.expand_dims(
            tf.expand_dims(
                tf.expand_dims(
                    nn_utils.get_embedding(self.utility.entry_match_token_id,
                                           self.utility, self.params), 0), 1),
            2), 2) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 11, Source file: model.py
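The nested expand_dims calls in Example 3 align a single entry-match token embedding with a 3-D exact-match indicator tensor so that broadcasting yields one embedding per matching table entry before the final sum. A sketch of the broadcast, with assumed dimensions, is:

import tensorflow as tf

# Assumed shapes for illustration: 2 examples per batch, 3 columns, 5 rows,
# 8-dimensional embeddings; the real model's dimensions differ.
exact_match = tf.zeros([2, 3, 5])    # 1.0 where a question word matches an entry
match_embedding = tf.zeros([8])      # embedding of the entry-match token
summary = tf.reduce_sum(
    tf.expand_dims(exact_match, 3) *                   # [2, 3, 5, 1]
    tf.expand_dims(tf.expand_dims(
        tf.expand_dims(match_embedding, 0), 1), 2),    # [1, 1, 1, 8]
    2)                                                 # -> [2, 3, 8]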

Example 4: LSTM_question_embedding

# Required import: import nn_utils [as alias]
# Or: from nn_utils import get_embedding [as alias]
def LSTM_question_embedding(self, sentence, sentence_length):
    # An LSTM processes the input question one token at a time.
    lstm_params = "question_lstm"
    hidden_vectors = []
    sentence = self.batch_question
    question_hidden = tf.zeros(
        [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
    question_c_hidden = tf.zeros(
        [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
    if (self.utility.FLAGS.rnn_dropout > 0.0):
      if (self.mode == "train"):
        rnn_dropout_mask = tf.cast(
            tf.random_uniform(
                tf.shape(question_hidden), minval=0.0, maxval=1.0) <
            self.utility.FLAGS.rnn_dropout,
            self.data_type) / self.utility.FLAGS.rnn_dropout
      else:
        rnn_dropout_mask = tf.ones_like(question_hidden)
    for question_iterator in range(self.question_length):
      curr_word = sentence[:, question_iterator]
      question_vector = nn_utils.apply_dropout(
          nn_utils.get_embedding(curr_word, self.utility, self.params),
          self.utility.FLAGS.dropout, self.mode)
      question_hidden, question_c_hidden = nn_utils.LSTMCell(
          question_vector, question_hidden, question_c_hidden, lstm_params,
          self.params)
      if (self.utility.FLAGS.rnn_dropout > 0.0):
        question_hidden = question_hidden * rnn_dropout_mask
      hidden_vectors.append(tf.expand_dims(question_hidden, 0))
    hidden_vectors = tf.concat(axis=0, values=hidden_vectors)
    return question_hidden, hidden_vectors 
Developer: coderSkyChen, Project: Action_Recognition_Zoo, Lines of code: 33, Source file: model.py


Note: The nn_utils.get_embedding examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.