This page collects typical usage examples of the Python method nn_utils.get_embedding. Wondering what exactly nn_utils.get_embedding does and how to use it? The curated code examples below may help. You can also explore other usage examples from the nn_utils module, where this method is defined.
Four code examples of nn_utils.get_embedding are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
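All four examples treat nn_utils.get_embedding(ids, utility, params) as an embedding-table lookup and use the TensorFlow 1.x API. As a rough orientation only, a minimal sketch of such a helper might look like the following; the table key "word" and the params dictionary layout are assumptions, not confirmed by the source.

import tensorflow as tf  # TensorFlow 1.x graph-mode API, as in the examples below

def get_embedding(word_ids, utility, params):
  # Hypothetical sketch: look up rows of a shared [vocab_size, embedding_dims]
  # matrix. The real nn_utils implementation may scale or combine tables
  # differently.
  return tf.nn.embedding_lookup(params["word"], word_ids)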
Example 1: LSTM_question_embedding
# Required import: import nn_utils [as alias]
# Or: from nn_utils import get_embedding [as alias]
def LSTM_question_embedding(self, sentence, sentence_length):
  # LSTM processes the input question, one word per timestep.
  lstm_params = "question_lstm"
  hidden_vectors = []
  sentence = self.batch_question
  question_hidden = tf.zeros(
      [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
  question_c_hidden = tf.zeros(
      [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
  if self.utility.FLAGS.rnn_dropout > 0.0:
    if self.mode == "train":
      # Inverted dropout mask, sampled once and reused at every timestep.
      rnn_dropout_mask = tf.cast(
          tf.random_uniform(
              tf.shape(question_hidden), minval=0.0, maxval=1.0) <
          self.utility.FLAGS.rnn_dropout,
          self.data_type) / self.utility.FLAGS.rnn_dropout
    else:
      rnn_dropout_mask = tf.ones_like(question_hidden)
  for question_iterator in range(self.question_length):
    curr_word = sentence[:, question_iterator]
    question_vector = nn_utils.apply_dropout(
        nn_utils.get_embedding(curr_word, self.utility, self.params),
        self.utility.FLAGS.dropout, self.mode)
    question_hidden, question_c_hidden = nn_utils.LSTMCell(
        question_vector, question_hidden, question_c_hidden, lstm_params,
        self.params)
    if self.utility.FLAGS.rnn_dropout > 0.0:
      question_hidden = question_hidden * rnn_dropout_mask
    hidden_vectors.append(tf.expand_dims(question_hidden, 0))
  hidden_vectors = tf.concat(axis=0, values=hidden_vectors)
  return question_hidden, hidden_vectors
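Note that rnn_dropout_mask is sampled once before the time loop and multiplied into the hidden state at every step, so the same units are dropped for the whole question (recurrent dropout), and dividing by the keep probability (inverted dropout) preserves the expected activation magnitude. A standalone sketch of just that masking step, assuming TensorFlow 1.x; the function name is ours:

import tensorflow as tf

def recurrent_dropout_mask(state, keep_prob):
  # One Bernoulli mask per sequence, shared across all timesteps.
  keep = tf.cast(
      tf.random_uniform(tf.shape(state), minval=0.0, maxval=1.0) < keep_prob,
      state.dtype)
  # Inverted dropout: scale up surviving units by 1 / keep_prob.
  return keep / keep_prob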
Example 2: get_column_hidden_vectors
# Required import: import nn_utils [as alias]
# Or: from nn_utils import get_embedding [as alias]
def get_column_hidden_vectors(self):
  # Vector representations for the column names: embed every word in each
  # column name and sum over the word axis.
  self.column_hidden_vectors = tf.reduce_sum(
      nn_utils.get_embedding(self.batch_number_column_names, self.utility,
                             self.params), 2)
  self.word_column_hidden_vectors = tf.reduce_sum(
      nn_utils.get_embedding(self.batch_word_column_names, self.utility,
                             self.params), 2)
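Here the column-name tensors are presumably integer word ids of shape [batch, columns, words], so the lookup yields [batch, columns, words, embedding_dims] and summing over axis 2 collapses each multi-word column name into a single vector. A quick shape check under those assumed (illustrative) dimensions:

import tensorflow as tf

# Illustrative sizes only: 2 examples, 3 columns, 4 words per name, 5 dims.
embedded = tf.zeros([2, 3, 4, 5])            # [batch, columns, words, dims]
column_vectors = tf.reduce_sum(embedded, 2)  # [batch, columns, dims]
print(column_vectors.get_shape())            # prints (2, 3, 5)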
Example 3: create_summary_embeddings
# Required import: import nn_utils [as alias]
# Or: from nn_utils import get_embedding [as alias]
def create_summary_embeddings(self):
  # Embeddings for each text entry in the table, built with the question
  # module's parameters: broadcast the entry-match token embedding over the
  # binary exact-match indicator and sum.
  self.summary_text_entry_embeddings = tf.reduce_sum(
      tf.expand_dims(self.batch_exact_match, 3) * tf.expand_dims(
          tf.expand_dims(
              tf.expand_dims(
                  nn_utils.get_embedding(self.utility.entry_match_token_id,
                                         self.utility, self.params), 0), 1),
          2), 2)
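The nested expand_dims calls reshape the single entry-match embedding to [1, 1, 1, dims] so it broadcasts against the 0/1 exact-match indicator (assumed here to be [batch, columns, rows]); the outer reduce_sum over axis 2 then accumulates one summary vector per column. A shape check under those assumptions:

import tensorflow as tf

exact_match = tf.ones([2, 3, 4])  # assumed [batch, columns, rows] 0/1 indicator
match_vec = tf.zeros([5])         # assumed [embedding_dims] match-token embedding
product = tf.expand_dims(exact_match, 3) * tf.expand_dims(
    tf.expand_dims(tf.expand_dims(match_vec, 0), 1), 2)
summary = tf.reduce_sum(product, 2)  # [batch, columns, embedding_dims]
print(summary.get_shape())           # prints (2, 3, 5)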
Example 4: LSTM_question_embedding
# Required import: import nn_utils [as alias]
# Or: from nn_utils import get_embedding [as alias]
def LSTM_question_embedding(self, sentence, sentence_length):
  # LSTM processes the input question, one word per timestep.
  lstm_params = "question_lstm"
  hidden_vectors = []
  sentence = self.batch_question
  question_hidden = tf.zeros(
      [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
  question_c_hidden = tf.zeros(
      [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
  if self.utility.FLAGS.rnn_dropout > 0.0:
    if self.mode == "train":
      rnn_dropout_mask = tf.cast(
          tf.random_uniform(
              tf.shape(question_hidden), minval=0.0, maxval=1.0) <
          self.utility.FLAGS.rnn_dropout,
          self.data_type) / self.utility.FLAGS.rnn_dropout
    else:
      rnn_dropout_mask = tf.ones_like(question_hidden)
  for question_iterator in range(self.question_length):
    curr_word = sentence[:, question_iterator]
    question_vector = nn_utils.apply_dropout(
        nn_utils.get_embedding(curr_word, self.utility, self.params),
        self.utility.FLAGS.dropout, self.mode)
    question_hidden, question_c_hidden = nn_utils.LSTMCell(
        question_vector, question_hidden, question_c_hidden, lstm_params,
        self.params)
    if self.utility.FLAGS.rnn_dropout > 0.0:
      question_hidden = question_hidden * rnn_dropout_mask
    hidden_vectors.append(tf.expand_dims(question_hidden, 0))
  # tf.concat takes keyword arguments in TF >= 1.0; the original snippet used
  # the pre-1.0 positional form tf.concat(0, hidden_vectors).
  hidden_vectors = tf.concat(axis=0, values=hidden_vectors)
  return question_hidden, hidden_vectors
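nn_utils.LSTMCell itself is not shown on this page. As a rough sketch only, a single step with the same (input, hidden, cell) -> (hidden, cell) behavior could use the standard LSTM equations; the weight layout (one fused matrix W and bias b) and the function name are our assumptions, not the actual nn_utils parameterization:

import tensorflow as tf

def lstm_step(x, h, c, W, b):
  # Assumed shapes: x [batch, d_in], h and c [batch, d],
  # W [d_in + d, 4 * d], b [4 * d].
  gates = tf.matmul(tf.concat(axis=1, values=[x, h]), W) + b
  i, f, o, g = tf.split(gates, 4, axis=1)  # input, forget, output, candidate
  new_c = tf.sigmoid(f) * c + tf.sigmoid(i) * tf.tanh(g)
  new_h = tf.sigmoid(o) * tf.tanh(new_c)
  return new_h, new_c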