

Python nn_utils.apply_dropout method: code examples

This article collects typical usage examples of the nn_utils.apply_dropout method in Python. If you are unsure what nn_utils.apply_dropout does or how to call it, the selected code examples below should help. You can also explore further usage examples from the nn_utils module.


The following presents 6 code examples of nn_utils.apply_dropout, ordered by popularity by default.
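The nn_utils module itself is not reproduced on this page. Judging from how the examples call it, apply_dropout(tensor, FLAGS.dropout, mode) applies dropout only while training and returns the tensor unchanged otherwise. Below is a minimal sketch of such a wrapper, assuming (as the rnn_dropout mask in Example 3 suggests) that the dropout flag is a keep probability; it is an illustration, not the module's actual source.

import tensorflow as tf  # TensorFlow 1.x, matching the examples below

def apply_dropout(x, dropout, mode):
  # Sketch only: apply inverted dropout during training, pass through otherwise.
  # Assumption: `dropout` is the keep probability handed to tf.nn.dropout.
  if dropout > 0.0 and mode == "train":
    x = tf.nn.dropout(x, keep_prob=dropout)
  return x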

Example 1: compute_column_softmax

# Required module import: import nn_utils [as alias]
# Alternatively: from nn_utils import apply_dropout [as alias]
def compute_column_softmax(self, column_controller_vector, time_step):
    #compute softmax over all the columns using column controller vector
    column_controller_vector = tf.tile(
        tf.expand_dims(column_controller_vector, 1),
        [1, self.num_cols + self.num_word_cols, 1])  #batch_size * max_cols * d
    column_controller_vector = nn_utils.apply_dropout(
        column_controller_vector, self.utility.FLAGS.dropout, self.mode)
    self.full_column_hidden_vectors = tf.concat(
        axis=1, values=[self.column_hidden_vectors, self.word_column_hidden_vectors])
    self.full_column_hidden_vectors += self.summary_text_entry_embeddings
    self.full_column_hidden_vectors = nn_utils.apply_dropout(
        self.full_column_hidden_vectors, self.utility.FLAGS.dropout, self.mode)
    column_logits = tf.reduce_sum(
        column_controller_vector * self.full_column_hidden_vectors, 2) + (
            self.params["word_match_feature_column_name"] *
            self.batch_column_exact_match) + self.full_column_mask
    column_softmax = tf.nn.softmax(column_logits)  #batch_size * max_cols
    return column_softmax 
Developer: ringringyi, Project: DOTA_models, Lines of code: 20, Source: model.py

Example 2: compute_column_softmax

# Required module import: import nn_utils [as alias]
# Alternatively: from nn_utils import apply_dropout [as alias]
def compute_column_softmax(self, column_controller_vector, time_step):
    #compute softmax over all the columns using column controller vector
    column_controller_vector = tf.tile(
        tf.expand_dims(column_controller_vector, 1),
        [1, self.num_cols + self.num_word_cols, 1])  #batch_size * max_cols * d
    column_controller_vector = nn_utils.apply_dropout(
        column_controller_vector, self.utility.FLAGS.dropout, self.mode)
    self.full_column_hidden_vectors = tf.concat(
        1, [self.column_hidden_vectors, self.word_column_hidden_vectors])
    self.full_column_hidden_vectors += self.summary_text_entry_embeddings
    self.full_column_hidden_vectors = nn_utils.apply_dropout(
        self.full_column_hidden_vectors, self.utility.FLAGS.dropout, self.mode)
    column_logits = tf.reduce_sum(
        column_controller_vector * self.full_column_hidden_vectors, 2) + (
            self.params["word_match_feature_column_name"] *
            self.batch_column_exact_match) + self.full_column_mask
    column_softmax = tf.nn.softmax(column_logits)  #batch_size * max_cols
    return column_softmax 
Developer: coderSkyChen, Project: Action_Recognition_Zoo, Lines of code: 20, Source: model.py

Example 3: LSTM_question_embedding

# Required module import: import nn_utils [as alias]
# Alternatively: from nn_utils import apply_dropout [as alias]
def LSTM_question_embedding(self, sentence, sentence_length):
    #LSTM processes the input question
    lstm_params = "question_lstm"
    hidden_vectors = []
    sentence = self.batch_question
    question_hidden = tf.zeros(
        [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
    question_c_hidden = tf.zeros(
        [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
    if (self.utility.FLAGS.rnn_dropout > 0.0):
      if (self.mode == "train"):
        rnn_dropout_mask = tf.cast(
            tf.random_uniform(
                tf.shape(question_hidden), minval=0.0, maxval=1.0) <
            self.utility.FLAGS.rnn_dropout,
            self.data_type) / self.utility.FLAGS.rnn_dropout
      else:
        rnn_dropout_mask = tf.ones_like(question_hidden)
    for question_iterator in range(self.question_length):
      curr_word = sentence[:, question_iterator]
      question_vector = nn_utils.apply_dropout(
          nn_utils.get_embedding(curr_word, self.utility, self.params),
          self.utility.FLAGS.dropout, self.mode)
      question_hidden, question_c_hidden = nn_utils.LSTMCell(
          question_vector, question_hidden, question_c_hidden, lstm_params,
          self.params)
      if (self.utility.FLAGS.rnn_dropout > 0.0):
        question_hidden = question_hidden * rnn_dropout_mask
      hidden_vectors.append(tf.expand_dims(question_hidden, 0))
    hidden_vectors = tf.concat(axis=0, values=hidden_vectors)
    return question_hidden, hidden_vectors 
Developer: ringringyi, Project: DOTA_models, Lines of code: 33, Source: model.py
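Note that Examples 3 and 5 sample the recurrent dropout mask once per question, before the timestep loop, and reuse it at every LSTM step, so the same hidden units stay dropped for the whole sequence. The following standalone sketch isolates just that mask construction, under the same assumption that FLAGS.rnn_dropout acts as a keep probability.

import tensorflow as tf  # TensorFlow 1.x

def make_rnn_dropout_mask(hidden, keep_prob, mode):
  # Keep each unit with probability keep_prob and rescale survivors by
  # 1/keep_prob (inverted dropout); at test time the mask is all ones.
  if mode == "train":
    keep = tf.random_uniform(tf.shape(hidden), minval=0.0, maxval=1.0) < keep_prob
    return tf.cast(keep, hidden.dtype) / keep_prob
  return tf.ones_like(hidden)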

Example 4: one_pass

# Required module import: import nn_utils [as alias]
# Alternatively: from nn_utils import apply_dropout [as alias]
def one_pass(self, select, question_embedding, hidden_vectors, hprev,
               prev_select_1, curr_pass):
    #Performs one timestep which involves selecting an operation and a column
    attention_vector = self.perform_attention(
        hprev, hidden_vectors, self.question_length,
        self.batch_question_attention_mask)  #batch_size * embedding_dims
    controller_vector = tf.nn.relu(
        tf.matmul(hprev, self.params["controller_prev"]) + tf.matmul(
            tf.concat(axis=1, values=[question_embedding, attention_vector]), self.params[
                "controller"]))
    column_controller_vector = tf.nn.relu(
        tf.matmul(hprev, self.params["column_controller_prev"]) + tf.matmul(
            tf.concat(axis=1, values=[question_embedding, attention_vector]), self.params[
                "column_controller"]))
    controller_vector = nn_utils.apply_dropout(
        controller_vector, self.utility.FLAGS.dropout, self.mode)
    self.operation_logits = tf.matmul(controller_vector,
                                      tf.transpose(self.params_unit))
    softmax = tf.nn.softmax(self.operation_logits)
    soft_softmax = softmax
    #compute column softmax: bs * max_columns
    weighted_op_representation = tf.transpose(
        tf.matmul(tf.transpose(self.params_unit), tf.transpose(softmax)))
    column_controller_vector = tf.nn.relu(
        tf.matmul(
            tf.concat(axis=1, values=[
                column_controller_vector, weighted_op_representation
            ]), self.params["break_conditional"]))
    full_column_softmax = self.compute_column_softmax(column_controller_vector,
                                                      curr_pass)
    soft_column_softmax = full_column_softmax
    if (self.mode == "test"):
      full_column_softmax = self.make_hard_softmax(full_column_softmax)
      softmax = self.make_hard_softmax(softmax)
    output, select = self.perform_operations(softmax, full_column_softmax,
                                             select, prev_select_1, curr_pass)
    return output, select, softmax, soft_softmax, full_column_softmax, soft_column_softmax 
Developer: ringringyi, Project: DOTA_models, Lines of code: 39, Source: model.py
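At test time, one_pass replaces the soft operation and column distributions with hard ones via self.make_hard_softmax, whose implementation is not shown on this page. A common way to realize such a hard selection, and presumably what it does, is a one-hot of the row-wise argmax; the sketch below is an assumption for illustration, not code taken from either project.

import tensorflow as tf  # TensorFlow 1.x

def hard_softmax(softmax):
  # Hypothetical: turn each row of a [batch_size, num_classes] distribution
  # into a one-hot vector at its argmax.
  num_classes = tf.shape(softmax)[1]
  return tf.one_hot(tf.argmax(softmax, axis=1), depth=num_classes,
                    dtype=softmax.dtype)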

Example 5: LSTM_question_embedding

# Required module import: import nn_utils [as alias]
# Alternatively: from nn_utils import apply_dropout [as alias]
def LSTM_question_embedding(self, sentence, sentence_length):
    #LSTM processes the input question
    lstm_params = "question_lstm"
    hidden_vectors = []
    sentence = self.batch_question
    question_hidden = tf.zeros(
        [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
    question_c_hidden = tf.zeros(
        [self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
    if (self.utility.FLAGS.rnn_dropout > 0.0):
      if (self.mode == "train"):
        rnn_dropout_mask = tf.cast(
            tf.random_uniform(
                tf.shape(question_hidden), minval=0.0, maxval=1.0) <
            self.utility.FLAGS.rnn_dropout,
            self.data_type) / self.utility.FLAGS.rnn_dropout
      else:
        rnn_dropout_mask = tf.ones_like(question_hidden)
    for question_iterator in range(self.question_length):
      curr_word = sentence[:, question_iterator]
      question_vector = nn_utils.apply_dropout(
          nn_utils.get_embedding(curr_word, self.utility, self.params),
          self.utility.FLAGS.dropout, self.mode)
      question_hidden, question_c_hidden = nn_utils.LSTMCell(
          question_vector, question_hidden, question_c_hidden, lstm_params,
          self.params)
      if (self.utility.FLAGS.rnn_dropout > 0.0):
        question_hidden = question_hidden * rnn_dropout_mask
      hidden_vectors.append(tf.expand_dims(question_hidden, 0))
    hidden_vectors = tf.concat(0, hidden_vectors)
    return question_hidden, hidden_vectors 
Developer: coderSkyChen, Project: Action_Recognition_Zoo, Lines of code: 33, Source: model.py

Example 6: one_pass

# Required module import: import nn_utils [as alias]
# Alternatively: from nn_utils import apply_dropout [as alias]
def one_pass(self, select, question_embedding, hidden_vectors, hprev,
               prev_select_1, curr_pass):
    #Performs one timestep which involves selecting an operation and a column
    attention_vector = self.perform_attention(
        hprev, hidden_vectors, self.question_length,
        self.batch_question_attention_mask)  #batch_size * embedding_dims
    controller_vector = tf.nn.relu(
        tf.matmul(hprev, self.params["controller_prev"]) + tf.matmul(
            tf.concat(1, [question_embedding, attention_vector]), self.params[
                "controller"]))
    column_controller_vector = tf.nn.relu(
        tf.matmul(hprev, self.params["column_controller_prev"]) + tf.matmul(
            tf.concat(1, [question_embedding, attention_vector]), self.params[
                "column_controller"]))
    controller_vector = nn_utils.apply_dropout(
        controller_vector, self.utility.FLAGS.dropout, self.mode)
    self.operation_logits = tf.matmul(controller_vector,
                                      tf.transpose(self.params_unit))
    softmax = tf.nn.softmax(self.operation_logits)
    soft_softmax = softmax
    #compute column softmax: bs * max_columns
    weighted_op_representation = tf.transpose(
        tf.matmul(tf.transpose(self.params_unit), tf.transpose(softmax)))
    column_controller_vector = tf.nn.relu(
        tf.matmul(
            tf.concat(1, [
                column_controller_vector, weighted_op_representation
            ]), self.params["break_conditional"]))
    full_column_softmax = self.compute_column_softmax(column_controller_vector,
                                                      curr_pass)
    soft_column_softmax = full_column_softmax
    if (self.mode == "test"):
      full_column_softmax = self.make_hard_softmax(full_column_softmax)
      softmax = self.make_hard_softmax(softmax)
    output, select = self.perform_operations(softmax, full_column_softmax,
                                             select, prev_select_1, curr_pass)
    return output, select, softmax, soft_softmax, full_column_softmax, soft_column_softmax 
Developer: coderSkyChen, Project: Action_Recognition_Zoo, Lines of code: 39, Source: model.py
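A final note on the API difference between the examples: Examples 2, 5 and 6 call tf.concat with the pre-1.0 argument order (concat dimension first), while Examples 1, 3 and 4 use the keyword form required since TensorFlow 1.0. On TensorFlow 1.x the older positional calls no longer match the tf.concat signature and generally need to be rewritten, for instance:

import tensorflow as tf  # TensorFlow 1.x

a = tf.zeros([2, 3, 5])
b = tf.zeros([2, 4, 5])

# Pre-1.0 style, as in Examples 2, 5 and 6:
#   merged = tf.concat(1, [a, b])
# Equivalent keyword form on TensorFlow >= 1.0, as in Examples 1, 3 and 4:
merged = tf.concat(axis=1, values=[a, b])  # shape: [2, 7, 5]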


Note: The nn_utils.apply_dropout examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code; please consult each project's license before redistributing or using the code, and do not reproduce this compilation without permission.