

Python rnn.DropoutWrapper Method: Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.rnn.DropoutWrapper. If you have been wondering what rnn.DropoutWrapper does, how to call it, or what real uses look like, the curated code examples below may help. You can also explore further usage examples from its containing module, tensorflow.contrib.rnn.


The following shows 15 code examples of rnn.DropoutWrapper, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
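
Before the examples, here is a minimal, self-contained sketch of the typical DropoutWrapper pattern. The cell size, keep probability, and placeholder name are illustrative assumptions, not taken from any example below:

import tensorflow as tf
from tensorflow.contrib import rnn

# keep_prob is typically a placeholder so dropout can be turned off at evaluation time
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
cell = rnn.BasicLSTMCell(128)
cell = rnn.DropoutWrapper(cell, input_keep_prob=keep_prob, output_keep_prob=keep_prob)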

Example 1: build_permutation

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def build_permutation(self):

        with tf.variable_scope("encoder"):
            
            with tf.variable_scope("embedding"):
                # Embed input sequence
                W_embed = tf.get_variable("weights", [1, self.input_dimension+2, self.input_embed], initializer=self.initializer)  # +2 for TW feat. here too
                embedded_input = tf.nn.conv1d(self.input_, W_embed, 1, "VALID", name="embedded_input")
                # Batch Normalization
                embedded_input = tf.layers.batch_normalization(embedded_input, axis=2, training=self.is_training, name='layer_norm', reuse=None)

            with tf.variable_scope("dynamic_rnn"):
                # Encode input sequence
                cell1 = LSTMCell(self.num_neurons, initializer=self.initializer)  # alternatives: BNLSTMCell(self.num_neurons, self.training), or wrap with dropout: cell1 = DropoutWrapper(cell1, output_keep_prob=0.9)
                # Return the output activations [Batch size, Sequence Length, Num_neurons] and last hidden state as tensors.
                encoder_output, encoder_state = tf.nn.dynamic_rnn(cell1, embedded_input, dtype=tf.float32)

        with tf.variable_scope('decoder'):
            # Ptr-net returns permutations (self.positions), with their log-probability for backprop
            self.ptr = Pointer_decoder(encoder_output, self.config)
            self.positions, self.log_softmax, self.attending, self.pointing = self.ptr.loop_decode(encoder_state)
            variable_summaries('log_softmax',self.log_softmax, with_max_min = True) 
Author: MichelDeudon | Project: neural-combinatorial-optimization-rl-tensorflow | Lines: 24 | Source file: actor.py

Example 2: get_rnn_cell_trainable_variables

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def get_rnn_cell_trainable_variables(cell):
    """Returns the list of trainable variables of an RNN cell.

    Args:
        cell: an instance of :tf_main:`RNNCell <nn/rnn_cell/RNNCell>`.

    Returns:
        list: trainable variables of the cell.
    """
    cell_ = cell
    while True:
        try:
            return cell_.trainable_variables
        except AttributeError:
            # Cell wrappers (e.g., `DropoutWrapper`) cannot directly access
            # `trainable_variables` because they do not initialize the
            # superclass (tf==v1.3), so go through the wrapped cell instead.
            cell_ = cell_._cell  # pylint: disable=protected-access
Author: qkaren | Project: Counterfactual-StoryRW | Lines: 20 | Source file: layers.py
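
A hedged usage sketch of the helper above (the cell size and keep probability are illustrative): because DropoutWrapper in tf 1.3 does not expose trainable_variables itself, the loop unwraps the protected _cell attribute until it reaches a cell that does.

wrapped = rnn.DropoutWrapper(rnn.GRUCell(64), output_keep_prob=0.9)
# Note: the returned list is empty until the cell has been built (e.g., by tf.nn.dynamic_rnn).
params = get_rnn_cell_trainable_variables(wrapped)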

Example 3: apply_dropout

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def apply_dropout(
    cell, input_keep_probability, output_keep_probability, random_seed=None):
  """Apply dropout to the outputs and inputs of `cell`.

  Args:
    cell: An `RNNCell`.
    input_keep_probability: Probability to keep inputs to `cell`. If `None`,
      no dropout is applied.
    output_keep_probability: Probability to keep outputs of `cell`. If `None`,
      no dropout is applied.
    random_seed: Seed for random dropout.

  Returns:
    An `RNNCell`, the result of applying the supplied dropouts to `cell`.
  """
  input_prob_none = input_keep_probability is None
  output_prob_none = output_keep_probability is None
  if input_prob_none and output_prob_none:
    return cell
  if input_prob_none:
    input_keep_probability = 1.0
  if output_prob_none:
    output_keep_probability = 1.0
  return contrib_rnn.DropoutWrapper(
      cell, input_keep_probability, output_keep_probability, random_seed) 
Author: abhisuri97 | Project: auto-alt-text-lambda-api | Lines: 27 | Source file: dynamic_rnn_estimator.py
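
A hypothetical call (the cell size and probabilities are illustrative); note that passing None for both probabilities returns the cell unchanged:

cell = contrib_rnn.LSTMCell(128)
cell = apply_dropout(cell, input_keep_probability=0.8, output_keep_probability=0.5)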

Example 4: build_lstm

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def build_lstm(self):
        def build_cell():
            cell = rnn.BasicLSTMCell(self._hidden_size, forget_bias=1.0, state_is_tuple=True)
            cell = rnn.DropoutWrapper(cell, output_keep_prob=self._keep_prob)
            return cell
        mul_cell = rnn.MultiRNNCell([build_cell() for _ in range(self._num_layer)], 
                                    state_is_tuple=True)
        self._init_state = mul_cell.zero_state(self._num_seq, dtype=tf.float32)
        outputs, self._final_state = tf.nn.dynamic_rnn(mul_cell, self._inputs, 
                                                       initial_state=self._init_state)
        outputs = tf.reshape(outputs, [-1, self._hidden_size])
        W = tf.Variable(tf.truncated_normal([self._hidden_size, self._corpus.word_num],
                                            stddev=0.1, dtype=tf.float32))
        bias = tf.Variable(tf.zeros([1, self._corpus.word_num],
                                    dtype=tf.float32), dtype=tf.float32)
        self._prediction = tf.nn.softmax(tf.matmul(outputs, W) + bias)
Author: yhswjtuILMARE | Project: Machine-Learning-Study-Notes | Lines: 18 | Source file: model.py

Example 5: _create_rnn_cell

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def _create_rnn_cell(self):
        """
        Creates a single RNN cell according to the architecture of this RNN.
        
        Returns
        -------
        rnn cell
            A single RNN cell according to the architecture of this RNN
        """
        keep_prob = 1.0 if self.keep_prob is None else self.keep_prob

        if self.cell_type == CellType.GRU:
            return DropoutWrapper(GRUCell(self.num_units), keep_prob, keep_prob)
        elif self.cell_type == CellType.LSTM:
            return DropoutWrapper(LSTMCell(self.num_units), keep_prob, keep_prob)
        else:
            raise ValueError("unknown cell type: {}".format(self.cell_type)) 
Author: auDeep | Project: auDeep | Lines: 19 | Source file: rnn_base.py
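
The two positional arguments after the cell are input_keep_prob and output_keep_prob, so this example applies dropout to both the inputs and the outputs of the cell. An equivalent keyword form (a sketch, not from the original source):

DropoutWrapper(GRUCell(self.num_units), input_keep_prob=keep_prob, output_keep_prob=keep_prob)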

Example 6: inference

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def inference(self):
        """main computation graph here: 1. embeddding layer, 2.Bi-LSTM layer, 3.concat, 4.FC layer 5.softmax """
        #1.get emebedding of words in the sentence
        self.embedded_words = tf.nn.embedding_lookup(self.Embedding,self.input_x) #shape:[None,sentence_length,embed_size]
        #2. Bi-lstm layer
        # define lstm cess:get lstm cell output
        lstm_fw_cell=rnn.BasicLSTMCell(self.hidden_size) #forward direction cell
        lstm_bw_cell=rnn.BasicLSTMCell(self.hidden_size) #backward direction cell
        if self.dropout_keep_prob is not None:
            lstm_fw_cell=rnn.DropoutWrapper(lstm_fw_cell,output_keep_prob=self.dropout_keep_prob)
            lstm_bw_cell=rnn.DropoutWrapper(lstm_bw_cell,output_keep_prob=self.dropout_keep_prob)
        # bidirectional_dynamic_rnn: input: [batch_size, max_time, input_size]
        #                            output: A tuple (outputs, output_states)
        #                                    where:outputs: A tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`.
        outputs,_=tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell,lstm_bw_cell,self.embedded_words,dtype=tf.float32) #[batch_size,sequence_length,hidden_size] #creates a dynamic bidirectional recurrent neural network
        print("outputs:===>",outputs) #outputs:(<tf.Tensor 'bidirectional_rnn/fw/fw/transpose:0' shape=(?, 5, 100) dtype=float32>, <tf.Tensor 'ReverseV2:0' shape=(?, 5, 100) dtype=float32>))
        #3. concat output
        output_rnn=tf.concat(outputs,axis=2) #[batch_size,sequence_length,hidden_size*2]
        #self.output_rnn_last=tf.reduce_mean(output_rnn,axis=1) #[batch_size,hidden_size*2] 
        self.output_rnn_last=output_rnn[:,-1,:] ##[batch_size,hidden_size*2] #TODO
        print("output_rnn_last:", self.output_rnn_last) # <tf.Tensor 'strided_slice:0' shape=(?, 200) dtype=float32>
        #4. logits(use linear layer)
        with tf.name_scope("output"): #inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward activations of the input network.
            logits = tf.matmul(self.output_rnn_last, self.W_projection) + self.b_projection  # [batch_size,num_classes]
        return logits 
Author: brightmart | Project: text_classification | Lines: 27 | Source file: p8_TextRNN_model.py

Example 7: inference

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def inference(self):
        """main computation graph here: 1. embeddding layer, 2.Bi-LSTM layer, 3.mean pooling, 4.FC layer, 5.softmax """
        #1.get emebedding of words in the sentence
        self.embedded_words = tf.nn.embedding_lookup(self.Embedding,self.input_x) #shape:[None,sentence_length,embed_size]
        #2. Bi-lstm layer
        # define lstm cess:get lstm cell output
        lstm_fw_cell=rnn.BasicLSTMCell(self.hidden_size) #forward direction cell
        lstm_bw_cell=rnn.BasicLSTMCell(self.hidden_size) #backward direction cell
        if self.dropout_keep_prob is not None:
            lstm_fw_cell=rnn.DropoutWrapper(lstm_fw_cell,output_keep_prob=self.dropout_keep_prob)
            lstm_bw_cell=rnn.DropoutWrapper(lstm_bw_cell,output_keep_prob=self.dropout_keep_prob)
        # bidirectional_dynamic_rnn: input: [batch_size, max_time, input_size]
        #                            output: A tuple (outputs, output_states)
        #                                    where:outputs: A tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`.
        outputs,_=tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell,lstm_bw_cell,self.embedded_words,dtype=tf.float32) #[batch_size,sequence_length,hidden_size] #creates a dynamic bidirectional recurrent neural network
        print("outputs:===>",outputs) #outputs:(<tf.Tensor 'bidirectional_rnn/fw/fw/transpose:0' shape=(?, 5, 100) dtype=float32>, <tf.Tensor 'ReverseV2:0' shape=(?, 5, 100) dtype=float32>))
        #3. concat output
        output_rnn=tf.concat(outputs,axis=2) #[batch_size,sequence_length,hidden_size*2]
        output_rnn_pooled=tf.reduce_mean(output_rnn,axis=1) #[batch_size,hidden_size*2] #output_rnn_last=output_rnn[:,-1,:] ##[batch_size,hidden_size*2] #TODO
        print("output_rnn_pooled:", output_rnn_pooled) # <tf.Tensor 'strided_slice:0' shape=(?, 200) dtype=float32>
        #4. logits(use linear layer)
        with tf.name_scope("output"): #inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward activations of the input network.
            logits = tf.matmul(output_rnn_pooled, self.W_projection) + self.b_projection  # [batch_size,num_classes]
        return logits 
Author: brightmart | Project: text_classification | Lines: 26 | Source file: p9_BiLstmTextRelation_model.py

Example 8: get_rnn_cell_trainable_variables

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def get_rnn_cell_trainable_variables(cell):
    """Returns the list of trainable variables of an RNN cell.

    Args:
        cell: an instance of :tf_main:`RNNCell <nn/rnn_cell/RNNCell>`.

    Returns:
        list: trainable variables of the cell.
    """
    cell_ = cell
    while True:
        try:
            return cell_.trainable_variables
        except AttributeError:
            # Cell wrappers (e.g., `DropoutWrapper`) cannot directly access
            # `trainable_variables` because they do not initialize the
            # superclass (tf==v1.3), so go through the wrapped cell instead.
            cell_ = cell_._cell  # pylint: disable=protected-access
Author: asyml | Project: texar | Lines: 20 | Source file: layers.py

Example 9: horizontal_cell

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def horizontal_cell(images, num_filters_out, cell_fw, cell_bw, keep_prob=1.0, scope=None):
  """Run an LSTM bidirectionally over all the rows of each image.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output depth
    scope: optional scope name

  Returns:
    (num_images, height, width, num_filters_out) tensor, where
  """
  with tf.variable_scope(scope, "HorizontalGru", [images]):
    sequence = images_to_sequence(images)

    shapeT = tf.shape(sequence)
    sequence_length = shapeT[0]
    batch_sizeRNN = shapeT[1]
    sequence_lengths = tf.to_int64(
      tf.fill([batch_sizeRNN], sequence_length))
    forward_drop1 = DropoutWrapper(cell_fw, output_keep_prob=keep_prob)
    backward_drop1 = DropoutWrapper(cell_bw, output_keep_prob=keep_prob)
    rnn_out1, _ = tf.nn.bidirectional_dynamic_rnn(forward_drop1, backward_drop1, sequence, dtype=tf.float32,
                                                  sequence_length=sequence_lengths, time_major=True,
                                                  swap_memory=True, scope=scope)
    rnn_out1 = tf.concat(rnn_out1, 2)
    rnn_out1 = tf.reshape(rnn_out1, shape=[-1, batch_sizeRNN, 2, num_filters_out])
    output_sequence = tf.reduce_sum(rnn_out1, axis=2)
    batch_size=tf.shape(images)[0]
    output = sequence_to_images(output_sequence, batch_size)
    return output 
Author: TobiasGruening | Project: ARU-Net | Lines: 32 | Source file: layers.py

Example 10: apply_dropout

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def apply_dropout(cells, dropout_keep_probabilities, random_seed=None):
  """Applies dropout to the outputs and inputs of `cell`.

  Args:
    cells: A list of `RNNCell`s.
    dropout_keep_probabilities: a list whose elements are either floats in
      `[0.0, 1.0]` or `None`. It must be one element longer than `cells`.
    random_seed: Seed for random dropout.

  Returns:
    A list of `RNNCell`s, the result of applying the supplied dropouts.

  Raises:
    ValueError: If `len(dropout_keep_probabilities) != len(cells) + 1`.
  """
  if len(dropout_keep_probabilities) != len(cells) + 1:
    raise ValueError(
        'The number of dropout probabilities must be one greater than the '
        'number of cells. Got {} cells and {} dropout probabilities.'.format(
            len(cells), len(dropout_keep_probabilities)))
  wrapped_cells = [
      contrib_rnn.DropoutWrapper(cell, prob, 1.0, seed=random_seed)
      for cell, prob in zip(cells[:-1], dropout_keep_probabilities[:-2])
  ]
  wrapped_cells.append(
      contrib_rnn.DropoutWrapper(cells[-1], dropout_keep_probabilities[-2],
                                 dropout_keep_probabilities[-1]))
  return wrapped_cells 
Author: ryfeus | Project: lambda-packs | Lines: 30 | Source file: rnn_common.py
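
A hedged usage sketch with illustrative sizes: for N cells you pass N+1 probabilities; probability i keeps the inputs of cell i, and the final probability keeps the outputs of the last cell.

cells = [contrib_rnn.BasicLSTMCell(64) for _ in range(3)]
# Three input keep probabilities plus one output keep probability for the last cell.
wrapped = apply_dropout(cells, dropout_keep_probabilities=[0.8, 0.9, 0.9, 0.5])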

Example 11: cell_create

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def cell_create(self, scope_name):
        with tf.variable_scope(scope_name):
            if self.cell_type == 'tanh':
                cells = rnn.MultiRNNCell([rnn.BasicRNNCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
            elif self.cell_type == 'LSTM':
                cells = rnn.MultiRNNCell([rnn.BasicLSTMCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
            elif self.cell_type == 'GRU':
                cells = rnn.MultiRNNCell([rnn.GRUCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
            elif self.cell_type == 'LSTMP':
                cells = rnn.MultiRNNCell([rnn.LSTMCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
            cells = rnn.DropoutWrapper(cells, input_keep_prob=self.dropout_ph, output_keep_prob=self.dropout_ph)
        return cells
Author: CarlSouthall | Project: ADTLib | Lines: 14 | Source file: __init__.py

Example 12: define_graph

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def define_graph(self):
        essays = tf.placeholder(tf.float32, [None, self.hp.d_e_len,
                                             self.hp.w_dim])
        scores = tf.placeholder(tf.float32, [None])

        # Long Short-Term Memory layer
        lstm_cell = tfrnn.BasicLSTMCell(num_units=self.hp.lstm_hidden_size)
        lstm_cell = tfrnn.DropoutWrapper(
            cell=lstm_cell,
            output_keep_prob=self.hp.dropout_keep_prob)
        init_state = lstm_cell.zero_state(self.hp.batch_size, dtype=tf.float32)
        lstm, _ = tf.nn.dynamic_rnn(lstm_cell, essays, dtype=tf.float32)

        # Mean over Time pooling
        mot = tf.reduce_mean(lstm, axis=1)

        # Dense layer
        dense = tf.layers.dense(inputs=mot, units=1, activation=tf.nn.sigmoid)

        # Prediction and Loss
        preds = tf.reshape(dense, [-1])
        tf.summary.histogram('preds', preds)
        loss = tf.losses.mean_squared_error(scores, preds)
        tf.summary.scalar('loss', loss)
        merged_summary = tf.summary.merge_all()

        return (essays,
                scores,
                merged_summary,
                loss,
                preds) 
Author: zlliang | Project: essaysense | Lines: 33 | Source file: models.py

Example 13: getLayeredCell

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def getLayeredCell(layer_size, num_units, input_keep_prob,
        output_keep_prob=1.0):
    return rnn.MultiRNNCell([rnn.DropoutWrapper(rnn.BasicLSTMCell(num_units),
        input_keep_prob, output_keep_prob) for i in range(layer_size)]) 
Author: wb14123 | Project: seq2seq-couplet | Lines: 6 | Source file: seq2seq.py
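
A hypothetical call with illustrative sizes, stacking two dropout-wrapped LSTM layers into a single MultiRNNCell:

cell = getLayeredCell(layer_size=2, num_units=256, input_keep_prob=0.8)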

Example 14: func

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def func():
    lstm_cell = rnn.BasicLSTMCell(hidden_size, forget_bias=1.0, state_is_tuple=True)
    lstm_cell = rnn.DropoutWrapper(lstm_cell, output_keep_prob=keep_prob)
    return lstm_cell 
Author: yhswjtuILMARE | Project: Machine-Learning-Study-Notes | Lines: 6 | Source file: handWrittenUnit_2.py

Example 15: func

# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import DropoutWrapper [as alias]
def func():
    lstm_cell = rnn.BasicLSTMCell(num_units=hidden_size, forget_bias=1.0, state_is_tuple=True)
    lstm_cell = rnn.DropoutWrapper(lstm_cell, output_keep_prob=keep_prob)
    return lstm_cell 
Author: yhswjtuILMARE | Project: Machine-Learning-Study-Notes | Lines: 6 | Source file: handWrittenUnit.py


Note: the tensorflow.contrib.rnn.DropoutWrapper examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers; copyright remains with the original authors, and distribution and use should follow each project's License. Please do not republish without permission.