

Python rnn.LSTMCell Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.rnn.LSTMCell. If you are wondering what exactly rnn.LSTMCell does, how it is used, and what real code that calls it looks like, the curated examples below should help. You can also explore further usage examples from its containing module, tensorflow.contrib.rnn.


The sections below present 15 code examples of the rnn.LSTMCell method, sorted by popularity by default.
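All of the examples share one core pattern: construct an LSTMCell and unroll it over a batch of sequences. As a reference point, here is a minimal, self-contained sketch of that pattern, assuming TensorFlow 1.x (where tf.contrib is still available); the tensor shapes and num_units value are illustrative only and not taken from any of the projects below.

import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn

# Inputs of shape [batch_size, max_time, feature_dim]; sizes are illustrative.
inputs = tf.placeholder(tf.float32, [None, 20, 8])

cell = rnn.LSTMCell(num_units=64)  # a single LSTM layer with 64 hidden units
# outputs: [batch_size, max_time, 64]; state: an LSTMStateTuple (c, h)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(outputs, {inputs: np.zeros((4, 20, 8), np.float32)})
    print(out.shape)  # (4, 20, 64)

Most of the examples below are variations on this pattern: wrapping the cell in DropoutWrapper or MultiRNNCell, or feeding forward and backward cells to tf.nn.bidirectional_dynamic_rnn.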

Example 1: build_permutation

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def build_permutation(self):

        with tf.variable_scope("encoder"):
            
            with tf.variable_scope("embedding"):
                # Embed input sequence
                W_embed = tf.get_variable("weights", [1, self.input_dimension+2, self.input_embed], initializer=self.initializer)  # +2 for TW feat. here too
                embedded_input = tf.nn.conv1d(self.input_, W_embed, 1, "VALID", name="embedded_input")
                # Batch Normalization
                embedded_input = tf.layers.batch_normalization(embedded_input, axis=2, training=self.is_training, name='layer_norm', reuse=None)

            with tf.variable_scope("dynamic_rnn"):
                # Encode input sequence
                cell1 = LSTMCell(self.num_neurons, initializer=self.initializer)  # BNLSTMCell(self.num_neurons, self.training) or cell1 = DropoutWrapper(cell1, output_keep_prob=0.9)
                # Return the output activations [Batch size, Sequence Length, Num_neurons] and last hidden state as tensors.
                encoder_output, encoder_state = tf.nn.dynamic_rnn(cell1, embedded_input, dtype=tf.float32)

        with tf.variable_scope('decoder'):
            # Ptr-net returns permutations (self.positions), with their log-probability for backprop
            self.ptr = Pointer_decoder(encoder_output, self.config)
            self.positions, self.log_softmax, self.attending, self.pointing = self.ptr.loop_decode(encoder_state)
            variable_summaries('log_softmax', self.log_softmax, with_max_min=True)
Developer: MichelDeudon; Project: neural-combinatorial-optimization-rl-tensorflow; Lines: 24; Source: actor.py

Example 2: __init__

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def __init__(self,
               num_units,
               use_peepholes=False,
               forget_bias=1.0,
               state_is_tuple=True,
               output_is_tuple=True):

    def cell_fn(n):
      return rnn.LSTMCell(
          num_units=n, forget_bias=forget_bias, use_peepholes=use_peepholes)

    super(Grid1LSTMCell, self).__init__(
        num_units=num_units,
        num_dims=1,
        input_dims=0,
        output_dims=0,
        priority_dims=0,
        cell_fn=cell_fn,
        state_is_tuple=state_is_tuple,
        output_is_tuple=output_is_tuple) 
Developer: ryfeus; Project: lambda-packs; Lines: 22; Source: grid_rnn_cell.py

Example 3: get_rnn_cell_list

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def get_rnn_cell_list(config, name, reuse=False, seed=123, dtype=tf.float32):
    cell_list = []
    for i, units in enumerate(config['num_units']):
        cell = None
        if config['cell_type'] == 'clstm':
            cell = CustomLSTMCell(units, layer_norm=config['layer_norm'], activation=config['activation'], seed=seed,
                                  reuse=reuse, dtype=dtype, name='{}_{}'.format(name, i))
        elif config['cell_type'] == 'tflstm':

            act = get_activation(config['activation'])

            if config['layer_norm']:
                cell = LayerNormBasicLSTMCell(num_units=units, activation=act, layer_norm=config['layer_norm'],
                                              reuse=reuse)
            elif config['layer_norm'] is False and config['activation'] != 'tanh':
                cell = LSTMCell(num_units=units, activation=act, reuse=reuse)
            else:
                cell = LSTMBlockCell(num_units=units)
        cell_list.append(cell)

    return cell_list 
Developer: JoergFranke; Project: ADNC; Lines: 23; Source: controller.py

Example 4: _create_rnn_cell

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def _create_rnn_cell(self):
        """
        Creates a single RNN cell according to the architecture of this RNN.
        
        Returns
        -------
        rnn cell
            A single RNN cell according to the architecture of this RNN
        """
        keep_prob = 1.0 if self.keep_prob is None else self.keep_prob

        if self.cell_type == CellType.GRU:
            return DropoutWrapper(GRUCell(self.num_units), keep_prob, keep_prob)
        elif self.cell_type == CellType.LSTM:
            return DropoutWrapper(LSTMCell(self.num_units), keep_prob, keep_prob)
        else:
            raise ValueError("unknown cell type: {}".format(self.cell_type)) 
Developer: auDeep; Project: auDeep; Lines: 19; Source: rnn_base.py

Example 5: __init__

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def __init__(self,
               cell,
               attention_mechanism,
               attention_layer_size=None,
               alignment_history=False,
               cell_input_fn=None,
               output_attention=True,
               initial_cell_state=None,
               name=None):
    if not isinstance(cell, (rnn.LSTMCell, rnn.GRUCell)):
      raise ValueError('SyncAttentionWrapper only supports LSTMCell and GRUCell, '
                       'Got: {}'.format(cell))
    super(SyncAttentionWrapper, self).__init__(
      cell,
      attention_mechanism,
      attention_layer_size=attention_layer_size,
      alignment_history=alignment_history,
      cell_input_fn=cell_input_fn,
      output_attention=output_attention,
      initial_cell_state=initial_cell_state,
      name=name
    ) 
Developer: bgshih; Project: aster; Lines: 24; Source: sync_attention_wrapper.py

Example 6: build_cell

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def build_cell(self, name=None):
        if self.hparams.cell_type == 'linear':
            cell = BasicRNNCell(self.hparams.hidden_units,
                                activation=tf.identity, name=name)
        elif self.hparams.cell_type == 'tanh':
            cell = BasicRNNCell(self.hparams.hidden_units,
                                activation=tf.tanh, name=name)
        elif self.hparams.cell_type == 'relu':
            cell = BasicRNNCell(self.hparams.hidden_units,
                                activation=tf.nn.relu, name=name)
        elif self.hparams.cell_type == 'gru':
            cell = GRUCell(self.hparams.hidden_units, name=name)
        elif self.hparams.cell_type == 'lstm':
            cell = LSTMCell(self.hparams.hidden_units, name=name, state_is_tuple=False)
        else:
            raise ValueError('Provided cell type not supported.')
        return cell 
Developer: microsoft; Project: icecaps; Lines: 19; Source: abstract_recurrent_estimator.py

Example 7: separable_rnn

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def separable_rnn(images, num_filters_out, scope=None, keep_prob=1.0, cellType='LSTM'):
  """Run bidirectional LSTMs first horizontally then vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    scope: optional scope name
    keep_prob: dropout keep probability
    cellType: 'LSTM' or 'GRU'

  Returns:
    (num_images, height, width, num_filters_out) tensor
  """
  with tf.variable_scope(scope, "SeparableLstm", [images]):
    with tf.variable_scope("horizontal"):
      if 'LSTM' in cellType:
        cell_fw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
        cell_bw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
      if 'GRU' in cellType:
        cell_fw = GRUCell(num_filters_out)
        cell_bw = GRUCell(num_filters_out)
      hidden = horizontal_cell(images, num_filters_out, cell_fw, cell_bw, keep_prob=keep_prob, scope=scope)
    with tf.variable_scope("vertical"):
      transposed = tf.transpose(hidden, [0, 2, 1, 3])
      if 'LSTM' in cellType:
        cell_fw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
        cell_bw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
      if 'GRU' in cellType:
        cell_fw = GRUCell(num_filters_out)
        cell_bw = GRUCell(num_filters_out)
      output_transposed = horizontal_cell(transposed, num_filters_out, cell_fw, cell_bw, keep_prob=keep_prob, scope=scope)
    output = tf.transpose(output_transposed, [0, 2, 1, 3])
    return output 
Developer: TobiasGruening; Project: ARU-Net; Lines: 34; Source: layers.py

Example 8: cell

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def cell(self):
        """ Return the cell """
        with tf.variable_scope(self.variable_scope, reuse=self.reuse):
            if self.num_layers > 1:
                # Build one LSTMCell per layer; stacking a single cell instance
                # would share weights across layers and fails in newer TF versions.
                cell = rnn.MultiRNNCell(
                    [rnn.LSTMCell(self.num_units, reuse=self.reuse) for _ in range(self.num_layers)])
            else:
                cell = rnn.LSTMCell(self.num_units, reuse=self.reuse)

        return cell
Developer: dojoteef; Project: glas; Lines: 11; Source: cell.py

Example 9: __init__

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def __init__(self, num_units, use_peepholes=False, forget_bias=1.0):
    super(Grid1LSTMCell, self).__init__(
        num_units=num_units, num_dims=1,
        input_dims=0, output_dims=0, priority_dims=0,
        cell_fn=lambda n, i: rnn.LSTMCell(
            num_units=n, input_size=i, use_peepholes=use_peepholes,
            forget_bias=forget_bias, state_is_tuple=False)) 
Developer: abhisuri97; Project: auto-alt-text-lambda-api; Lines: 9; Source: grid_rnn_cell.py

Example 10: baseline_forward

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def baseline_forward(self, X, size, n_class):
        shape = X.get_shape()
        seq = tf.transpose(X, [1, 0, 2]) 

        with tf.name_scope("LSTM"):
            lstm_cell = LSTMCell(size, forget_bias=1.0)
            outputs, states = tf.nn.dynamic_rnn(time_major=True, cell=lstm_cell, inputs=seq, dtype=tf.float32)

        with tf.name_scope("LSTM-Classifier"):
            W = tf.Variable(tf.random_normal([size, n_class]), name="W")
            b = tf.Variable(tf.random_normal([n_class]), name="b")
            output = tf.matmul(outputs[-1], W) + b

        return output 
Developer: icoxfog417; Project: tensorflow_qrnn; Lines: 16; Source: test_tf_qrnn_work.py

Example 11: cell_create

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def cell_create(self,scope_name):
         with tf.variable_scope(scope_name):
             if self.cell_type == 'tanh':
                 cells = rnn.MultiRNNCell([rnn.BasicRNNCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
             elif self.cell_type == 'LSTM': 
                 cells = rnn.MultiRNNCell([rnn.BasicLSTMCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
             elif self.cell_type == 'GRU':
                 cells = rnn.MultiRNNCell([rnn.GRUCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
             elif self.cell_type == 'LSTMP':
                 cells = rnn.MultiRNNCell([rnn.LSTMCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
             cells = rnn.DropoutWrapper(cells, input_keep_prob=self.dropout_ph,output_keep_prob=self.dropout_ph) 
         return cells 
Developer: CarlSouthall; Project: ADTLib; Lines: 14; Source: __init__.py

Example 12: biLSTM_layer_op

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def biLSTM_layer_op(self):
        with tf.variable_scope("bi-lstm"):
            cell_fw = LSTMCell(self.hidden_dim)
            cell_bw = LSTMCell(self.hidden_dim)
            (output_fw_seq, output_bw_seq), _ = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=cell_fw,
                cell_bw=cell_bw,
                inputs=self.word_embeddings,
                sequence_length=self.sequence_lengths,
                dtype=tf.float32)
            output = tf.concat([output_fw_seq, output_bw_seq], axis=-1)
            output = tf.nn.dropout(output, self.dropout_pl)

        with tf.variable_scope("proj"):
            W = tf.get_variable(name="W",
                                shape=[2 * self.hidden_dim, self.num_tags],
                                initializer=tf.contrib.layers.xavier_initializer(),
                                dtype=tf.float32)

            b = tf.get_variable(name="b",
                                shape=[self.num_tags],
                                initializer=tf.zeros_initializer(),
                                dtype=tf.float32)

            s = tf.shape(output)
            output = tf.reshape(output, [-1, 2*self.hidden_dim])
            pred = tf.matmul(output, W) + b

            self.logits = tf.reshape(pred, [-1, s[1], self.num_tags]) 
Developer: baiyyang; Project: medical-entity-recognition; Lines: 31; Source: model.py

Example 13: test_lstm

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def test_lstm():
    """
    Test an LSTM model.
    """
    run_ac_test(partial(RNNCellAC, make_cell=lambda: LSTMCell(32))) 
Developer: flyyufelix; Project: sonic_contest; Lines: 7; Source: test_ac_models.py

Example 14: test_multi_rnn

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def test_multi_rnn():
    """
    Test a stacked LSTM with nested tuple state.
    """
    def make_cell():
        return MultiRNNCell([LSTMCell(16), LSTMCell(32)])

    run_ac_test(partial(RNNCellAC, make_cell=make_cell)) 
Developer: flyyufelix; Project: sonic_contest; Lines: 10; Source: test_ac_models.py

Example 15: __call__

# Required module: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LSTMCell [as alias]
def __call__(self, inputs, training=False):
        """
        Runs the stacked bidirectional LSTM over the input sequence and produces its outputs.
        :param inputs: A list of tensors of shape [sequence_length, batch_size, 64]
        :param training: Flag that indicates if this is a training or evaluation stage
        :return: The stacked bidirectional LSTM outputs.
        """
        with tf.variable_scope(self.name, reuse=self.reuse):
            with tf.variable_scope("encoder"):

                fw_lstm_cells_encoder = [rnn.LSTMCell(num_units=self.layer_sizes[i], activation=tf.nn.tanh)
                                         for i in range(len(self.layer_sizes))]
                bw_lstm_cells_encoder = [rnn.LSTMCell(num_units=self.layer_sizes[i], activation=tf.nn.tanh)
                                         for i in range(len(self.layer_sizes))]

                outputs, output_state_fw, output_state_bw = rnn.stack_bidirectional_rnn(
                    fw_lstm_cells_encoder,
                    bw_lstm_cells_encoder,
                    inputs,
                    dtype=tf.float32
                )

            print("g out shape", tf.stack(outputs, axis=1).get_shape().as_list())

        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
        return outputs 
Developer: AntreasAntoniou; Project: MatchingNetworks; Lines: 30; Source: one_shot_learning_network.py


Note: The tensorflow.contrib.rnn.LSTMCell method examples in this article were compiled by 純淨天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors; for distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.