

Python rnn.LayerNormBasicLSTMCell Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.rnn.LayerNormBasicLSTMCell. If you are wondering what rnn.LayerNormBasicLSTMCell does, how to call it, or how it is used in real projects, the curated code examples below should help. You can also explore other usage examples from the enclosing module, tensorflow.contrib.rnn.


The following presents 5 code examples of the rnn.LayerNormBasicLSTMCell method, sorted by popularity by default.
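Before the examples, here is a minimal usage sketch, assuming a TensorFlow 1.x environment where tf.contrib is available; the tensor shapes and hyperparameter values are illustrative only, not taken from the examples below.

# Minimal sketch (TF 1.x): build a layer-normalized LSTM cell and unroll it over time.
import tensorflow as tf
from tensorflow.contrib import rnn

inputs = tf.placeholder(tf.float32, [None, 20, 64])   # [batch, time, features], illustrative shape
cell = rnn.LayerNormBasicLSTMCell(num_units=128,      # hidden state size
                                  dropout_keep_prob=0.9)  # recurrent dropout built into the cell
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)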

Example 1: get_rnn_cell_list

# Required import: from tensorflow.contrib import rnn
# Or equivalently: from tensorflow.contrib.rnn import LayerNormBasicLSTMCell
def get_rnn_cell_list(config, name, reuse=False, seed=123, dtype=tf.float32):
    cell_list = []
    for i, units in enumerate(config['num_units']):
        cell = None
        if config['cell_type'] == 'clstm':
            cell = CustomLSTMCell(units, layer_norm=config['layer_norm'], activation=config['activation'], seed=seed,
                                  reuse=reuse, dtype=dtype, name='{}_{}'.format(name, i))
        elif config['cell_type'] == 'tflstm':

            act = get_activation(config['activation'])

            if config['layer_norm']:
                cell = LayerNormBasicLSTMCell(num_units=units, activation=act, layer_norm=config['layer_norm'],
                                              reuse=reuse)
            elif config['activation'] != 'tanh':
                # No layer norm but a non-default activation: use the plain LSTMCell.
                cell = LSTMCell(num_units=units, activation=act, reuse=reuse)
            else:
                # No layer norm and tanh activation: use the faster fused LSTMBlockCell.
                cell = LSTMBlockCell(num_units=units)
        cell_list.append(cell)

    return cell_list 
Developer: JoergFranke | Project: ADNC | Lines: 23 | Source: controller.py
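For context, a hypothetical config dict that would route get_rnn_cell_list through the LayerNormBasicLSTMCell branch might look like this; the key names are taken from the code above, but the values are illustrative.

# Hypothetical config; only the keys read by get_rnn_cell_list are shown.
config = {
    'num_units': [64, 64],   # one stacked layer per entry
    'cell_type': 'tflstm',   # use the tf.contrib.rnn cells
    'layer_norm': True,      # selects LayerNormBasicLSTMCell
    'activation': 'tanh',
}
cell_list = get_rnn_cell_list(config, name='controller')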

Example 2: decoder

# Required import: from tensorflow.contrib import rnn
# Or equivalently: from tensorflow.contrib.rnn import LayerNormBasicLSTMCell
def decoder(x, decoder_inputs, keep_prob, sequence_length, memory, memory_length, first_attention):
    with tf.variable_scope("Decoder") as scope:
        label_embeddings = tf.get_variable(name="embeddings", shape=[n_classes, embedding_size], dtype=tf.float32)
        train_inputs_embedded = tf.nn.embedding_lookup(label_embeddings, decoder_inputs)
        lstm = rnn.LayerNormBasicLSTMCell(n_hidden, dropout_keep_prob=keep_prob)  # layer-norm LSTM with recurrent dropout
        output_l = layers_core.Dense(n_classes, use_bias=True)
        # Training path: teacher forcing with Bahdanau attention over the encoder memory.
        encoder_state = rnn.LSTMStateTuple(x, x)
        attention_mechanism = BahdanauAttention(embedding_size, memory=memory, memory_sequence_length=memory_length)
        cell = AttentionWrapper(lstm, attention_mechanism, output_attention=False)
        cell_state = cell.zero_state(dtype=tf.float32, batch_size=train_batch_size)
        cell_state = cell_state.clone(cell_state=encoder_state, attention=first_attention)
        train_helper = TrainingHelper(train_inputs_embedded, sequence_length)
        train_decoder = BasicDecoder(cell, train_helper, cell_state, output_layer=output_l)
        decoder_outputs_train, decoder_state_train, decoder_seq_train = dynamic_decode(train_decoder, impute_finished=True)
        # Inference path: tile memory, lengths, and initial attention across beams for beam search.
        tiled_inputs = tile_batch(memory, multiplier=beam_width)
        tiled_sequence_length = tile_batch(memory_length, multiplier=beam_width)
        tiled_first_attention = tile_batch(first_attention, multiplier=beam_width)
        attention_mechanism = BahdanauAttention(embedding_size, memory=tiled_inputs, memory_sequence_length=tiled_sequence_length)
        x2 = tile_batch(x, beam_width)
        encoder_state2 = rnn.LSTMStateTuple(x2, x2)
        cell = AttentionWrapper(lstm, attention_mechanism, output_attention=False)
        cell_state = cell.zero_state(dtype=tf.float32, batch_size=test_batch_size * beam_width)
        cell_state = cell_state.clone(cell_state=encoder_state2, attention=tiled_first_attention)
        infer_decoder = BeamSearchDecoder(cell, embedding=label_embeddings, start_tokens=[GO] * test_len, end_token=EOS,
                                          initial_state=cell_state, beam_width=beam_width, output_layer=output_l)
        decoder_outputs_infer, decoder_state_infer, decoder_seq_infer = dynamic_decode(infer_decoder, maximum_iterations=4)
        return decoder_outputs_train, decoder_outputs_infer, decoder_state_infer 
Developer: thunlp | Project: Auto_CLIWC | Lines: 29 | Source: train_liwc.py

Example 3: _get_rnn_cell

# Required import: from tensorflow.contrib import rnn
# Or equivalently: from tensorflow.contrib.rnn import LayerNormBasicLSTMCell
def _get_rnn_cell(hparams):
  if hparams.rnn_type == "lstm":
    rnn_cell = tf.nn.rnn_cell.BasicLSTMCell
  elif hparams.rnn_type == "lstm_layernorm":
    rnn_cell = contrib_rnn.LayerNormBasicLSTMCell
  else:
    # Fail fast instead of hitting a NameError on an unknown rnn_type.
    raise ValueError("Unknown rnn_type %s!" % hparams.rnn_type)
  return tf.nn.rnn_cell.DropoutWrapper(
      rnn_cell(hparams.hidden_size),
      output_keep_prob=1.0 - hparams.dropout)
Developer: tensorflow | Project: tensor2tensor | Lines: 10 | Source: vqa_attention.py
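Since the function only reads hparams.rnn_type, hparams.hidden_size, and hparams.dropout, a hypothetical call could build the hyperparameters with tf.contrib.training.HParams; the values below are illustrative.

# Hypothetical hyperparameters exercising the layer-norm branch.
hparams = tf.contrib.training.HParams(
    rnn_type="lstm_layernorm", hidden_size=512, dropout=0.1)
cell = _get_rnn_cell(hparams)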

Example 4: _single_cell

# Required import: from tensorflow.contrib import rnn
# Or equivalently: from tensorflow.contrib.rnn import LayerNormBasicLSTMCell
def _single_cell(unit_type,
                 num_units,
                 forget_bias,
                 dropout,
                 mode,
                 residual_connection=False,
                 residual_fn=None,
                 trainable=True):
  """Create an instance of a single RNN cell."""
  # dropout (= 1 - keep_prob) is set to 0 during eval and infer
  dropout = dropout if mode == tf.estimator.ModeKeys.TRAIN else 0.0

  # Cell Type
  if unit_type == "lstm":
    single_cell = contrib_rnn.LSTMCell(
        num_units, forget_bias=forget_bias, trainable=trainable)
  elif unit_type == "gru":
    single_cell = contrib_rnn.GRUCell(num_units, trainable=trainable)
  elif unit_type == "layer_norm_lstm":
    single_cell = contrib_rnn.LayerNormBasicLSTMCell(
        num_units,
        forget_bias=forget_bias,
        layer_norm=True,
        trainable=trainable)
  elif unit_type == "nas":
    single_cell = contrib_rnn.NASCell(num_units, trainable=trainable)
  else:
    raise ValueError("Unknown unit type %s!" % unit_type)

  # Dropout (= 1 - keep_prob).
  if dropout > 0.0:
    single_cell = contrib_rnn.DropoutWrapper(
        cell=single_cell, input_keep_prob=(1.0 - dropout))

  # Residual.
  if residual_connection:
    single_cell = contrib_rnn.ResidualWrapper(
        single_cell, residual_fn=residual_fn)

  return single_cell 
Developer: google-research | Project: language | Lines: 42 | Source: model_utils.py
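As a hedged usage sketch: _single_cell returns one wrapped cell, so building a stacked RNN from it might look like the following; all argument values are illustrative.

# Stack two layer-norm LSTM cells; dropout is only applied in TRAIN mode.
cells = [_single_cell(unit_type="layer_norm_lstm",
                      num_units=512,
                      forget_bias=1.0,
                      dropout=0.2,
                      mode=tf.estimator.ModeKeys.TRAIN)
         for _ in range(2)]
stacked_cell = contrib_rnn.MultiRNNCell(cells)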

Example 5: __init__

# Required import: from tensorflow.contrib import rnn
# Or equivalently: from tensorflow.contrib.rnn import LayerNormBasicLSTMCell
def __init__(self, ob_space, ac_space, lstm_size=256, **kwargs):
    self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

    rank = len(ob_space)

    if rank == 3:  # pixel input
        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
    elif rank == 1:  # plain features
        #x = tf.nn.elu(linear(x, 256, "l1", normalized_columns_initializer(0.01)))
        pass
    else:
        raise TypeError("observation space must have rank 1 or 3, got %d" % rank)

    # introduce a "fake" batch dimension of 1 after flatten so that we can do LSTM over time dim
    x = tf.expand_dims(flatten(x), [0])

    size = lstm_size
    lnlstm = rnn.LayerNormBasicLSTMCell(size)
    self.state_size = lnlstm.state_size
    step_size = tf.shape(self.x)[:1]

    c_init = np.zeros((1, lnlstm.state_size.c), np.float32)
    h_init = np.zeros((1, lnlstm.state_size.h), np.float32)
    self.state_init = [c_init, h_init]
    c_in = tf.placeholder(tf.float32, [1, lnlstm.state_size.c])
    h_in = tf.placeholder(tf.float32, [1, lnlstm.state_size.h])
    self.state_in = [c_in, h_in]

    state_in = rnn.LSTMStateTuple(c_in, h_in)
    lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
        lnlstm, x, initial_state=state_in, sequence_length=step_size,
        time_major=False)
    lstm_c, lstm_h = lstm_state
    x = tf.reshape(lstm_outputs, [-1, size])
    self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
    self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
    self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
    self.sample = categorical_sample(self.logits, ac_space)[0, :]
    self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
Developer: gsastry | Project: human-rl | Lines: 42 | Source: model.py


Note: The tensorflow.contrib.rnn.LayerNormBasicLSTMCell method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not republish without permission.