This article collects typical usage examples of the Python method tensorflow.contrib.rnn.LayerNormBasicLSTMCell. If you are unsure what rnn.LayerNormBasicLSTMCell does or how to use it, the curated code examples below may help. You can also read further about its containing module, tensorflow.contrib.rnn.
Five code examples of rnn.LayerNormBasicLSTMCell are shown below, sorted by popularity by default.
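Before the examples, a minimal self-contained sketch of the cell itself may help. It assumes TensorFlow 1.x (where tf.contrib is still available); the tensor shapes are illustrative:

import tensorflow as tf
from tensorflow.contrib import rnn

# Layer-normalized LSTM; dropout_keep_prob applies recurrent dropout
# to the candidate cell state.
cell = rnn.LayerNormBasicLSTMCell(num_units=64, layer_norm=True, dropout_keep_prob=0.9)
inputs = tf.placeholder(tf.float32, [None, 20, 32])  # [batch, time, features]
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)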
Example 1: get_rnn_cell_list
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LayerNormBasicLSTMCell [as alias]
def get_rnn_cell_list(config, name, reuse=False, seed=123, dtype=tf.float32):
    cell_list = []
    for i, units in enumerate(config['num_units']):
        cell = None
        if config['cell_type'] == 'clstm':
            cell = CustomLSTMCell(units, layer_norm=config['layer_norm'], activation=config['activation'], seed=seed,
                                  reuse=reuse, dtype=dtype, name='{}_{}'.format(name, i))
        elif config['cell_type'] == 'tflstm':
            act = get_activation(config['activation'])
            if config['layer_norm']:
                cell = LayerNormBasicLSTMCell(num_units=units, activation=act, layer_norm=config['layer_norm'],
                                              reuse=reuse)
            elif config['layer_norm'] == False and config['activation'] != 'tanh':
                cell = LSTMCell(num_units=units, activation=act, reuse=reuse)
            else:
                cell = LSTMBlockCell(num_units=units)
        cell_list.append(cell)
    return cell_list
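A hedged usage sketch follows; the config keys are inferred from the function body, and CustomLSTMCell and get_activation are project-local helpers not shown here:

# Hypothetical config; keys inferred from the function above.
config = {'num_units': [128, 128], 'cell_type': 'tflstm',
          'layer_norm': True, 'activation': 'tanh'}
cells = get_rnn_cell_list(config, name='encoder')
stacked = tf.contrib.rnn.MultiRNNCell(cells)  # stack the per-layer cells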
Example 2: decoder
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LayerNormBasicLSTMCell [as alias]
def decoder(x, decoder_inputs, keep_prob, sequence_length, memory, memory_length, first_attention):
    with tf.variable_scope("Decoder") as scope:
        label_embeddings = tf.get_variable(name="embeddings", shape=[n_classes, embedding_size], dtype=tf.float32)
        train_inputs_embedded = tf.nn.embedding_lookup(label_embeddings, decoder_inputs)
        lstm = rnn.LayerNormBasicLSTMCell(n_hidden, dropout_keep_prob=keep_prob)
        output_l = layers_core.Dense(n_classes, use_bias=True)
        # Training path: teacher-forced decoding with Bahdanau attention over the encoder memory.
        encoder_state = rnn.LSTMStateTuple(x, x)
        attention_mechanism = BahdanauAttention(embedding_size, memory=memory, memory_sequence_length=memory_length)
        cell = AttentionWrapper(lstm, attention_mechanism, output_attention=False)
        cell_state = cell.zero_state(dtype=tf.float32, batch_size=train_batch_size)
        cell_state = cell_state.clone(cell_state=encoder_state, attention=first_attention)
        train_helper = TrainingHelper(train_inputs_embedded, sequence_length)
        train_decoder = BasicDecoder(cell, train_helper, cell_state, output_layer=output_l)
        decoder_outputs_train, decoder_state_train, decoder_seq_train = dynamic_decode(train_decoder, impute_finished=True)
        # Inference path: tile the memory and initial state across beams for beam search.
        tiled_inputs = tile_batch(memory, multiplier=beam_width)
        tiled_sequence_length = tile_batch(memory_length, multiplier=beam_width)
        tiled_first_attention = tile_batch(first_attention, multiplier=beam_width)
        attention_mechanism = BahdanauAttention(embedding_size, memory=tiled_inputs, memory_sequence_length=tiled_sequence_length)
        x2 = tile_batch(x, beam_width)
        encoder_state2 = rnn.LSTMStateTuple(x2, x2)
        cell = AttentionWrapper(lstm, attention_mechanism, output_attention=False)
        cell_state = cell.zero_state(dtype=tf.float32, batch_size=test_batch_size * beam_width)
        cell_state = cell_state.clone(cell_state=encoder_state2, attention=tiled_first_attention)
        infer_decoder = BeamSearchDecoder(cell, embedding=label_embeddings, start_tokens=[GO] * test_len, end_token=EOS,
                                          initial_state=cell_state, beam_width=beam_width, output_layer=output_l)
        decoder_outputs_infer, decoder_state_infer, decoder_seq_infer = dynamic_decode(infer_decoder, maximum_iterations=4)
        return decoder_outputs_train, decoder_outputs_infer, decoder_state_infer
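This snippet also reads several module-level constants defined elsewhere in the original file (n_classes, embedding_size, n_hidden, train_batch_size, test_batch_size, beam_width, test_len, GO, EOS). All the seq2seq symbols it uses exist in TF 1.x contrib; a plausible import block (the exact import style in the original file is an assumption):

from tensorflow.contrib import rnn
from tensorflow.contrib.seq2seq import (AttentionWrapper, BahdanauAttention,
                                        BasicDecoder, BeamSearchDecoder,
                                        TrainingHelper, dynamic_decode, tile_batch)
from tensorflow.python.layers import core as layers_core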
Example 3: _get_rnn_cell
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LayerNormBasicLSTMCell [as alias]
def _get_rnn_cell(hparams):
    if hparams.rnn_type == "lstm":
        rnn_cell = tf.nn.rnn_cell.BasicLSTMCell
    elif hparams.rnn_type == "lstm_layernorm":
        rnn_cell = contrib_rnn.LayerNormBasicLSTMCell
    else:
        # Guard against an unbound rnn_cell for unrecognized types.
        raise ValueError("Unknown rnn_type %s" % hparams.rnn_type)
    return tf.nn.rnn_cell.DropoutWrapper(
        rnn_cell(hparams.hidden_size),
        output_keep_prob=1.0 - hparams.dropout)
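A minimal sketch of a call, assuming an hparams object exposing the three fields the function reads (tf.contrib.training.HParams is one real way to build such an object):

hparams = tf.contrib.training.HParams(rnn_type="lstm_layernorm", hidden_size=128, dropout=0.2)
cell = _get_rnn_cell(hparams)  # LayerNormBasicLSTMCell wrapped in DropoutWrapper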
Example 4: _single_cell
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LayerNormBasicLSTMCell [as alias]
def _single_cell(unit_type,
                 num_units,
                 forget_bias,
                 dropout,
                 mode,
                 residual_connection=False,
                 residual_fn=None,
                 trainable=True):
    """Create an instance of a single RNN cell."""
    # dropout (= 1 - keep_prob) is set to 0 during eval and infer
    dropout = dropout if mode == tf.estimator.ModeKeys.TRAIN else 0.0
    # Cell type.
    if unit_type == "lstm":
        single_cell = contrib_rnn.LSTMCell(
            num_units, forget_bias=forget_bias, trainable=trainable)
    elif unit_type == "gru":
        single_cell = contrib_rnn.GRUCell(num_units, trainable=trainable)
    elif unit_type == "layer_norm_lstm":
        single_cell = contrib_rnn.LayerNormBasicLSTMCell(
            num_units,
            forget_bias=forget_bias,
            layer_norm=True,
            trainable=trainable)
    elif unit_type == "nas":
        single_cell = contrib_rnn.NASCell(num_units, trainable=trainable)
    else:
        raise ValueError("Unknown unit type %s!" % unit_type)
    # Dropout (= 1 - keep_prob).
    if dropout > 0.0:
        single_cell = contrib_rnn.DropoutWrapper(
            cell=single_cell, input_keep_prob=(1.0 - dropout))
    # Residual connection.
    if residual_connection:
        single_cell = contrib_rnn.ResidualWrapper(
            single_cell, residual_fn=residual_fn)
    return single_cell
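A hedged example call that builds a layer-normalized training cell with a residual connection (the argument values are illustrative):

cell = _single_cell(unit_type="layer_norm_lstm",
                    num_units=256,
                    forget_bias=1.0,
                    dropout=0.2,
                    mode=tf.estimator.ModeKeys.TRAIN,
                    residual_connection=True)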
Example 5: __init__
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import LayerNormBasicLSTMCell [as alias]
def __init__(self, ob_space, ac_space, lstm_size=256, **kwargs):
    self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))
    rank = len(ob_space)
    if rank == 3:  # pixel input
        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
    elif rank == 1:  # plain features
        # x = tf.nn.elu(linear(x, 256, "l1", normalized_columns_initializer(0.01)))
        pass
    else:
        raise TypeError("observation space must have rank 1 or 3, got %d" % rank)
    # Introduce a "fake" batch dimension of 1 after flattening so the LSTM
    # can run over the time dimension.
    x = tf.expand_dims(flatten(x), [0])
    size = lstm_size
    lnlstm = rnn.LayerNormBasicLSTMCell(size)
    self.state_size = lnlstm.state_size
    step_size = tf.shape(self.x)[:1]
    c_init = np.zeros((1, lnlstm.state_size.c), np.float32)
    h_init = np.zeros((1, lnlstm.state_size.h), np.float32)
    self.state_init = [c_init, h_init]
    c_in = tf.placeholder(tf.float32, [1, lnlstm.state_size.c])
    h_in = tf.placeholder(tf.float32, [1, lnlstm.state_size.h])
    self.state_in = [c_in, h_in]
    state_in = rnn.LSTMStateTuple(c_in, h_in)
    lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
        lnlstm, x, initial_state=state_in, sequence_length=step_size,
        time_major=False)
    lstm_c, lstm_h = lstm_state
    x = tf.reshape(lstm_outputs, [-1, size])
    self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
    self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
    self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
    self.sample = categorical_sample(self.logits, ac_space)[0, :]
    self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
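A hedged sketch of stepping this policy one observation at a time inside a running session (model, sess, and obs are assumed to exist; conv2d, linear, flatten, normalized_columns_initializer, and categorical_sample are project-local helpers not shown above):

c, h = model.state_init  # zero LSTM state at the start of an episode
action, (c, h) = sess.run(
    [model.sample, model.state_out],
    feed_dict={model.x: [obs], model.state_in[0]: c, model.state_in[1]: h})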