This page collects typical usage examples of the Python method tensorflow.contrib.rnn.GRUCell. If you are unsure what rnn.GRUCell does, how to call it, or want to see it used in real code, the curated samples below should help. You can also browse the rest of the tensorflow.contrib.rnn module for related usage examples.
The 15 code examples of rnn.GRUCell below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
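Before the examples, here is a minimal self-contained sketch (TF 1.x, with hypothetical shapes and sizes) of the pattern most of them build on: construct a GRUCell and unroll it over a batch of padded sequences with tf.nn.dynamic_rnn.

import tensorflow as tf
from tensorflow.contrib import rnn

# Hypothetical input: a padded batch of variable-length sequences, 16 features each.
inputs = tf.placeholder(tf.float32, [None, None, 16])   # [batch, time, features]
lengths = tf.placeholder(tf.int32, [None])              # true length of each sequence

cell = rnn.GRUCell(num_units=64)
outputs, final_state = tf.nn.dynamic_rnn(
    cell, inputs, sequence_length=lengths, dtype=tf.float32)
# outputs: [batch, time, 64]; final_state: [batch, 64]. For a GRU the state is a
# single tensor (no (c, h) tuple as with an LSTM).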
Example 1: __init__
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def __init__(self,
             num_units,
             tied=False,
             non_recurrent_fn=None,
             state_is_tuple=True,
             output_is_tuple=True):
    # A 2-D grid of GRU cells; cell_fn tells the grid how to build each cell.
    super(Grid2GRUCell, self).__init__(
        num_units=num_units,
        num_dims=2,
        input_dims=0,
        output_dims=0,
        priority_dims=0,
        tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n: rnn.GRUCell(num_units=n),
        non_recurrent_fn=non_recurrent_fn,
        state_is_tuple=state_is_tuple,
        output_is_tuple=output_is_tuple)

# Helpers
Example 2: ReferenceEncoder
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def ReferenceEncoder(inputs, input_lengths, filters, kernel_size, strides,
                     is_training, scope='reference_encoder'):
    with tf.variable_scope(scope):
        # Add a channel axis so the input can go through 2-D convolutions.
        reference_output = tf.expand_dims(inputs, axis=-1)
        for i, channel in enumerate(filters):
            reference_output = conv2d(reference_output, channel, kernel_size,
                                      strides, tf.nn.relu, is_training,
                                      'conv2d_{}'.format(i))
        # Flatten the last two axes: [batch, time, freq, channels] -> [batch, time, freq*channels].
        shape = shape_list(reference_output)
        reference_output = tf.reshape(reference_output,
                                      shape[:-2] + [shape[2] * shape[3]])
        # Summarize the sequence with a GRU; the final state is the reference embedding.
        encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
            cell=GRUCell(128),
            inputs=reference_output,
            sequence_length=input_lengths,
            dtype=tf.float32)
        return encoder_state
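The only fiddly step above is the reshape between the convolution stack and the GRU. Here is a hedged standalone sketch of that step with hypothetical shapes (shape_list in the original presumably mixes static and dynamic dimensions in the same way):

import tensorflow as tf

conv_out = tf.placeholder(tf.float32, [None, None, 16, 32])  # [batch, time, freq, channels]
dynamic = tf.shape(conv_out)                                 # batch and time are dynamic
static = conv_out.get_shape().as_list()                      # freq and channels are static
flat = tf.reshape(conv_out, [dynamic[0], dynamic[1], static[2] * static[3]])
# flat: [batch, time, 512] -- a 3-D tensor, as tf.nn.dynamic_rnn requires.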
Example 3: __init__
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def __init__(self, state_size, num_layers, dropout_prob, base_cell):
    """Define the cell by composing/wrapping with tf.contrib.rnn functions.

    Args:
        state_size: number of units in the cell.
        num_layers: how many cells to include in the MultiRNNCell.
        dropout_prob: probability of a node being dropped.
        base_cell: (str) name of the underlying cell to use (e.g. 'GRUCell').
    """
    self._state_size = state_size
    self._num_layers = num_layers
    self._dropout_prob = dropout_prob
    self._base_cell = base_cell

    def single_cell():
        """Convert the cell name (str) to a class and instantiate it."""
        return getattr(tf.contrib.rnn, base_cell)(num_units=state_size)

    if num_layers == 1:
        self._cell = single_cell()
    else:
        self._cell = MultiRNNCell(
            [single_cell() for _ in range(num_layers)])
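For clarity, a standalone sketch of the getattr dispatch used above (assumed sizes): the cell class is looked up on tf.contrib.rnn by its string name, so any cell exposed there ('GRUCell', 'LSTMCell', 'BasicRNNCell', ...) works unchanged.

import tensorflow as tf

def make_cell(base_cell, state_size, num_layers):
    # Resolve the class by name and instantiate one cell per layer.
    def single_cell():
        return getattr(tf.contrib.rnn, base_cell)(num_units=state_size)
    if num_layers == 1:
        return single_cell()
    return tf.contrib.rnn.MultiRNNCell([single_cell() for _ in range(num_layers)])

cell = make_cell('GRUCell', state_size=128, num_layers=2)  # hypothetical sizes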
Example 4: _create_rnn_cell
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def _create_rnn_cell(self):
    """Creates a single RNN cell according to the architecture of this RNN.

    Returns
    -------
    rnn cell
        A single RNN cell according to the architecture of this RNN.
    """
    keep_prob = 1.0 if self.keep_prob is None else self.keep_prob
    if self.cell_type == CellType.GRU:
        return DropoutWrapper(GRUCell(self.num_units), keep_prob, keep_prob)
    elif self.cell_type == CellType.LSTM:
        return DropoutWrapper(LSTMCell(self.num_units), keep_prob, keep_prob)
    else:
        raise ValueError("unknown cell type: {}".format(self.cell_type))
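A minimal standalone version of the GRU branch, with assumed sizes. Note that DropoutWrapper's second and third positional arguments are input_keep_prob and output_keep_prob, so the method above applies the same keep probability on both sides of the cell.

import tensorflow as tf
from tensorflow.contrib.rnn import GRUCell, DropoutWrapper

keep_prob = 0.8  # hypothetical keep probability
cell = DropoutWrapper(GRUCell(256), keep_prob, keep_prob)
# Equivalent with explicit keywords:
cell = DropoutWrapper(GRUCell(256),
                      input_keep_prob=keep_prob,
                      output_keep_prob=keep_prob)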
Example 5: answer_module
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def answer_module(self):
    """Answer module: generate an answer from the final memory vector.

    Inputs:
        hidden state from the episodic memory module: [batch_size, hidden_size]
        question: [batch_size, embedding_size]
    """
    # Decode a sequence of tokens ("x1 x2 x3 x4 ...") or just a single answer.
    steps = self.sequence_length if self.decode_with_sequences else 1
    a = self.m_T  # initial hidden state
    # TODO: usually initialized with the embedding of a special '<GO>' token,
    # which you can pass in from outside instead of zeros.
    y_pred = tf.zeros((self.batch_size, self.hidden_size))
    logits_list = []
    logits_return = None
    cell = rnn.GRUCell(self.hidden_size)  # create once so weights are shared across steps
    for i in range(steps):
        y_previous_q = tf.concat([y_pred, self.query_embedding], axis=1)  # [batch_size, hidden_size+embedding_size]
        _, a = cell(y_previous_q, a)
        logits = tf.layers.dense(a, units=self.num_classes)  # [batch_size, num_classes]
        logits_list.append(logits)
    if self.decode_with_sequences:  # need the full sequence of predictions
        logits_return = tf.stack(logits_list, axis=1)  # [batch_size, sequence_length, num_classes]
    else:  # only need a single answer, not a sequence
        logits_return = logits_list[0]  # [batch_size, num_classes]
    return logits_return
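The module above drives the cell by hand instead of through dynamic_rnn. A hedged sketch of that calling convention (hypothetical sizes; in recent TF 1.x a single cell instance reuses its weights across calls): a cell is callable and maps (input, state) to (output, new_state).

import tensorflow as tf
from tensorflow.contrib import rnn

batch_size, hidden_size = 32, 100                 # hypothetical sizes
cell = rnn.GRUCell(hidden_size)                   # one instance => one weight set
state = cell.zero_state(batch_size, tf.float32)   # initial hidden state
step_input = tf.zeros([batch_size, hidden_size])  # stand-in for the real input

outputs = []
for _ in range(3):                                # unroll a few decode steps
    output, state = cell(step_input, state)       # for a GRU, output == new state
    outputs.append(output)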
Example 6: __init__
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def __init__(self,
             cell,
             attention_mechanism,
             attention_layer_size=None,
             alignment_history=False,
             cell_input_fn=None,
             output_attention=True,
             initial_cell_state=None,
             name=None):
    if not isinstance(cell, (rnn.LSTMCell, rnn.GRUCell)):
        raise ValueError('SyncAttentionWrapper only supports LSTMCell and GRUCell, '
                         'Got: {}'.format(cell))
    super(SyncAttentionWrapper, self).__init__(
        cell,
        attention_mechanism,
        attention_layer_size=attention_layer_size,
        alignment_history=alignment_history,
        cell_input_fn=cell_input_fn,
        output_attention=output_attention,
        initial_cell_state=initial_cell_state,
        name=name)
Example 7: bidirectional_RNN
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def bidirectional_RNN(self, num_hidden, inputs):
    """
    desc: create a bidirectional RNN layer
    args:
        num_hidden: number of hidden units
        inputs: input word or sentence
    returns:
        forward and backward encoder outputs, concatenated on the feature axis
    """
    with tf.name_scope("bidirectional_RNN"):
        encoder_fw_cell = rnn.GRUCell(num_hidden)
        encoder_bw_cell = rnn.GRUCell(num_hidden)
        ((encoder_fw_outputs, encoder_bw_outputs), (_, _)) = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=encoder_fw_cell,
            cell_bw=encoder_bw_cell,
            inputs=inputs,
            dtype=tf.float32,
            time_major=True)
        encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
        return encoder_outputs
# end
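The same layer in a self-contained, batch-major form (assumed sizes; the example above is time-major): concatenating the forward and backward outputs on the last axis doubles the feature dimension.

import tensorflow as tf
from tensorflow.contrib import rnn

inputs = tf.placeholder(tf.float32, [None, None, 50])  # [batch, time, features]
lengths = tf.placeholder(tf.int32, [None])

(fw_out, bw_out), _ = tf.nn.bidirectional_dynamic_rnn(
    cell_fw=rnn.GRUCell(100),
    cell_bw=rnn.GRUCell(100),
    inputs=inputs,
    sequence_length=lengths,
    dtype=tf.float32)
encoder_outputs = tf.concat((fw_out, bw_out), 2)       # [batch, time, 200]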
Example 8: __init__
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def __init__(self, ob_space, ac_space, size=256, **kwargs):
    self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))
    for i in range(4):
        x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
    # introduce a "fake" batch dimension of 1 after flatten so that we can do GRU over time dim
    x = tf.expand_dims(flatten(x), 1)

    gru = rnn.GRUCell(size)

    h_init = np.zeros((1, size), np.float32)
    self.state_init = [h_init]
    h_in = tf.placeholder(tf.float32, [1, size])
    self.state_in = [h_in]

    # The time dimension carries the rollout, so the (single) sequence length is
    # the number of observations fed in, not the cell size.
    gru_outputs, gru_state = tf.nn.dynamic_rnn(
        gru, x, initial_state=h_in, sequence_length=tf.shape(self.x)[:1],
        time_major=True)
    x = tf.reshape(gru_outputs, [-1, size])
    self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
    self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
    self.state_out = [gru_state[:1]]
    self.sample = categorical_sample(self.logits, ac_space)[0, :]
    self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
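The point of state_in/state_out above is to let a reinforcement-learning rollout carry the GRU state across single-step calls. A hedged, self-contained sketch of that feed pattern (hypothetical shapes, random observations standing in for an environment):

import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn

size = 256
x = tf.placeholder(tf.float32, [None, 1, size])   # [time, batch=1, features]
h_in = tf.placeholder(tf.float32, [1, size])      # state carried between calls
outputs, h_out = tf.nn.dynamic_rnn(
    rnn.GRUCell(size), x, initial_state=h_in,
    sequence_length=tf.shape(x)[:1], time_major=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    state = np.zeros((1, size), np.float32)       # analogue of state_init
    for _ in range(5):                            # one environment step at a time
        obs = np.random.randn(1, 1, size).astype(np.float32)
        out, state = sess.run([outputs, h_out], {x: obs, h_in: state})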
Example 9: build_cell
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def build_cell(self, name=None):
    if self.hparams.cell_type == 'linear':
        cell = BasicRNNCell(self.hparams.hidden_units,
                            activation=tf.identity, name=name)
    elif self.hparams.cell_type == 'tanh':
        cell = BasicRNNCell(self.hparams.hidden_units,
                            activation=tf.tanh, name=name)
    elif self.hparams.cell_type == 'relu':
        cell = BasicRNNCell(self.hparams.hidden_units,
                            activation=tf.nn.relu, name=name)
    elif self.hparams.cell_type == 'gru':
        cell = GRUCell(self.hparams.hidden_units, name=name)
    elif self.hparams.cell_type == 'lstm':
        cell = LSTMCell(self.hparams.hidden_units, name=name, state_is_tuple=False)
    else:
        raise ValueError('Provided cell type not supported.')
    return cell
Example 10: biLSTM_layer
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def biLSTM_layer(self):
    # Despite the scope name, this layer uses GRU cells in both directions.
    with tf.variable_scope("bi-LSTM") as scope:
        lstm_cell = {}
        for direction in ["forward", "backward"]:
            with tf.variable_scope(direction):
                lstm_cell[direction] = rnn.GRUCell(
                    num_units=self.lstm_dim,
                    # use_peepholes=True,          # LSTM-only options, kept for reference
                    # initializer=self.initializer,
                    # state_is_tuple=True
                )
        outputs, final_states = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=lstm_cell['forward'],
            cell_bw=lstm_cell['backward'],
            inputs=self.model_inputs,
            sequence_length=self.length,
            dtype=tf.float32,
        )
        self.lstm_outputs = tf.concat(outputs, axis=2)
Example 11: separable_rnn
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def separable_rnn(images, num_filters_out, scope=None, keep_prob=1.0, cellType='LSTM'):
    """Run bidirectional RNNs first horizontally, then vertically.

    Args:
        images: (num_images, height, width, depth) tensor
        num_filters_out: output layer depth
        scope: optional scope name
        keep_prob: dropout keep probability
        cellType: 'LSTM' or 'GRU'

    Returns:
        (num_images, height, width, num_filters_out) tensor
    """
    with tf.variable_scope(scope, "SeparableLstm", [images]):
        with tf.variable_scope("horizontal"):
            if 'LSTM' in cellType:
                cell_fw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
                cell_bw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
            if 'GRU' in cellType:
                cell_fw = GRUCell(num_filters_out)
                cell_bw = GRUCell(num_filters_out)
            hidden = horizontal_cell(images, num_filters_out, cell_fw, cell_bw,
                                     keep_prob=keep_prob, scope=scope)
        with tf.variable_scope("vertical"):
            # Swap height and width so the same horizontal pass runs vertically.
            transposed = tf.transpose(hidden, [0, 2, 1, 3])
            if 'LSTM' in cellType:
                cell_fw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
                cell_bw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
            if 'GRU' in cellType:
                cell_fw = GRUCell(num_filters_out)
                cell_bw = GRUCell(num_filters_out)
            output_transposed = horizontal_cell(transposed, num_filters_out, cell_fw, cell_bw,
                                                keep_prob=keep_prob, scope=scope)
        output = tf.transpose(output_transposed, [0, 2, 1, 3])
        return output
Example 12: __build
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def __build(self):
    w_fc_in = self.__weight_variable([self.nClasses + 1, 128], 'w_fc_in')
    b_fc_in = self.__bias_variable([128], 'b_fc_in')
    w_fc_o = self.__weight_variable([self.rnn_size, 128], 'w_fc_o')
    b_fc_o = self.__bias_variable([128], 'b_fc_o')
    # (the original reused the names 'w_fc_in'/'b_fc_in' for the output weights,
    # apparently a copy-paste slip; distinct names are used here)
    w_output_action = self.__weight_variable([128, self.nClasses], 'w_output_action')
    b_output_action = self.__bias_variable([self.nClasses], 'b_output_action')
    w_output_len = self.__weight_variable([128, 2], 'w_output_len')
    b_output_len = self.__bias_variable([2], 'b_output_len')

    # input fc
    x = tf.reshape(self.input_seq, [-1, self.nClasses + 1])
    h1 = tf.nn.relu(tf.matmul(x, w_fc_in) + b_fc_in)
    h1 = tf.reshape(h1, [-1, self.max_seq_sz, 128])
    # rnn: static_rnn expects a list of per-timestep tensors
    h1 = tf.unstack(h1, axis=1)
    def get_cell():
        return rnn.GRUCell(self.rnn_size)
    gru_cell = rnn.MultiRNNCell([get_cell() for _ in range(self.num_layers)])
    outputs, states = rnn.static_rnn(gru_cell, h1, dtype=tf.float32)
    # output fc
    h2 = tf.nn.relu(tf.matmul(outputs[-1], w_fc_o) + b_fc_o)
    # output heads: action label and segment length
    output_label = tf.matmul(h2, w_output_action) + b_output_action
    output_len = tf.nn.relu(tf.matmul(h2, w_output_len) + b_output_len)

    self.prediction = tf.concat([output_label, output_len], 1)
    self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2, max_to_keep=100)
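A compact standalone sketch of the stacked-GRU core above (assumed sizes): static_rnn consumes a Python list of per-timestep tensors, which is why the input is tf.unstack'ed along the time axis first.

import tensorflow as tf
from tensorflow.contrib import rnn

batch, steps, feat, rnn_size, num_layers = 8, 10, 128, 64, 2  # hypothetical
x = tf.placeholder(tf.float32, [batch, steps, feat])
inputs = tf.unstack(x, axis=1)          # list of `steps` tensors, each [batch, feat]

stack = rnn.MultiRNNCell([rnn.GRUCell(rnn_size) for _ in range(num_layers)])
outputs, states = rnn.static_rnn(stack, inputs, dtype=tf.float32)
last = outputs[-1]                      # [batch, rnn_size], final timestep's output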
Example 13: _init_word_encoder
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def _init_word_encoder(self):
    with tf.variable_scope('word-encoder') as scope:
        # Flatten (batch, sentences) into one axis of word sequences.
        word_inputs = tf.reshape(self.embedded_inputs, [-1, self.max_word_length, self.emb_size])
        word_lengths = tf.reshape(self.word_lengths, [-1])

        # word encoder: bidirectional GRU with trainable initial states
        cell_fw = rnn.GRUCell(self.cell_dim, name='cell_fw')
        cell_bw = rnn.GRUCell(self.cell_dim, name='cell_bw')

        init_state_fw = tf.tile(tf.get_variable('init_state_fw',
                                                shape=[1, self.cell_dim],
                                                initializer=tf.constant_initializer(0)),
                                multiples=[get_shape(word_inputs)[0], 1])
        init_state_bw = tf.tile(tf.get_variable('init_state_bw',
                                                shape=[1, self.cell_dim],
                                                initializer=tf.constant_initializer(0)),
                                multiples=[get_shape(word_inputs)[0], 1])

        rnn_outputs, _ = bidirectional_rnn(cell_fw=cell_fw,
                                           cell_bw=cell_bw,
                                           inputs=word_inputs,
                                           input_lengths=word_lengths,
                                           initial_state_fw=init_state_fw,
                                           initial_state_bw=init_state_bw,
                                           scope=scope)

        word_outputs, word_att_weights = attention(inputs=rnn_outputs,
                                                   att_dim=self.att_dim,
                                                   sequence_lengths=word_lengths)
        self.word_outputs = tf.layers.dropout(word_outputs, self.dropout_rate,
                                              training=self.is_training)
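The trainable initial state is the distinctive trick here, so here it is isolated in a hedged sketch (assumed sizes): a [1, cell_dim] variable is tiled along the batch axis, giving every sequence the same learned starting state instead of zeros.

import tensorflow as tf
from tensorflow.contrib import rnn

cell_dim = 50                                            # hypothetical size
inputs = tf.placeholder(tf.float32, [None, None, 200])   # [batch, time, features]
batch = tf.shape(inputs)[0]

cell = rnn.GRUCell(cell_dim)
init_state = tf.tile(
    tf.get_variable('init_state', shape=[1, cell_dim],
                    initializer=tf.constant_initializer(0)),
    multiples=[batch, 1])                                # -> [batch, cell_dim]
outputs, _ = tf.nn.dynamic_rnn(cell, inputs, initial_state=init_state)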
Example 14: _init_sent_encoder
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def _init_sent_encoder(self):
    with tf.variable_scope('sent-encoder') as scope:
        sent_inputs = tf.reshape(self.word_outputs, [-1, self.max_sent_length, 2 * self.cell_dim])

        # sentence encoder: same bidirectional-GRU-plus-attention pattern as the word encoder
        cell_fw = rnn.GRUCell(self.cell_dim, name='cell_fw')
        cell_bw = rnn.GRUCell(self.cell_dim, name='cell_bw')

        init_state_fw = tf.tile(tf.get_variable('init_state_fw',
                                                shape=[1, self.cell_dim],
                                                initializer=tf.constant_initializer(0)),
                                multiples=[get_shape(sent_inputs)[0], 1])
        init_state_bw = tf.tile(tf.get_variable('init_state_bw',
                                                shape=[1, self.cell_dim],
                                                initializer=tf.constant_initializer(0)),
                                multiples=[get_shape(sent_inputs)[0], 1])

        rnn_outputs, _ = bidirectional_rnn(cell_fw=cell_fw,
                                           cell_bw=cell_bw,
                                           inputs=sent_inputs,
                                           input_lengths=self.sent_lengths,
                                           initial_state_fw=init_state_fw,
                                           initial_state_bw=init_state_bw,
                                           scope=scope)

        sent_outputs, sent_att_weights = attention(inputs=rnn_outputs,
                                                   att_dim=self.att_dim,
                                                   sequence_lengths=self.sent_lengths)
        self.sent_outputs = tf.layers.dropout(sent_outputs, self.dropout_rate,
                                              training=self.is_training)
Example 15: __init__
# Required import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import GRUCell [as alias]
def __init__(self, num_units, tied=False, non_recurrent_fn=None):
    # Older variant of Example 1: here cell_fn also receives the input size,
    # for TF versions in which GRUCell still accepted an input_size argument.
    super(Grid2GRUCell, self).__init__(
        num_units=num_units, num_dims=2,
        input_dims=0, output_dims=0, priority_dims=0, tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n, i: rnn.GRUCell(num_units=n, input_size=i),
        non_recurrent_fn=non_recurrent_fn)