This article collects typical usage examples of the Python method tensorflow.python.ops.rnn_cell._linear. If you are wondering how to use rnn_cell._linear in practice, or looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the module where this function is defined, tensorflow.python.ops.rnn_cell.
Seven code examples of rnn_cell._linear are shown below, sorted by popularity by default.
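Before the examples, it may help to sketch what _linear itself computes. The calls below match the pre-TF-1.0 signature _linear(args, output_size, bias, bias_start=0.0, scope=None): the input tensors are concatenated along their last dimension, multiplied by one weight matrix, and an optional bias is added. A minimal NumPy restatement (an illustration, not the TensorFlow implementation):

import numpy as np

def linear_like(args, W, b=None):
  # out = concat(args, axis=-1) @ W (+ b), where W has shape
  # [sum of each arg's last dim, output_size] -- one fused matmul.
  x = np.concatenate(args, axis=-1)
  out = x @ W
  return out if b is None else out + b

This also explains the assertion in Example 1: with the all-ones initializer, [[1., 2.]] times a 2x2 matrix of ones gives [[3., 3.]].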
Example 1: testLinear
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import _linear [as alias]
def testLinear(self):
  with self.test_session() as sess:
    with tf.variable_scope("root", initializer=tf.constant_initializer(1.0)):
      x = tf.zeros([1, 2])
      l = linear([x], 2, False)
      sess.run([tf.global_variables_initializer()])
      res = sess.run([l], {x.name: np.array([[1., 2.]])})
      self.assertAllClose(res[0], [[3.0, 3.0]])

      # Checks prevent you from accidentally creating a shared function.
      with self.assertRaises(ValueError):
        l1 = linear([x], 2, False)

      # But you can create a new one in a new scope and share the variables.
      with tf.variable_scope("l1") as new_scope:
        l1 = linear([x], 2, False)
      with tf.variable_scope(new_scope, reuse=True):
        linear([l1], 2, False)

    self.assertEqual(len(tf.trainable_variables()), 2)
Example 2: basic_rnn_cell
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import _linear [as alias]
def basic_rnn_cell(inputs, state, num_units, scope=None):
  if state is None:
    if inputs is not None:
      batch_size = inputs.get_shape()[0]
      dtype = inputs.dtype
    else:
      batch_size = 0
      dtype = tf.float32
    init_output = tf.zeros(tf.stack([batch_size, num_units]), dtype=dtype)
    init_state = tf.zeros(tf.stack([batch_size, num_units]), dtype=dtype)
    init_output.set_shape([batch_size, num_units])
    init_state.set_shape([batch_size, num_units])
    return init_output, init_state
  else:
    with tf.variable_scope(scope, "BasicRNNCell", [inputs, state]):
      output = tf.tanh(linear([inputs, state], num_units, True))
    return output, output
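A hypothetical unrolling of this cell over a sequence (x with shape [batch, num_steps, input_dim] and the loop bounds are assumed, not from the original). Because linear creates its variables on the first call, later timesteps must reuse the enclosing scope:

with tf.variable_scope("rnn") as scope:
  _, state = basic_rnn_cell(x[:, 0, :], None, num_units)  # zero initial state
  outputs = []
  for t in range(num_steps):
    if t > 0:
      scope.reuse_variables()  # share the linear weights across timesteps
    output, state = basic_rnn_cell(x[:, t, :], state, num_units, scope=scope)
    outputs.append(output)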
Example 3: _attention
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import _linear [as alias]
def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("Attention"):
    k = vs.get_variable("AttnW", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("AttnV", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
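The scoring here is Bahdanau-style additive attention: project the query with _linear, add it to the convolved window of past states, squash with tanh, and reduce against the learned vector v. A minimal NumPy restatement of the score (shapes assumed from the reshapes in the code):

import numpy as np

def additive_attention_weights(hidden_feat, y, v):
  # hidden_feat: [batch, attn_length, attn_vec_size] (conv output, squeezed)
  # y: [batch, attn_vec_size] projected query; v: [attn_vec_size]
  s = np.tanh(hidden_feat + y[:, None, :]) @ v   # [batch, attn_length]
  e = np.exp(s - s.max(axis=-1, keepdims=True))  # numerically stable softmax
  return e / e.sum(axis=-1, keepdims=True)

The context d is then the attention-weighted sum over the window, and the final slice drops the oldest state from attn_states.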
Example 4: linear
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import _linear [as alias]
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False,
           wd=0.0, input_keep_prob=1.0, is_train=None):
  if args is None or (nest.is_sequence(args) and not args):
    raise ValueError("`args` must be specified")
  if not nest.is_sequence(args):
    args = [args]

  flat_args = [flatten(arg, 1) for arg in args]
  if input_keep_prob < 1.0:
    assert is_train is not None
    flat_args = [tf.cond(is_train,
                         lambda: tf.nn.dropout(arg, input_keep_prob),
                         lambda: arg)
                 for arg in flat_args]
  flat_out = _linear(flat_args, output_size, bias, bias_start=bias_start,
                     scope=scope)
  out = reconstruct(flat_out, args[0], 1)
  if squeeze:
    out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])
  if wd:
    add_wd(wd)
  return out
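This wrapper leans on helpers from its own codebase (flatten, reconstruct, add_wd) that are not shown. A simplified sketch of the first two, as an assumption about their semantics: flatten collapses every dimension except the last `keep` into one so _linear sees a rank-2 tensor, and reconstruct restores the leading dimensions from a reference tensor.

def flatten(tensor, keep):
  # [d0, ..., dk, m] -> [d0*...*dk, m]: collapse all but the last `keep` dims.
  static = tensor.get_shape().as_list()
  return tf.reshape(tensor, [-1] + static[len(static) - keep:])

def reconstruct(flat, ref, keep):
  # [d0*...*dk, out] -> [d0, ..., dk, out]: restore ref's leading dims.
  # Simplified: assumes at most one unknown leading dimension in `ref`.
  lead = [d if d is not None else -1
          for d in ref.get_shape().as_list()[:-keep]]
  out_size = flat.get_shape().as_list()[-1]
  return tf.reshape(flat, lead + [out_size])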
Example 5: build_encoder
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import _linear [as alias]
def build_encoder(self):
  """Inference Network. q(h|X)"""
  with tf.variable_scope("encoder"):
    self.l1_lin = linear(tf.expand_dims(self.x, 0), self.embed_dim,
                         bias=True, scope="l1")
    self.l1 = tf.nn.relu(self.l1_lin)

    self.l2_lin = linear(self.l1, self.embed_dim, bias=True, scope="l2")
    self.l2 = tf.nn.relu(self.l2_lin)

    self.mu = linear(self.l2, self.h_dim, bias=True, scope="mu")
    self.log_sigma_sq = linear(self.l2, self.h_dim, bias=True,
                               scope="log_sigma_sq")

    self.eps = tf.random_normal((1, self.h_dim), 0, 1, dtype=tf.float32)
    self.sigma = tf.sqrt(tf.exp(self.log_sigma_sq))

    self.h = tf.add(self.mu, tf.mul(self.sigma, self.eps))

    _ = tf.histogram_summary("mu", self.mu)
    _ = tf.histogram_summary("sigma", self.sigma)
    _ = tf.histogram_summary("h", self.h)
    _ = tf.histogram_summary("mu + sigma", self.mu + self.sigma)
Example 6: __call__
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import _linear [as alias]
def __call__(self, inputs, state, scope=None):
  """Long short-term memory cell with attention (LSTMA)."""
  with vs.variable_scope(scope or type(self).__name__):
    if self._state_is_tuple:
      state, attns, attn_states = state
    else:
      states = state
      state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
      attns = array_ops.slice(
          states, [0, self._cell.state_size], [-1, self._attn_size])
      attn_states = array_ops.slice(
          states, [0, self._cell.state_size + self._attn_size],
          [-1, self._attn_size * self._attn_length])
    attn_states = array_ops.reshape(attn_states,
                                    [-1, self._attn_length, self._attn_size])
    input_size = self._input_size
    if input_size is None:
      input_size = inputs.get_shape().as_list()[1]
    inputs = _linear([inputs, attns], input_size, True)
    lstm_output, new_state = self._cell(inputs, state)
    if self._state_is_tuple:
      new_state_cat = array_ops.concat(1, nest.flatten(new_state))
    else:
      new_state_cat = new_state
    new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
    with vs.variable_scope("AttnOutputProjection"):
      output = _linear([lstm_output, new_attns], self._attn_size, True)
    new_attn_states = array_ops.concat(1, [new_attn_states,
                                           array_ops.expand_dims(output, 1)])
    new_attn_states = array_ops.reshape(
        new_attn_states, [-1, self._attn_length * self._attn_size])
    new_state = (new_state, new_attns, new_attn_states)
    if not self._state_is_tuple:
      new_state = array_ops.concat(1, list(new_state))
    return output, new_state
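For context, this __call__ has the shape of tf.contrib.rnn.AttentionCellWrapper: each step mixes the input with the current attention context via _linear, runs the wrapped cell, re-attends over a sliding window of attn_length past outputs, and projects the result. A hypothetical wiring (class and tensor names assumed):

# `inputs` is [batch, time, depth]; AttentionCellWrapper names the class above.
base_cell = tf.nn.rnn_cell.BasicLSTMCell(128, state_is_tuple=True)
attn_cell = AttentionCellWrapper(base_cell, attn_length=16, state_is_tuple=True)
outputs, final_state = tf.nn.dynamic_rnn(attn_cell, inputs, dtype=tf.float32)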
Example 7: _linear
# Required import: from tensorflow.python.ops import rnn_cell [as alias]
# Or: from tensorflow.python.ops.rnn_cell import _linear [as alias]
def _linear(self, args, scope="linear"):
  out_size = 4 * self._num_units
  proj_size = args.get_shape()[-1]
  with vs.variable_scope(scope) as scope:
    weights = vs.get_variable("weights", [proj_size, out_size])
    out = math_ops.matmul(args, weights)
    if not self._layer_norm:
      bias = vs.get_variable("b", [out_size])
      out += bias
    return out
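The explicit bias is skipped when self._layer_norm is set because layer normalization supplies its own learned shift. A minimal sketch of that normalization (an assumption, mirroring TensorFlow's LayerNormBasicLSTMCell rather than code from this example):

def _norm(inp, scope, eps=1e-6):
  # Per-feature normalization with learned scale/shift; the `beta` term
  # plays the role of the bias that _linear omits above.
  with vs.variable_scope(scope):
    size = inp.get_shape().as_list()[-1]
    gamma = vs.get_variable("gamma", [size],
                            initializer=tf.constant_initializer(1.0))
    beta = vs.get_variable("beta", [size],
                           initializer=tf.constant_initializer(0.0))
    mean, var = tf.nn.moments(inp, [1], keep_dims=True)
    return gamma * (inp - mean) / tf.sqrt(var + eps) + beta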