This article collects typical usage examples of the Python method tensorflow.foldl. If you are asking: what exactly does tensorflow.foldl do? How is it used? Where can I find sample code? Then the curated examples below may help. You can also explore further usage examples from the tensorflow module.
The following 11 code examples of tensorflow.foldl are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
Example 1: neural_gpu_body
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def neural_gpu_body(inputs, hparams, name=None):
  """The core Neural GPU."""
  with tf.variable_scope(name, "neural_gpu"):

    def step(state, inp):  # pylint: disable=missing-docstring
      x = tf.nn.dropout(state, 1.0 - hparams.dropout)
      for layer in range(hparams.num_hidden_layers):
        x = common_layers.conv_gru(
            x, (hparams.kernel_height, hparams.kernel_width),
            hparams.hidden_size,
            name="cgru_%d" % layer)
      # Padding input is zeroed-out in the modality; we check this by summing.
      padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
      new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
      return new_state

    return tf.foldl(
        step,
        tf.transpose(inputs, [1, 0, 2, 3]),
        initializer=inputs,
        parallel_iterations=1,
        swap_memory=True)
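This example folds the Neural GPU step over the sequence: inputs are transposed to time-major so that tf.foldl feeds one time slice per step, while the full inputs tensor serves as the initial state. Below is a minimal, self-contained sketch of the same accumulator pattern (TF 1.x graph mode; the shapes and the trivial step function are hypothetical):

import tensorflow as tf

state0 = tf.zeros([4, 2, 3])          # initial state: [batch, h, w]
seq = tf.random_normal([5, 4, 2, 3])  # time-major elems: [time, batch, h, w]

def step(state, inp):
  # Each fold step receives the carried state and one time slice of seq.
  return state + inp

final_state = tf.foldl(step, seq, initializer=state0,
                       parallel_iterations=1, swap_memory=True)
with tf.Session() as sess:
  print(sess.run(final_state).shape)  # (4, 2, 3)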
Example 2: testFoldl_Scoped
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def testFoldl_Scoped(self):
  with self.test_session() as sess:
    with tf.variable_scope("root") as varscope:
      elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")

      r = tf.foldl(simple_scoped_fn, elems)
      # Check that we have the one variable we asked for here.
      self.assertEqual(len(tf.trainable_variables()), 1)
      self.assertEqual(tf.trainable_variables()[0].name, "root/body/two:0")
      sess.run([tf.global_variables_initializer()])
      self.assertAllEqual(208, r.eval())

      # Now let's reuse our single variable.
      varscope.reuse_variables()
      r = tf.foldl(simple_scoped_fn, elems, initializer=10)
      self.assertEqual(len(tf.trainable_variables()), 1)
      self.assertAllEqual(880, r.eval())
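The helper simple_scoped_fn is defined elsewhere in the test file. A sketch consistent with the asserted variable name root/body/two:0 and the expected results 208 and 880 (it computes (a + x) * 2, with the constant 2 stored as a variable so that scoping behavior can be checked):

def simple_scoped_fn(a, x):
  # Computes (a + x) * 2, with "2" as a variable created in scope "body".
  with tf.variable_scope("body"):
    two = tf.get_variable("two", [], dtype=tf.int32,
                          initializer=tf.constant_initializer(2))
    return tf.multiply(tf.add(a, x), two)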
Example 3: testScanFoldl_Nested
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def testScanFoldl_Nested(self):
  with self.test_session():
    elems = tf.constant([1.0, 2.0, 3.0, 4.0], name="data")
    inner_elems = tf.constant([0.5, 0.5], name="data")

    def r_inner(a, x):
      return tf.foldl(lambda b, y: b * y * x, inner_elems, initializer=a)

    r = tf.scan(r_inner, elems)

    # t == 0 (returns 1)
    # t == 1, a == 1, x == 2 (returns 1)
    #   t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1
    #   t_1 == 1, b == 1,      y == 0.5, returns b * y * x = 1
    # t == 2, a == 1, x == 3 (returns 1.5*1.5 == 2.25)
    #   t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1.5
    #   t_1 == 1, b == 1.5,    y == 0.5, returns b * y * x = 1.5*1.5
    # t == 3, a == 2.25, x == 4 (returns 9)
    #   t_0 == 0, b == a == 2.25, y == 0.5, returns b * y * x = 4.5
    #   t_1 == 1, b == 4.5,       y == 0.5, returns b * y * x = 9
    self.assertAllClose([1., 1., 2.25, 9.], r.eval())
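The trace in the comments can be verified with a plain-Python equivalent of the nested scan/fold (not from the source; functools.reduce plays the role of tf.foldl):

import functools

elems = [1.0, 2.0, 3.0, 4.0]
inner_elems = [0.5, 0.5]
acc, out = elems[0], [elems[0]]  # scan without initializer starts at elems[0]
for x in elems[1:]:
  acc = functools.reduce(lambda b, y: b * y * x, inner_elems, acc)
  out.append(acc)
print(out)  # [1.0, 1.0, 2.25, 9.0]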
Example 4: top_kth_iterative
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def top_kth_iterative(x, k):
  """Compute the k-th top element of x on the last axis iteratively.

  This assumes values in x are non-negative; rescale if needed.
  It is often faster than tf.nn.top_k for small k, especially if k < 30.
  Note: this does not support back-propagation, it stops gradients!

  Args:
    x: a Tensor of non-negative numbers of type float.
    k: a python integer.

  Returns:
    a float tensor with the same shape as x except the last axis has size 1;
    it contains the k-th largest number in x.
  """
  # The iterative computation is as follows:
  #
  # cur_x = x
  # for _ in range(k):
  #   top_x = maximum of elements of cur_x on the last axis
  #   cur_x = cur_x where cur_x < top_x and 0 everywhere else (top elements)
  #
  # We encode this computation in a TF graph using tf.foldl, so the inner
  # part of the above loop is called "next_x" and tf.foldl does the loop.
  def next_x(cur_x, _):
    top_x = tf.reduce_max(cur_x, axis=-1, keep_dims=True)
    return cur_x * to_float(cur_x < top_x)
  # We only do k-1 steps of the loop and compute the final max separately.
  fin_x = tf.foldl(next_x, tf.range(k - 1), initializer=tf.stop_gradient(x),
                   parallel_iterations=2, back_prop=False)
  return tf.stop_gradient(tf.reduce_max(fin_x, axis=-1, keep_dims=True))
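Note that to_float is a helper from the surrounding library, equivalent to tf.cast(..., tf.float32). A hypothetical usage sketch: with k = 2, the single fold step zeroes out the per-row maximum, and the final reduce_max then returns the second-largest value:

def to_float(x):  # assumption: mirrors the library helper
  return tf.cast(x, tf.float32)

x = tf.constant([[0.1, 0.9, 0.4, 0.7]])
kth = top_kth_iterative(x, k=2)
with tf.Session() as sess:
  print(sess.run(kth))  # [[0.7]]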
Example 5: foldl
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def foldl(fn, elems, initializer=None, name=None):
  """Reduce elems using fn to combine them from left to right.

  # Arguments
      fn: Callable that will be called upon each element in elems and an
          accumulator, for instance `lambda acc, x: acc + x`
      elems: tensor
      initializer: The first value used (`elems[0]` in case of None)
      name: A string name for the foldl node in the graph

  # Returns
      Tensor with same type and shape as `initializer`.
  """
  return tf.foldl(fn, elems, initializer=initializer, name=name)
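A minimal sketch of using this wrapper: with no initializer, tf.foldl starts the accumulator at elems[0], so this computes a plain left-to-right sum:

elems = tf.constant([1.0, 2.0, 3.0, 4.0])
total = foldl(lambda acc, x: acc + x, elems)
with tf.Session() as sess:
  print(sess.run(total))  # 10.0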
Example 6: testFoldl_Simple
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def testFoldl_Simple(self):
  with self.test_session():
    elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")

    r = tf.foldl(lambda a, x: tf.mul(tf.add(a, x), 2), elems)
    self.assertAllEqual(208, r.eval())

    r = tf.foldl(
        lambda a, x: tf.mul(tf.add(a, x), 2), elems, initializer=10)
    self.assertAllEqual(880, r.eval())
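tf.mul and this test predate TensorFlow 1.0; under the 1.x API the same fold reads (a sketch, not from the source):

r = tf.foldl(lambda a, x: tf.multiply(tf.add(a, x), 2), elems)

The expected values follow from unrolling the fold: starting at elems[0] = 1, (1+2)*2 = 6, (6+3)*2 = 18, (18+4)*2 = 44, (44+5)*2 = 98, (98+6)*2 = 208; with initializer=10 the same recurrence yields 880.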
Example 7: testFold_Grad
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def testFold_Grad(self):
  with self.test_session():
    elems = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
    v = tf.constant(2.0, name="v")

    r = tf.foldl(lambda a, x: tf.mul(a, x), elems, initializer=v)
    r = tf.gradients(r, v)[0]
    self.assertAllEqual(720.0, r.eval())

    r = tf.foldr(lambda a, x: tf.mul(a, x), elems, initializer=v)
    r = tf.gradients(r, v)[0]
    self.assertAllEqual(720.0, r.eval())
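Both assertions follow from the same calculation: each fold computes r = v * (1 * 2 * 3 * 4 * 5 * 6) = 720 * v, and since multiplication is commutative the fold direction does not matter, so dr/dv = 720 in both cases.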
Example 8: testFoldShape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def testFoldShape(self):
  with self.test_session():
    x = tf.constant([[1, 2, 3], [4, 5, 6]])

    def fn(_, current_input):
      return current_input

    initializer = tf.constant([0, 0, 0])
    y = tf.foldl(fn, x, initializer=initializer)
    self.assertAllEqual(y.get_shape(), y.eval().shape)
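Here fn ignores the accumulator and returns the current row of x, so y evaluates to the last row [4, 5, 6]; the assertion checks that the statically inferred shape of the fold output (TensorShape([3]), taken from the initializer) matches the shape actually produced at runtime.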
Example 9: _sample_n
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def _sample_n(self, n, seed=None):
  all_counts = tf.to_float(tf.range(self._total_count + 1))
  for batch_dim in range(self.batch_shape.ndims):
    all_counts = tf.expand_dims(all_counts, axis=-1)
  all_cdfs = tf.map_fn(self.cdf, all_counts)
  shape = tf.concat([[n], self.batch_shape_tensor()], 0)
  uniform = tf.random_uniform(shape, seed=seed)
  return tf.foldl(
      lambda acc, cdfs: tf.where(uniform > cdfs, acc + 1, acc),
      all_cdfs,
      initializer=tf.zeros(shape, dtype=tf.int32))
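The fold implements inverse-CDF sampling: for each uniform draw, the accumulator counts how many CDF values the draw exceeds, which is exactly the sampled count. A standalone sketch of the same trick (TF 1.x; the CDF values are hypothetical):

cdfs = tf.constant([0.2, 0.5, 0.9, 1.0])  # CDF evaluated at counts 0..3
u = tf.random_uniform([6], seed=0)
samples = tf.foldl(
    lambda acc, c: tf.where(u > c, acc + 1, acc),
    cdfs,
    initializer=tf.zeros([6], dtype=tf.int32))
# samples[i] counts the CDF values below u[i], i.e. the inverse-CDF sample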
Example 10: perform_filter_operation
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def perform_filter_operation(Y, filter_matrix_conj, taps, delay):
    """
    >>> D, T, taps, delay = 1, 10, 2, 1
    >>> tf.enable_eager_execution()
    >>> Y = tf.ones([D, T])
    >>> filter_matrix_conj = tf.ones([taps, D, D])
    >>> X = perform_filter_operation(Y, filter_matrix_conj, taps, delay)
    >>> X.shape
    TensorShape([Dimension(1), Dimension(10)])
    >>> X.numpy()
    array([[ 1.,  0., -1., -1., -1., -1., -1., -1., -1., -1.]], dtype=float32)
    """
    dyn_shape = tf.shape(Y)
    T = dyn_shape[1]

    def add_tap(accumulated, tau_minus_delay):
        new = tf.einsum(
            'de,dt',
            filter_matrix_conj[tau_minus_delay, :, :],
            Y[:, :(T - delay - tau_minus_delay)]
        )
        paddings = tf.convert_to_tensor([[0, 0], [delay + tau_minus_delay, 0]])
        new = tf.pad(new, paddings, "CONSTANT")
        return accumulated + new

    reverb_tail = tf.foldl(
        add_tap, tf.range(0, taps),
        initializer=tf.zeros_like(Y)
    )
    return Y - reverb_tail
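Each fold step adds one filter tap: step tau_minus_delay applies the conjugate filter matrix to Y truncated by delay + tau_minus_delay frames, left-pads the result back to length T, and adds it to the accumulator. The fold therefore builds the predicted reverberation tail, which is subtracted from Y to obtain the dereverberated signal, exactly as the doctest values show.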
Example 11: nested_control_flow_module_fn
# Required import: import tensorflow [as alias]
# Or: from tensorflow import foldl [as alias]
def nested_control_flow_module_fn():
  """Compute the sum of elements greater than 'a' with nested control flow."""
  elems = tf_v1.placeholder(
      dtype=tf.float32, name="elems", shape=[None])
  a = tf_v1.placeholder(dtype=tf.float32, name="a")

  def sum_above_a(acc, x):
    return acc + tf.cond(x > a, lambda: x, lambda: 0.0)

  hub.add_signature(
      inputs={"elems": elems, "a": a},
      outputs=tf.foldl(sum_above_a, elems, initializer=tf.constant(0.0)))
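A hypothetical standalone check of the fold body outside hub (TF 1.x), summing only the elements above the threshold:

elems = tf.constant([1.0, 4.0, 2.0, 5.0])
a = tf.constant(3.0)
total = tf.foldl(
    lambda acc, x: acc + tf.cond(x > a, lambda: x, lambda: 0.0),
    elems, initializer=tf.constant(0.0))
with tf.Session() as sess:
  print(sess.run(total))  # 9.0 (= 4.0 + 5.0)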