This article collects typical usage examples of the Python function tensorflow.python.ops.functional_ops.map_fn. If you have been wondering what exactly map_fn does, how to call it, or what real uses of it look like, the hand-picked examples below should help.
Fifteen code examples of map_fn are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
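Before diving in, here is a minimal, self-contained sketch of the same call through the public `tf.map_fn` alias (the examples below reach it via the internal `functional_ops` module). It assumes a TF 1.x graph-mode environment:

import tensorflow as tf  # assumes a TF 1.x build

# Map an elementwise function over the first dimension of a tensor.
elems = tf.constant([1, 2, 3, 4])
squares = tf.map_fn(lambda x: x * x, elems)

with tf.Session() as sess:
  print(sess.run(squares))  # [1 4 9 16]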
Example 1: testMap_Scoped
def testMap_Scoped(self):
  with self.cached_session() as sess:

    def double_scoped(x):
      """2x with a dummy 2 that is scoped."""
      with variable_scope.variable_scope("body"):
        # Dummy variable, just to check that scoping works as intended.
        two = variable_scope.get_variable(
            "two", [],
            dtype=dtypes.int32,
            initializer=init_ops.constant_initializer(2))
        return math_ops.multiply(x, two)

    with variable_scope.variable_scope("root") as varscope:
      elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
      doubles = np.array([2 * x for x in [1, 2, 3, 4, 5, 6]])

      r = functional_ops.map_fn(double_scoped, elems)
      # Check that we have the one variable we asked for here.
      self.assertEqual(len(variables.trainable_variables()), 1)
      self.assertEqual(variables.trainable_variables()[0].name,
                       "root/body/two:0")
      sess.run([variables.global_variables_initializer()])
      self.assertAllEqual(doubles, self.evaluate(r))

      # Now let's reuse our single variable.
      varscope.reuse_variables()
      r = functional_ops.map_fn(double_scoped, elems)
      self.assertEqual(len(variables.trainable_variables()), 1)
      self.assertAllEqual(doubles, self.evaluate(r))
Example 2: testMapSparseTensor
def testMapSparseTensor(self):
  with self.cached_session():
    with self.assertRaises(TypeError):
      functional_ops.map_fn(
          lambda x: x,
          sparse_tensor.SparseTensor(
              indices=[[0, 0], [0, 1], [1, 0]],
              values=constant_op.constant([0, 1, 2]),
              dense_shape=[2, 2]))
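Since `map_fn` rejects `SparseTensor` inputs with a `TypeError`, a common workaround is to densify first and map over the rows of the dense result. A minimal sketch, assuming the same modules imported by the original test plus `sparse_ops` from tensorflow.python.ops:

sp = sparse_tensor.SparseTensor(
    indices=[[0, 0], [0, 1], [1, 0]],
    values=constant_op.constant([0, 1, 2]),
    dense_shape=[2, 2])
# Convert to dense; map_fn then accepts it like any other Tensor.
dense = sparse_ops.sparse_tensor_to_dense(sp)
doubled_rows = functional_ops.map_fn(lambda row: row * 2, dense)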
Example 3: testMap_MultiOutputMismatchedDtype
def testMap_MultiOutputMismatchedDtype(self):
  nums = np.array([1, 2, 3, 4, 5, 6])
  with self.assertRaisesRegexp(
      TypeError, r"two structures don't have the same nested structure"):
    # lambda emits tuple, but dtype is a list
    functional_ops.map_fn(
        lambda x: ((x + 3) * 2, -(x + 3) * 2),
        nums,
        dtype=[dtypes.int64, dtypes.int64])
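The fix for the mismatch above is to make `dtype` mirror the structure the lambda actually returns, i.e. a tuple rather than a list. A sketch based on the failing call:

# dtype as a tuple matches the tuple the lambda emits, so this succeeds.
r = functional_ops.map_fn(
    lambda x: ((x + 3) * 2, -(x + 3) * 2),
    nums,
    dtype=(dtypes.int64, dtypes.int64))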
Example 4: testMap_Simple
def testMap_Simple(self):
  nums = [1, 2, 3, 4, 5, 6]
  elems = constant_op.constant(nums, name="data")
  r = functional_ops.map_fn(
      lambda x: math_ops.multiply(math_ops.add(x, 3), 2), elems)
  self.assertAllEqual(
      np.array([(x + 3) * 2 for x in nums]), self.evaluate(r))
Example 5: _sample_n
def _sample_n(self, n, seed=None):
  n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
  k = self.event_shape_tensor()[0]

  # Broadcast total_count and logits to the same shape.
  n_draws = array_ops.ones_like(
      self.logits[..., 0], dtype=n_draws.dtype) * n_draws
  logits = array_ops.ones_like(
      n_draws[..., array_ops.newaxis], dtype=self.logits.dtype) * self.logits

  # Flatten total_count and logits.
  flat_logits = array_ops.reshape(logits, [-1, k])  # [B1B2...Bm, k]
  flat_ndraws = n * array_ops.reshape(n_draws, [-1])  # [B1B2...Bm]

  # Evaluate each (logits, total_count) pair with map_fn.
  def _sample_single(args):
    logits, n_draw = args[0], args[1]  # [K], []
    x = random_ops.multinomial(logits[array_ops.newaxis, ...], n_draw,
                               seed)  # [1, n*n_draw]
    x = array_ops.reshape(x, shape=[n, -1])  # [n, n_draw]
    x = math_ops.reduce_sum(array_ops.one_hot(x, depth=k), axis=-2)  # [n, k]
    return x

  x = functional_ops.map_fn(
      _sample_single, [flat_logits, flat_ndraws],
      dtype=self.dtype)  # [B1B2...Bm, n, k]

  # Reshape the results to the proper shape.
  x = array_ops.transpose(x, perm=[1, 0, 2])
  final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
  x = array_ops.reshape(x, final_shape)  # [n, B1, B2, ..., Bm, k]
  return x
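The pattern above generalizes: when `elems` is a list (or tuple) of tensors, `fn` receives a matching structure of per-element slices, and `dtype` must be given whenever the output structure differs from the input structure. A minimal sketch, detached from the distribution code and assuming the usual TF internal modules are imported:

a = constant_op.constant([1.0, 2.0, 3.0])
b = constant_op.constant([10.0, 20.0, 30.0])
# fn receives [a[i], b[i]] for each i; the output is a single float32 tensor,
# so dtype names that single output type.
s = functional_ops.map_fn(lambda args: args[0] + args[1], [a, b],
                          dtype=dtypes.float32)  # [11.0, 22.0, 33.0]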
Example 6: testMap_SimpleNotTensor
def testMap_SimpleNotTensor(self):
  with self.test_session():
    nums = np.array([1, 2, 3, 4, 5, 6])
    r = functional_ops.map_fn(
        lambda x: math_ops.multiply(math_ops.add(x, 3), 2), nums)
    self.assertAllEqual(
        np.array([(x + 3) * 2 for x in nums]), self.evaluate(r))
Example 7: compute_activations
def compute_activations(elems):
  return functional_ops.map_fn(fn=classifier_fn,
                               elems=elems,
                               parallel_iterations=1,
                               back_prop=False,
                               swap_memory=True,
                               name='RunClassifier')
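`classifier_fn` is supplied by the caller in the original code; `parallel_iterations=1` with `swap_memory=True` trades speed for memory, and `back_prop=False` skips gradient bookkeeping. A hypothetical stand-in that makes the snippet runnable (any per-element `Tensor -> Tensor` callable works):

def classifier_fn(batch):
  # Hypothetical stand-in: reduce the last axis of each batch element.
  return math_ops.reduce_mean(batch, axis=-1)

activations = compute_activations(
    constant_op.constant([[[1.0, 2.0], [3.0, 4.0]]]))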
Example 8: sparse_boolean_mask
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th
      dimension of `sparse_tensor`.
    name: optional name for this operation.

  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.op_scope([sparse_tensor, mask], name):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)

    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain)
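A usage sketch for the helper above, with hypothetical values:

sp = sparse_tensor.SparseTensor(
    indices=[[0, 0], [1, 1], [2, 0]],
    values=constant_op.constant([1, 2, 3]),
    dense_shape=[3, 2])
keep = constant_op.constant([True, False, True])
# Retains the entries of rows 0 and 2; row 1's entry is dropped
# (the dense_shape is unchanged by sparse_retain).
masked = sparse_boolean_mask(sp, keep)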
Example 9: ctc_unique_labels
def ctc_unique_labels(labels, name=None):
  """Get unique labels and indices for batched labels for tf.nn.ctc_loss.

  For use with the optional `unique` argument of tf.nn.ctc_loss_v2: this op
  can be used to preprocess labels in the input pipeline for better
  speed/memory use when computing the ctc loss on TPU.

  Example:
    ctc_unique_labels([[3, 4, 4, 3]]) ->
      unique labels padded with 0: [[3, 4, 0, 0]]
      indices of original labels in unique: [0, 1, 1, 0]

  Args:
    labels: tensor of shape [batch_size, max_label_length] padded with 0.
    name: A name for this `Op`. Defaults to "ctc_unique_labels".

  Returns:
    tuple of
      - unique labels, tensor of shape `[batch_size, max_label_length]`
      - indices into unique labels, shape `[batch_size, max_label_length]`
  """
  with ops.name_scope(name, "ctc_unique_labels", [labels]):
    labels = ops.convert_to_tensor(labels, name="labels")

    def _unique(x):
      u = array_ops.unique(x)
      y = array_ops.pad(
          u.y, [[0, _get_dim(u.idx, 0) - _get_dim(u.y, 0)]])
      y = math_ops.cast(y, dtypes.int64)
      return [y, u.idx]

    return functional_ops.map_fn(
        _unique, labels, dtype=[dtypes.int64, dtypes.int32])
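Following the docstring's example, a quick usage sketch (assuming `_get_dim` from the original module is in scope):

labels = constant_op.constant([[3, 4, 4, 3]], dtype=dtypes.int64)
unique, idx = ctc_unique_labels(labels)
# unique -> [[3, 4, 0, 0]] (padded with 0), idx -> [[0, 1, 1, 0]]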
Example 10: compress
def compress(self, inputs):
  """Compress inputs and store their binary representations into strings.

  Args:
    inputs: `Tensor` with values to be compressed.

  Returns:
    String `Tensor` vector containing the compressed representation of each
    batch element of `inputs`.
  """
  with ops.name_scope(self._name_scope()):
    inputs = ops.convert_to_tensor(inputs)

    if not self.built:
      # Check input assumptions set before layer building, e.g. input rank.
      self._assert_input_compatibility(inputs)
      if self.dtype is None:
        self._dtype = inputs.dtype.base_dtype.name
      self.build(inputs.shape)

    # Check input assumptions set after layer building, e.g. input shape.
    if not context.executing_eagerly():
      self._assert_input_compatibility(inputs)

    ndim = self.input_spec.ndim
    channel_axis = self._channel_axis(ndim)
    # Tuple of slices for expanding dimensions of tensors below.
    slices = ndim * [None] + [slice(None)]
    slices[channel_axis] = slice(None)
    slices = tuple(slices)

    # Expand dimensions of CDF to input dimensions, keeping the channels along
    # the right dimension.
    cdf = self._quantized_cdf[slices[1:]]
    num_levels = array_ops.shape(cdf)[-1] - 1

    # Bring inputs to the right range by centering the range on the medians.
    half = constant_op.constant(.5, dtype=self.dtype)
    medians = array_ops.squeeze(self._medians, [1, 2])
    offsets = (math_ops.cast(num_levels // 2, self.dtype) + half) - medians
    # Expand offsets to input dimensions and add to inputs.
    values = inputs + offsets[slices[:-1]]

    # Clip to range and cast to integers. Because we have added .5 above, and
    # all values are positive, the cast effectively implements rounding.
    values = math_ops.maximum(values, half)
    values = math_ops.minimum(
        values, math_ops.cast(num_levels, self.dtype) - half)
    values = math_ops.cast(values, dtypes.int16)

    def loop_body(tensor):
      return coder_ops.range_encode(
          tensor, cdf, precision=self.range_coder_precision)

    strings = functional_ops.map_fn(
        loop_body, values, dtype=dtypes.string, back_prop=False)

    if not context.executing_eagerly():
      strings.set_shape(inputs.shape[:1])

    return strings
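A hedged usage sketch for `compress`, assuming it is a method on an entropy-model layer (the `coder_ops.range_encode` call suggests an entropy-bottleneck layer from tensorflow_compression) that has already been built on inputs of a matching shape:

# Hypothetical: `layer` is an instance of the class defining compress().
inputs = random_ops.random_uniform([2, 8, 8, 3])
strings = layer.compress(inputs)  # string Tensor of shape [2]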
Example 11: testMap_MultiInputSingleOutput
def testMap_MultiInputSingleOutput(self):
  nums = np.array([1, 2, 3, 4, 5, 6])
  r = functional_ops.map_fn(
      lambda x: x[0] * x[1][0] + x[1][1], (nums, (nums, -nums)),
      dtype=dtypes.int64)
  self.assertEqual((6,), r.get_shape())
  received = self.evaluate(r)
  self.assertAllEqual(nums * nums + (-nums), received)
Example 12: testWhileAndTensorArray
def testWhileAndTensorArray(self):
  with self.cached_session() as sess:
    param = constant_op.constant(2.0)
    y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
    # map_fn uses TensorArray internally.
    r = functional_ops.map_fn(lambda x: math_ops.multiply(x, param), y0)
    self.assertAllClose([2.0, 4.0, 6.0, 8.0, 10.0, 12.0], self.evaluate(r))
    r = gradients_impl.gradients(r, param)[0]
    self.assertAllClose(21.0, self.evaluate(r))
Example 13: build_dataset
def build_dataset(row, num):
  # pylint: disable=g-long-lambda
  iterator = dataset_ops.make_initializable_iterator(
      dataset_ops.Dataset.from_tensors(row).map(
          lambda elems: functional_ops.map_fn(
              lambda x: control_map_fn(x, num), elems)))
  init_op = iterator.initializer
  get_next = iterator.get_next()
  return init_op, get_next
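`control_map_fn` comes from the surrounding test file. A hypothetical stand-in with the same signature, so the dataset pipeline above is runnable (assuming `control_flow_ops` and `math_ops` from tensorflow.python.ops are imported):

def control_map_fn(x, num):
  # Hypothetical stand-in: pick a branch in graph mode based on `num`.
  return control_flow_ops.cond(
      math_ops.equal(num, 2),
      lambda: x * 2,
      lambda: x * 3)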
Example 14: loop
def loop():
  random_seed.set_random_seed(0)
  x1 = random_ops.truncated_normal([1, 784], seed=0)
  x2 = random_ops.truncated_normal([1, 784], seed=0)
  x3 = random_ops.truncated_normal([1, 784], seed=0)
  x4 = random_ops.truncated_normal([1, 784], seed=0)
  elems = (x1, x2, x3, x4)
  outputs = functional_ops.map_fn(two_layer_model, elems, dtype=dtypes.float32)
  return outputs
Example 15: _loop_with_vec_and_4d
def _loop_with_vec_and_4d():
  random_seed.set_random_seed(0)
  x1 = random_ops.truncated_normal([1, 784], seed=0)
  x2 = random_ops.truncated_normal([1, 784], seed=0)
  x3 = random_ops.truncated_normal([1, 784], seed=0)
  x4 = random_ops.truncated_normal([1, 784], seed=0)
  elems = (x1, x2, x3, x4)
  outputs = functional_ops.map_fn(
      _model_with_vec_and_4d, elems, dtype=dtypes.float32)
  return outputs
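Examples 14 and 15 reference `two_layer_model` and `_model_with_vec_and_4d`, which are defined elsewhere in the original test file. A hypothetical stand-in with a compatible signature: since `elems` is a tuple of four `[1, 784]` tensors, `fn` receives a tuple of four `[784]` slices and must return a single float32 tensor:

def two_layer_model(args):
  # Hypothetical stand-in: stack the four [784] slices and apply two
  # simple "layers" (an elementwise nonlinearity, then a reduction).
  x = array_ops.stack(args)                     # [4, 784]
  hidden = math_ops.tanh(x)                     # layer 1
  return math_ops.reduce_mean(hidden, axis=-1)  # layer 2 -> [4]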