This article collects typical usage examples of the Python method tensorflow.compat.v1.reverse. If you have been wondering what v1.reverse does, how to call it, or what real uses look like, the curated code examples below may help. You can also explore further usage of the containing module, tensorflow.compat.v1.
The following shows 9 code examples of the v1.reverse method, sorted by popularity by default.
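As a quick orientation before the examples, here is a minimal, illustrative sketch of tf.compat.v1.reverse itself: it flips a tensor along the axes you pass. The values and session usage are assumptions for demonstration only, not taken from the examples below.

import numpy as np
import tensorflow.compat.v1 as tf

with tf.Graph().as_default():
    x = tf.constant(np.arange(6).reshape(2, 3), dtype=tf.int32)
    flipped = tf.reverse(x, axis=[1])   # flip each row: [[2, 1, 0], [5, 4, 3]]
    with tf.Session() as sess:
        print(sess.run(flipped))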
Example 1: calculate_generalized_advantage_estimator
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reverse [as alias]
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  # pylint: disable=g-doc-args
  """Generalized advantage estimator.

  Returns:
    GAE estimator. It will be one element shorter than the input; this is
    because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
  """
  # pylint: enable=g-doc-args
  next_value = value[1:, :]
  next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
  delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
           - value[:-1, :])
  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])
  return tf.check_numerics(return_, "return")
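A minimal sketch of how the estimator above might be called, assuming reward, value and done are laid out as [time, batch] tensors; the shapes and hyperparameter values here are illustrative only.

T, B = 5, 2
reward = tf.zeros([T, B])
value = tf.ones([T, B])
done = tf.zeros([T, B], dtype=tf.bool)
gae = calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma=0.99, gae_lambda=0.95)
# gae has shape [T - 1, B]: one step shorter than the inputs, as documented.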
Example 2: _reverse_seq
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reverse [as alias]
def _reverse_seq(sequence, sequence_lengths=None):
  """Reverse sequence along dim 0.

  Args:
    sequence: Tensor of shape [T, B, ...].
    sequence_lengths: (optional) tensor of shape [B]. If `None`, only reverse
      along dim 0.

  Returns:
    Tensor of same shape as sequence with dim 0 reversed up to sequence_lengths.
  """
  if sequence_lengths is None:
    return tf.reverse(sequence, [0])

  sequence_lengths = tf.convert_to_tensor(sequence_lengths)
  with tf.control_dependencies(
      [tf.assert_equal(sequence.shape[1], sequence_lengths.shape[0])]):
    return tf.reverse_sequence(
        sequence, sequence_lengths, seq_axis=0, batch_axis=1)
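A hypothetical call to _reverse_seq above on a [T, B, ...] batch; the tensor contents and per-example lengths below are made up for illustration.

seq = tf.random_uniform([4, 2, 3])       # [T=4, B=2, D=3]
lengths = tf.constant([4, 2], tf.int32)  # valid timesteps per batch element
rev = _reverse_seq(seq, lengths)         # steps beyond each length keep their position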
Example 3: _test_forward_tranapose_axes_input
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reverse [as alias]
def _test_forward_tranapose_axes_input(ishape, axes):
    data = np.random.uniform(size=ishape).astype(np.float32)
    axes_np = np.array(axes).astype(np.int32)

    with tf.Graph().as_default():
        in1 = tf.placeholder(
            shape=data.shape, dtype=data.dtype, name="transpose_data")

        const1 = tf.constant(axes_np, dtype=tf.int32)

        # make axes an input to tf.transpose, but not an input to the graph,
        # so it can be extracted with infer_value_simulated
        axes = tf.reverse(const1, axis=[-1])
        tf.transpose(in1, axes)

        compare_tf_with_tvm([data], ['transpose_data:0'], 'transpose:0')
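For reference, the test above could be invoked roughly as follows; this assumes the TVM test harness that provides compare_tf_with_tvm is available, and the shapes and permutations are arbitrary examples.

_test_forward_tranapose_axes_input((2, 3, 4), (1, 2, 0))
_test_forward_tranapose_axes_input((2, 3, 4, 5), (3, 2, 1, 0))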
Example 4: discounted_rewards
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reverse [as alias]
def discounted_rewards(reward, done, gae_gamma, end_values):
  """Discounted rewards."""
  not_done = tf.expand_dims(1 - tf.cast(done, tf.float32), axis=2)
  end_values = end_values * not_done[-1, :, :]
  return_ = tf.scan(
      lambda agg, cur: cur + gae_gamma * agg,
      tf.expand_dims(reward, axis=2) * not_done,
      initializer=end_values,
      reverse=True,
      back_prop=False,
      parallel_iterations=2)
  return tf.check_numerics(return_, "return")
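An illustrative call to discounted_rewards above, assuming reward and done are [time, batch] tensors and end_values carries a [batch, 1] bootstrap value; all values here are placeholders for demonstration.

T, B = 5, 2
reward = tf.ones([T, B])
done = tf.zeros([T, B], dtype=tf.bool)
end_values = tf.zeros([B, 1])
returns = discounted_rewards(reward, done, gae_gamma=0.99, end_values=end_values)
# returns has shape [T, B, 1]; each entry is the discounted sum of future rewards.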
Example 5: get_read_mask
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reverse [as alias]
def get_read_mask(self, read_head_index):
  """Uses mask_pos_gt() instead of mask_pos_lt() to reverse read values.

  Args:
    read_head_index: Identifies which read head we're getting the mask for.

  Returns:
    A tf.float32 tensor of shape [1, 1, memory_size, memory_size].
  """
  if read_head_index == 0:
    return tf.expand_dims(
        common_layers.mask_pos_gt(self._memory_size, self._memory_size),
        axis=0)
  else:
    raise ValueError("Read head index must be 0 for queue.")
Example 6: body
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reverse [as alias]
def body(self, features):
  """Build the main body of the model.

  Args:
    features: A dict of "inputs" and "targets" which have already been passed
      through an embedding layer. Inputs should have shape
      [batch_size, max_seq_length, 1, embedding_size]. Targets should have
      shape [batch_size, max_seq_length, 1, 1].

  Returns:
    The logits which get passed to the top of the model for inference.
    A tensor of shape [batch_size, seq_length, 1, embedding_size].
  """
  inputs = features.get("inputs")
  targets = features["targets"]

  if inputs is not None:
    inputs = common_layers.flatten4d3d(inputs)
    _, final_encoder_state = self._rnn(tf.reverse(inputs, axis=[1]),
                                       "encoder")
  else:
    final_encoder_state = None

  shifted_targets = common_layers.shift_right(targets)
  decoder_outputs, _ = self._rnn(
      common_layers.flatten4d3d(shifted_targets),
      "decoder",
      initial_state=final_encoder_state)
  return decoder_outputs
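The key tf.reverse usage in the body above is flipping the time dimension of the encoder inputs. A tiny standalone sketch of that reversal, with made-up shapes, looks like this:

inputs = tf.random_uniform([2, 4, 8])            # [batch_size, max_seq_length, embedding_size]
reversed_inputs = tf.reverse(inputs, axis=[1])   # feed the sequence to the encoder back to front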
Example 7: pixel_wise_softmax
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reverse [as alias]
def pixel_wise_softmax(output, name='pixel_wise_softmax'):
    """Return the softmax outputs of images; every pixel has a distribution over
    labels, and the values for each pixel sum to 1.

    Usually used for image segmentation.

    Parameters
    ------------
    output : tensor
        - For 2d image, 4D tensor [batch_size, height, width, channel], channel >= 2.
        - For 3d image, 5D tensor [batch_size, depth, height, width, channel], channel >= 2.

    Examples
    ---------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)

    References
    -----------
    - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`_
    """
    with tf.name_scope(name) as scope:
        return tf.nn.softmax(output)

    ## old implementation
    # exp_map = tf.exp(output)
    # if output.get_shape().ndims == 4:   # 2d image
    #     evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, True]))
    # elif output.get_shape().ndims == 5: # 3d image
    #     evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, False, True]))
    # else:
    #     raise Exception("output parameters should be 2d or 3d image, not %s" % str(output._shape))
    # return tf.div(exp_map, evidence)
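A minimal sketch of calling pixel_wise_softmax above on a hypothetical 4-class segmentation output; the logits are random and purely illustrative.

seg_logits = tf.random_uniform([1, 32, 32, 4])  # [batch_size, height, width, channel]
seg_probs = pixel_wise_softmax(seg_logits)      # per-pixel class probabilities sum to 1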
Example 8: _test_space_to_batch_nd_infer_paddings
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reverse [as alias]
def _test_space_to_batch_nd_infer_paddings(input_shape, block_shape, dtype='int32'):
    data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    padding_np = np.array([0, 1]).astype(np.int32).reshape((1, 2))

    with tf.Graph().as_default():
        in_data = tf.placeholder(shape=input_shape, dtype=dtype)
        const1 = tf.constant(padding_np, dtype=tf.int32)

        # make paddings an input to tf.space_to_batch_nd, but not an input to the
        # graph, so it can be extracted with infer_value_simulated
        paddings = tf.reverse(const1, axis=[-1])
        out = tf.space_to_batch_nd(in_data, block_shape, paddings)

        compare_tf_with_tvm(data, in_data.name, out.name)
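Such a test could be driven by a call like the one below (again assuming the TVM harness); the shapes here are hypothetical and chosen so that, after the reversed [[1, 0]] padding, the padded dimension is divisible by the block size.

_test_space_to_batch_nd_infer_paddings([1, 5, 2], [3])  # (5 + 1) is divisible by the block size 3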
Example 9: _test_forward_reverse_v2
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reverse [as alias]
def _test_forward_reverse_v2(in_shape, axis, dtype):
    np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, in_shape, name="in_data")
        tf.reverse(in_data, axis=[axis], name="reverse")
        compare_tf_with_tvm([np_data], ['in_data:0'], 'reverse:0')
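Typical invocations of this last test might look like the following; the dtypes and axes are arbitrary, and the TVM comparison harness is assumed to be available.

_test_forward_reverse_v2((2, 3), 0, 'float32')
_test_forward_reverse_v2((2, 3, 5), -2, 'int32')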