This article collects typical usage examples of the tensorflow.reverse method in Python. If you are wondering how exactly tensorflow.reverse works or how to use it, the curated code examples below may help. You can also explore further usage examples from the tensorflow module.
The following shows 15 code examples of the tensorflow.reverse method, sorted by popularity by default.
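To ground the examples, here is a minimal sketch of what tf.reverse itself does; the axis list names the dimensions to flip (written in the TensorFlow 1.x style used throughout the examples below):

import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
a = tf.reverse(x, [0])     # flip rows:    [[4, 5, 6], [1, 2, 3]]
b = tf.reverse(x, [1])     # flip columns: [[3, 2, 1], [6, 5, 4]]
c = tf.reverse(x, [0, 1])  # flip both:    [[6, 5, 4], [3, 2, 1]]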
Example 1: calculate_generalized_advantage_estimator

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def calculate_generalized_advantage_estimator(
        reward, value, done, gae_gamma, gae_lambda):
    """Generalized advantage estimator."""
    # Slight trick: replace the last reward with the last value estimate and
    # append a zero next-value, which makes the advantage 0 at the last timestep.
    reward = tf.concat([reward[:-1, :], value[-1:, :]], axis=0)
    next_value = tf.concat([value[1:, :], tf.zeros_like(value[-1:, :])], axis=0)
    next_not_done = 1 - tf.cast(tf.concat([done[1:, :],
                                           tf.zeros_like(done[-1:, :])], axis=0),
                                tf.float32)
    delta = reward + gae_gamma * next_value * next_not_done - value
    # Accumulate deltas right-to-left:
    # advantage_t = delta_t + gamma * lambda * not_done_{t+1} * advantage_{t+1}
    return_ = tf.reverse(tf.scan(
        lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
        [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
        tf.zeros_like(delta[0, :]),
        parallel_iterations=1), [0])
    return tf.check_numerics(return_, "return")
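A hedged usage sketch, assuming time-major inputs of shape [num_timesteps, batch_size]:

reward = tf.zeros([10, 4])
value = tf.zeros([10, 4])
done = tf.zeros([10, 4])
advantage = calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma=0.99, gae_lambda=0.95)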
Example 2: VGG

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def VGG(x, para):
    # Flip RGB to BGR with tf.reverse on the channel axis, then subtract the
    # VGG mean pixel values (assumes `import numpy as np` alongside tensorflow).
    x = tf.reverse(x, [-1]) - np.array([103.939, 116.779, 123.68])
    conv1_1 = conv_layer_relu(x, para['conv1_1'][0], para['conv1_1'][1])
    conv1_2 = conv_layer_relu(conv1_1, para['conv1_2'][0], para['conv1_2'][1])
    conv1_2_ave = ave_pool(conv1_2)
    conv2_1 = conv_layer_relu(conv1_2_ave, para['conv2_1'][0], para['conv2_1'][1])
    conv2_2 = conv_layer_relu(conv2_1, para['conv2_2'][0], para['conv2_2'][1])
    conv2_2_ave = ave_pool(conv2_2)
    conv3_1 = conv_layer_relu(conv2_2_ave, para['conv3_1'][0], para['conv3_1'][1])
    conv3_2 = conv_layer_relu(conv3_1, para['conv3_2'][0], para['conv3_2'][1])
    conv3_3 = conv_layer_relu(conv3_2, para['conv3_3'][0], para['conv3_3'][1])
    conv3_3_ave = ave_pool(conv3_3)
    conv4_1 = conv_layer_relu(conv3_3_ave, para['conv4_1'][0], para['conv4_1'][1])
    conv4_2 = conv_layer_relu(conv4_1, para['conv4_2'][0], para['conv4_2'][1])
    conv4_3 = conv_layer_relu(conv4_2, para['conv4_3'][0], para['conv4_3'][1])
    # Collect the feature maps used downstream.
    f = {}
    f["conv1_2"] = conv1_2
    f["conv2_2"] = conv2_2
    f["conv3_3"] = conv3_3
    f["conv4_3"] = conv4_3
    return f
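A hedged usage sketch; `para` is assumed to be a dict of pretrained VGG16 weights loaded elsewhere, and conv_layer_relu/ave_pool are the repo's own helpers:

images = tf.placeholder(tf.float32, [None, 224, 224, 3])  # RGB in [0, 255]
features = VGG(images, para)
conv4_3 = features["conv4_3"]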
Example 3: _discount_reward_tensor_1d

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def _discount_reward_tensor_1d(reward, sequence_length,
                               discount=1., dtype=None):
    if sequence_length is None:
        raise ValueError('sequence_length must not be `None` for 1D reward.')
    batch_size = tf.shape(reward)[0]
    max_seq_length = tf.reduce_max(sequence_length)
    dtype = dtype or reward.dtype
    if discount == 1.:
        dmat = tf.ones(
            tf.concat([[batch_size], [max_seq_length]], 0), dtype=dtype)
    else:
        mask = tf.sequence_mask(sequence_length, dtype=dtype)
        mask = tf.concat([mask[:, 1:], tf.zeros_like(mask[:, -1:])], axis=1)
        # Make each row = [discount, ..., discount, 1, ..., 1]
        dmat = mask * discount + (1 - mask)
        dmat = tf.cumprod(dmat, axis=1, reverse=True)
    disc_reward = dmat * tf.expand_dims(reward, -1)
    # mask_sequences: library helper that zeros entries past each sequence length.
    disc_reward = mask_sequences(
        disc_reward, sequence_length, dtype=dtype, tensor_rank=2)
    return disc_reward
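A worked trace of the discount matrix, assuming sequence_length = 3, a max length of 4, and discount = 0.9:

# mask after the shift        -> [1.0, 1.0, 0.0, 0.0]
# dmat before the cumprod     -> [0.9, 0.9, 1.0, 1.0]
# dmat after reversed cumprod -> [0.81, 0.9, 1.0, 1.0]
# so a scalar reward r is spread as [0.81*r, 0.9*r, r, 0] after masking.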
Example 4: _discount_reward_tensor_2d

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def _discount_reward_tensor_2d(reward, sequence_length=None,
                               discount=1., dtype=None):
    if sequence_length is not None:
        reward = mask_sequences(
            reward, sequence_length, dtype=dtype, tensor_rank=2)
    if discount == 1.:
        disc_reward = tf.cumsum(reward, axis=1, reverse=True)
    else:
        # [max_time, batch_size]
        rev_reward_T = tf.transpose(tf.reverse(reward, [1]), [1, 0])
        rev_reward_T_cum = tf.scan(
            fn=lambda acc, cur: cur + discount * acc,
            elems=rev_reward_T,
            initializer=tf.zeros_like(reward[:, 1]),
            back_prop=False)
        disc_reward = tf.reverse(
            tf.transpose(rev_reward_T_cum, [1, 0]), [1])
    return disc_reward
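A quick numeric check (hypothetical values): for a single row of rewards [1, 1, 1] with discount 0.5, the returns are [1 + 0.5*(1 + 0.5*1), 1 + 0.5*1, 1]:

r = tf.constant([[1., 1., 1.]])
out = _discount_reward_tensor_2d(r, discount=0.5)  # [[1.75, 1.5, 1.0]]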
Example 5: _compute_rnn_outputs

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def _compute_rnn_outputs(self):
    # Reverse along the time axis (axis 1). The original snippet used the
    # pre-1.0 boolean-mask form tf.reverse(x, [False, True, False]).
    reversed_inputs = tf.reverse(self.inputs, [1])
    reversed_resets = tf.reverse(self.resets, [1])
    with tf.variable_scope('fw'):
        self._fw_lstm = LSTM(self.inputs, self.resets, self.training,
                             self.num_layers, self.hidden_layer_size,
                             self.init_scale, self.dropout_keep_prob)
    with tf.variable_scope('rv'):
        self._rv_lstm = LSTM(reversed_inputs, reversed_resets,
                             self.training, self.num_layers,
                             self.hidden_layer_size, self.init_scale,
                             self.dropout_keep_prob)
    fw_outputs = self._fw_lstm.outputs
    # Flip the backward outputs back into forward time order before concatenating.
    rv_outputs = tf.reverse(self._rv_lstm.outputs, [1])
    outputs = tf.concat([fw_outputs, rv_outputs], 2)
    return outputs
Example 6: reflection

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def reflection(data, decision):
    """Conditionally reflects the data in XYZ.

    Args:
      data: input tensor, shape: [..], z, y, x, c.
      decision: boolean tensor of shape [3], indicating on which spatial
        dimensions (x, y, z) to apply the reflection.

    Returns:
      TF op to conditionally apply reflection.
    """
    with tf.name_scope('augment_reflection'):
        rank = data.get_shape().ndims
        # Spatial axes counted from the end: x = rank-2, y = rank-3, z = rank-4.
        spatial_dims = tf.constant([rank - 2, rank - 3, rank - 4])
        selected_dims = tf.boolean_mask(spatial_dims, decision)
        return tf.reverse(data, selected_dims)
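A hedged usage sketch, assuming a 4-D volume of shape [z, y, x, c]:

volume = tf.zeros([8, 8, 8, 1])
decision = tf.random_uniform([3]) < 0.5  # independently decide to flip x, y, z
augmented = reflection(volume, decision)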
Example 7: __call__

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def __call__(self, x):
    """Applies the sampled permutation and reflection to `x`.

    Args:
      x: A Tensor of rank `self.rank`.

    Returns:
      The transformed Tensor, retaining as much static shape information as
      possible.
    """
    x = tf.convert_to_tensor(x)
    with tf.name_scope('permute_and_reflect'):
        if self.permutable_axes.size > 0:
            x = permute_axes(x, self.full_permutation, self.permutable_axes)
        if self.reflectable_axes.size > 0:
            x = tf.reverse(x, self.reflected_axes)
    return x
Example 8: augment_stochastic

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def augment_stochastic(data_ops, augment_rc=False, augment_shifts=[]):
    """Apply stochastic augmentations.

    Args:
      data_ops: dict with keys 'sequence', 'label', and 'na'.
      augment_rc: Boolean for whether to apply reverse complement augmentation.
      augment_shifts: list of int offsets to sample shift augmentations.

    Returns:
      data_ops_aug: augmented data
    """
    if augment_shifts:
        data_ops['sequence'] = augment_stochastic_shifts(data_ops['sequence'],
                                                         augment_shifts)
    if augment_rc:
        data_ops = augment_stochastic_rc(data_ops)
    else:
        data_ops['reverse_preds'] = tf.zeros((), dtype=tf.bool)
    return data_ops
Example 9: reverse_complement

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def reverse_complement(input_seq, lengths=None):
    # TODO(dbelanger) remove dependencies on this method,
    # as it is easy to mis-use in ways that lead to buggy results.
    """Reverse complement a list of one-hot coded nucleotide Tensors.

    Args:
      input_seq: Sequence of seq_len tensors of dimension (batch_size, 4).
      lengths: A `Tensor` of dimension batch_size, containing lengths for each
        sequence in the batch. If `None`, simply reverse complements the list.

    Returns:
      reverse complemented sequence
    """
    if lengths is not None:
        # Length-aware reverse complement is not implemented (assumes `import sys`).
        print("Not yet implemented", file=sys.stderr)
        exit(1)
    else:
        # The anti-diagonal matrix maps A<->T and C<->G in the one-hot encoding,
        # and reversed() walks the sequence back to front.
        nt_rc = tf.constant(
            [[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]],
            dtype="float32")
        return [tf.matmul(ris, nt_rc) for ris in reversed(input_seq)]
Example 10: fix_variables

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def fix_variables(self, sess, pretrained_model):
    print('Fix VGG16 layers..')
    with tf.variable_scope('Fix_VGG16') as scope:
        with tf.device("/cpu:0"):
            # fix the vgg16 issue from conv weights to fc weights
            # fix RGB to BGR
            fc6_conv = tf.get_variable("fc6_conv", [7, 7, 512, 4096], trainable=False)
            fc7_conv = tf.get_variable("fc7_conv", [1, 1, 4096, 4096], trainable=False)
            conv1_rgb = tf.get_variable("conv1_rgb", [3, 3, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({self._scope + "/fc6/weights": fc6_conv,
                                          self._scope + "/fc7/weights": fc7_conv,
                                          self._scope + "/conv1/conv1_1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)
            # print("_variables_to_fix:", self._variables_to_fix)
            # sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc6/weights:0'],
            #                    tf.reshape(fc6_conv, self._variables_to_fix[
            #                        self._scope + '/fc6/weights:0'].get_shape())))
            # sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc7/weights:0'],
            #                    tf.reshape(fc7_conv, self._variables_to_fix[
            #                        self._scope + '/fc7/weights:0'].get_shape())))
            # tf.reverse on axis 2 (the input-channel axis) flips the pretrained
            # RGB filters of conv1 into BGR order.
            sess.run(tf.assign(self._variables_to_fix[self._scope + '/conv1/conv1_1/weights:0'],
                               tf.reverse(conv1_rgb, [2])))
Example 11: LandmarkImageLayer

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def LandmarkImageLayer(Landmarks):
    # Assumes module-level constants IMGSIZE and HalfSize, plus an Offsets grid
    # of integer offsets around each landmark.
    def draw_landmarks(L):
        def draw_landmarks_helper(Point):
            intLandmark = tf.to_int32(Point)
            locations = Offsets + intLandmark
            dxdy = Point - tf.to_float(intLandmark)
            offsetsSubPix = tf.to_float(Offsets) - dxdy
            vals = 1 / (1 + tf.norm(offsetsSubPix, axis=2))
            img = tf.scatter_nd(locations, vals, shape=(IMGSIZE, IMGSIZE))
            return img
        # tf.reverse on the last axis swaps each (x, y) pair into (row, col) order.
        Landmark = tf.reverse(tf.reshape(L, [-1, 2]), [-1])
        # Landmark = tf.reshape(L, (-1, 2))
        Landmark = tf.clip_by_value(Landmark, HalfSize, IMGSIZE - 1 - HalfSize)
        # Ret = 1 / (tf.norm(tf.map_fn(DoIn, Landmarks), axis=3) + 1)
        Ret = tf.map_fn(draw_landmarks_helper, Landmark)
        Ret = tf.reshape(tf.reduce_max(Ret, axis=0), [IMGSIZE, IMGSIZE, 1])
        return Ret
    return tf.map_fn(draw_landmarks, Landmarks)
Example 12: _common

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def _common(cls, node, **kwargs):
    axis = node.attrs.get("axis", 0)
    keepdims = node.attrs.get("keepdims", 1)
    select_last_index = node.attrs.get("select_last_index", 0)
    if select_last_index == 0:
        arg_min = cls.make_tensor_from_onnx_node(node, **kwargs)
    else:
        # Reverse the input and apply argmin on that to get the last
        # occurrence of the minimum.
        x = kwargs["tensor_dict"][node.inputs[0]]
        x = tf.reverse(x, axis=[axis])
        arg_min = cls.make_tensor_from_onnx_node(node, inputs=[x], **kwargs)
        # Adjust indices to account for the reverse.
        arg_min = tf_shape(x)[axis] - arg_min - 1
    if keepdims == 1:
        return [tf.expand_dims(arg_min, axis=axis)]
    return [arg_min]
Example 13: _common

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def _common(cls, node, **kwargs):
    axis = node.attrs.get("axis", 0)
    keepdims = node.attrs.get("keepdims", 1)
    select_last_index = node.attrs.get("select_last_index", 0)
    if select_last_index == 0:
        arg_max = cls.make_tensor_from_onnx_node(node, **kwargs)
    else:
        # Reverse the input and apply argmax on that to get the last
        # occurrence of the maximum.
        x = kwargs["tensor_dict"][node.inputs[0]]
        x = tf.reverse(x, axis=[axis])
        arg_max = cls.make_tensor_from_onnx_node(node, inputs=[x], **kwargs)
        # Adjust indices to account for the reverse.
        arg_max = tf_shape(x)[axis] - arg_max - 1
    if keepdims == 1:
        return [tf.expand_dims(arg_max, axis=axis)]
    return [arg_max]
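A quick check of the select_last_index adjustment (hypothetical values): with a repeated maximum, plain argmax returns the first index, while the reverse-and-adjust trick returns the last one:

x = tf.constant([1, 3, 3])
first = tf.argmax(x)  # 1
last = tf.shape(x, out_type=tf.int64)[0] - tf.argmax(tf.reverse(x, [0])) - 1  # 2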
Example 14: calculate_generalized_advantage_estimator

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def calculate_generalized_advantage_estimator(
        reward, value, done, gae_gamma, gae_lambda):
    # pylint: disable=g-doc-args
    """Generalized advantage estimator.

    Returns:
      GAE estimator. It will be one element shorter than the input; this is
      because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
    """
    # pylint: enable=g-doc-args
    next_value = value[1:, :]
    next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
    delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
             - value[:-1, :])
    return_ = tf.reverse(tf.scan(
        lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
        [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
        tf.zeros_like(delta[0, :]),
        parallel_iterations=1), [0])
    return tf.check_numerics(return_, "return")
Example 15: bw_dynamic_rnn

# Required imports: import tensorflow [as alias]
# Or: from tensorflow import reverse [as alias]
def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                   dtype=None, parallel_iterations=None, swap_memory=False,
                   time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!
    # flatten/reconstruct and _dynamic_rnn are helpers from the surrounding repo.
    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')
    # Without lengths, flip the whole time axis; with lengths, flip only the
    # valid prefix of each sequence so padding stays in place.
    flat_inputs = tf.reverse(flat_inputs, [1]) if sequence_length is None \
        else tf.reverse_sequence(flat_inputs, sequence_length, 1)
    flat_outputs, final_state = _dynamic_rnn(cell, flat_inputs, sequence_length=flat_len,
                                             initial_state=initial_state, dtype=dtype,
                                             parallel_iterations=parallel_iterations,
                                             swap_memory=swap_memory,
                                             time_major=time_major, scope=scope)
    flat_outputs = tf.reverse(flat_outputs, [1]) if sequence_length is None \
        else tf.reverse_sequence(flat_outputs, sequence_length, 1)
    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state
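A hedged contrast of the two flips used above, on a padded batch:

x = tf.constant([[1, 2, 3, 0]])                  # one sequence, padded to length 4
whole = tf.reverse(x, [1])                       # [[0, 3, 2, 1]]: padding moves to the front
valid = tf.reverse_sequence(x, [3], seq_axis=1)  # [[3, 2, 1, 0]]: padding stays in place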