This page collects typical usage examples of the tensorflow.cumsum method in Python. If you are wondering what tensorflow.cumsum does, how to call it, or where it is used, the curated examples below may help. You can also explore the other methods of the tensorflow module.

The following 15 code examples of tensorflow.cumsum are sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
Example 1: call
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
# Also required here: the Keras backend, e.g. from tensorflow.keras import backend as K
def call(self, x):
    if (self.size is None) or (self.mode == 'sum'):
        self.size = int(x.shape[-1])
    position_j = 1. / \
        K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
    position_j = K.expand_dims(position_j, 0)
    position_i = tf.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
    position_i = K.expand_dims(position_i, 2)
    position_ij = K.dot(position_i, position_j)
    outputs = K.concatenate(
        [K.cos(position_ij), K.sin(position_ij)], 2)
    if self.mode == 'sum':
        if self.scale:
            outputs = outputs * self.size ** 0.5  # scale by sqrt(d_model)
        return x + outputs
    elif self.mode == 'concat':
        return K.concatenate([outputs, x], 2)
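
The cumsum-over-ones idiom above is how the layer derives 0-based positions without tf.range. A minimal standalone sketch (shapes chosen arbitrarily):

import tensorflow as tf

# A hypothetical batch of 2 sequences with 4 timesteps and 3 features.
x = tf.zeros((2, 4, 3))
# Cumulative sum over ones along the time axis yields 1, 2, 3, 4;
# subtracting 1 gives each timestep's 0-based position index.
position_i = tf.cumsum(tf.ones_like(x[:, :, 0]), axis=1) - 1
print(position_i.numpy())
# [[0. 1. 2. 3.]
#  [0. 1. 2. 3.]]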
Example 2: _discount_reward_py_2d
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
# Also required here: import numpy as np
def _discount_reward_py_2d(reward, sequence_length=None,
                           discount=1., dtype=None):
    if sequence_length is not None:
        reward = mask_sequences(reward, sequence_length, dtype=dtype)
    dtype = dtype or reward.dtype
    if discount == 1.:
        disc_reward = np.cumsum(
            reward[:, ::-1], axis=1, dtype=dtype)[:, ::-1]
    else:
        disc_reward = np.copy(reward)
        for i in range(reward.shape[1] - 2, -1, -1):
            disc_reward[:, i] += disc_reward[:, i + 1] * discount
    return disc_reward
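
A quick check of the discount == 1. branch, which computes returns-to-go as a reversed cumulative sum (reward values chosen arbitrarily):

import numpy as np

reward = np.array([[1., 2., 3.],
                   [0., 1., 0.]])
# Reverse, accumulate, reverse back: entry i holds the sum of rewards from i on.
returns = np.cumsum(reward[:, ::-1], axis=1)[:, ::-1]
print(returns)
# [[6. 5. 3.]
#  [1. 1. 0.]]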
Example 3: _discount_reward_tensor_2d
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def _discount_reward_tensor_2d(reward, sequence_length=None,
                               discount=1., dtype=None):
    if sequence_length is not None:
        reward = mask_sequences(
            reward, sequence_length, dtype=dtype, tensor_rank=2)
    if discount == 1.:
        disc_reward = tf.cumsum(reward, axis=1, reverse=True)
    else:
        # [max_time, batch_size]
        rev_reward_T = tf.transpose(tf.reverse(reward, [1]), [1, 0])
        rev_reward_T_cum = tf.scan(
            fn=lambda acc, cur: cur + discount * acc,
            elems=rev_reward_T,
            initializer=tf.zeros_like(reward[:, 1]),
            back_prop=False)
        disc_reward = tf.reverse(
            tf.transpose(rev_reward_T_cum, [1, 0]), [1])
    return disc_reward
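
For discount < 1. the function falls back to tf.scan over time-major rewards; a small eager sketch of that path (discount 0.9, values arbitrary):

import tensorflow as tf

reward = tf.constant([[1., 2., 3.]])
rev = tf.transpose(tf.reverse(reward, [1]), [1, 0])   # [max_time, batch_size]
# Each scan step adds the current reward to the discounted accumulator.
acc = tf.scan(lambda a, r: r + 0.9 * a, rev,
              initializer=tf.zeros_like(reward[:, 0]))
print(tf.reverse(tf.transpose(acc, [1, 0]), [1]).numpy())
# ~[[5.23 4.7  3.  ]]   since 1 + 0.9 * (2 + 0.9 * 3) = 5.23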
Example 4: take_top_p_logits
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def take_top_p_logits(logits, p):
    """Nucleus sampling."""
    batch, sequence, _ = logits.shape.as_list()
    sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
    cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
    indices = tf.stack([
        tf.range(0, batch)[:, tf.newaxis],
        tf.range(0, sequence)[tf.newaxis, :],
        # number of indices to include
        tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
    ], axis=-1)
    min_values = tf.gather_nd(sorted_logits, indices)
    return tf.where(
        logits < min_values,
        tf.ones_like(logits) * -1e10,
        logits,
    )
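
A minimal eager check of the function above, assuming TF 2.x where tf.where broadcasts (logit values arbitrary):

import tensorflow as tf

# [batch=1, sequence=1, vocab=4] logits whose softmax is [0.5, 0.3, 0.1, 0.1];
# with p = 0.85 the nucleus keeps only the first two tokens.
logits = tf.math.log([[[0.5, 0.3, 0.1, 0.1]]])
filtered = take_top_p_logits(logits, 0.85)
print(tf.nn.softmax(filtered, axis=-1).numpy())
# [[[0.625 0.375 0.    0.   ]]]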
Example 5: disjoint_signal_to_batch
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def disjoint_signal_to_batch(X, I):
    """
    Converts a disjoint graph signal to batch mode by zero-padding.
    :param X: Tensor, node features of shape `(nodes, features)`.
    :param I: Tensor, graph IDs of shape `(nodes, )`.
    :return batch: Tensor, batched node features of shape `(batch, N_max, features)`.
    """
    I = tf.cast(I, tf.int32)
    num_nodes = tf.math.segment_sum(tf.ones_like(I), I)
    start_index = tf.cumsum(num_nodes, exclusive=True)
    n_graphs = tf.shape(num_nodes)[0]
    max_n_nodes = tf.reduce_max(num_nodes)
    batch_n_nodes = tf.shape(I)[0]
    feature_dim = tf.shape(X)[-1]
    index = tf.range(batch_n_nodes)
    index = (index - tf.gather(start_index, I)) + (I * max_n_nodes)
    dense = tf.zeros((n_graphs * max_n_nodes, feature_dim), dtype=X.dtype)
    dense = tf.tensor_scatter_nd_update(dense, index[..., None], X)
    batch = tf.reshape(dense, (n_graphs, max_n_nodes, feature_dim))
    return batch
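
A small eager example: two disjoint graphs with 2 and 3 nodes are padded into a [2, 3, 1] batch (feature values arbitrary):

import tensorflow as tf

X = tf.constant([[1.], [2.], [3.], [4.], [5.]])
I = tf.constant([0, 0, 1, 1, 1])   # graph 0 has 2 nodes, graph 1 has 3
print(disjoint_signal_to_batch(X, I).numpy())
# [[[1.] [2.] [0.]]    graph 0, zero-padded to the maximum of 3 nodes
#  [[3.] [4.] [5.]]]   graph 1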
Example 6: _vectorised_get_cum_graph_size
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def _vectorised_get_cum_graph_size(nodes, graph_sizes):
    """
    Takes a list of node ids and graph sizes ordered by segment ID and returns
    the number of nodes contained in graphs with a smaller segment ID.
    :param nodes: List of node ids of shape `(nodes, )`.
    :param graph_sizes: List of graph sizes (i.e. tf.math.segment_sum(tf.ones_like(I), I),
        where I are the segment IDs).
    :return: A list of shape `(nodes, )` where each entry is the number of nodes
        contained in graphs with a smaller segment ID than that node's graph.
    """
    def get_cum_graph_size(node):
        cum_graph_sizes = tf.cumsum(graph_sizes, exclusive=True)
        indicator_if_smaller = tf.cast(node - cum_graph_sizes >= 0, tf.int32)
        graph_id = tf.reduce_sum(indicator_if_smaller) - 1
        return cum_graph_sizes[graph_id]
    return tf.map_fn(get_cum_graph_size, nodes)
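
For example, with two graphs of sizes 2 and 3, each node is mapped to the node count of all earlier graphs:

import tensorflow as tf

graph_sizes = tf.constant([2, 3])
nodes = tf.constant([0, 1, 2, 3, 4])   # node ids ordered by segment ID
print(_vectorised_get_cum_graph_size(nodes, graph_sizes).numpy())
# [0 0 2 2 2]   graph 0 starts at offset 0, graph 1 at offset 2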
Example 7: _distributional_to_value
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def _distributional_to_value(value_d, size, subscale, threshold):
    """Get a scalar value out of a value distribution in distributional RL."""
    half = size // 2
    value_range = (tf.to_float(tf.range(-half, half)) + 0.5) * subscale
    probs = tf.nn.softmax(value_d)
    if threshold == 0.0:
        return tf.reduce_sum(probs * value_range, axis=-1)
    # accumulated_probs[..., i] is the sum of probabilities in buckets up to i,
    # so it is the probability that value <= the i-th bucket value.
    accumulated_probs = tf.cumsum(probs, axis=-1)
    # New probs are 0 on all lower buckets, until the threshold.
    probs = tf.where(accumulated_probs < threshold, tf.zeros_like(probs), probs)
    probs /= tf.reduce_sum(probs, axis=-1, keepdims=True)  # Re-normalize.
    return tf.reduce_sum(probs * value_range, axis=-1)
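
The threshold branch keeps only the upper tail of the distribution. A standalone TF 2.x sketch of the same cumsum-masking idea (the helper above uses TF1-style tf.to_float):

import tensorflow as tf

probs = tf.constant([0.1, 0.2, 0.3, 0.4])
accumulated = tf.cumsum(probs)                     # [0.1, 0.3, 0.6, 1.0]
# Zero out buckets whose cumulative probability is below the threshold.
kept = tf.where(accumulated < 0.5, tf.zeros_like(probs), probs)
kept /= tf.reduce_sum(kept)                        # re-normalize the tail
print(kept.numpy())                                # ~[0. 0. 0.429 0.571]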
Example 8: _compute_auxiliary_structure
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def _compute_auxiliary_structure(self, contents_and_mask):
    """Compute segment and position metadata."""
    contents = contents_and_mask[:, :self._num_sequences]
    start_mask = tf.cast(contents_and_mask[:, self._num_sequences:],
                         dtype=INDEX_DTYPE)
    segment = tf.cumsum(start_mask, axis=0)
    uniform_count = tf.ones_like(segment[:, 0])
    position = []
    for i in range(self._num_sequences):
        segment_slice = segment[:, i]
        counts = tf.math.segment_sum(uniform_count, segment[:, i])
        position.append(tf.range(self._packed_length) - tf.cumsum(
            tf.gather(counts, segment_slice - 1) * start_mask[:, i]))
    position = tf.concat([i[:, tf.newaxis] for i in position], axis=1)
    # Correct for padding tokens.
    pad_mask = tf.cast(tf.not_equal(contents, 0), dtype=INDEX_DTYPE)
    segment *= pad_mask
    position *= pad_mask
    return segment, position
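
The key step is segment = tf.cumsum(start_mask, axis=0): a running count of start flags assigns each packed token its sequence id. In isolation:

import tensorflow as tf

start_mask = tf.constant([1, 0, 0, 1, 0, 1, 0, 0])   # start-of-sequence flags
print(tf.cumsum(start_mask).numpy())
# [1 1 1 2 2 3 3 3]   three packed sequences of lengths 3, 2 and 3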
Example 9: slice_sentences
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def slice_sentences(document_features, picks, sentence_lengths):
    """Extract selected sentence spans from the document features.
    Arguments:
        document_features: A `[batch, length, features]` representation
            of the documents.
        picks: Sentences to extract, with shape `[batch, selections]`.
        sentence_lengths: Length of each sentence in the document, with shape
            `[batch, num_sentences]`.
    Returns extracted features for each selected sentence as a tensor with shape
    `[batch, selections, max_sentence_len, features]`.
    """
    sentence_offsets = tf.cumsum(
        sentence_lengths, axis=1, exclusive=True)
    starts = ops.gather_from_rows(sentence_offsets, picks)
    lengths = ops.gather_from_rows(sentence_lengths, picks)
    sentence_embeddings = ops.slice_fragments(
        document_features, starts, lengths)
    return sentence_embeddings
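
The exclusive cumsum is what turns per-sentence lengths into start offsets; in isolation:

import tensorflow as tf

sentence_lengths = tf.constant([[3, 2, 4]])
print(tf.cumsum(sentence_lengths, axis=1, exclusive=True).numpy())
# [[0 3 5]]   sentence i starts at the total length of the sentences before it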
Example 10: safe_cumprod
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
# Also required here: import numpy as np
def safe_cumprod(x: TensorLike, *args, **kwargs) -> tf.Tensor:
    """Computes cumprod of x in logspace using cumsum to avoid underflow.
    The cumprod function and its gradient can result in numerical instabilities
    when its argument has very small and/or zero values. As long as the
    argument is all positive, we can instead compute the cumulative product as
    exp(cumsum(log(x))). This function can be called identically to
    tf.cumprod.
    Args:
        x: Tensor to take the cumulative product of.
        *args: Passed on to cumsum; these are identical to those in cumprod.
        **kwargs: Passed on to cumsum; these are identical to those in cumprod.
    Returns:
        Cumulative product of x.
    """
    with tf.name_scope("SafeCumprod"):
        x = tf.convert_to_tensor(x, name="x")
        tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
        return tf.exp(
            tf.cumsum(tf.math.log(tf.clip_by_value(x, tiny, 1)), *args, **kwargs)
        )
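
(TensorLike here is a type alias from the originating library's typing utilities.) The logspace trick in isolation, compared against the direct cumulative product:

import numpy as np
import tensorflow as tf

x = tf.constant([0.5, 1e-7, 0.5])
tiny = np.finfo(np.float32).tiny
safe = tf.exp(tf.cumsum(tf.math.log(tf.clip_by_value(x, tiny, 1))))
print(safe.numpy())                 # ~[5.0e-01 5.0e-08 2.5e-08]
print(tf.math.cumprod(x).numpy())   # matches here; the logspace form also
# keeps the gradient finite when entries reach exactly zero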
Example 11: weights_multi_problem_all
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def weights_multi_problem_all(labels, taskid=-1):
    """Assign weight 1.0 only to examples from the given task."""
    weights = tf.to_float(tf.not_equal(labels, 0))
    if taskid < 0:
        raise ValueError("Task ID must be non-negative.")
    past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
    # Additionally zero out the task id location.
    past_taskid *= tf.to_float(tf.not_equal(labels, taskid))
    non_taskid = tf.to_float(labels)
    example_mask = tf.to_float(tf.not_equal(past_taskid * non_taskid, 0))
    example_mask = tf.reduce_sum(example_mask, axis=1)
    example_mask = tf.to_float(
        tf.greater(example_mask, tf.zeros_like(example_mask)))
    return weights * tf.expand_dims(example_mask, axis=-1)
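
The cumsum-based mask in isolation: positions strictly after the first occurrence of the task id get weight 1.0 (TF 2.x cast in place of tf.to_float, label values arbitrary):

import tensorflow as tf

labels = tf.constant([[5, 7, 9, 2, 4]])
taskid = 9
past = tf.cumsum(tf.cast(tf.equal(labels, taskid), tf.float32), axis=1)
past *= tf.cast(tf.not_equal(labels, taskid), tf.float32)  # zero the id itself
print(past.numpy())
# [[0. 0. 0. 1. 1.]]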
Example 12: shift_values
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def shift_values(values, discount, rollout, final_values=0.0):
    """Shift values up by some amount of time.
    Values that shift in from beyond the last value are computed
    from final_values.
    """
    roll_range = tf.cumsum(tf.ones_like(values[:rollout, :]), 0,
                           exclusive=True, reverse=True)
    final_pad = tf.expand_dims(final_values, 0) * discount ** roll_range
    return tf.concat([discount ** rollout * values[rollout:, :],
                      final_pad], 0)
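
The exclusive, reversed cumsum over ones counts the steps remaining after each row, which is then used as the exponent on the discount; in isolation:

import tensorflow as tf

ones = tf.ones((4, 1))
roll_range = tf.cumsum(ones, 0, exclusive=True, reverse=True)
print(roll_range[:, 0].numpy())
# [3. 2. 1. 0.]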
Example 13: gather
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def gather(params, indices, dtype=tf.float32):
    """Version of tf.gather that works faster on TPU."""
    if not is_on_tpu():
        return tf.gather(params, indices)
    vocab_size = params.get_shape().as_list()[0]
    indices_flat = tf.reshape(indices, [-1])
    out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)
    out = reshape_like(out, tf.expand_dims(indices, -1))
    return out


# TODO(noam): remove this function after TPUs do cumsum faster.
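
A quick eager check of the one-hot matmul path used above (bypassing the is_on_tpu guard; values arbitrary):

import tensorflow as tf

params = tf.constant([[1., 2.], [3., 4.], [5., 6.]])   # vocab_size = 3
indices = tf.constant([2, 0])
# Each one-hot row selects one row of params via matrix multiplication.
print(tf.matmul(tf.one_hot(indices, 3), params).numpy())
# [[5. 6.]
#  [1. 2.]]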
Example 14: cumsum
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def cumsum(x, axis=0, exclusive=False):
    """TPU hack for tf.cumsum.
    This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
    the axis dimension is very large.
    Args:
        x: a Tensor
        axis: an integer
        exclusive: a boolean
    Returns:
        Tensor of the same shape as x.
    """
    if not is_on_tpu():
        return tf.cumsum(x, axis=axis, exclusive=exclusive)
    x_shape = shape_list(x)
    rank = len(x_shape)
    length = x_shape[axis]
    my_range = tf.range(length)
    comparator = tf.less if exclusive else tf.less_equal
    mask = tf.cast(
        comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
        x.dtype)
    ret = tf.tensordot(x, mask, axes=[[axis], [0]])
    if axis != rank - 1:
        ret = tf.transpose(
            ret,
            list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
    return ret
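
The mask trick in isolation: a triangular comparison matrix turns tensordot into a running sum, matching tf.cumsum:

import tensorflow as tf

x = tf.constant([1., 2., 3., 4.])
r = tf.range(tf.shape(x)[0])
# mask[i, j] = 1 where i <= j, so column j sums x[0..j].
mask = tf.cast(tf.expand_dims(r, 1) <= tf.expand_dims(r, 0), x.dtype)
print(tf.tensordot(x, mask, axes=[[0], [0]]).numpy())  # [ 1.  3.  6. 10.]
print(tf.cumsum(x).numpy())                            # [ 1.  3.  6. 10.]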
Example 15: weights_prepend_inputs_to_targets
# Required import: import tensorflow [as alias]
# Or: from tensorflow import cumsum [as alias]
def weights_prepend_inputs_to_targets(labels):
    """Assign weight 1.0 to only the "targets" portion of the labels.
    Weight 1.0 is assigned to all nonzero labels past the first zero.
    See prepend_mode in common_hparams.py.
    Args:
        labels: A Tensor of int32s.
    Returns:
        A Tensor of floats.
    """
    past_first_zero = tf.cumsum(tf.to_float(tf.equal(labels, 0)), axis=1)
    nonzero = tf.to_float(labels)
    return tf.to_float(tf.not_equal(past_first_zero * nonzero, 0))
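
In isolation, with inputs [3, 5], a separating zero, and targets [7, 8] (TF 2.x cast in place of tf.to_float):

import tensorflow as tf

labels = tf.constant([[3, 5, 0, 7, 8]])
past_first_zero = tf.cumsum(tf.cast(tf.equal(labels, 0), tf.float32), axis=1)
nonzero = tf.cast(labels, tf.float32)
print(tf.cast(tf.not_equal(past_first_zero * nonzero, 0), tf.float32).numpy())
# [[0. 0. 0. 1. 1.]]   only the targets after the first zero get weight 1.0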