This article collects typical usage examples of the Python tensorflow.cumsum function, aimed at questions like: what exactly does cumsum do, and how is it used in practice? The curated code examples below may help.
The following 15 code examples of cumsum are presented, ordered by popularity.
Example 1: precision_recall
def precision_recall(num_gbboxes, num_detections, tp, fp, scores,
dtype=tf.float64, scope=None):
"""Compute precision and recall from scores, true positives and false
positives booleans arrays
"""
# Input dictionaries: dict outputs as streaming metrics.
if isinstance(scores, dict):
d_precision = {}
d_recall = {}
for c in num_gbboxes.keys():
scope = 'precision_recall_%s' % c
p, r = precision_recall(num_gbboxes[c], num_detections[c],
tp[c], fp[c], scores[c],
dtype, scope)
d_precision[c] = p
d_recall[c] = r
return d_precision, d_recall
# Sort by score.
with tf.name_scope(scope, 'precision_recall',
[num_gbboxes, num_detections, tp, fp, scores]):
# Sort detections by score.
scores, idxes = tf.nn.top_k(scores, k=num_detections, sorted=True)
tp = tf.gather(tp, idxes)
fp = tf.gather(fp, idxes)
# Compute recall and precision.
tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
fp = tf.cumsum(tf.cast(fp, dtype), axis=0)
recall = _safe_div(tp, tf.cast(num_gbboxes, dtype), 'recall')
precision = _safe_div(tp, tp + fp, 'precision')
return tf.tuple([precision, recall])
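What tf.cumsum contributes here: once detections are sorted by descending score, the running sums of the true-positive and false-positive indicators yield precision and recall at every score cutoff in one shot. A minimal sketch with toy values (evaluate in a session under TF 1.x):
import tensorflow as tf
tp = tf.constant([1., 1., 0., 1., 0.])   # TP indicators, sorted by score
fp = 1. - tp                             # FP indicators
num_gt = 4.                              # total ground-truth boxes
tp_cum = tf.cumsum(tp)                   # [1., 2., 2., 3., 3.]
fp_cum = tf.cumsum(fp)                   # [0., 0., 1., 1., 2.]
recall = tp_cum / num_gt                 # [0.25, 0.5, 0.5, 0.75, 0.75]
precision = tp_cum / (tp_cum + fp_cum)   # [1., 1., 0.667, 0.75, 0.6]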
Example 2: lovasz_grad
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
gts = tf.reduce_sum(gt_sorted)
intersection = gts - tf.cumsum(gt_sorted)
union = gts + tf.cumsum(1. - gt_sorted)
jaccard = 1. - intersection / union
jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
return jaccard
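Worked through on toy values: with the ground-truth labels sorted by descending prediction error, the two cumulative sums track the intersection and union between each prefix and the full ground truth, and first differences of the resulting Jaccard curve give the gradient. A sketch, not the library code:
import tensorflow as tf
gt_sorted = tf.constant([1., 1., 0., 1.])
gts = tf.reduce_sum(gt_sorted)              # 3.0 foreground pixels
intersection = gts - tf.cumsum(gt_sorted)   # [2., 1., 1., 0.]
union = gts + tf.cumsum(1. - gt_sorted)     # [3., 3., 4., 4.]
jaccard = 1. - intersection / union         # [0.333, 0.667, 0.75, 1.]
grad = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)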
Example 3: logits_to_epsilon_bounds
def logits_to_epsilon_bounds(logits, images):
probs = tf.reshape(tf.nn.softmax(tf.reshape(logits, [-1, 256])), tf.shape(logits))
cdf_lower = tf.cumsum(probs, axis=4, exclusive=True)
cdf_upper = tf.cumsum(probs, axis=4, exclusive=False)
# Awful hack to select the correct values
images_mask = tf.one_hot(images, 256)
cdf_lower = tf.reduce_sum(cdf_lower * images_mask, reduction_indices=[4])
cdf_upper = tf.reduce_sum(cdf_upper * images_mask, reduction_indices=[4])
return cdf_lower, cdf_upper
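The exclusive/inclusive pair is the whole trick: exclusive=True sums everything strictly before each position, giving P(X < k), while the default inclusive sum also includes the position itself, giving P(X <= k). A one-dimensional sketch:
import tensorflow as tf
probs = tf.constant([0.1, 0.2, 0.3, 0.4])      # softmax PMF for one pixel
cdf_lower = tf.cumsum(probs, exclusive=True)   # [0.0, 0.1, 0.3, 0.6]
cdf_upper = tf.cumsum(probs, exclusive=False)  # [0.1, 0.3, 0.6, 1.0]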
Example 4: unwrap
def unwrap(p, discont=np.pi, axis=-1):
"""Unwrap a cyclical phase tensor.
Args:
p: Phase tensor.
discont: Float, size of the cyclic discontinuity.
axis: Axis of which to unwrap.
Returns:
unwrapped: Unwrapped tensor of same size as input.
"""
dd = diff(p, axis=axis)
ddmod = tf.mod(dd + np.pi, 2.0 * np.pi) - np.pi
idx = tf.logical_and(tf.equal(ddmod, -np.pi), tf.greater(dd, 0))
ddmod = tf.where(idx, tf.ones_like(ddmod) * np.pi, ddmod)
ph_correct = ddmod - dd
idx = tf.less(tf.abs(dd), discont)
# Zero the correction where the jump is smaller than `discont` (np.unwrap semantics).
ph_correct = tf.where(idx, tf.zeros_like(ph_correct), ph_correct)
ph_cumsum = tf.cumsum(ph_correct, axis=axis)
shape = p.get_shape().as_list()
shape[axis] = 1
ph_cumsum = tf.concat([tf.zeros(shape, dtype=p.dtype), ph_cumsum], axis=axis)
unwrapped = p + ph_cumsum
return unwrapped
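The cumsum is the integration step: each wrap event contributes a correction of roughly ±2π, and accumulating the corrections along the axis makes every later sample inherit them. A minimal 1-D sketch of the same idea (toy values, TF 1.x ops):
import numpy as np
import tensorflow as tf
p = tf.constant([0.0, 3.0, -3.0, 0.1])            # wraps between samples 2 and 3
dd = p[1:] - p[:-1]                               # [3.0, -6.0, 3.1]
ddmod = tf.mod(dd + np.pi, 2.0 * np.pi) - np.pi   # wrapped differences
ph_correct = ddmod - dd                           # [0., ~2pi, 0.]
offsets = tf.concat([[0.0], tf.cumsum(ph_correct)], axis=0)
unwrapped = p + offsets                           # [0.0, 3.0, ~3.28, ~6.38]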
Example 5: _get_values_from_start_and_end
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
num_end_samples, total_num_samples):
"""slices num_start_samples and last num_end_samples from input_tensor.
Args:
input_tensor: An int32 tensor of shape [N] to be sliced.
num_start_samples: Number of examples to be sliced from the beginning
of the input tensor.
num_end_samples: Number of examples to be sliced from the end of the
input tensor.
total_num_samples: Sum of num_start_samples and num_end_samples. This
should be a scalar.
Returns:
A tensor containing the first num_start_samples and last num_end_samples
from input_tensor.
"""
input_length = tf.shape(input_tensor)[0]
start_positions = tf.less(tf.range(input_length), num_start_samples)
end_positions = tf.greater_equal(
tf.range(input_length), input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)
selected_positions = tf.cast(selected_positions, tf.int32)
indexed_positions = tf.multiply(tf.cumsum(selected_positions),
selected_positions)
one_hot_selector = tf.one_hot(indexed_positions - 1,
total_num_samples,
dtype=tf.int32)
return tf.tensordot(input_tensor, one_hot_selector, axes=[0, 0])
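The cumsum-times-mask idiom deserves a close look: the running sum over the 0/1 selection mask assigns each selected element its 1-based output position, multiplying by the mask zeroes out the rest, and one_hot on the shifted positions builds a gather matrix with a static output width. A sketch with toy values:
import tensorflow as tf
selected = tf.constant([1, 0, 1, 1, 0])
positions = tf.cumsum(selected) * selected   # [1, 0, 2, 3, 0]
# Unselected entries map to -1, which one_hot turns into all-zero rows.
one_hot = tf.one_hot(positions - 1, depth=3, dtype=tf.int32)
values = tf.constant([10, 20, 30, 40, 50])
result = tf.tensordot(values, one_hot, axes=[0, 0])   # [10, 30, 40]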
Example 6: weights_concatenated
def weights_concatenated(labels):
"""Assign weight 1.0 to the "target" part of the concatenated labels.
The labels look like:
source English I love you . ID1 target French Je t'aime . ID1 source
English the cat ID1 target French le chat ID1 source English ...
We want to assign weight 1.0 to all words in the target text (including the
ID1 end symbol), but not to the source text or the boilerplate. In the
above example, the target words that get positive weight are:
Je t'aime . ID1 le chat ID1
Args:
labels: a Tensor
Returns:
a Tensor
"""
eos_mask = tf.to_int32(tf.equal(labels, 1))
sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
in_target = tf.equal(tf.mod(sentence_num, 2), 1)
# first two tokens of each sentence are boilerplate.
sentence_num_plus_one = sentence_num + 1
shifted = tf.pad(sentence_num_plus_one, [[0, 0], [2, 0], [0, 0],
[0, 0]])[:, :-2, :, :]
nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))
return ret
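The key step is the exclusive cumsum over the EOS mask: each token receives the count of end-of-sentence markers strictly before it, i.e. the index of the sentence it belongs to, and the parity of that index separates source from target. A sketch where 1 plays the role of the ID1 symbol:
import tensorflow as tf
labels_toy = tf.constant([[5, 6, 1, 7, 8, 1, 9, 1]])
eos_mask = tf.cast(tf.equal(labels_toy, 1), tf.int32)       # [[0,0,1,0,0,1,0,1]]
sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)  # [[0,0,0,1,1,1,2,2]]
in_target = tf.equal(tf.mod(sentence_num, 2), 1)            # odd sentences = target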
Example 7: specgrams_to_melspecgrams
def specgrams_to_melspecgrams(self, specgrams):
"""Converts specgrams to melspecgrams.
Args:
specgrams: Tensor of log magnitudes and instantaneous frequencies,
shape [batch, time, freq, 2].
Returns:
melspecgrams: Tensor of log magnitudes and instantaneous frequencies,
shape [batch, time, freq, 2], mel scaling of frequencies.
"""
if self._mel_downscale is None:
return specgrams
logmag = specgrams[:, :, :, 0]
p = specgrams[:, :, :, 1]
mag2 = tf.exp(2.0 * logmag)
phase_angle = tf.cumsum(p * np.pi, axis=-2)
l2mel = tf.to_float(self._linear_to_mel_matrix())
logmelmag2 = self._safe_log(tf.tensordot(mag2, l2mel, 1))
mel_phase_angle = tf.tensordot(phase_angle, l2mel, 1)
mel_p = spectral_ops.instantaneous_frequency(mel_phase_angle)
return tf.concat(
[logmelmag2[:, :, :, tf.newaxis], mel_p[:, :, :, tf.newaxis]], axis=-1)
Example 8: melspecgrams_to_specgrams
def melspecgrams_to_specgrams(self, melspecgrams):
"""Converts melspecgrams to specgrams.
Args:
melspecgrams: Tensor of log magnitudes and instantaneous frequencies,
shape [batch, time, freq, 2], mel scaling of frequencies.
Returns:
specgrams: Tensor of log magnitudes and instantaneous frequencies,
shape [batch, time, freq, 2].
"""
if self._mel_downscale is None:
return melspecgrams
logmelmag2 = melspecgrams[:, :, :, 0]
mel_p = melspecgrams[:, :, :, 1]
mel2l = tf.to_float(self._mel_to_linear_matrix())
mag2 = tf.tensordot(tf.exp(logmelmag2), mel2l, 1)
logmag = 0.5 * self._safe_log(mag2)
mel_phase_angle = tf.cumsum(mel_p * np.pi, axis=-2)
phase_angle = tf.tensordot(mel_phase_angle, mel2l, 1)
p = spectral_ops.instantaneous_frequency(phase_angle)
return tf.concat(
[logmag[:, :, :, tf.newaxis], p[:, :, :, tf.newaxis]], axis=-1)
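In both of the last two examples, tf.cumsum(... * np.pi, axis=-2) integrates instantaneous frequency over the time axis to recover a running phase angle, so the mel/linear change of basis is applied to phase rather than to its noisy per-frame derivative; spectral_ops.instantaneous_frequency then differentiates back. A toy 1-D version of the integration step (time on the last axis here instead of axis=-2):
import numpy as np
import tensorflow as tf
inst_freq = tf.constant([[0.5, 0.5, -0.5, 0.25]])    # radians/pi per frame
phase_angle = tf.cumsum(inst_freq * np.pi, axis=-1)  # [[pi/2, pi, pi/2, 3pi/4]]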
Example 9: __init__
def __init__(self,
state_size,
num_timesteps,
mixing_coeff=0.5,
prior_mode_mean=1,
sigma_min=1e-5,
variance=1.0,
dtype=tf.float32,
random_seed=None,
trainable=True,
init_bs_to_zero=False,
graph_collection_name="P_VARS"):
self.state_size = state_size
self.num_timesteps = num_timesteps
self.sigma_min = sigma_min
self.dtype = dtype
self.variance = variance
self.mixing_coeff = mixing_coeff
self.prior_mode_mean = prior_mode_mean
if init_bs_to_zero:
initializers = [tf.zeros_initializer for _ in xrange(num_timesteps)]
else:
initializers = [tf.random_uniform_initializer(seed=random_seed) for _ in xrange(num_timesteps)]
self.bs = [
tf.get_variable(
shape=[state_size],
dtype=self.dtype,
name="b_%d" % (t + 1),
initializer=initializers[t],
collections=[tf.GraphKeys.GLOBAL_VARIABLES, graph_collection_name],
trainable=trainable) for t in xrange(num_timesteps)
]
self.Bs = tf.cumsum(self.bs, reverse=True, axis=0)
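reverse=True flips the direction of accumulation, so Bs[t] is the suffix sum b_t + b_{t+1} + ... + b_T rather than a prefix sum. A minimal sketch:
import tensorflow as tf
bs = tf.constant([[1.], [2.], [3.]])       # stand-ins for the b_t variables
Bs = tf.cumsum(bs, axis=0, reverse=True)   # [[6.], [5.], [3.]]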
Example 10: _compareGradient
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(0, 50).reshape(shape).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
result = tf.cumsum(t, axis, exclusive, reverse)
jacob_t, jacob_n = tf.test.compute_gradient(t, shape, result, shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
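A fact this test exercises numerically: the gradient of a cumulative sum is a cumulative sum in the opposite direction, since x[i] contributes to every output from position i onward. A sketch in TF 1.x graph mode:
import tensorflow as tf
x = tf.constant([1., 2., 3., 4.])
y = tf.cumsum(x)                             # [1., 3., 6., 10.]
grad = tf.gradients(tf.reduce_sum(y), x)[0]  # [4., 3., 2., 1.]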
Example 11: boolean_mask
def boolean_mask(boxlist, indicator, fields=None, scope=None,
use_static_shapes=False, indicator_sum=None):
"""Select boxes from BoxList according to indicator and return new BoxList.
`boolean_mask` returns the subset of boxes that are marked as "True" by the
indicator tensor. By default, `boolean_mask` returns boxes corresponding to
the input index list, as well as all additional fields stored in the boxlist
(indexing into the first dimension). However one can optionally only draw
from a subset of fields.
Args:
boxlist: BoxList holding N boxes
indicator: a rank-1 boolean tensor
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
scope: name scope.
use_static_shapes: Whether to use an implementation with static shape
guarantees.
indicator_sum: An integer containing the sum of `indicator` vector. Only
required if `use_static_shapes` is True.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indicator
Raises:
ValueError: if `indicator` is not a rank-1 boolean tensor.
"""
with tf.name_scope(scope, 'BooleanMask'):
if indicator.shape.ndims != 1:
raise ValueError('indicator should have rank 1')
if indicator.dtype != tf.bool:
raise ValueError('indicator should be a boolean tensor')
if use_static_shapes:
if not (indicator_sum and isinstance(indicator_sum, int)):
raise ValueError('`indicator_sum` must be of type int')
selected_positions = tf.to_float(indicator)
indexed_positions = tf.cast(
tf.multiply(
tf.cumsum(selected_positions), selected_positions),
dtype=tf.int32)
one_hot_selector = tf.one_hot(
indexed_positions - 1, indicator_sum, dtype=tf.float32)
sampled_indices = tf.cast(
tf.tensordot(
tf.to_float(tf.range(tf.shape(indicator)[0])),
one_hot_selector,
axes=[0, 0]),
dtype=tf.int32)
return gather(boxlist, sampled_indices, use_static_shapes=True)
else:
subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
if fields is None:
fields = boxlist.get_extra_fields()
for field in fields:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all specified fields')
subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)
subboxlist.add_field(field, subfieldlist)
return subboxlist
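The static-shape branch reuses the cumsum/one_hot selector from Example 5, but dots it against the index range rather than the data, recovering the positions of the True entries with a statically known output length. A sketch with toy values:
import tensorflow as tf
indicator = tf.constant([True, False, True, True])
sel = tf.cast(indicator, tf.float32)
idx_pos = tf.cast(tf.cumsum(sel) * sel, tf.int32)  # [1, 0, 2, 3]
one_hot = tf.one_hot(idx_pos - 1, depth=3)         # static width 3
rng = tf.cast(tf.range(4), tf.float32)
sampled = tf.cast(tf.tensordot(rng, one_hot, axes=[0, 0]), tf.int32)  # [0, 2, 3]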
Example 12: _precision_recall
def _precision_recall(n_gbboxes, n_detections, scores, tp, fp, scope=None):
"""Compute precision and recall from scores, true positives and false
positives booleans arrays
"""
# Sort by score.
with tf.name_scope(scope, 'prec_rec', [n_gbboxes, scores, tp, fp]):
# Sort detections by score.
scores, idxes = tf.nn.top_k(scores, k=n_detections, sorted=True)
tp = tf.gather(tp, idxes)
fp = tf.gather(fp, idxes)
# Compute recall and precision.
dtype = tf.float64
tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
fp = tf.cumsum(tf.cast(fp, dtype), axis=0)
recall = _safe_div(tp, tf.cast(n_gbboxes, dtype), 'recall')
precision = _safe_div(tp, tp + fp, 'precision')
return tf.tuple([precision, recall])
Example 13: remap_keys
def remap_keys(sparse_tensor):
# Current indices of our SparseTensor that we need to fix
bad_indices = sparse_tensor.indices
# Current values of our SparseTensor that we need to fix
bad_values = sparse_tensor.values
# Group by the batch_indices and get the count for each
size = tf.segment_sum(data = tf.ones_like(bad_indices[:,0], dtype = tf.int64), segment_ids = bad_indices[:,0]) - 1
# The number of batch_indices (this should be batch_size unless it is a partially full batch)
length = tf.shape(size, out_type = tf.int64)[0]
# Finds the cumulative sum which we can use for indexing later
cum = tf.cumsum(size)
# The offsets between each example in the batch due to our concatenation of the keys in the decode_example method
length_range = tf.range(start = 0, limit = length, delta = 1, dtype = tf.int64)
# Indices of the SparseTensor's indices member of the rows we added by the concatenation of our keys in the decode_example method
cum_range = cum + length_range
# The keys that we have extracted back out of our concatenated SparseTensor
gathered_indices = tf.squeeze(tf.gather(bad_indices, cum_range)[:,1])
# The enumerated row indices of the SparseTensor's indices member
sparse_indices_range = tf.range(tf.shape(bad_indices, out_type = tf.int64)[0], dtype = tf.int64)
# We want to find the row indices of the SparseTensor's indices member that belong to our actual data and not to the concatenated key rows
# So we want to find the intersection of the two sets and then take the opposite of that
x = sparse_indices_range
s = cum_range
# Number of multiples we are going to tile x, which is our sparse_indices_range
tile_multiples = tf.concat([tf.ones(tf.shape(tf.shape(x)), dtype=tf.int64), tf.shape(s, out_type = tf.int64)], axis = 0)
# Expands x, our sparse_indices_range, into a rank 2 tensor and then multiplies the rows by 1 (no copying) and the columns by the number of examples in the batch
x_tile = tf.tile(tf.expand_dims(x, -1), tile_multiples)
# Essentially a vectorized logical OR, which we then negate
x_not_in_s = ~tf.reduce_any(tf.equal(x_tile, s), -1)
# The SparseTensor's indices that are our actual data by using the boolean_mask we just made above applied to the entire indices member of our SparseTensor
selected_indices = tf.boolean_mask(tensor = bad_indices, mask = x_not_in_s, axis = 0)
# Apply the same boolean_mask to the entire values member of our SparseTensor to get the actual values data
selected_values = tf.boolean_mask(tensor = bad_values, mask = x_not_in_s, axis = 0)
# Need to replace the first column of our selected_indices with keys, so we first need to tile our gathered_indices
tiling = tf.tile(input = tf.expand_dims(gathered_indices[0], -1), multiples = tf.expand_dims(size[0] , -1))
# We have to repeatedly apply the tiling to each example in the batch
# Since it is jagged we cannot use tf.map_fn due to the stacking of the TensorArray, so we have to create our own custom version
def loop_body(i, tensor_grow):
return i + 1, tf.concat(values = [tensor_grow, tf.tile(input = tf.expand_dims(gathered_indices[i], -1), multiples = tf.expand_dims(size[i] , -1))], axis = 0)
_, result = tf.while_loop(lambda i, tensor_grow: i < length, loop_body, [tf.constant(1, dtype = tf.int64), tiling])
# Concatenate tiled keys with the 2nd column of selected_indices
selected_indices_fixed = tf.concat([tf.expand_dims(result, -1), tf.expand_dims(selected_indices[:, 1], -1)], axis = 1)
# Combine everything together back into a SparseTensor
remapped_sparse_tensor = tf.SparseTensor(indices = selected_indices_fixed, values = selected_values, dense_shape = sparse_tensor.dense_shape)
return remapped_sparse_tensor
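The indexing arithmetic is easier to see on hypothetical sizes: if size holds the number of data rows per example (the appended key row already subtracted), then cumsum(size) plus each example's offset in the batch lands exactly on that example's key row in the concatenated SparseTensor:
import tensorflow as tf
size = tf.constant([2, 3, 1], dtype=tf.int64)  # data rows per example
cum = tf.cumsum(size)                          # [2, 5, 6]
length_range = tf.range(3, dtype=tf.int64)     # [0, 1, 2]
cum_range = cum + length_range                 # [2, 6, 8] = key-row indices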
Example 14: reconstruction_loss
def reconstruction_loss(self, x_input, x_target, x_length, z=None):
"""Reconstruction loss calculation.
Args:
x_input: Batch of decoder input sequences for teacher forcing, sized
`[batch_size, max(x_length), output_depth]`.
x_target: Batch of expected output sequences to compute loss against,
sized `[batch_size, max(x_length), output_depth]`.
x_length: Length of input/output sequences, sized `[batch_size]`.
z: (Optional) Latent vectors. Required if model is conditional. Sized
`[n, z_size]`.
Returns:
r_loss: The reconstruction loss for each sequence in the batch.
metric_map: Map from metric name to tf.metrics return values for logging.
truths: Ground truth labels.
predictions: Predicted labels.
"""
batch_size = x_input.shape[0].value
has_z = z is not None
z = tf.zeros([batch_size, 0]) if z is None else z
repeated_z = tf.tile(
tf.expand_dims(z, axis=1), [1, tf.shape(x_input)[1], 1])
sampling_probability_static = tensor_util.constant_value(
self._sampling_probability)
if sampling_probability_static == 0.0:
# Use teacher forcing.
x_input = tf.concat([x_input, repeated_z], axis=2)
helper = seq2seq.TrainingHelper(x_input, x_length)
else:
# Use scheduled sampling.
helper = seq2seq.ScheduledOutputTrainingHelper(
inputs=x_input,
sequence_length=x_length,
auxiliary_inputs=repeated_z if has_z else None,
sampling_probability=self._sampling_probability,
next_inputs_fn=self._sample)
decoder_outputs = self._decode(z, helper=helper, x_input=x_input)
flat_x_target = flatten_maybe_padded_sequences(x_target, x_length)
flat_rnn_output = flatten_maybe_padded_sequences(
decoder_outputs.rnn_output, x_length)
r_loss, metric_map, truths, predictions = self._flat_reconstruction_loss(
flat_x_target, flat_rnn_output)
# Sum loss over sequences.
cum_x_len = tf.concat([(0,), tf.cumsum(x_length)], axis=0)
r_losses = []
for i in range(batch_size):
b, e = cum_x_len[i], cum_x_len[i + 1]
r_losses.append(tf.reduce_sum(r_loss[b:e]))
r_loss = tf.stack(r_losses)
return r_loss, metric_map, truths, predictions
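The cumsum at the end converts per-sequence lengths into [begin, end) offsets inside the flattened loss tensor, so each sequence's losses can be summed separately. In isolation:
import tensorflow as tf
x_length = tf.constant([3, 2, 4])
cum_x_len = tf.concat([(0,), tf.cumsum(x_length)], axis=0)  # [0, 3, 5, 9]
# Sequence i occupies flat positions cum_x_len[i] : cum_x_len[i + 1].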
Example 15: crappy_plot
def crappy_plot(val, levels):
x_len = val.get_shape().as_list()[1]
left_val = tf.concat((val[:, 0:1], val[:, 0:x_len - 1]), axis=1)
right_val = tf.concat((val[:, 1:], val[:, x_len - 1:]), axis=1)
left_mean = (val + left_val) // 2
right_mean = (val + right_val) // 2
low_val = tf.minimum(tf.minimum(left_mean, right_mean), val)
high_val = tf.maximum(tf.maximum(left_mean, right_mean), val + 1)
return tf.cumsum(tf.one_hot(low_val, levels, axis=1) - tf.one_hot(high_val, levels, axis=1), axis=1)
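The final cumsum is run-length painting: a +1 marker at the low level and a -1 marker at the high level, accumulated along the level axis, fill every level in [low, high) with ones. A sketch of that trick on one column (default one_hot axis instead of axis=1):
import tensorflow as tf
low = tf.constant([1])                            # first filled level
high = tf.constant([4])                           # first empty level
mark = tf.one_hot(low, 5) - tf.one_hot(high, 5)   # [[0., 1., 0., 0., -1.]]
column = tf.cumsum(mark, axis=1)                  # [[0., 1., 1., 1., 0.]]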