This article collects typical usage examples of the tensorflow.argsort method in Python. If you are wondering what exactly tensorflow.argsort does, how to use it, or are looking for sample code, the curated examples below may help. You can also read further usage examples of the module this method belongs to, tensorflow.
The following shows 15 code examples of the tensorflow.argsort method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
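As a quick orientation before the examples, here is a minimal, self-contained sketch of what tf.argsort returns (assuming TensorFlow 2.x eager execution; the tensor values are made up for illustration). Several of the examples below rely on the last pattern: applying tf.argsort twice turns scores into ranks.

import tensorflow as tf

x = tf.constant([3.0, 1.0, 2.0])
order = tf.argsort(x, direction='ASCENDING')   # [1, 2, 0] -- indices that would sort x
sorted_x = tf.gather(x, order)                 # [1.0, 2.0, 3.0]
ranks = tf.argsort(order)                      # [2, 0, 1] -- 0-based rank of each element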
Example 1: _to_nd_indices
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def _to_nd_indices(indices):
  """Returns indices used for tf.gather_nd or tf.scatter_nd.

  Args:
    indices: A `Tensor` of shape [batch_size, size] with integer values. The
      values are the indices of another `Tensor`. For example, `indices` is the
      output of tf.argsort or tf.math.top_k.

  Returns:
    A `Tensor` with shape [batch_size, size, 2] that can be used by
    tf.gather_nd or tf.scatter_nd.
  """
  indices.get_shape().assert_has_rank(2)
  batch_ids = tf.ones_like(indices) * tf.expand_dims(
      tf.range(tf.shape(input=indices)[0]), 1)
  return tf.stack([batch_ids, indices], axis=-1)
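A small usage sketch of the helper above (not part of the original snippet; eager mode and made-up scores assumed): the (batch, index) pairs let tf.gather_nd pull out the sorted rows batch-wise.

scores = tf.constant([[0.3, 0.9, 0.1],
                      [0.5, 0.2, 0.8]])
order = tf.argsort(scores, direction='DESCENDING')   # [[1, 0, 2], [2, 0, 1]]
nd = _to_nd_indices(order)                           # shape [2, 3, 2], pairs of (batch, index)
sorted_scores = tf.gather_nd(scores, nd)             # [[0.9, 0.3, 0.1], [0.8, 0.5, 0.2]]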
Example 2: _to_nd_indices
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def _to_nd_indices(indices):
  """Returns indices used for tf.gather_nd or tf.scatter_nd.

  Args:
    indices: A `Tensor` of shape [batch_size, size] with integer values. The
      values are the indices of another `Tensor`. For example, `indices` is the
      output of tf.argsort or tf.math.top_k.

  Returns:
    A `Tensor` with shape [batch_size, size, 2] that can be used by
    tf.gather_nd or tf.scatter_nd.
  """
  indices.get_shape().assert_has_rank(2)
  batch_ids = tf.ones_like(indices) * tf.expand_dims(
      tf.range(tf.shape(input=indices)[0]), 1)
  return tf.stack([batch_ids, indices], axis=-1)
Example 3: hard_negative_mining
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def hard_negative_mining(loss, gt_confs, neg_ratio):
    """Hard negative mining: picks negative examples for back-propagation
    based on their classification loss values.

    Args:
        loss: classification losses of all default boxes (B, num_default)
        gt_confs: classification targets (B, num_default)
        neg_ratio: negative / positive ratio

    Returns:
        pos_idx: boolean mask of positive default boxes (B, num_default)
        neg_idx: boolean mask of the selected hard negatives (B, num_default)
    """
    # loss: B x N
    # gt_confs: B x N
    pos_idx = gt_confs > 0
    num_pos = tf.reduce_sum(tf.dtypes.cast(pos_idx, tf.int32), axis=1)
    num_neg = num_pos * neg_ratio

    rank = tf.argsort(loss, axis=1, direction='DESCENDING')
    rank = tf.argsort(rank, axis=1)
    neg_idx = rank < tf.expand_dims(num_neg, 1)

    return pos_idx, neg_idx
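The two consecutive tf.argsort calls are the standard "argsort of argsort" trick: the second call turns the sort permutation into per-row ranks. A small illustration with made-up losses (eager mode assumed):

loss = tf.constant([[0.2, 0.9, 0.5, 0.1]])
order = tf.argsort(loss, axis=1, direction='DESCENDING')  # [[1, 2, 0, 3]]
rank = tf.argsort(order, axis=1)                          # [[2, 0, 1, 3]]
# rank[b, j] is the position of box j when losses are sorted high-to-low, so
# `rank < num_neg` keeps exactly the num_neg hardest negatives per image.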
Example 4: _top_k_sample
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def _top_k_sample(logits, ignore_ids=None, num_samples=1, k=10):
    """
    Does top-k sampling. If ignore_ids is given, those logits are masked out before sampling.
    :param logits: [batch_size, vocab_size] tensor
    :param ignore_ids: [vocab_size] one-hot representation of the indices we'd like to ignore and never predict,
                       like padding maybe
    :param num_samples: number of samples to draw per batch element
    :param k: top-k cutoff to use, either an int or a [batch_size] vector
    :return: [batch_size, num_samples] samples
    # TODO FIGURE OUT HOW TO DO THIS ON TPUS. IT'S HELLA SLOW RIGHT NOW, DUE TO ARGSORT I THINK
    """
    with tf.variable_scope('top_p_sample'):
        batch_size, vocab_size = get_shape_list(logits, expected_rank=2)

        probs = tf.nn.softmax(logits if ignore_ids is None else logits - tf.cast(ignore_ids[None], tf.float32) * 1e10,
                              axis=-1)

        # [batch_size, vocab_perm]
        indices = tf.argsort(probs, direction='DESCENDING')

        # find the top kth index to cut off. careful we don't want to cut off everything!
        # result will be [batch_size, vocab_perm]
        k_expanded = k if isinstance(k, int) else k[:, None]
        exclude_mask = tf.range(vocab_size)[None] >= k_expanded

        # OPTION A - sample in the sorted space, then unsort.
        logits_to_use = tf.batch_gather(logits, indices) - tf.cast(exclude_mask, tf.float32) * 1e10
        sample_perm = tf.random.categorical(logits=logits_to_use, num_samples=num_samples)
        sample = tf.batch_gather(indices, sample_perm)

        return {
            'probs': probs,
            'sample': sample,
        }
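The snippet above is TF 1.x code (tf.variable_scope, tf.batch_gather) and sorts the whole vocabulary. For reference only, here is a rough TF 2.x sketch of the same masking idea that avoids the full sort by using tf.math.top_k; the function name and the -1e10 mask value are illustrative choices, not the original repo's API.

def top_k_sample_sketch(logits, k=10, num_samples=1):
    # k-th largest logit per row; everything strictly below it is masked out.
    top_values, _ = tf.math.top_k(logits, k=k)          # [batch, k], sorted descending
    cutoff = top_values[:, -1:]                         # [batch, 1]
    masked = logits - tf.cast(logits < cutoff, logits.dtype) * 1e10
    # Note: ties with the cutoff value are all kept, so slightly more than k
    # tokens may survive when logits are exactly equal.
    return tf.random.categorical(logits=masked, num_samples=num_samples)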
Example 5: call
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def call(self, inputs):
    if self.data_mode == 'disjoint':
        X, I = inputs
        X = ops.disjoint_signal_to_batch(X, I)
    else:
        X = inputs
        if self.data_mode == 'single':
            X = tf.expand_dims(X, 0)

    N = tf.shape(X)[-2]
    sort_perm = tf.argsort(X[..., -1], direction='DESCENDING')
    X_sorted = tf.gather(X, sort_perm, axis=-2, batch_dims=1)

    def truncate():
        _X_out = X_sorted[..., : self.k, :]
        return _X_out

    def pad():
        padding = [[0, 0], [0, self.k - N], [0, 0]]
        _X_out = tf.pad(X_sorted, padding)
        return _X_out

    X_out = tf.cond(tf.less_equal(self.k, N), truncate, pad)

    if self.data_mode == 'single':
        X_out = tf.squeeze(X_out, [0])
        X_out.set_shape((self.k, self.F))
    elif self.data_mode == 'batch' or self.data_mode == 'disjoint':
        X_out.set_shape((None, self.k, self.F))

    return X_out
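The core of the layer is the per-graph sort of node rows by the last feature column. A stand-alone sketch of just that step with random node features in batch mode (shapes are illustrative, not the layer's actual configuration):

X = tf.random.normal([2, 5, 3])                              # [batch, nodes, features]
sort_perm = tf.argsort(X[..., -1], direction='DESCENDING')   # [2, 5]
X_sorted = tf.gather(X, sort_perm, axis=-2, batch_dims=1)    # nodes reordered per graph
X_topk = X_sorted[:, :3, :]                                  # keep the 3 highest-scoring nodes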
Example 6: gumbel_softmax
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def gumbel_softmax(logits, temperature, gumbel_samples=None, samples=1, greedy=False):
    """Draws a sample from the Gumbel-Softmax distribution."""
    input_shape_list = bert_utils.get_shape_list(logits, expected_rank=2)
    if samples > 1:
        logits = tf.expand_dims(logits, -1)
    if gumbel_samples is None:
        gumbel_samples = sample_gumbel(input_shape_list, samples)
    if greedy:
        tf.logging.info("==apply greedy based sampling and discrete relax==")
        # if int(tf.__version__.split(".")[1]) < 15:
        #     if not use_tpu:
        #         logits_index = tf.contrib.framework.argsort(logits, axis=1)
        #         gumbel_samples_sorted = tf.contrib.framework.sort(gumbel_samples, axis=1)
        #         gumbel_samples_sorted = reorder(gumbel_samples_sorted, logits_index)
        #     else:
        # else:
        #     logits_index = tf.argsort(logits, axis=1)
        #     gumbel_samples_sorted = tf.sort(gumbel_samples, axis=1)
        #     gumbel_samples_sorted = reorder(gumbel_samples_sorted, logits_index)
        gumbel_samples = reorder_approximate(logits, gumbel_samples)
        y = logits + gumbel_samples
        return [tf.exp(tf.nn.log_softmax(y / temperature, axis=1)),
                y]
    else:
        y = logits + gumbel_samples
        tf.logging.info("==apply sampling based sampling and discrete relax==")
        return [tf.exp(tf.nn.log_softmax(y / temperature, axis=1)),
                y]
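The helpers sample_gumbel, reorder_approximate and bert_utils come from the surrounding repository and are not shown here. As background only, a minimal, self-contained sketch of the Gumbel trick the function builds on (TF 2.x; names with a _sketch suffix are hypothetical):

def sample_gumbel_sketch(shape, eps=1e-20):
    # Gumbel(0, 1) noise via the inverse CDF of uniform samples.
    u = tf.random.uniform(shape, minval=0.0, maxval=1.0)
    return -tf.math.log(-tf.math.log(u + eps) + eps)

logits = tf.constant([[1.0, 2.0, 0.5]])
y = logits + sample_gumbel_sketch(tf.shape(logits))
soft_sample = tf.nn.softmax(y / 0.5, axis=-1)   # relaxed one-hot, temperature 0.5
hard_sample = tf.argmax(y, axis=-1)             # Gumbel-max draw ~ softmax(logits)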
Example 7: nucleus_sampling
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def nucleus_sampling(logits, vocab_size, p=0.9,
                     input_ids=None, input_ori_ids=None,
                     **kargs):
    input_shape_list = bert_utils.get_shape_list(logits, expected_rank=[2, 3])
    if len(input_shape_list) == 3:
        logits = tf.reshape(logits, (-1, vocab_size))
    probs = tf.nn.softmax(logits, axis=-1)

    # [batch_size, seq, vocab_perm]
    # indices = tf.argsort(probs, direction='DESCENDING')
    indices = tf.contrib.framework.argsort(probs, direction='DESCENDING')

    cumulative_probabilities = tf.math.cumsum(tf.batch_gather(probs, indices), axis=-1, exclusive=False)

    # find the top pth index to cut off. careful we don't want to cut off everything!
    # result will be [batch_size, seq, vocab_perm]
    exclude_mask = tf.logical_not(
        tf.logical_or(cumulative_probabilities < p, tf.range(vocab_size)[None] < 1))
    exclude_mask = tf.cast(exclude_mask, tf.float32)

    indices_v1 = tf.contrib.framework.argsort(indices)
    exclude_mask = reorder(exclude_mask, tf.cast(indices_v1, dtype=tf.int32))
    if len(input_shape_list) == 3:
        exclude_mask = tf.reshape(exclude_mask, input_shape_list)
        # logits = tf.reshape(logits, input_shape_list)

    if input_ids is not None and input_ori_ids is not None:
        exclude_mask, input_ori_ids = get_extra_mask(
            input_ids, input_ori_ids,
            exclude_mask, vocab_size,
            **kargs)
        return [exclude_mask, input_ori_ids]
    else:
        return [exclude_mask]
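The snippet above is written against TF 1.x (tf.contrib, tf.batch_gather) and a repo-specific reorder helper. For comparison, a compact TF 2.x sketch of one common top-p (nucleus) variant: sort, build the keep mask on cumulative probabilities, then undo the sort with a second tf.argsort. The function name is hypothetical, and it returns a keep mask rather than the exclude mask used above.

def nucleus_keep_mask_sketch(logits, p=0.9):
    probs = tf.nn.softmax(logits, axis=-1)                    # [batch, vocab]
    order = tf.argsort(probs, direction='DESCENDING')
    probs_sorted = tf.gather(probs, order, batch_dims=1)
    # Cumulative mass of strictly higher-ranked tokens; the top-1 token always survives.
    cum = tf.math.cumsum(probs_sorted, axis=-1, exclusive=True)
    keep_sorted = cum < p
    inv_order = tf.argsort(order)                             # inverse permutation
    return tf.gather(keep_sorted, inv_order, batch_dims=1)    # True = token may be sampled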
Example 8: sort_key_val
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def sort_key_val(t1, t2, dim=-1):
    values = tf.sort(t1, axis=dim)
    t2 = tf.broadcast_to(t2, t1.shape)
    return values, tf.gather(t2, tf.argsort(t1, axis=dim), axis=dim)
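Usage sketch for the helper above (eager mode, made-up tensors): t1 is sorted and t2 is reordered by the same permutation.

t1 = tf.constant([3, 1, 2])
t2 = tf.constant([30, 10, 20])
values, reordered = sort_key_val(t1, t2)   # values = [1, 2, 3], reordered = [10, 20, 30]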
Example 9: _apply
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def _apply(self, words):
    if self.max_distance == 0:
        return tf.identity(words)
    num_words = tf.shape(words)[0]
    offset = tf.random.uniform([num_words], maxval=1) * (self.max_distance + 1)
    offset = tf.cast(offset, num_words.dtype)
    new_pos = tf.argsort(tf.range(num_words) + offset)
    return tf.gather(words, new_pos)
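The method above shuffles words within a bounded window: adding a random offset in [0, max_distance + 1) to each position and argsorting the result guarantees that no word moves more than max_distance places. A stand-alone version of the same idea with made-up inputs (self.max_distance replaced by a local variable):

words = tf.constant(["a", "b", "c", "d", "e"])
max_distance = 3
num_words = tf.shape(words)[0]
offset = tf.random.uniform([num_words], maxval=1.0) * (max_distance + 1)
new_pos = tf.argsort(tf.range(num_words) + tf.cast(offset, tf.int32))
shuffled = tf.gather(words, new_pos)   # e.g. ["b", "a", "d", "c", "e"]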
Example 10: get_top_elements
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def get_top_elements(list_of_elements, max_user_contribution):
  """Gets the top max_user_contribution words from the input list.

  Note that the returned set of top words will not necessarily be sorted.

  Args:
    list_of_elements: A tensor containing a list of elements.
    max_user_contribution: The maximum number of elements to keep.

  Returns:
    A tensor of a list of strings. If the total number of unique words is
    less than or equal to max_user_contribution, returns the set of unique
    words.
  """
  words, _, counts = tf.unique_with_counts(list_of_elements)
  if tf.size(words) > max_user_contribution:
    # This logic is influenced by the focus on global heavy hitters and
    # thus implements clipping by chopping the tail of the distribution
    # of the words as present on a single client. Another option could
    # be to pick max_words_per_user random words out of the unique
    # words present locally.
    top_indices = tf.argsort(
        counts, axis=-1, direction='DESCENDING')[:max_user_contribution]
    top_words = tf.gather(words, top_indices)
    return top_words
  return words
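An eager-mode usage sketch with made-up data (in graph mode the Python if on a tensor would need tf.function/autograph):

elements = tf.constant(["cat", "dog", "cat", "bird", "cat", "dog"])
top = get_top_elements(elements, max_user_contribution=2)
# -> ["cat", "dog"]: the two most frequent words; order is not guaranteed.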
Example 11: sort_by_scores
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def sort_by_scores(scores,
                   features_list,
                   topn=None,
                   shuffle_ties=True,
                   seed=None):
  """Sorts example features according to per-example scores.

  Args:
    scores: A `Tensor` of shape [batch_size, list_size] representing the
      per-example scores.
    features_list: A list of `Tensor`s with the same shape as scores to be
      sorted.
    topn: An integer as the cutoff of examples in the sorted list.
    shuffle_ties: A boolean. If True, randomly shuffle before the sorting.
    seed: The ops-level random seed used when `shuffle_ties` is True.

  Returns:
    A list of `Tensor`s as the list of features sorted by `scores`.
  """
  with tf.compat.v1.name_scope(name='sort_by_scores'):
    scores = tf.cast(scores, tf.float32)
    scores.get_shape().assert_has_rank(2)
    list_size = tf.shape(input=scores)[1]
    if topn is None:
      topn = list_size
    topn = tf.minimum(topn, list_size)
    shuffle_ind = None
    if shuffle_ties:
      shuffle_ind = _to_nd_indices(
          tf.argsort(
              tf.random.uniform(tf.shape(input=scores), seed=seed),
              stable=True))
      scores = tf.gather_nd(scores, shuffle_ind)
    _, indices = tf.math.top_k(scores, topn, sorted=True)
    nd_indices = _to_nd_indices(indices)
    if shuffle_ind is not None:
      nd_indices = tf.gather_nd(shuffle_ind, nd_indices)
    return [tf.gather_nd(f, nd_indices) for f in features_list]
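A small usage sketch (eager mode, made-up scores and features; assumes the _to_nd_indices helper from the earlier examples is in scope):

scores = tf.constant([[1.0, 3.5, 2.1]])
features = tf.constant([[10, 20, 30]])
sort_by_scores(scores, [features], topn=2, shuffle_ties=False)
# -> [ [[20, 30]] ]: features reordered by descending score, cut to the top 2.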
Example 12: sorted_ranks
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def sorted_ranks(scores, shuffle_ties=True, seed=None):
  """Returns an int `Tensor` as the ranks (1-based) after sorting scores.

  Example: Given scores = [[1.0, 3.5, 2.1]], the returned ranks will be
  [[3, 1, 2]]. It means that score 1.0 will be ranked at position 3, 3.5 will
  be ranked at position 1, and 2.1 will be ranked at position 2.

  Args:
    scores: A `Tensor` of shape [batch_size, list_size] representing the
      per-example scores.
    shuffle_ties: See `sort_by_scores`.
    seed: See `sort_by_scores`.

  Returns:
    A 1-based int `Tensor` as the ranks.
  """
  with tf.compat.v1.name_scope(name='sorted_ranks'):
    batch_size, list_size = tf.unstack(tf.shape(input=scores))
    # The current position in the list for each score.
    positions = tf.tile(tf.expand_dims(tf.range(list_size), 0), [batch_size, 1])
    # For scores [[1.0, 3.5, 2.1]], sorted_positions are [[1, 2, 0]], meaning the
    # largest score is at position 1, the second is at position 2 and the third
    # is at position 0.
    sorted_positions = sort_by_scores(
        scores, [positions], shuffle_ties=shuffle_ties, seed=seed)[0]
    # The indices of sorting sorted_positions will be [[2, 0, 1]] and ranks are
    # 1-based and thus are [[3, 1, 2]].
    ranks = tf.argsort(sorted_positions) + 1
    return ranks
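A quick numeric check of the docstring example (eager mode; assumes sort_by_scores and _to_nd_indices from the earlier examples are in scope):

scores = tf.constant([[1.0, 3.5, 2.1]])
sorted_ranks(scores, shuffle_ties=False)   # -> [[3, 1, 2]]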
Example 13: organize_valid_indices
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def organize_valid_indices(is_valid, shuffle=True, seed=None):
  """Organizes indices in such a way that valid items appear first.

  Args:
    is_valid: A boolean `Tensor` for entry validity with shape [batch_size,
      list_size].
    shuffle: A boolean indicating whether valid items should be shuffled.
    seed: An int for random seed at the op level. It works together with the
      seed at the global graph level to determine the random number generation.
      See `tf.set_random_seed`.

  Returns:
    A tensor of indices with shape [batch_size, list_size, 2]. The returned
    tensor can be used with `tf.gather_nd` and `tf.scatter_nd` to compose a new
    [batch_size, list_size] tensor. The values in the last dimension are the
    indices for an element in the input tensor.
  """
  with tf.compat.v1.name_scope(name='organize_valid_indices'):
    is_valid = tf.convert_to_tensor(value=is_valid)
    is_valid.get_shape().assert_has_rank(2)
    output_shape = tf.shape(input=is_valid)

    if shuffle:
      values = tf.random.uniform(output_shape, seed=seed)
    else:
      values = (
          tf.ones_like(is_valid, tf.float32) * tf.reverse(
              tf.cast(tf.range(output_shape[1]), dtype=tf.float32), [-1]))

    rand = tf.compat.v1.where(
        is_valid, values, tf.ones(output_shape) * -1e-6)
    # shape(indices) = [batch_size, list_size]
    indices = tf.argsort(rand, direction='DESCENDING', stable=True)
    return _to_nd_indices(indices)
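Usage sketch with shuffle disabled so the result is deterministic (eager mode, made-up validity mask):

is_valid = tf.constant([[True, False, True]])
organize_valid_indices(is_valid, shuffle=False)
# -> [[[0, 0], [0, 2], [0, 1]]]: the two valid entries come first, the invalid one last.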
Example 14: sort_by_scores
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def sort_by_scores(scores,
                   features_list,
                   topn=None,
                   shuffle_ties=True,
                   seed=None):
  """Sorts example features according to per-example scores.

  Args:
    scores: A `Tensor` of shape [batch_size, list_size] representing the
      per-example scores.
    features_list: A list of `Tensor`s with the same shape as scores to be
      sorted.
    topn: An integer as the cutoff of examples in the sorted list.
    shuffle_ties: A boolean. If True, randomly shuffle before the sorting.
    seed: The ops-level random seed used when `shuffle_ties` is True.

  Returns:
    A list of `Tensor`s as the list of features sorted by `scores`.
  """
  with tf.compat.v1.name_scope(name='sort_by_scores'):
    scores = tf.cast(scores, tf.float32)
    scores.get_shape().assert_has_rank(2)
    list_size = tf.shape(input=scores)[1]
    if topn is None:
      topn = list_size
    topn = tf.minimum(topn, list_size)
    shuffle_ind = None
    if shuffle_ties:
      shuffle_ind = _to_nd_indices(
          tf.argsort(
              tf.random.uniform(tf.shape(input=scores), seed=seed),
              stable=True))
      scores = tf.gather_nd(scores, shuffle_ind)
    _, indices = tf.math.top_k(scores, topn, sorted=True)
    nd_indices = _to_nd_indices(indices)
    if shuffle_ind is not None:
      nd_indices = tf.gather_nd(shuffle_ind, nd_indices)
    return [tf.gather_nd(f, nd_indices) for f in features_list]
Example 15: sorted_ranks
# Required module: import tensorflow [as alias]
# Or: from tensorflow import argsort [as alias]
def sorted_ranks(scores, shuffle_ties=True, seed=None):
  """Returns an int `Tensor` as the ranks (1-based) after sorting scores.

  Example: Given scores = [[1.0, 3.5, 2.1]], the returned ranks will be
  [[3, 1, 2]]. It means that score 1.0 will be ranked at position 3, 3.5 will
  be ranked at position 1, and 2.1 will be ranked at position 2.

  Args:
    scores: A `Tensor` of shape [batch_size, list_size] representing the
      per-example scores.
    shuffle_ties: See `sort_by_scores`.
    seed: See `sort_by_scores`.

  Returns:
    A 1-based int `Tensor` as the ranks.
  """
  with tf.compat.v1.name_scope(name='sorted_ranks'):
    batch_size, list_size = tf.unstack(tf.shape(input=scores))
    # The current position in the list for each score.
    positions = tf.tile(tf.expand_dims(tf.range(list_size), 0), [batch_size, 1])
    # For scores [[1.0, 3.5, 2.1]], sorted_positions are [[1, 2, 0]], meaning the
    # largest score is at position 1, the 2nd is at position 2 and the 3rd is at
    # position 0.
    sorted_positions = sort_by_scores(
        scores, [positions], shuffle_ties=shuffle_ties, seed=seed)[0]
    # The indices of sorting sorted_positions will be [[2, 0, 1]] and ranks are
    # 1-based and thus are [[3, 1, 2]].
    ranks = tf.argsort(sorted_positions) + 1
    return ranks