This article collects typical usage examples of the tensorflow.top_k method in Python: what tensorflow.top_k does, how to use it, and what working code looks like. The curated code examples below may help; you can also explore further usage examples from the tensorflow module the method belongs to.
The following presents 4 code examples of tensorflow.top_k, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: _MyTopK
# Required module: import tensorflow [as alias]
# Or: from tensorflow import top_k [as alias]
def _MyTopK(x, k):
  """GPU-compatible version of top-k that works for very small constant k.

  Calls argmax repeatedly.

  Args:
    x: a 2d Tensor.
    k: a small integer.

  Returns:
    values: a Tensor of shape [batch_size, k]
    indices: an int32 Tensor of shape [batch_size, k]
  """
  if k > 10:
    return tf.nn.top_k(x, k)
  values = []
  indices = []
  depth = tf.shape(x)[1]
  for i in range(k):  # the original Python 2 xrange is replaced with range
    values.append(tf.reduce_max(x, 1))
    argmax = tf.argmax(x, 1)
    indices.append(argmax)
    if i + 1 < k:
      # Mask out the current argmax so the next iteration finds the runner-up.
      x += tf.one_hot(argmax, depth, -1e9)
  return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))
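A minimal usage sketch (hypothetical input values; assumes TensorFlow 1.x, since the example relies on TF1 APIs such as tf.to_int32 and tf.Session):

import tensorflow as tf

x = tf.constant([[0.1, 0.5, 0.2, 0.9],
                 [0.3, 0.8, 0.7, 0.1]])
values, indices = _MyTopK(x, k=2)
with tf.Session() as sess:
  v, i = sess.run([values, indices])
  # v -> [[0.9, 0.5], [0.8, 0.7]]
  # i -> [[3, 1], [1, 2]]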
Example 2: _my_top_k
# Required module: import tensorflow [as alias]
# Or: from tensorflow import top_k [as alias]
def _my_top_k(x, k):
  """GPU-compatible version of top-k that works for very small constant k.

  Calls argmax repeatedly.

  tf.nn.top_k is implemented for GPU, but its gradient, sparse_to_dense,
  seems not to be, so if we use tf.nn.top_k, then both the top_k op and its
  gradient go on the CPU. Once this is not an issue, this function becomes
  obsolete and should be replaced by tf.nn.top_k.

  Args:
    x: a 2d Tensor.
    k: a small integer.

  Returns:
    values: a Tensor of shape [batch_size, k]
    indices: an int32 Tensor of shape [batch_size, k]
  """
  if k > 10:
    return tf.nn.top_k(x, k)
  values = []
  indices = []
  depth = tf.shape(x)[1]
  for i in range(k):
    values.append(tf.reduce_max(x, 1))
    argmax = tf.argmax(x, 1)
    indices.append(argmax)
    if i + 1 < k:
      # Mask out the current argmax so the next iteration finds the runner-up.
      x += tf.one_hot(argmax, depth, -1e9)
  return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1))
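A quick sanity check one might run (hypothetical, assuming TensorFlow 1.x): for small k, _my_top_k should agree with tf.nn.top_k up to tie-breaking:

x = tf.random_uniform([4, 16])
v1, i1 = _my_top_k(x, 3)
v2, i2 = tf.nn.top_k(x, 3)
with tf.Session() as sess:
  a, b = sess.run([v1, v2])  # a and b should match (ties aside)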
Example 3: _prob_in_top_k
# Required module: import tensorflow [as alias]
# Or: from tensorflow import top_k [as alias]
def _prob_in_top_k(
    clean_values, noisy_values, noise_stddev, noisy_top_values, k):
  """Helper function to NoisyTopKGating.

  Computes the probability that each value is in the top k, given different
  random noise. This gives us a way of backpropagating from a loss that
  balances the number of times each expert is in the top k experts per
  example.

  In the case of no noise, pass in None for noise_stddev, and the result will
  not be differentiable.

  Args:
    clean_values: a `Tensor` of shape [batch, n].
    noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus
      normally distributed noise with standard deviation noise_stddev.
    noise_stddev: a `Tensor` of shape [batch, n], or None.
    noisy_top_values: a `Tensor` of shape [batch, m]. The "values" output of
      tf.top_k(noisy_values, m), with m >= k+1.
    k: an integer.

  Returns:
    a `Tensor` of shape [batch, n].
  """
  batch = tf.shape(clean_values)[0]
  m = tf.shape(noisy_top_values)[1]
  top_values_flat = tf.reshape(noisy_top_values, [-1])
  # We want to compute the threshold that a particular value would have to
  # exceed in order to make the top k. This computation differs depending
  # on whether the value is already in the top k.
  threshold_positions_if_in = tf.range(batch) * m + k
  threshold_if_in = tf.expand_dims(
      tf.gather(top_values_flat, threshold_positions_if_in), 1)
  is_in = tf.greater(noisy_values, threshold_if_in)
  if noise_stddev is None:
    return tf.to_float(is_in)
  threshold_positions_if_out = threshold_positions_if_in - 1
  threshold_if_out = tf.expand_dims(
      tf.gather(top_values_flat, threshold_positions_if_out), 1)
  # Probability of clearing the relevant threshold, conditioned on whether
  # each value is currently in the top k.
  prob_if_in = _normal_distribution_cdf(clean_values - threshold_if_in,
                                        noise_stddev)
  prob_if_out = _normal_distribution_cdf(clean_values - threshold_if_out,
                                         noise_stddev)
  prob = tf.where(is_in, prob_if_in, prob_if_out)
  return prob
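The helper _normal_distribution_cdf is not included in this example. Based on its name and the call sites above, a minimal sketch (an assumption, not the verbatim source) would evaluate the CDF of a zero-mean Gaussian via the error function:

import math

def _normal_distribution_cdf(x, stddev):
  # Sketch: CDF of a zero-mean normal with the given stddev. The small
  # epsilon guards against division by zero when stddev is 0.
  return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20)))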
Example 4: _ProbInTopK
# Required module: import tensorflow [as alias]
# Or: from tensorflow import top_k [as alias]
def _ProbInTopK(clean_values, noisy_values, noise_stddev, noisy_top_values, k):
  """Helper function to NoisyTopKGating.

  Computes the probability that each value is in the top k, given different
  random noise. This gives us a way of backpropagating from a loss that
  balances the number of times each expert is in the top k experts per
  example.

  In the case of no noise, pass in None for noise_stddev, and the result will
  not be differentiable.

  Args:
    clean_values: a `Tensor` of shape [batch, n].
    noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus
      normally distributed noise with standard deviation noise_stddev.
    noise_stddev: a `Tensor` of shape [batch, n], or None.
    noisy_top_values: a `Tensor` of shape [batch, m]. The 'values' output of
      tf.top_k(noisy_values, m), with m >= k+1.
    k: an integer.

  Returns:
    a `Tensor` of shape [batch, n].
  """
  batch = tf.shape(clean_values)[0]
  m = tf.shape(noisy_top_values)[1]
  top_values_flat = tf.reshape(noisy_top_values, [-1])
  # We want to compute the threshold that a particular value would have to
  # exceed in order to make the top k. This computation differs depending
  # on whether the value is already in the top k.
  threshold_positions_if_in = tf.range(batch) * m + k
  threshold_if_in = tf.expand_dims(
      tf.gather(top_values_flat, threshold_positions_if_in), 1)
  is_in = tf.greater(noisy_values, threshold_if_in)
  if noise_stddev is None:
    return tf.to_float(is_in)
  threshold_positions_if_out = threshold_positions_if_in - 1
  threshold_if_out = tf.expand_dims(
      tf.gather(top_values_flat, threshold_positions_if_out), 1)
  # Probability of clearing the relevant threshold, conditioned on whether
  # each value is currently in the top k.
  prob_if_in = _NormalDistributionCDF(clean_values - threshold_if_in,
                                      noise_stddev)
  prob_if_out = _NormalDistributionCDF(clean_values - threshold_if_out,
                                       noise_stddev)
  prob = tf.where(is_in, prob_if_in, prob_if_out)
  return prob
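A hypothetical wiring sketch showing how this helper is typically fed (the names clean_logits and noisy_logits are illustrative, not from the source; assumes TensorFlow 1.x):

clean_logits = tf.random_normal([8, 16])         # [batch, n] gating logits
noise_stddev = 0.1 * tf.ones_like(clean_logits)  # [batch, n]
noisy_logits = clean_logits + noise_stddev * tf.random_normal([8, 16])
k = 2
# m = k + 1 satisfies the m >= k+1 requirement on noisy_top_values.
top_values, _ = tf.nn.top_k(noisy_logits, k + 1)
prob = _ProbInTopK(clean_logits, noisy_logits, noise_stddev, top_values, k)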