This article collects typical usage examples of the Python method tensorflow.reduce_max. If you have been wondering what tensorflow.reduce_max does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the module the method belongs to, tensorflow.
The following presents 15 code examples of tensorflow.reduce_max, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: from_float32_to_uint8
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def from_float32_to_uint8(
        tensor,
        tensor_key='tensor',
        min_key='min',
        max_key='max'):
    """ Rescales a float32 tensor to the full uint8 range.

    :param tensor: Float tensor to encode.
    :param tensor_key: Dictionary key under which the encoded tensor is returned.
    :param min_key: Dictionary key for the original minimum value.
    :param max_key: Dictionary key for the original maximum value.
    :returns: Dict with the uint8 tensor and its original min/max (kept so the
        scaling can be inverted later).
    """
    tensor_min = tf.reduce_min(tensor)
    tensor_max = tf.reduce_max(tensor)
    return {
        tensor_key: tf.cast(
            (tensor - tensor_min) / (tensor_max - tensor_min + 1e-16)
            * 255.9999, dtype=tf.uint8),
        min_key: tensor_min,
        max_key: tensor_max
    }
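A minimal usage sketch (not part of the original source; assumes TF 2.x eager execution so the returned tensors can be inspected directly):

import tensorflow as tf

spectrogram = tf.random.uniform((128, 512), minval=-40.0, maxval=6.0)
encoded = from_float32_to_uint8(spectrogram)
# encoded['tensor'] is uint8 in [0, 255]; encoded['min'] and encoded['max']
# hold the original value range so the scaling can later be inverted.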
Example 2: one_hot_encoding
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def one_hot_encoding(labels, num_classes=None):
    """One-hot encodes the multiclass labels.

    Example usage:
        labels = tf.constant([1, 4], dtype=tf.int32)
        one_hot = OneHotEncoding(labels, num_classes=5)
        one_hot.eval()  # evaluates to [0, 1, 0, 0, 1]

    Args:
        labels: A tensor of shape [None] corresponding to the labels.
        num_classes: Number of classes in the dataset.
    Returns:
        onehot_labels: a tensor of shape [num_classes] corresponding to the one hot
            encoding of the labels.
    Raises:
        ValueError: if num_classes is not specified.
    """
    with tf.name_scope('OneHotEncoding', values=[labels]):
        if num_classes is None:
            raise ValueError('num_classes must be specified')
        labels = tf.one_hot(labels, num_classes, 1, 0)
        return tf.reduce_max(labels, 0)
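A quick illustration of what the final reduce_max contributes (hypothetical values, runnable eagerly in TF 2.x): tf.one_hot produces one row per label, and taking the max over axis 0 merges those rows into a single multi-hot vector.

import tensorflow as tf

rows = tf.one_hot(tf.constant([1, 4]), depth=5, on_value=1, off_value=0)
# [[0, 1, 0, 0, 0],
#  [0, 0, 0, 0, 1]]
multi_hot = tf.reduce_max(rows, 0)  # [0, 1, 0, 0, 1]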
Example 3: log_sum_exp
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def log_sum_exp(x_k):
    """Computes log \sum exp in a numerically stable way.
        log ( sum_i exp(x_i) )
      = log ( sum_i exp(x_i - m + m) ), with m = max(x_i)
      = log ( sum_i exp(x_i - m) * exp(m) )
      = log ( sum_i exp(x_i - m) ) + m
    Args:
        x_k: k-dimensional list of arguments to log_sum_exp.
    Returns:
        log_sum_exp of the arguments.
    """
    m = tf.reduce_max(x_k)
    x1_k = x_k - m
    u_k = tf.exp(x1_k)
    z = tf.reduce_sum(u_k)
    return tf.log(z) + m
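A short check of why subtracting the max matters (hypothetical values, written in the same TF 1.x style as the snippet; evaluating the tensors requires a session):

import tensorflow as tf

x = tf.constant([1000.0, 1000.0])
naive = tf.log(tf.reduce_sum(tf.exp(x)))  # exp(1000.) overflows to inf in float32
stable = log_sum_exp(x)                   # 1000 + log(2), roughly 1000.693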
Example 4: set_precision
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
    """Precision of set predictions.

    Args:
        predictions: A Tensor of scores of shape [batch, nlabels].
        labels: A Tensor of int32s giving true set elements,
            of shape [batch, seq_length].
        weights_fn: A function to weight the elements.
    Returns:
        hits: A Tensor of shape [batch, nlabels].
        weights: A Tensor of shape [batch, nlabels].
    """
    with tf.variable_scope("set_precision", values=[predictions, labels]):
        labels = tf.squeeze(labels, [2, 3])
        weights = weights_fn(labels)
        labels = tf.one_hot(labels, predictions.shape[-1])
        labels = tf.reduce_max(labels, axis=1)
        labels = tf.cast(labels, tf.bool)
        return tf.to_float(tf.equal(labels, predictions)), weights
Example 5: set_recall
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
    """Recall of set predictions.

    Args:
        predictions: A Tensor of scores of shape [batch, nlabels].
        labels: A Tensor of int32s giving true set elements,
            of shape [batch, seq_length].
        weights_fn: A function to weight the elements.
    Returns:
        hits: A Tensor of shape [batch, nlabels].
        weights: A Tensor of shape [batch, nlabels].
    """
    with tf.variable_scope("set_recall", values=[predictions, labels]):
        labels = tf.squeeze(labels, [2, 3])
        weights = weights_fn(labels)
        labels = tf.one_hot(labels, predictions.shape[-1])
        labels = tf.reduce_max(labels, axis=1)
        labels = tf.cast(labels, tf.bool)
        return tf.to_float(tf.equal(labels, predictions)), weights
Example 6: top_1_tpu
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def top_1_tpu(inputs):
    """Finds max and argmax over the last dimension.

    Works well on TPU.

    Args:
        inputs: A tensor with shape [..., depth]
    Returns:
        values: a Tensor with shape [...]
        indices: a Tensor with shape [...]
    """
    inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
    mask = tf.to_int32(tf.equal(inputs_max, inputs))
    index = tf.range(tf.shape(inputs)[-1]) * mask
    return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)
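A small sanity check (hypothetical values, not from the original source) showing that the result agrees with tf.argmax on the last axis:

import tensorflow as tf

logits = tf.constant([[0.1, 2.0, -1.0],
                      [3.0, 0.5, 0.5]])
values, indices = top_1_tpu(logits)
# values  -> [2.0, 3.0]
# indices -> [1, 0], matching tf.argmax(logits, axis=-1)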
Example 7: gumbel_softmax
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def gumbel_softmax(logits, temperature, hard=False):
    """Sample from the Gumbel-Softmax distribution and optionally discretize.

    Args:
        logits: [batch_size, n_class] unnormalized log-probs
        temperature: non-negative scalar
        hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
        [batch_size, n_class] sample from the Gumbel-Softmax distribution.
        If hard=True, then the returned sample will be one-hot, otherwise it will
        be a probability distribution that sums to 1 across classes.
    """
    y = gumbel_softmax_sample(logits, temperature)
    if hard:
        # k = tf.shape(logits)[-1]
        # y_hard = tf.cast(tf.one_hot(tf.argmax(y, 1), k), y.dtype)
        y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
        # Straight-through estimator: the forward pass uses the hard one-hot,
        # while gradients flow through the soft sample y.
        y = tf.stop_gradient(y_hard - y) + y
    return y
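The snippet relies on gumbel_softmax_sample, which is not shown here. A common definition, following the Gumbel-Softmax papers (this is an assumption, not the original project's code), perturbs the logits with Gumbel noise and applies a temperature-scaled softmax:

def gumbel_softmax_sample(logits, temperature, eps=1e-20):
    # Sample Gumbel(0, 1) noise via the inverse-CDF trick: -log(-log(U)).
    u = tf.random_uniform(tf.shape(logits), minval=0, maxval=1)
    gumbel_noise = -tf.log(-tf.log(u + eps) + eps)
    return tf.nn.softmax((logits + gumbel_noise) / temperature)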
Example 8: add_variable_summaries
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def add_variable_summaries(variable, scope):
    '''
    Attach some summaries to a tensor for TensorBoard visualization, namely
    mean, standard deviation, minimum, maximum, and histogram.

    Arguments:
        variable (TensorFlow Variable): A TensorFlow Variable of any shape to
            which to add summary operations. Must be a numerical data type.
    '''
    with tf.name_scope(scope):
        mean = tf.reduce_mean(variable)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(variable - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(variable))
        tf.summary.scalar('min', tf.reduce_min(variable))
        tf.summary.histogram('histogram', variable)
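A hypothetical usage sketch (assuming TF 1.x graph mode, which matches the summary API used above): attach the summaries to a weight tensor and merge them for the TensorBoard writer.

weights = tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.1),
                      name='conv_weights')
add_variable_summaries(weights, scope='conv_weights_summaries')
merged_summaries = tf.summary.merge_all()  # fed to a tf.summary.FileWriter during training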
Example 9: tower
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def tower(image, mode, config):
    image = image_normalization(image)
    if image.shape[-1] == 1:
        image = tf.tile(image, [1, 1, 1, 3])

    with slim.arg_scope(resnet.resnet_arg_scope()):
        is_training = config['train_backbone'] and (mode == Mode.TRAIN)
        with slim.arg_scope([slim.conv2d, slim.batch_norm], trainable=is_training):
            _, encoder = resnet.resnet_v1_50(image,
                                             is_training=is_training,
                                             global_pool=False,
                                             scope='resnet_v1_50')
    feature_map = encoder['resnet_v1_50/block3']

    if config['use_attention']:
        descriptor = delf_attention(feature_map, config, mode == Mode.TRAIN,
                                    resnet.resnet_arg_scope())
    else:
        descriptor = tf.reduce_max(feature_map, [1, 2])

    if config['dimensionality_reduction']:
        descriptor = dimensionality_reduction(descriptor, config)

    return descriptor
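The reduce_max over axes [1, 2] in the else-branch is a global max-pooling step: it collapses the spatial dimensions of the backbone's feature map, leaving one value per channel. A shape-only illustration (hypothetical sizes):

import tensorflow as tf

feature_map = tf.zeros((4, 32, 32, 1024))        # [batch, height, width, channels]
descriptor = tf.reduce_max(feature_map, [1, 2])  # shape (4, 1024): one value per channel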
Example 10: call
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def call(self, seq_value_len_list, mask=None, **kwargs):
    if self.supports_masking:
        if mask is None:
            raise ValueError(
                "When supports_masking=True, input must support masking")
        uiseq_embed_list = seq_value_len_list
        mask = tf.to_float(mask)
        user_behavior_length = tf.reduce_sum(mask, axis=-1, keep_dims=True)
        mask = tf.expand_dims(mask, axis=2)
    else:
        uiseq_embed_list, user_behavior_length = seq_value_len_list
        mask = tf.sequence_mask(user_behavior_length,
                                self.seq_len_max, dtype=tf.float32)
        mask = tf.transpose(mask, (0, 2, 1))

    embedding_size = uiseq_embed_list.shape[-1]
    mask = tf.tile(mask, [1, 1, embedding_size])
    uiseq_embed_list *= mask
    hist = uiseq_embed_list

    if self.mode == "max":
        return tf.reduce_max(hist, 1, keep_dims=True)

    hist = tf.reduce_sum(hist, 1, keep_dims=False)
    if self.mode == "mean":
        hist = tf.div(hist, user_behavior_length + self.eps)

    hist = tf.expand_dims(hist, axis=1)
    return hist
Example 11: get_or_guess_labels
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def get_or_guess_labels(self, x, kwargs):
    """
    Get the label to use in generating an adversarial example for x.
    The kwargs are fed directly from the kwargs of the attack.
    If 'y' is in kwargs, then assume it's an untargeted attack and
    use that as the label.
    If 'y_target' is in kwargs and is not None, then assume it's a
    targeted attack and use that as the label.
    Otherwise, use the model's prediction as the label and perform an
    untargeted attack.
    """
    import tensorflow as tf

    if 'y' in kwargs and 'y_target' in kwargs:
        raise ValueError("Can not set both 'y' and 'y_target'.")
    elif 'y' in kwargs:
        labels = kwargs['y']
    elif 'y_target' in kwargs and kwargs['y_target'] is not None:
        labels = kwargs['y_target']
    else:
        preds = self.model.get_probs(x)
        # reduce_max here is presumably the keepdims-compatibility wrapper
        # shown in Example 13 below, not tf.reduce_max called directly.
        preds_max = reduce_max(preds, 1, keepdims=True)
        original_predictions = tf.to_float(tf.equal(preds, preds_max))
        labels = tf.stop_gradient(original_predictions)

    # np refers to numpy, imported at module level in the source file.
    if isinstance(labels, np.ndarray):
        nb_classes = labels.shape[1]
    else:
        nb_classes = labels.get_shape().as_list()[1]
    return labels, nb_classes
Example 12: fprop
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def fprop(self, x, **kwargs):
    mean = tf.reduce_mean(x)
    std = tf.sqrt(tf.reduce_mean(tf.square(x - mean)))
    return tf.Print(x,
                    [tf.reduce_min(x), mean, tf.reduce_max(x), std],
                    "Print layer")
Example 13: reduce_max
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def reduce_max(input_tensor, axis=None, keepdims=None,
               name=None, reduction_indices=None):
    """
    Wrapper around tf.reduce_max to handle the keep_dims/keepdims argument rename
    across TensorFlow versions.
    """
    return reduce_function(tf.reduce_max, input_tensor, axis=axis,
                           keepdims=keepdims, name=name,
                           reduction_indices=reduction_indices)
Example 14: spectrogram_to_db_uint
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def spectrogram_to_db_uint(spectrogram, db_range=100., **kwargs):
    """ Encodes given spectrogram into uint8 using a decibel scale.

    :param spectrogram: Spectrogram to be encoded as TF float tensor.
    :param db_range: Range in decibels for the encoding.
    :returns: Encoded decibel spectrogram as uint8 tensor.
    """
    db_spectrogram = gain_to_db(spectrogram)
    max_db_spectrogram = tf.reduce_max(db_spectrogram)
    # Clip everything more than db_range below the peak before uint8 encoding.
    db_spectrogram = tf.maximum(db_spectrogram, max_db_spectrogram - db_range)
    return from_float32_to_uint8(db_spectrogram, **kwargs)
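gain_to_db is defined elsewhere in the source project and is not shown here. A plausible stand-in (an assumption for illustration only) converts linear gain to decibels, with a small epsilon to avoid log(0):

import numpy as np
import tensorflow as tf

def gain_to_db(tensor, epsilon=1e-10):
    # 20 * log10(x), expressed via natural log: 20 / ln(10) * ln(x).
    return 20.0 / np.log(10) * tf.log(tf.maximum(tensor, epsilon))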
Example 15: variable_summaries
# Required module: import tensorflow [as alias]
# Or: from tensorflow import reduce_max [as alias]
def variable_summaries(name, var, with_max_min=False):
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        if with_max_min:
            tf.summary.scalar('max', tf.reduce_max(var))
            tf.summary.scalar('min', tf.reduce_min(var))