This article collects typical usage examples of the Python method tensorflow.compat.v1.reduce_max. If you are unsure what v1.reduce_max does or how to use it, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.compat.v1.
The following presents 15 code examples of v1.reduce_max, ordered by popularity by default.
Example 1: mask_from_lengths
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def mask_from_lengths(lengths, max_length=None, dtype=None, name=None):
  """Converts a vector of lengths to a matrix of binary masks.

  E.g. [2, 4, 3] will become [[1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 0]].

  Args:
    lengths: a d-dimensional vector of integers corresponding to lengths.
    max_length: an optional (default: None) scalar-like or 0-dimensional tensor
      indicating the maximum length of the masks. If not provided, the maximum
      length will be inferred from the lengths vector.
    dtype: the dtype of the returned mask, if specified. If None, the dtype of
      the lengths will be used.
    name: a name for the operation (optional).

  Returns:
    A d x max_length tensor of binary masks, in the requested dtype (defaulting
    to the dtype of lengths).
  """
  with tf.name_scope(name, 'mask_from_lengths'):
    dtype = lengths.dtype if dtype is None else dtype
    max_length = tf.reduce_max(lengths) if max_length is None else max_length
    indexes = tf.range(max_length, dtype=lengths.dtype)
    mask = tf.less(tf.expand_dims(indexes, 0), tf.expand_dims(lengths, 1))
    cast_mask = tf.cast(mask, dtype)
    return tf.stop_gradient(cast_mask)
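A minimal usage sketch for the example above, assuming mask_from_lengths is in scope and tf refers to tensorflow.compat.v1 in graph mode; the input mirrors the docstring:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

lengths = tf.constant([2, 4, 3], dtype=tf.int32)
mask = mask_from_lengths(lengths)  # mask_from_lengths as defined above
with tf.Session() as sess:
  # Expected: [[1 1 0 0]
  #            [1 1 1 1]
  #            [1 1 1 0]]
  print(sess.run(mask))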
Example 2: set_precision
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length, 1, 1].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, seq_length].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example 3: set_recall
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length, 1, 1].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, seq_length].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
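Both set_precision and set_recall hinge on the same step: a [batch, seq_length] tensor of label ids is turned into a [batch, nlabels] multi-hot indicator by one-hot encoding every position and taking tf.reduce_max over the sequence axis. Below is a self-contained sketch of just that step, with a stand-in for common_layers.weights_nonzero; the names and shapes are illustrative, not the tensor2tensor API:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def weights_nonzero(labels):
  # Stand-in for common_layers.weights_nonzero: weight 1.0 for non-padding ids.
  return tf.to_float(tf.not_equal(labels, 0))

nlabels = 5
label_ids = tf.constant([[1, 3, 0], [2, 2, 4]], dtype=tf.int32)    # 0 is padding
weights = weights_nonzero(label_ids)                               # [batch, seq_length]
multi_hot = tf.reduce_max(tf.one_hot(label_ids, nlabels), axis=1)  # [batch, nlabels]
with tf.Session() as sess:
  print(sess.run(multi_hot))  # [[1. 1. 0. 1. 0.] [0. 0. 1. 0. 1.]]
  print(sess.run(weights))    # [[1. 1. 0.] [1. 1. 1.]]

Note that the padding id also lights up class 0 in the multi-hot tensor, exactly as it does in the two functions above.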
Example 4: compute_max_pool_embedding
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def compute_max_pool_embedding(input_embeddings, input_lengths):
  """Computes max pool embedding.

  Args:
    input_embeddings: <tf.float32>[bs, max_seq_len, emb_dim]
    input_lengths: <tf.int64>[bs, 1]

  Returns:
    max_pool_embedding: <tf.float32>[bs, emb_dim]
  """
  max_seq_len = tf.shape(input_embeddings)[1]
  # <tf.float32>[bs, max_seq_len]
  mask = 1.0 - tf.sequence_mask(input_lengths, max_seq_len, dtype=tf.float32)
  mask = tf.squeeze(mask * (-1e-6), 1)
  mask = tf.expand_dims(mask, 2)
  # <tf.float32>[bs, emb_dim]
  max_pool_embedding = tf.reduce_max(input_embeddings + mask, 1)
  # <tf.float32>[bs, dim]
  return max_pool_embedding
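A usage sketch, assuming compute_max_pool_embedding above is in scope; note that input_lengths carries a trailing singleton dimension, as the docstring specifies:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

embeddings = tf.constant(np.random.rand(2, 3, 4), dtype=tf.float32)  # [bs, max_seq_len, emb_dim]
lengths = tf.constant([[2], [3]], dtype=tf.int64)                    # [bs, 1]
pooled = compute_max_pool_embedding(embeddings, lengths)             # [bs, emb_dim]
with tf.Session() as sess:
  print(sess.run(pooled).shape)  # (2, 4)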
Example 5: top_1_tpu
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def top_1_tpu(inputs):
  """Finds max and argmax over the last dimension.

  Works well on TPU.

  Args:
    inputs: A tensor with shape [..., depth]

  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
  mask = tf.to_int32(tf.equal(inputs_max, inputs))
  index = tf.range(tf.shape(inputs)[-1]) * mask
  return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)
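A quick sketch, assuming top_1_tpu above is in scope; it returns both the maximum value and its index along the last axis:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

logits = tf.constant([[0.1, 2.0, 0.3],
                      [5.0, 1.0, 4.0]])
values, indices = top_1_tpu(logits)
with tf.Session() as sess:
  vals, idx = sess.run([values, indices])
  print(vals)  # [2. 5.]
  print(idx)   # [1 0]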
Example 6: top_k_softmax
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def top_k_softmax(x, k):
  """Calculate softmax(x), select top-k and rescale to sum to 1.

  Args:
    x: Input to softmax over.
    k: Number of top-k to select.

  Returns:
    softmax(x) and maximum item.
  """
  x = tf.nn.softmax(x)
  top_x, _ = tf.nn.top_k(x, k=k + 1)
  min_top = tf.reduce_min(top_x, axis=-1, keep_dims=True)
  x = tf.nn.relu((x - min_top) + 1e-12)
  x /= tf.reduce_sum(x, axis=-1, keep_dims=True)
  return x, tf.reduce_max(top_x, axis=-1)
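A usage sketch, assuming top_k_softmax above is in scope:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

logits = tf.constant([[2.0, 1.0, 0.1, -1.0]])
sparse_probs, max_prob = top_k_softmax(logits, k=2)
with tf.Session() as sess:
  probs, top = sess.run([sparse_probs, max_prob])
  print(probs)  # entries below the top-2 are suppressed to (near) zero; rows renormalise to 1
  print(top)    # the largest softmax probability in each row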
Example 7: _scanning_pack
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def _scanning_pack(self, dataset):
  """Apply scan based pack to a dataset."""
  if self._chop_long_sequences:
    dataset = dataset.map(lambda x: (x[:self._packed_length],))
  else:
    dataset = dataset.filter(lambda *x: tf.reduce_max(  # pylint: disable=g-long-lambda
        tf.stack([tf.shape(i)[0] for i in x]), axis=0) <= self._packed_length)

  # In order to retrieve the sequences which are still in the queue when the
  # dataset is exhausted, we feed dummy sequences which are guaranteed to
  # displace the remaining elements.
  dataset = dataset.concatenate(
      tf.data.Dataset.range(self._queue_size).map(self._eviction_fn))

  initial_state = self._scan_initial_state()
  step_fn = functools.partial(
      tf.autograph.to_graph(_scan_step_fn), packed_length=self._packed_length,
      queue_size=self._queue_size, spacing=self._spacing,
      num_sequences=self._num_sequences, token_dtype=self._token_dtype)

  dataset = dataset.apply(tf.data.experimental.scan(initial_state, step_fn))

  is_real_sample = lambda valid_sample, _: valid_sample
  return dataset.filter(is_real_sample)
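_scanning_pack is a method of a sequence-packing helper and needs that object's attributes, so it is not runnable on its own. Its reduce_max-based length filter can be exercised in isolation, though: an element (a tuple of variable-length features) is kept only if its longest feature fits within the packed length. A minimal sketch with a hypothetical packed_length of 4:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

packed_length = 4  # hypothetical, standing in for self._packed_length

def fits_in_pack(*features):
  # Same predicate as the dataset.filter(...) call above.
  longest = tf.reduce_max(tf.stack([tf.shape(f)[0] for f in features]), axis=0)
  return longest <= packed_length

inputs = tf.constant([3, 1, 4])          # length 3
targets = tf.constant([5, 9, 2, 6, 5])   # length 5
with tf.Session() as sess:
  print(sess.run(fits_in_pack(inputs, targets)))  # False: the longest feature (5) exceeds 4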
Example 8: apply_piecewise_monotonic_fn
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def apply_piecewise_monotonic_fn(self, wrapper, fn, boundaries, *args):
  """Bounds the outputs of an elementwise piecewise-monotonic fn.

  The extrema of a piecewise-monotonic function over an interval are attained
  either at the interval endpoints or at the monotonicity boundaries, so it
  suffices to evaluate fn at every combination of lower bound, upper bound and
  boundary values clamped into the interval, then take the elementwise
  min/max of the results.
  """
  valid_values = []
  for a in [self] + list(args):
    vs = []
    vs.append(a.lower)
    vs.append(a.upper)
    for b in boundaries:
      vs.append(
          tf.maximum(a.lower, tf.minimum(a.upper, b * tf.ones_like(a.lower))))
    valid_values.append(vs)
  outputs = []
  for inputs in itertools.product(*valid_values):
    outputs.append(fn(*inputs))
  outputs = tf.stack(outputs, axis=-1)
  return IntervalBounds(tf.reduce_min(outputs, axis=-1),
                        tf.reduce_max(outputs, axis=-1))
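apply_piecewise_monotonic_fn is a method of an interval-bounds class and expects objects exposing .lower and .upper, so it cannot be called directly here. The underlying idea can be sketched with plain tensors: evaluate the function at the interval endpoints and at each monotonicity boundary clamped into the interval, then reduce_min/reduce_max over the candidates. A self-contained sketch using tf.square (boundary at 0):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

lower = tf.constant([-2.0, 0.5])
upper = tf.constant([1.0, 3.0])
boundaries = (0.0,)  # x**2 switches from decreasing to increasing at 0

# Candidate inputs: interval endpoints plus every boundary clamped into the interval.
candidates = [lower, upper]
for b in boundaries:
  candidates.append(tf.maximum(lower, tf.minimum(upper, b * tf.ones_like(lower))))

outputs = tf.stack([tf.square(c) for c in candidates], axis=-1)
out_lower = tf.reduce_min(outputs, axis=-1)
out_upper = tf.reduce_max(outputs, axis=-1)
with tf.Session() as sess:
  print(sess.run([out_lower, out_upper]))  # [0., 0.25] and [4., 9.]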
Example 9: _simplex_bounds
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def _simplex_bounds(mapped_vertices, mapped_centres, r, axis):
  """Calculates naive bounds on the given layer-mapped vertices.

  Args:
    mapped_vertices: Tensor of shape (num_vertices, *output_shape)
      or of shape (batch_size, num_vertices, *output_shape)
      containing the vertices in the layer's output space.
    mapped_centres: Tensor of shape (batch_size, *output_shape)
      containing the layer's nominal outputs.
    r: Scalar in [0, 1) specifying the radius (in vocab space) of the simplex.
    axis: Index of the `num_vertices` dimension of `mapped_vertices`.

  Returns:
    lb_out: Tensor of shape (batch_size, *output_shape) with lower bounds
      on the outputs of the affine layer.
    ub_out: Tensor of shape (batch_size, *output_shape) with upper bounds
      on the outputs of the affine layer.
  """
  # Use the negative of r, instead of the complement of r, as
  # we're shifting the input domain to be centred at the origin.
  lb_out = -r * mapped_centres + r * tf.reduce_min(mapped_vertices, axis=axis)
  ub_out = -r * mapped_centres + r * tf.reduce_max(mapped_vertices, axis=axis)
  return lb_out, ub_out
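A small numeric sketch, assuming _simplex_bounds above is in scope; one batch element, three vertices, two output units, and an illustrative radius r = 0.1:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

mapped_vertices = tf.constant([[[1.0, -1.0],
                                [2.0, 0.0],
                                [0.0, 3.0]]])  # (batch, num_vertices, out_dim)
mapped_centres = tf.constant([[1.0, 0.5]])     # (batch, out_dim)
lb, ub = _simplex_bounds(mapped_vertices, mapped_centres, r=0.1, axis=1)
with tf.Session() as sess:
  print(sess.run([lb, ub]))  # approximately [[-0.1, -0.15]] and [[0.1, 0.25]]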
Example 10: extract_relation_representations
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def extract_relation_representations(input_layer, input_ids, tokenizer):
  """Extracts relation representation from sentence sequence layer."""
  entity_representations = []
  entity_marker_ids = tokenizer.convert_tokens_to_ids(["[E1]", "[E2]"])
  for entity_marker_id in entity_marker_ids:
    mask = tf.to_float(tf.equal(input_ids, entity_marker_id))
    mask = tf.broadcast_to(tf.expand_dims(mask, -1), tf.shape(input_layer))
    entity_representation = tf.reduce_max(
        mask * input_layer, axis=1, keepdims=True)
    entity_representations.append(entity_representation)

  output_layer = tf.concat(entity_representations, axis=2)
  output_layer = tf.squeeze(output_layer, [1])
  tf.logging.info("entity marker pooling AFTER output shape %s",
                  output_layer.shape)
  return output_layer
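extract_relation_representations expects a BERT-style tokenizer exposing convert_tokens_to_ids. A hedged sketch using a hypothetical stub tokenizer and random activations, assuming the function above is in scope; the marker ids 101 and 102 are made up for illustration:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

class StubTokenizer(object):
  """Hypothetical stand-in for a WordPiece tokenizer."""

  def convert_tokens_to_ids(self, tokens):
    vocab = {"[E1]": 101, "[E2]": 102}
    return [vocab[t] for t in tokens]

batch, seq_len, hidden = 2, 6, 8
input_layer = tf.constant(np.random.rand(batch, seq_len, hidden), dtype=tf.float32)
# Each row contains one [E1] marker (id 101) and one [E2] marker (id 102).
input_ids = tf.constant([[101, 5, 6, 102, 7, 0],
                         [5, 101, 7, 8, 102, 0]], dtype=tf.int32)
relation = extract_relation_representations(input_layer, input_ids, StubTokenizer())
with tf.Session() as sess:
  print(sess.run(relation).shape)  # (2, 16): the [E1] and [E2] representations, concatenated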
Example 11: batch_boolean_mask
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def batch_boolean_mask(mask):
  """Gets indices of true values.

  Args:
    mask: [batch_size, num_values]

  Returns:
    gathered_mask: [batch_size, max_true]
    true_indices: [batch_size, max_true]
  """
  # [batch_size, num_values]
  mask = tf.to_int32(mask)
  # [batch_size]
  num_true = tf.reduce_sum(mask, 1)
  # []
  max_true = tf.reduce_max(num_true)
  # [batch_size, max_true]
  gathered_mask, true_indices = tf.nn.top_k(mask, max_true)
  gathered_mask = tf.cast(gathered_mask, tf.bool)

  return gathered_mask, true_indices
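A usage sketch, assuming batch_boolean_mask above is in scope; rows with fewer true values than the batch maximum are padded, and gathered_mask marks which gathered indices are real:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

mask = tf.constant([[True, False, True, False],
                    [False, False, True, False]])
gathered_mask, true_indices = batch_boolean_mask(mask)
with tf.Session() as sess:
  g, idx = sess.run([gathered_mask, true_indices])
  print(idx)  # e.g. [[0 2] [2 0]]; the second row's trailing index is padding
  print(g)    # [[ True  True] [ True False]]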
Example 12: assert_box_normalized
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes])
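A usage sketch, assuming assert_box_normalized above is in scope; the returned op is typically attached as a control dependency so downstream computation only runs if the boxes are normalized:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

boxes = tf.constant([[0.1, 0.2, 0.5, 0.9],
                     [0.0, 0.0, 1.0, 1.0]])
assert_op = assert_box_normalized(boxes)
with tf.control_dependencies([assert_op]):
  areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
with tf.Session() as sess:
  print(sess.run(areas))  # evaluates only if the assertion passes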
Example 13: keypoints_to_enclosing_bounding_boxes
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def keypoints_to_enclosing_bounding_boxes(keypoints):
  """Creates enclosing bounding boxes from keypoints.

  Args:
    keypoints: a [num_instances, num_keypoints, 2] float32 tensor with
      keypoints in [y, x] format.

  Returns:
    A [num_instances, 4] float32 tensor that tightly covers all the keypoints
    for each instance.
  """
  ymin = tf.math.reduce_min(keypoints[:, :, 0], axis=1)
  xmin = tf.math.reduce_min(keypoints[:, :, 1], axis=1)
  ymax = tf.math.reduce_max(keypoints[:, :, 0], axis=1)
  xmax = tf.math.reduce_max(keypoints[:, :, 1], axis=1)
  return tf.stack([ymin, xmin, ymax, xmax], axis=1)
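A usage sketch, assuming keypoints_to_enclosing_bounding_boxes above is in scope:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

keypoints = tf.constant([[[0.2, 0.3], [0.5, 0.1], [0.4, 0.6]]], dtype=tf.float32)
boxes = keypoints_to_enclosing_bounding_boxes(keypoints)
with tf.Session() as sess:
  print(sess.run(boxes))  # [[0.2 0.1 0.5 0.6]] as [ymin, xmin, ymax, xmax]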
Example 14: one_hot_encoding
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def one_hot_encoding(labels, num_classes=None):
  """One-hot encodes the multiclass labels.

  Example usage:
    labels = tf.constant([1, 4], dtype=tf.int32)
    one_hot = one_hot_encoding(labels, num_classes=5)
    one_hot.eval()  # evaluates to [0, 1, 0, 0, 1]

  Args:
    labels: A tensor of shape [None] corresponding to the labels.
    num_classes: Number of classes in the dataset.

  Returns:
    onehot_labels: a tensor of shape [num_classes] corresponding to the one hot
      encoding of the labels.

  Raises:
    ValueError: if num_classes is not specified.
  """
  with tf.name_scope('OneHotEncoding', values=[labels]):
    if num_classes is None:
      raise ValueError('num_classes must be specified')

    labels = tf.one_hot(labels, num_classes, 1, 0)
    return tf.reduce_max(labels, 0)
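The docstring's example, expanded into a runnable snippet (assuming one_hot_encoding above is in scope and graph mode is active):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

labels = tf.constant([1, 4], dtype=tf.int32)
one_hot = one_hot_encoding(labels, num_classes=5)
with tf.Session() as sess:
  print(sess.run(one_hot))  # [0 1 0 0 1]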
Example 15: compute_lengths
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import reduce_max [as alias]
def compute_lengths(symbols_list, eos_symbol, name=None,
                    dtype=tf.int64):
  """Computes sequence lengths given end-of-sequence symbol.

  Args:
    symbols_list: list of [batch_size] tensors of symbols (e.g. integers).
    eos_symbol: end of sequence symbol (e.g. integer).
    name: name for the name scope of this op.
    dtype: type of symbols, default: tf.int64.

  Returns:
    Tensor [batch_size] of lengths of sequences.
  """
  with tf.name_scope(name, 'compute_lengths'):
    max_len = len(symbols_list)
    eos_symbol_ = tf.constant(eos_symbol, dtype=dtype)
    # Array with max_len - i at the timesteps where we have EOS, 0 otherwise.
    # Its maximum over time locates the first EOS in each example.
    ends = [tf.constant(max_len - i, dtype=tf.int64)
            * tf.to_int64(tf.equal(s, eos_symbol_))
            for i, s in enumerate(symbols_list)]
    # Lengths of sequences, or max_len + 1 for sequences that didn't have EOS.
    # Note: examples without EOS have a maximum of 0 over `ends` and therefore
    # a value of max_len + 1 in lens_.
    lens_ = max_len + 1 - tf.reduce_max(tf.stack(ends, 1), axis=1)
    # For examples that didn't have EOS, decrease max_len + 1 to max_len as
    # the length.
    lens = tf.subtract(lens_, tf.to_int64(tf.equal(lens_, max_len + 1)))
    return tf.stop_gradient(tf.reshape(lens, [-1]))
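A runnable sketch of the example above, assuming compute_lengths is in scope; symbols_list holds one [batch_size] tensor per timestep, and 0 plays the role of the EOS symbol here:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

EOS = 0
# Three timesteps for a batch of two sequences: the first emits EOS at the
# second step, the second never emits EOS.
symbols_list = [tf.constant([5, 7], dtype=tf.int64),
                tf.constant([EOS, 8], dtype=tf.int64),
                tf.constant([EOS, 9], dtype=tf.int64)]
lengths = compute_lengths(symbols_list, EOS)
with tf.Session() as sess:
  # [2 3]: length 2 counts up to and including the first EOS; the sequence
  # without EOS gets the full length 3.
  print(sess.run(lengths))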