

Python tensorflow.reduce_max Method Code Examples

This article collects typical usage examples of the Python method tensorflow.reduce_max. If you are unsure how tensorflow.reduce_max is used in practice, the curated code examples below may help. You can also explore further usage examples from the tensorflow package in which the method lives.


The following presents 15 code examples of tensorflow.reduce_max, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
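
Before the examples, here is a minimal standalone sketch of what tf.reduce_max itself does (a quick illustration assuming TensorFlow 2.x eager execution; the tensor values are made up):

import tensorflow as tf

x = tf.constant([[1., 5., 3.],
                 [4., 2., 6.]])

print(tf.reduce_max(x))                          # global maximum -> 6.0
print(tf.reduce_max(x, axis=-1))                 # per-row maximum -> [5. 6.]
print(tf.reduce_max(x, axis=-1, keepdims=True))  # keeps the reduced axis -> shape (2, 1)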

Example 1: from_float32_to_uint8

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def from_float32_to_uint8(
        tensor,
        tensor_key='tensor',
        min_key='min',
        max_key='max'):
    """

    :param tensor:
    :param tensor_key:
    :param min_key:
    :param max_key:
    :returns:
    """
    tensor_min = tf.reduce_min(tensor)
    tensor_max = tf.reduce_max(tensor)
    return {
        tensor_key: tf.cast(
            (tensor - tensor_min) / (tensor_max - tensor_min + 1e-16)
            * 255.9999, dtype=tf.uint8),
        min_key: tensor_min,
        max_key: tensor_max
    } 
Developer: deezer, Project: spleeter, Lines: 24, Source: tensor.py
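
The returned minimum and maximum are what make the quantization reversible. A possible inverse is sketched below; it is a hypothetical helper written for illustration (its name, signature, and dict keys are assumptions), not part of the snippet above or of spleeter's API:

import tensorflow as tf

def restore_float32(data, tensor_key='tensor', min_key='min', max_key='max'):
    """ Hypothetical inverse of from_float32_to_uint8: rescales the uint8
    tensor back to (approximately) the original float32 range using the
    stored minimum and maximum. """
    tensor = tf.cast(data[tensor_key], tf.float32) / 255.9999
    return tensor * (data[max_key] - data[min_key] + 1e-16) + data[min_key]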

Example 2: one_hot_encoding

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def one_hot_encoding(labels, num_classes=None):
  """One-hot encodes the multiclass labels.

  Example usage:
    labels = tf.constant([1, 4], dtype=tf.int32)
    one_hot = one_hot_encoding(labels, num_classes=5)
    one_hot.eval()    # evaluates to [0, 1, 0, 0, 1]

  Args:
    labels: A tensor of shape [None] corresponding to the labels.
    num_classes: Number of classes in the dataset.
  Returns:
    onehot_labels: a tensor of shape [num_classes] corresponding to the one hot
      encoding of the labels.
  Raises:
    ValueError: if num_classes is not specified.
  """
  with tf.name_scope('OneHotEncoding', values=[labels]):
    if num_classes is None:
      raise ValueError('num_classes must be specified')

    labels = tf.one_hot(labels, num_classes, 1, 0)
    return tf.reduce_max(labels, 0) 
Developer: ringringyi, Project: DOTA_models, Lines: 25, Source: preprocessor.py
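
The docstring example can be checked directly with the same two ops (a quick sketch assuming TensorFlow 2.x eager execution; the function above targets the TF 1.x name_scope API):

import tensorflow as tf

labels = tf.constant([1, 4], dtype=tf.int32)
one_hot_rows = tf.one_hot(labels, depth=5, on_value=1, off_value=0)  # one row per label
multi_hot = tf.reduce_max(one_hot_rows, axis=0)                      # collapse rows into a multi-hot vector
print(multi_hot)  # tf.Tensor([0 1 0 0 1], shape=(5,), dtype=int32)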

Example 3: log_sum_exp

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def log_sum_exp(x_k):
  """Computes log \sum exp in a numerically stable way.
    log ( sum_i exp(x_i) )
    log ( sum_i exp(x_i - m + m) ),       with m = max(x_i)
    log ( sum_i exp(x_i - m)*exp(m) )
    log ( sum_i exp(x_i - m) + m

  Args:
    x_k - k -dimensional list of arguments to log_sum_exp.

  Returns:
    log_sum_exp of the arguments.
  """
  m = tf.reduce_max(x_k)
  x1_k = x_k - m
  u_k = tf.exp(x1_k)
  z = tf.reduce_sum(u_k)
  return tf.log(z) + m 
Developer: ringringyi, Project: DOTA_models, Lines: 20, Source: utils.py
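
TensorFlow also ships this trick as a built-in, tf.reduce_logsumexp, which applies the same max-subtraction internally. A quick comparison (a sketch assuming TensorFlow 2.x, where tf.log from the snippet above is spelled tf.math.log):

import tensorflow as tf

x = tf.constant([1000.0, 1000.5, 999.0])

naive = tf.math.log(tf.reduce_sum(tf.exp(x)))           # overflows: exp(1000.) is inf in float32
m = tf.reduce_max(x)
stable = tf.math.log(tf.reduce_sum(tf.exp(x - m))) + m  # stays finite
builtin = tf.reduce_logsumexp(x)                        # matches the stable version
print(naive.numpy(), stable.numpy(), builtin.numpy())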

Example 4: set_precision

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions : A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Developer: akzaidi, Project: fine-lm, Lines: 23, Source: metrics.py

Example 5: set_recall

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions : A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Developer: akzaidi, Project: fine-lm, Lines: 22, Source: metrics.py
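
The core pattern shared by set_precision and set_recall is turning a padded sequence of integer label ids into a multi-hot membership vector: tf.one_hot produces one row per position, and tf.reduce_max over the sequence axis collapses the rows. A minimal sketch of just that step, without the weights_fn machinery (TensorFlow 2.x assumed, illustrative values):

import tensorflow as tf

nlabels = 6
label_ids = tf.constant([[2, 5, 0],    # two examples, each a padded list of label ids
                         [1, 1, 4]])

one_hot = tf.one_hot(label_ids, nlabels)    # [batch, seq_length, nlabels]
multi_hot = tf.reduce_max(one_hot, axis=1)  # [batch, nlabels]
print(multi_hot)
# [[1. 0. 1. 0. 0. 1.]
#  [0. 1. 0. 0. 1. 0.]]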

Example 6: top_1_tpu

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def top_1_tpu(inputs):
  """find max and argmax over the last dimension.

  Works well on TPU

  Args:
    inputs: A tensor with shape [..., depth]

  Returns:
    values: a Tensor with shape [...]
    indices: a Tensor with shape [...]
  """
  inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
  mask = tf.to_int32(tf.equal(inputs_max, inputs))
  index = tf.range(tf.shape(inputs)[-1]) * mask
  return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1) 
Developer: akzaidi, Project: fine-lm, Lines: 18, Source: common_layers.py
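
Outside of TPUs the same two outputs are usually computed with tf.reduce_max plus tf.argmax; the snippet above presumably avoids tf.argmax for TPU performance. A comparison sketch (TensorFlow 2.x assumed); note that on ties top_1_tpu returns the highest matching index while tf.argmax returns the lowest:

import tensorflow as tf

inputs = tf.constant([[0.1, 0.7, 0.2],
                      [0.9, 0.3, 0.9]])

values = tf.reduce_max(inputs, axis=-1)                     # [0.7 0.9]
indices = tf.argmax(inputs, axis=-1, output_type=tf.int32)  # [1 0]; top_1_tpu would give [1 2] for the tie in row 2
print(values.numpy(), indices.numpy())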

Example 7: gumbel_softmax

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def gumbel_softmax(logits, temperature, hard=False):
    """Sample from the Gumbel-Softmax distribution and optionally discretize.
    Args:
    logits: [batch_size, n_class] unnormalized log-probs
    temperature: non-negative scalar
    hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
    [batch_size, n_class] sample from the Gumbel-Softmax distribution.
    If hard=True, then the returned sample will be one-hot, otherwise it will
    be a probabilitiy distribution that sums to 1 across classes
    """
    y = gumbel_softmax_sample(logits, temperature)
    if hard:
        # k = tf.shape(logits)[-1]
        # y_hard = tf.cast(tf.one_hot(tf.argmax(y, 1), k), y.dtype)
        y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype)
        y = tf.stop_gradient(y_hard - y) + y
    return y 
Developer: simonkamronn, Project: kvae, Lines: 20, Source: nn.py
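
The helper gumbel_softmax_sample is not shown above. A sketch of the commonly used formulation of the Gumbel-Softmax trick is given below: add Gumbel(0, 1) noise to the logits and apply a temperature-scaled softmax. This is an assumed, standard definition and not necessarily the exact code in the kvae repository (TF 2.x naming):

import tensorflow as tf

def sample_gumbel(shape, eps=1e-20):
    """ Sample Gumbel(0, 1) noise via the inverse-CDF transform of uniforms. """
    u = tf.random.uniform(shape, minval=0.0, maxval=1.0)
    return -tf.math.log(-tf.math.log(u + eps) + eps)

def gumbel_softmax_sample(logits, temperature):
    """ Draw a soft (relaxed) sample from the Gumbel-Softmax distribution. """
    y = logits + sample_gumbel(tf.shape(logits))
    return tf.nn.softmax(y / temperature)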

Example 8: add_variable_summaries

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def add_variable_summaries(variable, scope):
  '''
  Attaches summaries to a tensor for TensorBoard visualization, namely
  mean, standard deviation, minimum, maximum, and histogram.

  Arguments:
    variable (TensorFlow Variable): A TensorFlow Variable of any shape to
        which to add summary operations. Must be a numerical data type.
    scope (str): Name scope under which to group the summaries.
  '''
  with tf.name_scope(scope):
    mean = tf.reduce_mean(variable)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(variable - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(variable))
    tf.summary.scalar('min', tf.reduce_min(variable))
    tf.summary.histogram('histogram', variable) 
Developer: pierluigiferrari, Project: fcn8s_tensorflow, Lines: 20, Source: tf_variable_summaries.py
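
Typical usage is one call per variable of interest during graph construction; a sketch assuming the TF 1.x-style graph API that the helper is written against (variable name and shape are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

weights = tf.get_variable('fc1_weights', shape=[256, 128])
add_variable_summaries(weights, scope='fc1_weights_summaries')
merged = tf.summary.merge_all()  # evaluate in a session and write with a tf.summary.FileWriter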

Example 9: tower

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def tower(image, mode, config):
        image = image_normalization(image)
        if image.shape[-1] == 1:
            image = tf.tile(image, [1, 1, 1, 3])

        with slim.arg_scope(resnet.resnet_arg_scope()):
            is_training = config['train_backbone'] and (mode == Mode.TRAIN)
            with slim.arg_scope([slim.conv2d, slim.batch_norm], trainable=is_training):
                _, encoder = resnet.resnet_v1_50(image,
                                                 is_training=is_training,
                                                 global_pool=False,
                                                 scope='resnet_v1_50')
        feature_map = encoder['resnet_v1_50/block3']

        if config['use_attention']:
            descriptor = delf_attention(feature_map, config, mode == Mode.TRAIN,
                                        resnet.resnet_arg_scope())
        else:
            descriptor = tf.reduce_max(feature_map, [1, 2])

        if config['dimensionality_reduction']:
            descriptor = dimensionality_reduction(descriptor, config)
        return descriptor 
Developer: ethz-asl, Project: hierarchical_loc, Lines: 25, Source: delf.py

Example 10: call

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def call(self, seq_value_len_list, mask=None, **kwargs):
        if self.supports_masking:
            if mask is None:
                raise ValueError(
                    "When supports_masking=True,input must support masking")
            uiseq_embed_list = seq_value_len_list
            mask = tf.to_float(mask)
            user_behavior_length = tf.reduce_sum(mask, axis=-1, keep_dims=True)
            mask = tf.expand_dims(mask, axis=2)
        else:
            uiseq_embed_list, user_behavior_length = seq_value_len_list

            mask = tf.sequence_mask(user_behavior_length,
                                    self.seq_len_max, dtype=tf.float32)
            mask = tf.transpose(mask, (0, 2, 1))

        embedding_size = uiseq_embed_list.shape[-1]

        mask = tf.tile(mask, [1, 1, embedding_size])

        uiseq_embed_list *= mask
        hist = uiseq_embed_list
        if self.mode == "max":
            return tf.reduce_max(hist, 1, keep_dims=True)

        hist = tf.reduce_sum(hist, 1, keep_dims=False)

        if self.mode == "mean":
            hist = tf.div(hist, user_behavior_length+self.eps)

        hist = tf.expand_dims(hist, axis=1)
        return hist 
Developer: ShenDezhou, Project: icme2019, Lines: 34, Source: sequence.py
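
One subtlety of the "max" branch above: padded positions are multiplied by zero before tf.reduce_max, so if every real value along some embedding dimension is negative, the padding zeros win the max. A common alternative (a sketch, not the implementation used in this repository) fills padded positions with a large negative value instead:

import tensorflow as tf

def masked_max(seq_embed, mask, axis=1):
    """ Max-pool over the sequence axis while ignoring padded positions,
    even when the real values are negative. `mask` has the same shape as
    `seq_embed`, with 1.0 for real steps and 0.0 for padding. """
    very_negative = tf.ones_like(seq_embed) * (-2.0 ** 32)
    filled = tf.where(tf.cast(mask, tf.bool), seq_embed, very_negative)
    return tf.reduce_max(filled, axis=axis, keepdims=True)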

Example 11: get_or_guess_labels

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def get_or_guess_labels(self, x, kwargs):
        """
        Get the label to use in generating an adversarial example for x.
        The kwargs are fed directly from the kwargs of the attack.
        If 'y' is in kwargs, then assume it's an untargeted attack and
        use that as the label.
        If 'y_target' is in kwargs and is not none, then assume it's a
        targeted attack and use that as the label.
        Otherwise, use the model's prediction as the label and perform an
        untargeted attack.
        """
        import tensorflow as tf

        if 'y' in kwargs and 'y_target' in kwargs:
            raise ValueError("Can not set both 'y' and 'y_target'.")
        elif 'y' in kwargs:
            labels = kwargs['y']
        elif 'y_target' in kwargs and kwargs['y_target'] is not None:
            labels = kwargs['y_target']
        else:
            preds = self.model.get_probs(x)
            preds_max = reduce_max(preds, 1, keepdims=True)
            original_predictions = tf.to_float(tf.equal(preds, preds_max))
            labels = tf.stop_gradient(original_predictions)
        if isinstance(labels, np.ndarray):
            nb_classes = labels.shape[1]
        else:
            nb_classes = labels.get_shape().as_list()[1]
        return labels, nb_classes 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 31, Source: attacks.py
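
The fallback branch converts the model's probability vector into hard one-hot labels with the same reduce_max/equal pattern, and tf.stop_gradient keeps gradients from flowing back through that label computation. A standalone illustration of just that branch (TensorFlow 2.x assumed, illustrative probabilities):

import tensorflow as tf

preds = tf.constant([[0.1, 0.6, 0.3],
                     [0.2, 0.2, 0.6]])

preds_max = tf.reduce_max(preds, axis=1, keepdims=True)        # [[0.6], [0.6]]
hard_labels = tf.cast(tf.equal(preds, preds_max), tf.float32)  # one-hot rows
hard_labels = tf.stop_gradient(hard_labels)
print(hard_labels.numpy())  # [[0. 1. 0.] [0. 0. 1.]]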

Example 12: fprop

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def fprop(self, x, **kwargs):
        mean = tf.reduce_mean(x)
        std = tf.sqrt(tf.reduce_mean(tf.square(x - mean)))
        return tf.Print(x,
                        [tf.reduce_min(x), mean, tf.reduce_max(x), std],
                        "Print layer") 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 8, Source: picklable_model.py

Example 13: reduce_max

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def reduce_max(input_tensor, axis=None, keepdims=None,
               name=None, reduction_indices=None):
    """
    Wrapper around the tf.reduce_max to handle argument keep_dims
    """
    return reduce_function(tf.reduce_max, input_tensor, axis=axis,
                           keepdims=keepdims, name=name,
                           reduction_indices=reduction_indices) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 10, Source: compat.py

Example 14: spectrogram_to_db_uint

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def spectrogram_to_db_uint(spectrogram, db_range=100., **kwargs):
    """ Encodes given spectrogram into uint8 using decibel scale.

    :param spectrogram: Spectrogram to be encoded as TF float tensor.
    :param db_range: Range in decibel for encoding.
    :returns: Encoded decibel spectrogram as uint8 tensor.
    """
    db_spectrogram = gain_to_db(spectrogram)
    max_db_spectrogram = tf.reduce_max(db_spectrogram)
    db_spectrogram = tf.maximum(db_spectrogram, max_db_spectrogram - db_range)
    return from_float32_to_uint8(db_spectrogram, **kwargs) 
Developer: deezer, Project: spleeter, Lines: 13, Source: convertor.py
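
The helper gain_to_db is not shown here. A typical gain-to-decibel conversion uses 20 * log10 with an epsilon floor to avoid log(0); the sketch below follows that usual formula and may differ in detail from the project's actual helper:

import tensorflow as tf

def gain_to_db(tensor, epsilon=1e-10):
    """ Convert linear gain to decibels: 20 * log10(max(gain, epsilon)). """
    return 20.0 / tf.math.log(10.0) * tf.math.log(tf.maximum(tensor, epsilon))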

Example 15: variable_summaries

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import reduce_max [as alias]
def variable_summaries(name, var, with_max_min=False):
  with tf.name_scope(name):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    if with_max_min:
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var)) 
Developer: MichelDeudon, Project: neural-combinatorial-optimization-rl-tensorflow, Lines: 12, Source: actor.py


Note: the tensorflow.reduce_max examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.