

Python backend.max Method Code Examples

This article collects typical code examples of the Python method tensorflow.keras.backend.max. If you are wondering how to use backend.max, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from tensorflow.keras.backend, the module this method belongs to.


Below are 15 code examples of the backend.max method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.

Example 1: amplitude_to_decibel

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
# This example also uses: import numpy as np
def amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0):
    """[K] Convert (linear) amplitude to decibel (log10(x)).

    Parameters
    ----------
    x: Keras *batch* tensor or variable. It must be a batch tensor because of the sample-wise `K.max()`.

    amin: minimum amplitude. Amplitudes smaller than `amin` are clipped to this value.

    dynamic_range: dynamic range in decibels.

    """
    log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx())
    if K.ndim(x) > 1:
        axis = tuple(range(K.ndim(x))[1:])
    else:
        axis = None

    log_spec = log_spec - K.max(log_spec, axis=axis, keepdims=True)  # [-?, 0]
    log_spec = K.maximum(log_spec, -1 * dynamic_range)  # [-80, 0]
    return log_spec 
Author: keunwoochoi | Project: kapre | Lines: 23 | Source file: backend_keras.py
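
A minimal usage sketch (not taken from kapre itself), assuming the function above is defined and the standard aliases are imported; the random input tensor and its shape are purely illustrative:

import numpy as np
from tensorflow.keras import backend as K

# A fake batch of 4 magnitude spectrograms, shape (batch, freq, time).
x = K.constant(np.random.rand(4, 128, 100).astype("float32"))

db = amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0)

# Per sample the maximum is 0 dB and nothing falls below -80 dB.
print(K.eval(K.max(db)), K.eval(K.min(db)))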

Example 2: _softmax

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def _softmax(x, axis=-1, alpha=1):
    """
    building on the Keras implementation, with an additional alpha parameter

    Softmax activation function.
    # Arguments
        x : Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: a scalar that multiplies `x` before the softmax (sharpens the output when alpha > 1)
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D') 
Author: adalca | Project: neuron | Lines: 26 | Source file: utils.py
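
A brief usage sketch (not from the neuron project), assuming _softmax above is defined and the backend is imported as K; a 3-D input exercises the explicit max/exp/sum branch, and the values are made up:

from tensorflow.keras import backend as K

x = K.constant([[[1.0, 2.0, 3.0],
                 [0.0, 0.0, 0.0]]])           # shape (1, 2, 3)
print(K.eval(_softmax(x, axis=-1, alpha=1)))  # ordinary softmax
print(K.eval(_softmax(x, axis=-1, alpha=5)))  # sharper, close to a one-hot argmax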

Example 3: call

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x
        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_b = inp_b * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_b = K.l2_normalize(outp_b, -1)
            outp = K.batch_dot(outp_a, outp_b, axes=[2, 2])
            outp = K.max(outp, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Author: deepmipt | Project: DeepPavlov | Lines: 19 | Source file: keras_layers.py
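
The layer's weight list self.W and the surrounding class are not shown above; below is a hypothetical standalone re-creation of the same multi-perspective cosine-matching step with random stand-in weights, just to make the tensor shapes concrete:

import numpy as np
from tensorflow.keras import backend as K

batch, len_a, len_b, dim, n_perspectives = 2, 5, 7, 16, 4
inp_a = K.constant(np.random.rand(batch, len_a, dim).astype("float32"))
inp_b = K.constant(np.random.rand(batch, len_b, dim).astype("float32"))
# Stand-ins for the layer's trainable weights self.W (one vector per perspective).
W = [K.constant(np.random.rand(dim).astype("float32")) for _ in range(n_perspectives)]

m = []
for w in W:
    a = K.l2_normalize(inp_a * w, -1)
    b = K.l2_normalize(inp_b * w, -1)
    sim = K.batch_dot(a, b, axes=[2, 2])        # (batch, len_a, len_b) cosine similarities
    m.append(K.max(sim, -1, keepdims=True))     # best match in inp_b for each position of inp_a
persp = K.concatenate(m, 2)
print(K.int_shape(persp))                       # (2, 5, 4)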

Example 4: _find_maxima

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def _find_maxima(x, coordinate_scale=1, confidence_scale=255.0):

    x = K.cast(x, K.floatx())

    col_max = K.max(x, axis=1)
    row_max = K.max(x, axis=2)

    maxima = K.max(col_max, 1)
    maxima = K.expand_dims(maxima, -2) / confidence_scale

    cols = K.cast(K.argmax(col_max, -2), K.floatx())
    rows = K.cast(K.argmax(row_max, -2), K.floatx())
    cols = K.expand_dims(cols, -2) * coordinate_scale
    rows = K.expand_dims(rows, -2) * coordinate_scale

    maxima = K.concatenate([cols, rows, maxima], -2)

    return maxima 
Author: jgraving | Project: DeepPoseKit | Lines: 20 | Source file: backend.py
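
A small usage sketch with hypothetical data, assuming _find_maxima above is defined; the input is a batch of per-keypoint confidence maps of shape (batch, rows, cols, channels):

import numpy as np
from tensorflow.keras import backend as K

maps = np.zeros((1, 8, 8, 2), dtype="float32")   # batch=1, two keypoint channels
maps[0, 3, 5, 0] = 255.0   # keypoint 0 peaks at row 3, col 5
maps[0, 6, 1, 1] = 127.5   # keypoint 1 peaks at row 6, col 1

out = _find_maxima(K.constant(maps), coordinate_scale=1, confidence_scale=255.0)
print(K.eval(out))
# [[[5.  1. ]     <- columns
#   [3.  6. ]     <- rows
#   [1.  0.5]]]   <- peak confidence / confidence_scale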

Example 5: mean_q

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def mean_q(y_true, y_pred):
    return K.mean(K.max(y_pred, axis=-1)) 
Author: wau | Project: keras-rl2 | Lines: 4 | Source file: dqn.py
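
A quick check with made-up Q-values, assuming the metric above and the backend alias K; y_true is unused by this metric, so None is passed:

from tensorflow.keras import backend as K

q_values = K.constant([[1.0, 3.0, 2.0],
                       [0.0, 0.5, 4.0]])   # per-action Q-values for two states
print(K.eval(mean_q(None, q_values)))      # (3.0 + 4.0) / 2 = 3.5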

Example 6: _hard_max

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def _hard_max(tens, axis):
    """
    We can't use argmax in a loss, as it's not differentiable:
    it works in a metric, but not in a loss function.
    Therefore we replace the 'hard max' operation (i.e. argmax + one-hot)
    with this differentiable approximation.
    """
    tensmax = K.max(tens, axis=axis, keepdims=True)
    eps_hot = K.maximum(tens - tensmax + K.epsilon(), 0)
    one_hot = eps_hot / K.epsilon()
    return one_hot 
Author: adalca | Project: neuron | Lines: 13 | Source file: metrics.py
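
A tiny demonstration with made-up probabilities, assuming _hard_max above is defined and K is the backend alias:

from tensorflow.keras import backend as K

probs = K.constant([[0.1, 0.7, 0.2],
                    [0.5, 0.2, 0.3]])
print(K.eval(_hard_max(probs, axis=-1)))
# approximately [[0., 1., 0.],
#                [1., 0., 0.]]  -- a one-hot of the argmax, built from differentiable ops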

Example 7: next_pred_label

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def next_pred_label(model, data_generator, verbose=False):
    """
    Predict the next sample batch from the generator and compute max labels.
    Returns the sample, the prediction, and the max labels.
    """
    sample = next(data_generator)
    with timer.Timer('prediction', verbose):
        pred = model.predict(sample[0])
    sample_input = sample[0] if not isinstance(sample[0], (list, tuple)) else sample[0][0]
    max_labels = pred_to_label(sample_input, pred)
    return (sample, pred) + max_labels 
Author: adalca | Project: neuron | Lines: 13 | Source file: utils.py

Example 8: next_label

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def next_label(model, data_generator):
    """
    Predict the next sample batch from the generator and compute max labels.
    Returns the max labels.
    """
    batch_proc = next_pred_label(model, data_generator)
    return (batch_proc[2], batch_proc[3]) 
Author: adalca | Project: neuron | Lines: 9 | Source file: utils.py

Example 9: max

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
# This example also uses: import numpy as np
def max(self):
    """Get maximum value that quantized_bits class can represent."""
    unsigned_bits = self.bits - self.keep_negative

    if unsigned_bits > 0:
      return max(1.0, np.power(2.0, self.integer))
    else:
      return 1.0 
Author: google | Project: qkeras | Lines: 10 | Source file: quantizers.py

Example 10: min

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
# This example also uses: import numpy as np
def min(self):
    """Get minimum value that quantized_bits class can represent."""
    if not self.keep_negative:
      return 0.0
    unsigned_bits = self.bits - self.keep_negative
    if unsigned_bits > 0:
      return -max(1.0, np.power(2.0, self.integer))
    else:
      return -1.0 
Author: google | Project: qkeras | Lines: 11 | Source file: quantizers.py
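
The two methods above read the quantizer's own bits/integer/keep_negative attributes; the following is a standalone sketch (not qkeras code, the function name is made up) of the same range computation for hypothetical settings:

import numpy as np

def quantized_range(bits, integer, keep_negative=True):
    """Representable [min, max] of a fixed-point quantizer, mirroring the two methods above."""
    unsigned_bits = bits - keep_negative
    hi = max(1.0, np.power(2.0, integer)) if unsigned_bits > 0 else 1.0
    lo = -hi if keep_negative else 0.0
    return lo, hi

print(quantized_range(bits=8, integer=3))                        # (-8.0, 8.0)
print(quantized_range(bits=4, integer=0, keep_negative=False))   # (0.0, 1.0)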

Example 11: _chebyshev_distance

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def _chebyshev_distance(x, y):
    return K.max(K.abs(x - y), axis=-1, keepdims=True) 
Author: beringresearch | Project: ivis | Lines: 4 | Source file: losses.py
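
A one-line check with made-up vectors, assuming the function above and the backend alias K:

from tensorflow.keras import backend as K

x = K.constant([[0.0, 2.0, 1.0]])
y = K.constant([[1.0, 5.0, 1.0]])
print(K.eval(_chebyshev_distance(x, y)))   # [[3.]] -- the largest per-dimension gap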

Example 12: consecutive_indexed

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
# This example also uses: import numpy as np
def consecutive_indexed(Y):
    """ Assumes that Y is zero-indexed. """
    n_classes = len(np.unique(Y[Y != np.array(-1)]))
    if max(Y) >= n_classes:
        return False
    return True 
Author: beringresearch | Project: ivis | Lines: 8 | Source file: losses.py
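
A quick illustration with made-up label arrays, assuming the function above is defined:

import numpy as np

print(consecutive_indexed(np.array([0, 1, 2, 2, -1])))  # True: labels 0..2, -1 is ignored
print(consecutive_indexed(np.array([0, 1, 5])))         # False: label 5 with only 3 classes present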

Example 13: qscore

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def qscore(y_true, y_pred):
    """Keras metric function for calculating scaled error.

    :param y_true: tensor of true class labels.
    :param y_pred: class output scores from network.

    :returns: class error expressed as a phred score.
    """
    from tensorflow.keras import backend as K
    error = K.cast(K.not_equal(
        K.max(y_true, axis=-1), K.cast(K.argmax(y_pred, axis=-1), K.floatx())),
        K.floatx()
    )
    error = K.sum(error) / K.sum(K.ones_like(error))
    return -10.0 * 0.434294481 * K.log(error) 
Author: nanoporetech | Project: medaka | Lines: 17 | Source file: training.py
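
A small sanity check with made-up predictions, assuming (as the code above implies) that y_true carries the integer class label in a trailing singleton axis, so K.max simply extracts it:

import numpy as np
from tensorflow.keras import backend as K

y_true = K.constant(np.array([[0], [1], [2], [1], [0]], dtype="float32"))
y_pred = K.constant(np.array([[0.9, 0.05, 0.05],   # -> 0, correct
                              [0.1, 0.8, 0.1],     # -> 1, correct
                              [0.2, 0.7, 0.1],     # -> 1, wrong (true class is 2)
                              [0.1, 0.8, 0.1],     # -> 1, correct
                              [0.7, 0.2, 0.1]],    # -> 0, correct
                             dtype="float32"))
print(K.eval(qscore(y_true, y_pred)))  # ~6.99, i.e. -10 * log10(0.2) for a 1-in-5 error rate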

Example 14: call

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def call(self, inputs, **kwargs):
        sent1 = inputs[0]
        sent2 = inputs[1]

        v1 = K.expand_dims(sent1, -2) * self.kernel
        v2 = K.expand_dims(sent2, -2) * self.kernel
        v1 = K.l2_normalize(v1, axis=-1)
        v2 = K.l2_normalize(v2, axis=-1)
        matching = K.max(K.sum(K.expand_dims(v1, 2) * K.expand_dims(v2, 1), axis=-1), axis=-2)
        return matching 
Author: boat-group | Project: fancy-nlp | Lines: 12 | Source file: matching.py

Example 15: _batch_hard_triplet_loss

# Required import: from tensorflow.keras import backend as K
# Alternatively: from tensorflow.keras.backend import max [as alias]
def _batch_hard_triplet_loss(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
        mask_anchor_positive = self._get_anchor_positive_triplet_mask(y_true, pairwise_dist)
        anchor_positive_dist = mask_anchor_positive * pairwise_dist
        hardest_positive_dist = K.max(anchor_positive_dist, axis=1, keepdims=True)
        mask_anchor_negative = self._get_anchor_negative_triplet_mask(y_true, pairwise_dist)
        anchor_negative_dist = mask_anchor_negative * pairwise_dist
        mask_anchor_negative = self._get_semihard_anchor_negative_triplet_mask(anchor_negative_dist,
                                                                               hardest_positive_dist,
                                                                               mask_anchor_negative)
        max_anchor_negative_dist = K.max(pairwise_dist, axis=1, keepdims=True)
        anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
        hardest_negative_dist = K.min(anchor_negative_dist, axis=1, keepdims=True)
        triplet_loss = K.clip(hardest_positive_dist - hardest_negative_dist + self.margin, 0.0, None)
        triplet_loss = K.mean(triplet_loss)
        return triplet_loss 
Author: deepmipt | Project: DeepPavlov | Lines: 17 | Source file: bilstm_siamese_network.py
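
The mask-building helpers used above are not shown; the following standalone sketch (simplified, with a plain different-label negative mask instead of the semi-hard variant) reproduces the batch-hard selection on a made-up distance matrix:

import numpy as np
from tensorflow.keras import backend as K

# Four embeddings, two classes; a hypothetical pairwise distance matrix.
labels = np.array([0, 0, 1, 1], dtype="float32").reshape(-1, 1)
dist = K.constant([[0.0, 0.3, 1.2, 0.9],
                   [0.3, 0.0, 1.0, 1.4],
                   [1.2, 1.0, 0.0, 0.2],
                   [0.9, 1.4, 0.2, 0.0]])
margin = 0.2

same = K.constant((labels == labels.T).astype("float32"))       # 1 where labels match
positive_mask = same - K.constant(np.eye(4, dtype="float32"))   # exclude the anchor itself
negative_mask = 1.0 - same

hardest_positive = K.max(positive_mask * dist, axis=1, keepdims=True)
# Push masked-out (same-class) entries up to the row maximum so K.min skips them.
max_dist = K.max(dist, axis=1, keepdims=True)
hardest_negative = K.min(dist + max_dist * (1.0 - negative_mask), axis=1, keepdims=True)

loss = K.mean(K.clip(hardest_positive - hardest_negative + margin, 0.0, None))
print(K.eval(loss))  # 0.0 here: each hardest negative is more than `margin` farther than the hardest positive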


Note: the tensorflow.keras.backend.max examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before redistributing or reusing the code, and do not reproduce this page without permission.