當前位置: 首頁>>代碼示例>>Python>>正文


Python backend.pow方法代碼示例

本文整理匯總了Python中keras.backend.pow方法的典型用法代碼示例。如果您正苦於以下問題:Python backend.pow方法的具體用法?Python backend.pow怎麽用?Python backend.pow使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在keras.backend的用法示例。


在下文中一共展示了backend.pow方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: call

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def call(self, x, mask=None):
        """Cross-channel local response normalization (LRN-style — presumably
        AlexNet-like; confirm against the defining layer's docs).

        Squares the input, average-pools an (n, n) spatial window, sums the
        pooled map across the channel axis, then divides the input by
        (k + alpha * sum) ** beta.

        # Arguments
            x: 4D input tensor; channel axis depends on the backend ordering.
            mask: unused, kept for the Keras layer API.

        # Returns
            Tensor with the same shape as `x`, normalized.
        """
        # BUG FIX: the original compared the *function object*
        # `K.image_dim_ordering` to "th", which is always False, so the
        # Theano ("channels first") branch was unreachable. It must be called.
        if K.image_dim_ordering() == "th":
            _, f, r, c = self.shape
        else:
            _, r, c, f = self.shape
        squared = K.square(x)
        pooled = K.pool2d(squared, (self.n, self.n), strides=(1, 1),
            padding="same", pool_mode="avg")
        if K.image_dim_ordering() == "th":
            # Channels-first: sum over axis 1 and broadcast back to f channels.
            summed = K.sum(pooled, axis=1, keepdims=True)
            averaged = self.alpha * K.repeat_elements(summed, f, axis=1)
        else:
            # Channels-last: sum over axis 3 and broadcast back to f channels.
            summed = K.sum(pooled, axis=3, keepdims=True)
            averaged = self.alpha * K.repeat_elements(summed, f, axis=3)
        denom = K.pow(self.k + averaged, self.beta)
        return x / denom
開發者ID:dalmia,項目名稱:WannaPark,代碼行數:18,代碼來源:custom.py

示例2: sharpen

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def sharpen(_weight_t, scalar_gama_t):
    '''
    Sharpen an NTM addressing weight vector.

    The convolutional-shift operation can blur (disperse) an address
    weighting over time; to counter this, each head emits a scalar
    \gamma >= 1 used to re-focus the weighting:

    $$w_{i}^{(t)} = \frac{(\hat{w}_{i}^{(t)})^{\gamma}}
    {\sum_{j}(\hat{w}_{j}^{(t)})^{\gamma}}$$

    :param _weight_t: the weight vector which denotes a memory address.
    :param scalar_gama_t: the sharpening exponent.
    :return: the sharpened (renormalized) weight vector.
    '''
    powered = K.pow(_weight_t, scalar_gama_t)
    normalizer = K.sum(powered)
    return powered / normalizer
開發者ID:SigmaQuan,項目名稱:NTM-Keras,代碼行數:19,代碼來源:memory.py

示例3: get_updates

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def get_updates(self, loss, params):
        """Build Adam update ops for `params` w.r.t. `loss`.

        Maintains first (m) and second (v) moment accumulators per
        parameter and applies the bias-corrected learning rate.
        """
        gradients = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        # Bias-corrected step size at step t (iterations is 0-based).
        step = K.cast(self.iterations, K.floatx()) + 1
        correction = K.sqrt(1. - K.pow(self.beta_2, step)) / (1. - K.pow(self.beta_1, step))
        lr_t = self.learning_rate * correction

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        self.weights = [self.iterations] + ms + vs

        for param, grad, m, v in zip(params, gradients, ms, vs):
            m_new = (self.beta_1 * m) + (1. - self.beta_1) * grad
            v_new = (self.beta_2 * v) + (1. - self.beta_2) * K.square(grad)
            delta = lr_t * m_new / (K.sqrt(v_new) + self.epsilon)
            self.updates.append(K.update(m, m_new))
            self.updates.append(K.update(v, v_new))
            self.updates.append(K.update_sub(param, delta))
        return self.updates
開發者ID:CyberZHG,項目名稱:keras-lookahead,代碼行數:21,代碼來源:optimizers.py

示例4: focal_loss

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def focal_loss(y_true, y_pred, gamma=2, alpha=0.25):
    """Compute focal loss.

    # Arguments
        y_true: Ground truth targets,
            tensor of shape (?, num_boxes, num_classes).
        y_pred: Predicted logits,
            tensor of shape (?, num_boxes, num_classes).
        gamma: focusing exponent; larger values down-weight easy examples.
        alpha: class-balancing factor.

    # Returns
        focal_loss: Focal loss, tensor of shape (?, num_boxes).

    # References
        https://arxiv.org/abs/1708.02002
    """
    # Clip to avoid log(0) at the extremes.
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1. - eps)

    # p_t: predicted probability of the true class for each entry.
    pt = tf.where(tf.equal(y_true, 1), y_pred, 1 - y_pred)
    modulating = alpha * K.pow(1. - pt, gamma)
    return -tf.reduce_sum(modulating * K.log(pt), axis=-1)
開發者ID:mogoweb,項目名稱:aiexamples,代碼行數:24,代碼來源:training.py

示例5: call

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def call(self, x):
        """Build sinusoidal position encodings and combine them with `x`.

        # Arguments
            x: 3D tensor — assumed (batch, seq_len, features); TODO confirm.

        # Returns
            `position_ij + x` when mode == 'sum', or the last-axis
            concatenation `[position_ij, x]` when mode == 'concat'.
            NOTE(review): implicitly returns None for any other mode value.
        """
        # When summing, the encoding width must equal the input feature size.
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])
        batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
        # Inverse frequencies 1 / 10000^(2i/size), one per encoding pair.
        position_j = 1. / K.pow(10000.,
                                2 * K.arange(self.size / 2, dtype='float32'
                                             ) / self.size)
        position_j = K.expand_dims(position_j, 0)
        # K.arange does not support variable length, so the position indices
        # are generated as a cumulative sum of ones instead.
        position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
        position_i = K.expand_dims(position_i, 2)
        # Outer product: (batch, seq, 1) x (1, size/2) -> angle matrix.
        position_ij = K.dot(position_i, position_j)
        position_ij = K.concatenate(
            [K.cos(position_ij), K.sin(position_ij)], 2)
        if self.mode == 'sum':
            return position_ij + x
        elif self.mode == 'concat':
            return K.concatenate([position_ij, x], 2)
開發者ID:stevewyl,項目名稱:nlp_toolkit,代碼行數:20,代碼來源:position_embedding.py

示例6: full_affinity

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def full_affinity(X, scale):
    '''
    Calculates the symmetrized full Gaussian affinity matrix, scaled
    by a provided scale

    X:              input dataset of size n
    scale:          provided scale

    returns:        n x n affinity matrix W = exp(-D / (2 * scale^2)),
                    where D holds pairwise squared distances
    '''
    bandwidth = K.variable(scale)
    pairwise_sq_dists = squared_distance(X)
    denom = K.expand_dims(K.pow(bandwidth, 2), -1)
    return K.exp(-pairwise_sq_dists / (2 * denom))
開發者ID:xdxuyang,項目名稱:Deep-Spectral-Clustering-using-Dual-Autoencoder-Network,代碼行數:19,代碼來源:costs.py

示例7: make_smooth_ndcg_loss

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def make_smooth_ndcg_loss(y_true, y_pred):
    """Smoothed NDCG loss: 1 minus softmax-weighted DCG over the ideal DCG."""
    y_true, y_pred = tensorify(y_true), tensorify(y_pred)
    n_objects = K.max(y_true) + 1.0
    # Relevance decreases with rank label; exponential gain / log2 discount.
    relevance = n_objects - K.cast(y_true, "float32") - 1.0
    log_term = K.log(relevance + 2.0) / K.log(2.0)
    exp_relevance = K.pow(2.0, relevance) - 1.0
    gains = exp_relevance / log_term

    # Ideal DCG normalizer.
    idcg = K.sum(gains, axis=-1)

    # Softmax over predictions is the smooth surrogate for the ranking.
    softmaxed = K.exp(y_pred)
    softmaxed = softmaxed / K.sum(softmaxed, axis=-1, keepdims=True)
    return 1 - K.sum(softmaxed * gains, axis=-1) / idcg
開發者ID:kiudee,項目名稱:cs-ranking,代碼行數:19,代碼來源:losses.py

示例8: focal_loss

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def focal_loss(gamma=2., alpha=.25):
    """Return a Keras focal-loss function with fixed `gamma` and `alpha`."""
    def focal_loss_fixed(y_true, y_pred):
        # Positive-class probabilities where y_true == 1, else 1 (no-op in log).
        pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
        # Negative-class probabilities where y_true == 0, else 0 (no-op in pow).
        pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
        pos_term = K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))
        neg_term = K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
        return -pos_term - neg_term
    return focal_loss_fixed
開發者ID:mkocabas,項目名稱:focal-loss-keras,代碼行數:8,代碼來源:focal_loss.py

示例9: __call__

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def __call__(self):
        """Return a loss function computing |y_true - y_pred| ** gamma."""
        def calc_loss(y_true, y_pred):
            # The small constant keeps pow's gradient finite at zero error.
            abs_error = K.abs(y_true - y_pred) + 1e-8
            return K.pow(abs_error, self.gamma)
        return calc_loss
開發者ID:zxq2233,項目名稱:n2n-watermark-remove,代碼行數:7,代碼來源:model.py

示例10: gelu

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation."""
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * K.pow(x, 3))
    return 0.5 * x * (1 + K.tanh(inner))


# https://stackoverflow.com/a/42194662/2796084 
開發者ID:yyht,項目名稱:BERT,代碼行數:7,代碼來源:funcs.py

示例11: loglik_discrete

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def loglik_discrete(y, u, a, b, epsilon=K.epsilon()):
    """Discrete Weibull log-likelihood; `u` flags uncensored observations.

    Built from the cumulative hazard at y and y+1 (scale a, shape b).
    """
    hazard0 = K.pow((y + epsilon) / a, b)
    hazard1 = K.pow((y + 1.0) / a, b)
    # epsilon keeps the log argument strictly positive.
    log_pmf_term = K.log(K.exp(hazard1 - hazard0) - (1.0 - epsilon))
    return u * log_pmf_term - hazard1
開發者ID:ragulpr,項目名稱:wtte-rnn,代碼行數:9,代碼來源:wtte.py

示例12: loglik_continuous

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def loglik_continuous(y, u, a, b, epsilon=K.epsilon()):
    """Continuous Weibull log-likelihood; `u` flags uncensored observations."""
    scaled = (y + epsilon) / a
    log_hazard = K.log(b) + b * K.log(scaled)
    return u * log_hazard - K.pow(scaled, b)
開發者ID:ragulpr,項目名稱:wtte-rnn,代碼行數:6,代碼來源:wtte.py

示例13: loglik_continuous_conditional_correction

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def loglik_continuous_conditional_correction(y, u, a, b, epsilon=K.epsilon()):
    """Integrated conditional excess loss.
        Explanation TODO
    """
    scaled = (y + epsilon) / a
    penalty = (b / (b + 1.)) * K.pow(scaled, b)
    return y * (u * (K.log(b) + b * K.log(scaled)) - penalty)
開發者ID:ragulpr,項目名稱:wtte-rnn,代碼行數:10,代碼來源:wtte.py

示例14: get_updates

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def get_updates(self, loss, params):
        """Adam updates with gradient accumulation over `accum_iters` steps.

        Gradients are summed into the accumulators `gs`; only on iterations
        where `iterations % accum_iters == 0` (flag == 1) are the moment
        estimates and the parameters actually updated.
        """
        grads = self.get_gradients(loss, params)
        self.updates = [(self.iterations, self.iterations + 1)]

        t = self.iterations + 1
        lr_t = self.lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

        ms = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        vs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        gs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        self.weights = ms + vs

        for p, g, m, v, gg in zip(params, grads, ms, vs, gs):

            # flag is 1.0 on accumulation boundaries, 0.0 otherwise.
            flag = K.equal(self.iterations % self.accum_iters, 0)
            flag = K.cast(flag, dtype='float32')

            # Keep accumulating between boundaries; reset on the boundary.
            gg_t = (1 - flag) * (gg + g)
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * (gg + flag * g) / self.accum_iters
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square((gg + flag * g) / self.accum_iters)
            p_t = p - flag * lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append((m, flag * m_t + (1 - flag) * m))
            self.updates.append((v, flag * v_t + (1 - flag) * v))
            self.updates.append((gg, gg_t))

            new_p = p_t
            # Apply the parameter's weight constraint (e.g. max-norm), if any.
            # BUG FIX: the original called the nonexistent attribute
            # `p.constraints(new_p)` and then invoked its result again,
            # which would raise AttributeError whenever a constraint was
            # set; Keras stores a single callable on `p.constraint`.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)
            self.updates.append((p, new_p))
        return self.updates
開發者ID:crisie,項目名稱:RecurrentGaze,代碼行數:35,代碼來源:adamaccum.py

示例15: gelu_tanh

# 需要導入模塊: from keras import backend [as 別名]
# 或者: from keras.backend import pow [as 別名]
def gelu_tanh(x):
    """GELU activation computed via the tanh-based approximation."""
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * K.pow(x, 3))
    cdf = 0.5 * (1.0 + K.tanh(inner))
    return x * cdf
開發者ID:liushaoweihua,項目名稱:keras-bert-ner,代碼行數:8,代碼來源:models.py


注:本文中的keras.backend.pow方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。