This article collects typical usage examples of the Python method tensorflow.keras.backend.sum. If you are unsure what backend.sum does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the tensorflow.keras.backend module to which this method belongs.
The following section presents code examples of backend.sum drawn from open-source projects, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: call

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
# This snippet also assumes: import tensorflow as tf
def call(self, inputs):
    """
    Parent layers: atom_features, distance, distance_membership_i, distance_membership_j
    """
    atom_features = inputs[0]
    distance = inputs[1]
    distance_membership_i = inputs[2]
    distance_membership_j = inputs[3]
    distance_hidden = tf.matmul(distance, self.W_df) + self.b_df
    atom_features_hidden = tf.matmul(atom_features, self.W_cf) + self.b_cf
    outputs = tf.multiply(
        distance_hidden, tf.gather(atom_features_hidden, distance_membership_j))
    # For atom i in a molecule m, this step multiplies together the distance
    # information of the atom pair (i, j) and the embedding of atom j
    # (both passed through a hidden layer).
    outputs = tf.matmul(outputs, self.W_fc)
    outputs = self.activation_fn(outputs)
    output_ii = tf.multiply(self.b_df, atom_features_hidden)
    output_ii = tf.matmul(output_ii, self.W_fc)
    output_ii = self.activation_fn(output_ii)
    # For atom i, sum the influence from all other atoms j in the molecule.
    return tf.math.segment_sum(
        outputs, distance_membership_i) - output_ii + atom_features
Example 2: call

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
# This snippet also assumes: import tensorflow as tf
def call(self, inputs):
    if self.data_mode == 'disjoint':
        X, I = inputs
        if K.ndim(I) == 2:
            I = I[:, 0]
    else:
        X = inputs
    inputs_linear = self.features_layer(X)
    attn = self.attention_layer(X)
    masked_inputs = inputs_linear * attn
    if self.data_mode in {'single', 'batch'}:
        # Reduce over the node axis; keep that axis only in 'single' mode.
        output = K.sum(masked_inputs, axis=-2,
                       keepdims=self.data_mode == 'single')
    else:
        # 'disjoint' mode: pool node features per graph via the segment ids in I.
        output = tf.math.segment_sum(masked_inputs, I)
    return output
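The disjoint branch pools node features per graph with tf.math.segment_sum. A standalone sketch of that behavior, with made-up node features and segment ids:

import tensorflow as tf

X = tf.constant([[1., 1.], [2., 2.], [3., 3.]])  # features of three nodes
I = tf.constant([0, 0, 1])  # nodes 0 and 1 belong to graph 0; node 2 to graph 1
print(tf.math.segment_sum(X, I).numpy())
# [[3. 3.]
#  [3. 3.]]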
Example 3: loss

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
def loss(self, y_true, y_pred):
    # Discriminator scores for the real and the generated images.
    disc_true = self.disc(y_true)
    disc_pred = self.disc(y_pred)
    # Sample x_hat along the line between the real and the generated sample.
    # NOTE: alpha should really have shape [batch_size, 1, 1, 1], but
    # self.batch_size cannot be used here (it is not None), so the batch
    # dimension is read from y_pred at runtime instead.
    alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
    diff = y_pred - y_true
    interp = y_true + alpha * diff
    # Take the gradient of D(x_hat) with respect to x_hat.
    gradients = K.gradients(self.disc(interp), [interp])[0]
    grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1)) - 1))
    # Wasserstein critic loss plus the gradient penalty.
    return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen
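In formula form, this is the WGAN-GP critic objective, with y_pred playing the role of the generated sample $\tilde{x}$, y_true the real sample $x$, and interp the interpolate $\hat{x}$:

$$L_D = \mathbb{E}\big[D(\tilde{x})\big] - \mathbb{E}\big[D(x)\big] + \lambda_{gp}\,\mathbb{E}\big[(\lVert\nabla_{\hat{x}} D(\hat{x})\rVert_2 - 1)^2\big]$$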
Example 4: focal_loss_binary

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
# This snippet also assumes: import tensorflow as tf
def focal_loss_binary(y_true, y_pred):
    """Binary cross-entropy focal loss."""
    gamma = 2.0
    alpha = 0.25
    # Predicted probabilities at the positive and negative ground-truth positions.
    pt_1 = tf.where(tf.equal(y_true, 1),
                    y_pred,
                    tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0),
                    y_pred,
                    tf.zeros_like(y_pred))
    epsilon = K.epsilon()
    # Clip to prevent NaN and Inf.
    pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)
    pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)
    weight = alpha * K.pow(1. - pt_1, gamma)
    fl1 = -K.sum(weight * K.log(pt_1))
    weight = (1 - alpha) * K.pow(pt_0, gamma)
    fl0 = -K.sum(weight * K.log(1. - pt_0))
    return fl1 + fl0
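The loss can be checked eagerly on a toy batch; the values below are illustrative assumptions, not from the original source:

import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.constant([[1.], [0.], [1.]])
y_pred = tf.constant([[0.9], [0.1], [0.3]])
print(focal_loss_binary(y_true, y_pred))  # small: confident predictions are down-weighted

# It can also be passed directly to Keras:
# model.compile(optimizer='adam', loss=focal_loss_binary)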
Example 5: focal_loss_categorical

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
def focal_loss_categorical(y_true, y_pred):
    """Categorical cross-entropy focal loss."""
    gamma = 2.0
    alpha = 0.25
    # Rescale so that the predicted probabilities sum to 1.0.
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    # Clip the prediction values to prevent NaN and Inf.
    epsilon = K.epsilon()
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    # Calculate the cross entropy.
    cross_entropy = -y_true * K.log(y_pred)
    # Apply the focal weighting.
    weight = alpha * K.pow(1 - y_pred, gamma)
    cross_entropy *= weight
    return K.sum(cross_entropy, axis=-1)
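A quick eager check on a made-up one-sample batch:

import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.constant([[0., 1., 0.]])
y_pred = tf.constant([[0.2, 0.7, 0.1]])
# Cross entropy -log(0.7) ≈ 0.357, focal weight 0.25 * (1 - 0.7)^2 = 0.0225,
# so the loss is ≈ 0.008: the confident correct prediction is down-weighted.
print(focal_loss_categorical(y_true, y_pred))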
Example 6: surv_likelihood

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
def surv_likelihood(n_intervals):
    """Create a custom Keras loss function for a neural-network survival model.

    Arguments
        n_intervals: the number of survival time intervals
    Returns
        A custom loss function that can be used with Keras
    """
    def loss(y_true, y_pred):
        """Keras requires the loss to take exactly two arguments.

        Arguments
            y_true: Tensor.
                The first half of the values is 1 if the individual survived that
                interval, 0 if not.
                The second half of the values is for individuals who failed, and is
                1 for the time interval during which failure occurred, 0 for the
                other intervals. See the make_surv_array function.
            y_pred: Tensor, predicted survival probability (1 - hazard probability)
                for each time interval.
        Returns
            Vector of losses for this minibatch.
        """
        cens_uncens = 1. + y_true[:, 0:n_intervals] * (y_pred - 1.)  # component for all individuals
        uncens = 1. - y_true[:, n_intervals:2 * n_intervals] * y_pred  # component for uncensored individuals only
        # Return the negative log-likelihood, clipped to avoid log(0).
        return K.sum(-K.log(K.clip(K.concatenate((cens_uncens, uncens)), K.epsilon(), None)), axis=-1)
    return loss
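A minimal sketch of wiring this factory into a model; the layer sizes and the input width n_features are illustrative assumptions, not part of the original snippet:

import tensorflow as tf

n_features = 8    # hypothetical input width
n_intervals = 10  # hypothetical number of time intervals

model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(n_features,)),
    tf.keras.layers.Dense(n_intervals, activation='sigmoid'),  # per-interval survival probability
])
model.compile(optimizer='adam', loss=surv_likelihood(n_intervals))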
Example 7: dice_soft

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
def dice_soft(y_true, y_pred, smooth=0.00001):
    # Identify the axes to reduce over
    axis = identify_axis(y_true.get_shape())
    # Calculate the required sums
    intersection = y_true * y_pred
    intersection = K.sum(intersection, axis=axis)
    y_true = K.sum(y_true, axis=axis)
    y_pred = K.sum(y_pred, axis=axis)
    # Calculate the Soft Dice Similarity Coefficient per class
    dice = ((2 * intersection) + smooth) / (y_true + y_pred + smooth)
    # Return the mean Dice score over classes
    dice = K.mean(dice)
    return dice
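The helper identify_axis is defined elsewhere in the same project and is not shown here. A minimal reimplementation consistent with how it is used by these losses (reduce over the spatial axes of channels-last tensors) might look like:

def identify_axis(shape):
    # 3D segmentation: (batch, x, y, z, channels)
    if len(shape) == 5:
        return [1, 2, 3]
    # 2D segmentation: (batch, x, y, channels)
    elif len(shape) == 4:
        return [1, 2]
    else:
        raise ValueError('Metric: tensor shape is neither 2D nor 3D.')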
Example 8: dice_weighted

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
def dice_weighted(weights):
    weights = K.variable(weights)

    def weighted_loss(y_true, y_pred, smooth=0.00001):
        axis = identify_axis(y_true.get_shape())
        intersection = y_true * y_pred
        intersection = K.sum(intersection, axis=axis)
        y_true = K.sum(y_true, axis=axis)
        y_pred = K.sum(y_pred, axis=axis)
        dice = ((2 * intersection) + smooth) / (y_true + y_pred + smooth)
        # Weight each class's Dice score, then negate to obtain a loss
        dice = dice * weights
        return -dice
    return weighted_loss

#-----------------------------------------------------#
#              Dice & Crossentropy loss               #
#-----------------------------------------------------#
Example 9: dice_crossentropy

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
def dice_crossentropy(y_truth, y_pred):
    # Obtain the soft Dice loss
    dice = dice_soft_loss(y_truth, y_pred)
    # Obtain the crossentropy
    crossentropy = K.categorical_crossentropy(y_truth, y_pred)
    crossentropy = K.mean(crossentropy)
    # Return the sum of both
    return dice + crossentropy
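dice_soft_loss is defined elsewhere in the same project. Given dice_soft from Example 7, it is presumably the Dice score's complement; a hedged sketch:

def dice_soft_loss(y_true, y_pred):
    return 1. - dice_soft(y_true, y_pred)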
#-----------------------------------------------------#
#                    Tversky loss                     #
#-----------------------------------------------------#
#                     Reference:                      #
#                Sadegh et al. (2017)                 #
#    Tversky loss function for image segmentation    #
#     using 3D fully convolutional deep networks     #
#-----------------------------------------------------#
# alpha=beta=0.5 : dice coefficient                   #
# alpha=beta=1   : jaccard                            #
# alpha+beta=1   : produces set of F*-scores          #
#-----------------------------------------------------#
Example 10: tversky_loss

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
def tversky_loss(y_true, y_pred, smooth=0.000001):
    # Define alpha and beta
    alpha = 0.5
    beta = 0.5
    # Calculate the Tversky index for each class
    axis = identify_axis(y_true.get_shape())
    tp = K.sum(y_true * y_pred, axis=axis)
    fn = K.sum(y_true * (1 - y_pred), axis=axis)
    fp = K.sum((1 - y_true) * y_pred, axis=axis)
    tversky_class = (tp + smooth) / (tp + alpha * fn + beta * fp + smooth)
    # Sum the class scores into one score
    tversky = K.sum(tversky_class, axis=[-1])
    # Identify the number of classes
    n = K.cast(K.shape(y_true)[-1], 'float32')
    # Return n minus the summed index, i.e. the sum of (1 - index) per class
    return n - tversky
#-----------------------------------------------------#
#             Tversky & Crossentropy loss             #
#-----------------------------------------------------#
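With alpha = beta = 0.5 the per-class Tversky index reduces to the soft Dice coefficient, so this loss equals the sum of (1 - dice) over classes. Like the other losses here, it plugs straight into compile; a hedged sketch, assuming a model object and dice_soft from Example 7 are in scope:

model.compile(optimizer='adam', loss=tversky_loss, metrics=[dice_soft])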
Example 11: mcor

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
def mcor(y_true, y_pred):
    # Matthews correlation coefficient
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)
    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)
    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    # Epsilon guards against division by zero
    return numerator / (denominator + K.epsilon())
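A quick eager check on a made-up batch (the numbers are illustrative assumptions):

from tensorflow.keras import backend as K

y_true = K.constant([1., 0., 1., 1.])
y_pred = K.constant([0.9, 0.2, 0.8, 0.4])
# Rounded predictions [1, 0, 1, 0] give tp=2, tn=1, fp=0, fn=1,
# so MCC = (2*1 - 0*1) / sqrt(2*3*1*2) ≈ 0.577.
print(K.eval(mcor(y_true, y_pred)))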
Example 12: recall

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
def recall(y_true, y_pred):
    """Recall for foreground pixels.

    Calculates pixelwise recall TP / (TP + FN).
    """
    # Count the true positives
    truth = K.round(K.clip(y_true, K.epsilon(), 1))
    pred_pos = K.round(K.clip(y_pred, K.epsilon(), 1))
    true_pos = K.sum(K.cast(K.all(K.stack([truth, pred_pos], axis=2), axis=2),
                            dtype=K.floatx()))
    truth_ct = K.sum(K.round(K.clip(y_true, K.epsilon(), 1)))
    # Epsilon guards against division by zero when there are no foreground
    # pixels (a Python-level `if truth_ct == 0` would fail in graph mode).
    return true_pos / (truth_ct + K.epsilon())
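Evaluated eagerly on a toy 2-D batch (values are illustrative):

from tensorflow.keras import backend as K

y_true = K.constant([[1., 0., 1., 1.]])
y_pred = K.constant([[1., 1., 0., 1.]])
print(K.eval(recall(y_true, y_pred)))  # 2 true positives / 3 positives ≈ 0.667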
Example 13: _cosine_dist

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
# This snippet also assumes: import tensorflow as tf
def _cosine_dist(x, y):
    """Computes the cosine similarity (normalized inner product) between two tensors.

    Parameters
    ----------
    x: tf.Tensor
        Input Tensor
    y: tf.Tensor
        Input Tensor
    """
    denom = (backend.sqrt(backend.sum(tf.square(x)) * backend.sum(tf.square(y))) +
             backend.epsilon())
    return backend.dot(x, tf.transpose(y)) / denom
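A quick check with two made-up row vectors:

import tensorflow as tf
from tensorflow.keras import backend

x = tf.constant([[1., 0.]])
y = tf.constant([[1., 1.]])
print(_cosine_dist(x, y))  # ~0.707, i.e. cos(45°) between the two rows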
Example 14: gradient_penalty

# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import sum [as alias]
# This snippet also assumes: import numpy as np
def gradient_penalty(samples, output, weight):
    gradients = K.gradients(output, samples)[0]
    gradients_sqr = K.square(gradients)
    # Sum the squared gradients over every non-batch axis.
    gradient_penalty = K.sum(gradients_sqr,
                             axis=np.arange(1, len(gradients_sqr.shape)))
    # Penalize the mean squared gradient norm: weight * E[||grad||^2]
    return K.mean(gradient_penalty) * weight