This article collects typical usage examples of the Python method tensorflow.keras.backend.clip. If you have been asking yourself how backend.clip works, what it is used for, or what real calls look like, the curated examples below should help. You can also explore the other methods of the tensorflow.keras.backend module.
The following shows 15 code examples of the backend.clip method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
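Before diving in, here is a minimal sketch of what K.clip itself does (this demo is ours, not one of the collected examples): it bounds every element of a tensor to [min_value, max_value], the standard trick for keeping probabilities away from exactly 0 or 1 before a log.

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([-2.0, 0.3, 0.999, 1.5])
print(K.clip(x, K.epsilon(), 1.0 - K.epsilon()))
# -> [1e-07, 0.3, 0.999, ~1.0]; every element now lies in
#    [epsilon, 1 - epsilon], so K.log() cannot return -inf or NaN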
Example 1: focal_loss_binary
# Required imports:
import tensorflow as tf
from tensorflow.keras import backend as K

def focal_loss_binary(y_true, y_pred):
    """Binary cross-entropy focal loss"""
    gamma = 2.0
    alpha = 0.25
    pt_1 = tf.where(tf.equal(y_true, 1),
                    y_pred,
                    tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0),
                    y_pred,
                    tf.zeros_like(y_pred))
    epsilon = K.epsilon()
    # clip to prevent NaN and Inf
    pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)
    pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)
    weight = alpha * K.pow(1. - pt_1, gamma)
    fl1 = -K.sum(weight * K.log(pt_1))
    weight = (1 - alpha) * K.pow(pt_0, gamma)
    fl0 = -K.sum(weight * K.log(1. - pt_0))
    return fl1 + fl0
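A minimal usage sketch for this loss, assuming a binary classifier with a sigmoid output (the architecture below is illustrative, not from the original project):

from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Dense(16, activation='relu', input_shape=(8,)),
    layers.Dense(1, activation='sigmoid'),
])
# Keras calls a custom loss with (y_true, y_pred) for each batch
model.compile(optimizer='adam', loss=focal_loss_binary)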
Example 2: focal_loss_categorical
# Required imports:
from tensorflow.keras import backend as K

def focal_loss_categorical(y_true, y_pred):
    """Categorical cross-entropy focal loss"""
    gamma = 2.0
    alpha = 0.25
    # rescale so the class probabilities sum to 1.0
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    # clip the prediction values to prevent NaN and Inf
    epsilon = K.epsilon()
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    # calculate cross entropy
    cross_entropy = -y_true * K.log(y_pred)
    # apply the focal weight
    weight = alpha * K.pow(1 - y_pred, gamma)
    cross_entropy *= weight
    return K.sum(cross_entropy, axis=-1)
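A quick eager sanity check (the values are illustrative): a confident, correct prediction should yield a small loss.

import tensorflow as tf

y_true = tf.constant([[0., 1., 0.]])
y_pred = tf.constant([[0.2, 0.7, 0.1]])
print(focal_loss_categorical(y_true, y_pred))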
Example 3: action
# Required imports:
import tensorflow_probability as tfp
from tensorflow.keras import backend as K

def action(self, args):
    """Given mean and stddev, sample an action, clip it,
    and return it.
    We assume a Gaussian probability distribution over
    actions given a state.
    Argument:
        args (list): mean, stddev list
    Return:
        action (tensor): policy action
    """
    mean, stddev = args
    dist = tfp.distributions.Normal(loc=mean, scale=stddev)
    action = dist.sample(1)
    action = K.clip(action,
                    self.env.action_space.low[0],
                    self.env.action_space.high[0])
    return action
Developer: PacktPublishing, Project: Advanced-Deep-Learning-with-Keras, Lines: 19, Source file: policygradient-car-10.1.1.py
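A sampler like this is typically attached to the policy network through a Lambda layer. Below is a minimal sketch, assuming a 3-dimensional state and action bounds of [-1, 1]; both are placeholders for the env.action_space values used in the original method.

import tensorflow_probability as tfp
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Input, Lambda
from tensorflow.keras.models import Model

state = Input(shape=(3,))
mean = Dense(1, activation='linear')(state)
stddev = Dense(1, activation='softplus')(state)  # softplus keeps stddev > 0
action = Lambda(lambda args: K.clip(
    tfp.distributions.Normal(loc=args[0], scale=args[1]).sample(),
    -1.0, 1.0))([mean, stddev])
actor = Model(state, action)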
Example 4: mi_loss
# Required imports:
import numpy as np
from tensorflow.keras import backend as K

def mi_loss(self, y_true, y_pred):
    """MINE loss function
    Arguments:
        y_true (tensor): Not used since this is
            unsupervised learning
        y_pred (tensor): stack of predictions for
            the joint T(x,y) and the marginal T(x,y)
    """
    size = self.args.batch_size
    # lower half is the prediction for the joint distribution
    pred_xy = y_pred[0: size, :]
    # upper half is the prediction for the marginal distribution
    pred_x_y = y_pred[size: y_pred.shape[0], :]
    loss = K.mean(K.exp(pred_x_y))
    loss = K.clip(loss, K.epsilon(), np.finfo(float).max)
    loss = K.mean(pred_xy) - K.log(loss)
    return -loss
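A hedged sketch of the input layout this loss expects: the first batch_size rows of y_pred are the statistics network T evaluated on joint samples, and the remaining rows are T on shuffled (marginal) pairs. The names below are illustrative.

import tensorflow as tf

batch_size = 4
pred_joint = tf.random.normal((batch_size, 1))     # T(x, y) on joint pairs
pred_marginal = tf.random.normal((batch_size, 1))  # T(x, y') on shuffled pairs
y_pred = tf.concat([pred_joint, pred_marginal], axis=0)
# mi_loss then slices this stack back apart with y_pred[0:batch_size]
# and y_pred[batch_size:]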
Example 5: mcor
# Required imports:
from tensorflow.keras import backend as K

def mcor(y_true, y_pred):
    # Matthews correlation coefficient
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)
    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)
    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / (denominator + K.epsilon())
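A hedged usage sketch: mcor can be passed directly as a custom metric at compile time (the one-layer model below is illustrative):

from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Dense(1, activation='sigmoid', input_shape=(4,)),
])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[mcor])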
Example 6: recall
# Required imports:
from tensorflow.keras import backend as K

def recall(y_true, y_pred):
    """Recall for foreground pixels.
    Calculates pixelwise recall TP/(TP + FN).
    """
    # count true positives
    truth = K.round(K.clip(y_true, K.epsilon(), 1))
    pred_pos = K.round(K.clip(y_pred, K.epsilon(), 1))
    true_pos = K.sum(K.cast(K.all(K.stack([truth, pred_pos], axis=2), axis=2),
                            dtype='float64'))
    truth_ct = K.sum(K.round(K.clip(y_true, K.epsilon(), 1)))
    # guard against division by zero when there are no foreground pixels
    # (note: this Python-level check only works in eager execution)
    if truth_ct == 0:
        return 0
    recall = true_pos / truth_ct
    return recall
Example 7: call
# Required imports:
import numpy as np
from tensorflow.keras import backend as K

def call(self, inputs):
    F = K.int_shape(inputs)[-1]
    # Minkowski metric: identity matrix with the last diagonal entry negated
    minkowski_prod_mat = np.eye(F)
    minkowski_prod_mat[-1, -1] = -1.
    minkowski_prod_mat = K.constant(minkowski_prod_mat)
    output = K.dot(inputs, minkowski_prod_mat)
    output = K.dot(output, K.transpose(inputs))
    output = K.clip(output, -10e9, -1.)
    if self.activation is not None:
        output = self.activation(output)
    return output
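To see what this layer computes before clipping, here is a hedged eager sketch of the Minkowski inner product (the point is illustrative):

import numpy as np
from tensorflow.keras import backend as K

x = K.constant([[0.3, 0.2, 1.1]])
eta = np.eye(3)
eta[-1, -1] = -1.0  # Minkowski metric diag(1, 1, -1)
print(K.dot(K.dot(x, K.constant(eta)), K.transpose(x)))
# -> 0.09 + 0.04 - 1.21 = -1.08; call() clips such products into
#    [-1e10, -1] before applying the activation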
Example 8: loss
# Required imports:
import tensorflow as tf
from tensorflow.keras import backend as K

def loss(self, y_true, y_pred):
    """Categorical cross-entropy loss"""
    if self.crop_indices is not None:
        y_true = utils.batch_gather(y_true, self.crop_indices)
        y_pred = utils.batch_gather(y_pred, self.crop_indices)
    if self.use_float16:
        y_true = K.cast(y_true, 'float16')
        y_pred = K.cast(y_pred, 'float16')
    # scale and clip probabilities
    # (this should not be necessary for softmax output)
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    y_pred = K.clip(y_pred, K.epsilon(), 1)
    # compute log probability
    log_post = K.log(y_pred)  # log likelihood
    # loss
    loss = - y_true * log_post
    # weighted loss
    if self.weights is not None:
        loss *= self.weights
    if self.vox_weights is not None:
        loss *= self.vox_weights
    # take the total loss
    # loss = K.batch_flatten(loss)
    mloss = K.mean(K.sum(K.cast(loss, 'float32'), -1))
    # tf.verify_tensor_all_finite was removed in TF 2.x;
    # tf.debugging.assert_all_finite is the current equivalent
    mloss = tf.debugging.assert_all_finite(mloss, 'Loss not finite')
    return mloss
Example 9: mi_loss
# Required imports:
import numpy as np
from tensorflow.keras import backend as K

def mi_loss(self, y_true, y_pred):
    """Mutual information loss computed from the joint
    distribution matrix and the marginals
    Arguments:
        y_true (tensor): Not used since this is
            unsupervised learning
        y_pred (tensor): stack of softmax predictions for
            the Siamese latent vectors (Z and Zbar)
    """
    size = self.args.batch_size
    n_labels = y_pred.shape[-1]
    # lower half is Z
    Z = y_pred[0: size, :]
    Z = K.expand_dims(Z, axis=2)
    # upper half is Zbar
    Zbar = y_pred[size: y_pred.shape[0], :]
    Zbar = K.expand_dims(Zbar, axis=1)
    # compute the joint distribution (Eq 10.3.2 & 10.3.3)
    P = K.batch_dot(Z, Zbar)
    P = K.sum(P, axis=0)
    # enforce a symmetric joint distribution (Eq 10.3.4)
    P = (P + K.transpose(P)) / 2.0
    # normalize the total probability to 1.0
    P = P / K.sum(P)
    # marginal distributions (Eq 10.3.5 & 10.3.6)
    Pi = K.expand_dims(K.sum(P, axis=1), axis=1)
    Pj = K.expand_dims(K.sum(P, axis=0), axis=0)
    Pi = K.repeat_elements(Pi, rep=n_labels, axis=1)
    Pj = K.repeat_elements(Pj, rep=n_labels, axis=0)
    P = K.clip(P, K.epsilon(), np.finfo(float).max)
    Pi = K.clip(Pi, K.epsilon(), np.finfo(float).max)
    Pj = K.clip(Pj, K.epsilon(), np.finfo(float).max)
    # negative MI loss (Eq 10.3.7)
    neg_mi = K.sum((P * (K.log(Pi) + K.log(Pj) - K.log(P))))
    # each head contributes 1/n_heads to the total loss
    return neg_mi / self.args.heads
Example 10: precision
# Required imports:
import tensorflow.keras.backend as k

def precision(y_true, y_pred):
    """Precision metric.
    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant. Only computes a batch-wise
    average of precision.
    """
    true_positives = k.sum(k.round(k.clip(y_true * y_pred, 0, 1)))
    predicted_positives = k.sum(k.round(k.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + k.epsilon())
Example 11: focal_loss
# Required imports:
import tensorflow as tf
from tensorflow.keras import backend as K

def focal_loss(y_true, y_pred, gamma=2, alpha=0.95):
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
    # clip to a fixed range instead of K.epsilon()
    pt_1 = K.clip(pt_1, 1e-3, .999)
    pt_0 = K.clip(pt_0, 1e-3, .999)
    return (-K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))
            - K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0)))
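A hedged usage sketch: since Keras invokes a loss with only (y_true, y_pred), non-default gamma and alpha values can be bound with a small wrapper (the model below is illustrative):

from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Dense(1, activation='sigmoid', input_shape=(4,)),
])
model.compile(optimizer='adam',
              loss=lambda yt, yp: focal_loss(yt, yp, gamma=2, alpha=0.75))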
Example 12: f1
# Required imports:
from tensorflow.keras import backend as K

def f1(y_true, y_pred):
    K.set_epsilon(1e-05)

    def recall(y_true, y_pred):
        """Recall metric.
        Only computes a batch-wise average of recall.
        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.
        Only computes a batch-wise average of precision.
        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
Example 13: precision
# Required imports:
from tensorflow.keras import backend as K

def precision(y_true, y_pred):
    K.set_epsilon(1e-05)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example 14: recall
# Required imports:
from tensorflow.keras import backend as K

def recall(y_true, y_pred):
    K.set_epsilon(1e-05)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Example 15: _euclidian_dist
# Required imports:
from typing import List
from tensorflow import Tensor
from tensorflow.keras import backend as K

def _euclidian_dist(self, x_pair: List[Tensor]) -> Tensor:
    x1_norm = K.l2_normalize(x_pair[0], axis=1)
    x2_norm = K.l2_normalize(x_pair[1], axis=1)
    diff = x1_norm - x2_norm
    square = K.square(diff)
    _sum = K.sum(square, axis=1)
    # keep the sum strictly positive so the gradient of sqrt stays finite
    _sum = K.clip(_sum, min_value=1e-12, max_value=None)
    dist = K.sqrt(_sum) / 2.
    return dist
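A hedged eager check of the same computation, rewritten as free-standing code (the vectors are illustrative): identical inputs give a distance near 0, while orthogonal unit vectors give sqrt(2)/2.

from tensorflow.keras import backend as K

a = K.constant([[1.0, 0.0]])
b = K.constant([[0.0, 1.0]])
diff = K.l2_normalize(a, axis=1) - K.l2_normalize(b, axis=1)
dist = K.sqrt(K.clip(K.sum(K.square(diff), axis=1), 1e-12, None)) / 2.
print(dist)  # ~0.7071, i.e. sqrt(2)/2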