This article collects typical usage examples of the Python method keras.backend.maximum. If you have been wondering what backend.maximum does, how to call it, or how it is used in practice, the curated code examples below should help. You can also explore the other methods of the keras.backend
module.
The following shows 15 code examples of backend.maximum, sorted by popularity by default.
Example 1: batch_iou
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
import numpy as np

def batch_iou(boxes, box):
    """Compute the Intersection-Over-Union of a batch of boxes with another
    box.
    Args:
        boxes: 2D array of boxes, each of [cx, cy, width, height].
        box: a single array of [cx, cy, width, height].
    Returns:
        ious: array of IOU values, each in the range [0, 1].
    """
    lr = np.maximum(
        np.minimum(boxes[:, 0] + 0.5 * boxes[:, 2], box[0] + 0.5 * box[2]) -
        np.maximum(boxes[:, 0] - 0.5 * boxes[:, 2], box[0] - 0.5 * box[2]),
        0
    )
    tb = np.maximum(
        np.minimum(boxes[:, 1] + 0.5 * boxes[:, 3], box[1] + 0.5 * box[3]) -
        np.maximum(boxes[:, 1] - 0.5 * boxes[:, 3], box[1] - 0.5 * box[3]),
        0
    )
    inter = lr * tb
    union = boxes[:, 2] * boxes[:, 3] + box[2] * box[3] - inter
    return inter / union
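A quick usage sketch with made-up boxes (all values hypothetical), assuming the function above is in scope:

import numpy as np

boxes = np.array([[10., 10., 4., 4.],
                  [10., 10., 2., 2.],
                  [50., 50., 4., 4.]])
box = np.array([10., 10., 4., 4.])
print(batch_iou(boxes, box))  # -> [1.0, 0.25, 0.0]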
Example 2: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def call(self, input_tensor, mask=None):
    z_s = input_tensor[0]
    z_n = input_tensor[1]
    r_s = input_tensor[2]
    z_s = K.l2_normalize(z_s, axis=-1)
    z_n = K.l2_normalize(z_n, axis=-1)
    r_s = K.l2_normalize(r_s, axis=-1)
    steps = z_n.shape[1]
    # Positive score: similarity between the sentence embedding z_s and its
    # reconstruction r_s, repeated once per negative sample.
    pos = K.sum(z_s * r_s, axis=-1, keepdims=True)
    pos = K.repeat_elements(pos, steps, axis=1)
    r_s = K.expand_dims(r_s, axis=-2)
    r_s = K.repeat_elements(r_s, steps, axis=1)
    # Negative scores: similarity between each negative sample and r_s.
    neg = K.sum(z_n * r_s, axis=-1)
    # Max-margin (hinge) loss, summed over the negative samples.
    loss = K.cast(K.sum(K.maximum(0., (1. - pos + neg)), axis=-1, keepdims=True), K.floatx())
    return loss
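The returned value is a per-sample hinge loss: each negative sample contributes max(0, 1 - pos + neg). A small numpy illustration with hypothetical similarity scores:

import numpy as np

pos = 0.9                          # similarity of r_s to the true sentence
neg = np.array([0.2, 0.95, -0.3])  # similarities to three negative samples
print(np.maximum(0., 1. - pos + neg))  # -> [0.3, 1.05, 0.0]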
Example 3: __init__
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def __init__(self, lr=1e-1, beta_1=0.9, beta_2=0.999,
             epsilon=1e-8, decay=0., amsgrad=False, partial=1. / 8., **kwargs):
    if partial < 0 or partial > 0.5:
        raise ValueError(
            "Padam: 'partial' must be a float in the range [0, 0.5], "
            "since higher values will cause divergence during training."
        )
    super(Padam, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
        self.iterations = K.variable(0, dtype='int64', name='iterations')
        self.lr = K.variable(lr, name='lr')
        self.beta_1 = K.variable(beta_1, name='beta_1')
        self.beta_2 = K.variable(beta_2, name='beta_2')
        self.decay = K.variable(decay, name='decay')
    if epsilon is None:
        epsilon = K.epsilon()
    self.epsilon = epsilon
    self.partial = partial
    self.initial_decay = decay
    self.amsgrad = amsgrad
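Only the constructor is shown above. Assuming the full Padam class (including its get_updates method) is available in scope, it plugs into compile like any Keras optimizer; a minimal sketch:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(10, activation='softmax', input_shape=(784,))])
model.compile(optimizer=Padam(lr=1e-1, partial=0.25),
              loss='categorical_crossentropy')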
Example 4: ranking_loss_with_margin
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def ranking_loss_with_margin(y_pred, y_true):
    """
    Using this loss trains the model to give scores to all correct elements in y_true that are
    higher than all scores it gives to incorrect elements in y_true, plus a margin.

    For example, let ``y_true = [0, 0, 1, 1, 0]``, and let ``y_pred = [-1, 1, 2, 0, -2]``. We will
    find the lowest score assigned to correct elements in ``y_true`` (``0`` in this case), and the
    highest score assigned to incorrect elements in ``y_true`` (``1`` in this case). We will then
    compute a hinge loss given these values: ``K.maximum(0.0, 1 + 1 - 0)``.

    Note that the way we do this uses ``K.max()`` and ``K.min()`` over the elements in ``y_true``,
    which means that if you have a lot of values in here, you'll only get gradients backpropping
    through two of them (the ones on the margin). This could be an inefficient use of your
    computation time. Think carefully about the data that you're using with this loss function.

    Also, because of the way masking works with Keras loss functions, you need to be sure that any
    masked elements in ``y_pred`` have very negative values before they get passed into this loss
    function.
    """
    # VERY_LARGE_NUMBER and VERY_NEGATIVE_NUMBER are module-level constants
    # used to mask out the irrelevant elements before taking the min/max.
    correct_elements = y_pred + (1.0 - y_true) * VERY_LARGE_NUMBER
    lowest_scoring_correct = K.min(correct_elements, axis=-1)
    incorrect_elements = y_pred + y_true * VERY_NEGATIVE_NUMBER
    highest_scoring_incorrect = K.max(incorrect_elements, axis=-1)
    return K.mean(K.maximum(0.0, 1.0 + highest_scoring_incorrect - lowest_scoring_correct))
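A numpy re-check of the docstring's worked example; the two masking constants are hypothetical stand-ins for the module-level values:

import numpy as np

VERY_LARGE_NUMBER = 1e10       # hypothetical stand-in
VERY_NEGATIVE_NUMBER = -1e10   # hypothetical stand-in

y_true = np.array([0., 0., 1., 1., 0.])
y_pred = np.array([-1., 1., 2., 0., -2.])
lowest_correct = np.min(y_pred + (1.0 - y_true) * VERY_LARGE_NUMBER)  # 0.0
highest_incorrect = np.max(y_pred + y_true * VERY_NEGATIVE_NUMBER)    # 1.0
print(max(0.0, 1.0 + highest_incorrect - lowest_correct))             # 2.0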
Example 5: mix_gaussian_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def mix_gaussian_loss(x, mu, log_sig, w):
    '''
    Combine the mixture-of-Gaussians distribution and the loss into a single
    function so that we can use the log-sum-exp trick for numerical stability.
    '''
    if K.backend() == "tensorflow":
        x.set_shape([None, 1])
    # log_norm_pdf is a helper defined elsewhere in the source module.
    gauss = log_norm_pdf(K.repeat_elements(x=x, rep=mu.shape[1], axis=1), mu, log_sig)
    # TODO: get rid of clipping.
    gauss = K.clip(gauss, -40, 40)
    max_gauss = K.maximum(0., K.max(gauss))
    # Log-sum-exp trick: shift by the max before exponentiating.
    gauss = gauss - max_gauss
    out = K.sum(w * K.exp(gauss), axis=1)
    loss = K.mean(-K.log(out) + max_gauss)
    return loss
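Why subtracting max_gauss matters: exponentiating large log-densities directly overflows, while the shifted form stays finite. A standalone numpy illustration (values hypothetical):

import numpy as np

log_p = np.array([710., 709., 708.])          # exp(710) overflows float64
m = log_p.max()
print(np.log(np.sum(np.exp(log_p - m))) + m)  # ~710.41, finite
print(np.log(np.sum(np.exp(log_p))))          # inf (overflow)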
Example 6: margin_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def margin_loss(y, pred):
    """
    The first part of the capsule-network loss (the classification loss).
    """
    return K.mean(K.sum(y * K.square(K.maximum(0.9 - pred, 0)) +
                        0.5 * K.square((1 - y) * K.maximum(pred - 0.1, 0)), axis=1))
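A toy check with hypothetical capsule lengths: the loss vanishes when the correct class's output exceeds 0.9 and every other output stays below 0.1:

import numpy as np

y = np.array([[0., 1.]])
pred = np.array([[0.05, 0.95]])
loss = np.sum(y * np.maximum(0.9 - pred, 0) ** 2
              + 0.5 * ((1 - y) * np.maximum(pred - 0.1, 0)) ** 2, axis=1)
print(loss)  # -> [0.]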
Example 7: box_iou
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def box_iou(b1, b2):
    '''Return an IOU tensor.

    Parameters
    ----------
    b1: tensor, shape=(i1, ..., iN, 4), xywh
    b2: tensor, shape=(j, 4), xywh

    Returns
    -------
    iou: tensor, shape=(i1, ..., iN, j)
    '''
    # Expand dims to apply broadcasting.
    b1 = K.expand_dims(b1, -2)
    b1_xy = b1[..., :2]
    b1_wh = b1[..., 2:4]
    b1_wh_half = b1_wh / 2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half
    # Expand dims to apply broadcasting.
    b2 = K.expand_dims(b2, 0)
    b2_xy = b2[..., :2]
    b2_wh = b2[..., 2:4]
    b2_wh_half = b2_wh / 2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half
    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    iou = intersect_area / (b1_area + b2_area - intersect_area)
    return iou
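A minimal sketch of calling it on two anchor boxes against one target box (made-up xywh values), assuming a TensorFlow backend:

import numpy as np
from keras import backend as K

b1 = K.constant(np.array([[10., 10., 4., 4.],
                          [10., 10., 2., 2.]]))  # shape (2, 4)
b2 = K.constant(np.array([[10., 10., 4., 4.]]))  # shape (1, 4)
print(K.eval(box_iou(b1, b2)))  # -> [[1.0], [0.25]]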
Example 8: compute_euclidean_match_score
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def compute_euclidean_match_score(l_r):
    l, r = l_r
    # The expression under the sqrt expands the pairwise squared euclidean
    # distance: ||l_i - r_j||^2 = ||l_i||^2 + ||r_j||^2 - 2 * l_i . r_j
    denominator = 1. + K.sqrt(
        -2 * K.batch_dot(l, r, axes=[2, 2]) +
        K.expand_dims(K.sum(K.square(l), axis=2), 2) +
        K.expand_dims(K.sum(K.square(r), axis=2), 1)
    )
    denominator = K.maximum(denominator, K.epsilon())
    return 1. / denominator
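A numpy sanity check of that expansion on random data (shapes chosen arbitrarily):

import numpy as np

l = np.random.rand(1, 2, 3)  # (batch, steps, dims)
r = np.random.rand(1, 2, 3)
expanded = (-2 * np.einsum('bik,bjk->bij', l, r)
            + np.sum(l ** 2, axis=2)[:, :, None]
            + np.sum(r ** 2, axis=2)[:, None, :])
direct = np.sum((l[:, :, None, :] - r[:, None, :, :]) ** 2, axis=-1)
print(np.allclose(expanded, direct))  # True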
Example 9: nrlu
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def nrlu(x):
    # Noisy ReLU: add Gaussian noise whose scale follows the activations.
    std = K.mean(K.sigmoid(x))
    # Note: in Keras 2.x the keyword is `stddev`; older Keras 1.x used `std`.
    eta = K.random_normal(shape=x.shape, stddev=std)
    y = K.maximum(x + eta, 0)
    return y
Example 10: tensor_iou
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def tensor_iou(box1, box2, input_mask, config):
    """Computes the pairwise IOU of two lists of boxes.

    Arguments:
        box1 -- first list of boxes, as coordinate tensors in
                (xmin, ymin, xmax, ymax) order
        box2 -- second list of boxes, in the same format
        input_mask -- zero-one mask indicating which boxes to compute
        config -- object containing the hyperparameters

    Returns:
        Masked IOU tensor of shape (BATCH_SIZE, ANCHORS).
    """
    xmin = K.maximum(box1[0], box2[0])
    ymin = K.maximum(box1[1], box2[1])
    xmax = K.minimum(box1[2], box2[2])
    ymax = K.minimum(box1[3], box2[3])
    w = K.maximum(0.0, xmax - xmin)
    h = K.maximum(0.0, ymax - ymin)
    intersection = w * h
    w1 = box1[2] - box1[0]
    h1 = box1[3] - box1[1]
    w2 = box2[2] - box2[0]
    h2 = box2[3] - box2[1]
    union = w1 * h1 + w2 * h2 - intersection
    return intersection / (union + config.EPSILON) * K.reshape(input_mask, [config.BATCH_SIZE, config.ANCHORS])
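A hedged usage sketch with a hypothetical config object; each of box1/box2 is a list of four per-anchor coordinate tensors in (xmin, ymin, xmax, ymax) order:

import numpy as np
from types import SimpleNamespace
from keras import backend as K

config = SimpleNamespace(EPSILON=1e-8, BATCH_SIZE=1, ANCHORS=2)
box1 = [K.constant([[8., 8.]]), K.constant([[8., 8.]]),
        K.constant([[12., 12.]]), K.constant([[12., 12.]])]
box2 = [K.constant([[8., 9.]]), K.constant([[8., 9.]]),
        K.constant([[12., 11.]]), K.constant([[12., 11.]])]
mask = K.constant([[1., 1.]])
print(K.eval(tensor_iou(box1, box2, mask, config)))  # -> [[1.0, 0.25]]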
Example 11: box_iou
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def box_iou(b1, b2):
    # b1 becomes (13, 13, 3, 1, 4) after expand_dims.
    # Compute the top-left and bottom-right corner coordinates.
    b1 = K.expand_dims(b1, -2)
    b1_xy = b1[..., :2]
    b1_wh = b1[..., 2:4]
    b1_wh_half = b1_wh / 2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half
    # b2 becomes (1, n, 4) after expand_dims.
    # Compute the top-left and bottom-right corner coordinates.
    b2 = K.expand_dims(b2, 0)
    b2_xy = b2[..., :2]
    b2_wh = b2[..., 2:4]
    b2_wh_half = b2_wh / 2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half
    # Compute the overlap area.
    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    iou = intersect_area / (b1_area + b2_area - intersect_area)
    return iou

#---------------------------------------------------#
#   Loss computation
#---------------------------------------------------#
Example 12: triplet_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def triplet_loss(y_true, y_pred):
    # batch_num is a module-level constant; y_pred stacks the anchor,
    # positive and negative embeddings along the batch axis.
    batch = batch_num
    ref1 = y_pred[0:batch, :]
    pos1 = y_pred[batch:batch + batch, :]
    neg1 = y_pred[batch + batch:3 * batch, :]
    dis_pos = K.sum(K.square(ref1 - pos1), axis=1, keepdims=True)
    dis_neg = K.sum(K.square(ref1 - neg1), axis=1, keepdims=True)
    # dis_pos = K.sqrt(dis_pos)
    # dis_neg = K.sqrt(dis_neg)
    alpha = 0.2
    # Note: the same hinge term is added twice, which scales the loss by 2.
    d1 = K.maximum(0.0, (dis_pos - dis_neg) + alpha)
    d2 = K.maximum(0.0, (dis_pos - dis_neg) + alpha)
    d = d1 + d2
    return K.mean(d)
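A numpy walkthrough of one triplet with hypothetical 2-d embeddings: once the negative is farther from the anchor than the positive by more than the margin, the hinge is zero:

import numpy as np

anchor = np.array([0., 0.])
positive = np.array([0.1, 0.])
negative = np.array([2., 0.])
dis_pos = np.sum((anchor - positive) ** 2)  # 0.01
dis_neg = np.sum((anchor - negative) ** 2)  # 4.0
print(max(0.0, dis_pos - dis_neg + 0.2))    # 0.0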
Example 13: triplet_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
def triplet_loss(y_true, y_pred):
    # Identical to Example 12; batch_num is again a module-level constant.
    batch = batch_num
    ref1 = y_pred[0:batch, :]
    pos1 = y_pred[batch:batch + batch, :]
    neg1 = y_pred[batch + batch:3 * batch, :]
    dis_pos = K.sum(K.square(ref1 - pos1), axis=1, keepdims=True)
    dis_neg = K.sum(K.square(ref1 - neg1), axis=1, keepdims=True)
    # dis_pos = K.sqrt(dis_pos)
    # dis_neg = K.sqrt(dis_neg)
    alpha = 0.2
    d1 = K.maximum(0.0, (dis_pos - dis_neg) + alpha)
    d2 = K.maximum(0.0, (dis_pos - dis_neg) + alpha)
    d = d1 + d2
    return K.mean(d)
Example 14: binary_dice
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
import tensorflow as tf

def binary_dice(y_true, y_pred):
    """
    N-D Dice score for binary segmentation, returned negated so that it can
    be minimized as a loss.
    """
    ndims = len(y_pred.get_shape().as_list()) - 2
    vol_axes = list(range(1, ndims + 1))
    top = 2 * tf.reduce_sum(y_true * y_pred, vol_axes)
    # The 1e-5 floor keeps the denominator strictly positive.
    bottom = tf.maximum(tf.reduce_sum(y_true + y_pred, vol_axes), 1e-5)
    dice = tf.reduce_mean(top / bottom)
    return -dice
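A toy check, assuming TensorFlow 2 eager execution: perfect overlap gives a Dice score of 1, so the loss returns -1:

import numpy as np
import tensorflow as tf

y = tf.constant(np.ones((1, 4, 4, 1), dtype='float32'))
print(float(binary_dice(y, y)))  # -> -1.0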
Example 15: conv_block
# Required import: from keras import backend [as alias]
# Or: from keras.backend import maximum [as alias]
import numpy as np
import tensorflow as tf
import keras

def conv_block(self, data, mask, conv_layer, mask_conv_layer, core_name):
    '''
    data is the data tensor
    mask is a binary tensor the same size as data

    steps:
    - zero out empty voxels in data using data *= mask
    - convolve data and mask with conv_layer / mask_conv_layer
    - re-weight data
    - binarize mask
    '''
    # Make sure the data is sparse according to the mask.
    wt_data = keras.layers.Lambda(lambda x: x[0] * x[1], name='%s_pre_wmult' % core_name)([data, mask])
    # Convolve the data.
    conv_data = conv_layer(wt_data)
    # Convolve the mask.
    conv_mask = mask_conv_layer(mask)
    zero_mask = keras.layers.Lambda(lambda x: x * 0 + 1)(mask)
    conv_mask_allones = mask_conv_layer(zero_mask)  # all-ones mask to get the edge counts right.
    # Fix the mask convolution to an all-ones (counting) kernel.
    mask_conv_layer.trainable = False
    o = np.ones(mask_conv_layer.get_weights()[0].shape)
    mask_conv_layer.set_weights([o])
    # Re-weight the data (this is what makes the convolution make sense).
    data_norm = lambda x: x[0] / (x[1] + 1e-2)
    # Alternative using the all-ones edge counts:
    # data_norm = lambda x: x[0] / K.maximum(x[1] / x[2], 1)
    out_data = keras.layers.Lambda(data_norm, name='%s_norm_im' % core_name)([conv_data, conv_mask])
    mask_norm = lambda x: tf.cast(x > 0, tf.float32)
    out_mask = keras.layers.Lambda(mask_norm, name='%s_norm_wt' % core_name)(conv_mask)
    return (out_data, out_mask, conv_data, conv_mask)
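A 1-D numpy illustration of the re-weighting idea (all values made up): dividing the convolved sparse data by the convolved mask approximates a local mean over observed entries only:

import numpy as np

data = np.array([1., 2., 0., 4.])  # the third entry is unobserved
mask = np.array([1., 1., 0., 1.])
kernel = np.ones(3)
conv_data = np.convolve(data * mask, kernel, mode='same')  # windowed sums
conv_mask = np.convolve(mask, kernel, mode='same')         # observed counts
print(conv_data / (conv_mask + 1e-2))  # masked local means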