This article collects typical usage examples of the Python method keras.backend.not_equal. If you are unsure what backend.not_equal does, how to call it, or what it is typically used for, the curated code examples below may help. You can also browse the other methods available in the keras.backend module.
The following shows 8 code examples of backend.not_equal, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
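Before the project examples, here is a minimal, self-contained sketch (my own, not taken from any of the repositories below) of what backend.not_equal returns: an element-wise boolean tensor, which is typically cast to floats to build a mask.

from keras import backend as K

x = K.constant([[1., 0., 3.],
                [0., 0., 2.]])
mask = K.cast(K.not_equal(x, 0.), K.floatx())  # 1.0 where x != 0, else 0.0
print(K.eval(mask))
# [[1. 0. 1.]
#  [0. 0. 1.]]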
Example 1: call
# Required import: from keras import backend as K
# Or: from keras.backend import not_equal
# This snippet also uses T (presumably theano.tensor) and the project-internal helpers _trans, _transform_trans, _fusion and _transform_rot.
def call(self, x, mask=None):
    conv_input, theta = x
    s = theta.shape
    theta = T.reshape(theta, [-1, s[2]])
    # Mask marking the non-zero (i.e. non-padded) entries of the input.
    m = K.not_equal(conv_input, 0.)
    # Translation
    trans = _trans(theta)
    output = _transform_trans(trans, conv_input)
    output = output * K.cast(m, K.floatx())
    # Rotation
    M = _fusion(theta)
    output = _transform_rot(M, output)
    return output
Author: microsoft | Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition | Lines: 18 | Source: transform_rnn.py
Example 2: rpn_class_loss_graph
# Required import: from keras import backend as K
# Or: from keras.backend import not_equal
# This snippet also needs: import tensorflow as tf
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify.
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross-entropy loss.
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
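A stand-alone sketch (my own, not from the Mask R-CNN code above) of the selection trick used here: tf.where applied to the boolean output of K.not_equal yields the indices of the non-neutral anchors, and tf.gather_nd keeps only those rows.

import tensorflow as tf
from keras import backend as K

rpn_match = tf.constant([[1, 0, -1, 0]])         # [batch=1, anchors=4]
indices = tf.where(K.not_equal(rpn_match, 0))    # [[0, 0], [0, 2]]
kept = tf.gather_nd(rpn_match, indices)          # [1, -1]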
Example 3: build_masked_loss
# Required import: from keras import backend as K
# Or: from keras.backend import not_equal
def build_masked_loss(loss_function, mask_value):
    """Builds a loss function that masks based on targets.

    Args:
        loss_function: The loss function to mask.
        mask_value: The value to mask in the targets.

    Returns:
        function: a loss function that acts like loss_function with masked inputs.
    """
    def masked_loss_function(y_true, y_pred):
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
        return loss_function(y_true * mask, y_pred * mask)

    return masked_loss_function
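A hypothetical usage sketch (the loss choice and the mask value of -1.0 are my assumptions, not part of the snippet above): wrap a standard Keras loss so that masked targets do not contribute to training.

from keras import losses

masked_mse = build_masked_loss(losses.mean_squared_error, mask_value=-1.0)
# model.compile(optimizer='adam', loss=masked_mse)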
Example 4: mbce
# Required import: from keras import backend as K
# Or: from keras.backend import not_equal
# This snippet also needs: import numpy as np and import tensorflow as tf
def mbce(y_true, y_pred):
    """Balanced sigmoid cross-entropy loss with masking."""
    mask = K.not_equal(y_true, -1.0)
    mask = K.cast(mask, dtype=np.float32)
    num_examples = K.sum(mask, axis=1)
    pos = K.cast(K.equal(y_true, 1.0), dtype=np.float32)
    num_pos = K.sum(pos, axis=None)
    neg = K.cast(K.equal(y_true, 0.0), dtype=np.float32)
    num_neg = K.sum(neg, axis=None)
    pos_ratio = 1.0 - num_pos / num_neg
    mbce = mask * tf.nn.weighted_cross_entropy_with_logits(
        targets=y_true,
        logits=y_pred,
        pos_weight=pos_ratio
    )
    mbce = K.sum(mbce, axis=1) / num_examples
    return K.mean(mbce, axis=-1)
Example 5: _drop_path
# Required import: from keras import backend as K
# Or: from keras.backend import not_equal
def _drop_path(self, inputs):
    count = len(inputs)
    drops = K.switch(
        self.is_global,
        self._gen_global_path(count),
        self._gen_local_drops(count, self.p)
    )
    ave = K.zeros(shape=self.average_shape)
    for i in range(0, count):
        ave += inputs[i] * drops[i]
    sum = K.sum(drops)
    # Check that the sum is not 0 (global drop-path can make it 0)
    # to avoid division by zero.
    ave = K.switch(
        K.not_equal(sum, 0.),
        ave / sum,
        ave)
    return ave
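A minimal sketch (my own, with made-up values) of the divide-by-zero guard used above: the average is only divided by the sum of the drop mask when K.not_equal reports a non-zero denominator; otherwise it is returned unchanged.

from keras import backend as K

drops = K.constant([0., 0., 0.])   # global drop-path dropped every branch
ave = K.constant([1., 2., 3.])
total = K.sum(drops)
ave = K.switch(K.not_equal(total, 0.), ave / total, ave)  # stays [1., 2., 3.]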
Example 6: masked_accuracy
# Required import: from keras import backend as K
# Or: from keras.backend import not_equal
def masked_accuracy(y_true, y_pred):
    # Number of entries whose rounded prediction matches the target.
    a = K.sum(K.cast(K.equal(y_true, K.round(y_pred)), K.floatx()))
    # Number of labeled entries; targets equal to 0.5 mark unlabeled entries.
    c = K.sum(K.cast(K.not_equal(y_true, 0.5), K.floatx()))
    acc = a / c
    return acc
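A quick check (my own toy values) of the metric above. Unlabeled targets of 0.5 never equal a rounded prediction (0 or 1), so they are excluded from both the numerator and the denominator.

from keras import backend as K

y_true = K.constant([1., 0.5, 0., 0.5])   # 0.5 marks unlabeled entries
y_pred = K.constant([0.9, 0.1, 0.2, 0.8])
print(K.eval(masked_accuracy(y_true, y_pred)))  # 1.0: both labeled entries are correct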
Example 7: call
# Required import: from keras import backend as K
# Or: from keras.backend import not_equal
def call(self, x, mask=None):
    # Timesteps whose feature vector is all zeros are treated as padding.
    mask = K.not_equal(K.sum(K.abs(x), axis=2, keepdims=True), 0)
    # n: number of non-padded timesteps per sample.
    n = K.sum(K.cast(mask, 'float32'), axis=1, keepdims=False)
    # Mean over timesteps; the + 1 in the denominator guards against empty sequences.
    x_mean = K.sum(x, axis=1, keepdims=False) / (n + 1)
    return K.cast(x_mean, 'float32')
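A small sketch (my own values; the computation is inlined because only the call method is shown above) of what this layer produces for a zero-padded sequence. Note that dividing by n + 1 rather than n makes this a slightly damped mean.

from keras import backend as K

x = K.constant([[[1., 2.], [3., 4.], [0., 0.]]])   # [batch=1, timesteps=3, features=2]
mask = K.not_equal(K.sum(K.abs(x), axis=2, keepdims=True), 0)
n = K.sum(K.cast(mask, 'float32'), axis=1)         # 2 non-padded timesteps
print(K.eval(K.sum(x, axis=1) / (n + 1)))          # [[1.3333 2.    ]]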
Example 8: rpn_class_loss_graph
# Required import: from keras import backend as K
# Or: from keras.backend import not_equal
# This snippet also needs: import tensorflow as tf
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify.
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross-entropy loss.
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    # Disabled experiment: sort the per-anchor losses and keep only the first half.
    # loss = tf.py_func(np.sort, [loss], tf.float32)
    # keep = tf.cast(tf.divide(tf.size(loss), 2), tf.int32)
    # loss = loss[0:keep]
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
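A hedged alternative sketch (my own, not part of the original project) of what the disabled lines appear to attempt, i.e. keeping only the smallest half of the per-anchor losses, without going through tf.py_func and np.sort:

import tensorflow as tf

# Assumes `loss` is the 1-D per-anchor loss tensor computed above.
k = tf.size(loss) // 2
loss = -tf.nn.top_k(-loss, k=k).values   # the k smallest loss values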