This article collects typical usage examples of the Python method keras.backend.switch. If you have been wondering what exactly backend.switch does, how to call it, or what real backend.switch code looks like, the curated examples here may help. You can also explore further usage examples from the keras.backend module.
The following shows 15 code examples of the backend.switch method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
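Before the examples, here is a minimal sketch (my own illustration, not taken from any of the repositories below, assuming Keras 2.x with the TensorFlow backend) of what keras.backend.switch does: it selects between two tensors based on a boolean condition.
from keras import backend as K

a = K.constant([1.0, -2.0, 3.0])
# Element-wise selection: keep positive entries, replace the rest with zero.
result = K.switch(K.greater(a, 0.0), a, K.zeros_like(a))
print(K.eval(result))  # expected: [1., 0., 3.]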
Example 1: time_distributed_nonzero_max_pooling
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def time_distributed_nonzero_max_pooling(x):
    """
    Computes the maximum along the first (time) dimension.
    It ignores the mask m.
    In:
        x - input; a 3D tensor
        mask_value - value to mask out; if None then no masking,
            by default 0.0
    """
    import numpy
    import theano.tensor as T
    mask_value = 0.0
    x = T.switch(T.eq(x, mask_value), -numpy.inf, x)
    masked_max_x = x.max(axis=1)
    # replace infinities with mask_value
    masked_max_x = T.switch(T.eq(masked_max_x, -numpy.inf), 0, masked_max_x)
    return masked_max_x
Example 2: time_distributed_masked_max
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def time_distributed_masked_max(x, m):
    """
    Computes the maximum along the first (time) dimension.
    In:
        x - input; a 3D tensor
        m - mask
        m_value - value used for masking
    """
    # requires: from keras import backend as K; import numpy
    # place -inf where the mask is off
    m_value = 0.0
    tmp = K.switch(K.equal(m, 0.0), -numpy.inf, 0.0)
    x_with_inf = x + K.expand_dims(tmp)
    x_max = K.max(x_with_inf, axis=1)
    r = K.switch(K.equal(x_max, -numpy.inf), m_value, x_max)
    return r
## classes ##
# Transforms existing layers to masked layers
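A hypothetical usage sketch for time_distributed_masked_max above (the input shapes and layer wiring are my assumptions, not part of the original source, and it assumes the Theano-era backend the helper was written for, where K.switch broadcasts scalar branches): the masked max is typically wrapped in a Lambda layer so it can run inside a model.
from keras.layers import Input, Lambda
from keras.models import Model

# Illustrative shapes: 10 timesteps, 128 features per timestep.
x_in = Input(shape=(10, 128))
m_in = Input(shape=(10,))  # 1.0 = keep timestep, 0.0 = masked out
pooled = Lambda(lambda args: time_distributed_masked_max(args[0], args[1]),
                output_shape=(128,))([x_in, m_in])
model = Model(inputs=[x_in, m_in], outputs=pooled)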
Example 3: rpn_class_loss_graph
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
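The K.switch on the last line guards against an empty loss tensor. A minimal sketch (my own illustration, not from the repository) of why the guard is needed:
import tensorflow as tf
from keras import backend as K

# When no anchor survives the tf.gather_nd filtering, the loss tensor is
# empty and K.mean alone would yield NaN; the switch falls back to 0.0.
empty = tf.constant([], dtype=tf.float32)
safe_mean = K.switch(tf.size(empty) > 0, K.mean(empty), tf.constant(0.0))
print(K.eval(safe_mean))  # expected: 0.0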
Example 4: rpn_class_loss_graph
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
Example 5: rpn_class_loss_graph
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Crossentropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
Example 6: _rpn_loss_regr
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def _rpn_loss_regr(y_true, y_pred):
    """
    Smooth L1 loss.
    y_true [1][HxWx10][3] (class, regr)
    y_pred [1][HxWx10][2] (regr)
    """
    # requires: import tensorflow as tf; from keras import backend as K
    sigma = 9.0
    cls = y_true[0, :, 0]
    regr = y_true[0, :, 1:3]
    regr_keep = tf.where(K.equal(cls, 1))[:, 0]
    regr_true = tf.gather(regr, regr_keep)
    regr_pred = tf.gather(y_pred[0], regr_keep)
    diff = tf.abs(regr_true - regr_pred)
    less_one = tf.cast(tf.less(diff, 1.0 / sigma), 'float32')
    loss = less_one * 0.5 * diff ** 2 * sigma + tf.abs(1 - less_one) * (diff - 0.5 / sigma)
    loss = K.sum(loss, axis=1)
    return K.switch(tf.size(loss) > 0, K.mean(loss), K.constant(0.0))
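As a quick cross-check, a NumPy sketch (my own illustration, not from the repository) of the piecewise smooth L1 definition the loss above implements, with sigma = 9.0:
import numpy as np

def smooth_l1_numpy(diff, sigma=9.0):
    # 0.5 * sigma * diff^2   when |diff| <  1/sigma
    # |diff| - 0.5 / sigma   otherwise
    diff = np.abs(diff)
    return np.where(diff < 1.0 / sigma,
                    0.5 * sigma * diff ** 2,
                    diff - 0.5 / sigma)

print(smooth_l1_numpy(np.array([0.05, 0.5])))  # approx. [0.01125, 0.44444]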
Example 7: clip_norm
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def clip_norm(g, c, n):
    if c > 0:
        if K.backend() == 'tensorflow':
            import tensorflow as tf
            import copy
            condition = n >= c
            then_expression = tf.scalar_mul(c / n, g)
            else_expression = g
            if hasattr(then_expression, 'get_shape'):
                g_shape = copy.copy(then_expression.get_shape())
            elif hasattr(then_expression, 'dense_shape'):
                g_shape = copy.copy(then_expression.dense_shape)
            if condition.dtype != tf.bool:
                condition = tf.cast(condition, 'bool')
            g = K.tensorflow_backend.control_flow_ops.cond(
                condition, lambda: then_expression, lambda: else_expression)
            if hasattr(then_expression, 'get_shape'):
                g.set_shape(g_shape)
            elif hasattr(then_expression, 'dense_shape'):
                g._dense_shape = g_shape
        else:
            g = K.switch(n >= c, g * c / n, g)
    return g
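A hypothetical usage sketch (the gradient values and clipnorm are my assumptions, and it assumes the older Keras/TensorFlow versions this helper targets): Keras optimizers apply clip_norm to every gradient using the global norm of all gradients.
from keras import backend as K

# Stand-in gradient tensors and a clipping threshold.
grads = [K.constant([3.0, 4.0]), K.constant([1.0, 2.0])]
clipnorm = 2.0
# Global norm over all gradients, then rescale each one when it exceeds clipnorm.
norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
clipped = [clip_norm(g, clipnorm, norm) for g in grads]
print([K.eval(g) for g in clipped])  # rescaled so the global norm equals clipnorm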
Example 8: f1_score_keras
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def f1_score_keras(y_true, y_pred):
    # convert probabilities to 0/1
    # (K.T assumes the Theano backend, which exposes theano.tensor as T)
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)
    # for each class: how many were classified as that class
    pred_cnt = K.sum(y_pred_ones, axis=0)
    # for each class: how many are true members of that class
    gold_cnt = K.sum(y_true, axis=0)
    # precision for each class
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred / pred_cnt)
    # recall for each class
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred / gold_cnt)
    # f1 for each class
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))
    # return the average f1 score over all classes
    return K.mean(f1_class)
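A hypothetical usage sketch (the model architecture and shapes are my assumptions, and it assumes the Theano backend, since the metric relies on K.T): backend-level metrics like this one are passed to model.compile like any built-in metric.
from keras.models import Sequential
from keras.layers import Dense

# Toy 3-class classifier; the metric is evaluated on each batch.
model = Sequential([Dense(3, activation='softmax', input_shape=(8,))])
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[f1_score_keras])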
Example 9: f1_score_taskB
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def f1_score_taskB(y_true, y_pred):
    # convert probabilities to 0/1
    # (note: this in-place assignment assumes a backend whose tensors support item assignment)
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)
    # for each class: how many were classified as that class
    pred_cnt = K.sum(y_pred_ones, axis=0)
    # for each class: how many are true members of that class
    gold_cnt = K.sum(y_true, axis=0)
    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)
    # recall for each class
    recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred / gold_cnt)
    # f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))
    # return the f1 score for each class
    return f1_class
Example 10: f1_score_semeval
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def f1_score_semeval(y_true, y_pred):
    # convert probabilities to 0/1
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)
    # for each class: how many were classified as that class
    pred_cnt = K.sum(y_pred_ones, axis=0)
    # for each class: how many are true members of that class
    gold_cnt = K.sum(y_true, axis=0)
    # precision for each class
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred / pred_cnt)
    # recall for each class
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred / gold_cnt)
    # f1 for each class
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))
    # return the average f1 score of classes 0 and 2
    return (f1_class[0] + f1_class[2]) / 2.0
Example 11: precision_keras
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def precision_keras(y_true, y_pred):
    # convert probabilities to 0/1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)
    # for each class: how many were classified as that class
    pred_cnt = K.sum(y_pred_ones, axis=0)
    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)
    # return the average precision over all classes
    return K.mean(precision)
Example 12: f1_score_taskB
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def f1_score_taskB(y_true, y_pred):
    # convert probabilities to 0/1
    # (note: this in-place assignment assumes a backend whose tensors support item assignment)
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)
    # for each class: how many were classified as that class
    pred_cnt = K.sum(y_pred_ones, axis=0)
    # for each class: how many are true members of that class
    gold_cnt = K.sum(y_true, axis=0)
    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)
    # recall for each class
    recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred / gold_cnt)
    # f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))
    # return the f1 score for each class
    return f1_class
Example 13: precision_keras
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def precision_keras(y_true, y_pred):
    # convert probabilities to 0/1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1
    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)
    # for each class: how many were classified as that class
    pred_cnt = K.sum(y_pred_ones, axis=0)
    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)
    # return the average precision over all classes
    return K.mean(precision)
Example 14: rpn_class_loss_graph
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    '''RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    '''
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
Example 15: offsets_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import switch [as alias]
def offsets_loss(gt_offsets, pred_offsets, dump=False):
    """Loss function for offset regression.
    Only positive samples (gt_fg > 0) are evaluated.
    Args:
        gt_offsets: ground-truth offsets
            [R, 4]
            The last axis holds the offsets between the region proposal and
            the anchor (center, width, height): (tx, ty, th, tw)
        pred_offsets: predicted offsets
            [R, 4]
    Note:
        This function is called from rpn_offsets_loss and head_offsets_loss.
        If the RPN's RoI predictions are off, all boxes become negative, and the
        ground-truth labels passed to head_offsets_loss end up all negative as well.
        In that case the loss from head_offsets_loss is 0, but the loss from
        rpn_offsets_loss should be large, so evaluating the total loss
        (rpn_offsets_loss + head_offsets_loss) should still be appropriate.
    """
    loss = K.switch(tf.size(gt_offsets) > 0,
                    smooth_l1(gt_offsets, pred_offsets), tf.constant(0.0))
    loss = K.mean(loss)
    return loss