This article collects typical usage examples of the Python class object_detection.core.losses.SigmoidFocalClassificationLoss. If you are wondering what losses.SigmoidFocalClassificationLoss does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse the containing module, object_detection.core.losses, for related usage.
The following 15 code examples of losses.SigmoidFocalClassificationLoss are listed, sorted by popularity by default.
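Before the examples, here is a minimal, self-contained sketch of how the class is typically constructed and evaluated. SigmoidFocalClassificationLoss implements the sigmoid focal loss FL(p_t) = -alpha * (1 - p_t)^gamma * log(p_t) from the RetinaNet paper, which down-weights well-classified examples via the (1 - p_t)^gamma modulating factor. The sketch assumes TensorFlow 1.x graph mode, as used by the test snippets below; the _logit helper and the concrete tensor values here are illustrative assumptions, not part of the library.
import math

import tensorflow as tf

from object_detection.core import losses


def _logit(probability):
  # Inverse sigmoid; mirrors the helper used by the test snippets below.
  return math.log(probability / (1. - probability))


# Shape [batch, num_anchors, num_classes]: one image, three anchors, one class.
prediction_tensor = tf.constant([[[_logit(0.9)], [_logit(0.5)], [_logit(0.1)]]],
                                tf.float32)
target_tensor = tf.constant([[[1], [1], [0]]], tf.float32)
weights = tf.constant([[[1], [1], [1]]], tf.float32)

# gamma controls how strongly easy examples are down-weighted; alpha balances
# positives against negatives (alpha=None disables the alpha weighting).
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)

# The loss op returns a per-anchor, per-class tensor; reduce it to a scalar.
per_entry_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights)
total_loss = tf.reduce_sum(per_entry_loss)

with tf.Session() as sess:
  print(sess.run(total_loss))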
Example 1: testHardExamplesProduceLossComparableToSigmoidXEntropy
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def testHardExamplesProduceLossComparableToSigmoidXEntropy(self):
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAllClose(order_of_ratio, [[1., 1., 1., 1., 1.]])
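A short note on why every ratio here has order of magnitude 1 (just spelling out the math behind the assertion): for predictions near probability 0.5, the modulating factor (1 - p_t)^gamma with gamma=2 is roughly 0.2 to 0.25, so the focal loss stays within a factor of about 4 to 5 of plain sigmoid cross entropy, and the floor of log10 of the ratio is 0 for every anchor.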
Example 2: testNonAnchorWiseOutputComparableToSigmoidXEntropy
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self):
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights))
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAlmostEqual(order_of_ratio, 1.)
Example 3: test_build_weighted_sigmoid_focal_classification_loss
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def test_build_weighted_sigmoid_focal_classification_loss(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid_focal {
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.SigmoidFocalClassificationLoss))
self.assertAlmostEqual(classification_loss._alpha, None)
self.assertAlmostEqual(classification_loss._gamma, 2.0)
Example 4: test_build_weighted_sigmoid_focal_loss_non_default
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def test_build_weighted_sigmoid_focal_loss_non_default(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid_focal {
alpha: 0.25
gamma: 3.0
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.SigmoidFocalClassificationLoss))
self.assertAlmostEqual(classification_loss._alpha, 0.25)
self.assertAlmostEqual(classification_loss._gamma, 3.0)
Example 5: testHardExamplesProduceLossComparableToSigmoidXEntropy
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def testHardExamplesProduceLossComparableToSigmoidXEntropy(self):
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAllClose(order_of_ratio, [[1., 1., 1., 1., 1.]])
Example 6: testNonAnchorWiseOutputComparableToSigmoidXEntropy
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self):
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights))
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAlmostEqual(order_of_ratio, 1.)
Example 7: test_build_weighted_sigmoid_focal_classification_loss
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def test_build_weighted_sigmoid_focal_classification_loss(self):
losses_text_proto = """
classification_loss {
weighted_sigmoid_focal {
}
}
localization_loss {
weighted_l2 {
}
}
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
self.assertTrue(isinstance(classification_loss,
losses.SigmoidFocalClassificationLoss))
self.assertAlmostEqual(classification_loss._alpha, None)
self.assertAlmostEqual(classification_loss._gamma, 2.0)
Example 8: testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy(self):
prediction_tensor = tf.constant([[[_logit(0.97)],
[_logit(0.91)],
[_logit(0.73)],
[_logit(0.27)],
[_logit(0.09)],
[_logit(0.03)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAllClose(order_of_ratio, [[1000, 100, 10, 10, 100, 1000]])
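The asserted orders of magnitude follow directly from the modulating factor (an observation, just spelling out the math behind the assertion): for the easiest anchor (p = 0.97), (1 - p_t)^2 ≈ 0.0009, so the focal loss is roughly three orders of magnitude smaller than plain cross entropy; for p = 0.91 the factor is ≈ 0.0081 (two orders), and for p = 0.73 it is ≈ 0.073 (one order). The negative anchors mirror these values by symmetry, giving [1000, 100, 10, 10, 100, 1000].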
Example 9: testIgnoreNegativeExampleLossViaAlphaMultiplier
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def testIgnoreNegativeExampleLossViaAlphaMultiplier(self):
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=1.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
self.assertAllClose(focal_loss[0][3:], [0., 0.])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss[0][:3] /
focal_loss[0][:3])))
self.assertAllClose(order_of_ratio, [1., 1., 1.])
Example 10: testIgnorePositiveExampleLossViaAlphaMultiplier
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def testIgnorePositiveExampleLossViaAlphaMultiplier(self):
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
self.assertAllClose(focal_loss[0][:3], [0., 0., 0.])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss[0][3:] /
focal_loss[0][3:])))
self.assertAllClose(order_of_ratio, [1., 1.])
Example 11: testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma(self):
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=None, gamma=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = focal_loss_op(prediction_tensor, target_tensor,
weights=weights)
sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
weights=weights)
with self.test_session() as sess:
sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
self.assertAllClose(sigmoid_loss, focal_loss)
Example 12: testExpectedLossWithAlphaOneAndZeroGamma
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def testExpectedLossWithAlphaOneAndZeroGamma(self):
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=1.0, gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
with self.test_session() as sess:
focal_loss = sess.run(focal_loss)
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor
1.0 * # alpha
8), # positives from 8 anchors
focal_loss)
Example 13: testExpectedLossWithAlpha75AndZeroGamma
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def testExpectedLossWithAlpha75AndZeroGamma(self):
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75, gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
with self.test_session() as sess:
focal_loss = sess.run(focal_loss)
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor.
((0.75 * # alpha for positives.
8) + # positives from 8 anchors.
(0.25 * # alpha for negatives.
8 * 2))), # negatives from 8 anchors for two classes.
focal_loss)
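As a quick sanity check on the expected value (just spelling out the inline comments): every prediction sits at probability 0.5, so each contributing entry costs -ln(0.5) ≈ 0.6931; the positives contribute 0.75 × 8 = 6 effective terms and the negatives 0.25 × 8 × 2 = 4, for a total of roughly 0.6931 × 10 ≈ 6.93.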
Example 14: build_faster_rcnn_classification_loss
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def build_faster_rcnn_classification_loss(loss_config):
"""Builds a classification loss for Faster RCNN based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.ClassificationLoss):
raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
loss_type = loss_config.WhichOneof('classification_loss')
if loss_type == 'weighted_sigmoid':
return losses.WeightedSigmoidClassificationLoss()
if loss_type == 'weighted_softmax':
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
if loss_type == 'weighted_logits_softmax':
config = loss_config.weighted_logits_softmax
return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
logit_scale=config.logit_scale)
if loss_type == 'weighted_sigmoid_focal':
config = loss_config.weighted_sigmoid_focal
alpha = None
if config.HasField('alpha'):
alpha = config.alpha
return losses.SigmoidFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
# By default, Faster RCNN second stage classifier uses Softmax loss
# with anchor-wise outputs.
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
Example 15: test_build_sigmoid_focal_loss
# Required import: from object_detection.core import losses [as an alias]
# Alternatively: from object_detection.core.losses import SigmoidFocalClassificationLoss [as an alias]
def test_build_sigmoid_focal_loss(self):
losses_text_proto = """
weighted_sigmoid_focal {
}
"""
losses_proto = losses_pb2.ClassificationLoss()
text_format.Merge(losses_text_proto, losses_proto)
classification_loss = losses_builder.build_faster_rcnn_classification_loss(
losses_proto)
self.assertTrue(
isinstance(classification_loss,
losses.SigmoidFocalClassificationLoss))