This article collects typical usage examples of the Python class object_detection.core.losses.WeightedSoftmaxClassificationLoss. If you are wondering what losses.WeightedSoftmaxClassificationLoss does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore other members of the module it belongs to, object_detection.core.losses.
Below are 15 code examples of losses.WeightedSoftmaxClassificationLoss, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
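Before the numbered examples, here is a minimal standalone sketch of calling the loss directly. It is not taken from the examples below; it assumes TF 1.x graph execution (as the tests below do), logits and one-hot targets of shape [batch_size, num_anchors, num_classes], and per-anchor weights:

import tensorflow as tf
from object_detection.core import losses

# One image, one anchor, three classes; the logits strongly favor class 0.
prediction_tensor = tf.constant([[[10., -10., -10.]]], tf.float32)
target_tensor = tf.constant([[[1., 0., 0.]]], tf.float32)
weights = tf.constant([[1.]], tf.float32)

loss_op = losses.WeightedSoftmaxClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)

with tf.Session() as sess:
  print(sess.run(loss))  # close to 0: the predicted class matches the target

Depending on the library version, the result is either a scalar sum or a per-anchor tensor (compare Examples 5 and 9 below).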
Example 1: test_build_weighted_softmax_classification_loss
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def test_build_weighted_softmax_classification_loss(self):
  losses_text_proto = """
    classification_loss {
      weighted_softmax {
      }
    }
    localization_loss {
      weighted_l2 {
      }
    }
  """
  losses_proto = losses_pb2.Loss()
  text_format.Merge(losses_text_proto, losses_proto)
  classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
  self.assertTrue(isinstance(classification_loss,
                             losses.WeightedSoftmaxClassificationLoss))
Example 2: test_build_weighted_softmax_classification_loss
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def test_build_weighted_softmax_classification_loss(self):
  losses_text_proto = """
    classification_loss {
      weighted_softmax {
      }
    }
    localization_loss {
      weighted_l2 {
      }
    }
  """
  losses_proto = losses_pb2.Loss()
  text_format.Merge(losses_text_proto, losses_proto)
  classification_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
  self.assertTrue(isinstance(classification_loss,
                             losses.WeightedSoftmaxClassificationLoss))
Example 3: test_build_weighted_softmax_classification_loss_with_logit_scale
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def test_build_weighted_softmax_classification_loss_with_logit_scale(self):
  losses_text_proto = """
    classification_loss {
      weighted_softmax {
        logit_scale: 2.0
      }
    }
    localization_loss {
      weighted_l2 {
      }
    }
  """
  losses_proto = losses_pb2.Loss()
  text_format.Merge(losses_text_proto, losses_proto)
  classification_loss, _, _, _, _, _ = losses_builder.build(losses_proto)
  self.assertTrue(isinstance(classification_loss,
                             losses.WeightedSoftmaxClassificationLoss))
Example 4: test_build_weighted_softmax_classification_loss_with_logit_scale
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def test_build_weighted_softmax_classification_loss_with_logit_scale(self):
  losses_text_proto = """
    classification_loss {
      weighted_softmax {
        logit_scale: 2.0
      }
    }
    localization_loss {
      weighted_l2 {
      }
    }
  """
  losses_proto = losses_pb2.Loss()
  text_format.Merge(losses_text_proto, losses_proto)
  classification_loss, _, _, _, _ = losses_builder.build(losses_proto)
  self.assertTrue(isinstance(classification_loss,
                             losses.WeightedSoftmaxClassificationLoss))
Example 5: testReturnsCorrectLoss
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def testReturnsCorrectLoss(self):
  prediction_tensor = tf.constant([[[-100, 100, -100],
                                    [100, -100, -100],
                                    [0, 0, -100],
                                    [-100, -100, 100]],
                                   [[-100, 0, 0],
                                    [-100, 100, -100],
                                    [-100, 100, -100],
                                    [100, -100, -100]]], tf.float32)
  target_tensor = tf.constant([[[0, 1, 0],
                                [1, 0, 0],
                                [1, 0, 0],
                                [0, 0, 1]],
                               [[0, 0, 1],
                                [0, 1, 0],
                                [0, 1, 0],
                                [1, 0, 0]]], tf.float32)
  weights = tf.constant([[1, 1, .5, 1],
                         [1, 1, 1, 0]], tf.float32)
  loss_op = losses.WeightedSoftmaxClassificationLoss()
  loss = loss_op(prediction_tensor, target_tensor, weights=weights)
  exp_loss = - 1.5 * math.log(.5)
  with self.test_session() as sess:
    loss_output = sess.run(loss)
    self.assertAllClose(loss_output, exp_loss)
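As a quick hand-check of the expected value above (not part of the original test): only two anchors have uncertain predictions, each giving the target class a softmax probability of 0.5, and one of them carries weight 0.5; all other anchors are either predicted with near certainty or have weight 0:

import math

per_anchor = -math.log(0.5)  # cross entropy when the target class gets probability 0.5
print(0.5 * per_anchor + 1.0 * per_anchor)  # = 1.5 * log(2) ≈ 1.0397, i.e. exp_loss above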
Example 6: testReturnsCorrectAnchorWiseLoss
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def testReturnsCorrectAnchorWiseLoss(self):
  prediction_tensor = tf.constant([[[-100, 100, -100],
                                    [100, -100, -100],
                                    [0, 0, -100],
                                    [-100, -100, 100]],
                                   [[-100, 0, 0],
                                    [-100, 100, -100],
                                    [-100, 100, -100],
                                    [100, -100, -100]]], tf.float32)
  target_tensor = tf.constant([[[0, 1, 0],
                                [1, 0, 0],
                                [1, 0, 0],
                                [0, 0, 1]],
                               [[0, 0, 1],
                                [0, 1, 0],
                                [0, 1, 0],
                                [1, 0, 0]]], tf.float32)
  weights = tf.constant([[1, 1, .5, 1],
                         [1, 1, 1, 0]], tf.float32)
  loss_op = losses.WeightedSoftmaxClassificationLoss(True)  # anchorwise_output=True
  loss = loss_op(prediction_tensor, target_tensor, weights=weights)
  exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0],
                        [-math.log(.5), 0, 0, 0]])
  with self.test_session() as sess:
    loss_output = sess.run(loss)
    self.assertAllClose(loss_output, exp_loss)
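Hand-check of the anchor-wise expectation (not part of the original test): with anchorwise output the loss is reported per anchor instead of summed, so only the two uncertain anchors have non-zero entries:

import math

print(0.5 * -math.log(0.5))  # anchor (0, 2): weight 0.5 -> ≈ 0.3466
print(1.0 * -math.log(0.5))  # anchor (1, 0): weight 1.0 -> ≈ 0.6931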
Example 7: _build_classification_loss
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def _build_classification_loss(loss_config):
  """Builds a classification loss based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.ClassificationLoss):
    raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
  loss_type = loss_config.WhichOneof('classification_loss')
  if loss_type == 'weighted_sigmoid':
    config = loss_config.weighted_sigmoid
    return losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=config.anchorwise_output)
  if loss_type == 'weighted_softmax':
    config = loss_config.weighted_softmax
    return losses.WeightedSoftmaxClassificationLoss(
        anchorwise_output=config.anchorwise_output)
  if loss_type == 'bootstrapped_sigmoid':
    config = loss_config.bootstrapped_sigmoid
    return losses.BootstrappedSigmoidClassificationLoss(
        alpha=config.alpha,
        bootstrap_type=('hard' if config.hard_bootstrap else 'soft'),
        anchorwise_output=config.anchorwise_output)
  raise ValueError('Empty loss config.')
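A hypothetical usage sketch for this builder (not from the original source; it simply parses a text proto, following the same weighted_softmax configuration used in the examples above, and passes it to the function just defined):

from google.protobuf import text_format
from object_detection.protos import losses_pb2

loss_text_proto = """
  weighted_softmax {
  }
"""
loss_config = losses_pb2.ClassificationLoss()
text_format.Merge(loss_text_proto, loss_config)
classification_loss = _build_classification_loss(loss_config)
# classification_loss is now a losses.WeightedSoftmaxClassificationLoss instance.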
Example 8: test_build_all_loss_parameters
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def test_build_all_loss_parameters(self):
  losses_text_proto = """
    localization_loss {
      weighted_l2 {
      }
    }
    classification_loss {
      weighted_softmax {
      }
    }
    hard_example_miner {
    }
    classification_weight: 0.8
    localization_weight: 0.2
  """
  losses_proto = losses_pb2.Loss()
  text_format.Merge(losses_text_proto, losses_proto)
  (classification_loss, localization_loss,
   classification_weight, localization_weight,
   hard_example_miner) = losses_builder.build(losses_proto)
  self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
  self.assertTrue(isinstance(classification_loss,
                             losses.WeightedSoftmaxClassificationLoss))
  self.assertTrue(isinstance(localization_loss,
                             losses.WeightedL2LocalizationLoss))
  self.assertAlmostEqual(classification_weight, 0.8)
  self.assertAlmostEqual(localization_weight, 0.2)
Example 9: testReturnsCorrectLoss
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def testReturnsCorrectLoss(self):
  prediction_tensor = tf.constant([[[-100, 100, -100],
                                    [100, -100, -100],
                                    [0, 0, -100],
                                    [-100, -100, 100]],
                                   [[-100, 0, 0],
                                    [-100, 100, -100],
                                    [-100, 100, -100],
                                    [100, -100, -100]]], tf.float32)
  target_tensor = tf.constant([[[0, 1, 0],
                                [1, 0, 0],
                                [1, 0, 0],
                                [0, 0, 1]],
                               [[0, 0, 1],
                                [0, 1, 0],
                                [0, 1, 0],
                                [1, 0, 0]]], tf.float32)
  weights = tf.constant([[[1, 1, 1],
                          [1, 1, 1],
                          [0.5, 0.5, 0.5],
                          [1, 1, 1]],
                         [[1, 1, 1],
                          [1, 1, 1],
                          [1, 1, 1],
                          [0, 0, 0]]], tf.float32)
  loss_op = losses.WeightedSoftmaxClassificationLoss()
  loss = loss_op(prediction_tensor, target_tensor, weights=weights)
  loss = tf.reduce_sum(loss)
  exp_loss = - 1.5 * math.log(.5)
  with self.test_session() as sess:
    loss_output = sess.run(loss)
    self.assertAllClose(loss_output, exp_loss)
Example 10: testReturnsCorrectAnchorWiseLoss
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def testReturnsCorrectAnchorWiseLoss(self):
  prediction_tensor = tf.constant([[[-100, 100, -100],
                                    [100, -100, -100],
                                    [0, 0, -100],
                                    [-100, -100, 100]],
                                   [[-100, 0, 0],
                                    [-100, 100, -100],
                                    [-100, 100, -100],
                                    [100, -100, -100]]], tf.float32)
  target_tensor = tf.constant([[[0, 1, 0],
                                [1, 0, 0],
                                [1, 0, 0],
                                [0, 0, 1]],
                               [[0, 0, 1],
                                [0, 1, 0],
                                [0, 1, 0],
                                [1, 0, 0]]], tf.float32)
  weights = tf.constant([[[1, 1, 1],
                          [1, 1, 1],
                          [0.5, 0.5, 0.5],
                          [1, 1, 1]],
                         [[1, 1, 1],
                          [1, 1, 1],
                          [1, 1, 1],
                          [0, 0, 0]]], tf.float32)
  loss_op = losses.WeightedSoftmaxClassificationLoss()
  loss = loss_op(prediction_tensor, target_tensor, weights=weights)
  exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0],
                        [-math.log(.5), 0, 0, 0]])
  with self.test_session() as sess:
    loss_output = sess.run(loss)
    self.assertAllClose(loss_output, exp_loss)
Example 11: testReturnsCorrectAnchorWiseLossWithHighLogitScaleSetting
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def testReturnsCorrectAnchorWiseLossWithHighLogitScaleSetting(self):
  """At very high logit_scale, all predictions will be ~0.33."""
  # TODO(yonib): Also test logit_scale with anchorwise=False.
  logit_scale = 10e16
  prediction_tensor = tf.constant([[[-100, 100, -100],
                                    [100, -100, -100],
                                    [0, 0, -100],
                                    [-100, -100, 100]],
                                   [[-100, 0, 0],
                                    [-100, 100, -100],
                                    [-100, 100, -100],
                                    [100, -100, -100]]], tf.float32)
  target_tensor = tf.constant([[[0, 1, 0],
                                [1, 0, 0],
                                [1, 0, 0],
                                [0, 0, 1]],
                               [[0, 0, 1],
                                [0, 1, 0],
                                [0, 1, 0],
                                [1, 0, 0]]], tf.float32)
  weights = tf.constant([[[1, 1, 1],
                          [1, 1, 1],
                          [1, 1, 1],
                          [1, 1, 1]],
                         [[1, 1, 1],
                          [1, 1, 1],
                          [1, 1, 1],
                          [1, 1, 1]]], tf.float32)
  loss_op = losses.WeightedSoftmaxClassificationLoss(logit_scale=logit_scale)
  loss = loss_op(prediction_tensor, target_tensor, weights=weights)
  uniform_distribution_loss = - math.log(.33333333333)
  exp_loss = np.matrix([[uniform_distribution_loss] * 4,
                        [uniform_distribution_loss] * 4])
  with self.test_session() as sess:
    loss_output = sess.run(loss)
    self.assertAllClose(loss_output, exp_loss)
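A quick hand-check of the expected value (not part of the original test): dividing the logits by such a large logit_scale pushes them all toward 0, so the softmax becomes uniform over the three classes and every anchor contributes the same cross entropy:

import math

print(-math.log(1.0 / 3.0))  # ≈ 1.0986, the uniform_distribution_loss used above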
Example 12: build_faster_rcnn_classification_loss
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def build_faster_rcnn_classification_loss(loss_config):
  """Builds a classification loss for Faster RCNN based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.ClassificationLoss):
    raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')
  loss_type = loss_config.WhichOneof('classification_loss')
  if loss_type == 'weighted_sigmoid':
    return losses.WeightedSigmoidClassificationLoss()
  if loss_type == 'weighted_softmax':
    config = loss_config.weighted_softmax
    return losses.WeightedSoftmaxClassificationLoss(
        logit_scale=config.logit_scale)
  if loss_type == 'weighted_logits_softmax':
    config = loss_config.weighted_logits_softmax
    return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
        logit_scale=config.logit_scale)
  if loss_type == 'weighted_sigmoid_focal':
    config = loss_config.weighted_sigmoid_focal
    alpha = None
    if config.HasField('alpha'):
      alpha = config.alpha
    return losses.SigmoidFocalClassificationLoss(
        gamma=config.gamma,
        alpha=alpha)
  # By default, Faster RCNN second stage classifier uses Softmax loss
  # with anchor-wise outputs.
  config = loss_config.weighted_softmax
  return losses.WeightedSoftmaxClassificationLoss(
      logit_scale=config.logit_scale)
Example 13: test_build_all_loss_parameters
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def test_build_all_loss_parameters(self):
  losses_text_proto = """
    localization_loss {
      weighted_l2 {
      }
    }
    classification_loss {
      weighted_softmax {
      }
    }
    hard_example_miner {
    }
    classification_weight: 0.8
    localization_weight: 0.2
  """
  losses_proto = losses_pb2.Loss()
  text_format.Merge(losses_text_proto, losses_proto)
  (classification_loss, localization_loss,
   classification_weight, localization_weight,
   hard_example_miner, _) = losses_builder.build(losses_proto)
  self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))
  self.assertTrue(isinstance(classification_loss,
                             losses.WeightedSoftmaxClassificationLoss))
  self.assertTrue(isinstance(localization_loss,
                             losses.WeightedL2LocalizationLoss))
  self.assertAlmostEqual(classification_weight, 0.8)
  self.assertAlmostEqual(localization_weight, 0.2)
Example 14: test_build_softmax_loss
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def test_build_softmax_loss(self):
  losses_text_proto = """
    weighted_softmax {
    }
  """
  losses_proto = losses_pb2.ClassificationLoss()
  text_format.Merge(losses_text_proto, losses_proto)
  classification_loss = losses_builder.build_faster_rcnn_classification_loss(
      losses_proto)
  self.assertTrue(isinstance(classification_loss,
                             losses.WeightedSoftmaxClassificationLoss))
Example 15: test_build_softmax_loss_by_default
# Required import: from object_detection.core import losses [as alias]
# Or: from object_detection.core.losses import WeightedSoftmaxClassificationLoss [as alias]
def test_build_softmax_loss_by_default(self):
  losses_text_proto = """
  """
  losses_proto = losses_pb2.ClassificationLoss()
  text_format.Merge(losses_text_proto, losses_proto)
  classification_loss = losses_builder.build_faster_rcnn_classification_loss(
      losses_proto)
  self.assertTrue(isinstance(classification_loss,
                             losses.WeightedSoftmaxClassificationLoss))