本文整理汇总了Python中mxnet.gluon.loss._apply_weighting方法的典型用法代码示例。如果您正苦于以下问题:Python loss._apply_weighting方法的具体用法?Python loss._apply_weighting怎么用?Python loss._apply_weighting使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mxnet.gluon.loss
的用法示例。
在下文中一共展示了loss._apply_weighting方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: hybrid_forward
# 需要导入模块: from mxnet.gluon import loss [as 别名]
# 或者: from mxnet.gluon.loss import _apply_weighting [as 别名]
def hybrid_forward(self, F, pred, label, sample_weight=None):
    """Compute the focal loss.

    Parameters
    ----------
    F : backend namespace (``mx.nd`` or ``mx.sym``).
    pred : predictions; sigmoid is applied unless ``self._from_logits``.
    label : class indices when ``self._sparse_label``, otherwise a
        one-hot / multi-hot tensor (any value > 0 marks a positive).
    sample_weight : optional per-sample weights forwarded to
        ``_apply_weighting``.

    Returns
    -------
    Per-sample loss: mean over non-batch axes when ``self._size_average``,
    otherwise sum.
    """
    if not self._from_logits:
        pred = F.sigmoid(pred)
    # {0,1} mask marking the position(s) of the true class.
    positive_mask = (F.one_hot(label, self._num_class)
                     if self._sparse_label else label > 0)
    # pt: probability the model assigns to the true outcome at each slot.
    pt = F.where(positive_mask, pred, 1 - pred)
    ones = F.ones_like(positive_mask)
    # Class-balancing factor: alpha on positives, (1 - alpha) on negatives.
    alpha_t = F.where(positive_mask, self._alpha * ones, (1 - self._alpha) * ones)
    # (1 - pt)^gamma down-weights easy examples; eps guards log(0).
    loss = -alpha_t * ((1 - pt) ** self._gamma) * F.log(F.minimum(pt + self._eps, 1))
    loss = _apply_weighting(F, loss, self._weight, sample_weight)
    if self._size_average:
        return F.mean(loss, axis=self._batch_axis, exclude=True)
    return F.sum(loss, axis=self._batch_axis, exclude=True)
示例2: hybrid_forward
# 需要导入模块: from mxnet.gluon import loss [as 别名]
# 或者: from mxnet.gluon.loss import _apply_weighting [as 别名]
def hybrid_forward(self, F, pred, label, sample_weight=None):
    """Softmax cross-entropy loss with optional ignore-label masking.

    Parameters
    ----------
    F : backend namespace (``mx.nd`` or ``mx.sym``).
    pred : predictions; ``log_softmax`` is applied unless ``self._from_logits``.
    label : class indices when ``self._sparse_label``, otherwise a dense
        label tensor reshaped to match ``pred``.
    sample_weight : optional per-sample weights forwarded to
        ``_apply_weighting``.

    Returns
    -------
    Mean loss over non-batch axes. With sparse labels and
    ``self._size_average``, the mean is rescaled so it averages over
    valid (non-ignored) positions only.

    Notes
    -----
    Bug fix: the original read ``valid_label_map`` unconditionally in the
    ``size_average`` branch, raising ``NameError`` for dense labels with
    ``size_average=True``. The rescale is now applied only when the mask
    was actually computed.
    """
    if not self._from_logits:
        pred = F.log_softmax(pred, axis=self._axis)
    valid_label_map = None  # set only on the sparse + size_average path
    if self._sparse_label:
        if self._size_average:
            # 1.0 where the label is real, 0.0 where it is the ignore label.
            valid_label_map = (label != self._ignore_label).astype('float32')
            loss = -(F.pick(pred, label, axis=self._axis, keepdims=True)
                     * valid_label_map)
        else:
            loss = -F.pick(pred, label, axis=self._axis, keepdims=True)
            # Zero out positions carrying the ignore label.
            loss = F.where(label.expand_dims(axis=self._axis) == self._ignore_label,
                           F.zeros_like(loss), loss)
    else:
        label = _reshape_like(F, label, pred)
        loss = -F.sum(pred * label, axis=self._axis, keepdims=True)
    loss = _apply_weighting(F, loss, self._weight, sample_weight)
    if self._size_average:
        mean_loss = F.mean(loss, axis=self._batch_axis, exclude=True)
        if valid_label_map is not None:
            # Rescale: mean over all positions -> mean over valid positions.
            mean_loss = mean_loss * valid_label_map.size / F.sum(valid_label_map)
        return mean_loss
    return F.mean(loss, axis=self._batch_axis, exclude=True)
示例3: _mixup_forward
# 需要导入模块: from mxnet.gluon import loss [as 别名]
# 或者: from mxnet.gluon.loss import _apply_weighting [as 别名]
def _mixup_forward(self, F, pred, label1, label2, lam, sample_weight=None):
    """Cross-entropy for a mixup pair:
    ``lam * CE(pred, label1) + (1 - lam) * CE(pred, label2)``.

    Parameters
    ----------
    F : backend namespace (``mx.nd`` or ``mx.sym``).
    pred : predictions; ``log_softmax`` is applied unless ``self._from_logits``.
    label1, label2 : the two mixed-up labels (indices when
        ``self._sparse_label``, otherwise dense tensors).
    lam : mixup interpolation coefficient.
    sample_weight : optional per-sample weights.
    """
    if not self._from_logits:
        pred = F.log_softmax(pred, self._axis)
    if self._sparse_label:
        nll_a = -F.pick(pred, label1, axis=self._axis, keepdims=True)
        nll_b = -F.pick(pred, label2, axis=self._axis, keepdims=True)
    else:
        label1 = _reshape_like(F, label1, pred)
        label2 = _reshape_like(F, label2, pred)
        nll_a = -F.sum(pred * label1, axis=self._axis, keepdims=True)
        nll_b = -F.sum(pred * label2, axis=self._axis, keepdims=True)
    # Interpolate the two negative log-likelihoods with the mixup weight.
    mixed = lam * nll_a + (1 - lam) * nll_b
    mixed = _apply_weighting(F, mixed, self._weight, sample_weight)
    return F.mean(mixed, axis=self._batch_axis, exclude=True)
示例4: hybrid_forward
# 需要导入模块: from mxnet.gluon import loss [as 别名]
# 或者: from mxnet.gluon.loss import _apply_weighting [as 别名]
def hybrid_forward(self, F, images, num_classes, labels, X_l2norm,
                   lambda_value=0.5, sample_weight=None):
    """Capsule-network margin loss.

    Parameters
    ----------
    F : backend namespace (``mx.nd`` or ``mx.sym``).
    images : unused here; kept for interface compatibility with callers.
    num_classes : number of classes for the one-hot encoding.
    labels : integer class labels.
    X_l2norm : capsule output lengths (presumably L2 norms per class —
        TODO confirm against the caller).
    lambda_value : down-weighting of the absent-class term.
    sample_weight : optional per-sample weights.

    Notes
    -----
    Bug fix: the original called ``nd.one_hot`` / ``nd.maximum`` directly
    inside ``hybrid_forward``, which breaks symbolic execution after
    ``hybridize()``; all ops now go through the ``F`` namespace.
    """
    self.num_classes = num_classes
    labels_onehot = F.one_hot(labels, num_classes)
    # Margins m+ = 0.9 and m- = 0.1 as in the CapsNet formulation.
    positive_term = F.square(F.maximum(0.9 - X_l2norm, 0))
    negative_term = F.square(F.maximum(X_l2norm - 0.1, 0))
    margin_loss = (labels_onehot * positive_term
                   + lambda_value * (1 - labels_onehot) * negative_term)
    margin_loss = margin_loss.sum(axis=1)
    loss = F.mean(margin_loss, axis=self._batch_axis, exclude=True)
    loss = _apply_weighting(F, loss, self._weight / 2, sample_weight)
    return F.mean(loss, axis=self._batch_axis, exclude=True)
示例5: forward
# 需要导入模块: from mxnet.gluon import loss [as 别名]
# 或者: from mxnet.gluon.loss import _apply_weighting [as 别名]
def forward(self, labels, y_pred):
    """Imperative margin loss.

    Parameters
    ----------
    labels : already one-hot encoded target tensor.
    y_pred : per-class prediction lengths.

    Uses ``self.lambda_value``, ``self._weight``, ``self._batch_axis`` and
    ``self.sample_weight`` set on the instance.
    """
    target = labels  # labels arrive one-hot; no encoding needed here
    # Margins 0.9 / 0.1: penalize short positives and long negatives.
    present = nd.square(nd.maximum(0.9 - y_pred, 0))
    absent = nd.square(nd.maximum(y_pred - 0.1, 0))
    per_class = target * present + self.lambda_value * (1 - target) * absent
    per_sample = per_class.sum(axis=1)
    reduced = nd.mean(per_sample, axis=self._batch_axis, exclude=True)
    reduced = _apply_weighting(nd, reduced, self._weight / 2, self.sample_weight)
    return nd.mean(reduced, axis=self._batch_axis, exclude=True)