This article collects typical usage examples of the huber_loss method from Python's chainer.functions module. If you have been wondering what functions.huber_loss does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore other usage examples from the chainer.functions module.
The following presents 12 code examples of the functions.huber_loss method, sorted by popularity by default.
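Before the examples, here is a minimal, self-contained sketch of calling functions.huber_loss directly (the input values are arbitrary and chosen only for illustration):

import numpy as np
import chainer.functions as F

# Predictions and targets as (batch_size, n)-shaped float32 arrays.
x = np.array([[0.5, -2.0], [1.5, 0.2]], dtype=np.float32)
t = np.zeros((2, 2), dtype=np.float32)

# reduce='no' keeps the elementwise losses; the default
# reduce='sum_along_second_axis' sums over axis 1, giving shape (batch_size,).
eltwise = F.huber_loss(x, t, delta=1.0, reduce='no')
per_sample = F.huber_loss(x, t, delta=1.0)
print(eltwise.array)     # 0.5*d**2 where |d| <= 1, else |d| - 0.5
print(per_sample.array)  # shape (2,)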
Example 1: check_forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def check_forward(self, x_data, t_data):
    x = chainer.Variable(x_data)
    t = chainer.Variable(t_data)
    loss = functions.huber_loss(x, t, delta=1, reduce=self.reduce)
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = cuda.to_cpu(loss.data)

    # Expected loss with delta=1: 0.5*d**2 where |d| < 1, else |d| - 0.5.
    diff_data = cuda.to_cpu(x_data) - cuda.to_cpu(t_data)
    loss_expect = numpy.zeros(self.shape)
    mask = numpy.abs(diff_data) < 1
    loss_expect[mask] = 0.5 * diff_data[mask] ** 2
    loss_expect[~mask] = numpy.abs(diff_data[~mask]) - 0.5
    if self.reduce == 'sum_along_second_axis':
        loss_expect = numpy.sum(loss_expect, axis=1)
    testing.assert_allclose(
        loss_value, loss_expect, **self.forward_options)
Example 2: check_double_backward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def check_double_backward(self, x_data, t_data, y_grad, x_grad_grad,
                          t_grad_grad):
    delta = 1
    eps = self.double_backward_options['eps'] * 2
    xp = chainer.backend.get_array_module(x_data)
    # Zero out inputs where |x - t| is within eps of delta: the Huber loss
    # is not twice differentiable there, so numerical checks would fail.
    mask = xp.abs(xp.abs(x_data - t_data) - delta) < eps
    x_data[mask] = 0
    t_data[mask] = 0

    def f(x, t):
        return functions.huber_loss(x, t, delta=delta, reduce=self.reduce)

    gradient_check.check_double_backward(
        f, (x_data, t_data), y_grad, (x_grad_grad, t_grad_grad),
        **self.double_backward_options)
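For intuition about the masking above: with respect to the difference d = x - t, the gradient of the Huber loss is clip(d, -delta, delta), so its derivative jumps from 1 to 0 at |d| = delta. A small NumPy sketch of the two regimes (values arbitrary, not part of the original test):

import numpy as np
delta = 1.0
d = np.array([0.5, 0.999, 1.001, 1.5])
grad = np.clip(d, -delta, delta)               # first derivative w.r.t. d
curvature = (np.abs(d) < delta).astype(float)  # second derivative: 1 inside, 0 outside
# Finite differences of `grad` straddling |d| == delta mix the two regimes,
# which is why such points are zeroed out before the numerical check.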
Example 3: compute_value_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def compute_value_loss(y, t, clip_delta=True, batch_accumulator='mean'):
    """Compute a loss for a value prediction problem.

    Args:
        y (Variable or ndarray): Predicted values.
        t (Variable or ndarray): Target values.
        clip_delta (bool): Use the Huber loss function if set to True.
        batch_accumulator (str): 'mean' or 'sum'. 'mean' will use the mean
            of the loss values in a batch. 'sum' will use the sum.

    Returns:
        (Variable) scalar loss
    """
    assert batch_accumulator in ('mean', 'sum')
    y = F.reshape(y, (-1, 1))
    t = F.reshape(t, (-1, 1))
    if clip_delta:
        loss_sum = F.sum(F.huber_loss(y, t, delta=1.0))
        if batch_accumulator == 'mean':
            loss = loss_sum / y.shape[0]
        elif batch_accumulator == 'sum':
            loss = loss_sum
    else:
        loss_mean = F.mean_squared_error(y, t) / 2
        if batch_accumulator == 'mean':
            loss = loss_mean
        elif batch_accumulator == 'sum':
            loss = loss_mean * y.shape[0]
    return loss
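A hedged usage sketch for the function above, with hypothetical Q-value arrays (names and values are illustrative only):

import numpy as np
q_pred = np.asarray([1.0, 2.5, -0.3], dtype=np.float32)    # hypothetical predictions
q_target = np.asarray([1.2, 0.5, -0.3], dtype=np.float32)  # hypothetical targets
loss = compute_value_loss(q_pred, q_target,
                          clip_delta=True, batch_accumulator='mean')
print(loss.array)  # scalar: mean of the per-sample Huber losses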
Example 4: compute_weighted_value_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def compute_weighted_value_loss(y, t, weights,
                                clip_delta=True, batch_accumulator='mean'):
    """Compute a weighted loss for a value prediction problem.

    Args:
        y (Variable or ndarray): Predicted values.
        t (Variable or ndarray): Target values.
        weights (ndarray): Per-element weights for the losses of y and t.
        clip_delta (bool): Use the Huber loss function if set to True.
        batch_accumulator (str): 'mean' or 'sum'. 'mean' will divide the
            loss by the batch size.

    Returns:
        (Variable) scalar loss
    """
    assert batch_accumulator in ('mean', 'sum')
    y = F.reshape(y, (-1, 1))
    t = F.reshape(t, (-1, 1))
    if clip_delta:
        losses = F.huber_loss(y, t, delta=1.0)
    else:
        losses = F.square(y - t) / 2
    losses = F.reshape(losses, (-1,))
    loss_sum = F.sum(losses * weights)
    if batch_accumulator == 'mean':
        loss = loss_sum / y.shape[0]
    elif batch_accumulator == 'sum':
        loss = loss_sum
    return loss
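In DQN-style training, weights typically holds importance-sampling weights from prioritized experience replay. A hedged usage sketch (all names and values are illustrative):

import numpy as np
q_pred = np.asarray([1.0, 2.0, 3.0], dtype=np.float32)
q_target = np.asarray([1.5, 2.0, 0.0], dtype=np.float32)
is_weights = np.asarray([0.7, 1.0, 0.9], dtype=np.float32)  # hypothetical IS weights
loss = compute_weighted_value_loss(q_pred, q_target, is_weights,
                                   clip_delta=True, batch_accumulator='mean')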
Example 5: compute_eltwise_huber_quantile_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def compute_eltwise_huber_quantile_loss(y, t, taus, huber_loss_threshold=1.0):
    """Compute elementwise Huber losses for quantile regression.

    This is based on Algorithm 1 of https://arxiv.org/abs/1806.06923.

    This function assumes that both kinds of quantile thresholds,
    taus (used to compute y) and taus_prime (used to compute t), are iid
    samples from U([0, 1]).

    Args:
        y (chainer.Variable): Quantile predictions from taus as a
            (batch_size, N)-shaped array.
        t (chainer.Variable or ndarray): Target values for quantile
            regression as a (batch_size, N_prime)-shaped array.
        taus (ndarray): Quantile thresholds used to compute y as a
            (batch_size, N)-shaped array.
        huber_loss_threshold (float): Threshold of the Huber loss. In the
            IQN paper, this is denoted by kappa.

    Returns:
        chainer.Variable: Loss of shape (batch_size, N, N_prime)
    """
    assert y.shape == taus.shape
    # (batch_size, N) -> (batch_size, N, 1)
    y = F.expand_dims(y, axis=2)
    # (batch_size, N_prime) -> (batch_size, 1, N_prime)
    t = F.expand_dims(t, axis=1)
    # (batch_size, N) -> (batch_size, N, 1)
    taus = F.expand_dims(taus, axis=2)
    # Broadcast to (batch_size, N, N_prime)
    y, t, taus = F.broadcast(y, t, taus)
    I_delta = ((t.array - y.array) > 0).astype('f')
    eltwise_huber_loss = F.huber_loss(
        y, t, delta=huber_loss_threshold, reduce='no')
    eltwise_loss = abs(taus - I_delta) * eltwise_huber_loss
    return eltwise_loss
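For reference, a NumPy-only re-derivation of the same elementwise quantile Huber loss (a sketch, not part of the original code; quantile_huber_reference is a name chosen here):

import numpy as np

def quantile_huber_reference(y, t, taus, kappa=1.0):
    # y: (B, N), t: (B, N_prime), taus: (B, N)
    y = y[:, :, None]        # (B, N, 1)
    t = t[:, None, :]        # (B, 1, N_prime)
    taus = taus[:, :, None]  # (B, N, 1)
    d = t - y                # broadcasts to (B, N, N_prime)
    abs_d = np.abs(d)
    huber = np.where(abs_d <= kappa,
                     0.5 * d ** 2,
                     kappa * (abs_d - 0.5 * kappa))
    # Asymmetric weighting: (1 - tau) where t > y, tau otherwise.
    return np.abs(taus - (d > 0).astype(d.dtype)) * huber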
Example 6: smooth_l1
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def smooth_l1(x, t, beta):
    # Huber loss with delta=beta, divided by beta, equals the smooth L1 loss.
    return F.huber_loss(x, t, beta, reduce='no') / beta

# to avoid out of memory
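Why dividing by beta works: Chainer's Huber loss with delta=beta is 0.5*d**2 for |d| <= beta and beta*(|d| - 0.5*beta) otherwise, so dividing by beta yields the smooth L1 loss used in detection papers, 0.5*d**2/beta for |d| <= beta and |d| - 0.5*beta otherwise. A quick NumPy sanity check of this identity (a sketch, not from the original code):

import numpy as np
d = np.linspace(-3.0, 3.0, 13)
beta = 2.0
huber = np.where(np.abs(d) <= beta,
                 0.5 * d ** 2,
                 beta * (np.abs(d) - 0.5 * beta))
smooth_l1 = np.where(np.abs(d) <= beta,
                     0.5 * d ** 2 / beta,
                     np.abs(d) - 0.5 * beta)
assert np.allclose(huber / beta, smooth_l1)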
Example 7: check_backward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def check_backward(self, x_data, t_data, y_grad):
    def f(x, t):
        return functions.huber_loss(x, t, delta=1, reduce=self.reduce)

    gradient_check.check_backward(
        f, (x_data, t_data), y_grad, **self.backward_options)
Example 8: check_invalid_option
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def check_invalid_option(self, xp):
    x = xp.asarray(self.x)
    t = xp.asarray(self.t)
    # An unknown reduce option must raise ValueError.
    with self.assertRaises(ValueError):
        functions.huber_loss(x, t, 1, 'invalid_option')
Example 9: clear
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def clear(self):
    self.loss = None
    # self.accuracy = None

# def forward(self, x, t):
#     self.clear()
#     # x = chainer.Variable(x_data)  # x_data.astype(np.float32)
#     # t = chainer.Variable(t_data)  # [Note]: x_data, t_data must be np.float32 type
#
#     # self.loss = F.huber_loss(h, t, delta=1 / 255.)
#     self.loss = F.mean_squared_error(self(x), t)
#     # self.accuracy = F.accuracy(h, t)  # type incompatible
#     return self.loss
Example 10: compute_weighted_value_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def compute_weighted_value_loss(y, t, weights,
                                mask, clip_delta=True,
                                batch_accumulator='mean'):
    """Compute a weighted, masked loss for a value prediction problem.

    Args:
        y (Variable or ndarray): Predicted values.
        t (Variable or ndarray): Target values.
        weights (ndarray): Per-element weights for the losses of y and t.
        mask (ndarray): Mask to use for the loss calculation.
        clip_delta (bool): Use the Huber loss function if set to True.
        batch_accumulator (str): 'mean' or 'sum'. 'mean' will divide the
            loss by the number of unmasked elements.

    Returns:
        (Variable) scalar loss
    """
    assert batch_accumulator in ('mean', 'sum')
    y = F.reshape(y, (-1, 1))
    t = F.reshape(t, (-1, 1))
    if clip_delta:
        losses = F.huber_loss(y, t, delta=1.0)
    else:
        losses = F.square(y - t) / 2
    losses = F.reshape(losses, (-1,))
    loss_sum = F.sum(losses * weights * mask)
    if batch_accumulator == 'mean':
        # Average over the elements that actually contribute to the loss.
        n_mask = float(mask.sum())
        loss = loss_sum / max(n_mask, 1.0)
    elif batch_accumulator == 'sum':
        loss = loss_sum
    return loss
Example 11: test
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def test(self):
    batch_size = self.batch_size
    N = self.N
    N_prime = self.N_prime
    huber_loss_threshold = self.huber_loss_threshold

    # Overestimation is penalized proportionally to tau;
    # underestimation is penalized proportionally to (1 - tau).
    y = np.random.normal(size=(batch_size, N)).astype('f')
    y_var = chainer.Variable(y)
    t = np.random.normal(size=(batch_size, N_prime)).astype('f')
    tau = np.random.uniform(size=(batch_size, N)).astype('f')

    loss = iqn.compute_eltwise_huber_quantile_loss(
        y_var, t, tau, huber_loss_threshold=huber_loss_threshold)
    y_var_b, t_b = F.broadcast(
        F.reshape(y_var, (batch_size, N, 1)),
        F.reshape(t, (batch_size, 1, N_prime)),
    )
    self.assertEqual(loss.shape, (batch_size, N, N_prime))
    huber_loss = F.huber_loss(
        y_var_b, t_b, delta=huber_loss_threshold, reduce='no')
    self.assertEqual(huber_loss.shape, (batch_size, N, N_prime))

    for i in range(batch_size):
        for j in range(N):
            for k in range(N_prime):
                # The loss is always positive.
                scalar_loss = loss[i, j, k]
                scalar_grad = chainer.grad(
                    [scalar_loss], [y_var])[0][i, j]
                self.assertGreater(scalar_loss.array, 0)
                if y[i, j] > t[i, k]:
                    # y overestimates t:
                    # loss equals the Huber loss scaled by tau.
                    correct_scalar_loss = tau[i, j] * huber_loss[i, j, k]
                else:
                    # y underestimates t:
                    # loss equals the Huber loss scaled by (1 - tau).
                    correct_scalar_loss = (
                        (1 - tau[i, j]) * huber_loss[i, j, k])
                correct_scalar_grad = chainer.grad(
                    [correct_scalar_loss], [y_var])[0][i, j]
                self.assertAlmostEqual(
                    scalar_loss.array,
                    correct_scalar_loss.array,
                    places=5,
                )
                self.assertAlmostEqual(
                    scalar_grad.array,
                    correct_scalar_grad.array,
                    places=5,
                )
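The triple loop checks one element at a time; the loss values (though not the gradients) could also be compared in a single vectorized assertion, reusing the arrays already defined in the test above (a sketch, not part of the original test):

# indicator is 1 where y overestimates t, per (i, j, k) triple.
indicator = (y[:, :, None] > t[:, None, :]).astype('f')
scale = indicator * tau[:, :, None] + (1 - indicator) * (1 - tau[:, :, None])
np.testing.assert_allclose(
    loss.array, scale * huber_loss.array, rtol=1e-5, atol=1e-5)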
Example 12: calc_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import huber_loss [as alias]
def calc_loss(self, image_size, predicted_grids, gt_bbox_points, objectness_scores, normalize=True):
    predicted_bbox_points = self.get_corners(predicted_grids, image_size, scale_to_image_size=False)

    # 1. transform box coordinates to aabb coordinates for determination of iou
    predicted_bbox_points = predicted_bbox_points[0], predicted_bbox_points[4], predicted_bbox_points[3], predicted_bbox_points[7]
    predicted_bbox_points = F.stack(predicted_bbox_points, axis=1)

    # 2. find the best prediction area for each gt bbox
    gt_bboxes_to_use_for_loss = []
    positive_anchor_indices = self.xp.empty((0,), dtype=self.xp.int32)
    not_contributing_anchors = self.xp.empty((0,), dtype=self.xp.int32)
    for index, gt_bbox in enumerate(gt_bbox_points):
        # determine which bboxes are positive (high iou with the gt) and
        # which are negative; this is also used to train the objectness
        # classification
        gt_bbox = self.xp.tile(gt_bbox[None, ...], (len(predicted_bbox_points), 1))
        ious = bbox_iou(gt_bbox, predicted_bbox_points.data)
        positive_boxes = self.xp.where((ious[0] >= 0.7))
        not_contributing_boxes = self.xp.where(self.xp.logical_and(0.3 < ious[0], ious[0] < 0.7))
        if len(positive_boxes[0]) == 0:
            best_iou_index = ious[0, :].argmax()
            positive_anchor_indices = self.xp.concatenate((positive_anchor_indices, best_iou_index[None, ...]), axis=0)
            gt_bboxes_to_use_for_loss.append(gt_bbox[0])
        else:
            positive_anchor_indices = self.xp.concatenate((positive_anchor_indices, positive_boxes[0]), axis=0)
            gt_bboxes_to_use_for_loss.extend(gt_bbox[:len(positive_boxes[0])])
        not_contributing_anchors = self.xp.concatenate((not_contributing_anchors, not_contributing_boxes[0]), axis=0)

    if len(gt_bboxes_to_use_for_loss) == 0:
        # return zero for both losses so the return arity matches below
        zero = Variable(self.xp.array(0, dtype=predicted_grids.dtype))
        return zero, zero

    gt_bboxes_to_use_for_loss = F.stack(gt_bboxes_to_use_for_loss)
    # filter predicted bboxes and only keep bboxes from those regions that actually contain a bbox
    predicted_bbox_points = F.get_item(predicted_bbox_points, positive_anchor_indices)

    # 3. calculate smooth L1 (Huber) loss for bbox regression
    loss = F.huber_loss(
        predicted_bbox_points,
        gt_bboxes_to_use_for_loss,
        1
    )

    # 4. calculate objectness loss
    objectness_labels = self.xp.zeros(len(objectness_scores), dtype=self.xp.int32)
    objectness_labels[not_contributing_anchors] = -1
    objectness_labels[positive_anchor_indices] = 1
    objectness_loss = F.softmax_cross_entropy(
        objectness_scores,
        objectness_labels,
        ignore_label=-1,
    )

    return F.mean(loss), objectness_loss