This article collects typical usage examples of the chainer.functions.sigmoid_cross_entropy method in Python. If you are looking for how to call functions.sigmoid_cross_entropy in practice, the code examples selected here may help. You can also browse the other members of the chainer.functions module for related usage.
The following presents 15 code examples of the functions.sigmoid_cross_entropy method, sorted by popularity by default.
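Before the individual examples, here is a minimal, self-contained sketch of the basic call (the input shapes and values are made up for illustration; the described behavior follows Chainer's documented defaults, normalize=True and reduce='mean', with the label value -1 ignored):

import numpy as np
import chainer.functions as F

x = np.random.randn(4, 3).astype(np.float32)               # raw scores (logits)
t = np.array([[1, 0, 1],
              [0, 1, -1],                                   # -1 marks an ignored element
              [1, 1, 0],
              [0, 0, 1]], dtype=np.int32)

loss = F.sigmoid_cross_entropy(x, t)                        # scalar: mean over non-ignored elements
per_element = F.sigmoid_cross_entropy(x, t, reduce='no')    # same shape as x; zero where t == -1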
Example 1: check_forward_no_reduction
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def check_forward_no_reduction(self, x_data, t_data):
    x_val = chainer.Variable(x_data)
    t_val = chainer.Variable(t_data)
    loss = functions.sigmoid_cross_entropy(
        x_val, t_val, self.normalize, reduce='no')
    self.assertEqual(loss.data.shape, self.x.shape)
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = cuda.to_cpu(loss.data)

    # Compute expected value
    if not getattr(self, 'ignore_all', False):
        for i in six.moves.range(self.x.shape[0]):
            for j in six.moves.range(self.x.shape[1]):
                xd, td = self.x[i, j], self.t[i, j]
                if td == -1:
                    loss_expect = 0
                else:
                    loss_expect = -(
                        xd * (td - (xd >= 0)) -
                        math.log(1 + math.exp(-numpy.abs(xd))))
                self.assertAlmostEqual(
                    loss_expect, loss_value[i, j], places=self.places)
Example 2: check_double_backward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def check_double_backward(self, x_data, t_data, y_grad, gx_grad,
                          normalize=True, reduce='mean'):
    # Skip too large case. That requires a long time.
    if self.shape[0] == 65536:
        return

    if reduce == 'mean':
        y_grad = utils.force_array(y_grad.sum())

    def f(x, t):
        return chainer.functions.sigmoid_cross_entropy(
            x, t, normalize=normalize, reduce=reduce)

    gradient_check.check_double_backward(
        f, (x_data, t_data), y_grad, (gx_grad,),
        **self.check_double_backward_options)
Example 3: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def __call__(self, x, labels):
    x = BatchTransform(self.model.mean)(x)
    x = self.xp.array(x)
    scores = self.model(x)

    B, n_class = scores.shape[:2]
    one_hot_labels = self.xp.zeros((B, n_class), dtype=np.int32)
    for i, label in enumerate(labels):
        one_hot_labels[i, label] = 1
    # sigmoid_cross_entropy normalizes the loss
    # by the size of batch and the number of classes.
    # It works better to remove the normalization factor
    # of the number of classes.
    loss = self.loss_scale * F.sigmoid_cross_entropy(
        scores, one_hot_labels)

    result = calc_accuracy(scores, labels)
    reporter.report({'loss': loss}, self)
    reporter.report({'accuracy': result['accuracy']}, self)
    reporter.report({'n_pred': result['n_pred']}, self)
    reporter.report({'n_pos': result['n_pos']}, self)
    return loss
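The comment in the example above refers to Chainer's default reduction: with normalize=True and reduce='mean', the summed element-wise loss is divided by the number of non-ignored elements, which here is batch size times number of classes. A minimal sketch of how a scale factor can undo the per-class division (treating loss_scale as the number of classes is an assumption; the source does not show how loss_scale is set):

import numpy as np
import chainer.functions as F

B, n_class = 4, 20                                          # hypothetical sizes
scores = np.random.randn(B, n_class).astype(np.float32)
targets = np.random.randint(0, 2, (B, n_class)).astype(np.int32)

mean_loss = F.sigmoid_cross_entropy(scores, targets)        # averaged over B * n_class elements
per_image_loss = n_class * mean_loss                        # averaged over the batch only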
Example 4: update_core
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def update_core(self):
    image, labels = self.converter(self.get_iterator('main').next())
    assert image.shape[0] == 1, "Batchsize of only 1 is allowed for now"
    image = Variable(image)
    if self.device >= 0:
        image.to_gpu(self.device)

    cl_output = self._optimizers['main'].target.classify(image)

    xp = get_array_module(cl_output.data)
    target = xp.asarray([[0] * self.no_of_classes] * cl_output.shape[0])
    for i in range(labels.shape[0]):
        gt_labels = np.unique(labels[i]).astype(np.int32)[2:] - 1  # Not considering -1 & 0
        target[i][gt_labels] = 1

    loss = F.sigmoid_cross_entropy(cl_output, target, normalize=True)
    report({'Loss': loss}, self.get_optimizer('main').target)

    self._optimizers['main'].target.cleargrads()
    loss.backward()
    self._optimizers['main'].update()
Example 5: update_core
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def update_core(self):
    batch = self._iterators['main'].next()
    x = chainer.cuda.to_gpu(np.array([i[0] for i in batch]))
    labels = [l[1] for l in batch]

    row_idx, col_idx, val_idx = [], [], []
    for i in range(len(labels)):
        l_list = list(set(labels[i]))
        for y in l_list:
            row_idx.append(i)
            col_idx.append(y)
            val_idx.append(1)
    m = len(labels)
    n = self.class_dim
    t = sp.csr_matrix((val_idx, (row_idx, col_idx)), shape=(m, n), dtype=np.int8).todense()
    t = chainer.cuda.to_gpu(t)

    optimizer = self._optimizers['main']
    optimizer.target.cleargrads()
    loss = F.sigmoid_cross_entropy(optimizer.target(x), t)
    chainer.reporter.report({'main/loss': loss})
    loss.backward()
    optimizer.update()
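The example above assembles a dense multi-hot target matrix from variable-length label lists through a SciPy CSR matrix. The same matrix can be built with plain NumPy; the following is only an illustrative sketch with made-up labels, not part of the original code:

import numpy as np

labels = [[0, 3], [2], [1, 3, 4]]        # hypothetical per-sample label lists
class_dim = 5

t = np.zeros((len(labels), class_dim), dtype=np.int8)
for i, ls in enumerate(labels):
    t[i, list(set(ls))] = 1              # multi-hot row for sample i
# t can then be passed to F.sigmoid_cross_entropy together with the model output.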
Example 6: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def __call__(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.relu(self.conv2(h))
    h = F.relu(self.conv3(h))
    h = F.dropout(F.relu(self.fc4(h)), train=self.train)
    h = self.fc5(h)
    self.pred = F.reshape(h, (x.data.shape[0], 16, 16))

    if t is not None:
        self.loss = F.sigmoid_cross_entropy(self.pred, t, normalize=False)
        return self.loss
    else:
        self.pred = F.sigmoid(self.pred)
        return self.pred
Example 7: mask_head_loss_post
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def mask_head_loss_post(segms, mask_roi_indices, gt_segms, gt_mask_labels,
                        batchsize):
    """Loss function for Mask Head (post).

    Args:
        segms (array): An array whose shape is :math:`(R, n\_class, M, M)`,
            where :math:`R` is the total number of RoIs in the given batch.
        mask_roi_indices (array): A list of arrays returned by
            :func:`mask_head_loss_pre`.
        gt_segms (list of arrays): A list of arrays returned by
            :func:`mask_head_loss_pre`.
        gt_mask_labels (list of arrays): A list of arrays returned by
            :func:`mask_head_loss_pre`.
        batchsize (int): The size of batch.

    Returns:
        chainer.Variable:
        Mask loss.
    """
    xp = cuda.get_array_module(segms.array)

    mask_roi_indices = xp.hstack(mask_roi_indices).astype(np.int32)
    gt_segms = xp.vstack(gt_segms)
    gt_mask_labels = xp.hstack(gt_mask_labels).astype(np.int32)

    mask_loss = F.sigmoid_cross_entropy(
        segms[np.arange(len(gt_mask_labels)), gt_mask_labels],
        gt_segms.astype(np.int32))
    return mask_loss
Example 8: pit_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def pit_loss(pred, label, label_delay=0):
    """
    Permutation-invariant training (PIT) cross entropy loss function.

    Args:
      pred: (T,C)-shaped pre-activation values
      label: (T,C)-shaped labels in {0,1}
      label_delay: if label_delay == 5:
             pred: 0 1 2 3 4 | 5 6 ... 99 100 |
            label: x x x x x | 0 1 ... 94 95 | 96 97 98 99 100
            calculated area: | <------------> |

    Returns:
      min_loss: (1,)-shape mean cross entropy
      label_perms[min_index]: permuted labels
    """
    # label permutations along the speaker axis
    label_perms = [label[..., list(p)] for p
                   in permutations(range(label.shape[-1]))]
    losses = F.stack(
        [F.sigmoid_cross_entropy(
            pred[label_delay:, ...],
            l[:len(l) - label_delay, ...]) for l in label_perms])
    xp = cuda.get_array_module(losses)
    min_loss = F.min(losses) * (len(label) - label_delay)
    min_index = cuda.to_cpu(xp.argmin(losses.data))
    return min_loss, label_perms[min_index]
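A minimal usage sketch for pit_loss with random data (the shapes and values are made up; it assumes the same imports as the source: numpy, chainer.functions as F, chainer.cuda, and itertools.permutations):

import numpy as np

T, C = 50, 2                                                 # hypothetical frames and speakers
pred = np.random.randn(T, C).astype(np.float32)              # pre-activation scores
label = np.random.randint(0, 2, (T, C)).astype(np.int32)     # 0/1 speaker activity

loss, best_label = pit_loss(pred, label, label_delay=0)
# loss is a scalar chainer.Variable (mean cross entropy scaled by the number of
# evaluated frames); best_label is the permutation with the smallest loss.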
Example 9: check_forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def check_forward(self, x_data, t_data, use_cudnn='always'):
    x_val = chainer.Variable(x_data)
    t_val = chainer.Variable(t_data)
    with chainer.using_config('use_cudnn', use_cudnn):
        loss = functions.sigmoid_cross_entropy(x_val, t_val,
                                               self.normalize)
    self.assertEqual(loss.data.shape, ())
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = float(cuda.to_cpu(loss.data))

    # Compute expected value
    loss_expect = 0
    non_ignore_count = 0
    for i in six.moves.range(self.x.shape[0]):
        for j in six.moves.range(self.x.shape[1]):
            xd, td = self.x[i, j], self.t[i, j]
            if td == -1:
                continue
            loss_expect -= xd * (td - (xd >= 0)) \
                - math.log(1 + math.exp(-numpy.abs(xd)))
            non_ignore_count += 1
    if non_ignore_count == 0:
        loss_expect = 0
    elif self.normalize:
        loss_expect /= non_ignore_count
    else:
        loss_expect /= self.t.shape[0]
    self.assertAlmostEqual(loss_expect, loss_value, places=self.places)
Example 10: check_backward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def check_backward(self, x_data, t_data):
    # Skip too large case. That requires a long time.
    if self.shape[0] == 65536:
        return

    gradient_check.check_backward(
        functions.sigmoid_cross_entropy,
        (x_data, t_data), None, **self.check_backward_options)
Example 11: check_backward_no_reduction
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def check_backward_no_reduction(
        self, x_data, t_data, y_grad):
    # Skip too large case. That requires a long time.
    if self.shape[0] == 65536:
        return

    def f(x, t):
        return chainer.functions.sigmoid_cross_entropy(x, t, reduce='no')

    gradient_check.check_backward(
        f, (x_data, t_data), y_grad, **self.check_backward_options)
Example 12: forward_chainerx
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def forward_chainerx(self, inputs):
    x, = inputs
    # TODO(aksub99): Improve implementation to avoid non-differentiability
    # wrt targets
    t = self.backend_config.get_array(self.t)
    out = chainerx.sigmoid_cross_entropy(x, t)
    return out,
Example 13: forward_chainer
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def forward_chainer(self, inputs):
    x, = inputs
    t = self.t
    out = F.sigmoid_cross_entropy(x, t, normalize=False, reduce='no')
    return out,
Example 14: update_core
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def update_core(self):
    xp = self.fsgen.xp
    fsgen_optimizer = self.get_optimizer('fsgen')
    vgen_optimizer = self.get_optimizer('vgen')
    vdis_optimizer = self.get_optimizer('vdis')

    real_video, fake_video, dis_fake, dis_real = self.forward()
    batchsize = real_video.shape[0]

    loss_dis_fake = F.sigmoid_cross_entropy(
        dis_fake, xp.ones((batchsize, 1, 1, 1), dtype="i"))
    loss_dis_real = F.sigmoid_cross_entropy(
        dis_real, xp.zeros((batchsize, 1, 1, 1), dtype="i"))
    loss_gen = F.sigmoid_cross_entropy(
        dis_fake, xp.zeros((batchsize, 1, 1, 1), dtype="i"))

    chainer.report({'loss_dis_fake': loss_dis_fake}, self.vdis)
    chainer.report({'loss_dis_real': loss_dis_real}, self.vdis)

    fsgen_optimizer.target.zerograds()
    vgen_optimizer.target.zerograds()
    loss_gen.backward()
    fsgen_optimizer.update()
    vgen_optimizer.update()

    fake_video.unchain_backward()

    vdis_optimizer.target.zerograds()
    (loss_dis_fake + loss_dis_real).backward()
    vdis_optimizer.update()
Example 15: evaluate
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sigmoid_cross_entropy [as alias]
def evaluate(self):
    iterator = self._iterators['main']
    eval_func = self.eval_func or self._targets['main']

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    summary = reporter_module.DictSummary()

    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            row_idx, col_idx, val_idx = [], [], []
            x = cuda.to_gpu(np.array([i[0] for i in batch]))
            labels = [l[1] for l in batch]
            for i in range(len(labels)):
                l_list = list(set(labels[i]))
                for y in l_list:
                    row_idx.append(i)
                    col_idx.append(y)
                    val_idx.append(1)
            m = len(labels)
            n = self.class_dim
            t = sp.csr_matrix((val_idx, (row_idx, col_idx)), shape=(m, n), dtype=np.int8).todense()
            t = cuda.to_gpu(t)
            with function.no_backprop_mode():
                loss = F.sigmoid_cross_entropy(eval_func(x), t)
                summary.add({MyEvaluator.default_name + '/main/loss': loss})
        summary.add(observation)

    return summary.compute_mean()