This article collects typical usage examples of the Python method chainer.functions.sum. If you are wondering what functions.sum does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also read further about the module it belongs to, chainer.functions.
The following presents 15 code examples of functions.sum, sorted by popularity by default.
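As a quick reference before the examples, here is a minimal sketch (with made-up array values) of the basic call patterns of chainer.functions.sum: reducing all elements, reducing along a single axis, and keeping the reduced dimension.

import numpy as np
import chainer.functions as F

x = np.arange(12, dtype=np.float32).reshape(3, 4)
total = F.sum(x)                            # scalar Variable: sum of all elements
per_row = F.sum(x, axis=1)                  # shape (3,): sum along the last axis
per_col = F.sum(x, axis=0, keepdims=True)   # shape (1, 4): keep the reduced axis
print(total.array, per_row.array, per_col.array)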
Example 1: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def __call__(self, x):
if self.dr:
with chainer.using_config('train', True):
x = F.dropout(x, self.dr)
if self.gap:
x = F.sum(x, axis=(2,3))
N = x.shape[0]
        # The code below is copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
feature = F.reshape(F.leaky_relu(x), (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
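A side note on the F.sum(x, axis=(2,3)) line above: passing a tuple of axes reduces both spatial dimensions at once, collapsing an NCHW feature map to shape (N, C). A small illustrative sketch with dummy data:

import numpy as np
import chainer.functions as F

x = np.random.rand(2, 8, 4, 4).astype(np.float32)   # dummy (N, C, H, W) feature map
pooled = F.sum(x, axis=(2, 3))                       # global sum pooling -> (2, 8)
print(pooled.shape)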
Example 2: setUp
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def setUp(self):
def evaluator(actions):
# negative square norm of actions
return -F.sum(actions ** 2, axis=1)
self.evaluator = evaluator
if self.has_maximizer:
def maximizer():
return chainer.Variable(np.zeros(
(self.batch_size, self.action_size), dtype=np.float32))
else:
maximizer = None
self.maximizer = maximizer
self.av = action_value.SingleActionValue(
evaluator=evaluator, maximizer=maximizer)
Example 3: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def __call__(self, x):
h = x
for l in self.conv_layers:
h = self.activation(l(h))
# Advantage
batch_size = x.shape[0]
ya = self.a_stream(h)
mean = F.reshape(
F.sum(ya, axis=1) / self.n_actions, (batch_size, 1))
ya, mean = F.broadcast(ya, mean)
ya -= mean
# State value
ys = self.v_stream(h)
ya, ys = F.broadcast(ya, ys)
q = ya + ys
return action_value.DiscreteActionValue(q)
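The advantage stream above is centered by subtracting its per-state mean (computed with F.sum divided by n_actions), so the advantages of each state sum to roughly zero before being added to the state value. A small numeric sketch with dummy data:

import numpy as np
import chainer
import chainer.functions as F

n_actions = 4
ya = chainer.Variable(np.random.rand(3, n_actions).astype(np.float32))  # dummy advantages
mean = F.reshape(F.sum(ya, axis=1) / n_actions, (3, 1))
ya_centered = ya - F.broadcast_to(mean, ya.shape)
print(F.sum(ya_centered, axis=1).array)   # close to zero for every row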
Example 4: compute_policy_gradient_full_correction
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def compute_policy_gradient_full_correction(
action_distrib, action_distrib_mu, action_value, v,
truncation_threshold):
"""Compute off-policy bias correction term wrt all actions."""
assert truncation_threshold is not None
assert np.isscalar(v)
with chainer.no_backprop_mode():
rho_all_inv = compute_full_importance(action_distrib_mu,
action_distrib)
correction_weight = (
np.maximum(1 - truncation_threshold * rho_all_inv,
np.zeros_like(rho_all_inv)) *
action_distrib.all_prob.array[0])
correction_advantage = action_value.q_values.array[0] - v
return -F.sum(correction_weight *
action_distrib.all_log_prob *
correction_advantage, axis=1)
Example 5: compute_value_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def compute_value_loss(eltwise_loss, batch_accumulator='mean'):
"""Compute a loss for value prediction problem.
Args:
eltwise_loss (Variable): Element-wise loss per example
batch_accumulator (str): 'mean' or 'sum'. 'mean' will use the mean of
the loss values in a batch. 'sum' will use the sum.
Returns:
(Variable) scalar loss
"""
assert batch_accumulator in ('mean', 'sum')
assert eltwise_loss.ndim == 3
if batch_accumulator == 'sum':
# mean over N_prime, then sum over (batch_size, N)
loss = F.sum(F.mean(eltwise_loss, axis=2))
else:
# mean over (batch_size, N_prime), then sum over N
loss = F.sum(F.mean(eltwise_loss, axis=(0, 2)))
return loss
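A hypothetical usage of the compute_value_loss defined above, assuming it and `import chainer.functions as F` are in scope; the element-wise loss is a dummy (batch_size, N, N_prime) array such as quantile-based value learning produces.

import numpy as np
import chainer

eltwise_loss = chainer.Variable(np.random.rand(32, 8, 8).astype(np.float32))  # dummy losses
loss_mean = compute_value_loss(eltwise_loss, batch_accumulator='mean')
loss_sum = compute_value_loss(eltwise_loss, batch_accumulator='sum')
print(loss_mean.array, loss_sum.array)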
Example 6: compute_value_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def compute_value_loss(eltwise_loss, batch_accumulator='mean'):
"""Compute a loss for value prediction problem.
Args:
eltwise_loss (Variable): Element-wise loss per example per atom
batch_accumulator (str): 'mean' or 'sum'. 'mean' will use the mean of
the loss values in a batch. 'sum' will use the sum.
Returns:
(Variable) scalar loss
"""
assert batch_accumulator in ('mean', 'sum')
if batch_accumulator == 'sum':
loss = F.sum(eltwise_loss)
else:
loss = F.mean(F.sum(eltwise_loss, axis=1))
return loss
Example 7: compute_weighted_value_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def compute_weighted_value_loss(eltwise_loss, batch_size, weights,
batch_accumulator='mean'):
"""Compute a loss for value prediction problem.
Args:
eltwise_loss (Variable): Element-wise loss per example per atom
weights (ndarray): Weights for y, t.
batch_accumulator (str): 'mean' will divide loss by batchsize
Returns:
(Variable) scalar loss
"""
assert batch_accumulator in ('mean', 'sum')
# eltwise_loss is (batchsize, n_atoms) array of losses
# weights is an array of shape (batch_size)
# sum loss across atoms and then apply weight per example in batch
loss_sum = F.matmul(F.sum(eltwise_loss, axis=1), weights)
if batch_accumulator == 'mean':
loss = loss_sum / batch_size
elif batch_accumulator == 'sum':
loss = loss_sum
return loss
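A hypothetical usage of compute_weighted_value_loss above with dummy per-example, per-atom losses and prioritized-replay-style weights (assuming the function and `import chainer.functions as F` are in scope):

import numpy as np
import chainer

batch_size, n_atoms = 16, 51
eltwise_loss = chainer.Variable(np.random.rand(batch_size, n_atoms).astype(np.float32))
weights = np.random.rand(batch_size).astype(np.float32)
loss = compute_weighted_value_loss(eltwise_loss, batch_size, weights,
                                   batch_accumulator='mean')
print(loss.array)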
Example 8: _compute_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def _compute_loss(self, exp_batch, errors_out=None):
"""Compute a loss of categorical DQN."""
y, t = self._compute_y_and_t(exp_batch)
# Minimize the cross entropy
# y is clipped to avoid log(0)
eltwise_loss = -t * F.log(F.clip(y, 1e-10, 1.))
if errors_out is not None:
del errors_out[:]
# The loss per example is the sum of the atom-wise loss
# Prioritization by KL-divergence
delta = F.sum(eltwise_loss, axis=1)
delta = cuda.to_cpu(delta.array)
for e in delta:
errors_out.append(e)
if 'weights' in exp_batch:
return compute_weighted_value_loss(
eltwise_loss, y.shape[0], exp_batch['weights'],
batch_accumulator=self.batch_accumulator)
else:
return compute_value_loss(
eltwise_loss, batch_accumulator=self.batch_accumulator)
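Illustrative only: the element-wise loss that _compute_loss feeds into the helpers above is the atom-wise cross entropy between a clipped predicted distribution y and a target distribution t, and the per-example sum over atoms drives prioritization. A standalone sketch with dummy normalized distributions:

import numpy as np
import chainer.functions as F

y = np.random.rand(4, 51).astype(np.float32)
y /= y.sum(axis=1, keepdims=True)                    # dummy predicted distribution
t = np.random.rand(4, 51).astype(np.float32)
t /= t.sum(axis=1, keepdims=True)                    # dummy target distribution
eltwise_loss = -t * F.log(F.clip(y, 1e-10, 1.0))
per_example = F.sum(eltwise_loss, axis=1)            # KL-like error used for priorities
print(per_example.array)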
Example 9: _sample_discrete_actions
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def _sample_discrete_actions(batch_probs):
"""Sample a batch of actions from a batch of action probabilities.
Args:
batch_probs (ndarray): batch of action probabilities BxA
Returns:
List consisting of sampled actions
"""
action_indices = []
# Subtract a tiny value from probabilities in order to avoid
# "ValueError: sum(pvals[:-1]) > 1.0" in numpy.multinomial
batch_probs = batch_probs - np.finfo(np.float32).epsneg
for i in range(batch_probs.shape[0]):
histogram = np.random.multinomial(1, batch_probs[i])
action_indices.append(int(np.nonzero(histogram)[0]))
return action_indices
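A hypothetical usage of _sample_discrete_actions above with a dummy batch of normalized action probabilities (batch size 3, four actions):

import numpy as np

batch_probs = np.random.rand(3, 4).astype(np.float32)
batch_probs /= batch_probs.sum(axis=1, keepdims=True)   # rows sum to one
print(_sample_discrete_actions(batch_probs))             # e.g. [2, 0, 3]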
Example 10: softmax_cross_entropy
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def softmax_cross_entropy(self, y, t):
import numpy as np
log_softmax = F.log_softmax(y)
# SelectItem is not supported by onnx-chainer.
# TODO(hamaji): Support it?
# log_prob = F.select_item(log_softmax, t)
# TODO(hamaji): Currently, F.sum with axis=1 cannot be
# backpropped properly.
# log_prob = F.sum(log_softmax * t, axis=1)
# self.batch_size = chainer.Variable(np.array(t.size, np.float32),
# name='batch_size')
# return -F.sum(log_prob, axis=0) / self.batch_size
log_prob = F.sum(log_softmax * t, axis=(0, 1))
batch_size = chainer.Variable(np.array(t.shape[0], np.float32),
name='batch_size')
self.extra_inputs = [batch_size]
loss = -log_prob / batch_size
loss.name = 'loss'
return loss
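As a sanity check on the workaround above (summing log_softmax times one-hot targets over both axes instead of using F.select_item or an axis=1 sum), the following illustrative snippet with dummy data compares it against Chainer's built-in softmax cross entropy for integer labels:

import numpy as np
import chainer.functions as F

y = np.random.randn(4, 10).astype(np.float32)        # dummy logits
labels = np.array([1, 3, 0, 7], dtype=np.int32)
t = np.eye(10, dtype=np.float32)[labels]              # one-hot targets

loss_manual = -F.sum(F.log_softmax(y) * t, axis=(0, 1)) / y.shape[0]
loss_builtin = F.softmax_cross_entropy(y, labels)     # mean reduction by default
print(loss_manual.array, loss_builtin.array)          # approximately equal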
Example 11: forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def forward(self, x, t):
xp = cuda.get_array_module(x)
y = self.predictor(x)
log_softmax = F.log_softmax(y)
# SelectItem is not supported by onnx-chainer.
# TODO(hamaji): Support it?
# log_prob = F.select_item(log_softmax, t)
batch_size = chainer.Variable(xp.array(t.size, xp.float32),
name='batch_size')
self.extra_inputs = [batch_size]
# TODO(hamaji): Currently, F.sum with axis=1 cannot be
# backpropped properly.
# log_prob = F.sum(log_softmax * t, axis=1)
# return -F.sum(log_prob, axis=0) / self.batch_size
log_prob = F.sum(log_softmax * t, axis=(0, 1))
loss = -log_prob / batch_size
reporter.report({'loss': loss}, self)
if self.compute_accuracy:
acc = accuracy.accuracy(y, xp.argmax(t, axis=1))
reporter.report({'accuracy': acc}, self)
loss.name = 'loss'
return loss
Example 12: softmax_cross_entropy
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def softmax_cross_entropy(self, y, t):
import numpy as np
log_softmax = F.log_softmax(y)
# SelectItem is not supported by onnx-chainer.
# TODO(hamaji): Support it?
# log_prob = F.select_item(log_softmax, t)
# TODO(hamaji): Currently, F.sum with axis=1 cannot be
# backpropped properly.
# log_prob = F.sum(log_softmax * t, axis=1)
# self.batch_size = chainer.Variable(np.array(t.size, np.float32),
# name='batch_size')
# return -F.sum(log_prob, axis=0) / self.batch_size
log_prob = F.sum(log_softmax * t, axis=(0, 1))
batch_size = chainer.Variable(self.xp.array(t.shape[0], np.float32),
name='batch_size')
self.extra_inputs = [batch_size]
loss = -log_prob / batch_size
loss.name = 'loss'
return loss
Example 13: listmle
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def listmle(x, t):
"""
The ListMLE loss as in Xia et al (2008), Listwise Approach to Learning to
Rank - Theory and Algorithm.
:param x: The activation of the previous layer
:param t: The target labels
:return: The loss
"""
# Get the ground truth by sorting activations by the relevance labels
xp = cuda.get_array_module(t)
t_hat = t[:, 0]
x_hat = x[xp.flip(xp.argsort(t_hat), axis=0)]
# Compute MLE loss
final = logcumsumexp(x_hat)
return F.sum(final - x_hat)
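A hypothetical usage of listmle above on a single ranked list of five documents with dummy scores and relevance labels; it assumes the logcumsumexp helper from the same module is importable alongside it.

import numpy as np

x = np.random.randn(5, 1).astype(np.float32)   # dummy scores, one per document
t = np.random.randn(5, 1).astype(np.float32)   # dummy relevance labels
loss = listmle(x, t)
print(loss.array)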
Example 14: listpl
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def listpl(x, t, α=15.0):
"""
The ListPL loss, a stochastic variant of ListMLE that in expectation
approximates the true ListNet loss.
:param x: The activation of the previous layer
:param t: The target labels
:param α: The smoothing factor
:return: The loss
"""
# Sample permutation from PL(t)
index = _pl_sample(t, α)
x = x[index]
# Compute MLE loss
final = logcumsumexp(x)
return F.sum(final - x)
Example 15: _pl_sample
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import sum [as alias]
def _pl_sample(t, α):
"""
    Sample from the Plackett-Luce distribution directly.
    :param t: The target labels
    :param α: The smoothing factor
    :return: A random permutation from the Plackett-Luce distribution
             parameterized by the target labels
"""
xp = cuda.get_array_module(t)
t = t[:, 0]
probs = xp.exp(t * α)
probs /= xp.sum(probs)
# Use CPU-based numpy implementation, because cupy.random.choice with
# replace=False does not work
probs = cuda.to_cpu(probs)
result = np.random.choice(probs.shape[0], probs.shape[0], replace=False,
p=probs)
return xp.array(result, copy=False)