This page collects typical usage examples of the chainer.functions.mean method in Python. If you are wondering exactly how to use functions.mean, how to call it, or what working examples look like, the curated code samples below should help. You can also explore further usage examples from the module it belongs to, chainer.functions.
The following presents 15 code examples of functions.mean, sorted by popularity by default.
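As a quick orientation before the examples, here is a minimal sketch of chainer.functions.mean itself (toy values chosen purely for illustration): it averages a Variable or ndarray over all elements, or along the axes given by axis, and returns a chainer.Variable.

import numpy as np
import chainer.functions as F

x = np.arange(6, dtype=np.float32).reshape(2, 3)

# Mean over all elements: a scalar Variable.
print(F.mean(x))          # variable(2.5)

# Mean along axis 1: a Variable of shape (2,).
print(F.mean(x, axis=1))  # variable([1., 4.])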
Example 1: _update_recurrent
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def _update_recurrent(self, dataset):
    """Update both the policy and the value function."""
    flat_dataset = list(itertools.chain.from_iterable(dataset))
    if self.obs_normalizer:
        self._update_obs_normalizer(flat_dataset)
    xp = self.model.xp
    assert 'state' in flat_dataset[0]
    assert 'v_teacher' in flat_dataset[0]

    if self.standardize_advantages:
        all_advs = xp.array([b['adv'] for b in flat_dataset])
        mean_advs = xp.mean(all_advs)
        std_advs = xp.std(all_advs)
    else:
        mean_advs = None
        std_advs = None

    for _ in range(self.epochs):
        random.shuffle(dataset)
        for minibatch in _yield_subset_of_sequences_with_fixed_number_of_items(  # NOQA
                dataset, self.minibatch_size):
            self._update_once_recurrent(minibatch, mean_advs, std_advs)
Example 2: compute_weighted_value_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def compute_weighted_value_loss(eltwise_loss, weights,
                                batch_accumulator='mean'):
    """Compute a loss for value prediction problem.

    Args:
        eltwise_loss (Variable): Element-wise loss per example
        weights (ndarray): Weights for y, t.
        batch_accumulator (str): 'mean' will divide loss by batchsize

    Returns:
        (Variable) scalar loss
    """
    batch_size = eltwise_loss.shape[0]
    assert batch_accumulator in ('mean', 'sum')
    assert eltwise_loss.ndim == 3
    # eltwise_loss is a (batch_size, n, n') array of losses
    # weights is an array of shape (batch_size,)
    # apply weights per example in batch
    loss_sum = F.matmul(F.sum(F.mean(eltwise_loss, axis=2), axis=1), weights)
    if batch_accumulator == 'mean':
        loss = loss_sum / batch_size
    elif batch_accumulator == 'sum':
        loss = loss_sum
    return loss
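As a quick sanity check on the shape contract above (toy shapes and random values; assumes the compute_weighted_value_loss defined in this example, and the F alias, are in scope):

import numpy as np
import chainer

eltwise_loss = chainer.Variable(
    np.random.rand(4, 32, 32).astype(np.float32))  # (batch_size, n, n')
weights = np.ones(4, dtype=np.float32)             # one weight per example

loss = compute_weighted_value_loss(eltwise_loss, weights,
                                   batch_accumulator='mean')
print(loss.shape)  # () -- a scalar Variable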
Example 3: _compute_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def _compute_loss(self, exp_batch, errors_out=None):
    """Compute a loss.

    Returns:
        chainer.Variable: Scalar loss.
    """
    y, taus = self._compute_y_and_taus(exp_batch)
    with chainer.no_backprop_mode():
        t = self._compute_target_values(exp_batch)

    eltwise_loss = compute_eltwise_huber_quantile_loss(y, t, taus)

    if errors_out is not None:
        del errors_out[:]
        delta = F.mean(eltwise_loss, axis=(1, 2))
        errors_out.extend(cuda.to_cpu(delta.array))

    if 'weights' in exp_batch:
        return compute_weighted_value_loss(
            eltwise_loss, exp_batch['weights'],
            batch_accumulator=self.batch_accumulator)
    else:
        return compute_value_loss(
            eltwise_loss, batch_accumulator=self.batch_accumulator)
Example 4: compute_value_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def compute_value_loss(eltwise_loss, batch_accumulator='mean'):
    """Compute a loss for value prediction problem.

    Args:
        eltwise_loss (Variable): Element-wise loss per example per atom
        batch_accumulator (str): 'mean' or 'sum'. 'mean' will use the mean of
            the loss values in a batch. 'sum' will use the sum.

    Returns:
        (Variable) scalar loss
    """
    assert batch_accumulator in ('mean', 'sum')
    if batch_accumulator == 'sum':
        loss = F.sum(eltwise_loss)
    else:
        loss = F.mean(F.sum(eltwise_loss, axis=1))
    return loss
Example 5: compute_weighted_value_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def compute_weighted_value_loss(eltwise_loss, batch_size, weights,
                                batch_accumulator='mean'):
    """Compute a loss for value prediction problem.

    Args:
        eltwise_loss (Variable): Element-wise loss per example per atom
        weights (ndarray): Weights for y, t.
        batch_accumulator (str): 'mean' will divide loss by batchsize

    Returns:
        (Variable) scalar loss
    """
    assert batch_accumulator in ('mean', 'sum')
    # eltwise_loss is a (batch_size, n_atoms) array of losses
    # weights is an array of shape (batch_size,)
    # sum loss across atoms and then apply weight per example in batch
    loss_sum = F.matmul(F.sum(eltwise_loss, axis=1), weights)
    if batch_accumulator == 'mean':
        loss = loss_sum / batch_size
    elif batch_accumulator == 'sum':
        loss = loss_sum
    return loss
Example 6: _simple_group_normalization
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def _simple_group_normalization(x, groups, gamma, beta, eps=1e-5):
    batch_size, channels = x.shape[:2]
    x_reshape = x.reshape(batch_size, groups, channels // groups, -1)

    mean = numpy.mean(x_reshape, axis=(2, 3), keepdims=True)
    var = numpy.var(x_reshape, axis=(2, 3), keepdims=True)
    std = numpy.sqrt(var + eps, dtype=x.dtype)

    x_hat = (x_reshape - mean) / std
    x_hat = x_hat.reshape(x.shape)

    for i in six.moves.xrange(x.ndim):
        if i != 1:  # except for channel dim
            gamma = numpy.expand_dims(gamma, i)
            beta = numpy.expand_dims(beta, i)

    return x_hat * gamma + beta
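Since this is a plain NumPy reference implementation, it can be checked against Chainer's built-in layer. A minimal sketch of such a check (random data; assumes Chainer v5 or later, where chainer.functions.group_normalization is available, and that the function above plus its numpy/six imports are in scope):

import numpy
import chainer.functions as F

x = numpy.random.randn(2, 6, 4, 4).astype(numpy.float32)
gamma = numpy.ones(6, dtype=numpy.float32)
beta = numpy.zeros(6, dtype=numpy.float32)

# Reference NumPy result vs. Chainer's built-in, 3 groups of 2 channels each.
expected = _simple_group_normalization(x, 3, gamma, beta)
actual = F.group_normalization(x, 3, gamma, beta)
numpy.testing.assert_allclose(actual.array, expected, rtol=1e-4, atol=1e-4)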
Example 7: get_gaussian_params
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def get_gaussian_params(self, x):
    h = F.tanh(self.l1(x))
    h = self.l2(h)

    pi = h[:, :self.gaussian_mixtures]
    mu_var_dim = self.gaussian_mixtures * self.input_dim
    mu = h[:, self.gaussian_mixtures:self.gaussian_mixtures + mu_var_dim]
    log_var = h[:, self.gaussian_mixtures + mu_var_dim:]

    n_batch = x.shape[0]

    # mixing coefficients
    pi = F.reshape(pi, (n_batch, self.gaussian_mixtures))
    pi = F.softmax(pi, axis=1)

    # mean
    mu = F.reshape(mu, (n_batch, self.gaussian_mixtures, self.input_dim))

    # log variance
    log_var = F.reshape(
        log_var, (n_batch, self.gaussian_mixtures, self.input_dim))

    return pi, mu, log_var
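These parameters would typically feed a mixture-density negative log-likelihood. The helper below is not part of the original class; it is a hypothetical sketch of that loss for a diagonal-covariance Gaussian mixture, using F.logsumexp for numerical stability:

import numpy
import chainer.functions as F

def gaussian_mixture_nll(pi, mu, log_var, t):
    # Hypothetical loss; t is an (n_batch, input_dim) target array.
    t = F.reshape(t, (t.shape[0], 1, t.shape[1]))  # broadcast over mixtures
    # Per-component diagonal-Gaussian log density, summed over dimensions.
    log_prob = -0.5 * F.sum(
        log_var + (t - mu) ** 2 / F.exp(log_var) + numpy.log(2 * numpy.pi),
        axis=2)
    # log sum_k pi_k N_k(t), computed stably with logsumexp.
    log_likelihood = F.logsumexp(F.log(pi) + log_prob, axis=1)
    return -F.mean(log_likelihood)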
Example 8: pretraining
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def pretraining(optimizer):
    logger.info('pretraining')
    copy_grand_opt = copy.deepcopy(optimizer.grand_optimizer)
    losses = []
    for _ in range(10):
        x = optimizer.optnet.xp.random.normal(
            scale=10., size=(10000, 1)).astype('f')
        g = optimizer.optnet.step(x)
        # loss forcing g's sign to be the flip of input's sign
        # theta = theta - c * gradient
        # theta = theta + g
        loss = F.mean(F.clip(g, 0, 100) * (x > 0)
                      + F.clip(-g, 0, 100) * (x < 0))
        optimizer.optnet.cleargrads()
        loss.backward()
        optimizer.meta_update()
        optimizer.optnet.reset_state()
        losses.append(loss.item())
    logger.info('finished pretraining. losses {}'.format(losses))
    optimizer.release_all()
    # reset adam state
    optimizer = nets.optnets.OptimizerByNet(optimizer.optnet, copy_grand_opt)
    return optimizer, copy_grand_opt
Example 9: test_forward_case3
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def test_forward_case3(self):
    """Whether a silhouette by neural renderer matches that by Blender."""
    # load teapot
    vertices, faces, textures = utils.load_teapot_batch()

    # create renderer
    renderer = neural_renderer.Renderer()
    renderer.image_size = 256
    renderer.anti_aliasing = False
    renderer.light_intensity_ambient = 1.0
    renderer.light_intensity_directional = 0.0

    images = renderer.render(vertices, faces, textures)
    images = images.data.get()
    image = images[2].mean(0)

    # load reference image by blender
    ref = scipy.misc.imread('./tests/data/teapot_blender.png')
    ref = ref.astype('float32')
    ref = (ref.min(-1) != 255).astype('float32')

    chainer.testing.assert_allclose(ref, image)
Example 10: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def __call__(self, x, t=None, w=None):
    # t and w are on the host.
    # Forward network
    alpha = self.forward(x)

    if t is None:
        assert not chainer.config.train
        return

    # Weighted mean squared error
    # TODO: Do more tests
    # loss = F.mean(F.squared_error(alpha, t) * w)
    loss = F.mean_squared_error(alpha, t)
    if np.isnan(float(loss.data)):
        raise ValueError('Loss is nan.')

    chainer.report({'loss': loss}, self)
    return loss
Example 11: _compute_target_values
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def _compute_target_values(self, exp_batch):
    batch_next_state = exp_batch['next_state']

    with chainer.using_config('train', False), state_kept(self.q_function):
        next_qout = self.q_function(batch_next_state)

    target_next_qout = self.target_q_function(batch_next_state)
    next_q_max = target_next_qout.evaluate_actions(
        next_qout.greedy_actions)
    next_q_max = F.mean(next_q_max, axis=1)

    batch_rewards = exp_batch['reward']
    batch_terminal = exp_batch['is_state_terminal']
    discount = exp_batch['discount']

    return batch_rewards + discount * (1.0 - batch_terminal) * next_q_max
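To make the backup concrete, a tiny NumPy-only sketch with made-up numbers (discount 0.99): terminal transitions contribute only their immediate reward, because the (1 - terminal) mask zeroes out the bootstrapped term.

import numpy as np

rewards = np.array([1.0, 0.0], dtype=np.float32)
terminal = np.array([0.0, 1.0], dtype=np.float32)  # second transition ends the episode
next_q_max = np.array([2.0, 5.0], dtype=np.float32)

target = rewards + 0.99 * (1.0 - terminal) * next_q_max
print(target)  # [2.98 0.  ]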
Example 12: loss_hinge_disc
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def loss_hinge_disc(self, fake, real):
    loss = F.mean(F.relu(0.5 - real))
    loss += F.mean(F.relu(0.5 + fake))
    return loss
Example 13: loss_hinge_gene
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def loss_hinge_gene(self, fake):
    loss = F.mean(F.relu(-fake))
    return loss
Example 14: q_values
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def q_values(self):
    with chainer.force_backprop_mode():
        return F.mean(self.quantiles, axis=1)
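For intuition (shapes assumed from typical quantile-based agents, values made up): if self.quantiles is (batch_size, n_taus, n_actions), averaging over axis 1 collapses each return distribution to its expected Q-value per action.

import numpy as np
import chainer.functions as F

quantiles = np.random.rand(1, 8, 3).astype(np.float32)  # (batch, n_taus, n_actions)
q = F.mean(quantiles, axis=1)
print(q.shape)  # (1, 3) -- one expected Q-value per action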
Example 15: _mean_or_nan
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import mean [as alias]
def _mean_or_nan(xs):
    """Return the mean of a non-empty sequence, or numpy.nan for an empty one."""
    return np.mean(xs) if xs else np.nan