This article collects typical usage examples of the chainer.functions.broadcast method in Python. If you are unsure what functions.broadcast does, how to call it, or what real uses of it look like, the curated code examples here may help. You can also explore further usage examples for the containing module, chainer.functions.
Below, 13 code examples of the functions.broadcast method are shown, sorted by popularity by default.
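Before the examples, a minimal self-contained sketch of what F.broadcast does (array values here are purely illustrative): it takes any number of arrays or Variables and returns them broadcast against each other to a common shape, following NumPy broadcasting rules.

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.arange(6, dtype=np.float32).reshape(2, 3))  # shape (2, 3)
y = chainer.Variable(np.array([[10.0], [20.0]], dtype=np.float32))  # shape (2, 1)

# Both outputs come back with the common broadcast shape (2, 3).
xb, yb = F.broadcast(x, y)
print(xb.shape, yb.shape)  # (2, 3) (2, 3)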
Example 1: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def __call__(self, x):
    h = x
    for l in self.conv_layers:
        h = self.activation(l(h))
    # Advantage
    batch_size = x.shape[0]
    ya = self.a_stream(h)
    mean = F.reshape(
        F.sum(ya, axis=1) / self.n_actions, (batch_size, 1))
    ya, mean = F.broadcast(ya, mean)
    ya -= mean
    # State value
    ys = self.v_stream(h)
    ya, ys = F.broadcast(ya, ys)
    q = ya + ys
    return action_value.DiscreteActionValue(q)
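In this dueling-network head, F.broadcast aligns the (batch_size, 1) advantage mean with the (batch_size, n_actions) advantage matrix before subtraction, and likewise aligns the state value with the centered advantages. A minimal sketch of just the centering step (shapes and names are illustrative, not taken from the class above):

import numpy as np
import chainer.functions as F

n_actions = 4
ya = np.random.randn(2, n_actions).astype(np.float32)    # advantage stream output
mean = F.reshape(F.sum(ya, axis=1) / n_actions, (2, 1))  # per-sample mean, shape (2, 1)
ya_b, mean_b = F.broadcast(ya, mean)                     # both broadcast to (2, 4)
centered = ya_b - mean_b                                 # mean-centered advantages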
Example 2: proportions
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def proportions(self, doc_ids, softmax=False):
    """ Given an array of document indices, return the unnormalized
    topic weight vector for each document.

    Returns:
        doc_weights : chainer.Variable
            Two-dimensional topic weights of each document.
    """
    w = self.weights(doc_ids)
    if softmax:
        size = w.data.shape
        # Randomly zero out topics with a 0/1 mask
        mask = self.xp.random.random_integers(0, 1, size=size)
        y = (F.softmax(w * self.temperature) *
             Variable(mask.astype('float32')))
        # Renormalize each row of the masked distribution
        norm, y = F.broadcast(F.expand_dims(F.sum(y, axis=1), 1), y)
        return y / (norm + 1e-7)
    else:
        return w
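The broadcast here drives a renormalization: F.sum(y, axis=1) has shape (batch_size,), expand_dims makes it (batch_size, 1), and F.broadcast expands it to match y so the division rescales each row back to a proper distribution after the mask zeroes some topics (the 1e-7 guards against division by zero). Note that numpy.random.random_integers is deprecated; randint(0, 2, size=size) draws the same 0/1 mask.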
Example 3: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def __call__(self, adj, x):
    h = F.broadcast(x)
    # add uniform noise to node feature matrices
    if chainer.config.train:
        h += self.xp.random.uniform(0, 0.9, x.shape)
    adj = F.broadcast(adj)
    sum_log_det_jacs_x = F.broadcast(self.xp.zeros([h.shape[0]], dtype=self.xp.float32))
    sum_log_det_jacs_adj = F.broadcast(self.xp.zeros([h.shape[0]], dtype=self.xp.float32))
    # forward step of channel-coupling layers
    for i in range(self.hyperparams.num_coupling['channel']):
        h, log_det_jacobians = self.clinks[i](h, adj)
        sum_log_det_jacs_x += log_det_jacobians
    # add uniform noise to adjacency tensors
    if chainer.config.train:
        adj += self.xp.random.uniform(0, 0.9, adj.shape)
    # forward step of adjacency-coupling
    for i in range(self.hyperparams.num_coupling['channel'], len(self.clinks)):
        adj, log_det_jacobians = self.clinks[i](adj)
        sum_log_det_jacs_adj += log_det_jacobians
    adj = F.reshape(adj, (adj.shape[0], -1))
    h = F.reshape(h, (h.shape[0], -1))
    out = [h, adj]
    return out, [sum_log_det_jacs_x, sum_log_det_jacs_adj]
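A detail worth noting in this example: F.broadcast is called with a single argument, in which case there is nothing to broadcast against and it simply returns the input as one chainer.Variable (Example 6 below relies on the same behavior). A tiny illustrative check:

import numpy as np
import chainer
import chainer.functions as F

v = F.broadcast(np.zeros((2, 3), dtype=np.float32))
print(isinstance(v, chainer.Variable), v.shape)  # True (2, 3)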
Example 4: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def __call__(self, x):
    h = x
    for l in self.conv_layers:
        h = self.activation(l(h))
    # Advantage
    batch_size = x.shape[0]
    ya = self.a_stream(h)
    mean = F.reshape(F.sum(ya, axis=1) / self.n_actions, (batch_size, 1))
    ya, mean = F.broadcast(ya, mean)
    ya -= mean
    # State value
    ys = self.v_stream(h)
    ya, ys = F.broadcast(ya, ys)
    q = ya + ys
    return chainerrl.action_value.DiscreteActionValue(q)
Example 5: compute_eltwise_huber_quantile_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def compute_eltwise_huber_quantile_loss(y, t, taus, huber_loss_threshold=1.0):
    """Compute elementwise Huber losses for quantile regression.

    This is based on Algorithm 1 of https://arxiv.org/abs/1806.06923.

    This function assumes that both kinds of quantile thresholds,
    taus (used to compute y) and taus_prime (used to compute t),
    are iid samples from U([0,1]).

    Args:
        y (chainer.Variable): Quantile prediction from taus as a
            (batch_size, N)-shaped array.
        t (chainer.Variable or ndarray): Target values for quantile
            regression as a (batch_size, N_prime)-shaped array.
        taus (ndarray): Quantile thresholds used to compute y as a
            (batch_size, N)-shaped array.
        huber_loss_threshold (float): Threshold of Huber loss. In the IQN
            paper, this is denoted by kappa.

    Returns:
        chainer.Variable: Loss of shape (batch_size, N, N_prime).
    """
    assert y.shape == taus.shape
    # (batch_size, N) -> (batch_size, N, 1)
    y = F.expand_dims(y, axis=2)
    # (batch_size, N_prime) -> (batch_size, 1, N_prime)
    t = F.expand_dims(t, axis=1)
    # (batch_size, N) -> (batch_size, N, 1)
    taus = F.expand_dims(taus, axis=2)
    # Broadcast to (batch_size, N, N_prime)
    y, t, taus = F.broadcast(y, t, taus)
    I_delta = ((t.array - y.array) > 0).astype('f')
    eltwise_huber_loss = F.huber_loss(
        y, t, delta=huber_loss_threshold, reduce='no')
    eltwise_loss = abs(taus - I_delta) * eltwise_huber_loss
    return eltwise_loss
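A hedged usage sketch for the function above (assuming compute_eltwise_huber_quantile_loss is in scope; shapes follow its docstring):

import numpy as np
import chainer

batch_size, N, N_prime = 2, 8, 8
y = chainer.Variable(np.random.rand(batch_size, N).astype(np.float32))
t = np.random.rand(batch_size, N_prime).astype(np.float32)
taus = np.random.uniform(size=(batch_size, N)).astype(np.float32)

loss = compute_eltwise_huber_quantile_loss(y, t, taus, huber_loss_threshold=1.0)
print(loss.shape)  # (batch_size, N, N_prime); reduce e.g. with F.mean for training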
Example 6: check_forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def check_forward(self, data):
    xs = [chainer.Variable(x) for x in data]
    bxs = functions.broadcast(*xs)

    # When len(xs) == 1, function returns a Variable object
    if isinstance(bxs, chainer.Variable):
        bxs = (bxs,)

    for bx in bxs:
        self.assertEqual(bx.data.shape, self.out_shape)
        self.assertEqual(bx.data.dtype, self.dtype)
Example 7: check_backward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def check_backward(self, data, grads):
    def f(*xs):
        return functions.broadcast(*xs)

    gradient_check.check_backward(
        f, data, grads, dtype=numpy.float64, **self.check_backward_options)
Example 8: check_double_backward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def check_double_backward(self, data, grads, gg):
    if len(data) == 1:
        return
    gradient_check.check_double_backward(
        functions.broadcast, data, grads, gg, dtype=numpy.float64,
        **self.check_double_backward_options)
Example 9: test_invalid_shape
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def test_invalid_shape(self):
    x_data = numpy.zeros((3, 2, 5), dtype=numpy.int32)
    y_data = numpy.zeros((1, 3, 4), dtype=numpy.float32)
    x = chainer.Variable(x_data)
    y = chainer.Variable(y_data)

    with self.assertRaises(type_check.InvalidType):
        functions.broadcast(x, y)
Example 10: test_invalid_shape_fill
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def test_invalid_shape_fill(self):
    x_data = numpy.zeros((3, 2, 5), dtype=numpy.int32)
    y_data = numpy.zeros(4, dtype=numpy.float32)
    x = chainer.Variable(x_data)
    y = chainer.Variable(y_data)

    with self.assertRaises(type_check.InvalidType):
        functions.broadcast(x, y)
Example 11: attention_history
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def attention_history(self, dL, cue, train=True):
    D = F.concat(dL, axis=0)
    D, Cue = F.broadcast(D, cue)
    S = self.m(F.tanh(self.W_dm(D) + Cue))
    S = F.softmax(F.reshape(S, (1, len(dL))))
    pre_v = F.matmul(S, D)
    return pre_v
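Here F.broadcast expands the single cue vector so it matches the stacked history D row for row, which lets the additive attention score m(tanh(W_dm(D) + Cue)) be computed over all history entries at once before the softmax weighting.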
Example 12: test
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def test(self):
    batch_size = self.batch_size
    N = self.N
    N_prime = self.N_prime
    huber_loss_threshold = self.huber_loss_threshold

    # Overestimation is penalized proportionally to tau
    # Underestimation is penalized proportionally to (1 - tau)
    y = np.random.normal(size=(batch_size, N)).astype('f')
    y_var = chainer.Variable(y)
    t = np.random.normal(size=(batch_size, N_prime)).astype('f')
    tau = np.random.uniform(size=(batch_size, N)).astype('f')

    loss = iqn.compute_eltwise_huber_quantile_loss(
        y_var, t, tau, huber_loss_threshold=huber_loss_threshold)
    y_var_b, t_b = F.broadcast(
        F.reshape(y_var, (batch_size, N, 1)),
        F.reshape(t, (batch_size, 1, N_prime)),
    )
    self.assertEqual(loss.shape, (batch_size, N, N_prime))
    huber_loss = F.huber_loss(
        y_var_b, t_b, delta=huber_loss_threshold, reduce='no')
    self.assertEqual(huber_loss.shape, (batch_size, N, N_prime))

    for i in range(batch_size):
        for j in range(N):
            for k in range(N_prime):
                # loss is always positive
                scalar_loss = loss[i, j, k]
                scalar_grad = chainer.grad(
                    [scalar_loss], [y_var])[0][i, j]
                self.assertGreater(scalar_loss.array, 0)
                if y[i, j] > t[i, k]:
                    # y over-estimates t:
                    # loss equals the Huber loss scaled by tau
                    correct_scalar_loss = tau[i, j] * huber_loss[i, j, k]
                else:
                    # y under-estimates t:
                    # loss equals the Huber loss scaled by (1 - tau)
                    correct_scalar_loss = (
                        (1 - tau[i, j]) * huber_loss[i, j, k])
                correct_scalar_grad = chainer.grad(
                    [correct_scalar_loss], [y_var])[0][i, j]
                self.assertAlmostEqual(
                    scalar_loss.array,
                    correct_scalar_loss.array,
                    places=5,
                )
                self.assertAlmostEqual(
                    scalar_grad.array,
                    correct_scalar_grad.array,
                    places=5,
                )
Example 13: lighting
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast [as alias]
def lighting(
        faces, textures, intensity_ambient=0.5, intensity_directional=0.5,
        color_ambient=(1, 1, 1), color_directional=(1, 1, 1), direction=(0, 1, 0)):
    xp = chainer.cuda.get_array_module(faces)
    bs, nf = faces.shape[:2]

    # arguments
    if isinstance(color_ambient, tuple) or isinstance(color_ambient, list):
        color_ambient = xp.array(color_ambient, 'float32')
    if isinstance(color_directional, tuple) or isinstance(color_directional, list):
        color_directional = xp.array(color_directional, 'float32')
    if isinstance(direction, tuple) or isinstance(direction, list):
        direction = xp.array(direction, 'float32')
    if color_ambient.ndim == 1:
        color_ambient = cf.broadcast_to(color_ambient[None, :], (bs, 3))
    if color_directional.ndim == 1:
        color_directional = cf.broadcast_to(color_directional[None, :], (bs, 3))
    if direction.ndim == 1:
        direction = cf.broadcast_to(direction[None, :], (bs, 3))

    # create light
    light = xp.zeros((bs, nf, 3), 'float32')

    # ambient light
    if intensity_ambient != 0:
        light = light + intensity_ambient * cf.broadcast_to(
            color_ambient[:, None, :], light.shape)

    # directional light
    if intensity_directional != 0:
        faces = faces.reshape((bs * nf, 3, 3))
        v10 = faces[:, 0] - faces[:, 1]
        v12 = faces[:, 2] - faces[:, 1]
        normals = cf.normalize(neural_renderer.cross(v10, v12))
        normals = normals.reshape((bs, nf, 3))

        if direction.ndim == 2:
            direction = cf.broadcast_to(direction[:, None, :], normals.shape)
        cos = cf.relu(cf.sum(normals * direction, axis=2))
        light = (
            light + intensity_directional * cfmath.mul(
                *cf.broadcast(color_directional[:, None, :], cos[:, :, None])))

    # apply
    light = cf.broadcast_to(light[:, :, None, None, None, :], textures.shape)
    textures = textures * light
    return textures
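The one use of cf.broadcast here pairs color_directional[:, None, :] of shape (bs, 1, 3) with cos[:, :, None] of shape (bs, nf, 1); both come back as (bs, nf, 3), so the elementwise multiply yields a per-face directional light color. The surrounding calls use cf.broadcast_to instead, which broadcasts a single input to an explicit target shape rather than broadcasting several inputs against each other.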