This article collects typical usage examples of the Python method chainer.functions.broadcast_to. If you are wondering what functions.broadcast_to does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of chainer.functions, the module this method belongs to.
The following presents 15 code examples of functions.broadcast_to, sorted by popularity by default. Upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
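Before the examples, here is a minimal sketch of the basic API (toy shapes, values chosen purely for illustration): F.broadcast_to(x, shape) expands an array or Variable to the given shape under NumPy broadcasting rules, and gradients flow back through the broadcast.

import numpy as np
import chainer.functions as F

x = np.arange(3, dtype=np.float32).reshape(1, 3)
y = F.broadcast_to(x, (4, 3))  # the single row repeated across 4 rows
print(y.shape)  # (4, 3)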
Example 1: __call__
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def __call__(self, x):
if self.dr:
with chainer.using_config('train', True):
x = F.dropout(x, self.dr)
if self.gap:
        x = F.sum(x, axis=(2, 3))
N = x.shape[0]
    # The code below is copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
feature = F.reshape(F.leaky_relu(x), (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
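The broadcast here implements minibatch discrimination: each sample's projected features are compared against every other sample in the batch. A standalone sketch of the shape manipulation, with hypothetical toy sizes:

import numpy as np
import chainer.functions as F

N, B, C = 4, 2, 3  # batch size and projection dimensions (toy values)
m = np.random.randn(N, B * C, 1).astype(np.float32)
m0 = F.broadcast_to(m, (N, B * C, N))  # tile each sample along a new axis
m1 = F.transpose(m0, (2, 1, 0))        # align every pair of samples
d = F.absolute(F.reshape(m0 - m1, (N, B, C, N)))  # pairwise L1 differences
print(d.shape)  # (4, 2, 3, 4)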
Example 2: _evaluate_psi_x_with_quantile_thresholds
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def _evaluate_psi_x_with_quantile_thresholds(psi_x, phi, f, taus):
assert psi_x.ndim == 2
batch_size, hidden_size = psi_x.shape
assert taus.ndim == 2
assert taus.shape[0] == batch_size
n_taus = taus.shape[1]
phi_taus = phi(taus)
assert phi_taus.ndim == 3
assert phi_taus.shape == (batch_size, n_taus, hidden_size)
psi_x_b = F.broadcast_to(
F.expand_dims(psi_x, axis=1), phi_taus.shape)
h = psi_x_b * phi_taus
h = F.reshape(h, (-1, hidden_size))
assert h.shape == (batch_size * n_taus, hidden_size)
h = f(h)
assert h.ndim == 2
assert h.shape[0] == batch_size * n_taus
n_actions = h.shape[-1]
h = F.reshape(h, (batch_size, n_taus, n_actions))
return QuantileDiscreteActionValue(h)
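The broadcast pairs a single state embedding psi_x with several quantile embeddings phi(taus), as in implicit quantile networks. A shape-only sketch with hypothetical sizes:

import numpy as np
import chainer.functions as F

batch_size, n_taus, hidden_size = 2, 8, 16  # toy sizes
psi_x = np.random.randn(batch_size, hidden_size).astype(np.float32)
phi_taus = np.random.randn(batch_size, n_taus, hidden_size).astype(np.float32)
psi_x_b = F.broadcast_to(F.expand_dims(psi_x, axis=1), phi_taus.shape)
h = psi_x_b * phi_taus  # elementwise product per quantile threshold
print(h.shape)  # (2, 8, 16)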
Example 3: get_initial_logits
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def get_initial_logits(self, mb_size=None):
if mb_size is None:
mb_size = self.src_mb_size
else:
assert self.src_mb_size == 1
assert mb_size is not None
bos_encoding = F.broadcast_to(self.decoder_chain.bos_encoding, (mb_size, 1, self.decoder_chain.d_model))
cross_mask = self.decoder_chain.xp.broadcast_to(self.mask_input[:,0:1,0:1,:], (self.mask_input.shape[0], self.decoder_chain.n_heads, 1, self.mask_input.shape[3]))
final_layer, prev_states = self.decoder_chain.encoding_layers.one_step(bos_encoding, None,
self.src_encoding, cross_mask)
logits = self.decoder_chain.logits_layer(F.reshape(final_layer, (mb_size, self.decoder_chain.d_model)))
return logits, DecoderState(pos=-1, prev_states=prev_states)
Example 4: compute_logits
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def compute_logits(self, seq_list, encoded_input, mask_input):
mb_size = len(seq_list)
max_length_1 = max(len(x) for x in seq_list)
x, mask = self.make_batch(seq_list)
# print "padded_data", x
# print "mask", mask
assert self.xp.all(mask_input == self.xp.broadcast_to(mask_input[:,0:1,0:1,:], mask_input.shape))
encoded = self.emb(x)
encoded += self.get_pos_vect(mb_size, max_length_1)
if self.dropout is not None:
encoded = F.dropout(encoded, self.dropout)
bos_plus_encoded = F.concat((F.broadcast_to(self.bos_encoding, (mb_size, 1, self.d_model)), encoded), axis=1)
cross_mask = self.xp.broadcast_to(mask_input[:,0:1,0:1,:], (mask_input.shape[0], self.n_heads, bos_plus_encoded.data.shape[1], mask_input.shape[3]))
final_layer = self.encoding_layers(bos_plus_encoded, encoded_input, mask, cross_mask)
logits = apply_linear_layer_to_last_dims(final_layer, self.logits_layer)
return logits
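Both this example and the previous one expand a cross-attention mask from one head and one position to all heads and decoder positions. Note that they use the raw xp.broadcast_to (NumPy/CuPy) rather than the differentiable F.broadcast_to, since masks need no gradient. A sketch with hypothetical shapes:

import numpy as np

mb, n_heads, dec_len, enc_len = 2, 4, 5, 7  # toy sizes
mask_input = np.ones((mb, 1, 1, enc_len), dtype=bool)
cross_mask = np.broadcast_to(
    mask_input[:, 0:1, 0:1, :], (mb, n_heads, dec_len, enc_len))
print(cross_mask.shape)  # (2, 4, 5, 7)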
Example 5: __call__
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def __call__(self, inputs):
pos_x, pos_y, offset_x, ego_x, ego_y, pose_x, pose_y = self._prepare_input(inputs)
batch_size, past_len, _ = pos_x.shape
h_pos = self.pos_encoder(pos_x)
h_ego = self.ego_encoder(ego_x)
h = F.concat((h_pos, h_ego), axis=1) # (B, C, 2)
h = self.inter(h)
h_pos = self.pos_decoder(h)
pred_y = self.last(h_pos) # (B, 10, C+6+28)
pred_y = F.swapaxes(pred_y, 1, 2)
pred_y = pred_y[:, :pos_y.shape[1], :]
loss = F.mean_squared_error(pred_y, pos_y)
pred_y = pred_y + F.broadcast_to(F.expand_dims(offset_x, 1), pred_y.shape)
pred_y = cuda.to_cpu(pred_y.data) * self._std + self._mean
return loss, pred_y, None
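Here the broadcast adds a per-sample 2-D offset back onto every predicted time step before un-normalizing. A minimal sketch with hypothetical shapes:

import numpy as np
import chainer.functions as F

pred_y = np.random.randn(2, 10, 2).astype(np.float32)  # (B, T, xy) predictions
offset_x = np.random.randn(2, 2).astype(np.float32)    # (B, xy) reference point
shifted = pred_y + F.broadcast_to(F.expand_dims(offset_x, 1), pred_y.shape)
print(shifted.shape)  # (2, 10, 2)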
Example 6: attend
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def attend(self, query, key, value, mask, minfs=None):
"""
Input shapes:
q=(b, units, dec_l), k=(b, units, enc_l),
v=(b, units, dec_l, enc_l), m=(b, dec_l, enc_l)
"""
# Calculate Attention Scores with Mask for Zero-padded Areas
pre_a = F.batch_matmul(query, key, transa=True) # (b, dec_l, enc_l)
minfs = self.xp.full(pre_a.shape, -np.inf, pre_a.dtype) \
if minfs is None else minfs
pre_a = F.where(mask, pre_a, minfs)
a = F.softmax(pre_a, axis=2)
    # If all values along axis=2 are -inf, softmax yields NaN, so re-mask them.
a = F.where(self.xp.isnan(a.data),
self.xp.zeros(a.shape, dtype=a.dtype), a)
    reshaped_a = a[:, None]  # (b, 1, dec_l, enc_l)
    # Calculate Weighted Sum
    pre_c = F.broadcast_to(reshaped_a, value.shape) * value
    c = F.sum(pre_c, axis=3, keepdims=True)  # (b, units, dec_l, 1)
return c
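broadcast_to spreads the (b, dec_l, enc_l) attention weights across the units axis of the value tensor before the weighted sum. A shape sketch with toy sizes:

import numpy as np
import chainer.functions as F

b, units, dec_l, enc_l = 2, 4, 3, 5  # toy sizes
a = np.random.rand(b, dec_l, enc_l).astype(np.float32)
value = np.random.rand(b, units, dec_l, enc_l).astype(np.float32)
pre_c = F.broadcast_to(a[:, None], value.shape) * value  # weight each value
c = F.sum(pre_c, axis=3, keepdims=True)
print(c.shape)  # (2, 4, 3, 1)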
Example 7: __call__
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def __call__(self, x):
N = x.shape[0]
    # The code below is copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
feature = F.reshape(x, (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
Example 8: __call__
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def __call__(self, x):
return x + F.broadcast_to(self.bias, x.shape)
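This is a Bias-style link: a learned parameter broadcast onto the input. A standalone equivalent with a hypothetical per-channel bias shape:

import numpy as np
import chainer
import chainer.functions as F

bias = chainer.Parameter(np.zeros((1, 3, 1, 1), dtype=np.float32))
x = np.random.randn(2, 3, 8, 8).astype(np.float32)
y = x + F.broadcast_to(bias, x.shape)  # same bias at every spatial location
print(y.shape)  # (2, 3, 8, 8)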
Example 9: clip_actions
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def clip_actions(actions, min_action, max_action):
min_actions = F.broadcast_to(min_action, actions.shape)
max_actions = F.broadcast_to(max_action, actions.shape)
return F.maximum(F.minimum(actions, max_actions), min_actions)
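A usage sketch for clip_actions as defined above, assuming per-dimension bounds stored as float32 arrays:

import numpy as np

actions = np.array([[-2.0, 0.5], [3.0, -0.1]], dtype=np.float32)
min_action = np.full(2, -1.0, dtype=np.float32)
max_action = np.full(2, 1.0, dtype=np.float32)
clipped = clip_actions(actions, min_action, max_action)
print(clipped.array)  # [[-1.   0.5]
                      #  [ 1.  -0.1]]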
Example 10: compute_mean_and_var
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def compute_mean_and_var(self, x):
h = x
for layer in self.hidden_layers:
h = self.nonlinearity(layer(h))
mean = self.mean_layer(h)
if self.bound_mean:
mean = bound_by_tanh(mean, self.min_action, self.max_action)
var = F.broadcast_to(F.softplus(self.var_layer(h)), mean.shape) + \
self.min_var
return mean, var
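The softplus output of the variance head is broadcast to the mean's shape, which allows a variance head with fewer output dimensions (e.g. one shared variance) than the mean. A shape sketch with hypothetical sizes:

import numpy as np
import chainer.functions as F

mean = np.zeros((2, 3), dtype=np.float32)           # toy mean output
raw_var = np.random.randn(2, 1).astype(np.float32)  # hypothetical var head output
var = F.broadcast_to(F.softplus(raw_var), mean.shape) + 1e-2  # min_var floor
print(var.shape)  # (2, 3)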
Example 11: __call__
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def __call__(self, x):
mean = self.hidden_layers(x)
var = F.broadcast_to(self.var_func(self.var_param), mean.shape)
return distribution.GaussianDistribution(mean, var)
Example 12: update_temperature
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def update_temperature(self, log_prob):
assert not isinstance(log_prob, chainer.Variable)
loss = -F.mean(
F.broadcast_to(self.temperature_holder(), log_prob.shape)
* (log_prob + self.entropy_target))
self.temperature_optimizer.update(lambda: loss)
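The scalar temperature (returned here by temperature_holder) is broadcast to match log_prob before forming the SAC-style temperature loss. A minimal sketch with made-up values standing in for the holder and target:

import numpy as np
import chainer.functions as F

log_prob = np.random.randn(4).astype(np.float32)
temperature = np.full((), 0.2, dtype=np.float32)  # stand-in for temperature_holder()
entropy_target = -2.0
loss = -F.mean(F.broadcast_to(temperature, log_prob.shape)
               * (log_prob + entropy_target))
print(loss.shape)  # () -- a scalar loss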
Example 13: _evaluate_model_and_update_recurrent_states
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def _evaluate_model_and_update_recurrent_states(self, batch_obs, test):
batch_xs = self.batch_states(batch_obs, self.xp, self.phi)
if self.recurrent:
if test:
tau2av, self.test_recurrent_states = self.model(
batch_xs, self.test_recurrent_states)
else:
self.train_prev_recurrent_states = self.train_recurrent_states
tau2av, self.train_recurrent_states = self.model(
batch_xs, self.train_recurrent_states)
else:
tau2av = self.model(batch_xs)
if test and self.act_deterministically:
# Instead of uniform sampling, use a deterministic sequence of
# equally spaced numbers from 0 to 1 as quantile thresholds.
taus_tilde = self.xp.broadcast_to(
self.xp.linspace(
0, 1, num=self.quantile_thresholds_K,
dtype=self.xp.float32),
(len(batch_obs), self.quantile_thresholds_K),
)
else:
taus_tilde = self.xp.random.uniform(
0, 1,
size=(len(batch_obs), self.quantile_thresholds_K)).astype('f')
return tau2av(taus_tilde)
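For deterministic evaluation, the same linspace of quantile thresholds is shared across the whole batch via xp.broadcast_to (no gradient needed). A NumPy-only sketch:

import numpy as np

K, batch = 8, 3  # toy number of thresholds and batch size
taus_tilde = np.broadcast_to(
    np.linspace(0, 1, num=K, dtype=np.float32), (batch, K))
print(taus_tilde.shape)  # (3, 8); each row is the same evenly spaced sequence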
Example 14: get_noise
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def get_noise(self, batch_size, ch, shape):
xp = self.xp
if xp != np:
z = xp.random.normal(size=(batch_size,) + shape, dtype='f')
else:
# no "dtype" in kwargs for numpy.random.normal
z = xp.random.normal(size=(batch_size,) + shape).astype('f')
z = xp.broadcast_to(z, (ch, batch_size,) + shape)
z = z.transpose((1, 0, 2, 3))
return z
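The broadcast copies one noise map per sample across all channels, so every channel of a sample sees identical noise (as in StyleGAN-style noise injection). A NumPy-only sketch:

import numpy as np

batch_size, ch, shape = 2, 3, (4, 4)
z = np.random.normal(size=(batch_size,) + shape).astype('f')
z = np.broadcast_to(z, (ch, batch_size) + shape)  # same map for every channel
z = z.transpose((1, 0, 2, 3))                     # back to (batch, ch, H, W)
print(z.shape)  # (2, 3, 4, 4)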
Example 15: __call__
# Module to import: from chainer import functions [as alias]
# Or: from chainer.functions import broadcast_to [as alias]
def __call__(self, w, x=None, add_noise=False):
h = x
batch_size, _ = w.shape
if self.upsample:
assert h is not None
if self.blur_k is None:
k = np.asarray([1, 2, 1]).astype('f')
k = k[:, None] * k[None, :]
k = k / np.sum(k)
self.blur_k = self.xp.asarray(k)[None, None, :]
h = self.c0(upscale2x(h))
if self.enable_blur:
h = blur(h, self.blur_k)
else:
h = F.broadcast_to(self.W, (batch_size, self.ch_in, 4, 4))
# h should be (batch, ch, size, size)
if add_noise:
h = self.n0(h)
h = F.leaky_relu(self.b0(h))
h = self.s0(w, h)
h = self.c1(h)
if add_noise:
h = self.n1(h)
h = F.leaky_relu(self.b1(h))
h = self.s1(w, h)
return h
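When not upsampling, the block broadcasts a single learned 4x4 constant (self.W) to the whole batch, as in the first block of a StyleGAN synthesis network. A standalone sketch with a hypothetical parameter in place of self.W:

import numpy as np
import chainer
import chainer.functions as F

ch_in, batch_size = 8, 2  # toy sizes
W = chainer.Parameter(np.random.randn(1, ch_in, 4, 4).astype(np.float32))
h = F.broadcast_to(W, (batch_size, ch_in, 4, 4))  # one constant, whole batch
print(h.shape)  # (2, 8, 4, 4)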