This article collects typical usage examples of the Python method chainer.functions.gaussian. If you are wondering what functions.gaussian is for, how to use it, or want to see it in context, the curated code examples below may help. You can also explore further usage examples of its containing module, chainer.functions.
The following 14 code examples of functions.gaussian are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
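Before the examples, a minimal self-contained sketch of what functions.gaussian does: it draws a reparameterized sample mean + exp(ln_var / 2) * eps with eps ~ N(0, 1), so gradients flow through both mean and ln_var. The array shapes below are arbitrary illustration values:

import numpy as np
import chainer.functions as F

mean = np.zeros((4, 8), dtype=np.float32)
ln_var = np.zeros((4, 8), dtype=np.float32)  # log variance, so variance = 1
z = F.gaussian(mean, ln_var)                 # differentiable sample
print(z.shape)                               # (4, 8)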
Example 1: generate_image
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def generate_image(self, v, r):
    xp = cuda.get_array_module(v)
    batch_size = v.shape[0]
    h_t_gen, c_t_gen, u_t, _, _ = self.generate_initial_state(
        batch_size, xp)
    v = cf.reshape(v, v.shape[:2] + (1, 1))
    for t in range(self.num_layers):
        generation_core = self.get_generation_core(t)
        mean_z_p, ln_var_z_p = self.z_prior_distribution.compute_parameter(
            h_t_gen)
        z_t = cf.gaussian(mean_z_p, ln_var_z_p)
        h_next_gen, c_next_gen, u_next = generation_core(
            h_t_gen, c_t_gen, z_t, v, r, u_t)
        u_t = u_next
        h_t_gen = h_next_gen
        c_t_gen = c_next_gen
    mean_x = self.map_u_x(u_t)
    return mean_x.data
Example 2: get_loss_func
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def get_loss_func(self, C=1.0, k=1):
    """Get loss function of VAE.

    The loss value is equal to ELBO (Evidence Lower Bound)
    multiplied by -1.

    Args:
        C (float): Usually this is 1.0. Can be changed to control the
            second term of the ELBO bound, which works as regularization.
        k (int): Number of Monte Carlo samples used in encoded vector.
    """
    def lf(x):
        mu, ln_var = self.encode(x)
        batchsize = len(mu.data)
        # reconstruction loss
        rec_loss = 0
        for l in six.moves.range(k):
            z = F.gaussian(mu, ln_var)
            rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \
                / (k * batchsize)
        self.rec_loss = rec_loss
        self.loss = self.rec_loss + \
            C * gaussian_kl_divergence(mu, ln_var) / batchsize
        return self.loss
    return lf
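A hypothetical training step using the returned closure; here model stands for a VAE chainer.Chain exposing get_loss_func as above, x for a minibatch of inputs, and the Adam optimizer is an illustrative choice, not part of the original example:

import chainer

optimizer = chainer.optimizers.Adam()
optimizer.setup(model)              # model: hypothetical VAE Chain
lossfun = model.get_loss_func(k=1)
optimizer.update(lossfun, x)        # evaluates lf(x), backprops, and updates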
Example 3: test_forward
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def test_forward(self, backend_config):
    m_data, v_data = backend_config.get_array((self.m, self.v))
    m = chainer.Variable(m_data)
    v = chainer.Variable(v_data)
    # Call forward without eps and retrieve it
    n1, eps = functions.gaussian(m, v, return_eps=True)
    self.assertIsInstance(eps, backend_config.xp.ndarray)
    self.assertEqual(n1.dtype, self.dtype)
    self.assertEqual(n1.shape, m.shape)
    self.assertEqual(eps.dtype, self.dtype)
    self.assertEqual(eps.shape, m.shape)
    # Call again with retrieved eps
    n2 = functions.gaussian(m, v, eps=eps)
    self.assertEqual(n2.dtype, self.dtype)
    self.assertEqual(n2.shape, m.shape)
    testing.assert_allclose(n1.array, n2.array)
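The return_eps/eps pair makes the otherwise stochastic function reproducible: the first call returns the noise it drew, and passing that noise back yields the identical sample. A minimal standalone illustration on NumPy arrays (shapes are arbitrary):

import numpy as np
import chainer.functions as F

m = np.zeros((2, 3), dtype=np.float32)
v = np.zeros((2, 3), dtype=np.float32)  # ln_var = 0, i.e. unit variance
y1, eps = F.gaussian(m, v, return_eps=True)
y2 = F.gaussian(m, v, eps=eps)          # reuse the same noise
np.testing.assert_allclose(y1.array, y2.array)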
Example 4: test_double_backward
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def test_double_backward(self, backend_config):
    m_data, v_data = backend_config.get_array((self.m, self.v))
    y_grad = backend_config.get_array(self.gy)
    m_grad_grad, v_grad_grad = (
        backend_config.get_array((self.ggm, self.ggv)))
    eps = backend_config.get_array(
        numpy.random.uniform(-1, 1, self.shape).astype(self.dtype))

    def f(m, v):
        # The numerical gradient may be computed in a more precise dtype
        # than the backward computation, so cast eps to the input dtype
        # before reusing it.
        eps_ = eps.astype(m.dtype)
        return functions.gaussian(m, v, eps=eps_)

    gradient_check.check_double_backward(
        f, (m_data, v_data), y_grad, (m_grad_grad, v_grad_grad),
        **self.check_double_backward_options)
Example 5: sample
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def sample(self, h):
    mean, ln_var = self.compute_parameter(h)
    return cf.gaussian(mean, ln_var)
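compute_parameter is not shown in this example. A plausible implementation, assuming a single layer that emits both distribution parameters along the channel axis; the layer name conv_param and the channel layout are assumptions, not the original code:

def compute_parameter(self, h):
    param = self.conv_param(h)                    # (B, 2 * z_channels, H, W)
    mean, ln_var = cf.split_axis(param, 2, axis=1)
    return mean, ln_var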
Example 6: sample_z_and_x_params_from_posterior
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def sample_z_and_x_params_from_posterior(self, x, v, r):
    batch_size = x.shape[0]
    xp = cuda.get_array_module(x)
    h_t_gen, c_t_gen, u_t, h_t_enc, c_t_enc = self.generate_initial_state(
        batch_size, xp)
    v = cf.reshape(v, v.shape + (1, 1))
    z_t_params_array = []
    for t in range(self.num_layers):
        inference_core = self.get_inference_core(t)
        generation_core = self.get_generation_core(t)
        h_next_enc, c_next_enc = inference_core(h_t_gen, h_t_enc, c_t_enc,
                                                x, v, r, u_t)
        mean_z_q, ln_var_z_q = self.z_posterior_distribution.compute_parameter(
            h_t_enc)
        z_t = cf.gaussian(mean_z_q, ln_var_z_q)
        mean_z_p, ln_var_z_p = self.z_prior_distribution.compute_parameter(
            h_t_gen)
        h_next_gen, c_next_gen, u_next = generation_core(
            h_t_gen, c_t_gen, z_t, v, r, u_t)
        z_t_params_array.append((mean_z_q, ln_var_z_q, mean_z_p,
                                 ln_var_z_p))
        u_t = u_next
        h_t_gen = h_next_gen
        c_t_gen = c_next_gen
        h_t_enc = h_next_enc
        c_t_enc = c_next_enc
    mean_x = self.map_u_x(u_t)
    return z_t_params_array, mean_x
Example 7: generate_canvas_states
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def generate_canvas_states(self, v, r, xp):
    batch_size = v.shape[0]
    h_t_gen, c_t_gen, u_t, _, _ = self.generate_initial_state(
        batch_size, xp)
    v = cf.reshape(v, v.shape[:2] + (1, 1))
    u_t_array = []
    for t in range(self.num_layers):
        generation_core = self.get_generation_core(t)
        mean_z_p, ln_var_z_p = self.z_prior_distribution.compute_parameter(
            h_t_gen)
        z_t = cf.gaussian(mean_z_p, ln_var_z_p)
        h_next_gen, c_next_gen, u_next = generation_core(
            h_t_gen, c_t_gen, z_t, v, r, u_t)
        u_t = u_next
        h_t_gen = h_next_gen
        c_t_gen = c_next_gen
        u_t_array.append(u_t)
    return u_t_array
Example 8: sample
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def sample(self):
    return F.gaussian(self.mean, self.ln_var)
Example 9: sample_with_log_prob
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def sample_with_log_prob(self):
    x = F.gaussian(self.mean, self.ln_var)
    normal_log_prob = _eltwise_gaussian_log_likelihood(
        x, self.mean, self.var, self.ln_var)
    log_probs = normal_log_prob - _tanh_forward_log_det_jacobian(x)
    y = F.tanh(x)
    return y, F.sum(log_probs, axis=1)
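The two private helpers are not shown here. A sketch of what they plausibly compute for this tanh-squashed Gaussian, using the standard numerically stable identity log(1 - tanh(x)^2) = 2 * (log 2 - x - softplus(-2x)); these are assumptions about the helpers, not the original code:

import numpy as np
import chainer.functions as F

def _eltwise_gaussian_log_likelihood(x, mean, var, ln_var):
    # Elementwise log N(x; mean, var)
    return -0.5 * (float(np.log(2 * np.pi)) + ln_var) \
        - (x - mean) ** 2 / (2 * var)

def _tanh_forward_log_det_jacobian(x):
    # log |d tanh(x) / dx| = log(1 - tanh(x)^2), in a stable form
    return 2.0 * (float(np.log(2.0)) - x - F.softplus(-2.0 * x))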
Example 10: advance_one_step
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def advance_one_step(self, previous_states, prev_y):
    if self.noise_on_prev_word:
        current_mb_size = prev_y.data.shape[0]
        assert self.mb_size is None or current_mb_size <= self.mb_size
        prev_y = prev_y * F.gaussian(Variable(self.noise_mean[:current_mb_size]),
                                     Variable(self.noise_lnvar[:current_mb_size]))
    new_states, concatenated, attn = self.advance_state(previous_states, prev_y)
    logits = self.compute_logits(new_states, concatenated, attn)
    return new_states, logits, attn
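noise_mean and noise_lnvar are preallocated buffers not shown in the example. A plausible initialization for multiplicative Gaussian noise with mean 1 and standard deviation sigma; the names sigma and embed_dim and the shapes are assumptions (note that ln_var = log(sigma^2) = 2 * log(sigma)):

sigma = 0.1  # assumed noise level
self.noise_mean = self.xp.ones(
    (self.mb_size, embed_dim), dtype=np.float32)
self.noise_lnvar = self.xp.full(
    (self.mb_size, embed_dim), 2 * np.log(sigma), dtype=np.float32)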
Example 11: test_backward
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def test_backward(self, backend_config):
    m_data, v_data = backend_config.get_array((self.m, self.v))
    y_grad = backend_config.get_array(self.gy)
    eps = backend_config.get_array(
        numpy.random.uniform(-1, 1, self.shape).astype(self.dtype))

    def f(m, v):
        # The numerical gradient may be computed in a more precise dtype
        # than the backward computation, so cast eps to the input dtype
        # before reusing it.
        eps_ = eps.astype(m.dtype)
        return functions.gaussian(m, v, eps=eps_)

    gradient_check.check_backward(
        f, (m_data, v_data), y_grad, **self.check_backward_options)
Example 12: encode
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def encode(self, bow):
    """Convert the bag-of-words vector of shape (n_docs, n_vocab)
    into latent mean and log variance vectors.
    """
    lam = F.relu(self.l1(bow))
    pi = F.relu(self.l2(lam))
    mu, log_sigma = F.split_axis(self.mu_logsigma(pi), 2, 1)
    sample = F.gaussian(mu, log_sigma)
    loss = F.gaussian_kl_divergence(mu, log_sigma)
    return sample, loss
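Note that despite the name log_sigma, both F.gaussian and F.gaussian_kl_divergence interpret their second argument as the log of the variance (ln_var), not the log of the standard deviation, so the second half of the mu_logsigma output is effectively a log-variance head.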
Example 13: sample
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def sample(self, x):
    pi, mu, log_var = self.get_gaussian_params(x)
    n_batch = pi.shape[0]
    # Choose one Gaussian component (mean and variance) per batch row
    ps = chainer.backends.cuda.to_cpu(pi.array)
    idx = [np.random.choice(self.gaussian_mixtures, p=p) for p in ps]
    mu = F.get_item(mu, [range(n_batch), idx])
    log_var = F.get_item(log_var, [range(n_batch), idx])
    # Sampling
    z = F.gaussian(mu, log_var)
    return z
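This is standard ancestral sampling from a mixture density network: first draw a component index from the categorical weights pi, then draw from that component's Gaussian via F.gaussian. The F.get_item calls with [range(n_batch), idx] perform NumPy-style advanced indexing, selecting each row's chosen component while keeping the operation differentiable with respect to mu and log_var.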
Example 14: get_inference_posterior
# Required imports: from chainer import functions [as alias]
# Or: from chainer.functions import gaussian [as alias]
def get_inference_posterior(self, t):
    if self.hyperparams.inference_share_posterior:
        return self.inference_posteriors[0]
    return self.inference_posteriors[t]

# def compute_information_gain(self, x, r):
#     xp = cuda
#     h0_gen, c0_gen, u_0, h0_enc, c0_enc = self.generate_initial_state(
#         1, xp)
#     loss_kld = 0
#     hl_enc = h0_enc
#     cl_enc = c0_enc
#     hl_gen = h0_gen
#     cl_gen = c0_gen
#     ul_enc = u_0
#     xq = self.inference_downsampler(x)
#     for l in range(self.num_layers):
#         inference_core = self.get_inference_core(l)
#         inference_posterior = self.get_inference_posterior(l)
#         generation_core = self.get_generation_core(l)
#         generation_prior = self.get_generation_prior(l)
#         h_next_enc, c_next_enc = inference_core.forward_onestep(
#             hl_gen, hl_enc, cl_enc, xq, v, r)
#         mean_z_q = inference_posterior.compute_mean_z(hl_enc)
#         ln_var_z_q = inference_posterior.compute_ln_var_z(hl_enc)
#         ze_l = cf.gaussian(mean_z_q, ln_var_z_q)
#         mean_z_p = generation_prior.compute_mean_z(hl_gen)
#         ln_var_z_p = generation_prior.compute_ln_var_z(hl_gen)
#         h_next_gen, c_next_gen, u_next_enc = generation_core.forward_onestep(
#             hl_gen, cl_gen, ul_enc, ze_l, v, r)
#         kld = gqn.nn.functions.gaussian_kl_divergence(
#             mean_z_q, ln_var_z_q, mean_z_p, ln_var_z_p)
#         loss_kld += cf.sum(kld)
#         hl_gen = h_next_gen
#         cl_gen = c_next_gen
#         ul_enc = u_next_enc
#         hl_enc = h_next_enc
#         cl_enc = c_next_enc