This article collects typical usage examples of the torch.exp function in Python. If you have been wondering what torch.exp does, how to call it, and what real code that uses it looks like, the curated examples below may help.
The following presents 15 code examples of the exp function, ordered by popularity by default.
Example 1: predictive_elbo

def predictive_elbo(self, x, k, s):
    # No pW or qW; k = number of z samples (particles P), s = number of W samples
    self.B = x.size()[0]  # batch size

    elbo1s = []
    for i in range(s):
        Ws, logpW, logqW = self.sample_W()                # _, [1], [1]
        mu, logvar = self.encode(x)                       # [B,Z]
        z, logpz, logqz = self.sample_z(mu, logvar, k=k)  # [P,B,Z], [P,B]
        x_hat = self.decode(Ws, z)                        # [P,B,X]
        logpx = log_bernoulli(x_hat, x)                   # [P,B]

        elbo = logpx + logpz - logqz                      # [P,B]
        if k > 1:
            # log-mean-exp over the k particles, stabilized with the max trick
            max_ = torch.max(elbo, 0)[0]                  # [B]
            elbo = torch.log(torch.mean(torch.exp(elbo - max_), 0)) + max_  # [B]
        elbo1s.append(elbo)

    elbo1s = torch.stack(elbo1s)                          # [S,B]
    if s > 1:
        # log-mean-exp over the s weight samples
        max_ = torch.max(elbo1s, 0)[0]                    # [B]
        elbo1s = torch.log(torch.mean(torch.exp(elbo1s - max_), 0)) + max_  # [B]

    elbo = torch.mean(elbo1s)                             # [1]
    return elbo
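A note on the stabilized average above: subtracting the per-batch maximum before exponentiating prevents overflow, and modern PyTorch bundles the same trick into torch.logsumexp. A minimal sketch with toy shapes (not from the original repo) confirming the equivalence:

import math
import torch

def log_mean_exp(x, dim=0):
    # torch.logsumexp applies the max-subtraction trick internally
    return torch.logsumexp(x, dim) - math.log(x.size(dim))

elbo = torch.randn(5, 8)  # toy [P,B] values
max_ = torch.max(elbo, 0)[0]
manual = torch.log(torch.mean(torch.exp(elbo - max_), 0)) + max_
assert torch.allclose(log_mean_exp(elbo, 0), manual, atol=1e-6)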
Example 2: forward

def forward(self, feat, right, wrong, batch_wrong, fake=None, fake_diff_mask=None):
    num_wrong = wrong.size(1)
    batch_size = feat.size(0)

    feat = feat.view(-1, self.ninp, 1)
    right_dis = torch.bmm(right.view(-1, 1, self.ninp), feat)
    wrong_dis = torch.bmm(wrong, feat)
    batch_wrong_dis = torch.bmm(batch_wrong, feat)

    # exp(wrong - right) compares every wrong score against the right one
    wrong_score = torch.sum(torch.exp(wrong_dis - right_dis.expand_as(wrong_dis)), 1) \
        + torch.sum(torch.exp(batch_wrong_dis - right_dis.expand_as(batch_wrong_dis)), 1)

    loss_dis = torch.sum(torch.log(wrong_score + 1))
    loss_norm = right.norm() + feat.norm() + wrong.norm() + batch_wrong.norm()

    if fake is not None:  # a bare `if fake:` would test tensor truthiness
        fake_dis = torch.bmm(fake.view(-1, 1, self.ninp), feat)
        fake_score = torch.masked_select(torch.exp(fake_dis - right_dis), fake_diff_mask)

        margin_score = F.relu(torch.log(fake_score + 1) - self.margin)
        loss_fake = torch.sum(margin_score)
        loss_dis += loss_fake
        loss_norm += fake.norm()

    loss = (loss_dis + 0.1 * loss_norm) / batch_size
    if fake is not None:
        return loss, loss_fake.item() / batch_size  # .item() replaces the old .data[0]
    else:
        return loss
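The exp-of-differences pattern here implements a smooth ranking loss: exp(wrong - right) is near zero when the right answer scores far above a wrong one and grows when they compete. A self-contained toy sketch of just that term (shapes assumed, not the module above):

import torch

right_dis = torch.randn(4, 1, 1)   # [B,1,1] scores for correct answers
wrong_dis = torch.randn(4, 10, 1)  # [B,W,1] scores for wrong answers
wrong_score = torch.sum(torch.exp(wrong_dis - right_dis.expand_as(wrong_dis)), 1)
loss_dis = torch.sum(torch.log(wrong_score + 1))  # small when right >> wrong
print(loss_dis)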
Example 3: guide

def guide(num_particles):
    q1 = pyro.param("q1", torch.tensor(pi1, requires_grad=True))
    q2 = pyro.param("q2", torch.tensor(pi2, requires_grad=True))
    with pyro.iarange("particles", num_particles):
        z = pyro.sample("z", dist.Normal(q2, 1.0).expand_by([num_particles]))
        zz = torch.exp(z) / (1.0 + torch.exp(z))  # logistic sigmoid of z
        pyro.sample("y", dist.Bernoulli(q1 * zz))
Example 4: sample

def sample(self, mu, logvar, k):
    if torch.cuda.is_available():
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_()).cuda()  # [P,B,Z]
        z = eps.mul(torch.exp(.5 * logvar)) + mu  # [P,B,Z]
        logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size)).cuda(),
                          Variable(torch.zeros(self.B, self.z_size)).cuda())  # [P,B]
        logqz = lognormal(z, Variable(mu.data), Variable(logvar.data))
    else:
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_())  # [P,B,Z]
        z = eps.mul(torch.exp(.5 * logvar)) + mu  # [P,B,Z]
        logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size)),
                          Variable(torch.zeros(self.B, self.z_size)))  # [P,B]
        logqz = lognormal(z, mu, logvar)
    return z, logpz, logqz
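For reference, a minimal modern-PyTorch sketch of the same reparameterization trick, with torch.randn replacing the manual FloatTensor/Variable plumbing (names here are illustrative):

import torch

def sample_z(mu, logvar, k):
    std = torch.exp(0.5 * logvar)                      # [B,Z]
    eps = torch.randn(k, *mu.shape, device=mu.device)  # [P,B,Z]
    return mu + eps * std                              # [P,B,Z]

mu, logvar = torch.zeros(3, 2), torch.zeros(3, 2)
print(sample_z(mu, logvar, k=5).shape)  # torch.Size([5, 3, 2])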
Example 5: get_positive_expectation

import math
import torch
import torch.nn.functional as F

def get_positive_expectation(p_samples, measure, average=True):
    log_2 = math.log(2.)

    if measure == 'GAN':
        Ep = -F.softplus(-p_samples)
    elif measure == 'JSD':
        Ep = log_2 - F.softplus(-p_samples)
    elif measure == 'X2':
        Ep = p_samples ** 2
    elif measure == 'KL':
        Ep = p_samples + 1.
    elif measure == 'RKL':
        Ep = -torch.exp(-p_samples)
    elif measure == 'DV':
        Ep = p_samples
    elif measure == 'H2':
        Ep = 1. - torch.exp(-p_samples)
    elif measure == 'W1':
        Ep = p_samples
    else:
        raise_measure_error(measure)  # helper from the same module

    if average:
        return Ep.mean()
    else:
        return Ep
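Usage sketch: each branch maps critic outputs to the positive term of the corresponding f-divergence bound (for 'RKL', E_p[-exp(-T)]). Assuming the definition above is in scope:

import torch

p_samples = torch.randn(16)  # toy critic outputs on positive samples
for m in ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1']:
    print(m, get_positive_expectation(p_samples, m).item())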
Example 6: _kl_uniform_gumbel

import torch

def _kl_uniform_gumbel(p, q):
    common_term = q.scale / (p.high - p.low)
    high_loc_diff = (p.high - q.loc) / q.scale
    low_loc_diff = (p.low - q.loc) / q.scale
    t1 = common_term.log() + 0.5 * (high_loc_diff + low_loc_diff)
    t2 = common_term * (torch.exp(-high_loc_diff) - torch.exp(-low_loc_diff))
    return t1 - t2
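This helper computes the closed form of KL(Uniform(a, b) || Gumbel(loc, scale)); in recent PyTorch the same computation is reachable through torch.distributions.kl_divergence, which, to my reading, has this distribution pair registered. A sanity-check sketch under that assumption:

import torch
from torch.distributions import Gumbel, Uniform, kl_divergence

p = Uniform(torch.tensor(0.0), torch.tensor(1.0))
q = Gumbel(torch.tensor(0.5), torch.tensor(2.0))
print(kl_divergence(p, q))       # dispatches to the registered closed form
print(_kl_uniform_gumbel(p, q))  # the function above gives the same value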
Example 7: encode_and_logprob

def encode_and_logprob(self, x):
    for i in range(len(self.first_half_weights) - 1):
        x = self.act_func(self.first_half_weights[i](x))

    mean = self.first_half_weights[-1](x)
    logvar = self.q_logvar(x)

    # Sample with the reparameterization trick
    eps = Variable(torch.randn(1, self.z_size))
    x = (torch.exp(.5 * logvar) * eps) + mean

    logq = -torch.mean(logvar.sum(1) + ((x - mean).pow(2) / torch.exp(logvar)).sum(1))
    logp = torch.mean(x.pow(2).sum(1))

    return x, logq + logp
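Note that logq above is built from just the logvar and squared-error terms; the exact diagonal-Gaussian log-density also carries a factor of 0.5 and a 2*pi constant. For reference, a minimal sketch of the full density (a hypothetical helper, not from the repo):

import math
import torch

def lognormal_diag(x, mean, logvar):
    # exact log N(x; mean, diag(exp(logvar))), returns shape [B]
    D = x.size(1)
    return -0.5 * (D * math.log(2 * math.pi)
                   + logvar.sum(1)
                   + ((x - mean).pow(2) / torch.exp(logvar)).sum(1))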
Example 8: forward

def forward(self, true_binary, rule_masks, raw_logits):
    if cmd_args.loss_type == 'binary':
        exp_pred = torch.exp(raw_logits) * rule_masks
        norm = torch.sum(exp_pred, 2, keepdim=True)  # F.torch.sum is just torch.sum
        prob = torch.div(exp_pred, norm)
        return F.binary_cross_entropy(prob, true_binary) * cmd_args.max_decode_steps

    if cmd_args.loss_type == 'perplexity':
        return my_perp_loss(true_binary, rule_masks, raw_logits)

    if cmd_args.loss_type == 'vanilla':
        exp_pred = torch.exp(raw_logits) * rule_masks + 1e-30
        norm = torch.sum(exp_pred, 2, keepdim=True)
        prob = torch.div(exp_pred, norm)

        ll = torch.abs(torch.sum(true_binary * prob, 2))
        mask = 1 - rule_masks[:, :, -1]
        logll = mask * torch.log(ll)
        loss = -torch.sum(logll) / true_binary.size()[1]
        return loss

    raise NotImplementedError('unknown loss type %s' % cmd_args.loss_type)
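The exp-then-normalize steps above build a masked softmax by hand. A hedged alternative that avoids overflow for large logits is to mask in log-space and let torch.softmax apply the max-subtraction trick internally (sketch with toy shapes):

import torch

def masked_softmax(logits, mask, dim=2):
    logits = logits.masked_fill(mask == 0, float('-inf'))
    return torch.softmax(logits, dim)

raw_logits = torch.randn(4, 5, 7)
rule_masks = torch.ones(4, 5, 7)
rule_masks[..., 3:] = 0  # pretend the trailing rules are invalid
prob = masked_softmax(raw_logits, rule_masks)
print(prob.sum(2))  # each row sums to 1 over the valid entries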
Example 9: log_uniform_candidate_sampler

def log_uniform_candidate_sampler(self, targets, choice_func=_choice):
    # Returns sampled_ids, true_expected_count, sampled_expected_count:
    #   targets                = (batch_size, )
    #   sampled_ids            = (n_samples, )
    #   true_expected_count    = (batch_size, )
    #   sampled_expected_count = (n_samples, )
    # See:
    #   https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/range_sampler.h
    #   https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/range_sampler.cc
    # Algorithm: keep track of the number of tries during sampling; the
    # expected count is then
    #   -expm1(num_tries * log1p(-p)) = 1 - (1 - p)^num_tries
    # where p is self._probs[id].
    np_sampled_ids, num_tries = choice_func(self._num_words, self._num_samples)
    sampled_ids = torch.from_numpy(np_sampled_ids).to(targets.device)

    # Under the log-uniform distribution,
    #   P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
    target_probs = torch.log((targets.float() + 2.0) / (targets.float() + 1.0)) / self._log_num_words_p1
    target_expected_count = -1.0 * (torch.exp(num_tries * torch.log1p(-target_probs)) - 1.0)
    sampled_probs = torch.log((sampled_ids.float() + 2.0) /
                              (sampled_ids.float() + 1.0)) / self._log_num_words_p1
    sampled_expected_count = -1.0 * (torch.exp(num_tries * torch.log1p(-sampled_probs)) - 1.0)

    sampled_ids.requires_grad_(False)
    target_expected_count.requires_grad_(False)
    sampled_expected_count.requires_grad_(False)

    return sampled_ids, target_expected_count, sampled_expected_count
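The expected-count lines spell out exp(x) - 1 manually; torch.expm1 evaluates the commented formula -expm1(num_tries * log1p(-p)) directly and keeps precision when p is tiny. A drop-in sketch:

import torch

def expected_count(probs, num_tries):
    # equals 1 - (1 - p)^num_tries, accurate even for very small p
    return -torch.expm1(num_tries * torch.log1p(-probs))

p = torch.tensor([1e-8, 1e-4, 0.1])
print(expected_count(p, 50))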
Example 10: bbox_transform_inv

import torch

def bbox_transform_inv(boxes, deltas):
    # Inputs should both be tensors (or both Variables) on the same device
    if len(boxes) == 0:
        return deltas.detach() * 0

    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    dx = deltas[:, 0::4]
    dy = deltas[:, 1::4]
    dw = deltas[:, 2::4]
    dh = deltas[:, 3::4]

    pred_ctr_x = dx * widths.unsqueeze(1) + ctr_x.unsqueeze(1)
    pred_ctr_y = dy * heights.unsqueeze(1) + ctr_y.unsqueeze(1)
    # exp decodes the log-space width/height regression targets
    pred_w = torch.exp(dw) * widths.unsqueeze(1)
    pred_h = torch.exp(dh) * heights.unsqueeze(1)

    pred_boxes = torch.cat(
        [_.unsqueeze(2) for _ in [pred_ctr_x - 0.5 * pred_w,
                                  pred_ctr_y - 0.5 * pred_h,
                                  pred_ctr_x + 0.5 * pred_w,
                                  pred_ctr_y + 0.5 * pred_h]], 2).view(len(boxes), -1)
    return pred_boxes
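Toy usage sketch: with zero deltas, exp(0) = 1 leaves widths and heights unchanged, so the decoded box reproduces the input up to the +1 width/height convention used above:

import torch

boxes = torch.tensor([[10.0, 10.0, 50.0, 30.0]])  # (x1, y1, x2, y2)
deltas = torch.zeros(1, 4)
print(bbox_transform_inv(boxes, deltas))  # tensor([[10., 10., 51., 31.]])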
Example 11: guide

def guide():
    mu_q = pyro.param("mu_q", Variable(self.analytic_mu_n.data + 0.334 * torch.ones(2),
                                       requires_grad=True))
    log_sig_q = pyro.param("log_sig_q", Variable(
        self.analytic_log_sig_n.data - 0.29 * torch.ones(2),
        requires_grad=True))
    mu_q_prime = pyro.param("mu_q_prime", Variable(torch.Tensor([-0.34, 0.52]),
                                                   requires_grad=True))
    kappa_q = pyro.param("kappa_q", Variable(torch.Tensor([0.74]),
                                             requires_grad=True))
    log_sig_q_prime = pyro.param("log_sig_q_prime",
                                 Variable(-0.5 * torch.log(1.2 * self.lam0.data),
                                          requires_grad=True))
    # exponentiate the log-scale parameters to recover positive scales
    sig_q, sig_q_prime = torch.exp(log_sig_q), torch.exp(log_sig_q_prime)
    mu_latent_dist = dist.Normal(mu_q, sig_q, reparameterized=repa2)
    mu_latent = pyro.sample("mu_latent", mu_latent_dist,
                            baseline=dict(use_decaying_avg_baseline=use_decaying_avg_baseline))
    mu_latent_prime_dist = dist.Normal(kappa_q.expand_as(mu_latent) * mu_latent + mu_q_prime,
                                       sig_q_prime,
                                       reparameterized=repa1)
    pyro.sample("mu_latent_prime",
                mu_latent_prime_dist,
                baseline=dict(nn_baseline=mu_prime_baseline,
                              nn_baseline_input=mu_latent,
                              use_decaying_avg_baseline=use_decaying_avg_baseline))
    return mu_latent
Example 12: model

def model(num_particles):
    with pyro.iarange("particles", num_particles):
        q3 = pyro.param("q3", torch.tensor(pi3, requires_grad=True))
        q4 = pyro.param("q4", torch.tensor(0.5 * (pi1 + pi2), requires_grad=True))
        z = pyro.sample("z", dist.Normal(q3, 1.0).expand_by([num_particles]))
        zz = torch.exp(z) / (1.0 + torch.exp(z))  # logistic sigmoid of z
        pyro.sample("y", dist.Bernoulli(q4 * zz))
Example 13: get_negative_expectation

import math
import torch
import torch.nn.functional as F

def get_negative_expectation(q_samples, measure, average=True):
    log_2 = math.log(2.)

    if measure == 'GAN':
        Eq = F.softplus(-q_samples) + q_samples
    elif measure == 'JSD':
        Eq = F.softplus(-q_samples) + q_samples - log_2
    elif measure == 'X2':
        Eq = -0.5 * ((torch.sqrt(q_samples ** 2) + 1.) ** 2)
    elif measure == 'KL':
        Eq = torch.exp(q_samples)
    elif measure == 'RKL':
        Eq = q_samples - 1.
    elif measure == 'DV':
        Eq = log_sum_exp(q_samples, 0) - math.log(q_samples.size(0))
    elif measure == 'H2':
        Eq = torch.exp(q_samples) - 1.
    elif measure == 'W1':
        Eq = q_samples
    else:
        raise_measure_error(measure)  # helper from the same module

    if average:
        return Eq.mean()
    else:
        return Eq
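Usage sketch pairing the two halves: an f-divergence lower bound (here JSD) is the positive expectation on joint samples minus the negative expectation on marginal samples, both computed from critic outputs (toy values, assuming both definitions above are in scope):

import torch

p_samples = torch.randn(32) + 1.0  # critic scores on positive pairs
q_samples = torch.randn(32) - 1.0  # critic scores on negative pairs
mi_est = (get_positive_expectation(p_samples, 'JSD')
          - get_negative_expectation(q_samples, 'JSD'))
print(mi_est.item())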
Example 14: mmd

import math
import torch

def mmd(Mxx, Mxy, Myy, sigma):
    scale = Mxx.mean()
    Mxx = torch.exp(-Mxx / (scale * 2 * sigma * sigma))
    Mxy = torch.exp(-Mxy / (scale * 2 * sigma * sigma))
    Myy = torch.exp(-Myy / (scale * 2 * sigma * sigma))
    # clamp at zero before the sqrt: the estimate can dip slightly
    # negative from numerical error
    gap = Mxx.mean() + Myy.mean() - 2 * Mxy.mean()
    return math.sqrt(torch.clamp(gap, min=0.0))
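A sketch of feeding mmd() from raw samples, with the Gram matrices built as squared Euclidean distances via torch.cdist (an assumed input convention; the original call site is not shown):

import torch

x = torch.randn(100, 16)
y = torch.randn(100, 16)
Mxx = torch.cdist(x, x).pow(2)
Mxy = torch.cdist(x, y).pow(2)
Myy = torch.cdist(y, y).pow(2)
print(mmd(Mxx, Mxy, Myy, sigma=1.0))  # near 0 for identically distributed x, y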
Example 15: guide

def guide():
    alpha_q_log = pyro.param("alpha_q_log",
                             Variable(self.log_alpha_n.data + 0.17, requires_grad=True))
    beta_q_log = pyro.param("beta_q_log",
                            Variable(self.log_beta_n.data - 0.143, requires_grad=True))
    # exponentiate the log-parameters to get valid (positive) Beta parameters
    alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
    pyro.sample("p_latent", dist.beta, alpha_q, beta_q)
    pyro.map_data("aaa", self.data, lambda i, x: None, batch_size=self.batch_size)
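The log-parameterization trick in isolation: the guide optimizes alpha_q_log and beta_q_log unconstrained, and exponentiating guarantees strictly positive Beta parameters while gradients flow through exp. A minimal sketch:

import torch

alpha_q_log = torch.zeros(1, requires_grad=True)  # unconstrained parameter
alpha_q = torch.exp(alpha_q_log)                  # always > 0 by construction
print(alpha_q.item())  # 1.0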