This article collects and summarizes typical code examples of the Variable.mul method from Python's torch.autograd module. If you are wondering what Variable.mul does, how to call it, or want to see it used in context, the curated examples below may help. You can also explore further usage examples of the containing class, torch.autograd.Variable.
The following shows 15 code examples of Variable.mul, sorted by popularity by default.
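Before the longer examples, here is a minimal sketch of Variable.mul in isolation, using the legacy (pre-0.4) Variable API that these examples target; the tensor values are illustrative only:

import torch
from torch.autograd import Variable

a = Variable(torch.FloatTensor([1.0, 2.0, 3.0]), requires_grad=True)
b = Variable(torch.FloatTensor([4.0, 5.0, 6.0]))

# Variable.mul is element-wise multiplication (out-of-place);
# mul_ is the in-place variant.
c = a.mul(b)      # values: [4., 10., 18.]
d = a.mul(2.0)    # scalars broadcast: [2., 4., 6.]

c.sum().backward()  # gradients flow through mul: a.grad holds b's values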
Example 1: backward
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def backward(ctx, grad_output):
    input1, input2, weight, bias = ctx.saved_variables
    grad_input1 = grad_input2 = grad_weight = grad_bias = None
    buff = Variable(input1.data.new())

    if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
        # gradients w.r.t. the two inputs of the bilinear form
        grad_input1 = torch.mm(input2, weight[0].t())
        grad_input1 = grad_input1.mul(grad_output.narrow(1, 0, 1).expand(grad_input1.size()))
        grad_input2 = torch.mm(input1, weight[0])
        grad_input2 = grad_input2.mul(grad_output.narrow(1, 0, 1).expand(grad_input2.size()))
        for k in range(1, weight.size(0)):
            buff = input2.mm(weight[k].t())
            buff = buff.mul(grad_output.narrow(1, k, 1).expand(grad_input1.size()))
            grad_input1.add_(buff)
            buff = input1.mm(weight[k])
            buff = buff.mul(grad_output.narrow(1, k, 1).expand(grad_input2.size()))
            grad_input2.add_(buff)

    grad_weight = Variable(weight.data.new(weight.size()))
    if ctx.needs_input_grad[2]:
        # accumulate parameter gradients:
        for k in range(weight.size(0)):
            buff = input1.mul(grad_output.narrow(1, k, 1).expand_as(input1))
            grad_weight[k] = torch.mm(buff.t(), input2)

    if bias is not None and ctx.needs_input_grad[3]:
        grad_bias = grad_output.sum(0, keepdim=False)

    return grad_input1, grad_input2, grad_weight, grad_bias
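The backward above belongs to a custom autograd Function (it matches PyTorch's bilinear layer), and Variable.mul scales intermediate gradient buffers by slices of grad_output. As a smaller, self-contained sketch of the same pattern, here is a hypothetical toy Function, not taken from the source:

import torch
from torch.autograd import Variable

class Scale(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, weight):
        ctx.save_for_backward(input, weight)
        return input * weight

    @staticmethod
    def backward(ctx, grad_output):
        input, weight = ctx.saved_variables
        # chain rule for element-wise multiplication
        grad_input = grad_output.mul(weight)
        grad_weight = grad_output.mul(input)
        return grad_input, grad_weight

x = Variable(torch.ones(2, 3), requires_grad=True)
w = Variable(torch.ones(2, 3) * 2, requires_grad=True)
Scale.apply(x, w).sum().backward()  # x.grad is all 2s, w.grad is all 1s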
Example 2: sample
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def sample(self, mu, logvar, k):
    if torch.cuda.is_available():
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_()).cuda()  # [P,B,Z]
        z = eps.mul(torch.exp(.5*logvar)) + mu  # [P,B,Z]
        logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size).cuda()),
                          Variable(torch.zeros(self.B, self.z_size)).cuda())  # [P,B]
        # logqz = lognormal(z, mu, logvar)
        logqz = lognormal(z, Variable(mu.data), Variable(logvar.data))
    else:
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_())  # [P,B,Z]
        z = eps.mul(torch.exp(.5*logvar)) + mu  # [P,B,Z]
        logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size)),
                          Variable(torch.zeros(self.B, self.z_size)))  # [P,B]
        logqz = lognormal(z, mu, logvar)
    return z, logpz, logqz
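Several of these examples call a lognormal helper that is not shown on this page. A typical implementation, assumed here from how it is called (lognormal(z, mean, logvar) -> [P,B]), evaluates the log-density of a diagonal Gaussian:

import math
import torch

def lognormal(z, mean, logvar):
    # z: [P,B,Z]; mean, logvar: [B,Z] (broadcast over the sample dim P)
    # log N(z; mean, exp(logvar)) summed over the Z dimension -> [P,B]
    return -0.5 * (logvar + (z - mean).pow(2) / logvar.exp()
                   + math.log(2 * math.pi)).sum(-1)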
Example 3: sample
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def sample(self, mu, logvar, k):
    if torch.cuda.is_available():
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_()).cuda()  # [P,B,Z]
        z = eps.mul(torch.exp(.5*logvar)) + mu  # [P,B,Z]
        logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size).cuda()),
                          Variable(torch.zeros(self.B, self.z_size)).cuda())  # [P,B]
        logqz = lognormal(z, mu, logvar)
    else:
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_())  # [P,B,Z]
        z = eps.mul(torch.exp(.5*logvar)) + mu  # [P,B,Z]
        logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size)),
                          Variable(torch.zeros(self.B, self.z_size)))  # [P,B]
        logqz = lognormal(z, mu, logvar)
    return z, logpz, logqz
Example 4: reparametrize
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def reparametrize(self, mu, logvar):
    std = logvar.mul(0.5).exp_()  # standard deviation
    # sample eps from a standard normal distribution
    eps = torch.FloatTensor(std.size()).normal_()
    eps = Variable(eps)
    out = eps.mul(std).add_(mu)
    return out
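In context, reparametrize is typically called between the encoder and decoder of a VAE so that the sampling step stays differentiable with respect to mu and logvar. A hedged usage sketch, where encode and decode are hypothetical module methods not shown in the source:

def forward(self, x):
    mu, logvar = self.encode(x)          # hypothetical encoder
    z = self.reparametrize(mu, logvar)   # differentiable sample of z
    return self.decode(z), mu, logvar    # hypothetical decoder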
Example 5: forward
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def forward(self, k, x, logposterior):
    '''
    k: number of samples
    x: [B,X]
    logposterior(z) -> [P,B]
    '''
    self.B = x.size()[0]
    self.P = k

    # Encode
    out = x
    for i in range(len(self.encoder_weights)-1):
        out = self.act_func(self.encoder_weights[i](out))
    out = self.encoder_weights[-1](out)
    mean = out[:, :self.z_size]
    logvar = out[:, self.z_size:]

    # Sample
    eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_().type(self.dtype))  # [P,B,Z]
    z = eps.mul(torch.exp(.5*logvar)) + mean  # [P,B,Z]
    logqz = lognormal(z, mean, logvar)  # [P,B]

    # Apply the normalizing flows, accumulating the log-determinants
    logdetsum = 0.
    for i in range(self.n_flows):
        z, logdet = self.norm_flow(self.params[i], z)
        logdetsum += logdet

    return z, logqz - logdetsum
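norm_flow above is model-specific and not shown on this page. One common choice that matches the (z, logdet) interface is a planar flow (Rezende & Mohamed, 2015); the sketch below is an assumption about that interface, not the source's implementation, and u is assumed constrained so the flow stays invertible:

def planar_flow(params, z):
    # params: (u, w, b) with u, w of shape [Z]; z: [P,B,Z]
    u, w, b = params
    pre = z.matmul(w) + b  # w . z + b -> [P,B]
    z_new = z + u * torch.tanh(pre).unsqueeze(-1)  # z' = z + u * tanh(w.z + b)
    # log|det dz'/dz| = log|1 + u . (1 - tanh^2(w.z + b)) w|
    psi = (1 - torch.tanh(pre).pow(2)).unsqueeze(-1) * w  # [P,B,Z]
    logdet = torch.log(torch.abs(1 + psi.matmul(u)))  # [P,B]
    return z_new, logdet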
Example 6: reparameterize
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def reparameterize(self, mu, logvar):
    if self.training:
        std = logvar.mul(0.5).exp_()
        eps = Variable(std.data.new(std.size()).normal_())
        return eps.mul(std).add_(mu)
    else:
        # at evaluation time, return the posterior mean deterministically
        return mu
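Branching on self.training ties the stochastic behavior to nn.Module's mode flag, which the standard train()/eval() calls toggle (model here is a hypothetical instance):

model.train()  # self.training is True: reparameterize draws a sample
model.eval()   # self.training is False: reparameterize returns mu deterministically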
Example 7: reparametrize
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def reparametrize(self, mu, logvar):
    std = logvar.mul(0.5).exp_()
    if args.cuda:
        eps = torch.cuda.FloatTensor(std.size()).normal_()
    else:
        eps = torch.FloatTensor(std.size()).normal_()
    eps = Variable(eps)
    return eps.mul(std).add_(mu)
Example 8: sample_z
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def sample_z(self, mu, logvar, k):
    B = mu.size()[0]
    eps = Variable(torch.FloatTensor(k, B, self.z_size).normal_().type(self.dtype))  # [P,B,Z]
    z = eps.mul(torch.exp(.5*logvar)) + mu  # [P,B,Z]
    logpz = lognormal(z, Variable(torch.zeros(B, self.z_size).type(self.dtype)),
                      Variable(torch.zeros(B, self.z_size)).type(self.dtype))  # [P,B]
    logqz = lognormal(z, mu, logvar)
    return z, logpz, logqz
Example 9: sample
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def sample(self, k):
    '''
    k: number of samples
    output: [k,X]
    '''
    eps = Variable(torch.FloatTensor(k, self.x_size).normal_())  # [k,X]
    z = eps.mul(torch.exp(.5*self.logvar)) + self.mean  # [k,X]
    return z
Example 10: sample
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def sample(self, mean, logvar, k):
    self.B = mean.size()[0]
    eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_().type(self.dtype))  # [P,B,Z]
    z = eps.mul(torch.exp(.5*logvar)) + mean  # [P,B,Z]
    logqz = lognormal(z, mean, logvar)  # [P,B]
    return z, logqz
Example 11: reparametrize
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def reparametrize(self, mu, logvar):
    """
    z = mu + eps * sqrt(exp(logvar))
    The second factor is obtained by interpreting logvar as the log-variance
    of z and observing that sqrt(exp(logvar)) == exp(0.5 * logvar).
    """
    eps = Variable(logvar.data.new(*logvar.size()).normal_())
    std = logvar.mul(0.5).exp_()
    return eps.mul(std).add_(mu)
Example 12: sample
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def sample(self, mu, logvar, k):
    eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_())  # [P,B,Z]
    z = eps.mul(torch.exp(.5*logvar)) + mu  # [P,B,Z]
    # z = eps.mul(torch.exp(.5*logvar)) + mu.detach()  # [P,B,Z]
    logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size)),
                      Variable(torch.zeros(self.B, self.z_size)))  # [P,B]
    # logqz = lognormal(z, mu, logvar)
    logqz = lognormal(z, mu.detach(), logvar.detach())
    return z, logpz, logqz
Example 13: reparameterize
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def reparameterize(self, mu, logvar):
    '''
    mu: mean
    logvar: log(variance); working in log space avoids overflow for large variances
    '''
    std = logvar.mul(0.5).exp_()
    if torch.cuda.is_available():
        eps = torch.cuda.FloatTensor(mu.size()).normal_()
    else:
        eps = torch.FloatTensor(mu.size()).normal_()
    eps = Variable(eps)
    return eps.mul(std).add_(mu)
Example 14: forward
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def forward(self, xs):
    xs_emb = self.lt(xs)
    if self.freqs is not None:
        # tf-idf weighted embeddings
        l = xs.size(1)
        w = Variable(torch.Tensor(l))
        for i in range(l):
            w[i] = self.freqs[xs.data[0][i]]
        w = w.mul(1 / w.norm())
        xs_emb = xs_emb.squeeze(0).t().matmul(w.unsqueeze(1)).t()
    else:
        # basic embeddings (faster): average over the sequence dimension
        xs_emb = xs_emb.mean(1)
    return xs_emb
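The tf-idf branch computes a weighted average of the token embeddings: each token's embedding is scaled by its L2-normalized frequency weight. On toy tensors (hypothetical sizes, not from the source):

import torch
from torch.autograd import Variable

emb = Variable(torch.randn(4, 8))              # 4 tokens, embedding dim 8
w = Variable(torch.Tensor([3., 1., 1., 2.]))   # per-token weights
w = w.mul(1 / w.norm())                        # L2-normalize the weights
weighted = emb.t().matmul(w.unsqueeze(1)).t()  # [1, 8] weighted combination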
Example 15: sample
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul [as alias]
def sample(self, mu, logvar, k):
    B = mu.size()[0]
    eps = Variable(torch.FloatTensor(k, B, self.z_size).normal_().type(self.dtype))  # [P,B,Z]
    z = eps.mul(torch.exp(.5*logvar)) + mu  # [P,B,Z]
    logqz = lognormal(z, mu, logvar)  # [P,B]
    if self.flow_bool:
        z, logdet = self.q_dist.forward(z)  # [P,B,Z], [P,B]
        logqz = logqz - logdet
    logpz = lognormal(z, Variable(torch.zeros(B, self.z_size).type(self.dtype)),
                      Variable(torch.zeros(B, self.z_size)).type(self.dtype))  # [P,B]
    return z, logpz, logqz
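The (z, logpz, logqz) triple returned by these sample methods is typically combined with the decoder likelihood to form an importance-weighted bound. A hedged sketch of that final step, where logpx and the logmeanexp helper are assumptions not defined on this page:

# log w = log p(x|z) + log p(z) - log q(z|x), one weight per sample  [P,B]
log_w = logpx + logpz - logqz
# IWAE-style bound: log-mean-exp over the P samples, averaged over the batch
elbo = logmeanexp(log_w, dim=0).mean()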