当前位置: 首页>>代码示例>>Python>>正文


Python Variable.mul方法代码示例

本文整理汇总了Python中torch.autograd.Variable.mul方法的典型用法代码示例。如果您正苦于以下问题:Python Variable.mul方法的具体用法?Python Variable.mul怎么用?Python Variable.mul使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在torch.autograd.Variable的用法示例。


在下文中一共展示了Variable.mul方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: backward

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
    def backward(ctx, grad_output):
        """Backward pass of a bilinear layer.

        The forward computed out[b, k] = input1[b] @ weight[k] @ input2[b]^T
        (+ bias), so per output channel k:
            d/d input1   = grad_output[:, k] * (input2 @ weight[k]^T)
            d/d input2   = grad_output[:, k] * (input1 @ weight[k])
            d/d weight[k] = (grad_output[:, k] * input1)^T @ input2

        Args:
            ctx: autograd context carrying saved (input1, input2, weight, bias)
                and the needs_input_grad flags.
            grad_output: [B, K] gradient w.r.t. the layer output.

        Returns:
            (grad_input1, grad_input2, grad_weight, grad_bias); any entry whose
            gradient was not requested is None.
        """
        input1, input2, weight, bias = ctx.saved_variables
        grad_input1 = grad_input2 = grad_weight = grad_bias = None

        if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
            # Channel 0 initializes the accumulators; remaining channels add in.
            grad_input1 = torch.mm(input2, weight[0].t())
            grad_input1 = grad_input1.mul(grad_output.narrow(1, 0, 1).expand(grad_input1.size()))
            grad_input2 = torch.mm(input1, weight[0])
            grad_input2 = grad_input2.mul(grad_output.narrow(1, 0, 1).expand(grad_input2.size()))

            for k in range(1, weight.size(0)):
                buff = input2.mm(weight[k].t())
                buff = buff.mul(grad_output.narrow(1, k, 1).expand(grad_input1.size()))
                grad_input1.add_(buff)

                buff = input1.mm(weight[k])
                buff = buff.mul(grad_output.narrow(1, k, 1).expand(grad_input2.size()))
                grad_input2.add_(buff)

        if ctx.needs_input_grad[2]:
            # Allocate only when actually needed. The original allocated
            # uninitialized storage unconditionally and returned that garbage
            # tensor even when no weight gradient was requested; autograd
            # convention is to return None instead.
            grad_weight = Variable(weight.data.new(weight.size()))
            # accumulate parameter gradients:
            for k in range(weight.size(0)):
                buff = input1.mul(grad_output.narrow(1, k, 1).expand_as(input1))
                grad_weight[k] = torch.mm(buff.t(), input2)

        if bias is not None and ctx.needs_input_grad[3]:
            grad_bias = grad_output.sum(0, keepdim=False)

        return grad_input1, grad_input2, grad_weight, grad_bias
开发者ID:Jsmilemsj,项目名称:pytorch,代码行数:34,代码来源:linear.py

示例2: sample

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
    def sample(self, mu, logvar, k):
        """Draw k reparameterized samples z = mu + eps * exp(logvar / 2).

        Returns (z [P,B,Z], logpz [P,B], logqz [P,B]).
        NOTE(review): on the CUDA path logqz is computed from *detached*
        copies of mu/logvar (Variable(mu.data)) while the CPU path uses them
        directly — this asymmetry is preserved exactly as in the original.
        """
        on_gpu = torch.cuda.is_available()

        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_())  # [P,B,Z]
        if on_gpu:
            eps = eps.cuda()

        z = eps.mul(torch.exp(.5 * logvar)) + mu  # [P,B,Z]

        if on_gpu:
            logpz = lognormal(z,
                              Variable(torch.zeros(self.B, self.z_size).cuda()),
                              Variable(torch.zeros(self.B, self.z_size)).cuda())  # [P,B]
            logqz = lognormal(z, Variable(mu.data), Variable(logvar.data))
        else:
            logpz = lognormal(z,
                              Variable(torch.zeros(self.B, self.z_size)),
                              Variable(torch.zeros(self.B, self.z_size)))  # [P,B]
            logqz = lognormal(z, mu, logvar)

        return z, logpz, logqz
开发者ID:chriscremer,项目名称:Other_Code,代码行数:34,代码来源:vae_with_policy.py

示例3: sample

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
 def sample(self, mu, logvar, k):
     """Reparameterized sampling: z = mu + eps * exp(logvar / 2).

     Draws k samples per batch element; returns (z [P,B,Z], logpz [P,B],
     logqz [P,B]) scored under the standard-normal prior / the posterior.
     """
     B, Z = self.B, self.z_size
     eps = Variable(torch.FloatTensor(k, B, Z).normal_())  # [P,B,Z]
     if torch.cuda.is_available():
         eps = eps.cuda()
         prior_mean = Variable(torch.zeros(B, Z).cuda())
         prior_logvar = Variable(torch.zeros(B, Z)).cuda()
     else:
         prior_mean = Variable(torch.zeros(B, Z))
         prior_logvar = Variable(torch.zeros(B, Z))
     z = eps.mul(torch.exp(.5 * logvar)) + mu  # [P,B,Z]
     logpz = lognormal(z, prior_mean, prior_logvar)  # [P,B]
     logqz = lognormal(z, mu, logvar)
     return z, logpz, logqz
开发者ID:chriscremer,项目名称:Other_Code,代码行数:16,代码来源:vae_deconv.py

示例4: reparametrize

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
 def reparametrize(self, mu, logvar):
     """Reparameterization trick: return mu + eps * std with eps ~ N(0, I).

     `logvar` is log(sigma^2), so the standard deviation is exp(logvar / 2).
     """
     std = torch.exp(logvar * 0.5)
     # Standard-normal noise shaped like the std tensor.
     noise = Variable(torch.FloatTensor(std.size()).normal_())
     return noise.mul(std).add_(mu)
开发者ID:lovemyid,项目名称:autoencoder_project,代码行数:9,代码来源:VAE-FIRE.py

示例5: forward

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
    def forward(self, k, x, logposterior):
        """Encode x, sample k latent draws, and push them through the flows.

        Args:
            k: number of samples per datapoint.
            x: [B,X] input batch.
            logposterior: callable z -> [P,B] (kept for interface
                compatibility; not used directly in this method).

        Returns:
            z: [P,B,Z] flow-transformed samples.
            [P,B] log q(z) minus the accumulated flow log-determinants.
        """
        self.B = x.size()[0]
        self.P = k

        # Encoder MLP: activation on every layer except the last, which
        # linearly produces the concatenated [mean | logvar].
        h = x
        for layer_idx in range(len(self.encoder_weights) - 1):
            h = self.act_func(self.encoder_weights[layer_idx](h))
        h = self.encoder_weights[-1](h)
        mean = h[:, :self.z_size]
        logvar = h[:, self.z_size:]

        # Reparameterized base sample from q(z|x).
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_().type(self.dtype))  # [P,B,Z]
        z = eps.mul(torch.exp(.5 * logvar)) + mean  # [P,B,Z]
        logqz = lognormal(z, mean, logvar)  # [P,B]

        # Apply the normalizing flows, accumulating log|det Jacobian|.
        logdetsum = 0.
        for flow_idx in range(self.n_flows):
            z, logdet = self.norm_flow(self.params[flow_idx], z)
            logdetsum += logdet

        return z, logqz - logdetsum
开发者ID:chriscremer,项目名称:Other_Code,代码行数:33,代码来源:approx_posteriors_v6.py

示例6: reparameterize

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
 def reparameterize(self, mu, logvar):
     """Sample z = mu + sigma * eps while training; return the mean at eval."""
     if not self.training:
         return mu
     sigma = logvar.mul(0.5).exp()
     # Noise allocated on the same device/dtype as sigma via .new().
     noise = Variable(sigma.data.new(sigma.size()).normal_())
     return noise.mul(sigma).add_(mu)
开发者ID:Biocodings,项目名称:examples,代码行数:9,代码来源:main.py

示例7: reparametrize

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
 def reparametrize(self, mu, logvar):
     """Return mu + std * eps; eps is drawn on the GPU when args.cuda is set.

     NOTE(review): relies on a module-level `args` namespace — confirm it is
     defined before this is called.
     """
     std = torch.exp(logvar * 0.5)
     tensor_ctor = torch.cuda.FloatTensor if args.cuda else torch.FloatTensor
     eps = Variable(tensor_ctor(std.size()).normal_())
     return eps.mul(std).add_(mu)
开发者ID:episodeyang,项目名称:deep_learning_notes,代码行数:10,代码来源:vae.py

示例8: sample_z

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
    def sample_z(self, mu, logvar, k):
        """Sample z ~ q(z|x) via reparameterization and score it.

        Returns (z [P,B,Z], logpz [P,B], logqz [P,B]) under the
        standard-normal prior and the Gaussian posterior respectively.
        """
        batch = mu.size()[0]
        eps = Variable(torch.FloatTensor(k, batch, self.z_size).normal_().type(self.dtype))  # [P,B,Z]
        z = mu + eps * torch.exp(.5 * logvar)  # [P,B,Z]
        prior_mean = Variable(torch.zeros(batch, self.z_size).type(self.dtype))
        prior_logvar = Variable(torch.zeros(batch, self.z_size)).type(self.dtype)
        logpz = lognormal(z, prior_mean, prior_logvar)  # [P,B]
        logqz = lognormal(z, mu, logvar)
        return z, logpz, logqz
开发者ID:chriscremer,项目名称:Other_Code,代码行数:11,代码来源:bvae_pytorch4_plot_true_posterior.py

示例9: sample

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
    def sample(self, k):
        """Draw k reparameterized samples from N(self.mean, exp(self.logvar)).

        Returns a [k, X] tensor.
        """
        noise = Variable(torch.FloatTensor(k, self.x_size).normal_())  # [k,X]
        return self.mean + noise * torch.exp(.5 * self.logvar)
开发者ID:chriscremer,项目名称:Other_Code,代码行数:11,代码来源:1d_examples_pytorch.py

示例10: sample

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
    def sample(self, mean, logvar, k):
        """Sample k times from q(z|x) = N(mean, exp(logvar)) and score it.

        Returns (z [P,B,Z], logqz [P,B]). Side effect: caches the batch
        size in self.B, exactly as the original does.
        """
        self.B = mean.size()[0]
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_().type(self.dtype))  # [P,B,Z]
        z = mean + eps * torch.exp(.5 * logvar)  # [P,B,Z]
        logqz = lognormal(z, mean, logvar)  # [P,B]
        return z, logqz
开发者ID:chriscremer,项目名称:Other_Code,代码行数:11,代码来源:distributions.py

示例11: reparametrize

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
    def reparametrize(self, mu, logvar):
        """Reparameterization trick: z = mu + eps * exp(logvar / 2).

        Reading `logvar` as log(sigma^2), the standard deviation is
        sqrt(exp(logvar)) == exp(logvar / 2); eps ~ N(0, I).
        """
        sigma = logvar.mul(0.5).exp()
        # Noise matches logvar's device/dtype via .new().
        noise = Variable(logvar.data.new(*logvar.size()).normal_())
        return noise.mul(sigma).add_(mu)
开发者ID:mikekestemont,项目名称:seqmod,代码行数:12,代码来源:vae.py

示例12: sample

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
    def sample(self, mu, logvar, k):
        """k reparameterized draws plus prior/posterior log-densities.

        logqz is evaluated with *detached* posterior parameters, so no
        gradient reaches mu/logvar through the density term (only through
        z itself) — preserved from the original.
        """
        eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_())  # [P,B,Z]
        z = mu + eps * torch.exp(.5 * logvar)  # [P,B,Z]

        prior_loc = Variable(torch.zeros(self.B, self.z_size))
        prior_logvar = Variable(torch.zeros(self.B, self.z_size))
        logpz = lognormal(z, prior_loc, prior_logvar)  # [P,B]
        logqz = lognormal(z, mu.detach(), logvar.detach())

        return z, logpz, logqz
开发者ID:chriscremer,项目名称:Other_Code,代码行数:13,代码来源:iwae.py

示例13: reparameterize

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
 def reparameterize(self, mu, logvar):
     """Reparameterization trick: z = mu + std * eps, eps ~ N(0, I).

     `logvar` is log(sigma^2) — working in log space avoids overflow for
     large variances. Noise is drawn on the GPU when CUDA is available.
     """
     std = logvar.mul(0.5).exp_()
     tensor_ctor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
     eps = Variable(tensor_ctor(mu.size()).normal_())
     return eps.mul(std).add_(mu)
开发者ID:MintYiqingchen,项目名称:tfStudy,代码行数:14,代码来源:vae_torch.py

示例14: forward

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
 def forward(self, xs):
     """Embed token indices.

     With self.freqs set, returns a tf-idf-weighted combination of the
     token embeddings (assumes a single-sequence batch — only xs.data[0]
     is read; TODO confirm with callers). Otherwise returns the mean
     embedding over the sequence.
     """
     emb = self.lt(xs)
     if self.freqs is None:
         # basic embeddings (faster): average over the sequence dimension
         return emb.mean(1)
     # tfidf embeddings: weight each position by its normalized frequency
     seq_len = xs.size(1)
     w = Variable(torch.Tensor(seq_len))
     for pos in range(seq_len):
         w[pos] = self.freqs[xs.data[0][pos]]
     w = w.mul(1 / w.norm())
     return emb.squeeze(0).t().matmul(w.unsqueeze(1)).t()
开发者ID:ahiroto,项目名称:ParlAI,代码行数:16,代码来源:modules.py

示例15: sample

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import mul [as 别名]
    def sample(self, mu, logvar, k):
        """Sample k latents, optionally refine through a flow, and score them.

        Returns (z [P,B,Z], logpz [P,B], logqz [P,B]); when self.flow_bool
        is set, z is the flow output and logqz includes the -log|det J| term.
        """
        batch = mu.size()[0]

        eps = Variable(torch.FloatTensor(k, batch, self.z_size).normal_().type(self.dtype))  # [P,B,Z]
        z = mu + eps * torch.exp(.5 * logvar)  # [P,B,Z]
        logqz = lognormal(z, mu, logvar)  # [P,B]

        # Optional normalizing flow on top of the base sample.
        if self.flow_bool:
            z, logdet = self.q_dist.forward(z)
            logqz = logqz - logdet

        # Prior density of the (possibly transformed) sample.
        logpz = lognormal(z, Variable(torch.zeros(batch, self.z_size).type(self.dtype)),
                          Variable(torch.zeros(batch, self.z_size)).type(self.dtype))  # [P,B]

        return z, logpz, logqz
开发者ID:chriscremer,项目名称:Other_Code,代码行数:19,代码来源:pytorch_vae3.py


注:本文中的torch.autograd.Variable.mul方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。