

Python Variable.sum Method Code Examples

This article collects typical usage examples of the Python method torch.autograd.Variable.sum. If you are wondering how Variable.sum is used in Python, how to call it, or what a Variable.sum example looks like, the curated code samples below may help. You can also explore further usage examples of torch.autograd.Variable, the class this method belongs to.


A total of 11 code examples of the Variable.sum method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
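Before diving into the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) of what Variable.sum does under the legacy torch.autograd.Variable API; the tensor values are made up for illustration.

import torch
from torch.autograd import Variable

v = Variable(torch.ones(2, 3), requires_grad=True)
total = v.sum()        # sum over all elements
row_sums = v.sum(1)    # sum along dimension 1, i.e. one sum per row
total.backward()       # gradients flow back through the reduction
print(v.grad)          # a 2x3 Variable filled with ones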

Example 1: create_input

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
def create_input(points, sigma2):
    # dtype, dim and normalize are module-level globals defined elsewhere in kmeans.py
    bs, N, _ = points.size()  # points has size (bs, N, 2)
    OP = torch.zeros(bs,N,N,4).type(dtype)
    E = torch.eye(N).type(dtype).unsqueeze(0).expand(bs,N,N)
    OP[:,:,:,0] = E
    W = points.unsqueeze(1).expand(bs,N,N,dim) - points.unsqueeze(2).expand(bs,N,N,dim)
    dists2 = (W * W).sum(3)
    dists = torch.sqrt(dists2)
    W = torch.exp(-dists2 / sigma2)
    OP[:,:,:,1] = W
    D = E * W.sum(2,True).expand(bs,N,N)
    OP[:,:,:,2] = D
    U = (torch.ones(N,N).type(dtype)/N).unsqueeze(0).expand(bs,N,N)
    OP[:,:,:,3] = U
    OP = Variable(OP)
    x = Variable(points)
    Y = Variable(W.clone())

    # Normalize inputs
    if normalize:
        mu = x.sum(1)/N
        mu_ext = mu.unsqueeze(1).expand_as(x)
        var = ((x - mu_ext)*(x - mu_ext)).sum(1)/N
        var_ext = var.unsqueeze(1).expand_as(x)
        x = x - mu_ext
        x = x/(10 * var_ext)

    return (OP, x, Y), dists
Author: ParsonsZeng, Project: DiCoNet, Lines of code: 30, Source: kmeans.py
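A hypothetical way to call create_input (the module-level globals dtype, dim and normalize are assumed values here, not taken from the DiCoNet project):

dtype = torch.FloatTensor      # assumed; the project may instead use torch.cuda.FloatTensor
dim = 2                        # the points live in the plane
normalize = True
points = torch.rand(4, 10, 2).type(dtype)             # bs=4 batches of N=10 planar points
(OP, x, Y), dists = create_input(points, sigma2=1.0)
# OP stacks identity, Gaussian-affinity, degree and uniform operators; dists holds pairwise distances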

Example 2: uniform_weights

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
def uniform_weights(x, x_mask):
    """Return uniform weights over non-masked input."""
    alpha = Variable(torch.ones(x.size(0), x.size(1)))
    if x.data.is_cuda:
        alpha = alpha.cuda()
    alpha = alpha * x_mask.eq(0).float()
    alpha = alpha / alpha.sum(1).expand(alpha.size())
    return alpha
Author: ahiroto, Project: ParlAI, Lines of code: 10, Source: layers.py
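A hypothetical call of uniform_weights (the shapes and mask values are made up; the snippet relies on the legacy PyTorch behavior in which sum(1) keeps the reduced dimension, so that the expand succeeds):

x = Variable(torch.randn(2, 4, 8))                  # batch * len * hdim
x_mask = Variable(torch.ByteTensor([[0, 0, 0, 1],   # last position of sequence 0 is padding
                                    [0, 0, 0, 0]]))
alpha = uniform_weights(x, x_mask)
# alpha[0] -> [1/3, 1/3, 1/3, 0];  alpha[1] -> [1/4, 1/4, 1/4, 1/4]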

Example 3: compute_variance

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
def compute_variance(e, probs):
    bs, N = probs.size()
    variance = Variable(torch.zeros(bs).type(dtype))
    for i in range(e.max()+1):
        mask = Variable((e == i).float())
        Ns = mask.sum(1).clamp(min=1)
        masked_probs = probs*mask
        probs_mean = (masked_probs).sum(1) / Ns
        v = (masked_probs*masked_probs).sum(1) / Ns - probs_mean*probs_mean
        variance += v
    return variance
Author: ParsonsZeng, Project: DiCoNet, Lines of code: 13, Source: kmeans.py

Example 4: target_MoG_1D

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
class target_MoG_1D(nn.Module):
    def __init__(self, seed=1):
        super(target_MoG_1D, self).__init__()

        torch.manual_seed(seed)

        self.x_size = 1

        self.mean = Variable(torch.FloatTensor([-2.5]), requires_grad = True) #[1]
        self.logvar = Variable(torch.FloatTensor([1.]), requires_grad = True) #[1]

        self.mean2 = Variable(torch.FloatTensor([4.]), requires_grad = True) #[1]
        self.logvar2 = Variable(torch.FloatTensor([-2.]), requires_grad = True) #[1]

    def log_prob(self, x):
        '''
        x: [B,X]
        mean,logvar: [X]
        output: [B]
        '''

        assert len(x.size()) == 2
        assert x.size()[1] == self.mean.size()[0]

        D = x.size()[1]
        term1 = Variable(D * torch.log(torch.FloatTensor([2.*math.pi]))) #[1]
        aaa = -.5 * (term1 + self.logvar.sum(0) + ((x - self.mean).pow(2)/torch.exp(self.logvar)).sum(1))

        bbb = -.5 * (term1 + self.logvar2.sum(0) + ((x - self.mean2).pow(2)/torch.exp(self.logvar2)).sum(1))

        # print (aaa)
        # mix the two Gaussians with weights .7/.3; the exp(-30) term keeps the log finite
        aaa = torch.log(torch.exp(aaa)*.7 + torch.exp(bbb)*.3 + torch.exp(Variable(torch.FloatTensor([-30]))))

        aaa = aaa.unsqueeze(1)
        # print (aaa)
        # fads
        return aaa
Author: chriscremer, Project: Other_Code, Lines of code: 39, Source: 1d_sqiggle_example.py

Example 5: uniform_weights

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
def uniform_weights(x, x_mask):
    """Return uniform weights over non-masked x (a sequence of vectors).

    Args:
        x: batch * len * hdim
        x_mask: batch * len (1 for padding, 0 for true)
    Output:
        x_avg: batch * hdim
    """
    alpha = Variable(torch.ones(x.size(0), x.size(1)))
    if x.data.is_cuda:
        alpha = alpha.cuda()
    alpha = alpha * x_mask.eq(0).float()
    alpha = alpha / alpha.sum(1).expand(alpha.size())
    return alpha
Author: athiwatp, Project: DrQA, Lines of code: 17, Source: layers.py

Example 6: compute_reward

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
def compute_reward(e, K, points):
    bs, N, _ = points.size()
    reward2 = Variable(torch.zeros(bs).type(dtype))
    reward3 = Variable(torch.zeros(bs).type(dtype))
    c = []
    for k in range(2**K):
        mask = Variable((e == k).float()).unsqueeze(2).expand_as(points)
        N1 = mask.sum(1)
        center = points*mask
        center = center.sum(1) / N1.clamp(min=1)
        c.append(center[0])
        subs = ((points-center.unsqueeze(1).expand_as(points)) * mask)
        subs2 = (subs * subs).sum(2).sum(1) / N
        subs3 = torch.abs(subs * subs * subs).sum(2).sum(1) / N
        reward2 += subs2
        reward3 += subs3
    return reward2, reward3, c
Author: ParsonsZeng, Project: DiCoNet, Lines of code: 19, Source: kmeans.py
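A hypothetical call of compute_reward (dtype is a module-level global in kmeans.py; the value and shapes below are assumptions):

dtype = torch.FloatTensor
points = Variable(torch.rand(1, 8, 2))              # bs=1, N=8 points in the plane
e = torch.LongTensor([[0, 0, 1, 1, 2, 2, 3, 3]])    # assignments into 2**K = 4 clusters
reward2, reward3, centers = compute_reward(e, K=2, points=points)
# reward2 / reward3 accumulate the within-cluster second / third absolute moments;
# centers collects the centroid of each cluster for the first batch element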

Example 7: update_input

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
def update_input(input, dists, sample, sigma2, e, k):
    OP, x, Y = input
    bs = x.size(0)
    N = x.size(1)
    sample = sample.float()
    mask = sample.unsqueeze(1).expand(bs,N,N)*sample.unsqueeze(2).expand(bs,N,N)
    mask += (1-sample).unsqueeze(1).expand(bs,N,N)*(1-sample).unsqueeze(2).expand(bs,N,N)
    U = (OP.data[:,:,:,3]>0).float()*mask
    
    W = dists*U
    Wm = W.max(2,True)[0].expand_as(W).max(1,True)[0].expand_as(W)
    W = W / Wm.clamp(min=1e-6) * np.sqrt(2)
    W = torch.exp(- W*W / sigma2)
    
    OP[:,:,:,1] = Variable(W)
    D = OP.data[:,:,:,0] * OP.data[:,:,:,1].sum(2,True).expand(bs,N,N)
    OP[:,:,:,2] = Variable(D)
    
    U = U / U.sum(2,True).expand_as(U)
    OP[:,:,:,3] = Variable(U)
    Y = Variable(OP[:,:,:,1].data.clone())

    # Normalize inputs
    if normalize:
        z = Variable(torch.zeros((bs, N, 2**k))).type(dtype)
        e = e.unsqueeze(2)
        o = Variable(torch.ones((bs, N, 1))).type(dtype)
        z = z.scatter_(2, e, o)
        z = z.unsqueeze(2).expand(bs, N, 2, 2**k)
        z_bar = z * x.unsqueeze(3).expand_as(z)
        Nk = z.sum(1)
        mu = z_bar.sum(1)/Nk
        mu_ext = mu.unsqueeze(1).expand_as(z)*z
        var = ((z_bar - mu_ext)*(z_bar - mu_ext)).sum(1)/Nk
        var_ext = var.unsqueeze(1).expand_as(z)*z
        x = x - mu_ext.sum(3)
        x = x/(10 * var_ext.sum(3))
        # plt.figure(1)
        # plt.clf()
        # plt.plot(x[0,:,0].data.cpu().numpy(), x[0,:,1].data.cpu().numpy(), 'o')
        # plt.savefig('./plots/norm.png')
        # pdb.set_trace()
    return OP, x, Y
Author: ParsonsZeng, Project: DiCoNet, Lines of code: 45, Source: kmeans.py

Example 8: Gaus_1D

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
class Gaus_1D(nn.Module):
    def __init__(self, mean, logvar, seed=1):
        super(Gaus_1D, self).__init__()

        torch.manual_seed(seed)

        self.x_size = 1

        self.mean = Variable(torch.FloatTensor(mean), requires_grad = True) #[1]
        self.logvar = Variable(torch.FloatTensor(logvar), requires_grad = True) #[1]


    def log_prob(self, x):
        '''
        x: [B,X]
        mean,logvar: [X]
        output: [B]
        '''

        assert len(x.size()) == 2
        assert x.size()[1] == self.mean.size()[0]

        D = x.size()[1]
        term1 = Variable(D * torch.log(torch.FloatTensor([2.*math.pi]))) #[1]
        aaa = -.5 * (term1 + self.logvar.sum(0) + ((x - self.mean).pow(2)/torch.exp(self.logvar)).sum(1))

        aaa = aaa.unsqueeze(1)
        # print (aaa)
        # fads
        return aaa



    def sample(self, k):
        '''
        k: # of samples
        output: [k,X]
        '''

        eps = Variable(torch.FloatTensor(k, self.x_size).normal_()) #.type(self.dtype)) #[P,B,Z]
        z = eps.mul(torch.exp(.5*self.logvar)) + self.mean  #[P,B,Z]
        return z
Author: chriscremer, Project: Other_Code, Lines of code: 44, Source: 1d_sqiggle_example.py
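A hypothetical usage sketch for Gaus_1D (the mean/logvar values are assumptions; the class also needs torch, torch.nn as nn and math imported in its module):

g = Gaus_1D(mean=[0.], logvar=[0.])   # a standard 1-D Gaussian
samples = g.sample(5)                 # Variable of size [5, 1]
logp = g.log_prob(samples)            # Variable of size [5, 1], log-density of each sample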

Example 9: nllloss_double_backwards

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
def nllloss_double_backwards(ctx, ggI):
    t = ctx.saved_variables
    target = t[1]
    weights = Variable(ctx.additional_args[1])
    size_average = ctx.additional_args[0]
    ignore_index = ctx.additional_args[3]
    reduce = ctx.additional_args[4]

    gI = None

    # can't scatter/gather on indices outside of range, let's just put them in range
    # and 0 out the weights later (so it doesn't matter where in range we put them)
    target_mask = target == ignore_index
    safe_target = target.clone()
    safe_target.masked_fill_(target_mask, 0)

    if weights.dim() == 0:
        weights_to_scatter = Variable(ggI.data.new(safe_target.size()).fill_(1))
    else:
        weights_maybe_resized = weights
        while weights_maybe_resized.dim() < target.dim():
            weights_maybe_resized = weights_maybe_resized.unsqueeze(1)

        weights_maybe_resized = weights_maybe_resized.expand(weights.size()[0:1] + target.size()[1:])
        weights_to_scatter = weights_maybe_resized.gather(0, safe_target)

    weights_to_scatter.masked_fill_(target_mask, 0)
    divisor = weights_to_scatter.sum() if size_average and reduce else 1
    weights_to_scatter = -1 * weights_to_scatter / divisor
    zeros = Variable(ggI.data.new(ggI.size()).zero_())
    mask = zeros.scatter_(1, safe_target.unsqueeze(1), weights_to_scatter.unsqueeze(1))

    if reduce:
        ggO = (ggI * mask).sum()
    else:
        ggO = (ggI * mask).sum(dim=1)

    return gI, None, ggO, None, None, None
Author: Jsmilemsj, Project: pytorch, Lines of code: 40, Source: auto_double_backwards.py

Example 10: kl_loguni

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
def kl_loguni(log_alpha):
    k1, k2, k3 = 0.63576, 1.8732, 1.48695
    C = -k1
    mdkl = k1 * F.sigmoid(k2 + k3 * log_alpha) - 0.5 * Variable.log1p(Variable.exp(-log_alpha)) + C
    kl = -Variable.sum(mdkl)
    return kl
Author: AlliedToasters, Project: elko_den, Lines of code: 8, Source: metrics.py
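A hypothetical call of kl_loguni (log_alpha would normally come from a variational-dropout layer; the random values are just for illustration, and the function assumes import torch.nn.functional as F in its module):

import torch
import torch.nn.functional as F
from torch.autograd import Variable

log_alpha = Variable(torch.randn(100))   # per-weight log dropout rates
kl = kl_loguni(log_alpha)                # one-element Variable: approximate KL to the log-uniform prior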

Example 11: kl_ard

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sum [as alias]
def kl_ard(log_alpha):
    return 0.5 * Variable.sum(Variable.log1p(Variable.exp(-log_alpha)))
Author: AlliedToasters, Project: elko_den, Lines of code: 4, Source: metrics.py


Note: the torch.autograd.Variable.sum examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.