

Python Variable.scatter_ Method Code Examples

This article collects typical usage examples of the torch.autograd.Variable.scatter_ method in Python. If you have been wondering exactly what Variable.scatter_ does, how to call it, or what it looks like in real code, the selected examples below should help. You can also explore further usage examples of the class it belongs to, torch.autograd.Variable.


The following shows 5 code examples of the Variable.scatter_ method, sorted by popularity by default.
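
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below, and assuming a pre-0.4 PyTorch where Variable still wraps a Tensor) of what Variable.scatter_(dim, index, src) does: it writes src into the tensor at the positions given by index along dimension dim, which is the standard in-place way to build one-hot matrices.

import torch
from torch.autograd import Variable

# scatter_ requires a LongTensor index; shape (3, 1) selects one column per row
indices = Variable(torch.LongTensor([[2], [0], [3]]))
one_hot = Variable(torch.zeros(3, 4))
one_hot.scatter_(1, indices, 1.0)   # row i gets a 1.0 at column indices[i]
print(one_hot)                      # a 3x4 one-hot matrix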

Example 1: one_hot

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import scatter_ [as alias]
def one_hot(num_cols, indices, use_cuda=False):
    """ Creates a matrix of one-hot vectors.
        - num_cols: int, number of columns (classes)
        - indices: LongTensor (or Variable wrapping one) of shape (batch_size, 1);
          scatter_ requires integer indices
    """
    batch_size = indices.size(0)
    # long_type(use_cuda) is a helper from holographic_memory.py that returns
    # torch.LongTensor or torch.cuda.LongTensor
    mask = long_type(use_cuda)(batch_size, num_cols).fill_(0)
    ones = 1
    if isinstance(indices, Variable):
        # scatter_ on a Variable needs a Variable source with the same shape as indices
        ones = Variable(long_type(use_cuda)(indices.size()).fill_(1))
        mask = Variable(mask, volatile=indices.volatile)

    return mask.scatter_(1, indices, ones)
Author: jramapuram, Project: memory, Lines of code: 15, Source: holographic_memory.py
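
A hypothetical call to the helper above might look as follows; long_type is assumed to be the project's helper that returns torch.LongTensor (or torch.cuda.LongTensor when use_cuda is True), and the (batch_size, 1) shape of indices is what makes the scatter along dim 1 line up.

import torch
from torch.autograd import Variable

indices = Variable(torch.LongTensor([[1], [0], [3]]))   # shape (batch_size, 1)
mask = one_hot(num_cols=4, indices=indices)             # Variable of shape (3, 4)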

Example 2: classifier

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import scatter_ [as alias]
    def classifier(self, xs):
        """
        classify an image (or a batch of images)

        :param xs: a batch of scaled vectors of pixels from an image
        :return: a batch of the corresponding class labels (as one-hots)
        """
        # use the trained model q(y|x) = categorical(alpha(x))
        # compute all class probabilities for the image(s)
        alpha = self.encoder_y.forward(xs)

        # get the index (digit) that corresponds to
        # the maximum predicted class probability
        res, ind = torch.topk(alpha, 1)

        # convert the digit(s) to one-hot tensor(s)
        ys = Variable(torch.zeros(alpha.size()))
        ys = ys.scatter_(1, ind, 1.0)
        return ys
Author: Magica-Chen, Project: pyro, Lines of code: 21, Source: ss_vae_M2.py

Example 3: update_input

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import scatter_ [as alias]
def update_input(input, dists, sample, sigma2, e, k):
    OP, x, Y = input
    bs = x.size(0)
    N = x.size(1)
    sample = sample.float()
    # pairwise mask: 1 where points i and j fall on the same side of the split
    mask = sample.unsqueeze(1).expand(bs,N,N)*sample.unsqueeze(2).expand(bs,N,N)
    mask += (1-sample).unsqueeze(1).expand(bs,N,N)*(1-sample).unsqueeze(2).expand(bs,N,N)
    # restrict the existing adjacency (channel 3 of OP) to same-side pairs
    U = (OP.data[:,:,:,3]>0).float()*mask
    
    # Gaussian similarity kernel on the masked distances, rescaled by the per-graph maximum
    W = dists*U
    Wm = W.max(2,True)[0].expand_as(W).max(1,True)[0].expand_as(W)
    W = W / Wm.clamp(min=1e-6) * np.sqrt(2)
    W = torch.exp(- W*W / sigma2)
    
    OP[:,:,:,1] = Variable(W)
    D = OP.data[:,:,:,0] * OP.data[:,:,:,1].sum(2,True).expand(bs,N,N)
    OP[:,:,:,2] = Variable(D)
    
    U = U / U.sum(2,True).expand_as(U)
    OP[:,:,:,3] = Variable(U)
    Y = Variable(OP[:,:,:,1].data.clone())

    # Normalize inputs
    if normalize:  # `normalize` and `dtype` are presumably module-level globals in kmeans.py
        # one-hot encode the cluster assignment e along the last dimension
        z = Variable(torch.zeros((bs, N, 2**k))).type(dtype)
        e = e.unsqueeze(2)
        o = Variable(torch.ones((bs, N, 1))).type(dtype)
        z = z.scatter_(2, e, o)
        z = z.unsqueeze(2).expand(bs, N, 2, 2**k)
        z_bar = z * x.unsqueeze(3).expand_as(z)
        Nk = z.sum(1)
        mu = z_bar.sum(1)/Nk
        mu_ext = mu.unsqueeze(1).expand_as(z)*z
        var = ((z_bar - mu_ext)*(z_bar - mu_ext)).sum(1)/Nk
        var_ext = var.unsqueeze(1).expand_as(z)*z
        x = x - mu_ext.sum(3)
        x = x/(10 * var_ext.sum(3))
        # plt.figure(1)
        # plt.clf()
        # plt.plot(x[0,:,0].data.cpu().numpy(), x[0,:,1].data.cpu().numpy(), 'o')
        # plt.savefig('./plots/norm.png')
        # pdb.set_trace()
    return OP, x, Y
Author: ParsonsZeng, Project: DiCoNet, Lines of code: 45, Source: kmeans.py
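
The scatter_ call in the normalization branch above one-hot encodes the per-point cluster assignment e along the last dimension. Below is a standalone sketch of just that step, with illustrative shapes (bs, N and 2**k stand in for the values used by the surrounding k-means code).

import torch
from torch.autograd import Variable

bs, N, k = 2, 5, 2
e = Variable(torch.LongTensor(bs, N).random_(0, 2 ** k))   # cluster id per point
z = Variable(torch.zeros(bs, N, 2 ** k))
o = Variable(torch.ones(bs, N, 1))
z = z.scatter_(2, e.unsqueeze(2), o)   # z[b, n, e[b, n]] = 1, everything else 0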

Example 4: nllloss_double_backwards

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import scatter_ [as alias]
def nllloss_double_backwards(ctx, ggI):
    t = ctx.saved_variables
    target = t[1]
    weights = Variable(ctx.additional_args[1])
    size_average = ctx.additional_args[0]
    ignore_index = ctx.additional_args[3]
    reduce = ctx.additional_args[4]

    gI = None

    # can't scatter/gather on indices outside of range, let's just put them in range
    # and 0 out the weights later (so it doesn't matter where in range we put them)
    target_mask = target == ignore_index
    safe_target = target.clone()
    safe_target.masked_fill_(target_mask, 0)

    if weights.dim() == 0:
        weights_to_scatter = Variable(ggI.data.new(safe_target.size()).fill_(1))
    else:
        weights_maybe_resized = weights
        while weights_maybe_resized.dim() < target.dim():
            weights_maybe_resized = weights_maybe_resized.unsqueeze(1)

        weights_maybe_resized = weights_maybe_resized.expand(weights.size()[0:1] + target.size()[1:])
        weights_to_scatter = weights_maybe_resized.gather(0, safe_target)

    weights_to_scatter.masked_fill_(target_mask, 0)
    divisor = weights_to_scatter.sum() if size_average and reduce else 1
    weights_to_scatter = -1 * weights_to_scatter / divisor
    zeros = Variable(ggI.data.new(ggI.size()).zero_())
    mask = zeros.scatter_(1, safe_target.unsqueeze(1), weights_to_scatter.unsqueeze(1))

    if reduce:
        ggO = (ggI * mask).sum()
    else:
        ggO = (ggI * mask).sum(dim=1)

    return gI, None, ggO, None, None, None
Author: Jsmilemsj, Project: pytorch, Lines of code: 40, Source: auto_double_backwards.py
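
The key scatter_ call above differs from the one-hot examples in that the source is not a constant 1 but a per-sample weight. A standalone sketch with illustrative values:

import torch
from torch.autograd import Variable

target = Variable(torch.LongTensor([2, 0, 1]))           # class index per sample
weights = Variable(torch.Tensor([-0.5, -1.0, -0.25]))    # per-sample (negated) weights
zeros = Variable(torch.zeros(3, 4))
mask = zeros.scatter_(1, target.unsqueeze(1), weights.unsqueeze(1))
# mask[i, target[i]] == weights[i]; every other entry stays 0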

Example 5: range

# Required import: from torch.autograd import Variable [as alias]
# Alternatively: from torch.autograd.Variable import scatter_ [as alias]
    # (fragment from the training script: label_num_list, fix_onehot_list and
    #  fill_list are assumed to be defined earlier in train-conditional.py)
    test_num_per_label = 1
    total_fix_length = test_num_per_label
    for label_num in label_num_list:
        total_fix_length *= label_num


    for label_num in label_num_list:
        fix_length = test_num_per_label * label_num
        fix_label = torch.FloatTensor(fix_length)
        for i in range(test_num_per_label):
            for j in range(label_num):
                fix_label[i * label_num + j] = j
        fix = torch.LongTensor(fix_length, 1).copy_(fix_label)
        fix_onehot = torch.FloatTensor(fix_length, label_num)
        fix_onehot.zero_()
        fix_onehot.scatter_(1, fix, 1)
        fix_onehot = fix_onehot.view(-1, label_num, 1, 1)
        fix_onehot = Variable(fix_onehot).cuda()
        fix_onehot_list.append(fix_onehot)

        fill = torch.zeros([label_num, label_num, 64, 64])
        for i in range(label_num):
            fill[i, i, :, :] = 1
        fill_list.append(fill)

    for i in range(len(fix_onehot_list)):
        fix_onehot = fix_onehot_list[i]
        # use integer division: repeat() expects integer repeat counts
        repeat_time = total_fix_length // (test_num_per_label * fix_onehot.shape[1])
        fix_onehot_list[i] = fix_onehot.repeat(repeat_time, 1, 1, 1)

    fix_onehot_concat = torch.cat(fix_onehot_list, 1)
Author: LuChengTHU, Project: SN-GAN, Lines of code: 33, Source: train-conditional.py
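
A compact sketch of the label-to-one-hot step above, with illustrative values (the final reshape to (N, label_num, 1, 1) matches the conditioning-input convention used by train-conditional.py):

import torch
from torch.autograd import Variable

label_num = 4
fix_label = torch.LongTensor([0, 1, 2, 3]).unsqueeze(1)       # (N, 1) class ids
fix_onehot = torch.zeros(fix_label.size(0), label_num)
fix_onehot.scatter_(1, fix_label, 1)                          # (N, label_num) one-hot
fix_onehot = Variable(fix_onehot.view(-1, label_num, 1, 1))   # generator conditioning input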


Note: The torch.autograd.Variable.scatter_ examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain the copyright to the source code; please consult the corresponding project's license before redistributing or using it. Do not reproduce this article without permission.