

Python torch.sum Method Code Examples

This article collects typical usage examples of the torch.sum method in Python. If you are asking yourself how exactly torch.sum is used, or what it is used for in practice, the curated code examples below may help. You can also explore further usage examples from the torch module that the method belongs to.


The following presents 15 code examples of the torch.sum method, sorted by popularity by default.
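
Before the examples, a minimal sketch of the call itself may help: torch.sum reduces over all elements by default, over a given dim when one is passed, and keeps the reduced dimension when keepdim=True (the tensor values below are made up for illustration).

import torch

x = torch.arange(6, dtype=torch.float32).reshape(2, 3)   # [[0., 1., 2.], [3., 4., 5.]]

total = torch.sum(x)                               # tensor(15.) - reduce over all elements
per_row = torch.sum(x, dim=1)                      # tensor([ 3., 12.]) - reduce along dim 1
per_row_kept = torch.sum(x, dim=1, keepdim=True)   # shape (2, 1) - keep the reduced dim

print(total, per_row, per_row_kept.shape)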

Example 1: forward

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def forward(self, src, tgt, src_mask, tgt_mask):
        """
        Take in and process masked src and target sequences.
        """
        memory = self.encode(src, src_mask)  # (batch_size, max_src_seq, d_model)
        # attented_mem=self.attention(memory,memory,memory,src_mask)
        # memory=attented_mem
        score = self.attention(memory, memory, src_mask)
        attent_memory = score.bmm(memory)
        # memory=self.linear(torch.cat([memory,attent_memory],dim=-1))

        memory, _ = self.gru(attent_memory)
        '''
        score=torch.sigmoid(self.linear(memory))
        memory=memory*score
        '''
        latent = torch.sum(memory, dim=1)  # (batch_size, d_model)
        logit = self.decode(latent.unsqueeze(1), tgt, tgt_mask)  # (batch_size, max_tgt_seq, d_model)
        # logit,_=self.gru_decoder(logit)
        prob = self.generator(logit)  # (batch_size, max_seq, vocab_size)
        return latent, prob 
Developer: Nrgeup, Project: controllable-text-attribute-transfer, Lines: 23, Source: model2.py

Example 2: forward

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def forward(self, src, tgt, src_mask, tgt_mask):
        """
        Take in and process masked src and target sequences.
        """
        latent = self.encode(src, src_mask)  # (batch_size, max_src_seq, d_model)
        latent = self.sigmoid(latent)
        # memory = self.position_layer(memory)

        latent = torch.sum(latent, dim=1)  # (batch_size, d_model)

        # latent = self.memory2latent(memory)  # (batch_size, max_src_seq, latent_size)

        # latent = self.memory2latent(memory)
        # memory = self.latent2memory(latent)  # (batch_size, max_src_seq, d_model)

        logit = self.decode(latent.unsqueeze(1), tgt, tgt_mask)  # (batch_size, max_tgt_seq, d_model)
        prob = self.generator(logit)  # (batch_size, max_seq, vocab_size)
        return latent, prob 
Developer: Nrgeup, Project: controllable-text-attribute-transfer, Lines: 20, Source: model.py
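
Examples 1 and 2 both collapse the encoder output over the sequence dimension with torch.sum(..., dim=1) before decoding. A minimal, standalone sketch of just that pooling step (the shapes are assumptions taken from the comments in the code):

import torch

batch_size, max_src_seq, d_model = 4, 10, 256
memory = torch.randn(batch_size, max_src_seq, d_model)   # encoder output

latent = torch.sum(memory, dim=1)     # (batch_size, d_model): one vector per sequence
decoder_in = latent.unsqueeze(1)      # (batch_size, 1, d_model), as passed to decode()
print(latent.shape, decoder_in.shape)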

Example 3: forward

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def forward(self, x_graphs, x_tensors, y_graphs, y_tensors, y_orders, beta):
        x_tensors = make_cuda(x_tensors)
        y_tensors = make_cuda(y_tensors)
        x_root_vecs, x_tree_vecs, x_graph_vecs = self.encode(x_tensors)
        _, y_tree_vecs, y_graph_vecs = self.encode(y_tensors)

        diff_tree_vecs = y_tree_vecs.sum(dim=1) - x_tree_vecs.sum(dim=1)
        diff_graph_vecs = y_graph_vecs.sum(dim=1) - x_graph_vecs.sum(dim=1)
        diff_tree_vecs, tree_kl = self.rsample(diff_tree_vecs, self.T_mean, self.T_var)
        diff_graph_vecs, graph_kl = self.rsample(diff_graph_vecs, self.G_mean, self.G_var)
        kl_div = tree_kl + graph_kl

        diff_tree_vecs = diff_tree_vecs.unsqueeze(1).expand(-1, x_tree_vecs.size(1), -1)
        diff_graph_vecs = diff_graph_vecs.unsqueeze(1).expand(-1, x_graph_vecs.size(1), -1)
        x_tree_vecs = self.W_tree( torch.cat([x_tree_vecs, diff_tree_vecs], dim=-1) )
        x_graph_vecs = self.W_graph( torch.cat([x_graph_vecs, diff_graph_vecs], dim=-1) )

        loss, wacc, iacc, tacc, sacc = self.decoder((x_root_vecs, x_tree_vecs, x_graph_vecs), y_graphs, y_tensors, y_orders)
        return loss + beta * kl_div, kl_div.item(), wacc, iacc, tacc, sacc 
Developer: wengong-jin, Project: hgraph2graph, Lines: 21, Source: hgnn.py
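
The diff_tree_vecs / diff_graph_vecs lines pool variable-length node encodings into fixed-size vectors by summing over dim=1 and then subtracting. A shape-only sketch of that step (the sizes are assumptions):

import torch

batch_size, num_nodes, hidden = 4, 12, 250
x_tree_vecs = torch.randn(batch_size, num_nodes, hidden)
y_tree_vecs = torch.randn(batch_size, num_nodes, hidden)

# pool each molecule's node vectors into one vector, then take the difference
diff_tree_vecs = y_tree_vecs.sum(dim=1) - x_tree_vecs.sum(dim=1)   # (batch_size, hidden)
print(diff_tree_vecs.shape)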

Example 4: plot_examples

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def plot_examples(data_loader, model, epoch, plotter, ind = [0, 10, 20]):

    # switch to evaluate mode
    model.eval()

    for i, (g, h, e, target) in enumerate(data_loader):
        if i in ind:
            subfolder_path = 'batch_' + str(i) + '_t_' + str(int(target[0][0])) + '/epoch_' + str(epoch) + '/'
            if not os.path.isdir(args.plotPath + subfolder_path):
                os.makedirs(args.plotPath + subfolder_path)

            num_nodes = torch.sum(torch.sum(torch.abs(h[0, :, :]), 1) > 0)
            am = g[0, 0:num_nodes, 0:num_nodes].numpy()
            pos = h[0, 0:num_nodes, :].numpy()

            plotter.plot_graph(am, position=pos, fig_name=subfolder_path+str(i) + '_input.png')

            # Prepare input data
            if args.cuda:
                g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
            g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

            # Compute output
            model(g, h, e, lambda cls, id: plotter.plot_graph(am, position=pos, cls=cls,
                                                          fig_name=subfolder_path+ id)) 
Developer: priba, Project: nmp_qc, Lines: 27, Source: demo_letter_duvenaud.py
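
The num_nodes line exploits zero padding: node-feature rows that are entirely zero are padding, so summing the absolute values per row and counting the strictly positive results gives the number of real nodes. A standalone sketch of that idiom (shapes and values are assumptions):

import torch

h = torch.zeros(1, 8, 3)        # (batch, max_nodes, node_features), zero-padded
h[0, :5] = torch.randn(5, 3)    # pretend the first 5 nodes are real

# per-node sum of absolute features is > 0 only for non-padding rows
num_nodes = torch.sum(torch.sum(torch.abs(h[0, :, :]), 1) > 0)
print(int(num_nodes))           # 5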

Example 5: train

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def train(net, lr, num_epochs):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("train on", device)
    net = net.to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    for epoch in range(num_epochs):
        start, l_sum, n = time.time(), 0.0, 0
        for batch in data_iter:
            center, context_negative, mask, label = [d.to(device) for d in batch]

            pred = skip_gram(center, context_negative, net[0], net[1])

            # Use the mask variable so that padding positions do not affect the loss
            l = (loss(pred.view(label.shape), label, mask) *
                 mask.shape[1] / mask.float().sum(dim=1)).mean()  # average loss for one batch
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            l_sum += l.cpu().item()
            n += 1
        print('epoch %d, loss %.2f, time %.2fs'
              % (epoch + 1, l_sum / n, time.time() - start)) 
Developer: wdxtub, Project: deep-learning-note, Lines: 24, Source: 49_word2vec.py
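
The loss line divides the per-example sum by mask.float().sum(dim=1) so that padded positions neither contribute error nor dilute the average. A minimal sketch of this masked-mean pattern, with a plain squared error standing in for the binary cross-entropy used in the original (names, shapes, and values are assumptions):

import torch

pred  = torch.randn(2, 5)                        # (batch, max_len) raw predictions
label = torch.randint(0, 2, (2, 5)).float()      # (batch, max_len) targets
mask  = torch.tensor([[1., 1., 1., 0., 0.],      # 1 = real token, 0 = padding
                      [1., 1., 1., 1., 1.]])

per_token   = (pred - label) ** 2 * mask               # zero out padded positions
per_example = per_token.sum(dim=1) / mask.sum(dim=1)   # average over real tokens only
loss = per_example.mean()
print(loss)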

Example 6: node_forward

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def node_forward(self, inputs, child_c, child_h):
        child_h_sum = torch.sum(child_h, dim=0, keepdim=True)

        iou = self.ioux(inputs) + self.iouh(child_h_sum)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = F.sigmoid(i), F.sigmoid(o), F.tanh(u)

        f = F.sigmoid(
            self.fh(child_h) +
            self.fx(inputs).repeat(len(child_h), 1)
        )
        fc = torch.mul(f, child_c)

        c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)
        h = torch.mul(o, F.tanh(c))
        return c, h 
Developer: dasguptar, Project: treelstm.pytorch, Lines: 18, Source: model.py
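
In this Child-Sum TreeLSTM cell the children of a node are stacked along dim 0, so the sum reduces over dim=0 with keepdim=True, producing a row that broadcasts like a single child. A shape-only sketch (the sizes are assumptions):

import torch

num_children, mem_dim = 3, 150
child_h = torch.randn(num_children, mem_dim)             # hidden states of the children

child_h_sum = torch.sum(child_h, dim=0, keepdim=True)    # (1, mem_dim)
print(child_h_sum.shape)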

Example 7: iou_binary

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
    """
    IoU for foreground class
    binary: 1 foreground, 0 background
    """
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        intersection = ((label == 1) & (pred == 1)).sum()
        union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
        if not union:
            iou = EMPTY
        else:
            iou = float(intersection) / float(union)
        ious.append(iou)
    iou = mean(ious)    # mean across images if per_image
    return 100 * iou 
Developer: edwardzhou130, Project: PolarSeg, Lines: 20, Source: lovasz_losses.py

Example 8: iou

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
    """
    Array of IoU for each (non ignored) class
    """
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        iou = []    
        for i in range(C):
            if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
                intersection = ((label == i) & (pred == i)).sum()
                union = ((label == i) | ((pred == i) & (label != ignore))).sum()
                if not union:
                    iou.append(EMPTY)
                else:
                    iou.append(float(intersection) / float(union))
        ious.append(iou)
    ious = [mean(iou) for iou in zip(*ious)] # mean across images if per_image
    return 100 * np.array(ious)


# --------------------------- BINARY LOSSES --------------------------- 
Developer: edwardzhou130, Project: PolarSeg, Lines: 25, Source: lovasz_losses.py
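
Both IoU helpers above count intersections and unions by calling .sum() on boolean masks. For tensor inputs the same counts can be taken with torch.sum directly; a minimal sketch for a single binary prediction (the tensor contents are made up):

import torch

pred  = torch.tensor([1, 1, 0, 0, 1])
label = torch.tensor([1, 0, 0, 1, 1])

intersection = torch.sum((pred == 1) & (label == 1)).float()   # 2 pixels
union        = torch.sum((pred == 1) | (label == 1)).float()   # 4 pixels
iou = (intersection / union).item() if union > 0 else 1.0
print(iou)   # 0.5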

Example 9: lovasz_hinge_flat

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.relu(errors_sorted), Variable(grad))
    return loss 
Developer: edwardzhou130, Project: PolarSeg, Lines: 20, Source: lovasz_losses.py

Example 10: final_report

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def final_report(self):
        correct_preds = self.confusion[:, :,
                                       range(self.args.num_classes),
                                       range(self.args.num_classes)]
        correct_percentage = correct_preds / (self.confusion.sum(3) + 1e-6) * 100
        balance_accuracy = correct_percentage.mean()
        per_sequence_element_accuracy = correct_percentage.view(
            correct_percentage.size(0), -1).mean(1)
        per_sequence_report = ', '.join(
            '{:.2f}'.format(acc) for acc in per_sequence_element_accuracy)
        report = ('Accuracy {meter.avg[0]:.2f}   Balanced {balanced:.2f}   '
                  'PerSeq [{per_seq}]').format(meter=self.meter,
                                               balanced=balance_accuracy,
                                               per_seq=per_sequence_report)
        report += '   Accuracy Matrix (seq x imu x label): {}'.format(
            correct_percentage)
        return report 
Developer: ehsanik, Project: dogTorch, Lines: 19, Source: metrics.py

Example 11: record_output

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def record_output(self, output, output_indices, target, prev_absolutes,
                      next_absolutes, batch_size=1):
        assert output.dim() == 4
        assert target.dim() == 3

        _, predictions = output.max(3)

        # Compute per class accuracy for unbalanced data.
        sequence_length = output.size(1)
        num_label = output.size(2)
        num_class = output.size(3)
        correct_alljoint = (target == predictions).float().sum(2)
        sum_of_corrects = correct_alljoint.sum(1)
        max_value = num_label * sequence_length
        count_correct = (sum_of_corrects == max_value).float().mean()
        correct_per_seq = ((correct_alljoint == num_label - 1).sum(1).float() /
                           sequence_length).mean()
        self.meter.update(
            torch.Tensor([count_correct * 100, correct_per_seq * 100]),
            batch_size) 
Developer: ehsanik, Project: dogTorch, Lines: 22, Source: metrics.py
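
The chained sums count, per example, how many (position, label) predictions match the target; an example counts as fully correct only when that count reaches num_label * sequence_length. A minimal sketch of that counting logic (shapes and values are assumptions):

import torch

batch, seq_len, num_label = 3, 5, 4
target      = torch.randint(0, 2, (batch, seq_len, num_label))
predictions = torch.randint(0, 2, (batch, seq_len, num_label))

correct_alljoint = (target == predictions).float().sum(2)    # (batch, seq_len)
sum_of_corrects  = correct_alljoint.sum(1)                    # (batch,)
count_correct = (sum_of_corrects == num_label * seq_len).float().mean()
print(count_correct)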

Example 12: forward

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def forward(self, Q, P):
        """
        Parameters
        ----------
        P: ground truth probability distribution [batch_size, n, n]
        Q: predicted probability distribution [batch_size, n, n]

        Description
        -----------
        compute the KL divergence of attention maps. Here P and Q denote 
        the pixel-level attention map with n spatial positions.
        """
        kl_loss = P * safe_log(P / Q)
        pixel_loss = torch.sum(kl_loss, dim=-1)
        total_loss = torch.mean(pixel_loss)
        return total_loss 
Developer: miraiaroha, Project: ACAN, Lines: 18, Source: losses.py
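
A standalone sketch of the same KL reduction on a toy attention map, using a clamped log in place of the project's safe_log helper (the stand-in helper name and the epsilon value are assumptions):

import torch

def safe_log_sketch(t, eps=1e-8):
    # stand-in for the repository's safe_log: avoid log(0)
    return torch.log(t.clamp(min=eps))

batch_size, n = 2, 4
P = torch.softmax(torch.randn(batch_size, n, n), dim=-1)   # ground-truth attention map
Q = torch.softmax(torch.randn(batch_size, n, n), dim=-1)   # predicted attention map

kl_loss    = P * safe_log_sketch(P / Q)      # elementwise KL terms
pixel_loss = torch.sum(kl_loss, dim=-1)      # KL divergence per spatial position
total_loss = torch.mean(pixel_loss)          # scalar loss
print(total_loss)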

Example 13: __init__

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def __init__(self, ignore_index=None, reduction='sum', use_weights=False, weight=None):
        """
        Parameters
        ----------
        ignore_index : Specifies a target value that is ignored
                       and does not contribute to the input gradient
        reduction : Specifies the reduction to apply to the output: 
                    'mean' | 'sum'. 'mean': elementwise mean,
                    'sum': class dim will be summed and batch dim will be averaged.
        use_weights : whether to use class weights.
        weight : Tensor, optional
                a manual rescaling weight given to each class.
                If given, has to be a Tensor of size "nclasses"
        """
        super(_BaseEntropyLoss2d, self).__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.use_weights = use_weights
        if use_weights:
            print("w/ class balance")
            print(weight)
            self.weight = torch.FloatTensor(weight).cuda()
        else:
            print("w/o class balance")
            self.weight = None 
Developer: miraiaroha, Project: ACAN, Lines: 27, Source: losses.py

Example 14: forward

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def forward(self, input1):
        self.batchgrid3d = torch.zeros(torch.Size([input1.size(0)]) + self.grid3d.size())

        for i in range(input1.size(0)):
            self.batchgrid3d[i] = self.grid3d

        self.batchgrid3d = Variable(self.batchgrid3d)
        #print(self.batchgrid3d)

        x = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,0:4]), 3)
        y = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,4:8]), 3)
        z = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,8:]), 3)
        #print(x)
        r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5

        #print(r)
        theta = torch.acos(z/r)/(np.pi/2)  - 1
        #phi = torch.atan(y/x)
        phi = torch.atan(y/(x + 1e-5))  + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))
        phi = phi/np.pi


        output = torch.cat([theta,phi], 3)

        return output 
Developer: guoruoqian, Project: cascade-rcnn_Pytorch, Lines: 27, Source: gridgen.py
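
Each of the three sums above is an elementwise multiplication followed by a reduction over the last axis, i.e. a per-position dot product between the homogeneous grid and one 4-channel block of the input (an interpretation based on the 0:4 / 4:8 / 8: slices). A minimal sketch of that pattern and its einsum equivalent (the shapes are assumptions):

import torch

grid = torch.randn(2, 8, 8, 4)   # (batch, H, W, 4) homogeneous grid coordinates
proj = torch.randn(2, 8, 8, 4)   # (batch, H, W, 4) one 4-channel block of the input

x = torch.sum(torch.mul(grid, proj), 3)                 # (batch, H, W)
x_einsum = torch.einsum('bhwk,bhwk->bhw', grid, proj)   # equivalent formulation
print(torch.allclose(x, x_einsum))                      # True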

Example 15: __init__

# Required module import: import torch [as alias]
# Or: from torch import sum [as alias]
def __init__(self, src, trg=None, pad=0):
        self.src = src
        self.src_mask = (src != pad).unsqueeze(-2)
        if trg is not None:
            self.trg = trg[:, :-1]
            self.trg_y = trg[:, 1:]
            self.trg_mask = self.make_std_mask(self.trg, pad)
            self.ntokens = (self.trg_y != pad).data.sum() 
Developer: Nrgeup, Project: controllable-text-attribute-transfer, Lines: 10, Source: model2.py


Note: The torch.sum method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors. Please refer to each project's license before distributing or using the code; do not repost without permission.