

Python torch.unique Method Code Examples

This article collects typical usage examples of the torch.unique method in Python. If you are wondering exactly how torch.unique is used, how to call it, or what real-world uses of it look like, the curated code samples below may help. You can also explore further usage examples from the torch package in which the method lives.


Below are 15 code examples of the torch.unique method, sorted by popularity by default.
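
Before diving into the examples, here is a minimal, self-contained sketch of the torch.unique call patterns that the snippets below rely on; the tensor values are invented purely for illustration.

import torch

x = torch.tensor([3, 1, 2, 3, 1, 1])

# Unique values only (sorted by default in recent PyTorch versions)
vals = torch.unique(x)                      # tensor([1, 2, 3])

# return_inverse gives, for every element of x, its index into vals
vals, inv = torch.unique(x, return_inverse=True)
# inv == tensor([2, 0, 1, 2, 0, 0]), so vals[inv] reconstructs x

# return_counts gives the number of occurrences of each unique value
vals, counts = torch.unique(x, return_counts=True)
# counts == tensor([3, 1, 2])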

Example 1: roi2bbox

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def roi2bbox(rois):
    """Convert rois to bounding box format.

    Args:
        rois (torch.Tensor): RoIs with the shape (n, 5) where the first
            column indicates batch id of each RoI.

    Returns:
        list[torch.Tensor]: Converted boxes of corresponding rois.
    """
    bbox_list = []
    img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
    for img_id in img_ids:
        inds = (rois[:, 0] == img_id.item())
        bbox = rois[inds, 1:]
        bbox_list.append(bbox)
    return bbox_list 
Developer: open-mmlab, Project: mmdetection, Lines: 19, Source: transforms.py
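
A quick, hypothetical way to exercise roi2bbox from the example above; the RoI values are invented, and only the first column (the batch id) matters for the split.

import torch

# Two RoIs for image 0 and one for image 1; columns are (batch_id, x1, y1, x2, y2)
rois = torch.tensor([[0., 10., 10., 50., 50.],
                     [0., 20., 30., 60., 80.],
                     [1.,  5.,  5., 40., 40.]])
boxes = roi2bbox(rois)
# boxes[0].shape == (2, 4) for image 0, boxes[1].shape == (1, 4) for image 1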

Example 2: scatter

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def scatter(self, data_list, device_ids):
        num_devices = min(len(device_ids), len(data_list))

        count = torch.tensor([data.num_nodes for data in data_list])
        cumsum = count.cumsum(0)
        cumsum = torch.cat([cumsum.new_zeros(1), cumsum], dim=0)
        device_id = num_devices * cumsum.to(torch.float) / cumsum[-1].item()
        device_id = (device_id[:-1] + device_id[1:]) / 2.0
        device_id = device_id.to(torch.long)  # round.
        split = device_id.bincount().cumsum(0)
        split = torch.cat([split.new_zeros(1), split], dim=0)
        split = torch.unique(split, sorted=True)
        split = split.tolist()

        return [
            Batch.from_data_list(data_list[split[i]:split[i + 1]]).to(
                torch.device('cuda:{}'.format(device_ids[i])))
            for i in range(len(split) - 1)
        ] 
Developer: rusty1s, Project: pytorch_geometric, Lines: 21, Source: data_parallel.py
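
The torch.unique call at the end of scatter drops duplicate split offsets, which arise when a device is assigned no graphs; without it an empty slice would be produced. A toy reproduction of just that step, with invented counts:

import torch

device_id = torch.tensor([0, 0, 1, 1, 1])       # hypothetical per-graph device assignment
split = device_id.bincount().cumsum(0)          # tensor([2, 5])
split = torch.cat([split.new_zeros(1), split])  # tensor([0, 2, 5])
split = torch.unique(split, sorted=True).tolist()
# slices data_list[0:2] onto cuda:0 and data_list[2:5] onto cuda:1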

Example 3: forward

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def forward(self, pos, centroids, feat=None):
        dev = pos.device
        group_idx = self.frnn(pos, centroids)
        B, N, _ = pos.shape
        glist = []
        for i in range(B):
            center = torch.zeros((N)).to(dev)
            center[centroids[i]] = 1
            src = group_idx[i].contiguous().view(-1)
            dst = centroids[i].view(-1, 1).repeat(1, self.n_neighbor).view(-1)

            unified = torch.cat([src, dst])
            uniq, inv_idx = torch.unique(unified, return_inverse=True)
            src_idx = inv_idx[:src.shape[0]]
            dst_idx = inv_idx[src.shape[0]:]

            g = dgl.DGLGraph((src_idx.cpu(), dst_idx.cpu()), readonly=True)
            g.ndata['pos'] = pos[i][uniq]
            g.ndata['center'] = center[uniq]
            if feat is not None:
                g.ndata['feat'] = feat[i][uniq]
            glist.append(g)
        bg = dgl.batch(glist)
        return bg 
Developer: dmlc, Project: dgl, Lines: 26, Source: pointnet2.py
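
The key trick here is using return_inverse to compact an arbitrary set of global node IDs into the contiguous range 0..k-1 that the DGLGraph expects. A stripped-down sketch of that relabeling step, with invented ID tensors:

import torch

src = torch.tensor([7, 2, 9, 2])        # hypothetical global source IDs
dst = torch.tensor([4, 4, 7, 9])        # hypothetical global destination IDs

unified = torch.cat([src, dst])
uniq, inv_idx = torch.unique(unified, return_inverse=True)
src_idx = inv_idx[:src.shape[0]]         # source IDs remapped to 0..len(uniq)-1
dst_idx = inv_idx[src.shape[0]:]         # destination IDs remapped the same way
# uniq == tensor([2, 4, 7, 9]); per-node data is then gathered with uniq, as in pos[i][uniq] above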

Example 4: main

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def main(args):
    th.distributed.init_process_group(backend='gloo')
    g = dgl.distributed.DistGraph(args.ip_config, args.graph_name)
    print('rank:', g.rank())

    train_nid = dgl.distributed.node_split(g.ndata['train_mask'], g.get_partition_book(), force_even=True)
    val_nid = dgl.distributed.node_split(g.ndata['val_mask'], g.get_partition_book(), force_even=True)
    test_nid = dgl.distributed.node_split(g.ndata['test_mask'], g.get_partition_book(), force_even=True)
    print('part {}, train: {}, val: {}, test: {}'.format(g.rank(), len(train_nid),
                                                         len(val_nid), len(test_nid)))
    device = th.device('cpu')
    n_classes = len(th.unique(g.ndata['labels'][np.arange(g.number_of_nodes())]))

    # Pack data
    in_feats = g.ndata['features'].shape[1]
    data = train_nid, val_nid, in_feats, n_classes, g
    run(args, device, data)
    print("parent ends") 
Developer: dmlc, Project: dgl, Lines: 20, Source: train_dist.py

Example 5: load_ogb

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def load_ogb(name):
    from ogb.nodeproppred import DglNodePropPredDataset

    data = DglNodePropPredDataset(name=name)
    splitted_idx = data.get_idx_split()
    graph, labels = data[0]
    labels = labels[:, 0]

    graph.ndata['features'] = graph.ndata['feat']
    graph.ndata['labels'] = labels
    in_feats = graph.ndata['features'].shape[1]
    num_labels = len(th.unique(labels))

    # Find the node IDs in the training, validation, and test set.
    train_nid, val_nid, test_nid = splitted_idx['train'], splitted_idx['valid'], splitted_idx['test']
    train_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    train_mask[train_nid] = True
    val_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    val_mask[val_nid] = True
    test_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    test_mask[test_nid] = True
    graph.ndata['train_mask'] = train_mask
    graph.ndata['val_mask'] = val_mask
    graph.ndata['test_mask'] = test_mask
    return graph, len(th.unique(graph.ndata['labels'])) 
Developer: dmlc, Project: dgl, Lines: 27, Source: load_graph.py
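
A short usage sketch, assuming load_ogb from above; 'ogbn-products' is just one dataset name accepted by DglNodePropPredDataset and not necessarily the one this script targets.

graph, num_labels = load_ogb('ogbn-products')
print(graph.number_of_nodes(), num_labels)
# graph.ndata now carries 'features', 'labels', 'train_mask', 'val_mask' and 'test_mask'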

Example 6: bdd_message_func

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def bdd_message_func(self, edges):
        """Message function for block-diagonal-decomposition regularizer"""
        if edges.src['h'].dtype == th.int64 and len(edges.src['h'].shape) == 1:
            raise TypeError('Block decomposition does not allow integer ID feature.')

        # calculate msg @ W_r before put msg into edge
        if self.low_mem:
            etypes = th.unique(edges.data['type'])
            msg = th.empty((edges.src['h'].shape[0], self.out_feat),
                           device=edges.src['h'].device)
            for etype in etypes:
                loc = edges.data['type'] == etype
                w = self.weight[etype].view(self.num_bases, self.submat_in, self.submat_out)
                src = edges.src['h'][loc].view(-1, self.num_bases, self.submat_in)
                sub_msg = th.einsum('abc,bcd->abd', src, w)
                sub_msg = sub_msg.reshape(-1, self.out_feat)
                msg[loc] = sub_msg
        else:
            weight = self.weight.index_select(0, edges.data['type']).view(
                -1, self.submat_in, self.submat_out)
            node = edges.src['h'].view(-1, 1, self.submat_in)
            msg = th.bmm(node, weight).view(-1, self.out_feat)
        if 'norm' in edges.data:
            msg = msg * edges.data['norm']
        return {'msg': msg} 
Developer: dmlc, Project: dgl, Lines: 27, Source: relgraphconv.py
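
In the low-memory branch, torch.unique(edges.data['type']) lets the loop touch only the edge types actually present in the batch, applying a separate weight block to each group. The grouping pattern in isolation, with invented tensors:

import torch

etype = torch.tensor([2, 0, 2, 2, 0])           # hypothetical per-edge type IDs
feat = torch.randn(5, 4)                        # hypothetical per-edge features
out = torch.empty(5, 3)

weights = {0: torch.randn(4, 3), 2: torch.randn(4, 3)}  # one weight per present type
for t in torch.unique(etype):
    loc = etype == t                            # boolean mask for edges of this type
    out[loc] = feat[loc] @ weights[int(t)]      # apply the type-specific transform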

Example 7: task_importance_weights

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def task_importance_weights(label_array):
    uniq = torch.unique(label_array)
    num_examples = label_array.size(0)

    m = torch.zeros(uniq.shape[0])

    for i, t in enumerate(torch.arange(torch.min(uniq), torch.max(uniq))):
        m_k = torch.max(torch.tensor([label_array[label_array > t].size(0), 
                                      num_examples - label_array[label_array > t].size(0)]))
        m[i] = torch.sqrt(m_k.float())

    imp = m/torch.max(m)
    return imp


# Data-specific scheme 
Developer: Raschka-research-group, Project: coral-cnn, Lines: 18, Source: cacd-ordinal.py
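
A minimal, hypothetical call, assuming ordinal labels in 0..K-1 as in the CORAL setup:

import torch

labels = torch.tensor([0, 0, 1, 1, 1, 2, 3, 3])   # invented ordinal labels
imp = task_importance_weights(labels)
# imp has one entry per unique label, scaled so that its maximum is 1.0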

Example 8: test_sparse_weights_2d

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def test_sparse_weights_2d(self):
        in_channels, kernel_size, out_channels = 64, (5, 5), 64
        input_size = in_channels * kernel_size[0] * kernel_size[1]

        with torch.no_grad():
            for sparsity in [0.1, 0.5, 0.9]:
                cnn = torch.nn.Conv2d(in_channels=in_channels,
                                      out_channels=out_channels,
                                      kernel_size=kernel_size)
                sparse = SparseWeights2d(cnn, sparsity=sparsity)
                nonzeros = torch.nonzero(sparse.module.weight, as_tuple=True)[0]
                counts = torch.unique(nonzeros, return_counts=True)[1]

                # Expected non-zeros per output channel
                expected = [round(input_size * (1.0 - sparsity))] * out_channels
                self.assertSequenceEqual(counts.numpy().tolist(), expected) 
Developer: numenta, Project: nupic.torch, Lines: 18, Source: sparse_weights_test.py

Example 9: test_rezero_1d

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def test_rezero_1d(self):
        in_features, out_features = 784, 10
        for sparsity in [0.1, 0.5, 0.9]:
            linear = torch.nn.Linear(in_features=in_features,
                                     out_features=out_features)
            sparse = SparseWeights(linear, sparsity=sparsity)

            # Ensure weights are not sparse
            sparse.module.weight.data.fill_(1.0)

            # Rezero, verify the weights become sparse
            sparse.rezero_weights()
            nonzeros = torch.nonzero(sparse.module.weight, as_tuple=True)[0]
            counts = torch.unique(nonzeros, return_counts=True)[1]
            expected = [round(in_features * (1.0 - sparsity))] * out_features
            self.assertSequenceEqual(counts.numpy().tolist(), expected) 
Developer: numenta, Project: nupic.torch, Lines: 18, Source: sparse_weights_test.py

Example 10: test_rezero_2d

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def test_rezero_2d(self):
        in_channels, kernel_size, out_channels = 64, (5, 5), 64
        input_size = in_channels * kernel_size[0] * kernel_size[1]

        with torch.no_grad():
            for sparsity in [0.1, 0.5, 0.9]:
                cnn = torch.nn.Conv2d(in_channels=in_channels,
                                      out_channels=out_channels,
                                      kernel_size=kernel_size)
                sparse = SparseWeights2d(cnn, sparsity=sparsity)

                # Ensure weights are not sparse
                sparse.module.weight.data.fill_(1.0)

                # Rezero, verify the weights become sparse
                sparse.rezero_weights()
                nonzeros = torch.nonzero(sparse.module.weight, as_tuple=True)[0]
                counts = torch.unique(nonzeros, return_counts=True)[1]
                expected = [round(input_size * (1.0 - sparsity))] * out_channels
                self.assertSequenceEqual(counts.numpy().tolist(), expected) 
Developer: numenta, Project: nupic.torch, Lines: 22, Source: sparse_weights_test.py

Example 11: forward

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def forward(self, x, target):
        """ Compute weighted cross entropy

            @param x: a [N x C x H x W] torch.FloatTensor of values
            @param target: a [N x H x W] torch.LongTensor of values
        """
        temp = self.CrossEntropyLoss(x, target) # Shape: [N x H x W]

        # Compute pixel weights
        weight_mask = torch.zeros_like(target).float() # Shape: [N x H x W]. weighted mean over pixels
        unique_object_labels = torch.unique(target)
        for obj in unique_object_labels:
            num_pixels = torch.sum(target == obj, dtype=torch.float)
            weight_mask[target == obj] = 1 / num_pixels # inversely proportional to number of pixels

        loss = torch.sum(temp * weight_mask) / torch.sum(weight_mask) 
        return loss 
Developer: chrisdxie, Project: uois, Lines: 19, Source: losses.py
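
The weighting step in isolation, with a tiny invented target, shows how torch.unique drives the per-object balancing: each object's pixels receive a weight inversely proportional to the object's size.

import torch

target = torch.tensor([[[0, 0, 0],
                        [0, 1, 2]]])                 # hypothetical [N x H x W] labels
weight_mask = torch.zeros_like(target).float()
for obj in torch.unique(target):
    num_pixels = torch.sum(target == obj, dtype=torch.float)
    weight_mask[target == obj] = 1 / num_pixels
# weight_mask == [[[0.25, 0.25, 0.25], [0.25, 1.0, 1.0]]]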

Example 12: Compute_AUSUC

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def Compute_AUSUC(dataset, all_scores, gt_classes, seen, unseen):
    cls_in_test = set(np.unique(gt_classes).tolist())
    seen = sorted(list(cls_in_test.intersection(set(seen))))
    unseen = sorted(list(cls_in_test.intersection(set(unseen))))
    score_S = all_scores[:, seen]
    score_U = all_scores[:, unseen]
    Y = gt_classes
    label_S = np.array(seen)
    label_U = np.array(unseen)

    AUC_val, AUC_record, acc_noBias, HM, fixed_bias = _Compute_AUSUC(
        torch.from_numpy(score_S),
        torch.from_numpy(score_U),
        torch.from_numpy(Y.astype(np.int64)),
        torch.from_numpy(label_S.astype(np.int64)),
        torch.from_numpy(label_U.astype(np.int64)))

    HM, fixed_bias = HM.item(), fixed_bias.item()
    print('AUC_val: {:.3f} HM: {:.3f} fixed_bias: {:.3f}'\
        .format(AUC_val, HM, fixed_bias))

    return {'AUC_val':AUC_val, 'AUC_record':AUC_record,\
        'acc_noBias': acc_noBias, 'HM': HM, 'fixed_bias': fixed_bias} 
Developer: ruotianluo, Project: Context-aware-ZSR, Lines: 25, Source: test_engine.py

Example 13: __init__

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven

        self.groups = torch.unique(self.group_ids).sort(0)[0]

        self._can_reuse_batches = False 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines: 17, Source: grouped_batch_sampler.py
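
Note that torch.unique already returns sorted values by default in recent PyTorch versions, so the extra .sort(0)[0] is effectively a defensive no-op. A small check with invented group_ids:

import torch

group_ids = torch.as_tensor([1, 0, 1, 2, 0])
groups = torch.unique(group_ids).sort(0)[0]
# groups == tensor([0, 1, 2]), identical to torch.unique(group_ids, sorted=True)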

Example 14: roi2bbox

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def roi2bbox(rois):
    bbox_list = []
    img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
    for img_id in img_ids:
        inds = (rois[:, 0] == img_id.item())
        bbox = rois[inds, 1:]
        bbox_list.append(bbox)
    return bbox_list 
Developer: dingjiansw101, Project: AerialDetection, Lines: 10, Source: transforms.py

Example 15: droi2dbbox

# Required import: import torch [as alias]
# Or: from torch import unique [as alias]
def droi2dbbox(drois):
    dbbox_list = []
    img_ids = torch.unique(drois[:, 0].cpu(), sorted=True)
    for img_id in img_ids:
        inds = (drois[:, 0] == img_id.item())
        dbbox = drois[inds, 1:]
        dbbox_list.append(dbbox)
    return dbbox_list 
Developer: dingjiansw101, Project: AerialDetection, Lines: 10, Source: transforms_rbbox.py


Note: The torch.unique examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.