

Python torch.unique Method Code Examples

This article collects typical usage examples of the torch.unique method in Python. If you are wondering what torch.unique does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore other usage examples from the torch module it belongs to.


The following shows 15 code examples of torch.unique, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
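
Before the project examples, here is a minimal self-contained sketch (with made-up tensor values) of the three torch.unique call patterns the examples below rely on: sorted output, return_inverse, and return_counts.

import torch

x = torch.tensor([3, 1, 2, 3, 1, 1])

# Unique values, returned in sorted order by default
vals = torch.unique(x, sorted=True)                 # tensor([1, 2, 3])

# return_inverse: for each input element, its index into the unique values
vals, inv = torch.unique(x, return_inverse=True)    # inv: tensor([2, 0, 1, 2, 0, 0])

# return_counts: how many times each unique value occurs in the input
vals, counts = torch.unique(x, return_counts=True)  # counts: tensor([3, 1, 2])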

Example 1: roi2bbox

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def roi2bbox(rois):
    """Convert rois to bounding box format.

    Args:
        rois (torch.Tensor): RoIs with the shape (n, 5) where the first
            column indicates batch id of each RoI.

    Returns:
        list[torch.Tensor]: Converted boxes of corresponding rois.
    """
    bbox_list = []
    img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
    for img_id in img_ids:
        inds = (rois[:, 0] == img_id.item())
        bbox = rois[inds, 1:]
        bbox_list.append(bbox)
    return bbox_list 
Developer ID: open-mmlab, Project: mmdetection, Lines of code: 19, Source file: transforms.py

Example 2: scatter

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def scatter(self, data_list, device_ids):
        num_devices = min(len(device_ids), len(data_list))

        count = torch.tensor([data.num_nodes for data in data_list])
        cumsum = count.cumsum(0)
        cumsum = torch.cat([cumsum.new_zeros(1), cumsum], dim=0)
        device_id = num_devices * cumsum.to(torch.float) / cumsum[-1].item()
        device_id = (device_id[:-1] + device_id[1:]) / 2.0
        device_id = device_id.to(torch.long)  # round.
        split = device_id.bincount().cumsum(0)
        split = torch.cat([split.new_zeros(1), split], dim=0)
        split = torch.unique(split, sorted=True)
        split = split.tolist()

        return [
            Batch.from_data_list(data_list[split[i]:split[i + 1]]).to(
                torch.device('cuda:{}'.format(device_ids[i])))
            for i in range(len(split) - 1)
        ] 
Developer ID: rusty1s, Project: pytorch_geometric, Lines of code: 21, Source file: data_parallel.py

Example 3: forward

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def forward(self, pos, centroids, feat=None):
        dev = pos.device
        group_idx = self.frnn(pos, centroids)
        B, N, _ = pos.shape
        glist = []
        for i in range(B):
            center = torch.zeros((N)).to(dev)
            center[centroids[i]] = 1
            src = group_idx[i].contiguous().view(-1)
            dst = centroids[i].view(-1, 1).repeat(1, self.n_neighbor).view(-1)

            unified = torch.cat([src, dst])
            uniq, inv_idx = torch.unique(unified, return_inverse=True)
            src_idx = inv_idx[:src.shape[0]]
            dst_idx = inv_idx[src.shape[0]:]

            g = dgl.DGLGraph((src_idx.cpu(), dst_idx.cpu()), readonly=True)
            g.ndata['pos'] = pos[i][uniq]
            g.ndata['center'] = center[uniq]
            if feat is not None:
                g.ndata['feat'] = feat[i][uniq]
            glist.append(g)
        bg = dgl.batch(glist)
        return bg 
Developer ID: dmlc, Project: dgl, Lines of code: 26, Source file: pointnet2.py

Example 4: main

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def main(args):
    th.distributed.init_process_group(backend='gloo')
    g = dgl.distributed.DistGraph(args.ip_config, args.graph_name)
    print('rank:', g.rank())

    train_nid = dgl.distributed.node_split(g.ndata['train_mask'], g.get_partition_book(), force_even=True)
    val_nid = dgl.distributed.node_split(g.ndata['val_mask'], g.get_partition_book(), force_even=True)
    test_nid = dgl.distributed.node_split(g.ndata['test_mask'], g.get_partition_book(), force_even=True)
    print('part {}, train: {}, val: {}, test: {}'.format(g.rank(), len(train_nid),
                                                         len(val_nid), len(test_nid)))
    device = th.device('cpu')
    n_classes = len(th.unique(g.ndata['labels'][np.arange(g.number_of_nodes())]))

    # Pack data
    in_feats = g.ndata['features'].shape[1]
    data = train_nid, val_nid, in_feats, n_classes, g
    run(args, device, data)
    print("parent ends") 
Developer ID: dmlc, Project: dgl, Lines of code: 20, Source file: train_dist.py

Example 5: load_ogb

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def load_ogb(name):
    from ogb.nodeproppred import DglNodePropPredDataset

    data = DglNodePropPredDataset(name=name)
    splitted_idx = data.get_idx_split()
    graph, labels = data[0]
    labels = labels[:, 0]

    graph.ndata['features'] = graph.ndata['feat']
    graph.ndata['labels'] = labels
    in_feats = graph.ndata['features'].shape[1]
    num_labels = len(th.unique(labels))

    # Find the node IDs in the training, validation, and test set.
    train_nid, val_nid, test_nid = splitted_idx['train'], splitted_idx['valid'], splitted_idx['test']
    train_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    train_mask[train_nid] = True
    val_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    val_mask[val_nid] = True
    test_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    test_mask[test_nid] = True
    graph.ndata['train_mask'] = train_mask
    graph.ndata['val_mask'] = val_mask
    graph.ndata['test_mask'] = test_mask
    return graph, len(th.unique(graph.ndata['labels'])) 
Developer ID: dmlc, Project: dgl, Lines of code: 27, Source file: load_graph.py

Example 6: bdd_message_func

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def bdd_message_func(self, edges):
        """Message function for block-diagonal-decomposition regularizer"""
        if edges.src['h'].dtype == th.int64 and len(edges.src['h'].shape) == 1:
            raise TypeError('Block decomposition does not allow integer ID feature.')

        # calculate msg @ W_r before put msg into edge
        if self.low_mem:
            etypes = th.unique(edges.data['type'])
            msg = th.empty((edges.src['h'].shape[0], self.out_feat),
                           device=edges.src['h'].device)
            for etype in etypes:
                loc = edges.data['type'] == etype
                w = self.weight[etype].view(self.num_bases, self.submat_in, self.submat_out)
                src = edges.src['h'][loc].view(-1, self.num_bases, self.submat_in)
                sub_msg = th.einsum('abc,bcd->abd', src, w)
                sub_msg = sub_msg.reshape(-1, self.out_feat)
                msg[loc] = sub_msg
        else:
            weight = self.weight.index_select(0, edges.data['type']).view(
                -1, self.submat_in, self.submat_out)
            node = edges.src['h'].view(-1, 1, self.submat_in)
            msg = th.bmm(node, weight).view(-1, self.out_feat)
        if 'norm' in edges.data:
            msg = msg * edges.data['norm']
        return {'msg': msg} 
Developer ID: dmlc, Project: dgl, Lines of code: 27, Source file: relgraphconv.py

Example 7: task_importance_weights

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def task_importance_weights(label_array):
    uniq = torch.unique(label_array)
    num_examples = label_array.size(0)

    m = torch.zeros(uniq.shape[0])

    for i, t in enumerate(torch.arange(torch.min(uniq), torch.max(uniq))):
        m_k = torch.max(torch.tensor([label_array[label_array > t].size(0), 
                                      num_examples - label_array[label_array > t].size(0)]))
        m[i] = torch.sqrt(m_k.float())

    imp = m/torch.max(m)
    return imp


# Data-specific scheme 
Developer ID: Raschka-research-group, Project: coral-cnn, Lines of code: 18, Source file: cacd-ordinal.py

Example 8: test_sparse_weights_2d

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def test_sparse_weights_2d(self):
        in_channels, kernel_size, out_channels = 64, (5, 5), 64
        input_size = in_channels * kernel_size[0] * kernel_size[1]

        with torch.no_grad():
            for sparsity in [0.1, 0.5, 0.9]:
                cnn = torch.nn.Conv2d(in_channels=in_channels,
                                      out_channels=out_channels,
                                      kernel_size=kernel_size)
                sparse = SparseWeights2d(cnn, sparsity=sparsity)
                nonzeros = torch.nonzero(sparse.module.weight, as_tuple=True)[0]
                counts = torch.unique(nonzeros, return_counts=True)[1]

                # Expected non-zeros per output channel
                expected = [round(input_size * (1.0 - sparsity))] * out_channels
                self.assertSequenceEqual(counts.numpy().tolist(), expected) 
Developer ID: numenta, Project: nupic.torch, Lines of code: 18, Source file: sparse_weights_test.py

Example 9: test_rezero_1d

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def test_rezero_1d(self):
        in_features, out_features = 784, 10
        for sparsity in [0.1, 0.5, 0.9]:
            linear = torch.nn.Linear(in_features=in_features,
                                     out_features=out_features)
            sparse = SparseWeights(linear, sparsity=sparsity)

            # Ensure weights are not sparse
            sparse.module.weight.data.fill_(1.0)

            # Rezero, verify the weights become sparse
            sparse.rezero_weights()
            nonzeros = torch.nonzero(sparse.module.weight, as_tuple=True)[0]
            counts = torch.unique(nonzeros, return_counts=True)[1]
            expected = [round(in_features * (1.0 - sparsity))] * out_features
            self.assertSequenceEqual(counts.numpy().tolist(), expected) 
Developer ID: numenta, Project: nupic.torch, Lines of code: 18, Source file: sparse_weights_test.py

Example 10: test_rezero_2d

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def test_rezero_2d(self):
        in_channels, kernel_size, out_channels = 64, (5, 5), 64
        input_size = in_channels * kernel_size[0] * kernel_size[1]

        with torch.no_grad():
            for sparsity in [0.1, 0.5, 0.9]:
                cnn = torch.nn.Conv2d(in_channels=in_channels,
                                      out_channels=out_channels,
                                      kernel_size=kernel_size)
                sparse = SparseWeights2d(cnn, sparsity=sparsity)

                # Ensure weights are not sparse
                sparse.module.weight.data.fill_(1.0)

                # Rezero, verify the weights become sparse
                sparse.rezero_weights()
                nonzeros = torch.nonzero(sparse.module.weight, as_tuple=True)[0]
                counts = torch.unique(nonzeros, return_counts=True)[1]
                expected = [round(input_size * (1.0 - sparsity))] * out_channels
                self.assertSequenceEqual(counts.numpy().tolist(), expected) 
Developer ID: numenta, Project: nupic.torch, Lines of code: 22, Source file: sparse_weights_test.py

Example 11: forward

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def forward(self, x, target):
        """ Compute weighted cross entropy

            @param x: a [N x C x H x W] torch.FloatTensor of values
            @param target: a [N x H x W] torch.LongTensor of values
        """
        temp = self.CrossEntropyLoss(x, target) # Shape: [N x H x W]

        # Compute pixel weights
        weight_mask = torch.zeros_like(target).float() # Shape: [N x H x W]. weighted mean over pixels
        unique_object_labels = torch.unique(target)
        for obj in unique_object_labels:
            num_pixels = torch.sum(target == obj, dtype=torch.float)
            weight_mask[target == obj] = 1 / num_pixels # inversely proportional to number of pixels

        loss = torch.sum(temp * weight_mask) / torch.sum(weight_mask) 
        return loss 
Developer ID: chrisdxie, Project: uois, Lines of code: 19, Source file: losses.py

Example 12: Compute_AUSUC

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def Compute_AUSUC(dataset, all_scores, gt_classes, seen, unseen):
    cls_in_test = set(np.unique(gt_classes).tolist())
    seen = sorted(list(cls_in_test.intersection(set(seen))))
    unseen = sorted(list(cls_in_test.intersection(set(unseen))))
    score_S = all_scores[:, seen]
    score_U = all_scores[:, unseen]
    Y = gt_classes
    label_S = np.array(seen)
    label_U = np.array(unseen)

    AUC_val, AUC_record, acc_noBias, HM, fixed_bias = _Compute_AUSUC(
        torch.from_numpy(score_S),
        torch.from_numpy(score_U),
        torch.from_numpy(Y.astype(np.int64)),
        torch.from_numpy(label_S.astype(np.int64)),
        torch.from_numpy(label_U.astype(np.int64)))

    HM, fixed_bias = HM.item(), fixed_bias.item()
    print('AUC_val: {:.3f} HM: {:.3f} fixed_bias: {:.3f}'\
        .format(AUC_val, HM, fixed_bias))

    return {'AUC_val':AUC_val, 'AUC_record':AUC_record,\
        'acc_noBias': acc_noBias, 'HM': HM, 'fixed_bias': fixed_bias} 
Developer ID: ruotianluo, Project: Context-aware-ZSR, Lines of code: 25, Source file: test_engine.py

Example 13: __init__

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven

        self.groups = torch.unique(self.group_ids).sort(0)[0]

        self._can_reuse_batches = False 
Developer ID: Res2Net, Project: Res2Net-maskrcnn, Lines of code: 17, Source file: grouped_batch_sampler.py

Example 14: roi2bbox

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def roi2bbox(rois):
    bbox_list = []
    img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
    for img_id in img_ids:
        inds = (rois[:, 0] == img_id.item())
        bbox = rois[inds, 1:]
        bbox_list.append(bbox)
    return bbox_list 
Developer ID: dingjiansw101, Project: AerialDetection, Lines of code: 10, Source file: transforms.py

Example 15: droi2dbbox

# Required module: import torch [as alias]
# Or: from torch import unique [as alias]
def droi2dbbox(drois):
    dbbox_list = []
    img_ids = torch.unique(drois[:, 0].cpu(), sorted=True)
    for img_id in img_ids:
        inds = (drois[:, 0] == img_id.item())
        dbbox = drois[inds, 1:]
        dbbox_list.append(dbbox)
    return dbbox_list 
Developer ID: dingjiansw101, Project: AerialDetection, Lines of code: 10, Source file: transforms_rbbox.py


Note: The torch.unique examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using them; do not reproduce without permission.