

Python torch.sparse Method Code Examples

This article collects typical usage examples of the torch.sparse method in Python. If you are wondering how torch.sparse is used in practice, or looking for concrete examples of torch.sparse, the curated code samples below may help. You can also explore further usage examples from the torch package.


The following shows 15 code examples of the torch.sparse method, sorted by popularity by default.

Example 1: normalize_feature

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def normalize_feature(mx):
    """Row-normalize sparse matrix

    Parameters
    ----------
    mx : scipy.sparse.csr_matrix
        matrix to be normalized

    Returns
    -------
    scipy.sparse.lil_matrix
        normalized matrix
    """
    if type(mx) is not sp.lil.lil_matrix:
        mx = mx.tolil()
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx 
Developer: DSE-MSU, Project: DeepRobust, Lines: 23, Source: utils.py
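A minimal sketch of calling this helper; the toy matrix below is made up for illustration:

import numpy as np
import scipy.sparse as sp

# Toy 3x3 feature matrix; each non-empty row is scaled to sum to 1.
features = sp.csr_matrix(np.array([[1., 1., 0.],
                                   [0., 2., 2.],
                                   [0., 0., 0.]]))
normalized = normalize_feature(features)
print(normalized.toarray())   # rows now sum to 1 (the all-zero row stays zero)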

Example 2: normalize_sparse_tensor

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def normalize_sparse_tensor(adj, fill_value=1):
    """Normalize sparse tensor. Need to import torch_scatter
    """
    edge_index = adj._indices()
    edge_weight = adj._values()
    num_nodes= adj.size(0)
    edge_index, edge_weight = add_self_loops(
	edge_index, edge_weight, fill_value, num_nodes)

    row, col = edge_index
    from torch_scatter import scatter_add
    deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
    deg_inv_sqrt = deg.pow(-0.5)
    deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0

    values = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]

    shape = adj.shape
    return torch.sparse.FloatTensor(edge_index, values, shape) 
Developer: DSE-MSU, Project: DeepRobust, Lines: 21, Source: utils.py
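A minimal sketch of calling this on a tiny sparse adjacency matrix, assuming add_self_loops from the same utils module and torch_scatter are available:

import torch

# Two nodes joined by a single undirected edge.
indices = torch.tensor([[0, 1], [1, 0]])
values = torch.ones(2)
adj = torch.sparse.FloatTensor(indices, values, torch.Size([2, 2]))
adj_norm = normalize_sparse_tensor(adj)   # D^-1/2 (A + I) D^-1/2
print(adj_norm.to_dense())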

Example 3: degree_normalize_sparse_tensor

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def degree_normalize_sparse_tensor(adj, fill_value=1):
    """degree_normalize_sparse_tensor.
    """
    edge_index = adj._indices()
    edge_weight = adj._values()
    num_nodes= adj.size(0)

    edge_index, edge_weight = add_self_loops(
	edge_index, edge_weight, fill_value, num_nodes)

    row, col = edge_index
    from torch_scatter import scatter_add
    deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
    deg_inv_sqrt = deg.pow(-1)
    deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0

    values = deg_inv_sqrt[row] * edge_weight
    shape = adj.shape
    return torch.sparse.FloatTensor(edge_index, values, shape) 
Developer: DSE-MSU, Project: DeepRobust, Lines: 21, Source: utils.py

Example 4: degree_normalize_adj_tensor

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def degree_normalize_adj_tensor(adj, sparse=True):
    """degree_normalize_adj_tensor.
    """

    device = torch.device("cuda" if adj.is_cuda else "cpu")
    if sparse:
        # return  degree_normalize_sparse_tensor(adj)
        adj = to_scipy(adj)
        mx = degree_normalize_adj(adj)
        return sparse_mx_to_torch_sparse_tensor(mx).to(device)
    else:
        mx = adj + torch.eye(adj.shape[0]).to(device)
        rowsum = mx.sum(1)
        r_inv = rowsum.pow(-1).flatten()
        r_inv[torch.isinf(r_inv)] = 0.
        r_mat_inv = torch.diag(r_inv)
        mx = r_mat_inv @ mx
    return mx 
Developer: DSE-MSU, Project: DeepRobust, Lines: 20, Source: utils.py
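For the dense path, which only needs torch, a minimal sketch:

import torch

adj = torch.tensor([[0., 1.],
                    [1., 0.]])
mx = degree_normalize_adj_tensor(adj, sparse=False)   # D^-1 (A + I)
print(mx)   # both rows become [0.5, 0.5] for this graph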

Example 5: generate_w

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def generate_w(output_dim, w_distrib='uniform', w_sparsity=None, mean=0.0, std=1.0, seed=None, dtype=torch.float32):
    """
    Generate the W (reservoir) weight matrix.
    :param output_dim: size of the square matrix (output_dim x output_dim)
    :param w_distrib: weight distribution, 'uniform' or Gaussian otherwise
    :param w_sparsity: sparsity of the matrix (optional)
    :param mean: mean of the Gaussian distribution
    :param std: standard deviation of the Gaussian distribution
    :param seed: optional random seed
    :param dtype: tensor dtype
    :return: the generated W matrix
    """
    # Manual seed
    if seed is not None:
        torch.manual_seed(seed)
        np.random.seed(seed)
    # end if

    # Distribution
    if w_distrib == 'uniform':
        w = ESNCell.generate_uniform_matrix(size=(output_dim, output_dim), sparsity=w_sparsity, input_set=[-1.0, 1.0])
        w = torch.from_numpy(w.astype(np.float32))
    else:
        w = ESNCell.generate_gaussian_matrix(size=(output_dim, output_dim), sparsity=w_sparsity, mean=mean, std=std, dtype=dtype)
    # end if

    return w
# end generate_w
Developer: nschaetti, Project: EchoTorch, Lines: 27, Source: ESNCell.py
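A rough usage sketch, assuming EchoTorch is installed and generate_w is exposed as a static method on ESNCell; the import path is an assumption and may differ across EchoTorch versions:

import torch
from echotorch.nn import ESNCell   # assumed import path

# 100x100 reservoir weight matrix drawn from a uniform distribution.
w = ESNCell.generate_w(output_dim=100, w_distrib='uniform', w_sparsity=0.1, seed=42)
print(w.shape)   # torch.Size([100, 100])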

Example 6: preprocess

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def preprocess(adj, features, labels, preprocess_adj=False, preprocess_feature=False, sparse=False, device='cpu'):
    """Convert adj, features, labels from array or sparse matrix to
    torch Tensor, and normalize the input data.

    Parameters
    ----------
    adj : scipy.sparse.csr_matrix
        the adjacency matrix.
    features : scipy.sparse.csr_matrix
        node features
    labels : numpy.array
        node labels
    preprocess_adj : bool
        whether to normalize the adjacency matrix
    preprocess_feature : bool
        whether to normalize the feature matrix
    sparse : bool
       whether to return sparse tensor
    device : str
        'cpu' or 'cuda'
    """

    if preprocess_adj:
        adj = normalize_adj(adj)

    if preprocess_feature:
        features = normalize_feature(features)

    labels = torch.LongTensor(labels)
    if sparse:
        adj = sparse_mx_to_torch_sparse_tensor(adj)
        features = sparse_mx_to_torch_sparse_tensor(features)
    else:
        features = torch.FloatTensor(np.array(features.todense()))
        adj = torch.FloatTensor(adj.todense())
    return adj.to(device), features.to(device), labels.to(device) 
Developer: DSE-MSU, Project: DeepRobust, Lines: 38, Source: utils.py
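A minimal sketch of preprocessing a toy graph, assuming normalize_adj, normalize_feature and sparse_mx_to_torch_sparse_tensor from the same utils module are in scope:

import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))
features = sp.csr_matrix(np.eye(2))
labels = np.array([0, 1])
adj, features, labels = preprocess(adj, features, labels,
                                   preprocess_adj=True, sparse=True, device='cpu')
print(adj.is_sparse, features.is_sparse, labels)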

Example 7: to_tensor

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def to_tensor(adj, features, labels=None, device='cpu'):
    """Convert adj, features, labels from array or sparse matrix to
    torch Tensor.

    Parameters
    ----------
    adj : scipy.sparse.csr_matrix
        the adjacency matrix.
    features : scipy.sparse.csr_matrix
        node features
    labels : numpy.array
        node labels
    device : str
        'cpu' or 'cuda'
    """
    if sp.issparse(adj):
        adj = sparse_mx_to_torch_sparse_tensor(adj)
    else:
        adj = torch.FloatTensor(adj)
    if sp.issparse(features):
        features = sparse_mx_to_torch_sparse_tensor(features)
    else:
        features = torch.FloatTensor(np.array(features))

    if labels is None:
        return adj.to(device), features.to(device)
    else:
        labels = torch.LongTensor(labels)
        return adj.to(device), features.to(device), labels.to(device) 
Developer: DSE-MSU, Project: DeepRobust, Lines: 31, Source: utils.py
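A minimal sketch, assuming sparse_mx_to_torch_sparse_tensor from the same utils module is in scope:

import numpy as np
import scipy.sparse as sp

adj = sp.eye(3, format='csr')
features = np.random.rand(3, 4)
labels = np.array([0, 1, 2])
adj, features, labels = to_tensor(adj, features, labels, device='cpu')
print(adj.is_sparse, features.shape, labels.dtype)   # True torch.Size([3, 4]) torch.int64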

Example 8: normalize_adj

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def normalize_adj(mx):
    """Normalize sparse adjacency matrix,
    A' = (D + I)^-1/2 * ( A + I ) * (D + I)^-1/2
    Row-normalize sparse matrix

    Parameters
    ----------
    mx : scipy.sparse.csr_matrix
        matrix to be normalized

    Returns
    -------
    scipy.sparse.lil_matrix
        normalized matrix
    """

    if type(mx) is not sp.lil.lil_matrix:
        mx = mx.tolil()
    if mx[0, 0] == 0:
        mx = mx + sp.eye(mx.shape[0])
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1/2).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    mx = mx.dot(r_mat_inv)
    return mx 
Developer: DSE-MSU, Project: DeepRobust, Lines: 29, Source: utils.py
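A minimal sketch with a two-node graph:

import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))
adj_norm = normalize_adj(adj)   # (D + I)^-1/2 (A + I) (D + I)^-1/2
print(adj_norm.toarray())       # every entry becomes 0.5 for this graph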

Example 9: degree_normalize_adj

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def degree_normalize_adj(mx):
    """Row-normalize sparse matrix"""
    mx = mx.tolil()
    if mx[0, 0] == 0:
        mx = mx + sp.eye(mx.shape[0])
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    # mx = mx.dot(r_mat_inv)
    mx = r_mat_inv.dot(mx)
    return mx 
Developer: DSE-MSU, Project: DeepRobust, Lines: 14, Source: utils.py

Example 10: sparse_mx_to_torch_sparse_tensor

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape) 
Developer: DSE-MSU, Project: DeepRobust, Lines: 10, Source: utils.py
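A minimal sketch converting a scipy identity matrix:

import scipy.sparse as sp

t = sparse_mx_to_torch_sparse_tensor(sp.eye(3, format='csr'))
print(t.to_dense())   # 3x3 identity as a dense torch tensor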

Example 11: to_scipy

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def to_scipy(tensor):
    """Convert a dense/sparse tensor to scipy matrix"""
    if is_sparse_tensor(tensor):
        values = tensor._values()
        indices = tensor._indices()
        return sp.csr_matrix((values.cpu().numpy(), indices.cpu().numpy()), shape=tensor.shape)
    else:
        indices = tensor.nonzero().t()
        values = tensor[indices[0], indices[1]]
        return sp.csr_matrix((values.cpu().numpy(), indices.cpu().numpy()), shape=tensor.shape) 
Developer: DSE-MSU, Project: DeepRobust, Lines: 12, Source: utils.py
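A minimal sketch of the dense branch; is_sparse_tensor is a small helper from the same utils module and is assumed to be in scope:

import torch

dense = torch.tensor([[0., 2.],
                      [3., 0.]])
mx = to_scipy(dense)
print(type(mx), mx.toarray())   # a scipy csr_matrix with the same non-zeros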

Example 12: backward

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def backward(ctx, grad_output, grad_indices):
        tensor_type = type(grad_output).__name__
        if grad_output.is_cuda:
            SparseTensor = getattr(torch.cuda.sparse, tensor_type)
        else:
            SparseTensor = getattr(torch.sparse, tensor_type)

        grad_input = grad_output
        indices = ctx._indices
        indices = indices.view(1, -1)
        grad_weight = SparseTensor(indices, grad_output, ctx._weight_size).to_dense()
        return grad_input, grad_weight 
Developer: akolishchak, Project: doom-net-pytorch, Lines: 14, Source: nearest_embedding.py
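The per-dtype constructors used here (torch.sparse.FloatTensor, torch.cuda.sparse.*) are the legacy API; recent PyTorch versions recommend torch.sparse_coo_tensor, which infers dtype and device from its arguments. A small equivalence sketch:

import torch

indices = torch.tensor([[0, 1], [1, 0]])
values = torch.tensor([3.0, 4.0])
legacy = torch.sparse.FloatTensor(indices, values, torch.Size([2, 2]))
modern = torch.sparse_coo_tensor(indices, values, (2, 2))
print(torch.equal(legacy.to_dense(), modern.to_dense()))   # True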

Example 13: sparse_eye

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def sparse_eye(size):
    """
    Returns the identity matrix as a sparse matrix
    """
    indices = torch.arange(0, size).long().unsqueeze(0).expand(2, size)
    values = torch.tensor(1.0).expand(size)
    cls = getattr(torch.sparse, values.type().split(".")[-1])
    return cls(indices, values, torch.Size([size, size])) 
Developer: cornellius-gp, Project: gpytorch, Lines: 10, Source: sparse.py
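A minimal sketch:

import torch

eye = sparse_eye(3)
print(eye.to_dense())   # 3x3 identity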

Example 14: sparse_repeat

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def sparse_repeat(sparse, *repeat_sizes):
    """
    """
    if len(repeat_sizes) == 1 and isinstance(repeat_sizes, tuple):
        repeat_sizes = repeat_sizes[0]

    if len(repeat_sizes) > len(sparse.shape):
        num_new_dims = len(repeat_sizes) - len(sparse.shape)
        new_indices = sparse._indices()
        new_indices = torch.cat(
            [
                torch.zeros(num_new_dims, new_indices.size(1), dtype=new_indices.dtype, device=new_indices.device),
                new_indices,
            ],
            0,
        )
        sparse = torch.sparse_coo_tensor(
            new_indices,
            sparse._values(),
            torch.Size((*[1 for _ in range(num_new_dims)], *sparse.shape)),
            dtype=sparse.dtype,
            device=sparse.device,
        )

    for i, repeat_size in enumerate(repeat_sizes):
        if repeat_size > 1:
            new_indices = sparse._indices().repeat(1, repeat_size)
            adding_factor = torch.arange(0, repeat_size, dtype=new_indices.dtype, device=new_indices.device).unsqueeze_(
                1
            )
            new_indices[i].view(repeat_size, -1).add_(adding_factor)
            sparse = torch.sparse_coo_tensor(
                new_indices,
                sparse._values().repeat(repeat_size),
                torch.Size((*sparse.shape[:i], repeat_size * sparse.size(i), *sparse.shape[i + 1 :])),
                dtype=sparse.dtype,
                device=sparse.device,
            )

    return sparse 
Developer: cornellius-gp, Project: gpytorch, Lines: 42, Source: sparse.py
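A minimal sketch that prepends a batch dimension and repeats it, reusing sparse_eye from the previous example:

import torch

eye = sparse_eye(2)                     # 2x2 sparse identity
batched = sparse_repeat(eye, 3, 1, 1)   # new leading dim repeated 3 times
print(batched.shape)                    # torch.Size([3, 2, 2])
print(batched.to_dense()[0])            # each slice is the 2x2 identity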

Example 15: _setupX

# Required import: import torch [as alias]
# Or: from torch import sparse [as alias]
def _setupX(self, sparse=0):
        """
        Initializes an X tensor of features for prediction
        :param sparse: 0 if dense tensor, 1 if sparse
        :return: None
        """
        feature_table = self.dataengine.get_table_to_dataframe(
            "Feature_clean", self.dataset).collect()
        if sparse:
            coordinates = torch.LongTensor()
            values = torch.FloatTensor([])
            for factor in feature_table:
                coordinate = torch.LongTensor([[int(factor.vid) - 1],
                                               [int(factor.feature) - 1],
                                               [int(factor.assigned_val) - 1]])
                coordinates = torch.cat((coordinates, coordinate), 1)
                value = factor['count']
                values = torch.cat((values, torch.FloatTensor([value])), 0)
            self.X = torch.sparse\
                .FloatTensor(coordinates, values,
                             torch.Size([self.N, self.M, self.L]))
        else:
            self.X = torch.zeros(self.N, self.M, self.L)
            for factor in feature_table:
                self.X[factor.vid - 1, factor.feature - 1,
                       factor.assigned_val - 1] = factor['count']
        return 
Developer: HoloClean, Project: HoloClean-Legacy-deprecated, Lines: 29, Source: softmax.py
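The method is tied to HoloClean's dataengine, but the sparse branch boils down to stacking (vid, feature, assigned_val) coordinates into a 3-D sparse count tensor. A standalone sketch of that idea, with made-up toy records:

import torch

# Toy factor records: (vid, feature, assigned_val, count), 1-indexed as in the table.
factors = [(1, 1, 1, 2.0), (1, 2, 3, 1.0), (2, 1, 2, 4.0)]
N, M, L = 2, 2, 3
coords = torch.LongTensor([[v - 1, f - 1, a - 1] for v, f, a, _ in factors]).t()
values = torch.FloatTensor([c for *_, c in factors])
X = torch.sparse.FloatTensor(coords, values, torch.Size([N, M, L]))
print(X.to_dense())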


Note: The torch.sparse examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.