本文整理匯總了Python中torch.sparse_coo_tensor方法的典型用法代碼示例。如果您正苦於以下問題:Python torch.sparse_coo_tensor方法的具體用法?Python torch.sparse_coo_tensor怎麽用?Python torch.sparse_coo_tensor使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類torch
的用法示例。
在下文中一共展示了torch.sparse_coo_tensor方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: _compute_laplacian
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def _compute_laplacian(self):
    """Precompute and cache the graph Laplacian as a sparse COO tensor.

    Reads ``self.adjacency`` (a list of neighbour lists) and fills
    ``self._laplacian`` with an (n, n) sparse tensor whose entries are
    produced by ``self._laplacian_element(row, col)``. Also clears the
    ``self._recompute_laplacian`` dirty flag.
    """
    self._recompute_laplacian = False
    # One (row, col) pair per edge, plus the diagonal entry for each node.
    index_pairs = [
        (node, edge)
        for node, edges in enumerate(self.adjacency)
        for edge in edges + [node]
    ]
    values = torch.zeros(len(index_pairs))
    for pos, pair in enumerate(index_pairs):
        values[pos] = self._laplacian_element(*pair)
    # torch.Tensor(...) would produce float32 indices, which
    # torch.sparse_coo_tensor rejects (it requires int64); build the
    # index tensor explicitly as long before transposing to (2, nnz).
    indices = torch.tensor(index_pairs, dtype=torch.long).t()
    self._laplacian = torch.sparse_coo_tensor(
        indices, values,
        (len(self.adjacency), len(self.adjacency))
    )
示例2: _generate_adj
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def _generate_adj(self, sample1, sample2):
    """Build a sparse bipartite adjacency between two sampled node sets.

    Entry (r, c) is 1.0 when the node ``self.adj[sample2[c]]`` lists as a
    neighbour appears in ``sample1`` at position r. Shape is
    (len(sample1), len(sample2)).
    """
    # Map each node in sample1 to its row position.
    position = {node: row for row, node in enumerate(sample1)}
    edges = []
    for col, target in enumerate(sample2):
        for neighbour in self.adj[target]:
            if neighbour in position:
                edges.append([position[neighbour], col])
    edge_idx = torch.LongTensor(edges)
    weights = torch.ones(edge_idx.shape[0]).float()
    return torch.sparse_coo_tensor(
        edge_idx.t(), weights, (len(sample1), len(sample2))
    )
示例3: make_batch_align_matrix
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def make_batch_align_matrix(index_tensor, size=None, normalize=False):
    """
    Convert a sparse index_tensor into a batch of alignment matrix,
    with row normalize to the sum of 1 if set normalize.
    Args:
        index_tensor (LongTensor): ``(N, 3)`` of [batch_id, tgt_id, src_id]
        size (List[int]): Size of the sparse tensor.
        normalize (bool): if normalize the 2nd dim of resulting tensor.
    """
    device = index_tensor.device
    fill_count = index_tensor.size(0)
    weights = torch.ones([fill_count], dtype=torch.float)
    # Scatter ones at the given (batch, tgt, src) coordinates, then densify.
    aligned = torch.sparse_coo_tensor(
        index_tensor.t(), weights, size=size, device=device).to_dense()
    if not normalize:
        return aligned
    row_sum = aligned.sum(-1, keepdim=True)  # sum over src per tgt row
    # Clamp zero rows up to 1 to avoid division by zero.
    torch.nn.functional.threshold(row_sum, 1, 1, inplace=True)
    aligned.div_(row_sum)
    return aligned
示例4: build_adj
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def build_adj(edge_index, num_nodes):
    """Build a symmetrized sparse adjacency matrix with self-loops.

    for undirected graph
    :param edge_index: (2, E) index tensor of directed edges
    :param num_nodes: node count, or None to infer from edge_index[0]
    :return: sparse float (n, n) tensor with 1.0 where an edge (either
        direction) or a self-loop exists
    """
    if num_nodes is None:
        num_nodes = max(edge_index[0]) + 1
    shape = torch.Size([num_nodes, num_nodes])
    weights = torch.ones(edge_index.size(1), dtype=torch.float)
    adjacency = torch.sparse_coo_tensor(edge_index, weights, shape)
    diag = torch.arange(start=0, end=num_nodes)
    identity = torch.sparse_coo_tensor(
        torch.stack([diag, diag]), torch.ones([num_nodes]), shape)
    # Entries can exceed 1 when edge_index is already symmetrical,
    # so binarize via gt(0) after densifying.
    combined = adjacency + adjacency.t() + identity
    return combined.to_dense().gt(0).to_sparse().type(torch.float)
示例5: __call__
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def __call__(self, data):
    """Densify ``data.edge_index``/``data.edge_attr`` into ``data.adj`` and
    zero-pad node-level tensors (x, pos, node-level y) up to a fixed node
    count, recording validity in ``data.mask``.
    """
    assert data.edge_index is not None
    original_count = data.num_nodes
    if self.num_nodes is None:
        target_count = original_count
    else:
        assert original_count <= self.num_nodes
        target_count = self.num_nodes

    if data.edge_attr is None:
        weights = torch.ones(data.edge_index.size(1), dtype=torch.float)
    else:
        weights = data.edge_attr

    # Dense adjacency, with trailing edge-attribute dims if present.
    adj_size = torch.Size([target_count, target_count] + list(weights.size())[1:])
    data.adj = torch.sparse_coo_tensor(data.edge_index, weights, adj_size).to_dense()
    data.edge_index = None
    data.edge_attr = None

    # True for real nodes, False for padding rows.
    data.mask = torch.zeros(target_count, dtype=torch.bool)
    data.mask[:original_count] = 1

    def _pad_rows(tensor):
        # Zero-pad along dim 0 up to target_count rows.
        pad_shape = [target_count - tensor.size(0)] + list(tensor.size())[1:]
        return torch.cat([tensor, tensor.new_zeros(pad_shape)], dim=0)

    if data.x is not None:
        data.x = _pad_rows(data.x)
    if data.pos is not None:
        data.pos = _pad_rows(data.pos)
    # Only pad y when it is node-level (one entry per original node).
    if data.y is not None and (data.y.size(0) == original_count):
        data.y = _pad_rows(data.y)
    return data
示例6: test_gcn_conv_with_sparse_input_feature
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def test_gcn_conv_with_sparse_input_feature():
    """GCNConv should accept a sparse COO feature matrix as input."""
    sparse_x = torch.sparse_coo_tensor(
        indices=torch.tensor([[0, 0], [0, 1]]),
        values=torch.tensor([1.0, 1.0]),
        size=torch.Size([4, 16]),
    )
    edges = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    layer = GCNConv(16, 32)
    assert layer(sparse_x, edges).size() == (4, 32)
示例7: sparse_matrix
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def sparse_matrix(data, index, shape, force_format=False):
    """Create a sparse tensor from (format, coordinates) index metadata.

    Only the 'coo' format is accepted; the second tuple element is
    returned as None (no auxiliary data for the torch backend).
    """
    fmt = index[0]
    if fmt != 'coo':
        raise TypeError('Pytorch backend only supports COO format. But got %s.' % fmt)
    return th.sparse_coo_tensor(index[1], data, shape), None
示例8: neighbourhood_to_adjacency
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def neighbourhood_to_adjacency(neighbourhood):
    """Build a symmetric sparse adjacency matrix from neighbourhood pairs.

    Args:
        neighbourhood: iterable of ``(idx, nodes)`` pairs; ``len(neighbourhood)``
            defines the (square) matrix size.

    Returns:
        A float sparse COO tensor of shape (n, n) with 1.0 at every
        (idx, node) and (node, idx) position.
    """
    n = len(neighbourhood)
    size = torch.Size([n, n])
    # Deduplicate with tuples — the original collected *lists* and passed
    # them to set(), which raises TypeError (lists are unhashable).
    pairs = set()
    for idx, nodes in neighbourhood:
        for node in nodes:
            pairs.add((idx, node))
            pairs.add((node, idx))
    if not pairs:
        # No edges at all: return an empty sparse matrix of the right size.
        empty_idx = torch.empty((2, 0), dtype=torch.long)
        return torch.sparse_coo_tensor(empty_idx, torch.empty(0), size)
    # sparse_coo_tensor expects an int64 index tensor of shape (2, nnz);
    # the original passed a float (nnz, 2) tensor, which is rejected.
    # Sorting makes the index order deterministic despite the set.
    indices = torch.tensor(sorted(pairs), dtype=torch.long).t()
    values = torch.ones(indices.size(1))
    return torch.sparse_coo_tensor(indices, values, size)
示例9: _compute_adjacency_matrix
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def _compute_adjacency_matrix(self):
    """Computes the graph adjacency matrix (with self-loops) as a sparse tensor.

    Reads ``self.adjacency`` (a list of neighbour lists), stores the result
    in ``self._adjacency_matrix`` and clears the recompute flag.
    """
    self._recompute_adjacency_matrix = False
    # torch.Tensor(...) would yield float32 indices, which
    # torch.sparse_coo_tensor rejects (it requires int64); build the
    # (2, nnz) index tensor explicitly as long.
    indices = torch.tensor([
        (node, edge)
        for node, edges in enumerate(self.adjacency)
        for edge in edges + [node]  # self-loop for every node
    ], dtype=torch.long).t()
    values = torch.ones(indices.size(1))
    self._adjacency_matrix = torch.sparse_coo_tensor(
        indices, values,
        (len(self.adjacency), len(self.adjacency))
    )
示例10: forward
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def forward(ctx, indices, values, shape, b):
    """Sparse-dense matmul forward pass for a custom autograd Function.

    Rebuilds the sparse operand from its COO parts, stashes both operands
    for backward, and returns ``sparse @ b``.
    """
    # Gradients flow through `values`, never through the index tensor.
    assert indices.requires_grad == False
    sparse_mat = torch.sparse_coo_tensor(indices, values, shape)
    ctx.N = shape[0]
    ctx.save_for_backward(sparse_mat, b)
    return torch.matmul(sparse_mat, b)
示例11: build_fixation_maps
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def build_fixation_maps(Ns, Ys, Xs, batch_size, height, width, dtype=torch.float32):
    """Rasterize fixation coordinates into dense per-batch count maps.

    Each (Ns[i], Ys[i], Xs[i]) triple contributes 1 to the output at that
    (batch, y, x) position; duplicates accumulate. Returns a dense
    (batch_size, height, width) tensor of the given dtype.
    """
    coords = torch.stack((Ns, Ys, Xs), dim=0)
    weights = torch.ones(coords.shape[1], dtype=dtype, device=coords.device)
    sparse_maps = torch.sparse_coo_tensor(
        coords, weights, size=(batch_size, height, width))
    return sparse_maps.to_dense()
示例12: test_sparse_tensor
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def test_sparse_tensor(self, to_tensor, device):
    """A scipy CSR matrix should convert to an (empty) torch sparse tensor."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip()
    raw = sparse.csr_matrix(np.zeros((5, 3)).astype(np.float32))
    target = torch.sparse_coo_tensor(size=(5, 3)).to(device)
    converted = to_tensor(raw, device=device, accept_sparse=True)
    assert self.tensors_equal(converted, target)
示例13: index2matrix
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def index2matrix(index):
    """Densify a (2, E) edge-index tensor into a matrix of ones.

    The output shape is inferred from the maximum index along each row;
    duplicate edges accumulate.
    """
    assert index.size(0) == 2
    edge_index = index.long()
    num_edges = edge_index.size(1)
    weights = torch.ones(num_edges).float()
    return torch.sparse_coo_tensor(edge_index, weights).to_dense()
示例14: sparse_repeat
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def sparse_repeat(sparse, *repeat_sizes):
    """Repeat a sparse tensor along each dimension, like dense ``Tensor.repeat``.

    Accepts repeat sizes either as varargs (``sparse_repeat(t, 2, 3)``), as a
    single sequence (``sparse_repeat(t, (2, 3))``), or as a single int
    (``sparse_repeat(t, 2)``). When more repeat sizes than dimensions are
    given, new leading dimensions of size 1 are inserted first.
    """
    # Unwrap only when the single argument is itself a sequence. The original
    # tested `isinstance(repeat_sizes, tuple)` — always true for *args — so a
    # single int argument was unwrapped too and crashed on len() below.
    if len(repeat_sizes) == 1 and isinstance(repeat_sizes[0], (tuple, list)):
        repeat_sizes = tuple(repeat_sizes[0])
    if len(repeat_sizes) > len(sparse.shape):
        # Prepend size-1 dims (index rows of zeros) so ranks match.
        num_new_dims = len(repeat_sizes) - len(sparse.shape)
        new_indices = sparse._indices()
        new_indices = torch.cat(
            [
                torch.zeros(num_new_dims, new_indices.size(1), dtype=new_indices.dtype, device=new_indices.device),
                new_indices,
            ],
            0,
        )
        sparse = torch.sparse_coo_tensor(
            new_indices,
            sparse._values(),
            torch.Size((*[1 for _ in range(num_new_dims)], *sparse.shape)),
            dtype=sparse.dtype,
            device=sparse.device,
        )
    for i, repeat_size in enumerate(repeat_sizes):
        if repeat_size > 1:
            # Tile the nnz entries `repeat_size` times, then offset the i-th
            # index row of each copy by 0, size(i), 2*size(i), ...
            new_indices = sparse._indices().repeat(1, repeat_size)
            adding_factor = torch.arange(0, repeat_size, dtype=new_indices.dtype, device=new_indices.device).unsqueeze_(
                1
            )
            new_indices[i].view(repeat_size, -1).add_(adding_factor)
            sparse = torch.sparse_coo_tensor(
                new_indices,
                sparse._values().repeat(repeat_size),
                torch.Size((*sparse.shape[:i], repeat_size * sparse.size(i), *sparse.shape[i + 1 :])),
                dtype=sparse.dtype,
                device=sparse.device,
            )
    return sparse
示例15: from_adjlist
# 需要導入模塊: import torch [as 別名]
# 或者: from torch import sparse_coo_tensor [as 別名]
def from_adjlist(self, adj):
    """Transfer adj-list format to sparsetensor.

    Args:
        adj: (num_rows, fanout) tensor of sampled neighbour node ids.

    Returns:
        Tuple of (sparse (num_rows, num_unique) incidence tensor, long
        tensor of the unique node ids the columns refer to).
    """
    u_sampled, index = torch.unique(torch.flatten(adj), return_inverse=True)
    # torch.range is deprecated (end-inclusive, float result); arange plus
    # integer floor-division gives the same row id for each flattened entry
    # and is also safe for an empty adjacency list.
    row = (torch.arange(index.shape[0], device=adj.device) // adj.shape[1]).long()
    col = index
    values = torch.ones(index.shape[0]).float().to(adj.device)
    indices = torch.cat([row.unsqueeze(1), col.unsqueeze(1)], axis=1).t()
    dense_shape = (adj.shape[0], u_sampled.shape[0])
    support = torch.sparse_coo_tensor(indices, values, dense_shape)
    return support, u_sampled.long()