This article collects typical usage examples of the Python method torch_geometric.utils.remove_self_loops. If you are unsure what utils.remove_self_loops does or how to call it, the curated code examples below should help. You can also explore further usage of the containing module, torch_geometric.utils.
Below are 15 code examples of utils.remove_self_loops, sorted by popularity by default.
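Before the examples, a minimal self-contained sketch of the call itself may help: remove_self_loops(edge_index, edge_attr=None) drops every edge whose source and target node coincide and filters the optional edge attributes to match. The toy graph below is made up purely for illustration.

import torch
from torch_geometric.utils import remove_self_loops

# Toy graph with a self-loop on node 0 (the first column, 0 -> 0).
edge_index = torch.tensor([[0, 0, 1, 2],
                           [0, 1, 2, 0]])
edge_attr = torch.tensor([1.0, 2.0, 3.0, 4.0])

edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)
print(edge_index)  # tensor([[0, 1, 2], [1, 2, 0]])
print(edge_attr)   # tensor([2., 3., 4.])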
Example 1: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def forward(self, x, edge_index):
    """
    Forward propagation pass with features and indices.
    :param x: Feature matrix.
    :param edge_index: Indices.
    """
    edge_index, _ = remove_self_loops(edge_index, None)
    row, col = edge_index

    if self.norm:
        out = scatter_mean(x[col], row, dim=0, dim_size=x.size(0))
    else:
        out = scatter_add(x[col], row, dim=0, dim_size=x.size(0))

    out = torch.cat((out, x), 1)
    out = torch.matmul(out, self.weight)

    if self.bias is not None:
        out = out + self.bias
    if self.norm_embed:
        out = F.normalize(out, p=2, dim=-1)
    return out
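The aggregation at the heart of this layer can be reproduced in isolation. The sketch below (toy tensors with assumed shapes, not the repository's code) averages each node's neighbor features with scatter_mean after stripping self-loops, which is what the norm=True branch computes.

import torch
from torch_scatter import scatter_mean
from torch_geometric.utils import remove_self_loops

x = torch.randn(4, 8)                         # 4 nodes, 8 features (toy data)
edge_index = torch.tensor([[0, 1, 2, 3, 1],
                           [1, 0, 3, 2, 1]])  # note the self-loop 1 -> 1

edge_index, _ = remove_self_loops(edge_index)
row, col = edge_index
# Mean of neighbor features per node; self-contributions were removed above.
out = scatter_mean(x[col], row, dim=0, dim_size=x.size(0))
print(out.shape)  # torch.Size([4, 8])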
Example 2: __call__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def __call__(self, data):
    edge_index, edge_attr = data.edge_index, data.edge_attr
    N = data.num_nodes

    value = edge_index.new_ones((edge_index.size(1), ), dtype=torch.float)

    index, value = spspmm(edge_index, value, edge_index, value, N, N, N)
    value.fill_(0)
    index, value = remove_self_loops(index, value)

    edge_index = torch.cat([edge_index, index], dim=1)
    if edge_attr is None:
        data.edge_index, _ = coalesce(edge_index, None, N, N)
    else:
        value = value.view(-1, *[1 for _ in range(edge_attr.dim() - 1)])
        value = value.expand(-1, *list(edge_attr.size())[1:])
        edge_attr = torch.cat([edge_attr, value], dim=0)
        data.edge_index, edge_attr = coalesce(edge_index, edge_attr, N, N)
        data.edge_attr = edge_attr

    return data
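The transform above relies on sparse-sparse matrix multiplication: squaring the (binary) adjacency produces all two-hop pairs, whose diagonal is then discarded. A standalone sketch of that step on a toy path graph 0-1-2, assuming torch_sparse is installed:

import torch
from torch_sparse import spspmm
from torch_geometric.utils import remove_self_loops

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
N = 3
value = torch.ones(edge_index.size(1))

# A @ A: entry (i, j) is non-zero iff j is reachable from i in exactly two hops.
index, value = spspmm(edge_index, value, edge_index, value, N, N, N)
index, value = remove_self_loops(index, value)
print(index)  # tensor([[0, 2], [2, 0]]) -- the two-hop pair 0 <-> 2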
Example 3: barabasi_albert_graph
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def barabasi_albert_graph(num_nodes, num_edges):
    r"""Returns the :obj:`edge_index` of a Barabasi-Albert preferential
    attachment model, where a graph of :obj:`num_nodes` nodes grows by
    attaching new nodes with :obj:`num_edges` edges that are preferentially
    attached to existing nodes with high degree.

    Args:
        num_nodes (int): The number of nodes.
        num_edges (int): The number of edges from a new node to existing nodes.
    """
    assert num_edges > 0 and num_edges < num_nodes

    row, col = torch.arange(num_edges), torch.randperm(num_edges)

    for i in range(num_edges, num_nodes):
        row = torch.cat([row, torch.full((num_edges, ), i, dtype=torch.long)])
        choice = np.random.choice(torch.cat([row, col]).numpy(), num_edges)
        col = torch.cat([col, torch.from_numpy(choice)])

    edge_index = torch.stack([row, col], dim=0)
    edge_index, _ = remove_self_loops(edge_index)
    edge_index = to_undirected(edge_index, num_nodes)

    return edge_index
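A quick way to exercise this generator; the arguments are arbitrary, and the call assumes the function above and its imports (torch, numpy as np, to_undirected) are in scope:

edge_index = barabasi_albert_graph(num_nodes=100, num_edges=3)
print(edge_index.shape)  # torch.Size([2, E]), each undirected edge stored in both directions
# Verify that no self-loops survived the remove_self_loops call:
assert not bool((edge_index[0] == edge_index[1]).any())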
Example 4: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def forward(self, x: Union[OptTensor, PairOptTensor],
            pos: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor:
    """"""
    if not isinstance(x, tuple):
        x: PairOptTensor = (x, None)

    if isinstance(pos, Tensor):
        pos: PairTensor = (pos, pos)

    if self.add_self_loops:
        if isinstance(edge_index, Tensor):
            edge_index, _ = remove_self_loops(edge_index)
            edge_index, _ = add_self_loops(edge_index,
                                           num_nodes=pos[1].size(0))
        elif isinstance(edge_index, SparseTensor):
            edge_index = set_diag(edge_index)

    # propagate_type: (x: PairOptTensor, pos: PairTensor)
    out = self.propagate(edge_index, x=x, pos=pos, size=None)

    if self.global_nn is not None:
        out = self.global_nn(out)

    return out
Example 5: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def forward(self, x, edge_index):
    edge_index, _ = remove_self_loops(edge_index)
    deg = degree(edge_index[1 if self.flow == 'source_to_target' else 0],
                 x.size(0), dtype=torch.long)
    deg.clamp_(max=self.max_degree)

    if not self.root_weight:
        edge_index, _ = add_self_loops(edge_index,
                                       num_nodes=x.size(self.node_dim))

    h = self.propagate(edge_index, x=x)

    out = x.new_empty(list(x.size())[:-1] + [self.out_channels])

    for i in deg.unique().tolist():
        idx = (deg == i).nonzero().view(-1)
        r = self.rel_lins[i](h.index_select(self.node_dim, idx))
        if self.root_weight:
            r = r + self.root_lins[i](x.index_select(self.node_dim, idx))
        out.index_copy_(self.node_dim, idx, r)

    return out
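The degree bucketing used above can be reproduced on its own. A small sketch (toy graph, clamp capped at 2) that groups node indices by their clamped in-degree after removing self-loops:

import torch
from torch_geometric.utils import degree, remove_self_loops

edge_index = torch.tensor([[0, 1, 2, 3, 3],
                           [1, 0, 1, 1, 3]])
edge_index, _ = remove_self_loops(edge_index)   # drops the 3 -> 3 edge

deg = degree(edge_index[1], num_nodes=4, dtype=torch.long).clamp_(max=2)
for i in deg.unique().tolist():
    idx = (deg == i).nonzero().view(-1)
    print(i, idx.tolist())
# 0 [2, 3]
# 1 [0]
# 2 [1]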
Example 6: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor:
    """"""
    if isinstance(x, Tensor):
        x: PairTensor = (x, x)

    if self.add_self_loops:
        if isinstance(edge_index, Tensor):
            edge_index, _ = remove_self_loops(edge_index)
            edge_index, _ = add_self_loops(edge_index,
                                           num_nodes=x[1].size(0))
        elif isinstance(edge_index, SparseTensor):
            edge_index = set_diag(edge_index)

    # propagate_type: (x: PairTensor)
    out = self.propagate(edge_index, x=x, size=None)

    if self.bias is not None:
        out += self.bias

    return out
Example 7: __norm__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def __norm__(self, edge_index, num_nodes: Optional[int],
             edge_weight: OptTensor, normalization: Optional[str],
             lambda_max, dtype: Optional[int] = None,
             batch: OptTensor = None):

    edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)

    edge_index, edge_weight = get_laplacian(edge_index, edge_weight,
                                            normalization, dtype,
                                            num_nodes)

    if batch is not None and lambda_max.numel() > 1:
        lambda_max = lambda_max[batch[edge_index[0]]]

    edge_weight = (2.0 * edge_weight) / lambda_max
    edge_weight.masked_fill_(edge_weight == float('inf'), 0)

    edge_index, edge_weight = add_self_loops(edge_index, edge_weight,
                                             fill_value=-1.,
                                             num_nodes=num_nodes)
    assert edge_weight is not None

    return edge_index, edge_weight
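To see what this normalization produces, here is a standalone sketch of the same pipeline on a toy graph, with lambda_max fixed to 2.0 (the value ChebConv assumes for symmetric normalization):

import torch
from torch_geometric.utils import (remove_self_loops, add_self_loops,
                                   get_laplacian)

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
num_nodes, lambda_max = 3, 2.0

edge_index, edge_weight = remove_self_loops(edge_index, None)
edge_index, edge_weight = get_laplacian(edge_index, edge_weight, 'sym',
                                        num_nodes=num_nodes)
edge_weight = (2.0 * edge_weight) / lambda_max
edge_weight.masked_fill_(edge_weight == float('inf'), 0)
edge_index, edge_weight = add_self_loops(edge_index, edge_weight,
                                         fill_value=-1., num_nodes=num_nodes)
print(edge_index)
print(edge_weight)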
Example 8: process_CSL
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def process_CSL(self):
    path = osp.join(self.raw_dir, 'graphs_Kary_Deterministic_Graphs.pkl')
    with open(path, 'rb') as f:
        adjs = pickle.load(f)

    path = osp.join(self.raw_dir, 'y_Kary_Deterministic_Graphs.pt')
    ys = torch.load(path).tolist()

    data_list = []
    for adj, y in zip(adjs, ys):
        row, col = torch.from_numpy(adj.row), torch.from_numpy(adj.col)
        edge_index = torch.stack([row, col], dim=0).to(torch.long)
        edge_index, _ = remove_self_loops(edge_index)
        data = Data(edge_index=edge_index, y=y, num_nodes=adj.shape[0])
        if self.pre_filter is not None and not self.pre_filter(data):
            continue
        if self.pre_transform is not None:
            data = self.pre_transform(data)
        data_list.append(data)
    return data_list
Example 9: __call__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def __call__(self, data):
    device = data.edge_index.device

    row = torch.arange(data.num_nodes, dtype=torch.long, device=device)
    col = torch.arange(data.num_nodes, dtype=torch.long, device=device)

    row = row.view(-1, 1).repeat(1, data.num_nodes).view(-1)
    col = col.repeat(data.num_nodes)
    edge_index = torch.stack([row, col], dim=0)

    edge_attr = None
    if data.edge_attr is not None:
        idx = data.edge_index[0] * data.num_nodes + data.edge_index[1]
        size = list(data.edge_attr.size())
        size[0] = data.num_nodes * data.num_nodes
        edge_attr = data.edge_attr.new_zeros(size)
        edge_attr[idx] = data.edge_attr

    edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)
    data.edge_attr = edge_attr
    data.edge_index = edge_index

    return data
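The same dense connectivity can be built outside the transform. A minimal sketch with a toy node count and no edge attributes:

import torch
from torch_geometric.utils import remove_self_loops

num_nodes = 4
row = torch.arange(num_nodes).view(-1, 1).repeat(1, num_nodes).view(-1)
col = torch.arange(num_nodes).repeat(num_nodes)
edge_index = torch.stack([row, col], dim=0)

# Drop the diagonal so every node connects to every other node exactly once per direction.
edge_index, _ = remove_self_loops(edge_index)
print(edge_index.size(1))  # 12 == num_nodes * (num_nodes - 1)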
Example 10: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def forward(self, x, edge_index, edge_weight=None, size=None):
    """"""
    num_nodes = x.shape[0]
    h = torch.matmul(x, self.weight)

    if edge_weight is None:
        edge_weight = torch.ones((edge_index.size(1), ),
                                 dtype=x.dtype,
                                 device=edge_index.device)

    edge_index, edge_weight = remove_self_loops(edge_index=edge_index,
                                                edge_attr=edge_weight)
    deg = scatter_add(edge_weight, edge_index[0], dim=0,
                      dim_size=num_nodes)  # + 1e-10

    h_j = edge_weight.view(-1, 1) * h[edge_index[1]]
    aggr_out = scatter_add(h_j, edge_index[0], dim=0, dim_size=num_nodes)
    out = (deg.view(-1, 1) * self.lin1(x) + aggr_out) + self.lin2(x)

    # Self-loops are re-added here, but the result is not used before returning.
    edge_index, edge_weight = add_self_loops(edge_index=edge_index,
                                             edge_weight=edge_weight,
                                             num_nodes=num_nodes)
    return out
Example 11: contains_isolated_nodes
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def contains_isolated_nodes(edge_index, num_nodes=None):
    r"""Returns :obj:`True` if the graph given by :attr:`edge_index` contains
    isolated nodes.

    Args:
        edge_index (LongTensor): The edge indices.
        num_nodes (int, optional): The number of nodes, *i.e.*
            :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)

    :rtype: bool
    """
    num_nodes = maybe_num_nodes(edge_index, num_nodes)
    (row, col), _ = remove_self_loops(edge_index)

    return torch.unique(torch.cat((row, col))).size(0) < num_nodes
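A quick check with a toy graph: node 2 only appears via its self-loop, so it counts as isolated once self-loops are removed.

import torch
from torch_geometric.utils import contains_isolated_nodes

edge_index = torch.tensor([[0, 1, 2],
                           [1, 0, 2]])
print(contains_isolated_nodes(edge_index, num_nodes=3))        # True
print(contains_isolated_nodes(edge_index[:, :2], num_nodes=2))  # False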
Example 12: pool_edge
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def pool_edge(cluster, edge_index,
              edge_attr: Optional[torch.Tensor] = None):
    num_nodes = cluster.size(0)
    edge_index = cluster[edge_index.view(-1)].view(2, -1)
    edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)
    if edge_index.numel() > 0:
        edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,
                                         num_nodes)
    return edge_index, edge_attr
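The core idea can be sketched without the coalesce step: relabelling endpoints by their cluster turns every intra-cluster edge into a self-loop, which remove_self_loops then discards. Toy cluster assignment below; pool_edge itself would additionally merge duplicate edges.

import torch
from torch_geometric.utils import remove_self_loops

cluster = torch.tensor([0, 0, 1, 1])        # nodes 0,1 -> cluster 0; nodes 2,3 -> cluster 1
edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 3]])

pooled = cluster[edge_index.view(-1)].view(2, -1)
pooled, _ = remove_self_loops(pooled)
print(pooled)  # tensor([[0], [1]]) -- only the inter-cluster edge survives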
Example 13: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def forward(self, x: Tensor, edge_index: Adj) -> Tensor:
    """"""
    if self.add_self_loops:
        if isinstance(edge_index, Tensor):
            edge_index, _ = remove_self_loops(edge_index)
            edge_index, _ = add_self_loops(edge_index,
                                           num_nodes=x.size(self.node_dim))
        elif isinstance(edge_index, SparseTensor):
            edge_index = set_diag(edge_index)

    x_norm = F.normalize(x, p=2., dim=-1)

    # propagate_type: (x: Tensor, x_norm: Tensor)
    return self.propagate(edge_index, x=x, x_norm=x_norm, size=None)
Example 14: edge_index_from_dict
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def edge_index_from_dict(graph_dict, num_nodes=None):
    row, col = [], []
    for key, value in graph_dict.items():
        row += repeat(key, len(value))
        col += value
    edge_index = torch.stack([torch.tensor(row), torch.tensor(col)], dim=0)

    # NOTE: There are duplicated edges and self loops in the datasets. Other
    # implementations do not remove them!
    edge_index, _ = remove_self_loops(edge_index)
    edge_index, _ = coalesce(edge_index, None, num_nodes, num_nodes)

    return edge_index
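For reference, a tiny adjacency dictionary run through the same logic; this assumes the helper above and its imports (itertools.repeat, torch, and torch_sparse's coalesce) are in scope as in the dataset code:

graph_dict = {0: [1, 2, 0], 1: [0], 2: [0, 2]}   # contains self-loops 0 -> 0 and 2 -> 2
edge_index = edge_index_from_dict(graph_dict, num_nodes=3)
print(edge_index)
# tensor([[0, 0, 1, 2],
#         [1, 2, 0, 0]])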
Example 15: parse_npz
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import remove_self_loops [as alias]
def parse_npz(f):
    x = sp.csr_matrix((f['attr_data'], f['attr_indices'], f['attr_indptr']),
                      f['attr_shape']).todense()
    x = torch.from_numpy(x).to(torch.float)
    x[x > 0] = 1

    adj = sp.csr_matrix((f['adj_data'], f['adj_indices'], f['adj_indptr']),
                        f['adj_shape']).tocoo()
    edge_index = torch.tensor([adj.row, adj.col], dtype=torch.long)
    edge_index, _ = remove_self_loops(edge_index)
    edge_index = to_undirected(edge_index, x.size(0))  # Internal coalesce.

    y = torch.from_numpy(f['labels']).to(torch.long)

    return Data(x=x, edge_index=edge_index, y=y)