This article collects typical usage examples of the Python method torch_geometric.utils.add_self_loops. If you are wondering what utils.add_self_loops does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the module that provides this method, torch_geometric.utils.
The following shows 15 code examples of utils.add_self_loops, sorted by popularity by default.
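Before the examples, here is a minimal sketch of the basic call, assuming the PyTorch Geometric 1.x-style signature add_self_loops(edge_index, edge_weight=None, fill_value=1., num_nodes=None); the toy graph below is made up purely for illustration:

import torch
from torch_geometric.utils import add_self_loops, remove_self_loops

# A toy 3-node graph in COO format (shape [2, E]).
edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
edge_weight = torch.ones(edge_index.size(1))

# Drop any existing self-loops, then add one self-loop per node,
# each carrying the weight given by fill_value.
edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)
edge_index, edge_weight = add_self_loops(edge_index, edge_weight,
                                         fill_value=1., num_nodes=3)

As the examples below show, this remove_self_loops followed by add_self_loops pattern appears inside many MessagePassing layers so that every node aggregates its own features exactly once.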
Example 1: __init__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def __init__(self, in_channels: int, out_channels: int,
             diag_lambda: float = 0., add_self_loops: bool = True,
             bias: bool = True, **kwargs):
    super(ClusterGCNConv, self).__init__(aggr='add', **kwargs)

    self.in_channels = in_channels
    self.out_channels = out_channels
    self.diag_lambda = diag_lambda
    self.add_self_loops = add_self_loops

    self.weight = Parameter(torch.Tensor(in_channels, out_channels))
    self.root_weight = Parameter(torch.Tensor(in_channels, out_channels))

    if bias:
        self.bias = Parameter(torch.Tensor(out_channels))
    else:
        self.register_parameter('bias', None)

    self.reset_parameters()
Example 2: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def forward(self, x, edge_index):
    edge_index, _ = remove_self_loops(edge_index)

    deg = degree(edge_index[1 if self.flow == 'source_to_target' else 0],
                 x.size(0), dtype=torch.long)
    deg.clamp_(max=self.max_degree)

    if not self.root_weight:
        edge_index, _ = add_self_loops(edge_index,
                                       num_nodes=x.size(self.node_dim))

    h = self.propagate(edge_index, x=x)

    out = x.new_empty(list(x.size())[:-1] + [self.out_channels])

    for i in deg.unique().tolist():
        idx = (deg == i).nonzero().view(-1)

        r = self.rel_lins[i](h.index_select(self.node_dim, idx))
        if self.root_weight:
            r = r + self.root_lins[i](x.index_select(self.node_dim, idx))

        out.index_copy_(self.node_dim, idx, r)

    return out
Example 3: __init__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def __init__(self, in_channels: int, out_channels: int, heads: int = 1,
             add_self_loops: bool = True, bias: bool = True, **kwargs):
    super(FeaStConv, self).__init__(aggr='mean', **kwargs)

    self.in_channels = in_channels
    self.out_channels = out_channels
    self.heads = heads
    self.add_self_loops = add_self_loops

    self.weight = Parameter(torch.Tensor(in_channels,
                                         heads * out_channels))
    self.u = Parameter(torch.Tensor(in_channels, heads))
    self.c = Parameter(torch.Tensor(heads))

    if bias:
        self.bias = Parameter(torch.Tensor(out_channels))
    else:
        self.register_parameter('bias', None)

    self.reset_parameters()
Example 4: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor:
    """"""
    if isinstance(x, Tensor):
        x: PairTensor = (x, x)

    if self.add_self_loops:
        if isinstance(edge_index, Tensor):
            edge_index, _ = remove_self_loops(edge_index)
            edge_index, _ = add_self_loops(edge_index,
                                           num_nodes=x[1].size(0))
        elif isinstance(edge_index, SparseTensor):
            edge_index = set_diag(edge_index)

    # propagate_type: (x: PairTensor)
    out = self.propagate(edge_index, x=x, size=None)

    if self.bias is not None:
        out += self.bias

    return out
Example 5: __norm__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def __norm__(self, edge_index, num_nodes: Optional[int],
             edge_weight: OptTensor, normalization: Optional[str],
             lambda_max, dtype: Optional[int] = None,
             batch: OptTensor = None):

    edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)

    edge_index, edge_weight = get_laplacian(edge_index, edge_weight,
                                            normalization, dtype,
                                            num_nodes)

    if batch is not None and lambda_max.numel() > 1:
        lambda_max = lambda_max[batch[edge_index[0]]]

    edge_weight = (2.0 * edge_weight) / lambda_max
    edge_weight.masked_fill_(edge_weight == float('inf'), 0)

    edge_index, edge_weight = add_self_loops(edge_index, edge_weight,
                                             fill_value=-1.,
                                             num_nodes=num_nodes)
    assert edge_weight is not None

    return edge_index, edge_weight
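The fill_value=-1. call above implements a Chebyshev-style rescaling: after scaling the graph Laplacian by 2/lambda_max, the identity is subtracted by adding self-loops with weight -1 (the duplicate self-loop entries sum up during message passing). A minimal standalone sketch of the same steps; the toy graph and the fixed bound lambda_max = 2.0 are assumptions for illustration only:

import torch
from torch_geometric.utils import add_self_loops, get_laplacian

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])

# Symmetrically normalized Laplacian L = I - D^{-1/2} A D^{-1/2}.
edge_index, edge_weight = get_laplacian(edge_index, normalization='sym',
                                        num_nodes=3)

lambda_max = 2.0  # valid upper bound for the 'sym' normalization
edge_weight = (2.0 * edge_weight) / lambda_max

# Add a -1 self-loop per node; summed with the existing diagonal
# entries this yields 2L/lambda_max - I.
edge_index, edge_weight = add_self_loops(edge_index, edge_weight,
                                         fill_value=-1., num_nodes=3)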
Example 6: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def forward(self, x, edge_index, edge_weight=None, size=None):
    """"""
    num_nodes = x.shape[0]
    h = torch.matmul(x, self.weight)

    if edge_weight is None:
        edge_weight = torch.ones((edge_index.size(1), ),
                                 dtype=x.dtype,
                                 device=edge_index.device)
    edge_index, edge_weight = remove_self_loops(edge_index=edge_index, edge_attr=edge_weight)
    deg = scatter_add(edge_weight, edge_index[0], dim=0, dim_size=num_nodes)  # + 1e-10

    h_j = edge_weight.view(-1, 1) * h[edge_index[1]]
    aggr_out = scatter_add(h_j, edge_index[0], dim=0, dim_size=num_nodes)
    out = (deg.view(-1, 1) * self.lin1(x) + aggr_out) + self.lin2(x)
    edge_index, edge_weight = add_self_loops(edge_index=edge_index, edge_weight=edge_weight, num_nodes=num_nodes)
    return out
Example 7: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def forward(self, x_1, x_2, edge_index_pos, edge_index_neg):
    """
    Forward propagation pass with features and indices.
    :param x_1: Features for left hand side vertices.
    :param x_2: Features for right hand side vertices.
    :param edge_index_pos: Positive indices.
    :param edge_index_neg: Negative indices.
    :return out: Abstract convolved features.
    """
    edge_index_pos, _ = remove_self_loops(edge_index_pos, None)
    edge_index_pos, _ = add_self_loops(edge_index_pos, num_nodes=x_1.size(0))
    edge_index_neg, _ = remove_self_loops(edge_index_neg, None)
    edge_index_neg, _ = add_self_loops(edge_index_neg, num_nodes=x_2.size(0))

    row_pos, col_pos = edge_index_pos
    row_neg, col_neg = edge_index_neg

    if self.norm:
        out_1 = scatter_mean(x_1[col_pos], row_pos, dim=0, dim_size=x_1.size(0))
        out_2 = scatter_mean(x_2[col_neg], row_neg, dim=0, dim_size=x_2.size(0))
    else:
        out_1 = scatter_add(x_1[col_pos], row_pos, dim=0, dim_size=x_1.size(0))
        out_2 = scatter_add(x_2[col_neg], row_neg, dim=0, dim_size=x_2.size(0))

    out = torch.cat((out_1, out_2, x_1), 1)
    out = torch.matmul(out, self.weight)

    if self.bias is not None:
        out = out + self.bias
    if self.norm_embed:
        out = F.normalize(out, p=2, dim=-1)
    return out
Example 8: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def forward(self, x, edge_index):
    # x has shape [N, in_channels]
    # edge_index has shape [2, E]

    # Step 1: Add self-loops to the adjacency matrix.
    edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))

    # Step 2: Linearly transform node feature matrix.
    x = self.lin(x)

    # Step 3-5: Start propagating messages.
    return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)
Example 9: __call__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def __call__(self, data):
    N = data.num_nodes
    edge_index = data.edge_index
    if data.edge_attr is None:
        edge_weight = torch.ones(edge_index.size(1),
                                 device=edge_index.device)
    else:
        edge_weight = data.edge_attr
        assert self.exact
        assert edge_weight.dim() == 1

    if self.self_loop_weight:
        edge_index, edge_weight = add_self_loops(
            edge_index, edge_weight, fill_value=self.self_loop_weight,
            num_nodes=N)

    edge_index, edge_weight = coalesce(edge_index, edge_weight, N, N)

    if self.exact:
        edge_index, edge_weight = self.transition_matrix(
            edge_index, edge_weight, N, self.normalization_in)
        diff_mat = self.diffusion_matrix_exact(edge_index, edge_weight, N,
                                               **self.diffusion_kwargs)
        edge_index, edge_weight = self.sparsify_dense(
            diff_mat, **self.sparsification_kwargs)
    else:
        edge_index, edge_weight = self.diffusion_matrix_approx(
            edge_index, edge_weight, N, self.normalization_in,
            **self.diffusion_kwargs)
        edge_index, edge_weight = self.sparsify_sparse(
            edge_index, edge_weight, N, **self.sparsification_kwargs)

    edge_index, edge_weight = coalesce(edge_index, edge_weight, N, N)
    edge_index, edge_weight = self.transition_matrix(
        edge_index, edge_weight, N, self.normalization_out)

    data.edge_index = edge_index
    data.edge_attr = edge_weight

    return data
Example 10: __call__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def __call__(self, data):
    N = data.num_nodes
    edge_index = data.edge_index

    assert data.edge_attr is None

    edge_index, _ = add_self_loops(edge_index, num_nodes=N)
    edge_index, _ = coalesce(edge_index, None, N, N)

    data.edge_index = edge_index
    return data
Example 11: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def forward(self, x: Tensor, edge_index: Adj, size: Size = None) -> Tensor:
    """"""
    edge_weight: OptTensor = None
    if isinstance(edge_index, Tensor):
        num_nodes = size[1] if size is not None else x.size(self.node_dim)
        if self.add_self_loops:
            edge_index, _ = remove_self_loops(edge_index)
            edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)

        row, col = edge_index[0], edge_index[1]
        deg_inv = 1. / degree(col, num_nodes=num_nodes).clamp_(1.)

        edge_weight = deg_inv[col]
        edge_weight[row == col] += self.diag_lambda * deg_inv

    elif isinstance(edge_index, SparseTensor):
        if self.add_self_loops:
            edge_index = set_diag(edge_index)

        col, row, _ = edge_index.coo()  # Transposed.
        deg_inv = 1. / sum(edge_index, dim=1).clamp_(1.)

        edge_weight = deg_inv[col]
        edge_weight[row == col] += self.diag_lambda * deg_inv
        edge_index = edge_index.set_value(edge_weight, layout='coo')

    # propagate_type: (x: Tensor, edge_weight: OptTensor)
    out = self.propagate(edge_index, x=x, edge_weight=edge_weight,
                         size=None)
    out = out @ self.weight + x @ self.root_weight

    if self.bias is not None:
        out += self.bias

    return out
Example 12: __init__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def __init__(self, local_nn: Optional[Callable] = None,
             global_nn: Optional[Callable] = None,
             add_self_loops: bool = True, **kwargs):
    super(PointConv, self).__init__(aggr='max', **kwargs)

    self.local_nn = local_nn
    self.global_nn = global_nn
    self.add_self_loops = add_self_loops

    self.reset_parameters()
Example 13: __init__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def __init__(self, in_channels: Union[int, Tuple[int, int]],
             out_channels: int, heads: int = 1, concat: bool = True,
             negative_slope: float = 0.2, dropout: float = 0.,
             add_self_loops: bool = True, bias: bool = True, **kwargs):
    super(GATConv, self).__init__(aggr='add', node_dim=0, **kwargs)

    self.in_channels = in_channels
    self.out_channels = out_channels
    self.heads = heads
    self.concat = concat
    self.negative_slope = negative_slope
    self.dropout = dropout
    self.add_self_loops = add_self_loops

    if isinstance(in_channels, int):
        self.lin_l = Linear(in_channels, heads * out_channels, bias=False)
        self.lin_r = self.lin_l
    else:
        self.lin_l = Linear(in_channels[0], heads * out_channels, False)
        self.lin_r = Linear(in_channels[1], heads * out_channels, False)

    self.att_l = Parameter(torch.Tensor(1, heads, out_channels))
    self.att_r = Parameter(torch.Tensor(1, heads, out_channels))

    if bias and concat:
        self.bias = Parameter(torch.Tensor(heads * out_channels))
    elif bias and not concat:
        self.bias = Parameter(torch.Tensor(out_channels))
    else:
        self.register_parameter('bias', None)

    self._alpha = None

    self.reset_parameters()
Example 14: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def forward(self, x: Tensor, edge_index: Adj) -> Tensor:
    """"""
    if self.add_self_loops:
        if isinstance(edge_index, Tensor):
            edge_index, _ = remove_self_loops(edge_index)
            edge_index, _ = add_self_loops(edge_index,
                                           num_nodes=x.size(self.node_dim))
        elif isinstance(edge_index, SparseTensor):
            edge_index = set_diag(edge_index)

    x_norm = F.normalize(x, p=2., dim=-1)

    # propagate_type: (x: Tensor, x_norm: Tensor)
    return self.propagate(edge_index, x=x, x_norm=x_norm, size=None)
Example 15: __init__
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import add_self_loops [as alias]
def __init__(self, local_nn: Optional[Callable] = None,
             global_nn: Optional[Callable] = None,
             add_self_loops: bool = True, **kwargs):
    super(PPFConv, self).__init__(aggr='max', **kwargs)

    self.local_nn = local_nn
    self.global_nn = global_nn
    self.add_self_loops = add_self_loops

    self.reset_parameters()
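Several of the examples above (Examples 4, 11, and 14) handle torch_sparse.SparseTensor inputs with set_diag instead of add_self_loops. Below is a minimal sketch of that branch, assuming torch_sparse is installed; the toy adjacency matrix is made up for illustration:

import torch
from torch_sparse import SparseTensor, set_diag

adj = SparseTensor(row=torch.tensor([0, 1, 1, 2]),
                   col=torch.tensor([1, 0, 2, 1]),
                   sparse_sizes=(3, 3))

# set_diag plays the role of add_self_loops for SparseTensor inputs:
# it sets the diagonal entries of the sparse adjacency matrix.
adj = set_diag(adj)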