This page collects typical usage examples of torch_geometric.utils.softmax in Python. If you have been puzzling over what exactly utils.softmax does, how to call it, or what real uses look like, the curated examples below may help. You can also explore further usage examples from its home module, torch_geometric.utils.
The 15 code examples of utils.softmax shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
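As a quick orientation before the examples (this sketch is not from the list below; the values are made up for illustration): torch_geometric.utils.softmax computes a numerically stable softmax over groups of a flat tensor, where index assigns each entry to a group — typically the target node of an edge, or the graph a node belongs to.

import torch
from torch_geometric.utils import softmax

src = torch.tensor([1.0, 2.0, 3.0, 4.0])
index = torch.tensor([0, 0, 1, 1])  # entries 0-1 form one group, 2-3 another

out = softmax(src, index)
# out[:2] sums to 1 and out[2:] sums to 1; softmax is applied per group.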
Example 1: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, x, edge_index, edge_attr=None, batch=None, attn=None):
    """"""
    if batch is None:
        batch = edge_index.new_zeros(x.size(0))

    attn = x if attn is None else attn
    attn = attn.unsqueeze(-1) if attn.dim() == 1 else attn
    score = self.gnn(attn, edge_index).view(-1)

    if self.min_score is None:
        score = self.nonlinearity(score)
    else:
        score = softmax(score, batch)

    perm = topk(score, self.ratio, batch, self.min_score)
    x = x[perm] * score[perm].view(-1, 1)
    x = self.multiplier * x if self.multiplier != 1 else x

    batch = batch[perm]
    edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm,
                                       num_nodes=score.size(0))

    return x, edge_index, edge_attr, batch, perm, score[perm]
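Structurally this forward pass matches PyG's SAGPooling layer, so calling it looks roughly like the sketch below (the layer choice and sizes are my assumptions, not stated on this page):

import torch
from torch_geometric.nn import SAGPooling

pool = SAGPooling(in_channels=16, ratio=0.5)
x = torch.randn(8, 16)  # 8 nodes, 16 features each
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]])

# Returns the same six values as the forward above.
x, edge_index, edge_attr, batch, perm, score = pool(x, edge_index)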
Example 2: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, x, edge_index, edge_attr=None, batch=None, attn=None):
    """"""
    if batch is None:
        batch = edge_index.new_zeros(x.size(0))

    attn = x if attn is None else attn
    attn = attn.unsqueeze(-1) if attn.dim() == 1 else attn
    score = (attn * self.weight).sum(dim=-1)

    if self.min_score is None:
        score = self.nonlinearity(score / self.weight.norm(p=2, dim=-1))
    else:
        score = softmax(score, batch)

    perm = topk(score, self.ratio, batch, self.min_score)
    x = x[perm] * score[perm].view(-1, 1)
    x = self.multiplier * x if self.multiplier != 1 else x

    batch = batch[perm]
    edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm,
                                       num_nodes=score.size(0))

    return x, edge_index, edge_attr, batch, perm, score[perm]
Example 3: message
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_i, x_j, edge_index, num_nodes):
    if self.att_type == "const":
        if self.training and self.dropout > 0:
            x_j = F.dropout(x_j, p=self.dropout, training=True)
        neighbor = x_j
    elif self.att_type == "gcn":
        # gcn_weight has to be recomputed when the graph changes.
        if self.gcn_weight is None or self.gcn_weight.size(0) != x_j.size(0):
            _, norm = self.norm(edge_index, num_nodes, None)
            self.gcn_weight = norm
        neighbor = self.gcn_weight.view(-1, 1, 1) * x_j
    else:
        # Compute attention coefficients.
        alpha = self.apply_attention(edge_index, num_nodes, x_i, x_j)
        alpha = softmax(alpha, edge_index[0], num_nodes)
        # Sample attention coefficients stochastically.
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=True)
        neighbor = x_j * alpha.view(-1, self.heads, 1)

    if self.pool_dim > 0:
        for layer in self.pool_layer:
            neighbor = layer(neighbor)
    return neighbor
Example 4: compute_edge_score_softmax
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def compute_edge_score_softmax(raw_edge_score, edge_index, num_nodes):
    return softmax(raw_edge_score, edge_index[1], num_nodes=num_nodes)
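The scores here are normalized over all edges that share a target node (edge_index[1]). A standalone sketch with made-up values:

import torch
from torch_geometric.utils import softmax

raw_edge_score = torch.tensor([0.5, 1.5, 2.0])
edge_index = torch.tensor([[0, 1, 2],
                           [0, 0, 1]])  # two edges end at node 0, one at node 1

# The two edges into node 0 compete; the single edge into node 1 gets weight 1.
out = softmax(raw_edge_score, edge_index[1], num_nodes=3)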
Example 5: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, x, batch, size=None):
    """"""
    x = x.unsqueeze(-1) if x.dim() == 1 else x
    size = batch[-1].item() + 1 if size is None else size

    gate = self.gate_nn(x).view(-1, 1)
    x = self.nn(x) if self.nn is not None else x
    assert gate.dim() == x.dim() and gate.size(0) == x.size(0)

    gate = softmax(gate, batch, num_nodes=size)
    out = scatter_add(gate * x, batch, dim=0, dim_size=size)

    return out
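This is the pattern of PyG's GlobalAttention readout: gate_nn scores each node and softmax normalizes the scores within each graph of the batch. A minimal usage sketch (the gate network and sizes are my assumptions):

import torch
from torch.nn import Linear
from torch_geometric.nn import GlobalAttention

readout = GlobalAttention(gate_nn=Linear(16, 1))  # one gate score per node
x = torch.randn(10, 16)                  # 10 nodes across 2 graphs
batch = torch.tensor([0] * 6 + [1] * 4)  # graph id of each node

out = readout(x, batch)  # shape [2, 16]: one pooled vector per graph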
Example 6: message
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_j: Tensor, alpha_j: Tensor, alpha_i: OptTensor,
            index: Tensor, ptr: OptTensor,
            size_i: Optional[int]) -> Tensor:
    alpha = alpha_j if alpha_i is None else alpha_j + alpha_i
    alpha = F.leaky_relu(alpha, self.negative_slope)
    alpha = softmax(alpha, index, ptr, size_i)
    self._alpha = alpha
    alpha = F.dropout(alpha, p=self.dropout, training=self.training)
    return x_j * alpha.unsqueeze(-1)
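This message function follows the shape of PyG's GATConv: softmax(alpha, index, ptr, size_i) normalizes attention per target node and accepts either a COO index or a CSR ptr. Used through the layer, it might look like this sketch (sizes are assumptions):

import torch
from torch_geometric.nn import GATConv

conv = GATConv(in_channels=8, out_channels=16, heads=2)
x = torch.randn(5, 8)
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 2, 3, 4]])

out = conv(x, edge_index)  # shape [5, 2 * 16] with the default concat=True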
Example 7: message
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_j: Tensor, x_norm_i: Tensor, x_norm_j: Tensor,
            index: Tensor, ptr: OptTensor,
            size_i: Optional[int]) -> Tensor:
    alpha = self.beta * (x_norm_i * x_norm_j).sum(dim=-1)
    alpha = softmax(alpha, index, ptr, size_i)
    return x_j * alpha.view(-1, 1)
Example 8: test_softmax
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def test_softmax():
    src = torch.tensor([1., 1., 1., 1.])
    index = torch.tensor([0, 0, 1, 2])
    ptr = torch.tensor([0, 2, 3, 4])

    out = softmax(src, index)
    assert out.tolist() == [0.5, 0.5, 1, 1]
    assert softmax(src, None, ptr).tolist() == out.tolist()
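The two assertions agree because index = [0, 0, 1, 2] and ptr = [0, 2, 3, 4] describe the same grouping: index lists the group of each element, while ptr gives CSR-style group boundaries (elements 0-1 form group 0, element 2 group 1, element 3 group 2).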
Example 9: message
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_i, x_j, edge_index, num_nodes):
    # Compute attention coefficients.
    alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
    alpha = F.leaky_relu(alpha, self.negative_slope)
    alpha = softmax(alpha, edge_index[0], num_nodes)
    # Pass training=self.training so dropout is disabled at eval time.
    alpha = F.dropout(alpha, p=self.dropout, training=self.training)
    return x_j * alpha.view(-1, self.heads, 1)
Example 10: message
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_q, x_k, x_v, edge_index_i, num_nodes):
    score = self.cal_att_score(x_q, x_k, self.heads)
    # score = F.leaky_relu(score)
    score = softmax(score, edge_index_i, num_nodes)
    # score = F.dropout(score, p=self.dropout, training=self.training)
    x_v = F.dropout(x_v, p=self.dropout, training=self.training)
    return x_v * score.view(-1, self.heads, 1)
Example 11: message
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_q, x_k, x_v, edge_index, num_nodes):
    score = self.cal_att_score(x_q, x_k, self.heads)
    # score = F.leaky_relu(score)
    score = softmax(score, edge_index[0], num_nodes)
    # score = F.dropout(score, p=self.dropout, training=self.training)
    x_v = F.dropout(x_v, p=self.dropout, training=self.training)
    return x_v * score.view(-1, self.heads, 1)
Example 12: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, neighbor_vecs, self_vecs):
    # shape [num_nodes, num_sample, num_heads]
    alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + \
            (neighbor_vecs * self.att_neighbor_weight).sum(dim=-1)
    alpha = F.leaky_relu(alpha, negative_slope=0.2)
    # alpha = torch.softmax(alpha, dim=-2)
    return alpha
Example 13: preprocess
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def preprocess(self, alpha, edge_index, neighbor_vecs, num_nodes):
    if isinstance(alpha, int):
        if self.training and self.dropout > 0:
            neighbor_vecs = F.dropout(neighbor_vecs, p=self.dropout,
                                      training=self.training)
        return alpha * neighbor_vecs
    else:
        alpha = softmax(alpha, edge_index[0], num_nodes)
        # Sample attention coefficients stochastically.
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        neighbor = neighbor_vecs * alpha.view(-1, self.num_head, 1)
        return neighbor
Example 14: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, x, edge_index, edge_weight=None, batch=None):
    N = x.size(0)

    edge_index, edge_weight = add_remaining_self_loops(
        edge_index, edge_weight, fill_value=1, num_nodes=N)

    if batch is None:
        batch = edge_index.new_zeros(x.size(0))

    x = x.unsqueeze(-1) if x.dim() == 1 else x

    x_pool = x
    if self.GNN is not None:
        x_pool = self.gnn_intra_cluster(x=x, edge_index=edge_index,
                                        edge_weight=edge_weight)

    x_pool_j = x_pool[edge_index[0]]
    x_q = scatter(x_pool_j, edge_index[1], dim=0, reduce='max')
    x_q = self.lin(x_q)[edge_index[1]]

    score = self.att(torch.cat([x_q, x_pool_j], dim=-1)).view(-1)
    score = F.leaky_relu(score, self.negative_slope)
    score = softmax(score, edge_index[1], num_nodes=N)

    # Sample attention coefficients stochastically.
    score = F.dropout(score, p=self.dropout, training=self.training)

    v_j = x[edge_index[0]] * score.view(-1, 1)
    x = scatter(v_j, edge_index[1], dim=0, reduce='add')

    # Cluster selection.
    fitness = self.gnn_score(x, edge_index).sigmoid().view(-1)
    perm = topk(fitness, self.ratio, batch)
    x = x[perm] * fitness[perm].view(-1, 1)
    batch = batch[perm]

    # Graph coarsening.
    row, col = edge_index
    A = SparseTensor(row=row, col=col, value=edge_weight,
                     sparse_sizes=(N, N))
    S = SparseTensor(row=row, col=col, value=score, sparse_sizes=(N, N))
    S = S[:, perm]

    A = S.t() @ A @ S

    if self.add_self_loops:
        A = A.fill_diag(1.)
    else:
        A = A.remove_diag()

    row, col, edge_weight = A.coo()
    edge_index = torch.stack([row, col], dim=0)

    return x, edge_index, edge_weight, batch, perm
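This forward pass has the structure of PyG's ASAPooling (attention-based node scoring, top-k cluster selection, then coarsening via S^T A S). A minimal usage sketch (sizes are my assumptions; SparseTensor requires torch_sparse):

import torch
from torch_geometric.nn import ASAPooling

pool = ASAPooling(in_channels=16, ratio=0.5)
x = torch.randn(6, 16)
edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])

x, edge_index, edge_weight, batch, perm = pool(x, edge_index)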
Example 15: forward
# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, x, hyperedge_index, hyperedge_weight=None):
    r"""
    Args:
        x (Tensor): Node feature matrix :math:`\mathbf{X}`.
        hyperedge_index (LongTensor): Hyperedge indices from
            :math:`\mathbf{H}`.
        hyperedge_weight (Tensor, optional): Sparse hyperedge weights from
            :math:`\mathbf{W}`. (default: :obj:`None`)
    """
    x = torch.matmul(x, self.weight)

    alpha = None
    if self.use_attention:
        x = x.view(-1, self.heads, self.out_channels)
        x_i, x_j = x[hyperedge_index[0]], x[hyperedge_index[1]]
        alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, hyperedge_index[0], num_nodes=x.size(0))
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)

    if hyperedge_weight is None:
        D = degree(hyperedge_index[0], x.size(0), x.dtype)
    else:
        D = scatter_add(hyperedge_weight[hyperedge_index[1]],
                        hyperedge_index[0], dim=0, dim_size=x.size(0))
    D = 1.0 / D
    D[D == float("inf")] = 0

    if hyperedge_index.numel() == 0:
        num_edges = 0
    else:
        num_edges = hyperedge_index[1].max().item() + 1
    B = 1.0 / degree(hyperedge_index[1], num_edges, x.dtype)
    B[B == float("inf")] = 0
    if hyperedge_weight is not None:
        B = B * hyperedge_weight

    self.flow = 'source_to_target'
    out = self.propagate(hyperedge_index, x=x, norm=B, alpha=alpha)
    self.flow = 'target_to_source'
    out = self.propagate(hyperedge_index, x=out, norm=D, alpha=alpha)

    if self.concat is True:
        out = out.view(-1, self.heads * self.out_channels)
    else:
        out = out.mean(dim=1)

    if self.bias is not None:
        out = out + self.bias

    return out
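This matches PyG's HypergraphConv, where hyperedge_index is a [2, num_connections] tensor pairing node indices (row 0) with hyperedge indices (row 1). A minimal usage sketch with made-up values:

import torch
from torch_geometric.nn import HypergraphConv

conv = HypergraphConv(in_channels=8, out_channels=16)
x = torch.randn(4, 8)
# Hyperedge 0 contains nodes {0, 1, 2}; hyperedge 1 contains nodes {2, 3}.
hyperedge_index = torch.tensor([[0, 1, 2, 2, 3],
                                [0, 0, 0, 1, 1]])

out = conv(x, hyperedge_index)  # shape [4, 16]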