

Python utils.softmax Method Code Examples

This article collects typical usage examples of the utils.softmax method from torch_geometric.utils in Python. If you are wondering what exactly utils.softmax does, how to call it, or what it looks like in real code, the curated method examples below may help. You can also explore further usage examples for torch_geometric.utils, the module this method belongs to.


The following presents 15 code examples of utils.softmax, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
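
Before the examples, here is a minimal, self-contained sketch of how torch_geometric.utils.softmax is typically called (illustrative only; the exact signature varies slightly across PyTorch Geometric releases). The function computes a softmax over src separately within each group defined by index, which is how the examples below normalize attention coefficients per target node.

import torch
from torch_geometric.utils import softmax

# Four raw scores belonging to three groups (e.g. three target nodes).
src = torch.tensor([1.0, 2.0, 3.0, 4.0])
index = torch.tensor([0, 0, 1, 2])

# The softmax is computed independently within each group defined by `index`:
# the two entries of group 0 sum to 1, and the singleton groups become 1.
out = softmax(src, index)
print(out)  # tensor([0.2689, 0.7311, 1.0000, 1.0000])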

Example 1: forward

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, x, edge_index, edge_attr=None, batch=None, attn=None):
        """"""
        if batch is None:
            batch = edge_index.new_zeros(x.size(0))

        attn = x if attn is None else attn
        attn = attn.unsqueeze(-1) if attn.dim() == 1 else attn
        score = self.gnn(attn, edge_index).view(-1)

        if self.min_score is None:
            score = self.nonlinearity(score)
        else:
            score = softmax(score, batch)

        perm = topk(score, self.ratio, batch, self.min_score)
        x = x[perm] * score[perm].view(-1, 1)
        x = self.multiplier * x if self.multiplier != 1 else x

        batch = batch[perm]
        edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm,
                                           num_nodes=score.size(0))

        return x, edge_index, edge_attr, batch, perm, score[perm] 
Developer: rusty1s, Project: pytorch_geometric, Lines of code: 25, Source: sag_pool.py

Example 2: forward

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, x, edge_index, edge_attr=None, batch=None, attn=None):
        """"""

        if batch is None:
            batch = edge_index.new_zeros(x.size(0))

        attn = x if attn is None else attn
        attn = attn.unsqueeze(-1) if attn.dim() == 1 else attn
        score = (attn * self.weight).sum(dim=-1)

        if self.min_score is None:
            score = self.nonlinearity(score / self.weight.norm(p=2, dim=-1))
        else:
            score = softmax(score, batch)

        perm = topk(score, self.ratio, batch, self.min_score)
        x = x[perm] * score[perm].view(-1, 1)
        x = self.multiplier * x if self.multiplier != 1 else x

        batch = batch[perm]
        edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm,
                                           num_nodes=score.size(0))

        return x, edge_index, edge_attr, batch, perm, score[perm] 
Developer: rusty1s, Project: pytorch_geometric, Lines of code: 26, Source: topk_pool.py

Example 3: message

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_i, x_j, edge_index, num_nodes):

        if self.att_type == "const":
            if self.training and self.dropout > 0:
                x_j = F.dropout(x_j, p=self.dropout, training=True)
            neighbor = x_j
        elif self.att_type == "gcn":
            if self.gcn_weight is None or self.gcn_weight.size(0) != x_j.size(0):  # gcn_weight must be recomputed for a different graph
                _, norm = self.norm(edge_index, num_nodes, None)
                self.gcn_weight = norm
            neighbor = self.gcn_weight.view(-1, 1, 1) * x_j
        else:
            # Compute attention coefficients.
            alpha = self.apply_attention(edge_index, num_nodes, x_i, x_j)
            alpha = softmax(alpha, edge_index[0], num_nodes)
            # Sample attention coefficients stochastically.
            if self.training and self.dropout > 0:
                alpha = F.dropout(alpha, p=self.dropout, training=True)

            neighbor = x_j * alpha.view(-1, self.heads, 1)
        if self.pool_dim > 0:
            for layer in self.pool_layer:
                neighbor = layer(neighbor)
        return neighbor 
Developer: GraphNAS, Project: GraphNAS, Lines of code: 26, Source: pyg_gnn_layer.py

Example 4: compute_edge_score_softmax

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def compute_edge_score_softmax(raw_edge_score, edge_index, num_nodes):
        return softmax(raw_edge_score, edge_index[1], num_nodes=num_nodes) 
Developer: rusty1s, Project: pytorch_geometric, Lines of code: 4, Source: edge_pool.py

Example 5: forward

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, x, batch, size=None):
        """"""
        x = x.unsqueeze(-1) if x.dim() == 1 else x
        size = batch[-1].item() + 1 if size is None else size

        gate = self.gate_nn(x).view(-1, 1)
        x = self.nn(x) if self.nn is not None else x
        assert gate.dim() == x.dim() and gate.size(0) == x.size(0)

        gate = softmax(gate, batch, num_nodes=size)
        out = scatter_add(gate * x, batch, dim=0, dim_size=size)

        return out 
Developer: rusty1s, Project: pytorch_geometric, Lines of code: 15, Source: attention.py

Example 6: message

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_j: Tensor, alpha_j: Tensor, alpha_i: OptTensor,
                index: Tensor, ptr: OptTensor,
                size_i: Optional[int]) -> Tensor:
        alpha = alpha_j if alpha_i is None else alpha_j + alpha_i
        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, index, ptr, size_i)
        self._alpha = alpha
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        return x_j * alpha.unsqueeze(-1) 
Developer: rusty1s, Project: pytorch_geometric, Lines of code: 11, Source: gat_conv.py

Example 7: message

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_j: Tensor, x_norm_i: Tensor, x_norm_j: Tensor,
                index: Tensor, ptr: OptTensor,
                size_i: Optional[int]) -> Tensor:
        alpha = self.beta * (x_norm_i * x_norm_j).sum(dim=-1)
        alpha = softmax(alpha, index, ptr, size_i)
        return x_j * alpha.view(-1, 1) 
Developer: rusty1s, Project: pytorch_geometric, Lines of code: 8, Source: agnn_conv.py

Example 8: test_softmax

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def test_softmax():
    src = torch.tensor([1., 1., 1., 1.])
    index = torch.tensor([0, 0, 1, 2])
    ptr = torch.tensor([0, 2, 3, 4])

    out = softmax(src, index)
    assert out.tolist() == [0.5, 0.5, 1, 1]
    assert softmax(src, None, ptr).tolist() == out.tolist() 
Developer: rusty1s, Project: pytorch_geometric, Lines of code: 10, Source: test_softmax.py
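
For reference, the index argument assigns each entry of src to a group element-wise, while the ptr argument describes the same grouping as CSR-style offsets (here, group 0 covers entries 0-1 and groups 1 and 2 are singletons); the test above verifies that both code paths produce the same result.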

Example 9: message

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_i, x_j, edge_index, num_nodes):
        # Compute attention coefficients.
        alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
        alpha = F.leaky_relu(alpha, self.negative_slope)
        alpha = softmax(alpha, edge_index[0], num_nodes)

        alpha = F.dropout(alpha, p=self.dropout)  # note: no training flag is passed, so dropout also stays active in eval mode

        return x_j * alpha.view(-1, self.heads, 1) 
Developer: jshang123, Project: G-Bert, Lines of code: 11, Source: graph_models.py

Example 10: message

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_q, x_k, x_v, edge_index_i, num_nodes):
        score = self.cal_att_score(x_q, x_k, self.heads)
        # score = F.leaky_relu(score)
        score = softmax(score, edge_index_i, num_nodes)

        # score = F.dropout(score, p=self.dropout, training=self.training)
        x_v = F.dropout(x_v, p=self.dropout, training=self.training)

        return x_v * score.view(-1, self.heads, 1) 
Developer: graph-star-team, Project: graph_star, Lines of code: 11, Source: graph_star_conv_multi_rel_super_attn.py

Example 11: message

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def message(self, x_q, x_k, x_v, edge_index, num_nodes):
        score = self.cal_att_score(x_q, x_k, self.heads)
        # score = F.leaky_relu(score)
        score = softmax(score, edge_index[0], num_nodes)

        # score = F.dropout(score, p=self.dropout, training=self.training)
        x_v = F.dropout(x_v, p=self.dropout, training=self.training)

        return x_v * score.view(-1, self.heads, 1) 
Developer: graph-star-team, Project: graph_star, Lines of code: 11, Source: graph_star_conv.py

Example 12: forward

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, neighbor_vecs, self_vecs):
        # shape [num_nodes, num_sample, num_heads]
        alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs * self.att_neighbor_weight).sum(dim=-1)
        alpha = F.leaky_relu(alpha, negative_slope=0.2)
        # alpha = torch.softmax(alpha, dim=-2)
        # Sample attention coefficients stochastically.
        return alpha 
Developer: GraphNAS, Project: GraphNAS, Lines of code: 9, Source: pyg_basic_operators.py

Example 13: preprocess

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def preprocess(self, alpha, edge_index, neighbor_vecs, num_nodes):
        if isinstance(alpha, int):
            if self.training and self.dropout > 0:
                neighbor_vecs = F.dropout(neighbor_vecs, p=self.dropout, training=self.training)
            return alpha*neighbor_vecs
        else:
            alpha = softmax(alpha, edge_index[0], num_nodes)
            # Sample attention coefficients stochastically.
            if self.training and self.dropout > 0:
                alpha = F.dropout(alpha, p=self.dropout, training=self.training)
            neighbor = neighbor_vecs * alpha.view(-1, self.num_head, 1)
        return neighbor 
Developer: GraphNAS, Project: GraphNAS, Lines of code: 14, Source: pyg_basic_operators.py

Example 14: forward

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, x, edge_index, edge_weight=None, batch=None):
        N = x.size(0)

        edge_index, edge_weight = add_remaining_self_loops(
            edge_index, edge_weight, fill_value=1, num_nodes=N)

        if batch is None:
            batch = edge_index.new_zeros(x.size(0))

        x = x.unsqueeze(-1) if x.dim() == 1 else x

        x_pool = x
        if self.GNN is not None:
            x_pool = self.gnn_intra_cluster(x=x, edge_index=edge_index,
                                            edge_weight=edge_weight)

        x_pool_j = x_pool[edge_index[0]]
        x_q = scatter(x_pool_j, edge_index[1], dim=0, reduce='max')
        x_q = self.lin(x_q)[edge_index[1]]

        score = self.att(torch.cat([x_q, x_pool_j], dim=-1)).view(-1)
        score = F.leaky_relu(score, self.negative_slope)
        score = softmax(score, edge_index[1], num_nodes=N)

        # Sample attention coefficients stochastically.
        score = F.dropout(score, p=self.dropout, training=self.training)

        v_j = x[edge_index[0]] * score.view(-1, 1)
        x = scatter(v_j, edge_index[1], dim=0, reduce='add')

        # Cluster selection.
        fitness = self.gnn_score(x, edge_index).sigmoid().view(-1)
        perm = topk(fitness, self.ratio, batch)
        x = x[perm] * fitness[perm].view(-1, 1)
        batch = batch[perm]

        # Graph coarsening.
        row, col = edge_index
        A = SparseTensor(row=row, col=col, value=edge_weight,
                         sparse_sizes=(N, N))
        S = SparseTensor(row=row, col=col, value=score, sparse_sizes=(N, N))
        S = S[:, perm]

        A = S.t() @ A @ S

        if self.add_self_loops:
            A = A.fill_diag(1.)
        else:
            A = A.remove_diag()

        row, col, edge_weight = A.coo()
        edge_index = torch.stack([row, col], dim=0)

        return x, edge_index, edge_weight, batch, perm 
Developer: rusty1s, Project: pytorch_geometric, Lines of code: 56, Source: asap.py

Example 15: forward

# Required import: from torch_geometric import utils [as alias]
# Or: from torch_geometric.utils import softmax [as alias]
def forward(self, x, hyperedge_index, hyperedge_weight=None):
        r"""
        Args:
            x (Tensor): Node feature matrix :math:`\mathbf{X}`
            hyperedge_index (LongTensor): Hyperedge indices from
                :math:`\mathbf{H}`.
            hyperedge_weight (Tensor, optional): Sparse hyperedge weights from
                :math:`\mathbf{W}`. (default: :obj:`None`)
        """
        x = torch.matmul(x, self.weight)
        alpha = None

        if self.use_attention:
            x = x.view(-1, self.heads, self.out_channels)
            x_i, x_j = x[hyperedge_index[0]], x[hyperedge_index[1]]
            alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
            alpha = F.leaky_relu(alpha, self.negative_slope)
            alpha = softmax(alpha, hyperedge_index[0], num_nodes=x.size(0))
            alpha = F.dropout(alpha, p=self.dropout, training=self.training)

        if hyperedge_weight is None:
            D = degree(hyperedge_index[0], x.size(0), x.dtype)
        else:
            D = scatter_add(hyperedge_weight[hyperedge_index[1]],
                            hyperedge_index[0], dim=0, dim_size=x.size(0))
        D = 1.0 / D
        D[D == float("inf")] = 0

        if hyperedge_index.numel() == 0:
            num_edges = 0
        else:
            num_edges = hyperedge_index[1].max().item() + 1
        B = 1.0 / degree(hyperedge_index[1], num_edges, x.dtype)
        B[B == float("inf")] = 0
        if hyperedge_weight is not None:
            B = B * hyperedge_weight

        self.flow = 'source_to_target'
        out = self.propagate(hyperedge_index, x=x, norm=B, alpha=alpha)
        self.flow = 'target_to_source'
        out = self.propagate(hyperedge_index, x=out, norm=D, alpha=alpha)

        if self.concat is True:
            out = out.view(-1, self.heads * self.out_channels)
        else:
            out = out.mean(dim=1)

        if self.bias is not None:
            out = out + self.bias

        return out 
Developer: rusty1s, Project: pytorch_geometric, Lines of code: 53, Source: hypergraph_conv.py


Note: The torch_geometric.utils.softmax method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code. Do not reproduce without permission.