

Python torch.spmm Method Code Examples

This article collects typical usage examples of Python's torch.spmm method. If you have been wondering what torch.spmm does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore other usage examples from the torch module.


The following presents 15 code examples of the torch.spmm method, sorted by popularity by default.
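Before the examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the basic contract of torch.spmm: the first argument must be a sparse matrix, the second a dense one, and the result is dense.

import torch

indices = torch.tensor([[0, 1, 2],    # row indices of the nonzero entries
                        [1, 2, 0]])   # column indices
values = torch.ones(3)
adj = torch.sparse_coo_tensor(indices, values, (3, 3))  # sparse 3x3 adjacency

features = torch.randn(3, 4)       # dense feature matrix
out = torch.spmm(adj, features)    # sparse @ dense -> dense, shape (3, 4)
print(out.shape)                   # torch.Size([3, 4])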

Example 1: forward

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)

        # Self-loop
        if self.self_weight is not None:
            output = output + torch.mm(input, self.self_weight)

        if self.bias is not None:
            output = output + self.bias
        # BN
        if self.bn is not None:
            output = self.bn(output)
        # Res
        if self.res:
            return self.sigma(output) + input
        else:
            return self.sigma(output) 
Developer: DropEdge, Project: DropEdge, Lines: 20, Source: layers.py

Example 2: get_graph_embedding

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def get_graph_embedding(self, adj):
        if self.node_features.data.is_sparse:
            node_embed = torch.spmm(self.node_features, self.w_n2l)
        else:
            node_embed = torch.mm(self.node_features, self.w_n2l)

        node_embed += self.bias_n2l

        input_message = node_embed
        node_embed = F.relu(input_message)

        for i in range(self.max_lv):
            n2npool = torch.spmm(adj, node_embed)
            node_linear = self.conv_params(n2npool)
            merged_linear = node_linear + input_message
            node_embed = F.relu(merged_linear)

        graph_embed = torch.mean(node_embed, dim=0, keepdim=True)
        return graph_embed, node_embed 
Developer: DSE-MSU, Project: DeepRobust, Lines: 21, Source: nipa_q_net_node.py

Example 3: forward

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def forward(self, previous_miu, previous_sigma, adj_norm1=None, adj_norm2=None, gamma=1):

        if adj_norm1 is None and adj_norm2 is None:
            return torch.mm(previous_miu, self.weight_miu), \
                    torch.mm(previous_miu, self.weight_miu)
                    # torch.mm(previous_sigma, self.weight_sigma)

        Att = torch.exp(-gamma * previous_sigma)
        M = adj_norm1 @ (previous_miu * Att) @ self.weight_miu
        Sigma = adj_norm2 @ (previous_sigma * Att * Att) @ self.weight_sigma
        return M, Sigma

        # M = torch.mm(torch.mm(adj, previous_miu * A), self.weight_miu)
        # Sigma = torch.mm(torch.mm(adj, previous_sigma * A * A), self.weight_sigma)

        # TODO sparse implementation
        # support = torch.mm(input, self.weight)
        # output = torch.spmm(adj, support)
        # return output + self.bias 
Developer: DSE-MSU, Project: DeepRobust, Lines: 21, Source: r_gcn.py

Example 4: prox_nuclear_truncated_2

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def prox_nuclear_truncated_2(self, data, alpha, k=50):
        import tensorly as tl
        tl.set_backend('pytorch')
        U, S, V = tl.truncated_svd(data.cpu(), n_eigenvecs=k)
        U, S, V = torch.FloatTensor(U).cuda(), torch.FloatTensor(S).cuda(), torch.FloatTensor(V).cuda()
        self.nuclear_norm = S.sum()
        # print("nuclear norm: %.4f" % self.nuclear_norm)

        S = torch.clamp(S-alpha, min=0)
        # Build a sparse diagonal matrix of the thresholded singular values;
        # with a truncated SVD, S has length k, so the diagonal must be k x k.
        indices = torch.tensor([range(0, S.shape[0]), range(0, S.shape[0])]).cuda()
        values = S
        diag_S = torch.sparse.FloatTensor(indices, values, torch.Size([S.shape[0], S.shape[0]]))
        # diag_S = torch.diag(torch.clamp(S-alpha, min=0))
        # torch.spmm expects its sparse operand first: U @ diag_S == (diag_S @ U.t()).t()
        U = torch.spmm(diag_S, U.t()).t()
        V = torch.matmul(U, V)
        return V 
Developer: DSE-MSU, Project: DeepRobust, Lines: 18, Source: pgd.py
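As a sanity check on the sparse-diagonal trick used in Example 4 (a standalone sketch, not part of the original pgd.py): left-multiplying by the sparse diagonal of S is equivalent to the dense torch.diag(S) product.

import torch

S = torch.rand(4)
M = torch.randn(4, 4)
idx = torch.stack([torch.arange(4), torch.arange(4)])  # diagonal coordinates
diag_S = torch.sparse_coo_tensor(idx, S, (4, 4))
assert torch.allclose(torch.spmm(diag_S, M), torch.diag(S) @ M)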

Example 5: forward

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def forward(self, input_x, graph_pool, X_concat):
        prediction_scores = 0
        input_Tr = F.embedding(input_x, X_concat)
        for layer_idx in range(self.num_U2GNN_layers):
            output_Tr = self.u2gnn_layers[layer_idx](input_Tr)
            output_Tr = torch.split(output_Tr, split_size_or_sections=1, dim=1)[0]
            output_Tr = torch.squeeze(output_Tr, dim=1)
            # new input for the next layer
            input_Tr = F.embedding(input_x, output_Tr)
            # sum pooling
            graph_embeddings = torch.spmm(graph_pool, output_Tr)
            graph_embeddings = self.dropouts[layer_idx](graph_embeddings)
            # Produce the final scores
            prediction_scores += self.predictions[layer_idx](graph_embeddings)

        return prediction_scores 
Developer: daiquocnguyen, Project: Graph-Transformer, Lines: 19, Source: pytorch_U2GNN_Sup.py

Example 6: evaluate

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def evaluate():
    model.eval() # Turn on the evaluation mode
    with torch.no_grad():
        # evaluating
        node_embeddings = model.ss.weight
        graph_embeddings = torch.spmm(graph_pool, node_embeddings).data.cpu().numpy()
        acc_10folds = []
        for fold_idx in range(10):
            train_idx, test_idx = separate_data_idx(graphs, fold_idx)
            train_graph_embeddings = graph_embeddings[train_idx]
            test_graph_embeddings = graph_embeddings[test_idx]
            train_labels = graph_labels[train_idx]
            test_labels = graph_labels[test_idx]

            cls = LogisticRegression(solver="liblinear", tol=0.001)
            cls.fit(train_graph_embeddings, train_labels)
            ACC = cls.score(test_graph_embeddings, test_labels)
            acc_10folds.append(ACC)
            print('epoch ', epoch, ' fold ', fold_idx, ' acc ', ACC)

        mean_10folds = statistics.mean(acc_10folds)
        std_10folds = statistics.stdev(acc_10folds)
        # print('epoch ', epoch, ' mean: ', str(mean_10folds), ' std: ', str(std_10folds))

    return mean_10folds, std_10folds 
Developer: daiquocnguyen, Project: Graph-Transformer, Lines: 27, Source: train_pytorch_U2GNN_UnSup.py

Example 7: sgc_precompute

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def sgc_precompute(adj, features, degree, index_dict):
    assert degree == 1, "Only supporting degree 1 now"
    feat_dict = {}
    start = perf_counter()
    train_feats = features[:, index_dict["train"]].cuda()
    train_feats = torch.spmm(adj, train_feats).t()
    train_feats_max, _ = train_feats.max(dim=0, keepdim=True)
    train_feats_min, _ = train_feats.min(dim=0, keepdim=True)
    train_feats_range = train_feats_max-train_feats_min
    useful_features_dim = train_feats_range.squeeze().gt(0).nonzero().squeeze()
    train_feats = train_feats[:, useful_features_dim]
    train_feats_range = train_feats_range[:, useful_features_dim]
    train_feats_min = train_feats_min[:, useful_features_dim]
    train_feats = (train_feats-train_feats_min)/train_feats_range
    feat_dict["train"] = train_feats
    for phase in ["test", "val"]:
        feats = features[:, index_dict[phase]].cuda()
        feats = torch.spmm(adj, feats).t()
        feats = feats[:, useful_features_dim]
        feat_dict[phase] = ((feats-train_feats_min)/train_feats_range).cpu() # adj is symmetric!
    precompute_time = perf_counter()-start
    return feat_dict, precompute_time 
Developer: Tiiiger, Project: SGC, Lines: 24, Source: utils.py

Example 8: downsample

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def downsample(self, x, n1=0, n2=None):
        """Downsample mesh."""
        if n2 is None:
            n2 = self.num_downsampling
        if x.ndimension() < 3:
            for i in range(n1, n2):
                x = spmm(self._D[i], x)
        elif x.ndimension() == 3:
            out = []
            for i in range(x.shape[0]):
                y = x[i]
                for j in range(n1, n2):
                    y = spmm(self._D[j], y)
                out.append(y)
            x = torch.stack(out, dim=0)
        return x 
Developer: nkolot, Project: GraphCMR, Lines: 18, Source: mesh.py

Example 9: forward

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def forward(self, input, adj):
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Developer: meliketoy, Project: graph-cnn.pytorch, Lines: 9, Source: layers.py
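Since Example 9 is the canonical GCN layer, here is a hypothetical minimal module wrapping that forward pass so it can be run end to end; the parameter names mirror the snippet, while the initialization and the usage lines below are assumptions.

import torch
import torch.nn as nn

class GraphConvolution(nn.Module):
    def __init__(self, in_features, out_features, bias=True):
        super().__init__()
        # Assumed initialization; the original layer defines its own scheme.
        self.weight = nn.Parameter(torch.randn(in_features, out_features) * 0.01)
        self.bias = nn.Parameter(torch.zeros(out_features)) if bias else None

    def forward(self, input, adj):
        support = torch.mm(input, self.weight)  # dense: X @ W
        output = torch.spmm(adj, support)       # sparse: A @ (X @ W)
        return output + self.bias if self.bias is not None else output

layer = GraphConvolution(16, 8)
adj = torch.eye(5).to_sparse()  # identity adjacency, just for a shape check
x = torch.randn(5, 16)
print(layer(x, adj).shape)      # torch.Size([5, 8])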

Example 10: sgc_precompute

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def sgc_precompute(features, adj, degree):
    #t = perf_counter()
    for i in range(degree):
        features = torch.spmm(adj, features)
    precompute_time = 0 #perf_counter()-t
    return features, precompute_time 
Developer: DropEdge, Project: DropEdge, Lines: 8, Source: utils.py
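A hedged usage sketch for the sgc_precompute above: degree controls how many propagation steps A @ X are applied. The row-normalization helper here is an assumption for illustration, not part of the original utils.py.

import torch

def row_normalize_to_sparse(adj_dense):
    # Hypothetical helper: row-normalize a dense adjacency, then sparsify.
    deg = adj_dense.sum(dim=1).clamp(min=1.0)
    return (adj_dense / deg.unsqueeze(1)).to_sparse()

adj = row_normalize_to_sparse(torch.eye(4) + torch.bernoulli(torch.full((4, 4), 0.3)))
features = torch.randn(4, 8)
features, _ = sgc_precompute(features, adj, degree=2)  # two rounds of A @ X
print(features.shape)  # torch.Size([4, 8])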

Example 11: spmm

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def spmm(x, y):
    return th.spmm(x, y) 
Developer: dmlc, Project: dgl, Lines: 4, Source: __init__.py

Example 12: inner_train

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def inner_train(self, features, adj_norm, idx_train, idx_unlabeled, labels):
        self._initialize()

        for ix in range(len(self.hidden_sizes) + 1):
            self.weights[ix] = self.weights[ix].detach()
            self.weights[ix].requires_grad = True
            self.w_velocities[ix] = self.w_velocities[ix].detach()
            self.w_velocities[ix].requires_grad = True

            if self.with_bias:
                self.biases[ix] = self.biases[ix].detach()
                self.biases[ix].requires_grad = True
                self.b_velocities[ix] = self.b_velocities[ix].detach()
                self.b_velocities[ix].requires_grad = True

        for j in range(self.train_iters):
            hidden = features
            for ix, w in enumerate(self.weights):
                b = self.biases[ix] if self.with_bias else 0
                if self.sparse_features:
                    hidden = adj_norm @ torch.spmm(hidden, w) + b
                else:
                    hidden = adj_norm @ hidden @ w + b
                if self.with_relu:
                    hidden = F.relu(hidden)

            output = F.log_softmax(hidden, dim=1)
            loss_labeled = F.nll_loss(output[idx_train], labels[idx_train])

            weight_grads = torch.autograd.grad(loss_labeled, self.weights, create_graph=True)
            self.w_velocities = [self.momentum * v + g for v, g in zip(self.w_velocities, weight_grads)]
            if self.with_bias:
                bias_grads = torch.autograd.grad(loss_labeled, self.biases, create_graph=True)
                self.b_velocities = [self.momentum * v + g for v, g in zip(self.b_velocities, bias_grads)]

            self.weights = [w - self.lr * v for w, v in zip(self.weights, self.w_velocities)]
            if self.with_bias:
                self.biases = [b - self.lr * v for b, v in zip(self.biases, self.b_velocities)] 
Developer: DSE-MSU, Project: DeepRobust, Lines: 40, Source: mettack.py

Example 13: get_meta_grad

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def get_meta_grad(self, features, adj_norm, idx_train, idx_unlabeled, labels, labels_self_training):

        hidden = features
        for ix, w in enumerate(self.weights):
            b = self.biases[ix] if self.with_bias else 0
            if self.sparse_features:
                hidden = adj_norm @ torch.spmm(hidden, w) + b
            else:
                hidden = adj_norm @ hidden @ w + b
            if self.with_relu:
                hidden = F.relu(hidden)

        output = F.log_softmax(hidden, dim=1)

        loss_labeled = F.nll_loss(output[idx_train], labels[idx_train])
        loss_unlabeled = F.nll_loss(output[idx_unlabeled], labels_self_training[idx_unlabeled])
        loss_test_val = F.nll_loss(output[idx_unlabeled], labels[idx_unlabeled])

        if self.lambda_ == 1:
            attack_loss = loss_labeled
        elif self.lambda_ == 0:
            attack_loss = loss_unlabeled
        else:
            attack_loss = self.lambda_ * loss_labeled + (1 - self.lambda_) * loss_unlabeled

        print('GCN loss on unlabeled data: {}'.format(loss_test_val.item()))
        print('GCN acc on unlabeled data: {}'.format(utils.accuracy(output[idx_unlabeled], labels[idx_unlabeled]).item()))
        print('attack loss: {}'.format(attack_loss.item()))

        adj_grad, feature_grad = None, None
        if self.attack_structure:
            adj_grad = torch.autograd.grad(attack_loss, self.adj_changes, retain_graph=True)[0]
        if self.attack_features:
            feature_grad = torch.autograd.grad(attack_loss, self.feature_changes, retain_graph=True)[0]
        return adj_grad, feature_grad 
Developer: DSE-MSU, Project: DeepRobust, Lines: 37, Source: mettack.py

Example 14: prox_nuclear_cuda

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def prox_nuclear_cuda(self, data, alpha):

        U, S, V = torch.svd(data)
        # self.nuclear_norm = S.sum()
        # print(f"rank = {len(S.nonzero())}")
        S = torch.clamp(S-alpha, min=0)
        indices = torch.tensor([range(0, U.shape[0]),range(0, U.shape[0])]).cuda()
        values = S
        diag_S = torch.sparse.FloatTensor(indices, values, torch.Size(U.shape))
        # diag_S = torch.diag(torch.clamp(S-alpha, min=0))
        # print(f"rank_after = {len(diag_S.nonzero())}")
        V = torch.spmm(diag_S, V.t_())
        V = torch.matmul(U, V)
        return V 
Developer: DSE-MSU, Project: DeepRobust, Lines: 16, Source: pgd.py

Example 15: forward

# Required import: import torch [as alias]
# Or: from torch import spmm [as alias]
def forward(self, input, edge_index):
        adj = torch.sparse_coo_tensor(
            edge_index,
            torch.ones(edge_index.shape[1]).float(),
            (input.shape[0], input.shape[0]),
        ).to(input.device)
        support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output 
Developer: THUDM, Project: cogdl, Lines: 14, Source: gcn.py
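A small sketch of the edge_index convention Example 15 assumes (a 2 x num_edges LongTensor of source/target indices, as in PyTorch Geometric and CogDL); deriving it from a dense adjacency here is purely for illustration.

import torch

adj_dense = torch.tensor([[0., 1., 0.],
                          [1., 0., 1.],
                          [0., 1., 0.]])
edge_index = adj_dense.nonzero().t()  # shape (2, num_edges)
adj = torch.sparse_coo_tensor(
    edge_index, torch.ones(edge_index.shape[1]), (3, 3)
)
assert torch.allclose(adj.to_dense(), adj_dense)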


Note: The torch.spmm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.