

Python nn.GCNConv Method Code Examples

This article collects and summarizes typical usage examples of the torch_geometric.nn.GCNConv method in Python. If you are wondering how exactly nn.GCNConv is used, what it looks like in practice, or where to find working examples, the curated code samples below may help. You can also explore further usage examples from the torch_geometric.nn module.


The following presents 15 code examples of the nn.GCNConv method, sorted by popularity by default.
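
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of how GCNConv is typically constructed and called; the dimensions and names are placeholder assumptions:

import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv

class TwoLayerGCN(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, num_classes):
        super().__init__()
        # GCNConv(in_channels, out_channels) builds one graph convolution layer.
        self.conv1 = GCNConv(in_channels, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, num_classes)

    def forward(self, x, edge_index):
        # Each layer is called with the node feature matrix and the edge index.
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=-1)

# Hypothetical usage with random data: 4 nodes, 16 input features, 3 classes.
x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]])
model = TwoLayerGCN(16, 32, 3)
out = model(x, edge_index)  # shape: [4, 3]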

Example 1: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, dataset, gconv=GCNConv, latent_dim=[32, 32, 32, 1], k=30, 
                 regression=False, adj_dropout=0.2, force_undirected=False):
        super(DGCNN, self).__init__(
            dataset, gconv, latent_dim, regression, adj_dropout, force_undirected
        )
        if k < 1:  # transform percentile to number
            node_nums = sorted([g.num_nodes for g in dataset])
            k = node_nums[int(math.ceil(k * len(node_nums)))-1]
            k = max(10, k)  # no smaller than 10
        self.k = int(k)
        print('k used in sortpooling is:', self.k)
        conv1d_channels = [16, 32]
        conv1d_activation = nn.ReLU()
        self.total_latent_dim = sum(latent_dim)
        conv1d_kws = [self.total_latent_dim, 5]
        self.conv1d_params1 = Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0])
        self.maxpool1d = nn.MaxPool1d(2, 2)
        self.conv1d_params2 = Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1], 1)
        dense_dim = int((k - 2) / 2 + 1)
        self.dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]
        self.lin1 = Linear(self.dense_dim, 128) 
Developer: muhanzhang, Project: IGMC, Lines: 23, Source: models.py

Example 2: test_asap

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def test_asap():
    in_channels = 16
    edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
                               [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    for GNN in [GraphConv, GCNConv]:
        pool = ASAPooling(in_channels, ratio=0.5, GNN=GNN,
                          add_self_loops=False)
        assert pool.__repr__() == ('ASAPooling(16, ratio=0.5)')
        out = pool(x, edge_index)
        assert out[0].size() == (num_nodes // 2, in_channels)
        assert out[1].size() == (2, 2)

        pool = ASAPooling(in_channels, ratio=0.5, GNN=GNN, add_self_loops=True)
        assert pool.__repr__() == ('ASAPooling(16, ratio=0.5)')
        out = pool(x, edge_index)
        assert out[0].size() == (num_nodes // 2, in_channels)
        assert out[1].size() == (2, 4) 
Developer: rusty1s, Project: pytorch_geometric, Lines: 22, Source: test_asap.py

Example 3: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self,
                 dataset: InMemoryDataset,
                 hidden: List[int] = [64],
                 dropout: float = 0.5):
        super(GCN, self).__init__()

        num_features = [dataset.data.x.shape[1]] + hidden + [dataset.num_classes]
        layers = []
        for in_features, out_features in zip(num_features[:-1], num_features[1:]):
            layers.append(GCNConv(in_features, out_features))
        self.layers = ModuleList(layers)

        self.reg_params = list(layers[0].parameters())
        self.non_reg_params = list([p for l in layers[1:] for p in l.parameters()])

        self.dropout = Dropout(p=dropout)
        self.act_fn = ReLU() 
Developer: klicperajo, Project: gdc, Lines: 19, Source: models.py

Example 4: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, input_dim, hidden_dim, label_dim, num_layers,
            pred_hidden_dims=[], concat=True, bn=True, dropout=0.0, add_self=False, args=None):
        super(GCNNet, self).__init__()
        self.input_dim = input_dim
        print ('GCNNet input_dim:', self.input_dim)
        self.hidden_dim = hidden_dim
        print ('GCNNet hidden_dim:', self.hidden_dim)
        self.label_dim = label_dim
        print ('GCNNet label_dim:', self.label_dim)
        self.num_layers = num_layers
        print ('GCNNet num_layers:', self.num_layers)

        # self.concat = concat
        # self.bn = bn
        # self.add_self = add_self
        self.args = args
        self.dropout = dropout
        self.act = F.relu

        self.convs = torch.nn.ModuleList()
        self.convs.append(GCNConv(self.input_dim, self.hidden_dim))
        for layer in range(self.num_layers - 2):
            self.convs.append(GCNConv(self.hidden_dim, self.hidden_dim))
        self.convs.append(GCNConv(self.hidden_dim, self.label_dim))
        print ('len(self.convs):', len(self.convs)) 
Developer: RexYing, Project: gnn-model-explainer, Lines: 27, Source: models_pyg.py

Example 5: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, num_edge, num_channels, w_in, w_out, num_class, num_nodes, num_layers):
        super(GTN, self).__init__()
        self.num_edge = num_edge
        self.num_channels = num_channels
        self.num_nodes = num_nodes
        self.w_in = w_in
        self.w_out = w_out
        self.num_class = num_class
        self.num_layers = num_layers
        layers = []
        for i in range(num_layers):
            if i == 0:
                layers.append(GTLayer(num_edge, num_channels, num_nodes, first=True))
            else:
                layers.append(GTLayer(num_edge, num_channels, num_nodes, first=False))
        self.layers = nn.ModuleList(layers)
        self.cross_entropy_loss = nn.CrossEntropyLoss()
        self.gcn = GCNConv(in_channels=self.w_in, out_channels=w_out)
        self.linear1 = nn.Linear(self.w_out*self.num_channels, self.w_out)
        self.linear2 = nn.Linear(self.w_out, self.num_class) 
Developer: THUDM, Project: cogdl, Lines: 22, Source: gtn.py

Example 6: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, Gnn_layers, use_gpu):
        super().__init__()
        self.gnn_layers = nn.ModuleList([GatedGraphConv(2116, 2) for l in range(Gnn_layers)])
        #self.gnn_layers = nn.ModuleList([GCNConv(2116,2116) for l in range(Gnn_layers)])
        #self.gnn_actfs = nn.ModuleList([nn.LeakyReLU() for l in range(Gnn_layers)])
        self.use_gpu = use_gpu 
Developer: HaiyangLiu1997, Project: Pytorch-Networks, Lines: 8, Source: CNN_GNN2018.py

Example 7: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, in_channels, hidden_channels, out_channels, depth,
                 pool_ratios=0.5, sum_res=True, act=F.relu):
        super(GraphUNet, self).__init__()
        assert depth >= 1
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.depth = depth
        self.pool_ratios = repeat(pool_ratios, depth)
        self.act = act
        self.sum_res = sum_res

        channels = hidden_channels

        self.down_convs = torch.nn.ModuleList()
        self.pools = torch.nn.ModuleList()
        self.down_convs.append(GCNConv(in_channels, channels, improved=True))
        for i in range(depth):
            self.pools.append(TopKPooling(channels, self.pool_ratios[i]))
            self.down_convs.append(GCNConv(channels, channels, improved=True))

        in_channels = channels if sum_res else 2 * channels

        self.up_convs = torch.nn.ModuleList()
        for i in range(depth - 1):
            self.up_convs.append(GCNConv(in_channels, channels, improved=True))
        self.up_convs.append(GCNConv(in_channels, out_channels, improved=True))

        self.reset_parameters() 
Developer: rusty1s, Project: pytorch_geometric, Lines: 31, Source: graph_unet.py

Example 8: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, dataset, num_layers, hidden):
        super(GCN, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(GCNConv(hidden, hidden))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes) 
Developer: rusty1s, Project: pytorch_geometric, Lines: 10, Source: gcn.py
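
The snippet in Example 8 only shows the constructor. A hedged sketch of a forward pass that fits this kind of graph-classification model, assuming global mean pooling over the batch (the pooling choice and the data argument are assumptions, not shown in the snippet):

import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = F.relu(self.conv1(x, edge_index))
    for conv in self.convs:
        x = F.relu(conv(x, edge_index))
    x = global_mean_pool(x, batch)                   # aggregate node features per graph
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    x = self.lin2(x)
    return F.log_softmax(x, dim=-1)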

Example 9: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, in_channels, out_channels):
        super(GCN, self).__init__()
        self.conv1 = GCNConv(in_channels, 16, cached=True)
        self.conv2 = GCNConv(16, out_channels, cached=True) 
Developer: rusty1s, Project: pytorch_geometric, Lines: 6, Source: gcn.py

Example 10: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, dataset):
        super(Net, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, args.hidden)
        self.conv2 = GCNConv(args.hidden, dataset.num_classes) 
Developer: rusty1s, Project: pytorch_geometric, Lines: 6, Source: gcn.py

Example 11: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, in_channels, hidden_channels, out_channels):
        super(Encoder, self).__init__()
        self.conv1 = GCNConv(in_channels, hidden_channels, cached=True)
        self.conv_mu = GCNConv(hidden_channels, out_channels, cached=True)
        self.conv_logstd = GCNConv(hidden_channels, out_channels, cached=True) 
Developer: rusty1s, Project: pytorch_geometric, Lines: 7, Source: argva_node_clustering.py

Example 12: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, in_channels, out_channels):
        super(Encoder, self).__init__()
        self.conv1 = GCNConv(in_channels, 2 * out_channels, cached=True)
        if args.model in ['GAE']:
            self.conv2 = GCNConv(2 * out_channels, out_channels, cached=True)
        elif args.model in ['VGAE']:
            self.conv_mu = GCNConv(2 * out_channels, out_channels, cached=True)
            self.conv_logstd = GCNConv(2 * out_channels, out_channels,
                                       cached=True) 
Developer: rusty1s, Project: pytorch_geometric, Lines: 11, Source: autoencoder.py
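
Example 12 only defines the encoder layers. A hedged sketch of how such an encoder is usually completed and paired with torch_geometric.nn.GAE or VGAE (the forward body and the wiring below are assumptions based on the layers in the snippet):

import torch.nn.functional as F
from torch_geometric.nn import GAE, VGAE

# Assumed forward pass: GAE expects a single embedding, while VGAE expects the
# (mu, logstd) pair produced by conv_mu / conv_logstd.
def forward(self, x, edge_index):
    x = F.relu(self.conv1(x, edge_index))
    if args.model in ['GAE']:
        return self.conv2(x, edge_index)
    return self.conv_mu(x, edge_index), self.conv_logstd(x, edge_index)

# Hypothetical wiring:
# model = GAE(Encoder(in_channels, out_channels))   # when args.model == 'GAE'
# model = VGAE(Encoder(in_channels, out_channels))  # when args.model == 'VGAE'
# z = model.encode(x, edge_index)
# loss = model.recon_loss(z, edge_index)            # plus model.kl_loss() for VGAE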

Example 13: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, hidden_channels, num_layers, GNN=GCNConv, k=0.6):
        super(DGCNN, self).__init__()

        if k < 1:  # Transform percentile to number.
            num_nodes = sorted([data.num_nodes for data in train_dataset])
            k = num_nodes[int(math.ceil(k * len(num_nodes))) - 1]
            k = max(10, k)
        self.k = int(k)

        self.convs = ModuleList()
        self.convs.append(GNN(train_dataset.num_features, hidden_channels))
        for i in range(0, num_layers - 1):
            self.convs.append(GNN(hidden_channels, hidden_channels))
        self.convs.append(GNN(hidden_channels, 1))

        conv1d_channels = [16, 32]
        total_latent_dim = hidden_channels * num_layers + 1
        conv1d_kws = [total_latent_dim, 5]
        self.conv1 = Conv1d(1, conv1d_channels[0], conv1d_kws[0],
                            conv1d_kws[0])
        self.maxpool1d = MaxPool1d(2, 2)
        self.conv2 = Conv1d(conv1d_channels[0], conv1d_channels[1],
                            conv1d_kws[1], 1)
        dense_dim = int((self.k - 2) / 2 + 1)
        dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]
        self.lin1 = Linear(dense_dim, 128)
        self.lin2 = Linear(128, 1) 
Developer: rusty1s, Project: pytorch_geometric, Lines: 29, Source: seal_link_pred.py
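
Example 13 shows only the constructor of this SortPool-style DGCNN. A hedged sketch of the forward pass that the architecture implies: node embeddings from each GNN layer are concatenated, each graph's nodes are sorted and truncated to k by global_sort_pool, and the result is read as a 1D signal by the Conv1d/MaxPool1d stack. Variable names below are assumptions:

import torch
import torch.nn.functional as F
from torch_geometric.nn import global_sort_pool

def forward(self, x, edge_index, batch):
    # Concatenate the per-layer node embeddings (total_latent_dim channels).
    xs = [x]
    for conv in self.convs:
        xs.append(torch.tanh(conv(xs[-1], edge_index)))
    x = torch.cat(xs[1:], dim=-1)

    # Keep the k "largest" nodes per graph and flatten them into a sequence.
    x = global_sort_pool(x, batch, self.k)   # [num_graphs, k * total_latent_dim]
    x = x.unsqueeze(1)                       # [num_graphs, 1, k * total_latent_dim]

    # 1D convolutions over the sorted node sequence, then the MLP head.
    x = F.relu(self.conv1(x))
    x = self.maxpool1d(x)
    x = F.relu(self.conv2(x))
    x = x.view(x.size(0), -1)                # [num_graphs, dense_dim]
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    return self.lin2(x)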

Example 14: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self, in_channels):
        super(Net, self).__init__()

        self.conv1 = GINConv(Seq(Lin(in_channels, 64), ReLU(), Lin(64, 64)))
        self.pool1 = SAGPooling(64, min_score=0.001, GNN=GCNConv)
        self.conv2 = GINConv(Seq(Lin(64, 64), ReLU(), Lin(64, 64)))
        self.pool2 = SAGPooling(64, min_score=0.001, GNN=GCNConv)
        self.conv3 = GINConv(Seq(Lin(64, 64), ReLU(), Lin(64, 64)))

        self.lin = torch.nn.Linear(64, 1) 
Developer: rusty1s, Project: pytorch_geometric, Lines: 12, Source: triangles_sag_pool.py

Example 15: __init__

# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GCNConv [as alias]
def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, 16, cached=True)
        self.conv2 = GCNConv(16, dataset.num_classes, cached=True) 
Developer: rusty1s, Project: pytorch_geometric, Lines: 6, Source: tensorboard_logging.py
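
Examples 10 and 15 both build a plain two-layer GCN for node classification. A hedged sketch of the training loop that usually accompanies them, assuming a Planetoid-style data object with train_mask and y fields and a forward that returns log-probabilities (all of these are assumptions, not shown in the snippets):

import torch
import torch.nn.functional as F

# Assumes `model` (e.g. Net from Example 15) and a Planetoid-style `data` object exist.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

model.train()
for epoch in range(200):
    optimizer.zero_grad()
    out = model()  # some examples take no arguments; others expect (data.x, data.edge_index)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()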


Note: The torch_geometric.nn.GCNConv examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors, and the copyright of the source code remains with those authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce this content without permission.