This article collects typical usage examples of the torch_geometric.nn.GATConv method in Python. If you are wondering what nn.GATConv does, how to call it, or where to find usage examples, the curated code samples below may help. You can also explore further usage examples of the module it belongs to, torch_geometric.nn.
The following lists 13 code examples of nn.GATConv, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
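Before the examples, here is a minimal, self-contained sketch of how a single GATConv layer is typically applied to node features and an edge index. The four-node toy graph and tensor shapes are illustrative assumptions, not taken from any of the examples below.

# Minimal GATConv usage sketch (shapes are illustrative assumptions)
import torch
from torch_geometric.nn import GATConv

x = torch.randn(4, 16)                          # 4 nodes, 16 input features
edge_index = torch.tensor([[0, 1, 2, 3],        # source nodes
                           [1, 0, 3, 2]])       # target nodes
conv = GATConv(in_channels=16, out_channels=8, heads=4, dropout=0.6)
out = conv(x, edge_index)                       # shape [4, 4 * 8]; heads are concatenated by default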
Example 1: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
             heads):
    super(GAT, self).__init__()
    self.num_layers = num_layers
    # GAT stack: the first layer consumes the raw node features (via a
    # module-level `dataset`), the last maps to the output classes with
    # concat=False so the heads are averaged instead of concatenated.
    self.convs = torch.nn.ModuleList()
    self.convs.append(GATConv(dataset.num_features, hidden_channels, heads))
    for _ in range(num_layers - 2):
        self.convs.append(
            GATConv(heads * hidden_channels, hidden_channels, heads))
    self.convs.append(
        GATConv(heads * hidden_channels, out_channels, heads, concat=False))
    # One linear skip projection per GAT layer (Lin is presumably
    # torch.nn.Linear imported under an alias).
    self.skips = torch.nn.ModuleList()
    self.skips.append(Lin(dataset.num_features, hidden_channels * heads))
    for _ in range(num_layers - 2):
        self.skips.append(
            Lin(hidden_channels * heads, hidden_channels * heads))
    self.skips.append(Lin(hidden_channels * heads, out_channels))
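The example shows only the constructor; a plausible forward pass, assuming the common pattern of adding each linear skip projection to the matching GATConv output, could look like this (an illustrative sketch, not code from the original repository; it assumes `import torch.nn.functional as F`):

# Hypothetical forward pass for Example 1 (illustrative sketch)
def forward(self, x, edge_index):
    for i, (conv, skip) in enumerate(zip(self.convs, self.skips)):
        x = conv(x, edge_index) + skip(x)        # GAT output plus linear skip
        if i != self.num_layers - 1:             # no activation/dropout on the final layer
            x = F.elu(x)
            x = F.dropout(x, p=0.5, training=self.training)
    return x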
Example 2: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self,
             node_input_dim=15,
             output_dim=12,
             node_hidden_dim=64,
             num_step_prop=6,
             num_step_set2set=6):
    super(GAT, self).__init__()
    self.num_step_prop = num_step_prop
    self.lin0 = nn.Linear(node_input_dim, node_hidden_dim)
    # Single GATConv, presumably applied num_step_prop times during message passing.
    self.conv = GATConv(node_hidden_dim, node_hidden_dim)
    # Set2Set readout pools node embeddings into a graph-level vector of size
    # 2 * node_hidden_dim.
    self.set2set = Set2Set(node_hidden_dim, processing_steps=num_step_set2set)
    self.lin1 = nn.Linear(2 * node_hidden_dim, node_hidden_dim)
    self.lin2 = nn.Linear(node_hidden_dim, output_dim)
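A plausible forward pass for this graph-level model, assuming the usual MPNN-style loop of repeated propagation followed by the Set2Set readout (an illustrative sketch; `data.x`, `data.edge_index`, and `data.batch` are the standard PyTorch Geometric batch attributes, and `F` is torch.nn.functional):

# Hypothetical forward pass for Example 2 (illustrative sketch)
def forward(self, data):
    out = F.relu(self.lin0(data.x))                  # project raw node features
    for _ in range(self.num_step_prop):
        out = F.relu(self.conv(out, data.edge_index))
    out = self.set2set(out, data.batch)              # [num_graphs, 2 * node_hidden_dim]
    out = F.relu(self.lin1(out))
    return self.lin2(out)                            # [num_graphs, output_dim]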
Example 3: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self, in_channels, out_channels, kernel, head=8, dropout=0.5):
    super(My_GATRNNConv, self).__init__()
    # kernel is a gated GRUCell with shape [in_channels, out_channels]
    self.rnn = kernel
    # Multi-head GAT over the inputs; the concatenated head outputs
    # (in_channels * head) are compressed back to in_channels.
    self.conv = GATConv(in_channels, in_channels, heads=head, dropout=dropout)
    self.compress = nn.Linear(in_channels * head, in_channels)
    self.in_channels = in_channels
    self.opt = nn.Linear(in_channels, out_channels)
Example 4: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self, in_channels, out_channels):
    super(GAT, self).__init__()
    # Classic two-layer GAT: 8 heads of size 8 in the first layer (concatenated
    # to 64 features), a single-head second layer mapping to out_channels.
    self.conv1 = GATConv(in_channels, 8, heads=8, dropout=0.6)
    self.conv2 = GATConv(8 * 8, out_channels, dropout=0.6)
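Example 4 (and the similar two-layer models in Examples 6 and 7) follows the architecture of the original GAT paper. The forward pass commonly used with this constructor applies dropout to the inputs, ELU between the layers, and a log-softmax output; this is a sketch of that common pattern, assuming `F` is torch.nn.functional, not necessarily the exact code behind these snippets:

# Typical forward pass for a two-layer GAT (illustrative sketch)
def forward(self, x, edge_index):
    x = F.dropout(x, p=0.6, training=self.training)
    x = F.elu(self.conv1(x, edge_index))
    x = F.dropout(x, p=0.6, training=self.training)
    x = self.conv2(x, edge_index)
    return F.log_softmax(x, dim=-1)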
Example 5: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self, dataset):
    super(Net, self).__init__()
    # Hyperparameters are read from a module-level `args` namespace.
    self.conv1 = GATConv(
        dataset.num_features,
        args.hidden,
        heads=args.heads,
        dropout=args.dropout)
    self.conv2 = GATConv(
        args.hidden * args.heads,
        dataset.num_classes,
        heads=args.output_heads,
        concat=False,
        dropout=args.dropout)
Example 6: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self):
    super(Net, self).__init__()
    # .jittable() returns a TorchScript-compatible version of the layer.
    self.conv1 = GATConv(dataset.num_features, 8, heads=8,
                         dropout=0.6).jittable()
    self.conv2 = GATConv(64, dataset.num_classes, heads=1, concat=True,
                         dropout=0.6).jittable()
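With jittable convolutions, the surrounding module can then be compiled with TorchScript. A minimal sketch of that workflow, assuming the module's forward takes plain `(x, edge_index)` tensors with the type annotations TorchScript requires (`Net` and `data` come from this example; everything else is illustrative):

# Scripting a model built from jittable GATConv layers (illustrative sketch)
model = Net()
model = torch.jit.script(model)          # compile the module to TorchScript
out = model(data.x, data.edge_index)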
Example 7: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self):
    super(Net, self).__init__()
    self.conv1 = GATConv(dataset.num_features, 8, heads=8, dropout=0.6)
    # On the Pubmed dataset, use heads=8 in conv2.
    self.conv2 = GATConv(8 * 8, dataset.num_classes, heads=1, concat=False,
                         dropout=0.6)
Example 8: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self, in_dim, out_dim):
    super(Breadth, self).__init__()
    # Single-head GAT used as the breadth (neighborhood aggregation) step.
    self.gatconv = GATConv(in_dim, out_dim, heads=1)
Example 9: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self, in_dim, out_dim):
    super(Breadth, self).__init__()
    # Note: `heads` is not a parameter of this constructor, so it must be
    # defined at module level (e.g. a global hyperparameter) for this to run.
    self.gatconv = GATConv(in_dim, out_dim, heads=heads)
Example 10: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self):
    super(Net, self).__init__()
    # Three GAT layers, each paired with a linear skip projection.
    self.conv1 = GATConv(train_dataset.num_features, 256, heads=4)
    self.lin1 = torch.nn.Linear(train_dataset.num_features, 4 * 256)
    self.conv2 = GATConv(4 * 256, 256, heads=4)
    self.lin2 = torch.nn.Linear(4 * 256, 4 * 256)
    self.conv3 = GATConv(
        4 * 256, train_dataset.num_classes, heads=6, concat=False)
    self.lin3 = torch.nn.Linear(4 * 256, train_dataset.num_classes)
Example 11: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self, in_dim, out_dim):
    super(GAT, self).__init__()
    self.conv1 = GATConv(in_dim, 256, heads=4)
    self.lin1 = torch.nn.Linear(in_dim, 4 * 256)
    self.conv2 = GATConv(4 * 256, 256, heads=4)
    self.lin2 = torch.nn.Linear(4 * 256, 4 * 256)
    self.conv3 = GATConv(
        4 * 256, out_dim, heads=6, concat=False)
    self.lin3 = torch.nn.Linear(4 * 256, out_dim)
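Examples 10 and 11 pair each GATConv with a linear skip connection, as in PyTorch Geometric's PPI example. The forward pass that usually accompanies this constructor sums the two and applies ELU between layers; this is a sketch of that common pattern, assuming `F` is torch.nn.functional, not necessarily the exact code behind these snippets:

# Typical forward pass for the skip-connected GAT (illustrative sketch)
def forward(self, x, edge_index):
    x = F.elu(self.conv1(x, edge_index) + self.lin1(x))
    x = F.elu(self.conv2(x, edge_index) + self.lin2(x))
    x = self.conv3(x, edge_index) + self.lin3(x)
    return x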
Example 12: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self, inpt_size, output_size, user_embed_size,
             posemb_size, dropout=0.5, threshold=2, head=5):
    # inpt_size: utter_hidden_size + user_embed_size
    super(GATContext, self).__init__()
    # Node features: utterance + user embedding + positional embedding
    size = inpt_size + user_embed_size + posemb_size
    self.threshold = threshold
    # Three multi-head GAT layers
    self.conv1 = GATConv(size, inpt_size, heads=head, dropout=dropout)
    self.conv2 = GATConv(size, inpt_size, heads=head, dropout=dropout)
    self.conv3 = GATConv(size, inpt_size, heads=head, dropout=dropout)
    self.layer_norm1 = nn.LayerNorm(inpt_size)
    self.layer_norm2 = nn.LayerNorm(inpt_size)
    self.layer_norm3 = nn.LayerNorm(inpt_size)
    self.layer_norm4 = nn.LayerNorm(inpt_size)
    # Project the concatenated head outputs back to inpt_size
    self.compress = nn.Linear(head * inpt_size, inpt_size)
    # Bidirectional GRU over the background context
    self.rnn = nn.GRU(inpt_size + user_embed_size, inpt_size, bidirectional=True)
    self.linear1 = nn.Linear(inpt_size * 2, inpt_size)
    self.linear2 = nn.Linear(inpt_size, output_size)
    self.drop = nn.Dropout(p=dropout)
    # 100 is much larger than the maximum turn length in the Cornell and
    # DailyDialog datasets.
    self.posemb = nn.Embedding(100, posemb_size)
    self.init_weight()
Example 13: __init__
# Required import: from torch_geometric import nn [as alias]
# Or: from torch_geometric.nn import GATConv [as alias]
def __init__(self, num_edge, w_in, w_out):
    super(HANLayer, self).__init__()
    # One 8-head GATConv per edge type; each head has size w_out // 8, so the
    # concatenated output is w_out-dimensional.
    self.gat_layer = nn.ModuleList()
    for _ in range(num_edge):
        self.gat_layer.append(GATConv(w_in, w_out // 8, 8))
    # Attention layer that fuses the per-edge-type embeddings.
    self.att_layer = AttentionLayer(w_out)