This article collects typical usage examples of the Python method torch_scatter.scatter_add. If you have been wondering what torch_scatter.scatter_add does, how to call it, or where it is used in practice, the curated code examples below should help. You can also explore other methods of the torch_scatter module.
The following section presents 15 code examples of torch_scatter.scatter_add, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
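Before the examples, a minimal sketch of what scatter_add computes may be useful: every src[i] is accumulated into out[index[i]] along the given dimension (the tensors below are illustrative; torch_scatter is assumed to be installed).

import torch
from torch_scatter import scatter_add

src = torch.tensor([1., 2., 3., 4.])
index = torch.tensor([0, 1, 0, 2])
out = scatter_add(src, index, dim=0, dim_size=3)
# out == tensor([4., 2., 4.]): src[0] and src[2] both map to out[0].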
Example 1: forward
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def forward(self, x, edge_index):
"""
    Forward propagation pass with features and indices.
:param x: Feature matrix.
:param edge_index: Indices.
"""
edge_index, _ = remove_self_loops(edge_index, None)
row, col = edge_index
if self.norm:
out = scatter_mean(x[col], row, dim=0, dim_size=x.size(0))
else:
out = scatter_add(x[col], row, dim=0, dim_size=x.size(0))
out = torch.cat((out, x), 1)
out = torch.matmul(out, self.weight)
if self.bias is not None:
out = out + self.bias
if self.norm_embed:
out = F.normalize(out, p=2, dim=-1)
return out
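A toy run of the aggregation core above (the surrounding class is not shown in the example, so the tensors here are illustrative):

import torch
from torch_scatter import scatter_add

# 3-node graph; by convention row indexes targets and col indexes sources.
edge_index = torch.tensor([[1, 1, 0],
                           [0, 2, 1]])
x = torch.eye(3)
row, col = edge_index
out = scatter_add(x[col], row, dim=0, dim_size=x.size(0))
# out[1] sums the features of neighbors 0 and 2; out[2] stays zero (no in-edges).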
Example 2: __call__
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def __call__(self, data):
assert 'face' in data
pos, face = data.pos, data.face
vec1 = pos[face[1]] - pos[face[0]]
vec2 = pos[face[2]] - pos[face[0]]
face_norm = F.normalize(vec1.cross(vec2), p=2, dim=-1) # [F, 3]
idx = torch.cat([face[0], face[1], face[2]], dim=0)
face_norm = face_norm.repeat(3, 1)
norm = scatter_add(face_norm, idx, dim=0, dim_size=pos.size(0))
norm = F.normalize(norm, p=2, dim=-1) # [N, 3]
data.norm = norm
return data
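A quick check of this transform's core on a flat mesh (standalone sketch; the Data wrapper is omitted and the mesh is made up):

import torch
import torch.nn.functional as F
from torch_scatter import scatter_add

# Two triangles in the z = 0 plane sharing the edge (1, 2).
pos = torch.tensor([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [1., 1., 0.]])
face = torch.tensor([[0, 1], [1, 3], [2, 2]])  # shape [3, F]
vec1 = pos[face[1]] - pos[face[0]]
vec2 = pos[face[2]] - pos[face[0]]
face_norm = F.normalize(vec1.cross(vec2), p=2, dim=-1)
idx = torch.cat([face[0], face[1], face[2]], dim=0)
norm = scatter_add(face_norm.repeat(3, 1), idx, dim=0, dim_size=pos.size(0))
norm = F.normalize(norm, p=2, dim=-1)  # every vertex normal is (0, 0, 1) here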
Example 3: forward
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def forward(self, x, batch):
""""""
batch_size = batch.max().item() + 1
h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),
x.new_zeros((self.num_layers, batch_size, self.in_channels)))
q_star = x.new_zeros(batch_size, self.out_channels)
for i in range(self.processing_steps):
q, h = self.lstm(q_star.unsqueeze(0), h)
q = q.view(batch_size, self.in_channels)
e = (x * q[batch]).sum(dim=-1, keepdim=True)
a = softmax(e, batch, num_nodes=batch_size)
r = scatter_add(a * x, batch, dim=0, dim_size=batch_size)
q_star = torch.cat([q, r], dim=-1)
return q_star
Example 4: normalize_sparse_tensor
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def normalize_sparse_tensor(adj, fill_value=1):
"""Normalize sparse tensor. Need to import torch_scatter
"""
edge_index = adj._indices()
edge_weight = adj._values()
    num_nodes = adj.size(0)
edge_index, edge_weight = add_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
from torch_scatter import scatter_add
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
values = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
shape = adj.shape
return torch.sparse.FloatTensor(edge_index, values, shape)
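A small sanity check of the function above (a sketch; it assumes normalize_sparse_tensor and its add_self_loops dependency, e.g. from torch_geometric.utils, are in scope, and the graph is illustrative):

import torch

# Path graph 1 - 0 - 2, stored as two directed edges per undirected edge.
edge_index = torch.tensor([[0, 1, 0, 2], [1, 0, 2, 0]])
adj = torch.sparse.FloatTensor(edge_index, torch.ones(4),
                               torch.Size([3, 3])).coalesce()
adj_sym = normalize_sparse_tensor(adj)  # D^-1/2 (A + I) D^-1/2
# adj_sym.to_dense() ≈ tensor([[0.3333, 0.4082, 0.4082],
#                              [0.4082, 0.5000, 0.0000],
#                              [0.4082, 0.0000, 0.5000]])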
Example 5: degree_normalize_sparse_tensor
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def degree_normalize_sparse_tensor(adj, fill_value=1):
"""degree_normalize_sparse_tensor.
"""
edge_index = adj._indices()
edge_weight = adj._values()
    num_nodes = adj.size(0)
edge_index, edge_weight = add_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
from torch_scatter import scatter_add
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
    deg_inv = deg.pow(-1)
    deg_inv[deg_inv == float('inf')] = 0
    values = deg_inv[row] * edge_weight
shape = adj.shape
return torch.sparse.FloatTensor(edge_index, values, shape)
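For contrast, row normalization of the same toy adjacency (continuing the sketch above) makes each row of A + I sum to 1 instead of symmetrizing:

adj_row = degree_normalize_sparse_tensor(adj)  # D^-1 (A + I)
# adj_row.to_dense() ≈ tensor([[0.3333, 0.3333, 0.3333],
#                              [0.5000, 0.5000, 0.0000],
#                              [0.5000, 0.0000, 0.5000]])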
Example 6: get_feat
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def get_feat(self, graph_list):
node_feat, edge_feat, edge_from_idx, edge_to_idx, g_idx = prepare_gnn(graph_list, self.is_cuda())
input_node_linear = self.w_n2l(node_feat)
input_message = input_node_linear
if edge_feat is not None:
input_edge_linear = self.w_e2l(edge_feat)
e2npool_input = scatter_add(input_edge_linear, edge_to_idx, dim=0, dim_size=node_feat.shape[0])
input_message += e2npool_input
input_potential = self.act_func(input_message)
cur_message_layer = input_potential
all_embeds = [cur_message_layer]
edge_index = [edge_from_idx, edge_to_idx]
edge_index = torch.stack(edge_index)
for lv in range(self.max_lv):
node_linear = self.conv_layers[lv](cur_message_layer, edge_index)
merged_linear = node_linear + input_message
cur_message_layer = self.act_func(merged_linear)
all_embeds.append(cur_message_layer)
return self.readout_net(all_embeds, g_idx, len(graph_list))
Example 7: node_degree_as_feature
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def node_degree_as_feature(data):
r"""
    Set each node feature to the one-hot encoding of its degree.
    :param data: a list of Data objects
    :return: a list of Data objects
"""
max_degree = 0
degrees = []
for graph in data:
        edge_index = graph.edge_index
edge_weight = torch.ones((edge_index.size(1),), device=edge_index.device)
fill_value = 1
num_nodes = graph.num_nodes
edge_index, edge_weight = add_remaining_self_loops(
edge_index, edge_weight, fill_value, num_nodes
)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes).long()
        degrees.append(deg.cpu() - 1)
max_degree = max(torch.max(deg), max_degree)
max_degree = int(max_degree)
for i in range(len(data)):
one_hot = torch.zeros(data[i].num_nodes, max_degree).scatter_(1, degrees[i].unsqueeze(1), 1)
data[i].x = one_hot.to(data[i].y.device)
return data
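The scatter_ call at the end is just dense one-hot encoding; a tiny illustration with made-up degree values:

import torch

degrees = torch.tensor([0, 2, 1])  # degrees shifted by -1, as in the example
one_hot = torch.zeros(3, 3).scatter_(1, degrees.unsqueeze(1), 1)
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])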
Example 8: forward
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def forward(self, batch):
h = batch.x
layer_rep = [h]
for i in range(self.num_layers-1):
h = self.gin_layers[i](h, batch.edge_index)
h = self.batch_norm[i](h)
h = F.relu(h)
layer_rep.append(h)
final_score = 0
for i in range(self.num_layers):
# pooled = self.pooling(layer_rep[i], batch, dim=0)
pooled = scatter_add(layer_rep[i], batch.batch, dim=0)
final_score += self.dropout(self.linear_prediction[i](pooled))
final_score = F.softmax(final_score, dim=-1)
if batch.y is not None:
loss = self.loss(final_score, batch.y)
return final_score, loss
return final_score, None
Example 9: forward
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def forward(self, x, edge_index, edge_weight=None, size=None):
""""""
num_nodes = x.shape[0]
h = torch.matmul(x, self.weight)
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1), ),
dtype=x.dtype,
device=edge_index.device)
edge_index, edge_weight = remove_self_loops(edge_index=edge_index, edge_attr=edge_weight)
    deg = scatter_add(edge_weight, edge_index[0], dim=0, dim_size=num_nodes)  # optionally + 1e-10
h_j = edge_weight.view(-1, 1) * h[edge_index[1]]
aggr_out = scatter_add(h_j, edge_index[0], dim=0, dim_size=num_nodes)
    out = (deg.view(-1, 1) * self.lin1(x) + aggr_out) + self.lin2(x)
    return out
Example 10: spmm
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def spmm(index, value, m, n, matrix):
"""Matrix product of sparse matrix with dense matrix.
Args:
index (:class:`LongTensor`): The index tensor of sparse matrix.
value (:class:`Tensor`): The value tensor of sparse matrix.
m (int): The first dimension of corresponding dense matrix.
n (int): The second dimension of corresponding dense matrix.
matrix (:class:`Tensor`): The dense matrix.
:rtype: :class:`Tensor`
"""
assert n == matrix.size(0)
row, col = index
matrix = matrix if matrix.dim() > 1 else matrix.unsqueeze(-1)
out = matrix[col]
out = out * value.unsqueeze(-1)
out = scatter_add(out, row, dim=0, dim_size=m)
return out
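A quick correctness check of spmm against the equivalent dense product (toy values, assuming the function above is in scope):

import torch

index = torch.tensor([[0, 1, 1], [0, 0, 1]])  # row and column indices of nonzeros
value = torch.tensor([1., 2., 3.])            # sparse matrix [[1, 0], [2, 3]]
matrix = torch.eye(2)
out = spmm(index, value, m=2, n=2, matrix=matrix)
dense = torch.sparse.FloatTensor(index, value, torch.Size([2, 2])).to_dense()
assert torch.allclose(out, dense @ matrix)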
Example 11: norm
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def norm(edge_index, num_nodes, edge_weight, improved=False, dtype=None):
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1), ),
dtype=dtype,
device=edge_index.device)
fill_value = 1 if not improved else 2
edge_index, edge_weight = add_remaining_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
Example 12: __call__
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def __call__(self, data):
num_nodes = data.num_nodes
if 'batch' not in data:
batch = data.pos.new_zeros(num_nodes, dtype=torch.long)
else:
batch = data.batch
cluster = voxel_grid(data.pos, batch, self.size, self.start, self.end)
cluster, perm = consecutive_cluster(cluster)
for key, item in data:
if bool(re.search('edge', key)):
raise ValueError(
'GridSampling does not support coarsening of edges')
if torch.is_tensor(item) and item.size(0) == num_nodes:
if key == 'y':
item = F.one_hot(item)
item = scatter_add(item, cluster, dim=0)
data[key] = item.argmax(dim=-1)
elif key == 'batch':
data[key] = item[perm]
else:
data[key] = scatter_mean(item, cluster, dim=0)
return data
Example 13: topk
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def topk(x, ratio, batch, min_score=None, tol=1e-7):
if min_score is not None:
# Make sure that we do not drop all nodes in a graph.
scores_max = scatter_max(x, batch)[0][batch] - tol
scores_min = scores_max.clamp(max=min_score)
perm = torch.nonzero(x > scores_min).view(-1)
else:
num_nodes = scatter_add(batch.new_ones(x.size(0)), batch, dim=0)
batch_size, max_num_nodes = num_nodes.size(0), num_nodes.max().item()
cum_num_nodes = torch.cat(
[num_nodes.new_zeros(1),
num_nodes.cumsum(dim=0)[:-1]], dim=0)
index = torch.arange(batch.size(0), dtype=torch.long, device=x.device)
index = (index - cum_num_nodes[batch]) + (batch * max_num_nodes)
dense_x = x.new_full((batch_size * max_num_nodes, ),
torch.finfo(x.dtype).min)
dense_x[index] = x
dense_x = dense_x.view(batch_size, max_num_nodes)
_, perm = dense_x.sort(dim=-1, descending=True)
perm = perm + cum_num_nodes.view(-1, 1)
perm = perm.view(-1)
k = (ratio * num_nodes.to(torch.float)).ceil().to(torch.long)
mask = [
torch.arange(k[i], dtype=torch.long, device=x.device) +
i * max_num_nodes for i in range(batch_size)
]
mask = torch.cat(mask, dim=0)
perm = perm[mask]
return perm
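Calling the function above on two illustrative 3-node graphs (scatter_add must be in scope for topk itself):

import torch

x = torch.tensor([0.1, 0.9, 0.5, 0.3, 0.8, 0.2])
batch = torch.tensor([0, 0, 0, 1, 1, 1])
perm = topk(x, ratio=0.5, batch=batch)
# ceil(0.5 * 3) = 2 nodes survive per graph: perm == tensor([1, 2, 4, 3])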
Example 14: forward
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def forward(self, x, batch=None):
""""""
if batch is None:
batch = torch.zeros(x.size(0), dtype=torch.long, device=x.device)
batch_size = batch.max().item() + 1
if self.training or not self.track_running_stats:
count = degree(batch, batch_size, dtype=x.dtype).view(-1, 1)
tmp = scatter_add(x, batch, dim=0, dim_size=batch_size)
mean = tmp / count.clamp(min=1)
tmp = (x - mean[batch])
tmp = scatter_add(tmp * tmp, batch, dim=0, dim_size=batch_size)
var = tmp / count.clamp(min=1)
unbiased_var = tmp / (count - 1).clamp(min=1)
if self.training and self.track_running_stats:
momentum = self.momentum
self.running_mean = (
1 - momentum) * self.running_mean + momentum * mean.mean(dim=0)
self.running_var = (
1 - momentum
) * self.running_var + momentum * unbiased_var.mean(dim=0)
if not self.training and self.track_running_stats:
mean = self.running_mean.view(1, -1).expand(batch_size, -1)
var = self.running_var.view(1, -1).expand(batch_size, -1)
out = (x - mean[batch]) / torch.sqrt(var[batch] + self.eps)
if self.affine:
out = out * self.weight.view(1, -1) + self.bias.view(1, -1)
return out
Example 15: forward
# Required import: import torch_scatter [as alias]
# Or: from torch_scatter import scatter_add [as alias]
def forward(self, x, batch, size=None):
""""""
x = x.unsqueeze(-1) if x.dim() == 1 else x
size = batch[-1].item() + 1 if size is None else size
gate = self.gate_nn(x).view(-1, 1)
x = self.nn(x) if self.nn is not None else x
assert gate.dim() == x.dim() and gate.size(0) == x.size(0)
gate = softmax(gate, batch, num_nodes=size)
out = scatter_add(gate * x, batch, dim=0, dim_size=size)
return out