This article collects typical code examples of the torch.nn.functional.normalize method in Python. If you have been wondering what functional.normalize does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse the other members of its parent module, torch.nn.functional.
The following sections present 15 code examples of functional.normalize, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
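Before the examples, a quick sketch of the function's semantics: F.normalize divides each slice of the input along dim by its p-norm, with the norm clamped below by eps (default 1e-12) to avoid division by zero.
import torch
import torch.nn.functional as F

x = torch.tensor([[3.0, 4.0], [0.0, 0.0]])
out = F.normalize(x, p=2, dim=1)  # scale each row to unit L2 norm
print(out)                        # tensor([[0.6, 0.8], [0., 0.]])
# equivalent by hand:
# x / x.norm(p=2, dim=1, keepdim=True).clamp_min(1e-12)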
Example 1: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def forward(self, x, edge_index):
"""
Forward propagation pass with features an indices.
:param x: Feature matrix.
:param edge_index: Indices.
"""
edge_index, _ = remove_self_loops(edge_index, None)
row, col = edge_index
if self.norm:
out = scatter_mean(x[col], row, dim=0, dim_size=x.size(0))
else:
out = scatter_add(x[col], row, dim=0, dim_size=x.size(0))
out = torch.cat((out, x), 1)
out = torch.matmul(out, self.weight)
if self.bias is not None:
out = out + self.bias
if self.norm_embed:
out = F.normalize(out, p=2, dim=-1)
return out
Example 2: rand_rot
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def rand_rot(N, dtype=torch.float32, max_rot_angle=float(math.pi),
             axes=(1, 1, 1), get_ss=False):
    # sample N random rotation axes (a concrete dtype default: .type(None) would fail downstream)
    rand_axis = torch.zeros((N, 3)).type(dtype).normal_()
# apply the axes mask
axes = torch.Tensor(axes).type(dtype)
rand_axis = axes[None, :] * rand_axis
rand_axis = Fu.normalize(rand_axis, dim=1, p=2)
rand_angle = torch.ones(N).type(dtype).uniform_(0, max_rot_angle)
R_ss_rand = rand_axis * rand_angle[:, None]
R_rand = so3_exponential_map(R_ss_rand)
if get_ss:
return R_rand, R_ss_rand
else:
return R_rand
Example 3: loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def loss(self, scores, true_pos, lamb=1e-7):
loss = F.multi_margin_loss(scores, true_pos, margin=self.margin)
if self.use_local_only:
return loss
    # regularization: push normalized embeddings apart until pairwise distance >= 1
X = F.normalize(self.rel_embs)
diff = (X.view(self.n_rels, 1, -1) - X.view(1, self.n_rels, -1)).pow(2).sum(dim=2).add_(1e-5).sqrt()
diff = diff * (diff < 1).float()
loss -= torch.sum(diff).mul(lamb)
X = F.normalize(self.ew_embs)
diff = (X.view(self.n_rels, 1, -1) - X.view(1, self.n_rels, -1)).pow(2).sum(dim=2).add_(1e-5).sqrt()
diff = diff * (diff < 1).float()
loss -= torch.sum(diff).mul(lamb)
return loss
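A toy reading of the regularization term above (the embeddings here are made up): only pairs of normalized embeddings closer than distance 1 enter the subtracted sum, so the loss rewards spreading them until they are at least 1 apart.
import torch
import torch.nn.functional as F

X = F.normalize(torch.tensor([[1.0, 0.0], [0.8, 0.6]]))  # two unit embeddings
n = X.size(0)
diff = (X.view(n, 1, -1) - X.view(1, n, -1)).pow(2).sum(dim=2).add_(1e-5).sqrt()
diff = diff * (diff < 1).float()  # only pairs closer than distance 1 contribute
print(diff.sum())                 # grows (lowering the loss) as the pair spreads apart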
Example 4: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def __init__(
self, in_features, out_features, bias=True, coeff=0.97, n_iterations=None, atol=None, rtol=None, **unused_kwargs
):
del unused_kwargs
super(SpectralNormLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.coeff = coeff
self.n_iterations = n_iterations
self.atol = atol
self.rtol = rtol
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
    h, w = self.weight.shape
    self.register_buffer('scale', torch.tensor(0.))
    # u, v: running estimates of the leading left/right singular vectors used by power iteration
    self.register_buffer('u', F.normalize(self.weight.new_empty(h).normal_(0, 1), dim=0))
    self.register_buffer('v', F.normalize(self.weight.new_empty(w).normal_(0, 1), dim=0))
self.compute_weight(True, 200)
Example 5: _initialize_u_v
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def _initialize_u_v(self):
if self.kernel_size == (1, 1):
self.register_buffer('u', F.normalize(self.weight.new_empty(self.out_channels).normal_(0, 1), dim=0))
self.register_buffer('v', F.normalize(self.weight.new_empty(self.in_channels).normal_(0, 1), dim=0))
else:
c, h, w = self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item())
with torch.no_grad():
num_input_dim = c * h * w
v = F.normalize(torch.randn(num_input_dim).to(self.weight), dim=0, eps=1e-12)
# forward call to infer the shape
u = F.conv2d(v.view(1, c, h, w), self.weight, stride=self.stride, padding=self.padding, bias=None)
num_output_dim = u.shape[0] * u.shape[1] * u.shape[2] * u.shape[3]
self.out_shape = u.shape
# overwrite u with random init
u = F.normalize(torch.randn(num_output_dim).to(self.weight), dim=0, eps=1e-12)
self.register_buffer('u', u)
self.register_buffer('v', v)
Example 6: normalize_u
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def normalize_u(u, codomain, out=None):
if not torch.is_tensor(codomain) and codomain == 2:
u = F.normalize(u, p=2, dim=0, out=out)
elif codomain == float('inf'):
u = projmax_(u)
    else:
        uabs = torch.abs(u)
        uph = u / uabs  # sign/phase of u; 0/0 NaNs are patched to 1 below
uph[torch.isnan(uph)] = 1
uabs = uabs / torch.max(uabs)
uabs = uabs**(codomain - 1)
if codomain == 1:
u = uph * uabs / vector_norm(uabs, float('inf'))
else:
u = uph * uabs / vector_norm(uabs, codomain / (codomain - 1))
return u
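In the Euclidean case (codomain == 2) this helper reduces to a plain F.normalize call; a minimal check of just that branch (the repository helpers projmax_ and vector_norm are never reached here):
import torch
import torch.nn.functional as F

u = torch.randn(8)
assert torch.allclose(normalize_u(u, codomain=2), F.normalize(u, p=2, dim=0))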
Example 7: __call__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def __call__(self, data):
assert 'face' in data
pos, face = data.pos, data.face
vec1 = pos[face[1]] - pos[face[0]]
vec2 = pos[face[2]] - pos[face[0]]
face_norm = F.normalize(vec1.cross(vec2), p=2, dim=-1) # [F, 3]
idx = torch.cat([face[0], face[1], face[2]], dim=0)
face_norm = face_norm.repeat(3, 1)
norm = scatter_add(face_norm, idx, dim=0, dim_size=pos.size(0))
norm = F.normalize(norm, p=2, dim=-1) # [N, 3]
data.norm = norm
return data
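This transform is the standard vertex-normal construction: each unit face normal is accumulated onto its three vertices and the sums are renormalized. A self-contained sketch of the same idea using only core PyTorch (the single-triangle mesh here is made up, and index_add_ stands in for torch_scatter's scatter_add):
import torch
import torch.nn.functional as F

pos = torch.tensor([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])  # 3 vertices
face = torch.tensor([[0], [1], [2]])                            # one triangle, shape [3, F]
vec1 = pos[face[1]] - pos[face[0]]
vec2 = pos[face[2]] - pos[face[0]]
face_norm = F.normalize(torch.cross(vec1, vec2, dim=-1), p=2, dim=-1)
idx = torch.cat([face[0], face[1], face[2]], dim=0)
norm = torch.zeros_like(pos)
norm.index_add_(0, idx, face_norm.repeat(3, 1))  # accumulate face normals per vertex
norm = F.normalize(norm, p=2, dim=-1)
print(norm)  # each vertex of the triangle gets the normal [0., 0., 1.]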
Example 8: __call__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def __call__(self, data):
pos = data.pos
if self.max_points > 0 and pos.size(0) > self.max_points:
perm = torch.randperm(pos.size(0))
pos = pos[perm[:self.max_points]]
pos = pos - pos.mean(dim=0, keepdim=True)
C = torch.matmul(pos.t(), pos)
    e, v = torch.linalg.eigh(C)  # C is symmetric, so use eigh (torch.eig was removed in recent PyTorch); v[:, j] is the j-th eigenvector
data.pos = torch.matmul(data.pos, v)
if 'norm' in data:
data.norm = F.normalize(torch.matmul(data.norm, v))
return data
Example 9: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def __init__(self, in_channels: Union[int, Tuple[int, int]],
out_channels: int, normalize: bool = False,
bias: bool = True, **kwargs): # yapf: disable
super(SAGEConv, self).__init__(aggr='mean', **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.normalize = normalize
if isinstance(in_channels, int):
in_channels = (in_channels, in_channels)
    self.lin_l = Linear(in_channels[0], out_channels, bias=bias)   # transforms aggregated neighbor features
    self.lin_r = Linear(in_channels[1], out_channels, bias=False)  # transforms the root node features
self.reset_parameters()
Example 10: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def forward(self, x):
x0 = self.linear0(x[0])
x1 = self.linear1(x[1])
if self.dropout_input > 0:
x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
m0 = self.merge_linear0(x0)
m1 = self.merge_linear1(x1)
m = m0 * m1
m = m.view(-1, self.rank, self.mm_dim)
z = torch.sum(m, 1)
    if self.normalize:
        z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))  # signed square root ("power normalization")
        z = F.normalize(z, p=2)
if self.dropout_pre_lin > 0:
z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
z = self.linear_out(z)
if self.dropout_output > 0:
z = F.dropout(z, p=self.dropout_output, training=self.training)
return z
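The normalize branch above is the signed-square-root-plus-L2 scheme often called power normalization in the bilinear-pooling literature; a hedged mini-demo on a made-up vector:
import torch
import torch.nn.functional as F

z = torch.tensor([[4.0, -9.0]])
z = torch.sqrt(F.relu(z)) - torch.sqrt(F.relu(-z))  # -> [[2., -3.]]
z = F.normalize(z, p=2)                             # row scaled to unit L2 norm
print(z)                                            # tensor([[0.5547, -0.8321]])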
Example 11: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def __init__(self,
input_dims,
output_dim,
mm_dim=1200,
activ_input='relu',
activ_output='relu',
normalize=False,
dropout_input=0.,
dropout_pre_lin=0.,
dropout_output=0.):
super(MLB, self).__init__()
self.input_dims = input_dims
self.mm_dim = mm_dim
self.output_dim = output_dim
self.activ_input = activ_input
self.activ_output = activ_output
self.normalize = normalize
self.dropout_input = dropout_input
self.dropout_pre_lin = dropout_pre_lin
self.dropout_output = dropout_output
# Modules
self.linear0 = nn.Linear(input_dims[0], mm_dim)
self.linear1 = nn.Linear(input_dims[1], mm_dim)
self.linear_out = nn.Linear(mm_dim, output_dim)
self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
Example 12: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def forward(self, inputs, labels):
cos_th = F.linear(inputs, F.normalize(self.weight))
cos_th = cos_th.clamp(-1, 1)
sin_th = torch.sqrt(1.0 - torch.pow(cos_th, 2))
cos_th_m = cos_th * self.cos_m - sin_th * self.sin_m
    cos_th_m = torch.where(cos_th > self.th, cos_th_m, cos_th - self.mm)  # linear fallback when θ + m would exceed π
if labels.dim() == 1:
labels = labels.unsqueeze(-1)
    onehot = torch.zeros_like(cos_th)  # same device/dtype as the logits instead of a hard-coded .cuda()
onehot.scatter_(1, labels, 1)
outputs = onehot * cos_th_m + (1.0 - onehot) * cos_th
outputs = outputs * self.s
return outputs
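The cos_th_m computation above is an ArcFace-style angular margin: it evaluates cos(θ + m) from cosθ via the angle-addition identity cos(θ + m) = cosθ·cos m − sinθ·sin m. A numeric sanity check with made-up values:
import math
import torch

theta, m = 0.7, 0.5
cos_th = torch.tensor(math.cos(theta))
sin_th = torch.sqrt(1.0 - cos_th ** 2)
cos_th_m = cos_th * math.cos(m) - sin_th * math.sin(m)
assert torch.isclose(cos_th_m, torch.tensor(math.cos(theta + m)))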
Example 13: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def __init__(self, num_nodes, num_sampled, embedding_size):
super(NSLoss, self).__init__()
self.num_nodes = num_nodes
self.num_sampled = num_sampled
self.embedding_size = embedding_size
self.weights = Parameter(torch.FloatTensor(num_nodes, embedding_size))
    # sample weight for node k: (log(k + 2) - log(k + 1)) / log(num_nodes + 1)
self.sample_weights = F.normalize(
torch.Tensor(
[
(math.log(k + 2) - math.log(k + 1)) / math.log(num_nodes + 1)
for k in range(num_nodes)
]
),
dim=0,
)
self.reset_parameters()
Example 14: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def forward(self, x, adj):
    if self.use_bn and not hasattr(self, 'bn'):
        self.bn = nn.BatchNorm1d(adj.size(1)).to(adj.device)  # lazily create BN on first call
    if self.add_self:
        adj = adj + torch.eye(adj.size(0)).to(adj.device)  # add self-loops
    if self.mean:
        adj = adj / adj.sum(1, keepdim=True)  # row-normalize adjacency for mean aggregation
h_k_N = torch.matmul(adj, x)
h_k = self.W(h_k_N)
h_k = F.normalize(h_k, dim=2, p=2)
h_k = F.relu(h_k)
if self.use_bn:
h_k = self.bn(h_k)
return h_k
Example 15: predict
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import normalize [as alias]
def predict(self, x):
batch_size, dims = x.size()
query = F.normalize(self.query_proj(x), dim=1)
# Find the k-nearest neighbors of the query
scores = torch.matmul(query, torch.t(self.keys_var))
cosine_similarity, topk_indices_var = torch.topk(scores, self.top_k, dim=1)
    # softmax over the k cosine similarities (explicit dim avoids the deprecated implicit-dim behavior)
    softmax_score = F.softmax(self.softmax_temperature * cosine_similarity, dim=1)
    # retrieve memory values - prediction
    y_hat_indices = topk_indices_var.data[:, 0]
y_hat = self.values[y_hat_indices]
return y_hat, softmax_score