This article collects typical usage examples of the Python method torch.nn.functional.group_norm. If you are unsure exactly what functional.group_norm does or how to use it, the curated code examples below may help. You can also explore the other methods of the torch.nn.functional module.
The following six code examples of functional.group_norm are ordered by popularity.
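For reference, the functional form is F.group_norm(input, num_groups, weight=None, bias=None, eps=1e-05): it splits the channel dimension into num_groups groups and normalizes each group over its channels and spatial positions. A minimal self-contained sketch:

import torch
import torch.nn.functional as F

x = torch.randn(2, 16, 8, 8)        # (N, C, H, W); C must be divisible by num_groups
y = F.group_norm(x, num_groups=4)   # 4 groups of 4 channels each, no affine parameters
print(y.shape)                      # torch.Size([2, 16, 8, 8])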
Example 1: groupnorm
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import group_norm [as alias]
def groupnorm(x, norm_style):
    # If number of channels specified in norm_style:
    if 'ch' in norm_style:
        ch = int(norm_style.split('_')[-1])
        groups = max(int(x.shape[1]) // ch, 1)
    # If number of groups specified in norm_style:
    elif 'grp' in norm_style:
        groups = int(norm_style.split('_')[-1])
    # If neither, default to groups = 16
    else:
        groups = 16
    return F.group_norm(x, groups)
# Class-conditional bn
# output size is the number of channels, input size is for the linear layers
# Andy's Note: this class feels messy but I'm not really sure how to clean it up
# Suggestions welcome! (By which I mean, refactor this and make a pull request
# if you want to make this more readable/usable).
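A hypothetical call sketch for the parser above (the norm_style strings are illustrative; assumes torch and the groupnorm function are in scope):

x = torch.randn(4, 64, 32, 32)
groupnorm(x, 'ch_16')   # 64 channels / 16 per group -> 4 groups
groupnorm(x, 'grp_8')   # 8 groups, taken directly from the suffix
groupnorm(x, 'none')    # neither 'ch' nor 'grp' -> default of 16 groups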
Example 2: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import group_norm [as alias]
def forward(self, x):
    output = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
    size = output.size()
    y = self.attention_weights(x)  # TODO: use output as attention input
    weight = y @ self.weight_
    bias = y @ self.bias_
    weight = weight.unsqueeze(-1).unsqueeze(-1).expand(size)
    bias = bias.unsqueeze(-1).unsqueeze(-1).expand(size)
    return weight * output + bias
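The method above references attributes that are not shown; a minimal host-class sketch that would make it runnable (all names, shapes, and the attention head here are assumptions, in the spirit of attentive normalization):

import torch
import torch.nn as nn
import torch.nn.functional as F

class AttentiveGroupNorm(nn.Module):
    # Hypothetical container: k candidate (weight, bias) pairs are mixed
    # per sample by a small attention head computed from the input.
    def __init__(self, num_groups, num_channels, k=4, eps=1e-5):
        super().__init__()
        self.num_groups, self.eps = num_groups, eps
        self.weight = nn.Parameter(torch.ones(num_channels))      # affine scale for group_norm
        self.bias = nn.Parameter(torch.zeros(num_channels))       # affine shift for group_norm
        self.weight_ = nn.Parameter(torch.ones(k, num_channels))  # candidate scales
        self.bias_ = nn.Parameter(torch.zeros(k, num_channels))   # candidate shifts
        self.attention_weights = nn.Sequential(                   # x -> (N, k) mixing weights
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(num_channels, k), nn.Softmax(dim=1),
        )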
Example 3: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import group_norm [as alias]
def forward(self, x):
    x = functional.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
    if self.activation == "relu":
        return functional.relu(x, inplace=True)
    elif self.activation == "leaky_relu":
        return functional.leaky_relu(x, negative_slope=self.activation_param, inplace=True)
    elif self.activation == "elu":
        return functional.elu(x, alpha=self.activation_param, inplace=True)
    elif self.activation == "identity":
        return x
    else:
        raise RuntimeError("Unknown activation function {}".format(self.activation))
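A minimal host sketch for this norm-plus-activation pattern (the class name is hypothetical; subclassing nn.GroupNorm supplies num_groups, weight, bias, and eps):

import torch.nn as nn

class GroupNormAct(nn.GroupNorm):
    # Hypothetical wrapper pairing the forward above with standard
    # GroupNorm parameters and a configurable activation.
    def __init__(self, num_groups, num_channels, activation="relu",
                 activation_param=0.01):
        super().__init__(num_groups, num_channels)
        self.activation = activation
        self.activation_param = activation_param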
Example 4: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import group_norm [as alias]
def forward(self, input):
    output = F.group_norm(
        input.float(),
        self.num_groups,
        self.weight.float() if self.weight is not None else None,
        self.bias.float() if self.bias is not None else None,
        self.eps,
    )
    return output.type_as(input)
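This forward computes the normalization in float32 and casts the result back to the input dtype, a common trick for numerical stability under mixed precision (the same pattern appears in, e.g., fairseq's Fp32GroupNorm). A standalone sketch of the cast:

import torch
import torch.nn.functional as F

x16 = torch.randn(2, 16, 8, 8, dtype=torch.float16)
w = torch.ones(16, dtype=torch.float16)
b = torch.zeros(16, dtype=torch.float16)

# reduce in fp32, then cast back to the input's dtype
out = F.group_norm(x16.float(), 4, w.float(), b.float(), 1e-5).type_as(x16)
assert out.dtype == torch.float16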
Example 5: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import group_norm [as alias]
def forward(self, x):
    x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
    if self.act is not None:
        x = self.act(x)
    return x
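A minimal host sketch (here self.act is assumed to be an nn.Module such as nn.ReLU, or None; the class name is illustrative):

import torch.nn as nn

class GNAct(nn.GroupNorm):
    # Hypothetical wrapper: nn.GroupNorm provides weight, bias, eps,
    # and num_groups; act is an optional activation module.
    def __init__(self, num_groups, num_channels, act=None):
        super().__init__(num_groups, num_channels)
        self.act = act  # e.g. nn.ReLU(inplace=True)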
Example 6: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import group_norm [as alias]
def forward(self, x):
    x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
    func = ACT_FUNC_DICT[self.activation]
    if self.activation == ACT.LEAKY_RELU:
        return func(x, inplace=True, negative_slope=self.activation_param)
    elif self.activation == ACT.ELU:
        return func(x, inplace=True, alpha=self.activation_param)
    else:
        return func(x, inplace=True)
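ACT and ACT_FUNC_DICT are project-specific and not shown; a hypothetical reconstruction consistent with the dispatch above:

from enum import Enum
import torch.nn.functional as F

class ACT(Enum):
    # assumed members; the real project defines its own constants
    RELU = "relu"
    LEAKY_RELU = "leaky_relu"
    ELU = "elu"

# dispatch table from activation id to its functional implementation
ACT_FUNC_DICT = {
    ACT.RELU: F.relu,
    ACT.LEAKY_RELU: F.leaky_relu,
    ACT.ELU: F.elu,
}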