This article collects typical usage examples of torch.nn.GroupNorm in Python. If you are unsure what nn.GroupNorm does, how to call it, or want to see it used in context, the curated code samples below may help. You can also explore further usage examples from its parent module, torch.nn.
The 15 code examples of nn.GroupNorm below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system surface better Python code examples.
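Before the examples, here is a minimal, self-contained sketch of the nn.GroupNorm API itself; the group count and tensor shape are illustrative choices, not taken from any example below.

import torch
from torch import nn

# GroupNorm(num_groups, num_channels): statistics are computed per sample over
# groups of channels, so the layer is independent of batch size and behaves the
# same in training and evaluation mode.
gn = nn.GroupNorm(num_groups=8, num_channels=64)
x = torch.randn(4, 64, 32, 32)   # (N, C, H, W); C must be divisible by num_groups
y = gn(x)
print(y.shape)                   # torch.Size([4, 64, 32, 32])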
Example 1: init_weights
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
Example 2: patch_norm_fp32
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def patch_norm_fp32(module):
"""Recursively convert normalization layers from FP16 to FP32.
Args:
module (nn.Module): The modules to be converted in FP16.
Returns:
nn.Module: The converted module, the normalization layers have been
converted to FP32.
"""
if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
module.float()
if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
module.forward = patch_forward_method(module.forward, torch.half,
torch.float)
for child in module.children():
patch_norm_fp32(child)
return module
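A hedged usage sketch for the snippet above (the toy model and the half() cast are illustrative assumptions, not part of the original code): after casting a model to FP16, patch_norm_fp32 is applied so that the BatchNorm/GroupNorm layers keep computing in FP32.

# Illustrative usage only; assumes patch_norm_fp32 is defined as above.
import torch
from torch import nn

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.GroupNorm(4, 16),
    nn.ReLU(),
).half()                          # cast parameters to FP16
model = patch_norm_fp32(model)    # normalization layers converted back to FP32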
Example 3: __call__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def __call__(self, module):
if isinstance(module, (nn.Conv2d, nn.Conv3d)):
self.initializer(
module.weight.data,
self.slope,
self.mode,
self.nonlinearity)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
if module.weight is not None:
module.weight.data.fill_(1)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
self.initializer(
module.weight.data,
self.slope,
self.mode,
self.nonlinearity)
if module.bias is not None:
module.bias.data.zero_()
Example 4: init_weights
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.dcn is not None:
for m in self.modules():
if isinstance(m, Bottleneck) and hasattr(
m, 'conv2_offset'):
constant_init(m.conv2_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
Example 5: init_weights
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
Example 6: _make_layer
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, final_relu=True, use_gn=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if use_gn:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.GroupNorm(4, planes * block.expansion),
)
else:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation, use_gn=use_gn))
self.inplanes = planes * block.expansion
for i in range(1, blocks-1):
layers.append(block(self.inplanes, planes, dilation=dilation, use_gn=use_gn))
layers.append(block(self.inplanes, planes, dilation=dilation, use_gn=use_gn, final_relu=final_relu))
return nn.Sequential(*layers)
Example 7: make_norm
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def make_norm(c, norm='bn', eps=1e-5, an_k=10):
if norm == 'bn':
return nn.BatchNorm2d(c, eps=eps)
elif norm == 'affine':
return ops.AffineChannel2d(c)
elif norm == 'gn':
group = 32 if c >= 32 else c
assert c % group == 0
return nn.GroupNorm(group, c, eps=eps)
elif norm == 'an_bn':
return ops.MixtureBatchNorm2d(c, an_k)
elif norm == 'an_gn':
group = 32 if c >= 32 else c
assert c % group == 0
return ops.MixtureGroupNorm(c, group, an_k)
elif norm == 'none':
return None
else:
return nn.BatchNorm2d(c, eps=eps)
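To make the branching above concrete, a short usage sketch (the channel counts are arbitrary): with norm='gn' the helper caps the group count at 32 and falls back to the channel count for narrower layers.

# Illustrative only; assumes make_norm is defined as above.
gn_wide = make_norm(64, norm='gn')    # nn.GroupNorm(32, 64, eps=1e-5)
gn_thin = make_norm(16, norm='gn')    # nn.GroupNorm(16, 16, eps=1e-5): fewer than 32 channels
bn = make_norm(64, norm='bn')         # nn.BatchNorm2d(64, eps=1e-5)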
Example 8: _init_weights
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def _init_weights(self):
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.0001)
nn.init.constant_(m.bias, 0)
# zero init deform conv offset
for m in self.modules():
if isinstance(m, ops.DeformConvPack):
nn.init.constant_(m.conv_offset.weight, 0)
nn.init.constant_(m.conv_offset.bias, 0)
if isinstance(m, ops.ModulatedDeformConvPack):
nn.init.constant_(m.conv_offset_mask.weight, 0)
nn.init.constant_(m.conv_offset_mask.bias, 0)
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def __init__(self, dim_in, dim_inner, dim_out, use_gn=False, use_scale=True):
super().__init__()
self.dim_inner = dim_inner
self.use_gn = use_gn
self.use_scale = use_scale
self.theta_scale1 = Conv2d(dim_in, dim_inner, 1, stride=1, padding=0)
self.theta_scale2 = Conv2d(dim_in, dim_inner * 4, 1, stride=2, padding=0)
self.theta_scale3 = Conv2d(dim_in, dim_inner * 16, 1, stride=4, padding=0)
self.phi = Conv2d(dim_in, dim_inner, 1, stride=1, padding=0)
self.g = Conv2d(dim_in, dim_inner, 1, stride=1, padding=0)
self.out = Conv2d(dim_inner, dim_out, 1, stride=1, padding=0)
if self.use_gn:
self.gn = nn.GroupNorm(32, dim_out, eps=1e-5)
self.apply(self._init_modules)
Example 10: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def __init__(self, inplanes, planes, use_scale=False, groups=None):
self.use_scale = use_scale
self.groups = groups
super(SpatialCGNL, self).__init__()
# conv theta
self.t = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
# conv phi
self.p = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
# conv g
self.g = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
# conv z
self.z = nn.Conv2d(planes, inplanes, kernel_size=1, stride=1,
groups=self.groups, bias=False)
self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)
if self.use_scale:
cprint("=> WARN: SpatialCGNL block uses 'SCALE'", \
'yellow')
if self.groups:
cprint("=> WARN: SpatialCGNL block uses '{}' groups".format(self.groups), \
'yellow')
Example 11: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def __init__(self, num_layers, chunk_size, hop_size, in_features,
bottleneck_size, skip_connection=False, **kwargs):
super().__init__()
self.chunk_size = chunk_size
self.hop_size = hop_size
blocks = []
for i in range(num_layers):
_block = DualPathBlock(n_features=bottleneck_size, **kwargs)
blocks.append(_block)
self.add_module(f'layer{i}', _block)
self.layers = blocks
self.skip_connection = skip_connection
self.prelu = nn.PReLU()
self.bottleneck = nn.Linear(in_features, bottleneck_size)
self.bottleneck_norm = nn.GroupNorm(1, in_features)
self.inv_bottleneck = nn.Linear(
bottleneck_size, in_features)
self.output_norm = nn.GroupNorm(1, in_features)
Example 12: get_norm
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def get_norm(planes,norm_type='batch',num_groups=4):
if norm_type == 'batch':
norm_layer = nn.BatchNorm2d(planes, affine=True)
elif norm_type == 'instance':
norm_layer = nn.InstanceNorm2d(planes, affine=False)
elif norm_type == 'group':
norm_layer = nn.GroupNorm(num_groups,planes)
elif norm_type == 'adain':
norm_layer = AdaptiveInstanceNorm2d(planes)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
##############################################################
## Simple Gated Operations (Affine) and (Multiplicative)
##############################################################
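A brief usage sketch for the get_norm helper above (channel and group counts are illustrative): norm_type='group' maps to nn.GroupNorm with the requested number of groups.

# Illustrative only; assumes get_norm is defined as above.
gn = get_norm(32, norm_type='group', num_groups=4)   # nn.GroupNorm(4, 32)
inorm = get_norm(32, norm_type='instance')           # nn.InstanceNorm2d(32, affine=False)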
Example 13: init_weights
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if self.dcn is not None:
for m in self.modules():
if isinstance(m, Bottleneck) and hasattr(
m.conv2, 'conv_offset'):
constant_init(m.conv2.conv_offset, 0)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
else:
raise TypeError('pretrained must be a str or None')
Example 14: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def __init__(self, block, layers, num_classes=1000,zero_init_residual=False,
groups=1, width_per_group=64, norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.inplanes = 64
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
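Because this constructor accepts a norm_layer callable that is invoked with a single channel count, GroupNorm can be substituted for BatchNorm without touching the rest of the network. A hedged sketch (the ResNet and BasicBlock classes and the layer configuration are assumed to match the snippet above; the group count of 32 is an arbitrary choice that divides every ResNet stage width):

# Illustrative only; assumes the ResNet and BasicBlock classes shown above.
from torch import nn

def gn_layer(num_channels):
    # one GroupNorm with 32 groups for whatever channel count the network requests
    return nn.GroupNorm(32, num_channels)

model = ResNet(BasicBlock, [2, 2, 2, 2], norm_layer=gn_layer)   # ResNet-18-style with GroupNorm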
Example 15: get_norm
# Required import: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def get_norm(n_filters, norm):
if norm is None:
return Identity()
elif norm == "batch":
return nn.BatchNorm2d(n_filters, momentum=0.9)
elif norm == "instance":
return nn.InstanceNorm2d(n_filters, affine=True)
elif norm == "layer":
return nn.GroupNorm(1, n_filters)
elif norm == "act":
return norms.ActNorm(n_filters, False)
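As a closing note on the "layer" branch above: nn.GroupNorm(1, n_filters) puts every channel into a single group, a common way to get LayerNorm-style normalization on convolutional feature maps, while nn.GroupNorm(C, C) is InstanceNorm-like. A small sketch with illustrative shapes:

import torch
from torch import nn

layer_like = nn.GroupNorm(1, 16)      # one group: normalize over all 16 channels jointly
instance_like = nn.GroupNorm(16, 16)  # one group per channel
x = torch.randn(2, 16, 8, 8)
print(layer_like(x).shape, instance_like(x).shape)   # both torch.Size([2, 16, 8, 8])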