

Python nn.GroupNorm Method Code Examples

This article collects typical usage examples of the torch.nn.GroupNorm method in Python. If you are wondering how nn.GroupNorm is used in practice, or looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the torch.nn module.


The following presents 15 code examples of the nn.GroupNorm method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
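
For quick reference before the project-specific examples, here is a minimal standalone sketch of the nn.GroupNorm API (constructor arguments and a forward pass); the tensor shapes are illustrative only:

import torch
from torch import nn

# GroupNorm(num_groups, num_channels): splits the channel dimension into groups
# and normalizes within each group, independently of the batch size.
gn = nn.GroupNorm(num_groups=8, num_channels=32, eps=1e-5, affine=True)
x = torch.randn(4, 32, 16, 16)   # (N, C, H, W); C must be divisible by num_groups
y = gn(x)                        # output has the same shape as the input
print(y.shape)                   # torch.Size([4, 32, 16, 16])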

Example 1: init_weights

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None') 
Developer ID: open-mmlab, Project: mmdetection, Lines of code: 27, Source file: hrnet.py

Example 2: patch_norm_fp32

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def patch_norm_fp32(module):
    """Recursively convert normalization layers from FP16 to FP32.

    Args:
        module (nn.Module): The FP16 module to be converted.

    Returns:
        nn.Module: The converted module, with its normalization layers
            cast to FP32.
    """
    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
        module.float()
        if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
            module.forward = patch_forward_method(module.forward, torch.half,
                                                  torch.float)
    for child in module.children():
        patch_norm_fp32(child)
    return module 
Developer ID: open-mmlab, Project: mmdetection, Lines of code: 20, Source file: hooks.py
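
A hypothetical usage sketch (the model class name below is an assumption, not taken from the source): cast a model to FP16 for mixed-precision training, then call patch_norm_fp32 so that BatchNorm/GroupNorm statistics are still computed in FP32 for numerical stability:

# Hypothetical usage sketch; `MyModel` stands in for any nn.Module.
model = MyModel()
model.half()                     # cast parameters and buffers to FP16
model = patch_norm_fp32(model)   # keep BatchNorm/GroupNorm computation in FP32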

Example 3: __call__

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def __call__(self, module):
        if isinstance(module, (nn.Conv2d, nn.Conv3d)):
            self.initializer(
                module.weight.data,
                self.slope,
                self.mode,
                self.nonlinearity)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
            if module.weight is not None:
                module.weight.data.fill_(1)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Linear):
            self.initializer(
                module.weight.data,
                self.slope,
                self.mode,
                self.nonlinearity)
            if module.bias is not None:
                module.bias.data.zero_() 
Developer ID: PistonY, Project: torch-toolbox, Lines of code: 24, Source file: init.py

Example 4: init_weights

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m, 'conv2_offset'):
                        constant_init(m.conv2_offset, 0)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None') 
Developer ID: dingjiansw101, Project: AerialDetection, Lines of code: 27, Source file: resnet.py

Example 5: init_weights

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None') 
Developer ID: dingjiansw101, Project: AerialDetection, Lines of code: 21, Source file: hrnet.py

Example 6: _make_layer

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, final_relu=True, use_gn=False):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if use_gn:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    nn.GroupNorm(4, planes * block.expansion), 
                )
            else:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(planes * block.expansion),
                )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation, use_gn=use_gn))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks-1):
            layers.append(block(self.inplanes, planes, dilation=dilation, use_gn=use_gn))
        layers.append(block(self.inplanes, planes, dilation=dilation, use_gn=use_gn, final_relu=final_relu))

        return nn.Sequential(*layers) 
Developer ID: zhechen, Project: PLARD, Lines of code: 26, Source file: plard.py

Example 7: make_norm

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def make_norm(c, norm='bn', eps=1e-5, an_k=10):
    if norm == 'bn':
        return nn.BatchNorm2d(c, eps=eps)
    elif norm == 'affine':
        return ops.AffineChannel2d(c)
    elif norm == 'gn':
        group = 32 if c >= 32 else c
        assert c % group == 0
        return nn.GroupNorm(group, c, eps=eps)
    elif norm == 'an_bn':
        return ops.MixtureBatchNorm2d(c, an_k)
    elif norm == 'an_gn':
        group = 32 if c >= 32 else c
        assert c % group == 0
        return ops.MixtureGroupNorm(c, group, an_k)
    elif norm == 'none':
        return None
    else:
        return nn.BatchNorm2d(c, eps=eps) 
Developer ID: soeaver, Project: Parsing-R-CNN, Lines of code: 21, Source file: net.py
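
A rough usage sketch of the factory above (showing only the 'bn' and 'gn' branches, which need nothing beyond torch.nn): the group count is chosen automatically, 32 groups when the channel count allows it, otherwise one group per channel:

norm = make_norm(256, norm='gn')   # -> nn.GroupNorm(32, 256, eps=1e-5)
norm = make_norm(16, norm='gn')    # channel count < 32 -> nn.GroupNorm(16, 16, eps=1e-5)
norm = make_norm(256, norm='bn')   # -> nn.BatchNorm2d(256, eps=1e-5)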

Example 8: _init_weights

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def _init_weights(self):
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.0001)
                nn.init.constant_(m.bias, 0)
        # zero init deform conv offset
        for m in self.modules():
            if isinstance(m, ops.DeformConvPack):
                nn.init.constant_(m.conv_offset.weight, 0)
                nn.init.constant_(m.conv_offset.bias, 0)
            if isinstance(m, ops.ModulatedDeformConvPack):
                nn.init.constant_(m.conv_offset_mask.weight, 0)
                nn.init.constant_(m.conv_offset_mask.bias, 0) 
Developer ID: soeaver, Project: Parsing-R-CNN, Lines of code: 23, Source file: vovnet.py

Example 9: __init__

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def __init__(self, dim_in, dim_inner, dim_out, use_gn=False, use_scale=True):
        super().__init__()
        self.dim_inner = dim_inner
        self.use_gn = use_gn
        self.use_scale = use_scale

        self.theta_scale1 = Conv2d(dim_in, dim_inner, 1, stride=1, padding=0)
        self.theta_scale2 = Conv2d(dim_in, dim_inner * 4, 1, stride=2, padding=0)
        self.theta_scale3 = Conv2d(dim_in, dim_inner * 16, 1, stride=4, padding=0)

        self.phi = Conv2d(dim_in, dim_inner, 1, stride=1, padding=0)
        self.g = Conv2d(dim_in, dim_inner, 1, stride=1, padding=0)

        self.out = Conv2d(dim_inner, dim_out, 1, stride=1, padding=0)
        if self.use_gn:
            self.gn = nn.GroupNorm(32, dim_out, eps=1e-5)

        self.apply(self._init_modules) 
Developer ID: soeaver, Project: Parsing-R-CNN, Lines of code: 20, Source file: nonlocal2d.py

Example 10: __init__

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def __init__(self, inplanes, planes, use_scale=False, groups=None):
        self.use_scale = use_scale
        self.groups = groups

        super(SpatialCGNL, self).__init__()
        # conv theta
        self.t = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv phi
        self.p = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv g
        self.g = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
        # conv z
        self.z = nn.Conv2d(planes, inplanes, kernel_size=1, stride=1,
                                                  groups=self.groups, bias=False)
        self.gn = nn.GroupNorm(num_groups=self.groups, num_channels=inplanes)

        if self.use_scale:
            cprint("=> WARN: SpatialCGNL block uses 'SCALE'", \
                   'yellow')
        if self.groups:
            cprint("=> WARN: SpatialCGNL block uses '{}' groups".format(self.groups), \
                   'yellow') 
Developer ID: KaiyuYue, Project: cgnl-network.pytorch, Lines of code: 24, Source file: resnet.py

Example 11: __init__

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def __init__(self, num_layers, chunk_size, hop_size, in_features,
                 bottleneck_size, skip_connection=False, **kwargs):
        super().__init__()
        
        self.chunk_size = chunk_size
        self.hop_size = hop_size
        
        blocks = []
        for i in range(num_layers):
            _block = DualPathBlock(n_features=bottleneck_size, **kwargs)
            blocks.append(_block)
            self.add_module(f'layer{i}', _block)
        self.layers = blocks
        self.skip_connection = skip_connection
        self.prelu = nn.PReLU()
        self.bottleneck = nn.Linear(in_features, bottleneck_size)
        self.bottleneck_norm = nn.GroupNorm(1, in_features)
        self.inv_bottleneck = nn.Linear(
            bottleneck_size, in_features)
        self.output_norm = nn.GroupNorm(1, in_features) 
Developer ID: nussl, Project: nussl, Lines of code: 22, Source file: blocks.py

Example 12: get_norm

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def get_norm(planes, norm_type='batch', num_groups=4):
    if norm_type == 'batch':
        norm_layer = nn.BatchNorm2d(planes, affine=True)
    elif norm_type == 'instance':
        norm_layer = nn.InstanceNorm2d(planes, affine=False)
    elif norm_type == 'group':
        norm_layer = nn.GroupNorm(num_groups, planes)
    elif norm_type == 'adain':
        norm_layer = AdaptiveInstanceNorm2d(planes)
    elif norm_type == 'none':
        norm_layer = None
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer

##############################################################
## Simple Gated Operations (Affine) and (Multiplicative)
############################################################## 
Developer ID: arnabgho, Project: iSketchNFill, Lines of code: 20, Source file: common_net.py

Example 13: init_weights

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m.conv2, 'conv_offset'):
                        constant_init(m.conv2.conv_offset, 0)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None') 
Developer ID: open-mmlab, Project: mmdetection, Lines of code: 33, Source file: resnet.py

Example 14: __init__

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        self.inplanes = 64
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0) 
Developer ID: jindongwang, Project: transferlearning, Lines of code: 36, Source file: ResNet.py

Example 15: get_norm

# Required import module: from torch import nn [as alias]
# Or: from torch.nn import GroupNorm [as alias]
def get_norm(n_filters, norm):
    if norm is None:
        return Identity()
    elif norm == "batch":
        return nn.BatchNorm2d(n_filters, momentum=0.9)
    elif norm == "instance":
        return nn.InstanceNorm2d(n_filters, affine=True)
    elif norm == "layer":
        return nn.GroupNorm(1, n_filters)
    elif norm == "act":
        return norms.ActNorm(n_filters, False) 
Developer ID: wgrathwohl, Project: JEM, Lines of code: 13, Source file: wideresnet.py
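
A small self-contained check (not from the JEM source) of why the "layer" branch above works: nn.GroupNorm with num_groups=1 normalizes each sample over all channels and spatial positions, which matches nn.LayerNorm applied over the (C, H, W) dimensions when the affine parameters are disabled:

import torch
from torch import nn

x = torch.randn(2, 8, 4, 4)
gn = nn.GroupNorm(1, 8, affine=False)
ln = nn.LayerNorm([8, 4, 4], elementwise_affine=False)
print(torch.allclose(gn(x), ln(x), atol=1e-5))  # True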


Note: The torch.nn.GroupNorm method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.