当前位置: 首页>>代码示例>>Python>>正文


Python init.kaiming_normal方法代码示例

本文整理汇总了Python中torch.nn.init.kaiming_normal方法的典型用法代码示例。如果您正苦于以下问题:Python init.kaiming_normal方法的具体用法?Python init.kaiming_normal怎么用?Python init.kaiming_normal使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在torch.nn.init的用法示例。


在下文中一共展示了init.kaiming_normal方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: __init__

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def __init__(self, *layers):
    """Build a stack of Linear -> BatchNorm1d -> ReLU layers.

    Parameters
    ----------
    *layers : int
        Sequence of layer widths; ``layers[0]`` is the input dimension and
        each following value is the width of one hidden layer.
    """
    super(FullyConnectedNet, self).__init__()
    self.linear = nn.ModuleList()
    self.bn = nn.ModuleList()
    self.relu = nn.ModuleList()
    pre_dim = layers[0]
    self.nLayers = 0
    for dim in layers[1:]:
        # Linear layers carry no bias: the following BatchNorm1d supplies
        # its own learnable shift, so a bias here would be redundant.
        self.linear.append(nn.Linear(pre_dim, dim, bias=False))
        self.bn.append(nn.BatchNorm1d(dim))
        self.relu.append(nn.ReLU(inplace=True))
        # kaiming_normal_ is the current in-place API; the bare
        # kaiming_normal spelling is deprecated.
        init.kaiming_normal_(self.linear[-1].weight)
        self.nLayers += 1
        pre_dim = dim
开发者ID:princeton-vl,项目名称:FormulaNet,代码行数:20,代码来源:model.py

示例2: init_params

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def init_params(net):
    """Initialize the parameters of every submodule of *net* in place.

    Conv2d:       He (Kaiming) normal weights (fan_out), zero bias.
    BatchNorm2d:  unit scale, zero shift.
    Linear:       small Gaussian weights (std=1e-3), zero bias.
    """
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # kaiming_normal_ / constant_ / normal_ are the current in-place
            # APIs; the underscore-less names are deprecated.
            init.kaiming_normal_(m.weight, mode='fan_out')
            # `if m.bias:` raised RuntimeError for any multi-element bias
            # tensor (ambiguous truth value); test identity against None.
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)


#_, term_width = os.popen('stty size', 'r').read().split()
# term_width = int(term_width) 
开发者ID:leehomyc,项目名称:mixup_pytorch,代码行数:20,代码来源:utils.py

示例3: reset_params

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def reset_params(self):
    """Re-initialize every parameterized submodule in place.

    Conv2d / Linear:            He (Kaiming) normal, fan_in mode, zero bias.
    BatchNorm1d / BatchNorm2d:  unit scale, zero shift.
    """
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            # Underscore variants are the current, non-deprecated init API.
            init.kaiming_normal_(m.weight, mode='fan_in')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
            # Both batch-norm flavors get the identical unit/zero treatment.
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.kaiming_normal_(m.weight, mode='fan_in')
            if m.bias is not None:
                init.constant_(m.bias, 0)
开发者ID:aliyun,项目名称:alibabacloud-quantization-networks,代码行数:18,代码来源:alexnet_all.py

示例4: weights_init

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def weights_init(init_type='xavier'):
    """Return an initializer callable suitable for ``net.apply(...)``.

    Parameters
    ----------
    init_type : str
        One of 'normal', 'xavier', 'kaiming', 'orthogonal', 'default'
        ('default' leaves weights untouched).

    Raises
    ------
    ValueError
        If *init_type* is not a recognized scheme (raised when the returned
        callable first hits a Conv/Linear module).
    """
    def init_fun(m):
        classname = m.__class__.__name__
        # Only touch modules whose class name *starts with* Conv or Linear
        # and that actually own a weight tensor.
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                # raise instead of `assert 0` so the check survives `python -O`.
                raise ValueError("Unsupported initialization: {}".format(init_type))
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif (classname.find('Norm') == 0):
            # Normalization layers (class names beginning with 'Norm'):
            # unit scale, zero shift, guarding against affine=False.
            if hasattr(m, 'weight') and m.weight is not None:
                init.constant_(m.weight.data, 1.0)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
    return init_fun
开发者ID:Xiaoming-Yu,项目名称:DMIT,代码行数:26,代码来源:network.py

示例5: __init__

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def __init__(self, f, c, is_multi, is_reg, ps=None, xtra_fc=None, xtra_cut=0, custom_head=None, pretrained=True):
        """Assemble a transfer-learning model: backbone `f` cut at a meta-defined
        point, topped with pooling/flatten and fully connected layers whose
        weights are kaiming-normal initialized.

        NOTE(review): relies on fastai-style helpers defined elsewhere in the
        project (model_meta, cut_model, num_features, AdaptiveConcatPool2d,
        Flatten, to_gpu, apply_init, self.get_fc_layers) — behavior of those
        calls cannot be verified from this file alone.
        """
        self.f,self.c,self.is_multi,self.is_reg,self.xtra_cut = f,c,is_multi,is_reg,xtra_cut
        # Default head: one hidden FC layer of 512 units; dropout 0.25 per
        # hidden layer plus 0.5 before the final layer.
        if xtra_fc is None: xtra_fc = [512]
        if ps is None: ps = [0.25]*len(xtra_fc) + [0.5]
        self.ps,self.xtra_fc = ps,xtra_fc

        # model_meta presumably maps an architecture fn to (cut index, lr-group
        # cut) — TODO confirm against the project's definition.
        if f in model_meta: cut,self.lr_cut = model_meta[f]
        else: cut,self.lr_cut = 0,0
        cut-=xtra_cut
        layers = cut_model(f(pretrained), cut)
        # *2 suggests AdaptiveConcatPool2d doubles the feature count
        # (avg+max concat) — verify against its implementation.
        self.nf = num_features(layers)*2
        if not custom_head: layers += [AdaptiveConcatPool2d(), Flatten()]
        self.top_model = nn.Sequential(*layers)
        n_fc = len(self.xtra_fc)+1
        # A scalar dropout probability is broadcast to every FC layer.
        if not isinstance(self.ps, list): self.ps = [self.ps]*n_fc

        if custom_head: fc_layers = [custom_head]
        else: fc_layers = self.get_fc_layers()
        self.n_fc = len(fc_layers)
        self.fc_model = to_gpu(nn.Sequential(*fc_layers))
        # Custom heads are assumed pre-initialized; default heads get He init.
        if not custom_head: apply_init(self.fc_model, kaiming_normal)
        self.model = to_gpu(nn.Sequential(*(layers+fc_layers))) 
开发者ID:Esri,项目名称:raster-deep-learning,代码行数:24,代码来源:model.py

示例6: init_params

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def init_params(net):
    """In-place parameter initialization for all submodules of *net*.

    - ``nn.Conv2d``:       Kaiming-normal weight (fan_out), bias zeroed.
    - ``nn.BatchNorm2d``:  weight set to 1, bias to 0.
    - ``nn.Linear``:       Gaussian weight with std 1e-3, bias zeroed.
    """
    for module in net.modules():
        if isinstance(module, nn.Conv2d):
            init.kaiming_normal_(module.weight, mode='fan_out')
            # Original `if module.bias:` crashed: truth value of a
            # multi-element tensor is ambiguous. Compare with None instead.
            if module.bias is not None:
                init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            init.constant_(module.weight, 1)
            init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            # Underscore variants replace the deprecated normal/constant.
            init.normal_(module.weight, std=1e-3)
            if module.bias is not None:
                init.constant_(module.bias, 0)
开发者ID:zhunzhong07,项目名称:Random-Erasing,代码行数:16,代码来源:misc.py

示例7: __init__

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def __init__(self, nFeats_in=None, nFeats_out=None, layer_list=None,
             dropout=0, bias=False):
    """Classification head paired with a cross-entropy loss.

    Two construction modes:
    - ``layer_list is None``: a single hidden layer
      (Linear -> BatchNorm1d -> ReLU -> Linear(nFeats_out, 2)).
    - ``layer_list`` given: a stack of bias-free Linear+BatchNorm1d layers
      following the listed widths, ending in Linear(layer_list[-1], 2).

    Parameters
    ----------
    nFeats_in, nFeats_out : int, optional
        Input/hidden widths for the single-layer mode.
    layer_list : list of int, optional
        Layer widths for the multi-layer mode.
    dropout : float
        Dropout probability; 0 disables the dropout layers entirely.
    bias : bool
        Whether the first linear layer (single-layer mode) has a bias.
    """
    super(ClassifyLoss, self).__init__()
    self.dropout = dropout
    if layer_list is None:
        self.list = False
        self.l1 = nn.Linear(nFeats_in, nFeats_out, bias=bias)
        if self.dropout > 0:
            self.l1dropout = nn.Dropout(self.dropout, inplace=True)
        # kaiming_normal_ supersedes the deprecated kaiming_normal.
        init.kaiming_normal_(self.l1.weight)
        self.l2 = nn.Linear(nFeats_out, 2)  # binary output head
        init.kaiming_normal_(self.l2.weight)
        self.bn1 = nn.BatchNorm1d(nFeats_out)
        self.relu = nn.ReLU(inplace=True)
    else:
        self.list = True
        self.hids = nn.ModuleList()
        self.bns = nn.ModuleList()
        if self.dropout > 0:
            self.dropout_l = nn.Dropout(inplace=True)
        for i in range(len(layer_list) - 1):
            # Bias-free because each layer is followed by BatchNorm1d.
            self.hids.append(nn.Linear(layer_list[i], layer_list[i + 1], bias=False))
            init.kaiming_normal_(self.hids[-1].weight)
            self.bns.append(nn.BatchNorm1d(layer_list[i + 1]))
        self.lout = nn.Linear(layer_list[-1], 2)
        init.kaiming_normal_(self.lout.weight)
    self.crossentropy = nn.CrossEntropyLoss()
    self.score = None
开发者ID:princeton-vl,项目名称:FormulaNet,代码行数:30,代码来源:loss.py

示例8: init_weights_he

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def init_weights_he(model):
    """He-initialize a Conv2d module in place; no-op for other module types.

    Fix: the original unconditionally called ``init.constant(model.bias, 0)``,
    which crashes when the convolution was built with ``bias=False``
    (``model.bias`` is None). The bias is now zeroed only when present, and
    the deprecated init aliases are replaced by their underscore variants.
    """
    if isinstance(model, nn.Conv2d):
        init.kaiming_normal_(model.weight)
        if model.bias is not None:
            init.constant_(model.bias, 0)
开发者ID:minerva-ml,项目名称:steppy-toolkit,代码行数:6,代码来源:models.py

示例9: reset_params

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def reset_params(self):
    """Re-initialize conv, batch-norm and linear submodules in place.

    Conv2d:       Kaiming-normal weight (fan_out), zero bias.
    BatchNorm2d:  unit scale, zero shift.
    Linear:       small Gaussian weight (std=0.001), zero bias.
    """
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            # Underscore variants replace the deprecated init aliases.
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            # Classifier weights use a small Gaussian rather than He init.
            init.normal_(m.weight, std=0.001)
            if m.bias is not None:
                init.constant_(m.bias, 0)
开发者ID:gddingcs,项目名称:Dispersion-based-Clustering,代码行数:15,代码来源:resnet.py

示例10: weights_init_kaiming

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def weights_init_kaiming(m):
    """Kaiming-initialize a single module in place (for use with net.apply).

    Conv*/Linear* classes get kaiming-normal weights (a=0, fan_in);
    BatchNorm* classes get N(1.0, 0.02) scale and zero shift. Other module
    types are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # Underscore variants replace the deprecated init aliases.
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm') != -1:
        # Guard affine=False norm layers, whose weight/bias are None.
        if m.weight is not None:
            init.normal_(m.weight.data, 1.0, 0.02)
        if m.bias is not None:
            init.constant_(m.bias.data, 0.0)
开发者ID:ozan-oktay,项目名称:Attention-Gated-Networks,代码行数:12,代码来源:networks_other.py

示例11: weights_init

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def weights_init(self, m):
    """Initialize *m*'s parameters in place, dispatching on state-dict keys.

    Weight tensors whose key contains 'conv' get kaiming-normal (fan_out);
    'bn' weights are filled with 1; every bias tensor is zeroed. Note:
    `self` is unused — kept only for interface compatibility with callers
    that invoke this as a method.
    """
    for key in m.state_dict():
        suffix = key.split('.')[-1]
        if suffix == 'weight':
            if 'conv' in key:
                # kaiming_normal_ supersedes the deprecated kaiming_normal.
                init.kaiming_normal_(m.state_dict()[key], mode='fan_out')
            if 'bn' in key:
                m.state_dict()[key][...] = 1
        elif suffix == 'bias':
            m.state_dict()[key][...] = 0
开发者ID:ShuangXieIrene,项目名称:ssds.pytorch,代码行数:11,代码来源:ssds_train.py

示例12: weights_init

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def weights_init(m):
    """In-place initialization of *m* driven by state-dict key names.

    - keys ending in 'weight' containing 'conv': kaiming-normal (fan_out);
    - keys ending in 'weight' containing 'bn':   filled with 1;
    - keys ending in 'bias':                     filled with 0.
    """
    state = m.state_dict()
    for key in state:
        leaf = key.split('.')[-1]
        if leaf == 'weight':
            if 'conv' in key:
                # Underscore variant replaces the deprecated kaiming_normal.
                init.kaiming_normal_(state[key], mode='fan_out')
            if 'bn' in key:
                state[key][...] = 1
        elif leaf == 'bias':
            state[key][...] = 0
开发者ID:soeaver,项目名称:Parsing-R-CNN,代码行数:11,代码来源:checkpointer.py

示例13: weights_init_kaiming

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0) 
开发者ID:joelmoniz,项目名称:DepthNets,代码行数:12,代码来源:networks.py

示例14: __init__

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):
        """ Constructor
        Args:
            cardinality: number of convolution groups.
            depth: number of layers.
            num_classes: number of classes
            widen_factor: factor to adjust the channel dimensionality
            dropRate: unused in this constructor; kept for interface
                compatibility with callers.
        """
        super(CifarResNeXt, self).__init__()
        self.cardinality = cardinality
        self.depth = depth
        # (depth - 2) // 9: presumably blocks per stage for a 3-stage
        # bottleneck ResNeXt (3 convs x 3 stages) — TODO confirm against
        # the self.block implementation defined elsewhere in the class.
        self.block_depth = (self.depth - 2) // 9
        self.widen_factor = widen_factor
        self.num_classes = num_classes
        self.output_size = 64
        # Channel widths per stage: stem, then three widened stages.
        self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]

        self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
        self.bn_1 = nn.BatchNorm2d(64)
        self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
        self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
        self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
        # NOTE(review): 1024 == 256 * widen_factor only when widen_factor == 4;
        # other widen_factor values would mismatch self.stages[3].
        self.classifier = nn.Linear(1024, num_classes)
        init.kaiming_normal(self.classifier.weight)

        # Blanket init driven by parameter names: conv weights get He init,
        # batch-norm weights become 1, every bias becomes 0.
        for key in self.state_dict():
            if key.split('.')[-1] == 'weight':
                if 'conv' in key:
                    init.kaiming_normal(self.state_dict()[key], mode='fan_out')
                if 'bn' in key:
                    self.state_dict()[key][...] = 1
            elif key.split('.')[-1] == 'bias':
                self.state_dict()[key][...] = 0 
开发者ID:LiyuanLucasLiu,项目名称:RAdam,代码行数:35,代码来源:resnext.py

示例15: _weights_init

# 需要导入模块: from torch.nn import init [as 别名]
# 或者: from torch.nn.init import kaiming_normal [as 别名]
def _weights_init(m):
    if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
        init.kaiming_normal(m.weight) 
开发者ID:PavelOstyakov,项目名称:pipeline,代码行数:5,代码来源:resnet_cifar.py


注:本文中的torch.nn.init.kaiming_normal方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。