

Python nn.BatchNorm1d Method Code Examples

This article collects typical usage examples of the Python method torch.nn.BatchNorm1d. If you are wondering how exactly nn.BatchNorm1d is used, what it is for, or want to see example code, the curated examples below may help. You can also browse further usage examples from the containing module, torch.nn.


The sections below present 15 code examples of the nn.BatchNorm1d method, sorted by popularity by default.
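
Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) illustrating the basic call pattern of nn.BatchNorm1d; the tensor shapes are illustrative only.

import torch
from torch import nn

# nn.BatchNorm1d normalizes each feature over the batch; it accepts
# (batch, num_features) or (batch, num_channels, length) inputs.
bn = nn.BatchNorm1d(num_features=64)

x2d = torch.randn(32, 64)        # e.g. the output of an nn.Linear layer
x3d = torch.randn(32, 64, 100)   # e.g. the output of an nn.Conv1d layer

print(bn(x2d).shape)             # torch.Size([32, 64])
print(bn(x3d).shape)             # torch.Size([32, 64, 100])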

Example 1: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, num_classes, base_size=64, dropout=0.2):
        super().__init__()

        self.conv = nn.Sequential(
            ConvBlock(in_channels=3, out_channels=base_size),
            ConvBlock(in_channels=base_size, out_channels=base_size*2),
            ConvBlock(in_channels=base_size*2, out_channels=base_size*4),
            ConvBlock(in_channels=base_size*4, out_channels=base_size*8),
        )

        self.fc = nn.Sequential(
            nn.Dropout(dropout),
            nn.Linear(base_size*8, base_size*2),
            nn.PReLU(),
            nn.BatchNorm1d(base_size*2),
            nn.Dropout(dropout/2),
            nn.Linear(base_size*2, num_classes),
        ) 
Developer: lRomul, Project: argus-freesound, Lines: 20, Source: simple_kaggle.py

Example 2: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self):
        super(CW2_Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.bnm1 = nn.BatchNorm2d(32, momentum=0.1)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.bnm2 = nn.BatchNorm2d(64, momentum=0.1)
        self.conv3 = nn.Conv2d(64, 128, 3)
        self.bnm3 = nn.BatchNorm2d(128, momentum=0.1)
        self.conv4 = nn.Conv2d(128, 128, 3)
        self.bnm4 = nn.BatchNorm2d(128, momentum=0.1)
        self.fc1 = nn.Linear(3200, 256)
        #self.dropout1 = nn.Dropout(p=0.35, inplace=False)
        self.bnm5 = nn.BatchNorm1d(256, momentum=0.1)
        self.fc2 = nn.Linear(256, 256)
        self.bnm6 = nn.BatchNorm1d(256, momentum=0.1)
        self.fc3 = nn.Linear(256, 10)
        #self.dropout2 = nn.Dropout(p=0.35, inplace=False)
        #self.dropout3 = nn.Dropout(p=0.35, inplace=False) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 20, Source: model.py

Example 3: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self):
        super(Network, self).__init__()
        self.feature = nn.Sequential()
        self.feature.add_module('f_conv1', nn.Conv2d(3, 64, kernel_size=5))
        self.feature.add_module('f_bn1', nn.BatchNorm2d(64))
        self.feature.add_module('f_pool1', nn.MaxPool2d(2))
        self.feature.add_module('f_relu1', nn.ReLU(True))
        self.feature.add_module('f_conv2', nn.Conv2d(64, 50, kernel_size=5))
        self.feature.add_module('f_bn2', nn.BatchNorm2d(50))
        self.feature.add_module('f_drop1', nn.Dropout2d())
        self.feature.add_module('f_pool2', nn.MaxPool2d(2))
        self.feature.add_module('f_relu2', nn.ReLU(True))

        self.class_classifier = nn.Sequential()
        self.class_classifier.add_module('c_fc1', nn.Linear(50 * 5 * 5, 100))
        self.class_classifier.add_module('c_bn1', nn.BatchNorm1d(100))
        self.class_classifier.add_module('c_relu1', nn.ReLU(True))
        self.class_classifier.add_module('c_drop1', nn.Dropout2d())
        self.class_classifier.add_module('c_fc2', nn.Linear(100, 500))
        self.class_classifier.add_module('c_bn2', nn.BatchNorm1d(500))
        self.class_classifier.add_module('c_relu2', nn.ReLU(True))
        self.class_classifier.add_module('c_fc3', nn.Linear(500, 10)) 
Developer: jindongwang, Project: transferlearning, Lines: 24, Source: digit_network.py

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, num_class, base_net='resnet50', transfer_loss='mmd', use_bottleneck=True, bottleneck_width=256, width=1024):
        super(Transfer_Net, self).__init__()
        self.base_network = backbone.network_dict[base_net]()
        self.use_bottleneck = use_bottleneck
        self.transfer_loss = transfer_loss
        bottleneck_list = [nn.Linear(self.base_network.output_num(), bottleneck_width),
                           nn.BatchNorm1d(bottleneck_width), nn.ReLU(), nn.Dropout(0.5)]
        self.bottleneck_layer = nn.Sequential(*bottleneck_list)
        classifier_layer_list = [nn.Linear(self.base_network.output_num(), width), nn.ReLU(), nn.Dropout(0.5),
                                 nn.Linear(width, num_class)]
        self.classifier_layer = nn.Sequential(*classifier_layer_list)

        self.bottleneck_layer[0].weight.data.normal_(0, 0.005)
        self.bottleneck_layer[0].bias.data.fill_(0.1)
        for i in range(2):
            self.classifier_layer[i * 3].weight.data.normal_(0, 0.01)
            self.classifier_layer[i * 3].bias.data.fill_(0.0) 
Developer: jindongwang, Project: transferlearning, Lines: 19, Source: models.py

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self,
                 n_res_block: int = 10,
                 n_freq: int = 128,
                 n_hidden: int = 128,
                 n_output: int = 128,
                 kernel_size: int = 5) -> None:
        super().__init__()

        ResBlocks = [_ResBlock(n_hidden) for _ in range(n_res_block)]

        self.melresnet_model = nn.Sequential(
            nn.Conv1d(in_channels=n_freq, out_channels=n_hidden, kernel_size=kernel_size, bias=False),
            nn.BatchNorm1d(n_hidden),
            nn.ReLU(inplace=True),
            *ResBlocks,
            nn.Conv1d(in_channels=n_hidden, out_channels=n_output, kernel_size=1)
        ) 
Developer: pytorch, Project: audio, Lines: 19, Source: _wavernn.py

Example 6: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, *layers):
        '''
        layers : list of int
            Dimensions of the successive layers, starting with the input size.
        '''
        super(FullyConnectedNet, self).__init__()
        self.linear = nn.ModuleList()
        self.bn = nn.ModuleList()
        self.relu = nn.ModuleList()
        pre_dim = layers[0]
        self.nLayers = 0
        for dim in layers[1:]:
            self.linear.append(nn.Linear(pre_dim, dim, bias=False))
            self.bn.append(nn.BatchNorm1d(dim))
            self.relu.append(nn.ReLU(inplace=True))
            init.kaiming_normal_(self.linear[-1].weight)  # in-place variant; kaiming_normal is deprecated
            self.nLayers += 1
            pre_dim = dim 
Developer: princeton-vl, Project: FormulaNet, Lines: 20, Source: model.py

Example 7: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, in_channels, dilation=1):
        
        super(LocalAttenBlock, self).__init__()
        
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=(3,3), padding=(1,1), dilation=dilation)
        self.bn1   = nn.BatchNorm2d(in_channels)
        self.relu  = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(in_channels, in_channels, kernel_size=(3,3), padding=(1,1), dilation=1)
        self.bn2   = nn.BatchNorm2d(in_channels)
        self.relu  = nn.ReLU(inplace=True)
        
        def _weights_init(m):
            # use tuples so isinstance matches either class (``A or B`` only tests A)
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                xavier_normal_(m.weight)
                m.bias.data.zero_()
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        
        self.apply(_weights_init) 
Developer: jefflai108, Project: Attentive-Filtering-Network, Lines: 22, Source: basic_layers.py

Example 8: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self,
                 name: str,
                 input_dim: int,
                 embedding_dim: int = 300,
                 dropout_ratio: float = 0.5,
                 skip_conversion: bool = False):
        super(FeatureEncoder, self).__init__()
        self.name = name
        self.input_dim = input_dim
        self.embedding_dim = embedding_dim
        self.skip_conversion = skip_conversion
        if self.skip_conversion:
            self.embedding_dim = self.input_dim

        # TODO(Yoshi): Check if applying Batch normalization to the input is good
        self.bn1 = nn.BatchNorm1d(num_features=input_dim)
        self.linear1 = nn.Linear(input_dim,
                                 embedding_dim)
        self.relu1 = nn.ReLU()
        self.dp1 = nn.Dropout(dropout_ratio)
        self.linear2 = nn.Linear(embedding_dim,
                                 embedding_dim)
        self.relu2 = nn.ReLU() 
Developer: megagonlabs, Project: sato, Lines: 25, Source: models_sherlock.py

Example 9: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, params):
        super(OpenChemMLP, self).__init__()
        check_params(params, self.get_required_params(),
                     self.get_optional_params())
        self.params = params
        self.hidden_size = self.params['hidden_size']
        self.input_size = [self.params['input_size']] + self.hidden_size[:-1]
        self.n_layers = self.params['n_layers']
        self.activation = self.params['activation']
        if type(self.activation) is list:
            assert len(self.activation) == self.n_layers
        else:
            self.activation = [self.activation]*self.n_layers
        if 'dropout' in self.params.keys():
            self.dropout = self.params['dropout']
        else:
            self.dropout = 0
        self.layers = nn.ModuleList([])
        self.bn = nn.ModuleList([])
        self.dropouts = nn.ModuleList([])
        for i in range(self.n_layers):
            self.dropouts.append(nn.Dropout(self.dropout))
            self.bn.append(nn.BatchNorm1d(self.hidden_size[i]))
            self.layers.append(nn.Linear(in_features=self.input_size[i],
                                      out_features=self.hidden_size[i])) 
Developer: Mariewelt, Project: OpenChem, Lines: 27, Source: openchem_mlp.py

Example 10: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, input_channel, channels, output_channel):
        super(VoiceEmbedNet, self).__init__()
        self.model = nn.Sequential(
            nn.Conv1d(input_channel, channels[0], 3, 2, 1, bias=False),
            nn.BatchNorm1d(channels[0], affine=True),
            nn.ReLU(inplace=True),
            nn.Conv1d(channels[0], channels[1], 3, 2, 1, bias=False),
            nn.BatchNorm1d(channels[1], affine=True),
            nn.ReLU(inplace=True),
            nn.Conv1d(channels[1], channels[2], 3, 2, 1, bias=False),
            nn.BatchNorm1d(channels[2], affine=True),
            nn.ReLU(inplace=True),
            nn.Conv1d(channels[2], channels[3], 3, 2, 1, bias=False),
            nn.BatchNorm1d(channels[3], affine=True),
            nn.ReLU(inplace=True),
            nn.Conv1d(channels[3], output_channel, 3, 2, 1, bias=True),
        ) 
Developer: cmu-mlsp, Project: reconstructing_faces_from_voices, Lines: 19, Source: network.py

Example 11: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, embedding_dim, n_hidden_layers, n_hidden_units, dropout_prob):
        super(DanEncoder, self).__init__()
        encoder_layers = []
        for i in range(n_hidden_layers):
            if i == 0:
                input_dim = embedding_dim
            else:
                input_dim = n_hidden_units

            encoder_layers.extend([
                nn.Linear(input_dim, n_hidden_units),
                nn.BatchNorm1d(n_hidden_units),
                nn.ELU(),
                nn.Dropout(dropout_prob),
            ])
        self.encoder = nn.Sequential(*encoder_layers) 
Developer: Pinafore, Project: qb, Lines: 18, Source: dan.py

Example 12: init_weights

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def init_weights(model):
    if isinstance(model, nn.Linear):
        if model.weight is not None:
            init.kaiming_uniform_(model.weight.data)
        if model.bias is not None:
            init.normal_(model.bias.data)
    elif isinstance(model, nn.BatchNorm1d):
        if model.weight is not None:
            init.normal_(model.weight.data, mean=1, std=0.02)
        if model.bias is not None:
            init.constant_(model.bias.data, 0)
    elif isinstance(model, nn.BatchNorm2d):
        if model.weight is not None:
            init.normal_(model.weight.data, mean=1, std=0.02)
        if model.bias is not None:
            init.constant_(model.bias.data, 0)
    elif isinstance(model, nn.BatchNorm3d):
        if model.weight is not None:
            init.normal_(model.weight.data, mean=1, std=0.02)
        if model.bias is not None:
            init.constant_(model.bias.data, 0)
    else:
        pass 
Developer: GitHub-HongweiZhang, Project: prediction-flow, Lines: 25, Source: utils.py

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, input_size, hidden_layers,
                 dropout=0.0, batchnorm=True, activation='relu'):
        super(MLP, self).__init__()
        modules = OrderedDict()

        previous_size = input_size
        for index, hidden_layer in enumerate(hidden_layers):
            modules[f"dense{index}"] = nn.Linear(previous_size, hidden_layer)
            if batchnorm:
                modules[f"batchnorm{index}"] = nn.BatchNorm1d(hidden_layer)
            if activation:
                if activation.lower() == 'relu':
                    modules[f"activation{index}"] = nn.ReLU()
                elif activation.lower() == 'prelu':
                    modules[f"activation{index}"] = nn.PReLU()
                elif activation.lower() == 'sigmoid':
                    modules[f"activation{index}"] = nn.Sigmoid()
                else:
                    raise NotImplementedError(f"{activation} is not supported")
            if dropout:
                modules[f"dropout{index}"] = nn.Dropout(dropout)
            previous_size = hidden_layer
        self._sequential = nn.Sequential(modules) 
Developer: GitHub-HongweiZhang, Project: prediction-flow, Lines: 25, Source: mlp.py

Example 14: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self):
        super(CW_Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3)
        self.bnm1 = nn.BatchNorm2d(32, momentum=0.1)
        self.conv2 = nn.Conv2d(32, 32, 3)
        self.bnm2 = nn.BatchNorm2d(32, momentum=0.1)
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.bnm3 = nn.BatchNorm2d(64, momentum=0.1)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.bnm4 = nn.BatchNorm2d(64, momentum=0.1)
        self.fc1 = nn.Linear(1024, 200)
        self.bnm5 = nn.BatchNorm1d(200, momentum=0.1)
        self.fc2 = nn.Linear(200, 200)
        self.bnm6 = nn.BatchNorm1d(200, momentum=0.1)
        self.fc3 = nn.Linear(200, 10) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 17, Source: model.py

Example 15: get_model

# Required import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def get_model(load_weights = True):
    # alphabet seems to be fine:
    """
    https://github.com/davek44/Basset/tree/master/src/dna_io.py#L145-L148
    seq = seq.replace('A','0')
    seq = seq.replace('C','1')
    seq = seq.replace('G','2')
    seq = seq.replace('T','3')
    """
    pretrained_model_reloaded_th = nn.Sequential( # Sequential,
        nn.Conv2d(4,300,(19, 1)),
        nn.BatchNorm2d(300),
        nn.ReLU(),
        nn.MaxPool2d((3, 1),(3, 1)),
        nn.Conv2d(300,200,(11, 1)),
        nn.BatchNorm2d(200),
        nn.ReLU(),
        nn.MaxPool2d((4, 1),(4, 1)),
        nn.Conv2d(200,200,(7, 1)),
        nn.BatchNorm2d(200),
        nn.ReLU(),
        nn.MaxPool2d((4, 1),(4, 1)),
        Lambda(lambda x: x.view(x.size(0),-1)), # Reshape,
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(2000,1000)), # Linear,
        nn.BatchNorm1d(1000,1e-05,0.1,True),#BatchNorm1d,
        nn.ReLU(),
        nn.Dropout(0.3),
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(1000,1000)), # Linear,
        nn.BatchNorm1d(1000,1e-05,0.1,True),#BatchNorm1d,
        nn.ReLU(),
        nn.Dropout(0.3),
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(1000,164)), # Linear,
        nn.Sigmoid(),
    )
    if load_weights:
        sd = torch.load('model_files/pretrained_model_reloaded_th.pth')
        pretrained_model_reloaded_th.load_state_dict(sd)
    return  pretrained_model_reloaded_th 
Developer: kipoi, Project: models, Lines: 40, Source: pretrained_model_reloaded_th.py


Note: The torch.nn.BatchNorm1d examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.