This article collects typical usage examples of the torch.nn.BatchNorm1d method in Python. If you have been asking yourself what nn.BatchNorm1d does, how to call it, or what it looks like in real code, the curated method examples below may help. You can also explore the usage of the other members of the torch.nn module.
Below are 15 code examples of nn.BatchNorm1d, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
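Before the examples, here is a minimal self-contained sketch (not part of the collected examples) of the two input layouts nn.BatchNorm1d accepts: (batch, features) for fully connected outputs, and (batch, channels, length) for 1-D convolutional outputs.

import torch
from torch import nn

bn = nn.BatchNorm1d(8)              # normalizes over 8 features/channels
y2d = bn(torch.randn(4, 8))         # (batch, features)
y3d = bn(torch.randn(4, 8, 16))     # (batch, channels, length)
print(y2d.shape, y3d.shape)         # torch.Size([4, 8]) torch.Size([4, 8, 16])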
Example 1: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, num_classes, base_size=64, dropout=0.2):
super().__init__()
self.conv = nn.Sequential(
ConvBlock(in_channels=3, out_channels=base_size),
ConvBlock(in_channels=base_size, out_channels=base_size*2),
ConvBlock(in_channels=base_size*2, out_channels=base_size*4),
ConvBlock(in_channels=base_size*4, out_channels=base_size*8),
)
self.fc = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(base_size*8, base_size*2),
nn.PReLU(),
nn.BatchNorm1d(base_size*2),
nn.Dropout(dropout/2),
nn.Linear(base_size*2, num_classes),
)
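ConvBlock is defined elsewhere in that project, so here is a hedged sketch that exercises only the fully connected head above (base_size=64 and num_classes=10 are assumed values); note that nn.BatchNorm1d requires a batch size greater than 1 in training mode.

import torch
from torch import nn

base_size, num_classes, dropout = 64, 10, 0.2
fc = nn.Sequential(
    nn.Dropout(dropout),
    nn.Linear(base_size * 8, base_size * 2),
    nn.PReLU(),
    nn.BatchNorm1d(base_size * 2),
    nn.Dropout(dropout / 2),
    nn.Linear(base_size * 2, num_classes),
)
logits = fc(torch.randn(4, base_size * 8))   # (4, 512) -> (4, 10)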
Example 2: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self):
super(CW2_Net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3)
self.bnm1 = nn.BatchNorm2d(32, momentum=0.1)
self.conv2 = nn.Conv2d(32, 64, 3)
self.bnm2 = nn.BatchNorm2d(64, momentum=0.1)
self.conv3 = nn.Conv2d(64, 128, 3)
self.bnm3 = nn.BatchNorm2d(128, momentum=0.1)
self.conv4 = nn.Conv2d(128, 128, 3)
self.bnm4 = nn.BatchNorm2d(128, momentum=0.1)
self.fc1 = nn.Linear(3200, 256)
#self.dropout1 = nn.Dropout(p=0.35, inplace=False)
self.bnm5 = nn.BatchNorm1d(256, momentum=0.1)
self.fc2 = nn.Linear(256, 256)
self.bnm6 = nn.BatchNorm1d(256, momentum=0.1)
self.fc3 = nn.Linear(256, 10)
#self.dropout2 = nn.Dropout(p=0.35, inplace=False)
#self.dropout3 = nn.Dropout(p=0.35, inplace=False)
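The forward pass is not included in this snippet. Below is a hedged sketch of how the two BatchNorm1d layers would typically be applied after flattening; this is a hypothetical fragment, assuming F is torch.nn.functional.

# hypothetical forward fragment, not from the original source
x = x.view(x.size(0), -1)             # flatten conv features to (batch, 3200)
x = F.relu(self.bnm5(self.fc1(x)))    # Linear -> BatchNorm1d -> ReLU
x = F.relu(self.bnm6(self.fc2(x)))
logits = self.fc3(x)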
Example 3: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self):
super(Network, self).__init__()
self.feature = nn.Sequential()
self.feature.add_module('f_conv1', nn.Conv2d(3, 64, kernel_size=5))
self.feature.add_module('f_bn1', nn.BatchNorm2d(64))
self.feature.add_module('f_pool1', nn.MaxPool2d(2))
self.feature.add_module('f_relu1', nn.ReLU(True))
self.feature.add_module('f_conv2', nn.Conv2d(64, 50, kernel_size=5))
self.feature.add_module('f_bn2', nn.BatchNorm2d(50))
self.feature.add_module('f_drop1', nn.Dropout2d())
self.feature.add_module('f_pool2', nn.MaxPool2d(2))
self.feature.add_module('f_relu2', nn.ReLU(True))
self.class_classifier = nn.Sequential()
self.class_classifier.add_module('c_fc1', nn.Linear(50 * 5 * 5, 100))
self.class_classifier.add_module('c_bn1', nn.BatchNorm1d(100))
self.class_classifier.add_module('c_relu1', nn.ReLU(True))
        self.class_classifier.add_module('c_drop1', nn.Dropout2d())  # note: applied here to (N, 100) features; nn.Dropout() would be the idiomatic choice
self.class_classifier.add_module('c_fc2', nn.Linear(100, 500))
self.class_classifier.add_module('c_bn2', nn.BatchNorm1d(500))
self.class_classifier.add_module('c_relu2', nn.ReLU(True))
self.class_classifier.add_module('c_fc3', nn.Linear(500, 10))
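The 50 * 5 * 5 input to c_fc1 implies 32x32 RGB inputs (32 -> 28 -> 14 -> 10 -> 5 through the two conv/pool stages). A hedged usage sketch, assuming forward flattens the feature map before the classifier:

import torch

net = Network()
x = torch.randn(8, 3, 32, 32)
feats = net.feature(x)                             # (8, 50, 5, 5)
logits = net.class_classifier(feats.view(8, -1))   # (8, 10)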
Example 4: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, num_class, base_net='resnet50', transfer_loss='mmd', use_bottleneck=True, bottleneck_width=256, width=1024):
super(Transfer_Net, self).__init__()
self.base_network = backbone.network_dict[base_net]()
self.use_bottleneck = use_bottleneck
self.transfer_loss = transfer_loss
        bottleneck_list = [nn.Linear(self.base_network.output_num(), bottleneck_width),
                           nn.BatchNorm1d(bottleneck_width), nn.ReLU(), nn.Dropout(0.5)]
self.bottleneck_layer = nn.Sequential(*bottleneck_list)
classifier_layer_list = [nn.Linear(self.base_network.output_num(), width), nn.ReLU(), nn.Dropout(0.5),
nn.Linear(width, num_class)]
self.classifier_layer = nn.Sequential(*classifier_layer_list)
self.bottleneck_layer[0].weight.data.normal_(0, 0.005)
self.bottleneck_layer[0].bias.data.fill_(0.1)
for i in range(2):
self.classifier_layer[i * 3].weight.data.normal_(0, 0.01)
self.classifier_layer[i * 3].bias.data.fill_(0.0)
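The final loop initializes the two Linear layers of classifier_layer, which sit at indices 0 and 3 of the Sequential (Linear, ReLU, Dropout, Linear), hence the i * 3 stride. A minimal self-contained sketch of the same indexing pattern (the dimensions are assumed values):

import torch.nn as nn

clf = nn.Sequential(nn.Linear(2048, 1024), nn.ReLU(), nn.Dropout(0.5),
                    nn.Linear(1024, 31))
for i in range(2):                          # i * 3 -> indices 0 and 3
    clf[i * 3].weight.data.normal_(0, 0.01)
    clf[i * 3].bias.data.fill_(0.0)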
Example 5: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self,
n_res_block: int = 10,
n_freq: int = 128,
n_hidden: int = 128,
n_output: int = 128,
kernel_size: int = 5) -> None:
super().__init__()
ResBlocks = [_ResBlock(n_hidden) for _ in range(n_res_block)]
self.melresnet_model = nn.Sequential(
nn.Conv1d(in_channels=n_freq, out_channels=n_hidden, kernel_size=kernel_size, bias=False),
nn.BatchNorm1d(n_hidden),
nn.ReLU(inplace=True),
*ResBlocks,
nn.Conv1d(in_channels=n_hidden, out_channels=n_output, kernel_size=1)
)
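This is the MelResNet stem used by WaveRNN-style vocoders. The first Conv1d has no padding, so the time axis shrinks by kernel_size - 1; a shape check using just the stem (the _ResBlock layers are assumed to preserve shape):

import torch
from torch import nn

n_freq, n_hidden, kernel_size = 128, 128, 5
stem = nn.Sequential(
    nn.Conv1d(n_freq, n_hidden, kernel_size=kernel_size, bias=False),
    nn.BatchNorm1d(n_hidden),
    nn.ReLU(inplace=True),
)
out = stem(torch.randn(2, n_freq, 100))   # time: 100 -> 100 - (5 - 1) = 96
print(out.shape)                          # torch.Size([2, 128, 96])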
Example 6: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, *layers):
        '''
        layers : list of int
            Sizes of the layers in order: the input dimension followed by
            each hidden-layer output dimension.
        '''
super(FullyConnectedNet, self).__init__()
self.linear = nn.ModuleList()
self.bn = nn.ModuleList()
self.relu = nn.ModuleList()
pre_dim = layers[0]
self.nLayers = 0
for dim in layers[1:]:
self.linear.append(nn.Linear(pre_dim, dim, bias=False))
self.bn.append(nn.BatchNorm1d(dim))
self.relu.append(nn.ReLU(inplace=True))
            init.kaiming_normal_(self.linear[-1].weight)
self.nLayers += 1
pre_dim = dim
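The constructor stores parallel ModuleLists but the forward pass is not shown; a hedged sketch of how they would be chained:

# hypothetical forward, not from the original source
def forward(self, x):
    for lin, bn, relu in zip(self.linear, self.bn, self.relu):
        x = relu(bn(lin(x)))
    return x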
Example 7: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, in_channels, dilation=1):
super(LocalAttenBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=(3,3), padding=(1,1), dilation=dilation)
self.bn1 = nn.BatchNorm2d(in_channels)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(in_channels, in_channels, kernel_size=(3,3), padding=(1,1), dilation=1)
self.bn2 = nn.BatchNorm2d(in_channels)
        self.relu = nn.ReLU(inplace=True)  # note: rebinds self.relu; identical to the assignment above
        def _weights_init(m):
            # isinstance needs a tuple of classes: `nn.Conv2d or nn.Linear`
            # evaluates to just nn.Conv2d, silently skipping nn.Linear
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                xavier_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
self.apply(_weights_init)
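The original snippet wrote isinstance(m, nn.Conv2d or nn.Linear); because `or` returns its first truthy operand, that checks only nn.Conv2d and silently skips nn.Linear, which is why the tuple form is used above. A two-line demonstration:

from torch import nn

print(nn.Conv2d or nn.Linear)   # <class 'torch.nn.modules.conv.Conv2d'>
print(isinstance(nn.Linear(2, 2), (nn.Conv2d, nn.Linear)))   # True only with the tuple form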
Example 8: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self,
name: str,
input_dim: int,
embedding_dim: int = 300,
dropout_ratio: float = 0.5,
skip_conversion: bool = False):
super(FeatureEncoder, self).__init__()
self.name = name
self.input_dim = input_dim
self.embedding_dim = embedding_dim
self.skip_conversion = skip_conversion
if self.skip_conversion:
self.embedding_dim = self.input_dim
# TODO(Yoshi): Check if applying Batch normalization to the input is good
self.bn1 = nn.BatchNorm1d(num_features=input_dim)
self.linear1 = nn.Linear(input_dim,
embedding_dim)
self.relu1 = nn.ReLU()
self.dp1 = nn.Dropout(dropout_ratio)
self.linear2 = nn.Linear(embedding_dim,
embedding_dim)
self.relu2 = nn.ReLU()
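forward() is omitted from this snippet as well; a hedged sketch of the order the attribute names suggest (batch-normalize the raw input, then two Linear blocks):

# hypothetical forward, not from the original source
def forward(self, x):
    x = self.bn1(x)                            # normalize raw input features
    x = self.dp1(self.relu1(self.linear1(x)))  # Linear -> ReLU -> Dropout
    return self.relu2(self.linear2(x))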
Example 9: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, params):
super(OpenChemMLP, self).__init__()
check_params(params, self.get_required_params(),
self.get_optional_params())
self.params = params
self.hidden_size = self.params['hidden_size']
self.input_size = [self.params['input_size']] + self.hidden_size[:-1]
self.n_layers = self.params['n_layers']
self.activation = self.params['activation']
if type(self.activation) is list:
assert len(self.activation) == self.n_layers
else:
self.activation = [self.activation]*self.n_layers
if 'dropout' in self.params.keys():
self.dropout = self.params['dropout']
else:
self.dropout = 0
self.layers = nn.ModuleList([])
self.bn = nn.ModuleList([])
self.dropouts = nn.ModuleList([])
for i in range(self.n_layers):
self.dropouts.append(nn.Dropout(self.dropout))
self.bn.append(nn.BatchNorm1d(self.hidden_size[i]))
self.layers.append(nn.Linear(in_features=self.input_size[i],
out_features=self.hidden_size[i]))
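The constructor appends Dropout, BatchNorm1d, and Linear modules to separate ModuleLists; forward() is not shown, so the per-layer ordering below is one plausible reading, written as a hypothetical fragment:

# hypothetical forward, not from the original source
def forward(self, x):
    for i in range(self.n_layers):
        x = self.dropouts[i](x)
        x = self.activation[i](self.bn[i](self.layers[i](x)))
    return x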
Example 10: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, input_channel, channels, output_channel):
super(VoiceEmbedNet, self).__init__()
self.model = nn.Sequential(
nn.Conv1d(input_channel, channels[0], 3, 2, 1, bias=False),
nn.BatchNorm1d(channels[0], affine=True),
nn.ReLU(inplace=True),
nn.Conv1d(channels[0], channels[1], 3, 2, 1, bias=False),
nn.BatchNorm1d(channels[1], affine=True),
nn.ReLU(inplace=True),
nn.Conv1d(channels[1], channels[2], 3, 2, 1, bias=False),
nn.BatchNorm1d(channels[2], affine=True),
nn.ReLU(inplace=True),
nn.Conv1d(channels[2], channels[3], 3, 2, 1, bias=False),
nn.BatchNorm1d(channels[3], affine=True),
nn.ReLU(inplace=True),
nn.Conv1d(channels[3], output_channel, 3, 2, 1, bias=True),
)
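Every Conv1d above uses kernel 3, stride 2, padding 1, so each stage maps length L to ceil(L / 2); five stages shrink the time axis about 32x. A quick shape check (the channel sizes are assumed values, not from the snippet):

import torch

net = VoiceEmbedNet(input_channel=64, channels=[256, 384, 576, 864], output_channel=64)
out = net.model(torch.randn(2, 64, 320))
print(out.shape)   # torch.Size([2, 64, 10]); 320 / 2**5 = 10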
Example 11: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self, embedding_dim, n_hidden_layers, n_hidden_units, dropout_prob):
super(DanEncoder, self).__init__()
encoder_layers = []
for i in range(n_hidden_layers):
if i == 0:
input_dim = embedding_dim
else:
input_dim = n_hidden_units
encoder_layers.extend([
nn.Linear(input_dim, n_hidden_units),
nn.BatchNorm1d(n_hidden_units),
nn.ELU(),
nn.Dropout(dropout_prob),
])
self.encoder = nn.Sequential(*encoder_layers)
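Because self.encoder is a plain nn.Sequential, it can be exercised directly; a hedged usage sketch with assumed dimensions (forward presumably just calls self.encoder):

import torch

enc = DanEncoder(embedding_dim=300, n_hidden_layers=2, n_hidden_units=512, dropout_prob=0.3)
out = enc.encoder(torch.randn(16, 300))   # (16, 300) -> (16, 512)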
Example 12: init_weights
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def init_weights(model):
if isinstance(model, nn.Linear):
if model.weight is not None:
init.kaiming_uniform_(model.weight.data)
if model.bias is not None:
init.normal_(model.bias.data)
    elif isinstance(model, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
        # all three batch norm variants carry the same affine parameters
        if model.weight is not None:
            init.normal_(model.weight.data, mean=1, std=0.02)
        if model.bias is not None:
            init.constant_(model.bias.data, 0)
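init_weights is written to be handed to Module.apply, which calls it on every submodule recursively:

import torch.nn as nn

model = nn.Sequential(nn.Linear(128, 64), nn.BatchNorm1d(64), nn.ReLU())
model.apply(init_weights)   # visits each submodule (and the Sequential itself)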
Example 13: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
# This example also requires: from collections import OrderedDict
def __init__(self, input_size, hidden_layers,
dropout=0.0, batchnorm=True, activation='relu'):
super(MLP, self).__init__()
modules = OrderedDict()
previous_size = input_size
for index, hidden_layer in enumerate(hidden_layers):
modules[f"dense{index}"] = nn.Linear(previous_size, hidden_layer)
if batchnorm:
modules[f"batchnorm{index}"] = nn.BatchNorm1d(hidden_layer)
if activation:
if activation.lower() == 'relu':
modules[f"activation{index}"] = nn.ReLU()
elif activation.lower() == 'prelu':
modules[f"activation{index}"] = nn.PReLU()
elif activation.lower() == 'sigmoid':
modules[f"activation{index}"] = nn.Sigmoid()
else:
raise NotImplementedError(f"{activation} is not supported")
if dropout:
modules[f"dropout{index}"] = nn.Dropout(dropout)
previous_size = hidden_layer
self._sequential = nn.Sequential(modules)
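A hedged usage sketch with assumed dimensions (forward presumably calls self._sequential; note that nn.Sequential accepts an OrderedDict of named modules directly):

import torch

mlp = MLP(input_size=100, hidden_layers=[64, 32], dropout=0.1)
out = mlp._sequential(torch.randn(8, 100))   # (8, 100) -> (8, 32)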
Example 14: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def __init__(self):
super(CW_Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3)
self.bnm1 = nn.BatchNorm2d(32, momentum=0.1)
self.conv2 = nn.Conv2d(32, 32, 3)
self.bnm2 = nn.BatchNorm2d(32, momentum=0.1)
self.conv3 = nn.Conv2d(32, 64, 3)
self.bnm3 = nn.BatchNorm2d(64, momentum=0.1)
self.conv4 = nn.Conv2d(64, 64, 3)
self.bnm4 = nn.BatchNorm2d(64, momentum=0.1)
self.fc1 = nn.Linear(1024, 200)
self.bnm5 = nn.BatchNorm1d(200, momentum=0.1)
self.fc2 = nn.Linear(200, 200)
self.bnm6 = nn.BatchNorm1d(200, momentum=0.1)
self.fc3 = nn.Linear(200, 10)
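The 1024 input to fc1 corresponds to 64 channels at 4x4, which matches 28x28 single-channel inputs (e.g., MNIST) if forward applies a 2x2 max-pool after each pair of convolutions; that forward is not shown, so the following shape check is an inference (batch norms and ReLUs are skipped because they do not change shape):

import torch
import torch.nn.functional as F

net = CW_Net()
x = torch.randn(2, 1, 28, 28)
x = F.max_pool2d(net.conv2(net.conv1(x)), 2)   # 28 -> 26 -> 24 -> pool -> 12
x = F.max_pool2d(net.conv4(net.conv3(x)), 2)   # 12 -> 10 -> 8 -> pool -> 4
print(x.view(2, -1).shape)                     # torch.Size([2, 1024])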
Example 15: get_model
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import BatchNorm1d [as alias]
def get_model(load_weights=True):
# alphabet seems to be fine:
"""
https://github.com/davek44/Basset/tree/master/src/dna_io.py#L145-L148
seq = seq.replace('A','0')
seq = seq.replace('C','1')
seq = seq.replace('G','2')
seq = seq.replace('T','3')
"""
pretrained_model_reloaded_th = nn.Sequential( # Sequential,
nn.Conv2d(4,300,(19, 1)),
nn.BatchNorm2d(300),
nn.ReLU(),
nn.MaxPool2d((3, 1),(3, 1)),
nn.Conv2d(300,200,(11, 1)),
nn.BatchNorm2d(200),
nn.ReLU(),
nn.MaxPool2d((4, 1),(4, 1)),
nn.Conv2d(200,200,(7, 1)),
nn.BatchNorm2d(200),
nn.ReLU(),
nn.MaxPool2d((4, 1),(4, 1)),
Lambda(lambda x: x.view(x.size(0),-1)), # Reshape,
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(2000,1000)), # Linear,
nn.BatchNorm1d(1000,1e-05,0.1,True),#BatchNorm1d,
nn.ReLU(),
nn.Dropout(0.3),
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(1000,1000)), # Linear,
nn.BatchNorm1d(1000,1e-05,0.1,True),#BatchNorm1d,
nn.ReLU(),
nn.Dropout(0.3),
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(1000,164)), # Linear,
nn.Sigmoid(),
)
if load_weights:
sd = torch.load('model_files/pretrained_model_reloaded_th.pth')
pretrained_model_reloaded_th.load_state_dict(sd)
return pretrained_model_reloaded_th
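The Linear(2000, 1000) layer implies 200 channels at 10x1 after the conv/pool stack, which works out to one-hot-encoded DNA of length 600 laid out as (batch, 4, 600, 1). A hedged shape check follows; the Lambda class below is a minimal stand-in for the helper used in the original repository, not its actual definition:

import torch
import torch.nn as nn

class Lambda(nn.Module):   # minimal stand-in for the repo's Lambda helper (assumption)
    def __init__(self, fn):
        super().__init__()
        self.fn = fn
    def forward(self, x):
        return self.fn(x)

model = get_model(load_weights=False)      # skip loading the .pth checkpoint
out = model(torch.randn(2, 4, 600, 1))     # (batch, 4 bases, 600 bp, 1)
print(out.shape)                           # torch.Size([2, 164])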