This article collects typical usage examples of the Python method torch.nn.LocalResponseNorm. If you are unsure what nn.LocalResponseNorm does or how to use it, the curated code examples below may help. You can also explore further usage examples from its containing module, torch.nn.
The following presents 15 code examples of the nn.LocalResponseNorm method, drawn from open-source projects.
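Before the project examples, here is a minimal sketch of the layer on its own. The constructor signature is nn.LocalResponseNorm(size, alpha=0.0001, beta=0.75, k=1.0); each element is divided by (k + alpha/size * sum of squares over size neighboring channels) ** beta, so the output keeps the input shape. The tensor shape below is an arbitrary choice for illustration.

import torch
from torch import nn

# Minimal usage sketch (input shape chosen arbitrarily for illustration).
lrn = nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2.0)
x = torch.randn(8, 64, 32, 32)   # (N, C, H, W)
y = lrn(x)
assert y.shape == x.shape        # LRN normalizes across channels; shape is preserved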
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self, state):
    super(AlexCifarNet, self).__init__()
    assert state.nc == 3
    self.features = nn.Sequential(
        nn.Conv2d(state.nc, 64, kernel_size=5, stride=1, padding=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        nn.LocalResponseNorm(4, alpha=0.001 / 9.0, beta=0.75, k=1),
        nn.Conv2d(64, 64, kernel_size=5, stride=1, padding=2),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(4, alpha=0.001 / 9.0, beta=0.75, k=1),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
    )
    self.classifier = nn.Sequential(
        nn.Linear(4096, 384),
        nn.ReLU(inplace=True),
        nn.Linear(384, 192),
        nn.ReLU(inplace=True),
        nn.Linear(192, state.num_classes),
    )
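A minimal usage sketch for this model (the real state class is not shown above, so it is mocked here, and the flatten step between features and classifier is an assumption since the snippet omits its forward method). On 32x32 CIFAR inputs the two stride-2, padding-1 poolings reduce the map to 8x8, so 64 * 8 * 8 = 4096 matches the first Linear layer:

import types
import torch

state = types.SimpleNamespace(nc=3, num_classes=10)  # hypothetical stand-in
model = AlexCifarNet(state)
x = torch.randn(2, 3, 32, 32)                        # CIFAR-sized batch
logits = model.classifier(model.features(x).flatten(1))  # 64 * 8 * 8 = 4096
assert logits.shape == (2, 10)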
Example 2: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self):
    super(CaffeNet, self).__init__()
    self.conv1 = nn.Sequential(
        nn.Conv2d(3, 96, 11, 4),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(3, 2),
        nn.LocalResponseNorm(5, alpha=1e-4, beta=0.75))
    self.conv2 = nn.Sequential(
        nn.Conv2d(96, 256, 5, 1, padding=2, groups=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(3, 2),
        nn.LocalResponseNorm(5, alpha=1e-4, beta=0.75))
    self.conv3 = nn.Sequential(
        nn.Conv2d(256, 384, 3, 1, padding=1),
        nn.ReLU(inplace=True))
    self.conv4 = nn.Sequential(
        nn.Conv2d(384, 384, 3, 1, padding=1, groups=2),
        nn.ReLU(inplace=True))
    self.conv5 = nn.Sequential(
        nn.Conv2d(384, 256, 3, 1, padding=1, groups=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(3, 2))
    initialize_weights(self)
Example 3: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self, kernel_size, full_input_size, full_output_size, curr_vtx_id=None, args=None, norm_type='instance'):
    super(DynamicConvDifferentNorm, self).__init__(kernel_size, full_input_size, full_output_size, curr_vtx_id, args)
    del self.bn
    if norm_type == 'instance':
        self.bn = nn.InstanceNorm2d(full_output_size, momentum=base_ops.BN_MOMENTUM, eps=base_ops.BN_EPSILON)
    # elif norm_type == 'layer':  # would need the spatial size to build nn.LayerNorm, so it is skipped here
    #     self.bn = nn.LayerNorm()
    elif norm_type == 'group':
        self.bn = nn.GroupNorm(num_groups=8, num_channels=full_output_size, eps=base_ops.BN_EPSILON)
    elif norm_type == 'local':
        self.bn = nn.LocalResponseNorm(2)
    else:
        raise ValueError("Norm type not yet supported: {}".format(norm_type))
    # self.bn = WSBNFull(curr_vtx_id + 1, full_output_size, momentum=base_ops.BN_MOMENTUM, eps=base_ops.BN_EPSILON)
    logging.debug('constructing {} norm, current id {}'.format(norm_type, curr_vtx_id))
    self.previous_node_id = 0
    self.num_possible_inputs = curr_vtx_id + 1
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self):
    super().__init__()
    self.model = nn.Sequential(
        nn.Conv2d(in_channels=72, out_channels=96, kernel_size=7, stride=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(),
        nn.LocalResponseNorm(size=2),
        nn.Conv2d(in_channels=96, out_channels=256, kernel_size=5,
                  stride=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(),
        nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3,
                  stride=1),
        nn.ReLU(),
        nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3,
                  stride=1),
        nn.ReLU(),
        # nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3,
        #           stride=1),
        # nn.MaxPool2d(kernel_size=3, stride=2),
        # nn.ReLU(),
    )
    # Infer the first fully connected layer's input size by pushing a mock
    # batch through the convolutional stack and flattening the result.
    mock_input = torch.randn(4, 72, 224, 224)
    mock_output = self.model(mock_input)
    flattened_output = torch.flatten(mock_output, start_dim=1)
    fc_in_dim = flattened_output.shape[1]
    self.full_conn1 = nn.Linear(in_features=fc_in_dim, out_features=4096)
    # self.full_conn2 = nn.Linear(in_features=4096, out_features=2048)
    # self.full_conn3 = nn.Linear(in_features=2048, out_features=2)
    self.full_conn3 = nn.Linear(in_features=4096, out_features=2)
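Examples 4 through 6 all use the same trick: run a mock batch through the convolutional stack and read the flattened width to size the first Linear layer. A small standalone helper capturing the pattern (the name infer_flat_dim is ours, not from any of the projects above):

import torch
from torch import nn

def infer_flat_dim(module: nn.Module, input_shape, batch_size=2):
    # Push a dummy batch through `module` and return the per-sample
    # flattened feature size, mirroring the mock_input trick above.
    with torch.no_grad():
        out = module(torch.randn(batch_size, *input_shape))
    return out.flatten(start_dim=1).shape[1]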
Example 5: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self):
    super().__init__()
    self.model = nn.Sequential(
        nn.Conv2d(in_channels=72, out_channels=96, kernel_size=7, stride=2),
        nn.BatchNorm2d(num_features=96),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(),
        # nn.LocalResponseNorm(size=2),
        nn.Conv2d(in_channels=96, out_channels=256, kernel_size=5, stride=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(),
        nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1),
        nn.ReLU(),
        nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1),
        nn.ReLU(),
        nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1),
        nn.MaxPool2d(kernel_size=3, stride=1),
        nn.ReLU(),
        # nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1),
        # nn.MaxPool2d(kernel_size=3, stride=1),
        # nn.ReLU(),
    )
    # Infer the first fully connected layer's input size from a mock batch.
    mock_input = torch.randn(32, 72, 224, 224)
    mock_output = self.model(mock_input)
    flattened_output = torch.flatten(mock_output, start_dim=1)
    fc_in_dim = flattened_output.shape[1]
    self.full_conn1 = nn.Linear(in_features=fc_in_dim, out_features=4096)
    self.full_conn2 = nn.Linear(in_features=4096, out_features=2048)
    self.full_conn3 = nn.Linear(in_features=2048, out_features=1024)
    self.full_conn4 = nn.Linear(in_features=1024, out_features=2)
    # self.full_conn3 = nn.Linear(in_features=4096, out_features=2)
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self):
    super().__init__()
    self.convolution = nn.Sequential(
        nn.Conv2d(in_channels=3, out_channels=96, kernel_size=7, stride=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(),
        nn.LocalResponseNorm(size=2),
        nn.Conv2d(in_channels=96, out_channels=256, kernel_size=5, stride=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(),
        nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1),
        nn.ReLU(),
        nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1),
        nn.ReLU(),
        nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(),
    )
    # Infer the flattened feature size from a mock batch; it is reused for
    # both the LSTM width and the first fully connected layer.
    mock_input = torch.randn(8, 3, 224, 224)
    mock_output = self.convolution(mock_input)
    flattened_output = torch.flatten(mock_output, start_dim=1)
    self.num_nodes = flattened_output.shape[1]
    fc_in_dim = self.num_nodes
    self.lstm = nn.LSTM(self.num_nodes, self.num_nodes, num_layers=2)
    self.full_conn1 = nn.Linear(in_features=fc_in_dim, out_features=4096)
    self.full_conn2 = nn.Linear(in_features=4096, out_features=2048)
    self.full_conn3 = nn.Linear(in_features=2048, out_features=2)
    # self.full_conn3 = nn.Linear(in_features=4096, out_features=2)
Example 7: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self, aux_classes=1000, n_classes=100, domains=3, dropout=True):
    # Also requires: from collections import OrderedDict
    super(AlexNetCaffe, self).__init__()
    print("Using Caffe AlexNet")
    self.features = nn.Sequential(OrderedDict([
        ("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)),
        ("relu1", nn.ReLU(inplace=True)),
        ("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
        ("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
        ("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)),
        ("relu2", nn.ReLU(inplace=True)),
        ("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
        ("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
        ("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)),
        ("relu3", nn.ReLU(inplace=True)),
        ("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)),
        ("relu4", nn.ReLU(inplace=True)),
        ("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)),
        ("relu5", nn.ReLU(inplace=True)),
        ("pool5", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
    ]))
    self.classifier = nn.Sequential(OrderedDict([
        ("fc6", nn.Linear(256 * 6 * 6, 4096)),
        ("relu6", nn.ReLU(inplace=True)),
        ("drop6", nn.Dropout() if dropout else Id()),
        ("fc7", nn.Linear(4096, 4096)),
        ("relu7", nn.ReLU(inplace=True)),
        ("drop7", nn.Dropout() if dropout else Id())]))
    self.aux_classifier = nn.Linear(4096, aux_classes)
    self.class_classifier = nn.Linear(4096, n_classes)
    # self.domain_classifier = nn.Sequential(
    #     nn.Linear(256 * 6 * 6, 1024),
    #     nn.ReLU(),
    #     nn.Dropout(),
    #     nn.Linear(1024, 1024),
    #     nn.ReLU(),
    #     nn.Dropout(),
    #     nn.Linear(1024, domains))
Example 8: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self):
    super(DCFNetFeature, self).__init__()
    self.feature = nn.Sequential(
        nn.Conv2d(3, 32, 3),
        nn.ReLU(inplace=True),
        nn.Conv2d(32, 32, 3),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1),
    )
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self):
    super(DCFNetFeature, self).__init__()
    self.feature = nn.Sequential(
        nn.Conv2d(3, 32, 3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(32, 32, 3, padding=1),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1),
    )
Example 10: forward
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def forward(self, input):
    norm_img = self.input_norm(input)
    x_features = self.features(norm_img)
    # With size=256, alpha=1*256, beta=0.5, k=0.5, the LRN divides each
    # 256-channel descriptor by roughly its L2 norm before flattening.
    return nn.LocalResponseNorm(256, 1 * 256, 0.5, 0.5)(x_features).view(input.size(0), -1)
Example 11: forward
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def forward(self, input):
    norm_img = self.input_norm(input)
    x_features = self.features(norm_img)
    return nn.LocalResponseNorm(256, 1 * 256, 0.5, 0.5)(x_features).view(input.size(0), -1)
    # interface for pySLAM
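Examples 10 and 11 build a fresh nn.LocalResponseNorm on every forward call. That is harmless since LRN has no learnable parameters, but the module can just as well be created once. A hedged variant (DescriptorHead is a made-up name; input_norm and features are assumed to be supplied by the caller):

from torch import nn

class DescriptorHead(nn.Module):
    def __init__(self, input_norm, features):
        super().__init__()
        self.input_norm = input_norm
        self.features = features
        # LRN is stateless, so it is safe to construct once and reuse.
        self.lrn = nn.LocalResponseNorm(256, 1 * 256, 0.5, 0.5)

    def forward(self, input):
        x_features = self.features(self.input_norm(input))
        return self.lrn(x_features).view(input.size(0), -1)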
Example 12: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self, params):
    Model.check_parameters(
        params,
        {'name': 'GoogleNet', 'input_shape': (3, 224, 224),
         'num_classes': 1000, 'phase': 'training',
         'dtype': 'float32'}
    )
    Model.__init__(self, params)
    self.features = nn.Sequential(
        ConvModule(self.input_shape[0], 64, kernel_size=7, stride=2, padding=3),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        ConvModule(64, 64, kernel_size=1, stride=1),
        ConvModule(64, 192, kernel_size=3, stride=1, padding=1),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        InceptionModule(192, num_1x1=64, num_3x3red=96, num_3x3=128, num_d5x5red=16, num_d5x5=32, proj=32),      # out channels = 256
        InceptionModule(256, num_1x1=128, num_3x3red=128, num_3x3=192, num_d5x5red=32, num_d5x5=96, proj=64),    # out channels = 480
        nn.MaxPool2d(kernel_size=3, stride=2),
        InceptionModule(480, num_1x1=192, num_3x3red=96, num_3x3=208, num_d5x5red=16, num_d5x5=48, proj=64),     # out channels = 512
        InceptionModule(512, num_1x1=160, num_3x3red=112, num_3x3=224, num_d5x5red=24, num_d5x5=64, proj=64),    # out channels = 512
        InceptionModule(512, num_1x1=128, num_3x3red=128, num_3x3=256, num_d5x5red=24, num_d5x5=64, proj=64),    # out channels = 512
        InceptionModule(512, num_1x1=112, num_3x3red=144, num_3x3=288, num_d5x5red=32, num_d5x5=64, proj=64),    # out channels = 528
        InceptionModule(528, num_1x1=256, num_3x3red=160, num_3x3=320, num_d5x5red=32, num_d5x5=128, proj=128),  # out channels = 832
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        InceptionModule(832, num_1x1=256, num_3x3red=160, num_3x3=320, num_d5x5red=32, num_d5x5=128, proj=128),  # out channels = 832
        InceptionModule(832, num_1x1=384, num_3x3red=192, num_3x3=384, num_d5x5red=48, num_d5x5=128, proj=128),  # out channels = 1024
        nn.AvgPool2d(kernel_size=7, stride=1)
    )
    self.classifier = nn.Sequential(
        nn.Dropout(),
        nn.Linear(1024, self.num_classes)
    )
Example 13: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self, params):
    Model.check_parameters(
        params,
        {'name': 'AlexNet', 'input_shape': (3, 227, 227), 'num_classes': 1000,
         'phase': 'training',
         'dtype': 'float32'}
    )
    Model.__init__(self, params)
    self.features = nn.Sequential(
        nn.Conv2d(3, 96, kernel_size=11, stride=4),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(96, 256, kernel_size=5, padding=2),
        nn.ReLU(inplace=True),
        nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.Conv2d(256, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 384, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(384, 256, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2),
    )
    self.classifier = nn.Sequential(
        nn.Linear(256 * 6 * 6, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, self.num_classes),
    )
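As a sanity check on the 256 * 6 * 6 classifier input: 227 becomes 55 after the stride-4 conv11, 27 after the first pool, 13 after the second, and 6 after the last. This can be verified standalone, without the Model base class, by rebuilding just the feature stack (a sketch, not the project's own test):

import torch
from torch import nn

# Rebuild only the feature stack from Example 13 and check its output shape.
features = nn.Sequential(
    nn.Conv2d(3, 96, kernel_size=11, stride=4), nn.ReLU(inplace=True),
    nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(inplace=True),
    nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(inplace=True),
    nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(inplace=True),
    nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True),
    nn.MaxPool2d(kernel_size=3, stride=2),
)
out = features(torch.randn(1, 3, 227, 227))
assert out.shape == (1, 256, 6, 6)   # matches the 256 * 6 * 6 Linear input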
Example 14: old_forward
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def old_forward(self, x):
    B, T, C, H, W = x.size()
    x = x.transpose(0, 1)
    fc7_out = torch.zeros(B, T, self.fc_size).to(x.device)
    for i in range(9):  # one pass per jigsaw patch
        z = self.features(x[i])
        z = self.classifier(z.view(B, -1))
        fc7_out[:, i] = z
    jig_out = self.jigsaw_classifier(fc7_out.view(B, -1))
    class_out = self.class_classifier(fc7_out.max(1)[0])
    return jig_out, class_out

# class AlexNetCaffePatches(nn.Module):
#     def __init__(self, num_classes=1000, dropout=True):
#         super(AlexNetCaffePatches, self).__init__()
#         self.features = nn.Sequential(OrderedDict([
#             ("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)),
#             ("relu1", nn.ReLU(inplace=True)),
#             ("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
#             ("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
#             ("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)),
#             ("relu2", nn.ReLU(inplace=True)),
#             ("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
#             ("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
#             ("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)),
#             ("relu3", nn.ReLU(inplace=True)),
#             ("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)),
#             ("relu4", nn.ReLU(inplace=True)),
#             ("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)),
#             ("relu5", nn.ReLU(inplace=True)),
#             ("pool5", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
#         ]))
#         self.fc6 = nn.Sequential(nn.Linear(256 * 3 * 3, 1024), nn.ReLU(inplace=True), nn.Dropout())
#         self.fc7 = nn.Sequential(nn.Linear(9 * 1024, 4096), nn.ReLU(inplace=True), nn.Dropout())
#         self.fc8 = nn.Linear(4096, num_classes)
Example 15: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LocalResponseNorm [as alias]
def __init__(self, jigsaw_classes=1000, n_classes=100, domains=3, dropout=True):
    # Also requires: from collections import OrderedDict
    super(AlexNetCaffe, self).__init__()
    print("Using Caffe AlexNet")
    self.features = nn.Sequential(OrderedDict([
        ("conv1", nn.Conv2d(3, 96, kernel_size=11, stride=4)),
        ("relu1", nn.ReLU(inplace=True)),
        ("pool1", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
        ("norm1", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
        ("conv2", nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=2)),
        ("relu2", nn.ReLU(inplace=True)),
        ("pool2", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
        ("norm2", nn.LocalResponseNorm(5, 1.e-4, 0.75)),
        ("conv3", nn.Conv2d(256, 384, kernel_size=3, padding=1)),
        ("relu3", nn.ReLU(inplace=True)),
        ("conv4", nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2)),
        ("relu4", nn.ReLU(inplace=True)),
        ("conv5", nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2)),
        ("relu5", nn.ReLU(inplace=True)),
        ("pool5", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)),
    ]))
    self.classifier = nn.Sequential(OrderedDict([
        ("fc6", nn.Linear(256 * 6 * 6, 4096)),
        ("relu6", nn.ReLU(inplace=True)),
        ("drop6", nn.Dropout() if dropout else Id()),
        ("fc7", nn.Linear(4096, 4096)),
        ("relu7", nn.ReLU(inplace=True)),
        ("drop7", nn.Dropout() if dropout else Id())]))
    self.jigsaw_classifier = nn.Linear(4096, jigsaw_classes)
    self.class_classifier = nn.Linear(4096, n_classes)
    # self.domain_classifier = nn.Sequential(
    #     nn.Linear(256 * 6 * 6, 1024),
    #     nn.ReLU(),
    #     nn.Dropout(),
    #     nn.Linear(1024, 1024),
    #     nn.ReLU(),
    #     nn.Dropout(),
    #     nn.Linear(1024, domains))