This page collects typical usage examples of Python's torch.nn.MaxPool2d. If you are unsure what nn.MaxPool2d does, how to call it, or want to see it used in real code, the curated samples below may help. You can also browse usage examples for the other members of the torch.nn module.
The following 15 code examples of nn.MaxPool2d are listed, by default in order of popularity. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def __init__(self):
    super(GoogLeNet, self).__init__()
    self.pre_layers = nn.Sequential(
        nn.Conv2d(3, 192, kernel_size=3, padding=1),
        nn.BatchNorm2d(192),
        nn.ReLU(True),
    )
    self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
    self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
    self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
    self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
    self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
    self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
    self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
    self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
    self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
    self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
    self.avgpool = nn.AvgPool2d(8, stride=1)
    self.linear = nn.Linear(1024, 10)
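As a quick sanity check of the pooling layer above (not part of the original code), the following sketch shows that nn.MaxPool2d(3, stride=2, padding=1) halves the spatial resolution while leaving the channel count unchanged; the 480 channels match the output of self.b3, and the 16x16 spatial size is purely illustrative:

import torch
from torch import nn

pool = nn.MaxPool2d(3, stride=2, padding=1)
x = torch.randn(1, 480, 16, 16)  # channels as produced by self.b3; spatial size is illustrative
print(pool(x).shape)             # torch.Size([1, 480, 8, 8]) -- height and width are halved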
Example 2: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(MyResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    # note the increasing dilation
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilation=1)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
    # these layers will not be used
    self.avgpool = nn.AvgPool2d(7)
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
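The stem used above (7x7 convolution with stride 2 followed by the 3x3 max pooling with stride 2) reduces spatial resolution by a factor of 4 before layer1. A minimal sketch, assuming an illustrative 224x224 input:

import torch
from torch import nn

stem = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
    nn.BatchNorm2d(64),
    nn.ReLU(inplace=True),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
x = torch.randn(1, 3, 224, 224)  # illustrative input size
print(stem(x).shape)             # torch.Size([1, 64, 56, 56]) -- 224 / 4 = 56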
Example 3: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    # maxpool different from pytorch-resnet, to match tf-faster-rcnn
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    # use stride 1 for the last conv4 layer (same as tf-faster-rcnn)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 24, Source file: resnet_v1.py
Example 4: test_max_pool_2d
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def test_max_pool_2d():
    test_cases = OrderedDict([('in_w', [10, 20]), ('in_h', [10, 20]),
                              ('in_channel', [1, 3]), ('out_channel', [1, 3]),
                              ('kernel_size', [3, 5]), ('stride', [1, 2]),
                              ('padding', [0, 1]), ('dilation', [1, 2])])
    for in_h, in_w, in_cha, out_cha, k, s, p, d in product(
            *list(test_cases.values())):
        # wrapper op with 0-dim input
        x_empty = torch.randn(0, in_cha, in_h, in_w, requires_grad=True)
        wrapper = MaxPool2d(k, stride=s, padding=p, dilation=d)
        wrapper_out = wrapper(x_empty)
        # torch op with 3-dim input as shape reference
        x_normal = torch.randn(3, in_cha, in_h, in_w)
        ref = nn.MaxPool2d(k, stride=s, padding=p, dilation=d)
        ref_out = ref(x_normal)
        assert wrapper_out.shape[0] == 0
        assert wrapper_out.shape[1:] == ref_out.shape[1:]
        assert torch.equal(wrapper(x_normal), ref_out)
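The shapes asserted in this test follow directly from the nn.MaxPool2d output-size formula, H_out = floor((H_in + 2*padding - dilation*(kernel_size-1) - 1) / stride) + 1. A small, self-contained check of that formula (independent of the wrapper class under test):

import math
import torch
from torch import nn

def expected_size(n, k, s, p, d):
    # floor((n + 2*p - d*(k-1) - 1) / s) + 1, as documented for nn.MaxPool2d
    return math.floor((n + 2 * p - d * (k - 1) - 1) / s) + 1

pool = nn.MaxPool2d(5, stride=2, padding=1, dilation=2)
out = pool(torch.randn(3, 3, 20, 10))
assert out.shape[2] == expected_size(20, 5, 2, 1, 2)  # 7
assert out.shape[3] == expected_size(10, 5, 2, 1, 2)  # 2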
Example 5: _make_layers
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def _make_layers(self, cfg):
    layers = []
    in_channels = 3
    for x in cfg:
        if x == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                       nn.BatchNorm2d(x),
                       nn.ReLU(inplace=True)]
            in_channels = x
    layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
    return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())
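Each 'M' entry in cfg becomes an nn.MaxPool2d(kernel_size=2, stride=2) that halves both spatial dimensions. A standalone sketch with a hypothetical VGG11-style cfg (the exact cfg table of the original project is not shown here):

import torch
from torch import nn

cfg = [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']  # hypothetical VGG11-style cfg
layers, in_channels = [], 3
for x in cfg:
    if x == 'M':
        layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
    else:
        layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                   nn.BatchNorm2d(x),
                   nn.ReLU(inplace=True)]
        in_channels = x
features = nn.Sequential(*layers)
print(features(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 512, 1, 1]) -- five poolings: 32 -> 1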
Example 6: simplify_source
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def simplify_source(s):
    s = map(lambda x: x.replace(',(1, 1),(0, 0),1,1,bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',(0, 0),1,1,bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',1,1,bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',bias=True),#Conv2d',')'),s)
    s = map(lambda x: x.replace('),#Conv2d',')'),s)
    s = map(lambda x: x.replace(',1e-05,0.1,True),#BatchNorm2d',')'),s)
    s = map(lambda x: x.replace('),#BatchNorm2d',')'),s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#MaxPool2d',')'),s)
    s = map(lambda x: x.replace(',ceil_mode=False),#MaxPool2d',')'),s)
    s = map(lambda x: x.replace('),#MaxPool2d',')'),s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#AvgPool2d',')'),s)
    s = map(lambda x: x.replace(',ceil_mode=False),#AvgPool2d',')'),s)
    s = map(lambda x: x.replace(',bias=True)),#Linear',')), # Linear'),s)
    s = map(lambda x: x.replace(')),#Linear',')), # Linear'),s)
    s = map(lambda x: '{},\n'.format(x),s)
    s = map(lambda x: x[1:],s)
    s = reduce(lambda x,y: x+y, s)
    return s
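simplify_source compacts layer strings that were dumped together with their default arguments (the trailing ',#Conv2d', ',#MaxPool2d', ... markers) into short constructor calls. A minimal illustration of the MaxPool2d case, assuming each dumped line starts with one leading character (for example a tab) that x[1:] trims; note that on Python 3 the function also needs `from functools import reduce`:

from functools import reduce  # built-in on Python 2, needs this import on Python 3

lines = ['\tnn.MaxPool2d((3, 3),(2, 2),(0, 0),ceil_mode=False),#MaxPool2d']  # hypothetical dumped line
lines = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#MaxPool2d', ')'), lines)
lines = map(lambda x: '{},\n'.format(x), lines)
lines = map(lambda x: x[1:], lines)
print(reduce(lambda x, y: x + y, lines))  # nn.MaxPool2d((3, 3),(2, 2)),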
Example 7: get_model
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def get_model(load_weights = True):
    deepsea_cpu = nn.Sequential( # Sequential,
        nn.Conv2d(4,320,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4),(1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(320,480,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4),(1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(480,960,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.Dropout(0.5),
        Lambda(lambda x: x.view(x.size(0),-1)), # Reshape,
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(50880,925)), # Linear,
        nn.Threshold(0, 1e-06),
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(925,919)), # Linear,
        nn.Sigmoid(),
    )
    if load_weights:
        deepsea_cpu.load_state_dict(torch.load('model_files/deepsea_cpu.pth'))
    return nn.Sequential(ReCodeAlphabet(), deepsea_cpu)
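Here nn.MaxPool2d((1, 4), (1, 4)) pools only along the width (sequence) axis; the height stays 1 for the one-hot encoded DNA input. The shapes below, traced for an illustrative 1000-bp input, also explain the 50880-unit Linear layer (960 channels * 53 positions):

import torch
from torch import nn

x = torch.randn(2, 4, 1, 1000)           # (batch, A/C/G/T channel, 1, sequence length)
x = nn.Conv2d(4, 320, (1, 8))(x)         # -> (2, 320, 1, 993)
x = nn.MaxPool2d((1, 4), (1, 4))(x)      # -> (2, 320, 1, 248)
x = nn.Conv2d(320, 480, (1, 8))(x)       # -> (2, 480, 1, 241)
x = nn.MaxPool2d((1, 4), (1, 4))(x)      # -> (2, 480, 1, 60)
x = nn.Conv2d(480, 960, (1, 8))(x)       # -> (2, 960, 1, 53)
print(x.view(x.size(0), -1).shape)       # torch.Size([2, 50880])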
Example 8: get_seqpred_model
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def get_seqpred_model(load_weights = True):
    deepsea_cpu = nn.Sequential( # Sequential,
        nn.Conv2d(4,320,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4),(1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(320,480,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4),(1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(480,960,(1, 8),(1, 1)),
        nn.Threshold(0, 1e-06),
        nn.Dropout(0.5),
        Lambda(lambda x: x.view(x.size(0),-1)), # Reshape,
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(50880,925)), # Linear,
        nn.Threshold(0, 1e-06),
        nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(925,919)), # Linear,
        nn.Sigmoid(),
    )
    if load_weights:
        deepsea_cpu.load_state_dict(torch.load('model_files/deepsea_cpu.pth'))
    return nn.Sequential(ReCodeAlphabet(), ConcatenateRC(), deepsea_cpu, AverageRC())
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def __init__(self):
    super(LeNet, self).__init__()
    # convolutional layers
    self.conv = nn.Sequential(
        nn.Conv2d(1, 6, 5),  # in_channels, out_channels, kernel_size
        nn.Sigmoid(),
        nn.MaxPool2d(2, 2),  # kernel_size, stride
        nn.Conv2d(6, 16, 5),
        nn.Sigmoid(),
        nn.MaxPool2d(2, 2)
    )
    # fully connected layers
    self.fc = nn.Sequential(
        nn.Linear(16 * 4 * 4, 120),
        nn.Sigmoid(),
        nn.Linear(120, 84),
        nn.Sigmoid(),
        nn.Linear(84, 10)
    )
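For a 28x28 MNIST image the two 5x5 convolutions and 2x2 max poolings shrink the feature map as 28 -> 24 -> 12 -> 8 -> 4, which is why the first fully connected layer expects 16 * 4 * 4 inputs. A quick check (assuming that 28x28 input):

import torch
from torch import nn

conv = nn.Sequential(
    nn.Conv2d(1, 6, 5), nn.Sigmoid(), nn.MaxPool2d(2, 2),
    nn.Conv2d(6, 16, 5), nn.Sigmoid(), nn.MaxPool2d(2, 2),
)
print(conv(torch.randn(1, 1, 28, 28)).shape)  # torch.Size([1, 16, 4, 4])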
Example 10: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def __init__(self, block, layers, in_channels=3):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example 11: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    self.avgpool = nn.AvgPool2d(7, stride=1)
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example 12: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def __init__(self, min_depth, max_depth, num_classes,
             classifierType, inferenceType, decoderType,
             height, width,
             alpha=0, beta=0,
             layers=[3, 4, 6, 3],
             block=Bottleneck):
    # Note: classifierType: CE=Cross Entropy, OR=Ordinal Regression
    super(ResNet, self).__init__(min_depth, max_depth, num_classes,
                                 classifierType, inferenceType, decoderType)
    self.inplanes = 64
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu1 = nn.ReLU(True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
    self.alpha = alpha
    self.beta = beta
    self.decoder = make_decoder(height, width, decoderType)
    self.use_inter = self.alpha != 0.0
    self.interout, self.classifier = make_classifier(classifierType, num_classes, self.use_inter, channel1=1024, channel2=2048)
    self.parameter_initialization()
Example 13: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def __init__(self, input_dim):
    super(VGGExtractor, self).__init__()
    self.init_dim = 64
    self.hide_dim = 128
    in_channel, freq_dim, out_dim = self.check_dim(input_dim)
    self.in_channel = in_channel
    self.freq_dim = freq_dim
    self.out_dim = out_dim
    self.extractor = nn.Sequential(
        nn.Conv2d(in_channel, self.init_dim, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(self.init_dim, self.init_dim, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.MaxPool2d(2, stride=2),  # Half-time dimension
        nn.Conv2d(self.init_dim, self.hide_dim, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(self.hide_dim, self.hide_dim, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.MaxPool2d(2, stride=2)  # Half-time dimension
    )
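Each nn.MaxPool2d(2, stride=2) halves both the time and the feature axes, so the extractor downsamples the input by 4x along time (hence the "Half-time dimension" comments). A sketch with hypothetical shapes (1 input channel, 100 frames, 40-dimensional filterbank features); check_dim may reshape the input differently in the original code:

import torch
from torch import nn

extractor = nn.Sequential(
    nn.Conv2d(1, 64, 3, stride=1, padding=1), nn.ReLU(),
    nn.Conv2d(64, 64, 3, stride=1, padding=1), nn.ReLU(),
    nn.MaxPool2d(2, stride=2),
    nn.Conv2d(64, 128, 3, stride=1, padding=1), nn.ReLU(),
    nn.Conv2d(128, 128, 3, stride=1, padding=1), nn.ReLU(),
    nn.MaxPool2d(2, stride=2),
)
x = torch.randn(8, 1, 100, 40)  # (batch, channel, time, frequency) -- hypothetical sizes
print(extractor(x).shape)       # torch.Size([8, 128, 25, 10])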
Example 14: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)  # change
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)  # different
    self.avgpool = nn.AvgPool2d(7)
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
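The "# change" marks the pooling layer: the usual padding=1 is replaced by padding=0 with ceil_mode=True, a variant commonly seen in Caffe-converted ResNets. For even input sizes both settings give the same output size, only the window alignment differs; a quick comparison:

import torch
from torch import nn

x = torch.randn(1, 64, 112, 112)
pool_ceil = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
pool_pad = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
print(pool_ceil(x).shape)  # torch.Size([1, 64, 56, 56])
print(pool_pad(x).shape)   # torch.Size([1, 64, 56, 56])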
Example 15: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool2d [as alias]
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(DetNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_new_layer(256, layers[3])
    self.layer5 = self._make_new_layer(256, layers[4])
    self.avgpool = nn.AdaptiveAvgPool2d(1)
    self.fc = nn.Linear(1024, num_classes)
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()