This article collects typical usage examples of the Python method torch.nn.AvgPool3d. If you are wondering what nn.AvgPool3d does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore further usage examples of the containing module, torch.nn.
Below are 15 code examples of nn.AvgPool3d, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: inflate_pool
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
def inflate_pool(pool2d,
                 time_dim=1,
                 time_padding=0,
                 time_stride=None,
                 time_dilation=1):
    kernel_dim = (time_dim, pool2d.kernel_size, pool2d.kernel_size)
    padding = (time_padding, pool2d.padding, pool2d.padding)
    if time_stride is None:
        time_stride = time_dim
    stride = (time_stride, pool2d.stride, pool2d.stride)
    if isinstance(pool2d, nn.MaxPool2d):
        dilation = (time_dilation, pool2d.dilation, pool2d.dilation)
        pool3d = nn.MaxPool3d(
            kernel_dim,
            padding=padding,
            dilation=dilation,
            stride=stride,
            ceil_mode=pool2d.ceil_mode)
    elif isinstance(pool2d, nn.AvgPool2d):
        pool3d = nn.AvgPool3d(kernel_dim, stride=stride)
    else:
        raise ValueError(
            '{} is not among known pooling classes'.format(type(pool2d)))
    return pool3d
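A minimal usage sketch (not part of the original example), assuming the inflate_pool function above is in scope; the concrete layer and tensor shape are illustrative only:

import torch
from torch import nn

pool2d = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
pool3d = inflate_pool(pool2d, time_dim=2)   # -> nn.AvgPool3d with kernel (2, 3, 3), stride (2, 2, 2)
x = torch.randn(1, 64, 8, 56, 56)           # (N, C, D, H, W)
print(pool3d(x).shape)                      # torch.Size([1, 64, 4, 27, 27]); the AvgPool branch above does not propagate padding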
Example 2: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
def __init__(self, num_class):
    super(S3D_G, self).__init__()
    self.conv1 = BasicConv3d(3, 64, kernel_size=7, stride=2, padding=3)
    self.pool1 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.conv2 = BasicConv3d(64, 64, kernel_size=1, stride=1)
    self.conv3 = BasicConv3d(64, 192, kernel_size=3, stride=1, padding=1)
    self.pool2 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.Inception1 = nn.Sequential(S3D_G_block(192, [64, 96, 128, 16, 32, 32]),
                                    S3D_G_block(256, [128, 128, 192, 32, 96, 64]))
    self.pool3 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1))
    self.Inception2 = nn.Sequential(S3D_G_block(480, [192, 96, 208, 16, 48, 64]),
                                    S3D_G_block(512, [160, 112, 224, 24, 64, 64]),
                                    S3D_G_block(512, [128, 128, 256, 24, 64, 64]),
                                    S3D_G_block(512, [112, 144, 288, 32, 64, 64]),
                                    S3D_G_block(528, [256, 160, 320, 32, 128, 128]))
    self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=2)
    self.Inception3 = nn.Sequential(S3D_G_block(832, [256, 160, 320, 32, 128, 128]),
                                    S3D_G_block(832, [384, 192, 384, 48, 128, 128]))
    self.avg_pool = nn.AvgPool3d(kernel_size=(8, 7, 7))
    self.dropout = nn.Dropout(0.4)
    self.linear = nn.Linear(1024, num_class)
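The final nn.AvgPool3d(kernel_size=(8, 7, 7)) acts as a global average pool over the last S3D-G feature map. A standalone shape sketch of just that layer, assuming the feature volume is (N, 1024, 8, 7, 7) at this point (an assumption, e.g. for 64-frame 224x224 clips):

import torch
from torch import nn

avg_pool = nn.AvgPool3d(kernel_size=(8, 7, 7))
feat = torch.randn(2, 1024, 8, 7, 7)        # (N, C, D, H, W) before the classifier
out = avg_pool(feat)
print(out.shape)                            # torch.Size([2, 1024, 1, 1, 1])
out = out.view(out.size(0), -1)             # flatten to (N, 1024) for the Linear layer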
Example 3: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
def __init__(self, num_class):
    super(fast_S3D, self).__init__()
    self.conv1 = BasicConv3d(3, 64, kernel_size=(1, 7, 7), stride=2, padding=(0, 3, 3))
    self.pool1 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.conv2 = BasicConv3d(64, 64, kernel_size=1, stride=1)
    self.conv3 = BasicConv3d(64, 192, kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1))
    self.pool2 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.Inception1 = nn.Sequential(Inception_block(192, [64, 96, 128, 16, 32, 32]),
                                    Inception_block(256, [128, 128, 192, 32, 96, 64]))
    self.pool3 = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
    self.Inception2 = nn.Sequential(Inception_block(480, [192, 96, 208, 16, 48, 64]),
                                    Inception_block(512, [160, 112, 224, 24, 64, 64]),
                                    Inception_block(512, [128, 128, 256, 24, 64, 64]),
                                    Inception_block(512, [112, 144, 288, 32, 64, 64]),
                                    Inception_block(528, [256, 160, 320, 32, 128, 128]))
    self.pool4 = nn.MaxPool3d(kernel_size=2, stride=2)
    self.Inception3 = nn.Sequential(S3D_block(832, [256, 160, 320, 32, 128, 128]),
                                    S3D_block(832, [384, 192, 384, 48, 128, 128]))
    self.avg_pool = nn.AvgPool3d(kernel_size=(8, 7, 7))
    self.dropout = nn.Dropout(0.4)
    self.linear = nn.Linear(1024, num_class)
Example 4: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
def __init__(self, num_class):
    super(Res21D, self).__init__()
    self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3))
    self.conv2 = nn.Sequential(Res21D_Block(64, 64, spatial_stride=2),
                               Res21D_Block(64, 64),
                               Res21D_Block(64, 64))
    self.conv3 = nn.Sequential(Res21D_Block(64, 128, spatial_stride=2, temporal_stride=2),
                               Res21D_Block(128, 128),
                               Res21D_Block(128, 128),
                               Res21D_Block(128, 128))
    self.conv4 = nn.Sequential(Res21D_Block(128, 256, spatial_stride=2, temporal_stride=2),
                               Res21D_Block(256, 256),
                               Res21D_Block(256, 256),
                               Res21D_Block(256, 256),
                               Res21D_Block(256, 256),
                               Res21D_Block(256, 256))
    self.conv5 = nn.Sequential(Res21D_Block(256, 512, spatial_stride=2, temporal_stride=2),
                               Res21D_Block(512, 512),
                               Res21D_Block(512, 512))
    self.avg_pool = nn.AvgPool3d(kernel_size=(1, 4, 4))
    self.linear = nn.Linear(512, num_class)
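Here nn.AvgPool3d(kernel_size=(1, 4, 4)) averages only over the spatial dimensions and leaves the temporal axis untouched. A small shape sketch with an assumed (N, 512, T, 4, 4) output from conv5 (the input resolution is not given in the original):

import torch
from torch import nn

avg_pool = nn.AvgPool3d(kernel_size=(1, 4, 4))
feat = torch.randn(1, 512, 2, 4, 4)         # assumed (N, C, T, H, W) after conv5
print(avg_pool(feat).shape)                 # torch.Size([1, 512, 2, 1, 1])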
Example 5: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
def __init__(self, num_class):
    super(I3D, self).__init__()
    self.conv1 = BasicConv3d(3, 64, kernel_size=7, stride=2, padding=3)
    self.pool1 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.conv2 = BasicConv3d(64, 64, kernel_size=1, stride=1)
    self.conv3 = BasicConv3d(64, 192, kernel_size=3, stride=1, padding=1)
    self.pool2 = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
    self.Inception1 = nn.Sequential(Inception_block(192, [64, 96, 128, 16, 32, 32]),
                                    Inception_block(256, [128, 128, 192, 32, 96, 64]))
    self.pool3 = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1))
    self.Inception2 = nn.Sequential(Inception_block(480, [192, 96, 208, 16, 48, 64]),
                                    Inception_block(512, [160, 112, 224, 24, 64, 64]),
                                    Inception_block(512, [128, 128, 256, 24, 64, 64]),
                                    Inception_block(512, [112, 144, 288, 32, 64, 64]),
                                    Inception_block(528, [256, 160, 320, 32, 128, 128]))
    self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=2)
    self.Inception3 = nn.Sequential(Inception_block(832, [256, 160, 320, 32, 128, 128]),
                                    Inception_block(832, [384, 192, 384, 48, 128, 128]))
    self.avg_pool = nn.AvgPool3d(kernel_size=(8, 7, 7))
    self.dropout = nn.Dropout(0.4)
    self.linear = nn.Linear(1024, num_class)
Example 6: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
def __init__(self, num_classes=400, dropout_keep_prob=1, input_channel=3, spatial_squeeze=True):
    super(S3DG, self).__init__()
    self.features = nn.Sequential(
        STConv3d(input_channel, 64, kernel_size=7, stride=2, padding=3),           # (64, 32, 112, 112)
        nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)),  # (64, 32, 56, 56)
        BasicConv3d(64, 64, kernel_size=1, stride=1),                               # (64, 32, 56, 56)
        STConv3d(64, 192, kernel_size=3, stride=1, padding=1),                      # (192, 32, 56, 56)
        nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)),  # (192, 32, 28, 28)
        Mixed_3b(),  # (256, 32, 28, 28)
        Mixed_3c(),  # (480, 32, 28, 28)
        nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1)),  # (480, 16, 14, 14)
        Mixed_4b(),  # (512, 16, 14, 14)
        Mixed_4c(),  # (512, 16, 14, 14)
        Mixed_4d(),  # (512, 16, 14, 14)
        Mixed_4e(),  # (528, 16, 14, 14)
        Mixed_4f(),  # (832, 16, 14, 14)
        nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 0, 0)),  # (832, 8, 7, 7)
        Mixed_5b(),  # (832, 8, 7, 7)
        Mixed_5c(),  # (1024, 8, 7, 7)
        nn.AvgPool3d(kernel_size=(2, 7, 7), stride=1),  # (1024, 7, 1, 1)
        nn.Dropout3d(dropout_keep_prob),
        nn.Conv3d(1024, num_classes, kernel_size=1, stride=1, bias=True),  # (400, 7, 1, 1)
    )
    self.spatial_squeeze = spatial_squeeze
    self.softmax = nn.Softmax()
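A quick shape check of the pooling stage above, using the (N, 1024, 8, 7, 7) feature volume indicated by the Mixed_5c comment; with a (2, 7, 7) kernel and stride 1, seven temporal steps remain, which are handled downstream (not shown in this excerpt):

import torch
from torch import nn

pool = nn.AvgPool3d(kernel_size=(2, 7, 7), stride=1)
feat = torch.randn(1, 1024, 8, 7, 7)        # shape from the Mixed_5c comment above
print(pool(feat).shape)                     # torch.Size([1, 1024, 7, 1, 1])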
Example 7: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
def __init__(self, in_planes, out_planes, stride, groups):
    super(Bottleneck, self).__init__()
    self.stride = stride
    self.groups = groups
    mid_planes = out_planes // 4
    if self.stride == 2:
        out_planes = out_planes - in_planes
    g = 1 if in_planes == 24 else groups
    self.conv1 = nn.Conv3d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
    self.bn1 = nn.BatchNorm3d(mid_planes)
    self.conv2 = nn.Conv3d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
    self.bn2 = nn.BatchNorm3d(mid_planes)
    self.conv3 = nn.Conv3d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
    self.bn3 = nn.BatchNorm3d(out_planes)
    self.relu = nn.ReLU(inplace=True)
    if stride == 2:
        self.shortcut = nn.AvgPool3d(kernel_size=(2, 3, 3), stride=2, padding=(0, 1, 1))
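The AvgPool3d shortcut above downsamples the identity path so it can be merged with the strided main branch (a ShuffleNet-style bottleneck); the forward pass is not part of this excerpt. A minimal sketch of the shortcut's shape behaviour with an assumed input:

import torch
from torch import nn

shortcut = nn.AvgPool3d(kernel_size=(2, 3, 3), stride=2, padding=(0, 1, 1))
x = torch.randn(1, 24, 16, 56, 56)          # (N, C, D, H, W)
print(shortcut(x).shape)                    # torch.Size([1, 24, 8, 28, 28])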
Example 8: build_pooling3d
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
def build_pooling3d(attr, channels=None, conv_bias=False):
    method = attr['mode']
    ks = attr['kernel_size'] if 'kernel_size' in attr else (attr['kernel_d'], attr['kernel_h'], attr['kernel_w'])
    if ('pad' in attr) or ('pad_d' in attr and 'pad_w' in attr and 'pad_h' in attr):
        padding = attr['pad'] if 'pad' in attr else (attr['pad_d'], attr['pad_h'], attr['pad_w'])
    else:
        padding = 0
    if ('stride' in attr) or ('stride_d' in attr and 'stride_w' in attr and 'stride_h' in attr):
        stride = attr['stride'] if 'stride' in attr else (attr['stride_d'], attr['stride_h'], attr['stride_w'])
    else:
        stride = 1
    if method == 'max':
        pool = nn.MaxPool3d(ks, stride, padding,
                            ceil_mode=True)  # all Caffe pooling uses ceil mode
    elif method == 'ave':
        pool = nn.AvgPool3d(ks, stride, padding,
                            ceil_mode=True)  # all Caffe pooling uses ceil mode
    else:
        raise ValueError("Unknown pooling method: {}".format(method))
    return pool, channels
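A usage sketch for the converter above (not from the original source), assuming build_pooling3d is in scope; the attr dict mimics a Caffe-style pooling layer description using only keys the function actually reads:

attr = {'mode': 'ave', 'kernel_size': 2, 'stride': 2, 'pad': 0}
pool, channels = build_pooling3d(attr, channels=64)
print(pool)                                 # an nn.AvgPool3d(2, 2, 0) built with ceil_mode=True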
Example 9: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
# Also requires: import math (used by math.ceil below)
def __init__(self, block, layers, sample_size, sample_duration, shortcut_type='B', num_classes=400):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2),
                           padding=(3, 3, 3), bias=False)
    self.bn1 = nn.BatchNorm3d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
    self.layer2 = self._make_layer(block, 128, layers[1], shortcut_type, stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], shortcut_type, stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], shortcut_type, stride=2)
    last_duration = math.ceil(sample_duration / 16)
    last_size = math.ceil(sample_size / 32)
    self.avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1)
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm3d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
            m.eval()
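The pooling kernel above is derived from the clip length and spatial size. A short check of that arithmetic under the common assumption of 16-frame, 112x112 inputs (these concrete values are not stated in the excerpt):

import math
from torch import nn

sample_duration, sample_size = 16, 112
last_duration = math.ceil(sample_duration / 16)   # 1
last_size = math.ceil(sample_size / 32)           # 4
avgpool = nn.AvgPool3d((last_duration, last_size, last_size), stride=1)
# collapses a (N, 512 * block.expansion, 1, 4, 4) feature map to (N, 512 * block.expansion, 1, 1, 1)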
Example 10: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
def __init__(self, n_channels=1, nlabels=1, init_filters=32):
    nf = init_filters
    super(FCNBN, self).__init__()
    self.encoder = nn.Sequential(
        conv3d_bn_block(n_channels, nf),
        nn.MaxPool3d(2),
        conv3d_bn_block(nf, 2 * nf),
        nn.MaxPool3d(2),
        conv3d_bn_block(2 * nf, 4 * nf),
        conv3d_bn_block(4 * nf, 4 * nf),
        nn.MaxPool3d(2),
        conv3d_bn_block(4 * nf, 8 * nf),
        conv3d_bn_block(8 * nf, 8 * nf),
        nn.MaxPool3d(2),
        conv3d_bn_block(8 * nf, 8 * nf),
        conv3d_bn_block(8 * nf, 8 * nf),
        conv3d_bn_block(8 * nf, 8 * nf),
    )
    self.classifier = nn.Sequential(
        conv3d_bn_block(8 * nf, nlabels, kernel=1, activation=Identity),
        nn.AvgPool3d(2),
    )
Developer: orobix, Project: Visual-Feature-Attribution-Using-Wasserstein-GANs-Pytorch, Lines: 24, Source file: classifiers_3D.py
Example 11: forward
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
# Also requires: import torch (used by torch.cat below)
def forward(self, x):
    out = self.bn3d_list[0](self.conv3d_list[0](x))
    # concatenate multiple atrous rates
    for i in range(len(self.conv3d_list) - 1):
        # XXX add batch norm?
        out = torch.cat([out, self.bn3d_list[i + 1](self.conv3d_list[i + 1](x))], 1)
    # concatenate global avg pooling (avg global pool -> 1x1 conv (256 filter) -> batchnorm -> interpolate -> concat)
    self.glob_avg_pool = nn.AvgPool3d(kernel_size=(x.size()[2], x.size()[3], x.size()[4]))
    self.iterp_orig = nn.Upsample(size=(out.size()[2], out.size()[3], out.size()[4]), mode='trilinear')
    out = torch.cat([out, self.iterp_orig(self.bn1(self.conv1x1_1(self.glob_avg_pool(x))))], 1)
    # concatenate 1x1 convolution
    out = torch.cat([out, self.conv1x1_2(x)], 1)
    # apply batch norm on concatenated output
    out = self.bn2(out)
    out = self.relu(out)
    # apply 1x1 convolution to get back to output number filters
    out = self.conv1x1_3(out)
    # apply last batch norm
    out = self.bn3(out)
    out = self.relu(out)
    return out

# replaced transpose convolutions with trilinear interpolation
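The global-pooling branch above builds an nn.AvgPool3d whose kernel equals the full input extent, which is equivalent to adaptive average pooling to a single voxel. A standalone sketch of that equivalence with an assumed input shape:

import torch
from torch import nn

x = torch.randn(1, 256, 4, 12, 12)
glob_avg_pool = nn.AvgPool3d(kernel_size=(x.size(2), x.size(3), x.size(4)))
adaptive = nn.AdaptiveAvgPool3d(1)
print(torch.allclose(glob_avg_pool(x), adaptive(x)))   # True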
Example 12: forward
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
# Also requires: import torch (used by torch.cat below)
def forward(self, x):
    out = self.bn3d_list[0](self.conv3d_list[0](x))
    # concatenate multiple atrous rates
    for i in range(len(self.conv3d_list) - 1):
        # XXX add batch norm?
        out = torch.cat([out, self.bn3d_list[i + 1](self.conv3d_list[i + 1](x))], 1)
    # concatenate global avg pooling (avg global pool -> 1x1 conv (256 filter) -> batchnorm -> interpolate -> concat)
    self.glob_avg_pool = nn.AvgPool3d(kernel_size=(x.size()[2], x.size()[3], x.size()[4]))
    self.iterp_orig = nn.Upsample(size=(out.size()[2], out.size()[3], out.size()[4]), mode='trilinear')
    out = torch.cat([out, self.iterp_orig(self.bn1(self.conv1x1_1(self.glob_avg_pool(x))))], 1)
    # concatenate 1x1 convolution
    out = torch.cat([out, self.conv1x1_2(x)], 1)
    # apply batch norm on concatenated output
    out = self.bn2(out)
    # apply 1x1 convolution to get back to 256 filters
    out = self.conv1x1_3(out)
    # apply last batch norm
    out = self.bn3(out)
    # apply 1x1 convolution to get last labels
    out = self.conv1x1_4(out)
    return out
Example 13: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
# Also requires: import math (used by math.ceil below)
def __init__(self,
             block,
             layers,
             spatial_size,
             sample_duration,
             shortcut_type='B',
             cardinality=32,
             num_classes=400):
    self.inplanes = 64
    super(ResNeXt, self).__init__()
    self.conv1 = nn.Conv3d(
        3,
        64,
        kernel_size=7,
        stride=(1, 2, 2),
        padding=(3, 3, 3),
        bias=False)
    self.bn1 = nn.BatchNorm3d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
    self.layer1 = self._make_layer(block, 128, layers[0], shortcut_type,
                                   cardinality)
    self.layer2 = self._make_layer(
        block, 256, layers[1], shortcut_type, cardinality, stride=2)
    self.layer3 = self._make_layer(
        block, 512, layers[2], shortcut_type, cardinality, stride=2)
    self.layer4 = self._make_layer(
        block, 1024, layers[3], shortcut_type, cardinality, stride=2)
    last_duration = int(math.ceil(sample_duration / 16))
    last_size = int(math.ceil(spatial_size / 32))
    self.avgpool = nn.AvgPool3d(
        (last_duration, last_size, last_size), stride=1)
    self.fc = nn.Linear(cardinality * 32 * block.expansion, num_classes)
    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
        elif isinstance(m, nn.BatchNorm3d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example 14: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
# Also requires: import math (used by math.ceil below)
def __init__(self,
             block,
             layers,
             spatial_size,
             sample_duration,
             shortcut_type='B',
             num_classes=400):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv3d(
        3,
        64,
        kernel_size=7,
        stride=(1, 2, 2),
        padding=(3, 3, 3),
        bias=False)
    self.bn1 = nn.BatchNorm3d(64)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
    self.layer2 = self._make_layer(
        block, 128, layers[1], shortcut_type, stride=2)
    self.layer3 = self._make_layer(
        block, 256, layers[2], shortcut_type, stride=2)
    self.layer4 = self._make_layer(
        block, 512, layers[3], shortcut_type, stride=2)
    last_duration = int(math.ceil(sample_duration / 16))
    last_size = int(math.ceil(spatial_size / 32))
    self.avgpool = nn.AvgPool3d(
        (last_duration, last_size, last_size), stride=1)
    self.fc = nn.Linear(512 * block.expansion, num_classes)
    for m in self.modules():
        if isinstance(m, nn.Conv3d):
            m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
        elif isinstance(m, nn.BatchNorm3d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example 15: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AvgPool3d [as alias]
def __init__(self, num_input_features, num_output_features):
    super(_Transition, self).__init__()
    self.add_module('norm', nn.BatchNorm3d(num_input_features))
    self.add_module('relu', nn.ReLU(inplace=True))
    self.add_module('conv',
                    nn.Conv3d(
                        num_input_features,
                        num_output_features,
                        kernel_size=1,
                        stride=1,
                        bias=False))
    self.add_module('pool', nn.AvgPool3d(kernel_size=2, stride=2))
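The transition's nn.AvgPool3d(kernel_size=2, stride=2) halves the depth, height and width between dense blocks. A minimal shape sketch of that layer alone, with an assumed feature volume:

import torch
from torch import nn

pool = nn.AvgPool3d(kernel_size=2, stride=2)
feat = torch.randn(1, 128, 8, 28, 28)       # (N, C, D, H, W)
print(pool(feat).shape)                     # torch.Size([1, 128, 4, 14, 14])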