

Python nn.MaxPool3d Code Examples

This article collects typical usage examples of torch.nn.MaxPool3d in Python. If you are wondering what nn.MaxPool3d does, how to use it, or where to find real-world examples, the curated code samples below may help. You can also explore further usage examples from its containing module, torch.nn.


Below are 15 code examples of nn.MaxPool3d, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.

Example 1: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def __init__(self, block, layers, track_running_stats=True):
        super(ResNet2d3d_full, self).__init__()
        self.inplanes = 64
        self.track_running_stats = track_running_stats
        bias = False
        self.conv1 = nn.Conv3d(3, 64, kernel_size=(1,7,7), stride=(1, 2, 2), padding=(0, 3, 3), bias=bias)
        self.bn1 = nn.BatchNorm3d(64, track_running_stats=track_running_stats)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
        
        if not isinstance(block, list):
            block = [block] * 4

        self.layer1 = self._make_layer(block[0], 64, layers[0])
        self.layer2 = self._make_layer(block[1], 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block[2], 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block[3], 256, layers[3], stride=2, is_final=True)
        # modify layer4 from exp=512 to exp=256
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None: m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Author: TengdaHan | Project: DPC | Lines: 27 | Source: resnet_2d3d.py
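
A minimal standalone sketch (not part of the DPC code, sizes chosen for illustration) that reproduces the stem above and checks its output shape. Because the kernel, stride and padding of both the first convolution and the max pool are 1 along the temporal axis, only height and width are downsampled and the clip length is preserved:

import torch
from torch import nn

# Stem of the 2D-3D ResNet above: spatial-only downsampling.
stem = nn.Sequential(
    nn.Conv3d(3, 64, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False),
    nn.BatchNorm3d(64),
    nn.ReLU(inplace=True),
    nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)),
)

x = torch.randn(2, 3, 8, 128, 128)   # (N, C, T, H, W)
print(stem(x).shape)                 # torch.Size([2, 64, 8, 32, 32])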

Example 2: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def __init__(
            self, in_channels, middle_channels, out_channels,
            dropout=False, downsample_kernel=2
    ):
        super(Encoder3D, self).__init__()

        layers = [
            nn.MaxPool3d(kernel_size=downsample_kernel),
            nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
            nn.BatchNorm3d(middle_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(middle_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm3d(out_channels),
            nn.ReLU(inplace=True)
        ]

        if dropout:
            assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
            layers.append(nn.Dropout3d(p=dropout))

        self.encoder = nn.Sequential(*layers) 
Author: cosmic-cortex | Project: pytorch-UNet | Lines: 23 | Source: blocks.py
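
Since nn.MaxPool3d defaults stride to kernel_size, passing only kernel_size=downsample_kernel is enough for this encoder block to halve depth, height and width when downsample_kernel=2. A quick shape check with assumed tensor sizes:

import torch
from torch import nn

pool = nn.MaxPool3d(kernel_size=2)   # stride defaults to kernel_size
x = torch.randn(1, 32, 16, 64, 64)   # (N, C, D, H, W)
print(pool(x).shape)                 # torch.Size([1, 32, 8, 32, 32])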

Example 3: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def __init__(self):
        super(Mixed_3b, self).__init__()

        self.branch0 = nn.Sequential(
            BasicConv3d(192, 64, kernel_size=1, stride=1),
        )
        self.branch1 = nn.Sequential(
            BasicConv3d(192, 96, kernel_size=1, stride=1),
            SepConv3d(96, 128, kernel_size=3, stride=1, padding=1),
        )
        self.branch2 = nn.Sequential(
            BasicConv3d(192, 16, kernel_size=1, stride=1),
            SepConv3d(16, 32, kernel_size=3, stride=1, padding=1),
        )
        self.branch3 = nn.Sequential(
            nn.MaxPool3d(kernel_size=(3,3,3), stride=1, padding=1),
            BasicConv3d(192, 32, kernel_size=1, stride=1),
        ) 
Author: MichiganCOG | Project: TASED-Net | Lines: 20 | Source: model.py
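
In the pooling branch, kernel_size=3 with stride=1 and padding=1 leaves (T, H, W) unchanged, so all four branch outputs share the same spatio-temporal size and can be concatenated along the channel axis. A small shape check with assumed input sizes:

import torch
from torch import nn

pool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1)
x = torch.randn(1, 192, 16, 28, 28)   # (N, C, T, H, W)
print(pool(x).shape)                  # torch.Size([1, 192, 16, 28, 28]) -- shape preserved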

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def __init__(self, num_features_out: int, anchor_ratios: List[Tuple[int, int]], anchor_sizes: List[int],
                 pre_nms_top_n: int, post_nms_top_n: int, anchor_smooth_l1_loss_beta: float):
        super().__init__()
        self.maxpool3d = nn.MaxPool3d(kernel_size=(64, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0))
        self._features = nn.Sequential(
            nn.Conv2d(in_channels=num_features_out, out_channels=512, kernel_size=3, padding=1),
            nn.ReLU()
        )

        self._anchor_ratios = anchor_ratios
        self._anchor_sizes = anchor_sizes

        num_anchor_ratios = len(self._anchor_ratios)
        num_anchor_sizes = len(self._anchor_sizes)
        num_anchors = num_anchor_ratios * num_anchor_sizes

        self._pre_nms_top_n = pre_nms_top_n
        self._post_nms_top_n = post_nms_top_n
        self._anchor_smooth_l1_loss_beta = anchor_smooth_l1_loss_beta

        self._anchor_objectness = nn.Conv2d(in_channels=512, out_channels=num_anchors * 2, kernel_size=1)
        self._anchor_transformer = nn.Conv2d(in_channels=512, out_channels=num_anchors * 4, kernel_size=1) 
Author: MagicChuyi | Project: SlowFast-Network-pytorch | Lines: 24 | Source: region_proposal_network.py
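
The unusual kernel_size=(64, 1, 1) with stride 1 takes a per-position maximum over a 64-long leading axis, collapsing it to length 1 so the result can be squeezed into a 2D map for the Conv2d head. A sketch with assumed tensor sizes:

import torch
from torch import nn

maxpool3d = nn.MaxPool3d(kernel_size=(64, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0))
features = torch.randn(1, 256, 64, 14, 14)   # (N, C, T, H, W); illustrative sizes
pooled = maxpool3d(features)                 # (1, 256, 1, 14, 14)
print(pooled.squeeze(2).shape)               # torch.Size([1, 256, 14, 14])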

Example 5: inflate_pool

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def inflate_pool(pool2d,
                 time_dim=1,
                 time_padding=0,
                 time_stride=None,
                 time_dilation=1):
    kernel_dim = (time_dim, pool2d.kernel_size, pool2d.kernel_size)
    padding = (time_padding, pool2d.padding, pool2d.padding)
    if time_stride is None:
        time_stride = time_dim
    stride = (time_stride, pool2d.stride, pool2d.stride)
    if isinstance(pool2d, nn.MaxPool2d):
        dilation = (time_dilation, pool2d.dilation, pool2d.dilation)
        pool3d = nn.MaxPool3d(
            kernel_dim,
            padding=padding,
            dilation=dilation,
            stride=stride,
            ceil_mode=pool2d.ceil_mode)
    elif isinstance(pool2d, nn.AvgPool2d):
        pool3d = nn.AvgPool3d(kernel_dim, stride=stride)
    else:
        raise ValueError(
            '{} is not among known pooling classes'.format(type(pool2d)))
    return pool3d 
Author: guxinqian | Project: TKP | Lines: 26 | Source: inflate.py
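
A usage sketch for inflate_pool as defined above (input sizes are assumptions): a 2D max pool from an image backbone is inflated into its 3D counterpart; with time_dim=1 the inflated pool acts purely spatially and leaves the temporal axis untouched:

import torch
from torch import nn

pool2d = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
pool3d = inflate_pool(pool2d, time_dim=1)    # -> MaxPool3d((1, 3, 3), stride=(1, 2, 2), ...)

x = torch.randn(1, 64, 8, 56, 56)            # (N, C, T, H, W)
print(pool3d(x).shape)                       # torch.Size([1, 64, 8, 28, 28])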

Example 6: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def __init__(self, block, layers, shortcut_type='B', cardinality=32, num_classes=400):
        self.inplanes = 64
        super(ResNeXt3D, self).__init__()
        self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
        self.layer1 = self._make_layer(block, 128, layers[0], shortcut_type, cardinality)
        self.layer2 = self._make_layer(block, 256, layers[1], shortcut_type, cardinality, stride=2)
        self.layer3 = self._make_layer(block, 512, layers[2], shortcut_type, cardinality, stride=2)
        self.layer4 = self._make_layer(block, 1024, layers[3], shortcut_type, cardinality, stride=2)
        self.avgpool = nn.AdaptiveAvgPool3d(1)
        self.fc = nn.Linear(cardinality * 32 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Author: alexandonian | Project: pretorched-x | Lines: 22 | Source: resnext3D.py
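
Unlike the spatial-only pooling in example 1, this stem pools with kernel_size=3, stride=2 and padding=1 along all three axes, so the temporal dimension is halved together with height and width. A shape check with assumed input sizes:

import torch
from torch import nn

pool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
x = torch.randn(1, 64, 16, 56, 56)   # (N, C, T, H, W)
print(pool(x).shape)                 # torch.Size([1, 64, 8, 28, 28])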

Example 7: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def __init__(self,
                 block,
                 layers,
                 nonlocal_layers,
                 shortcut_type='A',
                 num_classes=339):
        self.inplanes = 64
        super().__init__()
        self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], nonlocal_layers[0], shortcut_type)
        self.layer2 = self._make_layer(block, 128, layers[1], nonlocal_layers[1], shortcut_type, stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], nonlocal_layers[2], shortcut_type, stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], nonlocal_layers[3], shortcut_type, stride=2)
        self.avgpool = nn.AdaptiveAvgPool3d(1)
        self.last_linear = nn.Linear(512 * block.expansion, num_classes)

        self.init_weights() 
Author: alexandonian | Project: pretorched-x | Lines: 22 | Source: nonlocalnet.py

Example 8: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def __init__(self,
                 block,
                 layers,
                 k=1,
                 shortcut_type='B',
                 num_classes=400):
        self.inplanes = 64
        super(WideResNet, self).__init__()
        self.conv1 = nn.Conv3d(3, 64, kernel_size=7, stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64 * k, layers[0], shortcut_type)
        self.layer2 = self._make_layer(block, 128 * k, layers[1], shortcut_type, stride=2)
        self.layer3 = self._make_layer(block, 256 * k, layers[2], shortcut_type, stride=2)
        self.layer4 = self._make_layer(block, 512 * k, layers[3], shortcut_type, stride=2)
        self.avgpool = nn.AdaptiveAvgPool3d(1)
        self.fc = nn.Linear(512 * k * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Author: alexandonian | Project: pretorched-x | Lines: 27 | Source: wideresnet3D.py

Example 9: _construct_stem

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def _construct_stem(self, dim_in, dim_out):

        assert (
            self.stride[1] == self.stride[2]
        ), "Only support identical height stride and width stride"
        self.conv = r2plus1_unit(
            dim_in,
            dim_out,
            self.stride[0],  # temporal_stride
            self.stride[1],  # spatial_stride
            1,  # groups
            self.inplace_relu,
            self.bn_eps,
            self.bn_mmt,
            dim_mid=45,  # hard-coded middle channels
        )
        self.bn = nn.BatchNorm3d(dim_out, eps=self.bn_eps, momentum=self.bn_mmt)
        self.relu = nn.ReLU(self.inplace_relu)
        if self.maxpool:
            self.pool_layer = nn.MaxPool3d(
                kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]
            ) 
Author: facebookresearch | Project: ClassyVision | Lines: 24 | Source: resnext3d_stem.py

Example 10: forward

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def forward(self, x):
        if self.dimension == 1:
            x = torch.transpose(x, 1, 2)  # [N,L,C] -> [N,C,L]
            pooling = nn.MaxPool1d(
                stride=self.stride, padding=self.padding, dilation=self.dilation,
                kernel_size=self.kernel_size if self.kernel_size is not None else x.size(-1),
                return_indices=False, ceil_mode=self.ceil_mode
            )
        elif self.dimension == 2:
            pooling = nn.MaxPool2d(
                stride=self.stride, padding=self.padding, dilation=self.dilation,
                kernel_size=self.kernel_size if self.kernel_size is not None else (x.size(-2), x.size(-1)),
                return_indices=False, ceil_mode=self.ceil_mode
            )
        else:
            pooling = nn.MaxPool3d(
                stride=self.stride, padding=self.padding, dilation=self.dilation,
                kernel_size=self.kernel_size if self.kernel_size is not None else (x.size(-3), x.size(-2), x.size(-1)),
                return_indices=False, ceil_mode=self.ceil_mode
            )
        x = pooling(x)
        return x.squeeze(dim=-1)  # [N,C,1] -> [N,C] 
Author: fastnlp | Project: fastNLP | Lines: 24 | Source: pooling.py
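
The branch above builds the pooling module on the fly; when kernel_size is None it falls back to global max pooling by using the full input extent as the kernel. The same idea for the 3D case, written out directly with assumed sizes:

import torch
from torch import nn

x = torch.randn(2, 16, 4, 8, 8)                            # (N, C, D, H, W)
pool = nn.MaxPool3d(kernel_size=(x.size(-3), x.size(-2), x.size(-1)))
print(pool(x).shape)                                       # torch.Size([2, 16, 1, 1, 1])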

Example 11: _create_layers

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def _create_layers(self):
        # num. features in downward path
        nfeat_down_out = [self.nf*(2**(i+1)) for i in range(self.li)]

        # cap the maximum number of feature layers
        nfeat_down_out = [n if n <= self.mf else self.mf for n in nfeat_down_out]
        nfeat_down_in = [self.nf] + nfeat_down_out[:-1]

        self.conv_in = ResBlock3D(self.in_features, self.nf, self.nf)
        self.conv_out = nn.Conv3d(nfeat_down_out[-1], self.out_features, kernel_size=1, stride=1)
        
        self.down_modules = [ResBlock3D(n_in, int(n/2), n) for n_in, n in zip(nfeat_down_in,
                                                                              nfeat_down_out)]
        self.down_pools = []

        prev_layer_dims = np.array(self.igres)
        for _ in range(len(nfeat_down_out)):
            pool_kernel_size, next_layer_dims = self._get_pool_kernel_size(prev_layer_dims)
            pool_layer = nn.MaxPool3d(pool_kernel_size)
            self.down_pools.append(pool_layer)
            prev_layer_dims = next_layer_dims
            
        self.down_modules = nn.ModuleList(self.down_modules)
        self.down_pools = nn.ModuleList(self.down_pools) 
Author: maxjiang93 | Project: space_time_pde | Lines: 26 | Source: unet3d.py
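
_get_pool_kernel_size is not shown in this snippet; a hypothetical sketch of that kind of per-axis logic (names and behaviour below are assumptions for illustration only) would pool an axis by 2 only while it is larger than 1, so an anisotropic grid shrinks toward a cube:

import numpy as np
import torch
from torch import nn

def get_pool_kernel_size(dims):
    # Pool each axis by 2 unless it has already collapsed to size 1.
    kernel = tuple(2 if d >= 2 else 1 for d in dims)
    return kernel, np.array(dims) // np.array(kernel)

dims = np.array([4, 32, 32])                  # (D, H, W) at the current level
kernel, next_dims = get_pool_kernel_size(dims)
pool = nn.MaxPool3d(kernel)                   # MaxPool3d((2, 2, 2)) in this case
x = torch.randn(1, 8, *dims)
print(pool(x).shape, next_dims)               # torch.Size([1, 8, 2, 16, 16]) [ 2 16 16]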

Example 12: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def __init__(self,in_channel,out_channel):
        super(S3D_G_block, self).__init__()
        # out_channel=[1x1x1,3x3x3_reduce,3x3x3,3x3x3_reduce,3x3x3,pooling_reduce]


        self.branch1 = BasicConv3d(in_channel,out_channel[0], kernel_size=(3,1,1), stride=1, padding=(1,0,0))
        self.branch2 = nn.Sequential(
            BasicConv3d(in_channel, out_channel[1], kernel_size=1, stride=1),
            BasicConv3d(out_channel[1], out_channel[1],kernel_size=(1,3,3), stride=1, padding=(0,1,1)),
            BasicConv3d(out_channel[1], out_channel[2], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0))
        )
        self.branch3 = nn.Sequential(
            BasicConv3d(in_channel, out_channel[3], kernel_size=1, stride=1),
            BasicConv3d(out_channel[3], out_channel[3], kernel_size=(1, 3, 3), stride=1, padding= (0, 1, 1)),
            BasicConv3d(out_channel[3], out_channel[4], kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0))
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool3d(kernel_size=3,stride=1,padding=1),
            BasicConv3d(in_channel, out_channel[5], kernel_size=(3,1,1), stride=1,padding=(1,0,0))
        )
        self.squeeze = nn.AdaptiveAvgPool3d(1)
        # we replace the weight matrix with a 1D conv to reduce the number of parameters
        self.excitation = nn.Conv1d(1, 1, (3,1,1), stride=1,padding=(1,0,0))
        self.sigmoid=nn.Sigmoid() 
Author: MRzzm | Project: action-recognition-models-pytorch | Lines: 26 | Source: S3D_G.py

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def __init__(self,in_channel,out_channel):
        super(Inception_block, self).__init__()
        # out_channel=[1x1x1,3x3x3_reduce,3x3x3,3x3x3_reduce,3x3x3,pooling_reduce]

        self.branch1 = BasicConv3d(in_channel,out_channel[0], kernel_size=(3,1,1), stride=1, padding=(1,0,0))
        self.branch2 = nn.Sequential(
            BasicConv3d(in_channel, out_channel[1], kernel_size=1, stride=1),
            BasicConv3d(out_channel[1], out_channel[2],kernel_size=(1,3,3), stride=1, padding=(0,1,1))
        )
        self.branch3 = nn.Sequential(
            BasicConv3d(in_channel, out_channel[3], kernel_size=1, stride=1),
            BasicConv3d(out_channel[3], out_channel[4], kernel_size=(1, 3, 3), stride=1, padding= (0, 1, 1))
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool3d(kernel_size=(1,3,3),stride=1,padding=(0,1,1)),
            BasicConv3d(in_channel, out_channel[5], kernel_size=(3,1,1), stride=1,padding=(1,0,0))
        ) 
Author: MRzzm | Project: action-recognition-models-pytorch | Lines: 19 | Source: Fast_S3D.py
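
The pooling branch here is separable in time and space: a purely spatial (1, 3, 3) max pool followed by a purely temporal (3, 1, 1) convolution. BasicConv3d is not defined in this snippet, so the sketch below (with illustrative channel sizes) substitutes a plain nn.Conv3d; both steps preserve (T, H, W):

import torch
from torch import nn

branch4 = nn.Sequential(
    nn.MaxPool3d(kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1)),
    nn.Conv3d(192, 32, kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0)),
)
x = torch.randn(1, 192, 16, 28, 28)   # (N, C, T, H, W)
print(branch4(x).shape)               # torch.Size([1, 32, 16, 28, 28])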

Example 14: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def __init__(self,in_channel,out_channel):
        super(Inception_block, self).__init__()
        # out_channel=[1x1x1,3x3x3_reduce,3x3x3,3x3x3_reduce,3x3x3,pooling_reduce]

        self.branch1 = BasicConv3d(in_channel,out_channel[0], kernel_size=1, stride=1)
        self.branch2 = nn.Sequential(
            BasicConv3d(in_channel, out_channel[1], kernel_size=1, stride=1),
            BasicConv3d(out_channel[1], out_channel[2],kernel_size=3, stride=1, padding=1)
        )
        self.branch3 = nn.Sequential(
            BasicConv3d(in_channel, out_channel[3], kernel_size=1, stride=1),
            BasicConv3d(out_channel[3], out_channel[4], kernel_size=3, stride=1, padding=1)
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool3d(kernel_size=3,stride=1,padding=1),
            BasicConv3d(in_channel, out_channel[5], kernel_size=1, stride=1),
        ) 
Author: MRzzm | Project: action-recognition-models-pytorch | Lines: 19 | Source: I3D.py

Example 15: max_pooling_3d

# Required import: from torch import nn [as alias]
# Or: from torch.nn import MaxPool3d [as alias]
def max_pooling_3d():
    return nn.MaxPool3d(kernel_size=2, stride=2, padding=0) 
Author: UdonDa | Project: 3D-UNet-PyTorch | Lines: 4 | Source: model.py
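
A usage note for max_pooling_3d as defined above: with padding=0 and the default ceil_mode=False, even-sized dimensions are halved exactly and odd-sized dimensions are floored:

import torch
from torch import nn

pool = max_pooling_3d()
x = torch.randn(1, 16, 9, 17, 17)    # (N, C, D, H, W)
print(pool(x).shape)                 # torch.Size([1, 16, 4, 8, 8])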


Note: the torch.nn.MaxPool3d examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not reproduce this article without permission.