This article collects typical usage examples of torch.nn.Conv3d in Python. If you are wondering what nn.Conv3d does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore the other members of the torch.nn package for related usage.

The following section presents 15 code examples of nn.Conv3d, sorted by popularity by default.
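Before the examples, here is a minimal standalone sketch of the layer itself (the shapes and channel counts are illustrative, not taken from any example below): nn.Conv3d expects a 5-D input of shape (batch, channels, depth, height, width).

import torch
from torch import nn

# Minimal sketch: a size-preserving 3x3x3 convolution over single-channel volumes.
conv = nn.Conv3d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)
x = torch.randn(2, 1, 32, 64, 64)   # (batch, channels, depth, height, width)
print(conv(x).shape)                # torch.Size([2, 16, 32, 64, 64])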
Example 1: __init__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __init__(self, inplanes, planes, stride=1, dilation_=1, downsample=None):
    super(Bottleneck, self).__init__()
    self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
    self.bn1 = nn.BatchNorm3d(planes, affine=affine_par)
    for i in self.bn1.parameters():
        i.requires_grad = False
    # padding equal to the dilation keeps the dilated 3x3x3 conv size-preserving
    # (previously hard-coded as 2 or 4 for those dilation values)
    padding = dilation_
    self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=1, padding=padding,
                           bias=False, dilation=dilation_)
    self.bn2 = nn.BatchNorm3d(planes, affine=affine_par)
    for i in self.bn2.parameters():
        i.requires_grad = False
    self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
    self.bn3 = nn.BatchNorm3d(planes * 4, affine=affine_par)
    for i in self.bn3.parameters():
        i.requires_grad = False
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
    self.stride = stride
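A quick standalone check of the padding rule above (this is a lone conv layer, not the full block): with a 3x3x3 kernel, setting the padding equal to the dilation leaves the spatial dimensions unchanged.

import torch
from torch import nn

# padding == dilation keeps D, H, W unchanged for a 3x3x3 kernel
conv = nn.Conv3d(64, 64, kernel_size=3, padding=2, dilation=2, bias=False)
print(conv(torch.randn(1, 64, 8, 32, 32)).shape)  # torch.Size([1, 64, 8, 32, 32])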
Example 2: __call__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __call__(self, module):
    if isinstance(module, (nn.Conv2d, nn.Conv3d)):
        self.initializer(
            module.weight.data,
            self.slope,
            self.mode,
            self.nonlinearity)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
        if module.weight is not None:
            module.weight.data.fill_(1)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Linear):
        self.initializer(
            module.weight.data,
            self.slope,
            self.mode,
            self.nonlinearity)
        if module.bias is not None:
            module.bias.data.zero_()
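A __call__ like this is meant to be passed to nn.Module.apply. Below is a self-contained sketch of the same pattern; the class name, constructor defaults, and the choice of nn.init.kaiming_normal_ are assumptions, only the per-module branching mirrors the example.

from torch import nn

class InitWeights:  # hypothetical wrapper, not from the original repository
    def __init__(self, slope=0.0, mode='fan_in', nonlinearity='leaky_relu'):
        self.initializer = nn.init.kaiming_normal_
        self.slope, self.mode, self.nonlinearity = slope, mode, nonlinearity

    def __call__(self, module):
        if isinstance(module, (nn.Conv2d, nn.Conv3d, nn.Linear)):
            self.initializer(module.weight.data, self.slope, self.mode, self.nonlinearity)
            if module.bias is not None:
                module.bias.data.zero_()

model = nn.Sequential(nn.Conv3d(1, 8, kernel_size=3, padding=1), nn.BatchNorm3d(8))
model.apply(InitWeights())  # apply() visits every submodule recursively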
Example 3: __init__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __init__(self, inplanes, outplanes, padding_=1, stride=1, dilation_=1):
    super(HighResNetBlock, self).__init__()
    # two convolutions with the same dilation form one residual block
    self.conv1 = nn.Conv3d(inplanes, outplanes, kernel_size=3, stride=1,
                           padding=padding_, bias=False, dilation=dilation_)
    self.conv2 = nn.Conv3d(outplanes, outplanes, kernel_size=3, stride=1,
                           padding=padding_, bias=False, dilation=dilation_)
    self.bn1 = nn.BatchNorm3d(outplanes, affine=affine_par)
    for i in self.bn1.parameters():
        i.requires_grad = False
    self.bn2 = nn.BatchNorm3d(outplanes, affine=affine_par)
    for i in self.bn2.parameters():
        i.requires_grad = False
    self.relu = nn.PReLU()
    self.diff_dims = (inplanes != outplanes)
    # 1x1x1 projection for the residual path when the channel counts differ
    self.downsample = nn.Sequential(
        nn.Conv3d(inplanes, outplanes, kernel_size=1, stride=stride, bias=False),
        nn.BatchNorm3d(outplanes, affine=affine_par)
    )
    for i in self.downsample._modules['1'].parameters():
        i.requires_grad = False
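When inplanes != outplanes, the residual path above needs that 1x1x1 projection. A standalone sketch of just the projection (the channel counts are illustrative):

import torch
from torch import nn

proj = nn.Conv3d(16, 32, kernel_size=1, stride=1, bias=False)  # channel projection only
print(proj(torch.randn(1, 16, 8, 24, 24)).shape)               # torch.Size([1, 32, 8, 24, 24])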
Example 4: _make_layer

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def _make_layer(self, block, planes, blocks, stride=1, dilation__=1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion or dilation__ == 2 or dilation__ == 4:
        downsample = nn.Sequential(
            nn.Conv3d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm3d(planes * block.expansion, affine=affine_par),
        )
        for i in downsample._modules['1'].parameters():
            i.requires_grad = False
    layers = []
    layers.append(block(self.inplanes, planes, stride, dilation_=dilation__, downsample=downsample))
    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(block(self.inplanes, planes, dilation_=dilation__))
    return nn.Sequential(*layers)
Example 5: __init__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
    super(Bottleneck, self).__init__()
    self.out_planes = out_planes
    self.dense_depth = dense_depth
    self.last_planes = last_planes
    self.in_planes = in_planes
    self.conv1 = nn.Conv3d(last_planes, in_planes, kernel_size=1, bias=False)
    self.bn1 = nn.BatchNorm3d(in_planes)
    self.conv2 = nn.Conv3d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=8, bias=False)
    self.bn2 = nn.BatchNorm3d(in_planes)
    self.conv3 = nn.Conv3d(in_planes, out_planes + dense_depth, kernel_size=1, bias=False)
    self.bn3 = nn.BatchNorm3d(out_planes + dense_depth)
    self.shortcut = nn.Sequential()
    if first_layer:
        self.shortcut = nn.Sequential(
            nn.Conv3d(last_planes, out_planes + dense_depth, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm3d(out_planes + dense_depth)
        )
Example 6: __init__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __init__(self, cfg):
    super(DPN, self).__init__()
    in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
    num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']
    self.conv1 = nn.Conv3d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
    self.bn1 = nn.BatchNorm3d(64)
    self.last_planes = 64
    self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
    self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
    self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
    self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
    # 2 output classes (a leftover comment in the source shows this was previously 10)
    self.linear = nn.Linear(out_planes[3] + (num_blocks[3] + 1) * dense_depth[3], 2)
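The classifier width follows from the dual-path bookkeeping: the last stage outputs out_planes[3] residual channels plus (num_blocks[3] + 1) dense slices of dense_depth[3] channels each. An illustrative check (the numbers below are assumptions in the spirit of DPN, not taken from the repository's cfg):

out_planes  = (256, 512, 1024, 2048)
num_blocks  = (3, 4, 20, 3)
dense_depth = (16, 32, 24, 128)
in_features = out_planes[3] + (num_blocks[3] + 1) * dense_depth[3]
print(in_features)  # 2048 + 4 * 128 = 2560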
Example 7: __init__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __init__(self, inplanes, planes, stride=1, downsample=None, track_running_stats=True, use_final_relu=True):
    super(Bottleneck3d, self).__init__()
    bias = False
    self.use_final_relu = use_final_relu
    self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=bias)
    self.bn1 = nn.BatchNorm3d(planes, track_running_stats=track_running_stats)
    self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias)
    self.bn2 = nn.BatchNorm3d(planes, track_running_stats=track_running_stats)
    self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=bias)
    self.bn3 = nn.BatchNorm3d(planes * 4, track_running_stats=track_running_stats)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
    self.stride = stride
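A side note on the track_running_stats flag the block exposes (standalone sketch): when it is False, the norm layer keeps no running statistics and always normalizes with the current batch.

from torch import nn

bn = nn.BatchNorm3d(8, track_running_stats=False)
print(bn.running_mean)  # None -- batch statistics are used in both train and eval mode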
Example 8: __init__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __init__(self, in_size, gate_size, inter_size, nonlocal_mode, sub_sample_factor):
    super(MultiAttentionBlock, self).__init__()
    self.gate_block_1 = GridAttentionBlock3D(in_channels=in_size, gating_channels=gate_size,
                                             inter_channels=inter_size, mode=nonlocal_mode,
                                             sub_sample_factor=sub_sample_factor)
    self.gate_block_2 = GridAttentionBlock3D(in_channels=in_size, gating_channels=gate_size,
                                             inter_channels=inter_size, mode=nonlocal_mode,
                                             sub_sample_factor=sub_sample_factor)
    self.combine_gates = nn.Sequential(nn.Conv3d(in_size * 2, in_size, kernel_size=1, stride=1, padding=0),
                                       nn.BatchNorm3d(in_size),
                                       nn.ReLU(inplace=True))

    # initialise the blocks
    for m in self.children():
        if m.__class__.__name__.find('GridAttentionBlock3D') != -1:
            continue
        init_weights(m, init_type='kaiming')
Example 9: __init__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __init__(self, inChannels, outChannels, kernelSize=1, stride=1, padding=0):
    super(NewConvBnRelu3D, self).__init__()
    self.inChannels = inChannels
    self.outChannels = outChannels
    self.kernelSize = kernelSize
    self.stride = stride
    self.padding = padding
    self.relu = nn.LeakyReLU()
    self.bn = nn.BatchNorm3d(self.inChannels)
    # choose the convolution type from the kernel size:
    # int 1 -> Conv1d, any other int -> Conv3d,
    # tuple starting with 1 -> Conv2d, other tuples -> Conv3d
    if kernelSize == 1:
        self.conv = nn.Conv1d(self.inChannels, self.outChannels, self.kernelSize, self.stride, self.padding)
    elif isinstance(kernelSize, int):
        self.conv = nn.Conv3d(self.inChannels, self.outChannels, self.kernelSize, self.stride, self.padding)
    elif kernelSize[0] == 1:
        self.conv = nn.Conv2d(self.inChannels, self.outChannels, self.kernelSize[1:], self.stride, self.padding)
    else:
        self.conv = nn.Conv3d(self.inChannels, self.outChannels, self.kernelSize, self.stride, self.padding)
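A quick usage sketch of that dispatch (the argument values are illustrative, and it assumes the class definition from Example 9 is in scope):

block = NewConvBnRelu3D(inChannels=4, outChannels=8, kernelSize=3, stride=1, padding=1)
print(type(block.conv).__name__)  # Conv3d -- an int kernel size other than 1 picks the 3-D conv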
Example 10: __init__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __init__(self, in_channels, middle_channels, out_channels, dropout=False):
    super(First3D, self).__init__()
    layers = [
        nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
        nn.BatchNorm3d(middle_channels),
        nn.ReLU(inplace=True),
        nn.Conv3d(middle_channels, out_channels, kernel_size=3, padding=1),
        nn.BatchNorm3d(out_channels),
        nn.ReLU(inplace=True)
    ]
    if dropout:
        assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
        layers.append(nn.Dropout3d(p=dropout))
    self.first = nn.Sequential(*layers)
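Here the dropout argument doubles as a flag and a probability. A usage sketch (channel counts and the 0.3 rate are illustrative; since the class's forward is not shown in this example, the Sequential is called directly):

import torch

block = First3D(in_channels=1, middle_channels=32, out_channels=64, dropout=0.3)
y = block.first(torch.randn(1, 1, 16, 64, 64))
print(y.shape)  # torch.Size([1, 64, 16, 64, 64])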
Example 11: __init__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __init__(self, inplanes, planes, cardinality, stride=1, downsample=None):
    super(ResNeXtBottleneck, self).__init__()
    mid_planes = cardinality * int(planes / 32)
    self.conv1 = nn.Conv3d(inplanes, mid_planes, kernel_size=1, bias=False)
    self.bn1 = nn.BatchNorm3d(mid_planes)
    self.conv2 = nn.Conv3d(
        mid_planes,
        mid_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=cardinality,
        bias=False)
    self.bn2 = nn.BatchNorm3d(mid_planes)
    self.conv3 = nn.Conv3d(
        mid_planes, planes * self.expansion, kernel_size=1, bias=False)
    self.bn3 = nn.BatchNorm3d(planes * self.expansion)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
    self.stride = stride
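An illustrative check of the width rule above (cardinality and planes are example values; self.expansion is a class attribute not shown here): the grouped 3x3x3 conv keeps mid_planes channels but splits them into cardinality groups.

from torch import nn

cardinality, planes = 32, 128
mid_planes = cardinality * int(planes / 32)          # 32 * 4 = 128
conv2 = nn.Conv3d(mid_planes, mid_planes, kernel_size=3,
                  padding=1, groups=cardinality, bias=False)
print(conv2.weight.shape)  # torch.Size([128, 4, 3, 3, 3]) -- in_channels / groups = 4 per filter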
Example 12: _make_layer

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        if shortcut_type == 'A':
            # shortcut type 'A': parameter-free downsampling via downsample_basic_block
            downsample = partial(
                downsample_basic_block,
                planes=planes * block.expansion,
                stride=stride)
        else:
            # learned shortcut: 1x1x1 projection convolution
            downsample = nn.Sequential(
                nn.Conv3d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False), nn.BatchNorm3d(planes * block.expansion))
    layers = []
    layers.append(block(self.inplanes, planes, stride, downsample))
    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(block(self.inplanes, planes))
    return nn.Sequential(*layers)
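The 'A' branch relies on functools.partial to pre-bind everything except the input tensor (downsample_basic_block itself is defined elsewhere in the source). A minimal illustration of that pattern with a hypothetical stand-in function:

import torch
from functools import partial

def downsample_stub(x, planes, stride):  # hypothetical stand-in, not the real helper
    return x

downsample = partial(downsample_stub, planes=256, stride=2)
print(downsample(torch.zeros(1)).shape)  # only the input tensor is left to supply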
Example 13: __init__

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
    super(_DenseLayer, self).__init__()
    self.add_module('norm1', nn.BatchNorm3d(num_input_features))
    self.add_module('relu1', nn.ReLU(inplace=True))
    self.add_module('conv1',
                    nn.Conv3d(
                        num_input_features,
                        bn_size * growth_rate,
                        kernel_size=1,
                        stride=1,
                        bias=False))
    self.add_module('norm2', nn.BatchNorm3d(bn_size * growth_rate))
    self.add_module('relu2', nn.ReLU(inplace=True))
    self.add_module('conv2',
                    nn.Conv3d(
                        bn_size * growth_rate,
                        growth_rate,
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        bias=False))
    self.drop_rate = drop_rate
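A usage sketch with illustrative numbers (it assumes the class above is in scope; its forward pass is not shown in this example): the 1x1x1 conv expands to bn_size * growth_rate channels and the padded 3x3x3 conv reduces back to growth_rate.

layer = _DenseLayer(num_input_features=64, growth_rate=32, bn_size=4, drop_rate=0.0)
print(layer.conv1.out_channels, layer.conv2.out_channels)  # 128 32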
Example 14: conv_block_3d

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def conv_block_3d(in_dim, out_dim, activation):
    return nn.Sequential(
        nn.Conv3d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm3d(out_dim),
        activation,
    )
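The activation is passed in as a module, so the same helper builds ReLU or LeakyReLU variants. A usage sketch (shapes are illustrative, assuming the helper above is in scope):

import torch
from torch import nn

block = conv_block_3d(in_dim=1, out_dim=16, activation=nn.ReLU(inplace=True))
print(block(torch.randn(1, 1, 8, 32, 32)).shape)  # torch.Size([1, 16, 8, 32, 32])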
Example 15: conv_block_2_3d

# Required import: from torch import nn
# Or: from torch.nn import Conv3d
def conv_block_2_3d(in_dim, out_dim, activation):
    return nn.Sequential(
        conv_block_3d(in_dim, out_dim, activation),
        nn.Conv3d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm3d(out_dim),
    )
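Note that this double block deliberately ends on BatchNorm3d with no trailing activation, a common choice when the caller adds a residual connection or its own nonlinearity afterwards. A usage sketch (shapes are illustrative, assuming both helpers above are in scope):

import torch
from torch import nn

double = conv_block_2_3d(in_dim=1, out_dim=16, activation=nn.LeakyReLU(0.2))
print(double(torch.randn(1, 1, 8, 32, 32)).shape)  # torch.Size([1, 16, 8, 32, 32])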