This article collects typical usage examples of Python's torch.nn.ConvTranspose3d class. If you are wondering how nn.ConvTranspose3d is used in practice, or what working code looks like, the curated examples below should help. You can also explore further usage examples from torch.nn, the module this class belongs to.
The following 15 code examples of nn.ConvTranspose3d are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
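Before the examples, here is a minimal, self-contained sketch (not taken from any of the snippets below) of the output-shape rule most of them rely on: for dilation 1, D_out = (D_in - 1) * stride - 2 * padding + kernel_size + output_padding, so the kernel_size=2, stride=2 configuration doubles every spatial dimension.

import torch
from torch import nn

# kernel_size=2, stride=2: each of D, H, W is exactly doubled.
up = nn.ConvTranspose3d(in_channels=16, out_channels=8, kernel_size=2, stride=2)
x = torch.randn(1, 16, 8, 24, 24)   # (N, C, D, H, W)
print(up(x).shape)                  # torch.Size([1, 8, 16, 48, 48])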
Example 1: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, NoLabels):
    super(HighResNet, self).__init__()
    self.conv1 = nn.Conv3d(1, 16, kernel_size=3, stride=2, padding=1, bias=False)
    self.bn1 = nn.BatchNorm3d(16, affine=affine_par)
    # Freeze the parameters of the first batch-norm layer.
    for i in self.bn1.parameters():
        i.requires_grad = False
    self.relu = nn.PReLU()
    self.block1_1 = HighResNetBlock(inplanes=16, outplanes=16, padding_=1, dilation_=1)
    self.block2_1 = HighResNetBlock(inplanes=16, outplanes=32, padding_=2, dilation_=2)
    self.block2_2 = HighResNetBlock(inplanes=32, outplanes=32, padding_=2, dilation_=2)
    self.block3_1 = HighResNetBlock(inplanes=32, outplanes=64, padding_=4, dilation_=4)
    self.block3_2 = HighResNetBlock(inplanes=64, outplanes=64, padding_=4, dilation_=4)
    self.conv2 = nn.Conv3d(64, 80, kernel_size=1, stride=1, padding=0, bias=False)
    # Transposed convolution restores the resolution halved by the stride-2 conv1.
    self.upsample = nn.ConvTranspose3d(80, 80, kernel_size=2, stride=2, bias=False)
    self.conv3 = nn.Conv3d(80, NoLabels, kernel_size=1, stride=1, padding=0, bias=False)
Example 2: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, cf, conv):
    super(Mask, self).__init__()
    self.pool_size = cf.mask_pool_size
    self.pyramid_levels = cf.pyramid_levels
    self.dim = conv.dim
    self.conv1 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv2 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv3 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv4 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    # Choose the 2D or 3D transposed convolution to match the backbone's dimensionality.
    if conv.dim == 2:
        self.deconv = nn.ConvTranspose2d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
    else:
        self.deconv = nn.ConvTranspose3d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
    self.relu = nn.ReLU(inplace=True) if cf.relu == 'relu' else nn.LeakyReLU(inplace=True)
    self.conv5 = conv(cf.end_filts, cf.head_classes, ks=1, stride=1, relu=None)
    self.sigmoid = nn.Sigmoid()
Example 3: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):
    super(Center3D, self).__init__()
    layers = [
        nn.MaxPool3d(kernel_size=2),
        nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
        nn.BatchNorm3d(middle_channels),
        nn.ReLU(inplace=True),
        nn.Conv3d(middle_channels, out_channels, kernel_size=3, padding=1),
        nn.BatchNorm3d(out_channels),
        nn.ReLU(inplace=True),
        nn.ConvTranspose3d(out_channels, deconv_channels, kernel_size=2, stride=2),
    ]
    # `dropout` doubles as flag and probability: pass a float in (0, 1] to enable Dropout3d.
    if dropout:
        assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
        layers.append(nn.Dropout3d(p=dropout))
    self.center = nn.Sequential(*layers)
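A stand-alone sketch of this block's shape behaviour (an illustration with assumed channel counts, not code from the original project): MaxPool3d(2) halves every spatial dimension and the kernel_size=2, stride=2 transposed convolution doubles it again, so even-sized inputs leave the block with their spatial shape unchanged.

import torch
from torch import nn

# Pooling halves D/H/W, the transposed convolution doubles them again.
net = nn.Sequential(
    nn.MaxPool3d(kernel_size=2),
    nn.Conv3d(64, 128, kernel_size=3, padding=1),
    nn.ConvTranspose3d(128, 64, kernel_size=2, stride=2),
)
print(net(torch.randn(1, 64, 16, 16, 16)).shape)   # torch.Size([1, 64, 16, 16, 16])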
Example 4: deconv3d_bn

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def deconv3d_bn(in_planes, out_planes, kernel_size=4, stride=2, flag_bias=flag_bias_default, bn=flag_bn, activefun=activefun_default):
    """3D deconvolution with padding, batch norm and activation function."""
    assert stride > 1
    # Pick padding and output_padding so the output is exactly `stride` times larger.
    p = (kernel_size - 1) // 2
    op = stride - (kernel_size - 2 * p)
    deconv = ConvTranspose3d(in_planes, out_planes, kernel_size, stride, padding=p, output_padding=op, bias=flag_bias)
    if not bn and not activefun:
        return deconv
    layers = [deconv]
    if bn:
        layers.append(nn.BatchNorm3d(out_planes))  # 3D batch norm for the (N, C, D, H, W) output
    if activefun:
        layers.append(activefun)
    return nn.Sequential(*layers)
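The module-level defaults this helper references (flag_bias_default, flag_bn, activefun_default) are defined elsewhere in its source repository and are not shown on this page. The stand-alone sketch below (an illustration, not the project's code) checks the padding/output_padding arithmetic for the default kernel_size=4, stride=2 case, which yields exact 2x upsampling.

import torch
from torch import nn

k, s = 4, 2
p = (k - 1) // 2          # = 1
op = s - (k - 2 * p)      # = 0
up = nn.ConvTranspose3d(32, 16, k, s, padding=p, output_padding=op, bias=False)
print(up(torch.randn(1, 32, 4, 8, 8)).shape)   # torch.Size([1, 16, 8, 16, 16])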
Example 5: deconv3d_bn_relu

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def deconv3d_bn_relu(batchNorm, in_planes, out_planes, kernel_size=4, stride=2, padding=1, output_padding=0, bias=True):
    if batchNorm:
        return nn.Sequential(
            nn.ConvTranspose3d(
                in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                padding=padding, output_padding=output_padding, bias=bias),
            nn.BatchNorm3d(out_planes),
            nn.ReLU(inplace=True),
        )
    else:
        return nn.Sequential(
            nn.ConvTranspose3d(
                in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                padding=padding, output_padding=output_padding, bias=bias),
            nn.ReLU(inplace=True),
        )
Example 6: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self):
    super().__init__()
    self.net1 = nn.Sequential(
        Conv3dBn(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1, dilation=1, use_relu=True),
        Conv3dBn(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, dilation=1, use_relu=False)
    )
    self.net2 = nn.Sequential(
        Conv3dBn(in_channels=64, out_channels=64, kernel_size=3, stride=2, padding=1, dilation=1, use_relu=True),
        Conv3dBn(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, dilation=1, use_relu=True)
    )
    self.net3 = nn.Sequential(
        nn.ConvTranspose3d(in_channels=64, out_channels=64, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
        nn.BatchNorm3d(num_features=64)
        # nn.ReLU(inplace=True)
    )
    self.net4 = nn.Sequential(
        nn.ConvTranspose3d(in_channels=64, out_channels=32, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
        nn.BatchNorm3d(num_features=32)
    )
Example 7: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, in_channels1, in_channels2, out_channels,
             dim=2, dropout_p=0.0, bilinear=True):
    super(UpBlock, self).__init__()
    self.bilinear = bilinear
    self.dim = dim
    if bilinear:
        if dim == 2:
            self.up = nn.Sequential(
                nn.Conv2d(in_channels1, in_channels2, kernel_size=1),
                nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True))
        else:
            self.up = nn.Sequential(
                nn.Conv3d(in_channels1, in_channels2, kernel_size=1),
                nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True))
    else:
        if dim == 2:
            self.up = nn.ConvTranspose2d(in_channels1, in_channels2, kernel_size=2, stride=2)
        else:
            self.up = nn.ConvTranspose3d(in_channels1, in_channels2, kernel_size=2, stride=2)
    self.conv = ConvBlockND(in_channels2 * 2, out_channels, dim, dropout_p)
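A minimal sketch of the two upsampling paths this block can take, in the 3D case (an illustration with assumed channel counts, not part of the original UpBlock code): both double D, H and W, but only the transposed convolution learns the upsampling weights, while the interpolation path keeps its learnable parameters in the 1x1 convolution.

import torch
from torch import nn

x = torch.randn(1, 64, 8, 16, 16)

interp_path = nn.Sequential(
    nn.Conv3d(64, 32, kernel_size=1),                                 # channel reduction
    nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True),
)
deconv_path = nn.ConvTranspose3d(64, 32, kernel_size=2, stride=2)     # learned upsampling

print(interp_path(x).shape)   # torch.Size([1, 32, 16, 32, 32])
print(deconv_path(x).shape)   # torch.Size([1, 32, 16, 32, 32])

Interpolation followed by a convolution is often preferred when checkerboard artifacts from transposed convolutions are a concern, while the transposed convolution keeps the upsampling step itself trainable.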
Example 8: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, in_channels, out_channels, kernel_size,
             dim=3, stride=1, padding=0, output_padding=0,
             dilation=1, groups=1, bias=True,
             batch_norm=True, acti_func=None):
    super(DeconvolutionLayer, self).__init__()
    self.n_in_chns = in_channels
    self.n_out_chns = out_channels
    self.batch_norm = batch_norm
    self.acti_func = acti_func
    assert dim == 2 or dim == 3
    if dim == 2:
        self.conv = nn.ConvTranspose2d(in_channels, out_channels,
                                       kernel_size, stride, padding, output_padding,
                                       groups, bias, dilation)
        if self.batch_norm:
            self.bn = nn.BatchNorm2d(out_channels)
    else:
        self.conv = nn.ConvTranspose3d(in_channels, out_channels,
                                       kernel_size, stride, padding, output_padding,
                                       groups, bias, dilation)
        if self.batch_norm:
            self.bn = nn.BatchNorm3d(out_channels)
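The positional arguments passed above follow nn.ConvTranspose3d's signature, in which output_padding comes before groups, bias and dilation (unlike nn.Conv3d, where dilation precedes groups). Written out with keywords, the 3D branch is equivalent to the following sketch (default argument values assumed):

import torch
from torch import nn

deconv = nn.ConvTranspose3d(32, 16, kernel_size=2, stride=2, padding=0,
                            output_padding=0, groups=1, bias=True, dilation=1)
print(deconv(torch.randn(1, 32, 4, 4, 4)).shape)   # torch.Size([1, 16, 8, 8, 8])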
Example 9: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, in_channels):
    super(hourglass, self).__init__()
    # Encoder: two stride-2 stages that halve the spatial resolution.
    self.conv1 = nn.Sequential(convbn_3d(in_channels, in_channels * 2, 3, 2, 1),
                               nn.ReLU(inplace=True))
    self.conv2 = nn.Sequential(convbn_3d(in_channels * 2, in_channels * 2, 3, 1, 1),
                               nn.ReLU(inplace=True))
    self.conv3 = nn.Sequential(convbn_3d(in_channels * 2, in_channels * 4, 3, 2, 1),
                               nn.ReLU(inplace=True))
    self.conv4 = nn.Sequential(convbn_3d(in_channels * 4, in_channels * 4, 3, 1, 1),
                               nn.ReLU(inplace=True))
    # Decoder: transposed convolutions that undo the two halvings.
    self.conv5 = nn.Sequential(
        nn.ConvTranspose3d(in_channels * 4, in_channels * 2, 3, padding=1, output_padding=1, stride=2, bias=False),
        nn.BatchNorm3d(in_channels * 2))
    self.conv6 = nn.Sequential(
        nn.ConvTranspose3d(in_channels * 2, in_channels, 3, padding=1, output_padding=1, stride=2, bias=False),
        nn.BatchNorm3d(in_channels))
    # 1x1 projections used as shortcut connections.
    self.redir1 = convbn_3d(in_channels, in_channels, kernel_size=1, stride=1, pad=0)
    self.redir2 = convbn_3d(in_channels * 2, in_channels * 2, kernel_size=1, stride=1, pad=0)
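The forward pass is not shown on this page, but the decoder's parameters line up with the encoder's. A stand-alone sketch (an illustration, not the original code) showing that kernel_size=3, stride=2, padding=1, output_padding=1 doubles every spatial dimension, mirroring the stride-2 halving in the encoder:

import torch
from torch import nn

c = 32                                                  # assumed in_channels
x = torch.randn(1, 4 * c, 4, 8, 8)                      # deepest encoder feature map
up = nn.ConvTranspose3d(4 * c, 2 * c, 3, padding=1, output_padding=1, stride=2, bias=False)
print(up(x).shape)                                      # torch.Size([1, 64, 8, 16, 16])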
Example 10: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, cf, conv):
    super(Mask, self).__init__()
    self.pool_size = cf.mask_pool_size
    self.pyramid_levels = cf.pyramid_levels
    self.dim = conv.dim
    self.conv1 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv2 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv3 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv4 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    if conv.dim == 2:
        self.deconv = nn.ConvTranspose2d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)  # todo why no norm here?
    else:
        self.deconv = nn.ConvTranspose3d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
    self.relu = nn.ReLU(inplace=True) if cf.relu == 'relu' else nn.LeakyReLU(inplace=True)
    self.conv5 = conv(cf.end_filts, cf.head_classes, ks=1, stride=1, relu=None)
    self.sigmoid = nn.Sigmoid()
Example 11: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, block, upblock, upblock1, n_size, num_classes=2, in_channel=1):  # BasicBlock, 3
    super(ResNetUNET3D, self).__init__()
    self.inplane = 28
    self.conv1 = nn.Conv3d(in_channel, self.inplane, kernel_size=3, stride=2, padding=1, bias=False)
    self.bn1 = nn.BatchNorm3d(self.inplane)
    self.relu = nn.ReLU(inplace=True)
    self.layer1 = self._make_layer(block, 30, blocks=n_size, stride=1)
    self.layer2 = self._make_layer(block, 32, blocks=n_size, stride=1)
    self.layer3 = self._make_layer(block, 34, blocks=n_size, stride=1)
    self.layer4 = upblock(34, 32, 32, stride=1)
    self.inplane = 32
    self.layer5 = self._make_layer(block, 32, blocks=n_size - 1, stride=1)
    self.layer6 = upblock(32, 30, 30, stride=1)
    self.inplane = 30
    self.layer7 = self._make_layer(block, 30, blocks=n_size - 1, stride=1)
    self.layer8 = upblock(30, 28, 28, stride=1)
    self.inplane = 28
    self.layer9 = self._make_layer(block, 28, blocks=n_size - 1, stride=1)
    self.inplane = 28
    self.layer10 = upblock1(28, 1, 14, stride=2)
    self.layer11 = nn.Sequential(
        # nn.Conv3d(16, 14, kernel_size=3, stride=1, padding=1, bias=True),
        # nn.ReLU(inplace=True),
        nn.Conv3d(14, num_classes, kernel_size=3, stride=1, padding=1, bias=True))
    # self.outconv = nn.ConvTranspose3d(self.inplane, num_classes, 2, stride=2)
    self.initialize()
Example 12: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, block, upblock, n_size, num_classes=2, in_channel=1):  # BasicBlock, 3
    super(ResNetUNET3D, self).__init__()
    self.inplane = 28
    self.conv1 = nn.Conv3d(in_channel, self.inplane, kernel_size=3, stride=2, padding=1, bias=False)
    self.bn1 = nn.BatchNorm3d(self.inplane)
    self.relu = nn.ReLU(inplace=True)
    self.layer1 = self._make_layer(block, 32, blocks=n_size, stride=1)
    self.layer2 = self._make_layer(block, 36, blocks=n_size, stride=1)
    self.layer3 = self._make_layer(block, 40, blocks=n_size, stride=1)
    self.layer4 = upblock(40, 36, 36, stride=1)
    self.inplane = 36
    self.layer5 = self._make_layer(block, 36, blocks=n_size - 1, stride=1)
    self.layer6 = upblock(36, 32, 32, stride=1)
    self.inplane = 32
    self.layer7 = self._make_layer(block, 32, blocks=n_size - 1, stride=1)
    self.layer8 = upblock(32, 28, 28, stride=1)
    self.inplane = 28
    self.layer9 = self._make_layer(block, 28, blocks=n_size - 1, stride=1)
    self.inplane = 28
    self.layer10 = upblock(28, 1, 14, stride=2)
    self.layer11 = nn.Sequential(
        # nn.Conv3d(16, 14, kernel_size=3, stride=1, padding=1, bias=True),
        # nn.ReLU(inplace=True),
        nn.Conv3d(14, num_classes, kernel_size=3, stride=1, padding=1, bias=True))
    # self.outconv = nn.ConvTranspose3d(self.inplane, num_classes, 2, stride=2)
    self.initialize()
Example 13: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, block, upblock, upblock1, n_size, num_classes=2, in_channel=1):  # BasicBlock, 3
    super(ResNetUNET3D, self).__init__()
    self.inplane = 22
    self.conv1 = nn.Conv3d(in_channel, self.inplane, kernel_size=3, stride=2, padding=1, bias=False)
    self.bn1 = nn.BatchNorm3d(self.inplane)
    self.relu = nn.ReLU(inplace=True)
    self.layer1 = self._make_layer(block, 22, blocks=n_size, stride=1)
    self.layer2 = self._make_layer(block, 22, blocks=n_size, stride=1)
    self.layer3 = self._make_layer(block, 22, blocks=n_size, stride=1)
    self.layer4 = upblock(22, 22, 22, stride=1)
    self.inplane = 22
    self.layer5 = self._make_layer(block, 22, blocks=n_size - 1, stride=1)
    self.layer6 = upblock(22, 22, 22, stride=1)
    self.inplane = 22
    self.layer7 = self._make_layer(block, 22, blocks=n_size - 1, stride=1)
    self.layer8 = upblock(22, 22, 22, stride=1)
    self.inplane = 22
    self.layer9 = self._make_layer(block, 22, blocks=n_size - 1, stride=1)
    self.inplane = 22
    self.layer10 = upblock1(22, 1, 14, stride=2)
    self.layer11 = nn.Sequential(
        # nn.Conv3d(16, 14, kernel_size=3, stride=1, padding=1, bias=True),
        # nn.ReLU(inplace=True),
        nn.Conv3d(14, num_classes, kernel_size=3, stride=1, padding=1, bias=True))
    # self.outconv = nn.ConvTranspose3d(self.inplane, num_classes, 2, stride=2)
    self.initialize()
Example 14: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, block, upblock, n_size, num_classes=2, in_channel=1):  # BasicBlock, 3
    super(ResNetUNET3D, self).__init__()
    self.inplane = 22
    self.conv1 = nn.Conv3d(in_channel, self.inplane, kernel_size=3, stride=2, padding=1, bias=False)
    self.bn1 = nn.BatchNorm3d(self.inplane)
    self.relu = nn.ReLU(inplace=True)
    self.layer1 = self._make_layer(block, 22, blocks=n_size, stride=1)
    self.layer2 = self._make_layer(block, 22, blocks=n_size, stride=1)
    self.layer3 = self._make_layer(block, 22, blocks=n_size, stride=1)
    self.layer4 = upblock(22, 22, 22, stride=1)
    self.inplane = 22
    self.layer5 = self._make_layer(block, 22, blocks=n_size - 1, stride=1)
    self.layer6 = upblock(22, 22, 22, stride=1)
    self.inplane = 22
    self.layer7 = self._make_layer(block, 22, blocks=n_size - 1, stride=1)
    self.layer8 = upblock(22, 22, 22, stride=1)
    self.inplane = 22
    self.layer9 = self._make_layer(block, 22, blocks=n_size - 1, stride=1)
    self.inplane = 22
    self.layer10 = upblock(22, 1, 14, stride=2)
    self.layer11 = nn.Sequential(
        # nn.Conv3d(20, 20, kernel_size=3, stride=1, padding=1, bias=True),
        # nn.ReLU(inplace=True),
        nn.Conv3d(14, num_classes, kernel_size=3, stride=1, padding=1, bias=True))
    # self.outconv = nn.ConvTranspose3d(self.inplane, num_classes, 2, stride=2)
    self.initialize()
Example 15: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import ConvTranspose3d [as alias]
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, padding=None, mode='conv'):
    super(ConvBlock, self).__init__()
    if padding is None:
        padding = (kernel_size - 1) // 2
    if mode == 'conv':
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
    elif mode == 'deconv':
        self.conv = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
    elif mode == 'conv_3d':
        self.conv = nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
    elif mode == 'deconv_3d':
        self.conv = nn.ConvTranspose3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
    else:
        print('conv mode not supported', mode)
        exit(1)
    if '3d' not in mode:
        self.bn = nn.BatchNorm2d(out_planes)
    else:
        self.bn = nn.BatchNorm3d(out_planes)
    self.relu = nn.ReLU(inplace=True)
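One thing to note about the 'deconv' and 'deconv_3d' modes (a stand-alone observation, not code from the original repository): with the default padding of (kernel_size - 1) // 2 and no output_padding, a strided transposed convolution does not double the input exactly. For kernel_size=3, stride=2 the output size is 2 * D_in - 1, and exact doubling would require output_padding=1, which this helper does not expose.

import torch
from torch import nn

# kernel_size=3, stride=2, padding=1, no output_padding: one voxel short of 2x.
deconv = nn.ConvTranspose3d(16, 8, kernel_size=3, stride=2, padding=1, bias=False)
print(deconv(torch.randn(1, 16, 8, 8, 8)).shape)   # torch.Size([1, 8, 15, 15, 15])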