

Python nn.ConvTranspose2d Method Code Examples

This article collects typical usage examples of the torch.nn.ConvTranspose2d method in Python. If you are wondering what nn.ConvTranspose2d does in practice, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples from the torch.nn module.


The following presents 15 code examples of the nn.ConvTranspose2d method, sorted by popularity by default.
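
Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) showing the basic call and how the output size of nn.ConvTranspose2d follows from its hyperparameters; the shape arithmetic matches the PyTorch documentation.

import torch
from torch import nn

# kernel_size=4, stride=2, padding=1 doubles the spatial resolution:
# H_out = (H_in - 1) * stride - 2 * padding + kernel_size
deconv = nn.ConvTranspose2d(in_channels=16, out_channels=8, kernel_size=4, stride=2, padding=1)
x = torch.randn(1, 16, 32, 32)
y = deconv(x)
print(y.shape)  # torch.Size([1, 8, 64, 64])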

Example 1: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def __init__(self, input_size, n_channels, ngf, n_layers, activation='tanh'):
    super(ImageDecoder, self).__init__()

    ngf = ngf * (2 ** (n_layers - 2))
    layers = [nn.ConvTranspose2d(input_size, ngf, 4, 1, 0, bias=False),
              nn.BatchNorm2d(ngf),
              nn.ReLU(True)]

    for i in range(1, n_layers - 1):
      layers += [nn.ConvTranspose2d(ngf, ngf // 2, 4, 2, 1, bias=False),
                 nn.BatchNorm2d(ngf // 2),
                 nn.ReLU(True)]
      ngf = ngf // 2

    layers += [nn.ConvTranspose2d(ngf, n_channels, 4, 2, 1, bias=False)]
    if activation == 'tanh':
      layers += [nn.Tanh()]
    elif activation == 'sigmoid':
      layers += [nn.Sigmoid()]
    else:
      raise NotImplementedError

    self.main = nn.Sequential(*layers) 
Author: jthsieh | Project: DDPAE-video-prediction | Lines: 25 | Source: decoder.py
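
Assuming the decoder input is shaped (N, input_size, 1, 1), the first ConvTranspose2d produces a 4x4 feature map and each subsequent layer doubles it, so n_layers layers yield a 4 * 2**(n_layers - 1) output. A usage sketch (the argument values below are illustrative assumptions, not taken from the project):

import torch

decoder = ImageDecoder(input_size=64, n_channels=3, ngf=32, n_layers=4, activation='tanh')
z = torch.randn(8, 64, 1, 1)
print(decoder.main(z).shape)  # torch.Size([8, 3, 32, 32]), i.e. 4 * 2**3 = 32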

Example 2: forward

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def forward(self, x):
        if x.numel() == 0 and torch.__version__ <= '1.4.0':
            out_shape = [x.shape[0], self.out_channels]
            for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size,
                                         self.padding, self.stride,
                                         self.dilation, self.output_padding):
                out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
            empty = NewEmptyTensorOp.apply(x, out_shape)
            if self.training:
                # produce dummy gradient to avoid DDP warning.
                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
                return empty + dummy
            else:
                return empty

        return super(ConvTranspose2d, self).forward(x) 
Author: open-mmlab | Project: mmdetection | Lines: 18 | Source: wrappers.py
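
As a quick sanity check of the shape formula used in the empty-tensor branch above (this snippet is my own, not part of mmdetection), the size it computes matches what an ordinary forward pass returns:

import torch
from torch import nn

m = nn.ConvTranspose2d(4, 6, kernel_size=3, stride=2, padding=1, output_padding=1, dilation=1)
x = torch.randn(2, 4, 10, 12)
# (i - 1) * s - 2 * p + (d * (k - 1) + 1) + op, per spatial dimension
expected = [(i - 1) * 2 - 2 * 1 + (1 * (3 - 1) + 1) + 1 for i in (10, 12)]
print(expected)                # [20, 24]
print(list(m(x).shape[-2:]))   # [20, 24]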

Example 3: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def __init__(self, in_ch, out_ch, circular_padding, bilinear=True, group_conv=False):
        super(up, self).__init__()

        # It would be a nice idea if the upsampling could be learned too,
        # but my machine does not have enough memory to handle all those weights
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        elif group_conv:
            self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2, groups=in_ch//2)
        else:
            self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)

        if circular_padding:
            self.conv = double_conv_circular(in_ch, out_ch, group_conv=group_conv)
        else:
            self.conv = double_conv(in_ch, out_ch, group_conv=group_conv) 
Author: edwardzhou130 | Project: PolarSeg | Lines: 18 | Source: BEV_Unet.py

Example 4: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def __init__(self, config):
        super().__init__()
        self.config = config

        self.relu = nn.ReLU(inplace=True)

        self.deconv1 = nn.ConvTranspose2d(in_channels=self.config.g_input_size, out_channels=self.config.num_filt_g * 8, kernel_size=4, stride=1, padding=0, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(self.config.num_filt_g*8)

        self.deconv2 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g * 8, out_channels=self.config.num_filt_g * 4, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm2 = nn.BatchNorm2d(self.config.num_filt_g*4)

        self.deconv3 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g * 4, out_channels=self.config.num_filt_g * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm3 = nn.BatchNorm2d(self.config.num_filt_g*2)

        self.deconv4 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g * 2, out_channels=self.config.num_filt_g, kernel_size=4, stride=2, padding=1, bias=False)
        self.batch_norm4 = nn.BatchNorm2d(self.config.num_filt_g)

        self.deconv5 = nn.ConvTranspose2d(in_channels=self.config.num_filt_g, out_channels=self.config.input_channels, kernel_size=4, stride=2, padding=1, bias=False)

        self.out = nn.Tanh()

        self.apply(weights_init) 
Author: moemen95 | Project: Pytorch-Project-Template | Lines: 25 | Source: dcgan_generator.py
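
For reference, with an input of shape (N, g_input_size, 1, 1) as in a standard DCGAN, the five transposed convolutions above expand the spatial size 1 -> 4 -> 8 -> 16 -> 32 -> 64. A minimal shape-only sketch with concrete channel counts (nz=100 and ngf=64 are assumptions, not values from the project's config; BatchNorm/ReLU are omitted since they do not change shapes):

import torch
from torch import nn

nz, ngf = 100, 64
stack = nn.Sequential(
    nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),       # 1x1   -> 4x4
    nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),  # 4x4   -> 8x8
    nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),  # 8x8   -> 16x16
    nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),      # 16x16 -> 32x32
    nn.ConvTranspose2d(ngf, 3, 4, 2, 1, bias=False),            # 32x32 -> 64x64
)
print(stack(torch.randn(1, nz, 1, 1)).shape)  # torch.Size([1, 3, 64, 64])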

Example 5: _initialization

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def _initialization(self):
        if self.pretrained is not False:
            self.load_state_dict(model_zoo.load_url(model_urls[self.pretrained]))
            # TODO(liu): check whether this is correct.
        else:
            for name, sub_module in self.named_modules():
                if isinstance(sub_module, nn.Conv2d) or isinstance(sub_module, nn.ConvTranspose2d) or \
                    isinstance(sub_module, nn.Linear):
                    nn.init.kaiming_normal_(sub_module.weight)
                    # nn.init.kaiming_normal_(sub_module.weight,mode='fan_out'
                    #                         ,nonlinearity='relu')
                    if self.logger is not None:
                        self.logger.info('init {}.weight as kaiming_normal_'.format(name))
                    if sub_module.bias is not None:
                        nn.init.constant_(sub_module.bias, 0.0)
                        if self.logger is not None:
                            self.logger.info('init {}.bias as 0'.format(name))
                # elif isinstance(sub_module, nn.BatchNorm2d):
                #     nn.init.constant_(sub_module.weight,1)
                #     nn.init.constant_(sub_module.bias,0)
                #     if self.logger is not None:
                #         self.logger.info('init {}.weight as constant_ 1'.format(name))
                #         self.logger.info('init {}.bias as constant_ 0'.format(name)) 
Author: HaiyangLiu1997 | Project: Pytorch-Networks | Lines: 25 | Source: ResNet2015.py

Example 6: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def __init__(self, cf, conv):
        super(Mask, self).__init__()
        self.pool_size = cf.mask_pool_size
        self.pyramid_levels = cf.pyramid_levels
        self.dim = conv.dim
        self.conv1 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
        self.conv2 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
        self.conv3 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
        self.conv4 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
        if conv.dim == 2:
            self.deconv = nn.ConvTranspose2d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
        else:
            self.deconv = nn.ConvTranspose3d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)

        self.relu = nn.ReLU(inplace=True) if cf.relu == 'relu' else nn.LeakyReLU(inplace=True)
        self.conv5 = conv(cf.end_filts, cf.head_classes, ks=1, stride=1, relu=None)
        self.sigmoid = nn.Sigmoid() 
Author: MIC-DKFZ | Project: medicaldetectiontoolkit | Lines: 19 | Source: mrcnn.py

Example 7: _up_samples

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def _up_samples(self):
        up_samples = []
        kernel_scale = self.kernel_scale
        stride = self.pool_stride
        kernel_size = kernel_scale * stride
        padding, output_padding = get_upsample_pad(stride=stride, kernel=kernel_size)
        for i in range(self.repeat_blocks):
            in_channels = int(self.n_filters * 2 ** (i + 2))
            out_channels = int(self.n_filters * 2 ** (i + 1))
            up_samples.append(nn.ConvTranspose2d(in_channels=in_channels,
                                                 out_channels=out_channels,
                                                 kernel_size=kernel_size,
                                                 stride=stride,
                                                 padding=padding,
                                                 output_padding=output_padding,
                                                 bias=False
                                                 ))
        return nn.ModuleList(up_samples) 
Author: minerva-ml | Project: steppy-toolkit | Lines: 20 | Source: unet.py
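
get_upsample_pad is a steppy-toolkit helper that is not shown here. A plausible minimal re-implementation (an assumption on my part, not the library's actual code) picks padding and output_padding so that the transposed convolution upsamples by exactly the given stride:

def get_upsample_pad(stride, kernel):
    # Solve (i - 1) * stride - 2 * padding + kernel + output_padding == i * stride
    # for a non-negative padding and an output_padding in [0, stride).
    padding = (kernel - stride + 1) // 2
    output_padding = 2 * padding - (kernel - stride)
    return padding, output_padding

# e.g. kernel_scale=2, stride=2 -> kernel=4, padding=1, output_padding=0,
# which maps an HxW input to exactly 2Hx2W.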

Example 8: compute_madd

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        return 0 
Author: Tramac | Project: torchscope | Lines: 23 | Source: helper.py

Example 9: compute_ConvTranspose2d_madd

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def compute_ConvTranspose2d_madd(module, inp, out):
    assert isinstance(module, nn.ConvTranspose2d)
    assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())

    in_c, in_h, in_w = inp.size()[1:]
    k_h, k_w = module.kernel_size
    out_c, out_h, out_w = out.size()[1:]
    groups = module.groups

    kernel_mul = k_h * k_w * (in_c // groups)
    kernel_add = kernel_mul - 1 + (0 if module.bias is None else 1)

    kernel_mul_group = kernel_mul * in_h * in_w * (out_c // groups)
    kernel_add_group = kernel_add * in_h * in_w * (out_c // groups)

    total_mul = kernel_mul_group * groups
    total_add = kernel_add_group * groups

    return total_mul + total_add 
Author: Tramac | Project: torchscope | Lines: 21 | Source: helper.py
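
A rough cross-check of the formula above (my own snippet, assuming compute_ConvTranspose2d_madd from the example is in scope): with groups=1 and no bias, the multiplies come to k_h * k_w * in_c * out_c * in_h * in_w and the adds to (k_h * k_w * in_c - 1) * out_c * in_h * in_w.

import torch
from torch import nn

m = nn.ConvTranspose2d(8, 16, kernel_size=3, stride=2, padding=1, bias=False)
inp = torch.randn(1, 8, 10, 10)
out = m(inp)
mul = 3 * 3 * 8 * 16 * 10 * 10         # 115200
add = (3 * 3 * 8 - 1) * 16 * 10 * 10   # 113600
print(compute_ConvTranspose2d_madd(m, inp, out) == mul + add)  # True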

Example 10: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def __init__(self, obs_shape, feature_dim, num_layers=2, num_filters=32):
        super().__init__()

        self.num_layers = num_layers
        self.num_filters = num_filters
        self.out_dim = OUT_DIM[num_layers]

        self.fc = nn.Linear(
            feature_dim, num_filters * self.out_dim * self.out_dim
        )

        self.deconvs = nn.ModuleList()

        for i in range(self.num_layers - 1):
            self.deconvs.append(
                nn.ConvTranspose2d(num_filters, num_filters, 3, stride=1)
            )
        self.deconvs.append(
            nn.ConvTranspose2d(
                num_filters, obs_shape[0], 3, stride=2, output_padding=1
            )
        )

        self.outputs = dict() 
Author: denisyarats | Project: pytorch_sac_ae | Lines: 26 | Source: decoder.py

Example 11: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def __init__(self, rep_dim=128):
        super().__init__()

        self.rep_dim = rep_dim

        self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), 128, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
        self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
        self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
        self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
        nn.init.xavier_uniform_(self.deconv4.weight, gain=nn.init.calculate_gain('leaky_relu')) 
Author: lukasruff | Project: Deep-SAD-PyTorch | Lines: 18 | Source: cifar10_LeNet.py

Example 12: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def __init__(self, in_channels, middle_channels, out_channels, deconv_channels, dropout=False):
        super(Center2D, self).__init__()

        layers = [
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(middle_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(out_channels, deconv_channels, kernel_size=2, stride=2)
        ]

        if dropout:
            assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
            layers.append(nn.Dropout2d(p=dropout))

        self.center = nn.Sequential(*layers) 
Author: cosmic-cortex | Project: pytorch-UNet | Lines: 21 | Source: blocks.py

Example 13: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def __init__(self, dim_in):
        super(Keypoint_output, self).__init__()
        num_keypoints = cfg.KRCNN.NUM_CLASSES
        assert cfg.KRCNN.RESOLUTION[0] // cfg.KRCNN.ROI_XFORM_RESOLUTION[0] == \
               cfg.KRCNN.RESOLUTION[1] // cfg.KRCNN.ROI_XFORM_RESOLUTION[1]
        self.up_scale = cfg.KRCNN.RESOLUTION[0] // (cfg.KRCNN.ROI_XFORM_RESOLUTION[0] * 2)

        deconv_kernel = 4
        self.kps_score_lowres = nn.ConvTranspose2d(
            dim_in,
            num_keypoints,
            deconv_kernel,
            stride=2,
            padding=deconv_kernel // 2 - 1,
        )

        nn.init.kaiming_normal_(self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu")
        nn.init.constant_(self.kps_score_lowres.bias, 0)

        self.dim_out = num_keypoints 
Author: soeaver | Project: Parsing-R-CNN | Lines: 22 | Source: outputs.py

Example 14: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def __init__(self, dim_in):
        super(UV_output, self).__init__()
        num_patches = cfg.UVRCNN.NUM_PATCHES
        deconv_kernel = 4
        assert cfg.UVRCNN.RESOLUTION[0] // cfg.UVRCNN.ROI_XFORM_RESOLUTION[0] == \
               cfg.UVRCNN.RESOLUTION[1] // cfg.UVRCNN.ROI_XFORM_RESOLUTION[1]
        self.up_scale = cfg.UVRCNN.RESOLUTION[0] // (cfg.UVRCNN.ROI_XFORM_RESOLUTION[0] * 2)

        self.deconv_Ann = nn.ConvTranspose2d(dim_in, 15, deconv_kernel, 2, padding=deconv_kernel // 2 - 1)
        self.deconv_Index = nn.ConvTranspose2d(dim_in, num_patches + 1, deconv_kernel, 2,
                                               padding=deconv_kernel // 2 - 1)
        self.deconv_U = nn.ConvTranspose2d(dim_in, num_patches + 1, deconv_kernel, 2, padding=deconv_kernel // 2 - 1)
        self.deconv_V = nn.ConvTranspose2d(dim_in, num_patches + 1, deconv_kernel, 2, padding=deconv_kernel // 2 - 1)

        for m in self.modules():
            if isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                nn.init.constant_(m.bias, 0) 
Author: soeaver | Project: Parsing-R-CNN | Lines: 20 | Source: outputs.py

Example 15: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose2d [as alias]
def __init__(self, dim_in):
        super(Parsing_output, self).__init__()
        num_parsing = cfg.PRCNN.NUM_PARSING
        assert cfg.PRCNN.RESOLUTION[0] // cfg.PRCNN.ROI_XFORM_RESOLUTION[0] == \
               cfg.PRCNN.RESOLUTION[1] // cfg.PRCNN.ROI_XFORM_RESOLUTION[1]
        self.up_scale = cfg.PRCNN.RESOLUTION[0] // (cfg.PRCNN.ROI_XFORM_RESOLUTION[0] * 2)

        deconv_kernel = 4
        self.parsing_score_lowres = nn.ConvTranspose2d(
            dim_in,
            num_parsing,
            deconv_kernel,
            stride=2,
            padding=deconv_kernel // 2 - 1,
        )

        nn.init.kaiming_normal_(self.parsing_score_lowres.weight, mode="fan_out", nonlinearity="relu")
        nn.init.constant_(self.parsing_score_lowres.bias, 0)

        self.dim_out = num_parsing 
Author: soeaver | Project: Parsing-R-CNN | Lines: 22 | Source: outputs.py


Note: the torch.nn.ConvTranspose2d examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; consult the corresponding projects' licenses before distributing or using the code, and do not reproduce this article without permission.