

Python nn.PixelShuffle Method Code Examples

This article collects typical usage examples of the torch.nn.PixelShuffle method in Python. If you are wondering how nn.PixelShuffle is used in practice, or are looking for concrete examples, the curated code samples below may help. You can also browse further usage examples from the torch.nn module.


The following section presents 15 code examples of the nn.PixelShuffle method, sorted by popularity by default.
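
Before diving into the project examples, here is a minimal standalone sketch (not taken from any of the projects below) illustrating what nn.PixelShuffle does: with upscale factor r, it rearranges a tensor of shape (B, C*r^2, H, W) into (B, C, H*r, W*r), trading channel depth for spatial resolution.

import torch
from torch import nn

ps = nn.PixelShuffle(2)             # upscale factor r = 2
x = torch.randn(1, 64 * 4, 24, 24)  # (B, C*r^2, H, W) = (1, 256, 24, 24)
y = ps(x)
print(y.shape)                      # torch.Size([1, 64, 48, 48])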

Example 1: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, scale, n_feats, norm_type=False, act_type='relu', bias=False):

        m = []
        act = act_layer(act_type) if act_type else None
        norm = norm_layer(norm_type, n_feats) if norm_type else None
        if (scale & (scale - 1)) == 0:    # Is scale = 2^n?
            for _ in range(int(math.log(scale, 2))):
                m.append(default_conv(n_feats, 4 * n_feats, 3, bias=bias))
                m.append(nn.PixelShuffle(2))
                if norm: m.append(norm)
                if act is not None: m.append(act)

        elif scale == 3:
            m.append(default_conv(n_feats, 9 * n_feats, 3, bias=bias))
            m.append(nn.PixelShuffle(3))
            if norm: m.append(norm)
            if act is not None: m.append(act)
        else:
            raise NotImplementedError

        super(Upsampler, self).__init__(*m) 
Developer ID: guochengqian, Project: TENet, Lines: 23, Source file: common.py
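
As a quick check of the power-of-two branch above, the following hedged sketch (assumed values n_feats=64 and scale=4, with a plain nn.Conv2d standing in for default_conv) stacks log2(scale) conv + PixelShuffle(2) stages and verifies the resulting spatial upscaling:

import math
import torch
from torch import nn

scale, n_feats = 4, 64
stages = []
for _ in range(int(math.log2(scale))):
    # each stage: a 3x3 conv expands channels 4x, then PixelShuffle(2) converts them into 2x resolution
    stages += [nn.Conv2d(n_feats, 4 * n_feats, 3, padding=1), nn.PixelShuffle(2)]
upsampler = nn.Sequential(*stages)

x = torch.randn(1, n_feats, 16, 16)
print(upsampler(x).shape)  # torch.Size([1, 64, 64, 64])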

Example 2: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, in_nc=1, out_nc=1, nc=64, nb=15, act_mode='R'):
        """
        # ------------------------------------
        in_nc: channel number of input
        out_nc: channel number of output
        nc: channel number
        nb: total number of conv layers
        act_mode: batch norm + activation function; 'BR' means BN+ReLU.
        # ------------------------------------
        """
        super(FFDNet, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        bias = True
        sf = 2

        self.m_down = B.PixelUnShuffle(upscale_factor=sf)

        m_head = B.conv(in_nc*sf*sf+1, nc, mode='C'+act_mode[-1], bias=bias)
        m_body = [B.conv(nc, nc, mode='C'+act_mode, bias=bias) for _ in range(nb-2)]
        m_tail = B.conv(nc, out_nc*sf*sf, mode='C', bias=bias)

        self.model = B.sequential(m_head, *m_body, m_tail)

        self.m_up = nn.PixelShuffle(upscale_factor=sf) 
Developer ID: cszn, Project: KAIR, Lines: 27, Source file: network_ffdnet.py

Example 3: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, input_size, output_size, scale_factor, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm='batch'):
        super(PSBlock, self).__init__()
        self.conv = nn.Conv2d(input_size, output_size * scale_factor**2, kernel_size, stride, padding, bias=bias)
        self.ps = nn.PixelShuffle(scale_factor)

        self.norm = norm
        if self.norm == 'batch':
            self.bn = nn.BatchNorm2d(output_size)
        elif norm == 'instance':
            self.bn = nn.InstanceNorm2d(output_size)

        self.activation = activation
        if self.activation == 'relu':
            self.act = nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = nn.LeakyReLU(0.1, True)
        elif self.activation == 'tanh':
            self.act = nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = nn.Sigmoid() 
Developer ID: alterzero, Project: STARnet, Lines: 24, Source file: base_networks.py

Example 4: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self,channel=1,growth_rate=64,rdb_number=16,rdb_conv_layers=8,upscale_factor=3):
        super(RDN,self).__init__()
        self.SFF1 = nn.Conv2d(in_channels = channel,out_channels = 64,kernel_size = 3,padding = 1 , stride = 1)
        self.SFF2 = nn.Conv2d(in_channels = 64,out_channels = 64,kernel_size = 3,padding = 1 , stride = 1)
        rdb_layers = []
        for _ in range(rdb_number):
            rdb_layers.append(RDB(nb_layers = rdb_conv_layers,input_dim=64,growth_rate=64))
        self.RDB_layers = nn.ModuleList(rdb_layers)
        # self.RDB1 = RDB(nb_layers = rdb_number,input_dim=64,growth_rate=64)
        # self.RDB2 = RDB(nb_layers = rdb_number,input_dim=64,growth_rate=64)
        # self.RDB3 = RDB(nb_layers = rdb_number,input_dim=64,growth_rate=64)
        self.GFF1 = nn.Conv2d(in_channels = 64*rdb_number,out_channels = 64,kernel_size = 1,padding = 0 )
        self.GFF2 = nn.Conv2d(in_channels = 64,out_channels = 64,kernel_size = 3,padding = 1 )
        self.upconv = nn.Conv2d(in_channels = 64, out_channels=(64*upscale_factor*upscale_factor),kernel_size = 3,padding = 1)
        self.pixelshuffle = nn.PixelShuffle(upscale_factor)
        self.conv2 = nn.Conv2d(in_channels = 64,out_channels = channel,kernel_size = 3,padding = 1 ) 
Developer ID: produvia, Project: ai-platform, Lines: 18, Source file: rdn.py

Example 5: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self):
        super(SFT_Net, self).__init__()
        self.conv0 = nn.Conv2d(3, 64, 3, 1, 1)

        sft_branch = []
        for i in range(16):
            sft_branch.append(ResBlock_SFT())
        sft_branch.append(SFTLayer())
        sft_branch.append(nn.Conv2d(64, 64, 3, 1, 1))
        self.sft_branch = nn.Sequential(*sft_branch)

        self.HR_branch = nn.Sequential(nn.Conv2d(64, 256, 3, 1,
                                                 1), nn.PixelShuffle(2), nn.ReLU(True),
                                       nn.Conv2d(64, 256, 3, 1, 1), nn.PixelShuffle(2),
                                       nn.ReLU(True), nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(True),
                                       nn.Conv2d(64, 3, 3, 1, 1))

        self.CondNet = nn.Sequential(nn.Conv2d(8, 128, 4, 4), nn.LeakyReLU(0.1, True),
                                     nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
                                     nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
                                     nn.Conv2d(128, 128, 1), nn.LeakyReLU(0.1, True),
                                     nn.Conv2d(128, 32, 1)) 
Developer ID: xinntao, Project: BasicSR, Lines: 24, Source file: sft_arch.py

Example 6: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, in_dim, out_dim, res_dim, f_size=3, dilation=1, norm_type="instance", with_relu=True):
        super(DualUpDownLayer, self).__init__()
        
        self.conv1 = ConvLayer(in_dim, in_dim, 3, 1)
        self.conv2 = ConvLayer(in_dim, in_dim, 3, 1)
        
        # T^{l}_{1}: (up+conv.)
        # -- Up --
        self.conv_pre = ConvLayer(in_dim, 4*in_dim, 3, 1)
        self.upsamp = nn.PixelShuffle(2)
        # --------
        self.up_conv = ConvLayer(res_dim, res_dim, kernel_size=f_size, stride=1, dilation=dilation)

        # T^{l}_{2}: (se+conv.), stride=2 for down-scaling.
        self.se = se_nets.SEBasicBlock(res_dim, res_dim, reduction=32)
        self.down_conv = ConvLayer(res_dim, out_dim, kernel_size=3, stride=2)

        self.with_relu = with_relu            
        self.relu = nn.ReLU() 
Developer ID: liu-vis, Project: DualResidualNetworks, Lines: 21, Source file: DuRN_US.py

Example 7: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, in_dim, out_dim, res_dim, f_size=3, dilation=1, norm_type="instance", with_relu=True):
        super(DualUpDownLayer, self).__init__()
        
        self.conv1 = ConvLayer(in_dim, in_dim, 3, 1)
        self.norm1 = FeatNorm(norm_type, in_dim)       
        self.conv2 = ConvLayer(in_dim, in_dim, 3, 1)
        self.norm2 = FeatNorm(norm_type, in_dim)

        # T^{l}_{1}: (Up+conv+insnorm)
        #-- Up --
        self.conv_pre = ConvLayer(in_dim, 2*in_dim, 1, 1)
        self.norm_pre = FeatNorm(norm_type, 2*in_dim)        
        self.upsamp = nn.PixelShuffle(2)
        #--------
        self.up_conv = ConvLayer(res_dim, res_dim, kernel_size=f_size, stride=1, dilation=dilation)
        self.up_norm = FeatNorm(norm_type, res_dim)

        # T^{l}_{2}: (conv+insnorm), stride=2 for down-scaling.        
        self.down_conv = ConvLayer(res_dim, out_dim, kernel_size=3, stride=2)
        self.down_norm = FeatNorm(norm_type, out_dim)

        self.with_relu = with_relu            
        self.relu = nn.ReLU() 
Developer ID: liu-vis, Project: DualResidualNetworks, Lines: 25, Source file: DuRN_U.py

Example 8: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, channels, filters=64, num_res_blocks=16, num_upsample=2):
        super(GeneratorRRDB, self).__init__()

        # First layer
        self.conv1 = nn.Conv2d(channels, filters, kernel_size=3, stride=1, padding=1)
        # Residual blocks
        self.res_blocks = nn.Sequential(*[ResidualInResidualDenseBlock(filters) for _ in range(num_res_blocks)])
        # Second conv layer post residual blocks
        self.conv2 = nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1)
        # Upsampling layers
        upsample_layers = []
        for _ in range(num_upsample):
            upsample_layers += [
                nn.Conv2d(filters, filters * 4, kernel_size=3, stride=1, padding=1),
                nn.LeakyReLU(),
                nn.PixelShuffle(upscale_factor=2),
            ]
        self.upsampling = nn.Sequential(*upsample_layers)
        # Final output block
        self.conv3 = nn.Sequential(
            nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(filters, channels, kernel_size=3, stride=1, padding=1),
        ) 
Developer ID: eriklindernoren, Project: PyTorch-GAN, Lines: 26, Source file: models.py

Example 9: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, in_dim, out_dim, type='Trp', norm_layer=None, nl_layer=None):
        super(Upsampling2dBlock, self).__init__()
        if type=='transpose':
            self.upsample = TrConv2dBlock(in_dim,out_dim,kernel_size=4,stride=2,
                            padding=1,bias=False,norm_layer=norm_layer,nl_layer=nl_layer)
        elif type=='nearest':
            self.upsample = nn.Sequential(
                nn.Upsample(scale_factor=2, mode='nearest'),
                Conv2dBlock(in_dim,out_dim,kernel_size=3, stride=1, padding=1,
                    pad_type='reflect', bias=False,norm_layer=norm_layer,nl_layer=nl_layer)
                )
        elif type=='pixelshuffle':
            self.upsample = nn.Sequential(
                Conv2dBlock(in_dim,out_dim*4,kernel_size=3, stride=1, padding=1,
                    pad_type='reflect', bias=False,norm_layer=norm_layer,nl_layer=nl_layer),
                nn.PixelShuffle(2)
                )
        else:
            raise NotImplementedError('Upsampling layer [%s] is not found' % type) 
Developer ID: Xiaoming-Yu, Project: DMIT, Lines: 21, Source file: network.py

Example 10: packing

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def packing(x, r=2):
    """
    Takes a [B,C,H,W] tensor and returns a [B,(r^2)C,H/r,W/r] tensor, by concatenating
    neighbor spatial pixels as extra channels. It is the inverse of nn.PixelShuffle
    (if you apply both sequentially you should get the same tensor)

    Parameters
    ----------
    x : torch.Tensor [B,C,H,W]
        Input tensor
    r : int
        Packing ratio

    Returns
    -------
    out : torch.Tensor [B,(r^2)C,H/r,W/r]
        Packed tensor
    """
    b, c, h, w = x.shape
    out_channel = c * (r ** 2)
    out_h, out_w = h // r, w // r
    x = x.contiguous().view(b, c, out_h, r, out_w, r)
    return x.permute(0, 1, 3, 5, 2, 4).contiguous().view(b, out_channel, out_h, out_w)

######################################################################################################################## 
Developer ID: TRI-ML, Project: packnet-sfm, Lines: 27, Source file: layers01.py
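
The round trip claimed in the docstring above can be checked directly. This is a hedged sketch assuming the packing() function from example 10 is in scope:

import torch
from torch import nn

x = torch.randn(2, 3, 8, 8)
packed = packing(x, r=2)               # (2, 12, 4, 4): neighbor pixels folded into channels
restored = nn.PixelShuffle(2)(packed)  # back to (2, 3, 8, 8)
print(torch.allclose(x, restored))     # True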

Example 11: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, in_channels, out_channels, kernel_size, r=2):
        """
        Initializes a UnpackLayerConv2d object.

        Parameters
        ----------
        in_channels : int
            Number of input channels
        out_channels : int
            Number of output channels
        kernel_size : int
            Kernel size
        r : int
            Packing ratio
        """
        super().__init__()
        self.conv = Conv2D(in_channels, out_channels * (r ** 2), kernel_size, 1)
        self.unpack = nn.PixelShuffle(r) 
Developer ID: TRI-ML, Project: packnet-sfm, Lines: 20, Source file: layers01.py

Example 12: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, n_channels, scale, group=1):
        super(_UpsampleBlock, self).__init__()

        modules = []
        if scale == 2 or scale == 4 or scale == 8:
            for _ in range(int(math.log(scale, 2))):
                modules += [nn.Conv2d(n_channels, 4*n_channels, 3, 1, 1, groups=group), nn.ReLU(inplace=True)]
                modules += [nn.PixelShuffle(2)]
        elif scale == 3:
            modules += [nn.Conv2d(n_channels, 9*n_channels, 3, 1, 1, groups=group), nn.ReLU(inplace=True)]
            modules += [nn.PixelShuffle(3)]

        self.body = nn.Sequential(*modules)
        init_weights(self.modules) 
Developer ID: ofsoundof, Project: 3D_Appearance_SR, Lines: 18, Source file: ops.py

Example 13: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):

        m = []
        if (scale & (scale - 1)) == 0:    # Is scale = 2^n?
            for _ in range(int(math.log(scale, 2))):
                m.append(conv(n_feat, 4 * n_feat, 3, bias))
                m.append(nn.PixelShuffle(2))
                if bn: m.append(nn.BatchNorm2d(n_feat))
                if act: m.append(nn.PReLU())
        elif scale == 3:
            m.append(conv(n_feat, 9 * n_feat, 3, bias))
            m.append(nn.PixelShuffle(3))
            if bn: m.append(nn.BatchNorm2d(n_feat))
            if act: m.append(nn.PReLU())
        else:
            raise NotImplementedError

        super(Upsampler, self).__init__(*m) 
Developer ID: ofsoundof, Project: 3D_Appearance_SR, Lines: 20, Source file: common.py

Example 14: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):

        m = []
        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?
            for _ in range(int(math.log(scale, 2))):
                m.append(conv(n_feats, 4 * n_feats, 3, bias))
                m.append(nn.PixelShuffle(2))
                if bn: m.append(nn.BatchNorm2d(n_feats))

                if act == 'relu':
                    m.append(nn.ReLU(True))
                elif act == 'prelu':
                    m.append(nn.PReLU(n_feats))

        elif scale == 3:
            m.append(conv(n_feats, 9 * n_feats, 3, bias))
            m.append(nn.PixelShuffle(3))
            if bn: m.append(nn.BatchNorm2d(n_feats))

            if act == 'relu':
                m.append(nn.ReLU(True))
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
        else:
            raise NotImplementedError

        super(Upsampler, self).__init__(*m) 
Developer ID: fab-jul, Project: L3C-PyTorch, Lines: 29, Source file: edsr.py

Example 15: make_pixel_shuffle_layers

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import PixelShuffle [as alias]
def make_pixel_shuffle_layers(self, in_features, up_scale):
        layers = []
        for i in range(up_scale):
            kernel_size = 2 ** (i + 1)
            out_features = self.compute_out_features(i, up_scale)
            in_features = int(in_features / (self.up_factor ** 2))
            layers.append(nn.PixelShuffle(self.up_factor))
            layers.append(nn.Conv2d(in_features, out_features, 1))
            if i < up_scale:
                layers.append(nn.ReLU(inplace=True))
            in_features = out_features
        return layers 
Developer ID: xavysp, Project: DexiNed, Lines: 14, Source file: model.py


Note: The torch.nn.PixelShuffle examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the license of the corresponding project before distributing or using the code; do not reproduce without permission.