

Python functional.pixel_shuffle method code examples

This article collects typical usage examples of the Python method torch.nn.functional.pixel_shuffle. If you are wondering what functional.pixel_shuffle does, how to call it, or what real usage looks like, the curated snippets below may help. You can also explore further usage examples from the containing module, torch.nn.functional.


The following presents 8 code examples of the functional.pixel_shuffle method, sorted by popularity by default.
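
Before the examples, here is a minimal standalone snippet (with illustrative tensor sizes) showing what pixel_shuffle does to shapes: an input of shape (N, C*r*r, H, W) is rearranged into (N, C, H*r, W*r) for an upscale factor r.

import torch
import torch.nn.functional as F

# pixel_shuffle rearranges (N, C*r*r, H, W) into (N, C, H*r, W*r); here r = 2.
x = torch.randn(1, 12, 8, 8)   # 12 = 3 * 2 * 2 channels
y = F.pixel_shuffle(x, 2)
print(y.shape)                 # torch.Size([1, 3, 16, 16])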

Example 1: kernel_normalizer

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pixel_shuffle [as alias]
def kernel_normalizer(self, mask):
        mask = F.pixel_shuffle(mask, self.scale_factor)
        n, mask_c, h, w = mask.size()
        mask_channel = int(mask_c / (self.up_kernel * self.up_kernel))
        mask = mask.view(n, mask_channel, -1, h, w)

        mask = F.softmax(mask, dim=2)
        mask = mask.view(n, mask_c, h, w).contiguous()

        return mask 
Developer: open-mmlab, Project: mmdetection, Lines of code: 12, Source file: carafe.py
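
For reference, here is a self-contained walk-through of the same normalization step with illustrative values (scale_factor=2 and up_kernel=5 are assumptions, not taken from any particular config): pixel_shuffle spreads the predicted kernel weights over the upsampled grid, and softmax normalizes each up_kernel x up_kernel reassembly kernel.

import torch
import torch.nn.functional as F

scale_factor, up_kernel = 2, 5            # illustrative values
n, h, w = 1, 8, 8
mask = torch.randn(n, up_kernel ** 2 * scale_factor ** 2, h, w)

mask = F.pixel_shuffle(mask, scale_factor)             # (1, 25, 16, 16)
n_, c_, h_, w_ = mask.size()
mask = mask.view(n_, c_ // up_kernel ** 2, -1, h_, w_)
mask = F.softmax(mask, dim=2)                          # normalize each 5x5 kernel
mask = mask.view(n_, c_, h_, w_).contiguous()
print(mask.shape)                                      # torch.Size([1, 25, 16, 16])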

Example 2: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pixel_shuffle [as alias]
def forward(self, input):
        out = F.conv2d(input, self.weight, self.bias, self.stride,
                       self.padding, self.dilation, self.groups)
        return F.pixel_shuffle(out, self.scale_factor)

# Experimental 
Developer: zhanghang1989, Project: PyTorch-Encoding, Lines of code: 8, Source file: encoding.py
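
The snippet above is the classic sub-pixel convolution pattern: a convolution produces out_channels * scale^2 feature maps, and pixel_shuffle rearranges them into higher spatial resolution. A hedged, standalone sketch of the same idea (module name and sizes are illustrative, not taken from PyTorch-Encoding):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SubPixelConv(nn.Module):
    def __init__(self, in_channels, out_channels, scale_factor, kernel_size=3):
        super().__init__()
        self.scale_factor = scale_factor
        # Produce out_channels * scale^2 maps; pixel_shuffle then moves the
        # extra channels into the spatial dimensions.
        self.conv = nn.Conv2d(in_channels, out_channels * scale_factor ** 2,
                              kernel_size, padding=kernel_size // 2)

    def forward(self, x):
        return F.pixel_shuffle(self.conv(x), self.scale_factor)

up = SubPixelConv(64, 3, scale_factor=2)
print(up(torch.randn(1, 64, 16, 16)).shape)   # torch.Size([1, 3, 32, 32])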

Example 3: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pixel_shuffle [as alias]
def forward(self, x):
        """
        Arguments:
            x (list[Tensor]): feature maps for each feature level.
        Returns:
            results (tuple[Tensor]): feature maps after FPN layers.
                They are ordered from highest to lowest resolution.
        """
        last_inner = getattr(self, self.inner_blocks[-1])(x[-1])
        results = []
        results.append(getattr(self, self.layer_blocks[-1])(last_inner))
        for feature, inner_block, layer_block in zip(
            x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1]
        ):

            inner_lateral = getattr(self, inner_block)(feature)
            # print('inner_lateral.size:', inner_lateral.size())
            # inner_top_down = F.interpolate(last_inner, size=inner_lateral.size()[-2:], mode="bilinear") #scale_factor=2, nearest
            # TODO use size instead of scale to make it robust to different sizes

            #------------- D2S -------------
            #inner_top_down_pxsf = F.pixel_shuffle(last_inner, 2)
            #inner_top_down_bfal = F.upsample(inner_top_down_pxsf, size=inner_lateral.shape[-2:], mode='bilinear')#bilinear , align_corners=False
            #inner_top_down = self.channel_aligned(inner_top_down_bfal)
            # ------------------------------

            # print('shape:', inner_lateral.shape, inner_top_down.shape)
            inner_top_down = F.interpolate(last_inner, size=inner_lateral.shape[-2:], mode='bilinear', align_corners=False) #bilinear
            last_inner = inner_lateral + inner_top_down
            results.insert(0, getattr(self, layer_block)(last_inner))

        if self.top_blocks is not None:
            last_results = self.top_blocks(results[-1])
            results.extend(last_results)

        return tuple(results) 
Developer: clw5180, Project: remote_sensing_object_detection_2019, Lines of code: 38, Source file: fpn.py
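
The commented-out D2S branch and the active F.interpolate call differ in how they handle channels. A small shape comparison with illustrative tensors (the 1x1 channel_aligned conv mentioned in the snippet would be needed to bring the pixel_shuffle output back to 256 channels before the addition):

import torch
import torch.nn.functional as F

last_inner = torch.randn(1, 256, 13, 13)      # coarser FPN level
inner_lateral = torch.randn(1, 256, 25, 25)   # finer lateral feature

# (a) bilinear interpolation keeps 256 channels and matches the lateral size
a = F.interpolate(last_inner, size=inner_lateral.shape[-2:],
                  mode='bilinear', align_corners=False)

# (b) pixel_shuffle trades channels for resolution: 256 -> 64 channels
b = F.pixel_shuffle(last_inner, 2)
print(a.shape, b.shape)   # torch.Size([1, 256, 25, 25]) torch.Size([1, 64, 26, 26])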

Example 4: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pixel_shuffle [as alias]
def forward(self, x):
        x = self.upsample_conv(x)
        x = F.pixel_shuffle(x, self.scale_factor)
        return x 
Developer: open-mmlab, Project: mmcv, Lines of code: 6, Source file: upsample.py

Example 5: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pixel_shuffle [as alias]
def forward(self, x):
        '''
x: [B, T, C, H, W], T = 7; permuted to [B, C, T, H, W] for Conv3D
        Generate filters and image residual:
        Fx: [B, 25, 16, H, W] for DynamicUpsamplingFilter_3C
        Rx: [B, 3*16, 1, H, W]
        '''
        B, T, C, H, W = x.size()
        x = x.permute(0, 2, 1, 3, 4)  # [B, C, T, H, W] for Conv3D
        x_center = x[:, :, T // 2, :, :]

        x = self.conv3d_1(x)
        x = self.dense_block_1(x)
        x = self.dense_block_2(x)  # reduce T to 1
        x = F.relu(self.conv3d_2(F.relu(self.bn3d_2(x), inplace=True)), inplace=True)

        # image residual
        Rx = self.conv3d_r2(F.relu(self.conv3d_r1(x), inplace=True))  # [B, 3*16, 1, H, W]

        # filter
        Fx = self.conv3d_f2(F.relu(self.conv3d_f1(x), inplace=True))  # [B, 25*16, 1, H, W]
        Fx = F.softmax(Fx.view(B, 25, self.scale**2, H, W), dim=1)

        # Adapt to official model weights
        if self.adapt_official:
            adapt_official(Rx, scale=self.scale)

        # dynamic filter
        out = self.dynamic_filter(x_center, Fx)  # [B, 3*R, H, W]
        out += Rx.squeeze_(2)
        out = F.pixel_shuffle(out, self.scale)  # [B, 3, H, W]

        return out 
Developer: xinntao, Project: EDVR, Lines of code: 35, Source file: DUF_arch.py
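
As a quick check of the final step above: the residual branch has 3 * scale^2 channels, so the last pixel_shuffle yields a 3-channel image at scale times the input resolution (scale = 4 below is an illustrative value, not a fixed setting of the model).

import torch
import torch.nn.functional as F

scale = 4                                   # illustrative value
out = torch.randn(1, 3 * scale ** 2, 32, 32)
print(F.pixel_shuffle(out, scale).shape)    # torch.Size([1, 3, 128, 128])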

Example 6: upsample

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pixel_shuffle [as alias]
def upsample(img, scale, border='reflect'):
  """Bicubic upsample via **CONV2D**, using PIL's kernel.

  Args:
    img: a torch tensor of 2/3/4-D.
    scale: must be an integer >= 2.
    border: padding mode; 'reflect' is recommended.
  """
  device = img.device
  kernels, s = weights_upsample(scale)
  if s == 1:
    return img  # bypass
  kernels = [k.astype('float32') for k in kernels]
  kernels = [torch.from_numpy(k) for k in kernels]
  p1 = 1 + s // 2
  p2 = 3
  img, shape = _push_shape_4d(img)
  img_ex = F.pad(img, [p1, p2, p1, p2], mode=border)
  c = img_ex.shape[1]
  assert c is not None, "img must define channel number"
  c = int(c)
  filters = [torch.reshape(torch.eye(c, c), [c, c, 1, 1]) * k for k in kernels]
  weights = torch.stack(filters, dim=0).transpose(0, 1).reshape([-1, c, 5, 5])
  img_s = F.conv2d(img_ex, weights.to(device))
  img_s = F.pixel_shuffle(img_s, s)
  more = s // 2 * s
  crop = slice(more - s // 2, - (s // 2))
  img_s = _pop_shape(img_s[..., crop, crop], shape)
  return img_s 
Developer: LoSealL, Project: VideoSuperResolution, Lines of code: 31, Source file: Utility.py
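
The helper above depends on weights_upsample, _push_shape_4d and _pop_shape from the same project, so it is not runnable on its own. The core trick, though, is a polyphase decomposition: compute s*s filtered copies of the image as channels and let pixel_shuffle interleave them. A toy version of that trick with the simplest possible kernel (pixel replication, i.e. nearest-neighbour upsampling) in place of the bicubic kernels:

import torch
import torch.nn.functional as F

s = 2
img = torch.arange(4.).reshape(1, 1, 2, 2)    # [[0, 1], [2, 3]]
weights = torch.ones(s * s, 1, 1, 1)          # s*s copies of the identity kernel
up = F.pixel_shuffle(F.conv2d(img, weights), s)
print(up)
# tensor([[[[0., 0., 1., 1.],
#           [0., 0., 1., 1.],
#           [2., 2., 3., 3.],
#           [2., 2., 3., 3.]]]])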

Example 7: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pixel_shuffle [as alias]
def forward(self, x):
        x = F.pixel_shuffle(x, 2)
        x = self.conv1(x)

        x = F.pixel_shuffle(x, 2)
        x = self.conv2(x)

        x = F.pixel_shuffle(x, 2)
        x = self.conv3(x)

        x = F.pixel_shuffle(x, 2)
        x = self.conv4(x)

        x = F.pixel_shuffle(x, 2)
        return x 
Developer: dwofk, Project: fast-depth, Lines of code: 17, Source file: models.py
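
Each pixel_shuffle(x, 2) above divides the channel count by 4, so conv1 through conv4 have to be built for progressively smaller widths. A hedged, self-contained two-stage sketch of the same decoder pattern (layer sizes are illustrative, not the fast-depth ones):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ShuffleDecoder(nn.Module):
    def __init__(self, in_channels=512):
        super().__init__()
        c = in_channels // 4                    # channels after the first shuffle
        self.conv1 = nn.Conv2d(c, c, 3, padding=1)
        self.conv2 = nn.Conv2d(c // 4, c // 4, 3, padding=1)

    def forward(self, x):
        x = self.conv1(F.pixel_shuffle(x, 2))   # (N, C/4,  2H, 2W)
        x = self.conv2(F.pixel_shuffle(x, 2))   # (N, C/16, 4H, 4W)
        return x

dec = ShuffleDecoder()
print(dec(torch.randn(1, 512, 7, 7)).shape)     # torch.Size([1, 32, 28, 28])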

Example 8: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import pixel_shuffle [as alias]
def forward(self, noise):
        down_1 = self.d_conv_1(noise)
        down_1 = self.d_bn_1(down_1)
        down_1 = F.leaky_relu(down_1)
        
        down_2 = self.d_conv_2(down_1)
        down_2 = self.d_bn_2(down_2)
        down_2 = F.leaky_relu(down_2)

        down_3 = self.d_conv_3(down_2)
        down_3 = self.d_bn_3(down_3)
        down_3 = F.leaky_relu(down_3)
        skip_3 = self.s_conv_3(down_3)

        down_4 = self.d_conv_4(down_3)
        down_4 = self.d_bn_4(down_4)
        down_4 = F.leaky_relu(down_4)
        skip_4 = self.s_conv_4(down_4)

        down_5 = self.d_conv_5(down_4)
        down_5 = self.d_bn_5(down_5)
        down_5 = F.leaky_relu(down_5)
        skip_5 = self.s_conv_5(down_5)

        down_6 = self.d_conv_6(down_5)
        down_6 = self.d_bn_6(down_6)
        down_6 = F.leaky_relu(down_6)

        up_5 = F.pixel_shuffle(down_6, 2)
        up_5 = torch.cat([up_5, skip_5], 1)
        up_5 = self.u_conv_5(up_5)
        up_5 = self.u_bn_5(up_5)
        up_5 = F.leaky_relu(up_5)

        up_4 = F.pixel_shuffle(up_5, 2)
        up_4 = torch.cat([up_4, skip_4], 1)
        up_4 = self.u_conv_4(up_4)
        up_4 = self.u_bn_4(up_4)
        up_4 = F.leaky_relu(up_4)

        up_3 = F.pixel_shuffle(up_4, 2)
        up_3 = torch.cat([up_3, skip_3], 1)
        up_3 = self.u_conv_3(up_3)
        up_3 = self.u_bn_3(up_3)
        up_3 = F.leaky_relu(up_3)

        up_2 = F.pixel_shuffle(up_3, 2)
        up_2 = self.u_conv_2(up_2)
        up_2 = self.u_bn_2(up_2)
        up_2 = F.leaky_relu(up_2)

        up_1 = F.pixel_shuffle(up_2, 2)
        up_1 = self.u_conv_1(up_1)
        up_1 = self.u_bn_1(up_1)
        up_1 = F.leaky_relu(up_1)

        out = F.pixel_shuffle(up_1, 2)
        out = self.out_conv(out)
        out = self.out_bn(out)
        out = torch.sigmoid(out)  # torch.sigmoid instead of the deprecated F.sigmoid
        return out 
Developer: atiyo, Project: deep_image_prior, Lines of code: 63, Source file: deep_image_prior.py


Note: The torch.nn.functional.pixel_shuffle examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project licenses. Please do not reproduce without permission.