

Python functional.conv2d Code Examples

This article collects typical usage examples of the Python method torch.nn.functional.conv2d. If you are wondering what exactly functional.conv2d does, how to call it, or what real-world uses of it look like, the curated code examples here should help. You can also explore further usage examples for the containing module, torch.nn.functional.


The text below presents 15 code examples of the functional.conv2d method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
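
Before the examples, here is a minimal, self-contained call to F.conv2d with illustrative tensor shapes (the shapes are assumptions for demonstration, not taken from any of the projects below); every snippet that follows builds on this signature.

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)        # input: (batch, in_channels, H, W)
weight = torch.randn(16, 3, 3, 3)    # kernels: (out_channels, in_channels/groups, kH, kW)
bias = torch.zeros(16)

# Basic signature: conv2d(input, weight, bias, stride, padding, dilation, groups)
y = F.conv2d(x, weight, bias=bias, stride=1, padding=1, dilation=1, groups=1)
print(y.shape)  # torch.Size([1, 16, 32, 32])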

Example 1: _initialize_u_v

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def _initialize_u_v(self):
        if self.kernel_size == (1, 1):
            self.register_buffer('u', F.normalize(self.weight.new_empty(self.out_channels).normal_(0, 1), dim=0))
            self.register_buffer('v', F.normalize(self.weight.new_empty(self.in_channels).normal_(0, 1), dim=0))
        else:
            c, h, w = self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item())
            with torch.no_grad():
                num_input_dim = c * h * w
                v = F.normalize(torch.randn(num_input_dim).to(self.weight), dim=0, eps=1e-12)
                # forward call to infer the shape
                u = F.conv2d(v.view(1, c, h, w), self.weight, stride=self.stride, padding=self.padding, bias=None)
                num_output_dim = u.shape[0] * u.shape[1] * u.shape[2] * u.shape[3]
                self.out_shape = u.shape
                # overwrite u with random init
                u = F.normalize(torch.randn(num_output_dim).to(self.weight), dim=0, eps=1e-12)

                self.register_buffer('u', u)
                self.register_buffer('v', v) 
Developer: rtqichen, Project: residual-flows, Lines: 20, Source: lipschitz.py

Example 2: compute_one_iter

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def compute_one_iter(self):
        if not self.initialized:
            raise ValueError('Layer needs to be initialized first.')
        domain, codomain = self.compute_domain_codomain()
        if self.kernel_size == (1, 1):
            u = self.u.detach()
            v = self.v.detach()
            weight = self.weight.detach().view(self.out_channels, self.in_channels)
            u = normalize_u(torch.mv(weight, v), codomain)
            v = normalize_v(torch.mv(weight.t(), u), domain)
            return torch.dot(u, torch.mv(weight, v))
        else:
            u = self.u.detach()
            v = self.v.detach()
            weight = self.weight.detach()
            c, h, w = self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item())
            u_s = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
            out_shape = u_s.shape
            u = normalize_u(u_s.view(-1), codomain)
            v_s = F.conv_transpose2d(
                u.view(out_shape), weight, stride=self.stride, padding=self.padding, output_padding=0
            )
            v = normalize_v(v_s.view(-1), domain)
            weight_v = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
            return torch.dot(u.view(-1), weight_v.view(-1)) 
Developer: rtqichen, Project: residual-flows, Lines: 27, Source: mixed_lipschitz.py
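
Examples 1 and 2 use F.conv2d and F.conv_transpose2d as the forward and adjoint operators in a power iteration that estimates the largest singular value of a convolution. The following is a minimal standalone sketch of that idea; the shapes, iteration count, and function name are illustrative assumptions, not part of the original class.

import torch
import torch.nn.functional as F

def conv_spectral_norm(weight, input_shape, stride=1, padding=1, n_iters=20):
    # Power-iteration estimate of the largest singular value of a conv2d operator.
    # input_shape is (channels, height, width) of a single input example.
    c, h, w = input_shape
    v = F.normalize(torch.randn(c * h * w), dim=0)
    for _ in range(n_iters):
        u = F.conv2d(v.view(1, c, h, w), weight, stride=stride, padding=padding)
        out_shape = u.shape
        u = F.normalize(u.reshape(-1), dim=0)
        v = F.conv_transpose2d(u.view(out_shape), weight, stride=stride, padding=padding)
        v = F.normalize(v.reshape(-1), dim=0)
    Wv = F.conv2d(v.view(1, c, h, w), weight, stride=stride, padding=padding)
    return torch.dot(u, Wv.reshape(-1))

w = torch.randn(8, 3, 3, 3)
print(conv_spectral_norm(w, (3, 16, 16)))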

Example 3: _ssim

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def _ssim(img1, img2, window, window_size, channel):
    window = window.transpose(0,1) / channel
    mu1 = F.conv2d(img1, window, padding = window_size//2, groups = 1)
    mu2 = F.conv2d(img2, window, padding = window_size//2, groups = 1)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2

    sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = 1) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = 1) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = 1) - mu1_mu2

    scale = 1
    C1 = (scale * 0.01)**2
    C2 = (scale * 0.03)**2

    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
    return ssim_map 
Developer: wyf2017, Project: DSMnet, Lines: 21, Source: SSIM.py

Example 4: _ssim1

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def _ssim1(img1, img2, window, window_size, channel):
    mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
    mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2

    sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2

    scale = 1
    C1 = (scale * 0.01)**2
    C2 = (scale * 0.03)**2

    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
    return ssim_map 
Developer: wyf2017, Project: DSMnet, Lines: 20, Source: SSIM.py

Example 5: _ssim0

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def _ssim0(img1, img2, window, window_size, channel):
    mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
    mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2

    dimg1 = (img1 - mu1)
    dimg2 = (img2 - mu2)
    sigma1_sq = F.conv2d(dimg1*dimg1, window, padding = window_size//2, groups = channel)
    sigma2_sq = F.conv2d(dimg2*dimg2, window, padding = window_size//2, groups = channel)
    sigma12 = F.conv2d(dimg1*dimg2, window, padding = window_size//2, groups = channel)

    scale = 1
    C1 = (scale * 0.01)**2
    C2 = (scale * 0.03)**2

    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
    return ssim_map 
Developer: wyf2017, Project: DSMnet, Lines: 22, Source: SSIM.py

Example 6: _ssim

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def _ssim(img1, img2, window, window_size, channel, size_average=True):
    mu1 = F.conv2d(img1, window, padding=window_size//2, groups=channel)
    mu2 = F.conv2d(img2, window, padding=window_size//2, groups=channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2

    sigma1_sq = F.conv2d(img1*img1, window, padding=window_size//2, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding=window_size//2, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding=window_size//2, groups=channel) - mu1_mu2

    C1 = 0.01**2
    C2 = 0.03**2

    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1) 
Developer: cszn, Project: KAIR, Lines: 22, Source: loss_ssim.py
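
The window argument in these SSIM helpers is a per-channel Gaussian kernel applied as a depthwise convolution (groups=channel). Below is a hedged sketch of how such a window is typically constructed and then passed to the _ssim from Example 6; the 11x11 size and sigma = 1.5 are conventional choices assumed here, not dictated by the snippets above.

import torch
import torch.nn.functional as F

def gaussian(window_size, sigma):
    # 1-D Gaussian profile, normalized to sum to 1.
    coords = torch.arange(window_size, dtype=torch.float32) - window_size // 2
    g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
    return g / g.sum()

def create_window(window_size, channel):
    _1d = gaussian(window_size, 1.5).unsqueeze(1)        # (window_size, 1)
    _2d = (_1d @ _1d.t()).unsqueeze(0).unsqueeze(0)      # (1, 1, ws, ws)
    return _2d.expand(channel, 1, window_size, window_size).contiguous()

img1 = torch.rand(1, 3, 64, 64)
img2 = torch.rand(1, 3, 64, 64)
window = create_window(11, 3)
score = _ssim(img1, img2, window, window_size=11, channel=3)  # scalar SSIM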

Example 7: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def forward(self, x):
    x_list = []
    s_num = self.s_num
    ch_ratio = (1+self.delta/self.g)
    ch_len = self.in_channels - self.delta
    for s in range(s_num):
        for start in range(0, self.delta+1, self.g):
            weight1 = self.weight[:, :ch_len, s:self.kernel_size[0]-s, s:self.kernel_size[0]-s]
            if self.padding[0]-s < 0:
                h = x.size(2)
                x1 = x[:,start:start+ch_len,s:h-s,s:h-s]
                padding1 = _pair(0)
            else:
                x1 = x[:,start:start+ch_len,:,:]
                padding1 = _pair(self.padding[0]-s)
            x_list.append(F.conv2d(x1, weight1, self.bias[int(self.out_channels*(s*ch_ratio+start)/s_num/ch_ratio):int(self.out_channels*(s*ch_ratio+start+1)/s_num/ch_ratio)], self.stride,
                      padding1, self.dilation, self.groups))
    x = torch.cat(x_list, 1)
    return x 
Developer: huawei-noah, Project: Versatile-Filters, Lines: 21, Source: vcnn.py

Example 8: _conv_forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def _conv_forward(self, input, weight):
        if self.padding_mode != "zeros":
            return F.conv2d(
                F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
                weight,
                self.bias,
                self.stride,
                _pair(0),
                self.dilation,
                self.groups,
            )
        return F.conv2d(
            input,
            weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        ) 
Developer: pytorch, Project: fairseq, Lines: 22, Source: qconv.py
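
A small illustration of the non-zero padding_mode branch above: the input is padded explicitly with F.pad and the convolution is then applied with padding=0. The tensor shapes and the choice of circular padding are illustrative assumptions.

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)
w = torch.randn(16, 3, 3, 3)

# Pad manually (here with wrap-around padding), then convolve with no implicit padding.
x_padded = F.pad(x, (1, 1, 1, 1), mode="circular")
y = F.conv2d(x_padded, w, bias=None, stride=1, padding=0)
print(y.shape)  # torch.Size([1, 16, 8, 8])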

Example 9: _ssim

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def _ssim(img1, img2, window, window_size, channel, size_average=True):
    mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
    mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2

    sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2

    C1 = 0.01 ** 2
    C2 = 0.03 ** 2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))

    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1) 
Developer: amanchadha, Project: iSeeBetter, Lines: 23, Source: __init__.py

Example 10: __init__

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def __init__(self, imsize, correct=True, device='cpu'):
        # conv2d is cross-correlation, need to transpose the kernel here
        self.HSOBEL_WEIGHTS_3x3 = torch.FloatTensor(
            np.array([[-1, -2, -1],
                     [ 0, 0, 0],
                     [1, 2, 1]]) / 8.0).unsqueeze(0).unsqueeze(0).to(device)

        self.VSOBEL_WEIGHTS_3x3 = self.HSOBEL_WEIGHTS_3x3.transpose(-1, -2)

        self.VSOBEL_WEIGHTS_5x5 = torch.FloatTensor(
                    np.array([[-5, -4, 0, 4, 5],
                                [-8, -10, 0, 10, 8],
                                [-10, -20, 0, 20, 10],
                                [-8, -10, 0, 10, 8],
                                [-5, -4, 0, 4, 5]]) / 240.).unsqueeze(0).unsqueeze(0).to(device)
        self.HSOBEL_WEIGHTS_5x5 = self.VSOBEL_WEIGHTS_5x5.transpose(-1, -2)

        modifier = np.eye(imsize)
        modifier[0:2, 0] = np.array([4, -1])
        modifier[-2:, -1] = np.array([-1, 4])
        self.modifier = torch.FloatTensor(modifier).to(device)
        self.correct = correct 
Developer: cics-nd, Project: pde-surrogate, Lines: 24, Source: image_gradient.py

Example 11: grad_v

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def grad_v(self, image, filter_size=3):
        image_height = image.shape[-2]
        if filter_size == 3:
            replicate_pad = 1
            kernel = self.HSOBEL_WEIGHTS_3x3
        elif filter_size == 5:
            replicate_pad = 2
            kernel = self.HSOBEL_WEIGHTS_5x5
        image = F.pad(image, _quadruple(replicate_pad), mode='replicate')
        grad = F.conv2d(image, kernel, stride=1, padding=0, 
            bias=None) * image_height
        # modify the boundary based on forward & backward finite difference
        if self.correct:
            return torch.matmul(self.modifier.t(), grad)
        else:
            return grad 
Developer: cics-nd, Project: pde-surrogate, Lines: 18, Source: image_gradient.py
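
Examples 10 and 11 apply a fixed Sobel kernel with F.conv2d after replicate padding to approximate an image gradient. A minimal standalone sketch of the same pattern follows; the image size is an illustrative assumption, and the boundary correction from the original class is omitted.

import torch
import torch.nn.functional as F

# 3x3 horizontal Sobel kernel scaled by 1/8, as in Example 10.
sobel_h = torch.tensor([[-1., -2., -1.],
                        [ 0.,  0.,  0.],
                        [ 1.,  2.,  1.]]) / 8.0
sobel_h = sobel_h.view(1, 1, 3, 3)

image = torch.rand(1, 1, 32, 32)
# Replicate-pad by 1 so the output keeps the input's spatial size.
padded = F.pad(image, (1, 1, 1, 1), mode="replicate")
grad = F.conv2d(padded, sobel_h, stride=1, padding=0) * image.shape[-2]
print(grad.shape)  # torch.Size([1, 1, 32, 32])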

Example 12: __init__

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, bias=True):
        super(ConvVariance, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = (kernel_size, kernel_size)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = 1
        self.sigma = Parameter(torch.Tensor(
            out_channels, in_channels, *self.kernel_size))
        if bias:
            self.bias = Parameter(torch.Tensor(1, out_channels, 1, 1))
        else:
            self.register_parameter('bias', None)
        self.op_bias = lambda input, kernel: F.conv2d(input, kernel, self.bias, self.stride, self.padding, self.dilation, self.groups)
        self.op_nobias = lambda input, kernel: F.conv2d(input, kernel, None, self.stride, self.padding, self.dilation, self.groups)
        self.reset_parameters()
        self.zero_mean = False
        self.permute_sigma = False 
Developer: da-molchanov, Project: variance-networks, Lines: 23, Source: layers.py

Example 13: conv_ws_2d

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def conv_ws_2d(input,
               weight,
               bias=None,
               stride=1,
               padding=0,
               dilation=1,
               groups=1,
               eps=1e-5):
    c_in = weight.size(0)
    weight_flat = weight.view(c_in, -1)
    mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)
    std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)
    weight = (weight - mean) / (std + eps)
    return F.conv2d(input, weight, bias, stride, padding, dilation, groups) 
Developer: open-mmlab, Project: mmdetection, Lines: 16, Source: conv_ws.py
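
A short usage sketch for conv_ws_2d above; the tensor shapes are illustrative assumptions.

import torch

x = torch.randn(2, 3, 16, 16)
w = torch.randn(8, 3, 3, 3)
y = conv_ws_2d(x, w, padding=1)  # convolution with weight-standardized kernels
print(y.shape)  # torch.Size([2, 8, 16, 16])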

Example 14: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def forward(self, x):
        weight = self._get_weight(self.weight)
        return F.conv2d(x, weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups) 
Developer: open-mmlab, Project: mmdetection, Lines: 6, Source: conv_ws.py

Example 15: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv2d [as alias]
def forward(self, x):
        out = self.batch_norm(x)
        out = self.relu(out)
        if self.dropout_rate > 0:
            out = self.dropout(out)
        ## Dropping here ##
        self.check_if_drop()
        # To mask the output
        weight = self.conv.weight * self.mask
        out_conv = F.conv2d(input=out, weight=weight, bias=None, stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=1)
        return out_conv 
Developer: moemen95, Project: Pytorch-Project-Template, Lines: 13, Source: learnedgroupconv.py
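
A minimal sketch of the weight-masking pattern used in Example 15, with an ordinary nn.Conv2d and a hypothetical binary mask (the mask layout and shapes are assumptions for illustration, not the original learned mask):

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(4, 8, kernel_size=3, padding=1, bias=False)
mask = torch.ones_like(conv.weight)
mask[4:] = 0                      # zero out half of the output filters

x = torch.randn(1, 4, 10, 10)
out = F.conv2d(x, conv.weight * mask, bias=None, stride=conv.stride,
               padding=conv.padding, dilation=conv.dilation, groups=1)
print(out.shape)  # torch.Size([1, 8, 10, 10])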


Note: The torch.nn.functional.conv2d examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not republish without permission.