

Python functional.unfold Method Code Examples

This article collects and summarizes typical usage examples of the Python method torch.nn.functional.unfold. If you have been wondering what exactly functional.unfold does, how to call it, or what working code that uses it looks like, the curated examples below should help. You can also explore further usage examples from its parent module, torch.nn.functional.


The following presents 15 code examples of the functional.unfold method, sorted by popularity by default.
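
Before diving into the examples, here is a minimal sketch of what F.unfold does (the shapes below are illustrative, not taken from any example): it slides a kernel-sized window over the input and stacks each patch as a column.

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8)                    # (N, C, H, W)
cols = F.unfold(x, kernel_size=3, padding=1)   # (N, C*3*3, L) = (2, 27, 64)
# L = 64 sliding positions, since padding=1 preserves the 8x8 spatial size.

# F.fold is the summing inverse: overlapping patches are accumulated, so
# fold(unfold(x)) equals x only after dividing by the per-pixel overlap count.
recon = F.fold(cols, output_size=(8, 8), kernel_size=3, padding=1)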

Example 1: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def forward(ctx, input, kernel_size, stride, padding, dilation, channel_wise):
        ctx.kernel_size = _pair(kernel_size)
        ctx.dilation = _pair(dilation)
        ctx.padding = _pair(padding)
        ctx.stride = _pair(stride)
        bs, ch, in_h, in_w = input.shape
        out_h = (in_h + 2 * ctx.padding[0] - ctx.dilation[0] * (ctx.kernel_size[0] - 1) - 1) // ctx.stride[0] + 1
        out_w = (in_w + 2 * ctx.padding[1] - ctx.dilation[1] * (ctx.kernel_size[1] - 1) - 1) // ctx.stride[1] + 1
        cols = F.unfold(input, ctx.kernel_size, ctx.dilation, ctx.padding, ctx.stride)
        cols = cols.view(bs, ch, ctx.kernel_size[0], ctx.kernel_size[1], out_h, out_w)
        center_y, center_x = ctx.kernel_size[0] // 2, ctx.kernel_size[1] // 2
        feat_0 = cols.contiguous()[:, :, center_y:center_y + 1, center_x:center_x + 1, :, :]
        diff_sq = (cols - feat_0).pow(2)
        if not channel_wise:
            diff_sq = diff_sq.sum(dim=1, keepdim=True)
        output = torch.exp(-0.5 * diff_sq)
        ctx._backend = type2backend[input.type()]  # THNN backend (torch._thnn); deprecated in modern PyTorch
        ctx.save_for_backward(input, output)

        return output 
Developer: openseg-group, Project: openseg.pytorch, Lines of code: 22, Source file: pac.py
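
The out_h/out_w arithmetic above is the standard convolution output-size formula; F.unfold produces exactly out_h * out_w columns. A quick self-contained check with toy numbers (an illustration, not part of the pac.py source):

import torch
import torch.nn.functional as F

in_h = in_w = 10
k, d, p, s = 3, 2, 1, 2                                  # kernel, dilation, padding, stride
out_h = (in_h + 2 * p - d * (k - 1) - 1) // s + 1        # = 4
x = torch.randn(1, 1, in_h, in_w)
cols = F.unfold(x, k, dilation=d, padding=p, stride=s)   # (1, k*k, out_h*out_w)
assert cols.shape[-1] == out_h * out_h                   # 16 sliding positions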

Example 2: backward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def backward(ctx, grad_output):
        input, output = ctx.saved_tensors
        bs, ch, in_h, in_w = input.shape
        out_h, out_w = output.shape[-2:]
        cols = F.unfold(input, ctx.kernel_size, ctx.dilation, ctx.padding, ctx.stride)
        cols = cols.view(bs, ch, ctx.kernel_size[0], ctx.kernel_size[1], out_h, out_w)
        center_y, center_x = ctx.kernel_size[0] // 2, ctx.kernel_size[1] // 2
        feat_0 = cols.contiguous()[:, :, center_y:center_y + 1, center_x:center_x + 1, :, :]
        diff = cols - feat_0
        grad = -0.5 * grad_output * output
        grad_diff = grad.expand_as(cols) * (2 * diff)
        grad_diff[:, :, center_y:center_y + 1, center_x:center_x + 1, :, :] -= \
            grad_diff.sum(dim=2, keepdim=True).sum(dim=3, keepdim=True)
        grad_input = grad_output.new()
        ctx._backend.Im2Col_updateGradInput(ctx._backend.library_state,
                                            grad_diff.view(bs, ch * ctx.kernel_size[0] * ctx.kernel_size[1], -1),
                                            grad_input,
                                            in_h, in_w,
                                            ctx.kernel_size[0], ctx.kernel_size[1],
                                            ctx.dilation[0], ctx.dilation[1],
                                            ctx.padding[0], ctx.padding[1],
                                            ctx.stride[0], ctx.stride[1])

        return grad_input, None, None, None, None, None 
Developer: openseg-group, Project: openseg.pytorch, Lines of code: 26, Source file: pac.py
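
The Im2Col_updateGradInput call comes from the long-removed THNN backend; it scatters the per-patch gradients back onto the input grid (col2im). On current PyTorch the same accumulation is exposed as F.fold, so the tail of this backward could plausibly be replaced by the following sketch (not the original openseg.pytorch code):

# col2im via F.fold, assuming the variables defined in backward() above
grad_input = F.fold(grad_diff.view(bs, ch * ctx.kernel_size[0] * ctx.kernel_size[1], -1),
                    output_size=(in_h, in_w),
                    kernel_size=ctx.kernel_size,
                    dilation=ctx.dilation,
                    padding=ctx.padding,
                    stride=ctx.stride)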

Example 3: im2col_indices

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def im2col_indices(
    x: Tensor,
    kernel_height: int,
    kernel_width: int,
    padding: Tuple[int, int] = (0, 0),
    stride: Tuple[int, int] = (1, 1),
) -> Tensor:
    # language=rst
    """
    im2col is a special case of unfold, which is implemented natively in PyTorch.

    :param x: Input image tensor to be reshaped to column-wise format.
    :param kernel_height: Height of the convolutional kernel in pixels.
    :param kernel_width: Width of the convolutional kernel in pixels.
    :param padding: Amount of zero padding on the input image.
    :param stride: Amount to stride over image by per convolution.
    :return: Input tensor reshaped to column-wise format.
    """
    return F.unfold(x, (kernel_height, kernel_width), padding=padding, stride=stride) 
Developer: BindsNET, Project: bindsnet, Lines of code: 21, Source file: utils.py
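
Because unfold is im2col, a full 2-D convolution reduces to a single matrix product over the unfolded columns. A small equivalence check (tensor sizes are arbitrary illustrations):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 10, 10)
w = torch.randn(8, 3, 3, 3)

cols = F.unfold(x, kernel_size=3)                 # (1, 3*3*3, 8*8)
out = (w.view(8, -1) @ cols).view(1, 8, 8, 8)     # matmul over columns
assert torch.allclose(out, F.conv2d(x, w), atol=1e-4)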

Example 4: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def forward(self, *input):
        x = input[0]
        size = x.size()
        kernel = self.conv1(x)
        kernel = self.conv2(kernel)
        kernel = F.softmax(kernel, 1)
        kernel = kernel.reshape(size[0], 1, size[2] * size[3], 7 * 7)
        # print("Before unfold", x.shape)
        x = F.unfold(x, kernel_size=[7, 7], dilation=[2, 2], padding=6)
        # print("After unfold", x.shape)
        x = x.reshape(size[0], size[1], size[2] * size[3], -1)
        # print(x.shape, kernel.shape)
        x = torch.mul(x, kernel)
        x = torch.sum(x, dim=3)
        x = x.reshape(size[0], size[1], size[2], size[3])
        return x 
Developer: Ugness, Project: PiCANet-Implementation, Lines of code: 18, Source file: network.py

Example 5: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def forward(self, *input):
        x = input[0]
        size = x.size()
        kernel = self.renet(x)
        kernel = F.softmax(kernel, 1)
        # print(kernel.size())
        x = F.unfold(x, [10, 10], dilation=[3, 3])
        x = x.reshape(size[0], size[1], 10 * 10)
        kernel = kernel.reshape(size[0], 100, -1)
        x = torch.matmul(x, kernel)
        x = x.reshape(size[0], size[1], size[2], size[3])

        # for attention visualization

        # print(torch.cuda.memory_allocated() / 1024 / 1024)
        attention = kernel.data
        attention = attention.requires_grad_(False)
        attention = torch.reshape(attention, (size[0], -1, 10, 10))
        # attention = F.conv_transpose2d(torch.ones((1, 1, 1, 1)).cuda(), attention, dilation=3)
        attention = F.interpolate(attention, 224, mode='bilinear', align_corners=True)
        # attention = F.interpolate(attention, 224, mode='area')
        attention = torch.reshape(attention, (size[0], size[2], size[3], 224, 224))
        return x, attention 
Developer: Ugness, Project: PiCANet-Implementation, Lines of code: 25, Source file: network4att_test.py

Example 6: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def forward(self, x, trace_in):
        r"""Calculate postsynaptic activation potentials and trace.
        
        :param x: Presynaptic spikes.
        :param trace_in: Presynaptic trace.

        :return: (Activation potentials, Postsynaptic trace)
        """
        trace_in = self.unfold(trace_in)
        self.update_trace(trace_in)
        x = self.convert_spikes(x)
        x = self.unfold(x)  # Till here it is a rather easy set of steps
        x = self.propagate_spike(x)  # Output spikes
        return self.activation_potential(x), self.fold(self.trace)


#########################################################
# Max Pooling
######################################################### 
Developer: BasBuller, Project: PySNN, Lines of code: 21, Source file: connection.py

Example 7: select_mask_logistic_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def select_mask_logistic_loss(p_m, mask, weight, o_sz=63, g_sz=127):
    weight = weight.view(-1)
    pos = Variable(weight.data.eq(1).nonzero().squeeze())
    if pos.nelement() == 0: return p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0

    if len(p_m.shape) == 4:
        p_m = p_m.permute(0, 2, 3, 1).contiguous().view(-1, 1, o_sz, o_sz)
        p_m = torch.index_select(p_m, 0, pos)
        p_m = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])(p_m)
        p_m = p_m.view(-1, g_sz * g_sz)
    else:
        p_m = torch.index_select(p_m, 0, pos)

    mask_uf = F.unfold(mask, (g_sz, g_sz), padding=0, stride=8)
    mask_uf = torch.transpose(mask_uf, 1, 2).contiguous().view(-1, g_sz * g_sz)

    mask_uf = torch.index_select(mask_uf, 0, pos)
    loss = F.soft_margin_loss(p_m, mask_uf)
    iou_m, iou_5, iou_7 = iou_measure(p_m, mask_uf)
    return loss, iou_m, iou_5, iou_7 
Developer: foolwood, Project: SiamMask, Lines of code: 22, Source file: siammask_sharp.py

Example 8: select_mask_logistic_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def select_mask_logistic_loss(p_m, mask, weight, o_sz=63, g_sz=127):
    weight = weight.view(-1)
    pos = Variable(weight.data.eq(1).nonzero().squeeze())
    if pos.nelement() == 0: return p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0, p_m.sum() * 0

    p_m = p_m.permute(0, 2, 3, 1).contiguous().view(-1, 1, o_sz, o_sz)
    p_m = torch.index_select(p_m, 0, pos)
    p_m = nn.UpsamplingBilinear2d(size=[g_sz, g_sz])(p_m)
    p_m = p_m.view(-1, g_sz * g_sz)

    mask_uf = F.unfold(mask, (g_sz, g_sz), padding=32, stride=8)
    mask_uf = torch.transpose(mask_uf, 1, 2).contiguous().view(-1, g_sz * g_sz)

    mask_uf = torch.index_select(mask_uf, 0, pos)
    loss = F.soft_margin_loss(p_m, mask_uf)
    iou_m, iou_5, iou_7 = iou_measure(p_m, mask_uf)
    return loss, iou_m, iou_5, iou_7 
Developer: foolwood, Project: SiamMask, Lines of code: 19, Source file: siammask.py

Example 9: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def forward(self, f, corr_feature, pos=None, test=False):
        if test:
            p0 = torch.nn.functional.pad(f[0], [16, 16, 16, 16])[:, :, 4*pos[0]:4*pos[0]+61, 4*pos[1]:4*pos[1]+61]
            p1 = torch.nn.functional.pad(f[1], [8, 8, 8, 8])[:, :, 2 * pos[0]:2 * pos[0] + 31, 2 * pos[1]:2 * pos[1] + 31]
            p2 = torch.nn.functional.pad(f[2], [4, 4, 4, 4])[:, :, pos[0]:pos[0] + 15, pos[1]:pos[1] + 15]
        else:
            p0 = F.unfold(f[0], (61, 61), padding=0, stride=4).permute(0, 2, 1).contiguous().view(-1, 64, 61, 61)
            if not (pos is None): p0 = torch.index_select(p0, 0, pos)
            p1 = F.unfold(f[1], (31, 31), padding=0, stride=2).permute(0, 2, 1).contiguous().view(-1, 256, 31, 31)
            if not (pos is None): p1 = torch.index_select(p1, 0, pos)
            p2 = F.unfold(f[2], (15, 15), padding=0, stride=1).permute(0, 2, 1).contiguous().view(-1, 512, 15, 15)
            if not (pos is None): p2 = torch.index_select(p2, 0, pos)

        if not(pos is None):
            p3 = corr_feature[:, :, pos[0], pos[1]].view(-1, 256, 1, 1)
        else:
            p3 = corr_feature.permute(0, 2, 3, 1).contiguous().view(-1, 256, 1, 1)

        out = self.deconv(p3)
        out = self.post0(F.upsample(self.h2(out) + self.v2(p2), size=(31, 31)))
        out = self.post1(F.upsample(self.h1(out) + self.v1(p1), size=(61, 61)))
        out = self.post2(F.upsample(self.h0(out) + self.v0(p0), size=(127, 127)))
        out = out.view(-1, 127*127)
        return out 
Developer: foolwood, Project: SiamMask, Lines of code: 26, Source file: custom.py
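
The unfold → permute → view chain used for p0/p1/p2 is a reusable trick: it turns every sliding window into its own batch element, so each candidate position can be index_select-ed and processed like an independent image. A minimal sketch with made-up sizes:

import torch
import torch.nn.functional as F

f = torch.randn(1, 64, 125, 125)
# every 61x61 window at stride 4 becomes one (64, 61, 61) "image"
p0 = F.unfold(f, (61, 61), padding=0, stride=4)              # (1, 64*61*61, 17*17)
p0 = p0.permute(0, 2, 1).contiguous().view(-1, 64, 61, 61)   # (289, 64, 61, 61)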

Example 10: im2col_indices

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def im2col_indices(
    x: Tensor,
    kernel_height: int,
    kernel_width: int,
    padding: Tuple[int, int] = (0, 0),
    stride: Tuple[int, int] = (1, 1),
) -> Tensor:
    # language=rst
    """
    im2col is a special case of unfold, which is implemented natively in PyTorch.

    :param x: Input image tensor to be reshaped to column-wise format.
    :param kernel_height: Height of the convolutional kernel in pixels.
    :param kernel_width: Width of the convolutional kernel in pixels.
    :param padding: Amount of zero padding on the input image.
    :param stride: Amount to stride over image by per convolution.
    :return: Input tensor reshaped to column-wise format.
    """
    return F.unfold(
        x, (kernel_height, kernel_width), padding=padding, stride=stride
    ) 
Developer: BINDS-LAB-UMASS, Project: bindsnet, Lines of code: 23, Source file: utils.py

Example 11: feature_region_reg_loss

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def feature_region_reg_loss(gt_f_pairs):
    inp = np.zeros((15, 15), dtype=np.float32)
    inp[7, 7] = 1
    gaussian_kernel = fi.gaussian_filter(inp, 3.5)  # fi is scipy.ndimage.filters
    target = torch.cuda.FloatTensor(gaussian_kernel).view(1, 15 * 15, 1)
    target = (1.0 - target / target.max()) * 2.0
    loss = 0.0
    for (f_a, gt_f_wrap_b) in gt_f_pairs:
        N, C, H, W = f_a.shape
        unfold_gt_f_wrap_b = F.unfold(gt_f_wrap_b, kernel_size=15, padding=7, stride=4).view(N, C, 15 * 15, H * W // 16)                # (N, C, 15*15, num_patches)
        unfold_f_a = F.unfold(f_a, kernel_size=1, padding=0, stride=4).view(N, C, 1, H * W // 16)                                       # (N, C, 1, num_patches)
        e = torch.norm(unfold_f_a - unfold_gt_f_wrap_b, p=2, dim=1)                                                                     # (N, 15*15, num_patches)
        meann = torch.mean(e, 1, keepdim=True)
        e = e / meann
        loss += torch.mean((target - e) ** 2)
    return loss 
Developer: sfu-gruvi-3dv, Project: sanet_relocal_demo, Lines of code: 18, Source file: train_interface.py

Example 12: grad_conv2d

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def grad_conv2d(module: nn.Module, data_input: torch.Tensor, grad_output: torch.Tensor):

    assert isinstance(module, nn.Conv2d)
    conv2d = module
    assert data_input.ndimension() == 4  # n x c_in x h_in x w_in
    assert grad_output.ndimension() == 4  # n x c_out x h_out x w_out

    if conv2d.weight.requires_grad:
        # n x (c_in)(k_h)(k_w) x (h_out)(w_out)
        input2d = F.unfold(data_input,
                           kernel_size=conv2d.kernel_size, stride=conv2d.stride,
                           padding=conv2d.padding, dilation=conv2d.dilation)

        # n x c_out x h_out x w_out
        n, c_out, h, w = grad_output.size()
        # n x c_out x (h_out)(w_out)
        grad_output2d = grad_output.view(n, c_out, -1)

        c_out, c_in, k_h, k_w = conv2d.weight.size()

        grads_2d = torch.einsum('bik,bjk->bij', grad_output2d, input2d)  # n x c_out x (c_in)(k_h)(k_w)
        setattr(conv2d.weight, 'grads', grads_2d.view(n, c_out, c_in, k_h, k_w))  # n x c_out x c_in x k_h x k_w

    if hasattr(conv2d, 'bias') and conv2d.bias.requires_grad:
        setattr(conv2d.bias, 'grads', grad_output.sum(dim=(2, 3)))  # n x c_out 
Developer: cybertronai, Project: pytorch-sso, Lines of code: 27, Source file: samplegrad.py
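
A quick way to sanity-check this per-sample gradient trick: with loss = output.sum(), the incoming grad_output is all ones, and the per-sample grads summed over the batch must reproduce conv.weight.grad. A sketch under those assumptions (variable names are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(3, 8, 3, padding=1)
x = torch.randn(4, 3, 10, 10)
conv(x).sum().backward()

cols = F.unfold(x, conv.kernel_size, padding=conv.padding)   # (4, 27, 100)
go = torch.ones(4, 8, 100)                                   # grad of sum() w.r.t. the output
grads = torch.einsum('bik,bjk->bij', go, cols).view(4, 8, 3, 3, 3)
assert torch.allclose(grads.sum(0), conv.weight.grad, atol=1e-4)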

Example 13: update_in_forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def update_in_forward(self, data_input):
        conv2d = self._module

        # n x (c_in)(k_h)(k_w) x (h_out)(w_out)
        input2d = F.unfold(data_input,
                           kernel_size=conv2d.kernel_size, stride=conv2d.stride,
                           padding=conv2d.padding, dilation=conv2d.dilation)

        n, a, _ = input2d.shape

        # (c_in)(k_h)(k_w) x n(h_out)(w_out)
        m = input2d.transpose(0, 1).reshape(a, -1)
        a, b = m.shape
        if self.bias:
            # {(c_in)(k_h)(k_w) + 1} x n(h_out)(w_out)
            m = torch.cat((m, m.new_ones((1, b))), 0)

        # (c_in)(k_h)(k_w) x (c_in)(k_h)(k_w) or
        # {(c_in)(k_h)(k_w) + 1} x {(c_in)(k_h)(k_w) + 1}
        A = torch.einsum('ik,jk->ij', m, m).div(n)
        self._A = A 
Developer: cybertronai, Project: pytorch-sso, Lines of code: 23, Source file: conv.py

Example 14: nd2col

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def nd2col(input_nd, kernel_size, stride=1, padding=0, output_padding=0, dilation=1, transposed=False,
           use_pyinn_if_possible=False):
    """
    Shape:
        - Input: :math:`(N, C, L_{in})`
        - Output: :math:`(N, C, *kernel_size, *L_{out})` where
          :math:`L_{out} = floor((L_{in} + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1)` for non-transposed
          :math:`L_{out} = (L_{in} - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1 + output_padding` for transposed
    """
    n_dims = len(input_nd.shape[2:])
    kernel_size = (kernel_size,) * n_dims if isinstance(kernel_size, Number) else kernel_size
    stride = (stride,) * n_dims if isinstance(stride, Number) else stride
    padding = (padding,) * n_dims if isinstance(padding, Number) else padding
    output_padding = (output_padding,) * n_dims if isinstance(output_padding, Number) else output_padding
    dilation = (dilation,) * n_dims if isinstance(dilation, Number) else dilation

    if transposed:
        assert n_dims == 2, 'Only 2D is supported for fractional strides.'
        w_one = input_nd.new_ones(1, 1, 1, 1)
        pad = [(k - 1) * d - p for (k, d, p) in zip(kernel_size, dilation, padding)]
        input_nd = F.conv_transpose2d(input_nd, w_one, stride=stride)
        input_nd = F.pad(input_nd, (pad[1], pad[1] + output_padding[1], pad[0], pad[0] + output_padding[0]))
        stride = _pair(1)
        padding = _pair(0)

    (bs, nch), in_sz = input_nd.shape[:2], input_nd.shape[2:]
    out_sz = tuple([((i + 2 * p - d * (k - 1) - 1) // s + 1)
                    for (i, k, d, p, s) in zip(in_sz, kernel_size, dilation, padding, stride)])
    # Use PyINN if possible (about 15% faster) TODO confirm the speed-up
    if n_dims == 2 and dilation == 1 and has_pyinn and torch.cuda.is_available() and use_pyinn_if_possible:
        output = P.im2col(input_nd, kernel_size, stride, padding)
    else:
        output = F.unfold(input_nd, kernel_size, dilation, padding, stride)
        out_shape = (bs, nch) + tuple(kernel_size) + out_sz
        output = output.view(*out_shape).contiguous()
    return output 
Developer: openseg-group, Project: openseg.pytorch, Lines of code: 38, Source file: pac.py
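
A brief usage sketch of nd2col (assuming the imports its body relies on, e.g. numbers.Number, torch.nn.modules.utils._pair, and torch.nn.functional as F):

x = torch.randn(2, 3, 8, 8)
cols = nd2col(x, 3, padding=1)   # (2, 3, 3, 3, 8, 8), i.e. (N, C, *kernel_size, *L_out)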

Example 15: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import unfold [as alias]
def forward(self, x, ax=None):
        # x: B, H, L, 1, ax : B, H, X, L append features
        nhid, nhead, head_dim, unfold_size = self.nhid, self.nhead, self.head_dim, self.unfold_size
        B, H, L, _ = x.shape

        q, k, v = self.WQ(x), self.WK(x), self.WV(x)  # x: (B,H,L,1)

        if ax is not None:
            aL = ax.shape[2]
            ak = self.WK(ax).view(B, nhead, head_dim, aL, L)
            av = self.WV(ax).view(B, nhead, head_dim, aL, L)
        q = q.view(B, nhead, head_dim, 1, L)
        k = F.unfold(k.view(B, nhead * head_dim, L, 1), (unfold_size, 1), padding=(unfold_size // 2, 0)) \
            .view(B, nhead, head_dim, unfold_size, L)
        v = F.unfold(v.view(B, nhead * head_dim, L, 1), (unfold_size, 1), padding=(unfold_size // 2, 0)) \
            .view(B, nhead, head_dim, unfold_size, L)
        if ax is not None:
            k = torch.cat([k, ak], 3)
            v = torch.cat([v, av], 3)

        alphas = self.drop(F.softmax((q * k).sum(2, keepdim=True) / NP.sqrt(head_dim), 3))  # B N L 1 U; NP is numpy
        att = (alphas * v).sum(3).view(B, nhead * head_dim, L, 1)

        ret = self.WO(att)

        return ret 
Developer: fastnlp, Project: fastNLP, Lines of code: 28, Source file: star_transformer.py
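
The (unfold_size, 1) kernel here is a 1-D windowing trick: the sequence is viewed as an L x 1 "image" so that F.unfold gathers, for every position, its unfold_size neighbours as the local attention context. A standalone sketch with toy sizes (illustrative, not the fastNLP code):

import torch
import torch.nn.functional as F

B, C, L, u = 2, 16, 10, 3
seq = torch.randn(B, C, L, 1)                     # length-L sequence as an (L, 1) image
win = F.unfold(seq, (u, 1), padding=(u // 2, 0))  # (B, C*u, L)
win = win.view(B, C, u, L)                        # u neighbours (incl. self) per position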


Note: The torch.nn.functional.unfold method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; for distribution and use, please consult each project's License. Do not reproduce without permission.