

Python functional.conv1d Method Code Examples

This article collects typical usage examples of the Python method torch.nn.functional.conv1d. If you are wondering what functional.conv1d does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the torch.nn.functional module.


Below are 15 code examples of the functional.conv1d method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
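
Before working through the examples, here is a minimal self-contained sketch of the bare F.conv1d call, as a reminder of the input and weight layout that all of the examples below rely on (the tensor sizes are illustrative):

import torch
import torch.nn.functional as F

# Input is (batch, in_channels, length); weight is
# (out_channels, in_channels / groups, kernel_size).
x = torch.randn(4, 8, 100)
w = torch.randn(16, 8, 3)

y = F.conv1d(x, w, padding=1)  # padding=1 preserves length for kernel_size=3
print(y.shape)  # torch.Size([4, 16, 100])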

Example 1: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def forward(self, input):
        '''
        input size: B x C x T
        output size: B x C x T
        '''
        B, C, T = input.size()
        H = self.num_heads

        weight = self.weight
        if self.weight_softmax:
            weight = F.softmax(weight, dim=-1)

        weight = F.dropout(weight, self.weight_dropout, training=self.training)
        # Fold groups of C/H channels into the batch dimension (C = self.input_size)
        # B x C x T -> (B * C/H) x H x T
        # Alternatively, one could expand the weight by a factor of C/H to C x 1 x K
        # and leave the input unreshaped, but that is slower
        input = input.view(-1, H, T)
        output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads)
        output = output.view(B, C, T)
        if self.bias is not None:
            output = output + self.bias.view(1, -1, 1)

        return output 
Developer: pytorch | Project: fairseq | Lines: 26 | Source: lightweight_convolution.py
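
The reshaping trick above is easy to exercise in isolation. A minimal sketch, assuming C is divisible by the number of heads H and an odd kernel size K (all sizes are illustrative):

import torch
import torch.nn.functional as F

B, C, T, H, K = 2, 8, 16, 4, 3  # C must be divisible by H

x = torch.randn(B, C, T)
# One kernel per head, shared by the C/H channels of that head;
# softmax-normalized as in the weight_softmax branch above.
weight = torch.softmax(torch.randn(H, 1, K), dim=-1)

out = F.conv1d(x.view(-1, H, T), weight, padding=K // 2, groups=H)
print(out.view(B, C, T).shape)  # torch.Size([2, 8, 16])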

Example 2: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def forward(self, z, reverse=False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation: invert W once and cache it for reuse
                W_inverse = W.inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.type() == 'torch.cuda.HalfTensor':
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W)
            z = self.conv(z)
            return z, log_det_W 
Developer: alphacep | Project: tn2-wg | Lines: 23 | Source: glow.py
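
The round trip through the cached inverse can be checked directly. A sketch assuming an orthogonal weight initialization, as WaveGlow-style invertible 1x1 convolutions typically use:

import torch
import torch.nn.functional as F

group_size = 8
# Random orthogonal W, so the 1x1 convolution is trivially invertible.
W = torch.linalg.qr(torch.randn(group_size, group_size))[0]
z = torch.randn(2, group_size, 100)

y = F.conv1d(z, W[..., None])                # forward: mix channels with W
z_rec = F.conv1d(y, W.inverse()[..., None])  # reverse: conv with W^{-1}
print(torch.allclose(z, z_rec, atol=1e-4))   # True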

Example 3: __init__

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def __init__(self, spatial_dims: int, sigma, truncated: float = 4.0):
        """
        Args:
            spatial_dims: number of spatial dimensions of the input image.
                must have shape (Batch, channels, H[, W, ...]).
            sigma (float or sequence of floats): standard deviation(s) of the Gaussian kernel.
            truncated: kernel extent, in number of standard deviations.
        """
        super().__init__()
        self.spatial_dims = int(spatial_dims)
        _sigma = ensure_tuple_rep(sigma, self.spatial_dims)
        self.kernel = [
            torch.nn.Parameter(torch.as_tensor(gaussian_1d(s, truncated), dtype=torch.float), False) for s in _sigma
        ]
        self.padding = [same_padding(k.size()[0]) for k in self.kernel]
        self.conv_n = [F.conv1d, F.conv2d, F.conv3d][spatial_dims - 1]
        for idx, param in enumerate(self.kernel):
            self.register_parameter(f"kernel_{idx}", param) 
Developer: Project-MONAI | Project: MONAI | Lines: 20 | Source: simplelayers.py
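
The MONAI helpers (gaussian_1d, same_padding, ensure_tuple_rep) handle the kernel bookkeeping. A rough self-contained equivalent for the 1D case might look like the sketch below; the kernel construction is an approximation, not MONAI's exact gaussian_1d:

import torch
import torch.nn.functional as F

def gaussian_kernel_1d(sigma: float, truncated: float = 4.0) -> torch.Tensor:
    # Sample the Gaussian out to `truncated` standard deviations and normalize.
    radius = int(truncated * sigma + 0.5)
    x = torch.arange(-radius, radius + 1, dtype=torch.float)
    kernel = torch.exp(-0.5 * (x / sigma) ** 2)
    return kernel / kernel.sum()

x = torch.randn(1, 1, 64)           # (Batch, channels, H)
k = gaussian_kernel_1d(sigma=2.0)
y = F.conv1d(x, k.view(1, 1, -1), padding=k.numel() // 2)
print(y.shape)  # torch.Size([1, 1, 64])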

Example 4: net

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def net(self, x, block_num, params=None):
        layer_name = "ll_tc.ll_temporal_block" + str(block_num)
        if params is None:
            x = self.ll_conv1(x)
        else:
            x = F.conv1d(
                x,
                weight=params[layer_name + ".ll_conv1.weight"],
                bias=params[layer_name + ".ll_conv1.bias"],
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
            )

        x = self.chomp1(x)
        x = F.leaky_relu(x)

        return x 
Developer: allenai | Project: savn | Lines: 20 | Source: tcn.py
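
The point of routing through F.conv1d here is that the weights can come from an external dictionary, as meta-learning inner loops require. A standalone sketch (the layer sizes and dictionary keys are illustrative):

import torch
import torch.nn.functional as F

conv = torch.nn.Conv1d(4, 8, kernel_size=3, padding=2, dilation=2)
x = torch.randn(2, 4, 32)

# Functional re-application of the same layer with weights taken from a dict.
params = {"conv.weight": conv.weight, "conv.bias": conv.bias}
y = F.conv1d(x, params["conv.weight"], params["conv.bias"],
             stride=1, padding=2, dilation=2)

print(torch.allclose(y, conv(x)))  # True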

Example 5: calcEdgeFlux

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def calcEdgeFlux(self, flux):

        flux_edge1 = F.conv1d(F.pad(flux, (2,1), mode='circular'), self.weight_flux1, 
                        stride=1, padding=0, bias=None)

        flux_edge2 = F.conv1d(F.pad(flux, (1,2), mode='circular'), self.weight_flux2, 
                        stride=1, padding=0, bias=None)

        beta1 = torch.pow(F.conv1d(F.pad(flux, (2,1), mode='circular'), self.weight_beta1, 
                        stride=1, padding=0, bias=None), 2)
        
        beta2 = torch.pow(F.conv1d(F.pad(flux, (1,2), mode='circular'), self.weight_beta2, 
                        stride=1, padding=0, bias=None), 2)
        
        eps = 1e-6
        w1 = 1./(3*(eps + beta1)**2)
        w2 = 2./(3*(eps + beta2)**2)

        w = torch.stack([w1, w2], dim=0)
        w = w / torch.sum(w, dim=0)

        edge_flux = w[0]*flux_edge1 + w[1]*flux_edge2

        return edge_flux 
Developer: cics-nd | Project: ar-pde-cnn | Lines: 27 | Source: burgerFiniteDifference.py

Example 6: __call__

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def __call__(self, u):
        """
        Args:
            u (torch.Tensor): [B, C, H]
        Returns:
            grad_u: [B, C, H]
        """
        if self.kernel_size == 2:
            return self.conditionalUpwind(u)

        u_shape = u.shape
        u = u.view(-1, 1, *u_shape[-1:])
        u = F.conv1d(F.pad(u, self.padding, mode='circular'), 
            self.weight, stride=1, padding=0, bias=None) / (self.dx)

        return u.view(u_shape) 
Developer: cics-nd | Project: ar-pde-cnn | Lines: 18 | Source: burgerFiniteDifference.py
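
The same circular-pad-then-convolve pattern yields a periodic finite-difference stencil. A sketch using a second-order central difference (the repo's actual stencil weights are higher order):

import math
import torch
import torch.nn.functional as F

N = 64
dx = 2 * math.pi / N
u = torch.sin(torch.arange(N, dtype=torch.float) * dx).view(1, 1, -1)

# Central difference stencil [-1/2, 0, 1/2] as a conv1d weight.
weight = torch.tensor([-0.5, 0.0, 0.5]).view(1, 1, 3)
grad_u = F.conv1d(F.pad(u, (1, 1), mode='circular'), weight) / dx

print(grad_u.view(-1)[0].item())  # ~1.0, since d/dx sin(x) = 1 at x = 0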

Example 7: conditionalUpwind

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def conditionalUpwind(self, u):
        """
        Upwind scheme:
        https://en.wikipedia.org/wiki/Upwind_scheme
        Args:
            u (torch.Tensor): [B, C, H]
        Returns:
            grad_u: [B, C, H]
        """
        u_shape = u.shape
        u = u.view(-1, 1, *u_shape[-1:])

        u1 = F.conv1d(F.pad(u, self.padding, mode='circular'), 
            self.weight, stride=1, padding=0, bias=None) / (self.dx)

        u2 = F.conv1d(F.pad(u, self.padding, mode='circular'), 
            -torch.flip(self.weight, dims=[-1]), stride=1, padding=0, bias=None) / (self.dx)

        # Select the upwind gradient pointwise based on the sign of u
        u = torch.where(u > 0, u1, u2)

        return u.view(u_shape)
Developer: cics-nd | Project: ar-pde-cnn | Lines: 23 | Source: burgerFiniteDifference.py

Example 8: quaternion_conv

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def quaternion_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride,
                    padding, groups, dilatation):
    """
    Applies a quaternion convolution to the incoming data.
    """

    cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)
    cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=1)
    cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=1)
    cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=1)

    cat_kernels_4_quaternion   = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)

    if   input.dim() == 3:
        convfunc = F.conv1d
    elif input.dim() == 4:
        convfunc = F.conv2d
    elif input.dim() == 5:
        convfunc = F.conv3d
    else:
        raise Exception("The convolutional input must be 3, 4, or 5 dimensional."
                        " input.dim = " + str(input.dim()))

    return convfunc(input, cat_kernels_4_quaternion, bias, stride, padding, dilatation, groups) 
Developer: Orkis-Research | Project: Pytorch-Quaternion-Neural-Networks | Lines: 26 | Source: quaternion_ops.py
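
Calling the helper above on a 1D signal (a sketch; all sizes are illustrative). With in_q quaternion input channels and out_q quaternion output channels, each component weight is (out_q, in_q, K), so the assembled real-valued kernel is (4*out_q, 4*in_q, K):

import torch

in_q, out_q, K = 8, 4, 3
r, i, j, k = (torch.randn(out_q, in_q, K) for _ in range(4))
x = torch.randn(2, 4 * in_q, 20)  # r/i/j/k components stacked channel-wise

y = quaternion_conv(x, r, i, j, k, bias=None, stride=1,
                    padding=1, groups=1, dilatation=1)
print(y.shape)  # torch.Size([2, 16, 20])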

Example 9: update_params

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def update_params(self, VdivWZH, update_W, update_H, update_Z, W_alpha, H_alpha, Z_alpha):
        # type: (Tensor, bool, bool, bool, float, float, float) -> None

        if update_W or update_Z:
            new_W = F.conv1d(VdivWZH[:, None], self.H[:, None] * self.Z[:, None, None]) * self.W

        if update_H:
            new_H = F.conv1d(VdivWZH[None, ...], torch.transpose(self.W * self.Z[:, None], 0, 1))[0] * self.H
            new_H = normalize(self.fix_neg(new_H + H_alpha - 1), 1)
            self.H[:] = new_H

        if update_W:
            self.W[:] = normalize(self.fix_neg(new_W + W_alpha - 1), (0, 2))

        if update_Z:
            Z = normalize(self.fix_neg(new_W.sum((0, 2)) + Z_alpha - 1))
            self.Z[:] = Z 
Developer: yoyololicon | Project: pytorch-NMF | Lines: 19 | Source: plca.py

Example 10: bspline_kernel_1d

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def bspline_kernel_1d(sigma, order=2, asTensor=False, dtype=th.float32, device='cpu'):
    # Build a B-spline kernel by repeatedly convolving a box kernel with itself.
    kernel_ones = th.ones(1, 1, sigma)
    kernel = kernel_ones

    padding = sigma - 1

    for i in range(1, order + 1):
        kernel = F.conv1d(kernel, kernel_ones, padding=padding) / sigma

    if asTensor:
        return kernel[0, 0, ...].to(dtype=dtype, device=device)
    else:
        return kernel[0, 0, ...].numpy()
Developer: airlab-unibas | Project: airlab | Lines: 18 | Source: kernelFunction.py
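
A quick check of the function above: each pass convolves the box kernel with itself, so the support widens with the order while the total mass stays at sigma (a sketch assuming torch is imported as th, matching the code):

import torch as th

kernel = bspline_kernel_1d(4, order=2, asTensor=True)
print(kernel.shape)         # torch.Size([10]) for sigma=4, order=2
print(float(kernel.sum()))  # ~4.0: total mass stays equal to sigma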

Example 11: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def forward(self, input_data):
        num_batches, _, num_samples = input_data.size()

        self.num_samples = num_samples

        forward_transform = F.conv1d(input_data,
                                     self.forward_basis,
                                     stride=self.hop_length,
                                     padding=self.filter_length)
        cutoff = int((self.filter_length / 2) + 1)
        real_part = forward_transform[:, :cutoff, :]
        imag_part = forward_transform[:, cutoff:, :]

        magnitude = torch.sqrt(real_part**2 + imag_part**2)
        phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data))
        return magnitude, phase 
Developer: tiberiu44 | Project: TTS-Cube | Lines: 18 | Source: modules.py
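
The forward_basis the module convolves with is a real-valued Fourier matrix reshaped into conv1d weights. A rough standalone sketch of how such a basis can be built; this mirrors the idea, not the repo's exact construction:

import numpy as np
import torch
import torch.nn.functional as F

filter_length, hop_length = 16, 4
fourier = np.fft.fft(np.eye(filter_length))
cutoff = filter_length // 2 + 1

# Stack real rows on top of imaginary rows: (2 * cutoff, 1, filter_length).
basis = np.vstack([np.real(fourier[:cutoff]), np.imag(fourier[:cutoff])])
forward_basis = torch.tensor(basis[:, None, :], dtype=torch.float)

audio = torch.randn(1, 1, 1024)
spec = F.conv1d(audio, forward_basis, stride=hop_length, padding=filter_length)
print(spec.shape)  # torch.Size([1, 18, 261]); rows split into real/imag halves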

Example 12: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def forward(self, x):
        """
        Implemented complex convolution using combining 'grouped convolution' and 'real / img weight'
        :param x: data (N, C, T) C is concatenated with C/2 real channels and C/2 idea channels
        :return: complex conved result
        """
        # adopt reflect padding
        if self.padding:
            x = F.pad(x, (self.padding, self.padding), 'reflect')

        # forward the real part
        real_part = F.conv1d(x, self.A, None, stride=self.stride, padding=0,
                             dilation=self.dilation, groups=2)

        # forward the imaginary part
        spl = self.in_channels // 2
        weight_B = torch.cat([self.B[:spl].data * (-1), self.B[spl:].data])
        idea_part = F.conv1d(x, weight_B, None, stride=self.stride, padding=0,
                             dilation=self.dilation, groups=2)

        return real_part + idea_part 
Developer: AppleHolic | Project: source_separation | Lines: 23 | Source: modules.py

Example 13: moving_sum

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def moving_sum(x, back, forward):
    """Compute the moving sum of x over a chunk_size with the provided bounds.

    Args:
        x (FloatTensor): `[B, H_ma, H_ca, qlen, klen]`
        back (int): number of positions before each step included in the sum
        forward (int): number of positions after each step included in the sum

    Returns:
        x_sum (FloatTensor): `[B, H_ma, H_ca, qlen, klen]`

    """
    bs, n_heads_mono, n_heads_chunk, qlen, klen = x.size()
    x = x.view(-1, klen)
    # Moving sum is computed as a carefully-padded 1D convolution with ones
    x_padded = F.pad(x, pad=[back, forward])  # `[B * H_ma * H_ca * qlen, back + klen + forward]`
    # Add a "channel" dimension
    x_padded = x_padded.unsqueeze(1)
    # Construct filters
    filters = x.new_ones(1, 1, back + forward + 1)
    x_sum = F.conv1d(x_padded, filters)
    x_sum = x_sum.squeeze(1).view(bs, n_heads_mono, n_heads_chunk, qlen, -1)
    return x_sum 
Developer: hirofumi0810 | Project: neural_sp | Lines: 25 | Source: mocha.py
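
Since the filter is all ones, moving_sum is just a box-filter sum. A tiny check of the function above on a 5-element sequence:

import torch

x = torch.arange(5, dtype=torch.float).view(1, 1, 1, 1, 5)
y = moving_sum(x, back=1, forward=1)
print(y.view(-1))  # tensor([1., 3., 6., 9., 7.])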

Example 14: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def forward(self, x):
        if self.deterministic:
            assert not self.training, "Flag deterministic is True. This should not be used in training."
            return F.conv1d(x, self.post_weight_mu, self.bias_mu, self.stride, self.padding, self.dilation, self.groups)
        batch_size = x.size()[0]
        # apply local reparametrisation trick see [1] Eq. (6)
        # to the parametrisation given in [3] Eq. (6)
        mu_activations = F.conv1d(x, self.weight_mu, self.bias_mu, self.stride,
                                  self.padding, self.dilation, self.groups)

        var_activations = F.conv1d(x.pow(2), self.weight_logvar.exp(), self.bias_logvar.exp(), self.stride,
                                   self.padding, self.dilation, self.groups)
        # compute z
        # note that we reparametrise according to [2] Eq. (11) (not [1])
        z = reparametrize(self.z_mu.repeat(batch_size, 1, 1), self.z_logvar.repeat(batch_size, 1, 1),
                          sampling=self.training, cuda=self.cuda)
        z = z[:, :, None]

        return reparametrize(mu_activations * z, (var_activations * z.pow(2)).log(), sampling=self.training,
                             cuda=self.cuda) 
Developer: KarenUllrich | Project: Tutorial_BayesianCompressionForDL | Lines: 22 | Source: BayesianLayers.py
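
The local reparameterization trick used above can be exercised outside the class: sample the activations from their induced Gaussian instead of sampling the weights. A minimal sketch (shapes are illustrative, and the full layer's extra z scaling is omitted):

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 32)
weight_mu = torch.randn(8, 4, 3)
weight_logvar = torch.full((8, 4, 3), -6.0)  # small initial weight variance

mu_act = F.conv1d(x, weight_mu, padding=1)
var_act = F.conv1d(x.pow(2), weight_logvar.exp(), padding=1)

# One Gaussian sample per activation: mu + sigma * eps.
sample = mu_act + var_act.clamp_min(1e-12).sqrt() * torch.randn_like(mu_act)
print(sample.shape)  # torch.Size([2, 8, 32])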

Example 15: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import conv1d [as alias]
def forward(self, z, reverse=False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation
                W_inverse = W.float().inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.type() == 'torch.cuda.HalfTensor':
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W)
            z = self.conv(z)
            return z, log_det_W 
Developer: xcmyz | Project: LightSpeech | Lines: 23 | Source: glow.py


Note: The torch.nn.functional.conv1d examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's License. Do not republish without permission.