

Python functional.conv_transpose1d Method Code Examples

This article collects typical usage examples of the torch.nn.functional.conv_transpose1d method in Python. If you are wondering what functional.conv_transpose1d does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from torch.nn.functional.


The following presents 14 code examples of functional.conv_transpose1d, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
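
Before the examples, here is a minimal sketch of the call itself (the shapes and names are illustrative, not taken from any project below). For conv_transpose1d the weight is laid out as (in_channels, out_channels // groups, kernel_size), and the output length follows L_out = (L_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1:

import torch
from torch.nn import functional as F

x = torch.randn(8, 16, 50)        # (batch, in_channels, length)
w = torch.randn(16, 32, 3)        # (in_channels, out_channels // groups, kernel_size)
y = F.conv_transpose1d(x, w, bias=None, stride=2, padding=1,
                       output_padding=1, groups=1, dilation=1)
print(y.shape)                    # torch.Size([8, 32, 100]): (50-1)*2 - 2 + (3-1) + 1 + 1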

Example 1: quaternion_transpose_conv

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def quaternion_transpose_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride,
                    padding, output_padding, groups, dilatation):
    """
    Applies a quaternion trasposed convolution to the incoming data:

    """

    cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)
    cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=1)
    cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=1)
    cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=1)
    cat_kernels_4_quaternion   = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)


    if   input.dim() == 3:
        convfunc = F.conv_transpose1d
    elif input.dim() == 4:
        convfunc = F.conv_transpose2d
    elif input.dim() == 5:
        convfunc = F.conv_transpose3d
    else:
        raise Exception("The convolutional input must be 3, 4 or 5 dimensional."
                        " input.dim = " + str(input.dim()))

    return convfunc(input, cat_kernels_4_quaternion, bias, stride, padding, output_padding, groups, dilatation) 
Author: Orkis-Research, Project: Pytorch-Quaternion-Neural-Networks, Lines of code: 27, Source file: quaternion_ops.py
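
A minimal usage sketch for the function above, with hypothetical shapes: each quaternion component weight has shape (in_channels // 4, out_channels // 4, kernel_size), so the two concatenations build a weight matching conv_transpose1d's (in_channels, out_channels, kernel_size) layout:

import torch
from torch.nn import functional as F   # used inside quaternion_transpose_conv

x = torch.randn(8, 16, 50)             # in_channels must be divisible by 4
r = torch.randn(4, 8, 3)               # 16 // 4 input x 32 // 4 output quaternion channels
i = torch.randn_like(r)
j = torch.randn_like(r)
k = torch.randn_like(r)
y = quaternion_transpose_conv(x, r, i, j, k, bias=None, stride=2,
                              padding=1, output_padding=1, groups=1, dilatation=1)
print(y.shape)                         # torch.Size([8, 32, 100])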

Example 2: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def forward(self, x, output_size=None):
        """
        Implemented complex transposed convolution using combining 'grouped convolution' and 'real / img weight'
        :param x: data (N, C, T) C is concatenated with C/2 real channels and C/2 idea channels
        :return: complex transposed convolution result
        """
        # forward real part
        if self.padding:
            x = F.pad(x, (self.padding, self.padding), 'reflect')

        real_part = F.conv_transpose1d(x, self.A, None, stride=self.stride, padding=0,
                                       dilation=self.dilation, groups=2)

        # forward imaginary part
        spl = self.out_channels // 2
        weight_B = torch.cat([self.B[:spl] * (-1), self.B[spl:]])
        imag_part = F.conv_transpose1d(x, weight_B, None, stride=self.stride, padding=0,
                                       dilation=self.dilation, groups=2)

        if self.output_padding:
            real_part = F.pad(real_part, (self.output_padding, self.output_padding), 'reflect')
            imag_part = F.pad(imag_part, (self.output_padding, self.output_padding), 'reflect')

        return real_part + imag_part
Author: AppleHolic, Project: source_separation, Lines of code: 26, Source file: modules.py
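
The example above leans on grouped convolution to keep the real and imaginary halves of the input separate. A minimal sketch of that groups=2 behavior (shapes are hypothetical): the first half of the input channels only sees the first half of the weight's input dimension, and likewise for the second half:

import torch
from torch.nn import functional as F

x = torch.randn(1, 4, 10)         # channels 0-1 "real", channels 2-3 "imaginary"
w = torch.randn(4, 3, 5)          # (in_channels, out_channels // groups, kernel_size)
y = F.conv_transpose1d(x, w, stride=2, groups=2)
print(y.shape)                    # torch.Size([1, 6, 23]): two groups of 3 output channels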

Example 3: quaternion_transpose_conv

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def quaternion_transpose_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride, 
                    padding, output_padding, groups, dilatation):
    """
    Applies a quaternion trasposed convolution to the incoming data:

    """

    cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)
    cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=1)
    cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=1)
    cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=1)
    cat_kernels_4_quaternion   = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)


    if   input.dim() == 3:
        convfunc = F.conv_transpose1d
    elif input.dim() == 4:
        convfunc = F.conv_transpose2d
    elif input.dim() == 5:
        convfunc = F.conv_transpose3d
    else:
        raise Exception("The convolutional input must be 3, 4 or 5 dimensional."
                        " input.dim = " + str(input.dim()))

    return convfunc(input, cat_kernels_4_quaternion, bias, stride, padding, output_padding, groups, dilatation) 
Author: Orkis-Research, Project: Quaternion-Recurrent-Neural-Networks, Lines of code: 27, Source file: quaternion_ops.py

Example 4: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def forward(self, x):
        # Pad here if not using transposed conv
        input_size = x.shape[2]
        if self.padding != "valid":
            num_pad = (self.kernel_size-1)//2
            out = F.pad(x, (num_pad, num_pad), mode=self.padding)
        else:
            out = x

        # Lowpass filter (+ 0 insertion if transposed)
        if self.transpose:
            expected_steps = ((input_size - 1) * self.stride + 1)
            if self.padding == "valid":
                expected_steps = expected_steps - self.kernel_size + 1

            out = F.conv_transpose1d(out, self.filter, stride=self.stride, padding=0, groups=self.channels)
            diff_steps = out.shape[2] - expected_steps
            if diff_steps > 0:
                assert diff_steps % 2 == 0
                out = out[:,:,diff_steps//2:-diff_steps//2]
        else:
            assert input_size % self.stride == 1
            out = F.conv1d(out, self.filter, stride=self.stride, padding=0, groups=self.channels)

        return out 
Author: f90, Project: Wave-U-Net-Pytorch, Lines of code: 27, Source file: waveunet_utils.py
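
A minimal sketch (with hypothetical values) of the length bookkeeping in the transposed branch above: a stride-s transposed convolution over L steps produces (L - 1) * s + kernel_size samples, i.e. kernel_size - 1 more than the (L - 1) * s + 1 the layer wants to keep, and the surplus is trimmed symmetrically:

import torch
from torch.nn import functional as F

stride, kernel_size, channels = 2, 15, 1
x = torch.randn(1, channels, 100)
filt = torch.randn(channels, 1, kernel_size)   # depthwise filter: groups == channels
out = F.conv_transpose1d(x, filt, stride=stride, padding=0, groups=channels)
expected_steps = (100 - 1) * stride + 1        # 199
diff_steps = out.shape[2] - expected_steps     # 213 - 199 = 14, always kernel_size - 1
out = out[:, :, diff_steps // 2:-diff_steps // 2]
print(out.shape)                               # torch.Size([1, 1, 199])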

Example 5: inverse

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def inverse(self, magnitude, phase):
        recombine_magnitude_phase = torch.cat(
            [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)

        inverse_transform = F.conv_transpose1d(
            recombine_magnitude_phase,
            Variable(self.inverse_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)

        if self.window is not None:
            window_sum = window_sumsquare(
                self.window, magnitude.size(-1), hop_length=self.hop_length,
                win_length=self.win_length, n_fft=self.filter_length,
                dtype=np.float32)
            # remove modulation effects
            approx_nonzero_indices = torch.from_numpy(
                np.where(window_sum > tiny(window_sum))[0])
            window_sum = torch.autograd.Variable(
                torch.from_numpy(window_sum), requires_grad=False)
            window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]

            # scale by hop ratio
            inverse_transform *= float(self.filter_length) / self.hop_length

        inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]

        return inverse_transform 
Author: alphacep, Project: tn2-wg, Lines of code: 32, Source file: stft.py
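
The conv_transpose1d call above is what performs the overlap-add step of the iSTFT: each frame lays down one scaled copy of each basis filter at hop-length offsets, and overlapping copies sum. A minimal sketch with hypothetical sizes:

import torch
from torch.nn import functional as F

hop, win = 4, 8
frames = torch.randn(1, 1, 10)    # 10 frames of a single "basis" channel
basis = torch.ones(1, 1, win)     # a rectangular synthesis window
signal = F.conv_transpose1d(frames, basis, stride=hop)
print(signal.shape)               # torch.Size([1, 1, 44]): (10 - 1) * 4 + 8 samples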

Example 6: inverse

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def inverse(self, magnitude, phase):
        recombine_magnitude_phase = torch.cat([magnitude*torch.cos(phase),
                                               magnitude*torch.sin(phase)], dim=1)

        inverse_transform = F.conv_transpose1d(recombine_magnitude_phase,
                                               self.inverse_basis,
                                               stride=self.hop_length,
                                               padding=0)
        inverse_transform = inverse_transform[:, :, self.filter_length:]
        inverse_transform = inverse_transform[:, :, :self.num_samples]
        return inverse_transform 
Author: tiberiu44, Project: TTS-Cube, Lines of code: 13, Source file: modules.py

Example 7: inverse

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def inverse(self, magnitude, phase):
        recombine_magnitude_phase = torch.cat(
            [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)

        inverse_transform = F.conv_transpose1d(
            recombine_magnitude_phase,
            Variable(self.inverse_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)

        if self.window is not None:
            window_sum = self._window_sumsquare(
                self.window, magnitude.size(-1), hop_length=self.hop_length,
                win_length=self.win_length, n_fft=self.filter_length,
                dtype=np.float32)
            # remove modulation effects
            approx_nonzero_indices = torch.from_numpy(
                np.where(window_sum > tiny(window_sum))[0])
            window_sum = torch.autograd.Variable(
                torch.from_numpy(window_sum), requires_grad=False)
            window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]

            # scale by hop ratio
            inverse_transform *= float(self.filter_length) / self.hop_length

        inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]

        return inverse_transform 
Author: tiberiu44, Project: TTS-Cube, Lines of code: 31, Source file: stft.py

Example 8: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def forward(self, input):
        weight = self.window * self.conv_tr.weight
        return F.conv_transpose1d(input,
            weight,
            bias=self.conv_tr.bias,
            stride=self.conv_tr.stride,
            padding=self.conv_tr.padding,
            output_padding=self.conv_tr.output_padding,
            groups=self.conv_tr.groups,
            dilation=self.conv_tr.dilation) 
Author: acids-ircam, Project: ddsp_pytorch, Lines of code: 12, Source file: modules.py
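
A minimal, self-contained sketch of the same pattern (module and sizes are hypothetical): a window is broadcast over the kernel dimension of a ConvTranspose1d weight before the functional call, tapering each synthesis filter:

import torch
from torch.nn import functional as F

conv_tr = torch.nn.ConvTranspose1d(2, 1, kernel_size=16, stride=8)
window = torch.hann_window(16)    # broadcasts over (in_channels, out_channels, kernel)
weight = window * conv_tr.weight
y = F.conv_transpose1d(torch.randn(1, 2, 20), weight, bias=conv_tr.bias,
                       stride=conv_tr.stride, padding=conv_tr.padding,
                       output_padding=conv_tr.output_padding,
                       groups=conv_tr.groups, dilation=conv_tr.dilation)
print(y.shape)                    # torch.Size([1, 1, 168]): (20 - 1) * 8 + 16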

Example 9: inverse

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def inverse(self, magnitude, phase):
        recombine_magnitude_phase = torch.cat(
            [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)

        inverse_transform = F.conv_transpose1d(
            recombine_magnitude_phase,
            Variable(self.inverse_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)

        if self.window is not None:
            window_sum = window_sumsquare(
                self.window, magnitude.size(-1), hop_length=self.hop_length,
                win_length=self.win_length, n_fft=self.filter_length,
                dtype=np.float32)
            # remove modulation effects
            approx_nonzero_indices = torch.from_numpy(
                np.where(window_sum > tiny(window_sum))[0])
            window_sum = torch.autograd.Variable(
                torch.from_numpy(window_sum), requires_grad=False)
            window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]

            # scale by hop ratio
            inverse_transform *= float(self.filter_length) / self.hop_length

        inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]

        return inverse_transform 
Author: xcmyz, Project: LightSpeech, Lines of code: 34, Source file: stft.py

Example 10: inverse

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def inverse(self, magnitude, phase):
        recombine_magnitude_phase = torch.cat(
            [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)

        inverse_transform = F.conv_transpose1d(
            recombine_magnitude_phase,
            Variable(self.inverse_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)

        if self.window is not None:
            window_sum = window_sumsquare(
                self.window, magnitude.size(-1), hop_length=self.hop_length,
                win_length=self.win_length, n_fft=self.filter_length,
                dtype=np.float32)
            # remove modulation effects
            approx_nonzero_indices = torch.from_numpy(
                np.where(window_sum > tiny(window_sum))[0])
            window_sum = torch.autograd.Variable(
                torch.from_numpy(window_sum), requires_grad=False)
            window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]

            # scale by hop ratio
            inverse_transform *= float(self.filter_length) / self.hop_length

        inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]

        return inverse_transform 
Author: guanlongzhao, Project: fac-via-ppg, Lines of code: 32, Source file: stft.py

Example 11: test_conv_transpose1d

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def test_conv_transpose1d(self):
        # Data and weight tensors
        conv_transpose1d_tensor = torch.randn(64, 16, 64, device='cuda', dtype=self.dtype)
        conv_transpose1d_filter = torch.randn(16, 32, 3, device='cuda', dtype=self.dtype)
        conv_transpose1d_bias = torch.randn(32, device='cuda', dtype=self.dtype)
        # Conv transpose runs
        conv_transpose1d_out = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter)
        conv_transpose1d_out_biased = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter, bias=conv_transpose1d_bias)
        conv_transpose1d_out_strided = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter, stride=2)
        conv_transpose1d_out_padded = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter, padding=3)
        conv_transpose1d_out2_padded = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter, output_padding=2, dilation=3)
        conv_transpose1d_out_grouped = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter, groups=2)
        conv_transpose1d_out_dilated = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter, dilation=2) 
Author: NVIDIA, Project: apex, Lines of code: 15, Source file: test_pyprof_nvtx.py
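
For reference, with input length 64 and kernel size 3 the output lengths of these calls follow L_out = (64 - 1) * stride - 2 * padding + dilation * (3 - 1) + output_padding + 1: the default call gives 66, stride=2 gives 129, padding=3 gives 60, output_padding=2 with dilation=3 gives 72, and dilation=2 gives 68. The grouped call reads the filter's second dimension as out_channels // groups, so groups=2 with the (16, 32, 3) filter yields 64 output channels, still at length 66.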

Example 12: inverse

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def inverse(self, magnitude, phase):
        recombine_magnitude_phase = torch.cat(
            [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1)

        inverse_transform = F.conv_transpose1d(
            recombine_magnitude_phase,
            Variable(self.inverse_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)

        if self.window is not None:
            window_sum = window_sumsquare(
                self.window, magnitude.size(-1), hop_length=self.hop_length,
                win_length=self.win_length, n_fft=self.filter_length,
                dtype=np.float32)
            # remove modulation effects
            approx_nonzero_indices = torch.from_numpy(
                np.where(window_sum > tiny(window_sum))[0])
            window_sum = torch.autograd.Variable(
                torch.from_numpy(window_sum), requires_grad=False)
            window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]

            # scale by hop ratio
            inverse_transform *= float(self.filter_length) / self.hop_length

        inverse_transform = inverse_transform[:, :, int(self.filter_length / 2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length / 2)]

        return inverse_transform 
Author: foamliu, Project: Tacotron2-Mandarin, Lines of code: 32, Source file: stft.py

Example 13: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def forward(self, spec):
        """ Applies transposed convolution to a TF representation.

        This is equivalent to overlap-add.

        Args:
            spec (:class:`torch.Tensor`): 3D or 4D Tensor. The TF
                representation. (Output of :func:`Encoder.forward`).
        Returns:
            :class:`torch.Tensor`: The corresponding time domain signal.
        """
        filters = self.get_filters()
        if spec.ndim == 2:
            # Input is (freq, conv_time), output is (time)
            return F.conv_transpose1d(
                spec.unsqueeze(0),
                filters,
                stride=self.stride,
                padding=self.padding,
                output_padding=self.output_padding
            ).squeeze()
        if spec.ndim == 3:
            # Input is (batch, freq, conv_time), output is (batch, 1, time)
            return F.conv_transpose1d(spec, filters, stride=self.stride,
                                      padding=self.padding,
                                      output_padding=self.output_padding)
        elif spec.ndim > 3:
            # Multiply all the left dimensions together and group them in the
            # batch. Make the convolution and restore.
            view_as = (-1,) + spec.shape[-2:]
            out = F.conv_transpose1d(spec.view(view_as),
                                     filters, stride=self.stride,
                                     padding=self.padding,
                                     output_padding=self.output_padding)
            return out.view(spec.shape[:-2] + (-1,)) 
Author: mpariente, Project: asteroid, Lines of code: 37, Source file: enc_dec.py
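
A minimal sketch (hypothetical shapes) of the reshape trick in the last branch above: all leading dimensions are folded into the batch, the convolution runs once, and the leading dimensions are restored afterwards:

import torch
from torch.nn import functional as F

spec = torch.randn(2, 3, 16, 10)          # e.g. (batch, n_sources, freq, conv_time)
filters = torch.randn(16, 1, 32)
view_as = (-1,) + spec.shape[-2:]         # (-1, 16, 10) -> effective batch of 6
out = F.conv_transpose1d(spec.view(view_as), filters, stride=8)
out = out.view(spec.shape[:-2] + (-1,))   # restore the (2, 3, ...) leading dims
print(out.shape)                          # torch.Size([2, 3, 104]): (10 - 1) * 8 + 32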

Example 14: inverse

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import conv_transpose1d [as alias]
def inverse(self, magnitude, phase):
        """Call the inverse STFT (iSTFT), given magnitude and phase tensors produced 
        by the ```transform``` function.
        
        Arguments:
            magnitude {tensor} -- Magnitude of STFT with shape (num_batch, 
                num_frequencies, num_frames)
            phase {tensor} -- Phase of STFT with shape (num_batch, 
                num_frequencies, num_frames)
        
        Returns:
            inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. Of
                shape (num_batch, num_samples)
        """
        recombine_magnitude_phase = torch.cat(
            [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)

        inverse_transform = F.conv_transpose1d(
            recombine_magnitude_phase,
            self.inverse_basis,
            stride=self.hop_length,
            padding=0)

        if self.window is not None:
            window_sum = window_sumsquare(
                self.window, magnitude.size(-1), hop_length=self.hop_length,
                win_length=self.win_length, n_fft=self.filter_length,
                dtype=np.float32)
            # remove modulation effects
            approx_nonzero_indices = torch.from_numpy(
                np.where(window_sum > tiny(window_sum))[0])
            window_sum = torch.from_numpy(window_sum).to(inverse_transform.device)
            inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]

            # scale by hop ratio
            inverse_transform *= float(self.filter_length) / self.hop_length

        inverse_transform = inverse_transform[..., self.pad_amount:]
        inverse_transform = inverse_transform[..., :self.num_samples]
        inverse_transform = inverse_transform.squeeze(1)

        return inverse_transform 
Author: pseeth, Project: torch-stft, Lines of code: 44, Source file: stft.py
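
The window-sum division that recurs in these inverse examples implements the standard weighted overlap-add reconstruction. Writing $w$ for the synthesis window, $H$ for the hop length, and $\hat{x}_m$ for the $m$-th inverse-transformed frame, the normalized output is

$$x[n] = \frac{\sum_m w[n - mH]\,\hat{x}_m[n - mH]}{\sum_m w^2[n - mH]},$$

which is exactly conv_transpose1d's sum of hop-shifted frames divided by window_sumsquare, with the division restricted to the indices where the denominator is non-negligible.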


Note: The torch.nn.functional.conv_transpose1d examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Consult each project's License before distributing or using the code, and do not reproduce this article without permission.