本文整理汇总了Python中torch.nn.functional.conv_transpose1d方法的典型用法代码示例。如果您正苦于以下问题:Python functional.conv_transpose1d方法的具体用法?Python functional.conv_transpose1d怎么用?Python functional.conv_transpose1d使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.nn.functional
的用法示例。
在下文中一共展示了functional.conv_transpose1d方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: quaternion_transpose_conv
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def quaternion_transpose_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride,
                              padding, output_padding, groups, dilatation):
    """
    Applies a quaternion transposed convolution to the incoming data.

    The four quaternion component kernels are assembled into a single
    real-valued kernel implementing the Hamilton product, then dispatched
    to the 1d/2d/3d transposed convolution matching the input rank.

    Args:
        input: tensor of shape (N, C, *spatial); must be 3, 4 or 5 dimensional.
        r_weight, i_weight, j_weight, k_weight: per-component kernels.
        bias, stride, padding, output_padding, groups, dilatation: forwarded
            unchanged to ``F.conv_transpose{1,2,3}d``.

    Raises:
        ValueError: if ``input`` is not 3, 4 or 5 dimensional.
    """
    # Hamilton-product block matrix: each row block mixes the four components.
    cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)
    cat_kernels_4_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=1)
    cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=1)
    cat_kernels_4_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=1)
    cat_kernels_4_quaternion = torch.cat(
        [cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)
    # Choose the transposed-convolution flavour from the input rank.
    if input.dim() == 3:
        convfunc = F.conv_transpose1d
    elif input.dim() == 4:
        convfunc = F.conv_transpose2d
    elif input.dim() == 5:
        convfunc = F.conv_transpose3d
    else:
        # BUGFIX: raise a specific ValueError instead of a bare Exception so
        # callers can catch the precise failure; still caught by `except Exception`.
        raise ValueError("The convolutional input is either 3, 4 or 5 dimensions."
                         " input.dim = " + str(input.dim()))
    return convfunc(input, cat_kernels_4_quaternion, bias, stride, padding,
                    output_padding, groups, dilatation)
示例2: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def forward(self, x, output_size=None):
    """
    Complex transposed convolution built from two grouped real-valued
    transposed convolutions (real and imaginary weight halves).

    :param x: data (N, C, T); channels are C/2 real followed by C/2 imaginary
    :param output_size: unused, kept for interface compatibility
    :return: complex transposed convolution result
    """
    if self.padding:
        x = F.pad(x, (self.padding, self.padding), 'reflect')
    # Real-weight branch.
    real_out = F.conv_transpose1d(x, self.A, None, stride=self.stride,
                                  padding=0, dilation=self.dilation, groups=2)
    # Imaginary-weight branch: the first half of B's channels is negated.
    half = self.out_channels // 2
    signed_B = torch.cat([self.B[:half] * (-1), self.B[half:]])
    imag_out = F.conv_transpose1d(x, signed_B, None, stride=self.stride,
                                  padding=0, dilation=self.dilation, groups=2)
    if self.output_padding:
        pad_pair = (self.output_padding, self.output_padding)
        real_out = F.pad(real_out, pad_pair, 'reflect')
        imag_out = F.pad(imag_out, pad_pair, 'reflect')
    return real_out + imag_out
示例3: quaternion_transpose_conv
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def quaternion_transpose_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride,
                              padding, output_padding, groups, dilatation):
    """
    Applies a quaternion transposed convolution to the incoming data.

    Builds one real-valued kernel realizing the quaternion Hamilton product
    from the four component kernels, then applies the transposed convolution
    whose dimensionality matches the input rank.

    Raises:
        ValueError: if ``input`` is not 3, 4 or 5 dimensional.
    """
    # Assemble the Hamilton-product block kernel.
    cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)
    cat_kernels_4_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=1)
    cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=1)
    cat_kernels_4_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=1)
    cat_kernels_4_quaternion = torch.cat(
        [cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)
    # Rank-based dispatch: 3-D -> 1d, 4-D -> 2d, 5-D -> 3d.
    dispatch = {3: F.conv_transpose1d, 4: F.conv_transpose2d, 5: F.conv_transpose3d}
    convfunc = dispatch.get(input.dim())
    if convfunc is None:
        # BUGFIX: raise a specific ValueError instead of a bare Exception.
        raise ValueError("The convolutional input is either 3, 4 or 5 dimensions."
                         " input.dim = " + str(input.dim()))
    return convfunc(input, cat_kernels_4_quaternion, bias, stride, padding,
                    output_padding, groups, dilatation)
示例4: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def forward(self, x):
    """Fixed low-pass resampling filter.

    Downsamples with a strided depthwise conv, or upsamples with a
    transposed conv (zero insertion) followed by a symmetric crop.
    # assumes x is (batch, channels, steps) -- TODO confirm with caller
    """
    in_steps = x.shape[2]
    # Pad here if not using transposed conv.
    if self.padding == "valid":
        padded = x
    else:
        half_k = (self.kernel_size - 1) // 2
        padded = F.pad(x, (half_k, half_k), mode=self.padding)
    if not self.transpose:
        # Downsampling path: plain strided depthwise convolution.
        assert in_steps % self.stride == 1
        return F.conv1d(padded, self.filter, stride=self.stride, padding=0,
                        groups=self.channels)
    # Upsampling path: lowpass filter with zero insertion, then crop
    # symmetrically down to the expected number of time steps.
    target = (in_steps - 1) * self.stride + 1
    if self.padding == "valid":
        target = target - self.kernel_size + 1
    up = F.conv_transpose1d(padded, self.filter, stride=self.stride,
                            padding=0, groups=self.channels)
    surplus = up.shape[2] - target
    if surplus > 0:
        assert surplus % 2 == 0
        up = up[:, :, surplus // 2:-(surplus // 2)]
    return up
示例5: inverse
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def inverse(self, magnitude, phase):
    """Inverse STFT: reconstruct a time-domain signal from magnitude and phase.

    # assumes magnitude/phase are (batch, n_freq, n_frames) -- TODO confirm
    """
    # Polar -> rectangular: stack real and imaginary parts along channels.
    real_imag = torch.cat(
        [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1)
    # Overlap-add synthesis via transposed convolution with the inverse basis.
    signal = F.conv_transpose1d(
        real_imag,
        Variable(self.inverse_basis, requires_grad=False),
        stride=self.hop_length,
        padding=0)
    if self.window is not None:
        window_sum = window_sumsquare(
            self.window, magnitude.size(-1), hop_length=self.hop_length,
            win_length=self.win_length, n_fft=self.filter_length,
            dtype=np.float32)
        # Remove window-modulation effects where the envelope is non-negligible.
        nonzero = torch.from_numpy(
            np.where(window_sum > tiny(window_sum))[0])
        window_sum = torch.autograd.Variable(
            torch.from_numpy(window_sum), requires_grad=False)
        if magnitude.is_cuda:
            window_sum = window_sum.cuda()
        signal[:, :, nonzero] /= window_sum[nonzero]
        # Scale by hop ratio.
        signal *= float(self.filter_length) / self.hop_length
    # Trim the half-filter padding introduced by the forward transform.
    half = int(self.filter_length / 2)
    signal = signal[:, :, half:]
    signal = signal[:, :, :-half]
    return signal
示例6: inverse
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def inverse(self, magnitude, phase):
    """Inverse STFT without window correction.

    Recombines polar magnitude/phase into stacked real/imaginary channels,
    overlap-adds via a transposed convolution, and trims to the original
    sample count.
    """
    rectangular = torch.cat([magnitude * torch.cos(phase),
                             magnitude * torch.sin(phase)], dim=1)
    signal = F.conv_transpose1d(rectangular,
                                self.inverse_basis,
                                stride=self.hop_length,
                                padding=0)
    # Drop the leading filter-length transient, then clip to num_samples.
    return signal[:, :, self.filter_length:][:, :, :self.num_samples]
示例7: inverse
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def inverse(self, magnitude, phase):
    """Inverse STFT: reconstruct a time-domain signal from magnitude and phase.

    # assumes magnitude/phase are (batch, n_freq, n_frames) -- TODO confirm

    Returns:
        Reconstructed signal, trimmed of the half-filter padding on both ends.
    """
    # Polar -> rectangular, stacked along the channel dimension.
    recombine_magnitude_phase = torch.cat(
        [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
    # Overlap-add synthesis via transposed convolution.
    inverse_transform = F.conv_transpose1d(
        recombine_magnitude_phase,
        Variable(self.inverse_basis, requires_grad=False),
        stride=self.hop_length,
        padding=0)
    if self.window is not None:
        window_sum = self._window_sumsquare(
            self.window, magnitude.size(-1), hop_length=self.hop_length,
            win_length=self.win_length, n_fft=self.filter_length,
            dtype=np.float32)
        # remove modulation effects
        approx_nonzero_indices = torch.from_numpy(
            np.where(window_sum > tiny(window_sum))[0])
        window_sum = torch.autograd.Variable(
            torch.from_numpy(window_sum), requires_grad=False)
        # BUGFIX: only move window_sum to GPU when the input is on GPU; the
        # original unconditional .cuda() crashed on CPU-only machines and
        # device-mismatched CPU inputs.
        if magnitude.is_cuda:
            window_sum = window_sum.cuda()
        inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
        # scale by hop ratio
        inverse_transform *= float(self.filter_length) / self.hop_length
    # Trim the half-filter padding introduced by the forward transform.
    inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
    inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):]
    return inverse_transform
示例8: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def forward(self, input):
    """Transposed 1-D convolution whose kernel is the stored transposed-conv
    weight multiplied element-wise by ``self.window``; all other hyperparameters
    are taken from the wrapped ``self.conv_tr`` module."""
    tconv = self.conv_tr
    windowed_kernel = self.window * tconv.weight
    return F.conv_transpose1d(input,
                              windowed_kernel,
                              bias=tconv.bias,
                              stride=tconv.stride,
                              padding=tconv.padding,
                              output_padding=tconv.output_padding,
                              groups=tconv.groups,
                              dilation=tconv.dilation)
示例9: inverse
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def inverse(self, magnitude, phase):
    """Inverse STFT: reconstruct a time-domain signal from magnitude and phase.

    # assumes magnitude/phase are (batch, n_freq, n_frames) -- TODO confirm
    # with the matching forward transform.
    """
    # Polar -> rectangular: real and imaginary parts stacked along channels.
    recombine_magnitude_phase = torch.cat(
        [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
    # Overlap-add synthesis via transposed convolution with the inverse basis.
    inverse_transform = F.conv_transpose1d(
        recombine_magnitude_phase,
        Variable(self.inverse_basis, requires_grad=False),
        stride=self.hop_length,
        padding=0)
    if self.window is not None:
        # Envelope of squared, overlapped windows (NOLA correction term).
        window_sum = window_sumsquare(
            self.window, magnitude.size(-1), hop_length=self.hop_length,
            win_length=self.win_length, n_fft=self.filter_length,
            dtype=np.float32)
        # remove modulation effects: divide only where the envelope is
        # non-negligible to avoid division by ~0.
        approx_nonzero_indices = torch.from_numpy(
            np.where(window_sum > tiny(window_sum))[0])
        window_sum = torch.autograd.Variable(
            torch.from_numpy(window_sum), requires_grad=False)
        # Keep window_sum on the same device as the inputs.
        window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
        inverse_transform[:, :,
                          approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
        # scale by hop ratio
        inverse_transform *= float(self.filter_length) / self.hop_length
    # Trim the half-filter padding introduced by the forward transform.
    inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
    inverse_transform = inverse_transform[:,
                                          :, :-int(self.filter_length/2):]
    return inverse_transform
示例10: inverse
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def inverse(self, magnitude, phase):
    """Inverse STFT: reconstruct a time-domain signal from magnitude and phase.

    # assumes magnitude/phase are (batch, n_freq, n_frames) -- TODO confirm
    """
    # Polar -> rectangular, stacked along the channel dimension.
    recombine_magnitude_phase = torch.cat(
        [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
    # Overlap-add synthesis via transposed convolution.
    inverse_transform = F.conv_transpose1d(
        recombine_magnitude_phase,
        Variable(self.inverse_basis, requires_grad=False),
        stride=self.hop_length,
        padding=0)
    if self.window is not None:
        window_sum = window_sumsquare(
            self.window, magnitude.size(-1), hop_length=self.hop_length,
            win_length=self.win_length, n_fft=self.filter_length,
            dtype=np.float32)
        # remove modulation effects
        approx_nonzero_indices = torch.from_numpy(
            np.where(window_sum > tiny(window_sum))[0])
        window_sum = torch.autograd.Variable(
            torch.from_numpy(window_sum), requires_grad=False)
        # BUGFIX: the original divided by window_sum[...].cuda()
        # unconditionally, which crashes without CUDA and device-mismatches
        # a CPU inverse_transform. Move window_sum to the input's device.
        if magnitude.is_cuda:
            window_sum = window_sum.cuda()
        inverse_transform[:, :, approx_nonzero_indices] /= window_sum[
            approx_nonzero_indices]
        # scale by hop ratio
        inverse_transform *= float(self.filter_length) / self.hop_length
    # Trim the half-filter padding introduced by the forward transform.
    inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
    inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):]
    return inverse_transform
示例11: test_conv_transpose1d
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def test_conv_transpose1d(self):
    """Smoke-test F.conv_transpose1d on CUDA across its keyword variants
    (bias, stride, padding, output_padding+dilation, groups, dilation)."""
    # Data and weight tensors.
    data = torch.randn(64, 16, 64, device='cuda', dtype=self.dtype)
    kernel = torch.randn(16, 32, 3, device='cuda', dtype=self.dtype)
    bias = torch.randn(32, device='cuda', dtype=self.dtype)
    # Conv transpose runs -- outputs only need to be produced without error.
    plain = F.conv_transpose1d(data, kernel)
    biased = F.conv_transpose1d(data, kernel, bias=bias)
    strided = F.conv_transpose1d(data, kernel, stride=2)
    padded = F.conv_transpose1d(data, kernel, padding=3)
    out_padded = F.conv_transpose1d(data, kernel, output_padding=2, dilation=3)
    grouped = F.conv_transpose1d(data, kernel, groups=2)
    dilated = F.conv_transpose1d(data, kernel, dilation=2)
示例12: inverse
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def inverse(self, magnitude, phase):
    """Inverse STFT: reconstruct a time-domain signal from magnitude and phase.

    # assumes magnitude/phase are (batch, n_freq, n_frames) -- TODO confirm
    # with the matching forward transform.
    """
    # Polar -> rectangular: real and imaginary parts stacked along channels.
    recombine_magnitude_phase = torch.cat(
        [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1)
    # Overlap-add synthesis via transposed convolution with the inverse basis.
    inverse_transform = F.conv_transpose1d(
        recombine_magnitude_phase,
        Variable(self.inverse_basis, requires_grad=False),
        stride=self.hop_length,
        padding=0)
    if self.window is not None:
        # Envelope of squared, overlapped windows (NOLA correction term).
        window_sum = window_sumsquare(
            self.window, magnitude.size(-1), hop_length=self.hop_length,
            win_length=self.win_length, n_fft=self.filter_length,
            dtype=np.float32)
        # remove modulation effects: divide only where the envelope is
        # non-negligible to avoid division by ~0.
        approx_nonzero_indices = torch.from_numpy(
            np.where(window_sum > tiny(window_sum))[0])
        window_sum = torch.autograd.Variable(
            torch.from_numpy(window_sum), requires_grad=False)
        # Keep window_sum on the same device as the inputs.
        window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
        inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
        # scale by hop ratio
        inverse_transform *= float(self.filter_length) / self.hop_length
    # Trim the half-filter padding introduced by the forward transform.
    inverse_transform = inverse_transform[:, :, int(self.filter_length / 2):]
    inverse_transform = inverse_transform[:, :, :-int(self.filter_length / 2):]
    return inverse_transform
示例13: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def forward(self, spec):
    """ Applies transposed convolution to a TF representation.
    This is equivalent to overlap-add.
    Args:
        spec (:class:`torch.Tensor`): 2D, 3D or >3D Tensor. The TF
            representation. (Output of :func:`Encoder.forward`).
    Returns:
        :class:`torch.Tensor`: The corresponding time domain signal.
    """
    filters = self.get_filters()

    def _overlap_add(batch):
        # Single transposed-conv call shared by every input rank.
        return F.conv_transpose1d(batch, filters, stride=self.stride,
                                  padding=self.padding,
                                  output_padding=self.output_padding)

    if spec.ndim == 2:
        # Input is (freq, conv_time), output is (time).
        return _overlap_add(spec.unsqueeze(0)).squeeze()
    if spec.ndim == 3:
        # Input is (batch, freq, conv_time), output is (batch, 1, time).
        return _overlap_add(spec)
    elif spec.ndim > 3:
        # Fold all leading dimensions into the batch, convolve, restore.
        folded = spec.view((-1,) + spec.shape[-2:])
        return _overlap_add(folded).view(spec.shape[:-2] + (-1,))
示例14: inverse
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import conv_transpose1d [as 别名]
def inverse(self, magnitude, phase):
    """Call the inverse STFT (iSTFT), given magnitude and phase tensors produced
    by the ```transform``` function.
    Arguments:
        magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
            num_frequencies, num_frames)
        phase {tensor} -- Phase of STFT with shape (num_batch,
            num_frequencies, num_frames)
    Returns:
        inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. Of
            shape (num_batch, num_samples)
    """
    # Polar -> rectangular: real and imaginary channels stacked together.
    rectangular = torch.cat(
        [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1)
    # Overlap-add synthesis through a strided transposed convolution.
    signal = F.conv_transpose1d(
        rectangular,
        self.inverse_basis,
        stride=self.hop_length,
        padding=0)
    if self.window is not None:
        window_sum = window_sumsquare(
            self.window, magnitude.size(-1), hop_length=self.hop_length,
            win_length=self.win_length, n_fft=self.filter_length,
            dtype=np.float32)
        # Undo window modulation only where the envelope is non-negligible.
        nonzero = torch.from_numpy(
            np.where(window_sum > tiny(window_sum))[0])
        window_sum = torch.from_numpy(window_sum).to(signal.device)
        signal[:, :, nonzero] /= window_sum[nonzero]
        # Scale by hop ratio.
        signal *= float(self.filter_length) / self.hop_length
    # Strip the forward transform's padding, clip to the original length,
    # and drop the singleton channel dimension.
    signal = signal[..., self.pad_amount:]
    signal = signal[..., :self.num_samples]
    return signal.squeeze(1)