This article collects typical usage examples of the Python method torch.nn.ConvTranspose1d. If you are wondering what nn.ConvTranspose1d does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of its containing module, torch.nn.
The following presents 15 code examples of nn.ConvTranspose1d, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
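
Before the examples, a minimal sketch of the layer itself (the tensor sizes below are illustrative and not taken from any of the snippets): nn.ConvTranspose1d maps an input of shape (N, C_in, L_in) to (N, C_out, L_out), where L_out = (L_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1.

import torch
from torch import nn

# Upsample a (batch, channels, length) signal by a factor of 4.
deconv = nn.ConvTranspose1d(in_channels=16, out_channels=8,
                            kernel_size=8, stride=4, padding=2)
x = torch.randn(2, 16, 100)            # (N, C_in, L_in)
y = deconv(x)                          # (N, C_out, L_out)
# L_out = (100 - 1) * 4 - 2 * 2 + (8 - 1) + 1 = 400
print(y.shape)                         # torch.Size([2, 8, 400])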

Example 1: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def __init__(self, ft_size=1024, hop_size=384):
    super(Synthesis, self).__init__()
    # Parameters
    self.batch_size = None
    self.time_domain_samples = None
    self.sz = ft_size
    self.hop = hop_size
    self.half_N = int(self.sz / 2 + 1)
    # Synthesis 1D CNN
    self.conv_synthesis_real = nn.ConvTranspose1d(self.sz, 1, self.sz,
                                                  padding=0, stride=self.hop, bias=False)
    self.conv_synthesis_imag = nn.ConvTranspose1d(self.sz, 1, self.sz,
                                                  padding=0, stride=self.hop, bias=False)
    # Custom initialization with a Fourier matrix
    self.initialize()
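
For orientation (this note is not part of the snippet above): with the kernel length equal to the frame size and the stride equal to the hop size, each transposed convolution performs overlap-add synthesis, turning a (batch, ft_size, n_frames) tensor of frames into a (batch, 1, n_samples) waveform. A self-contained shape check with the same layer configuration (batch size and frame count are illustrative):

conv_synthesis_real = nn.ConvTranspose1d(1024, 1, 1024, padding=0, stride=384, bias=False)
frames = torch.randn(4, 1024, 50)            # (batch, ft_size, n_frames)
wav = conv_synthesis_real(frames)            # overlap-add of 50 frames with hop 384
print(wav.shape)                             # torch.Size([4, 1, 19840]) = (50 - 1) * 384 + 1024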

Example 2: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def __init__(self, ft_size=1024, w_size=2048, hop_size=1024):
    super(Synthesis, self).__init__()
    # Parameters
    self.batch_size = None
    self.time_domain_samples = None
    self.sz = ft_size
    self.wsz = w_size
    self.hop = hop_size
    self.half_N = int(self.sz / 2 + 1)
    # Synthesis 1D CNN
    self.conv_synthesis = nn.ConvTranspose1d(self.sz, 1, self.wsz,
                                             padding=0, stride=self.hop, bias=False)
    # Activation functions
    self.h_tanh = torch.nn.Hardtanh()
    self.tanh = torch.nn.Tanh()
    # Custom initialization with a DCT Type-IV matrix
    self.initialize()

Example 3: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def __init__(self, ninp, fmaps,
             kwidth, stride=4, norm_type=None,
             act=None,
             bias=True,
             name='GDeconv1DBlock'):
    super().__init__(name=name)
    if act is not None and act == 'glu':
        Wfmaps = 2 * fmaps
    else:
        Wfmaps = fmaps
    pad = max(0, (stride - kwidth)//-2)
    self.deconv = nn.ConvTranspose1d(ninp, Wfmaps,
                                     kwidth,
                                     stride=stride,
                                     padding=pad,
                                     bias=bias)
    self.norm = build_norm_layer(norm_type, self.deconv,
                                 Wfmaps)
    self.act = build_activation(act, fmaps)
    self.kwidth = kwidth
    self.stride = stride
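
A short note on the pad expression above (my reading of the arithmetic, not a statement from the source): in Python, (stride - kwidth)//-2 equals floor((kwidth - stride) / 2), so for kwidth >= stride the block pads just enough that the transposed convolution's output length stays at, or one sample above, stride * L_in. A quick check with illustrative numbers:

stride, kwidth = 4, 8
pad = max(0, (stride - kwidth)//-2)              # -> 2, i.e. floor((8 - 4) / 2)
L_in = 100
L_out = (L_in - 1) * stride - 2 * pad + kwidth   # ConvTranspose1d length formula
print(pad, L_out)                                # 2 400, exactly stride * L_in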

Example 4: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def __init__(self, ninp, fmaps,
             kwidth, stride=4, norm_type=None,
             act=None,
             name='GDeconv1DBlock'):
    super().__init__(name=name)
    pad = max(0, (stride - kwidth)//-2)
    self.deconv = nn.ConvTranspose1d(ninp, fmaps,
                                     kwidth,
                                     stride=stride,
                                     padding=pad)
    self.norm = build_norm_layer(norm_type, self.deconv,
                                 fmaps)
    if act is not None:
        self.act = getattr(nn, act)()
    else:
        self.act = nn.PReLU(fmaps, init=0)
    self.kwidth = kwidth
    self.stride = stride

Example 5: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
# (This snippet also assumes: from torch.nn import init)
def __init__(self, input_size, output_size, stride, hops):
    super(CNN_decoder_share, self).__init__()
    self.input_size = input_size
    self.output_size = output_size
    self.hops = hops
    self.relu = nn.ReLU()
    self.deconv = nn.ConvTranspose1d(in_channels=int(self.input_size), out_channels=int(self.input_size),
                                     kernel_size=3, stride=stride)
    self.bn = nn.BatchNorm1d(int(self.input_size))
    self.deconv_out = nn.ConvTranspose1d(in_channels=int(self.input_size), out_channels=int(self.output_size),
                                         kernel_size=3, stride=1, padding=1)
    for m in self.modules():
        if isinstance(m, nn.ConvTranspose1d):
            # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            # m.weight.data.normal_(0, math.sqrt(2. / n))
            init.xavier_uniform_(m.weight.data, gain=nn.init.calculate_gain('relu'))
        elif isinstance(m, nn.BatchNorm1d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()

Example 6: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
# (This snippet also assumes: import numpy as np; from torch.nn import init)
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0,
             groups=1, bias=True, dilation=1, nd=2):
    super(convTranspose23D_bn_Unit, self).__init__()
    assert nd in (1, 2, 3), 'nd is not correctly specified; it should be one of {1, 2, 3}'
    if nd == 2:
        self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                       output_padding=output_padding, groups=groups, bias=bias, dilation=dilation)
        self.bn = nn.BatchNorm2d(out_channels)
    elif nd == 3:
        self.conv = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                       output_padding=output_padding, groups=groups, bias=bias, dilation=dilation)
        self.bn = nn.BatchNorm3d(out_channels)
    else:
        self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
                                       output_padding=output_padding, groups=groups, bias=bias, dilation=dilation)
        self.bn = nn.BatchNorm1d(out_channels)
    init.xavier_uniform_(self.conv.weight, gain=np.sqrt(2.0))
    if self.conv.bias is not None:       # guard: the original would fail when bias=False
        init.constant_(self.conv.bias, 0)
    # self.relu = nn.ReLU()
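
A short usage sketch (channel sizes and lengths are illustrative, and the class is assumed to be defined as above in a module that imports numpy as np and torch.nn.init): passing nd=1 selects the ConvTranspose1d/BatchNorm1d pair. Since only __init__ is shown, the sketch calls the submodules directly.

unit = convTranspose23D_bn_Unit(in_channels=16, out_channels=32,
                                kernel_size=4, stride=2, padding=1, nd=1)
x = torch.randn(8, 16, 64)                  # (batch, channels, length)
y = unit.bn(unit.conv(x))
print(y.shape)                              # torch.Size([8, 32, 128])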

Example 7: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def __init__(self, in_channels, out_channels, kernel_size, bias=True):
    super().__init__()
    self.conv_t = nn.ConvTranspose1d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=kernel_size,
        bias=False
    )
    if bias:
        self.bias = nn.Parameter(
            torch.FloatTensor(out_channels, kernel_size)
        )
    else:
        self.register_parameter('bias', None)
    self.reset_parameters()
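
Two observations on this block (the second is an assumption, not taken from the source): because stride equals kernel_size, each input step expands into a non-overlapping block of kernel_size output samples, and the bias registered here has shape (out_channels, kernel_size) rather than the single scalar per channel that ConvTranspose1d's own bias would provide, which suggests a per-position-within-block offset. A hedged sketch of how a forward pass could apply it (the reshaping is my illustration, not the repository's code):

def forward(self, x):
    y = self.conv_t(x)                                        # (N, C_out, L_in * kernel_size)
    if self.bias is not None:
        N, C, L = y.shape
        k = self.conv_t.stride[0]                             # equals kernel_size here
        y = y.view(N, C, L // k, k) + self.bias.unsqueeze(1)  # one offset per block position
        y = y.view(N, C, L)
    return y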

Example 8: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def __init__(self, ninp, fmaps,
             kwidth, stride=4,
             bias=True,
             norm_type=None,
             act=None):
    super().__init__()
    pad = max(0, (stride - kwidth)//-2)
    # Note: this variant accepts a bias argument but does not forward it to the
    # transposed convolution (compare Example 3, which does pass bias=bias).
    self.deconv = nn.ConvTranspose1d(ninp, fmaps,
                                     kwidth,
                                     stride=stride,
                                     padding=pad)
    self.norm = build_norm_layer(norm_type, self.deconv,
                                 fmaps)
    if act is not None:
        self.act = getattr(nn, act)()
    else:
        self.act = nn.PReLU(fmaps, init=0)
    self.kwidth = kwidth
    self.stride = stride

Example 9: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def __init__(self, n_inputs, n_outputs, kernel_size, stride, conv_type, transpose=False):
    super(ConvLayer, self).__init__()
    self.transpose = transpose
    self.stride = stride
    self.kernel_size = kernel_size
    self.conv_type = conv_type
    # How many channels should be normalised as one group if GroupNorm is activated
    # WARNING: Number of channels has to be divisible by this number!
    NORM_CHANNELS = 8
    if self.transpose:
        self.filter = nn.ConvTranspose1d(n_inputs, n_outputs, self.kernel_size, stride, padding=kernel_size-1)
    else:
        self.filter = nn.Conv1d(n_inputs, n_outputs, self.kernel_size, stride)
    if conv_type == "gn":
        assert n_outputs % NORM_CHANNELS == 0
        self.norm = nn.GroupNorm(n_outputs // NORM_CHANNELS, n_outputs)
    elif conv_type == "bn":
        self.norm = nn.BatchNorm1d(n_outputs, momentum=0.01)
    # Add your own types of variations here!

Example 10: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def __init__(self, vocab_size, latent_variable_size, rnn_size, rnn_num_layers, embed_size):
    super(Decoder, self).__init__()
    self.vocab_size = vocab_size
    self.latent_variable_size = latent_variable_size
    self.rnn_size = rnn_size
    self.embed_size = embed_size
    self.rnn_num_layers = rnn_num_layers
    self.cnn = nn.Sequential(
        nn.ConvTranspose1d(self.latent_variable_size, 512, 4, 2, 0),
        nn.BatchNorm1d(512),
        nn.ELU(),
        nn.ConvTranspose1d(512, 512, 4, 2, 0, output_padding=1),
        nn.BatchNorm1d(512),
        nn.ELU(),
        nn.ConvTranspose1d(512, 256, 4, 2, 0),
        nn.BatchNorm1d(256),
        nn.ELU(),
        nn.ConvTranspose1d(256, 256, 4, 2, 0, output_padding=1),
        nn.BatchNorm1d(256),
        nn.ELU(),
        nn.ConvTranspose1d(256, 128, 4, 2, 0),
        nn.BatchNorm1d(128),
        nn.ELU(),
        nn.ConvTranspose1d(128, self.vocab_size, 4, 2, 0)
    )
    self.rnn = nn.GRU(input_size=self.vocab_size + self.embed_size,
                      hidden_size=self.rnn_size,
                      num_layers=self.rnn_num_layers,
                      batch_first=True)
    self.hidden_to_vocab = nn.Linear(self.rnn_size, self.vocab_size)

Example 11: convtrans_factory

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def convtrans_factory(dim):
    types = [nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d]
    return types[dim - 1]
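
A one-line usage sketch (the dimensionality and layer arguments are illustrative):

ConvT = convtrans_factory(1)                       # -> nn.ConvTranspose1d
layer = ConvT(in_channels=8, out_channels=4, kernel_size=3, stride=2)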

Example 12: init_weights

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def init_weights(self):
    """
    Initialize weights for convolution layers using Xavier initialization.
    """
    for m in self.modules():
        if isinstance(m, (nn.Conv1d, nn.ConvTranspose1d)):
            nn.init.xavier_normal_(m.weight.data)

Example 13: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def __init__(self, in_channels, out_channels, warmup_steps, global_cond_channels):
    super().__init__()
    self.gru = nn.GRU(in_channels + global_cond_channels, out_channels, batch_first=True)
    self.tconv = nn.ConvTranspose1d(out_channels, out_channels, kernel_size=4, stride=4)
    self.warmup_steps = warmup_steps
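
Because kernel_size equals stride, this transposed convolution upsamples the time axis by exactly 4 with non-overlapping kernels. A quick shape check (channel count and lengths are illustrative, with torch imported as in the introductory sketch):

tconv = nn.ConvTranspose1d(64, 64, kernel_size=4, stride=4)
h = torch.randn(2, 64, 25)       # (batch, channels, frames)
print(tconv(h).shape)            # torch.Size([2, 64, 100])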

Example 14: is_sparseable

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def is_sparseable(m):
    return hasattr(m, 'weight') and isinstance(m, (
        nn.Conv1d, nn.Conv2d, nn.Conv3d,
        nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d,
        nn.Linear))

Example 15: last_decoding

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import ConvTranspose1d [as alias]
def last_decoding(in_features, out_channels, drop_rate=0., upsample='nearest'):
    """Last transition-up layer, which directly outputs the predictions."""
    last_up = nn.Sequential()
    last_up.add_module('norm1', nn.BatchNorm1d(in_features))
    last_up.add_module('relu1', nn.ReLU(True))
    last_up.add_module('conv1', nn.Conv1d(in_features, in_features // 2,
                       kernel_size=1, stride=1, padding=0, bias=False))
    if drop_rate > 0.:
        last_up.add_module('dropout1', nn.Dropout1d(p=drop_rate))
    last_up.add_module('norm2', nn.BatchNorm1d(in_features // 2))
    last_up.add_module('relu2', nn.ReLU(True))
    # last_up.add_module('convT2', nn.ConvTranspose1d(in_features // 2,
    #     out_channels, kernel_size=2*padding+stride, stride=stride,
    #     padding=padding, output_padding=output_padding, bias=bias))
    # UpsamplingNearest1d / UpsamplingLinear1d are not torch.nn classes; they are
    # assumed to be custom 1D upsampling modules defined elsewhere in the source project.
    if upsample == 'nearest':
        last_up.add_module('upsample', UpsamplingNearest1d(scale_factor=2))
    elif upsample == 'linear':
        last_up.add_module('upsample', UpsamplingLinear1d(scale_factor=2))
    last_up.add_module('conv2', nn.Conv1d(in_features // 2, in_features // 4,
                       kernel_size=3, stride=1, padding=1*2, bias=False, padding_mode='circular'))
    last_up.add_module('norm3', nn.BatchNorm1d(in_features // 4))
    last_up.add_module('relu3', nn.ReLU(True))
    last_up.add_module('conv3', nn.Conv1d(in_features // 4, out_channels,
                       kernel_size=5, stride=1, padding=2*2, bias=False, padding_mode='circular'))
    return last_up