This article collects typical usage examples of Python's torch.nn.ReflectionPad1d. If you are wondering what nn.ReflectionPad1d does or how to use it in practice, the curated examples below may help. You can also explore further usage examples from its containing module, torch.nn.
Below are 5 code examples of nn.ReflectionPad1d, ordered by popularity by default.
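As a quick refresher before the examples: nn.ReflectionPad1d(p) pads the last dimension of an (N, C, W) input by reflecting values at each border, producing width W + 2*p (p must be smaller than W). A minimal sketch:

import torch
from torch import nn

pad = nn.ReflectionPad1d(2)
x = torch.arange(4.0).reshape(1, 1, 4)  # shape (N=1, C=1, W=4): [[[0., 1., 2., 3.]]]
print(pad(x))
# tensor([[[2., 1., 0., 1., 2., 3., 2., 1.]]])  -- width 4 + 2*2 = 8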
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad1d [as alias]
def __init__(self, channels, kernel_size=7):
    super(Decoder, self).__init__()
    model = []
    pad = (kernel_size - 1) // 2
    acti = nn.LeakyReLU(0.2)
    for i in range(len(channels) - 1):
        model.append(nn.Upsample(scale_factor=2, mode='nearest'))
        model.append(nn.ReflectionPad1d(pad))  # reflect-pad so the stride-1 conv keeps the length
        model.append(nn.Conv1d(channels[i], channels[i + 1],
                               kernel_size=kernel_size, stride=1))
        if i == 0 or i == 1:
            model.append(nn.Dropout(p=0.2))
        if not i == len(channels) - 2:
            model.append(acti)  # whether to add a tanh at the end?
            # model.append(nn.Dropout(p=0.2))
    self.model = nn.Sequential(*model)
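A minimal usage sketch for this Decoder, with a hypothetical channels list chosen only for illustration. Each stage doubles the temporal length via Upsample while the ReflectionPad1d/Conv1d pair keeps it unchanged, so three stages map T to 8*T. The snippet shows only __init__, so the sketch calls self.model directly:

import torch

decoder = Decoder(channels=[256, 128, 64, 32])  # hypothetical channel progression
x = torch.randn(1, 256, 16)                     # (batch, channels[0], T)
y = decoder.model(x)                            # forward() is not shown in the snippet
print(y.shape)                                  # torch.Size([1, 32, 128]) -- T * 2**3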
Example 2: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad1d [as alias]
def __init__(self, channel):
    super(ResStack, self).__init__()
    self.blocks = nn.ModuleList([
        nn.Sequential(
            nn.LeakyReLU(0.2),
            nn.ReflectionPad1d(3**i),  # padding equals the dilation, so the conv keeps the length
            nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=3, dilation=3**i)),
            nn.LeakyReLU(0.2),
            nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=1)),
        )
        for i in range(3)
    ])
    self.shortcuts = nn.ModuleList([
        nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=1))
        for i in range(3)
    ])
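The snippet omits the forward pass, but the paired blocks and shortcuts strongly suggest the usual MelGAN-style residual wiring, where each dilated branch is added to a 1x1-conv shortcut of its input. Because the reflection padding equals the dilation, every branch preserves the sequence length. A hedged reconstruction:

import torch

def res_stack_forward(stack, x):
    # Assumed forward for ResStack (not shown in the snippet above).
    for block, shortcut in zip(stack.blocks, stack.shortcuts):
        x = shortcut(x) + block(x)
    return x

stack = ResStack(channel=32)              # relies on the ResStack defined above
x = torch.randn(1, 32, 100)
print(res_stack_forward(stack, x).shape)  # torch.Size([1, 32, 100]) -- length preserved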
Example 3: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad1d [as alias]
def __init__(self):
    super(Discriminator, self).__init__()
    self.discriminator = nn.ModuleList([
        nn.Sequential(
            nn.ReflectionPad1d(7),  # pad 7 so the kernel-15, stride-1 conv keeps the length
            nn.utils.weight_norm(nn.Conv1d(1, 16, kernel_size=15, stride=1)),
            nn.LeakyReLU(0.2, inplace=True),
        ),
        nn.Sequential(
            nn.utils.weight_norm(nn.Conv1d(16, 64, kernel_size=41, stride=4, padding=20, groups=4)),
            nn.LeakyReLU(0.2, inplace=True),
        ),
        nn.Sequential(
            nn.utils.weight_norm(nn.Conv1d(64, 256, kernel_size=41, stride=4, padding=20, groups=16)),
            nn.LeakyReLU(0.2, inplace=True),
        ),
        nn.Sequential(
            nn.utils.weight_norm(nn.Conv1d(256, 1024, kernel_size=41, stride=4, padding=20, groups=64)),
            nn.LeakyReLU(0.2, inplace=True),
        ),
        nn.Sequential(
            nn.utils.weight_norm(nn.Conv1d(1024, 1024, kernel_size=41, stride=4, padding=20, groups=256)),
            nn.LeakyReLU(0.2, inplace=True),
        ),
        nn.Sequential(
            nn.utils.weight_norm(nn.Conv1d(1024, 1024, kernel_size=5, stride=1, padding=2)),
            nn.LeakyReLU(0.2, inplace=True),
        ),
        nn.utils.weight_norm(nn.Conv1d(1024, 1, kernel_size=3, stride=1, padding=1)),
    ])
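A usage sketch for this Discriminator. The four stride-4 convolutions downsample by 4**4 = 256 overall, so a waveform of 16384 samples ends as 64 time steps. Since only __init__ is shown, the sketch iterates the ModuleList by hand; the source's forward presumably also collects the intermediate feature maps, e.g. for a feature-matching loss:

import torch

disc = Discriminator()            # relies on the Discriminator defined above
x = torch.randn(1, 1, 16384)      # (batch, 1, samples) raw waveform
features = []
for layer in disc.discriminator:  # forward() is not shown in the snippet
    x = layer(x)
    features.append(x)
print(features[-1].shape)         # torch.Size([1, 1, 64]) -- 16384 / 256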
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad1d [as alias]
def __init__(self, mel_channel):
    super(Generator, self).__init__()
    self.mel_channel = mel_channel
    self.generator = nn.Sequential(
        nn.ReflectionPad1d(3),  # pad 3 so the kernel-7, stride-1 conv keeps the length
        nn.utils.weight_norm(nn.Conv1d(mel_channel, 512, kernel_size=7, stride=1)),
        nn.LeakyReLU(0.2),
        nn.utils.weight_norm(nn.ConvTranspose1d(512, 256, kernel_size=16, stride=8, padding=4)),  # 8x upsample
        ResStack(256),
        nn.LeakyReLU(0.2),
        nn.utils.weight_norm(nn.ConvTranspose1d(256, 128, kernel_size=16, stride=8, padding=4)),  # 8x upsample
        ResStack(128),
        nn.LeakyReLU(0.2),
        nn.utils.weight_norm(nn.ConvTranspose1d(128, 64, kernel_size=4, stride=2, padding=1)),  # 2x upsample
        ResStack(64),
        nn.LeakyReLU(0.2),
        nn.utils.weight_norm(nn.ConvTranspose1d(64, 32, kernel_size=4, stride=2, padding=1)),  # 2x upsample
        ResStack(32),
        nn.LeakyReLU(0.2),
        nn.ReflectionPad1d(3),
        nn.utils.weight_norm(nn.Conv1d(32, 1, kernel_size=7, stride=1)),
        nn.Tanh(),
    )
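A usage sketch for this Generator, assuming 80 mel bands (a common but here arbitrary choice). The transposed convolutions upsample by 8 * 8 * 2 * 2 = 256 in total, so T mel frames become 256*T samples, squashed to [-1, 1] by the final Tanh. The sketch also assumes ResStack from Example 2 has been completed with the residual forward sketched there, since nn.Sequential invokes each submodule's forward:

import torch

gen = Generator(mel_channel=80)  # 80 mel bands is an assumption
mel = torch.randn(1, 80, 44)     # (batch, mel bands, frames)
audio = gen.generator(mel)       # forward() is not shown in the snippet
print(audio.shape)               # torch.Size([1, 1, 11264]) -- 44 * 256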
Example 5: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad1d [as alias]
def __init__(self, dim, dilation=1):
    super().__init__()
    self.block = nn.Sequential(
        nn.LeakyReLU(0.2),
        nn.ReflectionPad1d(dilation),  # padding equals the dilation, so the conv keeps the length
        WNConv1d(dim, dim, kernel_size=3, dilation=dilation),
        nn.LeakyReLU(0.2),
        WNConv1d(dim, dim, kernel_size=1),
    )
    self.shortcut = WNConv1d(dim, dim, kernel_size=1)
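This residual block depends on a WNConv1d helper that the snippet does not define; in MelGAN-style code it is typically a thin weight-norm wrapper around nn.Conv1d, sketched below as an assumption. As in Example 2, padding by dilation before the kernel-3 dilated conv keeps the length fixed, and the block's forward presumably returns self.shortcut(x) + self.block(x):

import torch
from torch import nn

def WNConv1d(*args, **kwargs):
    # Assumed helper: a weight-normalized Conv1d, as in MelGAN-style repos.
    return nn.utils.weight_norm(nn.Conv1d(*args, **kwargs))

layer = WNConv1d(32, 32, kernel_size=3, dilation=2)
x = torch.randn(1, 32, 100)
print(nn.Sequential(nn.ReflectionPad1d(2), layer)(x).shape)  # torch.Size([1, 32, 100])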