This article collects typical usage examples of the Python method torch.nn.ReflectionPad2d. If you have been wondering what nn.ReflectionPad2d is for, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also browse further usage examples from its containing module, torch.nn.
Fifteen code examples of nn.ReflectionPad2d are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
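Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) showing what nn.ReflectionPad2d actually does: it pads a tensor by mirroring values across the border, without repeating the edge pixel itself.

import torch
from torch import nn

pad = nn.ReflectionPad2d(1)  # pad 1 pixel on every side by reflection
x = torch.arange(9.).view(1, 1, 3, 3)
print(pad(x))
# tensor([[[[4., 3., 4., 5., 4.],
#           [1., 0., 1., 2., 1.],
#           [4., 3., 4., 5., 4.],
#           [7., 6., 7., 8., 7.],
#           [4., 3., 4., 5., 4.]]]])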
Example 1: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
# This snippet also assumes: import functools, plus the conv_norm_relu /
# dconv_norm_relu / ResidualBlock helpers defined elsewhere in its source repo.
def __init__(self, input_nc=3, output_nc=3, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=True, num_blocks=6):
    super(ResnetGenerator, self).__init__()
    # Use a conv bias only with InstanceNorm2d, which is not affine by default
    if type(norm_layer) == functools.partial:
        use_bias = norm_layer.func == nn.InstanceNorm2d
    else:
        use_bias = norm_layer == nn.InstanceNorm2d
    res_model = [nn.ReflectionPad2d(3),
                 conv_norm_relu(input_nc, ngf * 1, 7, norm_layer=norm_layer, bias=use_bias),
                 conv_norm_relu(ngf * 1, ngf * 2, 3, 2, 1, norm_layer=norm_layer, bias=use_bias),
                 conv_norm_relu(ngf * 2, ngf * 4, 3, 2, 1, norm_layer=norm_layer, bias=use_bias)]
    for i in range(num_blocks):
        res_model += [ResidualBlock(ngf * 4, norm_layer, use_dropout, use_bias)]
    res_model += [dconv_norm_relu(ngf * 4, ngf * 2, 3, 2, 1, 1, norm_layer=norm_layer, bias=use_bias),
                  dconv_norm_relu(ngf * 2, ngf * 1, 3, 2, 1, 1, norm_layer=norm_layer, bias=use_bias),
                  nn.ReflectionPad2d(3),
                  nn.Conv2d(ngf, output_nc, 7),
                  nn.Tanh()]
    self.res_model = nn.Sequential(*res_model)
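A pattern worth noting in this example: nn.ReflectionPad2d(3) followed by a 7×7 convolution with no zero padding preserves the spatial size while avoiding border artifacts. A quick shape check (illustrative sketch, not from the source repo):

import torch
from torch import nn

layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(3, 64, kernel_size=7))
x = torch.randn(1, 3, 256, 256)
print(layer(x).shape)  # torch.Size([1, 64, 256, 256]) -- H and W unchanged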
Example 2: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
# param and Norm2D come from the surrounding module of this snippet.
def __init__(self, h_size):
    super(Residual_block, self).__init__()
    # Two Conv layers with same output size
    model = []
    # 'pad' is undefined in the original snippet; inferred: 0 when an explicit
    # reflection-padding layer is used, 1 (zero padding in the conv) otherwise.
    pad = 0 if param.padding == "reflect" else 1
    if param.padding == "reflect":
        model += [nn.ReflectionPad2d(padding=1)]
    model += [nn.Conv2d(h_size, h_size, kernel_size=3, stride=1, padding=pad)]
    if param.SELU:
        model += [torch.nn.SELU()]
    else:
        model += [Norm2D(h_size),
                  nn.ReLU(True)]
    if param.padding == "reflect":
        model += [nn.ReflectionPad2d(padding=1)]
    model += [nn.Conv2d(h_size, h_size, kernel_size=3, stride=1, padding=pad)]
    if not param.SELU:
        model += [Norm2D(h_size)]
    self.model = nn.Sequential(*model)
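When param.padding is not "reflect", the block falls back to the convolution's built-in zero padding; both branches keep the feature-map size, only the border values differ. A sketch of that shape equivalence (illustrative, not from the source):

import torch
from torch import nn

x = torch.randn(1, 8, 32, 32)
reflect = nn.Sequential(nn.ReflectionPad2d(1), nn.Conv2d(8, 8, 3, padding=0))
zero = nn.Conv2d(8, 8, 3, padding=1)
print(reflect(x).shape, zero(x).shape)  # both torch.Size([1, 8, 32, 32])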
Example 3: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def __init__(self, channels_in, channels_out, kernel_size, upsample, stride=1, activation=nn.ReLU):
    super(UpsampleConvInRelu, self).__init__()
    self.n_params = channels_out * 2
    self.upsample = upsample
    self.channels = channels_out
    if upsample:
        self.upsample_layer = torch.nn.Upsample(scale_factor=upsample)
    # Assumes numpy imported as np; equivalent to kernel_size // 2
    reflection_padding = int(np.floor(kernel_size / 2))
    self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
    self.conv = nn.Conv2d(channels_in, channels_out, kernel_size, stride)
    self.instancenorm = nn.InstanceNorm2d(channels_out)
    self.fc_beta = nn.Linear(100, channels_out)
    self.fc_gamma = nn.Linear(100, channels_out)
    if activation:
        self.activation = activation(inplace=False)
    else:
        self.activation = None
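The reflection_padding = kernel_size // 2 rule used here generalizes: for any odd kernel size at stride 1, padding by half the kernel preserves H and W. A standalone check (not from the source):

import torch
from torch import nn

x = torch.randn(1, 16, 64, 64)
for k in (3, 5, 7, 9):
    layer = nn.Sequential(nn.ReflectionPad2d(k // 2), nn.Conv2d(16, 16, k))
    assert layer(x).shape == x.shape  # spatial size preserved for odd k, stride 1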
Example 4: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
             padding_type='reflect'):
    assert(n_blocks >= 0)
    super(GlobalGenerator, self).__init__()
    activation = nn.ReLU(True)
    model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
    ### downsample
    for i in range(n_downsampling):
        mult = 2**i
        model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                  norm_layer(ngf * mult * 2), activation]
    ### resnet blocks
    mult = 2**n_downsampling
    for i in range(n_blocks):
        model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
    ### upsample
    for i in range(n_downsampling):
        mult = 2**(n_downsampling - i)
        model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
                  norm_layer(int(ngf * mult / 2)), activation]
    model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
    self.model = nn.Sequential(*model)
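The upsampling arm mirrors the downsampling arm: each ConvTranspose2d with kernel_size=3, stride=2, padding=1, output_padding=1 exactly doubles H and W, since out = (H - 1)*2 - 2*1 + 3 + 1 = 2*H. Illustrative check:

import torch
from torch import nn

up = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1)
x = torch.randn(1, 128, 32, 32)
print(up(x).shape)  # torch.Size([1, 64, 64, 64])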
Example 5: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):
    super(Encoder, self).__init__()
    self.output_nc = output_nc
    model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
             norm_layer(ngf), nn.ReLU(True)]
    ### downsample
    for i in range(n_downsampling):
        mult = 2**i
        model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                  norm_layer(ngf * mult * 2), nn.ReLU(True)]
    ### upsample
    for i in range(n_downsampling):
        mult = 2**(n_downsampling - i)
        model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
                  norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]
    model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
    self.model = nn.Sequential(*model)
Example 6: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def __init__(self, out_channels, guidance_channels, rate=2):
    super(GuidedCxtAtten, self).__init__()
    self.rate = rate
    self.padding = nn.ReflectionPad2d(1)
    self.up_sample = nn.Upsample(scale_factor=self.rate, mode='nearest')
    self.guidance_conv = nn.Conv2d(in_channels=guidance_channels, out_channels=guidance_channels // 2,
                                   kernel_size=1, stride=1, padding=0)
    self.W = nn.Sequential(
        nn.Conv2d(in_channels=out_channels, out_channels=out_channels,
                  kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(out_channels)
    )
    nn.init.xavier_uniform_(self.guidance_conv.weight)
    nn.init.constant_(self.guidance_conv.bias, 0)
    nn.init.xavier_uniform_(self.W[0].weight)
    nn.init.constant_(self.W[1].weight, 1e-3)
    nn.init.constant_(self.W[1].bias, 0)
Example 7: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def __init__(self, out_channels=3, dim=64, n_upsample=2, shared_block=None):
    super(Generator, self).__init__()
    self.shared_block = shared_block
    layers = []
    dim = dim * 2 ** n_upsample
    # Residual blocks
    for _ in range(3):
        layers += [ResidualBlock(dim)]
    # Upsampling
    for _ in range(n_upsample):
        layers += [
            nn.ConvTranspose2d(dim, dim // 2, 4, stride=2, padding=1),
            nn.InstanceNorm2d(dim // 2),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        dim = dim // 2
    # Output layer
    layers += [nn.ReflectionPad2d(3), nn.Conv2d(dim, out_channels, 7), nn.Tanh()]
    self.model_blocks = nn.Sequential(*layers)
Example 8: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def __init__(self, in_channels=3, dim=64, n_residual=3, n_downsample=2):
    super(ContentEncoder, self).__init__()
    # Initial convolution block
    layers = [
        nn.ReflectionPad2d(3),
        nn.Conv2d(in_channels, dim, 7),
        nn.InstanceNorm2d(dim),
        nn.ReLU(inplace=True),
    ]
    # Downsampling
    for _ in range(n_downsample):
        layers += [
            nn.Conv2d(dim, dim * 2, 4, stride=2, padding=1),
            nn.InstanceNorm2d(dim * 2),
            nn.ReLU(inplace=True),
        ]
        dim *= 2
    # Residual blocks
    for _ in range(n_residual):
        layers += [ResidualBlock(dim, norm="in")]
    self.model = nn.Sequential(*layers)
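The 4×4 / stride-2 / padding-1 convolution used for downsampling here halves H and W exactly, since out = (H + 2 - 4) // 2 + 1 = H / 2 for even H. Illustrative check:

import torch
from torch import nn

down = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)
x = torch.randn(1, 64, 128, 128)
print(down(x).shape)  # torch.Size([1, 128, 64, 64])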
Example 9: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
# This snippet also assumes: from torch.nn.utils import spectral_norm
def __init__(self, in_dim, out_dim, kernel_size=3, stride=1, padding=0,
             pad_type='reflect', bias=True, norm_layer=None, nl_layer=None):
    super(Conv2dBlock, self).__init__()
    if pad_type == 'reflect':
        self.pad = nn.ReflectionPad2d(padding)
    elif pad_type == 'replicate':
        self.pad = nn.ReplicationPad2d(padding)
    elif pad_type == 'zero':
        self.pad = nn.ZeroPad2d(padding)
    self.conv = spectral_norm(nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size,
                                        stride=stride, padding=0, bias=bias))
    if norm_layer is not None:
        self.norm = norm_layer(out_dim)
    else:
        self.norm = lambda x: x
    if nl_layer is not None:
        self.activation = nl_layer()
    else:
        self.activation = lambda x: x
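The snippet shows only __init__; its forward method is presumably the straightforward pad → conv → norm → activation chain (an assumption — the source does not show it):

# hypothetical forward for Conv2dBlock; assumed, not part of the original snippet
def forward(self, x):
    return self.activation(self.norm(self.conv(self.pad(x))))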
Example 10: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
# deconv3x3 is a helper from this snippet's source repo (not shown here). Note
# that nl_layer is used directly as a module instance, unlike in Example 9.
def __init__(self, im_size, nz, ngf=64, nup=6,
             norm_layer=None, nl_layer=None):
    super(ConvUpSampleDecoder, self).__init__()
    self.im_size = im_size // (2 ** nup)
    fc_dim = 4 * nz
    layers = []
    prev = 8
    for i in range(nup - 1, -1, -1):
        cur = min(prev, 2**i)
        layers.append(deconv3x3(ngf * prev, ngf * cur, stride=2))
        prev = cur
    layers += [
        nn.ReflectionPad2d(3),
        nn.Conv2d(ngf, 3, kernel_size=7, stride=1, padding=0),
        nn.Tanh(),
    ]
    self.conv = nn.Sequential(*layers)
    self.fc = nn.Sequential(
        nn.Linear(nz, fc_dim),
        nl_layer,
        nn.Dropout(),
        nn.Linear(fc_dim, self.im_size * self.im_size * ngf * 8),
    )
Example 11: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def __init__(self, dim, kernel_size=3, stride=1, padding=1):
    super(ResBlock, self).__init__()
    layers = []
    layers += [
        nn.ReflectionPad2d(padding),
        nn.Conv2d(dim, dim, kernel_size, stride),
        nn.InstanceNorm2d(dim),
        nn.ReLU(inplace=True)
    ]
    layers += [
        nn.ReflectionPad2d(padding),
        nn.Conv2d(dim, dim, kernel_size, 1),
        nn.InstanceNorm2d(dim)
    ]
    self.model = nn.Sequential(*layers)
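As with most residual blocks, the forward pass presumably adds the input back as a skip connection; the reflection padding keeps self.model(x) the same size as x, so the addition is valid (an assumption — forward is not shown in the snippet):

# hypothetical forward for ResBlock; assumed, not part of the original snippet
def forward(self, x):
    return x + self.model(x)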
Example 12: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def __init__(self, dim, norm_layer, use_dropout, use_bias):
    super(ResidualBlock, self).__init__()
    res_block = [nn.ReflectionPad2d(1),
                 conv_norm_relu(dim, dim, kernel_size=3,
                                norm_layer=norm_layer, bias=use_bias)]
    if use_dropout:
        res_block += [nn.Dropout(0.5)]
    res_block += [nn.ReflectionPad2d(1),
                  nn.Conv2d(dim, dim, kernel_size=3, padding=0, bias=use_bias),
                  norm_layer(dim)]
    self.res_block = nn.Sequential(*res_block)
Example 13: build_conv_block
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
    conv_block = []
    p = 0
    if padding_type == 'reflect':
        conv_block += [nn.ReflectionPad2d(1)]
    elif padding_type == 'replicate':
        conv_block += [nn.ReplicationPad2d(1)]
    elif padding_type == 'zero':
        p = 1
    else:
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)
    conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                   norm_layer(dim),
                   nn.ReLU(True)]
    if use_dropout:
        conv_block += [nn.Dropout(0.5)]
    p = 0
    if padding_type == 'reflect':
        conv_block += [nn.ReflectionPad2d(1)]
    elif padding_type == 'replicate':
        conv_block += [nn.ReplicationPad2d(1)]
    elif padding_type == 'zero':
        p = 1
    else:
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)
    conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                   norm_layer(dim)]
    return nn.Sequential(*conv_block)
Example 14: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def __init__(self, h_size):
    super(Residual_block, self).__init__()
    # Two Conv layers with same output size
    model = [nn.ReflectionPad2d(padding=1),
             nn.Conv2d(h_size, h_size, kernel_size=3, stride=1, padding=0),
             Norm2D(h_size),
             nn.ReLU(True)]
    if param.use_dropout:
        model += [nn.Dropout(0.5)]
    model += [nn.ReflectionPad2d(padding=1),
              nn.Conv2d(h_size, h_size, kernel_size=3, stride=1, padding=0),
              nn.ReLU(True)]
    self.model = nn.Sequential(*model)
Example 15: build_conv_block
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReflectionPad2d [as alias]
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
    """Construct a convolutional block.

    Parameters:
        dim (int)          -- the number of channels in the conv layer.
        padding_type (str) -- the name of padding layer: reflect | replicate | zero
        norm_layer         -- normalization layer
        use_dropout (bool) -- if use dropout layers.
        use_bias (bool)    -- if the conv layer uses bias or not

    Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
    """
    conv_block = []
    p = 0
    if padding_type == 'reflect':
        conv_block += [nn.ReflectionPad2d(1)]
    elif padding_type == 'replicate':
        conv_block += [nn.ReplicationPad2d(1)]
    elif padding_type == 'zero':
        p = 1
    else:
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)
    conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
    if use_dropout:
        conv_block += [nn.Dropout(0.5)]
    p = 0
    if padding_type == 'reflect':
        conv_block += [nn.ReflectionPad2d(1)]
    elif padding_type == 'replicate':
        conv_block += [nn.ReplicationPad2d(1)]
    elif padding_type == 'zero':
        p = 1
    else:
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)
    conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
    return nn.Sequential(*conv_block)
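Judging by the docstring, this snippet comes from the official CycleGAN/pix2pix implementation, where ResnetBlock applies the returned conv_block with a skip connection, so reflection padding keeps the residual branch the same size as the input:

# forward of ResnetBlock in the CycleGAN/pix2pix code (for context)
def forward(self, x):
    out = x + self.conv_block(x)  # add skip connection
    return out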