本文整理匯總了Python中torch.nn.ReplicationPad2d方法的典型用法代碼示例。如果您正苦於以下問題:Python nn.ReplicationPad2d方法的具體用法?Python nn.ReplicationPad2d怎麽用?Python nn.ReplicationPad2d使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類torch.nn
的用法示例。
在下文中一共展示了nn.ReplicationPad2d方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: forward
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def forward(self, x):
    """Pad the input so H and W are multiples of 8, run the encoder/decoder
    with additive skip connections, and crop back to the original size.

    The m_down*/m_up* stages presumably rescale spatial resolution by 2 at
    each of the three levels (hence the multiple-of-8 requirement) — confirm
    against the module definitions.
    """
    h, w = x.size()[-2:]
    # Exact integer "round up to multiple of 8" — replaces the original
    # float np.ceil round-trip with equivalent, precision-safe arithmetic.
    padding_bottom = -h % 8
    padding_right = -w % 8
    x = nn.ReplicationPad2d((0, padding_right, 0, padding_bottom))(x)

    x1 = self.m_head(x)
    x2 = self.m_down1(x1)
    x3 = self.m_down2(x2)
    x4 = self.m_down3(x3)
    x = self.m_body(x4)
    # Decoder: each stage consumes the matching encoder feature map.
    x = self.m_up3(x + x4)
    x = self.m_up2(x + x3)
    x = self.m_up1(x + x2)
    x = self.m_tail(x + x1)
    # Discard the alignment padding (it was added bottom/right only).
    x = x[..., :h, :w]
    return x
示例2: __init__
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def __init__(self, in_dim, out_dim, kernel_size=3, stride=1, padding=0,
             pad_type='reflect', bias=True, norm_layer=None, nl_layer=None):
    """Conv block: explicit padding module + spectrally-normalized Conv2d,
    with optional normalization and nonlinearity.

    :param pad_type: 'reflect' | 'replicate' | 'zero'
    :param norm_layer: constructor taking the channel count, or None
    :param nl_layer: activation constructor taking no args, or None
    """
    super(Conv2dBlock, self).__init__()
    if pad_type == 'reflect':
        self.pad = nn.ReflectionPad2d(padding)
    elif pad_type == 'replicate':
        self.pad = nn.ReplicationPad2d(padding)
    elif pad_type == 'zero':
        self.pad = nn.ZeroPad2d(padding)
    else:
        # Previously an unknown pad_type silently left self.pad undefined
        # and failed later with AttributeError; fail fast instead.
        raise NotImplementedError('padding [%s] is not implemented' % pad_type)
    # Padding is performed by self.pad, so the conv itself uses padding=0.
    self.conv = spectral_norm(nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size,
                                        stride=stride, padding=0, bias=bias))
    # Identity fallbacks keep the forward path uniform.
    self.norm = norm_layer(out_dim) if norm_layer is not None else (lambda x: x)
    self.activation = nl_layer() if nl_layer is not None else (lambda x: x)
示例3: __init__
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def __init__(self, dim_in, dilation=1, padtype = 'zero'):
    """Two (pad, 3x3 dilated conv, LeakyReLU) stages with no normalization.

    :param dim_in: channel count (preserved through the block)
    :param dilation: conv dilation; also the padding amount
    :param padtype: 'zero' (pad inside conv) | 'reflection' | 'replication'
    """
    super(ResidualBlockNoNorm, self).__init__()
    layers = []
    # Both stages are identical, so build them in a loop.
    for _ in range(2):
        pad = dilation
        if padtype == 'reflection':
            layers.append(nn.ReflectionPad2d(pad))
            pad = 0
        elif padtype == 'replication':
            # BUG FIX: the original passed the undefined name `p` here,
            # which raised NameError whenever padtype == 'replication'.
            layers.append(nn.ReplicationPad2d(pad))
            pad = 0
        layers.extend([
            nn.Conv2d(dim_in, dim_in, kernel_size=3, stride=1, padding=pad,
                      dilation=dilation, bias=False),
            nn.LeakyReLU(0.1, inplace=True),
        ])
    self.main = nn.Sequential(*layers)
示例4: build_conv_block1
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def build_conv_block1(self, inchannel, padding_type, activation, use_dropout):
    """Build one (pad, 3x3 conv, activation[, dropout]) block.

    :param padding_type: 'reflect' | 'replicate' | 'zero'
    :param activation: an activation module instance appended after the conv
    :param use_dropout: append Dropout(0.5) when True
    """
    if padding_type == 'zero':
        # Zero padding is folded into the conv itself.
        pad_modules, conv_pad = [], 1
    elif padding_type == 'reflect':
        pad_modules, conv_pad = [nn.ReflectionPad2d(1)], 0
    elif padding_type == 'replicate':
        pad_modules, conv_pad = [nn.ReplicationPad2d(1)], 0
    else:
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    layers = pad_modules + [
        nn.Conv2d(inchannel, inchannel, kernel_size=3, padding=conv_pad),
        activation,
    ]
    if use_dropout:
        layers.append(nn.Dropout(0.5))
    return nn.Sequential(*layers)
示例5: build_conv_block2
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def build_conv_block2(self, inchannel, outchannel, padding_type, activation, use_dropout, end):
    """Build one (pad, 3x3 conv[, activation]) block mapping inchannel -> outchannel.

    :param padding_type: 'reflect' | 'replicate' | 'zero'
    :param end: when True the trailing activation is omitted
    :param use_dropout: accepted for signature compatibility but unused here
    """
    explicit_pads = {'reflect': nn.ReflectionPad2d, 'replicate': nn.ReplicationPad2d}
    if padding_type in explicit_pads:
        layers, conv_pad = [explicit_pads[padding_type](1)], 0
    elif padding_type == 'zero':
        layers, conv_pad = [], 1
    else:
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    layers.append(nn.Conv2d(inchannel, outchannel, kernel_size=3, padding=conv_pad))
    if not end:
        layers.append(activation)
    return nn.Sequential(*layers)
示例6: __init__
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def __init__(self, inchannel, outchannel, upsampling=False, end=False):
    """
    Reverse Vgg19_bn block: ReplicationPad(1) + 3x3 conv, optionally followed
    by bilinear 2x upsampling and (unless `end`) LeakyReLU + BatchNorm.
    :param inchannel: input channel
    :param outchannel: output channel
    :param upsampling: judge for adding upsampling module
    :param end: when True, skip the trailing activation/normalization pair
    """
    super(ReVggBlock, self).__init__()
    model = []
    model += [nn.ReplicationPad2d(1)]
    model += [nn.Conv2d(inchannel, outchannel, 3)]
    if upsampling:
        model += [nn.UpsamplingBilinear2d(scale_factor=2)]
    if not end:
        # NOTE(review): nn.LeakyReLU(True) passes True as negative_slope
        # (i.e. slope 1.0, a linear no-op); inplace=True was probably
        # intended — confirm before changing.
        model += [nn.LeakyReLU(True), nn.BatchNorm2d(outchannel)]
    self.model = nn.Sequential(*model)
示例7: __init__
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, padding=0, pad_type='reflect', bias=True, norm_layer=None, nl_layer=None):
    """Conv block: explicit padding module + spectrally-normalized Conv2d,
    with optional normalization and nonlinearity.

    :param pad_type: 'reflect' | 'replicate' | 'zero'
    :param norm_layer: constructor taking the channel count, or None
    :param nl_layer: activation constructor taking no args, or None
    """
    super(Conv2dBlock, self).__init__()
    if pad_type == 'reflect':
        self.pad = nn.ReflectionPad2d(padding)
    elif pad_type == 'replicate':
        self.pad = nn.ReplicationPad2d(padding)
    elif pad_type == 'zero':
        self.pad = nn.ZeroPad2d(padding)
    else:
        # Previously an unknown pad_type silently left self.pad undefined
        # and failed later with AttributeError; fail fast instead.
        raise NotImplementedError('padding [%s] is not implemented' % pad_type)
    # Padding is performed by self.pad, so the conv itself uses padding=0.
    self.conv = spectral_norm(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                                        padding=0, bias=bias))
    # Identity fallbacks keep the forward path uniform.
    self.norm = norm_layer(out_planes) if norm_layer is not None else (lambda x: x)
    self.activation = nl_layer() if nl_layer is not None else (lambda x: x)
示例8: conv2d_norm_act
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def conv2d_norm_act(in_planes, out_planes, kernel_size=(3,3), stride=1,
        dilation=(1,1), padding=(1,1), bias=True, pad_mode='rep', norm_mode='', act_mode='', return_list=False):
    """Conv2d with selectable padding mode, followed by norm and activation
    layers (via get_layer_norm / get_layer_act).

    :param pad_mode: 'zeros' | 'circular' (fused into the conv) | 'rep'
        (explicit ReplicationPad2d in front of a padding=0 conv)
    :param return_list: return the raw layer list instead of nn.Sequential
    """
    # Zero-size padding degenerates to plain conv padding regardless of mode.
    if isinstance(padding, int):
        pad_mode = pad_mode if padding != 0 else 'zeros'
    else:
        pad_mode = pad_mode if max(padding) != 0 else 'zeros'

    if pad_mode in ['zeros', 'circular']:
        layers = [nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                            stride=stride, padding=padding, padding_mode=pad_mode,
                            dilation=dilation, bias=bias)]
    elif pad_mode == 'rep':
        # ReplicationPad2d expects a 4-tuple (left, right, top, bottom):
        # duplicate each axis size and reverse so (ph, pw) -> (pw, pw, ph, ph).
        # (The original comment saying "6-tuple" applied to 3D padding.)
        padding = tuple([x for x in padding for _ in range(2)][::-1])
        layers = [nn.ReplicationPad2d(padding),
                  nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                            stride=stride, padding=0, dilation=dilation, bias=bias)]
    else:
        # BUG FIX: the original formatted the undefined name `mode` here,
        # raising NameError instead of the intended ValueError.
        raise ValueError('Unknown padding option {}'.format(pad_mode))
    layers += get_layer_norm(out_planes, norm_mode)
    layers += get_layer_act(act_mode)
    if return_list:
        return layers
    else:
        return nn.Sequential(*layers)
示例9: __init__
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def __init__(self, ch_in):
    """Flow refinement head: a conv stack predicting a kernel_size**2
    per-pixel weight map, plus padding/unfold helpers used downstream.
    """
    super(RefineFlow, self).__init__()
    self.kernel_size = 3
    self.pad_size = 1
    self.pad_ftn = nn.ReplicationPad2d(self.pad_size)
    # Channel funnel ch_in -> 128 -> 64 -> 32, then project to k*k weights.
    channel_pairs = [(ch_in, 128), (128, 128), (128, 64),
                     (64, 64), (64, 32), (32, 32)]
    stack = [conv(c_in, c_out, 3, 1, 1) for c_in, c_out in channel_pairs]
    stack.append(conv(32, self.kernel_size * self.kernel_size, 3, 1, 1))
    self.convs = nn.Sequential(*stack)
    # Normalize predicted kernel weights across the k*k channel axis.
    self.softmax_feat = nn.Softmax(dim=1)
    k = self.kernel_size
    self.unfold_flow = nn.Unfold(kernel_size=(k, k))
    self.unfold_kernel = nn.Unfold(kernel_size=(1, 1))
示例10: build_conv_block
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
    """Build a residual conv block:
    [pad, conv3x3, norm, ReLU, (dropout,) pad, conv3x3, norm].

    :param padding_type: 'reflect' | 'replicate' | 'zero'
    :param norm_layer: normalization constructor taking the channel count
    :param use_dropout: insert Dropout(0.5) between the two convs
    :param use_bias: whether the convs carry a bias term
    """
    def _padding():
        # Map padding_type to (explicit pad modules, conv padding arg).
        if padding_type == 'reflect':
            return [nn.ReflectionPad2d(1)], 0
        if padding_type == 'replicate':
            return [nn.ReplicationPad2d(1)], 0
        if padding_type == 'zero':
            return [], 1
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    pads, p = _padding()
    block = pads + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                    norm_layer(dim),
                    nn.ReLU(True)]
    if use_dropout:
        block.append(nn.Dropout(0.5))
    pads, p = _padding()
    block += pads + [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                     norm_layer(dim)]
    return nn.Sequential(*block)
示例11: build_conv_block
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
    """Construct a convolutional block.
    Parameters:
        dim (int)          -- the number of channels in the conv layer.
        padding_type (str) -- the name of padding layer: reflect | replicate | zero
        norm_layer         -- normalization layer
        use_dropout (bool) -- if use dropout layers.
        use_bias (bool)    -- if the conv layer uses bias or not
    Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
    """
    conv_block = []
    # Two identical (pad, conv, norm) halves; only the first carries the
    # ReLU and the optional dropout.
    for is_second in (False, True):
        if padding_type == 'reflect':
            conv_block.append(nn.ReflectionPad2d(1))
            p = 0
        elif padding_type == 'replicate':
            conv_block.append(nn.ReplicationPad2d(1))
            p = 0
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block.append(nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias))
        conv_block.append(norm_layer(dim))
        if not is_second:
            conv_block.append(nn.ReLU(True))
            if use_dropout:
                conv_block.append(nn.Dropout(0.5))
    return nn.Sequential(*conv_block)
示例12: pad
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def pad(pad_type, padding):
    """Return the explicit padding layer for *pad_type* (case-insensitive).

    Returns None when padding == 0; 'zero' padding is expected to be done
    by the conv layers themselves, so it is not handled here.
    """
    pad_type = pad_type.lower()
    if padding == 0:
        return None
    if pad_type == 'reflect':
        return nn.ReflectionPad2d(padding)
    if pad_type == 'replicate':
        return nn.ReplicationPad2d(padding)
    raise NotImplementedError('padding layer [%s] is not implemented' % pad_type)
示例13: build_conv_block
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
    """Construct a convolutional block.
    Parameters:
        dim (int)          -- the number of channels in the conv layer.
        padding_type (str) -- the name of padding layer: reflect | replicate | zero
        norm_layer         -- normalization layer
        use_dropout (bool) -- if use dropout layers.
        use_bias (bool)    -- if the conv layer uses bias or not
    Returns an nn.Sequential: pad, conv3x3, norm, ELU, (dropout,) pad,
    conv3x3, norm.
    """
    modules = []
    for first in (True, False):
        if padding_type == 'zero':
            conv_pad = 1
        elif padding_type == 'reflect':
            modules.append(nn.ReflectionPad2d(1))
            conv_pad = 0
        elif padding_type == 'replicate':
            modules.append(nn.ReplicationPad2d(1))
            conv_pad = 0
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        modules.append(nn.Conv2d(dim, dim, kernel_size=3, padding=conv_pad, bias=use_bias))
        modules.append(norm_layer(dim))
        if first:
            # Only the first half carries the nonlinearity (ELU here) and
            # the optional dropout.
            modules.append(nn.ELU())
            if use_dropout:
                modules.append(nn.Dropout(0.5))
    return nn.Sequential(*modules)
示例14: __init__
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def __init__(self, init_weights=True):
    """U-Net-style network predicting four 1D separable-convolution kernel
    maps (sepconv-style frame interpolation, judging by the 6-channel input
    and the SeparableConvolution apply — TODO confirm against config).

    :param init_weights: when True, apply self._weight_init to all modules.
    """
    super(Net, self).__init__()
    conv_kernel = (3, 3)
    conv_stride = (1, 1)
    conv_padding = 1
    # Size of the predicted 1D kernels, taken from project config.
    sep_kernel = config.OUTPUT_1D_KERNEL_SIZE
    self.pool = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))
    self.upsamp = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    self.relu = nn.ReLU()
    # Encoder: 6 input channels (presumably two stacked RGB frames — verify)
    # widened 32 -> 512.
    self.conv32 = self._conv_module(6, 32, conv_kernel, conv_stride, conv_padding, self.relu)
    self.conv64 = self._conv_module(32, 64, conv_kernel, conv_stride, conv_padding, self.relu)
    self.conv128 = self._conv_module(64, 128, conv_kernel, conv_stride, conv_padding, self.relu)
    self.conv256 = self._conv_module(128, 256, conv_kernel, conv_stride, conv_padding, self.relu)
    self.conv512 = self._conv_module(256, 512, conv_kernel, conv_stride, conv_padding, self.relu)
    self.conv512x512 = self._conv_module(512, 512, conv_kernel, conv_stride, conv_padding, self.relu)
    # Decoder: upsample + conv stages mirroring the encoder widths.
    self.upsamp512 = self._upsample_module(512, 512, conv_kernel, conv_stride, conv_padding, self.upsamp, self.relu)
    self.upconv256 = self._conv_module(512, 256, conv_kernel, conv_stride, conv_padding, self.relu)
    self.upsamp256 = self._upsample_module(256, 256, conv_kernel, conv_stride, conv_padding, self.upsamp, self.relu)
    self.upconv128 = self._conv_module(256, 128, conv_kernel, conv_stride, conv_padding, self.relu)
    self.upsamp128 = self._upsample_module(128, 128, conv_kernel, conv_stride, conv_padding, self.upsamp, self.relu)
    self.upconv64 = self._conv_module(128, 64, conv_kernel, conv_stride, conv_padding, self.relu)
    self.upsamp64 = self._upsample_module(64, 64, conv_kernel, conv_stride, conv_padding, self.upsamp, self.relu)
    # Four kernel-prediction heads (one per separable-filter component).
    self.upconv51_1 = self._kernel_module(64, sep_kernel, conv_kernel, conv_stride, conv_padding, self.upsamp, self.relu)
    self.upconv51_2 = self._kernel_module(64, sep_kernel, conv_kernel, conv_stride, conv_padding, self.upsamp, self.relu)
    self.upconv51_3 = self._kernel_module(64, sep_kernel, conv_kernel, conv_stride, conv_padding, self.upsamp, self.relu)
    self.upconv51_4 = self._kernel_module(64, sep_kernel, conv_kernel, conv_stride, conv_padding, self.upsamp, self.relu)
    # Replication padding so the sep_kernel-wide filters can be applied at
    # image borders.
    self.pad = nn.ReplicationPad2d(sep_kernel // 2)
    if torch.cuda.is_available() and not config.ALWAYS_SLOW_SEP_CONV:
        # Fast CUDA implementation; fall back to the slow module on CPU.
        self.separable_conv = SeparableConvolution.apply
    else:
        self.separable_conv = SeparableConvolutionSlow()
    if init_weights:
        print('Initializing weights...')
        self.apply(self._weight_init)
示例15: build_conv_block
# 需要導入模塊: from torch import nn [as 別名]
# 或者: from torch.nn import ReplicationPad2d [as 別名]
def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
    """Build a residual conv block:
    [pad, conv3x3, norm, activation, (dropout,) pad, conv3x3, norm].

    :param padding_type: 'reflect' | 'replicate' | 'zero'
    :param norm_layer: normalization constructor taking the channel count
    :param activation: activation module instance used after the first conv
    :param use_dropout: insert Dropout(0.5) between the two halves
    """
    explicit_pads = {'reflect': nn.ReflectionPad2d, 'replicate': nn.ReplicationPad2d}

    def _pad_modules():
        # Explicit pad modules plus the conv padding argument to pair with.
        if padding_type in explicit_pads:
            return [explicit_pads[padding_type](1)], 0
        if padding_type == 'zero':
            return [], 1
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    mods, p = _pad_modules()
    mods += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
             norm_layer(dim),
             activation]
    if use_dropout:
        mods.append(nn.Dropout(0.5))
    more, p = _pad_modules()
    mods += more + [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
                    norm_layer(dim)]
    return nn.Sequential(*mods)