This article collects typical usage examples of torch.nn.MaxUnpool2d in Python. If you are unsure what nn.MaxUnpool2d does, how to call it, or how it is used in real code, the curated examples below may help. You can also explore further usage examples of its parent module, torch.nn.
The following shows 13 code examples of nn.MaxUnpool2d, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
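All of the examples below pair nn.MaxUnpool2d with a matching nn.MaxPool2d created with return_indices=True, since the unpooling layer needs the recorded argmax positions. As a quick standalone reference (tensor names and shapes here are illustrative and not taken from any of the examples), a minimal sketch looks like this:

import torch
from torch import nn

pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)

x = torch.randn(1, 3, 8, 8)      # (N, C, H, W)
y, indices = pool(x)             # y: (1, 3, 4, 4); indices record the argmax positions
x_rec = unpool(y, indices)       # back to (1, 3, 8, 8); non-maximal positions are zero
print(x_rec.shape)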
Example 1: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, in_channels, inter_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
    super(UpsamplingBottleneck, self).__init__()
    # Main branch: 1x1 projection followed by max-unpooling
    self.conv = nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 1, bias=False),
        norm_layer(out_channels)
    )
    self.upsampling = nn.MaxUnpool2d(2)
    # Extension branch: 1x1 reduce -> transposed-conv upsample -> 1x1 expand
    self.block = nn.Sequential(
        nn.Conv2d(in_channels, inter_channels, 1, bias=False),
        norm_layer(inter_channels),
        nn.PReLU(),
        nn.ConvTranspose2d(inter_channels, inter_channels, 2, 2, bias=False),
        norm_layer(inter_channels),
        nn.PReLU(),
        nn.Conv2d(inter_channels, out_channels, 1, bias=False),
        norm_layer(out_channels),
        nn.Dropout2d(0.1)
    )
    self.act = nn.PReLU()
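The forward pass is not part of this snippet. Assuming the ENet-style design these modules suggest (a hypothetical sketch, not taken from the source repository), the two branches would be combined roughly like this:

def forward(self, x, max_indices):
    # Hypothetical forward, consistent with the modules defined above.
    out_up = self.conv(x)                          # 1x1 projection
    out_up = self.upsampling(out_up, max_indices)  # max-unpool with encoder indices
    out_ext = self.block(x)                        # transposed-conv branch
    return self.act(out_up + out_ext)              # fuse branches and activate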
Example 2: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, in_channels, inter_channels, out_channels, norm_layer=nn.BatchNorm2d, dropout=0.1, **kwargs):
    super(UpsamplingBottleneck, self).__init__()
    self.conv = nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 1, bias=False),
        norm_layer(out_channels)
    )
    self.upsampling = nn.MaxUnpool2d(2)
    # Same structure as Example 1, but with a configurable dropout rate
    self.block = nn.Sequential(
        nn.Conv2d(in_channels, inter_channels, 1, bias=False),
        norm_layer(inter_channels),
        nn.PReLU(),
        nn.ConvTranspose2d(inter_channels, inter_channels, 2, 2, bias=False),
        norm_layer(inter_channels),
        nn.PReLU(),
        nn.Conv2d(inter_channels, out_channels, 1, bias=False),
        norm_layer(out_channels),
        nn.Dropout2d(dropout)
    )
    self.act = nn.PReLU()
Example 3: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, in_size, out_size):
    super(segnetUp2, self).__init__()
    self.unpool = nn.MaxUnpool2d(2, 2)
    self.conv1 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)
    self.conv2 = conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)
Example 4: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, in_size, out_size):
    super(segnetUp2, self).__init__()
    self.unpool = nn.MaxUnpool2d(2, 2)
    self.conv1 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)
    self.conv2 = conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)
Example 5: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, pooling):
    super(StatefulMaxUnpool2d, self).__init__()
    self.pooling = pooling
    # Mirror the paired pooling layer's geometry so the unpooling inverts it exactly
    self.unpooling = nn.MaxUnpool2d(pooling.kernel_size, pooling.stride, pooling.padding)
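The forward pass of this stateful wrapper is not shown. A plausible sketch, assuming the wrapped pooling module caches its indices (the attribute name `indices` is an assumption, not confirmed by the source):

def forward(self, x):
    # Hypothetical forward: reuse the indices cached by the paired pooling layer,
    # which is assumed to have run with return_indices=True and stored them.
    return self.unpooling(x, self.pooling.indices)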
Example 6: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, in_size, out_size):
    super(segnetUp2Instance, self).__init__()
    self.unpool = nn.MaxUnpool2d(2, 2)
    self.conv1 = conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)
    self.conv2 = conv2D(in_size, out_size, 3, 1, 1)
Example 7: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, params, outblock=False):
    """
    Decoder Block initialization
    :param dict params: parameters like number of channels, stride etc.
    :param bool outblock: Flag, indicating if last block of network before classifier
                          is created. Default: False
    """
    super(CompetitiveDecoderBlock, self).__init__(params, outblock=outblock)
    self.unpool = nn.MaxUnpool2d(kernel_size=params['pool'], stride=params['stride_pool'])
Example 8: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, in_size, out_size):
    super(segnetUp1, self).__init__()
    self.unpool = nn.MaxUnpool2d(2, 2)
    self.conv = conv2DBatchNormRelu(in_size, out_size, k_size=5, stride=1, padding=2, with_relu=False)
Example 9: forward

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def forward(self, x, layer, activation_idx, pool_locs):
    if layer in self.conv2deconv_indices:
        start_idx = self.conv2deconv_indices[layer]
    else:
        raise ValueError('layer is not a conv feature map')
    # Run the deconv stack from the mapped layer to the end; each MaxUnpool2d layer
    # consumes the switch locations recorded by its corresponding pooling layer.
    for idx in range(start_idx, len(self.features)):
        if isinstance(self.features[idx], nn.MaxUnpool2d):
            x = self.features[idx](x, pool_locs[self.unpool2pool_indices[idx]])
        else:
            x = self.features[idx](x)
    return x
Example 10: mupool

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def mupool(ks: int = 2, s: int = 2, p: int = 0):
    # Thin factory function around nn.MaxUnpool2d
    return nn.MaxUnpool2d(kernel_size=ks, stride=s, padding=p)
Example 11: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, input_channels=None, output_channels=None, upsample=None, pooling_module=None):
    super().__init__()
    # Store all constructor arguments as attributes
    self.__dict__.update(locals())
    del self.self
    if output_channels != input_channels or upsample:
        self.conv = nn.Conv2d(
            input_channels, output_channels, 1,
            stride=1, padding=0, bias=False)
        self.batch_norm = nn.BatchNorm2d(output_channels, eps=1e-03)
    if upsample and pooling_module:
        self.unpool = nn.MaxUnpool2d(2, stride=2, padding=0)
Example 12: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, in_ch, out_ch, is_bn=False):
    super(SegNetUpx2, self).__init__()
    # Upsampling followed by a convolution block
    self.unpool = nn.MaxUnpool2d(2, 2)
    self.block = UNetDownx2(in_ch, out_ch, is_bn)
Example 13: __init__

# Required import: from torch import nn [as alias]
# Or alternatively: from torch.nn import MaxUnpool2d [as alias]
def __init__(self, in_size, out_size):
    super(SegnetUp2, self).__init__()
    self.unpool = nn.MaxUnpool2d(2, 2)
    self.conv1 = layers.ConvNorm2d(in_size, out_size, 3, 1, 1,
                                   norm='batch', noli='relu')
    self.conv2 = layers.ConvNorm2d(out_size, out_size, 3, 1, 1,
                                   norm='batch', noli='relu')