This article collects typical usage examples of the Python method torch.nn.PReLU. If you are wondering what nn.PReLU does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples for the enclosing module, torch.nn.
Listed below are 14 code examples of the nn.PReLU method, sorted by popularity by default.
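Before the project-specific examples, a minimal standalone sketch of the activation itself may be useful. PReLU computes PReLU(x) = max(0, x) + a * min(0, x), where a is a learnable slope: a single shared scalar by default, or one value per channel when num_parameters is set. The tensor shapes and values below are illustrative only.
import torch
from torch import nn

# Default: one learnable slope shared across the whole tensor (init = 0.25).
act = nn.PReLU()
x = torch.randn(4, 8)
y = act(x)                        # same shape as x

# Channel-wise: one learnable slope per channel, matching dim 1 of the input.
act_cw = nn.PReLU(num_parameters=8)
feat = torch.randn(4, 8, 16, 16)
out = act_cw(feat)                # shape (4, 8, 16, 16)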
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
def __init__(self, num_classes, base_size=64, dropout=0.2):
    super().__init__()
    # ConvBlock is a helper block defined elsewhere in the source project.
    self.conv = nn.Sequential(
        ConvBlock(in_channels=3, out_channels=base_size),
        ConvBlock(in_channels=base_size, out_channels=base_size*2),
        ConvBlock(in_channels=base_size*2, out_channels=base_size*4),
        ConvBlock(in_channels=base_size*4, out_channels=base_size*8),
    )
    self.fc = nn.Sequential(
        nn.Dropout(dropout),
        nn.Linear(base_size*8, base_size*2),
        nn.PReLU(),
        nn.BatchNorm1d(base_size*2),
        nn.Dropout(dropout/2),
        nn.Linear(base_size*2, num_classes),
    )
Example 2: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
def __init__(self, inplanes, outplanes, padding_=1, stride=1, dilation_=1):
    super(HighResNetBlock, self).__init__()
    # Two convolutions with the same dilation form one residual block.
    self.conv1 = nn.Conv3d(inplanes, outplanes, kernel_size=3, stride=1,
                           padding=padding_, bias=False, dilation=dilation_)
    self.conv2 = nn.Conv3d(outplanes, outplanes, kernel_size=3, stride=1,
                           padding=padding_, bias=False, dilation=dilation_)
    # affine_par is a flag defined elsewhere in the source project;
    # the BatchNorm parameters are frozen below.
    self.bn1 = nn.BatchNorm3d(outplanes, affine=affine_par)
    for i in self.bn1.parameters():
        i.requires_grad = False
    self.bn2 = nn.BatchNorm3d(outplanes, affine=affine_par)
    for i in self.bn2.parameters():
        i.requires_grad = False
    self.relu = nn.PReLU()
    self.diff_dims = (inplanes != outplanes)
    # 1x1x1 projection used when the input and output channel counts differ.
    self.downsample = nn.Sequential(
        nn.Conv3d(inplanes, outplanes, kernel_size=1, stride=stride, bias=False),
        nn.BatchNorm3d(outplanes, affine=affine_par)
    )
    for i in self.downsample._modules['1'].parameters():
        i.requires_grad = False
Example 3: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
def __init__(self, NoLabels):
    super(HighResNet, self).__init__()
    self.conv1 = nn.Conv3d(1, 16, kernel_size=3, stride=2, padding=1, bias=False)
    self.bn1 = nn.BatchNorm3d(16, affine=affine_par)
    for i in self.bn1.parameters():
        i.requires_grad = False
    self.relu = nn.PReLU()
    self.block1_1 = HighResNetBlock(inplanes=16, outplanes=16, padding_=1, dilation_=1)
    self.block2_1 = HighResNetBlock(inplanes=16, outplanes=32, padding_=2, dilation_=2)
    self.block2_2 = HighResNetBlock(inplanes=32, outplanes=32, padding_=2, dilation_=2)
    self.block3_1 = HighResNetBlock(inplanes=32, outplanes=64, padding_=4, dilation_=4)
    self.block3_2 = HighResNetBlock(inplanes=64, outplanes=64, padding_=4, dilation_=4)
    self.conv2 = nn.Conv3d(64, 80, kernel_size=1, stride=1, padding=0, bias=False)
    self.upsample = nn.ConvTranspose3d(80, 80, kernel_size=2, stride=2, bias=False)
    self.conv3 = nn.Conv3d(80, NoLabels, kernel_size=1, stride=1, padding=0, bias=False)
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
def __init__(self, num_classes, base_size=64, dropout=0.2,
             ratio=16, kernel_size=7):
    super().__init__()
    self.conv = nn.Sequential(
        ConvBlock(in_channels=3, out_channels=base_size),
        ConvBlock(in_channels=base_size, out_channels=base_size*2),
        ConvBlock(in_channels=base_size*2, out_channels=base_size*4),
        ConvBlock(in_channels=base_size*4, out_channels=base_size*8),
    )
    self.attention = ConvolutionalBlockAttentionModule(base_size*8,
                                                       ratio=ratio,
                                                       kernel_size=kernel_size)
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    self.fc = nn.Sequential(
        nn.Dropout(dropout),
        nn.Linear(base_size*8, base_size*2),
        nn.PReLU(),
        nn.BatchNorm1d(base_size*2),
        nn.Dropout(dropout/2),
        nn.Linear(base_size*2, num_classes),
    )
Example 5: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
def __init__(self, nIn, nOut, kSize, stride=1):
    '''
    :param nIn: number of input channels
    :param nOut: number of output channels
    :param kSize: kernel size
    :param stride: stride rate for down-sampling. Default is 1
    '''
    super().__init__()
    padding = int((kSize - 1) / 2)
    # self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=padding, bias=False)
    self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(padding, padding), bias=False)
    # self.conv1 = nn.Conv2d(nOut, nOut, (1, kSize), stride=1, padding=(0, padding), bias=False)
    self.bn = nn.BatchNorm2d(nOut, eps=1e-03)
    self.act = nn.PReLU(nOut)
    # self.act = nn.ReLU()
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
def __init__(self, nIn, nOut, kSize, stride=1):
    '''
    :param nIn: number of input channels
    :param nOut: number of output channels
    :param kSize: kernel size
    :param stride: stride rate for down-sampling. Default is 1
    '''
    super().__init__()
    padding = int((kSize - 1) / 2)
    # Depth-wise convolution followed by a 1x1 point-wise convolution.
    self.conv = nn.Sequential(
        nn.Conv2d(nIn, nIn, (kSize, kSize), stride=stride, padding=(padding, padding), groups=nIn, bias=False),
        nn.Conv2d(nIn, nOut, kernel_size=1, stride=1, bias=False),
    )
    # BN_moment is a momentum constant defined elsewhere in the source project.
    self.bn = nn.BatchNorm2d(nOut, eps=1e-03, momentum=BN_moment)
    self.act = nn.PReLU(nOut)
Example 7: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
# (This example also requires: from collections import OrderedDict)
def __init__(self, input_size, hidden_layers, activation='sigmoid'):
    super(AuxiliaryNet, self).__init__()
    modules = OrderedDict()
    previous_size = input_size
    for index, hidden_layer in enumerate(hidden_layers):
        modules[f"dense{index}"] = nn.Linear(previous_size, hidden_layer)
        if activation:
            if activation.lower() == 'relu':
                modules[f"activation{index}"] = nn.ReLU()
            elif activation.lower() == 'prelu':
                modules[f"activation{index}"] = nn.PReLU()
            elif activation.lower() == 'sigmoid':
                modules[f"activation{index}"] = nn.Sigmoid()
            else:
                raise NotImplementedError(f"{activation} is not supported")
        previous_size = hidden_layer
    modules["final_layer"] = nn.Linear(previous_size, 1)
    self._sequential = nn.Sequential(modules)
Example 8: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
# (This example also requires: from collections import OrderedDict)
def __init__(self, input_size, hidden_layers,
             dropout=0.0, batchnorm=True, activation='relu'):
    super(MLP, self).__init__()
    modules = OrderedDict()
    previous_size = input_size
    for index, hidden_layer in enumerate(hidden_layers):
        modules[f"dense{index}"] = nn.Linear(previous_size, hidden_layer)
        if batchnorm:
            modules[f"batchnorm{index}"] = nn.BatchNorm1d(hidden_layer)
        if activation:
            if activation.lower() == 'relu':
                modules[f"activation{index}"] = nn.ReLU()
            elif activation.lower() == 'prelu':
                modules[f"activation{index}"] = nn.PReLU()
            elif activation.lower() == 'sigmoid':
                modules[f"activation{index}"] = nn.Sigmoid()
            else:
                raise NotImplementedError(f"{activation} is not supported")
        if dropout:
            modules[f"dropout{index}"] = nn.Dropout(dropout)
        previous_size = hidden_layer
    self._sequential = nn.Sequential(modules)
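As a usage sketch for the activation switch in Examples 7 and 8: assuming the MLP class whose __init__ is shown above (plus its class header, a forward method not shown in the snippet, and the nn/OrderedDict imports), passing activation='prelu' inserts an nn.PReLU after each hidden layer. The layer sizes below are illustrative only.
mlp = MLP(input_size=16, hidden_layers=[64, 32], activation='prelu')
print(mlp._sequential)
# Expected to list, among other layers:
#   (dense0): Linear(in_features=16, out_features=64, bias=True)
#   (activation0): PReLU(num_parameters=1)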
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
def __init__(self, in_channels, out_channels, stride=1, k=4, r_lim=7, down_method='esp', norm_layer=nn.BatchNorm2d):
    super(EESP, self).__init__()
    self.stride = stride
    n = int(out_channels / k)
    n1 = out_channels - (k - 1) * n
    assert down_method in ['avg', 'esp'], 'One of these is supported (avg or esp)'
    assert n == n1, "n(={}) and n1(={}) should be equal for Depth-wise Convolution".format(n, n1)
    self.proj_1x1 = _ConvBNPReLU(in_channels, n, 1, stride=1, groups=k, norm_layer=norm_layer)
    map_receptive_ksize = {3: 1, 5: 2, 7: 3, 9: 4, 11: 5, 13: 6, 15: 7, 17: 8}
    self.k_sizes = list()
    for i in range(k):
        ksize = int(3 + 2 * i)
        ksize = ksize if ksize <= r_lim else 3
        self.k_sizes.append(ksize)
    self.k_sizes.sort()
    self.spp_dw = nn.ModuleList()
    for i in range(k):
        dilation = map_receptive_ksize[self.k_sizes[i]]
        self.spp_dw.append(nn.Conv2d(n, n, 3, stride, dilation, dilation=dilation, groups=n, bias=False))
    self.conv_1x1_exp = _ConvBN(out_channels, out_channels, 1, 1, groups=k, norm_layer=norm_layer)
    self.br_after_cat = _BNPReLU(out_channels, norm_layer)
    self.module_act = nn.PReLU(out_channels)
    self.downAvg = True if down_method == 'avg' else False
Example 10: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
def __init__(self, in_channels, out_channels, dilation=2, reduction=16, down=False,
             residual=True, norm_layer=nn.BatchNorm2d):
    super(ContextGuidedBlock, self).__init__()
    inter_channels = out_channels // 2 if not down else out_channels
    if down:
        self.conv = _ConvBNPReLU(in_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer)
        self.reduce = nn.Conv2d(inter_channels * 2, out_channels, 1, bias=False)
    else:
        self.conv = _ConvBNPReLU(in_channels, inter_channels, 1, 1, 0, norm_layer=norm_layer)
    self.f_loc = _ChannelWiseConv(inter_channels, inter_channels)
    self.f_sur = _ChannelWiseConv(inter_channels, inter_channels, dilation)
    self.bn = norm_layer(inter_channels * 2)
    self.prelu = nn.PReLU(inter_channels * 2)
    self.f_glo = _FGlo(out_channels, reduction)
    self.down = down
    self.residual = residual
Example 11: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
def __init__(self, in_channels, nr, scale, up=True, bottleneck=True):
    super(DenseProjection, self).__init__()
    if bottleneck:
        self.bottleneck = nn.Sequential(*[
            nn.Conv2d(in_channels, nr, 1),
            nn.PReLU(nr)
        ])
        inter_channels = nr
    else:
        self.bottleneck = None
        inter_channels = in_channels
    self.conv_1 = nn.Sequential(*[
        projection_conv(inter_channels, nr, scale, up),
        nn.PReLU(nr)
    ])
    self.conv_2 = nn.Sequential(*[
        projection_conv(nr, inter_channels, scale, not up),
        nn.PReLU(inter_channels)
    ])
    self.conv_3 = nn.Sequential(*[
        projection_conv(inter_channels, nr, scale, up),
        nn.PReLU(nr)
    ])
Example 12: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
# (This example also requires: import torch)
def __init__(
        self, conv, n_feats, kernel_size,
        bias=True, bn=False, act=nn.PReLU(1, 0.25), res_scale=1):
    super(ResBlock, self).__init__()
    self.conv1 = conv(n_feats, n_feats, kernel_size, bias=bias)
    self.conv2 = conv(n_feats, n_feats, kernel_size, bias=bias)
    self.conv3 = conv(n_feats, n_feats, kernel_size, bias=bias)
    self.conv4 = conv(n_feats, n_feats, kernel_size, bias=bias)
    self.relu1 = nn.PReLU(n_feats, 0.25)
    self.relu2 = nn.PReLU(n_feats, 0.25)
    self.relu3 = nn.PReLU(n_feats, 0.25)
    self.relu4 = nn.PReLU(n_feats, 0.25)
    self.scale1 = nn.Parameter(torch.FloatTensor([2.0]), requires_grad=True)
    self.scale2 = nn.Parameter(torch.FloatTensor([2.0]), requires_grad=True)
    self.scale3 = nn.Parameter(torch.FloatTensor([2.0]), requires_grad=True)
    self.scale4 = nn.Parameter(torch.FloatTensor([2.0]), requires_grad=True)
Example 13: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
# (This example also requires: import torch)
def __init__(
        self, conv, n_feats, kernel_size,
        bias=True, bn=False, act=nn.PReLU(1, 0.25), res_scale=1):
    super(ResBlock, self).__init__()
    self.conv1 = conv(n_feats, n_feats, kernel_size, bias=bias)
    self.conv2 = conv(n_feats, n_feats, kernel_size, bias=bias)
    self.conv3 = conv(n_feats, n_feats, kernel_size, bias=bias)
    self.relu1 = nn.PReLU(n_feats, 0.25)
    self.relu2 = nn.PReLU(n_feats, 0.25)
    self.relu3 = nn.PReLU(n_feats, 0.25)
    self.scale1 = nn.Parameter(torch.FloatTensor([0.5]), requires_grad=True)
    self.scale2 = nn.Parameter(torch.FloatTensor([2.0]), requires_grad=True)
    self.scale3 = nn.Parameter(torch.FloatTensor([-1.0]), requires_grad=True)
    self.scale4 = nn.Parameter(torch.FloatTensor([4.0]), requires_grad=True)
    self.scale5 = nn.Parameter(torch.FloatTensor([1/6]), requires_grad=True)
Example 14: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import PReLU [as alias]
def __init__(self, input_size, output_size, bias=True, activation='relu', norm='batch'):
    super(DenseBlock, self).__init__()
    self.fc = nn.Linear(input_size, output_size, bias=bias)
    self.norm = norm
    if self.norm == 'batch':
        self.bn = nn.BatchNorm1d(output_size)
    elif self.norm == 'instance':
        self.bn = nn.InstanceNorm1d(output_size)
    self.activation = activation
    if self.activation == 'relu':
        self.act = nn.ReLU(True)
    elif self.activation == 'prelu':
        self.act = nn.PReLU()
    elif self.activation == 'lrelu':
        self.act = nn.LeakyReLU(0.1, True)
    elif self.activation == 'tanh':
        self.act = nn.Tanh()
    elif self.activation == 'sigmoid':
        self.act = nn.Sigmoid()