This article collects typical usage examples of the torch.nn.LeakyReLU method in Python. If you are unsure what nn.LeakyReLU does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore other members of its containing module, torch.nn.
The following 15 code examples of nn.LeakyReLU are shown, sorted by popularity by default.
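Before the examples, a minimal sketch of what nn.LeakyReLU itself does may help: unlike ReLU, which zeroes negative inputs, LeakyReLU scales them by negative_slope (0.01 by default). This snippet is illustrative only and uses no names from the examples that follow.

import torch
from torch import nn

# LeakyReLU(x) = x if x >= 0, else negative_slope * x
act = nn.LeakyReLU(negative_slope=0.2)
x = torch.tensor([-2.0, -0.5, 0.0, 1.0])
print(act(x))  # tensor([-0.4000, -0.1000, 0.0000, 1.0000])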
Example 1: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, config):
    super().__init__()
    self.config = config

    self.relu = nn.LeakyReLU(self.config.relu_slope, inplace=True)

    self.conv1 = nn.Conv2d(in_channels=self.config.input_channels, out_channels=self.config.num_filt_d, kernel_size=4, stride=2, padding=1, bias=False)
    self.conv2 = nn.Conv2d(in_channels=self.config.num_filt_d, out_channels=self.config.num_filt_d * 2, kernel_size=4, stride=2, padding=1, bias=False)
    self.batch_norm1 = nn.BatchNorm2d(self.config.num_filt_d * 2)
    self.conv3 = nn.Conv2d(in_channels=self.config.num_filt_d * 2, out_channels=self.config.num_filt_d * 4, kernel_size=4, stride=2, padding=1, bias=False)
    self.batch_norm2 = nn.BatchNorm2d(self.config.num_filt_d * 4)
    self.conv4 = nn.Conv2d(in_channels=self.config.num_filt_d * 4, out_channels=self.config.num_filt_d * 8, kernel_size=4, stride=2, padding=1, bias=False)
    self.batch_norm3 = nn.BatchNorm2d(self.config.num_filt_d * 8)
    self.conv5 = nn.Conv2d(in_channels=self.config.num_filt_d * 8, out_channels=1, kernel_size=4, stride=1, padding=0, bias=False)
    self.out = nn.Sigmoid()

    self.apply(weights_init)
Example 2: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, cf, conv):
    super(Mask, self).__init__()
    self.pool_size = cf.mask_pool_size
    self.pyramid_levels = cf.pyramid_levels
    self.dim = conv.dim
    self.conv1 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv2 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv3 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    self.conv4 = conv(cf.end_filts, cf.end_filts, ks=3, stride=1, pad=1, norm=cf.norm, relu=cf.relu)
    if conv.dim == 2:
        self.deconv = nn.ConvTranspose2d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)
    else:
        self.deconv = nn.ConvTranspose3d(cf.end_filts, cf.end_filts, kernel_size=2, stride=2)

    self.relu = nn.ReLU(inplace=True) if cf.relu == 'relu' else nn.LeakyReLU(inplace=True)
    self.conv5 = conv(cf.end_filts, cf.head_classes, ks=1, stride=1, relu=None)
    self.sigmoid = nn.Sigmoid()
Example 3: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, depth, widen_factor, num_classes=10, input_channels=3,
             sum_pool=False, norm=None, leak=.2, dropout_rate=0.0):
    super(Wide_ResNet, self).__init__()
    self.leak = leak
    self.in_planes = 16
    self.sum_pool = sum_pool
    self.norm = norm
    self.lrelu = nn.LeakyReLU(leak)

    assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'
    n = (depth - 4) // 6
    k = widen_factor

    print('| Wide-Resnet %dx%d' % (depth, k))
    nStages = [16, 16 * k, 32 * k, 64 * k]

    self.conv1 = conv3x3(input_channels, nStages[0])
    self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
    self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
    self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
    self.bn1 = get_norm(nStages[3], self.norm)
    self.last_dim = nStages[3]
    self.linear = nn.Linear(nStages[3], num_classes)
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, nFin, nFout):
    super(ResBlock, self).__init__()
    self.conv_block = nn.Sequential()
    self.conv_block.add_module('ConvL1', nn.Conv2d(nFin, nFout, kernel_size=3, padding=1, bias=False))
    self.conv_block.add_module('BNorm1', nn.BatchNorm2d(nFout))
    self.conv_block.add_module('LRelu1', nn.LeakyReLU(0.1, inplace=True))
    self.conv_block.add_module('ConvL2', nn.Conv2d(nFout, nFout, kernel_size=3, padding=1, bias=False))
    self.conv_block.add_module('BNorm2', nn.BatchNorm2d(nFout))
    self.conv_block.add_module('LRelu2', nn.LeakyReLU(0.1, inplace=True))
    self.conv_block.add_module('ConvL3', nn.Conv2d(nFout, nFout, kernel_size=3, padding=1, bias=False))
    self.conv_block.add_module('BNorm3', nn.BatchNorm2d(nFout))
    self.conv_block.add_module('LRelu3', nn.LeakyReLU(0.1, inplace=True))

    self.skip_layer = nn.Conv2d(nFin, nFout, kernel_size=1, stride=1)
Example 5: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_bias=False):
    super(NLayerDiscriminator, self).__init__()

    dis_model = [nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1),
                 nn.LeakyReLU(0.2, True)]
    nf_mult = 1
    nf_mult_prev = 1
    for n in range(1, n_layers):
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n, 8)
        dis_model += [conv_norm_lrelu(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=4, stride=2,
                                      norm_layer=norm_layer, padding=1, bias=use_bias)]
    nf_mult_prev = nf_mult
    nf_mult = min(2 ** n_layers, 8)
    dis_model += [conv_norm_lrelu(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=4, stride=1,
                                  norm_layer=norm_layer, padding=1, bias=use_bias)]
    dis_model += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=4, stride=1, padding=1)]

    self.dis_model = nn.Sequential(*dis_model)
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, inChannels, outChannels, kernelSize=1, stride=1, padding=0):
    super(NewConvBnRelu3D, self).__init__()
    self.inChannels = inChannels
    self.outChannels = outChannels
    self.kernelSize = kernelSize
    self.stride = stride
    self.padding = padding
    self.relu = nn.LeakyReLU()
    self.bn = nn.BatchNorm3d(self.inChannels)
    if kernelSize == 1:
        self.conv = nn.Conv1d(self.inChannels, self.outChannels, self.kernelSize, self.stride, self.padding)
    elif isinstance(kernelSize, int):
        self.conv = nn.Conv3d(self.inChannels, self.outChannels, self.kernelSize, self.stride, self.padding)
    elif kernelSize[0] == 1:
        self.conv = nn.Conv2d(self.inChannels, self.outChannels, self.kernelSize[1:], self.stride, self.padding)
    else:
        self.conv = nn.Conv3d(self.inChannels, self.outChannels, self.kernelSize, self.stride, self.padding)
Example 7: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self):
    super(CycleGAN_D, self).__init__()

    # Size = n_colors x image_size x image_size
    model = [nn.Conv2d(param.n_colors, param.D_h_size, kernel_size=4, stride=2, padding=2),
             nn.LeakyReLU(0.2, inplace=True)]
    # Size = D_h_size x (image_size / 2) x (image_size / 2)
    model += [nn.Conv2d(param.D_h_size, param.D_h_size * 2, kernel_size=4, stride=2, padding=2),
              Norm2D(param.D_h_size * 2),
              nn.LeakyReLU(0.2, inplace=True)]
    # Size = (D_h_size * 2) x (image_size / 4) x (image_size / 4)
    model += [nn.Conv2d(param.D_h_size * 2, param.D_h_size * 4, kernel_size=4, stride=2, padding=2),
              Norm2D(param.D_h_size * 4),
              nn.LeakyReLU(0.2, inplace=True)]
    # Size = (D_h_size * 4) x (image_size / 8) x (image_size / 8)
    model += [nn.Conv2d(param.D_h_size * 4, param.D_h_size * 8, kernel_size=4, stride=1, padding=2),
              Norm2D(param.D_h_size * 8),
              nn.LeakyReLU(0.2, inplace=True)]
    # Size = (D_h_size * 8) x (image_size / 8) x (image_size / 8)
    model += [nn.Conv2d(param.D_h_size * 8, 1, kernel_size=2, stride=1, padding=2)]
    # Size = 1 x (image_size / 8) x (image_size / 8)
    self.model = nn.Sequential(*model)
Example 8: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, batch, depth, features, degrees, support=10, node=1, upsample=False, activation=True):
    self.batch = batch
    self.depth = depth
    self.in_feature = features[depth]
    self.out_feature = features[depth + 1]
    self.node = node
    self.degree = degrees[depth]
    self.upsample = upsample
    self.activation = activation
    super(TreeGCN, self).__init__()

    self.W_root = nn.ModuleList([nn.Linear(features[inx], self.out_feature, bias=False) for inx in range(self.depth + 1)])

    if self.upsample:
        self.W_branch = nn.Parameter(torch.FloatTensor(self.node, self.in_feature, self.degree * self.in_feature))

    self.W_loop = nn.Sequential(nn.Linear(self.in_feature, self.in_feature * support, bias=False),
                                nn.Linear(self.in_feature * support, self.out_feature, bias=False))

    self.bias = nn.Parameter(torch.FloatTensor(1, self.degree, self.out_feature))
    self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)

    self.init_param()
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, channels, kernel_size=7):
    super(Decoder, self).__init__()

    model = []
    pad = (kernel_size - 1) // 2
    acti = nn.LeakyReLU(0.2)

    for i in range(len(channels) - 1):
        model.append(nn.Upsample(scale_factor=2, mode='nearest'))
        model.append(nn.ReflectionPad1d(pad))
        model.append(nn.Conv1d(channels[i], channels[i + 1],
                               kernel_size=kernel_size, stride=1))
        if i == 0 or i == 1:
            model.append(nn.Dropout(p=0.2))
        if not i == len(channels) - 2:
            model.append(acti)  # whether to add tanh at the end?
            # model.append(nn.Dropout(p=0.2))

    self.model = nn.Sequential(*model)
Example 10: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, inp, oup, stride, expand_ratio=0.5):
    super(_conv_block, self).__init__()
    if stride == 1 and inp == oup:
        depth = int(oup * expand_ratio)
        self.conv = nn.Sequential(
            nn.Conv2d(inp, depth, 1, 1, bias=False),
            nn.BatchNorm2d(depth),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(depth, oup, 3, stride, 1, bias=False),
            nn.BatchNorm2d(oup),
            nn.LeakyReLU(0.1, inplace=True),
        )
    else:
        self.conv = nn.Sequential(
            nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
            nn.BatchNorm2d(oup),
            nn.LeakyReLU(0.1, inplace=True),
        )
    self.depth = oup
Example 11: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True):
    super(MultiHeadGraphAttention, self).__init__()
    self.n_head = n_head
    self.w = Parameter(torch.Tensor(n_head, f_in, f_out))
    self.a_src = Parameter(torch.Tensor(n_head, f_out, 1))
    self.a_dst = Parameter(torch.Tensor(n_head, f_out, 1))

    self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
    self.softmax = nn.Softmax(dim=-1)
    self.dropout = nn.Dropout(attn_dropout)

    if bias:
        self.bias = Parameter(torch.Tensor(f_out))
        init.constant_(self.bias, 0)
    else:
        self.register_parameter('bias', None)

    init.xavier_uniform_(self.w)
    init.xavier_uniform_(self.a_src)
    init.xavier_uniform_(self.a_dst)
Example 12: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
    """Construct a 1x1 PatchGAN discriminator

    Parameters:
        input_nc (int)  -- the number of channels in input images
        ndf (int)       -- the number of filters in the last conv layer
        norm_layer      -- normalization layer
    """
    super(PixelDiscriminator, self).__init__()
    if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
        use_bias = norm_layer.func == nn.InstanceNorm2d
    else:
        use_bias = norm_layer == nn.InstanceNorm2d

    self.net = [
        nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
        nn.LeakyReLU(0.2, True),
        nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
        norm_layer(ndf * 2),
        nn.LeakyReLU(0.2, True),
        nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]

    self.net = nn.Sequential(*self.net)
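As a quick usage sketch: the snippet above only shows __init__, so this assumes the usual forward that simply applies self.net, and the input shape is made up for illustration. Since every conv is 1x1 with stride 1, the spatial size is preserved and each pixel gets its own logit.

netD = PixelDiscriminator(input_nc=3, ndf=64)
x = torch.randn(4, 3, 128, 128)  # assumed batch of RGB images
scores = netD.net(x)             # per-pixel logits, shape (4, 1, 128, 128)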
Example 13: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
    super(PixelDiscriminator, self).__init__()
    self.gpu_ids = gpu_ids
    if type(norm_layer) == functools.partial:
        use_bias = norm_layer.func == nn.InstanceNorm2d
    else:
        use_bias = norm_layer == nn.InstanceNorm2d

    self.net = [
        nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
        nn.LeakyReLU(0.2, True),
        nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
        norm_layer(ndf * 2),
        nn.LeakyReLU(0.2, True),
        nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]

    if use_sigmoid:
        self.net.append(nn.Sigmoid())

    self.net = nn.Sequential(*self.net)
Example 14: add_conv
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def add_conv(in_ch, out_ch, ksize, stride, leaky=True):
    """
    Add a conv2d / batchnorm / leaky ReLU block.
    Args:
        in_ch (int): number of input channels of the convolution layer.
        out_ch (int): number of output channels of the convolution layer.
        ksize (int): kernel size of the convolution layer.
        stride (int): stride of the convolution layer.
        leaky (bool): if True, use LeakyReLU(0.1); otherwise use ReLU6.
    Returns:
        stage (Sequential) : Sequential layers composing a convolution block.
    """
    stage = nn.Sequential()
    pad = (ksize - 1) // 2
    stage.add_module('conv', nn.Conv2d(in_channels=in_ch,
                                       out_channels=out_ch, kernel_size=ksize, stride=stride,
                                       padding=pad, bias=False))
    stage.add_module('batch_norm', nn.BatchNorm2d(out_ch))
    if leaky:
        stage.add_module('leaky', nn.LeakyReLU(0.1))
    else:
        stage.add_module('relu6', nn.ReLU6(inplace=True))
    return stage
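A brief usage sketch (the tensor sizes are assumptions for illustration): with ksize=3 and stride=1, the computed padding of 1 preserves spatial resolution, so only the channel count changes.

block = add_conv(in_ch=32, out_ch=64, ksize=3, stride=1, leaky=True)
y = block(torch.randn(1, 32, 56, 56))  # -> shape (1, 64, 56, 56)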
Example 15: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import LeakyReLU [as alias]
def __init__(self, ndf, nef, bcondition=True):
    super(D_GET_LOGITS, self).__init__()
    self.df_dim = ndf
    self.ef_dim = nef
    self.bcondition = bcondition
    if bcondition:
        self.outlogits = nn.Sequential(
            conv3x3(ndf * 8 + nef, ndf * 8),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
            nn.Sigmoid())
    else:
        self.outlogits = nn.Sequential(
            nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
            nn.Sigmoid())