This article collects typical usage examples of torch.nn.functional.leaky_relu in Python. If you are wondering what functional.leaky_relu does or how to use it, the curated examples below may help. You can also explore the other members of the torch.nn.functional module.
Below are 15 code examples of functional.leaky_relu, sorted by popularity by default.
Example 1: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def __init__(self, rep_dim=128):
super().__init__()
self.rep_dim = rep_dim
self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv4.weight, gain=nn.init.calculate_gain('leaky_relu'))
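The matching forward pass is not part of this excerpt. Below is a hedged sketch of how these layers are typically wired for a 32x32 RGB decoder: reshape the code vector to [B, rep_dim/16, 4, 4], then alternate deconv, BN, leaky_relu, and 2x upsampling. The interpolation placement is an assumption, and `import torch.nn.functional as F` is presumed.
def forward(self, x):
    x = x.view(x.size(0), int(self.rep_dim / (4 * 4)), 4, 4)  # [B, 8, 4, 4] for rep_dim=128
    x = F.leaky_relu(x)
    x = self.deconv1(x)
    x = F.interpolate(F.leaky_relu(self.bn2d4(x)), scale_factor=2)  # 4 -> 8
    x = self.deconv2(x)
    x = F.interpolate(F.leaky_relu(self.bn2d5(x)), scale_factor=2)  # 8 -> 16
    x = self.deconv3(x)
    x = F.interpolate(F.leaky_relu(self.bn2d6(x)), scale_factor=2)  # 16 -> 32
    x = self.deconv4(x)
    return torch.sigmoid(x)  # [B, 3, 32, 32]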
Example 2: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
"""Creates an InPlace Activated Batch Normalization module
Parameters
----------
num_features : int
Number of feature channels in the input and output.
eps : float
Small constant to prevent numerical issues.
momentum : float
Momentum factor applied when computing running statistics.
affine : bool
If `True` apply learned scale and shift transformation after normalization.
activation : str
Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
slope : float
Negative slope for the `leaky_relu` activation.
"""
super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope)
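A hedged usage sketch based only on the constructor signature above (InPlaceABN fuses batch normalization and the activation into one in-place operation; the exact call below is an assumption):
abn = InPlaceABN(64, eps=1e-5, momentum=0.1, affine=True,
                 activation="leaky_relu", slope=0.01)
y = abn(torch.randn(8, 64, 32, 32))  # normalized and activated in place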
Example 3: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def forward(self, x):
inv_var = torch.rsqrt(self.running_var + self.eps)
if self.affine:
alpha = self.weight * inv_var
beta = self.bias - self.running_mean * alpha
else:
alpha = inv_var
beta = - self.running_mean * alpha
x.mul_(alpha.view(self._broadcast_shape(x)))
x.add_(beta.view(self._broadcast_shape(x)))
if self.activation == "relu":
return functional.relu(x, inplace=True)
elif self.activation == "leaky_relu":
return functional.leaky_relu(x, negative_slope=self.activation_param, inplace=True)
elif self.activation == "elu":
return functional.elu(x, alpha=self.activation_param, inplace=True)
elif self.activation == "identity":
return x
else:
raise RuntimeError("Unknown activation function {}".format(self.activation))
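The helper self._broadcast_shape(x) is not defined in this excerpt; a minimal sketch of what it plausibly returns, consistent with how alpha and beta are applied above (an assumption, not the library's code):
def _broadcast_shape(self, x):
    # reshape per-channel statistics to [1, C, 1, ..., 1] so they
    # broadcast over the batch and spatial dimensions of x
    return [1, -1] + [1] * (x.dim() - 2)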
Example 4: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def forward(self, x):
if hasattr(self, "proj_conv"):
residual = self.proj_conv(x)
residual = self.proj_bn(residual)
else:
residual = x
x = self.convs(x) + residual
if self.convs.bn1.activation == "relu":
return functional.relu(x, inplace=True)
elif self.convs.bn1.activation == "leaky_relu":
return functional.leaky_relu(x, negative_slope=self.convs.bn1.activation_param, inplace=True)
elif self.convs.bn1.activation == "elu":
return functional.elu(x, alpha=self.convs.bn1.activation_param, inplace=True)
elif self.convs.bn1.activation == "identity":
return x
else:
raise RuntimeError("Unknown activation function {}".format(self.convs.bn1.activation))
Example 5: correlation1d_cost
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def correlation1d_cost(reference_fm, target_fm, max_disp=192, start_disp=0, dilation=1, disp_sample=None,
kernel_size=1, stride=1, padding=0, dilation_patch=1,):
# for a pixel of the left image at (x, y), this computes a correlation cost volume
# against right-image pixels at (xr, y), where xr is in [x-max_disp, x+max_disp];
# only the left half of that search range, i.e. [x-max_disp, x], is needed
correlation_sampler = SpatialCorrelationSampler(patch_size=(1, max_disp * 2 - 1),
kernel_size=kernel_size,
stride=stride, padding=padding,
dilation_patch=dilation_patch)
# [B, 1, max_disp*2-1, H, W]
out = correlation_sampler(reference_fm, target_fm)
# [B, max_disp*2-1, H, W]
out = out.squeeze(1)
# [B, max_disp, H, W], keep only the left half of the search range
out = out[:, :max_disp, :, :]
cost = F.leaky_relu(out, negative_slope=0.1, inplace=True)
return cost
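A hedged usage sketch (assumes the spatial_correlation_sampler package is installed and, as in most builds of it, a CUDA device; the shapes are illustrative):
left = torch.randn(1, 32, 64, 128, device="cuda")
right = torch.randn(1, 32, 64, 128, device="cuda")
cost = correlation1d_cost(left, right, max_disp=192)
print(cost.shape)  # torch.Size([1, 192, 64, 128])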
Example 6: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def forward(self, x):
encoded = x
for i, (encoder,bencoder) in enumerate(zip(self.enc,self.benc)):
if i == self.nlayers-1:
encoded = encoded.view(encoded.size(0), -1)
encoded = encoder(encoded)
if i < self.nlayers-1:
encoded = bencoder(encoded)
encoded = F.leaky_relu(encoded, negative_slope=self.reluslope)
out = encoded
for i, (decoder,bdecoder) in reversed(list(enumerate(zip(self.dec,self.bdec)))):
if i == self.nlayers-1:
out = out.view(out.size(0), -1, 1, 1)
out = decoder(out)
if i:
out = bdecoder(out)
out = F.leaky_relu(out, negative_slope=self.reluslope)
return encoded, out
Example 7: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def forward(self, x):
# Add bias.
x += self.bias
# Evaluate activation function.
if self.act == "linear":
pass
elif self.act == 'lrelu':
x = F.leaky_relu(x, self.alpha, inplace=True)
x = x * np.sqrt(2)  # the original repo uses def_gain=np.sqrt(2)
# Scale by gain.
if self.gain != 1:
x = x * self.gain
return x
Example 8: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def __init__(self, n_class=10):
super().__init__()
def conv(in_channel, out_channel, stride=2,
self_attention=False):
return ConvBlock(in_channel, out_channel, stride=stride,
bn=False, activation=leaky_relu,
upsample=False, self_attention=self_attention)
self.conv = nn.Sequential(conv(3, 128),
conv(128, 256),
conv(256, 512, stride=1,
self_attention=True),
conv(512, 512),
conv(512, 512),
conv(512, 512))
self.linear = spectral_init(nn.Linear(512, 1))
self.embed = nn.Embedding(n_class, 512)
self.embed.weight.data.uniform_(-0.1, 0.1)
self.embed = spectral_norm(self.embed)
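The matching forward pass is not part of this excerpt. A hedged sketch of a projection-discriminator forward consistent with these layers (the global sum-pooling and the class-conditional projection term are assumptions in the style of SAGAN-like discriminators):
def forward(self, input, class_id):
    out = self.conv(input)
    out = F.leaky_relu(out, negative_slope=0.2)
    out = out.view(out.size(0), out.size(1), -1).sum(2)  # global sum pool -> [B, 512]
    out_linear = self.linear(out).squeeze(1)             # unconditional score
    embed = self.embed(class_id)                         # [B, 512]
    prod = (out * embed).sum(1)                          # class-conditional projection
    return out_linear + prod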
Example 9: cum_return
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def cum_return(self, traj):
'''calculate cumulative return of trajectory'''
sum_rewards = 0
sum_abs_rewards = 0
for x in traj:
x = x.permute(0, 3, 1, 2)  # convert to NCHW format
x = F.leaky_relu(self.conv1(x))
x = F.leaky_relu(self.conv2(x))
x = F.leaky_relu(self.conv3(x))
x = F.leaky_relu(self.conv4(x))
x = x.view(-1, 784)
x = F.leaky_relu(self.fc1(x))
r = torch.sigmoid(self.fc2(x))
sum_rewards += r
sum_abs_rewards += torch.abs(r)
return sum_rewards, sum_abs_rewards
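A hedged usage sketch for preference-based (T-REX-style) reward learning with this network; net, traj_i, traj_j, and label are illustrative names, not part of the excerpt:
label = torch.tensor([0])  # 0 -> traj_i preferred, 1 -> traj_j preferred
r_i, abs_i = net.cum_return(traj_i)
r_j, abs_j = net.cum_return(traj_j)
logits = torch.stack([r_i.sum(), r_j.sum()]).unsqueeze(0)  # [1, 2]
loss = F.cross_entropy(logits, label) + 1e-3 * (abs_i.sum() + abs_j.sum())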
Example 10: cum_return
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def cum_return(self, traj):
'''calculate cumulative return of trajectory'''
sum_rewards = 0
sum_abs_rewards = 0
x = traj.permute(0, 3, 1, 2)  # convert to NCHW format
# compute forward pass of the reward network
x = F.leaky_relu(self.conv1(x))
x = F.leaky_relu(self.conv2(x))
x = F.leaky_relu(self.conv3(x))
x = F.leaky_relu(self.conv4(x))
x = x.view(-1, 784)
x = F.leaky_relu(self.fc1(x))
r = self.fc2(x)
sum_rewards += torch.sum(r)
sum_abs_rewards += torch.sum(torch.abs(r))
return sum_rewards, sum_abs_rewards
Example 11: cum_return
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def cum_return(self, traj):
'''calculate cumulative return of trajectory'''
sum_rewards = 0
sum_abs_rewards = 0
x = traj.permute(0, 3, 1, 2)  # convert to NCHW format
# compute forward pass of the reward network (frames are batched, so the batch size equals the partial-trajectory length)
x = F.leaky_relu(self.conv1(x))
x = F.leaky_relu(self.conv2(x))
x = F.leaky_relu(self.conv3(x))
x = F.leaky_relu(self.conv4(x))
x = x.view(-1, 784)
x = F.leaky_relu(self.fc1(x))
r = self.fc2(x)
sum_rewards += torch.sum(r)
sum_abs_rewards += torch.sum(torch.abs(r))
return sum_rewards, sum_abs_rewards
Example 12: prune_model_keep_size
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def prune_model_keep_size(model, prune_idx, CBL_idx, CBLidx2mask):
pruned_model = deepcopy(model)
for idx in prune_idx:
mask = torch.from_numpy(CBLidx2mask[idx]).cuda()
bn_module = pruned_model.module_list[idx][1]
bn_module.weight.data.mul_(mask)
activation = F.leaky_relu((1 - mask) * bn_module.bias.data, 0.1)  # constant output of the pruned channels
# also handle the conv layers directly before the two upsample layers
next_idx_list = [idx + 1]
if idx == 79:
next_idx_list.append(84)
elif idx == 91:
next_idx_list.append(96)
for next_idx in next_idx_list:
next_conv = pruned_model.module_list[next_idx][0]
conv_sum = next_conv.weight.data.sum(dim=(2, 3))
offset = conv_sum.matmul(activation.reshape(-1, 1)).reshape(-1)
if next_idx in CBL_idx:
next_bn = pruned_model.module_list[next_idx][1]
next_bn.running_mean.data.sub_(offset)
else:
next_conv.bias.data.add_(offset)
bn_module.bias.data.mul_(mask)
return pruned_model
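To see why the offset computed above compensates for pruning, here is a minimal numeric sketch of the same identity (values and shapes are illustrative):
import torch
import torch.nn.functional as F

bias = torch.tensor([0.5, -0.3])  # BN biases for 2 channels
mask = torch.tensor([1.0, 0.0])   # channel 0 kept, channel 1 pruned
# a pruned channel's output collapses to the constant leaky_relu(bias)
const_act = F.leaky_relu((1 - mask) * bias, 0.1)
# the next conv turns that constant into a per-output-channel offset,
# which is then folded into its bias (or the next BN's running mean)
conv_sum = torch.randn(4, 2)      # conv weights summed over kernel dims
offset = conv_sum.matmul(const_act.reshape(-1, 1)).reshape(-1)  # [4]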
Example 13: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def forward(self, x):
x = functional.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,
self.training, self.momentum, self.eps)
if self.activation == ACT_RELU:
return functional.relu(x, inplace=True)
elif self.activation == ACT_LEAKY_RELU:
return functional.leaky_relu(x, negative_slope=self.slope, inplace=True)
elif self.activation == ACT_ELU:
return functional.elu(x, inplace=True)
else:
return x
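The ACT_* names used above are not defined in this excerpt; in common in-place ABN implementations they are plain string constants, e.g. (an assumption):
ACT_RELU = "relu"
ACT_LEAKY_RELU = "leaky_relu"
ACT_ELU = "elu"
ACT_NONE = "none"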
Example 14: __repr__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def __repr__(self):
rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \
' affine={affine}, activation={activation}'
if self.activation == "leaky_relu":
rep += ', slope={slope})'
else:
rep += ')'
return rep.format(name=self.__class__.__name__, **self.__dict__)
Example 15: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu [as alias]
def forward(self, x):
x = x.view(-1, 3, 32, 32)
x = self.conv1(x)
x = self.pool(F.leaky_relu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.leaky_relu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.leaky_relu(self.bn2d3(x)))
x = x.view(int(x.size(0)), -1)
x = self.fc1(x)
return x