This article collects typical usage examples of the torch.nn.functional.leaky_relu_ method in Python. If you have been wondering what exactly functional.leaky_relu_ does and how to use it in practice, the curated code examples below may help. You can also explore further usage examples from its containing module, torch.nn.functional.
The following presents 11 code examples of functional.leaky_relu_, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
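For orientation: functional.leaky_relu_ is the in-place variant of functional.leaky_relu. It mutates its input tensor and returns that same tensor, which saves memory but discards the original values. A minimal sketch of these semantics (the values are chosen purely for illustration):

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, -0.5, 0.0, 1.5])
out = F.leaky_relu_(x, negative_slope=0.2)  # modifies x in place
print(out)       # tensor([-0.4000, -0.1000,  0.0000,  1.5000])
print(out is x)  # True: the return value is the input tensor itself

Note that in-place activations cannot be applied to tensors whose original values autograd still needs (for example, leaf tensors with requires_grad=True); the examples below therefore apply leaky_relu_ only to freshly computed intermediate results.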
Example 1: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def forward(self, style_embeddings, class_embeddings):
style_embeddings = F.leaky_relu_(self.style_input(style_embeddings), negative_slope=0.2)
class_embeddings = F.leaky_relu_(self.class_input(class_embeddings), negative_slope=0.2)
x = torch.cat((style_embeddings, class_embeddings), dim=1)
x = x.view(x.size(0), 128, 2, 2)
x = self.deconv_model(x)
return x
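The snippet above is only the forward pass; the surrounding module is not shown in the source. A plausible companion definition, where the layer names match the forward but every size is an assumption, might look like:

import torch.nn as nn

class Generator(nn.Module):
    def __init__(self, style_dim=64, class_dim=32):
        super().__init__()
        # Hypothetical sizes: each branch emits 256 features, so the
        # concatenation yields 512 = 128 * 2 * 2 for the reshape in forward.
        self.style_input = nn.Linear(style_dim, 256)
        self.class_input = nn.Linear(class_dim, 256)
        self.deconv_model = nn.Sequential(
            nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1),  # 2x2 -> 4x4
            nn.LeakyReLU(0.2, inplace=True),
            nn.ConvTranspose2d(64, 3, 4, stride=2, padding=1),    # 4x4 -> 8x8
            nn.Tanh(),
        )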
Example 2: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def forward(self, x):
real, img = x.chunk(2, 1)
return torch.cat([F.leaky_relu_(real), torch.tanh(img) * np.pi], dim=1)
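This forward splits its 2*C input channels into a magnitude half and a phase half, bounding the phases to (-π, π); note that it assumes numpy is imported as np (math.pi would serve equally well). A small usage sketch with an assumed input shape:

import numpy as np
import torch
import torch.nn.functional as F

x = torch.randn(1, 4, 8, 8)   # C = 2: channels 0-1 magnitude, 2-3 phase
real, img = x.chunk(2, 1)     # two (1, 2, 8, 8) views along the channel dim
out = torch.cat([F.leaky_relu_(real), torch.tanh(img) * np.pi], dim=1)
print(out.shape)              # torch.Size([1, 4, 8, 8])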
Example 3: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def forward(self, x):
x = F.leaky_relu_(self.conv1(x))
x = self.conv1_bn(self.pool1(x))
x = self.conv2_bn(F.leaky_relu_(self.conv2(x)))
x = x.reshape(x.shape[0], -1)
x = self.fc1_bn(F.leaky_relu_(self.fc1(x)))
x = self.fc2(x)
return x
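Again only the forward is shown. A matching __init__ consistent with the call order (all channel and feature sizes here are assumptions, chosen for a 32x32 RGB input):

import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.pool1 = nn.MaxPool2d(2)            # 32x32 -> 16x16
        self.conv1_bn = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, 3, stride=2, padding=1)  # 16x16 -> 8x8
        self.conv2_bn = nn.BatchNorm2d(64)
        self.fc1 = nn.Linear(64 * 8 * 8, 128)
        self.fc1_bn = nn.BatchNorm1d(128)
        self.fc2 = nn.Linear(128, 10)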
Example 4: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def forward(self, input, flip_feat=None):
# Encoder
# No norm on the first layer
e1 = self.e1_c(input)
e2 = self.e2_norm(self.e2_c(F.leaky_relu_(e1, negative_slope=0.2)))
e3 = self.e3_norm(self.e3_c(F.leaky_relu_(e2, negative_slope=0.2)))
e4 = self.e4_norm(self.e4_c(F.leaky_relu_(e3, negative_slope=0.2)))
e5 = self.e5_norm(self.e5_c(F.leaky_relu_(e4, negative_slope=0.2)))
e6 = self.e6_norm(self.e6_c(F.leaky_relu_(e5, negative_slope=0.2)))
e7 = self.e7_norm(self.e7_c(F.leaky_relu_(e6, negative_slope=0.2)))
    # No norm on the innermost layer
e8 = self.e8_c(F.leaky_relu_(e7, negative_slope=0.2))
# Decoder
d1 = self.d1_norm(self.d1_dc(F.relu_(e8)))
d2 = self.d2_norm(self.d2_dc(F.relu_(self.cat_feat(d1, e7))))
d3 = self.d3_norm(self.d3_dc(F.relu_(self.cat_feat(d2, e6))))
d4 = self.d4_norm(self.d4_dc(F.relu_(self.cat_feat(d3, e5))))
d5 = self.d5_norm(self.d5_dc(F.relu_(self.cat_feat(d4, e4))))
tmp, innerFeat = self.shift(self.innerCos(F.relu_(self.cat_feat(d5, e3))), flip_feat)
d6 = self.d6_norm(self.d6_dc(tmp))
d7 = self.d7_norm(self.d7_dc(F.relu_(self.cat_feat(d6, e2))))
# No norm on the last layer
d8 = self.d8_dc(F.relu_(self.cat_feat(d7, e1)))
d8 = torch.tanh(d8)
return d8, innerFeat
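The helper cat_feat is not included in the source; from its call sites (and from Example 5, which inlines torch.cat instead) it presumably concatenates a decoder feature with its encoder skip connection:

def cat_feat(self, dec, enc):
    # assumed equivalent of the torch.cat([d, e], dim=1) calls in Example 5
    return torch.cat([dec, enc], dim=1)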
Example 5: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def forward(self, input):
# Encoder
# No norm on the first layer
e1 = self.e1_c(input)
e2 = self.e2_norm(self.e2_c(F.leaky_relu_(e1, negative_slope=0.2)))
e3 = self.e3_norm(self.e3_c(F.leaky_relu_(e2, negative_slope=0.2)))
e4 = self.e4_norm(self.e4_c(F.leaky_relu_(e3, negative_slope=0.2)))
e5 = self.e5_norm(self.e5_c(F.leaky_relu_(e4, negative_slope=0.2)))
e6 = self.e6_norm(self.e6_c(F.leaky_relu_(e5, negative_slope=0.2)))
e7 = self.e7_norm(self.e7_c(F.leaky_relu_(e6, negative_slope=0.2)))
    # No norm on the innermost layer
e8 = self.e8_c(F.leaky_relu_(e7, negative_slope=0.2))
# Decoder
d1 = self.d1_norm(self.d1_c(F.relu_(e8)))
d2 = self.d2_norm(self.d2_c(F.relu_(torch.cat([d1, e7], dim=1))))
d3 = self.d3_norm(self.d3_c(F.relu_(torch.cat([d2, e6], dim=1))))
d4 = self.d4_norm(self.d4_c(F.relu_(torch.cat([d3, e5], dim=1))))
d5 = self.d5_norm(self.d5_c(F.relu_(torch.cat([d4, e4], dim=1))))
d6 = self.d6_norm(self.d6_c(F.relu_(torch.cat([d5, e3], dim=1))))
d7 = self.d7_norm(self.d7_c(F.relu_(torch.cat([d6, e2], dim=1))))
# No norm on the last layer
    d8 = self.d8_c(F.relu_(torch.cat([d7, e1], dim=1)))
d8 = torch.tanh(d8)
return d8
Example 6: aten_leaky_relu_
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def aten_leaky_relu_(inputs, attributes, scope):
inp, leak = inputs[:2]
    ctx = current_context()
    net = ctx.network
if ctx.is_tensorrt and has_trt_tensor(inputs):
layer = net.add_activation(inp, trt.ActivationType.LEAKY_RELU)
layer.alpha = leak
output = layer.get_output(0)
output.name = scope
layer.name = scope
return [output]
elif ctx.is_tvm and has_tvm_tensor(inputs):
return [_op.nn.leaky_relu(inputs[0], leak)]
return [F.leaky_relu_(inp, leak)]
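The final return is the pure-PyTorch fallback taken when neither TensorRT nor TVM tensors are involved. A quick check (an illustration, not from the source) that this fallback matches the out-of-place reference:

import torch
import torch.nn.functional as F

inp, leak = torch.randn(4), 0.1
expected = F.leaky_relu(inp.clone(), leak)  # out-of-place reference
result = F.leaky_relu_(inp, leak)           # the converter's fallback path
assert torch.equal(result, expected)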
Example 7: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def forward(self, x):
y = F.batch_norm(
x, self.running_mean, self.running_var, self.weight, self.bias,
self.training or not self.track_running_stats,
self.momentum, self.eps)
return F.leaky_relu_(y, self.slope)
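This forward fuses batch normalization with an in-place leaky ReLU. The class itself is not shown; a plausible wrapper consistent with the attributes used above (the class name and defaults are assumptions):

import torch.nn as nn

class BatchNormLeakyReLU2d(nn.BatchNorm2d):
    def __init__(self, num_features, slope=0.01, **kwargs):
        super().__init__(num_features, **kwargs)
        self.slope = slope  # negative_slope for the fused activation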
Example 8: test_leaky_relu_
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def test_leaky_relu_(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.leaky_relu_(inp, negative_slope=0.01)
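This is only a smoke test: it checks that the call runs on CUDA without error. A slightly stronger variant (an assumption, not part of the source test suite) would also verify the values and the in-place contract:

def test_leaky_relu_values(self):
    inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
    ref = F.leaky_relu(inp.clone(), negative_slope=0.01)
    out = F.leaky_relu_(inp, negative_slope=0.01)
    assert out is inp              # in-place: returns the input tensor
    assert torch.equal(out, ref)   # values match the out-of-place reference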
Example 9: __init__
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def __init__(self, input_dim, pooling_dim=512, num_fc=1, act=F.leaky_relu_):
super(MaxPoolingAggregator, self).__init__()
out_dim = input_dim
self.fc = nn.ModuleList()
self.act = act
if num_fc > 0:
        for _ in range(num_fc - 1):
self.fc.append(nn.Linear(out_dim, pooling_dim))
out_dim = pooling_dim
self.fc.append(nn.Linear(out_dim, input_dim))
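Here F.leaky_relu_ is passed as a default activation callable rather than called directly. A usage sketch (how the class applies self.act internally is an assumption based on this __init__):

import torch

agg = MaxPoolingAggregator(input_dim=128)
h = torch.randn(32, 128)
for fc in agg.fc:
    h = agg.act(fc(h))  # in-place leaky ReLU on each fresh linear output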
Example 10: correlate
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def correlate(input1, input2):
out_corr = spatial_correlation_sample(input1,
input2,
kernel_size=1,
patch_size=21,
stride=1,
padding=0,
dilation_patch=2)
    # collapse dimensions 1 and 2 so the result can be treated
    # as a regular 4D tensor
    b, ph, pw, h, w = out_corr.size()
    out_corr = out_corr.view(b, ph * pw, h, w) / input1.size(1)
return F.leaky_relu_(out_corr, 0.1)
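A usage sketch, assuming the spatial-correlation-sampler package that provides spatial_correlation_sample is installed (the feature shapes below are illustrative):

import torch

f1 = torch.randn(2, 64, 48, 64)
f2 = torch.randn(2, 64, 48, 64)
corr = correlate(f1, f2)
print(corr.shape)  # torch.Size([2, 441, 48, 64]); 441 = patch_size ** 2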
Example 11: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import leaky_relu_ [as alias]
def forward(self, x):
args = self.args
    if args.input_norm:
        # subtract the per-image, per-channel mean, then scale by the RGB range
        rgb_mean = x.contiguous().view(x.size()[:2] + (-1,)).mean(dim=-1).view(x.size()[:2] + (1, 1, 1))
        x = (x - rgb_mean) / args.rgb_max
    x1_raw = x[:, :, 0, :, :].contiguous()
    x2_raw = x[:, :, 1, :, :].contiguous()
    # the bottom pyramid level holds the original images
x1_pyramid = self.feature_pyramid_extractor(x1_raw) + [x1_raw]
x2_pyramid = self.feature_pyramid_extractor(x2_raw) + [x2_raw]
# outputs
flows = []
# tensors for summary
summaries = {
'x2_warps': [],
}
for l, (x1, x2) in enumerate(zip(x1_pyramid, x2_pyramid)):
# upsample flow and scale the displacement
if l == 0:
            shape = list(x1.size())
            shape[1] = 2
flow = torch.zeros(shape).to(args.device)
else:
            flow = F.upsample(flow, scale_factor=2, mode='bilinear') * 2
x2_warp = self.warping_layer(x2, flow)
# correlation
corr = self.corr(x1, x2_warp)
        if args.corr_activation:
            F.leaky_relu_(corr)  # in-place: corr is modified, no reassignment needed
        # concatenate features and estimate flow
        # NOTE: adding `+ flow` makes the flow estimator learn a residual flow
        if args.residual:
            flow_coarse = self.flow_estimators[l](torch.cat([x1, corr, flow], dim=1)) + flow
        else:
            flow_coarse = self.flow_estimators[l](torch.cat([x1, corr, flow], dim=1))
        flow_fine = self.context_networks[l](torch.cat([x1, flow], dim=1))
flow = flow_coarse + flow_fine
if l == args.output_level:
            scale = 2 ** (args.num_levels - args.output_level - 1)
            flow = F.upsample(flow, scale_factor=scale, mode='bilinear') * scale
flows.append(flow)
summaries['x2_warps'].append(x2_warp.data)
break
else:
flows.append(flow)
summaries['x2_warps'].append(x2_warp.data)
return flows, summaries
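One caveat: F.upsample is deprecated in current PyTorch in favor of F.interpolate. A drop-in replacement for the upsampling calls above would be the following (align_corners is made explicit because the legacy default differed across releases):

flow = F.interpolate(flow, scale_factor=2, mode='bilinear', align_corners=False) * 2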