This article collects typical code examples of the Python method torch.nn.functional.elu. If you are unsure what functional.elu does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore other members of the torch.nn.functional module.
The following shows 15 code examples of functional.elu, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
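For reference, F.elu applies the Exponential Linear Unit elementwise: elu(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise, with alpha defaulting to 1.0. A minimal self-contained demo:

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, -0.5, 0.0, 1.5])
print(F.elu(x))             # tensor([-0.8647, -0.3935,  0.0000,  1.5000])
print(F.elu(x, alpha=0.5))  # negative branch scaled by alpha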
Example 1: __init__
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
    """Creates an InPlace Activated Batch Normalization module

    Parameters
    ----------
    num_features : int
        Number of feature channels in the input and output.
    eps : float
        Small constant to prevent numerical issues.
    momentum : float
        Momentum factor applied to compute running statistics.
    affine : bool
        If `True`, apply a learned scale and shift transformation after normalization.
    activation : str
        Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
    slope : float
        Negative slope for the `leaky_relu` activation.
    """
    super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope)
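A minimal usage sketch based on the signature above (InPlaceABN itself comes from the inplace_abn project; the channel count and input shape here are illustrative assumptions):

import torch

abn = InPlaceABN(64, activation="elu")  # assumes InPlaceABN is imported from the inplace_abn package
y = abn(torch.randn(8, 64, 32, 32))     # normalize + ELU in one fused, in-place step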
Example 2: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, x):
    inv_var = torch.rsqrt(self.running_var + self.eps)
    if self.affine:
        alpha = self.weight * inv_var
        beta = self.bias - self.running_mean * alpha
    else:
        alpha = inv_var
        beta = -self.running_mean * alpha

    x.mul_(alpha.view(self._broadcast_shape(x)))
    x.add_(beta.view(self._broadcast_shape(x)))

    if self.activation == "relu":
        return functional.relu(x, inplace=True)
    elif self.activation == "leaky_relu":
        return functional.leaky_relu(x, negative_slope=self.activation_param, inplace=True)
    elif self.activation == "elu":
        return functional.elu(x, alpha=self.activation_param, inplace=True)
    elif self.activation == "identity":
        return x
    else:
        raise RuntimeError("Unknown activation function {}".format(self.activation))
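The two in-place ops work because inference-mode batch norm collapses into a single affine map: y = (x - running_mean) / sqrt(running_var + eps) * weight + bias equals alpha * x + beta with alpha = weight / sqrt(running_var + eps) and beta = bias - running_mean * alpha. A quick sanity check of that identity (1-D shapes, purely a verification sketch):

import torch

x = torch.randn(10)
mean, var, eps = x.mean(), x.var(unbiased=False), 1e-5
weight, bias = torch.tensor(2.0), torch.tensor(0.5)
alpha = weight * torch.rsqrt(var + eps)
beta = bias - mean * alpha
assert torch.allclose((x - mean) * torch.rsqrt(var + eps) * weight + bias, alpha * x + beta)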
Example 3: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, x):
    if hasattr(self, "proj_conv"):
        residual = self.proj_conv(x)
        residual = self.proj_bn(residual)
    else:
        residual = x

    x = self.convs(x) + residual

    if self.convs.bn1.activation == "relu":
        return functional.relu(x, inplace=True)
    elif self.convs.bn1.activation == "leaky_relu":
        return functional.leaky_relu(x, negative_slope=self.convs.bn1.activation_param, inplace=True)
    elif self.convs.bn1.activation == "elu":
        return functional.elu(x, alpha=self.convs.bn1.activation_param, inplace=True)
    elif self.convs.bn1.activation == "identity":
        return x
    else:
        raise RuntimeError("Unknown activation function {}".format(self.convs.bn1.activation))
Example 4: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, x, vertices, adj):
    emb = self.embedding(vertices)
    if self.inst_norm:
        emb = self.norm(emb.transpose(1, 2)).transpose(1, 2)
    x = torch.cat((x, emb), dim=2)
    if self.use_vertex_feature:
        vfeature = self.vertex_feature(vertices)
        x = torch.cat((x, vfeature), dim=2)

    bs, n = adj.size()[:2]
    for i, gat_layer in enumerate(self.layer_stack):
        x = gat_layer(x, adj)  # bs x n_head x n x f_out
        if i + 1 == self.n_layer:
            x = x.mean(dim=1)
        else:
            x = F.elu(x.transpose(1, 2).contiguous().view(bs, n, -1))
            x = F.dropout(x, self.dropout, training=self.training)
    return F.log_softmax(x, dim=-1)
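The transpose/view pair concatenates the attention heads per node, GAT-style, before applying F.elu; only the final layer averages over heads instead. A shape trace with illustrative sizes:

import torch

bs, n_head, n, f_out = 2, 4, 10, 8
x = torch.randn(bs, n_head, n, f_out)               # output of one gat_layer
x = x.transpose(1, 2).contiguous().view(bs, n, -1)
print(x.shape)                                      # torch.Size([2, 10, 32]): heads concatenated per node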
Example 5: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, input, adj):
    h = torch.mm(input, self.W)
    N = h.size()[0]

    f_1 = torch.matmul(h, self.a1)
    f_2 = torch.matmul(h, self.a2)
    e = self.leakyrelu(f_1 + f_2.transpose(0, 1))

    zero_vec = -9e15 * torch.ones_like(e)
    attention = torch.where(adj > 0, e, zero_vec)
    attention = F.softmax(attention, dim=1)
    attention = F.dropout(attention, self.dropout, training=self.training)
    h_prime = torch.matmul(attention, h)

    if self.concat:
        return F.elu(h_prime)
    else:
        return h_prime
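The -9e15 fill is the standard way to mask non-edges: after softmax, positions that received that large negative score get numerically zero attention weight. A standalone sketch of the trick:

import torch
import torch.nn.functional as F

e = torch.tensor([[0.2, 1.3, -0.7]])
adj = torch.tensor([[1, 0, 1]])                              # the middle entry is not an edge
masked = torch.where(adj > 0, e, torch.full_like(e, -9e15))
print(F.softmax(masked, dim=1))                              # masked position gets ~0 weight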
Example 6: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, data):
    data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
    cluster = voxel_grid(data.pos, data.batch, size=5, start=0, end=28)
    data.edge_attr = None
    data = max_pool(cluster, data, transform=transform)

    data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
    cluster = voxel_grid(data.pos, data.batch, size=7, start=0, end=28)
    data.edge_attr = None
    data = max_pool(cluster, data, transform=transform)

    data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))
    cluster = voxel_grid(data.pos, data.batch, size=14, start=0, end=27.99)
    x, _ = max_pool_x(cluster, data.x, data.batch, size=4)

    x = x.view(-1, self.fc1.weight.size(1))
    x = F.elu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
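This example depends on PyTorch Geometric's pooling utilities; a plausible import block for it (an assumption, not shown in the original snippet) would be:

import torch.nn.functional as F
from torch_geometric.nn import max_pool, max_pool_x, voxel_grid

The module-level transform passed to max_pool is not shown either; in the PyTorch Geometric MNIST superpixel examples this code resembles, it is typically a torch_geometric.transforms.Cartesian instance that recomputes edge attributes after pooling.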
Example 7: lovasz_hinge_flat
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss.

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * signs)
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.elu(errors_sorted), grad)
    return loss
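The helper lovasz_grad is not shown on this page. For completeness, a sketch matching the reference implementation from the Lovasz-Softmax repository, which computes the gradient of the Lovasz extension of the Jaccard loss with respect to the sorted errors:

def lovasz_grad(gt_sorted):
    # gt_sorted: ground-truth labels sorted by decreasing error
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts - gt_sorted.float().cumsum(0)
    union = gts + (1. - gt_sorted).float().cumsum(0)
    jaccard = 1. - intersection / union
    if p > 1:
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard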
Example 8: lovasz_hinge_flat
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss.

    logits: [P] Variable, logits at each prediction (between -inf and +inf)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.elu(errors_sorted) + 1, Variable(grad))
    return loss
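Two details distinguish this variant from Example 7. First, Variable comes from the legacy torch.autograd API (it needs from torch.autograd import Variable and has been a no-op wrapper since PyTorch 0.4). Second, the + 1 shift makes the surrogate nonnegative: elu(x) + 1 equals exp(x) for x <= 0 and x + 1 for x > 0. A one-line check:

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, 3.0])
print(F.elu(x) + 1)  # tensor([0.1353, 4.0000]): exp(x) on the negative side, x + 1 on the positive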
Example 9: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, input):
    """Method to forward propagate through the actor's graph

    Parameters:
        input (tensor): states

    Returns:
        action (tensor): actions
    """
    # Hidden Layer 1
    out = F.elu(self.f1(input))
    out = self.ln1(out)

    # Hidden Layer 2
    out = F.elu(self.f2(out))
    out = self.ln2(out)

    # Out
    return torch.sigmoid(self.w_out(out))
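A minimal module skeleton this forward would slot into (the attribute names follow the code above, but the layer types and sizes are assumptions for illustration):

import torch
import torch.nn as nn
import torch.nn.functional as F

class Actor(nn.Module):
    def __init__(self, state_dim=24, hidden=128, action_dim=4):
        super().__init__()
        self.f1 = nn.Linear(state_dim, hidden)
        self.ln1 = nn.LayerNorm(hidden)
        self.f2 = nn.Linear(hidden, hidden)
        self.ln2 = nn.LayerNorm(hidden)
        self.w_out = nn.Linear(hidden, action_dim)

The final torch.sigmoid bounds each action component to (0, 1).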
Example 10: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, inputs, noise):
    out = self.input(inputs)
    cond = torch.zeros(
        inputs.size(0), 10,
        dtype=inputs.dtype,
        device=inputs.device
    )
    offset = (torch.log(noise) / torch.log(torch.tensor(0.60))).long()
    cond[torch.arange(inputs.size(0)), offset.view(-1)] = 1

    connections = []
    for norm, block in zip(self.down_norm, self.down):
        out = func.elu(block(norm(out, cond)))
        connections.append(out)

    features = func.adaptive_avg_pool2d(out, 1)
    logits = self.predict(features.view(features.size(0), -1))

    for norm, block, shortcut in zip(self.up_norm, self.up, reversed(connections)):
        out = func.elu(block(norm(torch.cat((out, shortcut), dim=1), cond)))
    del connections

    return self.output(out), logits
Example 11: __init__
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def __init__(self, in_size, out_size,
             size=None, upsample=2,
             activation=func.elu):
    super(UpsampleBlock, self).__init__()
    self.is_first = False
    self.size = size
    if size is not None:
        self.is_first = True
        total_size = torch.Size(size).numel()
        self.input = nn.Linear(in_size, out_size * total_size)
    self.pixnorm = tsn.PixelNorm()
    self.convs = nn.ModuleList([
        nn.Conv2d(in_size, in_size, 3),
        nn.Conv2d(in_size, out_size, 3)
    ])
    self.activation = activation
    self.upsample = upsample
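Note how the activation itself is a constructor argument defaulting to func.elu, which keeps the block activation-agnostic. A generic sketch of the same pattern (names here are illustrative, not from the original source):

import torch.nn as nn
import torch.nn.functional as func

class ConvBlock(nn.Module):
    def __init__(self, channels, activation=func.elu):
        super().__init__()
        self.conv = nn.Conv2d(channels, channels, 3, padding=1)
        self.activation = activation  # stored as a plain callable

    def forward(self, x):
        return self.activation(self.conv(x))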
Example 12: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, input, adj):
    h = torch.mm(input, self.W)
    N = h.size()[0]

    a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)
    e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))

    # fill non-edges with a large negative value so softmax drives them to ~0
    zero_vec = -9e15 * torch.ones_like(e)
    attention = torch.where(adj > 0, e, zero_vec)
    attention = F.softmax(attention, dim=1)
    attention = F.dropout(attention, self.dropout, training=self.training)
    h_prime = torch.matmul(attention, h)

    if self.concat:
        return F.elu(h_prime)
    else:
        return h_prime
Example 13: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, input, adj):
    h = torch.mm(input, self.W)
    N = h.size()[0]

    a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)
    e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))

    zero_vec = -9e15 * torch.ones_like(e)
    attention = torch.where(adj > 0, e, zero_vec)
    attention = F.softmax(attention, dim=1)
    attention = F.dropout(attention, self.dropout, training=self.training)
    h_prime = torch.matmul(attention, h)

    if self.concat:
        return F.elu(h_prime)
    else:
        return h_prime
Example 14: forward
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, x, adjs, mask=None):
    # x: (batch, N, input_dim)
    # adjs: (batch, n_graph, N, N)
    if len(adjs.size()) == 3:
        adjs = adjs.unsqueeze(1)
    batch, num_sent, d_input = x.size()
    assert adjs.size(1) == self.n_graph

    h = self.linear(x)
    x = x.unsqueeze(1).expand(-1, self.n_graph, -1, -1)
    h_gcn = torch.matmul(adjs, x).transpose(1, 2).contiguous().view(batch, num_sent, -1)
    h_gcn = self.linear_gcn(h_gcn)

    d = adjs.sum(dim=3).sum(dim=1).unsqueeze(2)
    d = d + d.eq(0).float()  # guard against division by zero for isolated nodes
    h = h + h_gcn / d  # batch_size * docu_len * dim

    if self.globalnode:
        h = h + self.g_node(x, mask).unsqueeze(1).expand_as(h)
    h = F.elu(h)
    return h
Example 15: _get_rnn_output
# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def _get_rnn_output(self, input_word_orig, input_word, input_char,
                    mask=None, length=None, hx=None, show_net=False):
    input, length = self._get_word_enc(
        input_word_orig, input_word, input_char, mask=mask, length=length, show_net=show_net)
    output, hn = self._get_rnn_enc(input, length, mask, hx, show_net=show_net)

    if self.tag_space:
        # [batch, length, tag_space]
        output = self.dropout_tag(F.elu(self.lstm_to_tag_space(output)))
        if show_net:
            print("[Net] to_tag")
            show_var(["self.lstm_to_tag_space"])
            show_var(["F.elu"])
            show_var(["self.dropout_tag"])
    return output, hn, mask, length