

Python functional.elu Method Code Examples

This article collects typical usage examples of the Python method torch.nn.functional.elu. If you are wondering exactly what functional.elu does, how to call it, or what working examples look like, the curated code samples below should help. You can also explore further usage examples from its parent module, torch.nn.functional.


The following presents 15 code examples of the functional.elu method, sorted by popularity by default.
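Before the examples, here is a minimal self-contained sketch of what F.elu computes (the tensor values are illustrative only): ELU(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise, with alpha defaulting to 1.0.

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, -0.5, 0.0, 1.5])

# elu(input, alpha=1.0, inplace=False)
y = F.elu(x)

# ELU(x) = x if x > 0 else alpha * (exp(x) - 1), here with alpha = 1.0
manual = torch.where(x > 0, x, torch.exp(x) - 1)
print(torch.allclose(y, manual))  # True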

Example 1: __init__

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an InPlace Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied when computing running statistics.
        affine : bool
            If `True` apply learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) 
Developer: miraiaroha, Project: ACAN, Lines: 21, Source: bn.py

Example 2: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, x):
        inv_var = torch.rsqrt(self.running_var + self.eps)
        if self.affine:
            alpha = self.weight * inv_var
            beta = self.bias - self.running_mean * alpha
        else:
            alpha = inv_var
            beta = - self.running_mean * alpha

        x.mul_(alpha.view(self._broadcast_shape(x)))
        x.add_(beta.view(self._broadcast_shape(x)))

        if self.activation == "relu":
            return functional.relu(x, inplace=True)
        elif self.activation == "leaky_relu":
            return functional.leaky_relu(x, negative_slope=self.activation_param, inplace=True)
        elif self.activation == "elu":
            return functional.elu(x, alpha=self.activation_param, inplace=True)
        elif self.activation == "identity":
            return x
        else:
            raise RuntimeError("Unknown activation function {}".format(self.activation)) 
Developer: mapillary, Project: seamseg, Lines: 24, Source: misc.py
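Example 2 folds the running batch-norm statistics into a single affine transform before applying the activation: since (x - mean) / sqrt(var + eps) * weight + bias = alpha * x + beta, with alpha = weight * rsqrt(var + eps) and beta = bias - mean * alpha, the in-place mul_/add_ pair reproduces inference-mode batch normalization. A minimal sketch verifying this algebra, with made-up shapes and values:

import torch

num_features = 4
x = torch.randn(2, num_features, 8, 8)
weight = torch.rand(num_features) + 0.5
bias = torch.randn(num_features)
running_mean = torch.randn(num_features)
running_var = torch.rand(num_features) + 0.1
eps = 1e-5

shape = (1, num_features, 1, 1)
# Reference: standard inference-mode batch normalization
y_ref = (x - running_mean.view(shape)) / torch.sqrt(running_var.view(shape) + eps) \
        * weight.view(shape) + bias.view(shape)

# Folded form used in the example
alpha = weight * torch.rsqrt(running_var + eps)
beta = bias - running_mean * alpha
y_fused = x * alpha.view(shape) + beta.view(shape)

print(torch.allclose(y_ref, y_fused, atol=1e-6))  # True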

Example 3: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, x):
        if hasattr(self, "proj_conv"):
            residual = self.proj_conv(x)
            residual = self.proj_bn(residual)
        else:
            residual = x

        x = self.convs(x) + residual

        if self.convs.bn1.activation == "relu":
            return functional.relu(x, inplace=True)
        elif self.convs.bn1.activation == "leaky_relu":
            return functional.leaky_relu(x, negative_slope=self.convs.bn1.activation_param, inplace=True)
        elif self.convs.bn1.activation == "elu":
            return functional.elu(x, alpha=self.convs.bn1.activation_param, inplace=True)
        elif self.convs.bn1.activation == "identity":
            return x
        else:
            raise RuntimeError("Unknown activation function {}".format(self.convs.bn1.activation))
Developer: mapillary, Project: seamseg, Lines: 21, Source: residual.py

Example 4: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, x, vertices, adj):
        emb = self.embedding(vertices)
        if self.inst_norm:
            emb = self.norm(emb.transpose(1, 2)).transpose(1, 2)
        x = torch.cat((x, emb), dim=2)
        if self.use_vertex_feature:
            vfeature = self.vertex_feature(vertices)
            x = torch.cat((x, vfeature), dim=2)
        bs, n = adj.size()[:2]
        for i, gat_layer in enumerate(self.layer_stack):
            x = gat_layer(x, adj) # bs x n_head x n x f_out
            if i + 1 == self.n_layer:
                x = x.mean(dim=1)
            else:
                x = F.elu(x.transpose(1, 2).contiguous().view(bs, n, -1))
                x = F.dropout(x, self.dropout, training=self.training)
        return F.log_softmax(x, dim=-1) 
Developer: xptree, Project: DeepInf, Lines: 19, Source: gat.py
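The reshape in Example 4, x.transpose(1, 2).contiguous().view(bs, n, -1), concatenates the attention heads into the feature dimension for all but the last layer, which averages over heads instead. A small shape-only sketch of that step (the dimensions are hypothetical):

import torch

bs, n_head, n, f_out = 2, 4, 6, 8
x = torch.randn(bs, n_head, n, f_out)  # bs x n_head x n x f_out

# Concatenate heads: (bs, n_head, n, f_out) -> (bs, n, n_head * f_out)
concat = x.transpose(1, 2).contiguous().view(bs, n, -1)
print(concat.shape)  # torch.Size([2, 6, 32])

# Last layer averages over heads instead: (bs, n, f_out)
avg = x.mean(dim=1)
print(avg.shape)  # torch.Size([2, 6, 8])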

Example 5: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, input, adj):
        h = torch.mm(input, self.W)
        N = h.size()[0]

        f_1 = torch.matmul(h, self.a1)
        f_2 = torch.matmul(h, self.a2)
        e = self.leakyrelu(f_1 + f_2.transpose(0,1))

        zero_vec = -9e15*torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)

        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime 
Developer: meliketoy, Project: graph-cnn.pytorch, Lines: 20, Source: layers.py
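Example 5 computes GAT-style additive attention with a broadcasting trick: e[i, j] = LeakyReLU(Wh_i · a1 + Wh_j · a2), masks non-edges with -9e15 so softmax drives their weight to roughly zero, then applies F.elu to the aggregated features. A self-contained restaging with hypothetical dimensions (the negative slope of 0.2 is an assumption; the layer stores its own in self.leakyrelu):

import torch
import torch.nn.functional as F

N, in_features, out_features = 5, 8, 4
h_in = torch.randn(N, in_features)
W = torch.randn(in_features, out_features)
a1 = torch.randn(out_features, 1)
a2 = torch.randn(out_features, 1)
adj = (torch.rand(N, N) > 0.5).float()

h = h_in @ W
# f_1 + f_2.T broadcasts (N, 1) + (1, N) into an N x N score matrix
e = F.leaky_relu((h @ a1) + (h @ a2).transpose(0, 1), negative_slope=0.2)

# Non-edges get a large negative score; softmax sends their weight to ~0
attention = F.softmax(torch.where(adj > 0, e, torch.full_like(e, -9e15)), dim=1)
h_prime = attention @ h
out = F.elu(h_prime)  # ELU is applied when head outputs are concatenated
print(out.shape)  # torch.Size([5, 4])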

Example 6: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=5, start=0, end=28)
        data.edge_attr = None
        data = max_pool(cluster, data, transform=transform)

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=7, start=0, end=28)
        data.edge_attr = None
        data = max_pool(cluster, data, transform=transform)

        data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=14, start=0, end=27.99)
        x, _ = max_pool_x(cluster, data.x, data.batch, size=4)

        x = x.view(-1, self.fc1.weight.size(1))
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1) 
Developer: rusty1s, Project: pytorch_geometric, Lines: 22, Source: mnist_voxel_grid.py

Example 7: lovasz_hinge_flat

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * signs)

    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.elu(errors_sorted), grad)
    return loss 
Developer: neptune-ai, Project: open-solution-salt-identification, Lines: 21, Source: lovasz_losses.py

Example 8: lovasz_hinge_flat

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.elu(errors_sorted) + 1, Variable(grad))
    return loss 
Developer: lRomul, Project: argus-tgs-salt, Lines: 20, Source: lovasz.py
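Examples 7 and 8 differ in one term: Example 8 uses F.elu(errors_sorted) + 1, which equals exp(m) for m <= 0 and m + 1 for m >= 0 (a smooth, strictly positive, monotonically increasing transform of the hinge errors), while Example 7 omits the + 1 and can therefore contribute negative terms for well-classified pixels. A tiny sketch comparing both against the hard hinge:

import torch
import torch.nn.functional as F

m = torch.linspace(-3, 3, 7)  # hinge errors 1 - logits * signs
print(F.relu(m))      # hard hinge: max(0, m)
print(F.elu(m))       # Example 7: negative for m < 0
print(F.elu(m) + 1)   # Example 8: smooth and strictly positive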

Example 9: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, input):
        """Method to forward propagate through the actor's graph

            Parameters:
                  input (tensor): states

            Returns:
                  action (tensor): actions
        """
        #Hidden Layer 1
        out = F.elu(self.f1(input))
        out = self.ln1(out)

        #Hidden Layer 2
        out = F.elu(self.f2(out))
        out = self.ln2(out)

        #Out
        return torch.sigmoid(self.w_out(out)) 
Developer: IntelAI, Project: cerl, Lines: 23, Source: models.py

Example 10: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, inputs, noise):
    out = self.input(inputs)
    cond = torch.zeros(
      inputs.size(0), 10,
      dtype=inputs.dtype,
      device=inputs.device
    )
    offset = (torch.log(noise) / torch.log(torch.tensor(0.60))).long()
    cond[torch.arange(inputs.size(0)), offset.view(-1)] = 1
    connections = []
    for norm, block in zip(self.down_norm, self.down):
      out = func.elu(block(norm(out, cond)))
      connections.append(out)
    features = func.adaptive_avg_pool2d(out, 1)
    logits = self.predict(features.view(features.size(0), -1))
    for norm, block, shortcut in zip(self.up_norm, self.up, reversed(connections)):
      out = func.elu(block(norm(torch.cat((out, shortcut), dim=1), cond)))
    del connections
    return self.output(out), logits 
Developer: mjendrusch, Project: torchsupport, Lines: 21, Source: conditional_mnist_score_classifier.py

Example 11: __init__

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def __init__(self, in_size, out_size,
               size=None, upsample=2,
               activation=func.elu):
    super(UpsampleBlock, self).__init__()
    self.is_first = False
    self.size = size
    if size is not None:
      self.is_first = True
      total_size = torch.Size(size).numel()
      self.input = nn.Linear(in_size, out_size * total_size)
    self.pixnorm = tsn.PixelNorm()
    self.convs = nn.ModuleList([
      nn.Conv2d(in_size, in_size, 3),
      nn.Conv2d(in_size, out_size, 3)
    ])
    self.activation = activation
    self.upsample = upsample 
Developer: mjendrusch, Project: torchsupport, Lines: 19, Source: generative.py

Example 12: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, input, adj):
        h = torch.mm(input, self.W)
        N = h.size()[0]
        a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)

        e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))

        zero_vec = torch.zeros_like(e)
        zero_vec = zero_vec.fill_(9e-15)
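        # Note: non-edges are filled with a tiny positive constant (9e-15)
        # here rather than -9e15 as in Examples 5 and 13, so softmax does
        # not fully suppress them.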
        attention = torch.where(adj > 0, e, zero_vec)

        attention = F.softmax(attention, dim=1)

        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)

        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime 
Developer: rajammanabrolu, Project: KG-A2C, Lines: 22, Source: layers.py

Example 13: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, input, adj):
        h = torch.mm(input, self.W)
        N = h.size()[0]

        a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)
        e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))

        zero_vec = -9e15*torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)

        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime 
Developer: plkmo, Project: NLP_Toolkit, Lines: 19, Source: layers.py

Example 14: forward

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def forward(self, x, adjs, mask=None):
        # x: (batch, N, input_dim)
        # adjs: (batch, n_graph, N, N)
        if len(adjs.size()) == 3:
            adjs = adjs.unsqueeze(1)
        batch, num_sent, d_input = x.size()
        assert adjs.size(1) == self.n_graph
        h = self.linear(x)
        x = x.unsqueeze(1).expand(-1, self.n_graph, -1, -1)
        h_gcn = torch.matmul(adjs, x).transpose(1, 2).contiguous().view(batch, num_sent, -1)
        h_gcn = self.linear_gcn(h_gcn)
        d = adjs.sum(dim=3).sum(dim=1).unsqueeze(2)
        d = d + d.eq(0).float()
        h = h + h_gcn / d # batch_size * docu_len * dim
        if self.globalnode:
            h = h + self.g_node(x, mask).unsqueeze(1).expand_as(h)
        h = F.elu(h)
        return h 
Developer: thomas0809, Project: GraphIE, Lines: 20, Source: gnn.py

Example 15: _get_rnn_output

# Required import: from torch.nn import functional [as alias]
# Alternatively: from torch.nn.functional import elu [as alias]
def _get_rnn_output(self, input_word_orig, input_word, input_char,
                        mask=None, length=None, hx=None, show_net=False):

        input, length = self._get_word_enc(
            input_word_orig, input_word, input_char, mask=mask, length=length, show_net=show_net)

        output, hn = self._get_rnn_enc(input, length, mask, hx, show_net=show_net)

        if self.tag_space:
            # [batch, length, tag_space]
            output = self.dropout_tag(F.elu(self.lstm_to_tag_space(output)))
            if show_net:
                print("[Net] to_tag")
                show_var(["self.lstm_to_tag_space"])
                show_var(["F.elu"])
                show_var(["self.dropout_tag"])

        return output, hn, mask, length 
Developer: thomas0809, Project: GraphIE, Lines: 20, Source: sequence_labeling.py


Note: The torch.nn.functional.elu examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.