

Python nn.Softplus Usage Examples

This article collects typical usage examples of the Python method torch.nn.Softplus. If you are wondering what nn.Softplus does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore other usage examples from the torch.nn module.


Below are 15 code examples of nn.Softplus, ordered by popularity by default.
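For reference: nn.Softplus computes softplus(x) = (1/beta) * log(1 + exp(beta * x)), a smooth, strictly positive approximation of ReLU; for inputs where beta * x exceeds `threshold`, the module returns x directly for numerical stability. A minimal demo with the PyTorch defaults (beta=1, threshold=20):

import torch
from torch import nn

sp = nn.Softplus()                      # beta=1, threshold=20
x = torch.tensor([-2.0, 0.0, 2.0])
print(sp(x))                            # tensor([0.1269, 0.6931, 2.1269])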

Example 1: activation

# Required import
from torch import nn
def activation(name):
    if name in ['tanh', 'Tanh']:
        return nn.Tanh()
    elif name in ['relu', 'ReLU']:
        return nn.ReLU(inplace=True)
    elif name in ['lrelu', 'LReLU']:
        return nn.LeakyReLU(inplace=True)
    elif name in ['sigmoid', 'Sigmoid']:
        return nn.Sigmoid()
    elif name in ['softplus', 'Softplus']:
        return nn.Softplus(beta=4)
    else:
        raise ValueError('Unknown activation function')


# modify the decoder network
# use upsampling instead of transconv
# it seems transconv is much faster than nearest upsampling (or other interpolation)
Developer: cics-nd, Project: pde-surrogate, Lines: 20, Source: codec.py
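A quick usage sketch for the factory above:

import torch

act = activation('softplus')   # returns nn.Softplus(beta=4): sharper than the default, closer to ReLU
y = act(torch.randn(4))        # elementwise map; outputs are always positive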

Example 2: get_activation

# Required import
from torch import nn
def get_activation(self, act):
        if act == 'tanh':
            act = nn.Tanh()
        elif act == 'relu':
            act = nn.ReLU()
        elif act == 'softplus':
            act = nn.Softplus()
        elif act == 'rrelu':
            act = nn.RReLU()
        elif act == 'leakyrelu':
            act = nn.LeakyReLU()
        elif act == 'elu':
            act = nn.ELU()
        elif act == 'selu':
            act = nn.SELU()
        elif act == 'glu':
            act = nn.GLU()
        else:
            print('Defaulting to tanh activations...')
            act = nn.Tanh()
        return act 
Developer: blei-lab, Project: causal-text-embeddings, Lines: 23, Source: supervised_topic_model.py

Example 3: __init__

# Required import
from torch import nn
# FlattenLayer is project-specific; a hedged sketch follows this example
def __init__(self, outputs, inputs):
        super(ThreeConvThreeFC, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(inputs, 32, 5, stride=1, padding=2),
            nn.Softplus(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(32, 64, 5, stride=1, padding=2),
            nn.Softplus(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 128, 5, stride=1, padding=1),
            nn.Softplus(),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            FlattenLayer(2 * 2 * 128),
            nn.Linear(2 * 2 * 128, 1000),
            nn.Softplus(),
            nn.Linear(1000, 1000),
            nn.Softplus(),
            nn.Linear(1000, outputs)
        ) 
Developer: kumar-shridhar, Project: PyTorch-BayesianCNN, Lines: 23, Source: ThreeConvThreeFC.py
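FlattenLayer is defined elsewhere in the project; a minimal reconstruction consistent with how it is called here (flattening conv features to a fixed width) might look like:

class FlattenLayer(nn.Module):
    # hypothetical sketch: reshape (batch, C, H, W) features to (batch, num_features)
    def __init__(self, num_features):
        super(FlattenLayer, self).__init__()
        self.num_features = num_features

    def forward(self, x):
        return x.view(-1, self.num_features)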

Example 4: __init__

# Required import
from torch import nn
def __init__(self, atom_fea_len, nbr_fea_len):
        """
        Initialize ConvLayer.

        Parameters
        ----------

        atom_fea_len: int
          Number of atom hidden features.
        nbr_fea_len: int
          Number of bond features.
        """
        super(ConvLayer, self).__init__()
        self.atom_fea_len = atom_fea_len
        self.nbr_fea_len = nbr_fea_len
        self.fc_full = nn.Linear(2*self.atom_fea_len+self.nbr_fea_len,
                                 2*self.atom_fea_len)
        self.sigmoid = nn.Sigmoid()
        self.softplus1 = nn.Softplus()
        self.bn1 = nn.BatchNorm1d(2*self.atom_fea_len)
        self.bn2 = nn.BatchNorm1d(self.atom_fea_len)
        self.softplus2 = nn.Softplus() 
Developer: txie-93, Project: cgcnn, Lines: 24, Source: model.py
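Only __init__ is excerpted. In CGCNN, the forward pass splits fc_full's output into a filter half and a core half, gates them with Sigmoid and Softplus, and passes the residual sum through the second Softplus. A hedged sketch of that gating step (concat_fea and atom_in_fea are assumed names; shapes and the neighbor-gathering details are simplified):

# sketch, not the verbatim upstream forward
total_fea = self.fc_full(concat_fea)                 # (N, M, 2*atom_fea_len)
nbr_filter, nbr_core = total_fea.chunk(2, dim=-1)
nbr_msg = self.sigmoid(nbr_filter) * self.softplus1(nbr_core)
out = self.softplus2(atom_in_fea + self.bn2(nbr_msg.sum(dim=1)))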

Example 5: __init__

# Required import
from torch import nn
def __init__(self, rbf_dim, dim=64, act="sp"):
        """
        Args:
            rbf_dim: the dimension of the RBF layer
            dim: the dimension of linear layers
            act: activation function (default shifted softplus)
        """
        super().__init__()
        self._rbf_dim = rbf_dim
        self._dim = dim

        self.linear_layer1 = nn.Linear(self._rbf_dim, self._dim)
        self.linear_layer2 = nn.Linear(self._dim, self._dim)

        if act == "sp":
            self.activation = nn.Softplus(beta=0.5, threshold=14)
        else:
            self.activation = act 
Developer: tencent-alchemy, Project: Alchemy, Lines: 20, Source: layers.py
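With beta=0.5 this computes 2 * log(1 + exp(x / 2)), a flatter softplus; threshold=14 means the module returns x unchanged once beta * x > 14, avoiding overflow in exp. A quick check:

import torch
from torch import nn

sp = nn.Softplus(beta=0.5, threshold=14)
print(sp(torch.tensor([0.0, 30.0])))   # tensor([ 1.3863, 30.0000]): 2*log(2) at zero, identity past the threshold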

Example 6: __init__

# Required import
from torch import nn
def __init__(self, context_hidden, encoder_hidden, z_hidden):
        super(VariableLayer, self).__init__()
        self.context_hidden = context_hidden
        self.encoder_hidden = encoder_hidden
        self.z_hidden = z_hidden
        self.prior_h = nn.ModuleList([nn.Linear(context_hidden, context_hidden),
                                      nn.Linear(context_hidden, context_hidden)])
        self.prior_mu = nn.Linear(context_hidden, z_hidden)
        self.prior_var = nn.Linear(context_hidden, z_hidden)

        self.posterior_h = nn.ModuleList([nn.Linear(context_hidden+encoder_hidden,
                                                    context_hidden), 
                                          nn.Linear(context_hidden, 
                                                    context_hidden)])
        self.posterior_mu = nn.Linear(context_hidden, z_hidden)
        self.posterior_var = nn.Linear(context_hidden, z_hidden)
        self.softplus = nn.Softplus() 
Developer: gmftbyGMFTBY, Project: MultiTurnDialogZoo, Lines: 19, Source: VHRED.py
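The Softplus maps the raw variance predictions to positive values. A hedged sketch of how the prior branch is typically used for the reparameterization trick (the actual forward is not shown in the excerpt; `context` is an assumed name for the context-RNN state):

# sketch: assumes `context` has shape (batch, context_hidden)
h = context
for linear in self.prior_h:
    h = torch.tanh(linear(h))
mu = self.prior_mu(h)
var = self.softplus(self.prior_var(h))            # positive variance
z = mu + var.sqrt() * torch.randn_like(mu)        # reparameterized sample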

Example 7: __init__

# Required imports
import torch
from torch import nn
# RelationalMemory and device are defined elsewhere in the project
def __init__(self, config):
        super(RMeN, self).__init__(config)

        self.ent_embeddings = nn.Embedding(self.config.entTotal, self.config.hidden_size)  # vectorized quaternion
        self.rel_embeddings = nn.Embedding(self.config.relTotal, self.config.hidden_size)

        self.pos_h = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(1, self.config.hidden_size)))
        self.pos_r = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(1, self.config.hidden_size)))
        self.pos_t = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(1, self.config.hidden_size)))

        self.transformer_rel_rnn = RelationalMemory(
                        mem_slots=self.config.mem_slots, head_size=self.config.head_size,
                        num_heads=self.config.num_heads, input_size=self.config.hidden_size,
                        gate_style=self.config.gate_style, attention_mlp_layers=self.config.attention_mlp_layers,
                        return_all_outputs=True
                        ).to(device)
        self.model_memory = self.transformer_rel_rnn.initial_state(self.config.batch_seq_size).to(device)

        self.dropout = nn.Dropout(self.config.convkb_drop_prob)
        self.fc_layer = nn.Linear(self.transformer_rel_rnn.mem_size, 1)

        self.criterion = nn.Softplus()
        self.init_parameters() 
Developer: daiquocnguyen, Project: R-MeN, Lines: 25, Source: RMeN_v1.py
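Here nn.Softplus is used as a loss rather than an activation: the soft-margin criterion common to knowledge-graph embedding models (Examples 9 and 10 use the same pattern). With labels y in {-1, +1} and raw triple scores s, the per-triple loss is log(1 + exp(y * s)); the exact sign convention varies by implementation. A hedged sketch:

# sketch: soft-margin ranking loss over a batch of scored triples
loss = self.criterion(scores * labels).mean()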

Example 8: __init__

# Required import
from torch import nn
# ResidLinear is defined elsewhere in the project
def __init__(self, n, latent_dim, hidden_dim, n_out=1, num_layers=1,
             activation=nn.Tanh, softplus=False, resid=False):
        super(VanillaGenerator, self).__init__()
        """
        The standard MLP structure for image generation. Decodes each pixel location as a function of z.
        """

        self.n_out = n_out
        self.softplus = softplus

        layers = [nn.Linear(latent_dim,hidden_dim), 
                  activation()]
        for _ in range(1,num_layers):
            if resid:
                layers.append(ResidLinear(hidden_dim, hidden_dim, activation=activation))
            else:
                layers.append(nn.Linear(hidden_dim,hidden_dim))
                layers.append(activation())
        layers.append(nn.Linear(hidden_dim, n*n_out))
        if softplus:
            layers.append(nn.Softplus())

        self.layers = nn.Sequential(*layers) 
Developer: tbepler, Project: spatial-VAE, Lines: 25, Source: models.py
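The optional trailing Softplus constrains the decoded outputs to be positive, which suits intensity-valued pixels. An illustrative instantiation (the argument values are hypothetical):

gen = VanillaGenerator(n=28*28, latent_dim=2, hidden_dim=500, n_out=1, softplus=True)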

Example 9: __init__

# Required import
from torch import nn
def __init__(self, config):
        super(ConvKB, self).__init__(config)

        self.ent_embeddings = nn.Embedding(self.config.entTotal, self.config.hidden_size) 
        self.rel_embeddings = nn.Embedding(self.config.relTotal, self.config.hidden_size)

        self.conv1_bn = nn.BatchNorm1d(3)
        self.conv_layer = nn.Conv1d(3, self.config.out_channels, self.config.kernel_size)  # kernel size x 3
        self.conv2_bn = nn.BatchNorm1d(self.config.out_channels)
        self.dropout = nn.Dropout(self.config.convkb_drop_prob)
        self.non_linearity = nn.ReLU()
        self.fc_layer = nn.Linear((self.config.hidden_size - self.config.kernel_size + 1) * self.config.out_channels, 1, bias=False)

        self.criterion = nn.Softplus()
        self.init_parameters() 
Developer: daiquocnguyen, Project: ConvKB, Lines: 17, Source: ConvKB_1D.py

Example 10: __init__

# Required import
from torch import nn
def __init__(self, config):
        super(ConvKB, self).__init__(config)

        self.ent_embeddings = nn.Embedding(self.config.entTotal, self.config.hidden_size) 
        self.rel_embeddings = nn.Embedding(self.config.relTotal, self.config.hidden_size)

        self.conv1_bn = nn.BatchNorm2d(1)
        self.conv_layer = nn.Conv2d(1, self.config.out_channels, (self.config.kernel_size, 3))  # kernel size x 3
        self.conv2_bn = nn.BatchNorm2d(self.config.out_channels)
        self.dropout = nn.Dropout(self.config.convkb_drop_prob)
        self.non_linearity = nn.ReLU()
        self.fc_layer = nn.Linear((self.config.hidden_size - self.config.kernel_size + 1) * self.config.out_channels, 1, bias=False)

        self.criterion = nn.Softplus()
        self.init_parameters() 
Developer: daiquocnguyen, Project: ConvKB, Lines: 17, Source: ConvKB.py

Example 11: __init__

# Required import
from torch import nn
def __init__(self, in_nc=2, out_nc=8, channel=64):
        super(HyPaNet, self).__init__()
        self.mlp = nn.Sequential(
                nn.Conv2d(in_nc, channel, 1, padding=0, bias=True),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel, channel, 1, padding=0, bias=True),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel, out_nc, 1, padding=0, bias=True),
                nn.Softplus()) 
Developer: cszn, Project: KAIR, Lines: 11, Source: network_usrnet.py
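The forward pass is omitted from the excerpt; presumably it simply applies the 1x1-conv MLP, with the final Softplus keeping the predicted hyper-parameters positive (some implementations also add a small constant for strict positivity). A hedged sketch of the missing method:

    # presumed forward (not in the excerpt)
    def forward(self, x):
        return self.mlp(x)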

Example 12: get_nonlinearity

# Required import
from torch import nn
# Swish is defined elsewhere in the project
def get_nonlinearity(name):
    """Helper function to get non linearity module, choose from relu/softplus/swish/lrelu"""
    if name == 'relu':
        return nn.ReLU(inplace=True)
    elif name == 'softplus':
        return nn.Softplus()
    elif name == 'swish':
        return Swish(inplace=True)
    elif name == 'lrelu':
        return nn.LeakyReLU() 
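    # any other name falls through silently and returns None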
Developer: DIAGNijmegen, Project: neural-odes-segmentation, Lines: 12, Source: model_utils.py

Example 13: __init__

# Required import
from torch import nn
def __init__(self, beta=1, shift=2, threshold=20):
        super(ShiftedSoftplus, self).__init__()

        self.shift = shift
        self.softplus = nn.Softplus(beta=beta, threshold=threshold) 
Developer: dmlc, Project: dgl, Lines: 7, Source: cfconv.py
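Only __init__ is excerpted; the usual shifted-softplus forward subtracts log(shift), so the activation is zero at the origin for the defaults (softplus(0) = log 2 and shift = 2). A hedged sketch of the missing method (assumes numpy imported as np):

    def forward(self, inputs):
        # log(1 + exp(x)) - log(shift): zero-centered softplus
        return self.softplus(inputs) - np.log(float(self.shift))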

Example 14: activation

# Required import
from torch import nn
def activation(name):
    if name in ['tanh', 'Tanh']:
        return nn.Tanh()
    elif name in ['relu', 'ReLU']:
        return nn.ReLU(inplace=True)
    elif name in ['lrelu', 'LReLU']:
        return nn.LeakyReLU(inplace=True)
    elif name in ['sigmoid', 'Sigmoid']:
        return nn.Sigmoid()
    elif name in ['softplus', 'Softplus']:
        return nn.Softplus(beta=4)
    else:
        raise ValueError('Unknown activation function')

# dense fully-connected NN
Developer: cics-nd, Project: pde-surrogate, Lines: 17, Source: cppn.py

Example 15: __init__

# Required import
from torch import nn
def __init__(self, in_channel, out_channel, resize=False, act='relu'):
        super().__init__()
        self.resize = resize

        def get_act():
            if act == 'relu':
                return nn.ReLU(inplace=True)
            elif act == 'softplus':
                return nn.Softplus()
            elif act == 'elu':
                return nn.ELU()
            elif act == 'leakyrelu':
                return nn.LeakyReLU(0.2, inplace=True)

        if not resize:
            self.main = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, 3, stride=1, padding=1),
                nn.GroupNorm(8, out_channel),
                get_act(),
                nn.Conv2d(out_channel, out_channel, 3, stride=1, padding=1),
                nn.GroupNorm(8, out_channel)
            )
        else:
            self.main = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, 3, stride=2, padding=1),
                nn.GroupNorm(8, out_channel),
                get_act(),
                nn.Conv2d(out_channel, out_channel, 3, stride=1, padding=1),
                nn.GroupNorm(8, out_channel)
            )
            self.residual = nn.Conv2d(in_channel, out_channel, 3, stride=2, padding=1)

        self.final_act = get_act() 
Developer: ermongroup, Project: ncsn, Lines: 35, Source: scorenet.py
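The forward pass is not shown; given the modules defined above, a plausible residual wiring (an assumption, not the verbatim upstream code) is:

    # plausible forward (assumption): residual add, then the final activation
    def forward(self, x):
        out = self.main(x)
        shortcut = self.residual(x) if self.resize else x
        return self.final_act(out + shortcut)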


Note: the torch.nn.Softplus examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by their original authors; copyright remains with those authors, and distribution and use are subject to each project's license. Do not reproduce without permission.