

Python nn.ELU Attribute Code Examples

This article collects typical usage examples of the nn.ELU attribute of Python's torch.nn module. If you are wondering what exactly nn.ELU does, how to use it, or what it looks like in practice, the curated code examples below should help. You can also explore further usage examples from torch.nn.


Below are 15 code examples of the nn.ELU attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
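For reference, ELU computes ELU(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise, with alpha defaulting to 1.0. A minimal standalone sketch of the module before diving into the examples:

import torch
from torch import nn

elu = nn.ELU(alpha=1.0)             # inplace=False by default
x = torch.tensor([-2.0, 0.0, 3.0])
print(elu(x))                       # negative inputs saturate toward -alpha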

Example 1: nonlinearity

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def nonlinearity(h_nonlin_name):

    def Nonlinearity(nonlin_name):
        if nonlin_name == 'relu':
            m = nn.ReLU()
        elif nonlin_name == 'tanh':
            m = nn.Tanh()
        elif nonlin_name == 'elu':
            m = nn.ELU()
        else:
            raise ValueError("unsupported nonlinearity: {}".format(nonlin_name))

        return m

    return hpt.siso_pytorch_module_from_pytorch_layer_fn(
        Nonlinearity, {'nonlin_name': h_nonlin_name}) 
Developer: negrinho, Project: deep_architect, Lines: 18, Source: main_pytorch.py

Example 2: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def __init__(self, embedding_dim, n_hidden_layers, n_hidden_units, dropout_prob):
    super(DanEncoder, self).__init__()
    encoder_layers = []
    for i in range(n_hidden_layers):
        if i == 0:
            input_dim = embedding_dim
        else:
            input_dim = n_hidden_units

        encoder_layers.extend([
            nn.Linear(input_dim, n_hidden_units),
            nn.BatchNorm1d(n_hidden_units),
            nn.ELU(),
            nn.Dropout(dropout_prob),
        ])
    self.encoder = nn.Sequential(*encoder_layers)
Developer: Pinafore, Project: qb, Lines: 18, Source: dan.py
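To make the loop concrete, here is the equivalent stack unrolled for two hidden layers, with hypothetical dimensions (a sketch; the real forward pass lives in dan.py):

import torch
from torch import nn

embedding_dim, n_hidden_units, dropout_prob = 300, 128, 0.1
encoder = nn.Sequential(
    nn.Linear(embedding_dim, n_hidden_units), nn.BatchNorm1d(n_hidden_units),
    nn.ELU(), nn.Dropout(dropout_prob),
    nn.Linear(n_hidden_units, n_hidden_units), nn.BatchNorm1d(n_hidden_units),
    nn.ELU(), nn.Dropout(dropout_prob),
)
encoder.eval()                                        # use BatchNorm running statistics
print(encoder(torch.randn(4, embedding_dim)).shape)   # torch.Size([4, 128])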

Example 3: act_fun

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def act_fun(act_type):

    if act_type == "relu":
        return nn.ReLU()

    if act_type == "tanh":
        return nn.Tanh()

    if act_type == "sigmoid":
        return nn.Sigmoid()

    if act_type == "leaky_relu":
        return nn.LeakyReLU(0.2)

    if act_type == "elu":
        return nn.ELU()

    if act_type == "softmax":
        return nn.LogSoftmax(dim=1)

    if act_type == "linear":
        return nn.LeakyReLU(1)  # initialized like this, but not used in forward!
Developer: santi-pdp, Project: pase, Lines: 24, Source: neural_networks.py
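A quick usage sketch of the factory above:

import torch

elu = act_fun("elu")
print(elu(torch.tensor([-1.0, 0.0, 2.0])))   # tensor([-0.6321, 0.0000, 2.0000])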

Example 4: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def __init__(self, config):
    super().__init__()
    self.config = config
    self.main = nn.Sequential(
        nn.Linear(10 * 10, 1024),
        nn.LayerNorm(1024),
        nn.ELU(),
        nn.Linear(1024, 1024),
        nn.LayerNorm(1024),
        nn.ELU(),
        nn.Linear(1024, 512),
        nn.LayerNorm(512),
        nn.ELU(),
        nn.Linear(512, 100),
        nn.LayerNorm(100)
    )
Developer: ermongroup, Project: ncsn, Lines: 18, Source: scorenet.py
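The trunk above is an ordinary MLP over flattened 10x10 inputs; a standalone sketch of the shapes involved (the surrounding class and config come from scorenet.py):

import torch
from torch import nn

main = nn.Sequential(
    nn.Linear(10 * 10, 1024), nn.LayerNorm(1024), nn.ELU(),
    nn.Linear(1024, 1024), nn.LayerNorm(1024), nn.ELU(),
    nn.Linear(1024, 512), nn.LayerNorm(512), nn.ELU(),
    nn.Linear(512, 100), nn.LayerNorm(100),
)
x = torch.randn(8, 1, 10, 10)             # hypothetical batch of 10x10 images
print(main(x.view(x.size(0), -1)).shape)  # torch.Size([8, 100])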

Example 5: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def __init__(self,
             in_channels,
             out_channels):
    super(LwopEncoderFinalBlock, self).__init__()
    self.pre_conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        bias=True,
        use_bn=False)
    self.body = nn.Sequential()
    for i in range(3):
        self.body.add_module("block{}".format(i + 1), dwsconv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bn=False,
            dw_activation=(lambda: nn.ELU(inplace=True)),
            pw_activation=(lambda: nn.ELU(inplace=True))))
    self.post_conv = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        bias=True,
        use_bn=False)
Developer: osmr, Project: imgclsmob, Lines: 24, Source: lwopenpose_cmupan.py
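Note that dw_activation and pw_activation are passed as zero-argument lambdas rather than module instances: each call of the factory yields a fresh nn.ELU, so the three blocks do not share one activation module. A tiny sketch of the distinction:

from torch import nn

make_act = lambda: nn.ELU(inplace=True)   # factory: a new module per call
a, b = make_act(), make_act()
assert a is not b                         # each block owns its own activation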

Example 6: get_activation

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def get_activation(self, act):
    if act == 'tanh':
        act = nn.Tanh()
    elif act == 'relu':
        act = nn.ReLU()
    elif act == 'softplus':
        act = nn.Softplus()
    elif act == 'rrelu':
        act = nn.RReLU()
    elif act == 'leakyrelu':
        act = nn.LeakyReLU()
    elif act == 'elu':
        act = nn.ELU()
    elif act == 'selu':
        act = nn.SELU()
    elif act == 'glu':
        act = nn.GLU()
    else:
        print('Defaulting to tanh activations...')
        act = nn.Tanh()
    return act
Developer: blei-lab, Project: causal-text-embeddings, Lines: 23, Source: supervised_topic_model.py

Example 7: act_fn

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def act_fn(act):
    if act == 'relu':
        act_ = nn.ReLU(inplace=False)
    elif act == 'lrelu':
        act_ = nn.LeakyReLU(inplace=True)
    elif act == 'prelu':
        act_ = nn.PReLU()
    elif act == 'rrelu':
        act_ = nn.RReLU(inplace=True)
    elif act == 'elu':
        act_ = nn.ELU(inplace=True)
    elif act == 'selu':
        act_ = nn.SELU(inplace=True)
    elif act == 'tanh':
        act_ = nn.Tanh()
    elif act == 'sigmoid':
        act_ = nn.Sigmoid()
    else:
        print('\n\nActivation function {} is not supported/understood\n\n'.format(act))
        act_ = None
    return act_ 
Developer: juefeix, Project: pnn.pytorch.update, Lines: 23, Source: utils.py

Example 8: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def __init__(self, n_classes=1):
    super(Res34Unetv4, self).__init__()
    self.resnet = torchvision.models.resnet34(True)

    self.conv1 = nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu)

    self.encode2 = nn.Sequential(self.resnet.layer1, SCse(64))
    self.encode3 = nn.Sequential(self.resnet.layer2, SCse(128))
    self.encode4 = nn.Sequential(self.resnet.layer3, SCse(256))
    self.encode5 = nn.Sequential(self.resnet.layer4, SCse(512))

    self.center = nn.Sequential(FPAv2(512, 256), nn.MaxPool2d(2, 2))

    self.decode5 = Decoderv2(256, 512, 64)
    self.decode4 = Decoderv2(64, 256, 64)
    self.decode3 = Decoderv2(64, 128, 64)
    self.decode2 = Decoderv2(64, 64, 64)
    self.decode1 = Decoder(64, 32, 64)

    self.logit = nn.Sequential(
        nn.Conv2d(320, 64, kernel_size=3, padding=1),
        nn.ELU(True),
        nn.Conv2d(64, n_classes, kernel_size=1, bias=False),
    )
Developer: microsoft, Project: seismic-deeplearning, Lines: 26, Source: resnet_unet.py

Example 9: compute_flops

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def compute_flops(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_flops(module, inp, out), 'Conv2d'
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_flops(module, inp, out), 'BatchNorm2d'
    elif isinstance(module, (
            nn.AvgPool2d, nn.MaxPool2d, nn.AdaptiveAvgPool2d,
            nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_flops(module, inp, out), 'Pool2d'
    elif isinstance(module,
                    (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU,
                     nn.Sigmoid)):
        return compute_ReLU_flops(module, inp, out), 'Activation'
    elif isinstance(module, nn.Upsample):
        return compute_Upsample_flops(module, inp, out), 'Upsample'
    elif isinstance(module, nn.Linear):
        return compute_Linear_flops(module, inp, out), 'Linear'
    else:
        print("[Flops]: {} is not supported!".format(type(module).__name__))
        return 0, -1
Developer: StevenGrove, Project: TreeFilter-Torch, Lines: 23, Source: compute_flops.py
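The dispatcher above is typically driven from forward hooks. A hedged sketch, assuming compute_flops and its compute_*_flops helpers are importable from the project:

import torch
from torch import nn

totals = {}

def hook(module, inp, out):
    flops, kind = compute_flops(module, inp[0], out)  # hooks pass inputs as a tuple
    totals[kind] = totals.get(kind, 0) + flops

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ELU())
for m in model:
    m.register_forward_hook(hook)

model(torch.randn(1, 3, 8, 8))
print(totals)   # e.g. {'Conv2d': ..., 'Activation': ...}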

Example 10: compute_memory

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def compute_memory(module, inp, out):
    if isinstance(module, (nn.ReLU, nn.ReLU6, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_memory(module, inp, out)
    elif isinstance(module, nn.PReLU):
        return compute_PReLU_memory(module, inp, out)
    elif isinstance(module, nn.Conv2d):
        return compute_Conv2d_memory(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_memory(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_memory(module, inp, out)
    elif isinstance(module, (
            nn.AvgPool2d, nn.MaxPool2d, nn.AdaptiveAvgPool2d,
            nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_memory(module, inp, out)
    else:
        print("[Memory]: {} is not supported!".format(type(module).__name__))
        return 0, 0
Developer: StevenGrove, Project: TreeFilter-Torch, Lines: 21, Source: compute_memory.py

Example 11: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def __init__(self, inplanes, planes, stride=1):
    super(ResNetBlock, self).__init__()
    self.conv1 = conv3x3(inplanes, planes, stride)
    self.bn1 = nn.BatchNorm2d(planes)
    self.activation = nn.ELU()
    self.conv2 = conv3x3(planes, planes)
    self.bn2 = nn.BatchNorm2d(planes)
    downsample = None
    if stride != 1 or inplanes != planes:
        downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes),
        )
    self.downsample = downsample
    self.stride = stride
    self.reset_parameters()
Developer: jxhe, Project: vae-lagging-encoder, Lines: 19, Source: enc_resnet_v2.py

Example 12: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def __init__(self, in_channels, kernel_size):
    super(PixelCNNBlock, self).__init__()
    self.mask_type = 'B'
    padding = kernel_size // 2
    out_channels = in_channels // 2

    self.main = nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 1, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ELU(),
        MaskedConv2d(self.mask_type, out_channels, out_channels, out_channels, kernel_size, padding=padding, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ELU(),
        nn.Conv2d(out_channels, in_channels, 1, bias=False),
        nn.BatchNorm2d(in_channels),
    )
    self.activation = nn.ELU()
    self.reset_parameters()
Developer: jxhe, Project: vae-lagging-encoder, Lines: 20, Source: dec_pixelcnn_v2.py

Example 13: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def __init__(self, in_features, out_features, activation=None, normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
    super(MyLinear, self).__init__()
    self.activation = activation
    self.normalization = normalization

    self.linear = nn.Linear(in_features, out_features, bias=True)
    if self.normalization == 'batch':
        self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True, momentum_decay_step=bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
    elif self.normalization == 'instance':
        self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)
    if self.activation == 'relu':
        self.act = nn.ReLU()
    elif self.activation == 'elu':
        self.act = nn.ELU(alpha=1.0)
    elif self.activation == 'swish':
        self.act = Swish()
    elif self.activation == 'leakyrelu':
        self.act = nn.LeakyReLU(0.01)
    elif self.activation == 'selu':
        self.act = nn.SELU()

    self.weight_init()
Developer: lijx10, Project: USIP, Lines: 24, Source: layers.py

Example 14: replace_bn

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def replace_bn(bn, act=None):
    slope = 0.01
    if isinstance(act, nn.ReLU):
        activation = 'leaky_relu'  # approximate relu
    elif isinstance(act, nn.LeakyReLU):
        activation = 'leaky_relu'
        slope = act.negative_slope
    elif isinstance(act, nn.ELU):
        activation = 'elu'
    else:
        activation = 'none'
    abn = ActivatedBatchNorm(num_features=bn.num_features,
                             eps=bn.eps,
                             momentum=bn.momentum,
                             affine=bn.affine,
                             track_running_stats=bn.track_running_stats,
                             activation=activation,
                             slope=slope)
    abn.load_state_dict(bn.state_dict())
    return abn
Developer: tugstugi, Project: pytorch-saltnet, Lines: 22, Source: basenet.py
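A hedged usage sketch: fuse each BatchNorm2d with the activation that follows it, then neutralize the now-redundant activation (assumes ActivatedBatchNorm from the project's basenet module is importable):

from torch import nn

model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1),
                      nn.BatchNorm2d(16),
                      nn.ELU())
model[1] = replace_bn(model[1], act=model[2])  # BN + ELU fused into one module
model[2] = nn.Identity()                       # the standalone ELU is now redundant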

Example 15: __init__

# Required import: from torch import nn [as alias]
# Or: from torch.nn import ELU [as alias]
def __init__(self, dimc, act=nn.ELU()):
    super(MNISTConvEnc, self).__init__()
    self.enc = nn.Sequential(
        nn_.ResConv2d(1, 16, 3, 2, padding=1, activation=act),
        act,
        nn_.ResConv2d(16, 16, 3, 1, padding=1, activation=act),
        act,
        nn_.ResConv2d(16, 32, 3, 2, padding=1, activation=act),
        act,
        nn_.ResConv2d(32, 32, 3, 1, padding=1, activation=act),
        act,
        nn_.ResConv2d(32, 32, 3, 2, padding=1, activation=act),
        act,
        nn_.Reshape((-1, 32 * 4 * 4)),
        nn_.ResLinear(32 * 4 * 4, dimc),
        act
    )
Developer: CW-Huang, Project: torchkit, Lines: 19, Source: autoencoders.py


Note: The torch.nn.ELU attribute examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. For redistribution and use, please consult the corresponding project's license; do not repost without permission.