

Python nn.LeakyReLU Method Code Examples

This article compiles typical usage examples of the Python method mxnet.gluon.nn.LeakyReLU. If you are wondering how nn.LeakyReLU is used in practice, how to call it, or where to find examples of it, the curated code examples below may help. You can also explore further usage examples from the containing module, mxnet.gluon.nn.


Below are 15 code examples of nn.LeakyReLU, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
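
Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what nn.LeakyReLU does: negative inputs are scaled by the alpha slope, while non-negative inputs pass through unchanged.

import mxnet as mx
from mxnet.gluon import nn

act = nn.LeakyReLU(alpha=0.2)               # slope applied to negative values
x = mx.nd.array([[-2.0, -0.5, 0.0, 1.0]])
print(act(x))                               # expected: [[-0.4 -0.1  0.   1. ]]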

Example 1: get_netD

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def get_netD():
    # build the DCGAN discriminator; ndf (base filter count) is a module-level constant in dcgan.py
    netD = nn.Sequential()
    with netD.name_scope():
        # input is (nc) x 64 x 64
        netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf) x 32 x 32
        netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf*2) x 16 x 16
        netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf*4) x 8 x 8
        netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
        netD.add(nn.BatchNorm())
        netD.add(nn.LeakyReLU(0.2))
        # state size. (ndf*8) x 4 x 4
        netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
        # state size. 2 x 1 x 1

    return netD 
Author: mlperf, Project: training_results_v0.6, Lines of code: 26, Source file: dcgan.py
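
A hypothetical usage sketch for get_netD(): in the original dcgan.py, ndf (base filter count) and nc (image channels) are module-level constants, so the values below are assumptions chosen only to make the snippet runnable in the same script as the definition above.

import mxnet as mx

ndf, nc = 64, 3                                    # assumed DCGAN defaults
netD = get_netD()
netD.initialize(mx.init.Normal(0.02))              # DCGAN-style weight init
fake = mx.nd.random.normal(shape=(8, nc, 64, 64))  # a batch of 64x64 images
print(netD(fake).shape)                            # expected: (8, 2, 1, 1)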

Example 2: test_lambda

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def test_lambda():
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'),
             nn.LeakyReLU(0.1))

    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'),
             nn.HybridLambda(op3))

    op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'),
             nn.Lambda(op4))

    input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-3, atol=1e-3)
    assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3, atol=1e-3) 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 21, Source file: test_gluon.py

Example 3: get_activation

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def get_activation(act):
    """Get the activation based on the act string

    Parameters
    ----------
    act: str or HybridBlock

    Returns
    -------
    ret: HybridBlock
    """
    if act is None:
        return lambda x: x
    if isinstance(act, str):
        if act == 'leaky':
            return nn.LeakyReLU(0.1)
        elif act in ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']:
            return nn.Activation(act)
        else:
            raise NotImplementedError
    else:
        return act 
Author: dmlc, Project: dgl, Lines of code: 24, Source file: utils.py
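
A hypothetical usage sketch for get_activation(): a string maps to a Gluon block ('leaky' becomes nn.LeakyReLU(0.1)), None becomes an identity, and an existing HybridBlock is returned unchanged.

import mxnet as mx

act = get_activation('leaky')
x = mx.nd.array([-1.0, 0.5])
print(act(x))                     # expected: [-0.1  0.5]
print(get_activation(None)(x))    # identity: [-1.   0.5]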

Example 4: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def __init__(self, act_func, **kwargs):
        super(Activation, self).__init__(**kwargs)
        if act_func == "relu":
            self.act = nn.Activation('relu')
        elif act_func == "relu6":
            self.act = ReLU6()
        elif act_func == "hard_sigmoid":
            self.act = HardSigmoid()
        elif act_func == "swish":
            self.act = nn.Swish()
        elif act_func == "hard_swish":
            self.act = HardSwish()
        elif act_func == "leaky":
            self.act = nn.LeakyReLU(alpha=0.375)
        else:
            raise NotImplementedError 
Author: dmlc, Project: gluon-cv, Lines of code: 18, Source file: mobilenetv3.py

Example 5: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 alpha,
                 **kwargs):
        super(DarkUnit, self).__init__(**kwargs)
        assert (out_channels % 2 == 0)
        mid_channels = out_channels // 2

        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=nn.LeakyReLU(alpha=alpha))
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=nn.LeakyReLU(alpha=alpha)) 
Author: osmr, Project: imgclsmob, Lines of code: 23, Source file: darknet53.py

Example 6: test_lambda

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def test_lambda():
    net1 = mx.gluon.nn.HybridSequential()
    net1.add(nn.Activation('tanh'),
             nn.LeakyReLU(0.1))

    net2 = mx.gluon.nn.HybridSequential()
    op3 = lambda F, x, *args: F.LeakyReLU(x, *args, slope=0.1)
    net2.add(nn.HybridLambda('tanh'),
             nn.HybridLambda(op3))

    op4 = lambda x: mx.nd.LeakyReLU(x, slope=0.1)
    net3 = mx.gluon.nn.Sequential()
    net3.add(nn.Lambda('tanh'),
             nn.Lambda(op4))

    input_data = mx.nd.random.uniform(shape=(2, 3, 5, 7))
    out1, out2, out3 = net1(input_data), net2(input_data), net3(input_data)
    assert_almost_equal(out1.asnumpy(), out2.asnumpy(), rtol=1e-3)
    assert_almost_equal(out1.asnumpy(), out3.asnumpy(), rtol=1e-3) 
Author: mahyarnajibi, Project: SNIPER-mxnet, Lines of code: 21, Source file: test_gluon.py

Example 7: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def __init__(self, n_classes):
        super(EdgeSpatial, self).__init__()
        self.mlp = nn.Sequential()
        self.mlp.add(nn.Dense(64))
        self.mlp.add(nn.LeakyReLU(0.1))
        self.mlp.add(nn.Dense(64))
        self.mlp.add(nn.LeakyReLU(0.1))
        self.mlp.add(nn.Dense(n_classes)) 
Author: dmlc, Project: dgl, Lines of code: 10, Source file: reldn.py
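
A hypothetical usage sketch for the EdgeSpatial MLP head, assuming the surrounding class definition from reldn.py (not shown here); the feature dimension 128 is an assumption, used only to show that nn.Dense infers its input size on the first forward pass.

import mxnet as mx

head = EdgeSpatial(n_classes=10)
head.initialize()
feats = mx.nd.random.uniform(shape=(4, 128))   # 4 edges, 128-d features (assumed)
print(head.mlp(feats).shape)                   # expected: (4, 10)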

Example 8: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def __init__(self,filter_num,kernel_size=4,stride=2,padding=1):
        super(ConvBlock,self).__init__()
        self.model = nn.HybridSequential()
        with self.name_scope():
            self.model.add(
                nn.Conv2D(filter_num, kernel_size, stride,padding,use_bias=False),
                nn.BatchNorm(),
                nn.LeakyReLU(0.2),
            ) 
Author: dmlc, Project: gluon-cv, Lines of code: 11, Source file: train_srgan.py

Example 9: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
        super(DCGAN_D, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"
        with self.name_scope():
            main = nn.Sequential()
            # input is nc x isize x isize
            main.add(nn.Conv2D(in_channels=nc, channels=ndf, kernel_size=4, strides=2, padding=1, use_bias=False,
                               prefix='initial.conv.{0}-{1}'.format(nc, ndf)))
            main.add(nn.LeakyReLU(0.2, prefix='initial.relu.{0}'.format(ndf)))
            csize, cndf = isize / 2, ndf

            # Extra layers
            for t in range(n_extra_layers):
                main.add(nn.Conv2D(in_channels=cndf, channels=cndf, kernel_size=3, strides=1, padding=1, use_bias=False,
                                   prefix='extra-layers-{0}.{1}.conv'.format(t, cndf)))
                main.add(nn.BatchNorm(in_channels=cndf, prefix='extra-layers-{0}.{1}.batchnorm'.format(t, cndf)))
                main.add(nn.LeakyReLU(0.2, prefix='extra-layers-{0}.{1}.relu'.format(t, cndf)))

            while csize > 4:
                in_feat = cndf
                out_feat = cndf * 2
                main.add(nn.Conv2D(in_channels=in_feat, channels=out_feat, kernel_size=4, strides=2, padding=1,
                                   use_bias=False, prefix='pyramid.{0}-{1}.conv'.format(in_feat, out_feat)))
                main.add(nn.BatchNorm(in_channels=out_feat, prefix='pyramid.{0}.batchnorm'.format(out_feat)))
                main.add(nn.LeakyReLU(0.2, prefix='pyramid.{0}.relu'.format(out_feat)))
                cndf = cndf * 2
                csize = csize / 2

            # state size. K x 4 x 4
            main.add(nn.Conv2D(in_channels=cndf, channels=1, kernel_size=4, strides=1, padding=0, use_bias=False,
                               prefix='final.{0}-{1}.conv'.format(cndf, 1)))
        self.main = main 
Author: dmlc, Project: gluon-cv, Lines of code: 35, Source file: train_wgan.py
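
A hypothetical usage sketch for DCGAN_D, calling only the main Sequential built above (the class's forward method is not part of this excerpt); the constructor arguments below are assumptions.

import mxnet as mx

netD = DCGAN_D(isize=64, nz=100, nc=3, ndf=64, ngpu=0)
netD.initialize()
x = mx.nd.random.uniform(shape=(2, 3, 64, 64))
print(netD.main(x).shape)                      # expected: (2, 1, 1, 1)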

Example 10: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def __init__(self, ndf=64, n_layers=3, use_sigmoid=False):
        super(NLayerDiscriminator, self).__init__()
        self.model = nn.HybridSequential()
        kw = 4
        padw = 1
        with self.name_scope():
            self.model.add(
                nn.Conv2D(ndf, kernel_size=kw, strides=2, padding=padw),
                nn.LeakyReLU(0.2),
            )

            nf_mult = 1
            for n in range(1, n_layers):
                nf_mult = min(2**n, 8)
                self.model.add(
                    nn.Conv2D(ndf * nf_mult,kernel_size=kw, strides=2, padding=padw),
                    nn.InstanceNorm(),
                    nn.LeakyReLU(0.2),
                )

            nf_mult = min(2**n_layers, 8)
            self.model.add(
                nn.Conv2D(ndf * nf_mult,kernel_size=kw, strides=1, padding=padw),
                nn.InstanceNorm(),
                nn.LeakyReLU(0.2),
            )
            self.model.add(
                nn.Conv2D(1, kernel_size=kw, strides=1, padding=padw)
            )
            if use_sigmoid:
                self.model.add(nn.Activation('sigmoid')) 
Author: dmlc, Project: gluon-cv, Lines of code: 33, Source file: train_cgan.py
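
A hypothetical usage sketch for NLayerDiscriminator (a PatchGAN-style discriminator), assuming the surrounding class definition from train_cgan.py; only the model attribute built above is called, and the 256x256 input size is an assumption.

import mxnet as mx

netD = NLayerDiscriminator(ndf=64, n_layers=3)
netD.initialize()
img = mx.nd.random.uniform(shape=(1, 3, 256, 256))
print(netD.model(img).shape)                   # expected: (1, 1, 30, 30) patch map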

Example 11: _conv2d

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def _conv2d(channel, kernel, padding, stride, norm_layer=BatchNorm, norm_kwargs=None):
    """A common conv-bn-leakyrelu cell"""
    cell = nn.HybridSequential(prefix='')
    cell.add(nn.Conv2D(channel, kernel_size=kernel,
                       strides=stride, padding=padding, use_bias=False))
    cell.add(norm_layer(epsilon=1e-5, momentum=0.9, **({} if norm_kwargs is None else norm_kwargs)))
    cell.add(nn.LeakyReLU(0.1))
    return cell 
Author: dmlc, Project: gluon-cv, Lines of code: 10, Source file: darknet.py
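
A hypothetical usage sketch for the _conv2d helper, assuming the surrounding darknet.py imports (BatchNorm must already be in scope as the default norm_layer); it builds one conv-bn-leakyrelu cell, the basic DarkNet building block.

import mxnet as mx

cell = _conv2d(channel=32, kernel=3, padding=1, stride=1)
cell.initialize()
x = mx.nd.random.uniform(shape=(1, 3, 64, 64))
print(cell(x).shape)                           # expected: (1, 32, 64, 64)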

Example 12: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def __init__(self,
                 passes,
                 backbone_out_channels,
                 outs_channels,
                 depth,
                 growth_rate,
                 use_bn,
                 in_channels=3,
                 in_size=(256, 256),
                 **kwargs):
        super(IbpPose, self).__init__(**kwargs)
        self.in_size = in_size
        activation = (lambda: nn.LeakyReLU(alpha=0.01))

        with self.name_scope():
            self.backbone = IbpBackbone(
                in_channels=in_channels,
                out_channels=backbone_out_channels,
                activation=activation)

            self.decoder = nn.HybridSequential(prefix="")
            for i in range(passes):
                merge = (i != passes - 1)
                self.decoder.add(IbpPass(
                    channels=backbone_out_channels,
                    mid_channels=outs_channels,
                    depth=depth,
                    growth_rate=growth_rate,
                    merge=merge,
                    use_bn=use_bn,
                    activation=activation)) 
Author: osmr, Project: imgclsmob, Lines of code: 33, Source file: ibppose_coco.py
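
A small sketch (not from the original code) of why an activation factory is passed around here rather than a single block: each call to the lambda creates a fresh nn.LeakyReLU instance, so every layer owns its own child block instead of sharing one.

from mxnet.gluon import nn

activation = lambda: nn.LeakyReLU(alpha=0.01)
act1, act2 = activation(), activation()
assert act1 is not act2                        # two independent blocks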

Example 13: dark_convYxY

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def dark_convYxY(in_channels,
                 out_channels,
                 bn_use_global_stats,
                 alpha,
                 pointwise):
    """
    DarkNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    pointwise : bool
        Whether to use a 1x1 (pointwise) convolution instead of a 3x3 convolution.
    """
    if pointwise:
        return conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats,
            activation=nn.LeakyReLU(alpha=alpha))
    else:
        return conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats,
            activation=nn.LeakyReLU(alpha=alpha)) 
Author: osmr, Project: imgclsmob, Lines of code: 35, Source file: darknet.py

Example 14: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def __init__(self, channels, kernel_size):
        super().__init__()
        with self.name_scope():
            self.conv = nn.HybridSequential()
            with self.conv.name_scope():
                self.conv.add(
                    nn.Conv2D(channels, kernel_size, padding=1, use_bias=False),
                    nn.BatchNorm(),
                    nn.LeakyReLU(0.1)
                ) 
Author: WenmuZhou, Project: crnn.gluon, Lines of code: 12, Source file: unet.py

Example 15: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import LeakyReLU [as alias]
def __init__(self,
                 in_feats,
                 out_feats,
                 num_heads,
                 feat_drop=0.,
                 attn_drop=0.,
                 negative_slope=0.2,
                 residual=False,
                 activation=None):
        super(GATConv, self).__init__()
        self._num_heads = num_heads
        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._in_feats = in_feats
        self._out_feats = out_feats
        with self.name_scope():
            if isinstance(in_feats, tuple):
                self.fc_src = nn.Dense(out_feats * num_heads, use_bias=False,
                                       weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                                       in_units=self._in_src_feats)
                self.fc_dst = nn.Dense(out_feats * num_heads, use_bias=False,
                                       weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                                       in_units=self._in_dst_feats)
            else:
                self.fc = nn.Dense(out_feats * num_heads, use_bias=False,
                                   weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)),
                                   in_units=in_feats)
            self.attn_l = self.params.get('attn_l',
                                          shape=(1, num_heads, out_feats),
                                          init=mx.init.Xavier(magnitude=math.sqrt(2.0)))
            self.attn_r = self.params.get('attn_r',
                                          shape=(1, num_heads, out_feats),
                                          init=mx.init.Xavier(magnitude=math.sqrt(2.0)))
            self.feat_drop = nn.Dropout(feat_drop)
            self.attn_drop = nn.Dropout(attn_drop)
            self.leaky_relu = nn.LeakyReLU(negative_slope)
            if residual:
                if in_feats != out_feats:
                    self.res_fc = nn.Dense(out_feats * num_heads, use_bias=False,
                                           weight_initializer=mx.init.Xavier(
                                               magnitude=math.sqrt(2.0)),
                                           in_units=in_feats)
                else:
                    self.res_fc = Identity()
            else:
                self.res_fc = None
            self.activation = activation 
Author: dmlc, Project: dgl, Lines of code: 48, Source file: gatconv.py
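
A hedged sketch of the role nn.LeakyReLU plays in GAT-style attention: raw per-edge attention logits are passed through LeakyReLU before softmax normalization. The shapes below are illustrative assumptions only, not GATConv's actual message-passing code.

import mxnet as mx
from mxnet.gluon import nn

leaky_relu = nn.LeakyReLU(0.2)                      # matches the negative_slope default above
logits = mx.nd.random.normal(shape=(5, 4, 1))       # 5 edges, 4 heads (assumed)
scores = mx.nd.softmax(leaky_relu(logits), axis=0)  # normalize over edges
print(scores.shape)                                 # (5, 4, 1)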


Note: the mxnet.gluon.nn.LeakyReLU method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce without permission.