

Python nn.Activation Method Code Examples

This article collects typical usage examples of the Python method mxnet.gluon.nn.Activation. If you are asking what nn.Activation does, how it is used, or where to find working examples, the curated code samples below should help. You can also explore further usage examples from its parent module, mxnet.gluon.nn.


The following presents 15 code examples of the nn.Activation method, sorted by popularity by default.
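Before diving in, a minimal standalone sketch of nn.Activation (this snippet is illustrative and not one of the collected examples):

import mxnet as mx
from mxnet.gluon import nn

# nn.Activation holds no parameters, so it can be called without initialize()
act = nn.Activation('relu')
x = mx.nd.array([[-1.0, 0.5, 2.0]])
print(act(x))  # -> [[0.  0.5 2. ]]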

Example 1: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=InstanceNorm):
        super(Bottleneck, self).__init__()
        self.expansion = 4
        self.downsample = downsample
        if self.downsample is not None:
            self.residual_layer = nn.Conv2D(in_channels=inplanes, 
                                            channels=planes * self.expansion,
                                            kernel_size=1, strides=(stride, stride))
        self.conv_block = nn.Sequential()
        with self.conv_block.name_scope():
            self.conv_block.add(norm_layer(in_channels=inplanes))
            self.conv_block.add(nn.Activation('relu'))
            self.conv_block.add(nn.Conv2D(in_channels=inplanes, channels=planes, 
                                 kernel_size=1))
            self.conv_block.add(norm_layer(in_channels=planes))
            self.conv_block.add(nn.Activation('relu'))
            self.conv_block.add(ConvLayer(planes, planes, kernel_size=3, 
                stride=stride))
            self.conv_block.add(norm_layer(in_channels=planes))
            self.conv_block.add(nn.Activation('relu'))
            self.conv_block.add(nn.Conv2D(in_channels=planes, 
                                 channels=planes * self.expansion, 
                                 kernel_size=1)) 
Developer: awslabs | Project: dynamic-training-with-apache-mxnet-on-aws | Lines: 25 | Source: net.py
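This Bottleneck builds a pre-activation residual block, but the snippet omits its forward pass. A minimal sketch of the missing method, assuming the design above (projection on the identity path only when downsampling; treat it as an illustration, not verbatim source):

def forward(self, x):
    # Project the identity path when downsampling, then add the
    # pre-activation conv block's output.
    if self.downsample is not None:
        residual = self.residual_layer(x)
    else:
        residual = x
    return residual + self.conv_block(x)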

Example 2: forward

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def forward(self, X):
        h = F.Activation(self.conv1_1(X), act_type='relu')
        h = F.Activation(self.conv1_2(h), act_type='relu')
        relu1_2 = h
        h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))

        h = F.Activation(self.conv2_1(h), act_type='relu')
        h = F.Activation(self.conv2_2(h), act_type='relu')
        relu2_2 = h
        h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))

        h = F.Activation(self.conv3_1(h), act_type='relu')
        h = F.Activation(self.conv3_2(h), act_type='relu')
        h = F.Activation(self.conv3_3(h), act_type='relu')
        relu3_3 = h
        h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))

        h = F.Activation(self.conv4_1(h), act_type='relu')
        h = F.Activation(self.conv4_2(h), act_type='relu')
        h = F.Activation(self.conv4_3(h), act_type='relu')
        relu4_3 = h

        return [relu1_2, relu2_2, relu3_3, relu4_3] 
Developer: awslabs | Project: dynamic-training-with-apache-mxnet-on-aws | Lines: 25 | Source: net.py
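F.Activation here is the functional counterpart of the nn.Activation block, applied through the ndarray/symbol namespace F. The four returned feature maps are the stages a perceptual style loss typically consumes; a hedged sketch of the usual companion, a batched Gram matrix (details may differ from the project's actual helper):

def gram_matrix(y):
    # Gram matrix over channels, normalized by feature-map size
    (b, ch, h, w) = y.shape
    features = y.reshape((b, ch, w * h))
    return mx.nd.batch_dot(features, features, transpose_b=True) / (ch * h * w)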

Example 3: test_basic

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def test_basic():
    model = nn.Sequential()
    model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
    model.add(nn.Dropout(0.5))
    model.add(nn.Dense(64, activation='tanh', in_units=256),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))

    # symbol
    x = mx.sym.var('data')
    y = model(x)
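    # 7 arguments: the 'data' variable plus a weight and bias for each Dense layer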
    assert len(y.list_arguments()) == 7

    # ndarray
    model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 2, 10)))
    assert x.shape == (32, 32)
    x.wait_to_read()

    model.collect_params().setattr('grad_req', 'null')
    assert list(model.collect_params().values())[0]._grad is None
    model.collect_params().setattr('grad_req', 'write')
    assert list(model.collect_params().values())[0]._grad is not None 
Developer: awslabs | Project: dynamic-training-with-apache-mxnet-on-aws | Lines: 25 | Source: test_gluon.py

Example 4: test_reshape_activation

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def test_reshape_activation():
    class Net(gluon.HybridBlock):
        def __init__(self, act, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.act = nn.Activation(act)

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape(self.reshape)
            out = self.act(x_reshape)
            return out
    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for act in acts:
        x = mx.nd.random.uniform(-1, 1, shape=(4, 16, 32, 32))
        shape = (4, 32, 32, -1)
        net = Net(act, shape)
        check_layer_forward_withinput(net, x) 
Developer: awslabs | Project: dynamic-training-with-apache-mxnet-on-aws | Lines: 20 | Source: test_gluon.py
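The same pattern carries through Examples 5-7: each test reshapes or slices the input inside hybrid_forward and then applies nn.Activation, and check_layer_forward_withinput (not shown on this page) presumably verifies that the hybridized output matches the imperative one.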

Example 5: test_slice_activation

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def test_slice_activation():
    class Net(gluon.HybridBlock):
        def __init__(self, act, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice = slice
                self.act = nn.Activation(act)

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=self.slice[0], end=self.slice[1])
            out = self.act(x_slice)
            return out

    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for act in acts:
        x = mx.nd.random.uniform(-1, 1, shape=(8, 32, 64, 64))
        slice = [(0, 16, 32, 32), (4, 32, 64, 64)]
        net = Net(act, slice)
        check_layer_forward_withinput(net, x) 
Developer: awslabs | Project: dynamic-training-with-apache-mxnet-on-aws | Lines: 21 | Source: test_gluon.py

Example 6: test_reshape_activation_reshape_activation

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def test_reshape_activation_reshape_activation():
    class Net(gluon.HybridBlock):
        def __init__(self, act0, act1, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.reshape = shape
                self.act0 = nn.Activation(act0)
                self.act1 = nn.Activation(act1)

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape(self.reshape[0])
            y = self.act0(x_reshape)
            y_reshape = y.reshape(self.reshape[1])
            out = self.act1(y_reshape)
            return out
    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for idx0, act0 in enumerate(acts):
        for idx1, act1 in enumerate(acts):
            if idx1 == idx0:
                continue
            x = mx.nd.random.uniform(-1, 1, shape=(4, 16, 32, 32))
            shape = [(4, 32, 32, -1), (4, 32, 16, -1)]
            net = Net(act0, act1, shape)
            check_layer_forward_withinput(net, x) 
Developer: awslabs | Project: dynamic-training-with-apache-mxnet-on-aws | Lines: 26 | Source: test_gluon.py

Example 7: test_slice_activation_slice_activation

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def test_slice_activation_slice_activation():
    class Net(gluon.HybridBlock):
        def __init__(self, act0, act1, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.slice = slice
                self.act0 = nn.Activation(act0)
                self.act1 = nn.Activation(act1)

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=self.slice[0][0], end=self.slice[0][1])
            y = self.act0(x_slice)
            y_slice = y.slice(begin=self.slice[1][0], end=self.slice[1][1])
            out = self.act1(y_slice)
            return out
    acts = ["relu", "sigmoid", "tanh", "softrelu"]
    for idx0, act0 in enumerate(acts):
        for idx1, act1 in enumerate(acts):
            if idx1 == idx0:
                continue
            x = mx.nd.random.uniform(-1, 1, shape=(8, 32, 64, 64))
            slice = [[(0, 16, 32, 32), (4, 32, 64, 64)], [(2, 0, 16, 16), (4, 16, 32, 32)]]
            net = Net(act0, act1, slice)
            check_layer_forward_withinput(net, x) 
Developer: awslabs | Project: dynamic-training-with-apache-mxnet-on-aws | Lines: 26 | Source: test_gluon.py

Example 8: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def __init__(self):
        super(CellStem0, self).__init__()
        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
        self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)

        self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1) 
Developer: deepinsight | Project: insightocr | Lines: 22 | Source: fnasnet.py
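The relu → 1x1 Conv2D → BatchNorm triple at the top is the standard NASNet cell preamble; the hard-coded 42 and 96 channel counts indicate this port pins a single NASNet-A configuration rather than parameterizing the stem.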

Example 9: _make_dense_layer

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def _make_dense_layer(growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))

    out = gluon.contrib.nn.HybridConcurrent(axis=1, prefix='')
    out.add(gluon.contrib.nn.Identity())
    out.add(new_features)

    return out 
Developer: deepinsight | Project: insightface | Lines: 20 | Source: fdensenet.py
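Act() replaces the commented-out nn.Activation('relu') calls; in insightface it is a project-level factory that selects the activation from a global config. A hypothetical minimal stand-in (the real factory lives in the project, not on this page):

def Act():
    # Hypothetical: insightface typically switches between PReLU and ReLU here
    return nn.Activation('relu')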

Example 10: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def __init__(self, num_init_features, growth_rate, block_config,
                 bn_size=4, dropout=0, classes=1000, **kwargs):

        super(DenseNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                        strides=1, padding=1, use_bias=False))
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Add dense blocks
            num_features = num_init_features
            for i, num_layers in enumerate(block_config):
                self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
                num_features = num_features + num_layers * growth_rate
                if i != len(block_config) - 1:
                    self.features.add(_make_transition(num_features // 2))
                    num_features = num_features // 2
            self.features.add(nn.BatchNorm())
            self.features.add(nn.Activation('relu'))
            #self.features.add(nn.AvgPool2D(pool_size=7))
            #self.features.add(nn.Flatten())

            #self.output = nn.Dense(classes) 
Developer: deepinsight | Project: insightface | Lines: 27 | Source: fdensenet.py
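The commented-out pooling, flatten, and Dense output lines suggest this DenseNet serves as an embedding backbone with a head attached elsewhere, which is presumably why the classes argument goes unused.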

Example 11: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(NormalCell, self).__init__()
        self.conv_prev_1x1 = nn.HybridSequential()
        self.conv_prev_1x1.add(nn.Activation(activation='relu'))
        self.conv_prev_1x1.add(nn.Conv2D(channels=out_channels_left, kernel_size=1, strides=1, use_bias=False))
        self.conv_prev_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.conv_1x1 = nn.HybridSequential()
        self.conv_1x1.add(nn.Activation(activation='relu'))
        self.conv_1x1.add(nn.Conv2D(channels=out_channels_right, kernel_size=1, strides=1, use_bias=False))
        self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)

        self.comb_iter_2_left = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_3_left = nn.AvgPool2D(3, strides=1, padding=1)
        self.comb_iter_3_right = nn.AvgPool2D(3, strides=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False) 
Developer: deepinsight | Project: insightface | Lines: 26 | Source: fnasnet.py

Example 12: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def __init__(self, prefix, entry_block3_stride, use_global_stats, norm_layer):
        super(EntryFlow, self).__init__(prefix)

        with self.name_scope():
            self.conv1 = nn.HybridSequential(prefix='conv1_')
            with self.conv1.name_scope():
                self.conv1.add(nn.Conv2D(32, kernel_size=3, strides=2, padding=1, use_bias=False, prefix='1_'))
                self.conv1.add(norm_layer(in_channels=32, use_global_stats=use_global_stats, prefix='1_BN_'))
                self.conv1.add(nn.Activation("relu"))
            self.conv2 = nn.HybridSequential(prefix='conv1_')
            with self.conv2.name_scope():
                self.conv2.add(nn.Conv2D(64, kernel_size=3, padding=1, use_bias=False, prefix='2_'))
                self.conv2.add(norm_layer(in_channels=64, use_global_stats=use_global_stats, prefix='2_BN_'))
                self.conv2.add(nn.Activation("relu"))

            self.conv3 = XceptionBlock(filters_list=[128, 128, 128], kernel_size=3, strides=2,
                                       use_global_stats=use_global_stats, norm_layer=norm_layer,
                                       dilation=1, depth_activation=False, in_filters=64, prefix='block1_')
            self.conv4 = XceptionBlock(filters_list=[256, 256, 256], kernel_size=3, strides=2, return_skip=True,
                                       use_global_stats=use_global_stats, norm_layer=norm_layer,
                                       dilation=1, depth_activation=False, in_filters=128, prefix='block2_')
            self.conv5 = XceptionBlock(filters_list=[728, 728, 728], kernel_size=3, strides=entry_block3_stride,
                                       use_shortcut_conv=True, dilation=1, depth_activation=False, in_filters=256,
                                       norm_layer=norm_layer, use_global_stats=use_global_stats, prefix='block3_') 
Developer: tonysy | Project: Deep-Feature-Flow-Segmentation | Lines: 26 | Source: aspp_temp.py
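Note that self.conv2 reuses prefix='conv1_'; this mirrors the upstream source, and the inner '2_' and '2_BN_' prefixes still keep the parameter names unique.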

Example 13: get_activation

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def get_activation(act):
    """Get the activation based on the act string

    Parameters
    ----------
    act: str or HybridBlock

    Returns
    -------
    ret: HybridBlock
    """
    if act is None:
        return lambda x: x
    if isinstance(act, str):
        if act == 'leaky':
            return nn.LeakyReLU(0.1)
        elif act in ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']:
            return nn.Activation(act)
        else:
            raise NotImplementedError
    else:
        return act 
Developer: dmlc | Project: dgl | Lines: 24 | Source: utils.py
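A quick usage sketch; the return values follow directly from the branches above:

act = get_activation('leaky')     # nn.LeakyReLU(0.1)
act = get_activation('relu')      # nn.Activation('relu')
act = get_activation(None)        # identity: lambda x: x
act = get_activation(nn.PReLU())  # HybridBlock passed through unchanged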

Example 14: hybrid_forward

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def hybrid_forward(self, F, x):
        residual = x

        x = self.body(x)

        if self.se:
            w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
            w = self.se(w)
            x = F.broadcast_mul(x, w)

        if self.downsample:
            residual = self.downsample(residual)

        x = F.Activation(x + residual, act_type='relu')
        return x


# Nets 
Developer: dmlc | Project: gluon-cv | Lines: 20 | Source: resnext.py

Example 15: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import Activation [as alias]
def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
        super(_FCNHead, self).__init__()
        with self.name_scope():
            self.block = nn.HybridSequential()
            inter_channels = in_channels // 4
            with self.block.name_scope():
                self.block.add(nn.Conv2D(in_channels=in_channels, channels=inter_channels,
                                         kernel_size=3, padding=1, use_bias=False))
                self.block.add(norm_layer(in_channels=inter_channels,
                                          **({} if norm_kwargs is None else norm_kwargs)))
                self.block.add(nn.Activation('relu'))
                self.block.add(nn.Dropout(0.1))
                self.block.add(nn.Conv2D(in_channels=inter_channels, channels=channels,
                                         kernel_size=1))

    # pylint: disable=arguments-differ 
Developer: dmlc | Project: gluon-cv | Lines: 18 | Source: fcn.py
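The snippet cuts off just before the forward pass (the pylint directive hints that one follows). A minimal sketch, assuming the head simply runs its block:

def hybrid_forward(self, F, x):
    return self.block(x)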


Note: The mxnet.gluon.nn.Activation examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community-contributed open-source projects; copyright belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.