

Python cntk.relu Method Code Examples

This article collects typical usage examples of the cntk.relu method in Python. If you are wondering what cntk.relu does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples from the cntk package that the method belongs to.


The following sections present 14 code examples of the cntk.relu method, sorted by popularity by default.
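Before the project examples, a minimal stand-alone sketch (not taken from any of the projects below) shows what cntk.relu computes: it clamps negative values to zero, element-wise.

import cntk as C
import numpy as np

x = C.input_variable(5)                                      # 5-dimensional input
y = C.relu(x)                                                # element-wise max(x, 0)
print(y.eval({x: np.array([[-2., -1., 0., 1., 2.]], dtype=np.float32)}))
# -> [[0. 0. 0. 1. 2.]]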

Example 1: create_basic_model

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def create_basic_model(input, out_dims):
    net = C.layers.Convolution(
        (5, 5), 32, init=C.initializer.glorot_uniform(), activation=C.relu, pad=True
    )(input)
    net = C.layers.MaxPooling((3, 3), strides=(2, 2))(net)

    net = C.layers.Convolution(
        (5, 5), 32, init=C.initializer.glorot_uniform(), activation=C.relu, pad=True
    )(net)
    net = C.layers.MaxPooling((3, 3), strides=(2, 2))(net)

    net = C.layers.Convolution(
        (5, 5), 64, init=C.initializer.glorot_uniform(), activation=C.relu, pad=True
    )(net)
    net = C.layers.MaxPooling((3, 3), strides=(2, 2))(net)

    net = C.layers.Dense(64, init=C.initializer.glorot_uniform())(net)
    net = C.layers.Dense(out_dims, init=C.initializer.glorot_uniform(), activation=None)(net)

    return net 
Developer: NervanaSystems | Project: ngraph-python | Lines of code: 22 | Source file: cifar_training.py
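A hypothetical usage sketch (not from cifar_training.py), assuming CIFAR-10-sized inputs and 10 output classes:

features = C.input_variable((3, 32, 32))   # CHW image tensor (assumed shape)
z = create_basic_model(features, 10)
print(z.shape)                              # -> (10,)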

Example 2: create_vgg9_model

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def create_vgg9_model(input, out_dims):
    with C.layers.default_options(activation=C.relu):
        model = C.layers.Sequential([
            C.layers.For(range(3), lambda i: [
                C.layers.Convolution(
                    (3, 3), [64, 96, 128][i], init=C.initializer.glorot_uniform(), pad=True
                ),
                C.layers.Convolution(
                    (3, 3), [64, 96, 128][i], init=C.initializer.glorot_uniform(), pad=True
                ),
                C.layers.MaxPooling((3, 3), strides=(2, 2))
            ]),
            C.layers.For(range(2), lambda: [
                C.layers.Dense(1024, init=C.initializer.glorot_uniform())
            ]),
            C.layers.Dense(out_dims, init=C.initializer.glorot_uniform(), activation=None)
        ])
    return model(input) 
Developer: NervanaSystems | Project: ngraph-python | Lines of code: 20 | Source file: cifar_training.py
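To train such a model one typically attaches a loss and a metric to the network output. The sketch below uses standard CNTK primitives; the input shape, class count, and learning rate are assumptions for illustration:

features = C.input_variable((3, 32, 32))
labels = C.input_variable(10)
z = create_vgg9_model(features, 10)

loss = C.cross_entropy_with_softmax(z, labels)
metric = C.classification_error(z, labels)
learner = C.sgd(z.parameters, lr=C.learning_parameter_schedule(0.01))   # CNTK 2.3+ schedule API
trainer = C.Trainer(z, (loss, metric), [learner])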

Example 3: relu

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def relu(x, alpha=0., max_value=None, threshold=0.):

    if alpha != 0.:
        if threshold != 0.:
            negative_part = C.relu(-x + threshold)
        else:
            negative_part = C.relu(-x)

    if threshold != 0.:
        x = x * C.greater(x, threshold)
    else:
        x = C.relu(x)

    if max_value is not None:
        x = C.clip(x, 0.0, max_value)

    if alpha != 0.:
        x -= alpha * negative_part

    return x 
Developer: Relph1119 | Project: GraphicDesignPatternByPython | Lines of code: 22 | Source file: cntk_backend.py
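This is the Keras CNTK-backend implementation of relu, which adds a leaky slope (alpha), an upper cap (max_value), and a threshold on top of C.relu. A sketch of calling it directly on a CNTK constant; the parameter values are arbitrary illustrations:

x = C.constant([-3., -1., 0.5, 2., 10.])
y = relu(x, alpha=0.1, max_value=6., threshold=1.)
print(y.eval())   # leaky slope below the threshold, positive values capped at 6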

Example 4: create_model

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def create_model(features):
    '''
    This function creates the model architecture (an autoencoder).
    :param features: The input features.
    :return: The output of the network, whose dimensionality is feature_dim.
    '''
    with C.layers.default_options(init=C.layers.glorot_uniform(), activation=C.ops.relu):

        # Hidden layer dimension
        hidden_dim = 64

        # Encoder
        encoder_out = C.layers.Dense(hidden_dim, activation=C.relu)(features)
        encoder_out = C.layers.Dense(int(hidden_dim / 2.0), activation=C.relu)(encoder_out)

        # Decoder
        decoder_out = C.layers.Dense(int(hidden_dim / 2.0), activation=C.relu)(encoder_out)
        decoder_out = C.layers.Dense(hidden_dim, activation=C.relu)(decoder_out)
        # feature_dim is a module-level variable in the source script
        decoder_out = C.layers.Dense(feature_dim, activation=C.sigmoid)(decoder_out)

        return decoder_out

# Initializing the model with normalized input. 
Developer: astorfi | Project: CNTK-World | Lines of code: 25 | Source file: autoencoders.py
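A hypothetical usage sketch for this autoencoder; feature_dim is a module-level constant in the original script and is assumed here (e.g. 784 for flattened MNIST images):

feature_dim = 784                                    # assumption for illustration
features = C.input_variable(feature_dim)
reconstruction = create_model(features)
loss = C.squared_error(reconstruction, features)     # reconstruct the normalized input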

Example 5: create_terse_model

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def create_terse_model(input, out_dims):
    with C.layers.default_options(activation=C.relu):
        model = C.layers.Sequential([
            C.layers.For(range(3), lambda i: [
                C.layers.Convolution(
                    (5, 5), [32, 32, 64][i], init=C.initializer.glorot_uniform(), pad=True
                ),
                C.layers.MaxPooling((3, 3), strides=(2, 2))
            ]),
            C.layers.Dense(64, init=C.initializer.glorot_uniform()),
            C.layers.Dense(out_dims, init=C.initializer.glorot_uniform(), activation=None)
        ])

    return model(input) 
Developer: NervanaSystems | Project: ngraph-python | Lines of code: 16 | Source file: cifar_training.py

Example 6: create_dropout_model

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def create_dropout_model(input, out_dims):
    with C.layers.default_options(activation=C.relu):
        model = C.layers.Sequential([
            C.layers.For(range(3), lambda i: [
                C.layers.Convolution(
                    (5, 5), [32, 32, 64][i], init=C.initializer.glorot_uniform(), pad=True
                ),
                C.layers.MaxPooling((3, 3), strides=(2, 2))
            ]),
            C.layers.Dense(64, init=C.initializer.glorot_uniform()),
            C.layers.Dropout(0.25),
            C.layers.Dense(out_dims, init=C.initializer.glorot_uniform(), activation=None)
        ])

    return model(input) 
Developer: NervanaSystems | Project: ngraph-python | Lines of code: 17 | Source file: cifar_training.py

Example 7: convolution_bn

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def convolution_bn(input, filter_size, num_filters, strides=(1, 1),
                   init=C.he_normal(), activation=C.relu):
    if activation is None:
        activation = lambda x: x

    r = C.layers.Convolution(
        filter_size, num_filters,
        strides=strides, init=init,
        activation=None, pad=True, bias=False
    )(input)
    # r = C.layers.BatchNormalization(map_rank=1)(r)
    return activation(r) 
Developer: NervanaSystems | Project: ngraph-python | Lines of code: 14 | Source file: cifar_training.py

Example 8: resnet_basic_inc

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def resnet_basic_inc(input, num_filters):
    c1 = convolution_bn(input, (3, 3), num_filters, strides=(2, 2))
    c2 = convolution_bn(c1, (3, 3), num_filters, activation=None)
    s = convolution_bn(input, (1, 1), num_filters, strides=(2, 2), activation=None)
    return C.relu(c2 + s) 
Developer: NervanaSystems | Project: ngraph-python | Lines of code: 7 | Source file: cifar_training.py
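A hypothetical usage sketch for this strided residual block, assuming a CIFAR-sized input; the (2, 2) strides halve the spatial resolution while the 1x1 shortcut convolution matches the filter count before the final C.relu:

features = C.input_variable((3, 32, 32))
block = resnet_basic_inc(features, num_filters=16)
print(block.shape)   # -> (16, 16, 16)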

Example 9: test_relu

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def test_relu():
    assert_cntk_ngraph_array_equal(C.relu([-2, -1., 0., 1., 2.]))
    assert_cntk_ngraph_array_equal(C.relu([0.]))
    assert_cntk_ngraph_array_equal(C.relu([-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1]))
    assert_cntk_ngraph_array_equal(C.relu([[1, 2, 3], [4, 5, 6]]))
    assert_cntk_ngraph_array_equal(C.relu([[-3, -2, -1], [1, 2, 3]])) 
Developer: NervanaSystems | Project: ngraph-python | Lines of code: 8 | Source file: test_ops_unary.py
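assert_cntk_ngraph_array_equal is a helper from the ngraph-python test suite that compares the CNTK output with its ngraph-imported counterpart. Evaluating one of the calls directly shows the values being compared:

print(C.relu([-2, -1., 0., 1., 2.]).eval())   # -> [0. 0. 0. 1. 2.]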

Example 10: bn_relu

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def bn_relu(input, name=""):
    return bn(input, activation=C.relu, name=name) 
Developer: haixpham | Project: end2end_AU_speech | Lines of code: 4 | Source file: LayerUtils.py

Example 11: conv_bn_relu

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def conv_bn_relu(input, filter_shape, num_filters, strides=(1,1), init=C.he_normal(), name=""):
    return conv_bn(input, filter_shape, num_filters, strides, init, activation=C.relu, name=name) 
Developer: haixpham | Project: end2end_AU_speech | Lines of code: 4 | Source file: LayerUtils.py

Example 12: conv_bn_relu_nopad

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def conv_bn_relu_nopad(input, filter_shape, num_filters, strides=(1,1), init=C.he_normal(), name=""):
    return conv_bn_nopad(input, filter_shape, num_filters, strides, init, activation=C.relu, name=name) 
Developer: haixpham | Project: end2end_AU_speech | Lines of code: 4 | Source file: LayerUtils.py
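Examples 10-12 are thin wrappers around bn, conv_bn, and conv_bn_nopad helpers defined elsewhere in LayerUtils.py and not shown on this page. A plausible sketch of such a conv_bn, written here as an assumption rather than the original implementation (conv_bn_nopad would be the same with pad=False):

def conv_bn(input, filter_shape, num_filters, strides=(1, 1),
            init=C.he_normal(), activation=None, name=""):
    # Convolution without bias, followed by batch normalization; the optional
    # activation (e.g. C.relu in conv_bn_relu above) is applied last.
    c = C.layers.Convolution2D(filter_shape, num_filters, strides=strides,
                               init=init, pad=True, bias=False)(input)
    b = C.layers.BatchNormalization(map_rank=1)(c)
    out = b if activation is None else activation(b)
    return C.alias(out, name=name) if name else out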

Example 13: relu

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def relu(x, alpha=0., max_value=None):
    if alpha != 0.:
        negative_part = C.relu(-x)
    x = C.relu(x)
    if max_value is not None:
        x = C.clip(x, 0.0, max_value)
    if alpha != 0.:
        x -= alpha * negative_part
    return x 
Developer: hello-sea | Project: DeepLearning_Wavelet-LSTM | Lines of code: 11 | Source file: cntk_backend.py
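This is an older Keras CNTK-backend relu without the threshold argument of Example 3. With a non-zero alpha it behaves like a leaky ReLU; a short sketch with illustrative values:

y = relu(C.constant([-2., 0., 3.]), alpha=0.1)
print(y.eval())   # -> [-0.2  0.  3.]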

Example 14: D

# Required module: import cntk [as an alias]
# Or: from cntk import relu [as an alias]
def D(x_img, x_code):
    '''
    Discriminator network architecture.

    Args:
        x_img: cntk.input_variable representing the input images.
        x_code: cntk.input_variable representing the conditional code.
    '''
    def bn_with_leaky_relu(x, leak=0.2):
        h = C.layers.BatchNormalization(map_rank=1)(x)
        r = C.param_relu(C.constant((np.ones(h.shape) * leak).astype(np.float32)), h)
        return r

    with C.layers.default_options(init=C.normal(scale=0.02)):

        h0 = C.layers.Convolution2D(dkernel, 1, strides=dstride)(x_img)
        h0 = bn_with_leaky_relu(h0, leak=0.2)
        print('h0 shape :', h0.shape)

        h1 = C.layers.Convolution2D(dkernel, 64, strides=dstride)(h0)
        h1 = bn_with_leaky_relu(h1, leak=0.2)
        print('h1 shape :', h1.shape)

        h2 = C.layers.Dense(256, activation=None)(h1)
        h2 = bn_with_leaky_relu(h2, leak=0.2)
        print('h2 shape :', h2.shape)

        h2_aug = C.splice(h2, x_code)

        h3 = C.layers.Dense(256, activation=C.relu)(h2_aug)

        h4 = C.layers.Dense(1, activation=C.sigmoid, name='D_out')(h3)
        print('h4 shape :', h4.shape)

        return h4 
Developer: astorfi | Project: CNTK-World | Lines of code: 37 | Source file: conditional-DCGAN.py
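A hypothetical usage sketch for this discriminator; dkernel, dstride, numpy (as np), and the image/code shapes are module-level settings in the original script and are assumed here:

import numpy as np

dkernel, dstride = (4, 4), (2, 2)         # assumed kernel size and stride
x_img = C.input_variable((1, 28, 28))     # assumed grayscale image input
x_code = C.input_variable(10)             # assumed one-hot condition code
d_out = D(x_img, x_code)                  # scalar probability per sample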


Note: The cntk.relu examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not repost without permission.