當前位置: 首頁>>代碼示例>>Python>>正文


Python cntk.relu方法代碼示例

本文整理匯總了Python中cntk.relu方法的典型用法代碼示例。如果您正苦於以下問題:Python cntk.relu方法的具體用法?Python cntk.relu怎麽用?Python cntk.relu使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在cntk的用法示例。


在下文中一共展示了cntk.relu方法的14個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: create_basic_model

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def create_basic_model(input, out_dims):
    """Plain CNN: three (5x5 conv + 3x3/2 max-pool) stages, a 64-unit
    dense layer, and a linear output layer.

    :param input: input variable holding the image batch.
    :param out_dims: number of output units (classes).
    :return: un-normalized class scores (no softmax applied).
    """
    net = input
    for num_filters in (32, 32, 64):
        net = C.layers.Convolution(
            (5, 5), num_filters, init=C.initializer.glorot_uniform(),
            activation=C.relu, pad=True
        )(net)
        net = C.layers.MaxPooling((3, 3), strides=(2, 2))(net)

    net = C.layers.Dense(64, init=C.initializer.glorot_uniform())(net)
    # Final layer is linear so a loss/softmax can be attached by the caller.
    net = C.layers.Dense(out_dims, init=C.initializer.glorot_uniform(), activation=None)(net)

    return net
開發者ID:NervanaSystems,項目名稱:ngraph-python,代碼行數:22,代碼來源:cifar_training.py

示例2: create_vgg9_model

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def create_vgg9_model(input, out_dims):
    """VGG9-style CNN: three stages of (conv, conv, max-pool) with widths
    64/96/128, two 1024-unit dense layers, then a linear output layer.

    :param input: input variable holding the image batch.
    :param out_dims: number of output units (classes).
    :return: un-normalized class scores (no softmax applied).
    """
    with C.layers.default_options(activation=C.relu):
        stages = []
        for width in (64, 96, 128):
            stages += [
                C.layers.Convolution(
                    (3, 3), width, init=C.initializer.glorot_uniform(), pad=True
                ),
                C.layers.Convolution(
                    (3, 3), width, init=C.initializer.glorot_uniform(), pad=True
                ),
                C.layers.MaxPooling((3, 3), strides=(2, 2)),
            ]
        stages += [
            C.layers.Dense(1024, init=C.initializer.glorot_uniform())
            for _ in range(2)
        ]
        stages.append(
            C.layers.Dense(out_dims, init=C.initializer.glorot_uniform(), activation=None)
        )
        model = C.layers.Sequential(stages)
    return model(input)
開發者ID:NervanaSystems,項目名稱:ngraph-python,代碼行數:20,代碼來源:cifar_training.py

示例3: relu

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def relu(x, alpha=0., max_value=None, threshold=0.):
    """Rectified linear unit for the CNTK backend.

    Supports a leaky negative slope (`alpha`), a saturation cap
    (`max_value`), and an activation threshold (`threshold`).

    # Arguments
        x: input tensor.
        alpha: slope applied to values below the threshold.
        max_value: upper clip bound for the positive side.
        threshold: values at or below this are zeroed (before the
            leaky slope is subtracted back).

    # Returns
        The transformed tensor.
    """
    leaky = alpha != 0.
    if leaky:
        # Negative part is measured relative to the threshold so the
        # leaky slope kicks in exactly where the gate closes.
        negative_part = C.relu(-x + threshold) if threshold != 0. else C.relu(-x)

    # Positive side: a hard gate at `threshold`, or a plain relu at zero.
    x = x * C.greater(x, threshold) if threshold != 0. else C.relu(x)

    if max_value is not None:
        x = C.clip(x, 0.0, max_value)

    if leaky:
        x -= alpha * negative_part

    return x
開發者ID:Relph1119,項目名稱:GraphicDesignPatternByPython,代碼行數:22,代碼來源:cntk_backend.py

示例4: create_model

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def create_model(features):
    '''
    Build the autoencoder architecture.
    :param features: The input features.
    :return: The decoder output; its dimensionality is the module-level
        ``feature_dim`` (assumed defined elsewhere in the file).
    '''
    hidden_dim = 64  # width of the widest hidden layer

    with C.layers.default_options(init = C.layers.glorot_uniform(), activation = C.ops.relu):
        # Encoder: hidden_dim -> hidden_dim/2 bottleneck.
        h = C.layers.Dense(hidden_dim, activation=C.relu)(features)
        h = C.layers.Dense(int(hidden_dim / 2.0), activation=C.relu)(h)

        # Decoder: expand back and squash to [0, 1] with a sigmoid.
        h = C.layers.Dense(int(hidden_dim / 2.0), activation=C.relu)(h)
        h = C.layers.Dense(hidden_dim, activation=C.relu)(h)
        h = C.layers.Dense(feature_dim, activation=C.sigmoid)(h)

        return h

# Initializing the model with normalized input. 
開發者ID:astorfi,項目名稱:CNTK-World,代碼行數:25,代碼來源:autoencoders.py

示例5: create_terse_model

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def create_terse_model(input, out_dims):
    """Compact CNN: three (5x5 conv + 3x3/2 max-pool) stages with widths
    32/32/64, a 64-unit dense layer, and a linear output layer.

    :param input: input variable holding the image batch.
    :param out_dims: number of output units (classes).
    :return: un-normalized class scores (no softmax applied).
    """
    with C.layers.default_options(activation=C.relu):
        stages = []
        for width in (32, 32, 64):
            stages.append(C.layers.Convolution(
                (5, 5), width, init=C.initializer.glorot_uniform(), pad=True
            ))
            stages.append(C.layers.MaxPooling((3, 3), strides=(2, 2)))
        stages.append(C.layers.Dense(64, init=C.initializer.glorot_uniform()))
        stages.append(
            C.layers.Dense(out_dims, init=C.initializer.glorot_uniform(), activation=None)
        )
        model = C.layers.Sequential(stages)

    return model(input)
開發者ID:NervanaSystems,項目名稱:ngraph-python,代碼行數:16,代碼來源:cifar_training.py

示例6: create_dropout_model

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def create_dropout_model(input, out_dims):
    """Same topology as the terse model, plus 25% dropout after the
    64-unit dense layer for regularization.

    :param input: input variable holding the image batch.
    :param out_dims: number of output units (classes).
    :return: un-normalized class scores (no softmax applied).
    """
    with C.layers.default_options(activation=C.relu):
        stages = []
        for width in (32, 32, 64):
            stages.append(C.layers.Convolution(
                (5, 5), width, init=C.initializer.glorot_uniform(), pad=True
            ))
            stages.append(C.layers.MaxPooling((3, 3), strides=(2, 2)))
        stages.append(C.layers.Dense(64, init=C.initializer.glorot_uniform()))
        stages.append(C.layers.Dropout(0.25))
        stages.append(
            C.layers.Dense(out_dims, init=C.initializer.glorot_uniform(), activation=None)
        )
        model = C.layers.Sequential(stages)

    return model(input)
開發者ID:NervanaSystems,項目名稱:ngraph-python,代碼行數:17,代碼來源:cifar_training.py

示例7: convolution_bn

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def convolution_bn(input, filter_size, num_filters, strides=(1, 1),
                   init=C.he_normal(), activation=C.relu):
    """Padded, bias-free convolution followed by an activation.

    NOTE(review): despite the name, batch normalization is currently
    disabled (the BN line is commented out below).

    :param input: input tensor.
    :param filter_size: spatial kernel shape, e.g. ``(3, 3)``.
    :param num_filters: number of output feature maps.
    :param strides: convolution strides.
    :param init: weight initializer.
    :param activation: callable applied to the conv output; ``None``
        means identity (linear output).
    """
    if activation is None:
        activation = lambda x: x

    conv_out = C.layers.Convolution(
        filter_size, num_filters,
        strides=strides, init=init,
        activation=None, pad=True, bias=False
    )(input)
    # conv_out = C.layers.BatchNormalization(map_rank=1)(conv_out)
    return activation(conv_out)
開發者ID:NervanaSystems,項目名稱:ngraph-python,代碼行數:14,代碼來源:cifar_training.py

示例8: resnet_basic_inc

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def resnet_basic_inc(input, num_filters):
    """ResNet basic block that also down-samples: the main path uses a
    stride-2 3x3 conv, and a 1x1 stride-2 projection shortcut matches
    the shape before the final ReLU over the sum."""
    main = convolution_bn(input, (3, 3), num_filters, strides=(2, 2))
    main = convolution_bn(main, (3, 3), num_filters, activation=None)
    shortcut = convolution_bn(input, (1, 1), num_filters, strides=(2, 2), activation=None)
    return C.relu(main + shortcut)
開發者ID:NervanaSystems,項目名稱:ngraph-python,代碼行數:7,代碼來源:cifar_training.py

示例9: test_relu

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def test_relu():
    """Check C.relu against the ngraph reference on fixed 1-D and 2-D inputs."""
    cases = [
        [-2, -1., 0., 1., 2.],
        [0.],
        [-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1],
        [[1, 2, 3], [4, 5, 6]],
        [[-3, -2, -1], [1, 2, 3]],
    ]
    for data in cases:
        assert_cntk_ngraph_array_equal(C.relu(data))
開發者ID:NervanaSystems,項目名稱:ngraph-python,代碼行數:8,代碼來源:test_ops_unary.py

示例10: bn_relu

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def bn_relu(input, name=""):
    """Batch normalization followed by a ReLU activation."""
    return bn(input, name=name, activation=C.relu)
開發者ID:haixpham,項目名稱:end2end_AU_speech,代碼行數:4,代碼來源:LayerUtils.py

示例11: conv_bn_relu

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def conv_bn_relu(input, filter_shape, num_filters, strides=(1,1), init=C.he_normal(), name=""):
    """Convolution + batch normalization with a ReLU activation."""
    act = C.relu
    return conv_bn(input, filter_shape, num_filters, strides, init,
                   activation=act, name=name)
開發者ID:haixpham,項目名稱:end2end_AU_speech,代碼行數:4,代碼來源:LayerUtils.py

示例12: conv_bn_relu_nopad

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def conv_bn_relu_nopad(input, filter_shape, num_filters, strides=(1,1), init=C.he_normal(), name=""):
    """Un-padded convolution + batch normalization with a ReLU activation."""
    act = C.relu
    return conv_bn_nopad(input, filter_shape, num_filters, strides, init,
                         activation=act, name=name)
開發者ID:haixpham,項目名稱:end2end_AU_speech,代碼行數:4,代碼來源:LayerUtils.py

示例13: relu

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit for the CNTK backend, with an optional leaky
    negative slope (`alpha`) and an optional saturation cap (`max_value`)."""
    leaky = alpha != 0.
    if leaky:
        negative_part = C.relu(-x)
    y = C.relu(x)
    if max_value is not None:
        y = C.clip(y, 0.0, max_value)
    if leaky:
        # Subtracting the scaled negative part gives slope `alpha` below zero.
        y -= alpha * negative_part
    return y
開發者ID:hello-sea,項目名稱:DeepLearning_Wavelet-LSTM,代碼行數:11,代碼來源:cntk_backend.py

示例14: D

# 需要導入模塊: import cntk [as 別名]
# 或者: from cntk import relu [as 別名]
def D(x_img, x_code):
    '''
    Discriminator ("detector") network architecture for the conditional GAN.

    Args:
        x_img: cntk.input_variable representing the images fed to the network.
        x_code: cntk.input_variable representing the conditional code.

    Returns:
        Sigmoid output of dimension 1 (named ``D_out``): the estimated
        probability that ``x_img`` is a real sample for code ``x_code``.

    Note: relies on module-level ``dkernel`` and ``dstride`` for the conv
    kernel shape and stride.
    '''
    def bn_with_leaky_relu(x, leak=0.2):
        # Batch-norm followed by a fixed-slope leaky ReLU, implemented as
        # param_relu with a constant (non-learnable) slope tensor.
        h = C.layers.BatchNormalization(map_rank=1)(x)
        r = C.param_relu(C.constant((np.ones(h.shape) * leak).astype(np.float32)), h)
        return r

    with C.layers.default_options(init=C.normal(scale=0.02)):

        h0 = C.layers.Convolution2D(dkernel, 1, strides=dstride)(x_img)
        h0 = bn_with_leaky_relu(h0, leak=0.2)
        print('h0 shape :', h0.shape)

        h1 = C.layers.Convolution2D(dkernel, 64, strides=dstride)(h0)
        h1 = bn_with_leaky_relu(h1, leak=0.2)
        print('h1 shape :', h1.shape)

        h2 = C.layers.Dense(256, activation=None)(h1)
        h2 = bn_with_leaky_relu(h2, leak=0.2)
        print('h2 shape :', h2.shape)

        # Condition the discriminator by concatenating the code vector.
        h2_aug = C.splice(h2, x_code)

        h3 = C.layers.Dense(256, activation=C.relu)(h2_aug)

        h4 = C.layers.Dense(1, activation=C.sigmoid, name='D_out')(h3)
        # Fixed: the original printed the label 'h3 shape' for h4's shape.
        print('h4 shape :', h4.shape)

        return h4
開發者ID:astorfi,項目名稱:CNTK-World,代碼行數:37,代碼來源:conditional-DCGAN.py


注:本文中的cntk.relu方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。