

Python activations.relu Method Code Examples

This article collects typical usage examples of the Python method keras.activations.relu. If you are wondering exactly how activations.relu is used, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples from its parent module, keras.activations.


The sections below present 15 code examples of the activations.relu method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
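
Most of the examples below pass relu as a layer activation. For completeness, here is a minimal sketch of calling keras.activations.relu directly. It assumes Keras 2.2+ (or tf.keras), where relu also accepts the alpha, max_value and threshold keyword arguments; the sample values are invented for illustration and are not part of the examples that follow.

# Minimal sketch, assuming Keras 2.2+ / tf.keras; values are made up for illustration.
import numpy as np
from keras import backend as K
from keras.activations import relu

x = K.constant(np.array([-3.0, -1.0, 0.0, 2.0, 8.0]))

plain = relu(x)                  # [0, 0, 0, 2, 8]
leaky = relu(x, alpha=0.1)       # [-0.3, -0.1, 0, 2, 8]  (leaky slope for negative inputs)
capped = relu(x, max_value=6.0)  # [0, 0, 0, 2, 6]  (same idea as relu6 in Example 12)

print(K.eval(plain), K.eval(leaky), K.eval(capped))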

Example 1: train_model

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def train_model():
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    n = np.array(label, dtype=float)  # use the builtin float; np.float is removed in recent NumPy versions
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True, input_length=length, weights=[embedding_matrix],
                  trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path) 
Developer: jtyoui, Project: Jtyoui, Lines of code: 22, Source file: NER.py

Example 2: test_relu

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def test_relu():
    '''
    Relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''

    from keras.activations import relu as r

    assert r(5) == 5
    assert r(-5) == 0
    assert r(-0.1) == 0
    assert r(0.1) == 0.1

    x = T.vector()
    exp = r(x)
    f = theano.function([x], exp)

    test_values = get_standard_values()
    result = f(test_values)

    list_assert_equal(result, test_values) # because no negatives in test values 
Developer: lllcho, Project: CAPTCHA-breaking, Lines of code: 23, Source file: test_activations.py

Example 3: wavenetBlock

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def wavenetBlock(n_atrous_filters, atrous_filter_size, atrous_rate,
                 n_conv_filters, conv_filter_size):
    def f(input_):
        residual = input_
        tanh_out = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                       atrous_rate=atrous_rate,
                                       border_mode='same',
                                       activation='tanh')(input_)
        sigmoid_out = AtrousConvolution1D(n_atrous_filters, atrous_filter_size,
                                          atrous_rate=atrous_rate,
                                          border_mode='same',
                                          activation='sigmoid')(input_)
        merged = merge([tanh_out, sigmoid_out], mode='mul')
        skip_out = Convolution1D(1, 1, activation='relu', border_mode='same')(merged)
        out = merge([skip_out, residual], mode='sum')
        return out, skip_out
    return f 
Developer: llSourcell, Project: Music_Generation, Lines of code: 19, Source file: simple-generative-model-regressor.py

Example 4: get_basic_generative_model

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def get_basic_generative_model(input_size):
    input = Input(shape=(1, input_size, 1))
    l1a, l1b = wavenetBlock(10, 5, 2, 1, 3)(input)
    l2a, l2b = wavenetBlock(1, 2, 4, 1, 3)(l1a)
    l3a, l3b = wavenetBlock(1, 2, 8, 1, 3)(l2a)
    l4a, l4b = wavenetBlock(1, 2, 16, 1, 3)(l3a)
    l5a, l5b = wavenetBlock(1, 2, 32, 1, 3)(l4a)
    l6 = merge([l1b, l2b, l3b, l4b, l5b], mode='sum')
    l7 = Lambda(relu)(l6)
    l8 = Convolution2D(1, 1, 1, activation='relu')(l7)
    l9 = Convolution2D(1, 1, 1)(l8)
    l10 = Flatten()(l9)
    l11 = Dense(1, activation='tanh')(l10)
    model = Model(input=input, output=l11)
    model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])
    model.summary()
    return model 
Developer: llSourcell, Project: Music_Generation, Lines of code: 19, Source file: simple-generative-model-regressor.py
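
A quick usage sketch (the window size below is an assumption, not taken from the original script): the builder is called with the length of the input window and returns a compiled model.

# Illustrative only; 1024 input samples is a made-up window size.
model = get_basic_generative_model(1024)
# The model then expects inputs shaped (batch, 1, 1024, 1), matching Input(shape=(1, input_size, 1)).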

Example 5: fCreateMNet_Block

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def fCreateMNet_Block(input_t, channels, kernel_size=(3, 3), type=1, forwarding=True, l1_reg=0.0, l2_reg=1e-6):
    tower_t = Conv2D(channels,
                     kernel_size=kernel_size,
                     kernel_initializer='he_normal',
                     weights=None,
                     padding='same',
                     strides=(1, 1),
                     kernel_regularizer=l1_l2(l1_reg, l2_reg),
                     )(input_t)
    tower_t = Activation('relu')(tower_t)
    for counter in range(1, type):
        tower_t = Conv2D(channels,
                         kernel_size=kernel_size,
                         kernel_initializer='he_normal',
                         weights=None,
                         padding='same',
                         strides=(1, 1),
                         kernel_regularizer=l1_l2(l1_reg, l2_reg),
                         )(tower_t)
        tower_t = Activation('relu')(tower_t)
    if (forwarding):
        tower_t = concatenate([tower_t, input_t], axis=1)
    return tower_t 
Developer: thomaskuestner, Project: CNNArt, Lines of code: 25, Source file: MNetArt.py
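
A usage sketch (not from CNNArt; the input shape is invented): the concatenate(axis=1) call suggests channels-first tensors, so the sketch assumes the Keras image data format is set accordingly.

# Illustrative only; shape and channel counts are made up.
from keras import backend as K
from keras.layers import Input

K.set_image_data_format('channels_first')   # assumed, since the block concatenates on axis=1
inp = Input(shape=(1, 64, 64))              # 1-channel 64x64 patch, invented for the sketch
tower = fCreateMNet_Block(inp, channels=32, kernel_size=(3, 3), type=2, forwarding=True)
# With forwarding=True the output has 32 + 1 channels: the conv tower concatenated with its input.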

Example 6: fCreateMNet_Block

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def fCreateMNet_Block(input_t, channels, kernel_size=(3, 3), type=1, forwarding=True, l1_reg=0.0, l2_reg=1e-6):
    tower_t = Conv2D(channels,
                     kernel_size=kernel_size,
                     kernel_initializer='he_normal',
                     weights=None,
                     padding='same',
                     strides=(1, 1),
                     kernel_regularizer=l1_l2(l1_reg, l2_reg),
                     )(input_t)
    tower_t = Activation('relu')(tower_t)
    for counter in range(1, type):
        tower_t = Conv2D(channels,
                         kernel_size=kernel_size,
                         kernel_initializer='he_normal',
                         weights=None,
                         padding='same',
                         strides=(1, 1),
                         kernel_regularizer=l1_l2(l1_reg, l2_reg),
                         )(tower_t)
        tower_t = Activation('relu')(tower_t)
    if (forwarding):
        tower_t = concatenate([tower_t, input_t], axis=1)
    return tower_t 
Developer: thomaskuestner, Project: CNNArt, Lines of code: 25, Source file: motion_MNetArt.py

Example 7: iris

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def iris():

    from keras.optimizers import Adam, Nadam
    from keras.losses import logcosh, categorical_crossentropy
    from keras.activations import relu, elu, softmax

    # here use a standard 2d dictionary for inputting the param boundaries
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16, 32, 64],
         'hidden_layers': [0, 1, 2, 3, 4],
         'batch_size': (2, 30, 10),
         'epochs': [2],
         'dropout': (0, 0.5, 5),
         'weight_regulizer': [None],
         'emb_output_dims':  [None],
         'shapes': ['brick', 'triangle', 0.2],
         'optimizer': [Adam, Nadam],
         'losses': [logcosh, categorical_crossentropy],
         'activation': [relu, elu],
         'last_activation': [softmax]}

    return p 
Developer: autonomio, Project: talos, Lines of code: 24, Source file: params.py

Example 8: breast_cancer

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def breast_cancer():

    from keras.optimizers import Adam, Nadam, RMSprop
    from keras.losses import logcosh, binary_crossentropy
    from keras.activations import relu, elu, sigmoid

    # then we can go ahead and set the parameter space
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16, 32, 64],
         'hidden_layers': [0, 1, 2],
         'batch_size': (2, 30, 10),
         'epochs': [50, 100, 150],
         'dropout': (0, 0.5, 5),
         'shapes': ['brick', 'triangle', 'funnel'],
         'optimizer': [Adam, Nadam, RMSprop],
         'losses': [logcosh, binary_crossentropy],
         'activation': [relu, elu],
         'last_activation': [sigmoid]}

    return p 
Developer: autonomio, Project: talos, Lines of code: 22, Source file: params.py

Example 9: gcn_layer

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def gcn_layer(A_zkc_hat, D_zkc_hat, X, W):
    return relu(np.dot(np.dot(np.dot(D_zkc_hat ** -1, A_zkc_hat), X), W))
Developer: HaiyangLiu1997, Project: Pytorch-Networks, Lines of code: 4, Source file: BasicGNN.py
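
For context, a driver sketch (not from the original repository; every matrix below is invented): the function implements the GCN propagation rule relu(D_hat^-1 · A_hat · X · W), where A_hat is the adjacency matrix with self-loops and D_hat its degree matrix.

import numpy as np

# Toy 3-node chain graph, invented for this sketch.
A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
A_hat = A + np.eye(3)                         # adjacency with self-loops
# gcn_layer inverts D_zkc_hat with `** -1`, so pass it as np.matrix,
# where `** -1` is a true matrix inverse rather than an element-wise power.
D_hat = np.matrix(np.diag(A_hat.sum(axis=1)))

X = np.random.randn(3, 4)                     # node features, made up for the sketch
W = np.random.randn(4, 2)                     # layer weights, made up for the sketch

H = gcn_layer(A_hat, D_hat, X, W)             # normalized propagation: relu(D^-1 A X W), shape (3, 2)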

Example 10: prelu_block

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def prelu_block(use_prelu):
    def f(x):
        if use_prelu:
            x = PReLU()(x)
        else:
            x = Lambda(relu)(x)
        return x

    return f 
Developer: minerva-ml, Project: steppy-toolkit, Lines of code: 11, Source file: architectures.py
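
A usage sketch (the tensor names and sizes are placeholders, not from the original toolkit): the factory returns a callable that is applied to a Keras tensor inside a functional-API model.

# Hypothetical usage; shapes are made up.
from keras.layers import Input, Dense

inp = Input(shape=(128,))
features = Dense(64)(inp)
features = prelu_block(use_prelu=True)(features)        # learned PReLU branch
# or: features = prelu_block(use_prelu=False)(features) # plain relu via Lambda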

Example 11: cnn_model

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def cnn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize pixel values to [0, 1]
    x_train = x_train.reshape(-1, 28, 28, 1) / 255.
    # one-hot
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)

    model = Sequential([
        # input_shape: shape of the input plane, set only on the first layer
        # filters: number of convolution kernels (filters)
        # kernel_size: size of the convolution kernel
        # strides: stride of the convolution
        # padding: one of 'same' or 'valid'
        # activation: activation function
        Convolution2D(input_shape=(28, 28, 1), filters=32, kernel_size=5, strides=1, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Convolution2D(filters=64, kernel_size=5, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Flatten(),  # flatten feature maps to a vector
        Dense(units=1024, activation=relu),
        Dropout(0.5),
        Dense(units=10, activation=softmax),
    ])
    opt = Adam(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Developer: jtyoui, Project: Jtyoui, Lines of code: 29, Source file: HandWritingRecognition.py

Example 12: relu6

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def relu6(x):
    return relu(x, max_value=6) 
Developer: bubbliiiing, Project: Semantic-Segmentation, Lines of code: 4, Source file: mobilenetV2.py
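
A brief usage note (the layer calls below are illustrative, not from the original file): defining relu6 this way lets it be passed wherever Keras accepts an activation callable, which is how MobileNet-style blocks typically use it.

# Illustrative only; the input size and filter count are made up.
from keras.layers import Input, Conv2D, BatchNormalization, Activation

inp = Input(shape=(224, 224, 3))
x = Conv2D(32, (3, 3), padding='same', use_bias=False)(inp)
x = BatchNormalization()(x)
x = Activation(relu6)(x)   # clipped ReLU: min(max(x, 0), 6)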

Example 13: SepConv_BN

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
    # Work out how much padding is needed and whether the spatial size (h, w) shrinks
    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'
    
    # Apply the activation up front when depth_activation is False
    if not depth_activation:
        x = Activation('relu')(x)

    # Separable convolution: a 3x3 depthwise conv followed by a 1x1 pointwise conv
    # The 3x3 depthwise conv uses dilated (atrous) convolution
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
                        padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    # 1x1 pointwise convolution to compress the channels
    x = Conv2D(filters, (1, 1), padding='same',
               use_bias=False, name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x 
Developer: bubbliiiing, Project: Semantic-Segmentation, Lines of code: 34, Source file: deeplab.py
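
A usage sketch (the tensor and parameter values are placeholders, not from deeplab.py): the helper is applied to a feature-map tensor with a unique name prefix.

# Illustrative only; feature-map shape and parameters are made up.
from keras.layers import Input

feature_map = Input(shape=(64, 64, 256))
y = SepConv_BN(feature_map, filters=256, prefix='aspp1',
               stride=1, rate=6, depth_activation=True)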

Example 14: temporal_convs_linear

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def temporal_convs_linear(n_nodes, conv_len, n_classes, n_feat, max_len,
                          causal=False, loss='categorical_crossentropy',
                          optimizer='adam', return_param_str=False):
    """ Used in paper:
    Segmental Spatiotemporal CNNs for Fine-grained Action Segmentation
    Lea et al. ECCV 2016

    Note: Spatial dropout was not used in the original paper.
    It tends to improve performance a little.
    """

    inputs = Input(shape=(max_len, n_feat))
    model = inputs
    # Left-pad in the causal case so the convolution cannot see future frames
    if causal: model = ZeroPadding1D((conv_len // 2, 0))(model)
    model = Conv1D(n_nodes, conv_len, input_dim=n_feat, input_length=max_len, padding='same',
                   activation='relu')(model)
    if causal: model = Cropping1D((0, conv_len // 2))(model)

    model = SpatialDropout1D(0.3)(model)

    model = TimeDistributed(Dense(n_classes, activation="softmax"))(model)

    model = Model(input=inputs, output=model)
    model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal")

    if return_param_str:
        param_str = "tConv_C{}".format(conv_len)
        if causal:
            param_str += "_causal"

        return model, param_str
    else:
        return model 
Developer: Zephyr-D, Project: TCFPN-ISBA, Lines of code: 34, Source file: tf_models.py
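
A usage sketch (all sizes below are invented, not from TCFPN-ISBA): the builder returns a compiled Keras model, plus a descriptive parameter string when return_param_str=True.

# Illustrative only; n_classes, n_feat and max_len are made up.
model, param_str = temporal_convs_linear(n_nodes=64, conv_len=25, n_classes=10,
                                         n_feat=128, max_len=500,
                                         causal=True, return_param_str=True)
print(param_str)   # e.g. "tConv_C25_causal"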

Example 15: model

# Required import: from keras import activations [as alias]
# Or: from keras.activations import relu [as alias]
def model(self):
        input_layer = Input(shape=self.SHAPE)
        x = Convolution2D(96,3,3, subsample=(2,2), border_mode='same',activation='relu')(input_layer)
        x = Convolution2D(64,3,3, subsample=(2,2), border_mode='same',activation='relu')(x)
        x = MaxPooling2D(pool_size=(3,3),border_mode='same')(x)
        x = Convolution2D(32,3,3, subsample=(1,1), border_mode='same',activation='relu')(x)
        x = Convolution2D(32,1,1, subsample=(1,1), border_mode='same',activation='relu')(x)
        x = Convolution2D(2,1,1, subsample=(1,1), border_mode='same',activation='relu')(x)
        output_layer = Reshape((-1,2))(x)
        return Model(input_layer,output_layer) 
Developer: PacktPublishing, Project: Generative-Adversarial-Networks-Cookbook, Lines of code: 12, Source file: discriminator.py


Note: the keras.activations.relu examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; please consult the corresponding project licenses before distributing or reusing the code, and do not republish without permission.