

Python advanced_activations.ELU Attribute Code Examples

This article collects typical usage examples of the advanced_activations.ELU attribute from the Python module keras.layers. If you are wondering what advanced_activations.ELU is, how to use it, or want to see it in real code, the curated examples below should help. You can also explore further usage of its parent module, keras.layers.advanced_activations.


Fifteen code examples of the advanced_activations.ELU attribute are shown below, sorted by popularity by default.
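
Before diving in, a quick refresher on the math: ELU(x) equals x for x > 0 and alpha * (exp(x) - 1) otherwise, so negative inputs saturate smoothly toward -alpha (Keras defaults alpha to 1.0). A minimal NumPy sketch of the function itself, independent of any of the projects below:

import numpy as np

def elu(x, alpha=1.0):
    # Exponential Linear Unit: identity on the positive side,
    # alpha * (exp(x) - 1) on the negative side.
    return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(elu(x))             # negative values saturate toward -1.0
print(elu(x, alpha=0.8))  # alpha rescales the negative branch, as in ELU(alpha=0.8) below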

Example 1: deep_mlp

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def deep_mlp(self):
        """
        Deep Multilayer Perceptron.
        """
        if self._config.num_mlp_layers == 0:
            self.add(Dropout(0.5))
        else:
            for j in range(self._config.num_mlp_layers):
                self.add(Dense(self._config.mlp_hidden_dim))
                if self._config.mlp_activation == 'elu':
                    self.add(ELU())
                elif self._config.mlp_activation == 'leaky_relu':
                    self.add(LeakyReLU())
                elif self._config.mlp_activation == 'prelu':
                    self.add(PReLU())
                else:
                    self.add(Activation(self._config.mlp_activation))
                self.add(Dropout(0.5)) 
Author: mateuszmalinowski, Project: visual_turing_test-tutorial, Lines: 20, Source: model_zoo.py
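
The method above reads three attributes off self._config. A hedged sketch of such a config object; the attribute names are taken from the code, while the class name and values are placeholders:

# Hypothetical configuration for deep_mlp; values are illustrative only.
class MLPConfig(object):
    num_mlp_layers = 2        # 0 means a single Dropout and no hidden layers
    mlp_hidden_dim = 512      # width of each Dense layer
    mlp_activation = 'elu'    # 'elu', 'leaky_relu', 'prelu', or any Keras activation string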

Example 2: test_tiny_conv_elu_random

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def test_tiny_conv_elu_random(self):
        np.random.seed(1988)

        # Define a model
        from keras.layers.advanced_activations import ELU

        model = Sequential()
        model.add(
            Convolution2D(input_shape=(10, 10, 3), nb_filter=3, nb_row=5, nb_col=5)
        )
        model.add(ELU(alpha=0.8))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_keras_model(model) 
Author: apple, Project: coremltools, Lines: 18, Source: test_keras_numeric.py

Example 3: test_tiny_mcrnn_music_tagger

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def test_tiny_mcrnn_music_tagger(self):

        x_in = Input(shape=(4, 6, 1))
        x = ZeroPadding2D(padding=(0, 1))(x_in)
        x = BatchNormalization(axis=2, name="bn_0_freq")(x)
        # Conv block 1
        x = Convolution2D(2, 3, 3, border_mode="same", name="conv1")(x)
        x = BatchNormalization(axis=3, mode=0, name="bn1")(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool1")(x)
        # Conv block 2
        x = Convolution2D(4, 3, 3, border_mode="same", name="conv2")(x)
        x = BatchNormalization(axis=3, mode=0, name="bn2")(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool2")(x)

        # Should get you (1,1,2,4)
        x = Reshape((2, 4))(x)
        x = GRU(32, return_sequences=True, name="gru1")(x)
        x = GRU(32, return_sequences=False, name="gru2")(x)

        # Create model.
        model = Model(x_in, x)
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
        self._test_keras_model(model, mode="random_zero_mean", delta=1e-2) 
Author: apple, Project: coremltools, Lines: 27, Source: test_keras_numeric.py

Example 4: build_model

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def build_model(n_classes):

    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    m_sizes = [50, 70]
    n_sizes = [1, 3, 5]
    n_filters = [128, 64, 32]
    maxpool_const = 4

    layers = list()

    for m_i in m_sizes:
        for i, n_i in enumerate(n_sizes):
            x = Convolution2D(n_filters[i], m_i, n_i,
                              border_mode='same',
                              init='he_normal',
                              W_regularizer=l2(1e-5),
                              name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
            x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS, SEGMENT_DUR/maxpool_const), name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)
            x = Flatten(name=str(n_i)+'_'+str(m_i)+'_'+'flatten')(x)
            layers.append(x)

    x = merge(layers, mode='concat', concat_axis=channel_axis)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
    model = Model(melgram_input, x)

    return model 
Author: Veleslavia, Project: EUSIPCO2017, Lines: 38, Source: singlelayer.py

Example 5: test_tiny_conv_elu_random

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def test_tiny_conv_elu_random(self):
        np.random.seed(1988)

        # Define a model
        from keras.layers.advanced_activations import ELU

        model = Sequential()
        model.add(Conv2D(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5)))
        model.add(ELU(alpha=0.8))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Get the coreml model
        self._test_model(model) 
Author: apple, Project: coremltools, Lines: 16, Source: test_keras2_numeric.py
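
Note that Examples 2 and 5 are the same numeric test written against the Keras 1 and Keras 2 APIs respectively: Convolution2D(nb_filter=3, nb_row=5, nb_col=5) became Conv2D(filters=3, kernel_size=(5, 5)), while the ELU(alpha=0.8) layer itself is unchanged across both versions.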

Example 6: build_model

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def build_model(X,Y,nb_classes):
    nb_filters = 32  # number of convolutional filters to use
    pool_size = (2, 2)  # size of pooling area for max pooling
    kernel_size = (3, 3)  # convolution kernel size
    nb_layers = 4
    input_shape = (1, X.shape[2], X.shape[3])

    model = Sequential()
    model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                        border_mode='valid', input_shape=input_shape))
    model.add(BatchNormalization(axis=1, mode=2))
    model.add(Activation('relu'))

    for layer in range(nb_layers-1):
        model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
        model.add(BatchNormalization(axis=1, mode=2))
        model.add(ELU(alpha=1.0))  
        model.add(MaxPooling2D(pool_size=pool_size))
        model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation("softmax"))
    return model 
Author: drscotthawley, Project: audio-classifier-keras-cnn, Lines: 29, Source: eval_network.py

Example 7: get_activation_layer

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def get_activation_layer(activation):
    if activation == 'LeakyReLU':
        return LeakyReLU()
    if activation == 'PReLU':
        return PReLU()
    if activation == 'ELU':
        return ELU()
    if activation == 'ThresholdedReLU':
        return ThresholdedReLU()

    return Activation(activation)

# TODO: same for optimizers, including clipnorm 
Author: ClimbsRocks, Project: auto_ml, Lines: 15, Source: utils_models.py
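
A hedged usage sketch for the helper above, showing why it exists: advanced activations such as ELU are layers rather than activation strings, so they cannot be passed as Dense(activation=...). The input width and tensor names here are illustrative assumptions, not taken from the auto_ml source:

from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(16,))                   # hypothetical feature width
hidden = Dense(64)(inputs)                    # linear projection; activation applied separately
hidden = get_activation_layer('ELU')(hidden)  # returns ELU(); unknown names fall back to Activation(name)
model = Model(inputs, hidden)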

Example 8: build_mlp

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def build_mlp(last_layer, p_dropout=0.0, num_layers=1, with_bn=True, dim=None, l2_weight=0.0,
                  last_activity_regulariser=None, propensity_dropout=None, normalize=False):
        if dim is None:
            dim = K.int_shape(last_layer)[-1]

        for i in range(num_layers):
            last_layer = Dense(dim,
                               kernel_regularizer=L1L2(l2=l2_weight),
                               bias_regularizer=L1L2(l2=l2_weight),
                               use_bias=not with_bn,
                               activity_regularizer=last_activity_regulariser if i == num_layers-1 else None)\
                (last_layer)

            if with_bn:
                last_layer = BatchNormalization(gamma_regularizer=L1L2(l2=l2_weight),
                                                beta_regularizer=L1L2(l2=l2_weight))(last_layer)
            last_layer = ELU()(last_layer)
            last_layer = Dropout(p_dropout)(last_layer)
            if propensity_dropout is not None:
                last_layer = PerSampleDropout(propensity_dropout)(last_layer)

        if normalize:
            last_layer = Lambda(lambda x: x / safe_sqrt(tf.reduce_sum(tf.square(x),
                                                                      axis=1,
                                                                      keep_dims=True)))(last_layer)

        if last_activity_regulariser is not None:
            identity_layer = Lambda(lambda x: x)
            identity_layer.activity_regularizer = last_activity_regulariser
            last_layer = identity_layer(last_layer)

        return last_layer 
Author: d909b, Project: perfect_match, Lines: 34, Source: model_builder.py

Example 9: Panotti_CNN

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def Panotti_CNN(X_shape, nb_classes, nb_layers=4):
    # Inputs:
    #    X_shape = [ # spectrograms per batch, # spectrogram freq bins, # spectrogram time bins, # audio channels ]
    #    nb_classes = number of output n_classes
    #    nb_layers = number of conv-pooling sets in the CNN
    from keras import backend as K
    K.set_image_data_format('channels_last')                   # SHH changed on 3/1/2018 b/c tensorflow prefers channels_last

    nb_filters = 32  # number of convolutional filters = "feature maps"
    kernel_size = (3, 3)  # convolution kernel size
    pool_size = (2, 2)  # size of pooling area for max pooling
    cl_dropout = 0.5    # conv. layer dropout
    dl_dropout = 0.6    # dense layer dropout

    print(" MyCNN_Keras2: X_shape = ",X_shape,", channels = ",X_shape[3])
    input_shape = (X_shape[1], X_shape[2], X_shape[3])
    model = Sequential()
    model.add(Conv2D(nb_filters, kernel_size, padding='same', input_shape=input_shape, name="Input"))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Activation('relu'))        # Leave this relu & BN here.  ELU is not good here (my experience)
    model.add(BatchNormalization(axis=-1))  # axis=1 for 'channels_first'; but tensorflow prefers channels_last (axis=-1)

    for layer in range(nb_layers-1):   # add more layers than just the first
        model.add(Conv2D(nb_filters, kernel_size, padding='same'))
        model.add(MaxPooling2D(pool_size=pool_size))
        model.add(Activation('elu'))
        model.add(Dropout(cl_dropout))
        #model.add(BatchNormalization(axis=-1))  # ELU authors recommend no BatchNorm. I confirm.

    model.add(Flatten())
    model.add(Dense(128))            # 128 is 'arbitrary' for now
    #model.add(Activation('relu'))   # relu (no BN) works ok here, however ELU works a bit better...
    model.add(Activation('elu'))
    model.add(Dropout(dl_dropout))
    model.add(Dense(nb_classes))
    model.add(Activation("softmax",name="Output"))
    return model


# Used for when you want to use weights from a previously-trained model,
# with a different set/number of output classes 
Author: drscotthawley, Project: panotti, Lines: 43, Source: models.py
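
A hedged construction sketch for the model above; the spectrogram dimensions and class count are placeholder assumptions, not values from the panotti source:

# Hypothetical batch: 32 spectrograms, 96 mel bins, 173 time frames, 1 audio channel
model = Panotti_CNN(X_shape=(32, 96, 173, 1), nb_classes=10)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()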

Example 10: _build_model

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def _build_model(self, nfeatures, architecture, supervised, confusion, confusion_incr, confusion_max, 
        activations, noise, droprate, coral_layer_idx, optimizer):

        self.inp_a = tf.placeholder(tf.float32, shape=(None, nfeatures))
        self.inp_b = tf.placeholder(tf.float32, shape=(None, nfeatures))
        self.labels_a = tf.placeholder(tf.float32, shape=(None, 1))
        self.lr = tf.placeholder(tf.float32, [], name='lr')

        nlayers = len(architecture)
        layers_a = [self.inp_a]
        layers_b = [self.inp_b]

        for i, nunits in enumerate(architecture):

            print(nunits, end=' ')
            if i in coral_layer_idx: print('(CORAL)')
            else: print()

            if isinstance(nunits, int):
                shared_layer = Dense(nunits, activation='linear')
            elif nunits == 'noise':
                shared_layer = GaussianNoise(noise)
            elif nunits == 'bn':
                shared_layer = BatchNormalization()
            elif nunits == 'drop':
                shared_layer = Dropout(droprate)
            elif nunits == 'act':
                if activations == 'prelu':
                    shared_layer = PReLU()
                elif activations == 'elu':
                    shared_layer = ELU()
                elif activations == 'leakyrelu':
                    shared_layer = LeakyReLU()
                else:
                    shared_layer = Activation(activations)

            layers_a += [shared_layer(layers_a[-1])]
            layers_b += [shared_layer(layers_b[-1])] 
Author: erlendd, Project: ddan, Lines: 40, Source: deepcoral.py

Example 11: _build_model

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def _build_model(self, arch, activations, nfeatures, droprate, noise, optimizer):

        self.layers = [Input(shape=(nfeatures,))]

        for i, nunits in enumerate(arch):

            if isinstance(nunits, int):
                self.layers += [Dense(nunits, activation='linear')(self.layers[-1])]

            elif nunits == 'noise':
                self.layers += [GaussianNoise(noise)(self.layers[-1])]

            elif nunits == 'bn':
                self.layers += [BatchNormalization()(self.layers[-1])]
            
            elif nunits == 'abn':
                self.layers += [AdaBN()(self.layers[-1])]

            elif nunits == 'drop':
                self.layers += [Dropout(droprate)(self.layers[-1])]

            elif nunits == 'act':
                if activations == 'prelu':
                    self.layers += [PReLU()(self.layers[-1])]
                elif activations == 'elu':
                    self.layers += [ELU()(self.layers[-1])]
                elif activations == 'leakyrelu':
                    self.layers += [LeakyReLU()(self.layers[-1])]
                else:
                    self.layers += [Activation(activations)(self.layers[-1])]

            else:
                print('Unrecognised layer {}, type: {}'.format(nunits, type(nunits)))

        self.layers += [Dense(1, activation='sigmoid')(self.layers[-1])]

        self.model = Model(self.layers[0], self.layers[-1])
        self.model.compile(loss='binary_crossentropy', optimizer=optimizer) 
Author: erlendd, Project: ddan, Lines: 40, Source: adabn.py
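
Examples 10 through 13 share the same tiny layer DSL: integers in the architecture list become linear Dense layers, and string tokens insert the corresponding layer ('bn' -> BatchNormalization, 'drop' -> Dropout, 'noise' -> GaussianNoise, 'act' -> the configured activation, which yields ELU() when activations == 'elu'). An illustrative spec under those rules; the particular values are placeholders:

arch = [128, 'bn', 'act', 'drop', 64, 'bn', 'act']  # two ELU-activated blocks when activations == 'elu'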

Example 12: _build

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def _build(self, input_layer, arch, activations, noise, droprate, l2reg):
        print('Building network layers...')
        network = [input_layer]
        for nunits in arch:
            print(nunits)
            if isinstance(nunits, int):
                network += [Dense(nunits, activation='linear', kernel_regularizer=l1_l2(l1=0.01, l2=l2reg))(network[-1])]

            elif nunits == 'noise':
                network += [GaussianNoise(noise)(network[-1])]

            elif nunits == 'bn':
                network += [BatchNormalization()(network[-1])]

            elif nunits == 'drop':
                network += [Dropout(droprate)(network[-1])]

            elif nunits == 'act':
                if activations == 'prelu':
                    network += [PReLU()(network[-1])]
                elif activations == 'leakyrelu':
                    network += [LeakyReLU()(network[-1])]
                elif activations == 'elu':
                    network += [ELU()(network[-1])]
                else:
                    print('Activation({})'.format(activations))
                    network += [Activation(activations)(network[-1])]
        return network 
Author: erlendd, Project: ddan, Lines: 30, Source: dann.py

Example 13: _build_model

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def _build_model(self, nfeatures, architecture, supervised, confusion, confusion_incr, confusion_max, 
        activations, noise, droprate, mmd_layer_idx, optimizer):

        self.inp_a = tf.placeholder(tf.float32, shape=(None, nfeatures))
        self.inp_b = tf.placeholder(tf.float32, shape=(None, nfeatures))
        self.labels_a = tf.placeholder(tf.float32, shape=(None, 1))

        nlayers = len(architecture)
        layers_a = [self.inp_a]
        layers_b = [self.inp_b]

        for i, nunits in enumerate(architecture):

            print(nunits, end=' ')
            if i in mmd_layer_idx: print('(MMD)')
            else: print()

            if isinstance(nunits, int):
                shared_layer = Dense(nunits, activation='linear')
            elif nunits == 'noise':
                shared_layer = GaussianNoise(noise)
            elif nunits == 'bn':
                shared_layer = BatchNormalization()
            elif nunits == 'drop':
                shared_layer = Dropout(droprate)
            elif nunits == 'act':
                if activations == 'prelu':
                    shared_layer = PReLU()
                elif activations == 'elu':
                    shared_layer = ELU()
                elif activations == 'leakyrelu':
                    shared_layer = LeakyReLU()
                else:
                    shared_layer = Activation(activations)

            layers_a += [shared_layer(layers_a[-1])]
            layers_b += [shared_layer(layers_b[-1])] 
Author: erlendd, Project: ddan, Lines: 39, Source: ddcn.py

Example 14: activation

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def activation(layer, layer_in, layerId, tensor=True):
    out = {}
    if (layer['info']['type'] == 'ReLU'):
        if ('negative_slope' in layer['params'] and layer['params']['negative_slope'] != 0):
            out[layerId] = LeakyReLU(alpha=layer['params']['negative_slope'])
        else:
            out[layerId] = Activation('relu')
    elif (layer['info']['type'] == 'PReLU'):
        out[layerId] = PReLU()
    elif (layer['info']['type'] == 'ELU'):
        out[layerId] = ELU(alpha=layer['params']['alpha'])
    elif (layer['info']['type'] == 'ThresholdedReLU'):
        out[layerId] = ThresholdedReLU(theta=layer['params']['theta'])
    elif (layer['info']['type'] == 'Sigmoid'):
        out[layerId] = Activation('sigmoid')
    elif (layer['info']['type'] == 'TanH'):
        out[layerId] = Activation('tanh')
    elif (layer['info']['type'] == 'Softmax'):
        out[layerId] = Activation('softmax')
    elif (layer['info']['type'] == 'SELU'):
        out[layerId] = Activation('selu')
    elif (layer['info']['type'] == 'Softplus'):
        out[layerId] = Activation('softplus')
    elif (layer['info']['type'] == 'Softsign'):
        out[layerId] = Activation('softsign')
    elif (layer['info']['type'] == 'HardSigmoid'):
        out[layerId] = Activation('hard_sigmoid')
    elif (layer['info']['type'] == 'Linear'):
        out[layerId] = Activation('linear')
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out 
Author: Cloud-CV, Project: Fabrik, Lines: 34, Source: layers_export.py
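
A hedged call sketch for the exporter function above. The dictionary mirrors the keys the function reads (layer['info']['type'] and layer['params']); the exact schema Fabrik passes in may differ, and the layer id is a placeholder:

from keras.layers import Input

inp = Input(shape=(8,))
layer = {'info': {'type': 'ELU'}, 'params': {'alpha': 1.0}}
out = activation(layer, [inp], 'elu1')  # {'elu1': ELU(alpha=1.0) applied to inp}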

Example 15: build_model

# Required import: from keras.layers import advanced_activations [as alias]
# Or: from keras.layers.advanced_activations import ELU [as alias]
def build_model(n_classes):

    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    maxpool_const = 4
    m_sizes = [5, 80]
    n_sizes = [1, 3, 5]
    n_filters = [128, 64, 32]

    layers = list()

    for m_i in m_sizes:
        for i, n_i in enumerate(n_sizes):
            x = Convolution2D(n_filters[i], m_i, n_i,
                              border_mode='same',
                              init='he_normal',
                              W_regularizer=l2(1e-5),
                              name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
            x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS/maxpool_const, SEGMENT_DUR/maxpool_const),
                             name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)
            layers.append(x)

    x = merge(layers, mode='concat', concat_axis=channel_axis)

    x = Dropout(0.25)(x)
    x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5), border_mode='same', name='conv2')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn2')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)

    x = Dropout(0.25)(x)
    x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5), border_mode='same', name='conv3')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn3')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(x)

    x = Flatten(name='flatten')(x)
    x = Dropout(0.5)(x)
    x = Dense(256, init='he_normal', W_regularizer=l2(1e-5), name='fc1')(x)
    x = ELU()(x)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
    model = Model(melgram_input, x)

    return model 
Author: Veleslavia, Project: EUSIPCO2017, Lines: 55, Source: multilayer.py


Note: The keras.layers.advanced_activations.ELU examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright in the source code remains with the original authors, so consult each project's License before distributing or using it. Do not reproduce without permission.