

Python regularizers.l1 Method Code Examples

This article collects typical usage examples of the keras.regularizers.l1 method in Python. If you are wondering what regularizers.l1 does, how to call it, or where to find usage examples, the hand-picked code samples below should help. You can also explore further usage examples from the keras.regularizers module.


The sections below present 15 code examples of the regularizers.l1 method, ordered by popularity by default.
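Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below, and assuming a Keras 2.x installation) of the two most common ways regularizers.l1 appears: as a kernel_regularizer, which adds factor * sum(|w|) over the layer's weights to the training loss, and as an activity_regularizer, which applies the same penalty to the layer's output.

# Minimal illustrative sketch (not from the examples below), assuming Keras 2.x.
from keras import regularizers
from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(20,))
# kernel_regularizer: adds 0.001 * sum(|w|) over the Dense kernel to the loss
hidden = Dense(64, activation='relu',
               kernel_regularizer=regularizers.l1(0.001))(inputs)
# activity_regularizer: the same L1 penalty, applied to the layer's activations
outputs = Dense(1, activation='sigmoid',
                activity_regularizer=regularizers.l1(1e-5))(hidden)

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='binary_crossentropy')
print(len(model.losses))  # 2 -- one regularization loss term per regularized tensor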

Example 1: create_model

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def create_model(self, input_dim):
        encoding_dim = 14
        input_layer = Input(shape=(input_dim,))

        encoder = Dense(encoding_dim, activation="tanh",
                        activity_regularizer=regularizers.l1(10e-5))(input_layer)
        encoder = Dense(encoding_dim // 2, activation="relu")(encoder)

        decoder = Dense(encoding_dim // 2, activation='tanh')(encoder)
        decoder = Dense(input_dim, activation='relu')(decoder)

        model = Model(inputs=input_layer, outputs=decoder)
        model.compile(optimizer='adam',
                      loss='mean_squared_error',
                      metrics=['accuracy'])

        return model 
Developer ID: chen0040, Project: keras-anomaly-detection, Lines of code: 19, Source: feedforward.py

Example 2: test_activity_regularization

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def test_activity_regularization():
    layer = layers.ActivityRegularization(l1=0.01, l2=0.01)

    # test in functional API
    x = layers.Input(shape=(3,))
    z = layers.Dense(2)(x)
    y = layer(z)
    model = Model(x, y)
    model.compile('rmsprop', 'mse')

    model.predict(np.random.random((2, 3)))

    # test serialization
    model_config = model.get_config()
    model = Model.from_config(model_config)
    model.compile('rmsprop', 'mse') 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 18, Source: core_test.py

Example 3: test_regularizer

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def test_regularizer(layer_class):
    layer = layer_class(units, return_sequences=False, weights=None,
                        input_shape=(timesteps, embedding_dim),
                        kernel_regularizer=regularizers.l1(0.01),
                        recurrent_regularizer=regularizers.l1(0.01),
                        bias_regularizer='l2')
    layer.build((None, None, embedding_dim))
    assert len(layer.losses) == 3
    assert len(layer.cell.losses) == 3

    layer = layer_class(units, return_sequences=False, weights=None,
                        input_shape=(timesteps, embedding_dim),
                        activity_regularizer='l2')
    assert layer.activity_regularizer
    x = K.variable(np.ones((num_samples, timesteps, embedding_dim)))
    layer(x)
    assert len(layer.cell.get_losses_for(x)) == 0
    assert len(layer.get_losses_for(x)) == 1 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 20, Source: recurrent_test.py

Example 4: test_W_reg

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def test_W_reg(self):
        for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
            model = create_model(weight_reg=reg)
            model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
            model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
            model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0) 
Developer ID: lllcho, Project: CAPTCHA-breaking, Lines of code: 8, Source: test_regularizers.py

Example 5: DL_single_run

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def DL_single_run(xtr, ytr, units1, units2, dro, lr, l1r, alpha, batchsize, numepochs):
    #Data preparation: create X, E and TM where X=input vector, E=censoring status and T=survival time. Apply formatting (X and T as 'float32', E as 'int32')
    X_tr, E_tr, TM_tr = prepare_data(xtr, ytr[:,0,np.newaxis], ytr[:,1])

    #Arrange data into minibatches (based on specified batch size), and within each minibatch, sort in descending order of survival/censoring time (see explanation of Cox PH loss function definition)
    X_tr, E_tr, TM_tr, _ = sort4minibatches(X_tr, E_tr, TM_tr, batchsize)
    
    #before defining network architecture, clear current computation graph (if one exists), and specify input dimensionality
    K.clear_session()
    inpshape = xtr.shape[1]
    
    #Define Network Architecture
    inputvec= Input(shape=(inpshape,))
    x       = Dropout(dro, input_shape=(inpshape,))(inputvec)
    x       = Dense(units=int(units1), activation='relu', activity_regularizer=l1(l1r))(x)
    encoded = Dense(units=int(units2), activation='relu', name='encoded')(x)
    riskpred= Dense(units=1,  activation='linear', name='predicted_risk')(encoded)
    z       = Dense(units=int(units1),  activation='relu')(encoded)
    decoded = Dense(units=inpshape, activation='linear', name='decoded')(z)

    model = Model(inputs=inputvec, outputs=[decoded,riskpred])
    model.summary()
    
    #Model compilation
    optimdef = Adam(lr = lr)
    model.compile(loss=[keras.losses.mean_squared_error, _negative_log_likelihood], loss_weights=[alpha,1-alpha], optimizer=optimdef, metrics={'decoded':keras.metrics.mean_squared_error})
    
    #Run model
    mlog = model.fit(X_tr, [X_tr,E_tr], batch_size=batchsize, epochs=numepochs, shuffle=False, verbose=1)

    return mlog 
Developer ID: UK-Digital-Heart-Project, Project: 4Dsurvival, Lines of code: 33, Source: trainDL.py

Example 6: __generate_regulariser

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def __generate_regulariser(self, l1_value, l2_value):
		""" Returns keras l1/l2 regulariser"""
		if l1_value and l2_value:
			return l1_l2(l1=l1_value, l2=l2_value)
		elif l1_value and not l2_value:
			return l1(l1_value)
		elif l2_value:
			return l2(l2_value)
		else:
			return None 
Developer ID: mprhode, Project: malware-prediction-rnn, Lines of code: 12, Source: RNN.py

Example 7: _get_regularizer

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def _get_regularizer(regularizer_name, weight):
    if regularizer_name is None:
        return None
    if regularizer_name == 'l1':
        return l1(weight)
    if regularizer_name == 'l2':
        return l2(weight)
    if regularizer_name == 'l1_l2':
        return l1_l2(weight)
    return None 
Developer ID: Donskov7, Project: toxic_comments, Lines of code: 12, Source: models.py
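For context, a hypothetical call site for the dispatch helper in Example 7 might look like the sketch below; the Dense layer, the 0.01 weight, and the assertion are illustrative additions, assuming the _get_regularizer definition above (and its l1/l2/l1_l2 imports) is in scope.

# Hypothetical usage of Example 7's name-to-regularizer dispatch; the layer
# and the 0.01 weight are illustrative, not taken from the original project.
from keras.layers import Dense

reg = _get_regularizer('l1', 0.01)           # returns keras.regularizers.l1(0.01)
layer = Dense(32, activation='relu', kernel_regularizer=reg)
assert _get_regularizer(None, 0.01) is None  # None or unrecognised names fall back to None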

Example 8: test_cosinedense_reg_constraint

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def test_cosinedense_reg_constraint():
    layer_test(core.CosineDense,
               kwargs={'units': 3,
                       'kernel_regularizer': regularizers.l2(0.01),
                       'bias_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.l2(0.01),
                       'kernel_constraint': constraints.MaxNorm(1),
                       'bias_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2)) 
Developer ID: keras-team, Project: keras-contrib, Lines of code: 11, Source: test_core.py

Example 9: get_mlp_model

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def get_mlp_model(self, data_dim, output_classes):
        model = Sequential()
        model.add(Dense(64,
                        input_shape=(data_dim,),
                        #bias_regularizer=regularizers.l1(0.0001),
                        #kernel_regularizer=regularizers.l1(0.001),
                        #activity_regularizer=regularizers.l1(0.001),
                        #kernel_constraint=max_norm(3),
                        activation='relu'))
        """
        model.add(Dropout(0.1))
        model.add(Dense(64,
                        input_shape=(data_dim,),
                        #bias_regularizer=regularizers.l1(0.0001),
                        #kernel_regularizer=regularizers.l1(0.001),
                        #activity_regularizer=regularizers.l1(0.001),
                        kernel_constraint=max_norm(3),
                        activation='relu'))
        """
        model.add(Dropout(0.1))
        model.add(Dense(output_classes,
                        #bias_regularizer=regularizers.l1(0.0001),
                        #kernel_regularizer=regularizers.l1(0.0001),
                        #activity_regularizer=regularizers.l2(0.01),
                        #kernel_constraint=max_norm(3),
                        activation='sigmoid'))
        #model.compile(optimizer='sgd',
        model.compile(optimizer='rmsprop',
                      loss='binary_crossentropy',
                      )
        return model 
Developer ID: plastering, Project: plastering, Lines of code: 33, Source: ir2tagsets.py

Example 10: test_kernel_regularization

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def test_kernel_regularization():
    x_train, y_train = get_data()
    for reg in [regularizers.l1(),
                regularizers.l2(),
                regularizers.l1_l2()]:
        model = create_model(kernel_regularizer=reg)
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        assert len(model.losses) == 1
        model.train_on_batch(x_train, y_train) 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 11, Source: regularizers_test.py

Example 11: test_activity_regularization

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def test_activity_regularization():
    x_train, y_train = get_data()
    for reg in [regularizers.l1(), regularizers.l2()]:
        model = create_model(activity_regularizer=reg)
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        assert len(model.losses) == 1
        model.train_on_batch(x_train, y_train) 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 9, Source: regularizers_test.py

Example 12: test_regularization_shared_layer

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def test_regularization_shared_layer():
    dense_layer = Dense(num_classes,
                        kernel_regularizer=regularizers.l1(),
                        activity_regularizer=regularizers.l1())

    model = create_multi_input_model_from(dense_layer, dense_layer)
    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    assert len(model.losses) == 6 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 10, Source: regularizers_test.py

Example 13: test_regularization_shared_model

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def test_regularization_shared_model():
    dense_layer = Dense(num_classes,
                        kernel_regularizer=regularizers.l1(),
                        activity_regularizer=regularizers.l1())

    input_tensor = Input(shape=(data_dim,))
    dummy_model = Model(input_tensor, dense_layer(input_tensor))

    model = create_multi_input_model_from(dummy_model, dummy_model)
    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    assert len(model.losses) == 6 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 13, Source: regularizers_test.py

Example 14: test_regularization_shared_layer_in_different_models

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def test_regularization_shared_layer_in_different_models():
    shared_dense = Dense(num_classes,
                         kernel_regularizer=regularizers.l1(),
                         activity_regularizer=regularizers.l1())
    models = []
    for _ in range(2):
        input_tensor = Input(shape=(data_dim,))
        unshared_dense = Dense(num_classes, kernel_regularizer=regularizers.l1())
        out = unshared_dense(shared_dense(input_tensor))
        models.append(Model(input_tensor, out))

    model = create_multi_input_model_from(*models)
    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    assert len(model.losses) == 8 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 16, Source: regularizers_test.py

Example 15: test_maxout_dense

# Required module import: from keras import regularizers [as alias]
# Or: from keras.regularizers import l1 [as alias]
def test_maxout_dense():
    layer_test(legacy_layers.MaxoutDense,
               kwargs={'output_dim': 3},
               input_shape=(3, 2))

    layer_test(legacy_layers.MaxoutDense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2)) 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines of code: 15, Source: layers_test.py


Note: The keras.regularizers.l1 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their developers; copyright of the source code belongs to the original authors, and distribution or use should follow each project's License. Do not reproduce without permission.