

Python constraints.maxnorm Method Code Examples

This article collects typical usage examples of the Python method keras.constraints.maxnorm. If you are wondering what constraints.maxnorm does, how to call it, or what real usage looks like, the curated code examples below should help. You can also explore further usage examples from the keras.constraints module, where the method is defined.


The following presents 13 code examples of the constraints.maxnorm method, sorted by popularity by default.
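
Before the project examples, here is a minimal, self-contained sketch of the typical call site. It assumes the Keras 2.x argument names (kernel_constraint); several of the older snippets below use the Keras 1 spelling W_constraint instead.

import numpy as np
from keras.constraints import maxnorm
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
# Rescale each column of the kernel so its L2 norm never exceeds 3.
model.add(Dense(16, input_dim=8, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(np.random.rand(32, 8), np.random.randint(0, 2, size=(32, 1)),
          epochs=1, batch_size=8, verbose=0)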

Example 1: test_maxnorm

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def test_maxnorm(self):
        # self.some_values and self.example_array are fixtures defined on the
        # test class; assert_allclose comes from numpy.testing.
        from keras.constraints import maxnorm

        for m in self.some_values:
            norm_instance = maxnorm(m)
            normed = norm_instance(self.example_array)
            assert (np.all(normed.eval() < m))

        # a more explicit example
        norm_instance = maxnorm(2.0)
        x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
        x_normed_target = np.array([[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0], [2./np.sqrt(3), 2./np.sqrt(3), 2./np.sqrt(3)]]).T
        x_normed_actual = norm_instance(x).eval()
        assert_allclose(x_normed_actual, x_normed_target) 
Author: lllcho | Project: CAPTCHA-breaking | Lines: 16 | Source: test_constraints.py
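
For reference, the column-wise rescaling that maxnorm(m) applies can be reproduced in plain NumPy. This is a sketch following the Keras implementation (the 1e-7 term stands in for Keras's epsilon); it reproduces x_normed_target from the test above.

import numpy as np

def maxnorm_reference(w, m, axis=0):
    # Rescale every slice along `axis` so its L2 norm is clipped to at most m.
    norms = np.sqrt(np.sum(np.square(w), axis=axis, keepdims=True))
    desired = np.clip(norms, 0, m)
    return w * desired / (1e-7 + norms)

x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
print(maxnorm_reference(x, 2.0))  # matches x_normed_target up to ~1e-7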

Example 2: create_model

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def create_model(neurons=1):
	# create model
	model = Sequential()
	model.add(Dense(neurons, input_dim=8, init='uniform', activation='softplus', W_constraint=maxnorm(4)))
	model.add(Dropout(0.1))
	model.add(Dense(1, init='uniform', activation='sigmoid'))
	# Compile model
	model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
	return model
# fix random seed for reproducibility 
Author: 54chen | Project: deep | Lines: 12 | Source: test7.py

Example 3: create_model

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def create_model(dropout_rate=0.0, weight_constraint=0):
	# create model
	model = Sequential()
	model.add(Dense(12, input_dim=8, init='uniform', activation='softplus', W_constraint=maxnorm(weight_constraint)))
	model.add(Dropout(dropout_rate))
	model.add(Dense(1, init='uniform', activation='sigmoid'))
	# Compile model
	model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
	return model
# fix random seed for reproducibility 
Author: 54chen | Project: deep | Lines: 12 | Source: test6.py
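
Both create_model builders above expose their tunable values (neurons in Example 2; dropout_rate and weight_constraint in Example 3) as arguments so they can be wrapped for a scikit-learn grid search. A hedged usage sketch, assuming Keras 2's keras.wrappers.scikit_learn wrapper and training arrays X, Y prepared elsewhere; the neurons grid for Example 2 works the same way.

import numpy as np
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

np.random.seed(7)  # fix random seed for reproducibility
model = KerasClassifier(build_fn=create_model, epochs=100, batch_size=10, verbose=0)
param_grid = {'dropout_rate': [0.0, 0.1, 0.2], 'weight_constraint': [1, 2, 3, 4, 5]}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=1)
# grid_result = grid.fit(X, Y)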

Example 4: Build_Model_CNN_Image

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def Build_Model_CNN_Image(shape, nclasses, sparse_categorical,
                          min_hidden_layer_cnn, max_hidden_layer_cnn, min_nodes_cnn,
                          max_nodes_cnn, random_optimizor, dropout):
    """
    Build a randomly sized CNN image model.
    nclasses is the number of classes; shape is the image shape (w, h, channels).
    """

    model = Sequential()
    values = list(range(min_nodes_cnn,max_nodes_cnn))
    Layers = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
    Layer = random.choice(Layers)
    Filter = random.choice(values)
    model.add(Conv2D(Filter, (3, 3), padding='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Conv2D(Filter, (3, 3)))
    model.add(Activation('relu'))

    for i in range(0,Layer):
        Filter = random.choice(values)
        model.add(Conv2D(Filter, (3, 3),padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(nclasses,activation='softmax',kernel_constraint=maxnorm(3)))
    model_tmp = model
    if sparse_categorical:
        model.compile(loss='sparse_categorical_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy'])
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy'])

    return model,model_tmp 
Author: kk7nc | Project: RMDL | Lines: 43 | Source: BuildModel.py
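
A hedged call sketch for the builder above, using illustrative argument values. Note that optimizors is an RMDL helper defined elsewhere in BuildModel.py that returns a Keras optimizer, so this snippet only runs inside that module.

# Build a randomized CNN for 32x32 RGB images and 10 classes (illustrative values).
model, model_tmp = Build_Model_CNN_Image(shape=(32, 32, 3), nclasses=10,
                                         sparse_categorical=True,
                                         min_hidden_layer_cnn=1, max_hidden_layer_cnn=3,
                                         min_nodes_cnn=32, max_nodes_cnn=128,
                                         random_optimizor=True, dropout=0.25)
model.summary()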

Example 5: feed_forward_net

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def feed_forward_net(input, output, hidden_layers=[64, 64], activations='relu',
                     dropout_rate=0., l2=0., constrain_norm=False):
    '''
    Helper function for building a Keras feed forward network.

    input:  Keras Input object appropriate for the data. e.g. input=Input(shape=(20,))
    output: Function representing final layer for the network that maps from the last
            hidden layer to output.
            e.g. output = Dense(10, activation='softmax') for 10-class
            classification, or output = Dense(1, activation='linear') for
            regression.
    '''
    state = input
    if isinstance(activations, str):
        activations = [activations] * len(hidden_layers)
    
    for h, a in zip(hidden_layers, activations):
        if l2 > 0.:
            w_reg = keras.regularizers.l2(l2)
        else:
            w_reg = None
        const = maxnorm(2) if constrain_norm else None
        state = Dense(h, activation=a, kernel_regularizer=w_reg, kernel_constraint=const)(state)
        if dropout_rate > 0.:
            state = Dropout(dropout_rate)(state)
    return output(state) 
Author: jhartford | Project: DeepIV | Lines: 28 | Source: architectures.py
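
A hedged usage sketch for feed_forward_net, assuming the Keras 2 functional API and that the helper above is in scope.

from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(20,))
out = feed_forward_net(inp, Dense(1, activation='linear'),
                       hidden_layers=[64, 64], dropout_rate=0.1,
                       l2=0.001, constrain_norm=True)
model = Model(inputs=inp, outputs=out)
model.compile(optimizer='adam', loss='mse')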

Example 6: convnet

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def convnet(input, output, dropout_rate=0., input_shape=(1, 28, 28), batch_size=100,
            l2_rate=0.001, nb_epoch=12, img_rows=28, img_cols=28, nb_filters=64,
            pool_size=(2, 2), kernel_size=(3, 3), activations='relu', constrain_norm=False):
    '''
    Helper function for building a Keras convolutional network.

    input:  Keras Input object appropriate for the data. e.g. input=Input(shape=(20,))
    output: Function representing final layer for the network that maps from the last
            hidden layer to output.
            e.g. output = Dense(10, activation='softmax') for 10-class
            classification, or output = Dense(1, activation='linear') for
            regression.
    '''
    const = maxnorm(2) if constrain_norm else None

    state = Convolution2D(nb_filters, kernel_size, padding='valid',
                          input_shape=input_shape, activation=activations,
                          kernel_regularizer=l2(l2_rate), kernel_constraint=const)(input)

    state = Convolution2D(nb_filters, kernel_size,
                          activation=activations, kernel_regularizer=l2(l2_rate),
                          kernel_constraint=const)(state)

    state = MaxPooling2D(pool_size=pool_size)(state)

    state = Flatten()(state)

    if dropout_rate > 0.:
        state = Dropout(dropout_rate)(state)
    state = Dense(128, activation=activations, kernel_regularizer=l2(l2_rate), kernel_constraint=const)(state)

    if dropout_rate > 0.:
        state = Dropout(dropout_rate)(state)
    return output(state) 
Author: jhartford | Project: DeepIV | Lines: 36 | Source: architectures.py
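
A hedged usage sketch for convnet. The default input_shape=(1, 28, 28) suggests MNIST-sized, channels-first images, so the backend is assumed to be configured for channels_first.

from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(1, 28, 28))
out = convnet(inp, Dense(10, activation='softmax'),
              dropout_rate=0.25, constrain_norm=True)
model = Model(inputs=inp, outputs=out)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])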

Example 7: build_model

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def build_model():
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding  = Embedding(max_features, embedding_dims,
                  weights=[np.matrix(W)], input_length=maxlen,
                  name='embedding')(main_input)

    embedding = Dropout(0.50)(embedding)

    conv4 = Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_length=2,
                             name='maxConv4')(conv4)

    conv5 = Convolution1D(nb_filter=nb_filter,
                          filter_length=5,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_length=2,
                            name='maxConv5')(conv5)

    x = merge([maxConv4, maxConv5], mode='concat')

    x = Dropout(0.15)(x)

    x = RNN(rnn_output_size)(x)

    x = Dense(hidden_dims, activation='relu', init='he_normal',
              W_constraint = maxnorm(3), b_constraint=maxnorm(3),
              name='mlp')(x)

    x = Dropout(0.10, name='drop')(x)

    output = Dense(nb_classes, init='he_normal',
                   activation='softmax', name='output')(x)

    model = Model(input=main_input, output=output)
    model.compile(loss={'output':'categorical_crossentropy'},
                optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                metrics=["accuracy"])
    return model 
Author: ultimate010 | Project: crnn | Lines: 48 | Source: sst1_cnn_rnn.py

Example 8: build_model

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def build_model():
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding  = Embedding(max_features, embedding_dims,
                  weights=[np.matrix(W)], input_length=maxlen,
                  name='embedding')(main_input)

    embedding = Dropout(0.50)(embedding)

    conv4 = Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_length=2,
                             name='maxConv4')(conv4)

    conv5 = Convolution1D(nb_filter=nb_filter,
                          filter_length=5,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_length=2,
                            name='maxConv5')(conv5)

    x = merge([maxConv4, maxConv5], mode='concat')

    x = Dropout(0.15)(x)

    x = RNN(rnn_output_size)(x)

    x = Dense(hidden_dims, activation='relu', init='he_normal',
              W_constraint = maxnorm(3), b_constraint=maxnorm(3),
              name='mlp')(x)

    x = Dropout(0.10, name='drop')(x)

    output = Dense(1, init='he_normal',
                   activation='sigmoid', name='output')(x)

    model = Model(input=main_input, output=output)
    model.compile(loss={'output':'binary_crossentropy'},
                optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                metrics=["accuracy"])
    return model 
Author: ultimate010 | Project: crnn | Lines: 48 | Source: sst2_cnn_rnn_kera1.py

Example 9: build_model

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def build_model():
        # i and folds come from the surrounding cross-validation loop in mr_cnn_rnn.py.
        print('Build model...%d of %d' % (i + 1, folds))
        main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
        embedding  = Embedding(max_features, embedding_dims,
                      weights=[np.matrix(W)], input_length=maxlen,
                      name='embedding')(main_input)

        embedding = Dropout(0.50)(embedding)


        conv4 = Convolution1D(nb_filter=nb_filter,
                              filter_length=4,
                              border_mode='valid',
                              activation='relu',
                              subsample_length=1,
                              name='conv4')(embedding)
        maxConv4 = MaxPooling1D(pool_length=2,
                                 name='maxConv4')(conv4)

        conv5 = Convolution1D(nb_filter=nb_filter,
                              filter_length=5,
                              border_mode='valid',
                              activation='relu',
                              subsample_length=1,
                              name='conv5')(embedding)
        maxConv5 = MaxPooling1D(pool_length=2,
                                name='maxConv5')(conv5)

        x = merge([maxConv4, maxConv5], mode='concat')

        x = Dropout(0.15)(x)

        x = RNN(rnn_output_size)(x)

        x = Dense(hidden_dims, activation='relu', init='he_normal',
                  W_constraint = maxnorm(3), b_constraint=maxnorm(3),
                  name='mlp')(x)

        x = Dropout(0.10, name='drop')(x)

        output = Dense(1, init='he_normal',
                       activation='sigmoid', name='output')(x)

        model = Model(input=main_input, output=output)
        model.compile(loss={'output':'binary_crossentropy'},
                    optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                    metrics=["accuracy"])
        return model 
Author: ultimate010 | Project: crnn | Lines: 50 | Source: mr_cnn_rnn.py

Example 10: build_model

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def build_model():
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding  = Embedding(max_features, embedding_dims,
                  weights=[np.matrix(W)], input_length=maxlen,
                  name='embedding')(main_input)

    embedding = Dropout(0.50)(embedding)

    conv4 = Conv1D(filters=nb_filter,
                          kernel_size=4,
                          padding='valid',
                          activation='relu',
                          strides=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_size=2,
                             name='maxConv4')(conv4)

    conv5 = Conv1D(filters=nb_filter,
                          kernel_size=5,
                          padding='valid',
                          activation='relu',
                          strides=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_size=2,
                            name='maxConv5')(conv5)

    # x = merge([maxConv4, maxConv5], mode='concat')
    x = keras.layers.concatenate([maxConv4, maxConv5])

    x = Dropout(0.15)(x)

    x = RNN(rnn_output_size)(x)

    x = Dense(hidden_dims, activation='relu', kernel_initializer='he_normal',
              kernel_constraint = maxnorm(3), bias_constraint=maxnorm(3),
              name='mlp')(x)

    x = Dropout(0.10, name='drop')(x)

    output = Dense(1, kernel_initializer='he_normal',
                   activation='sigmoid', name='output')(x)

    model = Model(inputs=main_input, outputs=output)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  # optimizer=Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                  # optimizer=Adagrad(lr=0.01, epsilon=1e-08, decay=1e-4),
                  metrics=["accuracy"])
    return model 
Author: ultimate010 | Project: crnn | Lines: 51 | Source: sst2_cnn_rnn.py
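
Example 10 is the Keras 2 port of the Keras 1 builders in Examples 7 through 9; comparing the snippets gives the argument renames used in the port.

# Keras 1 name (Examples 7-9)                 -> Keras 2 name (Example 10)
#   Convolution1D(nb_filter=, filter_length=, border_mode=, subsample_length=)
#       -> Conv1D(filters=, kernel_size=, padding=, strides=)
#   MaxPooling1D(pool_length=)                -> MaxPooling1D(pool_size=)
#   merge([a, b], mode='concat')              -> keras.layers.concatenate([a, b])
#   Dense(init=, W_constraint=, b_constraint=)
#       -> Dense(kernel_initializer=, kernel_constraint=, bias_constraint=)
#   Model(input=, output=)                    -> Model(inputs=, outputs=)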

Example 11: AddDense

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def AddDense(x, size, activation, dropout_rate, output=False, momentum=MOMENTUM,
    constraint=3,
    bn=True,
    kr=0.,
    ar=0.,
    perm_drop=False):
    '''
    Add a single dense block with batchnorm and activation.

    Parameters:
    -----------
    x: input tensor
    size: number of dense neurons
    activation: activation fn to use
    dropout_rate: dropout to use after activation

    Returns:
    --------
    x: output tensor
    '''

    # kr / ar may be given as a float (converted to an l2 / l1 penalty, or dropped
    # when zero) or as an already constructed Keras regularizer, which is passed
    # through unchanged.
    if isinstance(kr, float) and kr > 0:
        kr = keras.regularizers.l2(kr)
    elif isinstance(kr, float):
        kr = None

    if isinstance(ar, float) and ar > 0:
        ar = keras.regularizers.l1(ar)
    elif isinstance(ar, float):
        ar = None

    if constraint is not None:
        x = Dense(size, kernel_constraint=maxnorm(constraint),
                  kernel_regularizer=kr,
                  activity_regularizer=ar,)(x)
    else:
        x = Dense(size,
                  kernel_regularizer=kr,
                  activity_regularizer=ar,)(x)

    if not output and bn:
        #x = BatchNormalization(momentum=momentum)(x)
        x = InstanceNormalization()(x)

    if activation == "lrelu":
        x = LeakyReLU(alpha=0.2)(x)
    else:
        x = Activation(activation)(x)
    if dropout_rate > 0:
        if perm_drop:
            x = PermanentDropout(dropout_rate)(x)
        else:
            x = Dropout(dropout_rate)(x)
    return x 
Author: jhu-lcsr | Project: costar_plan | Lines: 60 | Source: planner.py
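
A hedged usage sketch for AddDense inside a functional model. MOMENTUM, InstanceNormalization, and PermanentDropout are defined elsewhere in costar_plan's planner.py, so this only runs inside that module; the layer sizes are illustrative.

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(64,))
x = AddDense(inp, size=128, activation='lrelu', dropout_rate=0.1)
x = AddDense(x, size=1, activation='sigmoid', dropout_rate=0., output=True)
model = Model(inputs=inp, outputs=x)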

Example 12: build_model

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def build_model(max_length=1000,
                nb_filters=64,
                kernel_size=3,
                pool_size=2,
                regularization=0.01,
                weight_constraint=2.,
                dropout_prob=0.4,
                clear_session=True):
    if clear_session:
        K.clear_session()

    model = Sequential()
    model.add(Embedding(
        embeddings.shape[0],
        embeddings.shape[1],
        input_length=max_length,
        trainable=False,
        weights=[embeddings]))

    model.add(Conv1D(nb_filters, kernel_size, activation='relu'))
    model.add(Conv1D(nb_filters, kernel_size, activation='relu'))
    model.add(MaxPooling1D(pool_size))

    model.add(Dropout(dropout_prob))

    model.add(Conv1D(nb_filters * 2, kernel_size, activation='relu'))
    model.add(Conv1D(nb_filters * 2, kernel_size, activation='relu'))
    model.add(MaxPooling1D(pool_size))

    model.add(Dropout(dropout_prob))

    model.add(GlobalAveragePooling1D())
    model.add(Dense(1,
        kernel_regularizer=l2(regularization),
        kernel_constraint=maxnorm(weight_constraint),
        activation='sigmoid'))

    model.compile(
        loss='binary_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'])

    return model 
Author: bogdan-kulynych | Project: textfool | Lines: 45 | Source: model.py
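
A hedged usage sketch: embeddings is a module-level (vocab_size, dim) NumPy matrix of pretrained word vectors loaded elsewhere in model.py, and X_train / y_train stand for padded index sequences and binary labels prepared by the caller.

model = build_model(max_length=1000, nb_filters=64,
                    weight_constraint=2., dropout_prob=0.4)
model.summary()
# model.fit(X_train, y_train, batch_size=32, epochs=5, validation_split=0.1)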

Example 13: build_attention_RNN

# Module to import: from keras import constraints [as alias]
# Or: from keras.constraints import maxnorm [as alias]
def build_attention_RNN(embeddings, classes, max_length, unit=LSTM, cells=64,
                        layers=1, **kwargs):
    # parameters
    bi = kwargs.get("bidirectional", False)
    noise = kwargs.get("noise", 0.)
    dropout_words = kwargs.get("dropout_words", 0)
    dropout_rnn = kwargs.get("dropout_rnn", 0)
    dropout_rnn_U = kwargs.get("dropout_rnn_U", 0)
    dropout_attention = kwargs.get("dropout_attention", 0)
    dropout_final = kwargs.get("dropout_final", 0)
    attention = kwargs.get("attention", None)
    final_layer = kwargs.get("final_layer", False)
    clipnorm = kwargs.get("clipnorm", 1)
    loss_l2 = kwargs.get("loss_l2", 0.)
    lr = kwargs.get("lr", 0.001)

    model = Sequential()
    model.add(embeddings_layer(max_length=max_length, embeddings=embeddings,
                               trainable=False, masking=True, scale=False,
                               normalize=False))

    if noise > 0:
        model.add(GaussianNoise(noise))
    if dropout_words > 0:
        model.add(Dropout(dropout_words))

    for i in range(layers):
        rs = (layers > 1 and i < layers - 1) or attention
        model.add(get_RNN(unit, cells, bi, return_sequences=rs,
                          dropout_U=dropout_rnn_U))
        if dropout_rnn > 0:
            model.add(Dropout(dropout_rnn))

    if attention == "memory":
        model.add(AttentionWithContext())
        if dropout_attention > 0:
            model.add(Dropout(dropout_attention))
    elif attention == "simple":
        model.add(Attention())
        if dropout_attention > 0:
            model.add(Dropout(dropout_attention))

    if final_layer:
        model.add(MaxoutDense(100, W_constraint=maxnorm(2)))
        # model.add(Highway())
        if dropout_final > 0:
            model.add(Dropout(dropout_final))

    model.add(Dense(classes, activity_regularizer=l2(loss_l2)))
    model.add(Activation('softmax'))

    model.compile(optimizer=Adam(clipnorm=clipnorm, lr=lr),
                  loss='categorical_crossentropy')
    return model 
Author: cbaziotis | Project: datastories-semeval2017-task4 | Lines: 56 | Source: nn_models.py
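
A hedged usage sketch with illustrative hyperparameters. embeddings is a pretrained word-embedding matrix, and embeddings_layer, get_RNN, Attention, and AttentionWithContext come from the same datastories-semeval2017-task4 codebase, so this only runs alongside nn_models.py.

from keras.layers import LSTM

model = build_attention_RNN(embeddings, classes=3, max_length=50,
                            unit=LSTM, cells=150, layers=2,
                            bidirectional=True, attention="simple",
                            noise=0.3, dropout_words=0.3, dropout_rnn=0.3,
                            dropout_rnn_U=0.3, dropout_attention=0.5,
                            clipnorm=1, lr=0.001, loss_l2=0.0001)
model.summary()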


Note: The keras.constraints.maxnorm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from projects contributed by open-source developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not reproduce this article without permission.