

Python noise.GaussianNoise Method Code Examples

This article collects typical usage examples of the Python method keras.layers.noise.GaussianNoise, gathered from open-source projects. If you are unsure what noise.GaussianNoise does, how to call it, or what real-world usage looks like, the selected examples below should help. You can also explore other usage examples from the keras.layers.noise module.


Eight code examples of the noise.GaussianNoise method are shown below, sorted by popularity by default.
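Before the repository excerpts, here is a minimal, self-contained sketch of the layer in isolation. It is written against the Keras 2 signature, where the argument is named stddev (older Keras 1.x code, including several examples below, passes the value positionally or as sigma); the layer adds zero-centred Gaussian noise during training and is a no-op at inference time.

from keras.models import Sequential
from keras.layers import Dense
from keras.layers.noise import GaussianNoise

# Minimal sketch: inject zero-mean Gaussian noise (stddev=0.1) on the inputs
# of a small regression network. GaussianNoise is only active during training.
model = Sequential()
model.add(GaussianNoise(0.1, input_shape=(20,)))
model.add(Dense(64, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')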

Example 1: makecnn

# Required import: from keras.layers import noise [as alias]
# Or: from keras.layers.noise import GaussianNoise [as alias]
def makecnn(in_shape, K):
    model = Sequential()
    model.add(
        Convolution2D(
            32, 3, 3, border_mode='same', input_shape=in_shape[1:]))
    model.add(SReLU())
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))
    model.add(GaussianNoise(1))
    model.add(GaussianDropout(0.4))
    model.add(Convolution2D(32, 3, 3, border_mode='same'))
    model.add(SReLU())
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))
    model.add(GaussianNoise(1))
    model.add(GaussianDropout(0.4))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(SReLU())
    model.add(Dense(64))
    # model.add(SReLU())
    model.add(Dense(1))
    model.add(Activation('linear'))
    return model 
Developer: RivuletStudio, Project: rivuletpy, Lines: 24, Source: riveal.py
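This excerpt uses the Keras 1.x API (Convolution2D, border_mode, dim_ordering, and the SReLU advanced activation). Purely as a rough, untested sketch, one of the blocks above rewritten in Keras 2 syntax might look like the following, with a plain ReLU standing in for SReLU:

from keras.models import Sequential
from keras.layers import (Conv2D, MaxPooling2D, GaussianNoise,
                          GaussianDropout, Flatten, Dense)

def makecnn_keras2(in_shape):
    # Keras 2 sketch of one block above: Conv2D replaces Convolution2D,
    # padding replaces border_mode, data_format replaces dim_ordering.
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu',
                     input_shape=in_shape[1:]))
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_last'))
    model.add(GaussianNoise(1.0))
    model.add(GaussianDropout(0.4))
    model.add(Flatten())
    model.add(Dense(1, activation='linear'))
    return model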

Example 2: test_GaussianNoise

# Required import: from keras.layers import noise [as alias]
# Or: from keras.layers.noise import GaussianNoise [as alias]
def test_GaussianNoise():
    layer_test(noise.GaussianNoise,
               kwargs={'stddev': 1.},
               input_shape=(3, 2, 3)) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 6, Source: noise_test.py

Example 3: model

# Required import: from keras.layers import noise [as alias]
# Or: from keras.layers.noise import GaussianNoise [as alias]
def model(data, hidden_layers, hidden_neurons, output_file, validation_split=0.9):


	train_n = int(validation_split * len(data))
	batch_size = 50
	train_data = data[:train_n,:]
	val_data = data[train_n:,:]

	input_sh = Input(shape=(data.shape[1],))
	encoded = noise.GaussianNoise(0.2)(input_sh)
	for i in range(hidden_layers):
		encoded = Dense(hidden_neurons[i], activation='relu')(encoded)
		encoded = noise.GaussianNoise(0.2)(encoded)

	decoded = Dense(hidden_neurons[-2], activation='relu')(encoded)
	for j in range(hidden_layers-3,-1,-1):
		decoded = Dense(hidden_neurons[j], activation='relu')(decoded)
	decoded = Dense(data.shape[1], activation='sigmoid')(decoded)

	autoencoder = Model(input=input_sh, output=decoded)
	autoencoder.compile(optimizer='adadelta', loss='mse')

	checkpointer = ModelCheckpoint(filepath='data/bestmodel' + output_file + ".hdf5", verbose=1, save_best_only=True)
	earlystopper = EarlyStopping(monitor='val_loss', patience=15, verbose=1)

	train_generator = DataGenerator(batch_size)
	train_generator.fit(train_data, train_data)
	val_generator = DataGenerator(batch_size)
	val_generator.fit(val_data, val_data)

	autoencoder.fit_generator(train_generator,
		samples_per_epoch=len(train_data),
		nb_epoch=100,
		validation_data=val_generator,
		nb_val_samples=len(val_data),
		max_q_size=batch_size,
		callbacks=[checkpointer, earlystopper])
	enco = Model(input=input_sh, output=encoded)
	enco.compile(optimizer='adadelta', loss='mse')
	reprsn = enco.predict(data)
	return reprsn 
Developer: MdAsifKhan, Project: DNGR-Keras, Lines: 43, Source: DNGR.py

Example 4: _build_model

# Required import: from keras.layers import noise [as alias]
# Or: from keras.layers.noise import GaussianNoise [as alias]
def _build_model(self, arch, activations, nfeatures, droprate, noise, optimizer):

        self.layers = [Input(shape=(nfeatures,))]

        for i, nunits in enumerate(arch):

            if isinstance(nunits, int):
                self.layers += [Dense(nunits, activation='linear')(self.layers[-1])]

            elif nunits == 'noise':
                self.layers += [GaussianNoise(noise)(self.layers[-1])]

            elif nunits == 'bn':
                self.layers += [BatchNormalization()(self.layers[-1])]
            
            elif nunits == 'abn':
                self.layers += [AdaBN()(self.layers[-1])]

            elif nunits == 'drop':
                self.layers += [Dropout(droprate)(self.layers[-1])]

            elif nunits == 'act':
                if activations == 'prelu':
                    self.layers += [PReLU()(self.layers[-1])]
                elif activations == 'elu':
                    self.layers += [ELU()(self.layers[-1])]
                elif activations == 'leakyrelu':
                    self.layers += [LeakyReLU()(self.layers[-1])]
                else:
                    self.layers += [Activation(activations)(self.layers[-1])]

            else:
                print('Unrecognised layer {}, type: {}'.format(nunits, type(nunits)))

        self.layers += [Dense(1, activation='sigmoid')(self.layers[-1])]

        self.model = Model(self.layers[0], self.layers[-1])
        self.model.compile(loss='binary_crossentropy', optimizer=optimizer) 
Developer: erlendd, Project: ddan, Lines: 40, Source: adabn.py
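The arch argument mixes integers (Dense layer widths) with string tokens that the branches above dispatch on. Purely as an illustrative, hypothetical configuration (these exact values are not from the original project):

# Hypothetical arch spec for the builder above; tokens follow the branches it checks:
#   int -> Dense, 'noise' -> GaussianNoise, 'bn' -> BatchNormalization,
#   'act' -> activation, 'drop' -> Dropout
arch = [128, 'noise', 'bn', 'act', 'drop',
        64, 'bn', 'act', 'drop']
# e.g. self._build_model(arch, activations='prelu', nfeatures=30,
#                        droprate=0.5, noise=0.1, optimizer='adam')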

Example 5: build_shallow_weight

# Required import: from keras.layers import noise [as alias]
# Or: from keras.layers.noise import GaussianNoise [as alias]
def build_shallow_weight(channels, width, height, output_size, nb_classes):
	# input
	inputs = Input(shape=(channels, height, width))
	# 1 conv
	conv1_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu', 
		W_regularizer=l2(0.01))(inputs)
	bn1 = BatchNormalization(mode=0, axis=1)(conv1_1)
	pool1 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn1)
	gn1 = GaussianNoise(0.5)(pool1)
	drop1 = SpatialDropout2D(0.5)(gn1)
	# 2 conv
	conv2_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu',
		W_regularizer=l2(0.01))(gn1)
	bn2 = BatchNormalization(mode=0, axis=1)(conv2_1)
	pool2 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn2)
	gn2 = GaussianNoise(0.5)(pool2)
	drop2 = SpatialDropout2D(0.5)(gn2)
	# 3 conv
	conv3_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu',
		W_regularizer=l2(0.01))(drop2)
	bn3 = BatchNormalization(mode=0, axis=1)(conv3_1)
	pool3 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn3)
	gn3 = GaussianNoise(0.5)(pool3)
	drop3 = SpatialDropout2D(0.5)(gn3)
	# 4 conv
	conv4_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu',
		W_regularizer=l2(0.01))(gn3)
	bn4 = BatchNormalization(mode=0, axis=1)(conv4_1)
	pool4 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn4)
	gn4 = GaussianNoise(0.5)(pool4)
	drop4 = SpatialDropout2D(0.5)(gn4)
	# flaten
	flat = Flatten()(gn4)
	# 1 dense
	dense1 = Dense(8, activation='relu', W_regularizer=l2(0.1))(flat)
	bn6 = BatchNormalization(mode=0, axis=1)(dense1)
	drop6 = Dropout(0.5)(bn6)
	# output
	out = []
	for i in range(output_size):
		out.append(Dense(nb_classes, activation='softmax')(bn6))
	if output_size > 1:
		merged_out = merge(out, mode='concat')
		shaped_out = Reshape((output_size, nb_classes))(merged_out)
		sample_weight_mode = 'temporal'
	else:
		shaped_out = out
		sample_weight_mode = None
	model = Model(input=[inputs], output=shaped_out)
	model.summary()
	model.compile(loss='categorical_crossentropy',
				  optimizer='adam',
				  metrics=[categorical_accuracy_per_sequence],
				  sample_weight_mode = sample_weight_mode
				  )

	return model 
Developer: xingjian-f, Project: DeepLearning-OCR, Lines: 59, Source: shallow_weight.py

Example 6: Regularize

# Required import: from keras.layers import noise [as alias]
# Or: from keras.layers.noise import GaussianNoise [as alias]
def Regularize(layer, params,
               shared_layers=False,
               name='',
               apply_noise=True,
               apply_batch_normalization=True,
               apply_prelu=True,
               apply_dropout=True,
               apply_l2=True,
               trainable=True):
    """
    Apply the regularization specified in parameters to the layer
    :param layer: Layer to regularize
    :param params: Params specifying the regularizations to apply
    :param shared_layers: Boolean indicating if we want to get the used layers for applying to a shared-layers model.
    :param name: Name prepended to regularizer layer
    :param apply_noise: If False, noise won't be applied, independently of params
    :param apply_dropout: If False, dropout won't be applied, independently of params
    :param apply_prelu: If False, prelu won't be applied, independently of params
    :param apply_batch_normalization: If False, batch normalization won't be applied, independently of params
    :param apply_l2: If False, l2 normalization won't be applied, independently of params
    :return: Regularized layer
    """
    shared_layers_list = []

    if apply_noise and params.get('USE_NOISE', False):
        shared_layers_list.append(GaussianNoise(params.get('NOISE_AMOUNT', 0.01), name=name + '_gaussian_noise', trainable=trainable))

    if apply_batch_normalization and params.get('USE_BATCH_NORMALIZATION', False):
        if params.get('WEIGHT_DECAY'):
            l2_gamma_reg = l2(params['WEIGHT_DECAY'])
            l2_beta_reg = l2(params['WEIGHT_DECAY'])
        else:
            l2_gamma_reg = None
            l2_beta_reg = None

        bn_mode = params.get('BATCH_NORMALIZATION_MODE', 0)

        shared_layers_list.append(BatchNormalization(mode=bn_mode,
                                                     gamma_regularizer=l2_gamma_reg,
                                                     beta_regularizer=l2_beta_reg,
                                                     name=name + '_batch_normalization',
                                                     trainable=trainable))

    if apply_prelu and params.get('USE_PRELU', False):
        shared_layers_list.append(PReLU(name=name + '_PReLU', trainable=trainable))

    if apply_dropout and params.get('DROPOUT_P', 0) > 0:
        shared_layers_list.append(Dropout(params.get('DROPOUT_P', 0.5), name=name + '_dropout', trainable=trainable))

    if apply_l2 and params.get('USE_L2', False):
        shared_layers_list.append(Lambda(L2_norm, name=name + '_L2_norm', trainable=trainable))

    # Apply all the previously built shared layers
    for l in shared_layers_list:
        layer = l(layer)
    result = layer

    # Return result or shared layers too
    if shared_layers:
        return result, shared_layers_list
    return result 
Developer: sheffieldnlp, Project: deepQuest, Lines: 63, Source: regularize.py
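For reference, a hypothetical params dictionary and call that would make this helper wrap a layer in GaussianNoise (the keys match the lookups above; the surrounding tensor names are placeholders, not from the original project):

# Hypothetical usage sketch of the Regularize helper above:
params = {'USE_NOISE': True, 'NOISE_AMOUNT': 0.05,
          'USE_BATCH_NORMALIZATION': True, 'DROPOUT_P': 0.3}
hidden = Dense(256)(input_tensor)                     # placeholder preceding layer
hidden = Regularize(hidden, params, name='hidden_1')  # applies noise, then BN, then dropout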

Example 7: Regularize

# Required import: from keras.layers import noise [as alias]
# Or: from keras.layers.noise import GaussianNoise [as alias]
def Regularize(layer, params,
               shared_layers=False,
               name='',
               apply_noise=True,
               apply_batch_normalization=True,
               apply_prelu=True,
               apply_dropout=True,
               apply_l2=True):
    """
    Apply the regularization specified in parameters to the layer
    :param layer: Layer to regularize
    :param params: Params specifying the regularizations to apply
    :param shared_layers: Boolean indicating if we want to get the used layers for applying to a shared-layers model.
    :param name: Name prepended to regularizer layer
    :param apply_noise: If False, noise won't be applied, independently of params
    :param apply_dropout: If False, dropout won't be applied, independently of params
    :param apply_prelu: If False, prelu won't be applied, independently of params
    :param apply_batch_normalization: If False, batch normalization won't be applied, independently of params
    :param apply_l2: If False, l2 normalization won't be applied, independently of params
    :return: Regularized layer
    """
    shared_layers_list = []

    if apply_noise and params.get('USE_NOISE', False):
        shared_layers_list.append(GaussianNoise(params.get('NOISE_AMOUNT', 0.01), name=name + '_gaussian_noise'))

    if apply_batch_normalization and params.get('USE_BATCH_NORMALIZATION', False):
        if params.get('WEIGHT_DECAY'):
            l2_gamma_reg = l2(params['WEIGHT_DECAY'])
            l2_beta_reg = l2(params['WEIGHT_DECAY'])
        else:
            l2_gamma_reg = None
            l2_beta_reg = None

        bn_mode = params.get('BATCH_NORMALIZATION_MODE', 0)

        shared_layers_list.append(BatchNormalization(mode=bn_mode,
                                                     gamma_regularizer=l2_gamma_reg,
                                                     beta_regularizer=l2_beta_reg,
                                                     name=name + '_batch_normalization'))

    if apply_prelu and params.get('USE_PRELU', False):
        shared_layers_list.append(PReLU(name=name + '_PReLU'))

    if apply_dropout and params.get('DROPOUT_P', 0) > 0:
        shared_layers_list.append(Dropout(params.get('DROPOUT_P', 0.5), name=name + '_dropout'))

    if apply_l2 and params.get('USE_L2', False):
        shared_layers_list.append(Lambda(L2_norm, name=name + '_L2_norm'))

    # Apply all the previously built shared layers
    for l in shared_layers_list:
        layer = l(layer)
    result = layer

    # Return result or shared layers too
    if shared_layers:
        return result, shared_layers_list
    return result 
Developer: sheffieldnlp, Project: deepQuest, Lines: 61, Source: regularize.py

Example 8: minst_attention

# Required import: from keras.layers import noise [as alias]
# Or: from keras.layers.noise import GaussianNoise [as alias]
def minst_attention(inc_noise=False, attention=True):
    #make layers
    inputs = Input(shape=(1,image_size,image_size),name='input')

    conv_1a = Convolution2D(32, 3, 3,activation='relu',name='conv_1')
    maxp_1a = MaxPooling2D((3, 3), strides=(2,2),name='convmax_1')
    norm_1a = crosschannelnormalization(name="convpool_1")
    zero_1a = ZeroPadding2D((2,2),name='convzero_1')

    conv_2a = Convolution2D(32,3,3,activation='relu',name='conv_2')
    maxp_2a = MaxPooling2D((3, 3), strides=(2,2),name='convmax_2')
    norm_2a = crosschannelnormalization(name="convpool_2")
    zero_2a = ZeroPadding2D((2,2),name='convzero_2')

    dense_1a = Lambda(global_average_pooling,output_shape=global_average_pooling_shape,name='dense_1')
    dense_2a = Dense(10, activation = 'softmax', init='uniform',name='dense_2')

    #make actual model
    if inc_noise:
        inputs_noise = noise.GaussianNoise(2.5)(inputs)
        input_pad = ZeroPadding2D((1,1),input_shape=(1,image_size,image_size),name='input_pad')(inputs_noise)
    else:
        input_pad = ZeroPadding2D((1,1),input_shape=(1,image_size,image_size),name='input_pad')(inputs)

    conv_1 = conv_1a(input_pad)
    conv_1 = maxp_1a(conv_1)
    conv_1 = norm_1a(conv_1)
    conv_1 = zero_1a(conv_1)

    conv_2_x = conv_2a(conv_1)
    conv_2 = maxp_2a(conv_2_x)
    conv_2 = norm_2a(conv_2)
    conv_2 = zero_2a(conv_2)
    conv_2 = Dropout(0.5)(conv_2)

    dense_1 = dense_1a(conv_2)
    dense_2 = dense_2a(dense_1)

    conv_shape1 = Lambda(change_shape1,output_shape=(32,),name='chg_shape')(conv_2_x)
    find_att = dense_2a(conv_shape1)

    if attention:
        find_att = Lambda(attention_control,output_shape=att_shape,name='att_con')([find_att,dense_2])
    else:
        find_att = Lambda(no_attention_control,output_shape=att_shape,name='att_con')([find_att,dense_2])

    zero_3a = ZeroPadding2D((1,1),name='convzero_3')(find_att)
    apply_attention  = Merge(mode='mul',name='attend')([zero_3a,conv_1])

    conv_3 = conv_2a(apply_attention)
    conv_3 = maxp_2a(conv_3)
    conv_3 = norm_2a(conv_3)
    conv_3 = zero_2a(conv_3)

    dense_3 = dense_1a(conv_3)
    dense_4 = dense_2a(dense_3)

    model = Model(input=inputs,output=dense_4)

    return model 
Developer: dvatterott, Project: BMM_attentional_CNN, Lines: 62, Source: BMM_attention_model.py


Note: The keras.layers.noise.GaussianNoise examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.