本文整理汇总了Python中keras.regularizers.l1_l2方法的典型用法代码示例。如果您正苦于以下问题:Python regularizers.l1_l2方法的具体用法?Python regularizers.l1_l2怎么用?Python regularizers.l1_l2使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.regularizers
的用法示例。
在下文中一共展示了regularizers.l1_l2方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_variational_encoder
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def get_variational_encoder(node_num, d,
                            n_units, nu1, nu2,
                            activation_fn):
    """Build the encoder half of a variational autoencoder for graph embedding.

    Args:
        node_num: dimensionality of the input vector (one row of the adjacency
            matrix, i.e. the number of nodes).
        d: dimensionality of the latent embedding.
        n_units: widths of the intermediate hidden layers; the network depth
            is ``len(n_units) + 1``.
        nu1: L1 regularization coefficient for the dense kernels.
        nu2: L2 regularization coefficient for the dense kernels.
        activation_fn: activation used on all hidden layers.

    Returns:
        A Keras ``Model`` mapping x -> [y[K], y[K+1], y[K+2]], i.e. the two
        latent parameter heads and the sample drawn from them via ``sampling``.
    """
    K = len(n_units) + 1
    # Input
    x = Input(shape=(node_num,))
    # Encoder layers: y[0] is the input, y[1..K-1] the hidden stack,
    # y[K]/y[K+1] the two d-dim latent heads, y[K+2] the reparameterized
    # sample (presumably mean / log-variance — confirm against `sampling`).
    y = [None] * (K + 3)
    y[0] = x
    for i in range(K - 1):
        # NOTE: `W_regularizer` is the Keras 1 name and was removed in
        # Keras 2; `kernel_regularizer` is the supported keyword.
        y[i + 1] = Dense(n_units[i], activation=activation_fn,
                         kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[i])
    y[K] = Dense(d, activation=activation_fn,
                 kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[K - 1])
    # Second latent head is intentionally unregularized (see commented variant).
    y[K + 1] = Dense(d)(y[K - 1])
    # y[K + 1] = Dense(d, kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[K - 1])
    y[K + 2] = Lambda(sampling, output_shape=(d,))([y[K], y[K + 1]])
    # Encoder model (Keras 2 spelling: `inputs=`/`outputs=`, not `input=`).
    encoder = Model(inputs=x, outputs=[y[K], y[K + 1], y[K + 2]])
    return encoder
示例2: get_decoder
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def get_decoder(node_num, d,
                n_units, nu1, nu2,
                activation_fn):
    """Build the decoder that maps a d-dim embedding back to a node_num vector.

    Args:
        node_num: dimensionality of the reconstructed output.
        d: dimensionality of the latent embedding (the decoder's input).
        n_units: widths of the intermediate hidden layers, mirrored from the
            encoder; depth is ``len(n_units) + 1``.
        nu1: L1 regularization coefficient for the dense kernels.
        nu2: L2 regularization coefficient for the dense kernels.
        activation_fn: activation used on all layers, including the output.

    Returns:
        A Keras ``Model`` mapping the embedding y to the reconstruction x_hat.
    """
    K = len(n_units) + 1
    # Input
    y = Input(shape=(d,))
    # Decoder layers, built from the deepest (y_hat[K] = latent input)
    # back to y_hat[0] = reconstruction.
    y_hat = [None] * (K + 1)
    y_hat[K] = y
    for i in range(K - 1, 0, -1):
        # `kernel_regularizer` replaces the Keras-1-only `W_regularizer`,
        # which was removed in Keras 2.
        y_hat[i] = Dense(n_units[i - 1],
                         activation=activation_fn,
                         kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y_hat[i + 1])
    y_hat[0] = Dense(node_num, activation=activation_fn,
                     kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y_hat[1])
    # Output
    x_hat = y_hat[0]  # decoder's output is also the actual output
    # Decoder Model (Keras 2 spelling: `inputs=`/`outputs=`).
    decoder = Model(inputs=y, outputs=x_hat)
    return decoder
示例3: regression
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def regression(X, Y, epochs, reg_mode):
    """Fit a one-layer Keras regression model on (X, Y).

    Args:
        X: training features, any array-like convertible by ``np.array``.
        Y: training targets, same leading dimension as X.
        epochs: number of training epochs.
        reg_mode: one of 'linear' (MSE), 'logistic' (sigmoid + binary
            cross-entropy), or 'regularized' (logistic with L1+L2 kernel
            regularization at 0.01 each).

    Returns:
        Tuple ``(model, out)`` of the fitted model and the Keras History
        object from ``fit`` (33% of the data held out for validation).

    Raises:
        ValueError: if ``reg_mode`` is not one of the three supported modes
            (previously this fell through silently and crashed inside fit).
    """
    x, y = np.array(X), np.array(Y)
    model = Sequential()
    if reg_mode == 'linear':
        model.add(Dense(1, input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='mse')
    elif reg_mode == 'logistic':
        model.add(Dense(1, activation='sigmoid', input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='binary_crossentropy')
    elif reg_mode == 'regularized':
        reg = l1_l2(l1=0.01, l2=0.01)
        # `kernel_regularizer` replaces the Keras-1-only `W_regularizer`.
        model.add(Dense(1, activation='sigmoid', kernel_regularizer=reg, input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='binary_crossentropy')
    else:
        raise ValueError("reg_mode must be 'linear', 'logistic' or 'regularized', got %r" % (reg_mode,))
    # `epochs=` replaces the deprecated `nb_epoch=` removed in Keras 2.
    out = model.fit(x, y, epochs=epochs, verbose=0, validation_split=.33)
    return model, out
示例4: fCreateMNet_Block
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def fCreateMNet_Block(input_t, channels, kernel_size=(3, 3), type=1, forwarding=True, l1_reg=0.0, l2_reg=1e-6):
    """Stack of `type` regularized Conv2D + ReLU layers, optionally
    concatenated with the block input along the channel axis (axis=1).

    Returns the output tensor of the block.
    """
    def _conv(tensor):
        # One he_normal-initialized, same-padded 2D convolution with
        # L1/L2 kernel regularization; activation is applied by the caller.
        return Conv2D(channels,
                      kernel_size=kernel_size,
                      kernel_initializer='he_normal',
                      weights=None,
                      padding='same',
                      strides=(1, 1),
                      kernel_regularizer=l1_l2(l1_reg, l2_reg),
                      )(tensor)

    tower_t = Activation('relu')(_conv(input_t))
    # `type` controls the depth: one conv above plus (type - 1) more here.
    for _ in range(1, type):
        tower_t = Activation('relu')(_conv(tower_t))
    if forwarding:
        # Forward the block input alongside the conv output (channels-first).
        tower_t = concatenate([tower_t, input_t], axis=1)
    return tower_t
示例5: fCreateVNet_Block
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def fCreateVNet_Block(input_t, channels, type=1, kernel_size=(3, 3, 3), l1_reg=0.0, l2_reg=1e-6, iPReLU=0, dr_rate=0):
    """Stack of `type` [Dropout -> regularized Conv3D -> activation] units,
    always concatenated with the block input along axis 1 (channels-first).

    Returns the output tensor of the block.
    """
    def _unit(tensor):
        # Dropout, then a he_normal same-padded 3D convolution with L1/L2
        # kernel regularization, then the configured activation (PReLU or
        # otherwise, decided by fGetActivation via iPReLU).
        tensor = Dropout(dr_rate)(tensor)
        tensor = Conv3D(channels,
                        kernel_size=kernel_size,
                        kernel_initializer='he_normal',
                        weights=None,
                        padding='same',
                        strides=(1, 1, 1),
                        kernel_regularizer=l1_l2(l1_reg, l2_reg),
                        )(tensor)
        return fGetActivation(tensor, iPReLU=iPReLU)

    tower_t = _unit(input_t)
    # `type` controls the depth: one unit above plus (type - 1) more here.
    for _ in range(1, type):
        tower_t = _unit(tower_t)
    # Residual-style forwarding of the block input.
    tower_t = concatenate([tower_t, input_t], axis=1)
    return tower_t
示例6: fCreateMNet_Block
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def fCreateMNet_Block(input_t, channels, kernel_size=(3,3), type=1, forwarding=True,l1_reg=0.0, l2_reg=1e-6 ):
    """Stack of `type` regularized Conv2D + ReLU layers; when `forwarding`
    is true the block input is concatenated onto the output (axis=1,
    channels-first).

    Returns the output tensor of the block.
    """
    # Settings shared by every convolution in this block; a fresh l1_l2
    # regularizer is created per layer below.
    shared = dict(kernel_size=kernel_size,
                  kernel_initializer='he_normal',
                  weights=None,
                  padding='same',
                  strides=(1, 1))
    out = Conv2D(channels, kernel_regularizer=l1_l2(l1_reg, l2_reg), **shared)(input_t)
    out = Activation('relu')(out)
    # Depth is `type`: one conv above plus (type - 1) more here.
    for _ in range(type - 1):
        out = Conv2D(channels, kernel_regularizer=l1_l2(l1_reg, l2_reg), **shared)(out)
        out = Activation('relu')(out)
    if forwarding:
        out = concatenate([out, input_t], axis=1)
    return out
示例7: fConvIncep
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def fConvIncep(input_t, KB=64, layernum=2, l1_reg=0.0, l2_reg=1e-6, iPReLU=0):
    """Stem Conv3D followed by (layernum - 1) InceptionBlocks, with the
    block input concatenated onto the result along axis 1.

    Returns the output tensor of the block.
    """
    # Stem: KB filters, [2,2,1] kernel, he_normal init, same padding,
    # L1/L2 kernel regularization.
    stem = Conv3D(filters=KB,
                  kernel_size=[2, 2, 1],
                  kernel_initializer='he_normal',
                  weights=None,
                  padding='same',
                  strides=(1, 1, 1),
                  kernel_regularizer=l1_l2(l1_reg, l2_reg),
                  )(input_t)
    incep = fGetActivation(stem, iPReLU=iPReLU)
    for _ in range(layernum - 1):
        incep = InceptionBlock(incep, l1_reg=l1_reg, l2_reg=l2_reg)
    # Forward the raw block input alongside the inception output.
    return concatenate([incep, input_t], axis=1)
示例8: InceptionBlock
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def InceptionBlock(inp, l1_reg=0.0, l2_reg=1e-6):
    """Four-branch 3D inception block (1x1x1, 3x3x3, 5x5x5, and pooled
    branches), concatenated along the channel axis (axis=1, channels-first).

    Filter counts come from fgetKernelNumber(): KN[0] for all bottleneck
    1x1x1 convolutions, KN[2] for the 3x3x3 branch, KN[1] for the 5x5x5.
    """
    KN = fgetKernelNumber()

    def _conv(tensor, filters, size):
        # Shared branch convolution: he_normal init, same padding, ReLU,
        # L1/L2 kernel regularization (fresh regularizer per layer).
        return Conv3D(filters=filters, kernel_size=size,
                      kernel_initializer='he_normal', weights=None,
                      padding='same', strides=(1, 1, 1),
                      kernel_regularizer=l1_l2(l1_reg, l2_reg),
                      activation='relu')(tensor)

    branch1 = _conv(inp, KN[0], (1, 1, 1))
    branch3 = _conv(_conv(inp, KN[0], (1, 1, 1)), KN[2], (3, 3, 3))
    branch5 = _conv(_conv(inp, KN[0], (1, 1, 1)), KN[1], (5, 5, 5))
    branchpool = MaxPooling3D(pool_size=(3, 3, 3), strides=(1, 1, 1),
                              padding='same', data_format='channels_first')(inp)
    branchpool = _conv(branchpool, KN[0], (1, 1, 1))
    return concatenate([branch1, branch3, branch5, branchpool], axis=1)
示例9: _build_model
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def _build_model(self, input_shape, **kwargs):
    """Assemble a Sequential MLP from self.model_params.

    self.model_params['layers'] gives the layer count; every other entry
    is a sequence indexed per layer (neurons, l1, l2, batch_norm,
    activation, dropout). The first Dense layer also receives input_shape.

    Returns the (uncompiled) Keras Sequential model.
    """
    K.clear_session()
    model = Sequential()
    for idx in range(self.model_params['layers']):
        # Per-layer slice of every hyperparameter sequence.
        cfg = {key: val[idx] for key, val in self.model_params.items() if key != 'layers'}
        dense_kwargs = {'kernel_regularizer': l1_l2(l1=cfg['l1'], l2=cfg['l2'])}
        if idx == 0:
            # Only the first layer declares the input shape.
            dense_kwargs['input_shape'] = input_shape
        model.add(Dense(cfg['neurons'], **dense_kwargs))
        if cfg['batch_norm']:
            model.add(BatchNormalization())
        model.add(Activation(cfg['activation']))
        model.add(Dropout(cfg['dropout']))
    return model
示例10: __init__
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def __init__(self,
             units: int,
             num_filters: int,
             ngram_filter_sizes: Tuple[int]=(2, 3, 4, 5),
             conv_layer_activation: str='relu',
             l1_regularization: float=None,
             l2_regularization: float=None,
             **kwargs):
    """Configure a CNN sentence encoder.

    Args:
        units: output dimensionality of the final projection layer.
        num_filters: number of convolution filters per n-gram size.
        ngram_filter_sizes: convolution window widths, one conv layer each.
        conv_layer_activation: activation applied to each conv layer.
        l1_regularization: optional L1 coefficient for the regularizer.
        l2_regularization: optional L2 coefficient for the regularizer.
        **kwargs: forwarded to the Keras Layer base class.
    """
    self.num_filters = num_filters
    self.ngram_filter_sizes = ngram_filter_sizes
    self.output_dim = units
    self.conv_layer_activation = conv_layer_activation
    self.l1_regularization = l1_regularization
    self.l2_regularization = l2_regularization
    # Factory (not an instance): each layer built later gets its own
    # fresh l1_l2 regularizer object.
    # NOTE(review): l1_l2 is called with the raw values even when both
    # are None — confirm the installed Keras version accepts None here.
    self.regularizer = lambda: l1_l2(l1=self.l1_regularization, l2=self.l2_regularization)
    # These are member variables that will be defined during self.build().
    self.convolution_layers = None
    self.max_pooling_layers = None
    self.projection_layer = None
    # Encoder expects rank-3 input: (batch, timesteps, embedding dim) —
    # presumably; confirm against build()/call().
    self.input_spec = [InputSpec(ndim=3)]
    super(CNNEncoder, self).__init__(**kwargs)
示例11: get_decoder
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def get_decoder(node_num, d, K,
                n_units, nu1, nu2,
                activation_fn):
    """Build a K-layer decoder mapping a d-dim embedding to a node_num vector.

    Args:
        node_num: dimensionality of the reconstructed output.
        d: dimensionality of the latent embedding (the decoder's input).
        K: total number of Dense layers (n_units supplies the K-1 hidden widths).
        n_units: widths of the intermediate hidden layers.
        nu1: L1 regularization coefficient for the dense kernels.
        nu2: L2 regularization coefficient for the dense kernels.
        activation_fn: activation used on all layers, including the output.

    Returns:
        A Keras ``Model`` mapping the embedding y to the reconstruction x_hat.
    """
    # Input
    y = Input(shape=(d,))
    # Decoder layers, built from the deepest (y_hat[K] = latent input)
    # back to y_hat[0] = reconstruction.
    y_hat = [None] * (K + 1)
    y_hat[K] = y
    for i in range(K - 1, 0, -1):
        # `kernel_regularizer` replaces the Keras-1-only `W_regularizer`,
        # which was removed in Keras 2.
        y_hat[i] = Dense(n_units[i - 1],
                         activation=activation_fn,
                         kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y_hat[i + 1])
    y_hat[0] = Dense(node_num, activation=activation_fn,
                     kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y_hat[1])
    # Output
    x_hat = y_hat[0]  # decoder's output is also the actual output
    # Decoder Model (Keras 2 spelling: `inputs=`/`outputs=`).
    decoder = Model(inputs=y, outputs=x_hat)
    return decoder
示例12: build_output
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def build_output(self):
    """Attach the negative-binomial output head and assemble the models.

    Builds the regularized 'mean' Dense head on top of the decoder output,
    wires in a constant dispersion parameter, scales the mean by the size
    factors, and sets self.loss, self.extra_models, self.model and
    self.encoder as side effects. Returns None.
    """
    # Mean head: MeanAct-activated Dense with L1/L2 kernel regularization.
    mean = Dense(self.output_size, activation=MeanAct, kernel_initializer=self.init,
                 kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                 name='mean')(self.decoder_output)
    # Plug in dispersion parameters via fake dispersion layer
    disp = ConstantDispersionLayer(name='dispersion')
    mean = disp(mean)
    # Scale the mean column-wise by the size-factor input.
    output = ColwiseMultLayer([mean, self.sf_layer])
    # NB loss parameterized by the layer's exponentiated dispersion.
    nb = NB(disp.theta_exp)
    self.loss = nb.loss
    # Lazily evaluates the fitted dispersion values as a flat array.
    self.extra_models['dispersion'] = lambda :K.function([], [nb.theta])([])[0].squeeze()
    # Auxiliary models exposing intermediate tensors for inspection.
    self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
    self.extra_models['decoded'] = Model(inputs=self.input_layer, outputs=self.decoder_output)
    # Full model takes the counts input plus size factors.
    self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=output)
    self.encoder = self.get_encoder()
示例13: get_encoder
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def get_encoder(node_num, d, n_units, nu1, nu2, activation_fn):
    """Build an encoder mapping a node_num-dim input to a d-dim embedding.

    Args:
        node_num: dimensionality of the input vector.
        d: dimensionality of the latent embedding.
        n_units: widths of the intermediate hidden layers; the network depth
            is ``len(n_units) + 1``.
        nu1: L1 regularization coefficient for the dense kernels.
        nu2: L2 regularization coefficient for the dense kernels.
        activation_fn: activation used on all layers.

    Returns:
        A Keras ``Model`` mapping the input x to the embedding y[K].
    """
    K = len(n_units) + 1
    # Input
    x = Input(shape=(node_num,))
    # Encoder layers
    y = [None] * (K + 1)
    y[0] = x  # y[0] is assigned the input
    for i in range(K - 1):
        # `kernel_regularizer` replaces the Keras-1-only `W_regularizer`,
        # which was removed in Keras 2.
        y[i + 1] = Dense(n_units[i], activation=activation_fn,
                         kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[i])
    y[K] = Dense(d, activation=activation_fn,
                 kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y[K - 1])
    # Encoder model (Keras 2 spelling: `inputs=`/`outputs=`).
    encoder = Model(inputs=x, outputs=y[K])
    return encoder
示例14: __generate_regulariser
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def __generate_regulariser(self, l1_value, l2_value):
    """Return the keras regularizer matching the supplied coefficients:
    l1_l2 when both are truthy, l1 or l2 when only one is, else None."""
    if l1_value:
        # With l1 set, presence of l2 decides between the combined and
        # the pure-L1 regularizer.
        return l1_l2(l1=l1_value, l2=l2_value) if l2_value else l1(l1_value)
    if l2_value:
        return l2(l2_value)
    return None
示例15: test_arg_l1_reg_and_l2_reg
# 需要导入模块: from keras import regularizers [as 别名]
# 或者: from keras.regularizers import l1_l2 [as 别名]
def test_arg_l1_reg_and_l2_reg(self, model):
    """Model built with an explicit combined l1_l2 regularizer passes the
    shared build-and-assert check (`model` is a test fixture)."""
    model._regularizer = l1_l2(0.01, 0.01)
    self._build_and_assert(model)