本文整理匯總了Python中keras.layers.core.Dropout方法的典型用法代碼示例。如果您正苦於以下問題:Python core.Dropout方法的具體用法?Python core.Dropout怎麽用?Python core.Dropout使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類keras.layers.core
的用法示例。
在下文中一共展示了core.Dropout方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: deep_mlp
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def deep_mlp(self):
    """
    Deep Multilayer Perceptron classifier head.

    With ``num_mlp_layers == 0`` only a Dropout(0.5) layer is added.
    Otherwise, adds ``num_mlp_layers`` blocks of
    Dense(mlp_hidden_dim) -> activation -> Dropout(0.5), where the
    activation is chosen by ``self._config.mlp_activation``
    ('elu' | 'leaky_relu' | 'prelu' | any name Activation() accepts).
    """
    if self._config.num_mlp_layers == 0:
        self.add(Dropout(0.5))
    else:
        # BUGFIX: original used Python-2-only ``xrange``; ``range`` works
        # on both Python 2 and 3 and is fine for iteration here.
        for j in range(self._config.num_mlp_layers):
            self.add(Dense(self._config.mlp_hidden_dim))
            if self._config.mlp_activation == 'elu':
                self.add(ELU())
            elif self._config.mlp_activation == 'leaky_relu':
                self.add(LeakyReLU())
            elif self._config.mlp_activation == 'prelu':
                self.add(PReLU())
            else:
                # Fall back to a standard Keras activation by name.
                self.add(Activation(self._config.mlp_activation))
            self.add(Dropout(0.5))
示例2: create
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def create(self):
    """
    Build a recurrent encoder-decoder model over embedded text.

    Pipeline: token embedding (with zero-masking) -> stacked RNN layers ->
    a final recurrent encoder that collapses the sequence to one vector ->
    the vector is repeated ``max_output_time_steps`` times and decoded by a
    recurrent decoder into a per-timestep softmax over ``output_dim`` classes.
    Dropout(0.5) is applied after both encoder and decoder.
    """
    self.textual_embedding(self, mask_zero=True)
    self.stacked_RNN(self)
    # Encoder: return only the last hidden state (return_sequences=False).
    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=self._config.go_backwards))
    self.add(Dropout(0.5))
    # Feed the encoded vector to the decoder once per output time step.
    self.add(RepeatVector(self._config.max_output_time_steps))
    self.add(self._config.recurrent_decoder(
        self._config.hidden_state_dim, return_sequences=True))
    self.add(Dropout(0.5))
    # NOTE(review): TimeDistributedDense is a very old Keras API
    # (pre-1.0); modern code would use TimeDistributed(Dense(...)).
    self.add(TimeDistributedDense(self._config.output_dim))
    self.add(Activation('softmax'))
###
# Multimodal models
###
示例3: model_create
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def model_create(input_shape, num_classes):
    """
    Build and compile a small CNN image classifier.

    Architecture: two 3x3 Conv2D(32)+ReLU blocks, 2x2 max-pooling,
    Dropout(0.5), Dense(128)+ReLU, Dropout(0.5), Dense(num_classes)+softmax.

    :param input_shape: shape of one input sample, e.g. (H, W, C).
    :param num_classes: number of output classes for the softmax layer.
    :return: a compiled ``Sequential`` model (categorical_crossentropy,
             Adadelta, accuracy metric).
    """
    logging.debug('input_shape {}'.format(input_shape))
    model = Sequential()
    # BUGFIX: ``border_mode`` is the Keras 1 kwarg name; the Keras 2 API
    # (which this file already uses via ``Conv2D``) calls it ``padding``.
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    # Use binary_crossentropy instead if there are only 2 classes.
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    return model
示例4: build_3dcnn_model
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def build_3dcnn_model(self, fusion_type, Fusion):
    """
    Build, compile, and plot a fused 2D/3D CNN classifier.

    Chooses a 2-D CNN base when each fusion entry is a single channel,
    otherwise a 3-D CNN base, then appends Dropout(0.5) -> Dense(32, relu)
    -> Dense(classes, softmax) and compiles with Adam.

    :param fusion_type: label used in the saved architecture-plot filename.
    :param Fusion: list of fused input descriptors; its length sets the
                   channel dimension of the input shape.
    :return: the compiled ``Model``.
    """
    if len(Fusion[0]) == 1:
        input_shape = (32, 32, len(Fusion))
        model_in, model = self.cnn_2D(input_shape)
    else:
        input_shape = (32, 32, 5, len(Fusion))
        model_in, model = self.cnn_3D(input_shape)
    model = Dropout(0.5)(model)
    model = Dense(32, activation='relu', name='fc2')(model)
    model = Dense(self.config.classes, activation='softmax', name='fc3')(model)
    # BUGFIX: Keras 2 uses ``inputs``/``outputs``; the Keras 1 names
    # ``input``/``output`` are removed in current releases.
    model = Model(inputs=model_in, outputs=model)
    # Parameter summary (disabled):
    # model.summary()
    plot_model(model, to_file='experiments/img/' + str(Fusion) + fusion_type + r'_model.png', show_shapes=True)
    print(' Saving model Architecture')
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    # model.compile(optimizer=adam, loss=self.mycrossentropy, metrics=['accuracy'])  # improved but unstable
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
示例5: cnn_2D
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def cnn_2D(self, input_shape, modual=''):
    """
    Build a small 2-D convolutional feature extractor (functional API).

    :param input_shape: shape of one sample, e.g. (32, 32, channels).
    :param modual: prefix prepended to layer names, to keep names unique
                   across multiple branches.
    :return: (input_tensor, feature_tensor) where the feature tensor is a
             100-unit ReLU embedding.
    """
    inp = Input(input_shape)
    # 32x32 -> 30x30x6
    x = Conv2D(filters=6, kernel_size=(3, 3), input_shape=input_shape,
               activation='relu', kernel_initializer='he_normal',
               name=modual + 'conv1')(inp)
    # 30x30x6 -> 15x15x6
    x = MaxPooling2D(pool_size=(2, 2))(x)
    # 15x15x6 -> 12x12x8
    x = Conv2D(filters=8, kernel_size=(4, 4),
               activation='relu', kernel_initializer='he_normal',
               name=modual + 'conv2')(x)
    # 12x12x8 -> 6x6x8
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    x = Dropout(0.5)(x)
    features = Dense(100, activation='relu', name=modual + 'fc1')(x)
    return inp, features
示例6: cnn_3D
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def cnn_3D(self, input_shape, modual=''):
    """
    Build a small 3-D convolutional feature extractor (functional API).

    :param input_shape: shape of one sample, e.g. (32, 32, 5, channels).
    :param modual: prefix prepended to layer names, to keep names unique
                   across multiple branches.
    :return: (input_tensor, feature_tensor) where the feature tensor is a
             100-unit ReLU embedding.
    """
    inp = Input(input_shape)
    # 32x32x5 -> 30x30x3x6
    x = Convolution3D(filters=6, kernel_size=(3, 3, 3), input_shape=input_shape,
                      activation='relu', kernel_initializer='he_normal',
                      name=modual + 'conv1')(inp)
    # 30x30x3x6 -> 15x15x3x6 (depth axis not pooled)
    x = MaxPooling3D(pool_size=(2, 2, 1))(x)
    # 15x15x3x6 -> 12x12x1x8
    x = Convolution3D(filters=8, kernel_size=(4, 4, 3),
                      activation='relu', kernel_initializer='he_normal',
                      name=modual + 'conv2')(x)
    # 12x12x1x8 -> 6x6x1x8
    x = MaxPooling3D(pool_size=(2, 2, 1))(x)
    x = Flatten()(x)
    x = Dropout(0.5)(x)
    features = Dense(100, activation='relu', name=modual + 'fc1')(x)
    return inp, features
示例7: build_model
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def build_model(layers):
    """
    Build and compile a stacked-LSTM regression model.

    :param layers: dimensions [input_dim, timesteps/units1, units2, output_dim];
                   layers[1] is used both as the first LSTM width and as the
                   sequence length of the input shape.
    :return: the compiled ``Sequential`` model (mse loss, rmsprop optimizer).
    """
    net = Sequential()
    net.add(LSTM(units=layers[1],
                 input_shape=(layers[1], layers[0]),
                 return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(layers[2], return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(units=layers[3]))
    net.add(Activation("tanh"))
    t0 = time.time()
    net.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - t0)
    return net
示例8: build_model
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def build_model():
    """
    Build and compile a stacked-LSTM regression model from ``Conf.LAYERS``.

    Layout: LSTM(LAYERS[1]) over (LAYERS[1], LAYERS[0])-shaped input ->
    Dropout(0.2) -> LSTM(LAYERS[2]) -> Dropout(0.2) -> Dense(LAYERS[3])
    -> tanh, compiled with mse / rmsprop.

    :return: the compiled ``Sequential`` model.
    """
    net = Sequential()
    net.add(LSTM(units=Conf.LAYERS[1],
                 input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]),
                 return_sequences=True))
    net.add(Dropout(0.2))
    net.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    net.add(Dropout(0.2))
    net.add(Dense(units=Conf.LAYERS[3]))
    net.add(Activation("tanh"))
    t0 = time.time()
    net.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - t0)
    return net
示例9: build_model
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def build_model(layers):
    """
    Build and compile a stacked-LSTM model with a linear output.

    :param layers: dimensions [input_dim, units1, units2]; the output
                   layer width equals layers[2].
    :return: the compiled ``Sequential`` model (mse loss, rmsprop,
             accuracy metric).
    """
    model = Sequential()
    # MODERNIZED: Keras 1 ``input_dim``/``output_dim`` kwargs replaced with
    # the Keras 2 ``units``/``input_shape`` API used elsewhere in this file.
    # input_dim=k is equivalent to input_shape=(None, k) (variable-length
    # sequences).
    model.add(LSTM(
        units=layers[1],
        input_shape=(None, layers[0]),
        return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(
        layers[2],
        return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(
        units=layers[2]))
    model.add(Activation("linear"))
    start = time.time()
    # NOTE(review): 'accuracy' is not meaningful for mse regression output.
    model.compile(loss="mse", optimizer="rmsprop", metrics=['accuracy'])
    print("Compilation Time : ", time.time() - start)
    return model
示例10: build_model2
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def build_model2(layers):
    """
    Build and compile a two-LSTM regressor with a scalar ReLU output.

    :param layers: dimensions [input_dim, timesteps, ...]; the input shape
                   is (layers[1], layers[0]).
    :return: the compiled ``Sequential`` model (mse loss, adam optimizer).
    """
    d = 0.2  # shared dropout rate
    model = Sequential()
    model.add(LSTM(128, input_shape=(
        layers[1], layers[0]), return_sequences=True))
    model.add(Dropout(d))
    # NOTE: ``input_shape`` removed here — it is only honored on the first
    # layer of a Sequential model and was ignored on this second LSTM.
    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(d))
    # MODERNIZED: Keras 1 ``init=`` kwarg replaced with the Keras 2 name
    # ``kernel_initializer=`` (same 'uniform' initializer).
    model.add(Dense(16, kernel_initializer='uniform', activation='relu'))
    model.add(Dense(1, kernel_initializer='uniform', activation='relu'))
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    return model
# In[10]:
示例11: inception_pseudo
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def inception_pseudo(self, dim=224, freeze_layers=30, full_freeze='N'):
    """
    Transfer-learning classifier on an ImageNet-pretrained InceptionV3 base.

    Appends GlobalAveragePooling -> two Dense(512, relu)+Dropout(0.5)
    blocks -> Dense(5, softmax).

    :param dim: unused here — kept for interface compatibility (the base
                model is built without a fixed input size).
    :param freeze_layers: number of leading base layers to freeze when
                          ``full_freeze != 'N'``.
    :param full_freeze: 'N' leaves all layers trainable; any other value
                        freezes the first ``freeze_layers`` layers.
    :return: the (uncompiled) ``Model``.
    """
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    # BUGFIX: original mixed Keras 1/2 kwargs (``input=``, ``outputs=``);
    # Keras 2 requires ``inputs``/``outputs``.
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final
# ResNet50 Model for transfer Learning
示例12: resnet_pseudo
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    """
    Transfer-learning classifier on an ImageNet-pretrained ResNet50 base.

    Appends GlobalAveragePooling -> two Dense(512, relu)+Dropout(0.5)
    blocks -> Dense(5, softmax).

    :param dim: unused here — kept for interface compatibility.
    :param freeze_layers: number of leading base layers to freeze when
                          ``full_freeze != 'N'``.
    :param full_freeze: 'N' leaves all layers trainable; any other value
                        freezes the first ``freeze_layers`` layers.
    :return: the (uncompiled) ``Model``.
    """
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    # BUGFIX: Keras 2 requires ``inputs``/``outputs`` (not ``input=``).
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final
# VGG16 Model for transfer Learning
示例13: inception_pseudo
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def inception_pseudo(self, dim=224, freeze_layers=30, full_freeze='N'):
    """
    Transfer-learning regressor on an ImageNet-pretrained InceptionV3 base.

    Appends GlobalAveragePooling -> two Dense(512, relu)+Dropout(0.5)
    blocks -> a single linear output unit (regression head).

    :param dim: unused here — kept for interface compatibility.
    :param freeze_layers: number of leading base layers to freeze when
                          ``full_freeze != 'N'``.
    :param full_freeze: 'N' leaves all layers trainable; any other value
                        freezes the first ``freeze_layers`` layers.
    :return: the (uncompiled) ``Model``.
    """
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(1)(x)
    # BUGFIX: Keras 2 requires ``inputs``/``outputs`` (not ``input=``).
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final
# ResNet50 Model for transfer Learning
示例14: resnet_pseudo
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    """
    Transfer-learning regressor on an ImageNet-pretrained ResNet50 base.

    Appends GlobalAveragePooling -> two Dense(512, relu)+Dropout(0.5)
    blocks -> a single linear output unit (regression head).

    :param dim: unused here — kept for interface compatibility.
    :param freeze_layers: number of leading base layers to freeze when
                          ``full_freeze != 'N'``.
    :param full_freeze: 'N' leaves all layers trainable; any other value
                        freezes the first ``freeze_layers`` layers.
    :return: the (uncompiled) ``Model``.
    """
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(1)(x)
    # BUGFIX: Keras 2 requires ``inputs``/``outputs`` (not ``input=``).
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final
# VGG16 Model for transfer Learning
示例15: inception_pseudo
# 需要導入模塊: from keras.layers import core [as 別名]
# 或者: from keras.layers.core import Dropout [as 別名]
def inception_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    """
    Transfer-learning classifier on an ImageNet-pretrained InceptionV3 base.

    Appends GlobalAveragePooling -> two Dense(512, relu)+Dropout(0.5)
    blocks -> Dense(5, softmax).

    :param dim: unused here — kept for interface compatibility.
    :param freeze_layers: number of leading base layers to freeze when
                          ``full_freeze != 'N'``.
    :param full_freeze: 'N' leaves all layers trainable; any other value
                        freezes the first ``freeze_layers`` layers.
    :return: the (uncompiled) ``Model``.
    """
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    # BUGFIX: Keras 2 requires ``inputs``/``outputs`` (not ``input=``).
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final
# ResNet50 Model for transfer Learning