This article compiles typical usage examples of the Python method keras.initializers.glorot_uniform. If you have been wondering how exactly to use initializers.glorot_uniform, the curated code examples below may help. You can also explore further usage examples from the containing module, keras.initializers.
Shown below are 15 code examples of initializers.glorot_uniform, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
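Before the examples, here is a minimal, self-contained sketch of how glorot_uniform is typically passed to a Keras layer (a standalone illustration using the Keras 2.x API, not taken from any of the projects below):

from keras.models import Sequential
from keras.layers import Dense
from keras.initializers import glorot_uniform

model = Sequential([
    # Pass an initializer instance; a fixed seed makes the draw reproducible
    Dense(64, activation='relu', input_shape=(100,),
          kernel_initializer=glorot_uniform(seed=42)),
    Dense(10, activation='softmax',
          kernel_initializer=glorot_uniform(seed=42)),
])
model.summary()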
Example 1: encoding_block
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def encoding_block(X, filter_size, filters_num, layer_num, block_type, stage, s = 1, X_skip=0):
# defining name basis
conv_name_base = 'conv_' + block_type + str(stage) + '_'
bn_name_base = 'bn_' + block_type + str(stage) + '_'
for i in np.arange(layer_num)+1:
# First component of main path
X = Conv2D(filters_num, filter_size , strides = (s,s), padding = 'same', name = conv_name_base + 'main_' + str(i), kernel_initializer = glorot_uniform())(X)
X = BatchNormalization(axis = 3, name = bn_name_base + 'main_' + str(i))(X)
if i != layer_num:
X = Activation('relu')(X)
X = Activation('relu')(X)
# Down sampling layer
X_downed = Conv2D(filters_num*2, (2, 2), strides = (2,2), padding = 'valid', name = conv_name_base + 'down', kernel_initializer = glorot_uniform())(X)
X_downed = BatchNormalization(axis = 3, name = bn_name_base + 'down')(X_downed)
X_downed = Activation('relu')(X_downed)
return X, X_downed
Example 2: _getKerasModelWeightInitializer
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def _getKerasModelWeightInitializer(self):
"""
Get initializer for a set of weights (e.g. the kernel/bias of a single Dense layer)
within a Keras model.
"""
return glorot_uniform(seed=self.RANDOM_SEED)
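Seeding the initializer makes weight creation deterministic, which is what makes model runs reproducible in tests. A standalone sketch of what the seed buys you (an illustration assuming the TensorFlow 1.x backend, not part of the original test suite):

import numpy as np
import keras.backend as K
from keras.initializers import glorot_uniform

# Two initializers with the same seed draw identical weight tensors
w_a = K.eval(glorot_uniform(seed=7)(shape=(3, 4)))
w_b = K.eval(glorot_uniform(seed=7)(shape=(3, 4)))
assert np.array_equal(w_a, w_b)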
Example 3: define
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def define(self, n_output: int=2, dropout: float=1., base_model=None):
"""
Define model architecture for eyes_fcscratch
:param n_output: number of network outputs
    :param dropout: dropout rate; a Dropout layer is added only when the value is < 1
:param base_model: Base model whose architecture and weights are used for convolutional blocks.
"""
hidden_dim = 1536
image_input = Input(shape=base_model.input_size[input_type.EYES], name='input')
# Load base model without FC layers
base = base_model.load_model(input_tensor=image_input, include_top=False)
weight_init = glorot_uniform(seed=3)
# Define architecture on top of base model
last_layer = base.get_layer('pool5').output
x = Flatten(name='flatten')(last_layer)
x = Dense(hidden_dim, activation='relu', kernel_initializer=weight_init, name='fc6')(x)
if dropout < 1.:
x = Dropout(dropout, seed=0, name='dp6')(x)
out = Dense(n_output, kernel_initializer=weight_init, name='fc8')(x)
    # The first four layers are not trained
for layer in base.layers[:4]:
layer.trainable = False
self.model = Model([image_input], out)
print(len(self.model.layers))
print([n.name for n in self.model.layers])
# Print model summary
self.model.summary()
Example 4: define
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def define(self, n_output: int=2, dropout: float=1., base_model=None):
"""
Define model architecture for face_finetune
:param n_output: number of network outputs
    :param dropout: dropout rate; Dropout layers are added only when the value is < 1
:param base_model: Base model whose architecture and weights are used for all network except last FC layer.
"""
image_input = Input(shape=base_model.input_size[input_type.FACE], name='input')
weight_init = glorot_uniform(seed=3)
# Load model with FC layers
base = base_model.load_model(input_tensor=image_input, include_top=True)
last_layer = base.get_layer('fc6/relu').output
fc7 = base.get_layer('fc7')
fc7r = base.get_layer('fc7/relu')
x = last_layer
if dropout < 1.:
x = Dropout(dropout, seed=0, name='dp6')(x)
x = fc7(x)
x = fc7r(x)
if dropout < 1.:
x = Dropout(dropout, seed=1, name='dp7')(x)
out = Dense(n_output, kernel_initializer=weight_init, name='fc8')(x)
# Freeze first conv layers
for layer in base.layers[:4]:
layer.trainable = False
self.model = Model(image_input, out)
# Print model summary
self.model.summary()
Example 5: compile_sesemi
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def compile_sesemi(network, input_shape, nb_classes,
lrate, in_network_dropout, super_dropout):
weight_decay = 0.0005
initer = initializers.glorot_uniform()
fc_params = dict(
use_bias=True,
activation='softmax',
kernel_initializer=initer,
kernel_regularizer=l2(weight_decay),
)
cnn_trunk = network.create_network(input_shape, in_network_dropout)
super_in = Input(shape=input_shape, name='super_data')
self_in = Input(shape=input_shape, name='self_data')
super_out = cnn_trunk(super_in)
self_out = cnn_trunk(self_in)
super_out = GlobalAveragePooling2D(name='super_gap')(super_out)
self_out = GlobalAveragePooling2D(name='self_gap')(self_out)
if super_dropout > 0.0:
super_out = Dropout(super_dropout, name='super_dropout')(super_out)
super_out = Dense(nb_classes, name='super_clf', **fc_params)(super_out)
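    # NOTE: proxy_labels is a module-level constant in the original source
    # (the number of self-supervised proxy classes); it is not shown in this excerpt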
self_out = Dense(proxy_labels, name='self_clf', **fc_params)(self_out)
sesemi_model = Model(inputs=[self_in, super_in],
outputs=[self_out, super_out])
inference_model = Model(inputs=[super_in], outputs=[super_out])
sgd = optimizers.SGD(lr=lrate, momentum=0.9, nesterov=True)
sesemi_model.compile(optimizer=sgd,
loss={'super_clf': 'categorical_crossentropy',
'self_clf' : 'categorical_crossentropy'},
loss_weights={'super_clf': 1.0, 'self_clf': 1.0},
metrics=None)
return sesemi_model, inference_model
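A hedged usage sketch for the two returned models (the argument values and the self_x/super_x variable names are placeholders, not taken from the original training script):

sesemi_model, inference_model = compile_sesemi(
    network, input_shape=(32, 32, 3), nb_classes=10,
    lrate=0.05, in_network_dropout=0.0, super_dropout=0.5)
# self_x/self_y come from the self-supervised proxy task,
# super_x/super_y from the labeled set
sesemi_model.fit([self_x, super_x], [self_y, super_y], batch_size=32, epochs=10)
preds = inference_model.predict(test_x)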
Example 6: __init__
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def __init__(self, topic, emb_dim=300, return_sequence=False, W_regularizer=None, W_constraint=None, return_att_weights=False,
**kwargs):
self.supports_masking = True
self.init = initializers.glorot_uniform()
self.W_regularizer = regularizers.get(W_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.emb_dim = emb_dim
self.topic = topic
self.return_sequences = return_sequence
self.return_att_weights = return_att_weights
super(InnerAttentionLayer, self).__init__(**kwargs)
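The constructor only stores the configuration; self.init is consumed later when the layer creates its weights. A hypothetical sketch of such a build() method (the original class's build() is not shown in this excerpt):

def build(self, input_shape):
    # Hypothetical: create the attention weight matrix with the stored
    # glorot_uniform initializer, regularizer, and constraint
    self.W = self.add_weight(name='{}_W'.format(self.name),
                             shape=(self.emb_dim, self.emb_dim),
                             initializer=self.init,
                             regularizer=self.W_regularizer,
                             constraint=self.W_constraint)
    super(InnerAttentionLayer, self).build(input_shape)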
Example 7: test_glorot_uniform
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def test_glorot_uniform(tensor_shape):
fan_in, fan_out = initializers._compute_fans(tensor_shape)
scale = np.sqrt(6. / (fan_in + fan_out))
_runner(initializers.glorot_uniform(), tensor_shape,
target_mean=0., target_max=scale, target_min=-scale)
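For a concrete shape, the bound is easy to check by hand. With a kernel of shape (256, 128), fan_in = 256 and fan_out = 128, so the limit comes out exactly (a standalone check, not part of the Keras test suite):

import numpy as np

# limit = sqrt(6 / (fan_in + fan_out)) = sqrt(6 / 384) = 0.125
limit = np.sqrt(6. / (256 + 128))
assert limit == 0.125  # samples are drawn uniformly from [-0.125, 0.125]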
Example 8: identity_block
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def identity_block(X, f, filters, stage, block):
    # defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve filters
f1, f2, f3 = filters
# Copy the input value for the skip branch
X_copy = X
# First component of main path
X = Conv2D(filters=f1, kernel_size=(1, 1), strides=(1, 1), padding='valid',
name=conv_name_base + '2a', kernel_initializer=glorot_uniform())(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
X = Activation('relu')(X)
# Second component of main path
X = Conv2D(filters=f2, kernel_size=(f, f), strides=(1, 1), padding='same',
name=conv_name_base + '2b', kernel_initializer=glorot_uniform())(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
X = Activation('relu')(X)
# Third component
X = Conv2D(filters=f3, kernel_size=(1, 1), strides=(1, 1), padding='valid',
name=conv_name_base + '2c', kernel_initializer=glorot_uniform())(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
# Final Step
X = Add()([X, X_copy])
X = Activation('relu')(X)
return X
Example 9: convolutional_block
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def convolutional_block(X, f, filters, stage, block, s=2):
# Defining base names
conv_base_name = 'res' + str(stage) + block + '_branch'
bn_base_name = 'bn' + str(stage) + block + '_branch'
# Retrieve filters
f1, f2, f3 = filters
# Save copy for skip branch ops
X_copy = X
# First component - Main path
X = Conv2D(filters=f1, kernel_size=(1, 1), strides=(s, s), padding='valid',
name=conv_base_name + '2a', kernel_initializer=glorot_uniform())(X)
X = BatchNormalization(axis=3, name=bn_base_name + '2a')(X)
X = Activation('relu')(X)
# Second component - Main path
X = Conv2D(filters=f2, kernel_size=(f, f), strides=(1, 1), padding='same',
name=conv_base_name + '2b', kernel_initializer=glorot_uniform())(X)
X = BatchNormalization(axis=3, name=bn_base_name + '2b')(X)
X = Activation('relu')(X)
# Third Component - Main path
X = Conv2D(filters=f3, kernel_size=(1, 1), strides=(1, 1), padding='valid',
name=conv_base_name + '2c', kernel_initializer=glorot_uniform())(X)
X = BatchNormalization(axis=3, name=bn_base_name + '2c')(X)
# First Component - Skip Path
X_copy = Conv2D(filters=f3, kernel_size=(1, 1), strides=(s, s), padding='valid',
name=conv_base_name + '1', kernel_initializer=glorot_uniform())(X_copy)
X_copy = BatchNormalization(axis=3, name=bn_base_name + '1')(X_copy)
# Add the shortcut
X = Add()([X_copy, X])
X = Activation('relu')(X)
return X
Example 10: decoding_block
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def decoding_block(X, filter_size, filters_num, layer_num, block_type, stage, s = 1, X_jump = 0, up_sampling = True):
# defining name basis
conv_name_base = 'conv_' + block_type + str(stage) + '_'
bn_name_base = 'bn_' + block_type + str(stage) + '_'
# Joining X_jump from encoding side with X_uped
if X_jump == 0:
X_joined_input = X
else:
# X_joined_input = Add()([X,X_jump])
X_joined_input = Concatenate(axis = 3)([X,X_jump])
##### MAIN PATH #####
for i in np.arange(layer_num)+1:
# First component of main path
X_joined_input = Conv2D(filters_num, filter_size , strides = (s,s), padding = 'same',
name = conv_name_base + 'main_' + str(i), kernel_initializer = glorot_uniform())(X_joined_input)
X_joined_input = BatchNormalization(axis = 3, name = bn_name_base + 'main_' + str(i))(X_joined_input)
if i != layer_num:
X_joined_input = Activation('relu')(X_joined_input)
X_joined_input = Activation('relu')(X_joined_input)
    # Up-sampling layer. At the output stage, up-sampling is disabled and
    # replaced by the output operations applied manually (see DeepVOG_net below)
    if up_sampling:
X_uped = Conv2DTranspose(filters_num, (2, 2), strides = (2,2), padding = 'valid',
name = conv_name_base + 'up', kernel_initializer = glorot_uniform())(X_joined_input)
X_uped = BatchNormalization(axis = 3, name = bn_name_base + 'up')(X_uped)
X_uped = Activation('relu')(X_uped)
return X_uped
else:
return X_joined_input
# FullVnet
# The output layer has 3 channels. The first two channels encode the one-hot classes (pupil and non-pupil);
# the third channel contains all zeros in all cases (trivial)
Example 11: DeepVOG_net
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def DeepVOG_net(input_shape = (240, 320, 3), filter_size= (3,3)):
X_input = Input(input_shape)
Nh, Nw = input_shape[0], input_shape[1]
# Encoding Stream
X_jump1, X_out = encoding_block(X = X_input, X_skip = 0, filter_size= filter_size, filters_num= 16,
layer_num= 1, block_type = "down", stage = 1, s = 1)
X_jump2, X_out = encoding_block(X = X_out, X_skip = X_out, filter_size= filter_size, filters_num= 32,
layer_num= 1, block_type = "down", stage = 2, s = 1)
X_jump3, X_out = encoding_block(X = X_out, X_skip = X_out, filter_size= filter_size, filters_num= 64,
layer_num= 1, block_type = "down", stage = 3, s = 1)
X_jump4, X_out = encoding_block(X = X_out, X_skip = X_out, filter_size= filter_size, filters_num= 128,
layer_num= 1, block_type = "down", stage = 4, s = 1)
# Decoding Stream
X_out = decoding_block(X = X_out, X_jump = 0, filter_size= filter_size, filters_num= 256,
layer_num= 1, block_type = "up", stage = 1, s = 1)
X_out = decoding_block(X = X_out, X_jump = X_jump4, filter_size= filter_size, filters_num= 256,
layer_num= 1, block_type = "up", stage = 2, s = 1)
X_out = decoding_block(X = X_out, X_jump = X_jump3, filter_size= filter_size, filters_num= 128,
layer_num= 1, block_type = "up", stage = 3, s = 1)
X_out = decoding_block(X = X_out, X_jump = X_jump2, filter_size= filter_size, filters_num= 64,
layer_num= 1, block_type = "up", stage = 4, s = 1)
X_out = decoding_block(X = X_out, X_jump = X_jump1, filter_size= filter_size, filters_num= 32,
layer_num= 1, block_type = "up", stage = 5, s = 1, up_sampling = False)
# Output layer operations
X_out = Conv2D(filters = 3, kernel_size = (1,1) , strides = (1,1), padding = 'valid',
name = "conv_out", kernel_initializer = glorot_uniform())(X_out)
X_out = Activation("softmax")(X_out)
model = Model(inputs = X_input, outputs = X_out, name='Pupil')
return model
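A minimal usage sketch (the compile settings here are assumptions for illustration; the original project defines its own training setup):

model = DeepVOG_net(input_shape=(240, 320, 3), filter_size=(3, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy')  # assumed loss
model.summary()  # per-pixel 3-channel softmax output of shape (240, 320, 3)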
Example 12: define
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def define(self, n_output: int=2, dropout: float=1., hidden_dim: int=4096,
base_model=None, use_metadata: bool=False):
"""
Define model architecture for face_fcscratch. If use_metadata is True, landmarks are concatenated to
flattened face features.
:param n_output: number of network outputs
    :param dropout: dropout rate; Dropout layers are added only when the value is < 1
:param hidden_dim: number of hidden dimensions of FC layers
:param base_model: Base model whose architecture and weights are used for convolutional blocks.
:param use_metadata: add metadata (landmarks) to model
"""
image_input = Input(shape=base_model.input_size[input_type.FACE], name='input-'+input_type.FACE.value)
weight_init = glorot_uniform(seed=3)
# Load model with FC layers
base = base_model.load_model(input_tensor=image_input, include_top=False)
last_layer = base.get_layer('pool5').output
x = Flatten(name='flatten')(last_layer)
if use_metadata:
metadata_input = Input(shape=base_model.input_size[input_type.LANDMARKS],
name='input-'+input_type.LANDMARKS.value)
x = concatenate([x, metadata_input])
x = Dense(hidden_dim, activation='relu', kernel_initializer=weight_init, name='fc6')(x)
if dropout < 1.:
x = Dropout(dropout, seed=0, name='dp6')(x)
x = Dense(hidden_dim, activation='relu', kernel_initializer=weight_init, name='fc7')(x)
if dropout < 1.:
x = Dropout(dropout, seed=1, name='dp7')(x)
out = Dense(n_output, kernel_initializer=weight_init, name='fc8')(x)
# Freeze first conv layers
for layer in base.layers[:4]:
layer.trainable = False
if use_metadata:
self.model = Model([image_input, metadata_input], out)
else:
self.model = Model(image_input, out)
# Print model summary
self.model.summary()
Example 13: ResNet50
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def ResNet50(input_shape=(64, 64, 3), classes=6):
# Define the Input Tensor
X_inp = Input(input_shape)
# Zero-Padding
X = ZeroPadding2D((3, 3))(X_inp)
# Stage 1
X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', kernel_initializer=glorot_uniform())(X)
X = BatchNormalization(axis=3, name='bn1')(X)
X = Activation('relu')(X)
X = MaxPooling2D((3, 3), strides=(2, 2))(X)
# Stage 2
X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)
X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')
# Stage 3
X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=1)
X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
X = identity_block(X, 3, filters=[128, 128, 512], stage=3, block='d')
# Stage 4
X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=1)
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
X = identity_block(X, 3, filters=[256, 256, 1024], stage=4, block='d')
X = identity_block(X, 3, filters=[256, 256, 1024], stage=4, block='e')
X = identity_block(X, 3, filters=[256, 256, 1024], stage=4, block='f')
# Stage 5
X = convolutional_block(X, f=3, filters=[512, 512, 2048], s=2, stage=5, block='a')
X = identity_block(X, 3, filters=[512, 512, 2048], stage=5, block='b')
X = identity_block(X, 3, filters=[512, 512, 2048], stage=5, block='c')
# AvgPool
X = AveragePooling2D(pool_size=(2, 2), name='avg_pool')(X)
# Output Layer
X = Flatten()(X)
X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer=glorot_uniform())(X)
model = Model(inputs=X_inp, outputs=X, name='ResNet50')
return model
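A quick usage sketch under the default arguments (X_train and Y_train are placeholder names):

model = ResNet50(input_shape=(64, 64, 3), classes=6)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
# X_train: images of shape (m, 64, 64, 3); Y_train: one-hot labels of shape (m, 6)
model.fit(X_train, Y_train, epochs=10, batch_size=32)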
Example 14: identity_block
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def identity_block(X, f, filters, stage, block):
"""
Implementation of the identity block as defined in Figure 3
Arguments:
X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
f -- integer, specifying the shape of the middle CONV's window for the main path
filters -- python list of integers, defining the number of filters in the CONV layers of the main path
stage -- integer, used to name the layers, depending on their position in the network
block -- string/character, used to name the layers, depending on their position in the network
Returns:
X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
"""
# defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value. You'll need this later to add back to the main path.
X_shortcut = X
# First component of main path
X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a',
kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
X = Activation('elu')(X)
# Second component of main path (≈3 lines)
X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b',
kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
X = Activation('elu')(X)
# Third component of main path (≈2 lines)
X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c',
kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
    # Final step: Add shortcut value to main path, and pass it through an ELU activation (≈2 lines)
X = Add()([X, X_shortcut])
X = Activation('elu')(X)
return X
Example 15: convolutional_block
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import glorot_uniform [as alias]
def convolutional_block(X, f, filters, stage, block, s=2):
"""
Implementation of the convolutional block as defined in Figure 4
Arguments:
X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
f -- integer, specifying the shape of the middle CONV's window for the main path
filters -- python list of integers, defining the number of filters in the CONV layers of the main path
stage -- integer, used to name the layers, depending on their position in the network
block -- string/character, used to name the layers, depending on their position in the network
s -- Integer, specifying the stride to be used
Returns:
X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
"""
# defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value
X_shortcut = X
# First component of main path
X = Conv2D(F1, (1, 1), padding='valid', strides=(s, s), name=conv_name_base + '2a',
kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
X = Activation('elu')(X)
# Second component of main path (≈3 lines)
X = Conv2D(F2, (f, f), padding='same', strides=(1, 1), name=conv_name_base + '2b',
kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
X = Activation('elu')(X)
# Third component of main path (≈2 lines)
X = Conv2D(F3, (1, 1), padding='valid', strides=(1, 1), name=conv_name_base + '2c',
kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
##### SHORTCUT PATH #### (≈2 lines)
X_shortcut = Conv2D(F3, (1, 1), padding='valid', strides=(s, s), name=conv_name_base + '1',
kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
    # Final step: Add shortcut value to main path, and pass it through an ELU activation (≈2 lines)
X = Add()([X, X_shortcut])
X = Activation('elu')(X)
return X