This article collects typical usage examples of the Python method keras.initializers.he_normal. If you are unsure what initializers.he_normal does or how to call it, the curated code examples below should help. You can also explore further usage of the containing module, keras.initializers.
The following shows 5 code examples of the initializers.he_normal method, sorted by popularity by default.
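Before the examples, a minimal sketch of the most common call site (assuming a standard Keras 2 install): he_normal is typically passed as a layer's kernel_initializer, drawing weights from a truncated normal with stddev sqrt(2 / fan_in) (He et al., 2015).

from keras import initializers
from keras.layers import Dense

# He-normal scaling is the usual choice in front of ReLU activations.
layer = Dense(64, activation='relu',
              kernel_initializer=initializers.he_normal(seed=42))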
Example 1: test_he_normal
# Required import: from keras import initializers [as alias]
# Alternatively: from keras.initializers import he_normal [as alias]
def test_he_normal(tensor_shape):
    fan_in, _ = initializers._compute_fans(tensor_shape)
    scale = np.sqrt(2. / fan_in)
    _runner(initializers.he_normal(), tensor_shape,
            target_mean=0., target_std=None, target_max=2 * scale)
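As a quick sanity check outside Keras's test harness (a sketch, assuming a TensorFlow backend; the shape here is arbitrary), you can draw a weight tensor and compare its statistics against the He formula. Because he_normal samples from a truncated normal, the empirical standard deviation comes out slightly below sqrt(2 / fan_in), which is why the test above checks target_max rather than an exact target_std.

import numpy as np
from keras import backend as K
from keras import initializers

shape = (256, 512)                                # fan_in = 256 for a 2D kernel
weights = K.eval(initializers.he_normal(seed=0)(shape))
print(abs(weights.mean()))                        # close to 0
print(weights.std(), np.sqrt(2. / shape[0]))      # empirical std vs. He scale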
Example 2: _conv_block
# Required import: from keras import initializers [as alias]
# Alternatively: from keras.initializers import he_normal [as alias]
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1), bn_epsilon=1e-3,
                bn_momentum=0.99, weight_decay=0., block_id=1):
    """Adds an initial convolution layer (with batch normalization and relu6).

    # Arguments
        inputs: Input tensor of shape `(rows, cols, 3)`
            (with `channels_last` data format) or
            `(3, rows, cols)` (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        alpha: Controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, the default number of filters from the paper
                is used at each layer.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
            Can be a single integer to specify the same value for
            all spatial dimensions.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        bn_epsilon: Epsilon value for BatchNormalization.
        bn_momentum: Momentum value for BatchNormalization.
        weight_decay: L2 regularization factor applied to the convolution kernel.
        block_id: Integer, used to generate unique layer names.

    # Input shape
        4D tensor with shape:
        `(samples, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
        4D tensor with shape:
        `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
        `rows` and `cols` values might have changed due to stride.

    # Returns
        Output tensor of block.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    filters = filters * alpha
    filters = _make_divisible(filters)
    x = Conv2D(filters, kernel,
               padding='same',
               use_bias=False,
               strides=strides,
               kernel_initializer=initializers.he_normal(),
               kernel_regularizer=regularizers.l2(weight_decay),
               name='conv%d' % block_id)(inputs)
    x = BatchNormalization(axis=channel_axis, momentum=bn_momentum, epsilon=bn_epsilon,
                           name='conv%d_bn' % block_id)(x)
    return Activation(relu6, name='conv%d_relu' % block_id)(x)
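A hypothetical call site for this block (it assumes relu6 and _make_divisible are defined in the same module, as in MobileNet-style sources):

from keras.layers import Input

inputs = Input(shape=(224, 224, 3))               # channels_last RGB input
x = _conv_block(inputs, filters=32, alpha=1.0,    # 32 filters at full width
                strides=(2, 2),                   # downsample by 2
                weight_decay=1e-5, block_id=1)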
Example 3: _conv_block
# Required import: from keras import initializers [as alias]
# Alternatively: from keras.initializers import he_normal [as alias]
def _conv_block(inputs, filters, kernel=(3, 3), strides=(1, 1), bn_epsilon=1e-3,
                bn_momentum=0.99, weight_decay=0., block_id=1):
    """Adds an initial convolution layer (with batch normalization and relu6).

    # Arguments
        inputs: Input tensor of shape `(rows, cols, 3)`
            (with `channels_last` data format) or
            `(3, rows, cols)` (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(224, 224, 3)` would be one valid value.
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
            Can be a single integer to specify the same value for
            all spatial dimensions.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        bn_epsilon: Epsilon value for BatchNormalization.
        bn_momentum: Momentum value for BatchNormalization.
        weight_decay: L2 regularization factor applied to the convolution kernel.
        block_id: Integer, used to generate unique layer names.

    # Input shape
        4D tensor with shape:
        `(samples, channels, rows, cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, rows, cols, channels)` if data_format='channels_last'.

    # Output shape
        4D tensor with shape:
        `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
        or 4D tensor with shape:
        `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
        `rows` and `cols` values might have changed due to stride.

    # Returns
        Output tensor of block.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    filters = _make_divisible(filters)
    x = Conv2D(filters, kernel,
               padding='same',
               use_bias=False,
               strides=strides,
               kernel_initializer=initializers.he_normal(),
               kernel_regularizer=regularizers.l2(weight_decay),
               name='conv%d' % block_id)(inputs)
    x = BatchNormalization(axis=channel_axis, momentum=bn_momentum, epsilon=bn_epsilon,
                           name='conv%d_bn' % block_id)(x)
    return Activation(relu6, name='conv%d_relu' % block_id)(x)
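The only difference from Example 2 is that this variant drops the alpha width multiplier: filters goes straight into _make_divisible, so the caller fixes the block width rather than scaling a paper-default value.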
Example 4: densenet
# Required import: from keras import initializers [as alias]
# Alternatively: from keras.initializers import he_normal [as alias]
def densenet(img_input, classes_num):

    def bn_relu(x):
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        return x

    def bottleneck(x):
        channels = growth_rate * 4
        x = bn_relu(x)
        x = Conv2D(channels, kernel_size=(1, 1), strides=(1, 1), padding='same',
                   kernel_initializer=he_normal(),
                   kernel_regularizer=regularizers.l2(weight_decay),
                   use_bias=False)(x)
        x = bn_relu(x)
        x = Conv2D(growth_rate, kernel_size=(3, 3), strides=(1, 1), padding='same',
                   kernel_initializer=he_normal(),
                   kernel_regularizer=regularizers.l2(weight_decay),
                   use_bias=False)(x)
        return x

    def single(x):
        x = bn_relu(x)
        x = Conv2D(growth_rate, kernel_size=(3, 3), strides=(1, 1), padding='same',
                   kernel_initializer=he_normal(),
                   kernel_regularizer=regularizers.l2(weight_decay),
                   use_bias=False)(x)
        return x

    def transition(x, inchannels):
        outchannels = int(inchannels * compression)
        x = bn_relu(x)
        x = Conv2D(outchannels, kernel_size=(1, 1), strides=(1, 1), padding='same',
                   kernel_initializer=he_normal(),
                   kernel_regularizer=regularizers.l2(weight_decay),
                   use_bias=False)(x)
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
        return x, outchannels

    def dense_block(x, blocks, nchannels):
        concat = x
        for i in range(blocks):
            x = bottleneck(concat)
            concat = concatenate([x, concat], axis=-1)
            nchannels += growth_rate
        return concat, nchannels

    def dense_layer(x):
        return Dense(classes_num, activation='softmax',
                     kernel_initializer=he_normal(),
                     kernel_regularizer=regularizers.l2(weight_decay))(x)

    nblocks = (depth - 4) // 6
    nchannels = growth_rate * 2

    x = Conv2D(nchannels, kernel_size=(3, 3), strides=(1, 1), padding='same',
               kernel_initializer=he_normal(),
               kernel_regularizer=regularizers.l2(weight_decay),
               use_bias=False)(img_input)
    x, nchannels = dense_block(x, nblocks, nchannels)
    x, nchannels = transition(x, nchannels)
    x, nchannels = dense_block(x, nblocks, nchannels)
    x, nchannels = transition(x, nchannels)
    x, nchannels = dense_block(x, nblocks, nchannels)
    x, nchannels = transition(x, nchannels)
    x = bn_relu(x)
    x = GlobalAveragePooling2D()(x)
    x = dense_layer(x)
    return x
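A usage sketch for a CIFAR-sized input. The hyperparameter values below are illustrative; in the source they are module-level globals that densenet reads directly (depth = 100 gives nblocks = 16). Note also that a canonical DenseNet would omit the transition after the third dense block; this snippet keeps the code as written.

from keras.layers import Input
from keras.models import Model

# Illustrative globals; densenet() resolves these from module scope.
depth, growth_rate, compression, weight_decay = 100, 12, 0.5, 1e-4

img_input = Input(shape=(32, 32, 3))
output = densenet(img_input, classes_num=10)
model = Model(img_input, output)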
Example 5: build_initializer
# Required import: from keras import initializers [as alias]
# Alternatively: from keras.initializers import he_normal [as alias]
def build_initializer(type, kerasDefaults, seed=None, constant=0.):
    """Set the initializer to the appropriate Keras initializer function
    based on the input string. Other required values are set to the
    Keras default values.

    Parameters
    ----------
    type : string
        String to choose the initializer.
        Options recognized: 'constant', 'uniform', 'normal',
        'glorot_uniform', 'lecun_uniform', 'he_normal'
        See the Keras documentation for a full description of the options.
    kerasDefaults : dict
        Dictionary of default parameter values to ensure consistency
        between frameworks.
    seed : integer
        Random number seed.
    constant : float
        Constant value (for the constant initializer only).

    Returns
    -------
    The appropriate Keras initializer function.
    """
    if type == 'constant':
        return initializers.Constant(value=constant)
    elif type == 'uniform':
        return initializers.RandomUniform(minval=kerasDefaults['minval_uniform'],
                                          maxval=kerasDefaults['maxval_uniform'],
                                          seed=seed)
    elif type == 'normal':
        return initializers.RandomNormal(mean=kerasDefaults['mean_normal'],
                                         stddev=kerasDefaults['stddev_normal'],
                                         seed=seed)
    # Not generally available
    # elif type == 'glorot_normal':
    #     return initializers.glorot_normal(seed=seed)
    elif type == 'glorot_uniform':
        return initializers.glorot_uniform(seed=seed)
    elif type == 'lecun_uniform':
        return initializers.lecun_uniform(seed=seed)
    elif type == 'he_normal':
        return initializers.he_normal(seed=seed)
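A usage sketch for this dispatcher (the kerasDefaults values here are illustrative placeholders, not the project's actual defaults):

from keras.layers import Dense

kerasDefaults = {'minval_uniform': -0.05, 'maxval_uniform': 0.05,
                 'mean_normal': 0., 'stddev_normal': 0.05}

init = build_initializer('he_normal', kerasDefaults, seed=7)
layer = Dense(128, kernel_initializer=init)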