本文整理汇总了Python中keras.initializers.TruncatedNormal方法的典型用法代码示例。如果您正苦于以下问题:Python initializers.TruncatedNormal方法的具体用法?Python initializers.TruncatedNormal怎么用?Python initializers.TruncatedNormal使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块 keras.initializers
的用法示例。
在下文中一共展示了initializers.TruncatedNormal方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: DC_CNN_Block
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import TruncatedNormal [as 别名]
def DC_CNN_Block(nb_filter, filter_length, dilation, l2_layer_reg):
    """Build one dilated-causal-convolution residual block (WaveNet style).

    Args:
        nb_filter: number of filters in the dilated convolution.
        filter_length: kernel size of the dilated convolution.
        dilation: dilation rate of the causal convolution.
        l2_layer_reg: L2 regularization factor applied to every conv kernel.

    Returns:
        A closure ``f(input_)`` that wires the block onto ``input_`` and
        returns ``(network_out, skip_out)`` — the residual output fed to the
        next block, and the skip-connection output collected by the model.
    """
    def f(input_):
        residual = input_

        # Dilated causal conv; seeded TruncatedNormal init for reproducibility.
        layer_out = Conv1D(filters=nb_filter, kernel_size=filter_length,
                           dilation_rate=dilation, activation='linear',
                           padding='causal', use_bias=False,
                           kernel_initializer=TruncatedNormal(mean=0.0,
                                                              stddev=0.05,
                                                              seed=42),
                           kernel_regularizer=l2(l2_layer_reg))(input_)
        layer_out = Activation('selu')(layer_out)

        # 1x1 conv producing the skip-connection output of this block.
        skip_out = Conv1D(1, 1, activation='linear', use_bias=False,
                          kernel_initializer=TruncatedNormal(mean=0.0,
                                                             stddev=0.05,
                                                             seed=42),
                          kernel_regularizer=l2(l2_layer_reg))(layer_out)

        # 1x1 conv projecting back to 1 channel before the residual add.
        network_in = Conv1D(1, 1, activation='linear', use_bias=False,
                            kernel_initializer=TruncatedNormal(mean=0.0,
                                                               stddev=0.05,
                                                               seed=42),
                            kernel_regularizer=l2(l2_layer_reg))(layer_out)

        network_out = Add()([residual, network_in])
        return network_out, skip_out
    return f
示例2: DC_CNN_Model
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import TruncatedNormal [as 别名]
def DC_CNN_Model(length):
    """Build and compile a dilated-causal CNN for univariate sequences.

    Seven stacked DC_CNN_Block blocks with exponentially growing dilation
    (1..64); their skip outputs are summed, passed through ReLU and a final
    1x1 conv, and trained with MAE loss under the Adam optimizer.

    Args:
        length: number of time steps in the (length, 1) input sequence.

    Returns:
        A compiled keras ``Model``.
    """
    # Avoid shadowing the builtin ``input``.
    model_input = Input(shape=(length, 1))

    l1a, l1b = DC_CNN_Block(32, 2, 1, 0.001)(model_input)
    l2a, l2b = DC_CNN_Block(32, 2, 2, 0.001)(l1a)
    l3a, l3b = DC_CNN_Block(32, 2, 4, 0.001)(l2a)
    l4a, l4b = DC_CNN_Block(32, 2, 8, 0.001)(l3a)
    l5a, l5b = DC_CNN_Block(32, 2, 16, 0.001)(l4a)
    l6a, l6b = DC_CNN_Block(32, 2, 32, 0.001)(l5a)
    l6b = Dropout(0.8)(l6b)  # dropout used to limit influence of earlier data
    l7a, l7b = DC_CNN_Block(32, 2, 64, 0.001)(l6a)
    l7b = Dropout(0.8)(l7b)  # dropout used to limit influence of earlier data

    # Sum all skip connections, then project to a single output channel.
    l8 = Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b])
    l9 = Activation('relu')(l8)
    l21 = Conv1D(1, 1, activation='linear', use_bias=False,
                 kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05,
                                                    seed=42),
                 kernel_regularizer=l2(0.001))(l9)

    # Keras 2+ requires ``inputs``/``outputs`` (the old ``input``/``output``
    # keyword arguments were removed).
    model = Model(inputs=model_input, outputs=l21)

    adam = optimizers.Adam(lr=0.00075, beta_1=0.9, beta_2=0.999, epsilon=None,
                           decay=0.0, amsgrad=False)
    model.compile(loss='mae', optimizer=adam, metrics=['mse'])
    return model
示例3: _fire_layer
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import TruncatedNormal [as 别名]
def _fire_layer(self, name, input, s1x1, e1x1, e3x3, stdd=0.01):
    """
    Wrapper for SqueezeNet fire-layer construction.

    A 1x1 "squeeze" conv reduces channels, then feeds two parallel "expand"
    convs (1x1 and 3x3) whose outputs are concatenated along the channel axis.

    :param name: name prefix for the layers
    :param input: previous layer (tensor)
    :param s1x1: number of filters for squeezing
    :param e1x1: number of filters for expand 1x1
    :param e3x3: number of filters for expand 3x3
    :param stdd: standard deviation used for TruncatedNormal initialization
    :return: a keras fire layer (concatenated expand outputs)
    """
    sq1x1 = Conv2D(
        name=name + '/squeeze1x1', filters=s1x1, kernel_size=(1, 1),
        strides=(1, 1), use_bias=True, padding='SAME',
        kernel_initializer=TruncatedNormal(stddev=stdd), activation="relu",
        kernel_regularizer=l2(self.config.WEIGHT_DECAY))(input)

    # Both expand branches consume the squeeze output.
    ex1x1 = Conv2D(
        name=name + '/expand1x1', filters=e1x1, kernel_size=(1, 1),
        strides=(1, 1), use_bias=True, padding='SAME',
        kernel_initializer=TruncatedNormal(stddev=stdd), activation="relu",
        kernel_regularizer=l2(self.config.WEIGHT_DECAY))(sq1x1)

    ex3x3 = Conv2D(
        name=name + '/expand3x3', filters=e3x3, kernel_size=(3, 3),
        strides=(1, 1), use_bias=True, padding='SAME',
        kernel_initializer=TruncatedNormal(stddev=stdd), activation="relu",
        kernel_regularizer=l2(self.config.WEIGHT_DECAY))(sq1x1)

    # Channel-wise concat (axis=3 assumes channels-last data format —
    # TODO confirm against the model's data_format setting).
    return concatenate([ex1x1, ex3x3], axis=3)
#wrapper for padding, written in tensorflow. If you want to change to theano you need to rewrite this!
示例4: test_truncated_normal
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import TruncatedNormal [as 别名]
def test_truncated_normal(tensor_shape):
    """Check TruncatedNormal(mean=0, stddev=1) samples stay within 2 sigma.

    Std is not asserted exactly (``target_std=None``) because truncation
    shrinks the effective standard deviation below the requested value.
    """
    _runner(initializers.TruncatedNormal(mean=0, stddev=1), tensor_shape,
            target_mean=0., target_std=None, target_max=2)