This page collects typical usage examples of keras_layer_normalization.LayerNormalization in Python. If you are unsure what keras_layer_normalization.LayerNormalization does or how to use it, the curated code examples below may help; you can also explore further examples from the enclosing module keras_layer_normalization.
The following shows 8 code examples of keras_layer_normalization.LayerNormalization, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
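Before the examples, here is a minimal sketch of how the layer is typically installed and wired into a model (the PyPI package name keras-layer-normalization is an assumption inferred from the module name):

# Minimal sketch: install with `pip install keras-layer-normalization` (assumed
# package name), then use the layer like any other Keras layer.
import keras
from keras_layer_normalization import LayerNormalization

model = keras.models.Sequential()
model.add(LayerNormalization(input_shape=(2, 3)))  # normalizes over the last axis
model.compile(optimizer='adam', loss='mse')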
Example 1: get_custom_objects
# Required imports: import keras_layer_normalization [as alias]
# Or: from keras_layer_normalization import LayerNormalization [as alias]
def get_custom_objects() -> dict:
    return {
        'gelu': gelu,
        'EmbeddingRet': EmbeddingRet,
        'EmbeddingSim': EmbeddingSim,
        'CreateMask': CreateMask,
        'RestoreMask': RestoreMask,
        'PositionalEmbedding': PositionalEmbedding,
        'PermutationMask': PermutationMask,
        'MaskEmbedding': MaskEmbedding,
        'RelativeBias': RelativeBias,
        'SegmentBias': SegmentBias,
        'RelativeSegmentEmbedding': RelativeSegmentEmbedding,
        'Memory': Memory,
        'LayerNormalization': LayerNormalization,
        'RelativePartialMultiHeadSelfAttention': Attention,
        'FeedForward': FeedForward,
    }
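A mapping like this is what Keras deserialization helpers expect so that custom classes can be resolved by name; a minimal sketch ('model.h5' is a placeholder path):

# Hypothetical usage: resolve the custom layers when loading a saved model.
model = keras.models.load_model('model.h5', custom_objects=get_custom_objects())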
Example 2: _wrap_layer
# Required imports: import keras_layer_normalization [as alias]
# Or: from keras_layer_normalization import LayerNormalization [as alias]
def _wrap_layer(name, input_layer, build_func, trainable=True):
    """Wrap layers with normalization and residual.

    :param name: Prefix of names for internal layers.
    :param input_layer: Input layer.
    :param build_func: A callable that takes the input tensor and generates the output tensor.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    normal_layer = LayerNormalization(
        trainable=trainable,
        name='%s-Norm' % name,
    )(input_layer)
    build_output = build_func(normal_layer)
    return keras.layers.Add(name='%s-Add' % name)([input_layer, build_output])
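This is the pre-norm arrangement: the input is normalized before the sub-layer runs, and the sub-layer output is added back through a residual connection. A hypothetical call, with a FeedForward layer standing in for build_func and `hidden` an assumed upstream tensor:

# Hypothetical usage: wrap a feed-forward sub-layer with pre-norm + residual.
output = _wrap_layer(
    name='Encoder-1-FeedForward',
    input_layer=hidden,
    build_func=FeedForward(units=256, name='Encoder-1-FeedForward-Inner'),
)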
Example 3: test_save_load_json
# Required imports: import keras_layer_normalization [as alias]
# Or: from keras_layer_normalization import LayerNormalization [as alias]
def test_save_load_json(self):
    model = keras.models.Sequential()
    model.add(LayerNormalization(input_shape=(2, 3)))
    model.compile(optimizer='adam', loss='mse')
    encoded = model.to_json()
    model = keras.models.model_from_json(encoded, custom_objects={'LayerNormalization': LayerNormalization})
    model.summary()
Example 4: get_custom_objects
# Required imports: import keras_layer_normalization [as alias]
# Or: from keras_layer_normalization import LayerNormalization [as alias]
def get_custom_objects():
    return {
        'gelu': gelu,
        'LayerNormalization': LayerNormalization,
        'MultiHeadAttention': MultiHeadAttention,
        'FeedForward': FeedForward,
        'TrigPosEmbedding': TrigPosEmbedding,
        'EmbeddingRet': EmbeddingRet,
        'EmbeddingSim': EmbeddingSim,
    }
Example 5: _wrap_layer
# Required imports: import keras_layer_normalization [as alias]
# Or: from keras_layer_normalization import LayerNormalization [as alias]
def _wrap_layer(name,
                input_layer,
                build_func,
                dropout_rate=0.0,
                trainable=True):
    """Wrap layers with residual, normalization and dropout.

    :param name: Prefix of names for internal layers.
    :param input_layer: Input layer.
    :param build_func: A callable that takes the input tensor and generates the output tensor.
    :param dropout_rate: Dropout rate.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    build_output = build_func(input_layer)
    if dropout_rate > 0.0:
        dropout_layer = keras.layers.Dropout(
            rate=dropout_rate,
            name='%s-Dropout' % name,
        )(build_output)
    else:
        dropout_layer = build_output
    if isinstance(input_layer, list):
        input_layer = input_layer[0]
    add_layer = keras.layers.Add(name='%s-Add' % name)([input_layer, dropout_layer])
    normal_layer = LayerNormalization(
        trainable=trainable,
        name='%s-Norm' % name,
    )(add_layer)
    return normal_layer
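Note the contrast with Example 2: there the input is normalized before the sub-layer runs (pre-norm), whereas here the sub-layer output passes through dropout and the residual addition first, and only the sum is normalized (the post-norm arrangement of the original Transformer). The isinstance check also allows build_func to take a list of inputs while the residual connection uses only the first one.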
Example 6: get_custom_objects
# Required imports: import keras_layer_normalization [as alias]
# Or: from keras_layer_normalization import LayerNormalization [as alias]
def get_custom_objects():
    return {
        'AdaptiveEmbedding': AdaptiveEmbedding,
        'AdaptiveSoftmax': AdaptiveSoftmax,
        'Scale': Scale,
        'Memory': Memory,
        'LayerNormalization': LayerNormalization,
        'FeedForward': FeedForward,
        'PositionalEmbedding': PositionalEmbedding,
        'RelativeBias': RelativeBias,
        'RelativePartialMultiHeadSelfAttention': RelativePartialMultiHeadSelfAttention,
    }
Example 7: test_sample
# Required imports: import keras_layer_normalization [as alias]
# Or: from keras_layer_normalization import LayerNormalization [as alias]
def test_sample(self):
    input_layer = keras.layers.Input(
        shape=(2, 3),
        name='Input',
    )
    norm_layer = LayerNormalization(
        name='Layer-Normalization',
    )(input_layer)
    model = keras.models.Model(
        inputs=input_layer,
        outputs=norm_layer,
    )
    model.compile(
        optimizer='adam',
        loss='mse',
        metrics={},
    )
    model.summary()
    inputs = np.array([[
        [0.2, 0.1, 0.3],
        [0.5, 0.1, 0.1],
    ]])
    predict = model.predict(inputs)
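    # Normalization runs over the last axis: [0.2, 0.1, 0.3] has mean 0.2 and
    # std ~0.0816, giving [0.0, -1.2247, 1.2247]; [0.5, 0.1, 0.1] has mean
    # ~0.2333 and std ~0.1886, giving [1.4142, -0.7071, -0.7071].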
    expected = np.asarray([[
        [0.0, -1.22474487, 1.22474487],
        [1.41421356, -0.707106781, -0.707106781],
    ]])
    self.assertTrue(np.allclose(expected, predict), predict)
    input_layer = keras.layers.Input(
        shape=(10, 256),
        name='Input',
    )
    norm_layer = LayerNormalization(
        name='Layer-Normalization',
        beta_initializer='ones',
    )(input_layer)
    model = keras.models.Model(
        inputs=input_layer,
        outputs=norm_layer,
    )
    model.compile(
        optimizer='adam',
        loss='mse',
        metrics={},
    )
    model.summary()
    inputs = np.zeros((2, 10, 256))
    predict = model.predict(inputs)
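    # All-zero inputs normalize to zero, so the output reduces to the bias
    # term beta, which beta_initializer='ones' sets to one everywhere.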
    expected = np.ones((2, 10, 256))
    self.assertTrue(np.allclose(expected, predict))
Example 8: test_fit_zeros
# Required imports: import keras_layer_normalization [as alias]
# Or: from keras_layer_normalization import LayerNormalization [as alias]
def test_fit_zeros(self):
    def _leaky_relu(x):
        return keras.activations.relu(x, alpha=0.01)

    input_layer = keras.layers.Input(
        shape=(2, 3),
        name='Input',
    )
    norm_layer = LayerNormalization(
        name='Layer-Normalization-1',
        trainable=False,
    )(input_layer)
    att_layer = MultiHeadAttention(
        head_num=3,
        activation=_leaky_relu,
        name='Multi-Head-Attentions',
    )(norm_layer)
    dense_layer = keras.layers.Dense(units=3, name='Dense-1')(att_layer)
    norm_layer = LayerNormalization(
        name='Layer-Normalization-2',
        trainable=False,
    )(dense_layer)
    dense_layer = keras.layers.Dense(units=3, name='Dense-2')(norm_layer)
    model = keras.models.Model(
        inputs=input_layer,
        outputs=dense_layer,
    )
    model.compile(
        optimizer=keras.optimizers.Adam(lr=1e-3),
        loss='mse',
        metrics={},
    )
    model.summary()

    def _generator_zeros(batch_size=32):
        while True:
            batch_inputs = np.zeros((batch_size, 2, 3))
            batch_outputs = np.asarray([[[0.0, -0.1, 0.2]] * 2] * batch_size)
            yield batch_inputs, batch_outputs
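    # The generator yields all-zero inputs with a fixed target, so a successful
    # fit means the network learns to emit the constant target.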
    model.fit_generator(
        generator=_generator_zeros(),
        steps_per_epoch=1000,
        epochs=10,
        validation_data=_generator_zeros(),
        validation_steps=100,
        callbacks=[
            keras.callbacks.EarlyStopping(monitor='val_loss', patience=5),
        ],
    )
    for inputs, _ in _generator_zeros(batch_size=3):
        predicts = model.predict(inputs)
        expect = np.round(np.asarray([[[0.0, -0.1, 0.2]] * 2] * 3), decimals=1)
        actual = np.round(predicts, decimals=1)
        self.assertTrue(np.allclose(expect, actual), (expect, actual))
        break
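As an aside, fit_generator is the legacy standalone-Keras API; in tf.keras 2.x the same training loop is usually written with model.fit, which accepts generators directly. A minimal sketch, assuming a tf.keras backend:

# Sketch for tf.keras, where fit_generator is deprecated in favour of fit:
model.fit(
    _generator_zeros(),
    steps_per_epoch=1000,
    epochs=10,
    validation_data=_generator_zeros(),
    validation_steps=100,
)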