This page collects typical usage examples of the Python class keras.engine.topology.Layer. If you are wondering what topology.Layer does, how to use it, or what working examples look like, the curated snippets below may help. You can also explore further usage examples from its containing module, keras.engine.topology.
The following presents 10 code examples of topology.Layer, sorted by popularity by default.
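Note that keras.engine.topology is a legacy import path (Keras 1.x and early 2.x); in current tf.keras the base class lives at tensorflow.keras.layers.Layer. A small compatibility pattern (an illustrative sketch, not taken from any of the examples below):

try:
    from keras.engine.topology import Layer   # legacy path used in these examples
except ImportError:
    from tensorflow.keras.layers import Layer  # newer Keras / tf.keras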
Example 1: __init__
# Required import: from keras.engine import topology [as alias]
# Or: from keras.engine.topology import Layer [as alias]
# Also needed here: from keras import initializers, regularizers, constraints
def __init__(self,
             W_regularizer=None,
             b_regularizer=None,
             W_constraint=None,
             b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements a Content Attention mechanism.
    Supports Masking.
    """
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(Attention, self).__init__(**kwargs)
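The extractor shows only __init__; the matching build/call of such a content-attention layer are cut off. Below is a minimal sketch of how they are commonly completed (the weight shapes and tanh/softmax-style scoring follow the widely circulated Keras attention recipe, not this repo's code verbatim; the backend import is an assumption):

import keras.backend as K

def build(self, input_shape):
    # expects (samples, steps, features); one scalar score per timestep
    assert len(input_shape) == 3
    self.W = self.add_weight(name='{}_W'.format(self.name),
                             shape=(input_shape[-1],),
                             initializer=self.init,
                             regularizer=self.W_regularizer,
                             constraint=self.W_constraint)
    if self.bias:
        # assumes a statically known number of steps
        self.b = self.add_weight(name='{}_b'.format(self.name),
                                 shape=(input_shape[1],),
                                 initializer='zeros',
                                 regularizer=self.b_regularizer,
                                 constraint=self.b_constraint)
    super(Attention, self).build(input_shape)

def call(self, x, mask=None):
    # unnormalised score per timestep: e = tanh(x.W + b)
    e = K.squeeze(K.dot(x, K.expand_dims(self.W)), axis=-1)
    if self.bias:
        e += self.b
    e = K.tanh(e)
    a = K.exp(e)
    if mask is not None:
        a *= K.cast(mask, K.floatx())  # zero out masked timesteps
    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    # attention-weighted sum over time -> (samples, features)
    return K.sum(x * K.expand_dims(a), axis=1)

def compute_output_shape(self, input_shape):
    return (input_shape[0], input_shape[-1])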
Example 2: __init__
# Required import: from keras.engine import topology [as alias]
# Or: from keras.engine.topology import Layer [as alias]
# Also needed here: from keras import initializers, regularizers, constraints
def __init__(self,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements a Content Attention mechanism.
    Supports Masking.
    """
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(Attention, self).__init__(**kwargs)
Example 3: __init__
# Required import: from keras.engine import topology [as alias]
# Or: from keras.engine.topology import Layer [as alias]
# Also needed here: from keras import initializations, regularizers, constraints (Keras 1.x API)
def __init__(self, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements a Content Attention mechanism.
    Supports Masking.
    """
    self.supports_masking = True
    self.init = initializations.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(Attention, self).__init__(**kwargs)
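Example 3 targets the Keras 1.x API: the initializer registry was called initializations before Keras 2 renamed it to initializers. A small shim (illustrative, not from the source repo) lets the snippet import cleanly on either version:

try:
    from keras import initializations  # Keras 1.x
except ImportError:
    from keras import initializers as initializations  # renamed in Keras 2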
Example 4: __init__
# Required import: from keras.engine import topology [as alias]
# Or: from keras.engine.topology import Layer [as alias]
# Also needed here: from keras import initializers, regularizers, constraints
def __init__(self,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True,
             return_attention=False,
             **kwargs):
    """
    Keras Layer that implements an Attention mechanism for temporal data.
    Supports Masking.
    Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
    # Input shape
        3D tensor with shape: `(samples, steps, features)`.
    # Output shape
        2D tensor with shape: `(samples, features)`.
    :param kwargs:
    Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
    The dimensions are inferred based on the output shape of the RNN.
    Note: The layer has been tested with Keras 1.x
    Example:
        # 1
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention())
        # next add a Dense layer (for classification/regression) or whatever...
        # 2 - Get the attention scores
        hidden = LSTM(64, return_sequences=True)(words)
        sentence, word_scores = Attention(return_attention=True)(hidden)
    """
    self.supports_masking = True
    self.return_attention = return_attention
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(Attention, self).__init__(**kwargs)
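A self-contained usage sketch for the return_attention=True branch (it assumes the Attention class from this example is defined along with its build/call; the vocabulary size, sequence length and layer widths are illustrative assumptions):

from keras.layers import Input, Embedding, LSTM, Dense
from keras.models import Model

words = Input(shape=(100,), dtype='int32')             # 100 token ids per sample
embedded = Embedding(input_dim=10000, output_dim=128)(words)
hidden = LSTM(64, return_sequences=True)(embedded)     # (samples, steps, 64)
sentence, word_scores = Attention(return_attention=True)(hidden)
prediction = Dense(1, activation='sigmoid')(sentence)  # e.g. a binary label

model = Model(words, prediction)
# a second model sharing the same weights, used only to inspect the scores
score_model = Model(words, word_scores)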
Example 5: createModel
# Required import: from keras.engine import topology [as alias]
# Or: from keras.engine.topology import Layer [as alias]
# Also needed here: from keras.layers import Input, Lambda, concatenate; from keras.models import Model
# (encode, encode_shared, decode and CustomLossLayer are repo-local helpers)
def createModel(patchSize, dHyper, dParam):
    # inputs: non-corrupted (reference) and corrupted (artefact) images
    x_ref = Input(shape=(1, patchSize[0], patchSize[1]))
    x_art = Input(shape=(1, patchSize[0], patchSize[1]))
    encoded_ref, conv_1_ref = encode(x_ref, patchSize)
    encoded_art, conv_1_art = encode(x_art, patchSize)
    # concatenate the encoded features along the batch axis
    conv_1 = concatenate([conv_1_ref, conv_1_art], axis=0)
    conv_2 = concatenate([encoded_ref, encoded_art], axis=0)
    # shared encoder
    z, z_mean, z_log_var, conv_3, conv_4 = encode_shared(conv_2, patchSize)
    # decoder
    decoded = decode(z, patchSize, conv_1, conv_2, conv_3, conv_4, dHyper['arch'])
    # split the concatenated batch back into its two halves
    decoded_ref2ref = Lambda(lambda input: input[:input.shape[0]//2, :, :, :], output_shape=(1, patchSize[0], patchSize[1]))(decoded)
    decoded_art2ref = Lambda(lambda input: input[input.shape[0]//2:, :, :, :], output_shape=(1, patchSize[0], patchSize[1]))(decoded)
    # feed everything into the custom loss layer
    [decoded_ref2ref, decoded_art2ref] = CustomLossLayer(dHyper, patchSize, dParam)([x_ref, decoded_ref2ref, decoded_art2ref, z_log_var, z_mean])
    # build the VAE model
    vae = Model([x_ref, x_art], [decoded_ref2ref, decoded_art2ref])
    return vae
Example 6: build_vae
# Required import: from keras.engine import topology [as alias]
# Or: from keras.engine.topology import Layer [as alias]
# Also needed here: from keras.layers import Input, Lambda, concatenate; from keras.models import Model
# (encode, encode_shared, decode and CustomLossLayer are repo-local helpers)
def build_vae(patchSize, dHyper):
    # inputs: non-corrupted (reference) and corrupted (artefact) images
    x_ref = Input(shape=(1, patchSize[0], patchSize[1]))
    x_art = Input(shape=(1, patchSize[0], patchSize[1]))
    # respective encoders
    encoded_ref = encode(x_ref, patchSize)
    encoded_art = encode(x_art, patchSize)
    # concatenate the encoded features along the batch axis
    combined = concatenate([encoded_ref, encoded_art], axis=0)
    # shared encoder
    z, z_mean, z_log_var = encode_shared(combined, patchSize)
    # decoder
    decoded = decode(z, patchSize, dHyper['dropout'])
    # split the concatenated batch back into its two halves
    decoded_ref2ref = Lambda(lambda input: input[:input.shape[0]//2, :, :, :], output_shape=(1, patchSize[0], patchSize[1]))(decoded)
    decoded_art2ref = Lambda(lambda input: input[input.shape[0]//2:, :, :, :], output_shape=(1, patchSize[0], patchSize[1]))(decoded)
    # feed everything into the custom loss layer
    [decoded_ref2ref, decoded_art2ref] = CustomLossLayer(dHyper, patchSize)([x_ref, decoded_ref2ref, decoded_art2ref, z_log_var, z_mean])
    # build the VAE model
    vae = Model([x_ref, x_art], [decoded_ref2ref, decoded_art2ref])
    return vae
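Examples 5 and 6 share one trick: stack the reference and artefact batches along axis 0, run them through a single shared encoder/decoder, then split the halves apart again with Lambda. A standalone sketch of just that split (shapes are illustrative; it uses K.shape for a backend-agnostic dynamic batch size, whereas the Theano-style input.shape[0] above relies on a symbolically known batch dimension):

import keras.backend as K
from keras.layers import Input, Lambda, concatenate
from keras.models import Model

a = Input(shape=(4,))
b = Input(shape=(4,))
both = concatenate([a, b], axis=0)                       # (2 * batch, 4)

top = Lambda(lambda t: t[:K.shape(t)[0] // 2])(both)     # first half
bottom = Lambda(lambda t: t[K.shape(t)[0] // 2:])(both)  # second half
model = Model([a, b], [top, bottom])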
Example 7: __init__
# Required import: from keras.engine import topology [as alias]
# Or: from keras.engine.topology import Layer [as alias]
# Also needed here: from keras import initializers, regularizers, constraints
def __init__(self, step_dim,
             W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """
    Keras Layer that implements an Attention mechanism for temporal data.
    Supports Masking.
    Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
    # Input shape
        3D tensor with shape: (samples, steps, features).
    # Output shape
        2D tensor with shape: (samples, features).
    :param kwargs:
    Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
    The dimensions are inferred based on the output shape of the RNN.
    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(Attention())
    """
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.step_dim = step_dim
    self.features_dim = 0
    super(Attention, self).__init__(**kwargs)
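Here features_dim = 0 is a placeholder: the real value is only known once the layer sees its input shape. A sketch of the matching build in the same style as the one after Example 1 (assumed, not from the source repo; step_dim fixes the bias length instead of reading it from a possibly dynamic steps dimension):

def build(self, input_shape):
    assert len(input_shape) == 3                   # (samples, steps, features)
    self.features_dim = input_shape[-1]
    self.W = self.add_weight(name='{}_W'.format(self.name),
                             shape=(self.features_dim,),
                             initializer=self.init,
                             regularizer=self.W_regularizer,
                             constraint=self.W_constraint)
    if self.bias:
        self.b = self.add_weight(name='{}_b'.format(self.name),
                                 shape=(self.step_dim,),
                                 initializer='zeros',
                                 regularizer=self.b_regularizer,
                                 constraint=self.b_constraint)
    super(Attention, self).build(input_shape)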
Example 8: build
# Required import: from keras.engine import topology [as alias]
# Or: from keras.engine.topology import Layer [as alias]
# Also needed here: from keras import backend as K
def build(self, input_shape):
    super(AttentionLSTM, self).build(input_shape)
    if hasattr(self.attention_vec, '_keras_shape'):
        attention_dim = self.attention_vec._keras_shape[1]
    else:
        raise Exception('Layer could not be built: no information about expected input shape.')
    self.U_a = self.inner_init((self.output_dim, self.output_dim),
                               name='{}_U_a'.format(self.name))
    self.b_a = K.zeros((self.output_dim,), name='{}_b_a'.format(self.name))
    self.U_m = self.inner_init((attention_dim, self.output_dim),
                               name='{}_U_m'.format(self.name))
    self.b_m = K.zeros((self.output_dim,), name='{}_b_m'.format(self.name))
    if self.single_attention_param:
        self.U_s = self.inner_init((self.output_dim, 1),
                                   name='{}_U_s'.format(self.name))
        self.b_s = K.zeros((1,), name='{}_b_s'.format(self.name))
    else:
        self.U_s = self.inner_init((self.output_dim, self.output_dim),
                                   name='{}_U_s'.format(self.name))
        self.b_s = K.zeros((self.output_dim,), name='{}_b_s'.format(self.name))
    self.trainable_weights += [self.U_a, self.U_m, self.U_s, self.b_a, self.b_m, self.b_s]
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
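The hasattr(self.attention_vec, '_keras_shape') check leans on a private attribute from the Keras 1.x era. On newer Keras versions the supported way to read a tensor's static shape is K.int_shape; a tiny standalone demonstration (shapes are illustrative):

import keras.backend as K
from keras.layers import Input

vec = Input(shape=(32,))
print(K.int_shape(vec))    # (None, 32); index [1] would supply attention_dim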
Example 9: reset_states
# Required import: from keras.engine import topology [as alias]
# Or: from keras.engine.topology import Layer [as alias]
# Also needed here: import numpy as np; from keras import backend as K
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise ValueError('If an RNN is stateful, a complete '
                         'input_shape must be provided '
                         '(including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim))]
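reset_states only applies to stateful RNNs, whose hidden state carries across batches until it is cleared explicitly. A generic training-loop sketch showing when it is typically called (all sizes and data are illustrative, not from this repo):

import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense

x_train = np.random.rand(64, 10, 4)     # 64 samples, 10 steps, 4 features
y_train = np.random.rand(64, 1)

model = Sequential([
    LSTM(32, batch_input_shape=(8, 10, 4), stateful=True),  # fixed batch size
    Dense(1),
])
model.compile(optimizer='adam', loss='mse')

for epoch in range(5):
    model.fit(x_train, y_train, batch_size=8, epochs=1, shuffle=False)
    model.reset_states()                # drop carried-over state between epochs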
Example 10: compute_output_shape
# Required import: from keras.engine import topology [as alias]
# Or: from keras.engine.topology import Layer [as alias]
def compute_output_shape(self, input_shape):
    return input_shape

#----------------------------------------------------------------------------
# Layer normalization. Custom reimplementation based on the paper:
# https://arxiv.org/abs/1607.06450
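The layer-normalization implementation that this trailing comment introduces is truncated in the snippet. A minimal sketch following the referenced paper (Ba et al., 2016); the gamma/beta names and the epsilon default are the usual convention, not taken from this repo:

import keras.backend as K
from keras.engine.topology import Layer

class LayerNorm(Layer):
    def __init__(self, eps=1e-6, **kwargs):
        self.eps = eps
        super(LayerNorm, self).__init__(**kwargs)

    def build(self, input_shape):
        # one learned scale and shift per feature
        self.gamma = self.add_weight(name='gamma', shape=(input_shape[-1],),
                                     initializer='ones', trainable=True)
        self.beta = self.add_weight(name='beta', shape=(input_shape[-1],),
                                    initializer='zeros', trainable=True)
        super(LayerNorm, self).build(input_shape)

    def call(self, x):
        # normalise over the feature axis, then rescale and shift
        mean = K.mean(x, axis=-1, keepdims=True)
        std = K.std(x, axis=-1, keepdims=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta

    def compute_output_shape(self, input_shape):
        return input_shape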