This article collects typical usage examples of the keras.layers.Layer class in Python. If you have been wondering how keras.layers.Layer is used in practice, the curated code examples below may help. You can also explore further usage examples from the keras.layers module.
The following presents 15 code examples of layers.Layer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: modelDiscriminator
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def modelDiscriminator(self, name=None):
    # Specify input
    input_img = Input(shape=self.img_shape)
    # Layer 1 (instance normalization is not used for this layer)
    x = self.ck(input_img, 64, False, 2)
    # Layer 2
    x = self.ck(x, 128, True, 2)
    # Layer 3
    x = self.ck(x, 256, True, 2)
    # Layer 4
    x = self.ck(x, 512, True, 1)
    # Output layer
    if self.use_patchgan:
        x = Conv2D(filters=1, kernel_size=4, strides=1, padding='same')(x)
    else:
        x = Flatten()(x)
        x = Dense(1)(x)
    # x = Activation('sigmoid')(x) - no sigmoid, to avoid a discriminator cost near fp32 machine epsilon
    return Model(inputs=input_img, outputs=x, name=name)
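The `ck` helper is not included in this snippet. Judging from the call signature `self.ck(x, filters, use_normalization, stride)` and the CycleGAN discriminator architecture this model follows, it is presumably a 4x4 convolution block with optional instance normalization and a LeakyReLU activation. A minimal sketch under that assumption (the keras-contrib import is also an assumption):

from keras.layers import Conv2D, LeakyReLU
from keras_contrib.layers import InstanceNormalization  # assumed dependency

def ck(self, x, k, use_normalization, stride):
    # C_k block: 4x4 convolution with k filters, optional instance norm, LeakyReLU
    x = Conv2D(filters=k, kernel_size=4, strides=stride, padding='same')(x)
    if use_normalization:
        x = InstanceNormalization(axis=3)(x)
    x = LeakyReLU(alpha=0.2)(x)
    return x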
Example 2: custom_layer
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def custom_layer():
    class MyDense(Layer):
        def __init__(self, output_dim, **kwargs):
            self.output_dim = output_dim
            super(MyDense, self).__init__(**kwargs)

        def build(self, input_shape):
            # Create a trainable kernel of shape (input_dim, output_dim)
            self.kernel = self.add_weight(name='kernel',
                                          shape=(input_shape[1], self.output_dim),
                                          initializer='uniform',
                                          trainable=True)
            super(MyDense, self).build(input_shape)

        def call(self, x):
            return K.dot(x, self.kernel)

        def compute_output_shape(self, input_shape):
            return (input_shape[0], self.output_dim)

        def get_config(self):
            return {'output_dim': self.output_dim}

    return MyDense
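A quick usage sketch for the returned class (the shapes here are illustrative):

import numpy as np
from keras.layers import Input
from keras.models import Model

MyDense = custom_layer()
inp = Input(shape=(8,))
out = MyDense(4)(inp)  # learns an (8, 4) kernel and projects 8 -> 4
model = Model(inputs=inp, outputs=out)
print(model.predict(np.random.rand(2, 8)).shape)  # (2, 4)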
Example 3: _validate_input_shape
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def _validate_input_shape(self, input_shape):
    if len(input_shape) != 3:
        raise ValueError("Layer received an input shape {0} but expected three inputs (Q, V, K).".format(input_shape))
    else:
        if input_shape[0][0] != input_shape[1][0] or input_shape[1][0] != input_shape[2][0]:
            raise ValueError("All three inputs (Q, V, K) have to have the same batch size; received batch sizes: {0}, {1}, {2}".format(input_shape[0][0], input_shape[1][0], input_shape[2][0]))
        if input_shape[0][1] != input_shape[1][1] or input_shape[1][1] != input_shape[2][1]:
            raise ValueError("All three inputs (Q, V, K) have to have the same length; received lengths: {0}, {1}, {2}".format(input_shape[0][1], input_shape[1][1], input_shape[2][1]))
        if input_shape[0][2] != input_shape[1][2]:
            raise ValueError("Input shapes of Q {0} and V {1} do not match.".format(input_shape[0], input_shape[1]))
Example 4: layernorm
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def layernorm(x, axis, epsilon, gamma, beta):
    # assert self.built, 'Layer must be built before being called'
    input_shape = K.shape(x)
    reduction_axes = list(range(K.ndim(x)))
    del reduction_axes[axis]
    del reduction_axes[0]
    broadcast_shape = [1] * K.ndim(x)
    broadcast_shape[axis] = input_shape[axis]
    broadcast_shape[0] = K.shape(x)[0]

    # Perform normalization: centering and reduction
    mean = K.mean(x, axis=reduction_axes)
    broadcast_mean = K.reshape(mean, broadcast_shape)
    x_centred = x - broadcast_mean
    variance = K.mean(x_centred ** 2, axis=reduction_axes) + epsilon
    broadcast_variance = K.reshape(variance, broadcast_shape)
    x_normed = x_centred / K.sqrt(broadcast_variance)

    # Perform scaling and shifting
    broadcast_shape_params = [1] * K.ndim(x)
    broadcast_shape_params[axis] = K.shape(x)[axis]
    broadcast_gamma = K.reshape(gamma, broadcast_shape_params)
    broadcast_beta = K.reshape(beta, broadcast_shape_params)
    x_LN = broadcast_gamma * x_normed + broadcast_beta
    return x_LN
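Note that the reduction above averages over every axis except the batch axis (0) and the parameter axis (`axis`). A numpy sanity check of the same computation, with illustrative shapes and gamma/beta left at their neutral values (gamma = 1, beta = 0):

import numpy as np

x = np.random.rand(2, 5, 8).astype('float32')  # (batch, time, channels), axis=2
mu = x.mean(axis=1, keepdims=True)                       # shape (2, 1, 8)
var = ((x - mu) ** 2).mean(axis=1, keepdims=True) + 1e-5
x_normed = (x - mu) / np.sqrt(var)                       # matches x_normed above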
Example 5: call
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def call(self, x, mask=None):
    assert self.built, 'Layer must be built before being called'
    return layernorm(x, self.axis, self.epsilon, self.gamma, self.beta)
Example 6: __init__
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def __init__(self,
             center=True,
             scale=True,
             epsilon=None,
             gamma_initializer='ones',
             beta_initializer='zeros',
             gamma_regularizer=None,
             beta_regularizer=None,
             gamma_constraint=None,
             beta_constraint=None,
             **kwargs):
    """Layer normalization layer.

    See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)

    :param center: Add an offset parameter if it is True.
    :param scale: Add a scale parameter if it is True.
    :param epsilon: Epsilon for calculating variance.
    :param gamma_initializer: Initializer for the gamma weight.
    :param beta_initializer: Initializer for the beta weight.
    :param gamma_regularizer: Optional regularizer for the gamma weight.
    :param beta_regularizer: Optional regularizer for the beta weight.
    :param gamma_constraint: Optional constraint for the gamma weight.
    :param beta_constraint: Optional constraint for the beta weight.
    :param kwargs: Passed through to the base Layer.
    """
    super(LayerNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    self.center = center
    self.scale = scale
    if epsilon is None:
        epsilon = K.epsilon() * K.epsilon()
    self.epsilon = epsilon
    self.gamma_initializer = keras.initializers.get(gamma_initializer)
    self.beta_initializer = keras.initializers.get(beta_initializer)
    self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
    self.beta_regularizer = keras.regularizers.get(beta_regularizer)
    self.gamma_constraint = keras.constraints.get(gamma_constraint)
    self.beta_constraint = keras.constraints.get(beta_constraint)
    self.gamma, self.beta = None, None
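The class's `build` and `call` are not shown in this snippet (Examples 4 and 5 show the corresponding pieces), but usage would look like this minimal sketch:

from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(10, 64))
x = LayerNormalization()(inp)  # defaults: center and scale both enabled
out = Dense(2)(x)
model = Model(inputs=inp, outputs=out)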
Example 7: modelGenerator
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def modelGenerator(self, name=None):
    # Specify input
    input_img = Input(shape=self.img_shape)
    # Layer 1
    x = ReflectionPadding2D((3, 3))(input_img)
    x = self.c7Ak(x, 32)
    # Layer 2
    x = self.dk(x, 64)
    # Layer 3
    x = self.dk(x, 128)
    if self.use_multiscale_discriminator:
        # Layer 3.5
        x = self.dk(x, 256)
    # Layers 4-12: residual blocks
    for _ in range(4, 13):
        x = self.Rk(x)
    if self.use_multiscale_discriminator:
        # Layer 12.5
        x = self.uk(x, 128)
    # Layer 13
    x = self.uk(x, 64)
    # Layer 14
    x = self.uk(x, 32)
    x = ReflectionPadding2D((3, 3))(x)
    x = Conv2D(self.channels, kernel_size=7, strides=1)(x)
    x = Activation('tanh')(x)  # They say they use ReLU, but really they do not
    return Model(inputs=input_img, outputs=x, name=name)
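The helpers `c7Ak`, `dk`, `Rk` and `uk` are not part of this snippet; the names follow the CycleGAN paper's notation (7x7 convolution block, stride-2 downsampling block, residual block, upsampling block). A rough sketch of `dk` under those assumptions (the keras-contrib import is also an assumption):

from keras.layers import Conv2D, Activation
from keras_contrib.layers import InstanceNormalization  # assumed dependency

def dk(self, x, k):
    # d_k block: 3x3 stride-2 convolution to k feature maps, instance norm, ReLU
    x = Conv2D(filters=k, kernel_size=3, strides=2, padding='same')(x)
    x = InstanceNormalization(axis=3)(x)
    x = Activation('relu')(x)
    return x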
#===============================================================================
# Test - simple model
Example 8: build
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def build(self, input_shape):
    assert len(input_shape) >= 2
    F = input_shape[0][-1]
    # Initialize weights for each attention head
    for head in range(self.attn_heads):
        # Layer kernel
        kernel = self.add_weight(shape=(F, self.F_),
                                 initializer=self.kernel_initializer,
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint,
                                 name='kernel_{}'.format(head))
        self.kernels.append(kernel)
        # Layer bias
        if self.use_bias:
            bias = self.add_weight(shape=(self.F_,),
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint,
                                   name='bias_{}'.format(head))
            self.biases.append(bias)
        # Attention kernels
        attn_kernel_self = self.add_weight(shape=(self.F_, 1),
                                           initializer=self.attn_kernel_initializer,
                                           regularizer=self.attn_kernel_regularizer,
                                           constraint=self.attn_kernel_constraint,
                                           name='attn_kernel_self_{}'.format(head))
        attn_kernel_neighs = self.add_weight(shape=(self.F_, 1),
                                             initializer=self.attn_kernel_initializer,
                                             regularizer=self.attn_kernel_regularizer,
                                             constraint=self.attn_kernel_constraint,
                                             name='attn_kernel_neigh_{}'.format(head))
        self.attn_kernels.append([attn_kernel_self, attn_kernel_neighs])
    self.built = True
Example 9: __init__
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def __init__(self, inner_layer_arg, **kwargs):
    # Initialise based on one of the three initialisation methods
    # Case 1: inner_layer_arg is the conv_width
    if isinstance(inner_layer_arg, (int, long)):
        self.conv_width = inner_layer_arg
        dense_layer_kwargs, kwargs = filter_func_args(layers.Dense.__init__,
                                                      kwargs, overrule_args=['name'])
        self.create_inner_layer_fn = lambda: layers.Dense(self.conv_width, **dense_layer_kwargs)
    # Case 2: an initialised (but unbuilt) keras layer is given
    elif isinstance(inner_layer_arg, layers.Layer):
        assert not inner_layer_arg.built, 'When initialising with a keras layer, it cannot be built.'
        _, self.conv_width = inner_layer_arg.get_output_shape_for((None, None))
        # layer_from_config will mutate the config dict, therefore create a get fn
        self.create_inner_layer_fn = lambda: layer_from_config(dict(
            class_name=inner_layer_arg.__class__.__name__,
            config=inner_layer_arg.get_config()))
    # Case 3: a function is provided that returns an initialised keras layer
    elif callable(inner_layer_arg):
        example_instance = inner_layer_arg()
        assert isinstance(example_instance, layers.Layer), 'When initialising with a function, the function has to return a keras layer'
        assert not example_instance.built, 'When initialising with a keras layer, it cannot be built.'
        _, self.conv_width = example_instance.get_output_shape_for((None, None))
        self.create_inner_layer_fn = inner_layer_arg
    else:
        raise ValueError('NeuralGraphHidden has to be initialised with 1) an int conv_width, 2) a keras layer instance, or 3) a function returning a keras layer instance.')
    super(NeuralGraphHidden, self).__init__(**kwargs)
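The three accepted initialisation styles, as a usage sketch (the class name comes from the snippet; the concrete values are illustrative):

layer = NeuralGraphHidden(64)                        # Case 1: int conv_width
layer = NeuralGraphHidden(layers.Dense(64))          # Case 2: unbuilt layer instance
layer = NeuralGraphHidden(lambda: layers.Dense(64))  # Case 3: factory function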
Example 10: copy_weights
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def copy_weights(src_model, dst_model, must_exist=True):
    """Copy weights from `src_model` to `dst_model`.

    Parameters
    ----------
    src_model
        Keras source model.
    dst_model
        Keras destination model.
    must_exist: bool
        If `True`, raises `ValueError` if a layer in `dst_model` does not
        exist in `src_model`.

    Returns
    -------
    list
        Names of layers that were copied.
    """
    copied = []
    for dst_layer in dst_model.layers:
        for src_layer in src_model.layers:
            if src_layer.name == dst_layer.name:
                break
        else:
            # No layer with a matching name was found in `src_model`.
            if must_exist:
                raise ValueError('Layer "%s" not found!' % dst_layer.name)
            continue
        dst_layer.set_weights(src_layer.get_weights())
        copied.append(dst_layer.name)
    return copied
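A hedged usage sketch (the model names are hypothetical): transfer weights between two models whose shared layers have matching names, skipping layers absent from the source:

copied = copy_weights(pretrained_model, finetune_model, must_exist=False)
print('Copied layers:', copied)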
Example 11: embed_input
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def embed_input(self,
                input_layer: Layer,
                embed_function: Callable[[Layer, str, str], Layer],
                text_trainer,
                embedding_suffix: str = ''):
    return embed_function(input_layer,
                          embedding_name='characters' + embedding_suffix,
                          vocab_name='words')
Example 12: embed_input
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def embed_input(self,
                input_layer: Layer,
                embed_function: Callable[[Layer, str, str], Layer],
                text_trainer,
                embedding_suffix: str = ""):
    # pylint: disable=protected-access
    return embed_function(input_layer,
                          embedding_name='words' + embedding_suffix,
                          vocab_name='words')
Example 13: get_custom_objects
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def get_custom_objects(self) -> Dict[str, 'Layer']:  # pylint: disable=no-self-use
    """
    If you use any custom ``Layers`` in your ``embed_input`` method, you need to return them
    here, so that the ``TextTrainer`` can correctly load models.
    """
    return {}
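The returned mapping is meant to be passed to Keras when deserialising a saved model; a sketch (the file name and the `tokenizer` instance are illustrative):

from keras.models import load_model

model = load_model('model.h5', custom_objects=tokenizer.get_custom_objects())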
Example 14: embed_input
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def embed_input(self,
                input_layer: Layer,
                embed_function: Callable[[Layer, str, str], Layer],
                text_trainer,
                embedding_suffix: str = ''):
    """
    Applies embedding layers to the input_layer. See :func:`TextTrainer._embed_input
    <deep_qa.training.text_trainer.TextTrainer._embed_input>` for a more detailed comment on
    what this method does.

    Parameters
    ----------
    input_layer: Keras ``Input()`` layer
        The layer to embed.
    embed_function: Callable[['Layer', str, str], 'Tensor']
        This should be the __get_embedded_input method from your instantiated ``TextTrainer``.
        This function actually applies an ``Embedding`` layer (and maybe also a projection and
        dropout) to the input layer.
    text_trainer: TextTrainer
        Simple ``Tokenizers`` will just need to use the ``embed_function`` that gets passed as
        a parameter here, but complex ``Tokenizers`` might need more than just an embedding
        function. So that you can get an encoder or other things from the ``TextTrainer`` here
        if you need them, we take this object as a parameter.
    embedding_suffix: str, optional (default="")
        A suffix to add to embedding keys that we use, so that, e.g., you could specify several
        different word embedding matrices, for whatever reason.
    """
    raise NotImplementedError
Example 15: reset_states
# Module to import: from keras import layers [as alias]
# Or: from keras.layers import Layer [as alias]
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete ' +
                        'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim))]
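`reset_states` only makes sense for a stateful layer built with a fixed batch size. A minimal illustration with a built-in recurrent layer, which follows the same contract (shapes are illustrative):

from keras.layers import Input, SimpleRNN
from keras.models import Model

inp = Input(batch_shape=(8, 10, 16))  # stateful layers need an explicit batch size
rnn = SimpleRNN(32, stateful=True)
model = Model(inputs=inp, outputs=rnn(inp))
rnn.reset_states()  # zero the carried hidden state between sequences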