本文整理汇总了Python中keras.initializers.Ones方法的典型用法代码示例。如果您正苦于以下问题:Python initializers.Ones方法的具体用法?Python initializers.Ones怎么用?Python initializers.Ones使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.initializers
的用法示例。
在下文中一共展示了initializers.Ones方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import Ones [as 别名]
def __init__(self, output_dim, **kwargs):
    """Record the target feature width; everything else goes to Keras.

    Args:
        output_dim: width of the trainable scaling matrix built later
            in ``build()``.
        **kwargs: standard ``keras.layers.Layer`` keyword arguments.
    """
    # Let the base Layer set itself up first, then store our own config.
    super(ScaleLayer, self).__init__(**kwargs)
    self.output_dim = output_dim
def build(self, input_shape):
    """Create a trainable weight matrix for this layer.

    Args:
        input_shape: input shape tuple; ``input_shape[1]`` is the incoming
            feature size.
    """
    # BUGFIX: the original assigned to ``self.weights``, but ``weights`` is a
    # read-only property on keras.layers.Layer, so that assignment raises an
    # AttributeError at build time. Store the variable under a dedicated
    # attribute instead (the saved weight's *name* is unchanged).
    self.scale = self.add_weight(
        name='weights',
        shape=(input_shape[1], self.output_dim),
        initializer=Ones(),
        trainable=True)
    super(ScaleLayer, self).build(input_shape)
    # Be sure to call this at the end
def call(self, x):
    """Multiply the input element-wise by the learned scale matrix."""
    return x * self.scale
def compute_output_shape(self, input_shape):
    """The batch dimension passes through; features become ``output_dim``."""
    batch_size = input_shape[0]
    return (batch_size, self.output_dim)
开发者ID:PacktPublishing,项目名称:Hands-On-Generative-Adversarial-Networks-with-Keras,代码行数:21,代码来源:models.py
示例2: build
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import Ones [as 别名]
def build(self, input_shape):
    """Create per-feature gain and bias parameters over the last axis."""
    feature_shape = (input_shape[-1],)
    # Gain starts at 1 and bias at 0, so the transform is initially identity.
    self._g = self.add_weight(name='gain',
                              shape=feature_shape,
                              initializer=Ones(),
                              trainable=True)
    self._b = self.add_weight(name='bias',
                              shape=feature_shape,
                              initializer=Zeros(),
                              trainable=True)
示例3: build
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import Ones [as 别名]
def build(self, input_shape):
    """Allocate normalization parameters over the trailing feature axis."""
    param_shape = input_shape[-1:]
    # gamma=1 / beta=0 => the layer starts out as a pure normalization.
    self.gamma = self.add_weight(name='gamma', shape=param_shape,
                                 initializer=Ones(), trainable=True)
    self.beta = self.add_weight(name='beta', shape=param_shape,
                                initializer=Zeros(), trainable=True)
    super().build(input_shape)
示例4: build
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import Ones [as 别名]
def build(self, input_shape):
    """Create the gamma (scale) and beta (shift) weights of layer norm."""
    feat_shape = input_shape[-1:]
    # Ones/Zeros init keeps the layer an identity mapping at the start.
    self.gamma = self.add_weight(
        name='gamma', shape=feat_shape, initializer=Ones(), trainable=True)
    self.beta = self.add_weight(
        name='beta', shape=feat_shape, initializer=Zeros(), trainable=True)
    super(LayerNormalization, self).build(input_shape)
示例5: build
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import Ones [as 别名]
def build(self, input_shape):
    """Create stacked-LSTM weights: one fused kernel and one bias per depth level.

    Each level stores a single ``(..., units * 4)`` matrix holding the four
    LSTM gate transforms side by side; biases follow the same fused layout.
    """
    input_dim = input_shape[-1]
    self.kernels = []
    self.biases = []
    for i in range(self.depth):
        if i == 0:
            # First level: separate input->hidden and hidden->hidden kernels,
            # concatenated along axis 0 so one matmul can cover both inputs.
            input_kernel = self.add_weight(shape=(input_dim, self.units * 4),
                                           name='input_kernel_%d' % (i + 1),
                                           initializer=self.kernel_initializer,
                                           regularizer=self.kernel_regularizer,
                                           constraint=self.kernel_constraint)
            hidden_kernel = self.add_weight(shape=(self.units, self.units * 4),
                                            name='kernel_%d' % (i + 1),
                                            initializer=self.recurrent_initializer,
                                            regularizer=self.recurrent_regularizer,
                                            constraint=self.recurrent_constraint)
            kernel = K.concatenate([input_kernel, hidden_kernel], axis=0)
        else:
            # Deeper levels take (units * 2) input features — presumably the
            # previous level's output concatenated with a state; confirm
            # against this cell's call() implementation.
            kernel = self.add_weight(shape=(self.units * 2, self.units * 4),
                                     name='kernel_%d' % (i + 1),
                                     initializer=self.recurrent_initializer,
                                     regularizer=self.recurrent_regularizer,
                                     constraint=self.recurrent_constraint)
        self.kernels.append(kernel)
    if self.use_bias:
        if self.unit_forget_bias:
            # Custom bias init: the second units-wide slice starts at 1
            # (conventionally the forget gate in Keras' i,f,c,o gate order,
            # so the cell remembers by default); the remaining slices use the
            # configured bias initializer. Slices: units + units + units*2.
            def bias_initializer(_, *args, **kwargs):
                return K.concatenate([
                    self.bias_initializer((self.units,), *args, **kwargs),
                    initializers.Ones()((self.units,), *args, **kwargs),
                    self.bias_initializer((self.units * 2,), *args, **kwargs),
                ])
        else:
            bias_initializer = self.bias_initializer
        for i in range(self.depth):
            bias = self.add_weight(shape=(self.units * 4,),
                                   name='bias_%d' % (i + 1),
                                   initializer=bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)
            self.biases.append(bias)
    else:
        self.biases = None
    self.built = True
示例6: tp1_node_update
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import Ones [as 别名]
def tp1_node_update(graph_node_embs, node_rel, node_rel_weight, max_nodes, max_bi_relations, embed_dim, label):
    """Gather node-pair embeddings per relation, transform them with two
    dense+ReLU stages, weight them, and sum over each node's relations.

    graph_node_embs has shape (batch_size, max_nodes per graph, embed_dim feats).

    Args:
        graph_node_embs: node embedding tensor, shape (batch, max_nodes, embed_dim).
        node_rel: index tensor consumed by ``gather_layer`` — presumably
            relation endpoint indices; confirm against gather_layer's contract.
        node_rel_weight: per-relation weights, reshaped below to
            (max_nodes * max_bi_relations,).
        max_nodes: maximum number of nodes per graph.
        max_bi_relations: maximum number of (bidirectional) relations per node.
        embed_dim: embedding feature size; also used as the dense layer width.
        label: string prefix used to name the layers of this sub-network.

    Returns:
        Tensor of shape (batch, max_nodes, dense_dim) after summing over the
        relation axis.
    """
    dense_dim = embed_dim
    # Gathering pairs doubles the feature axis: each entry holds 2 * embed_dim.
    x = gather_layer([graph_node_embs, node_rel])
    logging.debug('After gather3 shape: {0}'.format(x.shape))
    x = Reshape((max_nodes * max_bi_relations, 2 * embed_dim))(x)
    # Ones/Zeros initialization here looks like a debugging choice — TODO confirm.
    x = TimeDistributed(
        Dense(
            dense_dim,
            kernel_initializer=initializers.Ones(),
            bias_initializer=initializers.Zeros(),
            name=label + '_dense1'))(x)
    # TODO: re-enable the batch normalization.
    # x = BatchNormalization(axis=2, name=label + '_bn1')(x)
    x = Activation('relu')(x)
    x = TimeDistributed(
        Dense(
            dense_dim,
            kernel_initializer=initializers.Ones(),
            bias_initializer=initializers.Zeros(),
            name=label + '_dense2'))(x)
    # x = BatchNormalization(axis=2, name=label + '_bn2')(x)
    x = Activation('relu')(x)
    # Broadcast the per-relation weight across the feature axis:
    # (batch, N*R) -> (batch, dense_dim, N*R) -> (batch, N*R, dense_dim).
    normalizer = Reshape((max_nodes * max_bi_relations,))(node_rel_weight)
    normalizer = RepeatVector(dense_dim)(normalizer)
    normalizer = Permute((2, 1))(normalizer)
    x = Multiply()([x, normalizer])
    x = Reshape((max_nodes, max_bi_relations, dense_dim))(x)
    # NOTE(review): summing over axis=2 of (batch, max_nodes, max_bi_relations,
    # dense_dim) yields (batch, max_nodes, dense_dim); the declared
    # output_shape of (None, max_nodes * max_bi_relations, dense_dim) looks
    # inconsistent with that — verify.
    x = Lambda(
        lambda xin: K.sum(xin, axis=2),
        output_shape=(None, max_nodes * max_bi_relations, dense_dim),
        name=label + '_integrate')(x)
    return x
# TODO: Dense use_bias=True