This page collects typical usage examples of the Python method keras.initializers.Ones. If you have been wondering what initializers.Ones does, or how to use it in practice, the curated code examples below may help. You can also explore other usages of its containing module, keras.initializers.
Six code examples of initializers.Ones are shown below, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code samples.
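Before the examples, here is a minimal, self-contained sketch (the layer size of 8 is arbitrary) of what the Ones initializer does: every entry of the weight tensor starts at 1.0.

from keras import initializers
from keras.layers import Dense

# A Dense layer whose kernel starts as all ones and whose bias starts
# as all zeros; with these settings its initial output is simply the
# sum of the input features.
layer = Dense(8,
              kernel_initializer=initializers.Ones(),
              bias_initializer=initializers.Zeros())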
Example 1: __init__
# Required import: from keras import initializers [as alias]
# or: from keras.initializers import Ones [as alias]
def __init__(self, output_dim, **kwargs):
    self.output_dim = output_dim
    super(ScaleLayer, self).__init__(**kwargs)

def build(self, input_shape):
    # Create a trainable per-feature scale for this layer. The
    # attribute is named `scale` rather than `weights`, since assigning
    # to `self.weights` would clash with the read-only Keras Layer
    # property of the same name. The elementwise product in call()
    # requires output_dim to equal the input feature dimension.
    self.scale = self.add_weight(name='scale',
                                 shape=(self.output_dim,),
                                 initializer=Ones(),
                                 trainable=True)
    super(ScaleLayer, self).build(input_shape)  # Be sure to call this at the end

def call(self, x):
    return x * self.scale

def compute_output_shape(self, input_shape):
    return (input_shape[0], self.output_dim)

Developer: PacktPublishing, Project: Hands-On-Generative-Adversarial-Networks-with-Keras, Lines: 21, Source: models.py
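A hedged usage sketch for ScaleLayer (the 16-unit sizes are assumed for illustration): because the scale weights start at ones, the layer initially acts as the identity.

from keras.models import Sequential

# Assumed toy model: scales 16 input features elementwise; output_dim
# must match the input feature dimension for the product to broadcast.
model = Sequential([ScaleLayer(16, input_shape=(16,))])
model.compile(optimizer='sgd', loss='mse')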
Example 2: build
# Required import: from keras import initializers [as alias]
# or: from keras.initializers import Ones [as alias]
def build(self, input_shape):
    # Trainable gain (initialized to ones) and bias (initialized to
    # zeros), one value per feature on the last axis; this is the usual
    # parameter pair for layer normalization.
    self._g = self.add_weight(name='gain',
                              shape=(input_shape[-1],),
                              initializer=Ones(),
                              trainable=True)
    self._b = self.add_weight(name='bias',
                              shape=(input_shape[-1],),
                              initializer=Zeros(),
                              trainable=True)
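The matching call() is not shown on this page; here is a plausible sketch, assuming the layer implements standard layer normalization over the last axis (the epsilon of 1e-6 is an assumed constant, not taken from the source):

from keras import backend as K

def call(self, x):
    # Normalize over the feature axis, then rescale with the trained
    # gain and shift with the trained bias.
    mean = K.mean(x, axis=-1, keepdims=True)
    std = K.std(x, axis=-1, keepdims=True)
    return self._g * (x - mean) / (std + 1e-6) + self._b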
Example 3: build
# Required import: from keras import initializers [as alias]
# or: from keras.initializers import Ones [as alias]
def build(self, input_shape):
    # gamma scales and beta shifts the normalized activations.
    self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],
                                 initializer=Ones(), trainable=True)
    self.beta = self.add_weight(name='beta', shape=input_shape[-1:],
                                initializer=Zeros(), trainable=True)
    super().build(input_shape)
Example 4: build
# Required import: from keras import initializers [as alias]
# or: from keras.initializers import Ones [as alias]
def build(self, input_shape):
    self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],
                                 initializer=Ones(), trainable=True)
    self.beta = self.add_weight(name='beta', shape=input_shape[-1:],
                                initializer=Zeros(), trainable=True)
    super(LayerNormalization, self).build(input_shape)
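Assuming the surrounding LayerNormalization class is complete (i.e., it also defines a call() that applies gamma and beta), it plugs into a model like any other layer; the 64-unit sizes below are arbitrary:

from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(64,))
out = LayerNormalization()(Dense(64)(inp))
model = Model(inp, out)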
Example 5: build
# Required import: from keras import initializers [as alias]
# or: from keras.initializers import Ones [as alias]
def build(self, input_shape):
    input_dim = input_shape[-1]
    self.kernels = []
    self.biases = []
    for i in range(self.depth):
        if i == 0:
            # First layer: separate input and recurrent kernels,
            # concatenated into one (input_dim + units, units * 4) kernel.
            input_kernel = self.add_weight(shape=(input_dim, self.units * 4),
                                           name='input_kernel_%d' % (i + 1),
                                           initializer=self.kernel_initializer,
                                           regularizer=self.kernel_regularizer,
                                           constraint=self.kernel_constraint)
            hidden_kernel = self.add_weight(shape=(self.units, self.units * 4),
                                            name='kernel_%d' % (i + 1),
                                            initializer=self.recurrent_initializer,
                                            regularizer=self.recurrent_regularizer,
                                            constraint=self.recurrent_constraint)
            kernel = K.concatenate([input_kernel, hidden_kernel], axis=0)
        else:
            kernel = self.add_weight(shape=(self.units * 2, self.units * 4),
                                     name='kernel_%d' % (i + 1),
                                     initializer=self.recurrent_initializer,
                                     regularizer=self.recurrent_regularizer,
                                     constraint=self.recurrent_constraint)
        self.kernels.append(kernel)

    if self.use_bias:
        if self.unit_forget_bias:
            # Initialize the forget-gate slice of each bias to ones so
            # the memory cells start out retaining their state; the
            # remaining gates use the configured bias_initializer.
            def bias_initializer(_, *args, **kwargs):
                return K.concatenate([
                    self.bias_initializer((self.units,), *args, **kwargs),
                    initializers.Ones()((self.units,), *args, **kwargs),
                    self.bias_initializer((self.units * 2,), *args, **kwargs),
                ])
        else:
            bias_initializer = self.bias_initializer
        for i in range(self.depth):
            bias = self.add_weight(shape=(self.units * 4,),
                                   name='bias_%d' % (i + 1),
                                   initializer=bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)
            self.biases.append(bias)
    else:
        self.biases = None
    self.built = True
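To make the unit_forget_bias trick concrete, here is a NumPy sketch (assuming bias_initializer is Zeros) of the bias vector the closure above produces for units = 3; the gate slices are laid out as [input, forget, cell, output]:

import numpy as np

units = 3
bias = np.concatenate([np.zeros(units),       # input gate
                       np.ones(units),        # forget gate starts at 1
                       np.zeros(units * 2)])  # cell and output gates
# bias == [0. 0. 0. 1. 1. 1. 0. 0. 0. 0. 0. 0.]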
Example 6: tp1_node_update
# Required import: from keras import initializers [as alias]
# or: from keras.initializers import Ones [as alias]
def tp1_node_update(graph_node_embs, node_rel, node_rel_weight, max_nodes,
                    max_bi_relations, embed_dim, label):
    """graph_node_embs has shape (batch_size, max_nodes per graph, embed_dim feats)."""
    dense_dim = embed_dim
    x = gather_layer([graph_node_embs, node_rel])
    logging.debug('After gather3 shape: {0}'.format(x.shape))
    x = Reshape((max_nodes * max_bi_relations, 2 * embed_dim))(x)
    x = TimeDistributed(
        Dense(dense_dim,
              kernel_initializer=initializers.Ones(),
              bias_initializer=initializers.Zeros(),
              name=label + '_dense1'))(x)
    # TODO: re-enable the batch normalization.
    # x = BatchNormalization(axis=2, name=label + '_bn1')(x)
    x = Activation('relu')(x)
    x = TimeDistributed(
        Dense(dense_dim,
              kernel_initializer=initializers.Ones(),
              bias_initializer=initializers.Zeros(),
              name=label + '_dense2'))(x)
    # x = BatchNormalization(axis=2, name=label + '_bn2')(x)
    x = Activation('relu')(x)
    # Scale each (node, relation) slot by its weight, then sum over the
    # relation axis to aggregate per node.
    normalizer = Reshape((max_nodes * max_bi_relations,))(node_rel_weight)
    normalizer = RepeatVector(dense_dim)(normalizer)
    normalizer = Permute((2, 1))(normalizer)
    x = Multiply()([x, normalizer])
    x = Reshape((max_nodes, max_bi_relations, dense_dim))(x)
    x = Lambda(
        lambda xin: K.sum(xin, axis=2),
        output_shape=(max_nodes, dense_dim),  # the sum removes the relation axis
        name=label + '_integrate')(x)
    return x
# TODO: Dense use_bias=True
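The Reshape, RepeatVector, and Permute chain above just broadcasts one weight per (node, relation) slot across the feature dimension; here is a NumPy sketch with assumed toy sizes:

import numpy as np

batch, slots, dim = 2, 6, 4                # assumed toy sizes
w = np.random.rand(batch, slots)           # one weight per node-relation slot
w = np.repeat(w[:, None, :], dim, axis=1)  # RepeatVector -> (batch, dim, slots)
w = np.transpose(w, (0, 2, 1))             # Permute((2, 1)) -> (batch, slots, dim)
# w can now be multiplied elementwise with x of shape (batch, slots, dim)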