This article collects typical usage examples of the keras.layers.Multiply method in Python. If you have been struggling with questions such as what layers.Multiply does, how to use it, or where to find working examples, the curated code samples below may help. You can also explore the other members of the keras.layers module.
A total of 15 code examples of layers.Multiply are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
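Before diving into the examples, here is a minimal sketch of what the layer does: Multiply takes a list of input tensors of identical shape and returns their element-wise product. The toy shapes and variable names below are illustrative assumptions, not taken from any of the examples.

import numpy as np
from keras.layers import Input, Multiply
from keras.models import Model

# Multiply computes the element-wise product of same-shaped tensors.
a = Input(shape=(3,))
b = Input(shape=(3,))
prod = Multiply()([a, b])  # output shape: (None, 3)

model = Model(inputs=[a, b], outputs=prod)
out = model.predict([np.array([[1., 2., 3.]]), np.array([[4., 5., 6.]])])
print(out)  # [[ 4. 10. 18.]]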
Example 1: get_model
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')
    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim, name='user_embedding',
                                  embeddings_regularizer=l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim, name='item_embedding',
                                  embeddings_regularizer=l2(regs[1]), input_length=1)
    # Flatten the (1, latent_dim) embeddings into plain latent vectors.
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))
    # Element-wise product of the user and item latent vectors.
    predict_vector = Multiply()([user_latent, item_latent])
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name='prediction')(predict_vector)
    model = Model(inputs=[user_input, item_input], outputs=prediction)
    return model
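This is the Generalized Matrix Factorization (GMF) formulation from the Neural Collaborative Filtering line of work: the element-wise product of the two embeddings, fed to a single sigmoid unit. A hedged usage sketch follows, assuming the get_model above is in scope; the binary-crossentropy loss and the toy interaction data are illustrative assumptions, not part of the original example.

import numpy as np
from keras.optimizers import Adam

# Toy implicit-feedback data: (user_id, item_id) -> interacted or not.
users = np.array([0, 1, 2, 3])
items = np.array([1, 0, 3, 2])
labels = np.array([1, 0, 1, 0])

model = get_model(num_users=4, num_items=4, latent_dim=8)
model.compile(optimizer=Adam(), loss='binary_crossentropy')
model.fit([users, items], labels, epochs=2, batch_size=2)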
Example 2: _squeeze
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def _squeeze(self, inputs):
    """Squeeze-and-Excitation block.

    This function defines a squeeze-and-excitation structure.

    # Arguments
        inputs: Tensor, input tensor of a conv layer.
    """
    input_channels = int(inputs.shape[-1])
    x = GlobalAveragePooling2D()(inputs)           # squeeze: global spatial average
    x = Dense(input_channels, activation='relu')(x)
    x = Dense(input_channels, activation='hard_sigmoid')(x)
    x = Reshape((1, 1, input_channels))(x)         # broadcastable per-channel weights
    x = Multiply()([inputs, x])                    # excitation: rescale each channel
    return x
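Note that this variant keeps the full channel count in both Dense layers. The original Squeeze-and-Excitation design inserts a bottleneck controlled by a reduction ratio r; below is a sketch of that common variant (the function name and the r=16 default are assumptions, not taken from the example above):

from keras.layers import Dense, GlobalAveragePooling2D, Multiply, Reshape

def se_block(inputs, ratio=16):
    """Squeeze-and-Excitation with the usual channels // ratio bottleneck."""
    channels = int(inputs.shape[-1])
    x = GlobalAveragePooling2D()(inputs)
    x = Dense(channels // ratio, activation='relu')(x)  # squeeze to a bottleneck
    x = Dense(channels, activation='sigmoid')(x)        # expand back to per-channel gates
    x = Reshape((1, 1, channels))(x)
    return Multiply()([inputs, x])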
Example 3: test_merge_multiply
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def test_merge_multiply():
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    i3 = layers.Input(shape=(4, 5))
    # Functional shortcut and the Multiply layer produce the same merge.
    o = layers.multiply([i1, i2, i3])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2, i3], o)

    mul_layer = layers.Multiply()
    o2 = mul_layer([i1, i2, i3])
    assert mul_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, x1 * x2 * x3, atol=1e-4)
Example 4: model
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def model(self):
    inputs_img = Input(shape=(self.img_height, self.img_width, self.num_channels))
    inputs_mask = Input(shape=(self.img_height, self.img_width, self.num_channels))
    # Apply the binary mask so the discriminator only sees the masked region.
    inputs = Multiply()([inputs_img, inputs_mask])
    # Local discriminator
    l_dis = Conv2D(filters=64, kernel_size=5, strides=(2, 2), padding='same')(inputs)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=128, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=256, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=512, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=256, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=128, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Flatten()(l_dis)
    l_dis = Dense(units=1)(l_dis)
    model = Model(name=self.model_name, inputs=[inputs_img, inputs_mask], outputs=l_dis)
    return model
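The Multiply here acts as a pixel-wise mask. A minimal, self-contained sketch of the same pattern (the shapes and names are illustrative assumptions):

import numpy as np
from keras.layers import Input, Multiply
from keras.models import Model

img = Input(shape=(64, 64, 3))
mask = Input(shape=(64, 64, 3))   # 1.0 inside the region of interest, 0.0 elsewhere
masked = Multiply()([img, mask])  # zeroes everything outside the mask
masker = Model([img, mask], masked)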
Example 5: joint_branch
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def joint_branch(self, trainable=True, softmax_trainable=False):
    """
    Joint branch of detection and classification.
    :param trainable: unfreeze the detection branch layers if set to True
    """
    input_img = Input(shape=self.input_shape)
    x_future_det_one, x_future_cls_det_two = self.share_layer(input_img, trainable=trainable)
    x_detection = self.detection_branch_wrapper(x_future_det_one, x_future_cls_det_two, trainable=trainable,
                                                softmax_trainable=softmax_trainable)
    x_classification = self.classification_branch_wrapper(x_future_cls_det_two,
                                                          softmax_trainable=softmax_trainable)
    # The layer name goes to the constructor; Keras does not accept name= in the call.
    joint_x = Multiply(name='joint_multiply_layer')([x_detection, x_classification])
    # Note: the original created a second Input here, which would disconnect the graph.
    joint_model = Model(inputs=input_img, outputs=joint_x)
    return joint_model
Example 6: GMF_get_model
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def GMF_get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')
    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim, name='user_embedding',
                                  embeddings_initializer='random_normal', embeddings_regularizer=l2(regs[0]),
                                  input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim, name='item_embedding',
                                  embeddings_initializer='random_normal', embeddings_regularizer=l2(regs[1]),
                                  input_length=1)
    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))
    # Element-wise product of user and item embeddings
    predict_vector = Multiply()([user_latent, item_latent])
    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform', name='prediction')(predict_vector)
    model = Model(inputs=[user_input, item_input], outputs=prediction)
    return model
Example 7: prepare_model
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def prepare_model(ninputs=9600, n_feats=45, nclass=4, n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500
    # out_neurons2 = 20
    # out_neurons2 = 10
    m1 = Dense(out_neurons1, activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100, activation='sigmoid')(m1)
    # m1 = Dropout(0.2)(m1)
    # m1 = Dense(4, activation='sigmoid')(m1)
    # m2 = Dense(n_feats, activation='relu')(inp2)
    m2 = Dense(50, activation='relu')(inp2)
    # m2 = Dense(4, activation='relu')(m2)
    m3 = Dense(500, activation='relu',
               kernel_regularizer=regularizers.l2(reg))(inp3)
    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)
    # m3 = Dropout(0.4)(m3)
    # m3 = Dense(4, activation='softmax')(m3)
    # Keras 2: concatenate (from keras.layers) replaces the removed Merge(mode='concat').
    m = concatenate([m1, m2, m3])
    # mul = Multiply()([m1, m2])
    # add = Abs()([m1, m2])
    # m = concatenate([mul, add])
    score = Dense(nclass, activation='softmax')(m)
    model = Model([inp1, inp2, inp3], score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
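The commented-out lines hint at the fusion this page indexes the example for: merging the element-wise product of two branches with a second interaction feature. A hedged sketch of that idea follows; Subtract stands in for the undefined Abs layer, and all names and sizes are assumptions:

from keras.layers import Input, Dense, Multiply, Subtract, concatenate
from keras.models import Model

a = Input(shape=(100,))
b = Input(shape=(100,))
h1 = Dense(50, activation='relu')(a)
h2 = Dense(50, activation='relu')(b)
mul = Multiply()([h1, h2])    # element-wise interaction features
diff = Subtract()([h1, h2])   # signed difference features
m = concatenate([mul, diff])
score = Dense(4, activation='softmax')(m)
model = Model([a, b], score)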
Example 8: prepare_model2
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def prepare_model2(ninputs=9600, n_feats=45, nclass=4, n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500
    # out_neurons2 = 20
    # out_neurons2 = 10
    m1 = Dense(out_neurons1, activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100, activation='sigmoid')(m1)
    # m1 = Dropout(0.2)(m1)
    # m1 = Dense(4, activation='sigmoid')(m1)
    m2 = Dense(n_feats, activation='relu')(inp2)
    m2 = Dense(4, activation='relu')(inp2)  # NOTE: re-reads inp2, so the layer above is unused
    # m2 = Dense(4, activation='relu')(m2)
    m3 = Dense(500, activation='relu',
               kernel_regularizer=regularizers.l2(reg))(inp3)
    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)
    # m3 = Dropout(0.4)(m3)
    # m3 = Dense(4, activation='softmax')(m3)
    # Keras 2: concatenate (from keras.layers) replaces the removed Merge(mode='concat').
    m = concatenate([m1, m2, m3])
    # mul = Multiply()([m1, m2])
    # add = Abs()([m1, m2])
    # m = concatenate([mul, add])
    score = Dense(nclass, activation='softmax')(m)
    model = Model([inp1, inp2, inp3], score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
Example 9: attention_temporal
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def attention_temporal(self, input_data, sequence_length):
    """
    A temporal attention layer.
    :param input_data: network input, shaped (batch, time, features)
    :param sequence_length: length of the input sequence
    :return: the attention-weighted output
    """
    a = Permute((2, 1))(input_data)                      # (batch, features, time)
    a = Dense(sequence_length, activation='sigmoid')(a)  # one gate per time step
    a_probs = Permute((2, 1))(a)                         # back to (batch, time, features)
    output_attention_mul = Multiply()([input_data, a_probs])
    return output_attention_mul
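A hedged usage sketch of this pattern on top of a recurrent encoder; the LSTM, the shapes, and the standalone (non-method) form are assumptions for illustration:

from keras.layers import Input, LSTM, Dense, Permute, Multiply
from keras.models import Model

TIME_STEPS, FEATURES = 20, 32

inputs = Input(shape=(TIME_STEPS, FEATURES))
seq = LSTM(64, return_sequences=True)(inputs)   # (batch, time, 64)

a = Permute((2, 1))(seq)                        # (batch, 64, time)
a = Dense(TIME_STEPS, activation='sigmoid')(a)  # per-time-step gate
a = Permute((2, 1))(a)                          # (batch, time, 64)
attended = Multiply()([seq, a])
model = Model(inputs, attended)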
Example 10: attention_element
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def attention_element(self, input_data, input_dim):
    """
    A self-attention (self-gating) unit
    :param input_data: network input
    :param input_dim: the feature dimension of the input
    :return: the output of the attention network
    """
    input_data_probs = Dense(input_dim, activation='sigmoid')(input_data)  # sigmoid gates in [0, 1]
    output_attention_mul = Multiply()([input_data, input_data_probs])      # name='att_mul'
    return output_attention_mul
Example 11: _to_normal2d
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def _to_normal2d(output_batch) -> ds.MultivariateNormalTriL:
    """
    :param output_batch: (n_samples, 5)
    :return: a 2-D Gaussian parameterized by the five network outputs
    """
    # mean of x and y
    x_mean = Lambda(lambda o: o[:, 0])(output_batch)
    y_mean = Lambda(lambda o: o[:, 1])(output_batch)
    # std of x and y; exp keeps the std strictly positive
    x_std = Lambda(lambda o: K.exp(o[:, 2]))(output_batch)
    y_std = Lambda(lambda o: K.exp(o[:, 3]))(output_batch)
    # correlation coefficient; tanh keeps it in [-1, 1]
    cor = Lambda(lambda o: K.tanh(o[:, 4]))(output_batch)
    loc = Concatenate()([
        Lambda(lambda x_mean: K.expand_dims(x_mean, 1))(x_mean),
        Lambda(lambda y_mean: K.expand_dims(y_mean, 1))(y_mean)
    ])
    x_var = Lambda(lambda x_std: K.square(x_std))(x_std)
    y_var = Lambda(lambda y_std: K.square(y_std))(y_std)
    # off-diagonal covariance: cov(x, y) = std_x * std_y * rho
    xy_cor = Multiply()([x_std, y_std, cor])
    cov = Lambda(lambda inputs: K.stack(inputs, axis=0))(
        [x_var, xy_cor, xy_cor, y_var])
    cov = Lambda(lambda cov: K.permute_dimensions(cov, (1, 0)))(cov)
    cov = Reshape((2, 2))(cov)
    # tf.cholesky is the TF1 name; TF2 calls it tf.linalg.cholesky.
    scale_tril = Lambda(lambda cov: tf.cholesky(cov))(cov)
    mvn = ds.MultivariateNormalTriL(loc, scale_tril)
    return mvn
Example 12: call
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def call(self, x):
    dim = K.int_shape(x)[-1]
    transform_gate = self.dense_1(x)
    transform_gate = Activation("sigmoid")(transform_gate)
    carry_gate = Lambda(lambda x: 1.0 - x, output_shape=(dim,))(transform_gate)
    transformed_data = self.dense_2(x)
    transformed_data = Activation(self.activation)(transformed_data)
    # Highway network: output = T(x) * H(x) + (1 - T(x)) * x
    transformed_gated = Multiply()([transform_gate, transformed_data])
    identity_gated = Multiply()([carry_gate, x])
    value = Add()([transformed_gated, identity_gated])
    return value
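This call implements a Highway layer, but it assumes dense_1 and dense_2 are created elsewhere (typically in build). Below is a minimal self-contained sketch of the whole layer under that assumption; the class name and defaults are illustrative, and it presumes a Keras version (e.g. tf.keras) that tracks sublayers created in build:

from keras import backend as K
from keras.layers import Layer, Dense, Activation, Add, Lambda, Multiply

class Highway(Layer):
    """Highway layer: y = T(x) * H(x) + (1 - T(x)) * x."""

    def __init__(self, activation='relu', **kwargs):
        super(Highway, self).__init__(**kwargs)
        self.activation = activation

    def build(self, input_shape):
        dim = int(input_shape[-1])
        self.dense_1 = Dense(dim)  # transform gate T
        self.dense_2 = Dense(dim)  # transform H
        super(Highway, self).build(input_shape)

    def call(self, x):
        dim = K.int_shape(x)[-1]
        transform_gate = Activation('sigmoid')(self.dense_1(x))
        carry_gate = Lambda(lambda t: 1.0 - t, output_shape=(dim,))(transform_gate)
        transformed = Activation(self.activation)(self.dense_2(x))
        return Add()([Multiply()([transform_gate, transformed]),
                      Multiply()([carry_gate, x])])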
Example 13: call
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def call(self, inputs, **kwargs):
    x = inputs[:, 1]
    # print('x.shape: ' + str(K.int_shape(x)))
    bool_mask = Lambda(lambda t: K.greater_equal(t[:, 0], t[:, 1]),
                       output_shape=K.int_shape(x)[1:])(inputs)
    # print('bool_mask.shape: ' + str(K.int_shape(bool_mask)))
    mask = Lambda(lambda t: K.cast(t, dtype='float32'))(bool_mask)
    # print('mask.shape: ' + str(K.int_shape(mask)))
    # Keep x only where the first channel is >= the second.
    x = Multiply()([mask, x])
    # print('x.shape: ' + str(K.int_shape(x)))
    return x
Example 14: attention
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def attention(inputs, single_attention_vector=False):
    # Attention mechanism
    time_steps = k_keras.int_shape(inputs)[1]
    input_dim = k_keras.int_shape(inputs)[2]
    x = Permute((2, 1))(inputs)
    x = Dense(time_steps, activation='softmax')(x)
    if single_attention_vector:
        # Share one attention vector across all feature dimensions.
        x = Lambda(lambda x: k_keras.mean(x, axis=1))(x)
        x = RepeatVector(input_dim)(x)
    a_probs = Permute((2, 1))(x)
    output_attention_mul = Multiply()([inputs, a_probs])
    return output_attention_mul
Example 15: build
# Required imports: from keras import layers [as alias]
# Or: from keras.layers import Multiply [as alias]
def build(self):
    # qd_input = Input((self.config.kernel_size,), name="qd_input")
    dd_input = Input((self.config.nb_supervised_doc, self.config.kernel_size), name='dd_input')
    # z = Dense(self.config.hidden_size, activation='tanh', name="qd_hidden")(qd_input)
    # qd_out = Dense(self.config.out_size, name="qd_out")(z)
    z = Dense(self.config.hidden_size, activation='tanh', name="dd_hidden")(dd_input)
    dd_init_out = Dense(self.config.out_size, name='dd_init_out')(z)

    dd_gate = Input((self.config.nb_supervised_doc, 1), name='baseline_doc_score')
    dd_w = Dense(1, kernel_initializer=self.initializer_gate, use_bias=False, name='dd_gate')(dd_gate)
    # dd_w = Lambda(lambda x: softmax(x, axis=1), output_shape=(self.config.nb_supervised_doc,), name='dd_softmax')(dd_w)

    dd_w = Reshape((self.config.nb_supervised_doc,))(dd_w)
    dd_init_out = Reshape((self.config.nb_supervised_doc,))(dd_init_out)

    if self.config.method in [1, 3]:  # no doc gating, with dense layer
        z = dd_init_out
    elif self.config.method == 2:
        logging.info("Apply doc gating")
        z = Multiply(name='dd_out')([dd_init_out, dd_w])
    else:
        raise ValueError("Method not initialized, please check config file")

    if self.config.method in [1, 2]:
        logging.info("Dense layer on top")
        z = Dense(self.config.merge_hidden, activation='tanh', name='merge_hidden')(z)
        out = Dense(self.config.merge_out, name='score')(z)
    else:
        logging.info("Apply doc gating, no dense layer on top, sum up scores")
        out = Dot(axes=[1, 1], name='score')([z, dd_w])

    model = Model(inputs=[dd_input, dd_gate], outputs=[out])
    print(model.summary())
    return model