This page collects typical usage examples of the Python method keras.backend.l2_normalize. If you have been wondering what backend.l2_normalize does, how to call it, or what it looks like in real code, the curated samples below should help. You can also explore the other methods of the keras.backend module.
The 15 code examples of backend.l2_normalize shown below are sorted by popularity by default.
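Before the examples, here is a minimal sketch of what the method does: it rescales a tensor so that its L2 norm along the given axis equals 1. The tensor values below are illustrative only, and a TensorFlow backend is assumed.

from keras import backend as K

# A batch of two 3-dimensional row vectors.
x = K.variable([[3.0, 4.0, 0.0],
                [1.0, 1.0, 1.0]])

# Normalize each row to unit L2 norm:
#   [3, 4, 0] -> [0.6, 0.8, 0.0]
#   [1, 1, 1] -> [0.5774, 0.5774, 0.5774]
normalized = K.l2_normalize(x, axis=1)
print(K.eval(normalized))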
Example 1: get_model_41
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def get_model_41(params):
    embedding_weights = pickle.load(
        open("../data/datasets/train_data/embedding_weights_w2v-google_MSD-AG.pk", "rb"))
    # main sequential model
    model = Sequential()
    model.add(Embedding(len(embedding_weights[0]), params['embedding_dim'],
                        input_length=params['sequence_length'],
                        weights=embedding_weights))
    # model.add(Dropout(params['dropout_prob'][0], input_shape=(params['sequence_length'], params['embedding_dim'])))
    model.add(LSTM(2048))
    # model.add(Dropout(params['dropout_prob'][1]))
    model.add(Dense(output_dim=params["n_out"], init="uniform"))
    model.add(Activation(params['final_activation']))
    logging.debug("Output CNN: %s" % str(model.output_shape))
    if params['final_activation'] == 'linear':
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))
    return model
# CRNN Arch for audio
Example 2: call
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def call(self, input_tensor, mask=None):
    z_s = input_tensor[0]
    z_n = input_tensor[1]
    r_s = input_tensor[2]
    # Normalize all three inputs to unit L2 norm so that dot products are cosines.
    z_s = K.l2_normalize(z_s, axis=-1)
    z_n = K.l2_normalize(z_n, axis=-1)
    r_s = K.l2_normalize(r_s, axis=-1)
    steps = z_n.shape[1]
    # Positive score: cosine between z_s and r_s, repeated once per negative sample.
    pos = K.sum(z_s * r_s, axis=-1, keepdims=True)
    pos = K.repeat_elements(pos, steps, axis=1)
    # Negative scores: cosine between r_s and each of the `steps` negative samples.
    r_s = K.expand_dims(r_s, axis=-2)
    r_s = K.repeat_elements(r_s, steps, axis=1)
    neg = K.sum(z_n * r_s, axis=-1)
    # Max-margin (hinge) loss summed over the negative samples.
    loss = K.cast(K.sum(K.maximum(0., (1. - pos + neg)), axis=-1, keepdims=True), K.floatx())
    return loss
Example 3: _compute_energy
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def _compute_energy(self, stm):
    # "concat" energy function:
    # energy_i = g * V / |V| * tanh([stm, h_i] * W + b) + r
    _stm = K.dot(stm, self.W_a)
    V_a = self.V_a
    if self.normalize_energy:
        V_a = self.Energy_g * K.l2_normalize(self.V_a)
    et = K.dot(activations.tanh(K.expand_dims(_stm, axis=1) + self._uxpb),
               K.expand_dims(V_a))
    if self.is_monotonic:
        et += self.Energy_r
    return et
Example 4: call
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def call(self, inputs: list, **kwargs) -> typing.Any:
    """
    The computation logic of MatchingTensorLayer.

    :param inputs: two input tensors.
    """
    x1 = inputs[0]
    x2 = inputs[1]
    # Normalize x1 and x2.
    if self._normalize:
        x1 = K.l2_normalize(x1, axis=2)
        x2 = K.l2_normalize(x2, axis=2)
    # b = batch size
    # l = length of `x1`
    # r = length of `x2`
    # d, e = embedding size
    # c = number of channels
    # output shape: [b, c, l, r]
    output = tf.einsum(
        'bld,cde,bre->bclr',
        x1, self.interaction_matrix, x2
    )
    return output
Example 5: call
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def call(self, inputs):
    x1 = inputs[0]
    x2 = inputs[1]
    if self.match_type in ['dot']:
        if self.normalize:
            x1 = K.l2_normalize(x1, axis=2)
            x2 = K.l2_normalize(x2, axis=2)
        output = K.tf.einsum('abd,acd->abc', x1, x2)
        output = K.tf.expand_dims(output, 3)
    elif self.match_type in ['mul', 'plus', 'minus']:
        x1_exp = K.tf.stack([x1] * self.shape2[1], 2)
        x2_exp = K.tf.stack([x2] * self.shape1[1], 1)
        if self.match_type == 'mul':
            output = x1_exp * x2_exp
        elif self.match_type == 'plus':
            output = x1_exp + x2_exp
        elif self.match_type == 'minus':
            output = x1_exp - x2_exp
    elif self.match_type in ['concat']:
        x1_exp = K.tf.stack([x1] * self.shape2[1], axis=2)
        x2_exp = K.tf.stack([x2] * self.shape1[1], axis=1)
        output = K.tf.concat([x1_exp, x2_exp], axis=3)
    return output
Example 6: call
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def call(self, x, mask=None):
    uit = K.tanh(K.dot(x, self.Ws1))
    ait = K.dot(uit, self.Ws2)
    ait = K.permute_dimensions(ait, (0, 2, 1))
    A = K.softmax(ait, axis=1)
    M = K.batch_dot(A, x)
    if self.punish:
        A_T = K.permute_dimensions(A, (0, 2, 1))
        tile_eye = K.tile(K.eye(self.weight_ws2), [self.batch_size, 1])
        tile_eye = K.reshape(
            tile_eye, shape=[-1, self.weight_ws2, self.weight_ws2])
        AA_T = K.batch_dot(A, A_T) - tile_eye
        P = K.l2_normalize(AA_T, axis=(1, 2))
        return M, P
    else:
        return M
Example 7: call
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def call(self, inputs):  # pylint:disable=arguments-differ
    """This is where the layer's logic lives.

    Parameters
    ----------
    inputs: tensor
        Input tensor, or list/tuple of input tensors

    Returns
    -------
    tensor
        A tensor or list/tuple of tensors
    """
    return K.l2_normalize(inputs, self.axis)
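For context, here is a hedged sketch of how a call method like this fits into a complete custom layer; the class name L2Normalize and its axis argument are illustrative assumptions, not the original repository's code.

from keras import backend as K
from keras.layers import Layer

class L2Normalize(Layer):
    # Hypothetical layer that rescales its input to unit L2 norm along `axis`.
    def __init__(self, axis=-1, **kwargs):
        self.axis = axis
        super(L2Normalize, self).__init__(**kwargs)

    def call(self, inputs):
        return K.l2_normalize(inputs, self.axis)

    def compute_output_shape(self, input_shape):
        # L2 normalization does not change the tensor's shape.
        return input_shape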
Example 8: build_siamese_resnet_18
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def build_siamese_resnet_18(input_shape, num_outputs):
    channels, height, width = input_shape
    branch_channels = 3  # channels / 2
    branch_input_shape = (branch_channels, height, width)
    branch = ResnetBuilder.build_resnet_18(branch_input_shape, NUM_EMBEDDING, False)
    input = Input(shape=(height, width, channels))
    first_branch = branch(Lambda(lambda x: x[:, :, :, :3])(input))
    second_branch = branch(Lambda(lambda x: x[:, :, :, 3:])(input))
    if NORMALIZATION_ON:
        first_branch = Lambda(lambda x: K.l2_normalize(x, axis=1))(first_branch)
        second_branch = Lambda(lambda x: K.l2_normalize(x, axis=1))(second_branch)
    raw_result = concatenate([first_branch, second_branch])
    output = _top_network(raw_result)
    # raw_result = dot([first_branch, second_branch], axes=1)
    # result = Lambda(lambda x: (K.clip(x, 0.5, 1) - 0.5) * 2.0)(raw_result)
    # negated_result = Lambda(lambda x: 1 - x)(result)
    # output = concatenate([negated_result, result])
    return Model(inputs=input, outputs=output)
Example 9: call
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def call(self, x):
    # feat: bz x W x H x D, cluster_score: bz x W x H x clusters.
    feat, cluster_score = x
    num_features = feat.shape[-1]
    # Softmax normalization to get the soft assignment.
    # A: bz x W x H x clusters
    max_cluster_score = K.max(cluster_score, -1, keepdims=True)
    exp_cluster_score = K.exp(cluster_score - max_cluster_score)
    A = exp_cluster_score / K.sum(exp_cluster_score, axis=-1, keepdims=True)
    # Now compute the residual; self.cluster: clusters x D
    A = K.expand_dims(A, -1)                  # A: bz x W x H x clusters x 1
    feat_broadcast = K.expand_dims(feat, -2)  # feat_broadcast: bz x W x H x 1 x D
    feat_res = feat_broadcast - self.cluster  # feat_res: bz x W x H x clusters x D
    weighted_res = tf.multiply(A, feat_res)   # weighted_res: bz x W x H x clusters x D
    cluster_res = K.sum(weighted_res, [1, 2])
    if self.mode == 'gvlad':
        cluster_res = cluster_res[:, :self.k_centers, :]
    cluster_l2 = K.l2_normalize(cluster_res, -1)
    outputs = K.reshape(cluster_l2, [-1, int(self.k_centers) * int(num_features)])
    return outputs
Example 10: cosine
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def cosine(a, b):
    """Cosine distance along the last axis: minimum is 0 (a == b), maximum is 2 (a == -b)."""
    a = K.l2_normalize(a, axis=-1)
    b = K.l2_normalize(b, axis=-1)
    return 1 - K.sum(a * b, axis=-1)
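A quick sanity check of the distance above, as a hedged sketch (TensorFlow backend assumed; the vectors are illustrative):

from keras import backend as K

a = K.variable([[1.0, 0.0]])
b = K.variable([[0.0, 1.0]])
print(K.eval(cosine(a, a)))  # ~0.0: identical directions
print(K.eval(cosine(a, b)))  # ~1.0: orthogonal directions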
Example 11: get_model_3
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def get_model_3(params):
    # metadata
    inputs2 = Input(shape=(params["n_metafeatures"],))
    x2 = Dropout(params["dropout_factor"])(inputs2)
    if params["n_dense"] > 0:
        dense2 = Dense(output_dim=params["n_dense"], init="uniform", activation='relu')
        x2 = dense2(x2)
        logging.debug("Output CNN: %s" % str(dense2.output_shape))
        x2 = Dropout(params["dropout_factor"])(x2)
    if params["n_dense_2"] > 0:
        dense3 = Dense(output_dim=params["n_dense_2"], init="uniform", activation='relu')
        x2 = dense3(x2)
        logging.debug("Output CNN: %s" % str(dense3.output_shape))
        x2 = Dropout(params["dropout_factor"])(x2)
    dense4 = Dense(output_dim=params["n_out"], init="uniform", activation=params['final_activation'])
    xout = dense4(x2)
    logging.debug("Output CNN: %s" % str(dense4.output_shape))
    if params['final_activation'] == 'linear':
        reg = Lambda(lambda x: K.l2_normalize(x, axis=1))
        xout = reg(xout)
    model = Model(input=inputs2, output=xout)
    return model
# Metadata 2 inputs, post-merge with dense layers
Example 12: get_model_32
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def get_model_32(params):
    # metadata
    inputs = Input(shape=(params["n_metafeatures"],))
    reg = Lambda(lambda x: K.l2_normalize(x, axis=1))
    x1 = reg(inputs)
    inputs2 = Input(shape=(params["n_metafeatures2"],))
    reg2 = Lambda(lambda x: K.l2_normalize(x, axis=1))
    x2 = reg2(inputs2)
    # merge
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = Dropout(params["dropout_factor"])(x)
    if params['n_dense'] > 0:
        dense2 = Dense(output_dim=params["n_dense"], init="uniform", activation='relu')
        x = dense2(x)
        logging.debug("Output CNN: %s" % str(dense2.output_shape))
    dense4 = Dense(output_dim=params["n_out"], init="uniform", activation=params['final_activation'])
    xout = dense4(x)
    logging.debug("Output CNN: %s" % str(dense4.output_shape))
    if params['final_activation'] == 'linear':
        reg = Lambda(lambda x: K.l2_normalize(x, axis=1))
        xout = reg(xout)
    model = Model(input=[inputs, inputs2], output=xout)
    return model
# Metadata 3 inputs, pre-merge and l2
Example 13: get_model_33
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def get_model_33(params):
    # metadata
    inputs = Input(shape=(params["n_metafeatures"],))
    reg = Lambda(lambda x: K.l2_normalize(x, axis=1))
    x1 = reg(inputs)
    inputs2 = Input(shape=(params["n_metafeatures2"],))
    reg2 = Lambda(lambda x: K.l2_normalize(x, axis=1))
    x2 = reg2(inputs2)
    inputs3 = Input(shape=(params["n_metafeatures3"],))
    reg3 = Lambda(lambda x: K.l2_normalize(x, axis=1))
    x3 = reg3(inputs3)
    # merge
    x = merge([x1, x2, x3], mode='concat', concat_axis=1)
    x = Dropout(params["dropout_factor"])(x)
    if params['n_dense'] > 0:
        dense2 = Dense(output_dim=params["n_dense"], init="uniform", activation='relu')
        x = dense2(x)
        logging.debug("Output CNN: %s" % str(dense2.output_shape))
    dense4 = Dense(output_dim=params["n_out"], init="uniform", activation=params['final_activation'])
    xout = dense4(x)
    logging.debug("Output CNN: %s" % str(dense4.output_shape))
    if params['final_activation'] == 'linear':
        reg = Lambda(lambda x: K.l2_normalize(x, axis=1))
        xout = reg(xout)
    model = Model(input=[inputs, inputs2, inputs3], output=xout)
    return model
# Metadata 4 inputs, pre-merge and l2
Example 14: get_model_34
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def get_model_34(params):
    # metadata
    inputs = Input(shape=(params["n_metafeatures"],))
    reg = Lambda(lambda x: K.l2_normalize(x, axis=1))
    x1 = reg(inputs)
    inputs2 = Input(shape=(params["n_metafeatures2"],))
    reg2 = Lambda(lambda x: K.l2_normalize(x, axis=1))
    x2 = reg2(inputs2)
    inputs3 = Input(shape=(params["n_metafeatures3"],))
    reg3 = Lambda(lambda x: K.l2_normalize(x, axis=1))
    x3 = reg3(inputs3)
    inputs4 = Input(shape=(params["n_metafeatures4"],))
    reg4 = Lambda(lambda x: K.l2_normalize(x, axis=1))
    x4 = reg4(inputs4)
    # merge
    x = merge([x1, x2, x3, x4], mode='concat', concat_axis=1)
    x = Dropout(params["dropout_factor"])(x)
    if params['n_dense'] > 0:
        dense2 = Dense(output_dim=params["n_dense"], init="uniform", activation='relu')
        x = dense2(x)
        logging.debug("Output CNN: %s" % str(dense2.output_shape))
    dense4 = Dense(output_dim=params["n_out"], init="uniform", activation=params['final_activation'])
    xout = dense4(x)
    logging.debug("Output CNN: %s" % str(dense4.output_shape))
    if params['final_activation'] == 'linear':
        reg = Lambda(lambda x: K.l2_normalize(x, axis=1))
        xout = reg(xout)
    model = Model(input=[inputs, inputs2, inputs3, inputs4], output=xout)
    return model
Example 15: get_model_6
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import l2_normalize [as alias]
def get_model_6(params):
    # metadata
    inputs2 = Input(shape=(params["n_metafeatures"],))
    # x2 = Dropout(params["dropout_factor"])(inputs2)
    if params["n_dense"] > 0:
        dense21 = Dense(output_dim=params["n_dense"], init="uniform", activation='relu')
        x21 = dense21(inputs2)
        logging.debug("Output CNN: %s" % str(dense21.output_shape))
        dense22 = Dense(output_dim=params["n_dense"], init="uniform", activation='tanh')
        x22 = dense22(inputs2)
        logging.debug("Output CNN: %s" % str(dense22.output_shape))
        dense23 = Dense(output_dim=params["n_dense"], init="uniform", activation='sigmoid')
        x23 = dense23(inputs2)
        logging.debug("Output CNN: %s" % str(dense23.output_shape))
    # merge
    x = merge([x21, x22, x23], mode='concat', concat_axis=1)
    x2 = Dropout(params["dropout_factor"])(x)
    if params["n_dense_2"] > 0:
        dense3 = Dense(output_dim=params["n_dense_2"], init="uniform", activation='relu')
        x2 = dense3(x2)
        logging.debug("Output CNN: %s" % str(dense3.output_shape))
        x2 = Dropout(params["dropout_factor"])(x2)
    dense4 = Dense(output_dim=params["n_out"], init="uniform", activation=params['final_activation'])
    xout = dense4(x2)
    logging.debug("Output CNN: %s" % str(dense4.output_shape))
    if params['final_activation'] == 'linear':
        reg = Lambda(lambda x: K.l2_normalize(x, axis=1))
        xout = reg(xout)
    model = Model(input=inputs2, output=xout)
    return model