This article collects typical usage examples of the Python method keras.backend.flatten. If you are wondering what backend.flatten does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of its containing module, keras.backend.
The following lists 15 code examples of backend.flatten, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
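As a quick orientation before the examples, here is a minimal sketch of what keras.backend.flatten does: it collapses a tensor of any shape into a 1-D tensor (unlike the keras.layers.Flatten layer, which keeps the batch dimension).

from keras import backend as K

x = K.ones((2, 3, 4))      # a tensor of shape (2, 3, 4)
flat = K.flatten(x)        # collapsed to a single axis
print(K.int_shape(flat))   # -> (24,)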
Example 1: audio_discriminate_loss2
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def audio_discriminate_loss2(gamma=0.1, beta=2*0.1, num_speaker=2):
    def loss_func(S_true, S_pred, gamma=gamma, beta=beta, num_speaker=num_speaker):
        sum_mtr = K.zeros_like(S_true[:, :, :, :, 0])
        for i in range(num_speaker):
            sum_mtr += K.square(S_true[:, :, :, :, i] - S_pred[:, :, :, :, i])
            for j in range(num_speaker):
                if i != j:
                    sum_mtr -= gamma * K.square(S_true[:, :, :, :, i] - S_pred[:, :, :, :, j])

        for i in range(num_speaker):
            for j in range(i + 1, num_speaker):
                # sum_mtr -= beta*K.square(S_pred[:,:,:,i]-S_pred[:,:,:,j])
                # sum_mtr += beta*K.square(S_true[:,:,:,:,i]-S_true[:,:,:,:,j])
                pass

        # sum = K.sum(K.maximum(K.flatten(sum_mtr), 0))
        loss = K.mean(K.flatten(sum_mtr))
        return loss
    return loss_func
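A hedged usage sketch (the model below is a placeholder, not part of the original snippet): since audio_discriminate_loss2 is a factory returning a closure with the standard Keras (y_true, y_pred) signature, it can be passed straight to model.compile.

loss = audio_discriminate_loss2(gamma=0.1, beta=0.2, num_speaker=2)
model.compile(optimizer='adam', loss=loss)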
Example 2: labelembed_model
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def labelembed_model(base_model, num_classes, **kwargs):
    input_ = base_model.input
    embedding = base_model.output

    out = keras.layers.Activation('relu')(embedding)
    out = keras.layers.BatchNormalization(name='embedding_bn')(out)
    out1 = keras.layers.Dense(num_classes, name='prob')(out)
    out2 = keras.layers.Dense(num_classes, name='out2')(
        keras.layers.Lambda(lambda x: K.stop_gradient(x))(out))

    cls_input_ = keras.layers.Input((1,), name='labels')
    cls_embedding_layer = keras.layers.Embedding(
        num_classes, num_classes,
        embeddings_initializer='identity', name='labelembeddings')
    cls_embedding = keras.layers.Flatten()(cls_embedding_layer(cls_input_))

    loss = keras.layers.Lambda(
        lambda x: labelembed_loss(x[0], x[1], x[2], K.flatten(x[3]),
                                  num_classes=num_classes, **kwargs)[:, None],
        name='labelembed_loss')([out1, out2, cls_embedding, cls_input_])

    return keras.models.Model([input_, cls_input_], [embedding, out1, loss])
Example 3: softmax_sparse_crossentropy_ignoring_last_label
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred):
    '''
    Softmax cross-entropy loss function for Pascal VOC segmentation
    and models which do not perform softmax.
    TensorFlow backend only.
    '''
    y_pred = KB.reshape(y_pred, (-1, KB.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    # tf.to_int32 is TF 1.x; in TF 2 use tf.cast(..., tf.int32).
    y_true = KB.one_hot(tf.to_int32(KB.flatten(y_true)),
                        KB.int_shape(y_pred)[-1] + 1)
    # Drop the last (void/ignore) label channel.
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    cross_entropy = -KB.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = KB.mean(cross_entropy)
    return cross_entropy_mean
Example 4: dice_coef
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def dice_coef(y_true, y_pred, smooth=1.0):
    ''' Dice Coefficient
    Args:
        y_true (np.array): Ground Truth Heatmap (Label)
        y_pred (np.array): Prediction Heatmap
    '''
    class_num = 2
    for i in range(class_num):
        y_true_f = K.flatten(y_true[:, :, :, i])
        y_pred_f = K.flatten(y_pred[:, :, :, i])
        intersection = K.sum(y_true_f * y_pred_f)
        loss = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
        if i == 0:
            total_loss = loss
        else:
            total_loss = total_loss + loss
    # Average the per-class dice scores.
    total_loss = total_loss / class_num
    return total_loss
Example 5: iou
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def iou(actual, predicted):
    """Compute the Intersection over Union statistic (i.e. Jaccard Index)

    See https://en.wikipedia.org/wiki/Jaccard_index

    Parameters
    ----------
    actual : list
        Ground-truth labels
    predicted : list
        Predicted labels

    Returns
    -------
    float
        Intersection over Union value
    """
    actual = backend.flatten(actual)
    predicted = backend.flatten(predicted)
    intersection = backend.sum(actual * predicted)
    union = backend.sum(actual) + backend.sum(predicted) - intersection
    return 1.0 * intersection / union
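Note that this version divides by the raw union, so it can divide by zero when both masks are empty. A smoothed variant (an assumption, mirroring the smooth term used in the dice examples) avoids that edge case:

def iou_smooth(actual, predicted, smooth=1.0):
    actual = backend.flatten(actual)
    predicted = backend.flatten(predicted)
    intersection = backend.sum(actual * predicted)
    union = backend.sum(actual) + backend.sum(predicted) - intersection
    # The smoothing constant keeps the ratio defined for empty masks.
    return (intersection + smooth) / (union + smooth)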
Example 6: audio_discriminate_loss2
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def audio_discriminate_loss2(gamma=0.1, beta=2*0.1, people_num=2):
    def loss_func(S_true, S_pred, gamma=gamma, beta=beta, people_num=people_num):
        sum_mtr = K.zeros_like(S_true[:, :, :, :, 0])
        for i in range(people_num):
            sum_mtr += K.square(S_true[:, :, :, :, i] - S_pred[:, :, :, :, i])
            for j in range(people_num):
                if i != j:
                    sum_mtr -= gamma * K.square(S_true[:, :, :, :, i] - S_pred[:, :, :, :, j])

        for i in range(people_num):
            for j in range(i + 1, people_num):
                # sum_mtr -= beta*K.square(S_pred[:,:,:,i]-S_pred[:,:,:,j])
                # sum_mtr += beta*K.square(S_true[:,:,:,:,i]-S_true[:,:,:,:,j])
                pass

        # sum = K.sum(K.maximum(K.flatten(sum_mtr), 0))
        loss = K.mean(K.flatten(sum_mtr))
        return loss
    return loss_func
Example 7: dice_coef_clipped
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def dice_coef_clipped(y_true, y_pred, smooth=1.0):
    # Round both maps to hard {0, 1} masks and report dice as a percentage.
    y_true_f = K.flatten(K.round(y_true))
    y_pred_f = K.flatten(K.round(y_pred))
    intersection = K.sum(y_true_f * y_pred_f)
    return 100. * (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
Example 8: dice_coef
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
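A hedged usage sketch (model is a placeholder): the coefficient doubles as a training metric alongside a dice-based loss.

model.compile(optimizer='adam',
              loss=lambda yt, yp: 1.0 - dice_coef(yt, yp),
              metrics=[dice_coef])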
Example 9: online_bootstrapping
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """ Implements Online Bootstrapping crossentropy loss, to train only on hard pixels,
    see https://arxiv.org/abs/1605.06885 Bridging Category-level and Instance-level Semantic Image Segmentation.
    The implementation differs slightly in that it uses binary crossentropy instead of softmax.
    SUPPORTS ONLY MINIBATCHES WITH 1 ELEMENT!

    # Arguments
        y_true: A tensor with labels.
        y_pred: A tensor with predicted probabilities.
        pixels: number of hard pixels to keep
        threshold: confidence to use, i.e. if threshold is 0.7 and y_true=1, prediction=0.65, then we consider that pixel hard

    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    difference = K.abs(y_true - y_pred)

    # K.tf is the TensorFlow module exposed by older multi-backend Keras.
    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))

    return K.mean(K.binary_crossentropy(y_true, y_pred))
Example 10: dice_coef_border
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def dice_coef_border(y_true, y_pred):
    # Restrict the dice computation to a band around the mask boundary
    # (a sketch of get_border_mask follows Example 11 below).
    border = get_border_mask((21, 21), y_true)

    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.tf.gather(y_true_f, K.tf.where(border > 0.5))
    y_pred_f = K.tf.gather(y_pred_f, K.tf.where(border > 0.5))

    return dice_coef(y_true_f, y_pred_f)
Example 11: bce_border
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def bce_border(y_true, y_pred):
    # Same border restriction as Example 10, with binary crossentropy.
    border = get_border_mask((21, 21), y_true)

    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.tf.gather(y_true_f, K.tf.where(border > 0.5))
    y_pred_f = K.tf.gather(y_pred_f, K.tf.where(border > 0.5))

    return binary_crossentropy(y_true_f, y_pred_f)
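Examples 10 and 11 both call get_border_mask, which is not shown in either snippet. A plausible sketch (an assumption, not the original helper) derives a border band by max-pooling the mask and its complement, so only pixels near the boundary light up:

def get_border_mask(pool_size, y_true):
    # Dilate the foreground and the background with max pooling; their
    # product is 1 only in the band where the two regions meet.
    positive = K.pool2d(y_true, pool_size=pool_size, padding='same',
                        pool_mode='max')
    negative = K.pool2d(1.0 - y_true, pool_size=pool_size, padding='same',
                        pool_mode='max')
    return positive * negative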
Example 12: audio_discriminate_loss
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def audio_discriminate_loss(gamma=0.1, num_speaker=2):
    def loss_func(S_true, S_pred, gamma=gamma, num_speaker=num_speaker):
        sum = 0
        for i in range(num_speaker):
            sum += K.sum(K.flatten(K.square(S_true[:, :, :, i] - S_pred[:, :, :, i])))
            for j in range(num_speaker):
                if i != j:
                    sum -= gamma * K.sum(K.flatten(K.square(S_true[:, :, :, i] - S_pred[:, :, :, j])))

        # 298 * 257 * 2 appears to be the fixed spectrogram size
        # (time frames x frequency bins x real/imaginary channels).
        loss = sum / (num_speaker * 298 * 257 * 2)
        return loss
    return loss_func
Example 13: _buildEncoder
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
        z_mean_, z_log_var_ = args
        batch_size = K.shape(z_mean_)[0]
        # Keras 1 keyword; in Keras 2 the argument is named `stddev`.
        epsilon = K.random_normal(shape=(batch_size, latent_rep_size),
                                  mean=0., std=epsilon_std)
        return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
        x = K.flatten(x)
        x_decoded_mean = K.flatten(x_decoded_mean)
        xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = -0.5 * K.mean(
            1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return xent_loss + kl_loss

    return (vae_loss,
            Lambda(sampling, output_shape=(latent_rep_size,),
                   name='lambda')([z_mean, z_log_var]))
Example 14: quantile_loss
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def quantile_loss(y_true, y_pred, taus):
    """
    The quantile loss for a list of quantiles. Sums up the error
    contributions from each of the per-quantile loss functions.
    """
    e = skewed_absolute_error(
        K.flatten(y_true), K.flatten(y_pred[:, 0]), taus[0])
    for i, tau in enumerate(taus[1:]):
        e += skewed_absolute_error(K.flatten(y_true),
                                   K.flatten(y_pred[:, i + 1]),
                                   tau)
    return e
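skewed_absolute_error is not defined in this snippet. A standard pinball-loss sketch that matches the way quantile_loss calls it (an assumption, not the original helper):

def skewed_absolute_error(y_true, y_pred, tau):
    # Pinball loss: underestimates are weighted by tau,
    # overestimates by (1 - tau).
    dy = y_true - y_pred
    return K.mean(tau * K.relu(dy) + (1.0 - tau) * K.relu(-dy))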
Example 15: _vae_loss
# Required module: from keras import backend [as alias]
# Alternatively: from keras.backend import flatten [as alias]
def _vae_loss(self, input, output):
    '''
    Loss function for the variational autoencoder:
    reconstruction crossentropy plus the KL divergence term.
    '''
    input_flat = K.flatten(input)
    output_flat = K.flatten(output)
    xent_loss = self.image_size[0] * self.image_size[1] \
        * objectives.binary_crossentropy(input_flat, output_flat)
    kl_loss = -0.5 * K.mean(1 + self.z_log_var - K.square(self.z_mean)
                            - K.exp(self.z_log_var), axis=-1)
    return xent_loss + kl_loss