This article collects typical usage examples of the Python method keras.losses.sparse_categorical_crossentropy. If you have been wondering what exactly losses.sparse_categorical_crossentropy does and how to use it, the curated code examples below should help. You can also explore further usage examples from the module it belongs to, keras.losses.
The section below presents 13 code examples of losses.sparse_categorical_crossentropy, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
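Before the examples, here is a minimal sketch of how this loss is typically wired into model.compile; the toy model, layer sizes and random data are placeholders for illustration only, not taken from any of the examples below:

import numpy as np
from keras import losses
from keras.models import Sequential
from keras.layers import Dense

# Toy classifier: 20 input features, 10 classes.
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(20,)))
model.add(Dense(10, activation='softmax'))

# The loss can be passed as a function object (or as the string
# 'sparse_categorical_crossentropy'); labels are integer class indices,
# not one-hot vectors.
model.compile(optimizer='adam',
              loss=losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])

x = np.random.random((8, 20))
y = np.random.randint(0, 10, size=(8,))
model.train_on_batch(x, y)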
Example 1: train
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def train():
    model = create_model()
    model.compile(optimizer='adam',
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['accuracy'])
    checkpointer = callbacks.ModelCheckpoint(filepath="../Output/checkpoint.hdf5", verbose=1, save_best_only=True)
    x_train, x_test, y_train, y_test = load_audio_data()
    model.fit(x_train,
              y_train,
              epochs=1000,
              batch_size=1000,
              validation_split=0.2,
              callbacks=[checkpointer])
    results = model.evaluate(x_test, y_test)
    print('test_results: ', results)
    model.save(MODEL_FILE_PATH)
Example 2: crf_loss
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def crf_loss(y_true, y_pred):
    """General CRF loss function depending on the learning mode.

    # Arguments
        y_true: tensor with true targets.
        y_pred: tensor with predicted targets.

    # Returns
        If the CRF layer is being trained in the join mode, returns the negative
        log-likelihood. Otherwise returns the categorical crossentropy implemented
        by the underlying Keras backend.

    # About GitHub
        If you open an issue or a pull request about CRF, please
        add `cc @lzfelix` to notify Luiz Felix.
    """
    crf, idx = y_pred._keras_history[:2]
    if crf.learn_mode == 'join':
        return crf_nll(y_true, y_pred)
    else:
        if crf.sparse_target:
            return sparse_categorical_crossentropy(y_true, y_pred)
        else:
            return categorical_crossentropy(y_true, y_pred)
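Since crf_loss reads the CRF layer back out of y_pred._keras_history, it is meant to be passed to model.compile for a model whose output layer is the keras_contrib CRF layer. A minimal sketch of that wiring, assuming keras_contrib is installed; the vocabulary size, tag count and sequence length are placeholders:

from keras.models import Sequential
from keras.layers import Embedding
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy

# Hypothetical tagger: 1000-word vocabulary, sequences of length 50, 10 tag classes.
model = Sequential()
model.add(Embedding(1000, 64, input_length=50, mask_zero=True))
crf = CRF(10, sparse_target=True)  # sparse_target=True -> integer tag labels
model.add(crf)

# In the default 'join' learn mode crf_loss returns the CRF negative
# log-likelihood; otherwise it falls back to (sparse_)categorical_crossentropy.
model.compile(optimizer='adam', loss=crf_loss, metrics=[crf_viterbi_accuracy])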
Example 3: test_sparse_categorical_crossentropy_4d
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def test_sparse_categorical_crossentropy_4d():
    y_pred = K.variable(np.array([[[[0.7, 0.1, 0.2],
                                    [0.0, 0.3, 0.7],
                                    [0.1, 0.1, 0.8]],
                                   [[0.3, 0.7, 0.0],
                                    [0.3, 0.4, 0.3],
                                    [0.2, 0.5, 0.3]],
                                   [[0.8, 0.1, 0.1],
                                    [1.0, 0.0, 0.0],
                                    [0.4, 0.3, 0.3]]]]))
    y_true = K.variable(np.array([[[0, 1, 0],
                                   [2, 1, 0],
                                   [2, 2, 1]]]))
    # Probabilities of 0.0 are clipped to K.epsilon() by the backend before the
    # log is taken, hence the K.epsilon() terms in the expected value.
    expected_loss = - (np.log(0.7) + np.log(0.3) + np.log(0.1) +
                       np.log(K.epsilon()) + np.log(0.4) + np.log(0.2) +
                       np.log(0.1) + np.log(K.epsilon()) + np.log(0.3)) / 9
    loss = K.eval(losses.sparse_categorical_crossentropy(y_true, y_pred))
    assert np.isclose(expected_loss, np.mean(loss))
Example 4: crf_loss
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def crf_loss(y_true, y_pred):
    """General CRF loss function depending on the learning mode.

    # Arguments
        y_true: tensor with true targets.
        y_pred: tensor with predicted targets.

    # Returns
        If the CRF layer is being trained in the join mode, returns the negative
        log-likelihood. Otherwise returns the categorical crossentropy implemented
        by the underlying Keras backend.

    # About GitHub
        If you open an issue or a pull request about CRF, please
        add `cc @lzfelix` to notify Luiz Felix.
    """
    crf, idx = y_pred._keras_history[:2]
    if crf.learn_mode == 'join':
        return crf_nll(y_true, y_pred)
    else:
        if crf.sparse_target:
            return sparse_categorical_crossentropy(y_true, y_pred)
        else:
            return categorical_crossentropy(y_true, y_pred)

# crf_marginal_accuracy, crf_viterbi_accuracy
Example 5: get_model_lstm
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def get_model_lstm():
    nclass = 5
    seq_input = Input(shape=(None, 3000, 1))
    base_model = get_base_model()
    for layer in base_model.layers:
        layer.trainable = False
    encoded_sequence = TimeDistributed(base_model)(seq_input)
    encoded_sequence = Bidirectional(LSTM(100, return_sequences=True))(encoded_sequence)
    encoded_sequence = Dropout(rate=0.5)(encoded_sequence)
    encoded_sequence = Bidirectional(LSTM(100, return_sequences=True))(encoded_sequence)
    # out = TimeDistributed(Dense(nclass, activation="softmax"))(encoded_sequence)
    out = Convolution1D(nclass, kernel_size=1, activation="softmax", padding="same")(encoded_sequence)
    model = models.Model(seq_input, out)
    model.compile(optimizers.Adam(0.001), losses.sparse_categorical_crossentropy, metrics=['acc'])
    model.summary()
    return model
Example 6: perplexity
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def perplexity(y_true, y_pred):
    """
    Popular metric for evaluating language modelling architectures.
    More info: http://cs224d.stanford.edu/lecture_notes/LectureNotes4.pdf
    """
    cross_entropy = K.sparse_categorical_crossentropy(y_true, y_pred)
    return K.mean(K.exp(K.mean(cross_entropy, axis=-1)))
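Because perplexity is written purely in terms of backend ops on (y_true, y_pred), it can be passed to model.compile as a custom metric next to the sparse cross-entropy loss. A minimal sketch, with placeholder vocabulary size, sequence length and layer widths:

from keras import backend as K
from keras import losses
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

def perplexity(y_true, y_pred):
    # exp of the mean per-token cross-entropy along the time axis
    cross_entropy = K.sparse_categorical_crossentropy(y_true, y_pred)
    return K.mean(K.exp(K.mean(cross_entropy, axis=-1)))

# Hypothetical language model: 5000-word vocabulary, sequences of length 20.
model = Sequential()
model.add(Embedding(5000, 128, input_length=20))
model.add(LSTM(128, return_sequences=True))
model.add(Dense(5000, activation='softmax'))
model.compile(optimizer='adam',
              loss=losses.sparse_categorical_crossentropy,
              metrics=[perplexity])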
Example 7: output_suggested_loss
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def output_suggested_loss(self):
    self._check_output_support()
    suggested_loss = losses.sparse_categorical_crossentropy
    return suggested_loss
Example 8: test_cce_one_hot
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def test_cce_one_hot():
    y_a = K.variable(np.random.randint(0, 7, (5, 6)))
    y_b = K.variable(np.random.random((5, 6, 7)))
    objective_output = sparse_categorical_crossentropy(y_a, y_b)
    assert K.eval(objective_output).shape == (5, 6)

    y_a = K.variable(np.random.randint(0, 7, (6,)))
    y_b = K.variable(np.random.random((6, 7)))
    assert K.eval(sparse_categorical_crossentropy(y_a, y_b)).shape == (6,)
Example 9: get_model
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def get_model():
    nclass = 5
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass, activation=activations.softmax, name="dense_3_mitbih")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)
    model.compile(optimizer=opt, loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
    model.summary()
    return model
Example 10: test_cce_one_hot
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def test_cce_one_hot():
    y_a = K.variable(np.random.randint(0, 7, (5, 6)))
    y_b = K.variable(np.random.random((5, 6, 7)))
    objective_output = losses.sparse_categorical_crossentropy(y_a, y_b)
    assert K.eval(objective_output).shape == (5, 6)

    y_a = K.variable(np.random.randint(0, 7, (6,)))
    y_b = K.variable(np.random.random((6, 7)))
    assert K.eval(losses.sparse_categorical_crossentropy(y_a, y_b)).shape == (6,)
Example 11: test_sparse_categorical_crossentropy
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def test_sparse_categorical_crossentropy():
    y_pred = K.variable(np.array([[0.3, 0.6, 0.1],
                                  [0.1, 0.2, 0.7]]))
    y_true = K.variable(np.array([1, 2]))
    # y_true selects class 1 of the first row and class 2 of the second,
    # so the mean loss is -(log 0.6 + log 0.7) / 2.
    expected_loss = - (np.log(0.6) + np.log(0.7)) / 2
    loss = K.eval(losses.sparse_categorical_crossentropy(y_true, y_pred))
    assert np.isclose(expected_loss, np.mean(loss))
Example 12: get_model
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def get_model():
    nclass = 5
    inp = Input(shape=(3000, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = SpatialDropout1D(rate=0.01)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = SpatialDropout1D(rate=0.01)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = SpatialDropout1D(rate=0.01)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.01)(img_1)

    dense_1 = Dropout(rate=0.01)(Dense(64, activation=activations.relu, name="dense_1")(img_1))
    dense_1 = Dropout(rate=0.05)(Dense(64, activation=activations.relu, name="dense_2")(dense_1))
    dense_1 = Dense(nclass, activation=activations.softmax, name="dense_3")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)
    model.compile(optimizer=opt, loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
    model.summary()
    return model
Example 13: get_base_model
# Required import: from keras import losses [as alias]
# Or: from keras.losses import sparse_categorical_crossentropy [as alias]
def get_base_model():
    inp = Input(shape=(3000, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = SpatialDropout1D(rate=0.01)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = SpatialDropout1D(rate=0.01)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = SpatialDropout1D(rate=0.01)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.01)(img_1)

    dense_1 = Dropout(0.01)(Dense(64, activation=activations.relu, name="dense_1")(img_1))

    base_model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)
    base_model.compile(optimizer=opt, loss=losses.sparse_categorical_crossentropy, metrics=['acc'])
    # model.summary()
    return base_model