本文整理汇总了Python中keras.losses方法的典型用法代码示例。如果您正苦于以下问题:Python keras.losses方法的具体用法?Python keras.losses怎么用?Python keras.losses使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras
的用法示例。
在下文中一共展示了keras.losses方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _parsePredictorLoss
# 需要导入模块: import keras [as 别名]
# 或者: from keras import losses [as 别名]
def _parsePredictorLoss(self, losses):
(_, img_loss, arm_loss, gripper_loss, label_loss, next_opt_loss,
val_loss) = losses
#print("img loss = ", img_loss)
#print("arm loss = ", arm_loss)
#print("gripper loss = ", gripper_loss)
#print("label loss = ", label_loss)
#print("next_opt loss = ", next_opt_loss)
return [img_loss, arm_loss, gripper_loss, label_loss]
示例2: _fitPredictor
# 需要导入模块: import keras [as 别名]
# 或者: from keras import losses [as 别名]
def _fitPredictor(self, features, targets, real_targets=None):
    """Fit ``self.train_predictor`` with random mini-batches.

    Runs ``self.iter`` iterations of ``train_on_batch`` on randomly sampled
    indices, optionally plotting intermediate predictions every
    ``self.show_iter`` iterations, then calls ``self._fixWeights()``.

    Parameters:
    -----------
    features: list of input arrays, all indexed along axis 0 by sample.
    targets: list of target arrays; each is expanded with a length-1 time
             axis before being fed to the model.
    real_targets: optional list of ground-truth arrays used only for
             plotting (first entry). Defaults to an empty list.

    Returns:
    --------
    None
    """
    # Fix: mutable default argument replaced with None sentinel.
    if real_targets is None:
        real_targets = []
    if self.show_iter > 0:
        # axes is reused by plotPredictions on each visualization step.
        fig, axes = plt.subplots(6, 6,)
        plt.tight_layout()
    # Fix: xrange (Python 2 only) replaced with range.
    for i in range(self.iter):
        idx = np.random.randint(0, features[0].shape[0], size=self.batch_size)
        x = [f[idx] for f in features]
        # Add a length-1 time axis expected by the predictor outputs.
        y = [np.expand_dims(f[idx], 1) for f in targets]
        losses = self.train_predictor.train_on_batch(x, y)
        print("Iter %d: loss ="%(i),losses)
        if self.show_iter > 0 and (i+1) % self.show_iter == 0:
            self.plotPredictions(features[:4], real_targets[:1], axes)
    self._fixWeights()
示例3: inject_global_losses
# 需要导入模块: import keras [as 别名]
# 或者: from keras import losses [as 别名]
def inject_global_losses(func):
    """Decorator injecting the active framework's losses module.

    The wrapped callable receives the module currently stored in the
    module-level ``_KERAS_LOSSES`` as its ``losses`` keyword argument,
    overriding any value supplied by the caller.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Merge so that 'losses' always reflects the configured framework.
        return func(*args, **{**kwargs, 'losses': _KERAS_LOSSES})
    return wrapper
示例4: embedding_mlp
# 需要导入模块: import keras [as 别名]
# 或者: from keras import losses [as 别名]
def embedding_mlp(num_classes=FLAGS.way, num_shots=FLAGS.shot, num_tasks=FLAGS.num_tasks,
                  num_encoding_dims=FLAGS.num_encoding_dims, test_set=FLAGS.test_set, dataset=FLAGS.dataset,
                  units=FLAGS.units, dropout=FLAGS.dropout):
    """Few-shot classification baseline: fit a small MLP on embeddings.

    For each of ``num_tasks`` sampled tasks, trains a fresh
    Dense-Dropout-Dense softmax classifier on the few-shot training
    embeddings and reports mean train/test accuracy with a 95% CI.

    Parameters mirror the FLAGS defaults; ``dataset`` selects between the
    standard label-partition path and the celeba attribute-pool path.
    """
    import keras
    from keras.layers import Dense, Dropout
    from keras.losses import categorical_crossentropy
    from keras.callbacks import EarlyStopping
    from keras import backend as K

    if dataset != 'celeba':
        _, _, _, X_test, Y_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
        task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots,
                                       num_samples_per_class=num_shots + 5)
        partition = task_generator.get_partition_from_labels(Y_test)
        partitions = [partition]
    else:
        _, _, _, X_test, attributes_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
        task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots,
                                       num_samples_per_class=num_shots + 5)
        partitions = task_generator.get_celeba_task_pool(attributes_test)
    tasks = task_generator.get_tasks(num_tasks=num_tasks, partitions=partitions)

    # Fix: (num_tasks // 10) is 0 when num_tasks < 10, which made the
    # progress-report modulus raise ZeroDivisionError. Clamp to >= 1.
    report_every = max(1, num_tasks // 10)

    train_accuracies, test_accuracies = [], []
    start = time.time()
    for i_task, task in enumerate(tqdm(tasks)):
        if (i_task + 1) % report_every == 0:
            tqdm.write('test {}, accuracy {:.5}'.format(i_task + 1, np.mean(test_accuracies)))
        ind_train_few, Y_train_few, ind_test_few, Y_test_few = task
        Z_train_few, Z_test_few = Z_test[ind_train_few], Z_test[ind_test_few]
        Y_train_few, Y_test_few = keras.utils.to_categorical(Y_train_few, num_classes=num_classes), keras.utils.to_categorical(Y_test_few, num_classes=num_classes)

        # One fresh classifier per task; session cleared after evaluation.
        model = keras.Sequential()
        model.add(Dense(units=units, activation='relu', input_dim=Z_train_few.shape[1]))
        model.add(Dropout(rate=dropout))
        model.add(Dense(units=num_classes, activation='softmax'))
        model.compile(loss=categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
        early_stopping = EarlyStopping(monitor='val_loss', patience=2)
        model.fit(Z_train_few, Y_train_few, batch_size=Z_train_few.shape[0], epochs=500, verbose=0,
                  validation_data=(Z_test_few, Y_test_few), callbacks=[early_stopping])

        train_score = model.evaluate(Z_train_few, Y_train_few, verbose=0)
        train_accuracies.append(train_score[1])
        test_score = model.evaluate(Z_test_few, Y_test_few, verbose=0)
        test_accuracies.append(test_score[1])
        K.clear_session()

    print('units={}, dropout={}'.format(units, dropout))
    print('{}-way {}-shot embedding mlp: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(test_accuracies), 1.96*np.std(test_accuracies)/np.sqrt(num_tasks), num_tasks))
    print('Mean training accuracy: {:.5}; standard deviation: {:.5}'.format(np.mean(train_accuracies), np.std(train_accuracies)))
    print('{} few-shot classification tasks: {:.5} seconds.'.format(num_tasks, time.time() - start))
示例5: validate
# 需要导入模块: import keras [as 别名]
# 或者: from keras import losses [as 别名]
def validate(self, *args, **kwargs):
    '''
    Run validation on a given trial.
    Note: this takes in whatever data your model needs to extract
    information for the next task. It's designed to work for any variant of
    the "predictor" model architecture, regardless of the specifics of the
    dataset -- or at least so we hope.

    > For a special case of the multi-predictor model:
      You MUST override the _targetsFromTrainTargets function above.

    Parameters:
    ----------
    None - just args and kwargs passed to _getData.

    Returns:
    --------
    error
    train_loss
    [loss per train target]
    '''
    features, targets = self._getData(*args, **kwargs)
    num_samples = features[0].shape[0]

    # Build predictor targets: each train target gains a length-1 time
    # axis, then two zero-filled placeholder targets are appended.
    prediction_targets = self._targetsFromTrainTargets(targets)
    prediction_targets = [np.expand_dims(pt, axis=1) for pt in prediction_targets]
    prediction_targets.append(np.zeros((num_samples, self.num_options)))
    prediction_targets.append(np.zeros((num_samples,)))

    loss_sums = None
    train_sum = 0
    # Evaluate one sample at a time, accumulating per-target losses.
    for i in range(num_samples):
        f = [np.array([f[i]]) for f in features]
        t = [np.array([t[i]]) for t in targets]
        pt = [np.array([pt[i]]) for pt in prediction_targets]
        loss, train_loss, next_loss = self.model.evaluate(f, t,
                                                          verbose=0)
        predictor_losses = self.predictor.evaluate(f, pt, verbose=0)
        per_target = self._parsePredictorLoss(predictor_losses)
        train_sum += train_loss
        if loss_sums is None:
            loss_sums = np.array(per_target)
        else:
            loss_sums += np.array(per_target)
    return loss_sums, train_sum, num_samples
示例6: get_config
# 需要导入模块: import keras [as 别名]
# 或者: from keras import losses [as 别名]
def get_config(self):
    """Return this CRF layer's configuration for serialization.

    Keras objects (initializers, regularizers, constraints, activation)
    are serialized to plain config values; the result is merged over the
    base layer's config, with this layer's keys taking precedence.
    """
    # Plain (already serializable) attributes.
    config = {
        'units': self.units,
        'learn_mode': self.learn_mode,
        'test_mode': self.test_mode,
        'use_boundary': self.use_boundary,
        'use_bias': self.use_bias,
        'sparse_target': self.sparse_target,
        'input_dim': self.input_dim,
        'unroll': self.unroll,
    }
    # Keras objects that need explicit serialization.
    config['kernel_initializer'] = initializers.serialize(self.kernel_initializer)
    config['chain_initializer'] = initializers.serialize(self.chain_initializer)
    config['boundary_initializer'] = initializers.serialize(self.boundary_initializer)
    config['bias_initializer'] = initializers.serialize(self.bias_initializer)
    config['activation'] = activations.serialize(self.activation)
    config['kernel_regularizer'] = regularizers.serialize(self.kernel_regularizer)
    config['chain_regularizer'] = regularizers.serialize(self.chain_regularizer)
    config['boundary_regularizer'] = regularizers.serialize(self.boundary_regularizer)
    config['bias_regularizer'] = regularizers.serialize(self.bias_regularizer)
    config['kernel_constraint'] = constraints.serialize(self.kernel_constraint)
    config['chain_constraint'] = constraints.serialize(self.chain_constraint)
    config['boundary_constraint'] = constraints.serialize(self.boundary_constraint)
    config['bias_constraint'] = constraints.serialize(self.bias_constraint)

    # Merge base config under this layer's config (ours wins on clashes).
    merged = dict(super(CRF, self).get_config())
    merged.update(config)
    return merged
# @property
# def loss_function(self):
# warnings.warn('CRF.loss_function is deprecated '
# 'and it might be removed in the future. Please '
# 'use losses.crf_loss instead.')
# return crf_loss
#
# @property
# def accuracy(self):
# warnings.warn('CRF.accuracy is deprecated and it '
# 'might be removed in the future. Please '
# 'use metrics.crf_accuracy')
# if self.test_mode == 'viterbi':
# return crf_viterbi_accuracy
# else:
# return crf_marginal_accuracy
#
# @property
# def viterbi_acc(self):
# warnings.warn('CRF.viterbi_acc is deprecated and it might '
# 'be removed in the future. Please '
# 'use metrics.viterbi_acc instead.')
# return crf_viterbi_accuracy
#
# @property
# def marginal_acc(self):
# warnings.warn('CRF.moarginal_acc is deprecated and it '
# 'might be removed in the future. Please '
# 'use metrics.marginal_acc instead.')
# return crf_marginal_accuracy
示例7: set_framework
# 需要导入模块: import keras [as 别名]
# 或者: from keras import losses [as 别名]
def set_framework(name):
    """Set framework for Segmentation Models

    Args:
        name (str): one of ``keras``, ``tf.keras``, case insensitive.

    Raises:
        ValueError: in case of incorrect framework name.
        ImportError: in case framework is not installed.
    """
    name = name.lower()

    # Validate up front so an unknown name fails before any import work.
    if name not in (_KERAS_FRAMEWORK_NAME, _TF_KERAS_FRAMEWORK_NAME):
        raise ValueError('Not correct module name `{}`, use `{}` or `{}`'.format(
            name, _KERAS_FRAMEWORK_NAME, _TF_KERAS_FRAMEWORK_NAME))

    if name == _KERAS_FRAMEWORK_NAME:
        import keras
        import efficientnet.keras  # init custom objects
    else:
        from tensorflow import keras
        import efficientnet.tfkeras  # init custom objects

    # Publish the selected framework's submodules at module scope.
    global _KERAS_BACKEND, _KERAS_LAYERS, _KERAS_MODELS
    global _KERAS_UTILS, _KERAS_LOSSES, _KERAS_FRAMEWORK

    _KERAS_FRAMEWORK = name
    _KERAS_BACKEND = keras.backend
    _KERAS_LAYERS = keras.layers
    _KERAS_MODELS = keras.models
    _KERAS_UTILS = keras.utils
    _KERAS_LOSSES = keras.losses

    # allow losses/metrics get keras submodules
    base.KerasObject.set_submodules(
        backend=keras.backend,
        layers=keras.layers,
        models=keras.models,
        utils=keras.utils,
    )
# set default framework