This article collects typical usage examples of the keras.backend.get_value method in Python. If you are wondering what backend.get_value does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples from the module it belongs to, keras.backend.
A total of 15 code examples of backend.get_value are shown below, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Python code examples.
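Before the individual examples, here is a minimal, self-contained sketch of the core pattern almost all of them rely on: reading a backend variable as a NumPy value with K.get_value and writing a new value back with K.set_value. The variable name and numbers below are illustrative only and do not come from any of the examples.

from keras import backend as K

# Create a backend variable, read it out as a NumPy value, then overwrite it.
lr_var = K.variable(0.1, name='lr')
print(float(K.get_value(lr_var)))   # -> 0.1
K.set_value(lr_var, 0.01)           # assigns the new value in the backend
print(float(K.get_value(lr_var)))   # -> 0.01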
Example 1: on_batch_end
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def on_batch_end(self, batch, logs):
    # Log the learning rate
    lr = K.get_value(self.model.optimizer.lr)
    self.lrs.append(lr)

    # Log the loss
    loss = logs['loss']
    self.losses.append(loss)

    # Check whether the loss got too large or NaN
    if batch > 5 and (math.isnan(loss) or loss > self.best_loss * 4):
        self.model.stop_training = True
        return

    if loss < self.best_loss:
        self.best_loss = loss

    # Increase the learning rate for the next batch
    lr *= self.lr_mult
    K.set_value(self.model.optimizer.lr, lr)
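Example 1 only shows the per-batch hook of a learning-rate range test. Assuming the surrounding class also initializes self.lrs, self.losses, self.best_loss and self.lr_mult (as Examples 11 and 12 suggest), the recorded history is usually inspected afterwards with a log-scale plot; the helper below is a sketch of that step and is not part of the original class.

import matplotlib.pyplot as plt

def plot_lr_range_test(lrs, losses):
    # Loss versus learning rate on a logarithmic x-axis, the usual way to read
    # an LR range test: pick a learning rate just before the loss starts to explode.
    plt.plot(lrs, losses)
    plt.xscale('log')
    plt.xlabel('learning rate (log scale)')
    plt.ylabel('loss')
    plt.show()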
Example 2: get_config
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'decay': float(K.get_value(self.decay)),
        'batch_size': int(self.batch_size),
        'total_iterations': int(self.total_iterations),
        'weight_decays': self.weight_decays,
        'lr_multipliers': self.lr_multipliers,
        'use_cosine_annealing': self.use_cosine_annealing,
        't_cur': int(K.get_value(self.t_cur)),
        'eta_t': float(K.eval(self.eta_t)),
        'eta_min': float(K.get_value(self.eta_min)),
        'eta_max': float(K.get_value(self.eta_max)),
        'init_verbose': self.init_verbose,
        'epsilon': self.epsilon,
        'amsgrad': self.amsgrad
    }
    base_config = super(AdamW, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
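Assuming AdamW follows the standard Keras Optimizer interface, this config dictionary is what makes the optimizer serializable: the default Optimizer.from_config simply calls cls(**config), so the round trip sketched below only works if every key returned above maps to a constructor argument of AdamW, which is an assumption and not something shown in the source.

config = optimizer.get_config()        # dict built by the method above
restored = AdamW.from_config(config)   # default implementation: AdamW(**config)
assert float(K.get_value(restored.lr)) == config['lr']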
Example 3: build
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    self.input_dim = input_shape[2]

    self.W = self.init((self.output_dim, 4 * self.input_dim),
                       name='{}_W'.format(self.name))
    self.U = self.inner_init((self.input_dim, 4 * self.input_dim),
                             name='{}_U'.format(self.name))
    self.b = K.variable(np.hstack((np.zeros(self.input_dim),
                                   K.get_value(self.forget_bias_init((self.input_dim,))),
                                   np.zeros(self.input_dim),
                                   np.zeros(self.input_dim))),
                        name='{}_b'.format(self.name))

    self.A = self.init((self.input_dim, self.output_dim),
                       name='{}_A'.format(self.name))
    self.ba = K.zeros((self.output_dim,), name='{}_ba'.format(self.name))

    self.trainable_weights = [self.W, self.U, self.b, self.A, self.ba]

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
Example 4: on_epoch_end
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def on_epoch_end(self, epoch, logs={}):
    current = logs.get(self.monitor)
    lr = self.model.optimizer.lr

    # If you want to apply decay.
    if k.get_value(self.model.optimizer.iterations) == 100:
        k.set_value(self.model.optimizer.lr, 0.01)
        print("Updating Learning rate", 0.01)
        print("Current learning rate", k.get_value(self.model.optimizer.lr))

    if current is None:
        warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
        return  # nothing to compare against, so skip the threshold check

    # if k.get_value(self.model.optimizer.iterations) % 5 == 0:
    #     save_to_drive(k.get_value(self.model.optimizer.iterations))

    if current >= self.value:
        if self.verbose > 0:
            print("Epoch %05d: early stopping THR" % epoch)
        self.model.stop_training = True
# Load CIFAR10 Data
Example 5: on_batch_end
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def on_batch_end(self, epoch, logs=None):
    logs = logs or {}
    self.clr_iterations += 1
    new_lr = self.compute_lr()

    self.history.setdefault('lr', []).append(
        K.get_value(self.model.optimizer.lr))
    K.set_value(self.model.optimizer.lr, new_lr)

    if self._update_momentum:
        if not hasattr(self.model.optimizer, 'momentum'):
            raise ValueError("Momentum can be updated only on SGD optimizer !")

        new_momentum = self.compute_momentum()

        self.history.setdefault('momentum', []).append(
            K.get_value(self.model.optimizer.momentum))
        K.set_value(self.model.optimizer.momentum, new_momentum)

    for k, v in logs.items():
        self.history.setdefault(k, []).append(v)
Example 6: _runner
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def _runner(init, shape, target_mean=None, target_std=None,
            target_max=None, target_min=None, upper_bound=None, lower_bound=None):
    variable = init(shape)
    if not isinstance(variable, np.ndarray):
        output = K.get_value(variable)
    else:
        output = variable

    lim = 1e-2
    if target_std is not None:
        assert abs(output.std() - target_std) < lim
    if target_mean is not None:
        assert abs(output.mean() - target_mean) < lim
    if target_max is not None:
        assert abs(output.max() - target_max) < lim
    if target_min is not None:
        assert abs(output.min() - target_min) < lim
    if upper_bound is not None:
        assert output.max() < upper_bound
    if lower_bound is not None:
        assert output.min() > lower_bound
Example 7: test_get_iou
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def test_get_iou(self):
    gtbox = K.variable([[1, 1, 3, 3], [2, 2, 4, 4]])
    anchor = K.variable([
        [1, 1, 3, 3],  # exactly matches gtbox[0], so IoU=1;
                       # overlaps gtbox[1] by 1/4, so IoU=1/7.
        [1, 0, 3, 2],  # overlaps gtbox[0] by half, so IoU=1/3.
        [2, 2, 4, 4],  # overlaps gtbox[0] by 1/4, so IoU=1/7; exactly matches gtbox[1].
        [0, 3, 2, 5],  # adjacent to gtbox[0] (touching, no overlap).
        [4, 3, 6, 5],  # no contact with gtbox[0].
    ])
    expected = np.array([
        [1, 1 / 7],
        [1 / 3, 0],
        [1 / 7, 1],
        [0, 0],
        [0, 0],
    ])
    iou = K.get_value(bbox.get_iou(anchor, gtbox))
    np.testing.assert_almost_equal(iou, expected, decimal=5)
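The bbox.get_iou function tested in Example 7 is project-specific and not shown here. Purely as an illustration, a backend-level pairwise IoU for (x1, y1, x2, y2) boxes that would satisfy the expectations in this test could be sketched as follows; the function and argument names are assumptions, not the project's actual implementation.

from keras import backend as K

def get_iou(boxes_a, boxes_b):
    # Pairwise IoU between an (N, 4) tensor and an (M, 4) tensor of (x1, y1, x2, y2) boxes.
    a = K.expand_dims(boxes_a, axis=1)   # (N, 1, 4)
    b = K.expand_dims(boxes_b, axis=0)   # (1, M, 4)
    inter_wh = K.maximum(0.0, K.minimum(a[..., 2:], b[..., 2:]) - K.maximum(a[..., :2], b[..., :2]))
    intersection = inter_wh[..., 0] * inter_wh[..., 1]
    area_a = (a[..., 2] - a[..., 0]) * (a[..., 3] - a[..., 1])
    area_b = (b[..., 2] - b[..., 0]) * (b[..., 3] - b[..., 1])
    return intersection / (area_a + area_b - intersection)  # shape (N, M)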
Example 8: test_LearningRateScheduler
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def test_LearningRateScheduler():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
Example 9: test_ReduceLROnPlateau_patience
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def test_ReduceLROnPlateau_patience():
    class DummyOptimizer(object):
        def __init__(self):
            self.lr = K.variable(1.0)

    class DummyModel(object):
        def __init__(self):
            self.optimizer = DummyOptimizer()

    reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                    patience=2)
    reduce_on_plateau.model = DummyModel()

    losses = [0.0860, 0.1096, 0.1040]
    lrs = []
    for epoch in range(len(losses)):
        reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
        lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr))

    # The learning rates should be 1.0 except the last one
    assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0
Example 10: on_epoch_end
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def on_epoch_end(self, epoch, logs={}):
    if self.gamma is not None:
        K.set_value(self.model.optimizer.lr, self.gamma * K.get_value(self.model.optimizer.lr))
Example 11: find
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def find(self, x_train, y_train, start_lr, end_lr, batch_size=64, epochs=1, **kw_fit):
    # If x_train contains data for multiple inputs, use length of the first input.
    # Assumption: the first element in the list is single input; NOT a list of inputs.
    N = x_train[0].shape[0] if isinstance(x_train, list) else x_train.shape[0]

    # Compute number of batches and LR multiplier
    num_batches = epochs * N / batch_size
    self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(num_batches))

    # Save the initial weights so they can be restored after the range test
    initial_weights = self.model.get_weights()

    # Remember the original learning rate
    original_lr = K.get_value(self.model.optimizer.lr)

    # Set the initial learning rate
    K.set_value(self.model.optimizer.lr, start_lr)

    callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

    self.model.fit(x_train, y_train,
                   batch_size=batch_size, epochs=epochs,
                   callbacks=[callback],
                   **kw_fit)

    # Restore the weights to the state before model fitting
    self.model.set_weights(initial_weights)

    # Restore the original learning rate
    K.set_value(self.model.optimizer.lr, original_lr)
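A hedged usage sketch for Example 11: the class name LRFinder and its constructor are assumptions, only the find method above comes from the source; x_train and y_train stand for whatever NumPy arrays the model expects.

lr_finder = LRFinder(model)   # assumed constructor taking a compiled Keras model
lr_finder.find(x_train, y_train,
               start_lr=1e-6, end_lr=1.0,
               batch_size=64, epochs=1)
# Afterwards, lr_finder.lrs and lr_finder.losses (filled in by the hook from
# Example 1) can be plotted to choose a learning rate just before the loss diverges.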
Example 12: find_generator
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def find_generator(self, generator, start_lr, end_lr, epochs=1, steps_per_epoch=None, **kw_fit):
    if steps_per_epoch is None:
        try:
            steps_per_epoch = len(generator)
        except (ValueError, NotImplementedError):
            raise ValueError('`steps_per_epoch=None` is only valid for a'
                             ' generator based on the '
                             '`keras.utils.Sequence`'
                             ' class. Please specify `steps_per_epoch` '
                             'or use the `keras.utils.Sequence` class.')

    self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(epochs * steps_per_epoch))

    # Save the initial weights so they can be restored after the range test
    initial_weights = self.model.get_weights()

    # Remember the original learning rate
    original_lr = K.get_value(self.model.optimizer.lr)

    # Set the initial learning rate
    K.set_value(self.model.optimizer.lr, start_lr)

    callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

    self.model.fit_generator(generator=generator,
                             epochs=epochs,
                             steps_per_epoch=steps_per_epoch,
                             callbacks=[callback],
                             **kw_fit)

    # Restore the weights to the state before model fitting
    self.model.set_weights(initial_weights)

    # Restore the original learning rate
    K.set_value(self.model.optimizer.lr, original_lr)
Example 13: get_learning_rate
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def get_learning_rate(self):
    if hasattr(self.model, 'optimizer'):
        config = self.model.optimizer.get_config()

        from keras.optimizers import Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD

        if isinstance(self.model.optimizer, (Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD)):
            return config['lr'] * (1. / (1. + config['decay'] * float(K.get_value(self.model.optimizer.iterations))))
        elif 'lr' in config:
            return config['lr']
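The expression in Example 13 mirrors the time-based decay these Keras optimizers apply internally, effective_lr = lr / (1 + decay * iterations). A quick numeric check with illustrative values:

lr, decay, iterations = 0.01, 1e-4, 1000
effective_lr = lr * (1. / (1. + decay * iterations))
print(effective_lr)  # 0.01 / 1.1 ≈ 0.00909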
Example 14: change_lr
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def change_lr(self, new_lr):
    old_lr = K.get_value(self.model.optimizer.lr)
    K.set_value(self.model.optimizer.lr, new_lr)
    if self.verbose == 1:
        print('Learning rate is %g' % new_lr)
Example 15: on_epoch_end
# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_value [as alias]
def on_epoch_end(self, epoch, logs={}):
    self.losses['epoch'].append(logs.get('loss'))
    self.accuracy['epoch'].append(logs.get('acc'))
    self.val_loss['epoch'].append(logs.get('val_loss'))
    self.val_acc['epoch'].append(logs.get('val_acc'))
    self.loss_plot('batch')
    self.loss_plot('epoch')

    # Save the weights for this epoch
    tmp_path = os.path.join(self.model_path, str(len(self.accuracy['epoch'])) + '_epoch.h5')
    self.model.save_weights(tmp_path)

    # Save the current learning rate to a text file
    lr = K.get_value(self.model.optimizer.lr)
    tmp_path = os.path.join(self.model_path, str(len(self.accuracy['epoch'])) + '_lr.txt')
    lr_result = np.zeros(1)
    lr_result[0] = lr
    np.savetxt(tmp_path, lr_result)