This page collects typical usage examples of the Python method keras.backend.update_add. If you have been wondering what backend.update_add does, or how to use it in practice, the curated code samples below may help. You can also explore the other methods of the keras.backend module.
Fifteen code examples of backend.update_add are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python samples.
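Before the examples, here is a minimal sketch of what update_add itself does, assuming the TF1-style graph semantics these snippets target: it returns an op that increments a backend variable in place.

import keras.backend as K

counter = K.variable(0., name='counter')
inc = K.update_add(counter, 1.)   # an op: counter += 1 when evaluated
K.eval(inc)                       # force one evaluation (graph mode)
print(K.get_value(counter))       # -> 1.0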
Example 1: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]  # step counter

    t = K.cast(self.iterations, K.floatx()) + 1
    # Adam's bias-corrected learning rate
    lr_t = self.learning_rate * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                                 (1. - K.pow(self.beta_1, t)))

    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]  # 1st moments
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]  # 2nd moments
    self.weights = [self.iterations] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
        p_t = lr_t * m_t / (K.sqrt(v_t) + self.epsilon)  # step to subtract
        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        self.updates.append(K.update_sub(p, p_t))
    return self.updates
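The snippet only shows the method body; the class and attribute setup are implied. A hedged sketch of the missing pieces, with a hypothetical class name MyAdam (not from the source), followed by the usual compile flow:

class MyAdam(keras.optimizers.Optimizer):   # hypothetical class name
    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-7, **kwargs):
        super(MyAdam, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
        self.learning_rate = learning_rate
        self.beta_1, self.beta_2, self.epsilon = beta_1, beta_2, epsilon

model.compile(optimizer=MyAdam(learning_rate=1e-3), loss='mse')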
Example 2: __call__

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def __call__(self, y_true, y_pred):
    y_true = K.cast(K.round(y_true), "int32")
    y_pred = K.cast(K.round(y_pred), "int32")
    neg_y_pred = 1 - y_pred
    tp = K.sum(K.transpose(y_true * y_pred), axis=-1)      # true positives
    fn = K.sum(K.transpose(y_true * neg_y_pred), axis=-1)  # false negatives
    current_tp = K.cast(self.tp + tp, self.epsilon.dtype)
    current_fn = K.cast(self.fn + fn, self.epsilon.dtype)
    # Accumulate the running counts across batches.
    tp_update = K.update_add(self.tp, tp)
    fn_update = K.update_add(self.fn, fn)
    self.add_update(tp_update, inputs=[y_true, y_pred])
    self.add_update(fn_update, inputs=[y_true, y_pred])
    # Streaming recall: tp / (tp + fn). truediv is element-wise division,
    # presumably tf.truediv or equivalent in the source project.
    return K.mean(truediv(current_tp, current_tp + current_fn + self.epsilon))
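The running counters are backend variables created elsewhere in the (unshown) class; since it calls self.add_update(..., inputs=...), the real class presumably subclasses keras.layers.Layer, which provides that method. A minimal sketch of the missing setup, assuming tp/fn are per-class vectors:

class StreamingRecall(keras.layers.Layer):  # hypothetical name
    def __init__(self, n_classes, epsilon=1e-7, **kwargs):
        super(StreamingRecall, self).__init__(**kwargs)
        self.tp = K.zeros((n_classes,), dtype='int32', name='tp')
        self.fn = K.zeros((n_classes,), dtype='int32', name='fn')
        self.epsilon = K.constant(epsilon, dtype=K.floatx())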
Example 3: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
        lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                              K.dtype(self.decay))))

    t = K.cast(self.iterations, K.floatx()) + 1
    lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                 (1. - K.pow(self.beta_1, t)))

    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = [self.iterations] + ms + vs + vhats

    for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
        vhat_t = K.maximum(vhat, v_t)  # AMSGrad: running max of 2nd moment
        p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        self.updates.append(K.update(vhat, vhat_t))
        new_p = p_t

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)

        self.updates.append(K.update(p, new_p))
    return self.updates
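The only difference from plain Adam is the running maximum vhat_t = max(vhat, v_t), which keeps the denominator from shrinking and hence the effective step size from growing. A plain-Python illustration:

v_history = [0.5, 0.2, 0.8, 0.3]   # second-moment estimates over time
vhat = 0.0
for v in v_history:
    vhat = max(vhat, v)
    print(vhat)                    # 0.5, 0.5, 0.8, 0.8 -- never decreases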
Example 4: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
        lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                  K.dtype(self.decay))))

    t = K.cast(self.iterations, K.floatx()) + 1
    beta_1_t = K.pow(self.beta_1, t)
    beta_2_t = K.pow(self.beta_2, t)
    rho = 2 / (1 - self.beta_2) - 1                 # rho_infinity
    rho_t = rho - 2 * t * beta_2_t / (1 - beta_2_t)
    # Rectification term; well defined only once rho_t > 4
    r_t = K.sqrt(
        K.relu(rho_t - 4) * K.relu(rho_t - 2) * rho / ((rho - 4) * (rho - 2) * rho_t)
    )
    flag = K.cast(rho_t > 4, K.floatx())            # 1 once the variance is tractable

    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = [self.iterations] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
        mhat_t = m_t / (1 - beta_1_t)               # bias-corrected 1st moment
        vhat_t = K.sqrt(v_t / (1 - beta_2_t))       # bias-corrected RMS
        # Rectified adaptive step, or plain momentum while flag == 0
        p_t = p - lr * mhat_t * (flag * r_t / (vhat_t + self.epsilon) + (1 - flag))

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        new_p = p_t

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)

        self.updates.append(K.update(p, new_p))
    return self.updates
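This is the rectified-Adam (RAdam) rule: while the variance estimate is unreliable (rho_t <= 4) the flag falls back to un-adapted momentum SGD, and the rectification term r_t takes over afterwards. A quick check of when the switch flips, with beta_2 = 0.999:

beta_2 = 0.999
rho_inf = 2. / (1. - beta_2) - 1.                    # = 1999
for t in (1, 2, 5, 10):
    beta_2_t = beta_2 ** t
    rho_t = rho_inf - 2. * t * beta_2_t / (1. - beta_2_t)
    print(t, round(rho_t, 1), rho_t > 4)             # True from t = 5 on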
Example 5: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    weights = self.get_weights()
    self.updates = [K.update_add(self.iterations, 1)]

    scaled_lr = self.lr
    w_norm = K.sqrt(K.sum([K.sum(K.square(weight))
                           for weight in weights]))
    g_norm = K.sqrt(K.sum([K.sum(K.square(grad))
                           for grad in grads]))
    # Layer-wise trust ratio: eeta * ||w|| / (||g|| + wd * ||w||)
    scaled_lr = K.switch(K.greater(w_norm * g_norm, K.zeros([1])),
                         K.expand_dims((self.eeta * w_norm /
                                        (g_norm + self.weight_decay * w_norm +
                                         self.epsilon)) * self.lr),
                         K.ones([1]) * self.lr)
    if K.backend() == 'theano':
        scaled_lr = scaled_lr[0]  # otherwise Theano raises a broadcasting error

    # Momentum
    moments = [K.zeros(K.int_shape(param), dtype=K.dtype(param))
               for param in params]
    self.weights = [self.iterations] + moments

    for param, grad, moment in zip(params, grads, moments):
        v0 = (moment * self.momentum)
        v1 = scaled_lr * grad  # velocity
        veloc = v0 - v1
        self.updates.append(K.update(moment, veloc))

        if self.nesterov:
            new_param = param + (veloc * self.momentum) - v1
        else:
            new_param = param + veloc

        # Apply constraints.
        if getattr(param, 'constraint', None) is not None:
            new_param = param.constraint(new_param)

        self.updates.append(K.update(param, new_param))
    return self.updates
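The heart of LARS is the trust ratio that rescales the global learning rate per layer. A NumPy sketch of that scaling with illustrative values (weights and grads stand in for one layer's parameter and gradient arrays):

import numpy as np

weights = [np.ones((3, 3))]
grads = [0.1 * np.ones((3, 3))]
eeta, weight_decay, lr, eps = 0.001, 1e-4, 1.0, 1e-8
w_norm = np.sqrt(sum(np.sum(w ** 2) for w in weights))   # 3.0
g_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))     # 0.3
trust = eeta * w_norm / (g_norm + weight_decay * w_norm + eps)
scaled_lr = trust * lr if w_norm * g_norm > 0 else lr
print(scaled_lr)                                         # ~0.00999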
Example 6: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
        lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                  K.dtype(self.decay))))

    t = K.cast(self.iterations, K.floatx()) + 1
    lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                 (1. - K.pow(self.beta_1, t)))

    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vhats = [K.zeros(1) for _ in params]  # kept in self.weights but unused below
    self.weights = [self.iterations] + ms + vs + vhats

    for p, g, m, v in zip(params, grads, ms, vs):
        g2 = K.square(g)
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        # Sign-based additive second-moment update (not an exponential average)
        v_t = v - (1. - self.beta_2) * K.sign(v - g2) * g2
        p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        new_p = p_t

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)

        self.updates.append(K.update(p, new_p))
    return self.updates
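The sign-based additive update for v_t matches the Yogi optimizer: unlike Adam's exponential moving average, v only moves in proportion to g², so it does not decay toward zero on steps with vanishing gradients. Compare the two in plain Python:

beta_2, v_adam, v_yogi = 0.9, 0.0, 0.0
for g2 in (1.0, 1.0, 0.0, 0.0):
    v_adam = beta_2 * v_adam + (1. - beta_2) * g2
    v_yogi = v_yogi - (1. - beta_2) * (1. if v_yogi > g2 else -1.) * g2
    print(round(v_adam, 3), round(v_yogi, 3))
# Adam's v decays once g2 hits zero; Yogi's v stays put.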
Example 7: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.inital_decay > 0:  # attribute spelled this way in the source class
        lr *= (1. / (1. + self.decay * self.iterations))

    t = self.iterations + 1
    lr_t = lr / (1. - K.pow(self.beta_1, t))

    shapes = [K.int_shape(p) for p in params]
    zs = [K.zeros(shape) for shape in shapes]  # dual-averaging accumulators
    vs = [K.zeros(shape) for shape in shapes]  # second moments
    ds = [K.zeros(shape) for shape in shapes]  # per-step denominators
    self.weights = [self.iterations] + zs + vs + ds

    for p, g, z, v, d in zip(params, grads, zs, vs, ds):
        v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
        d_t = (K.sqrt(v_t / (1. - K.pow(self.beta_2, t)))
               + self.epsilon) / lr_t
        sigma_t = d_t - self.beta_1 * d
        z_t = self.beta_1 * z + (1. - self.beta_1) * g - sigma_t * p
        p_t = -z_t / d_t

        self.updates.append(K.update(z, z_t))
        self.updates.append(K.update(v, v_t))
        self.updates.append(K.update(d, d_t))
        new_p = p_t

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)

        self.updates.append(K.update(p, new_p))
    return self.updates
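This z/d/sigma bookkeeping matches the FTML (Follow The Moving Leader) optimizer as implemented in keras-contrib. When no constraint perturbs p between steps, z stays equal to -d * p, and a little algebra reduces one step to a centered, Adam-style move p_t = p - (1 - beta_1) * g / d_t. A quick numeric check of that identity:

beta_1 = 0.9
z, d, p, g, d_t = -0.5, 1.0, 0.5, 0.2, 1.25  # chosen so that z == -d * p
sigma_t = d_t - beta_1 * d
z_t = beta_1 * z + (1. - beta_1) * g - sigma_t * p
p_t = -z_t / d_t
assert abs(p_t - (p - (1. - beta_1) * g / d_t)) < 1e-12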
Example 8: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, params, constraints, loss):  # legacy Keras 1.x signature
    grads = self.get_gradients(loss, params)
    self.updates = []
    self.updates.append(K.update_add(self.iterations, 1))
    for p, g in zip(params, grads):
        # Plain SGD step, expressed as a (variable, new_value) tuple
        self.updates.append((p, p - self.lr * g))
    return self.updates
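Two legacy conventions show up here: the Keras 1.x method signature get_updates(params, constraints, loss), and Theano-style (variable, new_value) tuples in the updates list, which older backends accepted in place of explicit assign ops. The modern equivalent of the loop body would be (a sketch, not from the source):

self.updates.append(K.update(p, p - self.lr * g))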
Example 9: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
# Also assumes: import tensorflow as tf (TF 1.x graph mode)
def get_updates(self, loss, params):
    tower_gradvars = []
    gdev_list = self._gdev_list
    global_scope = tf.get_variable_scope()
    # Compute gradients on each device ("tower"), reusing variables.
    for idev, device in enumerate(gdev_list):
        with tf.device(device), \
                tf.variable_scope(global_scope, reuse=idev > 0), \
                tf.name_scope('tower_%i' % idev):
            grads = self.optimizer.compute_gradients(loss, params)
            gradvars = zip(grads, params)
            tower_gradvars.append(gradvars)

    # Average the per-tower gradients (project helper).
    tower_gradvars = all_avg_gradients(tower_gradvars,
                                       gdev_list,
                                       usenccl=False)

    self.updates = [K.update_add(self.iterations, 1)]
    for device_num, device in enumerate(gdev_list):
        with tf.device(device):
            gradvars = tower_gradvars[device_num]
            # Apply this tower's averaged (grad, var) pairs.
            opt_update = self.optimizer.apply_gradients(
                gradvars, global_step=self.iterations)
            self.updates.append(opt_update)
    return self.updates
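Everything here assumes TensorFlow 1.x graph mode (tf.get_variable_scope, per-tower tf.name_scope) plus an all_avg_gradients helper from the surrounding project. Under TF2 a snippet like this only runs with v1 compatibility enabled:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores the graph-mode variable scopes used above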
Example 10: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    wd = self.wd  # decoupled weight decay (3/4)

    lr = self.lr
    if self.initial_decay > 0:
        lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                              K.dtype(self.decay))))

    t = K.cast(self.iterations, K.floatx()) + 1
    lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                 (1. - K.pow(self.beta_1, t)))

    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = [self.iterations] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
        p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) - lr * wd * p  # decoupled weight decay (4/4)

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        new_p = p_t

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)

        self.updates.append(K.update(p, new_p))
    return self.updates
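The "decoupled" part is that wd * p is subtracted from the parameter directly, scaled by the raw lr rather than passing through the adaptive denominator; classic L2 regularization instead folds the decay into the gradient. Schematically:

# Classic L2: decay enters the gradient and is rescaled by 1/sqrt(v_t)
#     g_reg = g + wd * p
#     p_t   = p - lr_t * m_t / (sqrt(v_t) + eps)
# Decoupled weight decay (above): decay bypasses the denominator
#     p_t   = p - lr_t * m_t / (sqrt(v_t) + eps) - lr * wd * p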
Example 11: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
        lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                              K.dtype(self.decay))))

    # Momentum
    shapes = [K.int_shape(p) for p in params]
    moments = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + moments

    for p, g, m in zip(params, grads, moments):
        v = self.momentum * m + g  # velocity accumulates raw gradients
        self.updates.append(K.update(m, v))

        if self.nesterov:
            new_p = p - lr * (self.momentum * v + g)
        else:
            new_p = p - lr * v

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)

        self.updates.append(K.update(p, new_p))
    return self.updates
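Note the convention: the velocity accumulates raw gradients (v = momentum * m + g) and lr is applied when the step is taken, whereas keras.optimizers.SGD accumulates lr-scaled steps (v = momentum * m - lr * g). With a constant lr the two produce identical trajectories:

lr, momentum, m_raw, m_keras = 0.1, 0.9, 0.0, 0.0
for g in (1.0, 0.5, 0.25):
    m_raw = momentum * m_raw + g           # this example's convention
    m_keras = momentum * m_keras - lr * g  # built-in SGD convention
    assert abs(-lr * m_raw - m_keras) < 1e-12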
Example 12: call

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def call(self, inputs, training=None):
    noise_shape = self._get_noise_shape(inputs)
    t = K.cast(self.iterations, K.floatx()) + 1
    p = t / float(self.decay_interval)
    keep_rate = self.initial_keep_rate * K.pow(self.decay_rate, p)

    def dropped_inputs():
        # Count a step each time dropout actually fires (training phase only).
        self.add_update([K.update_add(self.iterations, [1])], inputs)
        return K.dropout(inputs, 1 - keep_rate[0], noise_shape, seed=self.seed)

    return K.in_train_phase(dropped_inputs, inputs, training=training)
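The keep rate decays geometrically with the step count: keep_rate = initial_keep_rate * decay_rate ** (t / decay_interval). A quick plain-Python trace, with illustrative parameter values:

initial_keep_rate, decay_rate, decay_interval = 1.0, 0.5, 1000
for t in (0, 1000, 2000, 4000):
    keep_rate = initial_keep_rate * decay_rate ** (t / float(decay_interval))
    print(t, keep_rate)  # 1.0, 0.5, 0.25, 0.0625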
Example 13: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
        lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                              K.dtype(self.decay))))

    # 1.0 on every accum_iters-th step, 0.0 otherwise
    accum_switch = K.cast(K.equal(self.iterations % self.accum_iters, 0),
                          dtype='float32')

    # Momentum
    shapes = [K.int_shape(p) for p in params]
    moments = [K.zeros(shape) for shape in shapes]
    temp_grads = [K.zeros(shape) for shape in shapes]  # gradient accumulators
    self.weights = [self.iterations] + moments

    for p, cg, m, tg in zip(params, grads, moments, temp_grads):
        g = cg + tg  # current gradient plus what has accumulated so far
        v = self.momentum * m - (lr * g / self.accum_iters)  # velocity
        self.updates.append(K.update(m, (1 - accum_switch) * m + accum_switch * v))
        self.updates.append(K.update(tg, (1 - accum_switch) * g))

        if self.nesterov:
            new_p = p + self.momentum * v - (lr * g / self.accum_iters)
        else:
            new_p = p + v

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)

        self.updates.append(K.update(p, (1 - accum_switch) * p + accum_switch * new_p))
    return self.updates

Source: developer wwoody827, project cvpr-2018-autonomous-driving-autopilot-solution, file model_inceptionresnet.py (37 lines)
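accum_switch is 1 only on every accum_iters-th step, so gradients pile up in temp_grads and the parameters move once per cycle with the accumulated, averaged gradient. The gating pattern:

accum_iters = 4
for it in range(1, 9):
    accum_switch = 1 if it % accum_iters == 0 else 0
    print(it, accum_switch)  # parameters update only on steps 4 and 8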
Example 14: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
        lr *= (1. / (1. + self.decay * K.cast(self.iterations,
                                              K.dtype(self.decay))))

    # Momentum
    shapes = [K.int_shape(p) for p in params]
    moments = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + moments

    for p, g, m in zip(params, grads, moments):
        # Per-layer learning rate: substring match against the variable name
        matched_layer = [x for x in self.lr_multipliers.keys() if x in p.name]
        if matched_layer:
            new_lr = lr * self.lr_multipliers[matched_layer[0]]
        else:
            new_lr = lr

        v = self.momentum * m - new_lr * g  # velocity
        self.updates.append(K.update(m, v))

        if self.nesterov:
            new_p = p + self.momentum * v - new_lr * g
        else:
            new_p = p + v

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)

        self.updates.append(K.update(p, new_p))
    return self.updates
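The multiplier lookup works by substring match against the parameter's backend name, so a key like 'dense_1' catches both that layer's kernel and its bias. An illustration (the variable names are typical TF-style names, assumed for the example):

lr_multipliers = {'dense_1': 0.1}
for name in ('dense_1/kernel:0', 'dense_1/bias:0', 'conv2d/kernel:0'):
    matched = [k for k in lr_multipliers if k in name]
    print(name, lr_multipliers[matched[0]] if matched else 1.0)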
Example 15: get_updates

# Required import: from keras import backend as K
# (alternatively: from keras.backend import update_add)
def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]
    t = K.cast(self.iterations, K.floatx()) + 1

    # Warming momentum schedule, due to the recommendations in [2]
    momentum_cache_t = self.beta_1 * (1. - 0.5 * (
        K.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
    momentum_cache_t_1 = self.beta_1 * (1. - 0.5 * (
        K.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
    m_schedule_new = self.m_schedule * momentum_cache_t
    m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
    self.updates.append((self.m_schedule, m_schedule_new))

    shapes = [K.int_shape(p) for p in params]
    ms = [K.zeros(shape, name='m_' + str(i))
          for (i, shape) in enumerate(shapes)]
    vs = [K.zeros(shape, name='v_' + str(i))
          for (i, shape) in enumerate(shapes)]
    self.weights = [self.iterations, self.m_schedule] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
        # Learning rate multipliers
        lr_t = self.learning_rate
        if self.lr_multipliers is not None:
            lr_t = _apply_lr_multiplier(self, lr_t, p)

        # The following equations are given in [1]
        g_prime = g / (1. - m_schedule_new)
        m_t = self.beta_1 * m + (1. - self.beta_1) * g
        m_t_prime = m_t / (1. - m_schedule_next)
        v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
        v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
        m_t_bar = (1. - momentum_cache_t) * g_prime + (
            momentum_cache_t_1 * m_t_prime)

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))

        p_t = p - self.eta_t * lr_t * m_t_bar / (
            K.sqrt(v_t_prime) + self.epsilon)

        # Weight decays
        if p.name in self.weight_decays.keys():
            p_t = _apply_weight_decays(self, p, p_t)
        new_p = p_t

        # Apply constraints.
        if getattr(p, 'constraint', None) is not None:
            new_p = p.constraint(new_p)
        self.updates.append(K.update(p, new_p))

    # Cosine annealing
    _update_t_cur_eta_t(self)
    self.lr_t = lr_t * self.eta_t  # for external tracking
    self._init_notified = True
    return self.updates
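This Nadam variant comes from an extended implementation (the _apply_lr_multiplier, _apply_weight_decays and _update_t_cur_eta_t helpers resemble the keras-adamw project). The warming momentum schedule from Dozat's Nadam paper grows the effective beta_1 toward its ceiling over time; a plain-Python trace:

beta_1, schedule_decay = 0.9, 0.004
for t in (1, 100, 1000, 10000):
    momentum_cache_t = beta_1 * (1. - 0.5 * 0.96 ** (t * schedule_decay))
    print(t, round(momentum_cache_t, 4))  # rises from ~0.45 toward 0.9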