This article collects typical usage examples of the Python method keras.backend.sqrt: what backend.sqrt does, how to call it, and what real code that uses it looks like. The curated examples below should help; you can also explore the other methods of the keras.backend module.
The following 15 code examples of backend.sqrt are shown, sorted by popularity by default.
Example 1: generate_pattern
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def generate_pattern(layer_name, filter_index, size=150):
    # filter visualization function
layer_output = model.get_layer(layer_name).output
loss = K.mean(layer_output[:, :, :, filter_index])
grads = K.gradients(loss, model.input)[0]
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
iterate = K.function([model.input], [loss, grads])
input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
    step = 1  # gradient-ascent step size
for _ in range(40):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
img = input_img_data[0]
return deprocess_image(img)
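This snippet assumes a global model and a deprocess_image helper defined elsewhere in the source project. A minimal sketch of the usual post-processing step, following the common filter-visualization recipe (the scaling constants here are assumptions, not taken from this project):

import numpy as np

def deprocess_image(x):
    # center on 0 with unit-ish variance, then squeeze into a low-contrast range
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    # shift into [0, 1] and convert to displayable 8-bit RGB
    x += 0.5
    x = np.clip(x, 0, 1)
    x *= 255
    return x.astype('uint8')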
Example 2: gradient_penalty_loss
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
"""
Computes gradient penalty based on prediction and weighted real / fake samples
"""
gradients = K.gradients(y_pred, averaged_samples)[0]
# compute the euclidean norm by squaring ...
gradients_sqr = K.square(gradients)
# ... summing over the rows ...
gradients_sqr_sum = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
# ... and sqrt
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
        # compute (1 - ||grad||)^2 for each single sample (the lambda factor is applied outside this function)
gradient_penalty = K.square(1 - gradient_l2_norm)
# return the mean as loss over all the batch samples
return K.mean(gradient_penalty)
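Keras losses take only (y_true, y_pred), so the averaged_samples argument has to be bound in advance. A hypothetical wiring in the style of the common Keras WGAN-GP setup; interpolated_img, critic_model, wasserstein_loss, and optimizer are assumed to be defined elsewhere:

from functools import partial

partial_gp_loss = partial(self.gradient_penalty_loss,
                          averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty'  # Keras requires a name for the loss
critic_model.compile(loss=[wasserstein_loss, wasserstein_loss, partial_gp_loss],
                     optimizer=optimizer,
                     loss_weights=[1, 1, 10])  # the weight 10 plays the role of lambda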
Example 3: smoothing
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def smoothing(im, mode=None):
# utility function to smooth an image
if mode is None:
return im
elif mode == 'L2':
# L2 norm
return im / (np.sqrt(np.mean(np.square(im))) + K.epsilon())
    elif mode == 'GaussianBlur':
        # Gaussian blur with sigma = 1/8, i.e. an effective kernel width of 3
        # (requires: from scipy.ndimage import filters)
        return filters.gaussian_filter(im, 1/8)
elif mode == 'Decay':
# Decay regularization
decay = 0.98
return decay * im
elif mode == 'Clip_weak':
# Clip weak pixel regularization
percentile = 1
        threshold = np.percentile(np.abs(im), percentile)
im[np.where(np.abs(im) < threshold)] = 0
return im
    else:
        print('Unknown smoothing mode; no smoothing applied.')
        return im
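A hypothetical call site inside a gradient-ascent visualization loop; grads_value, img, and step are assumed names:

grads_value = smoothing(grads_value, mode='GaussianBlur')  # regularize the raw gradient
img += step * grads_value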
Example 4: get_weightnorm_params_and_grads
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def get_weightnorm_params_and_grads(p, g):
    # p: parameter tensor, g: gradient of the loss w.r.t. p
    # (assumes a TensorFlow backend, with tf = tensorflow imported elsewhere)
    ps = K.get_variable_shape(p)
# construct weight scaler: V_scaler = g/||V||
V_scaler_shape = (ps[-1],) # assumes we're using tensorflow!
V_scaler = K.ones(V_scaler_shape) # init to ones, so effective parameters don't change
# get V parameters = ||V||/g * W
norm_axes = [i for i in range(len(ps) - 1)]
V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])
# split V_scaler into ||V|| and g parameters
V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))
g_param = V_scaler * V_norm
# get grad in V,g parameters
grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm
grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * \
(g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)
return V, V_norm, V_scaler, g_param, grad_g, grad_V
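This is the weight-normalization reparameterization of Salimans & Kingma (2016): each weight tensor is rewritten as W = g * V / ||V||, and the incoming gradient of W (the argument g above) is converted into the gradients grad_g and grad_V for the new scale and direction parameters.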
Example 5: call
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def call(self, inputs, **kwargs):
if type(inputs) is list: # true label is provided with shape = [None, n_classes], i.e. one-hot code.
assert len(inputs) == 2
inputs, mask = inputs
else: # if no true label, mask by the max length of capsules. Mainly used for prediction
# compute lengths of capsules
x = K.sqrt(K.sum(K.square(inputs), -1))
# generate the mask which is a one-hot code.
# mask.shape=[None, n_classes]=[None, num_capsule]
mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])
# inputs.shape=[None, num_capsule, dim_capsule]
# mask.shape=[None, num_capsule]
# masked.shape=[None, num_capsule * dim_capsule]
masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
return masked
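A hypothetical use inside a CapsNet model definition, following the usual CapsNet-Keras naming (Mask is assumed to be the enclosing layer class; digitcaps and y are assumed tensors):

masked_by_y = Mask()([digitcaps, y])  # training: mask with the one-hot true label
masked = Mask()(digitcaps)            # inference: mask with the longest capsule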
Example 6: get_updates
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
    t = K.cast(self.iterations, K.floatx()) + 1
    # Adam-style bias-corrected learning rate
    lr_t = self.learning_rate * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                                 (1. - K.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g             # first moment estimate
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)   # second moment estimate
        p_t = lr_t * m_t / (K.sqrt(v_t) + self.epsilon)              # parameter step
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
self.updates.append(K.update_sub(p, p_t))
return self.updates
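The update rule is Adam's: bias-corrected first and second moment estimates followed by p -= lr_t * m_t / (sqrt(v_t) + epsilon). Assuming the enclosing class is a keras.optimizers.Optimizer subclass (here given the hypothetical name MyAdam), it plugs into a model as usual:

model.compile(optimizer=MyAdam(learning_rate=1e-3), loss='mse')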
Example 7: loss
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def loss(self, y_true, y_pred):
# get the value for the true and fake images
disc_true = self.disc(y_true)
disc_pred = self.disc(y_pred)
    # sample x_hat along the line between the true and predicted samples
    # WARNING: should really be shape=[batch_size, 1]; self.batch_size
    # does not work here, since it is not None
    alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
diff = y_pred - y_true
interp = y_true + alpha * diff
# take gradient of D(x_hat)
gradients = K.gradients(self.disc(interp), [interp])[0]
    # note: this sums squares over axis 1 only; a full per-sample L2 norm would sum over all non-batch axes
    grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1)) - 1))
# compute loss
return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen
Example 8: fgsm
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001):
adv = inp.copy()
loss = K.mean(model.output[:, 0])
grads = K.gradients(loss, model.layers[1].output)[0]
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)
mask = np.zeros(model.layers[1].output.shape[1:]) # embedding layer output shape
mask[pad_idx:pad_idx+pad_len] = 1
grads *= K.constant(mask)
iterate = K.function([model.layers[1].output], [loss, grads])
    g = 0.                         # accumulated perturbation
    step = int(1/step_size) * 10   # iteration budget
for _ in range(step):
loss_value, grads_value = iterate([adv])
grads_value *= step_size
g += grads_value
adv += grads_value
#print (e, loss_value, end='\r')
if loss_value >= 0.9:
break
return adv, g, loss_value
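A hypothetical call, perturbing only the embedding rows of an appended payload region; emb (the embedding-layer output for one sample), seq_len, and the argument values are all assumptions:

adv, g, final_loss = fgsm(model, emb, pad_idx=seq_len, pad_len=64, e=0)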
Example 9: fgsm
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001, target_class=1):
adv = inp.copy()
loss = K.mean(model.output[:, target_class])
grads = K.gradients(loss, model.layers[1].output)[0]
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)
mask = np.zeros(model.layers[1].output.shape[1:]) # embedding layer output shape
mask[pad_idx:pad_idx+pad_len] = 1
grads *= K.constant(mask)
iterate = K.function([model.layers[1].output], [loss, grads])
g = 0.
step = int(1/step_size)*10
for _ in range(step):
loss_value, grads_value = iterate([adv])
grads_value *= step_size
g += grads_value
adv += grads_value
#print (e, loss_value, grads_value.mean(), end='\r')
if loss_value >= 0.9:
break
return adv, g, loss_value
Example 10: norm
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def norm(self, xs, norm_id):
    # layer normalization over the last axis, with learned gain self.gs and bias self.bs
    mu = K.mean(xs, axis=-1, keepdims=True)
    sigma = K.sqrt(K.var(xs, axis=-1, keepdims=True) + 1e-3)
    return self.gs[norm_id] * (xs - mu) / (sigma + 1e-3) + self.bs[norm_id]
Example 11: squash
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def squash(s, axis=-1):
"""
Squash function. This could be viewed as one kind of activations.
"""
squared_s = K.sum(K.square(s), axis=axis, keepdims=True)
scale = squared_s / (1 + squared_s) / K.sqrt(squared_s + K.epsilon())
return scale * s
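This is the squashing function from the CapsNet paper: squash(s) = (||s||^2 / (1 + ||s||^2)) * (s / ||s||), which preserves direction while mapping the norm into [0, 1). A quick numeric check, assuming a TensorFlow backend where K.eval can evaluate constants:

import numpy as np
s = K.constant(np.array([[3.0, 4.0]]))      # norm 5
v = squash(s)
print(K.eval(K.sqrt(K.sum(K.square(v)))))   # ~0.96, i.e. 25/26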
Example 12: call
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def call(self, inputs, **kwargs):
    # L2 length of each capsule vector along the last axis
    return K.sqrt(K.sum(K.square(inputs), axis=-1))
Example 13: std_rmse
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def std_rmse(std=1):
def rmse(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true))) * std
return rmse
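A hypothetical use as a metric when the regression target was standardized before training, so the reported RMSE comes out in the target's original units; y_std is an assumed variable:

model.compile(optimizer='adam', loss='mse', metrics=[std_rmse(std=y_std)])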
Example 14: __call__
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def __call__(self, p):
    if self.skip:
        # clip the norm used as divisor, so weights with extreme norms are only partially rescaled
        return self.s * (p / K.clip(K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True)), 0.5, 100))
    # rescale each weight vector to norm self.s
    return self.s * (p / K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True)))
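Assuming the enclosing class is a keras.constraints.Constraint subclass (here given the hypothetical name ScaledNorm, with constructor arguments matching self.s and self.skip), it would attach to a layer like any other weight constraint:

from keras.layers import Dense
layer = Dense(64, kernel_constraint=ScaledNorm(s=1.0, skip=False))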
Example 15: cor
# Required module: from keras import backend [as alias]
# Or: from keras.backend import sqrt [as alias]
def cor(self, y1, y2, lamda):
    # sum of column-wise Pearson correlations between y1 and y2, scaled by lamda
    y1_centered = y1 - K.mean(y1, axis=0)
    y2_centered = y2 - K.mean(y2, axis=0)
    corr_nr = K.sum(y1_centered * y2_centered, axis=0)
    # K.sum instead of Theano's T.sum keeps the function backend-agnostic
    corr_dr1 = K.sqrt(K.sum(y1_centered * y1_centered, axis=0) + 1e-8)
    corr_dr2 = K.sqrt(K.sum(y2_centered * y2_centered, axis=0) + 1e-8)
    corr = corr_nr / (corr_dr1 * corr_dr2)
    return K.sum(corr) * lamda