This article collects typical usage examples of the Python method keras.backend.stop_gradient. If you have been wondering what backend.stop_gradient does and how to use it in practice, the curated code examples below may help. You can also explore further usage examples of its containing module, keras.backend.
The following 15 code examples of backend.stop_gradient are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
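Before the project examples, a minimal sketch of what stop_gradient does, assuming the TensorFlow 1.x-era Keras backend these examples were written against: it returns a tensor equal to its input that is treated as a constant during differentiation.

import keras.backend as K

x = K.placeholder(shape=(None, 1))
y = x * K.stop_gradient(x)            # forward value: x * x
grad = K.gradients(K.sum(y), x)[0]    # gradient is x, not 2*x, because the
                                      # second factor is held constant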
Example 1: symbolic_fgs
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def symbolic_fgs(x, grad, eps=0.3, clipping=True):
    """
    FGSM attack: perturb x one step of size eps in the signed-gradient direction.
    """
    # Signed gradient
    normed_grad = K.sign(grad)
    # Multiply by constant epsilon
    scaled_grad = eps * normed_grad
    # Add perturbation to the original example to obtain the adversarial example;
    # stop_gradient keeps later differentiation from flowing back through the attack
    adv_x = K.stop_gradient(x + scaled_grad)
    if clipping:
        adv_x = K.clip(adv_x, 0, 1)
    return adv_x
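A hedged usage sketch for symbolic_fgs, assuming the TF 1.x Keras backend: the loss gradient with respect to the input is computed first and passed in. `model`, `x`, and `y` are hypothetical stand-ins for a compiled Keras classifier and its symbolic input and one-hot labels.

import keras.backend as K

# Hypothetical setup: `model` is a Keras classifier, `x` its symbolic
# input tensor, `y` the one-hot label tensor.
logits = model(x)
loss = K.categorical_crossentropy(y, logits)
grad = K.gradients(loss, x)[0]   # gradient of the loss w.r.t. the input
adv_x = symbolic_fgs(x, grad, eps=0.3)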
Example 2: symbolic_fg
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K
import tensorflow as tf

def symbolic_fg(x, grad, eps=0.3, clipping=True):
    """
    FG attack: perturb x one step of size eps along the unit-normalized gradient.
    """
    # Unit vector in the direction of the gradient
    reduc_ind = list(range(1, len(x.get_shape())))  # all non-batch axes
    normed_grad = grad / tf.sqrt(tf.reduce_sum(tf.square(grad),
                                               reduction_indices=reduc_ind,
                                               keep_dims=True))  # TF 1.x argument names
    # Multiply by constant epsilon
    scaled_grad = eps * normed_grad
    # Add perturbation to the original example to obtain the adversarial example
    adv_x = K.stop_gradient(x + scaled_grad)
    if clipping:
        adv_x = K.clip(adv_x, 0, 1)
    return adv_x
Example 3: actor_optimizer
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K
from keras.optimizers import Adam

def actor_optimizer(self):
    action = K.placeholder(shape=(None, self.action_size))
    advantages = K.placeholder(shape=(None,))

    policy = self.actor.output
    # Probability assigned to the action actually taken
    good_prob = K.sum(action * policy, axis=1)
    # Policy-gradient objective; the advantages are treated as constants
    eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages)
    loss = -K.sum(eligibility)
    # Sum of p*log(p) is the negative entropy, so adding it to the loss with a
    # small weight pushes the policy toward higher entropy (more exploration)
    entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)

    actor_loss = loss + 0.01 * entropy

    optimizer = Adam(lr=self.actor_lr)
    # Keras 2.0-era get_updates signature: (params, constraints, loss)
    updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
    train = K.function([self.actor.input, action, advantages], [], updates=updates)
    return train

# make loss function for Value approximation
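A hedged sketch of how the returned training function might be invoked in the surrounding agent's update loop; `agent`, `states`, `actions`, and `advantages` are hypothetical names for an instance of this class and its NumPy batches:

# Hypothetical usage (names not from the source):
train = agent.actor_optimizer()        # build the update op once
train([states, actions, advantages])   # one policy-gradient step on the actor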
Example 4: labelembed_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def labelembed_loss(out1, out2, tar, targets, tau=2., alpha=0.9, beta=0.5, num_classes=100):
    out2_prob = K.softmax(out2)
    # Temperature-softened and embedding targets are constants for the gradient
    tau2_prob = K.stop_gradient(K.softmax(out2 / tau))
    soft_tar = K.stop_gradient(K.softmax(tar))
    L_o1_y = K.sparse_categorical_crossentropy(output=K.softmax(out1), target=targets)
    pred = K.argmax(out2, axis=-1)
    # Mask selecting the samples that the second head classifies correctly
    mask = K.stop_gradient(K.cast(K.equal(pred, K.cast(targets, 'int64')), K.floatx()))
    # `cross_entropy` is a helper defined elsewhere in the source project
    L_o1_emb = -cross_entropy(out1, soft_tar)  # pylint: disable=invalid-unary-operand-type
    L_o2_y = K.sparse_categorical_crossentropy(output=out2_prob, target=targets)
    L_emb_o2 = -cross_entropy(tar, tau2_prob) * mask \
               * (K.cast(K.shape(mask)[0], K.floatx()) / (K.sum(mask) + 1e-8))  # pylint: disable=invalid-unary-operand-type
    L_re = K.relu(K.sum(out2_prob * K.one_hot(K.cast(targets, 'int64'), num_classes), axis=-1) - alpha)
    return beta * L_o1_y + (1 - beta) * L_o1_emb + L_o2_y + L_emb_o2 + L_re
Example 5: labelembed_model
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras
import keras.backend as K

def labelembed_model(base_model, num_classes, **kwargs):
    input_ = base_model.input
    embedding = base_model.output
    out = keras.layers.Activation('relu')(embedding)
    out = keras.layers.BatchNormalization(name='embedding_bn')(out)
    out1 = keras.layers.Dense(num_classes, name='prob')(out)
    # The second head receives the embedding with gradients blocked
    out2 = keras.layers.Dense(num_classes, name='out2')(
        keras.layers.Lambda(lambda x: K.stop_gradient(x))(out))

    cls_input_ = keras.layers.Input((1,), name='labels')
    cls_embedding_layer = keras.layers.Embedding(num_classes, num_classes,
                                                 embeddings_initializer='identity',
                                                 name='labelembeddings')
    cls_embedding = keras.layers.Flatten()(cls_embedding_layer(cls_input_))

    loss = keras.layers.Lambda(lambda x: labelembed_loss(x[0], x[1], x[2], K.flatten(x[3]),
                                                         num_classes=num_classes, **kwargs)[:, None],
                               name='labelembed_loss')([out1, out2, cls_embedding, cls_input_])

    return keras.models.Model([input_, cls_input_], [embedding, out1, loss])
Example 6: inst_weight
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def inst_weight(output_y, output_x, output_dr, output_dl, config=None):
    # Sobel-style finite differences of the four regression maps
    dy = output_y[:, 2:, 2:] - output_y[:, :-2, 2:] + \
         2 * (output_y[:, 2:, 1:-1] - output_y[:, :-2, 1:-1]) + \
         output_y[:, 2:, :-2] - output_y[:, :-2, :-2]
    dx = output_x[:, 2:, 2:] - output_x[:, 2:, :-2] + \
         2 * (output_x[:, 1:-1, 2:] - output_x[:, 1:-1, :-2]) + \
         output_x[:, :-2, 2:] - output_x[:, :-2, :-2]
    ddr = (output_dr[:, 2:, 2:] - output_dr[:, :-2, :-2] +
           output_dr[:, 1:-1, 2:] - output_dr[:, :-2, 1:-1] +
           output_dr[:, 2:, 1:-1] - output_dr[:, 1:-1, :-2]) * K.constant(2)
    ddl = (output_dl[:, 2:, :-2] - output_dl[:, :-2, 2:] +
           output_dl[:, 2:, 1:-1] - output_dl[:, 1:-1, 2:] +
           output_dl[:, 1:-1, :-2] - output_dl[:, :-2, 1:-1]) * K.constant(2)
    dpred = K.concatenate([dy, dx, ddr, ddl], axis=-1)
    dpred = K.spatial_2d_padding(dpred)
    weight_fg = K.cast(K.all(dpred > K.constant(config.GRADIENT_THRES), axis=3,
                             keepdims=True), K.floatx())
    weight = K.clip(K.sqrt(weight_fg * K.prod(dpred, axis=3, keepdims=True)),
                    config.WEIGHT_AREA / config.CLIP_AREA_HIGH,
                    config.WEIGHT_AREA / config.CLIP_AREA_LOW)
    weight += (1 - weight_fg) * config.WEIGHT_AREA / config.BG_AREA
    weight = K.conv2d(weight, K.constant(config.GAUSSIAN_KERNEL),
                      padding='same')
    # The weight map is an auxiliary target, not something to backprop through
    return K.stop_gradient(weight)
Example 7: profile_contrib
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K
import keras.layers as kl
import tensorflow as tf

def profile_contrib(p):
    # Weight each position by its softmax probability, held constant for the
    # gradient (`dim` is the TF 1.x name for the softmax axis argument)
    return kl.Lambda(lambda p:
                     K.mean(K.sum(K.stop_gradient(tf.nn.softmax(p, dim=-2)) * p, axis=-2), axis=-1)
                     )(p)
Example 8: mcmc_chain
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def mcmc_chain(self, x, nb_gibbs_steps):
    xi = x
    for i in range(nb_gibbs_steps):
        xi, xi_pre, xi_sigm = self.gibbs_xhx(xi)
    x_rec, x_rec_pre, x_rec_sigm = xi, xi_pre, xi_sigm
    # Do not backpropagate through the Gibbs sampling chain
    x_rec = K.stop_gradient(x_rec)
    return x_rec, x_rec_pre, x_rec_sigm
Example 9: round_through
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def round_through(x):
    '''Element-wise rounding to the closest integer with full gradient propagation.
    A trick from [Sergey Ioffe](http://stackoverflow.com/a/36480182).
    '''
    rounded = K.round(x)
    # Forward pass yields rounded values; backward pass sees the identity
    return x + K.stop_gradient(rounded - x)
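A quick check of the straight-through behaviour, again assuming the TF 1.x Keras backend: the forward value is rounded, while the gradient is that of the identity.

import numpy as np
import keras.backend as K

x = K.placeholder(shape=(None,))
y = round_through(x)
g = K.gradients(K.sum(y), x)[0]
f = K.function([x], [y, g])
vals, grads = f([np.array([0.2, 1.7])])  # vals -> [0., 2.], grads -> [1., 1.]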
Example 10: _mean_abs
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def _mean_abs(x, axis=None, keepdims=False):
    # Mean absolute value, treated as a constant during differentiation
    return K.stop_gradient(K.mean(K.abs(x), axis=axis, keepdims=keepdims))
Example 11: ternarize_dot
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def ternarize_dot(x, W):
    '''For RNNs (maybe Dense or Conv too).
    See "Recurrent Neural Networks with Limited Numerical Precision", Section 3.1.
    '''
    Wt = _ternarize(W)
    # Forward pass equals K.dot(x, Wt); gradients flow as if W had been used
    return K.dot(x, W) + K.stop_gradient(K.dot(x, Wt - W))
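_ternarize is defined elsewhere in the source project. A plausible sketch, following Section 3.1 of the cited paper; the thresholding scheme and the 0.5 default are assumptions, not the project's actual code:

def _ternarize(W, threshold=0.5):
    '''Illustrative sketch: map each weight to {-1, 0, +1} by thresholding.'''
    return (K.cast(K.greater(W, threshold), K.floatx())
            - K.cast(K.less(W, -threshold), K.floatx()))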
Example 12: round_through
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def round_through(x):
    '''Element-wise rounding to the closest integer with full gradient propagation.
    A trick from [Sergey Ioffe](http://stackoverflow.com/a/36480182).
    '''
    rounded = K.round(x)
    rounded_through = x + K.stop_gradient(rounded - x)
    return rounded_through
Example 13: clip_through
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def clip_through(x, min_val, max_val):
    '''Element-wise clipping with full gradient propagation.
    Analogous to round_through.
    '''
    clipped = K.clip(x, min_val, max_val)
    clipped_through = x + K.stop_gradient(clipped - x)
    return clipped_through
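Combining the two straight-through helpers gives a simple fixed-point quantizer. This sketch is illustrative; the clipping range and bit-width handling are assumptions, not the quantize function used by the projects below:

def quantize_sketch(x, nb=4):
    '''Quantize x to nb fractional bits with straight-through gradients.'''
    scale = 2. ** nb                             # grid resolution
    x = clip_through(x, -1., 1. - 1. / scale)    # keep values representable
    return round_through(x * scale) / scale      # snap to grid, identity gradient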
Example 14: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def call(self, inputs):
    # `quantize` is a straight-through quantizer defined in the project
    quantized_kernel = quantize(self.kernel, nb=self.nb)

    inverse_kernel_lr_multiplier = 1. / self.kernel_lr_multiplier
    # Identity in the forward pass; scales the gradient by 1/kernel_lr_multiplier
    inputs_qnn_gradient = (inputs - (1. - 1. / inverse_kernel_lr_multiplier) * K.stop_gradient(inputs)) \
                          * inverse_kernel_lr_multiplier

    outputs_qnn_gradient = K.conv2d(
        inputs_qnn_gradient,
        quantized_kernel,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate)

    # Identity in the forward pass; scales the gradient back by kernel_lr_multiplier
    outputs = (outputs_qnn_gradient - (1. - 1. / self.kernel_lr_multiplier) * K.stop_gradient(outputs_qnn_gradient)) \
              * self.kernel_lr_multiplier

    # outputs = outputs * K.mean(K.abs(self.kernel))

    if self.use_bias:
        outputs = K.bias_add(
            outputs,
            self.bias,
            data_format=self.data_format)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
Example 15: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import stop_gradient [as alias]
import keras.backend as K

def call(self, inputs):
    # `binarize` is a straight-through binarizer defined in the project
    binary_kernel = binarize(self.kernel, H=self.H)

    inverse_kernel_lr_multiplier = 1. / self.kernel_lr_multiplier
    # Identity in the forward pass; scales the gradient by 1/kernel_lr_multiplier
    inputs_bnn_gradient = (inputs - (1. - 1. / inverse_kernel_lr_multiplier) * K.stop_gradient(inputs)) \
                          * inverse_kernel_lr_multiplier

    outputs_bnn_gradient = K.conv2d(
        inputs_bnn_gradient,
        binary_kernel,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate)

    # Identity in the forward pass; scales the gradient back by kernel_lr_multiplier
    outputs = (outputs_bnn_gradient - (1. - 1. / self.kernel_lr_multiplier) * K.stop_gradient(outputs_bnn_gradient)) \
              * self.kernel_lr_multiplier

    if self.use_bias:
        outputs = K.bias_add(
            outputs,
            self.bias,
            data_format=self.data_format)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
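Examples 14 and 15 rely on the same algebraic identity: (x - (1 - 1/c) * K.stop_gradient(x)) * c equals x in the forward pass but multiplies the gradient by c, which implements a per-layer learning-rate multiplier for the kernel. A minimal restatement of the identity, with an illustrative name:

import keras.backend as K

def scale_gradient(x, factor):
    '''Identity in the forward pass; multiplies the incoming gradient
    by `factor` in the backward pass (illustrative helper).'''
    return factor * x + K.stop_gradient((1. - factor) * x)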