当前位置: 首页>>代码示例>>Python>>正文


Python backend.stop_gradient方法代码示例

本文整理汇总了Python中keras.backend.stop_gradient方法的典型用法代码示例。如果您正苦于以下问题:Python backend.stop_gradient方法的具体用法?Python backend.stop_gradient怎么用?Python backend.stop_gradient使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在keras.backend的用法示例。


在下文中一共展示了backend.stop_gradient方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: symbolic_fgs

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def symbolic_fgs(x, grad, eps=0.3, clipping=True):
    """
    Build a symbolic FGSM (fast gradient sign method) adversarial example.

    x: input tensor; grad: gradient of the loss w.r.t. x.
    eps: perturbation magnitude applied along the gradient sign.
    clipping: when True, clamp the result into [0, 1].
    """
    # Perturb by a fixed step in the direction of the gradient's sign.
    perturbation = eps * K.sign(grad)

    # stop_gradient makes the adversarial example a constant w.r.t.
    # any further differentiation.
    adv_x = K.stop_gradient(x + perturbation)

    if clipping:
        adv_x = K.clip(adv_x, 0, 1)
    return adv_x
开发者ID:sunblaze-ucb,项目名称:blackbox-attacks,代码行数:19,代码来源:fgs.py

示例2: symbolic_fg

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def symbolic_fg(x, grad, eps=0.3, clipping=True):
    """
    Build a symbolic FG (fast gradient) adversarial example.

    The gradient is L2-normalized over all non-batch axes, so the
    perturbation for each sample has L2 norm eps.

    x: input tensor; grad: gradient of the loss w.r.t. x.
    eps: perturbation magnitude; clipping: clamp result into [0, 1].
    """
    # Unit vector in direction of gradient.
    # FIX: `range` replaces Python-2-only `xrange`, which raises
    # NameError under Python 3.
    reduc_ind = list(range(1, len(x.get_shape())))
    normed_grad = grad / tf.sqrt(tf.reduce_sum(tf.square(grad),
                                               reduction_indices=reduc_ind,
                                               keep_dims=True))
    # Multiply by constant epsilon
    scaled_grad = eps * normed_grad

    # Add perturbation to original example to obtain adversarial example;
    # stop_gradient keeps it constant w.r.t. further differentiation.
    adv_x = K.stop_gradient(x + scaled_grad)

    if clipping:
        adv_x = K.clip(adv_x, 0, 1)

    return adv_x
开发者ID:sunblaze-ucb,项目名称:blackbox-attacks,代码行数:21,代码来源:fgs.py

示例3: actor_optimizer

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def actor_optimizer(self):
        """Build and return a Keras training function for the actor network.

        The returned function takes [states, one-hot actions, advantages]
        and applies one Adam update minimizing the policy-gradient loss
        plus an entropy regularizer.
        """
        # Placeholders for the sampled actions (one-hot) and advantages.
        action = K.placeholder(shape=(None, self.action_size))
        advantages = K.placeholder(shape=(None, ))

        policy = self.actor.output

        # Probability assigned to the action actually taken.
        good_prob = K.sum(action * policy, axis=1)
        # stop_gradient: advantages are treated as constants so no gradient
        # flows into whatever network produced them.
        eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages)
        loss = -K.sum(eligibility)

        # sum(p * log p) is the NEGATIVE entropy; adding it to the loss
        # therefore rewards high-entropy (exploratory) policies.
        entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)

        actor_loss = loss + 0.01*entropy

        optimizer = Adam(lr=self.actor_lr)
        # NOTE(review): this is the legacy Keras get_updates signature
        # (params, constraints, loss) — confirm against the pinned Keras version.
        updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
        train = K.function([self.actor.input, action, advantages], [], updates=updates)
        return train

    # make loss function for Value approximation 
开发者ID:rlcode,项目名称:reinforcement-learning,代码行数:22,代码来源:cartpole_a3c.py

示例4: labelembed_loss

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def labelembed_loss(out1, out2, tar, targets, tau = 2., alpha = 0.9, beta = 0.5, num_classes = 100):
    """Combined label-embedding loss.

    out1, out2: logits of the two classifier heads.
    tar: label-embedding logits; targets: integer class labels.
    tau: softmax temperature for the distillation-style term.
    alpha: margin for the L_re hinge term; beta: mixing weight between
    the cross-entropy and embedding terms of head 1.
    Returns the scalar (per-sample) total loss.
    """
    out2_prob = K.softmax(out2)
    # Temperature-softened distribution of head 2; stop_gradient so it acts
    # as a fixed distillation target.
    tau2_prob = K.stop_gradient(K.softmax(out2 / tau))
    # Soft targets from the label-embedding logits, also held constant.
    soft_tar = K.stop_gradient(K.softmax(tar))
    
    # Standard cross-entropy for head 1 against the true labels.
    L_o1_y = K.sparse_categorical_crossentropy(output = K.softmax(out1), target = targets)
    
    # Mask selecting samples that head 2 classifies correctly.
    pred = K.argmax(out2, axis = -1)
    mask = K.stop_gradient(K.cast(K.equal(pred, K.cast(targets, 'int64')), K.floatx()))
    # Head 1 distilled towards the label embedding (cross_entropy is an
    # external helper defined elsewhere in this project).
    L_o1_emb = -cross_entropy(out1, soft_tar)  # pylint: disable=invalid-unary-operand-type
    
    L_o2_y = K.sparse_categorical_crossentropy(output = out2_prob, target = targets)
    # Embedding pulled towards head 2's softened output, only on correctly
    # classified samples; rescaled by batch_size / (#correct) to keep the
    # magnitude comparable regardless of how many samples pass the mask.
    L_emb_o2 = -cross_entropy(tar, tau2_prob) * mask * (K.cast(K.shape(mask)[0], K.floatx())/(K.sum(mask)+1e-8))  # pylint: disable=invalid-unary-operand-type
    # Hinge penalty when head 2's confidence in the true class exceeds alpha.
    L_re = K.relu(K.sum(out2_prob * K.one_hot(K.cast(targets, 'int64'), num_classes), axis = -1) - alpha)
    
    return beta * L_o1_y + (1-beta) * L_o1_emb + L_o2_y + L_emb_o2 + L_re 
开发者ID:cvjena,项目名称:semantic-embeddings,代码行数:19,代码来源:learn_labelembedding.py

示例5: labelembed_model

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def labelembed_model(base_model, num_classes, **kwargs):
    """Wrap a base embedding network with the label-embedding training heads.

    base_model: Keras model producing an embedding.
    num_classes: number of classes for both classifier heads.
    **kwargs: forwarded to labelembed_loss (tau, alpha, beta).
    Returns a Model mapping [images, labels] -> [embedding, out1 logits, loss].
    """
    input_ = base_model.input
    embedding = base_model.output
    
    out = keras.layers.Activation('relu')(embedding)
    out = keras.layers.BatchNormalization(name = 'embedding_bn')(out)
    out1 = keras.layers.Dense(num_classes, name = 'prob')(out)
    # Head 2 is fed through stop_gradient so its training signal never
    # updates the shared embedding trunk.
    out2 = keras.layers.Dense(num_classes, name = 'out2')(keras.layers.Lambda(lambda x: K.stop_gradient(x))(out))
    
    # A learnable per-class embedding, initialized to the identity so each
    # class starts as its own one-hot target.
    cls_input_ = keras.layers.Input((1,), name = 'labels')
    cls_embedding_layer = keras.layers.Embedding(num_classes, num_classes, embeddings_initializer = 'identity', name = 'labelembeddings')
    cls_embedding = keras.layers.Flatten()(cls_embedding_layer(cls_input_))
    
    # Compute the loss inside the graph; [:, None] keeps a per-sample axis
    # so Keras treats it as a regular model output.
    loss = keras.layers.Lambda(lambda x: labelembed_loss(x[0], x[1], x[2], K.flatten(x[3]), num_classes = num_classes, **kwargs)[:,None], name = 'labelembed_loss')([out1, out2, cls_embedding, cls_input_])
    
    return keras.models.Model([input_, cls_input_], [embedding, out1, loss]) 
开发者ID:cvjena,项目名称:semantic-embeddings,代码行数:19,代码来源:learn_labelembedding.py

示例6: inst_weight

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def inst_weight(output_y, output_x, output_dr, output_dl, config=None):
    """Compute a per-pixel loss weight map from predicted coordinate maps.

    output_y/output_x/output_dr/output_dl: predicted vertical, horizontal
    and two diagonal coordinate maps (assumed NHWC batches of 2-D maps —
    TODO confirm against the caller).
    config: object providing GRADIENT_THRES, WEIGHT_AREA, CLIP_AREA_HIGH,
    CLIP_AREA_LOW, BG_AREA and GAUSSIAN_KERNEL.
    Returns a smoothed weight tensor, detached from the gradient graph.
    """
    # Sobel-style finite differences of each coordinate map (interior
    # pixels only; a border of 1 is lost and re-padded below).
    dy = output_y[:,2:,2:]-output_y[:, :-2,2:] + \
         2*(output_y[:,2:,1:-1]- output_y[:,:-2,1:-1]) + \
         output_y[:,2:,:-2]-output_y[:,:-2,:-2]
    dx = output_x[:,2:,2:]- output_x[:,2:,:-2] + \
         2*( output_x[:,1:-1,2:]- output_x[:,1:-1,:-2]) +\
         output_x[:,:-2,2:]- output_x[:,:-2,:-2]
    ddr=  (output_dr[:,2:,2:]-output_dr[:,:-2,:-2] +\
           output_dr[:,1:-1,2:]-output_dr[:,:-2,1:-1]+\
           output_dr[:,2:,1:-1]-output_dr[:,1:-1,:-2])*K.constant(2)
    ddl=  (output_dl[:,2:,:-2]-output_dl[:,:-2,2:] +\
           output_dl[:,2:,1:-1]-output_dl[:,1:-1,2:]+\
           output_dl[:,1:-1,:-2]-output_dl[:,:-2,1:-1])*K.constant(2)
    dpred = K.concatenate([dy,dx,ddr,ddl],axis=-1)
    # Restore the border lost by the [2:, :-2] stencils.
    dpred = K.spatial_2d_padding(dpred)
    # Foreground = pixels where every directional gradient exceeds the
    # threshold.
    weight_fg = K.cast(K.all(dpred>K.constant(config.GRADIENT_THRES), axis=3, 
                          keepdims=True), K.floatx())
    
    # Geometric-mean-like magnitude of the gradients, clipped into the
    # [WEIGHT_AREA/CLIP_AREA_HIGH, WEIGHT_AREA/CLIP_AREA_LOW] range.
    weight = K.clip(K.sqrt(weight_fg*K.prod(dpred, axis=3, keepdims=True)), 
                    config.WEIGHT_AREA/config.CLIP_AREA_HIGH, 
                    config.WEIGHT_AREA/config.CLIP_AREA_LOW)
    # Background pixels get a fixed weight instead.
    weight +=(1-weight_fg)*config.WEIGHT_AREA/config.BG_AREA
    # Smooth the weight map with a Gaussian kernel.
    weight = K.conv2d(weight, K.constant(config.GAUSSIAN_KERNEL),
                      padding='same')
    # The weight map is auxiliary — never backpropagate through it.
    return K.stop_gradient(weight) 
开发者ID:jacobkie,项目名称:2018DSB,代码行数:27,代码来源:model.py

示例7: profile_contrib

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def profile_contrib(p):
    """Collapse profile logits `p` into a per-example scalar contribution.

    Weights the logits by their own (gradient-detached) softmax over the
    positional axis, sums over that axis and averages the rest.
    NOTE(review): `dim=` is the pre-TF-1.5 kwarg for tf.nn.softmax (newer
    versions use `axis=`) — confirm against the pinned TF version.
    """
    return kl.Lambda(lambda p:
                     K.mean(K.sum(K.stop_gradient(tf.nn.softmax(p, dim=-2)) * p, axis=-2), axis=-1)
                     )(p) 
开发者ID:kipoi,项目名称:models,代码行数:6,代码来源:model.py

示例8: mcmc_chain

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def mcmc_chain(self, x, nb_gibbs_steps):
    """Run `nb_gibbs_steps` of Gibbs sampling starting from `x`.

    Returns the final reconstruction (gradient-detached), its
    pre-activation, and its sigmoid activation from the last step.
    """
    sample = x
    for _ in range(nb_gibbs_steps):
        sample, pre_act, sigm_act = self.gibbs_xhx(sample)

    # Detach the chain so gradients never flow back through the sampling.
    reconstruction = K.stop_gradient(sample)

    return reconstruction, pre_act, sigm_act
开发者ID:bnsnapper,项目名称:keras_bn_library,代码行数:11,代码来源:rbm.py

示例9: round_through

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def round_through(x):
    '''Element-wise rounding to the closest integer with full gradient propagation.
    A trick from [Sergey Ioffe](http://stackoverflow.com/a/36480182):
    the forward value is round(x), but the rounding residual is wrapped in
    stop_gradient so the backward pass sees the identity function.
    '''
    residual = K.stop_gradient(K.round(x) - x)
    return x + residual
开发者ID:DingKe,项目名称:nn_playground,代码行数:8,代码来源:binary_ops.py

示例10: _mean_abs

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def _mean_abs(x, axis=None, keepdims=False):
    '''Mean absolute value of `x` along `axis`, detached from the gradient graph.'''
    magnitude = K.mean(K.abs(x), axis=axis, keepdims=keepdims)
    return K.stop_gradient(magnitude)
开发者ID:DingKe,项目名称:nn_playground,代码行数:4,代码来源:binary_ops.py

示例11: ternarize_dot

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def ternarize_dot(x, W):
    '''For RNN (maybe Dense or Conv too). 
    Refer to 'Recurrent Neural Networks with Limited Numerical Precision' Section 3.1:
    the forward pass uses the ternarized weights, while the backward pass
    differentiates through the full-precision dot product.
    '''
    Wt = _ternarize(W)
    full_precision = K.dot(x, W)
    # Detached correction turns the forward value into dot(x, Wt).
    correction = K.stop_gradient(K.dot(x, Wt - W))
    return full_precision + correction
开发者ID:DingKe,项目名称:nn_playground,代码行数:8,代码来源:ternary_ops.py

示例12: round_through

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def round_through(x):
    '''Element-wise rounding to the closest integer with full gradient propagation.
    A trick from [Sergey Ioffe](http://stackoverflow.com/a/36480182):
    forward pass yields round(x); the stop_gradient on the residual makes
    the backward pass behave like the identity.
    '''
    return x + K.stop_gradient(K.round(x) - x)
开发者ID:BertMoons,项目名称:QuantizedNeuralNetworks-Keras-Tensorflow,代码行数:9,代码来源:quantized_ops.py

示例13: clip_through

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def clip_through(x, min_val, max_val):
    '''Element-wise clipping with gradient propagation.
    Analogue to round_through: the forward value is clip(x, min_val, max_val)
    while gradients pass through as if no clipping happened.
    '''
    residual = K.clip(x, min_val, max_val) - x
    return x + K.stop_gradient(residual)
开发者ID:BertMoons,项目名称:QuantizedNeuralNetworks-Keras-Tensorflow,代码行数:9,代码来源:quantized_ops.py

示例14: call

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def call(self, inputs):
        """Forward pass of a quantized conv layer.

        Convolves the inputs with a quantized copy of the kernel while
        rescaling the straight-through gradients by kernel_lr_multiplier.
        """
        quantized_kernel = quantize(self.kernel, nb=self.nb)

        # Straight-through rescaling trick: forward value of
        # inputs_qnn_gradient equals `inputs`, but the gradient w.r.t.
        # `inputs` is multiplied by 1/kernel_lr_multiplier.
        inverse_kernel_lr_multiplier = 1./self.kernel_lr_multiplier
        inputs_qnn_gradient = (inputs - (1. - 1./inverse_kernel_lr_multiplier) * K.stop_gradient(inputs))\
                  * inverse_kernel_lr_multiplier

        outputs_qnn_gradient = K.conv2d(
            inputs_qnn_gradient,
            quantized_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        # Inverse rescaling: the forward value is unchanged, while the
        # gradient is multiplied back by kernel_lr_multiplier, giving the
        # kernel an effective learning-rate multiplier.
        outputs = (outputs_qnn_gradient - (1. - 1./self.kernel_lr_multiplier) * K.stop_gradient(outputs_qnn_gradient))\
                  * self.kernel_lr_multiplier


        #outputs = outputs*K.mean(K.abs(self.kernel))

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 

示例15: call

# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import stop_gradient [as 别名]
def call(self, inputs):
        """Forward pass of a binarized conv layer.

        Convolves the inputs with a binarized copy of the kernel while
        rescaling the straight-through gradients by kernel_lr_multiplier.
        """
        binary_kernel = binarize(self.kernel, H=self.H)

        # Straight-through rescaling: forward value equals `inputs`, but
        # the gradient w.r.t. `inputs` is scaled by 1/kernel_lr_multiplier.
        inverse_kernel_lr_multiplier = 1./self.kernel_lr_multiplier
        inputs_bnn_gradient = (inputs - (1. - 1./inverse_kernel_lr_multiplier) * K.stop_gradient(inputs))\
                  * inverse_kernel_lr_multiplier

        outputs_bnn_gradient = K.conv2d(
            inputs_bnn_gradient,
            binary_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        # Inverse rescaling restores the forward value while multiplying
        # the gradient back by kernel_lr_multiplier — an effective
        # per-layer learning-rate multiplier for the kernel.
        outputs = (outputs_bnn_gradient - (1. - 1./self.kernel_lr_multiplier) * K.stop_gradient(outputs_bnn_gradient))\
                  * self.kernel_lr_multiplier


        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
开发者ID:BertMoons,项目名称:QuantizedNeuralNetworks-Keras-Tensorflow,代码行数:30,代码来源:binary_layers.py


注:本文中的keras.backend.stop_gradient方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。