

Python backend.gradients Method Code Examples

This page collects typical usage examples of the keras.backend.gradients method in Python. If you are wondering what backend.gradients does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore the keras.backend module it belongs to for further usage examples.


The following presents 15 code examples of the backend.gradients method, ordered by popularity by default.
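
Before the examples, here is a minimal end-to-end sketch of the pattern they all share: build a scalar loss from a model tensor, ask K.gradients for the symbolic gradient with respect to an input, and wrap both in a K.function. This is a hedged illustration for the TF1-era keras.backend API; the VGG16 model and the shapes are placeholders for illustration, not taken from any example below.

import numpy as np
from keras import backend as K
from keras.applications.vgg16 import VGG16

# Any Keras model works; VGG16 is used only as a stand-in
model = VGG16(weights=None, include_top=False, input_shape=(64, 64, 3))

# Scalar objective: mean activation of the model's output tensor
loss = K.mean(model.output)

# K.gradients returns a list of gradient tensors, one per variable
grads = K.gradients(loss, model.input)[0]

# Compile a callable mapping a concrete input to (loss, gradient) values
fetch = K.function([model.input], [loss, grads])

x = np.random.random((1, 64, 64, 3)).astype('float32')
loss_value, grads_value = fetch([x])
print(loss_value, grads_value.shape)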

Example 1: generate_pattern

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def generate_pattern(layer_name, filter_index, size=150):
    # Filter visualization function
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_index])
    grads = K.gradients(loss, model.input)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    iterate = K.function([model.input], [loss, grads])
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
    
    step = 1
    for _ in range(40):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
    
    img = input_img_data[0]
    return deprocess_image(img) 
Author: wdxtub | Project: deep-learning-note | Lines: 18 | Source: 7_visualize_filters.py
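
A typical call of this helper, hedged: the layer name below assumes a VGG16-style convnet, as in the book chapter this example comes from.

import matplotlib.pyplot as plt

# Visualize what filter 0 of layer 'block3_conv1' responds to
plt.imshow(generate_pattern('block3_conv1', 0))
plt.axis('off')
plt.show()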

Example 2: reverse_generator

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def reverse_generator(generator, X_sample, y_sample, title):
    """Gradient descent to map images back to their latent vectors."""

    latent_vec = np.random.normal(size=(1, 100))

    # Function for figuring out how to bump the input.
    target = K.placeholder()
    loss = K.sum(K.square(generator.outputs[0] - target))
    grad = K.gradients(loss, generator.inputs[0])[0]
    update_fn = K.function(generator.inputs + [target], [grad])

    # Repeatedly apply the update rule.
    xs = []
    for i in range(60):
        print('%d: latent_vec mean=%f, std=%f'
              % (i, np.mean(latent_vec), np.std(latent_vec)))
        xs.append(generator.predict_on_batch([latent_vec, y_sample]))
        for _ in range(10):
            update_vec = update_fn([latent_vec, y_sample, X_sample])[0]
            latent_vec -= update_vec * update_rate  # update_rate: module-level constant in the source script

    # Plots the samples.
    xs = np.concatenate(xs, axis=0)
    plot_as_gif(xs, X_sample, title) 
Author: codekansas | Project: gandlf | Lines: 26 | Source: reversing_gan.py

Example 3: gradient_penalty_loss

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
        """
        Computes gradient penalty based on prediction and weighted real / fake samples
        """
        gradients = K.gradients(y_pred, averaged_samples)[0]
        # compute the euclidean norm by squaring ...
        gradients_sqr = K.square(gradients)
        #   ... summing over the rows ...
        gradients_sqr_sum = K.sum(gradients_sqr,
                                  axis=np.arange(1, len(gradients_sqr.shape)))
        #   ... and sqrt
        gradient_l2_norm = K.sqrt(gradients_sqr_sum)
        # compute lambda * (1 - ||grad||)^2 still for each single sample
        gradient_penalty = K.square(1 - gradient_l2_norm)
        # return the mean as loss over all the batch samples
        return K.mean(gradient_penalty) 
Author: eriklindernoren | Project: Keras-GAN | Lines: 18 | Source: wgan_gp.py
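
Keras losses only accept (y_true, y_pred), so the extra averaged_samples argument is normally bound ahead of time with functools.partial. A hedged sketch of the usual wiring inside the model-building method follows; interpolated_img stands for the output of a layer that mixes real and fake batches at random weights, and wasserstein_loss, optimizer, and the loss weights are assumptions modeled on the source repository.

from functools import partial

# Bind averaged_samples so the result matches the (y_true, y_pred) signature
partial_gp_loss = partial(self.gradient_penalty_loss,
                          averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty'  # Keras requires loss functions to be named

self.critic_model.compile(loss=[self.wasserstein_loss,
                                self.wasserstein_loss,
                                partial_gp_loss],
                          optimizer=optimizer,
                          loss_weights=[1, 1, 10])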

Example 4: predictions_and_gradient

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def predictions_and_gradient(self, image, criterion):
        """ Returns both predictions and gradients, and
        potentially loss w.r.t. to certain criterion.
        """

        input_shape = image.shape
        px, dpdx = self._process_input(image)

        if isinstance(criterion, TargetClassMiss) or \
                isinstance(criterion, RegionalTargetClassMiss):
            boxes, scores, classes, loss, gradient =\
                self._tgt_cls_pred_and_grad_fn(
                    [px[np.newaxis], criterion.target_class()])
        else:
            raise NotImplementedError

        prediction = {}
        num = (scores[0] > 0.).sum()
        prediction['boxes'] = boxes[0][:num].tolist()
        prediction['scores'] = scores[0][:num].tolist()
        prediction['classes'] = classes[0][:num].tolist()

        gradient = self._process_gradient(dpdx, gradient)
        assert gradient.shape == input_shape
        return prediction, loss, gradient, 
Author: advboxes | Project: perceptron-benchmark | Lines: 27 | Source: keras_yolov3.py

Example 5: loss

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def loss(self, y_true, y_pred):

        # get the value for the true and fake images
        disc_true = self.disc(y_true)
        disc_pred = self.disc(y_pred)

        # sample a x_hat by sampling along the line between true and pred
        # z = tf.placeholder(tf.float32, shape=[None, 1])
        # shp = y_true.get_shape()[0]
        # WARNING: SHOULD REALLY BE shape=[batch_size, 1] !!!
        # self.batch_size does not work, since it's not None!!!
        alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
        diff = y_pred - y_true
        interp = y_true + alpha * diff

        # take gradient of D(x_hat)
        gradients = K.gradients(self.disc(interp), [interp])[0]
        grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1))-1))

        # compute loss
        return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen 
Author: voxelmorph | Project: voxelmorph | Lines: 23 | Source: metrics.py

Example 6: query

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def query(self, context):
        x0, x0context = helper.find_closest_positive_context_param(
            context, self.xx, self.yy, self.func.param_idx, self.func.context_idx)
        g = kb.gradients(self.model.outputs[0], self.model.inputs)
        gfn = kb.function(self.model.inputs, g)

        def fn(param):
            x = np.hstack((param, np.tile(context, (param.shape[0], 1))))
            return -self.model.predict(x).astype(np.float64)

        def fgfn(param):
            x = np.hstack((param, context))
            return -self.model.predict(np.array([x]))[0].astype(np.float64), \
                   -gfn([np.array([x])])[0][0,
                                            self.func.param_idx].astype(np.float64)
        x_range = self.func.x_range
        guesses = helper.grid_around_point(
            x0, 0.5*(x_range[1]-x_range[0]), 5, x_range)
        x_star, y_star = helper.global_minimize(
            fn, fgfn, x_range[:, self.func.param_idx], 10000, guesses)
        print('x_star={}, y_star={}'.format(x_star, y_star))
        return np.hstack((x_star, context)) 
Author: zi-w | Project: Kitchen2D | Lines: 24 | Source: active_nn.py

Example 7: fgsm

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001):
    adv = inp.copy()
    loss = K.mean(model.output[:, 0])
    grads = K.gradients(loss, model.layers[1].output)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)
    
    mask = np.zeros(model.layers[1].output.shape[1:]) # embedding layer output shape
    mask[pad_idx:pad_idx+pad_len] = 1
    grads *= K.constant(mask)
    
    iterate = K.function([model.layers[1].output], [loss, grads])
    g = 0.
    step = int(1/step_size)*10
    for _ in range(step):
        loss_value, grads_value = iterate([adv])
        grads_value *= step_size
        g += grads_value
        adv += grads_value
        #print (e, loss_value, end='\r')
        if loss_value >= 0.9:
            break
    
    return adv, g, loss_value 
Author: j40903272 | Project: MalConv-keras | Lines: 25 | Source: gen_adversarial.py

Example 8: fgsm

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001, target_class=1):
    adv = inp.copy()
    loss = K.mean(model.output[:, target_class])
    grads = K.gradients(loss, model.layers[1].output)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)
    
    mask = np.zeros(model.layers[1].output.shape[1:]) # embedding layer output shape
    mask[pad_idx:pad_idx+pad_len] = 1
    grads *= K.constant(mask)
    
    iterate = K.function([model.layers[1].output], [loss, grads])
    g = 0.
    step = int(1/step_size)*10
    for _ in range(step):
        loss_value, grads_value = iterate([adv])
        grads_value *= step_size
        g += grads_value
        adv += grads_value
        #print (e, loss_value, grads_value.mean(), end='\r')
        if loss_value >= 0.9:
            break
    
    return adv, g, loss_value 
Author: j40903272 | Project: MalConv-keras | Lines: 25 | Source: gen_adversarial2.py

Example 9: render_naive

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def render_naive(layer_name, filter_index, img0=img_noise, iter_n=20, step=1.0):
    if layer_name not in layer_dict:
        print("ERROR: invalid layer name: %s" % layer_name)
        return

    layer = layer_dict[layer_name]

    print("{} < {}".format(filter_index, layer.output_shape[-1]))

    activation = K.mean(layer.output[:, :, :, filter_index])
    grads = K.gradients(activation, input_tensor)[0]

    # Networks containing Dropout or BatchNorm require K.learning_phase()
    iterate = K.function([input_tensor, K.learning_phase()], [activation, grads])

    img = img0.copy()
    for i in range(iter_n):
        # Pass 0 for K.learning_phase() since we are not training
        activation_value, grads_value = iterate([img, 0])
        grads_value /= np.std(grads_value) + 1e-8  # grads_value is a numpy array, so np.std, not K.std
        img += grads_value * step
        print(i, activation_value) 
Author: aidiary | Project: keras-examples | Lines: 24 | Source: dream1.py

Example 10: eval_loss_and_grads

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def eval_loss_and_grads(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient. 
Author: hello-sea | Project: DeepLearning_Wavelet-LSTM | Lines: 21 | Source: neural_style_transfer.py
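
The Evaluator class mentioned in the closing comment is not part of this excerpt. A sketch of its usual shape, as in the standard Keras neural-style-transfer example, follows: it computes loss and gradients in a single eval_loss_and_grads call, caches the gradients, and hands the two values out through separate methods for scipy.optimize.fmin_l_bfgs_b.

import numpy as np

class Evaluator(object):
    def __init__(self):
        self.loss_value = None
        self.grad_values = None

    def loss(self, x):
        # One forward/backward pass computes both values; cache the gradients
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        # Return the cached gradients and reset for the next iteration
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values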

Example 11: eval_loss_and_grads

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def eval_loss_and_grads(x):
    x = x.reshape((1,) + img_size)
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient. 
Author: jhu-lcsr | Project: costar_plan | Lines: 18 | Source: deep_dream.py

Example 12: _rmsprop

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def _rmsprop(self, grads, cache=None, decay_rate=0.95):
        """Uses RMSProp to compute step from gradients.

        Args:
            grads: numpy array of gradients.
            cache: numpy array of same shape as `grads` as RMSProp cache
            decay_rate: How fast to decay cache

        Returns:
            A tuple of
                step: numpy array of the same shape as `grads` giving the step.
                    Note that this does not yet take the learning rate into account.
                cache: Updated RMSProp cache.
        """
        if cache is None:
            cache = np.zeros_like(grads)
        cache = decay_rate * cache + (1 - decay_rate) * grads ** 2
        step = -grads / np.sqrt(cache + K.epsilon())
        return step, cache 
Author: raghakot | Project: keras-vis | Lines: 21 | Source: optimizer.py
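
A standalone replica of the update rule, useful as a sanity check. This is a hedged sketch: K.epsilon() defaults to 1e-7 in recent Keras versions, which is assumed here in place of the backend call.

import numpy as np

grads = np.array([0.3, -0.1])
cache = np.zeros_like(grads)
decay_rate = 0.95

# One RMSProp step: decay the cache, then scale the gradient by its RMS
cache = decay_rate * cache + (1 - decay_rate) * grads ** 2
step = -grads / np.sqrt(cache + 1e-7)
print(step)  # first step is large because the cache starts at zero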

Example 13: gram_loss_callable

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def gram_loss_callable(gram_model, target_grams, shape):
    ''' Returns a function which takes in an image and outputs both the gram-matrix
    loss of that image relative to the targets, and the gradients of that loss with respect
    to the image pixels'''
    loss = diff_loss(gram_model, target_grams)

    gradients = K.gradients(loss, gram_model.input)
    if keras.backend.backend() == 'tensorflow':
        gradients = gradients[0] # This is a Keras inconsistency between theano and tf backends
    
    loss_and_gradients = K.function([gram_model.input], [loss, gradients])
    
    def callable(x):
        deflattened = x.reshape([-1] + list(shape) + [3])

        loss, grad = loss_and_gradients([deflattened])
        
        #print(formatter.format("{:q} ", float(loss)), end=' | ', flush=True)
        return loss.astype('float64'), np.ravel(grad.astype('float64'))
    return callable 
Author: wxs | Project: subjective-functions | Lines: 22 | Source: gram.py

Example 14: loss_and_gradients_callable

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def loss_and_gradients_callable(loss_model, shape):

    loss = loss_model.output
    gradients = K.gradients(loss, loss_model.input)

    if keras.backend.backend() == 'tensorflow':
        gradients = gradients[0] # This is a Keras inconsistency between theano and tf backends
    
    loss_and_gradients = K.function([loss_model.input], [loss, gradients])

    def callable(x):
        deflattened = x.reshape([-1] + list(shape) + [3])

        loss, grad = loss_and_gradients([deflattened])
        
        #print(formatter.format("{:q} ", float(loss)), end=' | ', flush=True)
        return loss.astype('float64'), np.ravel(grad.astype('float64'))
    return callable 
Author: wxs | Project: subjective-functions | Lines: 20 | Source: gram.py

Example 15: get_gradients

# Required import: from keras import backend [as alias]
# Or: from keras.backend import gradients [as alias]
def get_gradients(self, loss, params):
    '''
    Replacement for the default keras get_gradients() function.
    Modification: checks if the object has the attribute grads and 
    returns that rather than calculating the gradients using automatic
    differentiation. 
    '''
    if hasattr(self, 'grads'):
        grads = self.grads
    else:
        grads = K.gradients(loss, params)
    if hasattr(self, 'clipnorm') and self.clipnorm > 0:
        norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
        grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
    if hasattr(self, 'clipvalue') and self.clipvalue > 0:
        grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
    return grads 
Author: jhartford | Project: DeepIV | Lines: 19 | Source: custom_gradients.py
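
One hedged way to graft this override onto an existing optimizer instance; the types.MethodType binding is an illustration of the general mechanism, not necessarily how the source project attaches it.

import types
from keras.optimizers import Adam

opt = Adam(lr=1e-3, clipnorm=1.0)
# Bind the replacement so `self` inside get_gradients resolves to the optimizer
opt.get_gradients = types.MethodType(get_gradients, opt)
# If precomputed gradients are stashed on the instance, they are returned verbatim:
# opt.grads = precomputed_grads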


Note: The keras.backend.gradients examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. Refer to each project's license before redistributing or reusing the code. Do not reproduce this article without permission.