

Python gradient.disconnected_grad Method Code Examples

This article collects typical usage examples of the theano.gradient.disconnected_grad method in Python. If you are looking for how to call gradient.disconnected_grad, what it is used for, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the theano.gradient module, where this method is defined.


The following presents 12 code examples of the gradient.disconnected_grad method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
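
Before the examples, a brief orientation: disconnected_grad(x) returns a tensor that equals x in the forward pass but is treated as a constant during differentiation, so no gradient flows back through it (Theano's counterpart of a "stop gradient"). The snippet below is a minimal, self-contained sketch of this behaviour, assuming a working Theano installation; the variable names are chosen purely for illustration.

import numpy as np
import theano
import theano.tensor as T
from theano import gradient

x = T.vector('x')

# y = sum(x * x), but the second factor is wrapped in disconnected_grad,
# so it is treated as a constant when differentiating with respect to x.
y = (x * gradient.disconnected_grad(x)).sum()

g = gradient.grad(y, x)   # d/dx of (x * const) is const, i.e. x itself
f = theano.function([x], g)

a = np.asarray([1.0, 2.0, 3.0], dtype=theano.config.floatX)
print(f(a))   # [1. 2. 3.] rather than the [2. 4. 6.] a full gradient would give

Compare this with the first test case in Example 1 below, which asserts exactly this identity.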

Example 1: test_grad

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def test_grad(self):
        T = theano.tensor
        a = np.asarray(self.rng.randn(5, 5),
                       dtype=config.floatX)

        x = T.matrix('x')

        expressions_gradients = [
            (x * gradient.disconnected_grad(x), x),
            (x * gradient.disconnected_grad(T.exp(x)), T.exp(x)),
            (x**2 * gradient.disconnected_grad(x), 2 * x**2),
        ]

        for expr, expr_grad in expressions_gradients:
            g = gradient.grad(expr.sum(), x)
            # gradient according to theano
            f = theano.function([x], g, on_unused_input='ignore')
            # desired gradient
            f2 = theano.function([x], expr_grad, on_unused_input='ignore')

            assert np.allclose(f(a), f2(a)) 
Developer: muhanzhang, Project: D-VAE, Lines: 23, Source: test_gradient.py

Example 2: build_functions

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def build_functions(self):
        print("Initializing MLP and Q-Learning...")
        S = Input(shape=self.state_size)
        NS = Input(shape=self.state_size)
        A = Input(shape=(1,), dtype='int32')
        R = Input(shape=(1,), dtype='float32')
        T = Input(shape=(1,), dtype='int32')
        self.build_model()
        self.value_fn = K.function([S], self.model(S))

        VS = self.model(S)
        VNS = disconnected_grad(self.model(NS))
        future_value = (1-T) * VNS.max(axis=1, keepdims=True)
        discounted_future_value = self.discount * future_value
        target = R + discounted_future_value
        cost = ((VS[:, A] - target)**2).mean()
        opt = RMSprop(0.0001)
        params = self.model.trainable_weights
        updates = opt.get_updates(params, [], cost)
        self.train_fn = K.function([S, NS, A, R, T], cost, updates=updates) 
Developer: 13o-bbr-bbq, Project: SAIVS, Lines: 22, Source: MyAgent.py

Example 3: test_op_removed

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def test_op_removed(self):
        x = theano.tensor.matrix('x')
        y = x * gradient.disconnected_grad(x)
        f = theano.function([x], y)
        # need to refer to gradient.disconnected_grad_ (the Op instance) here;
        # theano.gradient.disconnected_grad is only a wrapper function!
        assert gradient.disconnected_grad_ not in \
            [node.op for node in f.maker.fgraph.toposort()] 
Developer: muhanzhang, Project: D-VAE, Lines: 10, Source: test_gradient.py

Example 4: test_connection_pattern

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def test_connection_pattern(self):
        T = theano.tensor
        x = T.matrix('x')
        y = gradient.disconnected_grad(x)

        connection_pattern = y.owner.op.connection_pattern(y.owner)
        assert connection_pattern == [[False]] 
Developer: muhanzhang, Project: D-VAE, Lines: 9, Source: test_gradient.py

Example 5: test_disconnected_paths

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def test_disconnected_paths(self):
        # Test that taking a gradient through a disconnected
        # path raises an exception
        T = theano.tensor
        a = np.asarray(self.rng.randn(5, 5),
                       dtype=config.floatX)

        x = T.matrix('x')

        # This MUST raise a DisconnectedInputError.
        # This also raises an additional warning from gradients.py.
        self.assertRaises(gradient.DisconnectedInputError, gradient.grad,
                          gradient.disconnected_grad(x).sum(), x)

        # This MUST NOT raise a DisconnectedInputError error.
        y = gradient.grad((x + gradient.disconnected_grad(x)).sum(), x)

        a = T.matrix('a')
        b = T.matrix('b')
        y = a + gradient.disconnected_grad(b)
        # This MUST raise a DisconnectedInputError.
        # This also raises an additional warning from gradients.py.
        self.assertRaises(gradient.DisconnectedInputError,
                          gradient.grad, y.sum(), b)

        # This MUST NOT raise a DisconnectedInputError error.
        gradient.grad(y.sum(), a) 
Developer: muhanzhang, Project: D-VAE, Lines: 29, Source: test_gradient.py

Example 6: vatm

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def vatm(model, x, predictions, eps, num_iterations=1, xi=1e-6,
         clip_min=None, clip_max=None, seed=12345):
    """
    Theano implementation of the perturbation method used for virtual
    adversarial training: https://arxiv.org/abs/1507.00677
    :param model: the model which returns the network unnormalized logits
    :param x: the input placeholder
    :param predictions: the model's unnormalized output tensor
    :param eps: the epsilon (input variation parameter)
    :param num_iterations: the number of iterations
    :param xi: the finite difference parameter
    :param clip_min: optional parameter that can be used to set a minimum
                    value for components of the example returned
    :param clip_max: optional parameter that can be used to set a maximum
                    value for components of the example returned
    :param seed: the seed for random generator
    :return: a tensor for the adversarial example
    """
    eps = np.asarray(eps, dtype=floatX)
    xi = np.asarray(xi, dtype=floatX)
    rng = RandomStreams(seed=seed)
    d = rng.normal(size=x.shape, dtype=x.dtype)
    for i in range(num_iterations):
        d = xi * utils_th.l2_batch_normalize(d)
        logits_d = model(x + d)
        kl = utils_th.kl_with_logits(predictions, logits_d)
        Hd = T.grad(kl.sum(), d)
        d = gradient.disconnected_grad(Hd)
    d = eps * utils_th.l2_batch_normalize(d)
    adv_x = gradient.disconnected_grad(x + d)
    if (clip_min is not None) and (clip_max is not None):
        adv_x = T.clip(adv_x, clip_min, clip_max)
    return adv_x 
Developer: evtimovi, Project: robust_physical_perturbations, Lines: 35, Source: attacks_th.py

Example 7: test_disconnected_paths

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def test_disconnected_paths(self):
        # Test that taking a gradient through a disconnected
        # path raises an exception
        T = theano.tensor
        a = np.asarray(self.rng.randn(5, 5),
                       dtype=config.floatX)

        x = T.matrix('x')

        # This MUST raise a DisconnectedInputError.
        # This also raises an additional warning from gradients.py.
        self.assertRaises(gradient.DisconnectedInputError, gradient.grad,
                          gradient.disconnected_grad(x).sum(), x)

        # This MUST NOT raise a DisconnectedInputError error.
        y = gradient.grad((x + gradient.disconnected_grad(x)).sum(), x)

        a = T.matrix('a')
        b = T.matrix('b')
        y = a + gradient.disconnected_grad(b)
        # This MUST raise a DisconnectedInputError.
        # This also raises an additional warning from gradients.py.
        self.assertRaises(gradient.DisconnectedInputError,
                          gradient.grad, y.sum(), b)

        # This MUST NOT raise a DisconnectedInputError error.
        z = gradient.grad(y.sum(), a) 
Developer: rizar, Project: attention-lvcsr, Lines: 29, Source: test_gradient.py

Example 8: _create_components

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def _create_components(self, deterministic=False):
    # load network input
    X = self.inputs[0]
    x = X.flatten(2)

    # load networks
    l_p_mu, l_q_mu, l_q_sample, _, _, _ = self.network

    # load network output
    z, q_mu = lasagne.layers.get_output([l_q_sample, l_q_mu], deterministic=deterministic)
    p_mu = lasagne.layers.get_output(l_p_mu, z, deterministic=deterministic)

    # entropy term
    log_qz_given_x = log_bernoulli(dg(z), q_mu).sum(axis=1)

    # expected p(x,z) term
    z_prior = T.ones_like(z)*np.float32(0.5)
    log_pz = log_bernoulli(z, z_prior).sum(axis=1)
    log_px_given_z = log_bernoulli(x, p_mu).sum(axis=1)
    log_pxz = log_pz + log_px_given_z

    # save them for later
    self.log_pxz = log_pxz
    self.log_qz_given_x = log_qz_given_x

    return log_pxz.flatten(), log_qz_given_x.flatten() 
Developer: kuleshov, Project: deep-learning-models, Lines: 28, Source: sbn.py

Example 9: create_gradients

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def create_gradients(self, loss, deterministic=False):
    # load networks
    l_p_mu, l_q_mu, _, l_cv, c, v = self.network

    # load params
    p_params  = lasagne.layers.get_all_params(l_p_mu, trainable=True)
    q_params  = lasagne.layers.get_all_params(l_q_mu, trainable=True)
    cv_params = lasagne.layers.get_all_params(l_cv, trainable=True)

    # load neural net outputs (probabilities have been precomputed)
    log_pxz, log_qz_given_x = self.log_pxz, self.log_qz_given_x
    cv = T.addbroadcast(lasagne.layers.get_output(l_cv),1)

    # compute learning signals
    l = log_pxz - log_qz_given_x - cv
    l_avg, l_var = l.mean(), l.var()
    c_new = 0.8*c + 0.2*l_avg
    v_new = 0.8*v + 0.2*l_var
    l = (l - c_new) / T.maximum(1, T.sqrt(v_new))
  
    # compute grad wrt p
    p_grads = T.grad(-log_pxz.mean(), p_params)

    # compute grad wrt q
    q_target = T.mean(dg(l) * log_qz_given_x)
    q_grads = T.grad(-0.2*q_target, q_params) # 5x slower rate for q

    # compute grad of cv net
    cv_target = T.mean(l**2)
    cv_grads = T.grad(cv_target, cv_params)

    # combine and clip gradients
    clip_grad = 1
    max_norm = 5
    grads = p_grads + q_grads + cv_grads
    mgrads = lasagne.updates.total_norm_constraint(grads, max_norm=max_norm)
    cgrads = [T.clip(g, -clip_grad, clip_grad) for g in mgrads]

    return cgrads 
Developer: kuleshov, Project: deep-learning-models, Lines: 41, Source: sbn.py

Example 10: fgm

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def fgm(x, predictions, y=None, eps=0.3, ord=np.inf, clip_min=None,
        clip_max=None):
    """
    Theano implementation of the Fast Gradient
    Sign method.
    :param x: the input placeholder
    :param predictions: the model's output tensor
    :param y: the output placeholder. Use None (the default) to avoid the
            label leaking effect.
    :param eps: the epsilon (input variation parameter)
    :param ord: (optional) Order of the norm (mimics Numpy).
                Possible values: np.inf (other norms not implemented yet).
    :param clip_min: optional parameter that can be used to set a minimum
                    value for components of the example returned
    :param clip_max: optional parameter that can be used to set a maximum
                    value for components of the example returned
    :return: a tensor for the adversarial example
    """
    warnings.warn("CleverHans support for Theano is deprecated and "
                  "will be dropped on 2017-11-08.")
    assert ord == np.inf, "Theano implementation not available for this norm."
    eps = np.asarray(eps, dtype=floatX)

    if y is None:
        # Using model predictions as ground truth to avoid label leaking
        y = T.eq(predictions, T.max(predictions, axis=1, keepdims=True))
    y = T.cast(y, utils_th.floatX)
    y = y / T.sum(y, 1, keepdims=True)
    # Compute loss
    loss = utils_th.model_loss(y, predictions, mean=True)

    # Define gradient of loss wrt input
    grad = T.grad(loss, x)

    # Take sign of gradient
    signed_grad = T.sgn(grad)

    # Multiply by constant epsilon
    scaled_signed_grad = eps * signed_grad

    # Add perturbation to original example to obtain adversarial example
    adv_x = gradient.disconnected_grad(x + scaled_signed_grad)

    # If clipping is needed, reset all values outside of [clip_min, clip_max]
    if (clip_min is not None) and (clip_max is not None):
        adv_x = T.clip(adv_x, clip_min, clip_max)

    return adv_x 
Developer: evtimovi, Project: robust_physical_perturbations, Lines: 50, Source: attacks_th.py

Example 11: _create_components

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def _create_components(self, deterministic=False):
    # load network input
    X = self.inputs[0]
    x = X.flatten(2)

    # load networks
    l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
    l_qa_mu, l_qa_logsigma, l_qz_mu, l_qz_logsigma, l_qa, l_qz, _, _, _ = self.network
    l_qa_in, l_qz_in, l_px_in = self.input_layers

    # load network output
    qa_mu, qa_logsigma, a = lasagne.layers.get_output([l_qa_mu, l_qa_logsigma, l_qa], 
                                                    deterministic=deterministic)
    qz_mu, z = lasagne.layers.get_output([l_qz_mu, l_qz],
                                        # {l_qz_in : T.zeros_like(qa_mu), l_qa_in : X}, 
                                        # {l_qz_in : qa_mu, l_qa_in : X}, 
                                        {l_qz_in : a, l_qa_in : X}, 
                                        deterministic=deterministic)
    pa_mu, pa_logsigma = lasagne.layers.get_output([l_pa_mu, l_pa_logsigma], z,
                                                   deterministic=deterministic)

    if self.model == 'bernoulli':
      px_mu = lasagne.layers.get_output(l_px_mu, z, deterministic=deterministic)
    elif self.model == 'gaussian':
      px_mu, px_logsigma  = lasagne.layers.get_output([l_px_mu, l_px_logsigma], z, 
                                                       deterministic=deterministic)

    # entropy term
    log_qa_given_x  = log_normal2(a, qa_mu, qa_logsigma).sum(axis=1)
    log_qz_given_x = log_bernoulli(z, qz_mu).sum(axis=1)
    log_qz_given_x_dgz = log_bernoulli(dg(z), qz_mu).sum(axis=1)
    # log_qz_given_x = log_normal2(z, qz_mu, qz_logsigma).sum(axis=1)
    # log_qz_given_x_dgz = log_normal2(dg(z), qz_mu, qz_logsigma).sum(axis=1)
    log_qza_given_x =  log_qz_given_x + log_qa_given_x

    # log-probability term
    z_prior = T.ones_like(z)*np.float32(0.5)
    log_pz = log_bernoulli(z, z_prior).sum(axis=1)
    # z_prior_sigma = T.cast(T.ones_like(qz_logsigma), dtype=theano.config.floatX)
    # z_prior_mu = T.cast(T.zeros_like(qz_mu), dtype=theano.config.floatX)
    # log_pz = log_normal(z, z_prior_mu,  z_prior_sigma).sum(axis=1)
    log_px_given_z = log_bernoulli(x, px_mu).sum(axis=1)
    log_pa_given_z = log_normal2(a, pa_mu, pa_logsigma).sum(axis=1)

    log_pxz = log_pa_given_z + log_px_given_z + log_pz

    # save them for later
    if deterministic == False:
      self.log_pxz = log_pxz
      self.log_px_given_z = log_px_given_z
      self.log_pz = log_pz
      self.log_qza_given_x = log_qza_given_x
      self.log_qa_given_x = log_qa_given_x
      self.log_qz_given_x = log_qz_given_x
      self.log_qz_given_x_dgz = log_qz_given_x_dgz

    # return log_paxz, log_qza_given_x
    return log_pxz, log_qza_given_x 
Developer: kuleshov, Project: deep-learning-models, Lines: 60, Source: dadgm.py

Example 12: create_gradients

# Required module: from theano import gradient [as alias]
# Or: from theano.gradient import disconnected_grad [as alias]
def create_gradients(self, loss, deterministic=False):
    # load networks
    l_px_mu, l_px_logsigma, l_pa_mu, l_pa_logsigma, \
    l_qa_mu, l_qa_logsigma, l_qz_mu, l_qz_logsigma, l_qa, l_qz, l_cv, c, v = self.network

    # load params
    p_params  = lasagne.layers.get_all_params(
        # [l_px_mu], trainable=True)
        [l_px_mu, l_pa_mu, l_pa_logsigma], trainable=True)
    qa_params  = lasagne.layers.get_all_params(l_qa_mu, trainable=True)    
    qz_params  = lasagne.layers.get_all_params(l_qz, trainable=True)
    cv_params = lasagne.layers.get_all_params(l_cv, trainable=True)

    # load neural net outputs (probabilities have been precomputed)
    log_pxz, log_px_given_z, log_pz = self.log_pxz, self.log_px_given_z, self.log_pz
    log_qza_given_x = self.log_qza_given_x    
    log_qz_given_x = self.log_qz_given_x    
    log_qz_given_x_dgz = self.log_qz_given_x_dgz
    cv = T.addbroadcast(lasagne.layers.get_output(l_cv),1)

    # compute learning signals
    l0 = log_px_given_z + log_pz - log_qz_given_x #- cv # NOTE: this didn't have q(a)
    l_avg, l_var = l0.mean(), l0.var()
    c_new = 0.8*c + 0.2*l_avg
    v_new = 0.8*v + 0.2*l_var
    l = (l0 - c_new) / T.maximum(1, T.sqrt(v_new))
    l_target = (l0 - c_new) / T.maximum(1, T.sqrt(v_new))
    # l_target = log_px_given_z + log_pz - log_qz_given_x
  
    # compute grad wrt p
    p_grads = T.grad(-log_pxz.mean(), p_params)

    # compute grad wrt q_a
    elbo = T.mean(log_pxz - log_qza_given_x)
    qa_grads = T.grad(-elbo, qa_params)

    # compute grad wrt q_z
    qz_target = T.mean(dg(l_target) * log_qz_given_x_dgz)
    qz_grads = T.grad(-0.2*qz_target, qz_params) # 5x slower rate for q
    # qz_grads = T.grad(-0.2*T.mean(l0), qz_params) # 5x slower rate for q
    # qz_grads = T.grad(-0.2*elbo, qz_params) # 5x slower rate for q

    # compute grad of cv net
    cv_target = T.mean(l0**2)
    # cv_grads = [0.2*g for g in T.grad(cv_target, cv_params)]

    # combine and clip gradients
    clip_grad = 1
    max_norm = 5
    # grads = p_grads + qa_grads + qz_grads + cv_grads
    grads = p_grads + qa_grads + qz_grads #+ cv_grads
    mgrads = lasagne.updates.total_norm_constraint(grads, max_norm=max_norm)
    cgrads = [T.clip(g, -clip_grad, clip_grad) for g in mgrads]

    return cgrads 
Developer: kuleshov, Project: deep-learning-models, Lines: 57, Source: dadgm.py


Note: The theano.gradient.disconnected_grad examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license; do not reproduce without permission.