

Python autograd.grad method code examples

This article collects and summarizes typical usage examples of the torch.autograd.grad method in Python. If you are wondering how the autograd.grad method is used, how to call it, or what real examples look like, the curated code examples below may help. You can also explore further usage examples of the torch.autograd module to which the method belongs.


A total of 15 code examples of the autograd.grad method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
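
Before the examples, here is a minimal, self-contained sketch of the basic calling pattern of torch.autograd.grad (the variable names are illustrative): it returns a tuple containing one gradient per input tensor, and passing create_graph=True keeps the returned gradient differentiable, which is the pattern the gradient-penalty examples below rely on.

import torch
from torch import autograd

x = torch.randn(4, 3, requires_grad=True)
y = (x ** 2).sum()

# grad() returns a tuple with one gradient tensor per entry in `inputs`.
(dy_dx,) = autograd.grad(outputs=y, inputs=x, create_graph=True)
print(dy_dx.shape)  # torch.Size([4, 3]); numerically equal to 2 * x

# Because create_graph=True was passed, dy_dx can itself be differentiated,
# e.g. to build a gradient penalty as in the examples that follow.
penalty = ((dy_dx.norm(2, dim=1) - 1) ** 2).mean()
penalty.backward()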

Example 1: __call__

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def __call__(self, netD, real_data, fake_data):
        batch_size = real_data.size(0)

        fake_data = fake_data[:batch_size]
        
        alpha = torch.rand(batch_size, 1, 1, requires_grad=True).to(self.device)
        # randomly mix real and fake data
        interpolates = real_data + alpha * (fake_data - real_data)
        # compute output of D for interpolated input
        disc_interpolates = netD(interpolates)
        # compute gradients of the critic output w.r.t. the interpolated inputs
        
        gradients = grad(outputs=disc_interpolates, inputs=interpolates,
                         grad_outputs=torch.ones(disc_interpolates.size()).to(self.device),
                         create_graph=True, retain_graph=True, only_inputs=True)[0].contiguous().view(batch_size,-1)
                         
        gradient_penalty = (((gradients.norm(2, dim=1) - self.gamma) / self.gamma) ** 2).mean() * self.lambdaGP

        return gradient_penalty 
Developer ID: seowok, Project: TreeGAN, Lines of code: 21, Source file: gradient_penalty.py
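
For orientation only: the class that owns the __call__ above is not shown, so the sketch below binds that method onto a minimal stand-in object whose attributes (device, gamma, lambdaGP) are inferred from the method body. The stand-in class, toy critic, and tensor shapes are all assumptions, not TreeGAN code.

import torch
from torch import nn

class _PenaltyStub:
    # Hypothetical holder for the attributes the __call__ above reads.
    def __init__(self, device, gamma, lambdaGP):
        self.device, self.gamma, self.lambdaGP = device, gamma, lambdaGP

_PenaltyStub.__call__ = __call__  # reuse the method listed above

gp = _PenaltyStub(device=torch.device('cpu'), gamma=1.0, lambdaGP=10.0)
netD = nn.Sequential(nn.Flatten(), nn.Linear(2048 * 3, 1))  # toy point-cloud critic
real = torch.rand(4, 2048, 3)   # (batch, points, xyz)
fake = torch.rand(4, 2048, 3)

penalty = gp(netD, real, fake)  # scalar gradient-penalty term for the critic loss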

Example 2: compute_d_norms

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def compute_d_norms(self, A_real_, B_real_):
        A_real = Variable(A_real_.data, requires_grad=True)
        B_real = Variable(B_real_.data, requires_grad=True)
        d_a_real = self.d_a(A_real)
        d_b_real = self.d_b(B_real)
        this_ones_dafake = torch.ones(d_a_real.size())
        this_ones_dbfake = torch.ones(d_b_real.size())
        if self.use_cuda:
            this_ones_dafake = this_ones_dafake.cuda()
            this_ones_dbfake = this_ones_dbfake.cuda()
        gradients_da = grad(outputs=d_a_real,
                            inputs=A_real,
                            grad_outputs=this_ones_dafake,
                            create_graph=True,
                            retain_graph=True,
                            only_inputs=True)[0]
        gradients_db = grad(outputs=d_b_real,
                            inputs=B_real,
                            grad_outputs=this_ones_dbfake,
                            create_graph=True,
                            retain_graph=True,
                            only_inputs=True)[0]
        gp_a = ((gradients_da.view(gradients_da.size()[0], -1).norm(2, 1) - 1) ** 2).mean()
        gp_b = ((gradients_db.view(gradients_db.size()[0], -1).norm(2, 1) - 1) ** 2).mean()
        return gp_a, gp_b 
Developer ID: joelmoniz, Project: DepthNets, Lines of code: 27, Source file: cyclegan.py

Example 3: D_logistic_r2

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def D_logistic_r2(fake_img, real_img, D, gamma=10.0):
    real_img = Variable(real_img, requires_grad=True).to(real_img.device)
    fake_img = Variable(fake_img, requires_grad=True).to(fake_img.device)

    real_score = D(real_img)
    fake_score = D(fake_img)

    loss = F.softplus(fake_score)
    loss = loss + F.softplus(-real_score)

    # Gradient penalty (R2 regularization, computed on fake samples)
    # One of the differentiated Tensors does not require grad?
    # https://discuss.pytorch.org/t/one-of-the-differentiated-tensors-does-not-require-grad/54694
    fake_grads = grad(torch.sum(fake_score), fake_img)[0]
    gradient_penalty = torch.sum(torch.square(fake_grads), dim=[1, 2, 3])
    reg = gradient_penalty * (gamma * 0.5)

    # fixme: only support non-lazy mode
    return loss + reg


# ==============================================================================
# Non-saturating logistic loss with path length regularizer from the paper
# "Analyzing and Improving the Image Quality of StyleGAN", Karras et al. 2019
# ============================================================================== 
Developer ID: tomguluson92, Project: StyleGAN2_PyTorch, Lines of code: 27, Source file: loss.py

Example 4: conv_constitutive_constraint_nonlinear_exp

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def conv_constitutive_constraint_nonlinear_exp(input, output, sobel_filter):
    """Nonlinear extension of Darcy's law
        sigma = - exp(K * u) grad(u)

    Args:
        input: K
        output: u, sigma1, sigma2
    """
    grad_h = sobel_filter.grad_h(output[:, [0]])
    grad_v = sobel_filter.grad_v(output[:, [0]])

    sigma_h = - torch.exp(input * output[:, [0]]) * grad_h
    sigma_v = - torch.exp(input * output[:, [0]]) * grad_v

    return ((output[:, [1]] - sigma_h) ** 2 
        + (output[:, [2]] - sigma_v) ** 2).mean() 
Developer ID: cics-nd, Project: pde-surrogate, Lines of code: 18, Source file: darcy.py
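
The function above expects a sobel_filter object exposing grad_h and grad_v, which estimate horizontal and vertical spatial derivatives of an image-like field. The class and tensor shapes below are a minimal stand-in under that assumption, not the SobelFilter implementation from the pde-surrogate project.

import torch
import torch.nn.functional as F

class SimpleSobel:
    # Hypothetical stand-in: fixed 3x3 Sobel kernels applied as convolutions.
    def __init__(self, dx=1.0):
        k = torch.tensor([[-1., 0., 1.],
                          [-2., 0., 2.],
                          [-1., 0., 1.]]) / (8.0 * dx)
        self.kernel_h = k.view(1, 1, 3, 3)                   # horizontal derivative
        self.kernel_v = k.t().contiguous().view(1, 1, 3, 3)  # vertical derivative

    def grad_h(self, field):  # field: (B, 1, H, W)
        return F.conv2d(field, self.kernel_h, padding=1)

    def grad_v(self, field):
        return F.conv2d(field, self.kernel_v, padding=1)

sobel = SimpleSobel()
K = torch.rand(4, 1, 32, 32)      # input permeability field K
pred = torch.rand(4, 3, 32, 32)   # predicted channels (u, sigma1, sigma2)
loss = conv_constitutive_constraint_nonlinear_exp(K, pred, sobel)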

Example 5: sliced_score_matching

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def sliced_score_matching(energy_net, samples, n_particles=1):
    dup_samples = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup_samples.requires_grad_(True)
    vectors = torch.randn_like(dup_samples)
    vectors = vectors / torch.norm(vectors, dim=-1, keepdim=True)

    logp = -energy_net(dup_samples).sum()
    grad1 = autograd.grad(logp, dup_samples, create_graph=True)[0]
    gradv = torch.sum(grad1 * vectors)
    loss1 = torch.sum(grad1 * vectors, dim=-1) ** 2 * 0.5
    grad2 = autograd.grad(gradv, dup_samples, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)

    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return loss.mean(), loss1.mean(), loss2.mean() 
Developer ID: ermongroup, Project: ncsn, Lines of code: 19, Source file: sliced_sm.py
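
A hedged usage sketch for sliced_score_matching above: the small energy network, the data, and the dimensions are illustrative stand-ins rather than the NCSN models, and the energy network is assumed to map a batch of samples to one scalar energy per sample.

import torch
from torch import nn

energy_net = nn.Sequential(nn.Linear(2, 64), nn.Softplus(), nn.Linear(64, 1))
samples = torch.randn(128, 2)  # toy 2-D data batch

loss, sq_term, hvp_term = sliced_score_matching(energy_net, samples, n_particles=1)
loss.backward()  # double backprop works because create_graph=True is used above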

Example 6: sliced_score_matching_vr

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def sliced_score_matching_vr(energy_net, samples, n_particles=1):
    dup_samples = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup_samples.requires_grad_(True)
    vectors = torch.randn_like(dup_samples)

    logp = -energy_net(dup_samples).sum()
    grad1 = autograd.grad(logp, dup_samples, create_graph=True)[0]
    loss1 = torch.sum(grad1 * grad1, dim=-1) / 2.
    gradv = torch.sum(grad1 * vectors)
    grad2 = autograd.grad(gradv, dup_samples, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)

    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)

    loss = loss1 + loss2
    return loss.mean(), loss1.mean(), loss2.mean() 
Developer ID: ermongroup, Project: ncsn, Lines of code: 19, Source file: sliced_sm.py

Example 7: sliced_score_estimation_vr

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def sliced_score_estimation_vr(score_net, samples, n_particles=1):
    """
    Be careful if the shape of samples is not B x x_dim!!!!
    """
    dup_samples = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup_samples.requires_grad_(True)
    vectors = torch.randn_like(dup_samples)

    grad1 = score_net(dup_samples)
    gradv = torch.sum(grad1 * vectors)
    grad2 = autograd.grad(gradv, dup_samples, create_graph=True)[0]

    grad1 = grad1.view(dup_samples.shape[0], -1)
    loss1 = torch.sum(grad1 * grad1, dim=-1) / 2.

    loss2 = torch.sum((vectors * grad2).view(dup_samples.shape[0], -1), dim=-1)

    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)

    loss = loss1 + loss2
    return loss.mean(), loss1.mean(), loss2.mean() 
Developer ID: ermongroup, Project: ncsn, Lines of code: 24, Source file: sliced_sm.py

Example 8: compute_GP

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def compute_GP(netD, real_data, real_embed, fake_data, LAMBDA):
        BATCH_SIZE = real_data.size(0)
        alpha = torch.rand(BATCH_SIZE, 1)
        alpha = alpha.expand(BATCH_SIZE, int(real_data.nelement() / BATCH_SIZE)).contiguous().view(BATCH_SIZE, 3, 64, 64)
        alpha = alpha.cuda()

        interpolates = alpha * real_data + ((1 - alpha) * fake_data)

        interpolates = interpolates.cuda()

        interpolates = autograd.Variable(interpolates, requires_grad=True)

        disc_interpolates, _ = netD(interpolates, real_embed)

        gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                                  grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
                                  create_graph=True, retain_graph=True, only_inputs=True)[0]

        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA

        return gradient_penalty 
Developer ID: aelnouby, Project: Text-to-Image-Synthesis, Lines of code: 23, Source file: utils.py

Example 9: energy_loss

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def energy_loss(self, data, score, critic):
    vectors = self.noise_vectors(critic)
    grad_score = ag.grad(
      score, data,
      grad_outputs=torch.ones_like(score),
      create_graph=True
    )[0]
    jacobian = ag.grad(
      critic, data,
      grad_outputs=vectors,
      create_graph=True
    )[0]
    jacobian_term = (vectors * jacobian).view(score.size(0), -1).sum(dim=-1)
    critic_term = (grad_score * critic).view(score.size(0), -1).sum(dim=-1)

    penalty_term = (score ** 2).mean()

    self.current_losses["jacobian"] = float(jacobian_term.mean())
    self.current_losses["critic"] = float(critic_term.mean())
    self.current_losses["penalty"] = float(penalty_term.mean())

    return (jacobian_term + critic_term).mean() 
Developer ID: mjendrusch, Project: torchsupport, Lines of code: 24, Source file: lsd.py

Example 10: integrate

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def integrate(self, score, data, *args):
    done = False
    count = 0
    step_count = self.steps if self.step > 0 else 10 * self.steps
    while not done:
      make_differentiable(data)
      make_differentiable(args)
      energy = score(data + self.noise * torch.randn_like(data), *args)
      if isinstance(energy, (list, tuple)):
        energy, *_ = energy
      gradient = ag.grad(energy, data, torch.ones_like(energy))[0]
      if self.max_norm:
        gradient = clip_grad_by_norm(gradient, self.max_norm)
      data = data - self.rate * gradient
      if self.clamp is not None:
        data = data.clamp(*self.clamp)
      data = data.detach()
      done = count >= step_count
      if self.target is not None:
        done = done and bool((energy.mean(dim=0) <= self.target).all())
      count += 1
      if (count + 1) % 500 == 0:
        data.random_()
    self.step += 1
    return data 
Developer ID: mjendrusch, Project: torchsupport, Lines of code: 27, Source file: samplers.py

Example 11: calc_gradient_penalty

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def calc_gradient_penalty(netD, real_data, fake_data):
    #print real_data.size()
    alpha = torch.rand(BATCH_SIZE, 1)
    alpha = alpha.expand(real_data.size())
    alpha = alpha.cuda(gpu) if use_cuda else alpha

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    if use_cuda:
        interpolates = interpolates.cuda(gpu)
    interpolates = autograd.Variable(interpolates, requires_grad=True)

    disc_interpolates = netD(interpolates)

    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu) if use_cuda else torch.ones(
                                  disc_interpolates.size()),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]

    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty

# ==================Definition End====================== 
Developer ID: caogang, Project: wgan-gp, Lines of code: 25, Source file: gan_mnist.py
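
A hedged sketch of how such a module-level penalty function is typically wired into a WGAN-GP critic update. The function above reads the globals BATCH_SIZE, LAMBDA, use_cuda and gpu, which are assumed to be set as shown; the networks, optimizer, and data are illustrative stand-ins, not the original gan_mnist.py training script.

import torch
from torch import nn, optim

BATCH_SIZE, LAMBDA = 50, 10  # globals read by calc_gradient_penalty
use_cuda, gpu = torch.cuda.is_available(), 0

netD = nn.Sequential(nn.Linear(784, 128), nn.LeakyReLU(0.2), nn.Linear(128, 1))
netG = nn.Sequential(nn.Linear(128, 784), nn.Tanh())
if use_cuda:
    netD, netG = netD.cuda(gpu), netG.cuda(gpu)
optD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9))

real_data = torch.rand(BATCH_SIZE, 784)  # stand-in for a batch of real images
noise = torch.randn(BATCH_SIZE, 128)
if use_cuda:
    real_data, noise = real_data.cuda(gpu), noise.cuda(gpu)
fake_data = netG(noise).detach()

# WGAN-GP critic loss: Wasserstein estimate plus the gradient penalty.
d_loss = netD(fake_data).mean() - netD(real_data).mean()
d_loss = d_loss + calc_gradient_penalty(netD, real_data, fake_data)
optD.zero_grad()
d_loss.backward()
optD.step()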

Example 12: calc_gradient_penalty

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def calc_gradient_penalty(netD, real_data, fake_data):
    alpha = torch.rand(BATCH_SIZE, 1, 1)
    alpha = alpha.expand(real_data.size())
    alpha = alpha.cuda(gpu) if use_cuda else alpha

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    if use_cuda:
        interpolates = interpolates.cuda(gpu)
    interpolates = autograd.Variable(interpolates, requires_grad=True)

    disc_interpolates = netD(interpolates)

    # TODO: Make ConvBackward differentiable
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu) if use_cuda else torch.ones(
                                  disc_interpolates.size()),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]

    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty 
Developer ID: caogang, Project: wgan-gp, Lines of code: 23, Source file: gan_language.py

Example 13: calc_gradient_penalty

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def calc_gradient_penalty(netD, real_data, fake_data):
    # print "real_data: ", real_data.size(), fake_data.size()
    alpha = torch.rand(BATCH_SIZE, 1)
    alpha = alpha.expand(BATCH_SIZE, real_data.nelement() // BATCH_SIZE).contiguous().view(BATCH_SIZE, 3, 32, 32)
    alpha = alpha.cuda(gpu) if use_cuda else alpha

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    if use_cuda:
        interpolates = interpolates.cuda(gpu)
    interpolates = autograd.Variable(interpolates, requires_grad=True)

    disc_interpolates = netD(interpolates)

    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu) if use_cuda else torch.ones(
                                  disc_interpolates.size()),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.view(gradients.size(0), -1)

    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty

# For generating samples 
Developer ID: caogang, Project: wgan-gp, Lines of code: 26, Source file: gan_cifar10.py

Example 14: calc_gradient_penalty

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def calc_gradient_penalty(netD, real_data, fake_data):
    alpha = torch.rand(BATCH_SIZE, 1)
    alpha = alpha.expand(real_data.size())
    alpha = alpha.cuda() if use_cuda else alpha

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    if use_cuda:
        interpolates = interpolates.cuda()
    interpolates = autograd.Variable(interpolates, requires_grad=True)

    disc_interpolates = netD(interpolates)

    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda() if use_cuda else torch.ones(
                                  disc_interpolates.size()),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]

    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty

# ==================Definition End====================== 
Developer ID: caogang, Project: wgan-gp, Lines of code: 24, Source file: gan_toy.py

Example 15: compute_GP

# Required import: from torch import autograd [as alias]
# Alternatively: from torch.autograd import grad [as alias]
def compute_GP(netD, real_data, real_embed, fake_data, LAMBDA, project=False):
        #TODO: Should be improved!!!! Maybe using: https://github.com/EmilienDupont/wgan-gp/blob/master/training.py
        BATCH_SIZE = real_data.size(0)
        alpha = torch.rand(BATCH_SIZE, 1)
        alpha = alpha.expand(real_data.size())
        alpha = alpha.cuda()

        interpolates = alpha * real_data + ((1 - alpha) * fake_data)

        interpolates = interpolates.cuda()

        interpolates = autograd.Variable(interpolates, requires_grad=True)
        disc_interpolates, _ = netD(interpolates, real_embed, project=project)

        gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                                  grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
                                  create_graph=True, retain_graph=True, only_inputs=True)[0]

        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA

        return gradient_penalty 
Developer ID: franroldans, Project: tfm-franroldan-wav2pix, Lines of code: 23, Source file: utils.py


Note: The torch.autograd.grad examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code, and do not reproduce this article without permission.