本文整理汇总了Python中torch.autograd.grad方法的典型用法代码示例。如果您正苦于以下问题:Python autograd.grad方法的具体用法?Python autograd.grad怎么用?Python autograd.grad使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.autograd
的用法示例。
在下文中一共展示了autograd.grad方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __call__
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def __call__(self, netD, real_data, fake_data):
    """Compute the WGAN-GP gradient penalty for discriminator ``netD``.

    Interpolates between real and fake samples, evaluates D on the mix,
    and penalizes deviations of the per-sample input-gradient norm from
    ``self.gamma``, scaled by ``self.lambdaGP``.
    """
    n = real_data.size(0)
    # Trim the fake batch so both operands align sample-for-sample.
    fake_data = fake_data[:n]
    # One mixing coefficient per sample, broadcast over the last two dims.
    alpha = torch.rand(n, 1, 1, requires_grad=True).to(self.device)
    mixed = real_data + alpha * (fake_data - real_data)
    d_mixed = netD(mixed)
    # dD/d(mixed); create_graph so the penalty itself is differentiable.
    grads = grad(
        outputs=d_mixed,
        inputs=mixed,
        grad_outputs=torch.ones(d_mixed.size()).to(self.device),
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0].contiguous().view(n, -1)
    # ((|grad| - gamma) / gamma)^2, averaged over the batch.
    penalty = (((grads.norm(2, dim=1) - self.gamma) / self.gamma) ** 2).mean() * self.lambdaGP
    return penalty
示例2: compute_d_norms
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def compute_d_norms(self, A_real_, B_real_):
    """Gradient penalties of both discriminators at the real samples.

    Returns:
        (gp_a, gp_b): mean squared deviation of each discriminator's
        per-sample input-gradient norm from 1.
    """
    a_in = Variable(A_real_.data, requires_grad=True)
    b_in = Variable(B_real_.data, requires_grad=True)
    d_a_out = self.d_a(a_in)
    d_b_out = self.d_b(b_in)
    ones_a = torch.ones(d_a_out.size())
    ones_b = torch.ones(d_b_out.size())
    if self.use_cuda:
        ones_a = ones_a.cuda()
        ones_b = ones_b.cuda()
    # dD/d(input) for each domain; graphs kept so the penalties backprop.
    grads_a = grad(outputs=d_a_out, inputs=a_in, grad_outputs=ones_a,
                   create_graph=True, retain_graph=True, only_inputs=True)[0]
    grads_b = grad(outputs=d_b_out, inputs=b_in, grad_outputs=ones_b,
                   create_graph=True, retain_graph=True, only_inputs=True)[0]
    gp_a = ((grads_a.view(grads_a.size(0), -1).norm(2, 1) - 1) ** 2).mean()
    gp_b = ((grads_b.view(grads_b.size(0), -1).norm(2, 1) - 1) ** 2).mean()
    return gp_a, gp_b
示例3: D_logistic_r2
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def D_logistic_r2(fake_img, real_img, D, gamma=10.0):
    """Logistic discriminator loss with the R2 gradient regularizer.

    R2 penalizes the squared gradient norm of D at *fake* samples
    (StyleGAN2-style regularization; non-lazy mode only).
    """
    real_in = Variable(real_img, requires_grad=True).to(real_img.device)
    fake_in = Variable(fake_img, requires_grad=True).to(fake_img.device)
    real_score = D(real_in)
    fake_score = D(fake_in)
    # Non-saturating logistic loss: softplus(D(fake)) + softplus(-D(real)).
    loss = F.softplus(fake_score)
    loss = loss + F.softplus(-real_score)
    # GradientPenalty
    # One of the differentiated Tensors does not require grad?
    # https://discuss.pytorch.org/t/one-of-the-differentiated-tensors-does-not-require-grad/54694
    fake_grads = grad(torch.sum(fake_score), fake_in)[0]
    gradient_penalty = torch.sum(torch.square(fake_grads), dim=[1, 2, 3])
    reg = gradient_penalty * (gamma * 0.5)
    # fixme: only support non-lazy mode
    return loss + reg
# ==============================================================================
# Non-saturating logistic loss with path length regularizer from the paper
# "Analyzing and Improving the Image Quality of StyleGAN", Karras et al. 2019
# ==============================================================================
示例4: conv_constitutive_constraint_nonlinear_exp
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def conv_constitutive_constraint_nonlinear_exp(input, output, sobel_filter):
    """Residual of the nonlinear extension of Darcy's law.

    sigma = - exp(K * u) grad(u)

    Args:
        input: K (permeability field)
        output: channels [u, sigma1, sigma2]
    Returns:
        Mean squared mismatch between the predicted fluxes and the
        constitutive fluxes computed from u.
    """
    u = output[:, [0]]
    # Finite-difference gradients of the pressure field u.
    du_h = sobel_filter.grad_h(u)
    du_v = sobel_filter.grad_v(u)
    coeff = -torch.exp(input * u)
    flux_h = coeff * du_h
    flux_v = coeff * du_v
    residual_h = output[:, [1]] - flux_h
    residual_v = output[:, [2]] - flux_v
    return (residual_h ** 2 + residual_v ** 2).mean()
示例5: sliced_score_matching
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def sliced_score_matching(energy_net, samples, n_particles=1):
    """Sliced score matching loss for an energy-based model.

    Duplicates each sample ``n_particles`` times, draws a unit random
    direction per duplicate, and estimates 0.5*(score . v)^2 + v^T H v.
    Returns (total, loss1, loss2) as scalar means.
    """
    dup = (samples.unsqueeze(0)
           .expand(n_particles, *samples.shape)
           .contiguous()
           .view(-1, *samples.shape[1:]))
    dup.requires_grad_(True)
    directions = torch.randn_like(dup)
    directions = directions / torch.norm(directions, dim=-1, keepdim=True)
    logp = -energy_net(dup).sum()
    score = autograd.grad(logp, dup, create_graph=True)[0]
    proj = torch.sum(score * directions)
    loss1 = torch.sum(score * directions, dim=-1) ** 2 * 0.5
    # Hessian-vector product via a second backward pass.
    hvp = autograd.grad(proj, dup, create_graph=True)[0]
    loss2 = torch.sum(directions * hvp, dim=-1)
    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    total = loss1 + loss2
    return total.mean(), loss1.mean(), loss2.mean()
示例6: sliced_score_matching_vr
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def sliced_score_matching_vr(energy_net, samples, n_particles=1):
    """Variance-reduced sliced score matching for an energy model.

    Uses the exact 0.5*||score||^2 term instead of its sliced estimate.
    Returns (total, loss1, loss2) as scalar means.
    """
    dup = (samples.unsqueeze(0)
           .expand(n_particles, *samples.shape)
           .contiguous()
           .view(-1, *samples.shape[1:]))
    dup.requires_grad_(True)
    directions = torch.randn_like(dup)
    logp = -energy_net(dup).sum()
    score = autograd.grad(logp, dup, create_graph=True)[0]
    # Exact squared-norm term — this is the variance reduction.
    loss1 = torch.sum(score * score, dim=-1) / 2.
    proj = torch.sum(score * directions)
    hvp = autograd.grad(proj, dup, create_graph=True)[0]
    loss2 = torch.sum(directions * hvp, dim=-1)
    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    total = loss1 + loss2
    return total.mean(), loss1.mean(), loss2.mean()
示例7: sliced_score_estimation_vr
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def sliced_score_estimation_vr(score_net, samples, n_particles=1):
    """Variance-reduced sliced score estimation for a direct score net.

    Be careful if the shape of samples is not B x x_dim!!!!
    Returns (total, loss1, loss2) as scalar means.
    """
    dup = (samples.unsqueeze(0)
           .expand(n_particles, *samples.shape)
           .contiguous()
           .view(-1, *samples.shape[1:]))
    dup.requires_grad_(True)
    directions = torch.randn_like(dup)
    score = score_net(dup)
    proj = torch.sum(score * directions)
    # Jacobian-vector product of the score network.
    hvp = autograd.grad(proj, dup, create_graph=True)[0]
    flat_score = score.view(dup.shape[0], -1)
    loss1 = torch.sum(flat_score * flat_score, dim=-1) / 2.
    loss2 = torch.sum((directions * hvp).view(dup.shape[0], -1), dim=-1)
    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    total = loss1 + loss2
    return total.mean(), loss1.mean(), loss2.mean()
示例8: compute_GP
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def compute_GP(netD, real_data, real_embed, fake_data, LAMBDA):
    """WGAN-GP gradient penalty for a conditional discriminator.

    Mixes real and fake samples with a per-sample random coefficient,
    evaluates ``netD(mix, real_embed)``, and penalizes deviations of the
    per-sample input-gradient norm from 1, scaled by ``LAMBDA``.

    Fixes vs. original:
      * device taken from ``real_data`` instead of hard-coded ``.cuda()``
      * mixing coefficient broadcasts over any input shape instead of the
        hard-coded (B, 3, 64, 64) reshape
      * gradients flattened per sample before the norm, as in the
        WGAN-GP paper (Gulrajani et al., 2017)
    """
    batch_size = real_data.size(0)
    device = real_data.device
    # One coefficient per sample, broadcast across the remaining dims.
    alpha = torch.rand(batch_size, *([1] * (real_data.dim() - 1)), device=device)
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    interpolates = autograd.Variable(interpolates, requires_grad=True)
    disc_interpolates, _ = netD(interpolates, real_embed)
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size(), device=device),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    # Flatten so the norm is taken over all features of each sample.
    gradients = gradients.view(batch_size, -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty
示例9: energy_loss
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def energy_loss(self, data, score, critic):
    """Combined Jacobian/critic objective with a score penalty.

    Records the individual terms in ``self.current_losses`` and returns
    the mean of the jacobian and critic terms.
    """
    vectors = self.noise_vectors(critic)
    # d(score)/d(data), with all score entries weighted equally.
    grad_score = ag.grad(
        score, data,
        grad_outputs=torch.ones_like(score),
        create_graph=True,
    )[0]
    # Jacobian-vector product of the critic w.r.t. the data.
    jacobian = ag.grad(
        critic, data,
        grad_outputs=vectors,
        create_graph=True,
    )[0]
    batch = score.size(0)
    jacobian_term = (vectors * jacobian).view(batch, -1).sum(dim=-1)
    critic_term = (grad_score * critic).view(batch, -1).sum(dim=-1)
    penalty_term = (score ** 2).mean()
    self.current_losses["jacobian"] = float(jacobian_term.mean())
    self.current_losses["critic"] = float(critic_term.mean())
    self.current_losses["penalty"] = float(penalty_term.mean())
    return (jacobian_term + critic_term).mean()
示例10: integrate
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def integrate(self, score, data, *args):
    """Run noisy gradient descent on ``data`` under the ``score`` energy.

    Iterates Langevin-style steps until the step budget is exhausted
    (and, if ``self.target`` is set, the mean energy falls below it).
    NOTE(review): ``make_differentiable`` and ``clip_grad_by_norm`` are
    helpers defined elsewhere in this module.
    """
    finished = False
    iteration = 0
    # The very first call runs a 10x longer burn-in.
    budget = self.steps if self.step > 0 else 10 * self.steps
    while not finished:
        make_differentiable(data)
        make_differentiable(args)
        perturbed = data + self.noise * torch.randn_like(data)
        energy = score(perturbed, *args)
        if isinstance(energy, (list, tuple)):
            energy, *_ = energy
        gradient = ag.grad(energy, data, torch.ones_like(energy))[0]
        if self.max_norm:
            gradient = clip_grad_by_norm(gradient, self.max_norm)
        data = data - self.rate * gradient
        if self.clamp is not None:
            data = data.clamp(*self.clamp)
        data = data.detach()
        finished = iteration >= budget
        if self.target is not None:
            finished = finished and bool((energy.mean(dim=0) <= self.target).all())
        iteration += 1
        # Periodically re-randomize the state to escape bad basins.
        if (iteration + 1) % 500 == 0:
            data.random_()
    self.step += 1
    return data
示例11: calc_gradient_penalty
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def calc_gradient_penalty(netD, real_data, fake_data):
    """WGAN-GP gradient penalty (Gulrajani et al., 2017).

    Relies on module globals ``use_cuda``, ``gpu`` and ``LAMBDA``.

    Fix vs. original: the mixing coefficient is sized from
    ``real_data.size(0)`` instead of the global ``BATCH_SIZE``, so a
    smaller final batch no longer breaks the ``expand``.
    """
    batch_size = real_data.size(0)
    alpha = torch.rand(batch_size, 1)
    alpha = alpha.expand(real_data.size())
    alpha = alpha.cuda(gpu) if use_cuda else alpha
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    if use_cuda:
        interpolates = interpolates.cuda(gpu)
    interpolates = autograd.Variable(interpolates, requires_grad=True)
    disc_interpolates = netD(interpolates)
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu)
                              if use_cuda else torch.ones(disc_interpolates.size()),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    # NOTE(review): gradients are not flattened before the norm; this is
    # only correct for 2-D (batch, features) data — confirm with callers.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty
# ==================Definition End======================
示例12: calc_gradient_penalty
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def calc_gradient_penalty(netD, real_data, fake_data):
    """WGAN-GP gradient penalty for 3-D (batch, channel, length) data.

    Relies on module globals ``use_cuda``, ``gpu`` and ``LAMBDA``.

    Fix vs. original: the mixing coefficient is sized from
    ``real_data.size(0)`` instead of the global ``BATCH_SIZE``, so a
    smaller final batch no longer breaks the ``expand``.
    """
    batch_size = real_data.size(0)
    alpha = torch.rand(batch_size, 1, 1)
    alpha = alpha.expand(real_data.size())
    alpha = alpha.cuda(gpu) if use_cuda else alpha
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    if use_cuda:
        interpolates = interpolates.cuda(gpu)
    interpolates = autograd.Variable(interpolates, requires_grad=True)
    disc_interpolates = netD(interpolates)
    # TODO: Make ConvBackward diffentiable
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu)
                              if use_cuda else torch.ones(disc_interpolates.size()),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    # NOTE(review): norm is over dim=1 only; gradients are not flattened
    # per sample — confirm this matches the intended penalty for 3-D data.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty
示例13: calc_gradient_penalty
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def calc_gradient_penalty(netD, real_data, fake_data):
    """WGAN-GP gradient penalty for image data.

    Relies on module globals ``use_cuda``, ``gpu`` and ``LAMBDA``.

    Fixes vs. original:
      * batch size derived from ``real_data`` instead of the global
        ``BATCH_SIZE`` (handles a smaller final batch)
      * ``nelement() // batch_size`` — Python 3 true division produced a
        float, which ``expand`` rejects with a TypeError
      * alpha reshaped to ``real_data.size()`` instead of the hard-coded
        (B, 3, 32, 32)
    """
    batch_size = real_data.size(0)
    alpha = torch.rand(batch_size, 1)
    alpha = alpha.expand(batch_size, real_data.nelement() // batch_size).contiguous().view(real_data.size())
    alpha = alpha.cuda(gpu) if use_cuda else alpha
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    if use_cuda:
        interpolates = interpolates.cuda(gpu)
    interpolates = autograd.Variable(interpolates, requires_grad=True)
    disc_interpolates = netD(interpolates)
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu)
                              if use_cuda else torch.ones(disc_interpolates.size()),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    # Flatten so the norm covers all features of each sample.
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty
# For generating samples
示例14: calc_gradient_penalty
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def calc_gradient_penalty(netD, real_data, fake_data):
    """WGAN-GP gradient penalty (2-D data variant, default CUDA device).

    Relies on module globals ``use_cuda`` and ``LAMBDA``.

    Fix vs. original: the mixing coefficient is sized from
    ``real_data.size(0)`` instead of the global ``BATCH_SIZE``, so a
    smaller final batch no longer breaks the ``expand``.
    """
    batch_size = real_data.size(0)
    alpha = torch.rand(batch_size, 1)
    alpha = alpha.expand(real_data.size())
    alpha = alpha.cuda() if use_cuda else alpha
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    if use_cuda:
        interpolates = interpolates.cuda()
    interpolates = autograd.Variable(interpolates, requires_grad=True)
    disc_interpolates = netD(interpolates)
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda()
                              if use_cuda else torch.ones(disc_interpolates.size()),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    # NOTE(review): gradients are not flattened before the norm; this is
    # only correct for 2-D (batch, features) data — confirm with callers.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty
# ==================Definition End======================
示例15: compute_GP
# 需要导入模块: from torch import autograd [as 别名]
# 或者: from torch.autograd import grad [as 别名]
def compute_GP(netD, real_data, real_embed, fake_data, LAMBDA, project=False):
    """WGAN-GP penalty for a projection-conditional discriminator.

    #TODO: Should be improved!!!! Maybe using: https://github.com/EmilienDupont/wgan-gp/blob/master/training.py

    Fixes vs. original:
      * device taken from ``real_data`` rather than hard-coded ``.cuda()``
      * mixing coefficient broadcasts over inputs of any rank (the old
        ``expand`` of a (B, 1) tensor only worked for 2-D data)
      * gradients flattened per sample before the norm (a no-op for the
        2-D case the original supported)
    """
    batch_size = real_data.size(0)
    device = real_data.device
    # One mixing coefficient per sample, broadcast over feature dims.
    alpha = torch.rand(batch_size, *([1] * (real_data.dim() - 1)), device=device)
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    interpolates = autograd.Variable(interpolates, requires_grad=True)
    disc_interpolates, _ = netD(interpolates, real_embed, project=project)
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size(), device=device),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.view(batch_size, -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty