This article collects typical usage examples of the torch.randn_like method in Python. If you are unsure how exactly to use torch.randn_like, or what it is good for, the curated code samples below should help. You can also browse further usage examples of the torch module the method belongs to.
The following shows 15 code examples of torch.randn_like, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
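Before the examples, a minimal sketch of the call itself (the variable names are illustrative): torch.randn_like(input) returns a new tensor of samples drawn from the standard normal distribution N(0, 1), matching input's shape, dtype, and device.

import torch

x = torch.zeros(2, 3)
noise = torch.randn_like(x)   # same shape/dtype/device as x, entries ~ N(0, 1)
print(noise.shape)            # torch.Size([2, 3])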
Example 1: sample_q
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def sample_q(args, device, f, replay_buffer, y=None):
    """This function takes in replay_buffer so we have the option to sample from
    scratch (i.e. replay_buffer == []). See test_wrn_ebm.py for an example.
    """
    f.eval()
    # get batch size
    bs = args.batch_size if y is None else y.size(0)
    # generate initial samples and buffer inds of those samples (if buffer is used)
    init_sample, buffer_inds = sample_p_0(device, replay_buffer, bs=bs, y=y)
    x_k = t.autograd.Variable(init_sample, requires_grad=True)
    # SGLD: gradient ascent on f plus Gaussian noise (t is the torch alias)
    for k in range(args.n_steps):
        f_prime = t.autograd.grad(f(x_k, y=y).sum(), [x_k], retain_graph=True)[0]
        x_k.data += args.sgld_lr * f_prime + args.sgld_std * t.randn_like(x_k)
    f.train()
    final_samples = x_k.detach()
    # update replay buffer
    if len(replay_buffer) > 0:
        replay_buffer[buffer_inds] = final_samples.cpu()
    return final_samples
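The loop above is a stochastic gradient Langevin dynamics (SGLD) update, x_{k+1} = x_k + lr * grad_x f(x_k) + std * noise. A self-contained toy sketch of the same update (the quadratic energy and the step sizes below are illustrative, not from the source):

import torch

def toy_sgld(n_steps=100, lr=0.01, std=0.01):
    # energy f(x) = -0.5 * ||x||^2, so SGLD drifts samples toward the origin
    x = torch.randn(16, 2, requires_grad=True)
    for _ in range(n_steps):
        f_x = -0.5 * (x ** 2).sum()
        grad = torch.autograd.grad(f_x, x)[0]
        x.data += lr * grad + std * torch.randn_like(x)
    return x.detach()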
Example 2: step
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def step(self, closure):
    # also requires: import math; ManifoldParameter, ManifoldTensor and
    # copy_or_set_ come from the surrounding (geoopt-style) library
    logp = closure()
    logp.backward()
    with torch.no_grad():
        for group in self.param_groups:
            for p in group["params"]:
                if isinstance(p, (ManifoldParameter, ManifoldTensor)):
                    manifold = p.manifold
                else:
                    manifold = self._default_manifold
                egrad2rgrad, retr = manifold.egrad2rgrad, manifold.retr
                epsilon = group["epsilon"]
                n = torch.randn_like(p).mul_(math.sqrt(epsilon))
                r = egrad2rgrad(p, 0.5 * epsilon * p.grad + n)
                # use copy only for a user-facing point
                copy_or_set_(p, retr(p, r))
                p.grad.zero_()
    if not self.burnin:
        self.steps += 1
        self.log_probs.append(logp.item())
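Reading the update (a hedged reconstruction, not an official derivation): the Euclidean quantity 0.5 * epsilon * grad log p + n, with n ~ N(0, epsilon * I), is first converted to a Riemannian gradient via egrad2rgrad and then applied with a retraction, i.e. p <- retr(p, egrad2rgrad(p, 0.5 * epsilon * p.grad + n)). On a Euclidean manifold this reduces to the familiar SGLD step p <- p + 0.5 * epsilon * grad + n.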
Example 3: anneal_Langevin_dynamics
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def anneal_Langevin_dynamics(self, x_mod, scorenet, sigmas, n_steps_each=100, step_lr=0.00002):
    # also requires: import numpy as np; import tqdm
    images = []
    with torch.no_grad():
        for c, sigma in tqdm.tqdm(enumerate(sigmas), total=len(sigmas),
                                  desc='annealed Langevin dynamics sampling'):
            labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
            labels = labels.long()
            step_size = step_lr * (sigma / sigmas[-1]) ** 2
            for s in range(n_steps_each):
                images.append(torch.clamp(x_mod, 0.0, 1.0).to('cpu'))
                noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)
                grad = scorenet(x_mod, labels)
                x_mod = x_mod + step_size * grad + noise
                # print("class: {}, step_size: {}, mean {}, max {}".format(
                #     c, step_size, grad.abs().mean(), grad.abs().max()))
    return images
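Two details worth noting in the annealed schedule: each noise level sigma_c gets its own step size alpha_c = step_lr * (sigma_c / sigma_L)^2, so coarser noise levels take larger steps, and the injected noise has standard deviation sqrt(2 * alpha_c). That makes every inner iteration the standard Langevin update x <- x + alpha_c * s_theta(x, c) + sqrt(2 * alpha_c) * z with z ~ N(0, I), where s_theta is the score network.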
Example 4: sliced_score_matching
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def sliced_score_matching(energy_net, samples, n_particles=1):
    # also requires: from torch import autograd
    dup_samples = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup_samples.requires_grad_(True)
    vectors = torch.randn_like(dup_samples)
    vectors = vectors / torch.norm(vectors, dim=-1, keepdim=True)

    logp = -energy_net(dup_samples).sum()
    grad1 = autograd.grad(logp, dup_samples, create_graph=True)[0]
    gradv = torch.sum(grad1 * vectors)
    loss1 = torch.sum(grad1 * vectors, dim=-1) ** 2 * 0.5
    grad2 = autograd.grad(gradv, dup_samples, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)

    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return loss.mean(), loss1.mean(), loss2.mean()
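The two terms estimate the sliced score matching objective E[0.5 * (v^T s(x))^2 + v^T (grad_x s(x)) v] with s = grad_x log p and random projection directions v. A hypothetical usage sketch, assuming the function above is in scope and autograd has been imported as shown in its comment; the two-layer energy network is illustrative:

import torch
import torch.nn as nn
from torch import autograd

energy_net = nn.Sequential(nn.Linear(2, 64), nn.Softplus(), nn.Linear(64, 1))
samples = torch.randn(8, 2)
loss, loss1, loss2 = sliced_score_matching(energy_net, samples, n_particles=4)
loss.backward()   # trains the energy net so its score matches the data score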
Example 5: sliced_score_estimation
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def sliced_score_estimation(score_net, samples, n_particles=1):
    # same as Example 4, but score_net predicts the score directly
    # also requires: from torch import autograd
    dup_samples = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup_samples.requires_grad_(True)
    vectors = torch.randn_like(dup_samples)
    vectors = vectors / torch.norm(vectors, dim=-1, keepdim=True)

    grad1 = score_net(dup_samples)
    gradv = torch.sum(grad1 * vectors)
    loss1 = torch.sum(grad1 * vectors, dim=-1) ** 2 * 0.5
    grad2 = autograd.grad(gradv, dup_samples, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)

    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return loss.mean(), loss1.mean(), loss2.mean()
Example 6: sliced_score_estimation_vr
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def sliced_score_estimation_vr(score_net, samples, n_particles=1):
    """
    Be careful if the shape of samples is not B x x_dim!
    """
    # also requires: from torch import autograd
    dup_samples = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup_samples.requires_grad_(True)
    vectors = torch.randn_like(dup_samples)

    grad1 = score_net(dup_samples)
    gradv = torch.sum(grad1 * vectors)
    grad2 = autograd.grad(gradv, dup_samples, create_graph=True)[0]

    grad1 = grad1.view(dup_samples.shape[0], -1)
    loss1 = torch.sum(grad1 * grad1, dim=-1) / 2.
    loss2 = torch.sum((vectors * grad2).view(dup_samples.shape[0], -1), dim=-1)

    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return loss.mean(), loss1.mean(), loss2.mean()
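The variance-reduced (VR) variant differs from Example 5 only in the first term: since E_v[(v^T s)^2] = ||s||^2 when v ~ N(0, I), it uses the exact 0.5 * ||s(x)||^2 instead of the projected estimate, removing the projection variance from that term. This is also why the vectors here are left unnormalized.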
Example 7: forward
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def forward(self, images, labels):
    r"""
    Overridden.
    """
    # also requires: import torch.nn as nn
    images = images.to(self.device)
    labels = labels.to(self.device)
    loss = nn.CrossEntropyLoss()

    # random start before the sign-gradient steps
    images = images + self.alpha * torch.randn_like(images).sign()

    for i in range(self.iters):
        images.requires_grad = True
        outputs = self.model(images)
        cost = loss(outputs, labels).to(self.device)

        grad = torch.autograd.grad(cost, images,
                                   retain_graph=False, create_graph=False)[0]
        adv_images = images + (self.eps - self.alpha) * grad.sign()
        images = torch.clamp(adv_images, min=0, max=1).detach_()

    adv_images = images
    return adv_images
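In equations, this is the R+FGSM attack: a random start x_0 = x + alpha * sign(n) with n ~ N(0, I), followed by sign-gradient steps x_{i+1} = clip_[0,1](x_i + (eps - alpha) * sign(grad_x L(model(x_i), y))). The random start is what distinguishes it from plain iterative FGSM, and it keeps the total perturbation within the eps budget.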
Example 8: expectation
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def expectation(self):
    # assumes: from torch.utils.data import DataLoader and
    # import torch.nn.functional as func in the surrounding module
    self.net.eval()
    with torch.no_grad():
        embedding = []
        batch_loader = DataLoader(
            self.data,
            batch_size=self.batch_size,
            shuffle=False
        )
        for point, *_ in batch_loader:
            features, mean, logvar = self.net(point.to(self.device))
            std = torch.exp(0.5 * logvar)
            sample = torch.randn_like(std).mul(std).add_(mean)  # reparameterized draw
            latent_point = func.adaptive_avg_pool2d(sample, 1)
            latent_point = latent_point.reshape(latent_point.size(0), -1)
            embedding.append(latent_point)
        embedding = torch.cat(embedding, dim=0)
        expectation = self.classifier(embedding)
    self.net.train()
    return expectation.to("cpu"), embedding.to("cpu")
Example 9: integrate
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def integrate(self, score, data, *args):
    # ag is torch.autograd; make_differentiable and clip_grad_by_norm
    # are helpers from the surrounding repository
    done = False
    count = 0
    step_count = self.steps if self.step > 0 else 10 * self.steps
    while not done:
        make_differentiable(data)
        make_differentiable(args)
        energy = score(data + self.noise * torch.randn_like(data), *args)
        if isinstance(energy, (list, tuple)):
            energy, *_ = energy
        gradient = ag.grad(energy, data, torch.ones_like(energy))[0]
        if self.max_norm:
            gradient = clip_grad_by_norm(gradient, self.max_norm)
        data = data - self.rate * gradient
        if self.clamp is not None:
            data = data.clamp(*self.clamp)
        data = data.detach()
        done = count >= step_count
        if self.target is not None:
            done = done and bool((energy.mean(dim=0) <= self.target).all())
        count += 1
        if (count + 1) % 500 == 0:
            data.random_()  # periodically reinitialize a stuck chain
    self.step += 1
    return data
Example 10: reparameterize
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def reparameterize(mu, logvar):
    std = (0.5 * logvar).exp()
    eps = torch.randn_like(std)
    return eps.mul(std) + mu
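This is the standard VAE reparameterization trick, z = mu + sigma * eps with sigma = exp(0.5 * logvar) and eps ~ N(0, I); sampling stays differentiable with respect to mu and logvar because the randomness enters only through eps. A quick usage sketch (the shapes are illustrative):

import torch

mu = torch.zeros(4, 8, requires_grad=True)
logvar = torch.zeros(4, 8, requires_grad=True)
z = reparameterize(mu, logvar)   # z ~ N(mu, exp(logvar)), elementwise
z.sum().backward()               # gradients flow back to mu and logvar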
Example 11: rsample
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def rsample(self, z_vecs, W_mean, W_var, perturb=True):
    batch_size = z_vecs.size(0)
    z_mean = W_mean(z_vecs)
    z_log_var = -torch.abs(W_var(z_vecs))
    kl_loss = -0.5 * torch.sum(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var)) / batch_size
    epsilon = torch.randn_like(z_mean).cuda()
    z_vecs = z_mean + torch.exp(z_log_var / 2) * epsilon if perturb else z_mean
    return z_vecs, kl_loss
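The kl_loss term is the closed-form KL divergence between the diagonal Gaussian N(mu, sigma^2) and the standard normal prior, KL = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2), averaged over the batch. The -torch.abs(...) wrapper forces log sigma^2 <= 0, i.e. sigma <= 1.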
Example 12: rsample
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def rsample(self, z_vecs, W_mean, W_var):
    # same as Example 11, but always perturbs
    batch_size = z_vecs.size(0)
    z_mean = W_mean(z_vecs)
    z_log_var = -torch.abs(W_var(z_vecs))
    kl_loss = -0.5 * torch.sum(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var)) / batch_size
    epsilon = torch.randn_like(z_mean).cuda()
    z_vecs = z_mean + torch.exp(z_log_var / 2) * epsilon
    return z_vecs, kl_loss
Example 13: refined_logits
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def refined_logits(self, x, n_steps=args.n_steps_refine):
    # duplicate each input into n_dup_chains noisy copies, refine them,
    # and average the resulting logits per input
    xs = x.size()
    dup_x = x.view(xs[0], 1, xs[1], xs[2], xs[3]).repeat(1, args.n_dup_chains, 1, 1, 1)
    dup_x = dup_x.view(xs[0] * args.n_dup_chains, xs[1], xs[2], xs[3])
    dup_x = dup_x + torch.randn_like(dup_x) * args.sigma
    refined = self.refine(dup_x, n_steps=n_steps, detach=False)
    logits = self.logits(refined)
    logits = logits.view(x.size(0), args.n_dup_chains, logits.size(1))
    logits = logits.mean(1)
    return logits
Example 14: refine
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def refine(self, x, n_steps=args.n_steps_refine, detach=True):
    # runs a Markov chain seeded at x; use n_steps=10
    x_k = torch.autograd.Variable(x, requires_grad=True) if detach else x
    # SGLD
    for k in range(n_steps):
        f_prime = torch.autograd.grad(self.f(x_k).sum(), [x_k], retain_graph=True)[0]
        x_k.data += f_prime + args.sgld_sigma * torch.randn_like(x_k)
    final_samples = x_k.detach() if detach else x_k
    return final_samples
Example 15: get_sample_q
# Required import: import torch [as alias]
# Or: from torch import randn_like [as alias]
def get_sample_q(args, device):
    # t is the torch alias; init_random is a repo helper that draws
    # image-shaped noise for fresh chains
    def sample_p_0(replay_buffer, bs, y=None):
        if len(replay_buffer) == 0:
            return init_random(args, bs), []
        buffer_size = len(replay_buffer) if y is None else len(replay_buffer) // args.n_classes
        inds = t.randint(0, buffer_size, (bs,))
        # if conditional, convert inds to class-conditional inds
        if y is not None:
            inds = y.cpu() * buffer_size + inds
            assert not args.uncond, "Can't draw conditional samples without giving me y"
        buffer_samples = replay_buffer[inds]
        random_samples = init_random(args, bs)
        choose_random = (t.rand(bs) < args.reinit_freq).float()[:, None, None, None]
        samples = choose_random * random_samples + (1 - choose_random) * buffer_samples
        return samples.to(device), inds

    def sample_q(f, replay_buffer, y=None, n_steps=args.n_steps):
        """This function takes in replay_buffer so we have the option to sample from
        scratch (i.e. replay_buffer == []). See test_wrn_ebm.py for an example.
        """
        f.eval()
        # get batch size
        bs = args.batch_size if y is None else y.size(0)
        # generate initial samples and buffer inds of those samples (if buffer is used)
        init_sample, buffer_inds = sample_p_0(replay_buffer, bs=bs, y=y)
        x_k = t.autograd.Variable(init_sample, requires_grad=True)
        # SGLD
        for k in range(n_steps):
            f_prime = t.autograd.grad(f(x_k, y=y).sum(), [x_k], retain_graph=True)[0]
            x_k.data += args.sgld_lr * f_prime + args.sgld_std * t.randn_like(x_k)
        f.train()
        final_samples = x_k.detach()
        # update replay buffer
        if len(replay_buffer) > 0:
            replay_buffer[buffer_inds] = final_samples.cpu()
        return final_samples

    return sample_q
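A hypothetical end-to-end sketch, assuming Example 15 is pasted into the same script; the ToyEnergy module, the image-shaped init_random helper, and all hyperparameter values below are illustrative stand-ins for the repo's real WideResNet energy model and config:

import torch as t
import torch.nn as nn
from types import SimpleNamespace

class ToyEnergy(nn.Module):
    def forward(self, x, y=None):
        return -(x ** 2).flatten(1).sum(1)   # quadratic toy energy, one value per sample

args = SimpleNamespace(batch_size=8, n_steps=20, sgld_lr=1.0, sgld_std=0.01,
                       reinit_freq=0.05, uncond=True, n_classes=10)

def init_random(args, bs):
    return t.rand(bs, 3, 32, 32) * 2 - 1     # uniform init in [-1, 1]

sample_q = get_sample_q(args, device=t.device('cpu'))
samples = sample_q(ToyEnergy(), replay_buffer=[])   # empty buffer: sample from scratch
print(samples.shape)                                # torch.Size([8, 3, 32, 32])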