This article collects typical usage examples of the Python method torch.normal. If you have been wondering what torch.normal does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from the torch module that this method belongs to.
The following presents 15 code examples of torch.normal, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
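Before diving into the examples, here is a minimal illustrative sketch (not taken from any of the projects below) of the torch.normal call signatures that recur throughout this page; the shapes and values are arbitrary:

import torch

# Element-wise mean/std tensors: one sample is drawn per element.
a = torch.normal(mean=torch.zeros(2, 3), std=torch.ones(2, 3))

# Scalar mean and std with an explicit output shape.
b = torch.normal(0.0, 1.0, size=(2, 3))

# Scalar mean broadcast against a std tensor.
c = torch.normal(mean=0.0, std=torch.full((2, 3), 0.1))

print(a.shape, b.shape, c.shape)  # all three are torch.Size([2, 3])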
Example 1: get_sparse_input
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def get_sparse_input(x_input):
    """
    Get a sparse matrix from x_input: [T, B, V] where x_sparse[t][b][x_input[t][b]] = 1
    and all other entries are left (effectively) at zero.
    :param x_input: *Tensor* of [T, B]
    :return: *Tensor* of shape [B, T, V]
    """
    # (also assumes: import time, plus a project config module `cfg` providing vocab_size)
    # indices that should have no effect on copying
    sw = time.time()
    print('sparse input start: %s' % sw)
    ignore_index = [0]
    result = torch.normal(mean=0, std=torch.zeros(x_input.size(0), x_input.size(1), cfg.vocab_size))
    for t in range(x_input.size(0)):
        for b in range(x_input.size(1)):
            if x_input[t][b] not in ignore_index:
                result[t][b][x_input[t][b]] = 1.0
    print('sparse input end %s' % time.time())
    return result.transpose(0, 1)
Example 2: _sqrt_hessian_sampled
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def _sqrt_hessian_sampled(self, module, g_inp, g_out, mc_samples=1):
    """A Monte-Carlo estimate of the square root of the Hessian.

    Args:
        module: (torch.nn.MSELoss) module.
        g_inp: Gradient of the loss w.r.t. the input.
        g_out: Gradient of the loss w.r.t. the output.
        mc_samples: (int, optional) Number of MC samples to use. Default: 1.

    Returns:
        tensor: Sampled square-root Hessian factors of shape [mc_samples, N, D].
    """
    # (also assumes: from math import sqrt)
    N, D = module.input0.shape
    samples = normal(0, 1, size=[mc_samples, N, D], device=module.input0.device)
    samples *= sqrt(2) / sqrt(mc_samples)

    if module.reduction == "mean":
        samples /= sqrt(module.input0.numel())

    return samples
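As a hedged sanity check of the scaling used above (this snippet is illustrative and not part of the original library): for MSELoss with reduction="sum", the Hessian with respect to the input is 2 times the identity per element, so squaring and summing the Monte-Carlo factors should come out close to 2.

import torch

mc_samples, N, D = 10000, 3, 4
samples = torch.normal(0, 1, size=[mc_samples, N, D])
samples *= (2 ** 0.5) / (mc_samples ** 0.5)

# Summing the squared factors over the MC dimension approximates the
# diagonal Hessian entries; each value should be close to 2.0.
approx_diag = (samples ** 2).sum(dim=0)
print(approx_diag)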
Example 3: construct_edge_mask
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def construct_edge_mask(self, num_nodes, init_strategy="normal", const_val=1.0):
    # (also assumes: import math; from torch import nn)
    mask = nn.Parameter(torch.FloatTensor(num_nodes, num_nodes))
    if init_strategy == "normal":
        std = nn.init.calculate_gain("relu") * math.sqrt(
            2.0 / (num_nodes + num_nodes)
        )
        with torch.no_grad():
            mask.normal_(1.0, std)
            # mask.clamp_(0.0, 1.0)
    elif init_strategy == "const":
        nn.init.constant_(mask, const_val)

    if self.args.mask_bias:
        mask_bias = nn.Parameter(torch.FloatTensor(num_nodes, num_nodes))
        nn.init.constant_(mask_bias, 0.0)
    else:
        mask_bias = None

    return mask, mask_bias
Example 4: inverse
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def inverse(self, melspectrogram, iters=1000):
    # (also assumes: from tqdm import tqdm)
    x = torch.normal(0, 1e-6, size=((melspectrogram.size(1) - 1) * self.hp.audio.hop_length, )).cuda().requires_grad_()
    optimizer = torch.optim.LBFGS([x], tolerance_change=1e-16)
    melspectrogram = self.post_spec(melspectrogram)

    def closure():
        optimizer.zero_grad()
        mel = self.get_mel(x)
        loss = self.criterion(mel, melspectrogram)
        loss.backward()
        return loss

    with tqdm(range(iters)) as pbar:
        for i in pbar:
            optimizer.step(closure=closure)
            pbar.set_postfix(loss=self.criterion(self.get_mel(x), melspectrogram).item())

    return x, self.pre_spec(self.get_mel(x))
Example 5: random_masking
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def random_masking(self, batch_images, batch_mask, device):
    """
    With probability 10% we keep the image unchanged;
    with probability 10% we replace the masked region with normally distributed noise;
    with probability 80% we set the masked region to 0.
    :param batch_images: images to be masked
    :param batch_mask: mask region
    :param device:
    :return: masked images
    """
    # (also assumes: import random)
    # TODO: masking is currently disabled -- the early return skips the logic below
    return batch_images

    temp = random.random()
    if temp > 0.1:
        batch_images = batch_images * batch_mask.unsqueeze(1).float()
        if temp < 0.2:
            batch_images = batch_images + (
                ((-batch_mask.unsqueeze(1).float()) + 1)
                * torch.normal(mean=0.5, std=torch.ones(batch_images.shape)).to(device)
            )
    return batch_images
Example 6: sample
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def sample(self, n):
    # (also assumes: import numpy as np)
    self.generator.eval()

    output_info = self.transformer.output_info
    steps = n // self.batch_size + 1
    data = []
    for i in range(steps):
        mean = torch.zeros(self.batch_size, self.embedding_dim)
        std = mean + 1
        noise = torch.normal(mean=mean, std=std).to(self.device)
        fake = self.generator(noise, output_info)
        data.append(fake.detach().cpu().numpy())

    data = np.concatenate(data, axis=0)
    data = data[:n]
    return self.transformer.inverse_transform(data)
Example 7: sample
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def sample(self, samples):
    # (also assumes: import numpy as np)
    self.decoder.eval()

    steps = samples // self.batch_size + 1
    data = []
    for _ in range(steps):
        mean = torch.zeros(self.batch_size, self.embedding_dim)
        std = mean + 1
        noise = torch.normal(mean=mean, std=std).to(self.device)
        fake, sigmas = self.decoder(noise)
        fake = torch.tanh(fake)
        data.append(fake.detach().cpu().numpy())

    data = np.concatenate(data, axis=0)
    data = data[:samples]
    return self.transformer.inverse_transform(data, sigmas.detach().cpu().numpy())
Example 8: sample
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def sample(self, n):
    # (also assumes: import numpy as np)
    self.generator.eval()
    self.decoder.eval()

    steps = n // self.batch_size + 1
    data = []
    for i in range(steps):
        mean = torch.zeros(self.batch_size, self.random_dim)
        std = mean + 1
        noise = torch.normal(mean=mean, std=std).to(self.device)
        emb = self.generator(noise)
        fake = self.decoder(emb, self.transformer.output_info)
        fake = torch.sigmoid(fake)
        data.append(fake.detach().cpu().numpy())

    data = np.concatenate(data, axis=0)
    data = data[:n]
    return self.transformer.inverse_transform(data)
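A side note on the pattern shared by Examples 6-8: drawing noise from a zero-mean tensor with unit std is just a standard normal sample, so torch.randn is an equivalent, shorter way to write it. A small hedged sketch with made-up dimensions:

import torch

batch_size, embedding_dim = 32, 128

# The pattern used in the examples above ...
mean = torch.zeros(batch_size, embedding_dim)
std = mean + 1
noise_a = torch.normal(mean=mean, std=std)

# ... draws from the same standard normal distribution as torch.randn.
noise_b = torch.randn(batch_size, embedding_dim)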
Example 9: train_advreg_mmd
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def train_advreg_mmd(iter_cnt, encoder, gan_g, gan_d, corpus_loader, args, optimizer_reg):
    # (also assumes: from torch.autograd import Variable)
    encoder.train()
    gan_g.train()
    gan_d.train()

    # train gan_disc
    for batch, labels in corpus_loader:
        optimizer_reg.zero_grad()
        batch = Variable(batch.cuda())
        z_real_hidden = encoder(batch)
        # `mean=` in current PyTorch (very old releases spelled this keyword `means=`)
        z_gauss = torch.normal(mean=torch.zeros(batch.size()),
                               std=args.noise_radius)
        z_gauss = Variable(z_gauss.cuda())
        z_gauss_hidden = gan_g(z_gauss)

        loss_ar = gan_d(z_real_hidden, z_gauss_hidden)
        loss_ar.backward()
        optimizer_reg.step()
Example 10: z2dec
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def z2dec(self, last_h, requires_grad):
    # (also assumes: import torch as th)
    p_mu, p_logvar = self.c2z(last_h)

    if requires_grad:
        sample_z = self.gauss_connector(p_mu, p_logvar)
        joint_logpz = None
    else:
        sample_z = th.normal(p_mu, th.sqrt(th.exp(p_logvar))).detach()
        logprob_sample_z = self.gaussian_logprob(p_mu, p_logvar, sample_z)
        joint_logpz = th.sum(logprob_sample_z.squeeze(0), dim=1)

    dec_init_state = self.z_embedding(sample_z)
    attn_context = None

    if self.config.dec_rnn_cell == 'lstm':
        dec_init_state = tuple([dec_init_state, dec_init_state])

    return dec_init_state, attn_context, joint_logpz
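For context on the branch above: the detached torch.normal draw yields a sample that carries no gradient back into p_mu or p_logvar, which is why a separate gauss_connector path is used when gradients are required. A minimal hedged sketch of the two options, with illustrative names rather than the repository's:

import torch

mu = torch.zeros(4, 8, requires_grad=True)
logvar = torch.zeros(4, 8, requires_grad=True)
std = torch.sqrt(torch.exp(logvar))

# Option 1: direct draw, detached -- no gradient flows into mu or logvar.
z_no_grad = torch.normal(mu, std).detach()

# Option 2: reparameterization -- z = mu + std * eps keeps the autograd graph intact.
eps = torch.randn_like(std)
z_with_grad = mu + std * eps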
Example 11: encode
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def encode(self, indices, lengths, noise):
    # (also assumes: from torch.nn.utils.rnn import pack_padded_sequence; from torch.autograd import Variable)
    embeddings = self.embedding(indices)
    packed_embeddings = pack_padded_sequence(input=embeddings,
                                             lengths=lengths,
                                             batch_first=True)

    packed_output, state = self.encoder(packed_embeddings)
    hidden = state[0][-1]
    hidden = hidden / torch.norm(hidden, p=2, dim=1, keepdim=True)

    if noise and self.noise_r > 0:
        # `mean=` in current PyTorch (very old releases spelled this keyword `means=`)
        gauss_noise = torch.normal(mean=torch.zeros(hidden.size()),
                                   std=self.noise_r)
        hidden = hidden + Variable(gauss_noise.cuda())

    return hidden
Example 12: forward
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def forward(self, input):
    """
    Forward pass with all regularized connections and random activations (Bayesian mode).
    Typically used during training.
    """
    # (also assumes: import torch.nn.functional as F)
    if not self.training:
        return F.linear(input, self.weights_clipped, self.bias)

    clip_mask = self.get_clip_mask()
    W = self.weight
    zeros = torch.zeros_like(W)
    mu = input.matmul(W.t())
    eps = 1e-8
    log_alpha = self.clip(self.log_alpha)
    si = torch.sqrt((input * input) \
        .matmul(((torch.exp(log_alpha) * self.weight * self.weight) + eps).t()))
    activation = mu + torch.normal(torch.zeros_like(mu), torch.ones_like(mu)) * si
    return activation + self.bias
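The torch.normal call above appears to implement the sampling step of the local reparameterization trick: each pre-activation is drawn as mu + eps * si with eps ~ N(0, 1). A standalone hedged illustration of just that step, with made-up tensor shapes:

import torch

mu = torch.randn(8, 16)        # per-pre-activation mean
si = torch.rand(8, 16) + 1e-8  # per-pre-activation standard deviation
eps = torch.normal(torch.zeros_like(mu), torch.ones_like(mu))
activation = mu + eps * si     # one noisy sample per pre-activation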
Example 13: setUp
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def setUp(self):
    # (also assumes: from torch.autograd import Variable, plus a module-level `cuda` flag and `stat` helpers)
    batch_size = 2
    rv_dimension = 5

    p = torch.normal(torch.zeros(batch_size, rv_dimension), torch.ones(batch_size, rv_dimension))
    p_pos = torch.abs(torch.normal(torch.zeros(batch_size, rv_dimension), torch.ones(batch_size, rv_dimension)))
    p_pos = torch.clamp(p_pos, 0.1, 0.9)

    if cuda:
        p = p.cuda()
        p_pos = p_pos.cuda()
    p = Variable(p)
    p_pos = Variable(p_pos)

    self.rv = [
        stat.Normal(size=(batch_size, rv_dimension), cuda=cuda),
        stat.Normal(p, p_pos),
        stat.Categorical(size=(batch_size, rv_dimension), cuda=cuda),
        stat.Categorical(p_pos / torch.sum(p_pos, 1).expand_as(p_pos)),
        stat.Bernoulli(size=(batch_size, rv_dimension), cuda=cuda),
        stat.Bernoulli(p_pos),
        stat.Uniform(size=(batch_size, rv_dimension), cuda=cuda)
    ]
Example 14: truncated_normal
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def truncated_normal(model):
    # (also assumes: import math)
    std = math.sqrt(2. / (model.in_features + model.out_features))
    if model.bias is not None:
        model.bias.data.zero_()
    model.weight.data.normal_(std=std)
    # resample any entries outside [-2*std, 2*std] until none remain
    truncate_me = (model.weight.data > 2. * std) | (model.weight.data < -2. * std)
    while truncate_me.sum() > 0:
        model.weight.data[truncate_me] = torch.normal(std=std * torch.ones(truncate_me.sum()))
        truncate_me = (model.weight.data > 2. * std) | (model.weight.data < -2. * std)
    return model
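As a side note, recent PyTorch versions ship torch.nn.init.trunc_normal_, which can replace the manual resampling loop above; a hedged sketch (verify that the function exists in the PyTorch release you use):

import math
import torch.nn as nn

linear = nn.Linear(64, 32)
std = math.sqrt(2.0 / (linear.in_features + linear.out_features))
if linear.bias is not None:
    nn.init.zeros_(linear.bias)
# Weights are drawn from N(0, std^2) truncated to [-2*std, 2*std].
nn.init.trunc_normal_(linear.weight, mean=0.0, std=std, a=-2.0 * std, b=2.0 * std)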
Example 15: forward_rl
# Required import: import torch [as alias]
# Or: from torch import normal [as alias]
def forward_rl(self, data_feed, max_words, temp=0.1):
    # (also assumes: import torch as th, plus project helpers np2var / LONG / FLOAT)
    ctx_lens = data_feed['context_lens']  # (batch_size, )
    short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)
    bs_label = self.np2var(data_feed['bs'], FLOAT)  # (batch_size, max_ctx_len, max_utt_len)
    db_label = self.np2var(data_feed['db'], FLOAT)  # (batch_size, max_ctx_len, max_utt_len)
    batch_size = len(ctx_lens)

    utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))

    # create decoder initial states
    enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)
    p_mu, p_logvar = self.c2z(enc_last)
    sample_z = th.normal(p_mu, th.sqrt(th.exp(p_logvar))).detach()
    logprob_sample_z = self.gaussian_logprob(p_mu, self.zero, sample_z)
    joint_logpz = th.sum(logprob_sample_z, dim=1)

    # pack attention context
    dec_init_state = self.z_embedding(sample_z.unsqueeze(0))
    attn_context = None

    # decode
    if self.config.dec_rnn_cell == 'lstm':
        dec_init_state = tuple([dec_init_state, dec_init_state])

    logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,
                                             dec_init_state=dec_init_state,
                                             attn_context=attn_context,
                                             vocab=self.vocab,
                                             max_words=max_words,
                                             temp=0.1)  # note: the literal 0.1 is passed here, not the `temp` argument
    return logprobs, outs, joint_logpz, sample_z