This article compiles typical usage examples of the torch.multinomial method in Python. If you have been wondering what torch.multinomial does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the torch module it belongs to.
Fifteen code examples of torch.multinomial are presented below, ordered by popularity by default.
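Before diving into the examples, here is a minimal sketch of the call itself (the weight values are purely illustrative): torch.multinomial treats a 1-D tensor of non-negative weights as an unnormalized distribution over indices and returns sampled index positions.

import torch

weights = torch.tensor([0.1, 0.5, 0.4])  # non-negative weights; need not sum to 1
two = torch.multinomial(weights, num_samples=2)                     # two distinct indices
five = torch.multinomial(weights, num_samples=5, replacement=True)  # repeats allowed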
Example 1: create_random_binary_mask
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def create_random_binary_mask(features):
    """
    Creates a random binary mask of a given dimension with half of its entries
    randomly set to 1s.

    :param features: Dimension of mask.
    :return: Binary mask with half of its entries set to 1s, of type torch.Tensor.
    """
    mask = torch.zeros(features).byte()
    weights = torch.ones(features).float()
    # Sample ceil(features / 2) distinct positions using uniform weights.
    num_samples = features // 2 if features % 2 == 0 else features // 2 + 1
    indices = torch.multinomial(
        input=weights,
        num_samples=num_samples,
        replacement=False
    )
    mask[indices] += 1
    return mask
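A quick sanity check of the helper above (the exact positions vary per run):

mask = create_random_binary_mask(6)
print(mask)  # e.g. tensor([0, 1, 1, 0, 1, 0], dtype=torch.uint8): exactly three ones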
Example 2: sample
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def sample(self, labels):
    """
    labels: [b1, b2]
    Return
        true_log_probs: [b1, b2]
        samp_log_probs: [n_sample]
        neg_samples: [n_sample]
    """
    # neg_samples = torch.empty(0).long()
    n_sample = self.n_sample
    n_tries = 2 * n_sample
    with torch.no_grad():
        neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()
        device = labels.device
        neg_samples = neg_samples.to(device)
        true_log_probs = self.log_q[labels].to(device)
        samp_log_probs = self.log_q[neg_samples].to(device)
        return true_log_probs, samp_log_probs, neg_samples
Author: 649453932, Project: Bert-Chinese-Text-Classification-Pytorch, Lines: 22, Source: modeling_transfo_xl_utilities.py
Example 3: random_tensor
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def random_tensor(inputs, output=reals()):
    """
    Creates a random :class:`funsor.tensor.Tensor` with given inputs and output.
    """
    backend = get_backend()
    assert isinstance(inputs, OrderedDict)
    assert isinstance(output, Domain)
    shape = tuple(d.dtype for d in inputs.values()) + output.shape
    if output.dtype == 'real':
        data = randn(shape)
    else:
        num_elements = reduce(operator.mul, shape, 1)
        if backend == "torch":
            import torch

            data = torch.multinomial(torch.ones(output.dtype), num_elements, replacement=True)
        else:
            data = np.random.choice(output.dtype, num_elements, replace=True)
        data = data.reshape(shape)
    return Tensor(data, inputs, output.dtype)
Example 4: _sqrt_hessian_sampled
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def _sqrt_hessian_sampled(self, module, g_inp, g_out, mc_samples=1):
    self._check_2nd_order_parameters(module)

    M = mc_samples
    C = module.input0.shape[1]

    probs = self._get_probs(module)

    V_dim = 0
    probs_unsqueezed = probs.unsqueeze(V_dim).repeat(M, 1, 1)

    multi = multinomial(probs, M, replacement=True)
    classes = one_hot(multi, num_classes=C)
    classes = einsum("nvc->vnc", classes).float()

    sqrt_mc_h = (probs_unsqueezed - classes) / sqrt(M)

    if module.reduction == "mean":
        N = module.input0.shape[0]
        sqrt_mc_h /= sqrt(N)

    return sqrt_mc_h
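For intuition: the sampled classes make sqrt_mc_h a Monte Carlo square root of the softmax cross-entropy Hessian diag(p) - p p^T. A small self-contained check (illustrative values, not part of the original source) that the estimator converges for large M:

import torch

p = torch.softmax(torch.randn(4), dim=0)                # class probabilities
M = 100_000
draws = torch.multinomial(p, M, replacement=True)       # M class samples
onehot = torch.nn.functional.one_hot(draws, 4).float()  # (M, 4)
S = (p.unsqueeze(0) - onehot) / M ** 0.5                # MC square-root columns
approx = S.t() @ S                                      # approximates diag(p) - p p^T
exact = torch.diag(p) - torch.outer(p, p)
print((approx - exact).abs().max())                     # small for large M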
Example 5: gen_step
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def gen_step(self, src, rel, dst, n_sample=1, temperature=1.0, train=True):
    if not hasattr(self, 'opt'):
        self.opt = Adam(self.mdl.parameters(), weight_decay=self.weight_decay)
    n, m = dst.size()
    rel_var = Variable(rel.cuda())
    src_var = Variable(src.cuda())
    dst_var = Variable(dst.cuda())

    logits = self.mdl.prob_logit(src_var, rel_var, dst_var) / temperature
    probs = nnf.softmax(logits, dim=1)
    row_idx = torch.arange(0, n).type(torch.LongTensor).unsqueeze(1).expand(n, n_sample)
    sample_idx = torch.multinomial(probs, n_sample, replacement=True)
    sample_srcs = src[row_idx, sample_idx.data.cpu()]
    sample_dsts = dst[row_idx, sample_idx.data.cpu()]
    rewards = yield sample_srcs, sample_dsts
    if train:
        self.mdl.zero_grad()
        log_probs = nnf.log_softmax(logits, dim=1)
        reinforce_loss = -torch.sum(Variable(rewards) * log_probs[row_idx.cuda(), sample_idx.data])
        reinforce_loss.backward()
        self.opt.step()
        self.mdl.constraint()
    yield None
Example 6: select_paths
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def select_paths(self, logprobs, prior_scores, current_length):
    # Unlike the other treesearch methods, we have to switch to linspace
    # for the probabilities in order to compute the CDF.
    probs = torch.softmax(logprobs, dim=-1)
    sprobs, sinds = probs.sort(dim=-1, descending=True)
    # The subtraction here is to get the exclusive prefix sum,
    # to guarantee the first element is not masked
    mask = (sprobs.cumsum(dim=-1) - sprobs) >= self.p
    sprobs[mask] = 0
    sprobs.div_(sprobs.sum(dim=-1).unsqueeze(1))
    choices = torch.multinomial(sprobs, 1)[:, 0]
    hyp_ids = torch.arange(logprobs.size(0)).to(logprobs.device)
    tok_ids = sinds[hyp_ids, choices]
    # Convert back to logspace.
    scores = sprobs[hyp_ids, choices].log()
    best_scores = prior_scores.expand_as(scores) + scores
    return (hyp_ids, tok_ids, best_scores)
Example 7: sample_sequence
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def sample_sequence(model, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, device='cuda', sample=True):
    if start_token is None:
        assert context is not None, 'Specify exactly one of start_token and context!'
        context = torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0).repeat(batch_size, 1)
    else:
        assert context is None, 'Specify exactly one of start_token and context!'
        context = torch.full((batch_size, 1), start_token, device=device, dtype=torch.long)
    prev = context
    output = context
    past = None
    with torch.no_grad():
        for i in trange(length):
            logits, past = model(prev, past=past)
            logits = logits[:, -1, :] / temperature
            logits = top_k_logits(logits, k=top_k)
            # These are probabilities (not log-probabilities), which is what
            # torch.multinomial expects.
            probs = F.softmax(logits, dim=-1)
            if sample:
                prev = torch.multinomial(probs, num_samples=1)
            else:
                _, prev = torch.topk(probs, k=1, dim=-1)
            output = torch.cat((output, prev), dim=1)
    return output
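The snippet above relies on a top_k_logits helper defined elsewhere in its file; a minimal sketch of the usual filter (an assumption based on the common GPT-2 sampling recipe, not the original source) is:

def top_k_logits(logits, k):
    # Keep the k largest logits per row and push the rest to -inf,
    # so softmax assigns them zero probability. k == 0 disables filtering.
    if k == 0:
        return logits
    values, _ = torch.topk(logits, k)
    min_values = values[:, -1].unsqueeze(-1)
    return torch.where(logits < min_values, torch.full_like(logits, float('-inf')), logits)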
Example 8: decode
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def decode(self, out):
    """
    Args:
        out: unnormalized word distribution [batch_size, vocab_size]
    Return:
        x: word_index [batch_size]
    """
    # Sample next word from multinomial word distribution
    if self.sample:
        # x: [batch_size] - word index (next input)
        x = torch.multinomial(self.softmax(out / self.temperature), 1).view(-1)
    # Greedy sampling
    else:
        # x: [batch_size] - word index (next input)
        _, x = out.max(dim=1)
    return x
Author: ctr4si, Project: A-Hierarchical-Latent-Structure-for-Variational-Conversation-Modeling, Lines: 20, Source: decoder.py
Example 9: generate
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def generate(model, idx2word, word_len=200, temperature=1.0):
    """Generate a given amount of text; temperature combined with multinomial sampling adds diversity."""
    model.eval()
    hidden = model.init_hidden(1)  # batch_size of 1
    # Randomly pick one character as the starting input.
    inputs = Variable(torch.rand(1, 1).mul(len(idx2word)).long(), volatile=True)
    if use_cuda:
        inputs = inputs.cuda()
    word_list = []
    for i in range(word_len):  # generate character by character
        output, hidden = model(inputs, hidden)
        word_weights = output.squeeze().data.div(temperature).exp().cpu()
        # Resample from the word weights to add diversity; taking the argmax
        # instead tends to get stuck in endless loops of frequent characters.
        word_idx = torch.multinomial(word_weights, 1)[0]
        inputs.data.fill_(word_idx)  # feed the newly sampled character back in
        word = idx2word[word_idx]
        word_list.append(word)
    return word_list
Example 10: _draw_choices
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def _draw_choices(self, probs, n_choices):
    """
    Draw `n_choices` samples from `probs`.

    References:
        Code from https://github.com/BlackHC/BatchBALD/blob/master/src/torch_utils.py#L187

    Returns:
        choices: B... x `n_choices`
    """
    probs = probs.permute(0, 2, 1)
    probs_B_C = probs.reshape((-1, probs.shape[-1]))

    # samples: Ni... x draw_per_xx
    choices = torch.multinomial(probs_B_C,
                                num_samples=n_choices, replacement=True)

    choices_b_M = choices.reshape(list(probs.shape[:-1]) + [n_choices])
    return choices_b_M.long()
Example 11: logits2words
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def logits2words(self, output, decoded_words, dataset, sample_size):
    '''
    * Decode words from logits output at a time step AND put decoded words in final results *
    * take argmax if sample size == 1
    '''
    batch_size = output.size(0)
    if sample_size == 1:  # take argmax directly w/o sampling
        topv, topi = F.softmax(output, dim=1).data.topk(1)  # both (batch_size, 1)
    else:  # sample over word distribution
        topv, topi = [], []
        word_dis = F.softmax(output, dim=1)  # (batch_size, output_size)
        # sample from the top of the output distribution for word variation
        n_candidate = 3
        word_dis_sort, idx_of_idx = torch.sort(word_dis, dim=1, descending=True)
        word_dis_sort = word_dis_sort[:, :n_candidate]
        idx_of_idx = idx_of_idx[:, :n_candidate]
        sample_idx = torch.multinomial(word_dis_sort, 1)  # (batch_size, 1)
        for b in range(batch_size):
            i = int(sample_idx[b])
            idx = int(idx_of_idx[b][i])
            prob = float(word_dis[b][idx])
            topi.append(idx)
            topv.append(prob)
        topv = torch.FloatTensor(topv).view(batch_size, 1)
        topi = torch.LongTensor(topi).view(batch_size, 1)

    decoded_words_t = np.zeros((batch_size, self.output_size))
    for b in range(batch_size):
        idx = topi[b][0]
        word = dataset.index2word[idx.item()]
        decoded_words[b] += (word + ' ')
        decoded_words_t[b][idx] = 1
    decoded_words_t = Variable(torch.from_numpy(decoded_words_t.astype(np.float32)))
    if self.USE_CUDA:
        decoded_words_t = decoded_words_t.cuda()

    return decoded_words_t
Example 12: multinomial
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def multinomial(w: torch.Tensor, normalized=False):
    """
    Performs multinomial resampling.

    :param w: The weights to use for resampling
    :param normalized: Whether the weights are already normalized
    :return: Resampled indices
    """
    return torch.multinomial(normalize(w) if not normalized else w, w.shape[-1], replacement=True)
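A hedged usage sketch (it assumes this module's normalize helper rescales raw weights to sum to 1):

w = torch.tensor([0.2, 3.0, 1.0, 0.8])  # unnormalized particle weights
particles = torch.randn(4)              # toy particle states
idx = multinomial(w)                    # indices drawn in proportion to w, shape (4,)
resampled = particles[idx]              # heavy particles duplicated, light ones dropped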
Example 13: residual
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def residual(w: torch.Tensor, normalized=False):
    """
    Performs residual resampling. Inspired by the solution provided by the package
    "particles" on GitHub, authored by the user "nchopin".

    :param w: The weights to use for resampling
    :param normalized: Whether the weights are already normalized
    :return: Resampled indices
    """
    if w.dim() > 1:
        raise NotImplementedError('Not implemented for multidimensional arrays!')

    w = normalize(w) if not normalized else w

    # ===== Calculate the number of deterministic copies to take ===== #
    mw = (w.shape[-1] * w)
    floored = mw.floor()
    res = mw - floored

    # ===== Make flat ===== #
    out = torch.ones_like(w, dtype=torch.long)

    # ===== Get the indexes of those to copy deterministically ===== #
    numelems = floored.sum(-1)
    res /= numelems

    intpart = floored.long()
    ranged = torch.arange(w.shape[-1], dtype=intpart.dtype, device=w.device) * out

    # ===== Repeat the integers the deterministic number of times ===== #
    modded = ranged.repeat_interleave(intpart)
    aslong = numelems.long()

    out[:aslong] = modded

    if numelems == w.shape[-1]:
        return out

    # ===== Fill the remainder by multinomial sampling from the residuals ===== #
    out[aslong:] = torch.multinomial(res, w.shape[-1] - aslong, replacement=True)

    return out
Example 14: sample
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def sample(self, num=1, device=None):
    assert self.natural_ordering
    assert self.input_bins and self.nout > self.nin

    with torch.no_grad():
        sampled = torch.zeros((num, self.nin), device=device)
        indices = np.cumsum(self.input_bins)
        for i in range(self.nin):
            logits = self.forward(sampled)
            s = torch.multinomial(
                torch.softmax(self.logits_for_i(i, logits), -1), 1)
            sampled[:, i] = s.view(-1,)
        return sampled
Example 15: __iter__
# Required import: import torch [as alias]
# Or: from torch import multinomial [as alias]
def __iter__(self):
    return iter(torch.multinomial(self.weights, self.num_samples, self.replacement))
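This __iter__ is the core of a weighted sampler in the style of torch.utils.data.WeightedRandomSampler. A minimal self-contained sketch of the surrounding class (the class name and constructor are assumptions, not the original source):

class WeightedSampler:
    def __init__(self, weights, num_samples, replacement=True):
        # Per-element sampling weights; torch.multinomial normalizes them.
        self.weights = torch.as_tensor(weights, dtype=torch.double)
        self.num_samples = num_samples
        self.replacement = replacement

    def __iter__(self):
        return iter(torch.multinomial(self.weights, self.num_samples, self.replacement))

    def __len__(self):
        return self.num_samples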