This article collects and organizes typical usage examples of the torch.multinomial function in Python. If you have been struggling with questions like "what exactly does multinomial do?" or "how do I use multinomial in practice?", the hand-picked code examples here may help.
Below are 15 code examples of the multinomial function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
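Before diving into the examples, here is a minimal, self-contained sketch of what torch.multinomial does; the weight values are illustrative only. Each row of the input is treated as non-negative (not necessarily normalized) sampling weights, and the function returns sampled column indices:

import torch

# Each row holds non-negative weights; they do not need to sum to 1.
weights = torch.tensor([[0.1, 0.4, 0.5],
                        [1.0, 1.0, 2.0]])

# Draw 2 column indices per row, without replacement.
idx = torch.multinomial(weights, num_samples=2, replacement=False)
print(idx.shape)  # torch.Size([2, 2]) -- one row of sampled indices per input row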
Example 1: sample
def sample(self, batch_size, seq_len, data=None):
    """
    Sample one batch of sequences.
    data is an existing (partial) sequence to condition on, if provided.
    """
    # If no data is given, generate from scratch.
    if data is None:
        sample_batch = torch.zeros(batch_size, seq_len).type(torch.LongTensor).cuda()
        inp = Variable(torch.zeros(batch_size, 1).type(torch.LongTensor)).cuda()
        h = self.init_hidden(batch_size)
        for i in range(seq_len):
            output, h = self.forward(inp, h)
            output = torch.multinomial(output.exp().squeeze(), 1)
            sample_batch[:, i] = output.data
            inp = output
        return sample_batch
    # Otherwise, continue from the given prefix.
    else:
        sample_batch = torch.zeros(batch_size, seq_len).type(torch.LongTensor).cuda()
        inp = Variable(torch.zeros(batch_size, 1).type(torch.LongTensor)).cuda()
        h = self.init_hidden(batch_size)
        for i in range(seq_len):
            if i < data.size(1):
                inp = data[:, i].unsqueeze(1)
            else:
                inp = sample_batch[:, i - 1].unsqueeze(1)
            output, h = self.forward(inp, h)
            output = torch.multinomial(output.exp().squeeze(), 1)
            sample_batch[:, i] = output.data
        return sample_batch
Example 2: blue_eval
from collections import Counter

import nltk
import numpy as np
import torch

def blue_eval(output, corpus):
    # After sampling, the shape is 20*19.
    sent_idx = torch.multinomial(output.exp().cpu(), 1).view(-1, 19)
    sent_idx = sent_idx.cpu().data.numpy()
    sent_str = []
    # Post-process one generated batch of data.
    for i in range(sent_idx.shape[0]):
        str_ = [str(int(x)) for x in sent_idx[i, :-1]]
        sent_str.append(str_)
    eval_data = []
    for sent in corpus.valid.numpy():
        eval_data.append([str(int(x)) for x in sent[1:-1]])
    weight = tuple(1. / 4 for _ in range(4))
    BLEUscores = []
    for gen_sent in sent_str:
        ref_sent_info = []
        for ref_sent in eval_data:
            # Find the reference sentences most similar to this one.
            common_tokens = Counter(gen_sent) & Counter(ref_sent)
            correct_preds = sum(common_tokens.values())
            recall_wrt = float(correct_preds) / len(gen_sent)
            ref_sent_info.append((ref_sent, recall_wrt))
        ref_sent_info.sort(key=lambda x: -x[1])
        top_refs = [x[0] for x in ref_sent_info[:50]]
        BLEUscore = nltk.translate.bleu_score.sentence_bleu(top_refs, gen_sent, weight)
        BLEUscores.append(BLEUscore)
    score = np.mean(BLEUscores)
    return score
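For context, nltk's sentence_bleu takes a list of tokenized reference sentences plus one tokenized hypothesis, with n-gram weights as in the `weight` tuple above. A minimal sketch with made-up tokens:

from nltk.translate.bleu_score import sentence_bleu

references = [['the', 'quick', 'brown', 'fox', 'jumps'],
              ['a', 'quick', 'brown', 'fox', 'leaps']]  # tokenized references
hypothesis = ['the', 'quick', 'brown', 'fox', 'jumps']  # tokenized candidate
score = sentence_bleu(references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25))
print(score)  # 1.0 here, since the hypothesis matches the first reference exactly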
Example 3: forward
def forward(self, fc_feats, att_feats, seq):
    batch_size = fc_feats.size(0)
    state = self.init_hidden(batch_size)
    outputs = []
    for i in range(seq.size(1)):
        if i == 0:
            xt = self.img_embed(fc_feats)
        else:
            if self.training and i >= 2 and self.ss_prob > 0.0:  # otherwise no need to sample
                sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
                sample_mask = sample_prob < self.ss_prob
                if sample_mask.sum() == 0:
                    it = seq[:, i-1].clone()
                else:
                    sample_ind = sample_mask.nonzero().view(-1)
                    it = seq[:, i-1].data.clone()
                    # prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
                    # it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
                    prob_prev = torch.exp(outputs[-1].data)  # fetch prev distribution: shape Nx(M+1)
                    it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                    it = Variable(it, requires_grad=False)
            else:
                it = seq[:, i-1].clone()
            # break if all the sequences have ended
            if i >= 2 and seq[:, i-1].data.sum() == 0:
                break
            xt = self.embed(it)
        output, state = self.core(xt, state)
        output = F.log_softmax(self.logit(output))
        outputs.append(output)
    return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()
Example 4: predict_fn
def predict_fn(input_data, model):
    logger.info('Generating text based on input parameters.')
    corpus = model['corpus']
    model = model['model']
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info('Current device: {}'.format(device))
    torch.manual_seed(input_data['seed'])
    ntokens = len(corpus.dictionary)
    input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
    hidden = model.init_hidden(1)
    logger.info('Generating {} words.'.format(input_data['words']))
    result = []
    with torch.no_grad():  # no tracking history
        for i in range(input_data['words']):
            output, hidden = model(input, hidden)
            word_weights = output.squeeze().div(input_data['temperature']).exp().cpu()
            word_idx = torch.multinomial(word_weights, 1)[0]
            input.fill_(word_idx)
            word = corpus.dictionary.idx2word[word_idx]
            word = word if type(word) == str else word.decode()
            if word == '<eos>':
                word = '\n'
            elif i % 12 == 11:
                word = word + '\n'
            else:
                word = word + ' '
            result.append(word)
    return ''.join(result)
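The squeeze().div(temperature).exp() idiom above turns the model's log-probabilities back into unnormalized sampling weights, with temperature controlling how peaked the distribution is; torch.multinomial does not require normalized weights. A standalone sketch of the idiom, with illustrative values:

import torch

log_probs = torch.tensor([0.1, 0.2, 0.7]).log()
for temperature in (0.5, 1.0, 2.0):
    weights = log_probs.div(temperature).exp()
    # temperature < 1 sharpens the distribution toward the argmax,
    # temperature > 1 flattens it toward uniform.
    word_idx = torch.multinomial(weights, num_samples=1)[0]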
Example 5: sample
def sample(self, sample_shape=torch.Size()):
    sample_shape = self._extended_shape(sample_shape)
    param_shape = sample_shape + torch.Size((self._num_events,))
    probs = self.probs.expand(param_shape)
    probs_2d = probs.contiguous().view(-1, self._num_events)
    sample_2d = torch.multinomial(probs_2d, 1, True)
    return sample_2d.contiguous().view(sample_shape)
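This method reads like the internal sampling path of a categorical distribution: probabilities are flattened to 2-D, torch.multinomial draws one index per row, and the result is reshaped back. If you just need categorical sampling, the public torch.distributions.Categorical API gives the same behavior; a minimal usage sketch (the probabilities are illustrative):

import torch
from torch.distributions import Categorical

dist = Categorical(probs=torch.tensor([0.2, 0.3, 0.5]))
samples = dist.sample(torch.Size([4]))  # four independent draws, e.g. tensor([2, 1, 2, 0])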
Example 6: translate
def translate(enc_input='thisissungkim.iloveyou.', predict_len=100, temperature=0.9):
    input_var = str2tensor(enc_input)
    encoder_hidden = encoder.init_hidden()
    encoder_outputs, encoder_hidden = encoder(input_var, encoder_hidden)
    hidden = encoder_hidden
    predicted = ''
    dec_input = str2tensor(SOS_token)
    for c in range(predict_len):
        output, hidden = decoder(dec_input, hidden)
        # Sample from the network output as a multinomial distribution.
        output_dist = output.data.view(-1).div(temperature).exp()
        top_i = torch.multinomial(output_dist, 1)[0]
        # Stop at EOS (`==` compares by value; `is` would compare identity).
        if top_i == EOS_token:
            break
        predicted_char = chr(top_i)
        predicted += predicted_char
        dec_input = str2tensor(predicted_char)
    return enc_input, predicted
Example 7: sample
def sample(self, input, temperature=1., hidden=None):
    hidden = self.module_.init_hidden(1) if hidden is None else hidden
    output, hidden = self.module_(input, hidden)
    probas = output.squeeze().data.div(temperature).exp()
    sample = torch.multinomial(probas, 1)[-1]
    if probas.dim() > 1:
        sample = sample[0]
    return sample, self.repackage_hidden(hidden)
Example 8: torch_multinomial
def torch_multinomial(input, num_samples, replacement=False):
    """
    Like `torch.multinomial()` but works with cuda tensors
    (by sampling on CPU and moving the result back to the GPU).
    Does not support keyword argument `out`.
    """
    if input.is_cuda:
        return torch_multinomial(input.cpu(), num_samples, replacement).cuda()
    else:
        return torch.multinomial(input, num_samples, replacement)
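A quick usage sketch for this wrapper; the weight tensor and sample count below are illustrative, not from the original code:

import torch

weights = torch.rand(8, 100)  # unnormalized per-row sampling weights
if torch.cuda.is_available():
    weights = weights.cuda()
idx = torch_multinomial(weights, num_samples=3, replacement=True)
# `idx` has shape (8, 3) and ends up on the same device as `weights`.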
Example 9: sample
def sample(self, fc_feats, att_feats, opt={}):
    sample_max = opt.get('sample_max', 1)
    beam_size = opt.get('beam_size', 1)
    temperature = opt.get('temperature', 1.0)
    if beam_size > 1:
        return self.sample_beam(fc_feats, att_feats, opt)
    batch_size = fc_feats.size(0)
    state = self.init_hidden(batch_size)
    # embed fc and att feats
    fc_feats = self.fc_embed(fc_feats)
    _att_feats = self.att_embed(att_feats.view(-1, self.att_feat_size))
    att_feats = _att_feats.view(*(att_feats.size()[:-1] + (self.rnn_size,)))
    # Project the attention feats first to reduce memory and computation consumption.
    p_att_feats = self.ctx2att(att_feats.view(-1, self.rnn_size))
    p_att_feats = p_att_feats.view(*(att_feats.size()[:-1] + (self.att_hid_size,)))
    seq = []
    seqLogprobs = []
    for t in range(self.seq_length + 1):
        if t == 0:  # input <bos>
            it = fc_feats.data.new(batch_size).long().zero_()
        elif sample_max:
            sampleLogprobs, it = torch.max(logprobs.data, 1)
            it = it.view(-1).long()
        else:
            if temperature == 1.0:
                prob_prev = torch.exp(logprobs.data).cpu()  # fetch prev distribution: shape Nx(M+1)
            else:
                # scale logprobs by temperature
                prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
            it = torch.multinomial(prob_prev, 1).cuda()
            sampleLogprobs = logprobs.gather(1, Variable(it, requires_grad=False))  # gather the logprobs at sampled positions
            it = it.view(-1).long()  # and flatten indices for downstream processing
        xt = self.embed(Variable(it, requires_grad=False))
        if t >= 1:
            # stop when all sequences are finished
            if t == 1:
                unfinished = it > 0
            else:
                unfinished = unfinished * (it > 0)
            if unfinished.sum() == 0:
                break
            it = it * unfinished.type_as(it)
            seq.append(it)  # seq[t] is the input at time step t+2
            seqLogprobs.append(sampleLogprobs.view(-1))
        output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state)
        logprobs = F.log_softmax(self.logit(output))
    return torch.cat([_.unsqueeze(1) for _ in seq], 1), torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1)
Example 10: forward
def forward(self, x, hiddens):
    batchsize = x["s"].size(0)
    if not hasattr(self, "prob"):
        self.prob = x["res"].clone().resize_(2)
        self.prob[0] = 1 - self.args.ratio_skip_observation
        self.prob[1] = self.args.ratio_skip_observation
    skip_mat = self._var(torch.multinomial(self.prob, batchsize, replacement=True).float().view(-1, 1))
    output = self._merge(x, hiddens, skip_mat)
    return self.decision(output)
Example 11: sample_K
def sample_K(probs, K, mode='test'):
    # `dtype`, `batch_size` and `N` are defined outside this snippet.
    probs = 1e-6 + probs * (1 - 2e-6)  # to avoid log(0)
    probs = probs.view(-1, 2**K)
    if mode == 'train':
        bin_sample = torch.multinomial(probs, 1).detach()
    else:
        bin_sample = probs.max(1)[1].detach().unsqueeze(1)
    sample = bin_sample.clone().type(dtype)
    log_probs_samples = torch.log(probs).gather(1, bin_sample).squeeze()
    log_probs_samples = log_probs_samples.view(batch_size, N).sum(1)
    return bin_sample.data.view(batch_size, N), log_probs_samples
Example 12: sample
def sample(self, Q):
    if self.use_actor_critic:
        pi = F.softmax(Q, dim=-1)
        a = torch.multinomial(pi, 1).squeeze()
        return a.data.cpu().numpy()
    else:
        sample = random.random()
        if sample > self.eps_threshold:
            return Q.data.max(1)[1].cpu().numpy()
        else:
            return np.random.randint(0, self.num_actions, self.nenv)
Example 13: main
def main():
    path = 'my_data1'
    sec_text = "I wanna go out tonight"
    n_bins = 4
    # Load the corpus (data1 by default).
    corpus = data.Corpus(path=os.path.join("data", path))
    gen = torch.load("models/gen_" + path + ".pt").cuda()
    print(gen)
    bin_stream = string2bins(sec_text, n_bins)
    ntokens = len(corpus.dictionary)
    tokens = list(range(ntokens))  # * args.replication_factor
    np.random.shuffle(tokens)
    words_in_bin = int(len(tokens) / n_bins)
    bins = [tokens[i:i + words_in_bin] for i in range(0, len(tokens), words_in_bin)]
    zero = [list(set(tokens) - set(bin_)) for bin_ in bins]
    # Generate the output word by word.
    for _ in range(10):
        input = Variable(torch.Tensor([corpus.dictionary.word2idx['<start>']]), volatile=True).view(-1, 1).type(torch.LongTensor).cuda()
        h = gen.init_hidden(1)
        gen_words = []
        for i in range(len(bin_stream[:16])):
            output, h = gen(input, h)
            zero_index = zero[int(bin_stream[i], 2)]
            zero_index = torch.LongTensor(zero_index).cuda()
            output = output.squeeze().data.div(0.8).exp()
            output.index_fill_(0, zero_index, 0)  # zero out words outside the current bin
            word_idx = torch.multinomial(output, 1)[0]
            gen_words.append(word_idx)
            input.data.fill_(word_idx)
        print(len(gen_words))
        str_ = " ".join([corpus.dictionary.idx2word[x] for x in gen_words])
        print(str_)
Example 14: sample
def sample(self, max_time_step=200):
    """generate one sample"""
    sample_words = [self.vocab['<s>']]
    h_tm1 = None
    for t in range(max_time_step):  # original used Python 2's xrange
        x_tm1_embed = self.embed(Variable(torch.LongTensor([sample_words[-1]])))
        x_tm1_embed = x_tm1_embed.unsqueeze(0)
        h_t, (last_state, last_cell) = self.lstm(x_tm1_embed, h_tm1)
        h_t = self.dropout(h_t.view(-1))
        p_t = F.softmax(self.read_out(h_t), dim=-1)
        x_t_wid = torch.multinomial(p_t, num_samples=1).data[0]  # num_samples is required
        x_t = self.vocab.id2word[x_t_wid]
        if x_t == '</s>':
            return [self.vocab.id2word[wid] for wid in sample_words[1:]]
        else:
            sample_words.append(x_t_wid)
            h_tm1 = last_state, last_cell
Example 15: sample_from_model
def sample_from_model(model, vectorizer, nationalities, sample_size=20,
                      temperature=1.0):
    num_samples = len(nationalities)
    begin_seq_index = [vectorizer.char_vocab.begin_seq_index
                       for _ in range(num_samples)]
    begin_seq_index = torch.tensor(begin_seq_index, dtype=torch.int64).unsqueeze(dim=1)
    indices = [begin_seq_index]
    nationality_indices = torch.tensor(nationalities, dtype=torch.int64).unsqueeze(dim=0)
    h_t = model.nation_emb(nationality_indices)
    for time_step in range(sample_size):
        x_t = indices[time_step]
        x_emb_t = model.char_emb(x_t)
        rnn_out_t, h_t = model.rnn(x_emb_t, h_t)
        prediction_vector = model.fc(rnn_out_t.squeeze(dim=1))
        probability_vector = F.softmax(prediction_vector / temperature, dim=1)
        indices.append(torch.multinomial(probability_vector, num_samples=1))
    indices = torch.stack(indices).squeeze().permute(1, 0)
    return indices