This article collects typical usage examples of the Python method torch.distributions.Categorical. If you have been wondering what distributions.Categorical does, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore other usage examples from its parent module, torch.distributions.
The following shows 15 code examples of distributions.Categorical, sorted by popularity by default.
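Before diving into the examples, here is a minimal, self-contained sketch of the core Categorical API (the tensor values are illustrative):

import torch
from torch.distributions import Categorical

# Construct from unnormalized log-odds; `probs=` accepts non-negative
# weights instead and normalizes them internally.
dist = Categorical(logits=torch.tensor([0.5, 1.5, -1.0]))
action = dist.sample()         # tensor(i) with i in {0, 1, 2}
log_p = dist.log_prob(action)  # log-probability of the drawn index
entropy = dist.entropy()       # often used as an exploration bonus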
Example 1: sample_rl
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def sample_rl(self, images, seq_len, *args):
    device = images.device
    b_s = images.size(0)  # batch size
    state = self.init_state(b_s, device)
    out = None
    outputs = []
    log_probs = []
    for t in range(seq_len):
        # Feed the previous sample back in ('feedback' mode) to get step-t logits.
        out, state = self.step(t, state, out, images, None, *args, mode='feedback')
        distr = distributions.Categorical(logits=out)
        out = distr.sample()
        outputs.append(out)
        log_probs.append(distr.log_prob(out))
    # Return [B, T] tensors of sampled tokens and their log-probabilities.
    return torch.cat([o.unsqueeze(1) for o in outputs], 1), torch.cat([o.unsqueeze(1) for o in log_probs], 1)
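The tokens and per-step log-probabilities returned by sample_rl are the usual ingredients of a REINFORCE-style loss. A hedged sketch of that follow-up step (`reward` and `baseline` are assumed to come from the surrounding training loop, not from this snippet):

# outputs: [B, T] sampled tokens; log_probs: [B, T] from sample_rl above.
advantage = (reward - baseline).unsqueeze(-1)  # [B, 1], broadcast over time
loss = -(advantage * log_probs).mean()         # REINFORCE policy gradient
loss.backward()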
Example 2: forward
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def forward(self, state, hidden=None):
explore = random.random() < self.epsilon
state = state.unsqueeze(0)
    hidden = hidden.unsqueeze(0) if hidden is not None else None
logits = self.policy(
state, hidden=hidden
)
outputs = [None]
if isinstance(logits, tuple):
logits, outputs = logits
action = logits.argmax(dim=1)
    if explore:
        # Explore: constant logits yield a uniform Categorical over actions.
        logits = torch.ones_like(logits) / logits.size(1)
        action = Categorical(logits=logits).sample()
return action[0], logits[0], outputs[0]
Example 3: select_action
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
    # agent_inputs is assumed to be a batch of Q-values for each agent
self.epsilon = self.schedule.eval(t_env)
if test_mode:
# Greedy action selection only
self.epsilon = 0.0
# mask actions that are excluded from selection
masked_q_values = agent_inputs.clone()
masked_q_values[avail_actions == 0.0] = -float("inf") # should never be selected!
random_numbers = th.rand_like(agent_inputs[:, :, 0])
pick_random = (random_numbers < self.epsilon).long()
random_actions = Categorical(avail_actions.float()).sample().long()
picked_actions = pick_random * random_actions + (1 - pick_random) * masked_q_values.max(dim=2)[1]
return picked_actions
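Categorical normalizes whatever non-negative weights it receives, so passing the raw 0/1 availability mask to Categorical(avail_actions.float()) samples uniformly over the available actions. A toy check:

import torch
from torch.distributions import Categorical

avail = torch.tensor([1., 0., 1., 1.])  # actions 0, 2 and 3 available
print(Categorical(avail).probs)         # tensor([0.3333, 0.0000, 0.3333, 0.3333])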
Example 4: select_action
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def select_action(self, ob_id, state):
r"""
This function is mostly borrowed from the Reinforcement Learning example.
See https://github.com/pytorch/examples/tree/master/reinforcement_learning
The main difference is that instead of keeping all probs in one list,
the agent keeps probs in a dictionary, one key per observer.
NB: no need to enforce thread-safety here as GIL will serialize
executions.
"""
state = torch.from_numpy(state).float().unsqueeze(0)
probs = self.policy(state)
m = Categorical(probs)
action = m.sample()
self.saved_log_probs[ob_id].append(m.log_prob(action))
return action.item()
Example 5: select_action_batch
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def select_action_batch(agent_rref, ob_id, state):
r"""
    Batching select_action: in each step, the agent waits for states from
    all observers and processes them together. This reduces the number of
    CUDA kernels launched and hence improves amortized inference speed.
"""
self = agent_rref.local_value()
self.states[ob_id].copy_(state)
future_action = self.future_actions.then(
lambda future_actions: future_actions.wait()[ob_id].item()
)
with self.lock:
self.pending_states -= 1
if self.pending_states == 0:
self.pending_states = len(self.ob_rrefs)
probs = self.policy(self.states.cuda())
m = Categorical(probs)
actions = m.sample()
self.saved_log_probs.append(m.log_prob(actions).t()[0])
future_actions = self.future_actions
self.future_actions = torch.futures.Future()
future_actions.set_result(actions.cpu())
return future_action
Example 6: forward
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def forward(self, input, params=None):
if params is None:
params = OrderedDict(self.named_parameters())
output = input
for i in range(1, self.num_layers):
output = F.linear(output,
weight=params['layer{0}.weight'.format(i)],
bias=params['layer{0}.bias'.format(i)])
output = self.nonlinearity(output)
logits = F.linear(output,
weight=params['layer{0}.weight'.format(self.num_layers)],
bias=params['layer{0}.bias'.format(self.num_layers)])
return Categorical(logits=logits)
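A hypothetical usage of the module above (`policy` and `obs_dim` are assumed names for an instance and its input width):

dist = policy(torch.randn(4, obs_dim))  # forward returns a Categorical
actions = dist.sample()                 # LongTensor of shape [4]
log_probs = dist.log_prob(actions)      # feeds a policy-gradient loss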
Example 7: sample_model_spec
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def sample_model_spec(self, num):
"""
    Sample `num` model specs.
    :param num: number of architectures to sample.
    :return: list of length num, each entry an architecture spec.
"""
alpha_topology = self.alpha_topology.detach().clone()
alpha_ops = self.alpha_ops.detach().clone()
sample_archs = []
sample_ops = []
with torch.no_grad():
for i in range(self.num_intermediate_nodes):
            # align with topology weights
probs = nn.functional.softmax(alpha_topology[: i+2, i], dim=0)
sample_archs.append(Categorical(probs))
probs_op = nn.functional.softmax(alpha_ops[:, i], dim=0)
sample_ops.append(Categorical(probs_op))
return self._sample_model_spec(num, sample_archs, sample_ops)
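The _sample_model_spec helper is not shown in this snippet; one plausible shape for it, assuming each sampled architecture is a pair of per-node edge and operation choices:

def _sample_model_spec(self, num, sample_archs, sample_ops):
    # Draw `num` independent architectures: for each intermediate node,
    # sample one incoming edge and one operation from the Categorical
    # distributions built by the caller.
    specs = []
    with torch.no_grad():
        for _ in range(num):
            topology = [d.sample().item() for d in sample_archs]
            ops = [d.sample().item() for d in sample_ops]
            specs.append((topology, ops))
    return specs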
Example 8: sample_model_spec
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def sample_model_spec(self, num):
"""
    Override: sample the alphas via Gumbel-softmax instead of a plain softmax.
    :param num: number of architectures to sample.
    :return: list of length num, each entry an architecture spec.
"""
alpha_topology = self.alpha_topology.detach().clone()
alpha_ops = self.alpha_ops.detach().clone()
sample_archs = []
sample_ops = []
gumbel_dist = Gumbel(torch.tensor([.0]), torch.tensor([1.0]))
with torch.no_grad():
for i in range(self.num_intermediate_nodes):
            # align with topology weights
probs = gumbel_softmax(alpha_topology[: i+2, i], self.temperature(), gumbel_dist)
sample_archs.append(Categorical(probs))
probs_op = gumbel_softmax(alpha_ops[:, i], self.temperature(), gumbel_dist)
sample_ops.append(Categorical(probs_op))
return self._sample_model_spec(num, sample_archs, sample_ops)
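The gumbel_softmax helper is assumed by this snippet rather than defined in it; a minimal sketch consistent with how it is called above (a Gumbel-noise-perturbed, temperature-scaled softmax, per Jang et al., 2016):

import torch

def gumbel_softmax(logits, temperature, gumbel_dist):
    # gumbel_dist has batch_shape (1,), so sampling logits.shape yields a
    # trailing singleton dimension that squeeze(-1) removes.
    gumbels = gumbel_dist.sample(logits.shape).squeeze(-1).to(logits.device)
    return torch.nn.functional.softmax((logits + gumbels) / temperature, dim=0)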
Example 9: sample_rl
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def sample_rl(self, statics, *args):
device = statics[0].device
b_s = statics[0].size(0)
state = self.init_state(b_s, device)
outputs = []
log_probs = []
for t in range(self.seq_len):
prev_outputs = outputs[-1] if t > 0 else None
outs, state = self.step(t, state, prev_outputs, statics, None, *args, mode='feedback')
outputs.append([])
log_probs.append([])
for out in outs:
distr = distributions.Categorical(logits=out)
sample = distr.sample()
outputs[-1].append(sample)
log_probs[-1].append(distr.log_prob(sample))
outputs = list(zip(*outputs))
outputs = tuple(torch.cat([oo.unsqueeze(1) for oo in o], 1) for o in outputs)
log_probs = list(zip(*log_probs))
log_probs = tuple(torch.cat([oo.unsqueeze(1) for oo in o], 1) for o in log_probs)
return outputs, log_probs
Example 10: gen_one_noTcond
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def gen_one_noTcond(self, eos_index, max_T):
hidden = self.init_hidden(1)
device = hidden[0].device
last_rnn_outp = hidden[0][-1] # [1, C]
generation = []
for t in range(max_T):
scores = self.output_embedding(last_rnn_outp) # [1, V]
word_dist = Categorical(logits=scores)
selected_index = word_dist.sample() # [1]
if selected_index == eos_index:
break
generation.append(selected_index)
inp_embeddings = self.input_embedding(selected_index) # [1, inp_E]
last_rnn_outp, hidden = self.rnn(inp_embeddings[None, :, :], hidden)
last_rnn_outp = last_rnn_outp[0]
return torch.tensor(generation, dtype=torch.long, device=device)
Example 11: sample
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def sample(self, time: int, outputs: torch.Tensor) -> torch.LongTensor:
del time # unused by sample_fn
# Outputs are logits, we sample from the top-k candidates
if not torch.is_tensor(outputs):
raise TypeError(
f"Expected outputs to be a single Tensor, got: {type(outputs)}")
if self._softmax_temperature is None:
logits = outputs
else:
logits = outputs / self._softmax_temperature
logits = _top_k_logits(logits, k=self._top_k)
sample_id_sampler = Categorical(logits=logits)
sample_ids = sample_id_sampler.sample()
return sample_ids
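_top_k_logits is assumed by this snippet; a common implementation that matches how it is used (keep the k largest logits per row and push the rest to -inf, so they receive zero probability after Categorical's internal softmax):

import torch

def _top_k_logits(logits, k):
    if k == 0:
        return logits  # k == 0 conventionally disables the filter
    top_values, _ = torch.topk(logits, k=k)
    threshold = top_values[..., -1, None]  # smallest kept logit per row
    return torch.where(logits < threshold,
                       torch.full_like(logits, float("-inf")),
                       logits)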
Example 12: pi_beta_sample
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def pi_beta_sample(self, state, beta, action, **kwargs):
# 1. obtain probabilities
# note: detach is to block gradient
beta_probs = beta(state.detach(), action=action)
pi_probs = self.forward(state)
# 2. probabilities -> categorical distribution.
beta_categorical = Categorical(beta_probs)
pi_categorical = Categorical(pi_probs)
# 3. sample the actions
# See this issue: https://github.com/awarebayes/RecNN/issues/7
    # Usually it works like:
    #   pi_action = pi_categorical.sample(); beta_action = beta_categorical.sample()
    # but setting action_source to {'pi': 'beta', 'beta': 'beta'} turns this into:
    #   pi_action = beta_categorical.sample(); beta_action = beta_categorical.sample()
available_actions = {'pi': pi_categorical.sample(), 'beta': beta_categorical.sample()}
pi_action = available_actions[self.action_source['pi']]
beta_action = available_actions[self.action_source['beta']]
# 4. calculate stuff we need
pi_log_prob = pi_categorical.log_prob(pi_action)
beta_log_prob = beta_categorical.log_prob(beta_action)
return pi_log_prob, beta_log_prob, pi_probs
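As a hedged follow-up (assumed usage, not part of the snippet): the two log-probabilities are typically combined into an importance weight pi(a|s) / beta(a|s) for off-policy correction:

correction = torch.exp(pi_log_prob - beta_log_prob)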
Example 13: sampling_decode_single
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def sampling_decode_single(self, pz_dec_outs, u_enc_out, m_tm1, u_input_np, last_hidden, degree_input, bspan_index):
decoded = []
reward_sum = 0
log_probs = []
rewards = []
bspan_index_np = np.array(bspan_index).reshape(-1, 1)
for t in range(self.max_ts):
# reward
reward, finished = self.reward(m_tm1.data.view(-1), decoded, bspan_index)
reward_sum += reward
rewards.append(reward)
if t == self.max_ts - 1:
finished = True
if finished:
loss = self.finish_episode(log_probs, rewards)
return loss
# action
proba, last_hidden, _ = self.m_decoder(pz_dec_outs, u_enc_out, u_input_np, m_tm1,
degree_input, last_hidden, bspan_index_np)
proba = proba.squeeze(0) # [B,V]
dis = Categorical(proba)
action = dis.sample()
log_probs.append(dis.log_prob(action))
mt_index = action.data.view(-1)
decoded.append(mt_index.clone())
for i in range(mt_index.size(0)):
if mt_index[i] >= cfg.vocab_size:
mt_index[i] = 2 # unk
m_tm1 = cuda_(Variable(mt_index).view(1, -1))
Example 14: __init__
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def __init__(self, probs=None, logits=None, validate_args=None):
self.categoricals = []
if probs is None:
probs = [None] * len(logits)
elif logits is None:
logits = [None] * len(probs)
    else:
        raise ValueError('Specify either probs or logits, but not both')
for sub_probs, sub_logits in zip(probs, logits):
categorical = distributions.Categorical(probs=sub_probs, logits=sub_logits, validate_args=validate_args)
self.categoricals.append(categorical)
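The rest of the class is not shown here. Such a wrapper typically mirrors the Categorical API by delegating to each factor; a hedged sketch of what sample and log_prob might look like (assumed, not from the original class):

def sample(self):
    # One draw per sub-distribution, stacked along a new last dimension.
    return torch.stack([c.sample() for c in self.categoricals], dim=-1)

def log_prob(self, value):
    # Joint log-probability under independence: sum the per-factor terms.
    return sum(c.log_prob(value[..., i])
               for i, c in enumerate(self.categoricals))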
Example 15: _sample_event
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Categorical [as alias]
def _sample_event(self, output, greedy=True, temperature=1.0):
if greedy:
return output.argmax(-1)
else:
output = output / temperature
probs = self.output_fc_activation(output)
return Categorical(probs).sample()
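A toy illustration of the temperature knob used above (values are illustrative): dividing the logits by a temperature below 1 sharpens the softmax before sampling, while a temperature above 1 flattens it.

import torch

logits = torch.tensor([2.0, 1.0, 0.1])
for temp in (0.5, 1.0, 2.0):
    print(temp, torch.softmax(logits / temp, dim=-1))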