This article collects typical usage examples of the torch.distributions.Bernoulli method in Python. If you have been wondering what distributions.Bernoulli does, how to call it, or where to find real usage examples, the curated code samples below should help. You can also explore further usage of its containing module, torch.distributions.
The following presents 15 code examples of distributions.Bernoulli, ordered by popularity by default.
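Before diving into the examples, here is a minimal, self-contained sketch of the Bernoulli API itself (construction from probs or logits, sampling, and scoring); the specific numbers are illustrative only:

import torch
from torch.distributions import Bernoulli

# Parameterize by probability ...
d = Bernoulli(probs=torch.tensor(0.3))
sample = d.sample()         # tensor(0.) or tensor(1.)
log_p = d.log_prob(sample)  # log-probability of the drawn value

# ... or by logit, which is often more numerically stable
d2 = Bernoulli(logits=torch.tensor(-0.8473))  # sigmoid(-0.8473) ~ 0.3
batch = d2.sample((4,))                       # four independent draws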
Example 1: forward
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def forward(self, g, action=None):
    graph_embed = self.graph_op['embed'](g)
    logit = self.add_node(graph_embed)
    prob = torch.sigmoid(logit)

    if not self.training:
        action = Bernoulli(prob).sample().item()
    stop = bool(action == self.stop)

    if not stop:
        g.add_nodes(1)
        self._initialize_node_repr(g, action, graph_embed)

    if self.training:
        sample_log_prob = bernoulli_action_log_prob(logit, action)
        self.log_prob.append(sample_log_prob)
    return stop
Example 2: forward
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def forward(self, x, gamma):
    # shape: (bsize, channels, height, width)
    if self.training:
        batch_size, channels, height, width = x.shape
        bernoulli = Bernoulli(gamma)
        # Sample block centers; valid positions shrink by block_size - 1.
        # Note the hard-coded .cuda(), which assumes a GPU is present;
        # Example 3 below guards this with torch.cuda.is_available().
        mask = bernoulli.sample((batch_size, channels,
                                 height - (self.block_size - 1),
                                 width - (self.block_size - 1))).cuda()
        block_mask = self._compute_block_mask(mask)
        # Rescale so the expected activation magnitude is preserved.
        countM = block_mask.size()[0] * block_mask.size()[1] * block_mask.size()[2] * block_mask.size()[3]
        count_ones = block_mask.sum()
        return block_mask * x * (countM / count_ones)
    else:
        return x
Example 3: forward
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def forward(self, x, gamma):
    # shape: (bsize, channels, height, width)
    if self.training:
        batch_size, channels, height, width = x.shape
        bernoulli = Bernoulli(gamma)
        mask = bernoulli.sample((batch_size, channels,
                                 height - (self.block_size - 1),
                                 width - (self.block_size - 1)))
        if torch.cuda.is_available():
            mask = mask.cuda()
        block_mask = self._compute_block_mask(mask)
        countM = block_mask.size()[0] * block_mask.size()[1] * block_mask.size()[2] * block_mask.size()[3]
        count_ones = block_mask.sum()
        return block_mask * x * (countM / count_ones)
    else:
        return x
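For context, the gamma passed into the two DropBlock forwards above is usually derived from a target drop probability. A sketch of that derivation, following the formula from the DropBlock paper (the helper name and the usage lines are assumptions, not taken from the examples):

import torch

def dropblock_gamma(drop_prob, block_size, feat_size):
    # Per-position Bernoulli rate such that, after each sampled center is
    # expanded to a block_size x block_size block, roughly drop_prob of
    # the feature map ends up zeroed (Ghiasi et al., 2018).
    valid = feat_size - block_size + 1
    return (drop_prob / block_size ** 2) * (feat_size ** 2 / valid ** 2)

# hypothetical usage with the DropBlock module from Examples 3 and 8
# block = DropBlock(block_size=5)
# out = block(torch.randn(8, 64, 32, 32), dropblock_gamma(0.1, 5, 32))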
Example 4: bernoulli_action_log_prob
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def bernoulli_action_log_prob(logit, action):
    """Calculate the log-probability of an action with respect to a Bernoulli
    distribution. Use logit rather than prob for numerical stability."""
    if action == 0:
        return F.logsigmoid(-logit)
    else:
        return F.logsigmoid(logit)
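A quick check of why the logit form matters: computing log(sigmoid(logit)) naively underflows for very negative logits, while F.logsigmoid works in log space and stays finite. A minimal demonstration:

import torch
import torch.nn.functional as F

logit = torch.tensor(-200.0)
naive = torch.log(torch.sigmoid(logit))  # sigmoid underflows to 0 -> -inf
stable = F.logsigmoid(logit)             # tensor(-200.), exact in log space
print(naive, stable)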
Example 5: bernoulli_action_log_prob
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def bernoulli_action_log_prob(logit, action):
    """Calculate the log-probability of each action in a batch with respect
    to a Bernoulli distribution. Use logit rather than prob for numerical
    stability."""
    log_probs = torch.cat([F.logsigmoid(-logit), F.logsigmoid(logit)], dim=1)
    return log_probs.gather(1, torch.tensor(action).unsqueeze(1))
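The concat-and-gather above computes the same quantity that Bernoulli.log_prob would, since torch scores Bernoulli outcomes through the stored logits with a stable binary cross entropy. A sketch of the equivalent call, assuming logit has shape [B, 1] and action is a list of 0/1 values:

import torch
from torch.distributions import Bernoulli

logit = torch.randn(4, 1)
action = [0, 1, 1, 0]
value = torch.tensor(action, dtype=torch.float).unsqueeze(1)
log_p = Bernoulli(logits=logit).log_prob(value)  # shape [4, 1]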
Example 6: forward
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def forward(self, g):
    if g.number_of_edges() > 0:
        for t in range(self.num_prop_rounds):
            g.update_all(message_func=self.dgmg_msg,
                         reduce_func=self.reduce_funcs[t])
            g.ndata['hv'] = self.node_update_funcs[t](
                g.ndata['a'], g.ndata['hv'])

#######################################################################################
# Actions
# ``````````````````````````
# All actions are sampled from distributions parameterized using neural networks,
# and here they are in turn.
#
# Action 1: Add nodes
# ''''''''''''''''''''''''''
#
# Given the graph embedding vector :math:`\textbf{h}_{G}`, evaluate
#
# .. math::
#
#    \text{Sigmoid}(\textbf{W}_{\text{add node}}\textbf{h}_{G}+b_{\text{add node}}),\\
#
# which is then used to parametrize a Bernoulli distribution for deciding whether
# to add a new node.
#
# If a new node is to be added, initialize its feature with
#
# .. math::
#
#    \textbf{W}_{\text{init}}\text{concat}([\textbf{h}_{\text{init}}, \textbf{h}_{G}])+\textbf{b}_{\text{init}},\\
#
# where :math:`\textbf{h}_{\text{init}}` is a learnable embedding module for
# untyped nodes.
#
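The add-node action described in the comment block maps directly onto a Linear layer plus a Bernoulli. A minimal sketch (the class and attribute names here are illustrative, not taken from the DGMG source):

import torch.nn as nn
from torch.distributions import Bernoulli

class AddNode(nn.Module):
    def __init__(self, graph_embed_dim):
        super().__init__()
        # W_add_node h_G + b_add_node, producing a single logit
        self.add_node = nn.Linear(graph_embed_dim, 1)

    def forward(self, h_G):
        logit = self.add_node(h_G)
        # Bernoulli(sigmoid(logit)) decides whether to add a new node
        return Bernoulli(logits=logit).sample()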
Example 7: _hard_bernoulli
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def _hard_bernoulli(self, relaxed):
    # Discretize: a hard Bernoulli sharing the relaxed distribution's logits
    # (dist here is the torch.distributions module alias).
    return dist.Bernoulli(logits=relaxed.logits)
Example 8: __init__
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def __init__(self, block_size):
    super(DropBlock, self).__init__()
    self.block_size = block_size
    # gamma (and hence the Bernoulli) is supplied per call in forward;
    # see Examples 2 and 3.
Example 9: act
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def act(batch_states, theta, values):
    batch_states = torch.from_numpy(batch_states).long()
    probs = torch.sigmoid(theta)[batch_states]
    m = Bernoulli(1 - probs)
    actions = m.sample()
    log_probs_actions = m.log_prob(actions)
    return actions.numpy().astype(int), log_probs_actions, values[batch_states]
Example 10: act
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def act(batch_states, theta):
    batch_states = torch.from_numpy(batch_states).long()
    probs = torch.sigmoid(theta)[batch_states]
    m = Bernoulli(1 - probs)
    actions = m.sample()
    log_probs_actions = m.log_prob(actions)
    return actions.numpy().astype(int), log_probs_actions
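The log_probs_actions returned by Examples 9 and 10 are exactly what a REINFORCE-style update consumes. A minimal sketch of that gradient step (the learning rate and the shape of returns_ are assumptions):

import torch

def reinforce_step(theta, log_probs_actions, returns_, lr=1e-2):
    # Policy-gradient loss: maximize return-weighted log-probabilities.
    loss = -(log_probs_actions * returns_).sum()
    loss.backward()
    with torch.no_grad():
        theta -= lr * theta.grad  # theta: leaf tensor, requires_grad=True
        theta.grad.zero_()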
Example 11: sample_mask
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def sample_mask(self, p, shape):
    """Samples a dropout mask from a Bernoulli distribution.

    Args:
        p (float): the dropout probability in [0, 1].
        shape (torch.Size): shape of the mask to be sampled.
    """
    if self.training:
        self._mask = Bernoulli(1. - p).sample(shape)
    else:
        self._mask = (1. - p)
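This is the classic (non-inverted) dropout convention: at train time a 0/1 keep-mask is sampled with keep probability 1 - p, while at eval time activations are scaled by 1 - p instead. A sketch of applying such a mask (the apply step is an assumption; Example 11 only stores it):

import torch
from torch.distributions import Bernoulli

x = torch.randn(4, 8)
p = 0.5
train_out = x * Bernoulli(1. - p).sample(x.shape)  # training: random mask
eval_out = x * (1. - p)                            # eval: expected mask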
Example 12: forward
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def forward(self, x):
    # x: [batch, num_actions] action values
    bests = x.max(dim=1, keepdim=True)[1]  # greedy action per row
    sampled = Categorical(probs=th.ones_like(x)).sample().unsqueeze(1)  # uniform random action, [batch, 1]
    probs = th.ones(x.size(0), 1) - self.epsilon
    b = Bernoulli(probs=probs).sample().long()  # 1 = act greedily
    ret = bests * b + (1 - b) * sampled
    return ret
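Put together as a standalone snippet, the epsilon-greedy rule above reads as follows over a batch of Q-values (the shapes are assumptions consistent with the example):

import torch as th
from torch.distributions import Bernoulli, Categorical

epsilon = 0.1
q_values = th.randn(5, 4)                       # 5 states, 4 actions
greedy = q_values.max(dim=1, keepdim=True)[1]   # argmax action per row
uniform = Categorical(probs=th.ones_like(q_values)).sample().unsqueeze(1)
keep = Bernoulli(probs=th.ones(5, 1) - epsilon).sample().long()
actions = greedy * keep + (1 - keep) * uniform  # [5, 1]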
Example 13: decode
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def decode(self, p, z=None, c=None, **kwargs):
    ''' Returns occupancy values for the points p at time step 0.

    Args:
        p (tensor): points
        z (tensor): latent code z
        c (tensor): latent conditioned code c (for OFlow, this is
            c_spatial)
    '''
    logits = self.decoder(p, z, c, **kwargs)
    p_r = dist.Bernoulli(logits=logits)
    return p_r
Example 14: decode
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def decode(self, p, z=None, c=None, **kwargs):
    ''' Returns occupancy values for the points p at time step t.

    Args:
        p (tensor): points of dimension 4
        z (tensor): latent code z
        c (tensor): latent conditioned code c (for OFlow, this is
            c_spatial, whereas for ONet 4D, this is c_temporal)
    '''
    logits = self.decoder(p, z, c, **kwargs)
    p_r = dist.Bernoulli(logits=logits)
    return p_r
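Returning the Bernoulli itself, rather than raw probabilities, keeps both inference and training convenient for the caller. A sketch of consuming such a p_r (the 0.5 threshold and the loss are assumptions, common in occupancy-network code):

import torch
from torch import distributions as dist

logits = torch.randn(2, 100)           # decoder output for 100 points
p_r = dist.Bernoulli(logits=logits)    # what decode() returns
occupancy = (p_r.probs > 0.5).float()  # hard occupancy at inference
occ_gt = torch.randint(0, 2, (2, 100)).float()
loss = -p_r.log_prob(occ_gt).sum(-1).mean()  # stable NLL via the logits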
Example 15: generate
# Required import: from torch import distributions [as alias]
# Or: from torch.distributions import Bernoulli [as alias]
def generate(self, T, B):
    if not self.T_condition:
        raise NotImplementedError("Only the version conditioned on T has been implemented.")
    hidden = self.init_hidden(B)
    lengths = torch.tensor([T] * B)
    device = hidden[0].device
    cond_inp = make_pos_cond(T, B, lengths, self.max_T).to(device)

    if self.indep_bernoulli:
        generation = torch.zeros(T, B, self.vocab_size, dtype=torch.long, device=device)
    else:
        generation = torch.zeros(T, B, dtype=torch.long, device=device)

    last_rnn_outp = hidden[0][-1]
    for t in range(T):
        scores = self.output_embedding(last_rnn_outp)  # [B, V]
        if self.indep_bernoulli:
            word_dist = Bernoulli(logits=scores)
        else:
            word_dist = Categorical(logits=scores)
        selected_index = word_dist.sample()
        generation[t] = selected_index

        if t < T - 1:
            if self.indep_bernoulli:
                inp_embeddings = torch.matmul(generation[t].float(), self.input_embedding.weight)
            else:
                inp_embeddings = self.input_embedding(generation[t])  # [B, E]
            inp_embeddings = torch.cat((inp_embeddings, cond_inp[t + 1]), -1)
            last_rnn_outp, hidden = self.rnn(inp_embeddings[None, :, :], hidden)
            last_rnn_outp = last_rnn_outp[0]

    return generation
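The indep_bernoulli branch above emits a multi-hot vector per step, with each vocabulary entry switched on or off independently, whereas the Categorical branch picks a single index per step. A minimal sketch of the difference:

import torch
from torch.distributions import Bernoulli, Categorical

scores = torch.randn(3, 10)                       # [B, V] logits
multi_hot = Bernoulli(logits=scores).sample()     # [3, 10], entries in {0, 1}
single_idx = Categorical(logits=scores).sample()  # [3], one index per row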