This article collects typical usage examples of the Python method torch.distributions.categorical.Categorical.entropy. If you have been wondering what exactly Categorical.entropy does, how to call it, or where to find it used in real code, the curated examples below should help. You can also explore further usage examples of the containing class, torch.distributions.categorical.Categorical.
Two code examples of the Categorical.entropy method are shown below, sorted by popularity by default.
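Before the full class examples, here is a minimal sketch of calling Categorical.entropy directly; the probability values are purely illustrative:

import torch
from torch.distributions.categorical import Categorical

# Illustrative probabilities over four categories; Categorical normalizes any
# non-negative vector so that it sums to 1.
dist = Categorical(torch.tensor([0.25, 0.25, 0.25, 0.25]))

# entropy() returns the Shannon entropy in nats, i.e. -(p * p.log()).sum(-1);
# for a uniform distribution over 4 outcomes this is log(4) ≈ 1.3863.
print(dist.entropy())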
Example 1: OneHotCategorical
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import entropy [as alias]
import torch
from torch.autograd import Variable
from torch.distributions import constraints
from torch.distributions.categorical import Categorical
from torch.distributions.distribution import Distribution


class OneHotCategorical(Distribution):
    r"""
    Creates a one-hot categorical distribution parameterized by `probs`.

    Samples are one-hot coded vectors of size probs.size(-1).

    See also: :func:`torch.distributions.Categorical`

    Example::

        >>> m = OneHotCategorical(torch.Tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
         0
         0
         1
         0
        [torch.FloatTensor of size 4]

    Args:
        probs (Tensor or Variable): event probabilities
    """
    params = {'probs': constraints.simplex}
    support = constraints.simplex
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None):
        self._categorical = Categorical(probs, logits)
        batch_shape = self._categorical.probs.size()[:-1]
        event_shape = self._categorical.probs.size()[-1:]
        super(OneHotCategorical, self).__init__(batch_shape, event_shape)

    def sample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        probs = self._categorical.probs
        one_hot = probs.new(self._extended_shape(sample_shape)).zero_()
        indices = self._categorical.sample(sample_shape)
        if indices.dim() < one_hot.dim():
            indices = indices.unsqueeze(-1)
        return one_hot.scatter_(-1, indices, 1)

    def log_prob(self, value):
        indices = value.max(-1)[1]
        return self._categorical.log_prob(indices)

    def entropy(self):
        return self._categorical.entropy()

    def enumerate_support(self):
        probs = self._categorical.probs
        n = self.event_shape[0]
        if isinstance(probs, Variable):
            values = Variable(torch.eye(n, out=probs.data.new(n, n)))
        else:
            values = torch.eye(n, out=probs.new(n, n))
        values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
        return values.expand((n,) + self.batch_shape + (n,))
Example 2: OneHotCategorical
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import entropy [as alias]
import torch
from torch.distributions import constraints
from torch.distributions.categorical import Categorical
from torch.distributions.distribution import Distribution


class OneHotCategorical(Distribution):
    r"""
    Creates a one-hot categorical distribution parameterized by :attr:`probs` or
    :attr:`logits`.

    Samples are one-hot coded vectors of size ``probs.size(-1)``.

    .. note:: :attr:`probs` will be normalized to sum to 1.

    See also: :func:`torch.distributions.Categorical` for specifications of
    :attr:`probs` and :attr:`logits`.

    Example::

        >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
        tensor([ 0.,  0.,  0.,  1.])

    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities
    """
    arg_constraints = {'probs': constraints.simplex}
    support = constraints.simplex
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        self._categorical = Categorical(probs, logits)
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super(OneHotCategorical, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @property
    def probs(self):
        return self._categorical.probs

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def mean(self):
        return self._categorical.probs

    @property
    def variance(self):
        return self._categorical.probs * (1 - self._categorical.probs)

    @property
    def param_shape(self):
        return self._categorical.param_shape

    def sample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        probs = self._categorical.probs
        one_hot = probs.new(self._extended_shape(sample_shape)).zero_()
        indices = self._categorical.sample(sample_shape)
        if indices.dim() < one_hot.dim():
            indices = indices.unsqueeze(-1)
        return one_hot.scatter_(-1, indices, 1)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        indices = value.max(-1)[1]
        return self._categorical.log_prob(indices)

    def entropy(self):
        return self._categorical.entropy()

    def enumerate_support(self):
        n = self.event_shape[0]
        values = self._new((n, n))
        torch.eye(n, out=values)
        values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
        return values.expand((n,) + self.batch_shape + (n,))
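As a quick usage sketch of the class shown in Example 2 (using PyTorch's built-in OneHotCategorical, with purely illustrative probability values): entropy() delegates to the wrapped Categorical, so both distributions report the same entropy.

import torch
from torch.distributions import Categorical, OneHotCategorical

# Illustrative, non-uniform probabilities over four categories.
probs = torch.tensor([0.1, 0.2, 0.3, 0.4])

one_hot = OneHotCategorical(probs)
categorical = Categorical(probs)

# OneHotCategorical.entropy() simply calls Categorical.entropy(),
# so both print roughly tensor(1.2799) (entropy in nats).
print(one_hot.entropy())
print(categorical.entropy())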