This article collects typical usage examples of the Python method torch.distributions.categorical.Categorical.sample. If you are unsure what Categorical.sample does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also read further about the class this method belongs to, torch.distributions.categorical.Categorical.
The following shows 12 code examples of Categorical.sample, sorted by popularity by default.
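As a quick orientation before the collected examples, here is a minimal, self-contained sketch of the basic Categorical.sample workflow; the probabilities and shapes below are purely illustrative and not taken from the examples that follow.

import torch
from torch.distributions.categorical import Categorical

# A distribution over 3 classes; probs must be non-negative and is
# normalized along the last dimension.
dist = Categorical(probs=torch.tensor([0.2, 0.3, 0.5]))

index = dist.sample()        # scalar LongTensor: one class index in {0, 1, 2}
batch = dist.sample([4])     # shape [4]: four independent class indices
logp = dist.log_prob(batch)  # shape [4]: log-probability of each sampled index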
Example 1: sample_gmm
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
def sample_gmm(batch_size, mixture_weights):
    cat = Categorical(probs=mixture_weights)
    cluster = cat.sample([batch_size])  # [B]
    mean = (cluster * 10.).float().cuda()
    std = torch.ones([batch_size]).cuda() * 5.
    norm = Normal(mean, std)
    samp = norm.sample()
    samp = samp.view(batch_size, 1)
    return samp
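A hypothetical call to the function above; the mixture weights are made up for illustration, and a CUDA device is assumed because the function moves tensors to the GPU.

mixture_weights = torch.softmax(torch.randn(3), dim=0).cuda()          # 3 illustrative component weights
samples = sample_gmm(batch_size=64, mixture_weights=mixture_weights)   # shape [64, 1]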
Example 2: sample_true2
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
def sample_true2():
    # Pick a component index from the true mixture weights, then draw from the
    # corresponding Gaussian (mean = 10 * component index, std = 5).
    cat = Categorical(probs=torch.tensor(true_mixture_weights))
    cluster = cat.sample()
    norm = Normal(torch.tensor([cluster * 10.]).float(), torch.tensor([5.0]).float())
    samp = norm.sample()
    return samp, cluster
Example 3: OneHotCategorical
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
class OneHotCategorical(Distribution):
    r"""
    Creates a one-hot categorical distribution parameterized by `probs`.

    Samples are one-hot coded vectors of size probs.size(-1).

    See also: :func:`torch.distributions.Categorical`

    Example::

        >>> m = OneHotCategorical(torch.Tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
         0
         0
         1
         0
        [torch.FloatTensor of size 4]

    Args:
        probs (Tensor or Variable): event probabilities
    """
    params = {'probs': constraints.simplex}
    support = constraints.simplex
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None):
        self._categorical = Categorical(probs, logits)
        batch_shape = self._categorical.probs.size()[:-1]
        event_shape = self._categorical.probs.size()[-1:]
        super(OneHotCategorical, self).__init__(batch_shape, event_shape)

    def sample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        probs = self._categorical.probs
        one_hot = probs.new(self._extended_shape(sample_shape)).zero_()
        indices = self._categorical.sample(sample_shape)
        if indices.dim() < one_hot.dim():
            indices = indices.unsqueeze(-1)
        return one_hot.scatter_(-1, indices, 1)

    def log_prob(self, value):
        indices = value.max(-1)[1]
        return self._categorical.log_prob(indices)

    def entropy(self):
        return self._categorical.entropy()

    def enumerate_support(self):
        probs = self._categorical.probs
        n = self.event_shape[0]
        if isinstance(probs, Variable):
            values = Variable(torch.eye(n, out=probs.data.new(n, n)))
        else:
            values = torch.eye(n, out=probs.new(n, n))
        values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
        return values.expand((n,) + self.batch_shape + (n,))
Example 4: test_gmm_loss
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
def test_gmm_loss(self):
    """ Test case 1 """
    n_samples = 10000

    means = torch.Tensor([[0., 0.],
                          [1., 1.],
                          [-1., 1.]])
    stds = torch.Tensor([[.03, .05],
                         [.02, .1],
                         [.1, .03]])
    pi = torch.Tensor([.2, .3, .5])

    cat_dist = Categorical(pi)
    indices = cat_dist.sample((n_samples,)).long()
    rands = torch.randn(n_samples, 2)
    samples = means[indices] + rands * stds[indices]

    class _model(nn.Module):
        def __init__(self, gaussians):
            super().__init__()
            self.means = nn.Parameter(torch.Tensor(1, gaussians, 2).normal_())
            self.pre_stds = nn.Parameter(torch.Tensor(1, gaussians, 2).normal_())
            self.pi = nn.Parameter(torch.Tensor(1, gaussians).normal_())

        def forward(self, *inputs):
            return self.means, torch.exp(self.pre_stds), f.softmax(self.pi, dim=1)

    model = _model(3)
    optimizer = torch.optim.Adam(model.parameters())

    iterations = 100000
    log_step = iterations // 10
    pbar = tqdm(total=iterations)
    cum_loss = 0
    for i in range(iterations):
        batch = samples[torch.LongTensor(128).random_(0, n_samples)]
        m, s, p = model.forward()
        loss = gmm_loss(batch, m, s, p)
        cum_loss += loss.item()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        pbar.set_postfix_str("avg_loss={:10.6f}".format(cum_loss / (i + 1)))
        pbar.update(1)
        if i % log_step == log_step - 1:
            print(m)
            print(s)
            print(p)
Example 5: sample_true
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
def sample_true(batch_size):
    cat = Categorical(probs=torch.tensor(true_mixture_weights))
    cluster = cat.sample([batch_size])  # [B]
    mean = (cluster * 10.).float()
    std = torch.ones([batch_size]) * 5.
    norm = Normal(mean, std)
    samp = norm.sample()
    samp = samp.view(batch_size, 1)
    return samp
Example 6: reinforce_baseline
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
def reinforce_baseline(surrogate, x, logits, mixtureweights, k=1, get_grad=False):
    B = logits.shape[0]
    probs = torch.softmax(logits, dim=1)
    outputs = {}
    cat = Categorical(probs=probs)
    grads = []
    for jj in range(k):
        cluster_H = cat.sample()
        outputs['logq'] = logq = cat.log_prob(cluster_H).view(B, 1)
        outputs['logpx_given_z'] = logpx_given_z = logprob_undercomponent(x, component=cluster_H)
        outputs['logpz'] = logpz = torch.log(mixtureweights[cluster_H]).view(B, 1)
        logpxz = logpx_given_z + logpz  # [B,1]

        surr_pred = surrogate.net(x)
        outputs['f'] = f = logpxz - logq - 1.
        # REINFORCE with the surrogate prediction used as a baseline (control variate)
        outputs['net_loss'] = net_loss = - torch.mean((f.detach() - surr_pred.detach()) * logq)

        # Surrogate loss: make (f - baseline) * grad(log q) small
        grad_logq = torch.autograd.grad([torch.mean(logq)], [logits], create_graph=True, retain_graph=True)[0]
        surr_loss = torch.mean(((f.detach() - surr_pred) * grad_logq) ** 2)

        if get_grad:
            grad = torch.autograd.grad([net_loss], [logits], create_graph=True, retain_graph=True)[0]
            grads.append(grad)

    if get_grad:
        grads = torch.stack(grads)
        outputs['grad_avg'] = torch.mean(torch.mean(grads, dim=0), dim=0)
        outputs['grad_std'] = torch.std(grads, dim=0)[0]

    outputs['surr_loss'] = surr_loss
    return outputs
Example 7: reinforce
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
def reinforce(x, logits, mixtureweights, k=1):
    B = logits.shape[0]
    probs = torch.softmax(logits, dim=1)
    cat = Categorical(probs=probs)
    net_loss = 0
    for jj in range(k):
        cluster_H = cat.sample()
        logq = cat.log_prob(cluster_H).view(B, 1)
        logpx_given_z = logprob_undercomponent(x, component=cluster_H)
        logpz = torch.log(mixtureweights[cluster_H]).view(B, 1)
        logpxz = logpx_given_z + logpz  # [B,1]
        f = logpxz - logq
        net_loss += - torch.mean((f.detach() - 1.) * logq)
    net_loss = net_loss / k
    return net_loss, f, logpx_given_z, logpz, logq
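Examples 6, 7, and 9 all rely on the same score-function (REINFORCE) trick: the draw returned by Categorical.sample is not differentiable, so a surrogate loss of the form -f(z) * log q(z) is minimized, whose gradient with respect to the logits equals f(z) * grad log q(z) in expectation. Below is a minimal, self-contained sketch of that pattern; the toy reward f is made up purely for illustration.

import torch
from torch.distributions.categorical import Categorical

logits = torch.zeros(1, 3, requires_grad=True)
cat = Categorical(probs=torch.softmax(logits, dim=1))

z = cat.sample()                                # non-differentiable draw
f = (z.float() - 1.0) ** 2                      # illustrative reward evaluated at the sample
loss = -(f.detach() * cat.log_prob(z)).mean()   # surrogate loss
loss.backward()                                 # logits.grad now holds a one-sample REINFORCE estimate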
Example 8: OneHotCategorical
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
class OneHotCategorical(Distribution):
    r"""
    Creates a one-hot categorical distribution parameterized by :attr:`probs` or
    :attr:`logits`.

    Samples are one-hot coded vectors of size ``probs.size(-1)``.

    .. note:: :attr:`probs` will be normalized to sum to 1.

    See also: :func:`torch.distributions.Categorical` for specifications of
    :attr:`probs` and :attr:`logits`.

    Example::

        >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
        tensor([ 0.,  0.,  0.,  1.])

    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities
    """
    arg_constraints = {'probs': constraints.simplex}
    support = constraints.simplex
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        self._categorical = Categorical(probs, logits)
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super(OneHotCategorical, self).__init__(batch_shape, event_shape,
                                                validate_args=validate_args)

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @property
    def probs(self):
        return self._categorical.probs

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def mean(self):
        return self._categorical.probs

    @property
    def variance(self):
        return self._categorical.probs * (1 - self._categorical.probs)

    @property
    def param_shape(self):
        return self._categorical.param_shape

    def sample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        probs = self._categorical.probs
        one_hot = probs.new(self._extended_shape(sample_shape)).zero_()
        indices = self._categorical.sample(sample_shape)
        if indices.dim() < one_hot.dim():
            indices = indices.unsqueeze(-1)
        return one_hot.scatter_(-1, indices, 1)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        indices = value.max(-1)[1]
        return self._categorical.log_prob(indices)

    def entropy(self):
        return self._categorical.entropy()

    def enumerate_support(self):
        n = self.event_shape[0]
        values = self._new((n, n))
        torch.eye(n, out=values)
        values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
        return values.expand((n,) + self.batch_shape + (n,))
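A brief, hypothetical usage sketch of the class defined above; the probabilities and sample shapes are illustrative.

probs = torch.tensor([0.1, 0.2, 0.3, 0.4])
m = OneHotCategorical(probs=probs)

s = m.sample()                     # a one-hot vector of size 4, e.g. tensor([0., 0., 1., 0.])
batch = m.sample((5,))             # shape [5, 4]: five independent one-hot draws
logp = m.log_prob(batch)           # shape [5]: log-probability of each draw
support = m.enumerate_support()    # shape [4, 4]: one row per category (the identity matrix)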
Example 9: print
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
# REINFORCE
print('REINFORCE')
grads = []
for i in range(N):
    dist = Categorical(logits=logits)
    samp = dist.sample()
    logprob = dist.log_prob(samp)
    reward = f(samp)
    gradlogprob = torch.autograd.grad(outputs=logprob, inputs=(logits), retain_graph=True)[0]
    grads.append(reward * gradlogprob)
print()

grads = torch.stack(grads).view(N, C)
grad_mean_reinforce = torch.mean(grads, dim=0)
grad_std_reinforce = torch.std(grads, dim=0)
print('REINFORCE')
print('mean:', grad_mean_reinforce)
print('std:', grad_std_reinforce)
print()
Example 10: LogitRelaxedBernoulli
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
# dist = LogitRelaxedBernoulli(torch.Tensor([1.]), bern_param)
# dist_bernoulli = Bernoulli(bern_param)
C = 2
n_components = C
B = 1
probs = torch.ones(B, C)
bern_param = bern_param.view(B, 1)
aa = 1 - bern_param
probs = torch.cat([aa, bern_param], dim=1)
cat = Categorical(probs=probs)
grads = []
for i in range(n):
    b = cat.sample()
    logprob = cat.log_prob(b.detach())
    logprobgrad = torch.autograd.grad(outputs=logprob, inputs=(bern_param), retain_graph=True)[0]
    grad = f(b) * logprobgrad
    grads.append(grad[0][0].data.numpy())

print('Grad Estimator: REINFORCE categorical')
print('Grad mean', np.mean(grads))
print('Grad std', np.std(grads))
print()

reinforce_cat_grad_means.append(np.mean(grads))
reinforce_cat_grad_stds.append(np.std(grads))
Example 11: simplax
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
def simplax():

    def show_surr_preds():
        batch_size = 1
        rows = 3
        cols = 1
        fig = plt.figure(figsize=(10 + cols, 4 + rows), facecolor='white')
        for i in range(rows):
            x = sample_true(1).cuda()
            logits = encoder.net(x)
            probs = torch.softmax(logits, dim=1)
            cat = RelaxedOneHotCategorical(probs=probs, temperature=torch.tensor([1.]).cuda())
            cluster_S = cat.rsample()
            cluster_H = H(cluster_S)
            logprob_cluster = cat.log_prob(cluster_S.detach()).view(batch_size, 1)
            check_nan(logprob_cluster)

            z = cluster_S
            n_evals = 40
            x1 = np.linspace(-9, 205, n_evals)
            x = torch.from_numpy(x1).view(n_evals, 1).float().cuda()
            z = z.repeat(n_evals, 1)
            cluster_H = cluster_H.repeat(n_evals, 1)
            xz = torch.cat([z, x], dim=1)

            logpxz = logprob_undercomponent(x, component=cluster_H,
                                            needsoftmax_mixtureweight=needsoftmax_mixtureweight,
                                            cuda=True)
            f = logpxz  # - logprob_cluster
            surr_pred = surrogate.net(xz)
            surr_pred = surr_pred.data.cpu().numpy()
            f = f.data.cpu().numpy()

            col = 0
            row = i
            ax = plt.subplot2grid((rows, cols), (row, col), frameon=False, colspan=1, rowspan=1)
            ax.plot(x1, surr_pred, label='Surr')
            ax.plot(x1, f, label='f')
            ax.set_title(str(cluster_H[0]))
            ax.legend()

        plt_path = exp_dir + 'gmm_surr.png'
        plt.savefig(plt_path)
        print('saved training plot', plt_path)
        plt.close()

    def plot_dist():
        mixture_weights = torch.softmax(needsoftmax_mixtureweight, dim=0)
        rows = 1
        cols = 1
        fig = plt.figure(figsize=(10 + cols, 4 + rows), facecolor='white')
        col = 0
        row = 0
        ax = plt.subplot2grid((rows, cols), (row, col), frameon=False, colspan=1, rowspan=1)

        xs = np.linspace(-9, 205, 300)
        sum_ = np.zeros(len(xs))
        for c in range(n_components):
            m = Normal(torch.tensor([c * 10.]).float(), torch.tensor([5.0]).float())
            ys = []
            for x in xs:
                component_i = (torch.exp(m.log_prob(x)) * mixture_weights[c]).detach().cpu().numpy()
                ys.append(component_i)
            ys = np.reshape(np.array(ys), [-1])
            sum_ += ys
            ax.plot(xs, ys, label='')
        ax.plot(xs, sum_, label='')

        plt_path = exp_dir + 'gmm_plot_dist.png'
        plt.savefig(plt_path)
        print('saved training plot', plt_path)
        plt.close()

    # ... (the rest of this example is omitted here) ...
Example 12: range
# Required import: from torch.distributions.categorical import Categorical [as alias]
# Or: from torch.distributions.categorical.Categorical import sample [as alias]
L2_losses = []
steps_list = []
for step in range(n_steps):
    optim.zero_grad()

    loss = 0
    net_loss = 0
    for i in range(batch_size):
        x = sample_true()
        logits = encoder.net(x)
        cat = Categorical(probs=torch.softmax(logits, dim=0))
        cluster = cat.sample()
        logprob_cluster = cat.log_prob(cluster.detach())
        pxz = logprob_undercomponent(x, component=cluster,
                                     needsoftmax_mixtureweight=needsoftmax_mixtureweight,
                                     cuda=False)
        f = pxz - logprob_cluster
        net_loss += -f.detach() * logprob_cluster
        loss += -f

    loss = loss / batch_size
    net_loss = net_loss / batch_size

    loss.backward(retain_graph=True)
    optim.step()