This page collects typical usage examples of the Python method torch.autograd.Variable.mean. If you have been wondering what Variable.mean does, or how to use it in practice, the curated examples below may help. You can also explore further usage examples of its containing class, torch.autograd.Variable.
The following presents 5 code examples of Variable.mean, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
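Before the extracted examples, here is a minimal self-contained sketch of Variable.mean itself (an illustration assuming the legacy PyTorch autograd API, 0.3 and earlier, where tensors are wrapped in Variable; since 0.4 the same calls work directly on tensors):

import torch
from torch.autograd import Variable

x = Variable(torch.randn(4, 3), requires_grad=True)

m_all = x.mean()         # mean over all elements -> one-element Variable
m_col = x.mean(0, True)  # mean over dim 0 with keepdim -> shape (1, 3)

m_all.backward()         # gradients flow through mean
print(x.grad)            # each entry is 1 / x.numel() = 1/12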
Example 1: execute_baseline
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mean [as alias]
def execute_baseline(points, K, n_samples, sigma2, reg_factor, mode='test'):
    # Assumes module-level helpers gnn, create_input, sample_K, compute_reward,
    # and the tensor types dtype / dtype_l.
    bs, N, _ = points.size()
    e = torch.zeros(bs, N).type(dtype_l)
    input, dists = create_input(points.data, sigma2)
    loss_total = Variable(torch.zeros(1).type(dtype))
    scores, _ = gnn(input)
    probs = F.softmax(scores.permute(2, 1, 0)).permute(2, 1, 0)
    if mode == 'train':
        Lgp = Variable(torch.zeros(n_samples, bs).type(dtype))
        Reward2 = Variable(torch.zeros(n_samples, bs).type(dtype))
        Reward3 = Variable(torch.zeros(n_samples, bs).type(dtype))
        for i in range(n_samples):
            Samplei, Lgp[i] = sample_K(probs, K, 'train')
            Reward2[i], _, _ = compute_reward(Samplei, K, points)
        # Baseline: mean reward over the sample dimension, broadcast back.
        baseline = Reward2.mean(0, True).expand_as(Reward3)
        loss = ((Reward2 - baseline) * Lgp).sum(1).sum(0) / n_samples / bs
        loss_total = loss_total + loss
        show_loss = Reward2.data.mean()
    sample, lgp = sample_K(probs, K, 'test')
    reward, _, c = compute_reward(sample, K, points)
    if mode == 'test':
        show_loss = reward.data.mean()
    if mode == 'test':
        return sample, None, show_loss, c
    else:
        return sample, loss_total, show_loss, c
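The Variable.mean call that matters above is the baseline: the sampled rewards are averaged over the sample dimension (dim 0) and broadcast back with expand_as, so each sample's REINFORCE term is centered by the mean reward. A distilled sketch of just that pattern, with toy shapes and hypothetical names:

n_samples, bs = 8, 16
rewards = Variable(torch.randn(n_samples, bs))
log_probs = Variable(torch.randn(n_samples, bs))

baseline = rewards.mean(0, True).expand_as(rewards)  # (1, bs) -> (n_samples, bs)
loss = ((rewards - baseline) * log_probs).sum(1).sum(0) / n_samples / bs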
Example 2: execute
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mean [as alias]
def execute(points, K, n_samples, sigma2, reg_factor, mode='test'):
    # Assumes module-level helpers gnn, create_input, sample_one, compute_reward,
    # compute_variance, update_input, plus the flag last and types dtype / dtype_l.
    bs, N, _ = points.size()
    e = torch.zeros(bs, N).type(dtype_l)
    input, dists = create_input(points.data, sigma2)
    loss_total = Variable(torch.zeros(1).type(dtype))
    for k in range(K):
        scores, _ = gnn(input)
        probs = F.sigmoid(scores)
        if mode == 'train':
            variance = compute_variance(e, probs)
            variance = variance.sum() / bs
            Lgp = Variable(torch.zeros(n_samples, bs).type(dtype))
            Reward2 = Variable(torch.zeros(n_samples, bs).type(dtype))
            Reward3 = Variable(torch.zeros(n_samples, bs).type(dtype))
            for i in range(n_samples):
                Samplei, Lgp[i] = sample_one(probs, 'train')
                Ei = e * 2 + Samplei.long()
                Reward2[i], _, _ = compute_reward(Ei, k + 1, points)
            # Baseline: mean reward over samples, as in execute_baseline.
            baseline = Reward2.mean(0, True).expand_as(Reward3)
            loss = 0.0
            if (last and k == K - 1) or not last:
                loss = ((Reward2 - baseline) * Lgp).sum(1).sum(0) / n_samples / bs
            loss_total = loss_total + loss - reg_factor * variance
            show_loss = Reward2.data.mean()
        sample, lgp = sample_one(probs, 'test')
        e = e * 2 + sample.long()
        reward, _, c = compute_reward(e, k + 1, points)
        if mode == 'test':
            show_loss = reward.data.mean()
        if k < K - 1:
            input = update_input(input, dists, sample, sigma2, e, k + 1)
    if mode == 'test':
        return e, None, show_loss, c
    else:
        return e, loss_total, show_loss, c
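Besides the baseline, this example uses show_loss = Reward2.data.mean() for logging: calling mean on the underlying .data tensor rather than on the Variable keeps the reduction out of the autograd graph. A minimal sketch (hypothetical tensor; on the legacy API, Tensor.mean() returns a plain Python number):

rewards = Variable(torch.randn(8, 16))
show_loss = rewards.data.mean()  # scalar for logging, no graph attached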
Example 3: execute
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mean [as alias]
def execute(Knap, scales, weights, volumes, C, masks, n_samples_base, mode='test'):
    # Assumes module-level helpers create_input, decide, decide2, rearange,
    # compute_stuff, and the tensor type dtype.
    Ns, NNs = masks
    bs = C.size(0)
    C2 = C
    w_total = Variable(torch.zeros(bs).type(dtype))
    if mode == 'train':
        loss_total = Variable(torch.zeros(1).type(dtype))
    for s in range(scales):
        n_samples = n_samples_base
        last_scale = s == scales - 1
        if last_scale:
            C2 = C
        else:
            C2 = C / 2
        input = create_input(weights.data, volumes.data, C2.data, masks)
        prob_scores = Knap(input)
        if mode == 'train':
            ws = Variable(torch.zeros(bs, n_samples).type(dtype))
            lgps = Variable(torch.zeros(bs, n_samples).type(dtype))
            for i in range(n_samples):
                mask_chosen2, inds2 = decide2(prob_scores.data, volumes.data, C2.data, n_samples)
                prob_scores2, weights2, volumes2, masks2 = rearange(prob_scores, weights, volumes, masks, inds2, mask_chosen2)
                lgp, w, v = compute_stuff(mask_chosen2, prob_scores2, weights2, volumes2)
                C3 = C - v
                if not last_scale:
                    w_rec, C_rec, _ = execute(Knap, scales - 1 - s, weights, volumes, C3, masks, n_samples)
                    w = w + w_rec
                    C3 = C_rec
                ws[:, i] = w
                lgps[:, i] = lgp
            # Baseline: mean sampled weight per batch element (samples on dim 1).
            b = ws.mean(1, True).expand_as(ws)
            loss = -(lgps * Variable((ws - b).data)).sum(1).sum(0) / n_samples / bs
            loss_total = loss_total + loss
        mask_chosen, inds = decide(prob_scores.data, volumes.data, C2.data)
        # Reorder the inputs and update the masks.
        prob_scores, weights, volumes, masks = rearange(prob_scores, weights, volumes, masks, inds, mask_chosen)
        _, w, v = compute_stuff(mask_chosen, prob_scores, weights, volumes)
        w_total = w_total + w
        C = C - v
    if mode == 'train':
        return loss_total, w_total, C, masks
    else:
        return w_total, C, masks
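Note that unlike Examples 1 and 2, this example stores the samples along dim 1 (one column per sample), so the baseline is taken with mean(1, True) and broadcast across the columns. The same pattern in isolation, with toy shapes:

bs, n_samples = 16, 8
ws = Variable(torch.randn(bs, n_samples))
b = ws.mean(1, True).expand_as(ws)  # (bs, 1) -> (bs, n_samples)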
Example 4: Variable
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mean [as alias]
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable

import simple_vae  # project-local module providing ReparamNormal / StandardNormal

# Visualize p(x|z) for a single z drawn from the prior.
px = simple_vae.ReparamNormal(1, 100, 1)
x = np.linspace(-4, 4, 1000)
x = Variable(torch.FloatTensor(x).unsqueeze(1))
prior_z = simple_vae.StandardNormal((1000, 1))
z = prior_z.sample()
px.condition(z)
lp = px(x)
plt.clf()
plt.scatter(x.data.numpy(), lp.exp().data.numpy())

# Build a toy bimodal dataset: standardized z, and x = cos(z) + noise,
# duplicated with a shift of 2 and standardized again.
N = 1000
z = np.random.randn(N)
z = (z - z.mean()) / z.std()
x = np.cos(z) + 0.1 * np.random.randn(N)
x = np.concatenate([x, x + 2])
z = np.concatenate([z, z])  # duplicate z so it stays paired with the doubled x
x = (x - x.mean()) / x.std()
X_var = Variable(torch.FloatTensor(x).unsqueeze(1))
Z_var = Variable(torch.FloatTensor(z).unsqueeze(1))
plt.clf()
plt.scatter(z, x)
plt.clf()
plt.hist(x, bins=100)
qz = simple_vae.ReparamNormal(1, 100, 1)
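In this example, mean is called on NumPy arrays to standardize the data before it is wrapped in Variables. If the standardization itself had to be differentiable, the same computation can be written with Variable.mean and Variable.std; a sketch assuming a broadcasting-capable PyTorch (0.2+):

v = Variable(torch.randn(1000, 1), requires_grad=True)
v_standardized = (v - v.mean()) / v.std()  # gradients flow through mean and std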
Example 5: discrete_gan
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mean [as alias]
def discrete_gan(nets, inputs, measure=None, penalty=None, n_samples=10, reinforce=False, gamma=0.95,
                 penalty_type='gradient_norm', use_beta=False, test_mode=False, use_sm=False):
    # Assumes module-level constants DIM_C, DIM_X, DIM_Y, the helpers
    # f_divergence and log_sum_exp, and a module-level log_Z estimate.
    global log_Z
    log_M = math.log(n_samples)
    discriminator = nets['discriminator']
    generator = nets['generator']
    M = n_samples
    X = (inputs['images'] >= 0).float()
    Z = inputs['z']
    R = inputs['r']
    U = inputs['u']
    B = inputs['z'].size()[0]
    log_B = math.log(B)
    if R.size()[1] != DIM_C * n_samples * DIM_X * DIM_Y:
        R = inputs['r_t']
    assert R.size() == (B, DIM_C * n_samples * DIM_X * DIM_Y), (R.size(), (B, DIM_C * n_samples * DIM_X * DIM_Y))
    try:
        R = R.view(M, -1, DIM_C * DIM_X * DIM_Y)
    except BaseException:
        R = R.view(M, -1, DIM_C * DIM_X * DIM_Y)
    U.requires_grad = False
    logit = generator(Z)
    assert logit.size()[1:] == X.size()[1:], (logit.size(), X.size())
    g_output = F.sigmoid(logit)
    g_output_ = g_output.view(-1, DIM_C * DIM_X * DIM_Y)
    # Sample discrete outputs by thresholding uniform noise against the probabilities.
    S = (R <= g_output_).float()
    S = S.view(M, -1, DIM_C, DIM_X, DIM_Y)
    S_ = Variable(S.data.cuda(), volatile=True)
    S = Variable(S.data.cuda(), requires_grad=False)
    gen_out = (U <= g_output_).float()
    gen_out = gen_out.view(-1, DIM_C, DIM_X, DIM_Y)
    real_out = discriminator(X)
    fake_out = discriminator(S.view(-1, DIM_C, DIM_X, DIM_Y))
    fake_out_ = discriminator(S_.view(-1, DIM_C, DIM_X, DIM_Y))
    log_g = -((1. - S) * logit + F.softplus(-logit)).sum(2).sum(2).sum(2)
    if (measure == 'w' and not test_mode) or use_sm:
        fake_out_sm = discriminator(g_output)
        d_loss, g_loss, r, f, w, b = f_divergence(measure, real_out, fake_out_sm)
    else:
        d_loss, g_loss, r, f, w, b = f_divergence(measure, real_out, fake_out.view(M, B, -1))
    if measure in ('gan', 'jsd', 'rkl', 'kl', 'sh', 'proxy_gan', 'dv') and not use_sm:
        # Importance weights from the discriminator, normalized by an estimate
        # of the log partition function (log_alpha per sample, log_beta overall).
        log_w = Variable(fake_out_.data.cuda(), requires_grad=False).view(M, B)
        log_beta = log_sum_exp(log_w.view(M * B, -1) - log_M - log_B, axis=0)
        log_alpha = log_sum_exp(log_w - log_M, axis=0)
        if use_beta:
            log_Z_est = log_beta
            log_w_tilde = log_w - log_Z_est - log_M - log_B
        else:
            log_Z_est = log_alpha
            log_w_tilde = log_w - log_Z_est - log_M
        w_tilde = torch.exp(log_w_tilde)
        alpha = torch.exp(log_alpha)
        beta = torch.exp(log_beta)
    elif measure == 'xs':
        w = (fake_out / 2. + 1.).view(M, B)
        w_tilde = w / w.sum(0)
        log_Z_est = torch.log(torch.mean(w))
    elif measure == 'w' or use_sm:
        log_w = Variable(torch.Tensor([0.]).float()).cuda()
        log_Z_est = Variable(torch.Tensor([0.]).float()).cuda()
        w_tilde = Variable(torch.Tensor([0.]).float()).cuda()
    else:
        raise NotImplementedError(measure)
    if measure != 'w' and not use_sm:
        if reinforce:
            r = (log_w - log_Z)
            assert not r.requires_grad
            g_loss = -(r * log_g).sum(0).mean()
        else:
            w_tilde = Variable(w_tilde.data.cuda(), requires_grad=False)
            assert not w_tilde.requires_grad
            if use_beta:
                g_loss = -((w_tilde * log_g).view(M * B)).sum(0).mean()
            else:
                g_loss = -(w_tilde * log_g).sum(0).mean()
    # Scalar diagnostics; .mean() reduces each quantity to a single number.
    results = dict(g_loss=g_loss.data[0], distance=-d_loss.data[0], boundary=torch.mean(b).data[0],
                   real=torch.mean(r).data[0], fake=torch.mean(f).data[0],
                   gen_out=g_output.mean().data[0], w_tilde=w_tilde.mean().data[0],
                   real_out=real_out.mean().data[0], fake_out=fake_out.mean().data[0])
    if measure != 'w' and not use_sm:
        results.update(alpha=alpha.mean().data[0], log_alpha=log_alpha.mean().data[0],
# ... (remainder of this example omitted on the source page)