This article collects and summarizes typical usage examples of the Python method torch.autograd.variable.Variable. If you are wondering what variable.Variable does, how it is used, or what real-world calls look like, the curated examples below should help. You can also explore further usage examples from the module that contains the method, torch.autograd.variable.
The following presents 15 code examples of the variable.Variable method, drawn from open-source projects and ordered by popularity by default.
Example 1: m_ggnn
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def m_ggnn(self, h_v, h_w, e_vw, opt={}):
    # Accumulate messages from every neighbour w into m.
    m = Variable(torch.zeros(h_w.size(0), h_w.size(1), self.args['out']).type_as(h_w.data))
    for w in range(h_w.size(1)):
        # Skip neighbours with no incident edges (numel() is robust across
        # PyTorch versions, unlike truthiness of the nonzero() size).
        if torch.nonzero(e_vw[:, w, :].data).numel():
            for i, el in enumerate(self.args['e_label']):
                # Mask selecting the graphs whose edge (v, w) carries label el.
                ind = (el == e_vw[:, w, :]).type_as(self.learn_args[0][i])
                parameter_mat = self.learn_args[0][i][None, ...].expand(h_w.size(0), self.learn_args[0][i].size(0),
                                                                        self.learn_args[0][i].size(1))
                # Batched matrix-vector product of the label matrix with h_w.
                m_w = torch.transpose(torch.bmm(torch.transpose(parameter_mat, 1, 2),
                                                torch.transpose(torch.unsqueeze(h_w[:, w, :], 1),
                                                                1, 2)), 1, 2)
                m_w = torch.squeeze(m_w)
                m[:, w, :] = ind.expand_as(m_w) * m_w
    return m
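To make the broadcasting above concrete, here is a minimal standalone sketch of the same per-edge-label message computation, with hypothetical sizes (B graphs, N neighbours, hidden width d) and the transposes simplified away; it illustrates the idea rather than reproducing the class method:

import torch
from torch.autograd import Variable

B, N, d = 2, 4, 8                                  # hypothetical sizes
e_label = [0.0, 1.0]                               # possible edge labels
A = [torch.randn(d, d) for _ in e_label]           # one learned matrix per label

h_w = Variable(torch.randn(B, N, d))               # neighbour hidden states
e_vw = Variable(torch.rand(B, N, 1).round())       # one edge label per neighbour

m = Variable(torch.zeros(B, N, d))
for w in range(N):
    for i, el in enumerate(e_label):
        ind = (el == e_vw[:, w, :]).float()        # graphs whose edge has label el
        A_b = A[i][None, ...].expand(B, d, d)      # broadcast A_el over the batch
        m_w = torch.bmm(A_b, h_w[:, w, :].unsqueeze(2)).squeeze(2)
        m[:, w, :] = ind.expand_as(m_w) * m_w      # keep only matching graphs
print(m.size())                                    # (B, N, d)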
Example 2: rotmat2quat_torch
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def rotmat2quat_torch(R):
    """
    Converts a batch of rotation matrices to quaternions.
    Batched PyTorch version, ported from the corresponding numpy method above.
    :param R: N * 3 * 3
    :return: N * 4
    """
    # Extract the rotation axis from the skew-symmetric part of R.
    rotdiff = R - R.transpose(1, 2)
    r = torch.zeros_like(rotdiff[:, 0])
    r[:, 0] = -rotdiff[:, 1, 2]
    r[:, 1] = rotdiff[:, 0, 2]
    r[:, 2] = -rotdiff[:, 0, 1]
    r_norm = torch.norm(r, dim=1)
    sintheta = r_norm / 2
    r0 = torch.div(r, r_norm.unsqueeze(1).repeat(1, 3) + 1e-8)  # normalized axis
    t1 = R[:, 0, 0]
    t2 = R[:, 1, 1]
    t3 = R[:, 2, 2]
    costheta = (t1 + t2 + t3 - 1) / 2  # from trace(R) = 1 + 2 * cos(theta)
    theta = torch.atan2(sintheta, costheta)
    q = Variable(torch.zeros(R.shape[0], 4)).float().cuda()
    q[:, 0] = torch.cos(theta / 2)
    q[:, 1:] = torch.mul(r0, torch.sin(theta / 2).unsqueeze(1).repeat(1, 3))
    return q
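A quick sanity check for rotmat2quat_torch (hedged: the function hard-codes .cuda(), so this assumes a CUDA device is available): a 90-degree rotation about the z axis should yield the quaternion (cos 45°, 0, 0, sin 45°):

import math
import torch

c, s = math.cos(math.pi / 2), math.sin(math.pi / 2)
R = torch.tensor([[[c, -s, 0.0],
                   [s,  c, 0.0],
                   [0.0, 0.0, 1.0]]])        # N * 3 * 3, here N = 1
q = rotmat2quat_torch(R.cuda())
print(q)                                      # approx [[0.7071, 0., 0., 0.7071]]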
Example 3: expmap2rotmat_torch
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def expmap2rotmat_torch(r):
    """
    Converts a batch of exponential-map (axis-angle) vectors to rotation matrices.
    Batched PyTorch version, ported from the corresponding method above.
    :param r: N * 3
    :return: N * 3 * 3
    """
    theta = torch.norm(r, 2, 1)  # rotation angle
    r0 = torch.div(r, theta.unsqueeze(1).repeat(1, 3) + 1e-7)  # unit axis
    # Build the skew-symmetric cross-product matrix of r0.
    r1 = torch.zeros_like(r0).repeat(1, 3)
    r1[:, 1] = -r0[:, 2]
    r1[:, 2] = r0[:, 1]
    r1[:, 5] = -r0[:, 0]
    r1 = r1.view(-1, 3, 3)
    r1 = r1 - r1.transpose(1, 2)
    n = r1.data.shape[0]
    # Rodrigues' formula: R = I + sin(theta) * K + (1 - cos(theta)) * K^2
    R = Variable(torch.eye(3, 3).repeat(n, 1, 1)).float().cuda() + torch.mul(
        torch.sin(theta).unsqueeze(1).repeat(1, 9).view(-1, 3, 3), r1) + torch.mul(
        (1 - torch.cos(theta).unsqueeze(1).repeat(1, 9).view(-1, 3, 3)), torch.matmul(r1, r1))
    return R
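The inverse direction can be checked the same way (same CUDA assumption): an exponential-map vector of magnitude pi/2 along the x axis should produce a 90-degree rotation about x:

import math
import torch

r = torch.tensor([[math.pi / 2, 0.0, 0.0]]).cuda()   # N * 3
R = expmap2rotmat_torch(r)
print(R)   # approx [[1, 0, 0], [0, 0, -1], [0, 1, 0]]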
Example 4: fkl_torch
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def fkl_torch(angles, parent, offset, rotInd, expmapInd):
    """
    PyTorch version of fkl: converts joint angles to joint locations.
    Batched port of the fkl() method above.
    :param angles: N * 99
    :param parent: parent joint index for every joint
    :param offset: bone offsets, joint_n * 3
    :param rotInd:
    :param expmapInd:
    :return: N * joint_n * 3
    """
    n = angles.data.shape[0]
    j_n = offset.shape[0]
    p3d = Variable(torch.from_numpy(offset)).float().cuda().unsqueeze(0).repeat(n, 1, 1)
    angles = angles[:, 3:].contiguous().view(-1, 3)
    R = data_utils.expmap2rotmat_torch(angles).view(n, j_n, 3, 3)
    # Walk the kinematic chain, accumulating rotations from parent to child.
    for i in np.arange(1, j_n):
        if parent[i] > 0:
            R[:, i, :, :] = torch.matmul(R[:, i, :, :], R[:, parent[i], :, :]).clone()
            p3d[:, i, :] = torch.matmul(p3d[0, i, :], R[:, parent[i], :, :]) + p3d[:, parent[i], :]
    return p3d
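A hedged invocation sketch for fkl_torch: the real parent, offset, rotInd, and expmapInd come from the H3.6M skeleton definition, so the values below are toy placeholders that only demonstrate the expected shapes (3 global dimensions plus 3 expmap dimensions per joint). It also assumes the surrounding data_utils module is importable and a CUDA device is available:

import numpy as np
import torch
from torch.autograd import Variable

parent = np.array([0, 0, 1])                   # toy 3-joint chain, joint 0 is the root
offset = np.array([[0.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0],
                   [0.0, 1.0, 0.0]])           # bone offsets, joint_n * 3
angles = Variable(torch.zeros(4, 3 + 3 * 3)).cuda()  # N * (3 + joint_n * 3)

p3d = fkl_torch(angles, parent, offset, None, None)  # rotInd/expmapInd unused in this port
print(p3d.shape)                               # (4, 3, 3): N * joint_n * 3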
Example 5: __init__
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def __init__(self,
             unique_draws,
             canvas_shape=[64, 64],
             rolling_average_const=0.7):
    """
    This class does all the work of creating the final canvas from the
    predictions of the RNN, and also defines the loss to back-propagate.
    :param unique_draws: unique draw primitives in the dataset; used to
    penalize longer predicted programs when training with variable-length programs.
    :param canvas_shape: canvas shape
    :param rolling_average_const: constant used for the running-average
    baseline.
    """
    self.canvas_shape = canvas_shape
    self.unique_draws = unique_draws
    self.max_reward = Variable(torch.zeros(1)).cuda()
    self.rolling_baseline = Variable(torch.zeros(1)).cuda()
    self.alpha_baseline = rolling_average_const
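For context, rolling_baseline is the running-average baseline used to reduce variance in REINFORCE-style policy-gradient training. A minimal sketch of the standard update it is meant for (the actual reward computation lives elsewhere in the class, and the .cuda() calls are dropped here):

import torch
from torch.autograd import Variable

alpha = 0.7                                  # rolling_average_const
rolling_baseline = Variable(torch.zeros(1))  # as in __init__, minus the .cuda()

for step in range(3):
    reward = torch.rand(1)                   # placeholder batch-mean reward
    # exponential moving average of past rewards
    rolling_baseline = alpha * rolling_baseline + (1 - alpha) * reward
    advantage = reward - rolling_baseline    # what the policy gradient is scaled by
    print(step, float(rolling_baseline), float(advantage))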
Example 6: get_label
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def get_label(self, x, detach=False):  # pylint: disable=arguments-differ
    """
    Get data sample labels, i.e. true or fake.
    Args:
        x (Union(numpy.ndarray, torch.Tensor)): Discriminator input, i.e. data sample.
        detach (bool): if True, detach the output from the computation graph (optional)
    Returns:
        torch.Tensor (or numpy.ndarray when detach is True): Discriminator
        output, i.e. data label
    """
    # pylint: disable=not-callable, no-member
    if not isinstance(x, torch.Tensor):
        x = torch.tensor(x, dtype=torch.float32)
    x = Variable(x)
    if detach:
        return self._discriminator.forward(x).detach().numpy()
    return self._discriminator.forward(x)
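A hedged usage sketch, assuming `dis` is an instance of the discriminator wrapper class that defines get_label above (the class itself is not shown in this excerpt):

import numpy as np

batch = np.random.rand(16, 1).astype(np.float32)  # 16 data samples
labels = dis.get_label(batch, detach=True)        # numpy array, detached from the graph
live = dis.get_label(batch)                       # torch.Tensor, still on the graph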
Example 7: convert_chwTensor_to_hwcNumpy
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def convert_chwTensor_to_hwcNumpy(tensor):
    """convert a batch of pytorch image tensors (count * c * h * w) to numpy array images (count * h * w * c)
    Parameters:
    ----------
    tensor: torch FloatTensor or Variable, count * c * h * w
    Returns:
    -------
    numpy array images: count * h * w * c
    """
    if isinstance(tensor, Variable):
        return np.transpose(tensor.data.numpy(), (0, 2, 3, 1))
    elif isinstance(tensor, torch.FloatTensor):
        return np.transpose(tensor.numpy(), (0, 2, 3, 1))
    else:
        raise Exception("convert b*c*h*w tensor to b*h*w*c numpy error. The input must be a "
                        "FloatTensor or Variable with 4 dimensions.")
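Usage is straightforward; given the function above, a 4-D float tensor comes back as a channels-last numpy array:

import torch

batch = torch.rand(8, 3, 24, 24)                  # count * c * h * w
images = convert_chwTensor_to_hwcNumpy(batch)
print(images.shape)                               # (8, 24, 24, 3)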
Example 8: calculate_gradient_penalty
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def calculate_gradient_penalty(discriminator, penalty, real_data, fake_data):
    real_data = real_data.data
    fake_data = fake_data.data
    # Sample one random interpolation coefficient per sample.
    alpha = torch.rand(len(real_data), 1)
    alpha = alpha.expand(real_data.size())
    alpha = to_cuda_if_available(alpha)
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    interpolates = Variable(interpolates, requires_grad=True)
    discriminator_interpolates = discriminator(interpolates)
    # Gradient of the critic output w.r.t. the interpolated inputs.
    gradients = torch.autograd.grad(outputs=discriminator_interpolates,
                                    inputs=interpolates,
                                    grad_outputs=to_cuda_if_available(torch.ones_like(discriminator_interpolates)),
                                    create_graph=True, retain_graph=True, only_inputs=True)[0]
    # WGAN-GP: penalize deviations of the gradient norm from 1.
    return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * penalty
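An end-to-end sketch of the gradient penalty above. to_cuda_if_available is a helper from the surrounding project; a plausible stand-in is defined here so the snippet is self-contained:

import torch
import torch.nn as nn
from torch.autograd import Variable

def to_cuda_if_available(x):                      # stand-in for the project helper
    return x.cuda() if torch.cuda.is_available() else x

discriminator = to_cuda_if_available(nn.Linear(10, 1))  # toy critic
real_data = Variable(to_cuda_if_available(torch.randn(32, 10)))
fake_data = Variable(to_cuda_if_available(torch.randn(32, 10)))

gp = calculate_gradient_penalty(discriminator, 10.0, real_data, fake_data)
gp.backward()   # gradients flow back into the critic's parameters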
Example 9: pre_train_epoch
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def pre_train_epoch(autoencoder, data, batch_size, optim=None, variable_sizes=None, temperature=None):
    # Train when an optimizer is given; otherwise just evaluate.
    training = optim is not None
    autoencoder.train(mode=training)
    losses = []
    for batch in data.batch_iterator(batch_size):
        if training:
            optim.zero_grad()
        batch = Variable(torch.from_numpy(batch))
        batch = to_cuda_if_available(batch)
        _, batch_reconstructed = autoencoder(batch, training=training, temperature=temperature, normalize_code=False)
        loss = categorical_variable_loss(batch_reconstructed, batch, variable_sizes)
        # Only back-propagate and step when actually training.
        if training:
            loss.backward()
            optim.step()
        loss = to_cpu_if_available(loss)
        losses.append(loss.data.numpy())
        del loss
    return losses
Example 10: sample
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def sample(generator, temperature, num_samples, num_features, batch_size=100, noise_size=128):
generator = to_cuda_if_available(generator)
generator.train(mode=False)
samples = np.zeros((num_samples, num_features), dtype=np.float32)
start = 0
while start < num_samples:
with torch.no_grad():
noise = Variable(torch.FloatTensor(batch_size, noise_size).normal_())
noise = to_cuda_if_available(noise)
batch_samples = generator(noise, training=False, temperature=temperature)
batch_samples = to_cpu_if_available(batch_samples)
batch_samples = batch_samples.data.numpy()
# do not go further than the desired number of samples
end = min(start + batch_size, num_samples)
# limit the samples taken from the batch based on what is missing
samples[start:end, :] = batch_samples[:min(batch_size, end - start), :]
# move to next batch
start = end
return samples
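A hedged usage sketch with a toy stand-in generator whose forward matches the call signature that sample expects; the real generator, along with the to_cuda_if_available / to_cpu_if_available helpers used inside sample, comes from the surrounding project:

import torch.nn as nn

class ToyGenerator(nn.Module):                    # hypothetical stand-in
    def __init__(self, noise_size=128, num_features=10):
        super(ToyGenerator, self).__init__()
        self.net = nn.Linear(noise_size, num_features)

    def forward(self, noise, training=False, temperature=None):
        return self.net(noise)

samples = sample(ToyGenerator(), temperature=None, num_samples=250, num_features=10)
print(samples.shape)                              # (250, 10)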
Example 11: display_status
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def display_status(epoch, num_epochs, n_batch, num_batches, d_error, g_error, d_pred_real, d_pred_fake):
    # Unwrap any autograd Variables into plain values for printing.
    if isinstance(d_error, torch.autograd.Variable):
        d_error = d_error.data.cpu().numpy()
    if isinstance(g_error, torch.autograd.Variable):
        g_error = g_error.data.cpu().numpy()
    if isinstance(d_pred_real, torch.autograd.Variable):
        d_pred_real = d_pred_real.data
    if isinstance(d_pred_fake, torch.autograd.Variable):
        d_pred_fake = d_pred_fake.data
    print('Epoch: [{}/{}], Batch Num: [{}/{}]'.format(
        epoch, num_epochs, n_batch, num_batches)
    )
    print('Discriminator Loss: {:.4f}, Generator Loss: {:.4f}'.format(d_error, g_error))
    print('D(x): {:.4f}, D(G(z)): {:.4f}'.format(d_pred_real.mean(), d_pred_fake.mean()))
    writer.add_scalar('D(x)', d_pred_real.mean(), epoch)
    writer.add_scalar('D(G(z))', d_pred_fake.mean(), epoch)
Example 12: train_discriminator
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def train_discriminator(optimizer, real_data, fake_data, discriminator, criterion):
    optimizer.zero_grad()
    # 1.1 Train on real data
    prediction_real = discriminator(real_data)
    y_real = Variable(torch.ones(prediction_real.shape[0], 1))
    if torch.cuda.is_available():
        y_real = y_real.cuda()
    D_real_loss = criterion(prediction_real, y_real)
    # 1.2 Train on fake data
    prediction_fake = discriminator(fake_data)
    y_fake = Variable(torch.zeros(prediction_fake.shape[0], 1))
    if torch.cuda.is_available():
        y_fake = y_fake.cuda()
    D_fake_loss = criterion(prediction_fake, y_fake)
    D_loss = D_real_loss + D_fake_loss
    D_loss.backward()
    optimizer.step()
    # Return the total loss and the predictions on real/fake data
    return D_loss, prediction_real, prediction_fake, discriminator
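A self-contained sketch of one call, with a toy critic, Adam, and BCE loss; inputs are moved to the GPU only when one is available, matching the cuda branches inside the function:

import torch
import torch.nn as nn
from torch import optim
from torch.autograd import Variable

cast = (lambda x: x.cuda()) if torch.cuda.is_available() else (lambda x: x)

discriminator = cast(nn.Sequential(nn.Linear(2, 16), nn.ReLU(), nn.Linear(16, 1), nn.Sigmoid()))
optimizer = optim.Adam(discriminator.parameters(), lr=1e-3)
criterion = nn.BCELoss()

real_data = Variable(cast(torch.randn(64, 2) + 2.0))   # toy "real" samples
fake_data = Variable(cast(torch.randn(64, 2)))         # toy "generated" samples

d_loss, pred_real, pred_fake, discriminator = train_discriminator(
    optimizer, real_data, fake_data, discriminator, criterion)
print(float(d_loss), float(pred_real.mean()), float(pred_fake.mean()))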
Example 13: train_discriminator
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def train_discriminator(optimizer, real_data, fake_data, discriminator, criterion):
    optimizer.zero_grad()
    # 1.1 Train on real data
    prediction_real = discriminator(real_data)
    y_real = Variable(torch.ones(prediction_real.shape[0], 1))
    if torch.cuda.is_available():
        y_real = y_real.cuda()
    D_real_loss = criterion(prediction_real, y_real)
    # 1.2 Train on fake data
    prediction_fake = discriminator(fake_data)
    y_fake = Variable(torch.zeros(prediction_fake.shape[0], 1))
    if torch.cuda.is_available():
        y_fake = y_fake.cuda()
    D_fake_loss = criterion(prediction_fake, y_fake)
    D_loss = D_real_loss + D_fake_loss
    D_loss.backward()
    optimizer.step()
    return D_loss, prediction_real, prediction_fake, discriminator
Example 14: r_ggnn
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def r_ggnn(self, h):
    aux = Variable(torch.Tensor(h[0].size(0), self.args['out']).type_as(h[0].data).zero_())
    # For each graph
    for i in range(h[0].size(0)):
        nn_res = nn.Sigmoid()(self.learn_modules[0](torch.cat([h[0][i, :, :], h[-1][i, :, :]], 1))) * \
                 self.learn_modules[1](h[-1][i, :, :])
        # Delete virtual nodes
        nn_res = (torch.sum(h[0][i, :, :], 1).expand_as(nn_res) > 0).type_as(nn_res) * nn_res
        aux[i, :] = torch.sum(nn_res, 0)
    return aux
Example 15: r_mpnn
# Required import: from torch.autograd import variable [as alias]
# Or: from torch.autograd.variable import Variable [as alias]
def r_mpnn(self, h):
    aux = Variable(torch.Tensor(h[0].size(0), self.args['out']).type_as(h[0].data).zero_())
    # For each graph
    for i in range(h[0].size(0)):
        nn_res = nn.Sigmoid()(self.learn_modules[0](torch.cat([h[0][i, :, :], h[-1][i, :, :]], 1))) * \
                 self.learn_modules[1](h[-1][i, :, :])
        # Delete virtual nodes
        nn_res = (torch.sum(h[0][i, :, :], 1).expand_as(nn_res) > 0).type_as(nn_res) * nn_res
        aux[i, :] = torch.sum(nn_res, 0)
    return aux
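Both readout functions implement the gated graph-level readout used in gated graph neural networks: sum over nodes of sigmoid(i([h_v_final, h_v_initial])) * j(h_v_final). A minimal standalone sketch with hypothetical shapes and two stand-in linear layers in place of learn_modules:

import torch
import torch.nn as nn
from torch.autograd import Variable

B, N, d, out = 2, 5, 8, 4                     # graphs, nodes, hidden width, output width
i_net = nn.Linear(2 * d, out)                 # plays the role of learn_modules[0]
j_net = nn.Linear(d, out)                     # plays the role of learn_modules[1]

h0 = Variable(torch.randn(B, N, d))           # initial node states, h[0]
hT = Variable(torch.randn(B, N, d))           # final node states, h[-1]

readout = Variable(torch.zeros(B, out))
for b in range(B):
    gate = nn.Sigmoid()(i_net(torch.cat([h0[b], hT[b]], 1)))
    nn_res = gate * j_net(hT[b])
    # zero out padded/virtual nodes, i.e. rows whose initial state is all zeros
    mask = (torch.sum(torch.abs(h0[b]), 1, keepdim=True) > 0).type_as(nn_res)
    readout[b] = torch.sum(mask.expand_as(nn_res) * nn_res, 0)
print(readout.size())                         # (B, out)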