This article collects typical usage examples of the Python method torch.autograd.Variable.view. If you are unsure what Variable.view does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the class this method belongs to, torch.autograd.Variable.
Below are 15 code examples of Variable.view, sorted by popularity by default.
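Before diving in, a minimal sketch of what Variable.view does (assuming an older PyTorch where torch.autograd.Variable is the standard wrapper; on recent versions it simply returns a Tensor): it reinterprets the same underlying data with a new shape, and a single -1 dimension is inferred from the element count.

import torch
from torch.autograd import Variable

x = Variable(torch.arange(0, 12))  # shape: (12,)
m = x.view(3, 4)                   # shape: (3, 4); shares storage with x
n = x.view(-1, 6)                  # -1 is inferred: shape (2, 6)
print(m.size(), n.size())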
Example 1: train_epoch
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def train_epoch(self, inputs, targets, optimizer, criterion,
                epoch_no=0, batch_size=64, max_step=50, max_norm=5, eval_step=10):
    hidden = self.model.init_hidden(batch_size)
    counter = 0
    x_generator = get_batch(inputs, batch_size, max_step)
    y_generator = get_batch(targets, batch_size, max_step)
    for x, y in zip(x_generator, y_generator):
        self.model.train()
        x = Variable(torch.from_numpy(np.array(x, dtype=np.float32))).long()
        y = Variable(torch.from_numpy(np.array(y, dtype=np.float32))).long()
        if CUDA_AVAILABLE:
            x = x.cuda()
            y = y.cuda()
        # Detach the hidden state from the previous batch's graph
        if isinstance(hidden, tuple):
            hidden = tuple(Variable(each.data) for each in hidden)
        else:
            hidden = Variable(hidden.data)
        self.model.zero_grad()  # reset gradients
        output, hidden = self.model.forward(x, hidden)
        # Reshape output:
        # [batch_size, step_size, vocab_size] -> [batch_size * step_size, vocab_size]
        # y only needs to be 1-D
        step_size = x.size(1)  # sequences in a batch may be shorter than max_step
        cross_entropy_loss = criterion(
            output.view(batch_size * step_size, -1),
            y.view(batch_size * step_size).long()
        )
        focal_loss = FocalLoss(gamma=2)(
            output.view(batch_size * step_size, -1),
            y.view(batch_size * step_size).long()
        )
        ploss = pullaway_loss(output.view(batch_size * step_size, -1))
        loss = cross_entropy_loss + focal_loss + 0.1 * ploss
        loss.backward()
        torch.nn.utils.clip_grad_norm(self.model.parameters(), max_norm)
        optimizer.step()
        counter += 1
        if (counter % eval_step) == 0:
            print("Epoch: {}; Step: {}; Loss: {:.4f}".format(
                epoch_no + 1, counter, loss.data[0]
            ))
            # Pick a random span from x as a seed for generation
            pos = np.random.randint(0, mul(*x.size()) - 2)
            length = np.random.randint(1, min(5, mul(*x.size()) - pos - 1))
            start_tokens = x.view(-1)[pos:pos + length].data.cpu().numpy()
            start_text = ''.join(self.vectorizer.inverse_transform([start_tokens])[0]).strip()
            if start_text:
                result = self.generate(start_text, max_len=100)
                print("[%s]: %r" % (start_text, result))
Example 2: forward
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def forward(self, x):
    height = x.size(3)
    width = x.size(4)
    # Compute measurements
    x, y = self.measurements(x, self.pad_frame_size, self.patch_shape)
    if self.encoder_learn is False:
        x = x.detach()
        y = y.detach()
    # Map measurements to video blocks
    out = Variable(torch.zeros(
        x.size(0), x.size(1), self.vectorized)).cuda()
    for i in range(x.size(1)):
        out[:, i, :] = self.reconstruction(x[:, i, :])
    output_patches = out.view(
        out.size(0), self.measurements.patches_size[0],
        self.measurements.patches_size[1], self.measurements.patches_size[2],
        self.temporal_size, self.spatial_size, self.spatial_size
    ).permute(0, 1, 4, 2, 3, 6, 5)
    # Reconstruct video blocks to video
    reconstructed_video = self.measurements.fold(output_patches)[0]
    # Crop padding
    reconstructed_video = reconstructed_video[:, :, :, 0:height, 0:width]
    return reconstructed_video, y
Example 3: predict
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def predict(model, test_loader):
    # switch to evaluate mode
    model.eval()
    ################ Note ############################
    # Each sample may have a different number of points,
    # so just use a batch size of 1.
    ##################################################
    debug_here()
    for i, (points_data, _seg_data, labels) in enumerate(test_loader, 0):
        if i % 10 == 0:
            print('{0}/{1}'.format(i, len(test_loader)))
        # print(points_data.size())
        points_data = Variable(points_data, volatile=True)
        points_data = points_data.transpose(2, 1)
        _seg_data = Variable(_seg_data, volatile=True)
        if opt.cuda:
            points_data = points_data.cuda()
            _seg_data = _seg_data.long().cuda()  # must be a long CUDA tensor
        # forward
        pred, _ = model(points_data)
        pred = pred.view(-1, opt.num_seg_classes)
        _seg_data = _seg_data.view(-1, 1)[:, 0]  # min is already 0
        pred_choice = pred.data.max(1)[1]
    print('finished loading')
Example 4: forward
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def forward(self, sentences, sentences_len, hidden):
    sentences_len = sentences_len.cpu().data.numpy()
    idx = np.argsort(sentences_len).tolist()[::-1]
    ridx = np.argsort(idx).tolist()
    sentences = sentences[idx, :]
    sentences_len = sentences_len[idx, ]
    embedding = self.embedding(sentences)
    embedding = nn.Dropout(0.1)(embedding)
    packed_embedding = pack_padded_sequence(embedding, sentences_len, batch_first=True)
    packed_rnn_feature, hidden = self.rnn_feature(packed_embedding, hidden)
    sentence_feature, _ = pad_packed_sequence(packed_rnn_feature, batch_first=True)
    idx = Variable(LongTensor(sentences_len - 1))
    idx = idx.view(-1, 1).expand(sentence_feature.size(0), sentence_feature.size(2)).unsqueeze(1)
    if sentence_feature.is_cuda:
        idx = idx.cuda()
    sentence_feature = sentence_feature.gather(1, idx).squeeze()
    sentence_feature = sentence_feature[ridx, :]
    sentences_len = sentences_len[ridx, ]
    logits = self.classifier(sentence_feature)
    pred = F.log_softmax(logits, dim=0)
    return pred
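Example 4 builds a gather index with view(-1, 1), expand, and unsqueeze to pull out the last valid timestep of each padded sequence. A self-contained sketch of that indexing, using hypothetical toy shapes:

import torch
from torch.autograd import Variable

batch, steps, hidden = 2, 5, 4
feats = Variable(torch.randn(batch, steps, hidden))
lengths = torch.LongTensor([5, 3])                         # valid length per sequence

idx = Variable(lengths - 1)                                # last valid index per sequence
idx = idx.view(-1, 1).expand(batch, hidden).unsqueeze(1)   # (2, 1, 4)
last = feats.gather(1, idx).squeeze(1)                     # (2, 4)
print(last.size())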
Example 5: predict
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def predict(self, x, get_raw_results=False, **kwargs):
    if not isinstance(x, Variable):
        x = Variable(torch.from_numpy(np.asarray(x, dtype=np.float32)))
    if len(x.size()) == 1:
        x = x.view(1, -1)
    y_pred = self._get_prediction(x).data.numpy()
    return y_pred if get_raw_results else np.argmax(y_pred, axis=1)
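Example 5 uses view(1, -1) to promote a single 1-D sample to a batch of one, so the model code can always assume a leading batch dimension. A minimal illustration of that idiom:

import numpy as np
import torch
from torch.autograd import Variable

x = Variable(torch.from_numpy(np.asarray([1.0, 2.0, 3.0], dtype=np.float32)))
print(x.size())    # torch.Size([3])
x = x.view(1, -1)  # add a batch dimension
print(x.size())    # torch.Size([1, 3])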
Example 6: AffineGridGenV2
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
class AffineGridGenV2(Module):
    def __init__(self, height, width, lr=1, aux_loss=False):
        super(AffineGridGenV2, self).__init__()
        self.height, self.width = height, width
        self.aux_loss = aux_loss
        self.lr = lr
        self.grid = np.zeros([self.height, self.width, 3], dtype=np.float32)
        self.grid[:, :, 0] = np.expand_dims(
            np.repeat(np.expand_dims(np.arange(-1, 1, 2.0 / self.height), 0),
                      repeats=self.width, axis=0).T, 0)
        self.grid[:, :, 1] = np.expand_dims(
            np.repeat(np.expand_dims(np.arange(-1, 1, 2.0 / self.width), 0),
                      repeats=self.height, axis=0), 0)
        self.grid[:, :, 2] = np.ones([self.height, width])
        self.grid = torch.from_numpy(self.grid.astype(np.float32))

    def forward(self, input1):
        self.batchgrid = torch.zeros(torch.Size([input1.size(0)]) + self.grid.size())
        for i in range(input1.size(0)):
            self.batchgrid[i] = self.grid
        self.batchgrid = Variable(self.batchgrid)
        if input1.is_cuda:
            self.batchgrid = self.batchgrid.cuda()
        output = torch.bmm(self.batchgrid.view(-1, self.height * self.width, 3),
                           torch.transpose(input1, 1, 2)).view(-1, self.height, self.width, 2)
        return output
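A hypothetical usage sketch for Example 6 (not from the original source): forward expects a batch of 2x3 affine matrices and, via the view/bmm/view chain, returns a sampling grid of shape (batch, height, width, 2). The identity transforms below are illustrative.

import torch
from torch.autograd import Variable

gridgen = AffineGridGenV2(height=8, width=8)
# A batch of two identity affine transforms, shape (batch, 2, 3)
theta = Variable(torch.Tensor([[1, 0, 0], [0, 1, 0]]).unsqueeze(0).repeat(2, 1, 1))
grid = gridgen(theta)
print(grid.size())  # torch.Size([2, 8, 8, 2])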
Example 7: train
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def train(dataloader):
    uf.train()
    total_loss = 0
    total_items = 0
    start_time = time.time()
    for i_batch, batch in enumerate(dataloader):
        output_seq = Variable(batch['output_seq'])
        del batch['output_seq']
        for k in batch:
            batch[k] = Variable(batch[k])
        if DEVICE_NO != -1:
            output_seq = output_seq.cuda(DEVICE_NO)
            for k in batch:
                batch[k] = batch[k].cuda(DEVICE_NO)
        uf.zero_grad()
        pred = uf.forward(**batch)
        pred = pred.view(-1, pred.size(-1))
        output_seq = output_seq.view(-1)
        loss = criteria(pred, output_seq)
        loss.backward()
        num_items = len([x for x in output_seq if int(x) != criteria.ignore_index])
        total_loss += num_items * loss.data
        total_items += num_items
        optimizer.step()
        if i_batch % log_interval == 0 and i_batch > 0:
            cur_loss = total_loss[0] / total_items
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:04.2f} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.2f}'.format(
                      epoch, i_batch, len(dataloader.dataset) // dataloader.batch_size,
                      optimizer.param_groups[0]['lr'],
                      elapsed * 1000 / log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            total_items = 0
            start_time = time.time()
Author: ParkTong · Project: Unified-Architecture-for-Semantic-Role-Labeling-and-Relation-Classification · Source: train.py
Example 8: l2l_validate
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def l2l_validate(model, cluster_center, n_epoch=100):
    val_accuracy = []
    for epoch in range(n_epoch):
        data_l = generate_data_l(cluster_center)
        data_n = generate_data_n(cluster_center, model.n_class_n)
        x_l, y_l = Variable(torch.from_numpy(data_l[0])).float(), Variable(
            torch.from_numpy(data_l[1]))
        x_n, y_n = Variable(torch.from_numpy(data_n[0])).float(), Variable(
            torch.from_numpy(data_n[1]))
        pred_ll, pred_nl, w, b = model(x_l, x_n)
        M = Variable(torch.zeros(model.n_class_n, model.n_dim))
        B = Variable(torch.zeros(model.n_class_n))
        for k in range(model.n_class_n):
            M[k] = torch.cat((w[:, 0][y_n == model.n_class_l + k].view(-1, 1),
                              w[:, 1][y_n == model.n_class_l + k].view(-1, 1)), 1).mean(0)
            B[k] = b[y_n == model.n_class_l + k].mean()
        pred_ln = torch.mm(x_l, M.t()) + B.view(1, -1).expand_as(torch.mm(x_l, M.t()))
        pred_nn = torch.mm(x_n, M.t()) + B.view(1, -1).expand_as(torch.mm(x_n, M.t()))
        pred = torch.cat((torch.cat((pred_ll, pred_nl)), torch.cat((pred_ln, pred_nn))), 1)
        pred = pred.data.max(1)[1]
        y = torch.cat((y_l, y_n))
        accuracy = pred.eq(y.data).cpu().sum() * 1.0 / y.size()[0]
        # print('accuracy: %.2f' % accuracy)
        val_accuracy.append(accuracy)
        acc_l = pred.eq(y.data).cpu()[0:100].sum() * 1.0 / 100
        acc_n = pred.eq(y.data).cpu()[100:150].sum() * 1.0 / 50
        print('accuracy: %.2f, lifelong accuracy: %.2f, new accuracy: %.2f'
              % (accuracy, acc_l, acc_n))
    return numpy.mean(numpy.asarray(val_accuracy))
Example 9: forward
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def forward(self, images):
    """Extract the image feature vectors."""
    features = self.resnet(images)
    features = Variable(features.data)
    features = features.view(features.size(0), -1)
    features = self.bn(self.linear(features))
    return features
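The view in Example 9 flattens everything after the batch axis, which is the usual bridge between a convolutional feature map and a fully connected layer. A small sketch of just the flattening step; the (2, 512, 7, 7) shape is an illustrative stand-in for ResNet features:

import torch
from torch.autograd import Variable

features = Variable(torch.randn(2, 512, 7, 7))  # e.g. conv feature maps
flat = features.view(features.size(0), -1)      # (2, 512 * 7 * 7) = (2, 25088)
print(flat.size())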
Example 10: accumulate_gradient
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def accumulate_gradient(self, batch_sz, states, actions, rewards,
                        next_states, mask):
    """Compute the difference between the return distributions of Q(s,a)
    and TQ(s_,a).
    """
    states = Variable(states)
    actions = Variable(actions)
    rewards = Variable(rewards)
    next_states = Variable(next_states, volatile=True)
    # Compute probabilities of Q(s,a*)
    q_probs = self.policy(states)
    actions = actions.view(batch_sz, 1, 1)
    action_mask = actions.expand(batch_sz, 1, self.atoms_no)
    qa_probs = q_probs.gather(1, action_mask).squeeze()
    # Compute distribution of Q(s_,a)
    target_qa_probs = self._get_categorical(next_states, rewards, mask)
    # Compute the cross-entropy of phi(TZ(x_,a)) || Z(x,a)
    qa_probs.data.clamp_(0.01, 0.99)  # Tudor's trick for avoiding NaNs
    loss = -torch.sum(target_qa_probs * torch.log(qa_probs))
    # Accumulate gradients
    loss.backward()
Example 11: logposterior_func
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def logposterior_func(self, x, z):
    self.B = x.size()[0]  # batch size
    self.zeros = Variable(torch.zeros(self.B, self.z_size).type(self.dtype))
    # x: [B, X]
    # z: [P, Z]
    z = Variable(z).type(self.dtype)
    z = z.view(-1, self.B, self.z_size)
    return lognormal(z, self.zeros, self.zeros) + log_bernoulli(self.decode(z), x)
Example 12: enumerate_support
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def enumerate_support(self):
    probs = self._categorical.probs
    n = self.event_shape[0]
    if isinstance(probs, Variable):
        values = Variable(torch.eye(n, out=probs.data.new(n, n)))
    else:
        values = torch.eye(n, out=probs.new(n, n))
    values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
    return values.expand((n,) + self.batch_shape + (n,))
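In Example 12, view inserts singleton dimensions between the support axis and the event axis so that expand can broadcast the one-hot identity matrix across the batch shape without copying data. A numeric sketch, assuming batch_shape = (2,) and event size n = 3:

import torch

n, batch_shape = 3, (2,)
values = torch.eye(n)                                         # (3, 3): one one-hot row per support value
values = values.view((n,) + (1,) * len(batch_shape) + (n,))   # (3, 1, 3)
values = values.expand((n,) + batch_shape + (n,))             # (3, 2, 3), no data copied
print(values.size())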
Example 13: convert_batch
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def convert_batch(trajectory, has_features=False):
    """Convert a trajectory from numpy arrays to PyTorch Variables."""
    states = Variable(torch.from_numpy(trajectory["observations"]).float())
    acs = Variable(torch.from_numpy(trajectory["actions"]))
    advs = Variable(torch.from_numpy(trajectory["advantages"].copy()).float())
    advs = advs.view(-1, 1)
    rs = Variable(torch.from_numpy(trajectory["value_targets"]).float())
    rs = rs.view(-1, 1)
    if has_features:
        features = [Variable(torch.from_numpy(f))
                    for f in trajectory["features"]]
    else:
        features = trajectory["features"]
    return states, acs, advs, rs, features
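Example 13 reshapes the advantage and return vectors with view(-1, 1) so they become column vectors that broadcast cleanly against per-sample predictions. A minimal sketch with hypothetical values:

import numpy as np
import torch
from torch.autograd import Variable

advantages = Variable(torch.from_numpy(np.array([0.5, -0.2, 1.3], dtype=np.float32)))
advs = advantages.view(-1, 1)  # (3,) -> (3, 1): one column per sample
print(advs.size())             # torch.Size([3, 1])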
Example 14: test_forward_applies_embedding_then_encoder
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def test_forward_applies_embedding_then_encoder(self):
    numpy_tensor = numpy.random.randint(6, size=(3, 4, 7))
    inputs = Variable(torch.from_numpy(numpy_tensor))
    encoder_output = self.encoder(inputs)
    reshaped_input = inputs.view(12, 7)
    embedded = self.embedding(reshaped_input)
    mask = (inputs != 0).long().view(12, 7)
    reshaped_manual_output = self.inner_encoder(embedded, mask)
    manual_output = reshaped_manual_output.view(3, 4, 3)
    assert_almost_equal(encoder_output.data.numpy(), manual_output.data.numpy())
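The test in Example 14 relies on the reshape-run-reshape pattern: a (3, 4, 7) batch of sequences is collapsed with view(12, 7) so a module that only understands 2-D input can run, and the result is restored with view(3, 4, -1). A shape-only sketch of that round trip, with nn.Linear standing in for the wrapped module:

import torch
import torch.nn as nn
from torch.autograd import Variable

inputs = Variable(torch.randn(3, 4, 7))
inner = nn.Linear(7, 3)          # stand-in for the inner module
out = inner(inputs.view(12, 7))  # merge batch and sequence dims: (12, 7) -> (12, 3)
out = out.view(3, 4, -1)         # restore: (3, 4, 3)
print(out.size())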
Example 15: match
# Required import: from torch.autograd import Variable
# Or: from torch.autograd.Variable import view
def match(self, passage_encoders, question_encoders, wq_matrix, wp_matrix, fw=True):
    '''
    passage_encoders (pn_steps, batch, hidden_size)
    question_encoders (qn_steps, batch, hidden_size)
    wq_matrix (qn_steps, batch, hidden_size)
    wp_matrix (pn_steps, batch, hidden_size)
    '''
    if fw:
        match_lstm = self.fw_match_lstm
        start = 0
        end = passage_encoders.size(0)
        stride = 1
    else:
        match_lstm = self.bw_match_lstm
        start = passage_encoders.size(0) - 1
        end = -1
        stride = -1
    hx = Variable(torch.zeros(passage_encoders.size(1), self.hidden_size)).cuda()
    cx = Variable(torch.zeros(passage_encoders.size(1), self.hidden_size)).cuda()
    match_encoders = [0 for i in range(passage_encoders.size(0))]
    for i in range(start, end, stride):
        wphp = wp_matrix[i]
        wrhr = self.whr_net(hx)
        _sum = torch.add(wphp, wrhr)  # (batch, hidden_size)
        _sum = _sum.expand(wq_matrix.size(0), wq_matrix.size(1), self.hidden_size)  # (qn_steps, batch, hidden_size)
        g = self.tanh(torch.add(wq_matrix, _sum))  # (qn_steps, batch, hidden_size)
        g = torch.transpose(g, 0, 1)  # (batch, qn_steps, hidden_size)
        wg = self.w_net(g)  # (batch, qn_steps, 1)
        wg = wg.squeeze(-1)  # (batch, qn_steps)
        alpha = self.softmax(wg).view(wg.size(0), 1, wg.size(1))  # (batch, 1, qn_steps)
        attentionv = torch.bmm(alpha, question_encoders.transpose(0, 1))  # (batch, 1, hidden_size)
        attentionv = attentionv.squeeze(1)  # (batch, hidden_size)
        inp = torch.cat([passage_encoders[i], attentionv], -1)
        hx, cx = match_lstm(inp, (hx, cx))  # (batch, hidden_size)
        match_encoders[i] = hx.view(1, hx.size(0), -1)
    match_encoders = torch.cat(match_encoders)
    return match_encoders