This article collects typical usage examples of the Python method torch.autograd.Variable.size. If you are wondering what Variable.size does, how to call it, or what real code that uses it looks like, the curated examples below should help. You may also want to browse other usage examples of the defining class, torch.autograd.Variable.
Fifteen code examples of the Variable.size method are shown below, ordered by popularity by default.
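As a quick orientation before the examples: size() returns a torch.Size object describing all dimensions, and size(i) returns a single dimension. The minimal sketch below is not taken from any of the examples; it only illustrates the calls they rely on. Note that since PyTorch 0.4 Variable has been merged into torch.Tensor, so the same calls work on plain tensors.

import torch
from torch.autograd import Variable

x = Variable(torch.zeros(32, 100, 300))   # e.g. [batch, seq_len, emb_dim]
print(x.size())                 # torch.Size([32, 100, 300])
print(x.size(0))                # 32 -- a single dimension, queried by index
m, seq_len, emb_dim = x.size()  # torch.Size behaves like a tuple and can be unpacked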
Example 1: forward
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def forward(self, X, X_mask):
    # X: [m, Tx]  m = batch size, Tx = word count
    #print(X.size(), type(X))
    m = X.size()[0]
    Tx = X.size()[1]
    X = self.embedding(X)
    # X: [m, Tx, embedding_dim]  m = batch size, Tx = word count
    #print(X.size(), type(X.data))
    assert X.size() == torch.Size([m, Tx, self.embedding_dim])
    # average words in doc; use mask so we average only words, not padding
    X = torch.sum(X, 1)
    X = Variable(torch.div(X.data, X_mask))
    # X: [m, emb_dim]
    #print(X.size(), type(X.data))
    assert X.size() == torch.Size([m, self.embedding_dim])
    X = self.linear(X)
    # X: [m, 1]
    #print(X.size(), type(X))
    if self.num_classes == 2:
        assert X.size() == torch.Size([m, 1])
    else:
        assert X.size() == torch.Size([m, self.num_classes])
    if self.num_classes == 2:
        X = torch.squeeze(X)
        X = self.sigmoid(X)
        # X: [m]
        #print(X.size(), type(X))
        assert X.size() == torch.Size([m])
        return X
    else:
        return F.softmax(X)
Example 2: NN
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def NN(epoch, net, lemniscate, trainloader, testloader, recompute_memory=0):
    net.eval()
    net_time = AverageMeter()
    cls_time = AverageMeter()
    losses = AverageMeter()
    correct = 0.
    total = 0
    testsize = testloader.dataset.__len__()

    trainFeatures = lemniscate.memory.t()
    if hasattr(trainloader.dataset, 'imgs'):
        trainLabels = torch.LongTensor([y for (p, y) in trainloader.dataset.imgs]).cuda()
    else:
        trainLabels = torch.LongTensor(trainloader.dataset.train_labels).cuda()

    if recompute_memory:
        transform_bak = trainloader.dataset.transform
        trainloader.dataset.transform = testloader.dataset.transform
        temploader = torch.utils.data.DataLoader(trainloader.dataset, batch_size=100, shuffle=False, num_workers=1)
        for batch_idx, (inputs, targets, indexes) in enumerate(temploader):
            inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs, volatile=True), Variable(targets)
            batchSize = inputs.size(0)
            features = net(inputs)
            trainFeatures[:, batch_idx*batchSize:batch_idx*batchSize+batchSize] = features.data.t()
        trainLabels = torch.LongTensor(temploader.dataset.train_labels).cuda()
        trainloader.dataset.transform = transform_bak

    end = time.time()
    for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        batchSize = inputs.size(0)
        features = net(inputs)
        net_time.update(time.time() - end)
        end = time.time()

        dist = torch.mm(features.data, trainFeatures)
        yd, yi = dist.topk(1, dim=1, largest=True, sorted=True)
        candidates = trainLabels.view(1, -1).expand(batchSize, -1)
        retrieval = torch.gather(candidates, 1, yi)

        retrieval = retrieval.narrow(1, 0, 1).clone().view(-1)
        yd = yd.narrow(1, 0, 1)

        total += targets.size(0)
        correct += retrieval.eq(targets.data).cpu().sum()

        cls_time.update(time.time() - end)
        end = time.time()

        print('Test [{}/{}]\t'
              'Net Time {net_time.val:.3f} ({net_time.avg:.3f})\t'
              'Cls Time {cls_time.val:.3f} ({cls_time.avg:.3f})\t'
              'Top1: {:.2f}'.format(
              total, testsize, correct*100./total, net_time=net_time, cls_time=cls_time))

    return correct/total
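The heart of the example above is the feature-bank lookup: similarity scores via torch.mm, the best match via topk, and the matching label via gather. The stripped-down sketch below reproduces just that pattern with made-up tensors; the names and shapes are illustrative, not from the original code.

import torch

features = torch.randn(5, 8)               # 5 test samples, feature dim 8
train_features = torch.randn(8, 20)        # memory bank, stored transposed
train_labels = torch.randint(0, 3, (20,))  # one label per bank entry

dist = torch.mm(features, train_features)                 # [5, 20] similarity scores
yd, yi = dist.topk(1, dim=1, largest=True, sorted=True)   # best match per test sample
candidates = train_labels.view(1, -1).expand(5, -1)       # [5, 20] label candidates
retrieval = torch.gather(candidates, 1, yi).view(-1)      # predicted label per sample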
Example 3: train
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def train(train_loader, model, criterion, optimizer, epoch, opt):
    """
    train for one epoch on the training set
    """
    batch_time = utils.AverageMeter()
    losses = utils.AverageMeter()
    top1 = utils.AverageMeter()

    # training mode
    model.train()

    end = time.time()
    for i, (input_points, labels) in enumerate(train_loader):
        # bz x 2048 x 3
        input_points = Variable(input_points)
        input_points = input_points.transpose(2, 1)
        labels = Variable(labels[:, 0])
        # print(points.size())
        # print(labels.size())

        # shift data to GPU
        if opt.cuda:
            input_points = input_points.cuda()
            labels = labels.long().cuda()  # must be a long cuda tensor

        # forward, backward, optimize
        output, _ = model(input_points)
        # debug_here()
        loss = criterion(output, labels)

        ##############################
        # measure accuracy
        ##############################
        prec1 = utils.accuracy(output.data, labels.data, topk=(1,))[0]
        losses.update(loss.data[0], input_points.size(0))
        top1.update(prec1[0], input_points.size(0))

        ##############################
        # compute gradient and do sgd
        ##############################
        optimizer.zero_grad()
        loss.backward()

        ##############################
        # gradient clipping
        ##############################
        utils.clip_gradient(optimizer, opt.gradient_clip)

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % opt.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   loss=losses, top1=top1))
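Several of the examples here depend on an AverageMeter helper that the snippets do not include. Its exact definition varies by repository, so the version below is an assumption based on the widely used pattern from PyTorch example code: it tracks the latest value and a running average, with a reset method used later in Example 15.

class AverageMeter(object):
    """Keeps the most recent value and a running average (common helper in PyTorch example code)."""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count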
Example 4: train_epoch
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def train_epoch(self, inputs, targets, optimizer, criterion,
                epoch_no=0, batch_size=64, max_step=50, max_norm=5, eval_step=10):
    hidden = self.model.init_hidden(batch_size)
    counter = 0
    x_generator = get_batch(inputs, batch_size, max_step)
    y_generator = get_batch(targets, batch_size, max_step)
    for x, y in zip(x_generator, y_generator):
        self.model.train()
        x = Variable(torch.from_numpy(np.array(x, dtype=np.float32))).long()
        y = Variable(torch.from_numpy(np.array(y, dtype=np.float32))).long()
        if CUDA_AVAILABLE:
            x = x.cuda()
            y = y.cuda()

        if isinstance(hidden, tuple):
            hidden = tuple([Variable(each.data) for each in hidden])
        else:
            hidden = Variable(hidden.data)

        self.model.zero_grad()  # reset gradients
        output, hidden = self.model.forward(x, hidden)

        # Reshape output:
        # [batch_size, step_size, vocab_size] -> [batch_size * step_size, vocab_size]
        # y only needs to be 1D
        step_size = x.size(1)  # sequences in a batch may be shorter than max_step
        cross_entropy_loss = criterion(
            output.view(batch_size * step_size, -1),
            y.view(batch_size * step_size).long()
        )
        focal_loss = FocalLoss(gamma=2)(
            output.view(batch_size * step_size, -1),
            y.view(batch_size * step_size).long()
        )
        ploss = pullaway_loss(output.view(batch_size * step_size, -1))
        loss = cross_entropy_loss + focal_loss + 0.1 * ploss
        loss.backward()

        torch.nn.utils.clip_grad_norm(self.model.parameters(), max_norm)
        optimizer.step()

        counter += 1
        if (counter % eval_step) == 0:
            print("Epoch: {}; Step: {}; Loss: {:.4f}".format(
                epoch_no + 1, counter, loss.data[0]
            ))
            # pick a random snippet from x as a generation prompt
            pos = np.random.randint(0, mul(*x.size()) - 2)
            length = np.random.randint(1, min(5, mul(*x.size()) - pos - 1))
            start_tokens = x.view(-1)[pos:pos + length].data.numpy()
            start_text = ''.join(self.vectorizer.inverse_transform([start_tokens])[0]).strip()
            if start_text:
                result = self.generate(start_text, max_len=100)
                print("[%s]: %r" % (start_text, result))
Example 5: dec
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def dec(self, encoders, decoder_inputs, is_teacher_forcing, max_question_len):
    '''
    encoders (batch, hidden_size)
    if is_teacher_forcing: decoder_inputs (batch, max_question_len)
    if not is_teacher_forcing: decoder_inputs (batch, 1)
    '''
    decoder_inputs = Variable(decoder_inputs).long().cuda()
    decoder_inputs = self.embedding(decoder_inputs)
    decoder_inputs = decoder_inputs.transpose(0, 1)
    encoders = encoders.expand(decoder_inputs.size(0), encoders.size(0), self.hidden_size*2)
    inputs = torch.cat([decoder_inputs, encoders], -1)

    if is_teacher_forcing:
        outputs, hidden = self.dec_net(inputs)
        outputs = self.dropout(outputs)
        logits = self.fc_net(outputs)  # qn_steps, batch, voc_size
        _, predictions = torch.max(logits.transpose(0, 1), -1)  # batch, qn_steps
        predictions = predictions.cpu().data.numpy()
    else:
        logits = [0 for i in range(max_question_len)]
        predictions = [0 for i in range(max_question_len)]
        output, hidden = self.dec_net(inputs)
        output = self.dropout(output)
        logits[0] = self.fc_net(output)
        _, index = torch.max(logits[0])
        logits[0] = logits[0].view(1, decoder_inputs.size(1), self.voc_size)  # 1, batch_size, voc_size
        predictions[0] = index.cpu().data.numpy()  # batch_size
        for i in range(1, max_question_len):
            prev_output = Variable(predictions[i-1]).long().cuda()
            prev_output = self.embedding(prev_output)
            inputs = torch.cat([prev_output, encoders[0]], -1)
            output, hidden = self.dec_net(inputs, hidden)
            output = self.dropout(output)
            logits[i] = self.fc_net(output)
            _, index = torch.max(logits[i])
            logits[i] = logits[i].view(1, decoder_inputs.size(0), self.voc_size)  # 1, batch_size, voc_size
            predictions[i] = index.cpu().data.numpy()  # batch_size
        logits = torch.cat(logits)  # qn_steps, batch, voc_size
        predictions = np.array(predictions).transpose(1, 0)
    return logits, predictions
Example 6: train
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def train(epoch):
    model.train()
    samples_seen = 0
    for data, target in train_loader:
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        samples_seen += data.size(0)
        if (samples_seen // data.size(0)) % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, samples_seen, len(train_loader.dataset),
                100. * samples_seen / len(train_loader.dataset), loss.item()))
Example 7: predict
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def predict(self, x, get_raw_results=False, **kwargs):
    if not isinstance(x, Variable):
        x = Variable(torch.from_numpy(np.asarray(x, dtype=np.float32)))
    if len(x.size()) == 1:
        x = x.view(1, -1)
    y_pred = self._get_prediction(x).data.numpy()
    return y_pred if get_raw_results else np.argmax(y_pred, axis=1)
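The len(x.size()) == 1 check promotes a single sample to a batch of one before prediction. The self-contained snippet below only illustrates that reshape; it does not use the surrounding model class.

import numpy as np
import torch
from torch.autograd import Variable

x = Variable(torch.from_numpy(np.asarray([0.1, 0.2, 0.3], dtype=np.float32)))
if len(x.size()) == 1:      # a single sample of shape (3,)
    x = x.view(1, -1)       # promote to a batch of one: shape (1, 3)
print(x.size())             # torch.Size([1, 3])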
Example 8: get_input_from_batch
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def get_input_from_batch(batch, use_cuda):
    batch_size = len(batch.enc_lens)

    enc_batch = Variable(torch.from_numpy(batch.enc_batch).long())
    enc_padding_mask = Variable(torch.from_numpy(batch.enc_padding_mask)).float()
    enc_lens = batch.enc_lens
    extra_zeros = None
    enc_batch_extend_vocab = None

    if config.pointer_gen:
        enc_batch_extend_vocab = Variable(torch.from_numpy(batch.enc_batch_extend_vocab).long())
        # max_art_oovs is the max over all the article oov lists in the batch
        if batch.max_art_oovs > 0:
            extra_zeros = Variable(torch.zeros((batch_size, batch.max_art_oovs)))

    c_t_1 = Variable(torch.zeros((batch_size, 2 * config.hidden_dim)))

    coverage = None
    if config.is_coverage:
        coverage = Variable(torch.zeros(enc_batch.size()))

    if use_cuda:
        enc_batch = enc_batch.cuda()
        enc_padding_mask = enc_padding_mask.cuda()
        if enc_batch_extend_vocab is not None:
            enc_batch_extend_vocab = enc_batch_extend_vocab.cuda()
        if extra_zeros is not None:
            extra_zeros = extra_zeros.cuda()
        c_t_1 = c_t_1.cuda()
        if coverage is not None:
            coverage = coverage.cuda()

    return enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage
Example 9: train
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def train(train_loader, net, criterion, optimizer, epoch, train_args):
    train_loss = AverageMeter()
    curr_iter = (epoch - 1) * len(train_loader)
    for i, data in enumerate(train_loader):
        inputs, labels = data
        assert inputs.size()[2:] == labels.size()[1:]
        N = inputs.size(0)
        inputs = Variable(inputs).cuda()
        labels = Variable(labels).cuda()

        optimizer.zero_grad()
        outputs = net(inputs)
        assert outputs.size()[2:] == labels.size()[1:]
        assert outputs.size()[1] == voc.num_classes

        loss = criterion(outputs, labels) / N
        loss.backward()
        optimizer.step()

        train_loss.update(loss.data[0], N)

        curr_iter += 1
        writer.add_scalar('train_loss', train_loss.avg, curr_iter)

        if (i + 1) % train_args['print_freq'] == 0:
            print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
                epoch, i + 1, len(train_loader), train_loss.avg
            ))
Example 10: forward
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def forward(self, images):
    """Extract the image feature vectors."""
    features = self.resnet(images)
    features = Variable(features.data)
    features = features.view(features.size(0), -1)
    features = self.bn(self.linear(features))
    return features
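The view(features.size(0), -1) call keeps the batch dimension and flattens everything else, which is how the pooled CNN output becomes a flat feature vector per image. A minimal, self-contained illustration of just that reshape (the shapes are illustrative):

import torch
from torch.autograd import Variable

features = Variable(torch.randn(16, 2048, 1, 1))  # e.g. a pooled CNN output
flat = features.view(features.size(0), -1)        # keep the batch dim, flatten the rest
print(flat.size())                                # torch.Size([16, 2048])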
Example 11: _validate_args
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def _validate_args(self, inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio):
    if self.use_attention:
        if encoder_outputs is None:
            raise ValueError("Argument encoder_outputs cannot be None when attention is used.")

    # inference batch size
    if inputs is None and encoder_hidden is None:
        batch_size = 1
    else:
        if inputs is not None:
            batch_size = inputs.size(0)
        else:
            if self.rnn_cell is nn.LSTM:
                batch_size = encoder_hidden[0].size(1)
            elif self.rnn_cell is nn.GRU:
                batch_size = encoder_hidden.size(1)

    # set default input and max decoding length
    if inputs is None:
        if teacher_forcing_ratio > 0:
            raise ValueError("Teacher forcing has to be disabled (set 0) when no inputs is provided.")
        inputs = Variable(torch.LongTensor([self.sos_id] * batch_size),
                          volatile=True).view(batch_size, 1)
        if torch.cuda.is_available():
            inputs = inputs.cuda()

        max_length = self.max_length
    else:
        max_length = inputs.size(1) - 1  # minus the start of sequence symbol

    return inputs, batch_size, max_length
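The batch-size inference above relies on the layout of RNN hidden states, whose dimension 1 is the batch dimension regardless of batch_first. A quick self-contained check of that layout, with arbitrary sizes chosen only for illustration:

import torch
import torch.nn as nn

gru = nn.GRU(input_size=8, hidden_size=16, num_layers=2, batch_first=True)
_, hidden = gru(torch.randn(4, 10, 8))   # batch of 4, sequence length 10
print(hidden.size(1))                    # 4 -- GRU hidden is [layers, batch, hidden]

lstm = nn.LSTM(input_size=8, hidden_size=16, num_layers=2, batch_first=True)
_, (h, c) = lstm(torch.randn(4, 10, 8))
print(h.size(1))                         # 4 -- LSTM hidden is a tuple, hence encoder_hidden[0].size(1)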
Example 12: evaluate
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def evaluate(attention_model, x_test, y_test):
    """
    cv results
    Args:
        attention_model : {object} model
        x_test : {nplist} x_test
        y_test : {nplist} y_test
    Returns:
        cv-accuracy
    """
    attention_model.batch_size = x_test.shape[0]
    attention_model.hidden_state = attention_model.init_hidden()
    x_test_var = Variable(torch.from_numpy(x_test).type(torch.LongTensor))
    y_test_pred, _ = attention_model(x_test_var)
    if bool(attention_model.type):
        y_preds = torch.max(y_test_pred, 1)[1]
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.LongTensor))
    else:
        y_preds = torch.round(y_test_pred.type(torch.DoubleTensor).squeeze(1))
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.DoubleTensor))
    return torch.eq(y_preds, y_test_var).data.sum() / x_test_var.size(0)
Example 13: forward
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def forward(self, x):
    height = x.size(3)
    width = x.size(4)

    # Compute measurements
    x, y = self.measurements(x, self.pad_frame_size, self.patch_shape)
    if self.encoder_learn is False:
        x = x.detach()
        y = y.detach()

    # Map measurements to video blocks
    out = Variable(torch.zeros(
        x.size(0), x.size(1), self.vectorized)).cuda()
    for i in range(x.size(1)):
        out[:, i, :] = self.reconstruction(x[:, i, :])

    output_patches = out.view(
        out.size(0), self.measurements.patches_size[0],
        self.measurements.patches_size[1], self.measurements.patches_size[2],
        self.temporal_size, self.spatial_size, self.spatial_size
    ).permute(0, 1, 4, 2, 3, 6, 5)

    # Reconstruct video blocks to video
    reconstructed_video = self.measurements.fold(output_patches)[0]

    # Crop padding
    reconstructed_video = reconstructed_video[:, :, :, 0:height, 0:width]

    return reconstructed_video, y
Example 14: visualizeModel
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def visualizeModel(model, numImages=6):
    wasTraining = model.training
    model.eval()
    imagesSoFar = 0
    fig = plt.figure()

    for i, (inputs, labels) in enumerate(dataloaders['val']):
        if use_gpu:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)

        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)

        for j in range(inputs.size(0)):
            imagesSoFar += 1
            nCols = 2
            ax = plt.subplot(numImages // nCols, nCols, imagesSoFar)
            ax.axis('off')
            ax.set_title('predicted: {}'.format(class_names[preds[j]]))
            imshow(inputs.cpu().data[j])

            if imagesSoFar == numImages:
                model.train(mode=wasTraining)
                return

    model.train(mode=wasTraining)
Example 15: __val
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import size [as alias]
def __val(self):
    """
    Validation function during the train phase.
    """
    self.seg_net.eval()
    start_time = time.time()

    for j, data_tuple in enumerate(self.val_loader):
        # Change the data type.
        inputs = Variable(data_tuple[0].cuda(async=True), volatile=True)
        targets = Variable(data_tuple[1].cuda(async=True), volatile=True)

        # Forward pass.
        outputs = self.seg_net(inputs)
        # Compute the loss of the val batch.
        loss_pixel = self.pixel_loss(outputs, targets)
        loss = loss_pixel

        self.val_losses.update(loss.data[0], inputs.size(0))

        # Update the vars of the val phase.
        self.batch_time.update(time.time() - start_time)
        start_time = time.time()

    self.module_utilizer.save_net(self.seg_net, self.iters)

    # Print the log info & reset the states.
    Log.info(
        'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
        'Loss {loss.avg:.8f}\n'.format(
            batch_time=self.batch_time, loss=self.val_losses))
    self.batch_time.reset()
    self.val_losses.reset()
    self.seg_net.train()