This article collects typical usage examples of the Python method torch.autograd.Variable.cuda. If you are wondering how Variable.cuda is called in practice, the curated examples below may help. You can also explore further usage examples for the containing class, torch.autograd.Variable.
The following shows 15 code examples of the Variable.cuda method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
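Before the examples, here is a minimal sketch of the pattern they all share, written against the legacy (pre-0.4) Variable API that these examples use. The tensor shapes and the availability check are illustrative assumptions, not taken from any example below. Note that .cuda() is not in-place: it returns a new Variable on the GPU, so the result must be reassigned.

import torch
from torch.autograd import Variable

# Wrap plain tensors in Variables, then move them to the GPU with .cuda().
x = Variable(torch.randn(8, 3))
y = Variable(torch.randn(8, 1))
if torch.cuda.is_available():
    # .cuda() returns a GPU copy; reassign rather than calling it for its side effects
    x, y = x.cuda(), y.cuda()

In PyTorch 0.4 and later, Variable was merged into Tensor, and the same move is usually written as tensor.to('cuda') on plain tensors.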
Example 1: validate
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def validate(data_loader, net, loss):
    start_time = time.time()

    net.eval()

    metrics = []
    for i, (data, target, coord) in enumerate(data_loader):
        data = Variable(data.cuda(async=True), volatile=True)
        target = Variable(target.cuda(async=True), volatile=True)
        coord = Variable(coord.cuda(async=True), volatile=True)

        output = net(data, coord)
        loss_output = loss(output, target, train=False)

        loss_output[0] = loss_output[0].data[0]
        metrics.append(loss_output)
    end_time = time.time()

    metrics = np.asarray(metrics, np.float32)
    print('Validation: tpr %3.2f, tnr %3.8f, total pos %d, total neg %d, time %3.2f' % (
        100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7]),
        100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9]),
        np.sum(metrics[:, 7]),
        np.sum(metrics[:, 9]),
        end_time - start_time))
    print('loss %2.4f, classify loss %2.4f, regress loss %2.4f, %2.4f, %2.4f, %2.4f' % (
        np.mean(metrics[:, 0]),
        np.mean(metrics[:, 1]),
        np.mean(metrics[:, 2]),
        np.mean(metrics[:, 3]),
        np.mean(metrics[:, 4]),
        np.mean(metrics[:, 5])))
    print()
    print()
Example 2: predict_proba
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def predict_proba(self, dataset):
    """Predict class probabilities for dataset.

    This method only works with method logistic/multiclass.

    Parameters:
    ----------
    dataset (dict): dictionary with the testing dataset -
        X_wide_test, X_deep_test, target

    Returns:
    --------
    array-like with the probabilities for dataset.
    """
    X_w = Variable(torch.from_numpy(dataset.wide)).float()
    X_d = Variable(torch.from_numpy(dataset.deep))

    if use_cuda:
        X_w, X_d = X_w.cuda(), X_d.cuda()

    # set the model in evaluation mode so dropout is not applied
    net = self.eval()
    pred = net(X_w, X_d).cpu()
    if self.method == "logistic":
        pred = pred.squeeze(1).data.numpy()
        probs = np.zeros([pred.shape[0], 2])
        probs[:, 0] = 1 - pred
        probs[:, 1] = pred
        return probs
    if self.method == "multiclass":
        return pred.data.numpy()
Example 3: train
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def train(epoch):
    epoch_loss = 0
    for iteration, batch in enumerate(training_data_loader, 1):
        randH = random.randint(0, opt.remsize)
        randW = random.randint(0, opt.remsize)
        input = Variable(batch[0][:, :, randH:randH + opt.size, randW:randW + opt.size])
        target = Variable(batch[1][:, :,
                          randH + target_gap:randH + target_gap + target_size,
                          randW + target_gap:randW + target_gap + target_size])
        #target = target.squeeze(1)
        #print(target.data.size())
        if cuda:
            input = input.cuda()
            target = target.cuda()

        input = unet(input)
        #print(input.data.size())
        loss = criterion(input, target)
        epoch_loss += loss.data[0]
        loss.backward()
        optimizer.step()
        if iteration % 10 == 0:
            print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(epoch, iteration, len(training_data_loader), loss.data[0]))
            imgout = input.data / 2 + 1
            torchvision.utils.save_image(imgout, "/home/wcd/PytorchProject/Unet/unetdata/checkpoint/epch_" + str(epoch) + '.jpg')
    print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(training_data_loader)))
Example 4: grad_fun
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def grad_fun(net, queue):
    iter_idx = 0
    while True:
        sum_loss = 0.0
        iter_idx += 1
        for v in TRAIN_DATA:
            x_v = Variable(torch.from_numpy(np.array([v], dtype=np.float32)))
            y_v = Variable(torch.from_numpy(np.array([get_y(v)], dtype=np.float32)))
            if CUDA:
                x_v = x_v.cuda()
                y_v = y_v.cuda()
            net.zero_grad()
            out_v = net(x_v)
            loss_v = F.mse_loss(out_v, y_v)
            loss_v.backward()
            grads = [param.grad.clone() if param.grad is not None else None
                     for param in net.parameters()]
            queue.put(grads)
            sum_loss += loss_v.data.cpu().numpy()
        print("%d: %.2f" % (iter_idx, sum_loss))
        if sum_loss < 0.1:
            queue.put(None)
            break
Example 5: predict
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def predict(self, dataset):
    """Predict target for dataset.

    Parameters:
    ----------
    dataset (dict): dictionary with the testing dataset -
        X_wide_test, X_deep_test, target

    Returns:
    --------
    array-like with the target for dataset
    """
    X_w = Variable(torch.from_numpy(dataset.wide)).float()
    X_d = Variable(torch.from_numpy(dataset.deep))

    if use_cuda:
        X_w, X_d = X_w.cuda(), X_d.cuda()

    # set the model in evaluation mode so dropout is not applied
    net = self.eval()
    pred = net(X_w, X_d).cpu()
    if self.method == "regression":
        return pred.squeeze(1).data.numpy()
    if self.method == "logistic":
        return (pred > 0.5).squeeze(1).data.numpy()
    if self.method == "multiclass":
        _, pred_cat = torch.max(pred, 1)
        return pred_cat.data.numpy()
Example 6: test
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def test(epoch):
    print('\nTest')
    net.eval()
    test_loss = 0
    for batch_idx, (inputs, loc_targets, cls_targets) in enumerate(testloader):
        inputs = Variable(inputs.cuda(), volatile=True)
        loc_targets = Variable(loc_targets.cuda())
        cls_targets = Variable(cls_targets.cuda())

        loc_preds, cls_preds = net(inputs)
        loss = criterion(loc_preds, loc_targets, cls_preds, cls_targets)
        test_loss += loss.data[0]
        print('test_loss: %.3f | avg_loss: %.3f' % (loss.data[0], test_loss / (batch_idx + 1)))

    # Save checkpoint
    global best_loss
    test_loss /= len(testloader)
    if test_loss < best_loss:
        print('Saving..')
        state = {
            'net': net.module.state_dict(),
            'loss': test_loss,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(state, './checkpoint/ckpt.pth')
        best_loss = test_loss
Example 7: to_variable
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def to_variable(torch_data, cuda=True):
    variable = Variable(torch_data)
    if cuda:
        # .cuda() is not in-place; reassign so the GPU copy is what gets returned
        variable = variable.cuda()
    return variable
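The reassignment inside the helper matters because Variable.cuda() does not move the Variable in place; it returns a GPU copy. A hypothetical usage sketch follows (net here is an assumed model already placed on the GPU, not part of the original example):

x = to_variable(torch.randn(2, 3), cuda=torch.cuda.is_available())
out = net(x)  # without the reassignment, x would still live on the CPU and this call would fail with a device mismatch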
Example 8: train
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def train(dataloader):
    clf.train()
    total_loss = 0
    start_time = time.time()
    for i_batch, sample_batched in enumerate(dataloader):
        inputs = Variable(sample_batched['input'])
        sent_len = Variable(sample_batched['len'])
        true_outputs = Variable(sample_batched['output'])
        hidden = clf.init_hidden(inputs.shape[0])
        if USE_GPU:
            inputs = inputs.cuda()
            sent_len = sent_len.cuda()
            true_outputs = true_outputs.cuda()

        clf.zero_grad()
        outputs = clf.forward(inputs, sent_len, hidden)
        loss = loss_function(outputs, true_outputs)
        loss.backward()
        total_loss += loss.data
        optimizer.step()

        if i_batch % log_interval == 0 and i_batch > 0:
            cur_loss = total_loss[0] / log_interval
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:04.4f} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.2f}'.format(
                      epoch, i_batch, len(dataloader.dataset) // dataloader.batch_size, optimizer.param_groups[0]['lr'],
                      elapsed * 1000 / log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
Example 9: validate
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def validate(args):
    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path, split=args.split, is_transform=True, img_size=(args.img_rows, args.img_cols))
    n_classes = loader.n_classes
    valloader = data.DataLoader(loader, batch_size=args.batch_size, num_workers=4)
    running_metrics = runningScore(n_classes)

    # Setup Model
    model = get_model(args.model_path[:args.model_path.find('_')], n_classes)
    state = convert_state_dict(torch.load(args.model_path)['model_state'])
    model.load_state_dict(state)
    model.eval()

    for i, (images, labels) in tqdm(enumerate(valloader)):
        model.cuda()
        images = Variable(images.cuda(), volatile=True)
        labels = Variable(labels.cuda(), volatile=True)

        outputs = model(images)
        pred = outputs.data.max(1)[1].cpu().numpy()
        gt = labels.data.cpu().numpy()

        running_metrics.update(gt, pred)

    score, class_iou = running_metrics.get_scores()
    for k, v in score.items():
        print(k, v)

    for i in range(n_classes):
        print(i, class_iou[i])
Example 10: train_init
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def train_init(init_net, meta_alpha, loss_fn, image, target_bbox, evaluator):
    init_net.train()

    # Draw pos/neg samples
    pos_examples = gen_samples(SampleGenerator('gaussian', image.size, 0.1, 1.2),
                               target_bbox, opts['n_pos_init'], opts['overlap_pos_init'])
    neg_examples = np.concatenate([
        gen_samples(SampleGenerator('uniform', image.size, 1, 2, 1.1),
                    target_bbox, opts['n_neg_init'] // 2, opts['overlap_neg_init']),
        gen_samples(SampleGenerator('whole', image.size, 0, 1.2, 1.1),
                    target_bbox, opts['n_neg_init'] // 2, opts['overlap_neg_init'])])

    # Crop images
    crop_size = opts['img_size']
    padding = opts['padding']
    image = np.asarray(image)
    pos_regions = extract_regions(image, pos_examples, crop_size, padding)
    neg_regions = extract_regions(image, neg_examples, crop_size, padding)
    pos_regions_var = Variable(torch.from_numpy(pos_regions[:opts['batch_pos']]))
    neg_regions_var = Variable(torch.from_numpy(neg_regions[:opts['batch_neg']]))
    if opts['use_gpu']:
        pos_regions_var = pos_regions_var.cuda()
        neg_regions_var = neg_regions_var.cuda()

    # training
    tracker_init_weights = OrderedDict((name, param) for (name, param) in init_net.named_parameters())
    tracker_keys = [name for (name, _) in init_net.named_parameters()]

    # the first iteration
    pos_score = init_net.forward(pos_regions_var)
    neg_score = init_net.forward(neg_regions_var)
    init_loss = loss_fn(pos_score, neg_score)
    init_acc, init_acc_pos, init_acc_neg = evaluator(pos_score, neg_score)
    grads = torch.autograd.grad(init_loss, tracker_init_weights.values(), create_graph=True)
    tracker_weights = OrderedDict((name, param - torch.mul(alpha, grad)) for
                                  ((name, param), (_, alpha), grad) in
                                  zip(tracker_init_weights.items(),
                                      meta_alpha.items(), grads))

    # rest of iterations
    for i in range(opts['n_init_updates'] - 1):
        pos_score = init_net.forward(pos_regions_var, tracker_weights)
        neg_score = init_net.forward(neg_regions_var, tracker_weights)
        loss = loss_fn(pos_score, neg_score)
        grads = torch.autograd.grad(loss, tracker_weights.values(), create_graph=True)
        tracker_weights = OrderedDict((name, param - torch.mul(alpha, grad))
                                      for ((name, param), (_, alpha), grad) in
                                      zip(tracker_weights.items(), meta_alpha.items(), grads))

    # update tracker
    init_net.copy_meta_weights(tracker_weights)
    init_net.eval()

    pos_score = init_net.forward(pos_regions_var)
    neg_score = init_net.forward(neg_regions_var)
    acc, acc_pos, acc_neg = evaluator(pos_score, neg_score)

    pos_regions_var = Variable(torch.from_numpy(pos_regions))
    neg_regions_var = Variable(torch.from_numpy(neg_regions))
    if opts['use_gpu']:
        pos_regions_var = pos_regions_var.cuda()
        neg_regions_var = neg_regions_var.cuda()
    pos_feats = init_net(pos_regions_var, out_layer='features')
    neg_feats = init_net(neg_regions_var, out_layer='features')
    return pos_feats.data.clone(), neg_feats.data.clone(), init_acc, acc
Example 11: forward_pass
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def forward_pass(self):
    ## Variables and outputs transferred to cuda
    X = self.initialize_input()
    self.batch_y = self.sample['groundtruth']
    Y = Variable(self.batch_y.float())
    Y = Y.cuda()

    ## fwd
    if self.dist_net == 'v2':
        self.batch_y_dist = distance_map_batch_v2(self.batch_y, self.threshold, self.bins)
        Y_dist = Variable(self.batch_y_dist.float())
        Y_dist = Y_dist.cuda()
        probs_dist, probs_seg = self.predict(X)
        loss_seg = self.criterion(Y, probs_seg, self.loss_fn)
        loss_dist = self.criterion(Y_dist, probs_dist, 'cross-entropy')
        loss = loss_seg + loss_dist
    else:
        self.batch_y_dist = None
        probs_seg = self.predict(X)
        probs_dist = None
        loss = self.criterion(Y, probs_seg, self.loss_fn)
    return loss, probs_dist, probs_seg
Example 12: get_mini_batch
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def get_mini_batch(mini_batch_indices, sequences, seq_lengths, volatile=False, cuda=False):
    # get the sequence lengths of the mini-batch
    seq_lengths = seq_lengths[mini_batch_indices]
    # sort the sequence lengths
    sorted_seq_length_indices = np.argsort(seq_lengths)[::-1]
    sorted_seq_lengths = seq_lengths[sorted_seq_length_indices]
    sorted_mini_batch_indices = mini_batch_indices[sorted_seq_length_indices]
    # compute the length of the longest sequence in the mini-batch
    T_max = np.max(seq_lengths)
    # this is the sorted mini-batch
    mini_batch = sequences[sorted_mini_batch_indices, 0:T_max, :]
    # this is the sorted mini-batch in reverse temporal order
    mini_batch_reversed = reverse_sequences_numpy(mini_batch, sorted_seq_lengths)
    # get mask for mini-batch
    mini_batch_mask = get_mini_batch_mask(mini_batch, sorted_seq_lengths)
    # wrap in PyTorch Variables
    mini_batch = Variable(torch.Tensor(mini_batch), volatile=volatile)
    mini_batch_reversed = Variable(torch.Tensor(mini_batch_reversed), volatile=volatile)
    mini_batch_mask = Variable(torch.Tensor(mini_batch_mask), volatile=volatile)
    # cuda() here because we need to cuda() before packing
    if cuda:
        mini_batch = mini_batch.cuda()
        mini_batch_mask = mini_batch_mask.cuda()
        mini_batch_reversed = mini_batch_reversed.cuda()
    # do sequence packing
    mini_batch_reversed = nn.utils.rnn.pack_padded_sequence(mini_batch_reversed,
                                                            sorted_seq_lengths,
                                                            batch_first=True)
    return mini_batch, mini_batch_reversed, mini_batch_mask, sorted_seq_lengths
Example 13: random_batch
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def random_batch(batch_size=3):
    input_seqs = []
    target_seqs = []

    # Choose random pairs
    for i in range(batch_size):
        pair = random.choice(pairs)
        input_seqs.append(indexes_from_sentence(input_lang, pair[0]))
        target_seqs.append(indexes_from_sentence(output_lang, pair[1]))

    # Zip into pairs, sort by length (descending), unzip
    seq_pairs = sorted(zip(input_seqs, target_seqs), key=lambda p: len(p[0]), reverse=True)
    input_seqs, target_seqs = zip(*seq_pairs)

    # For input and target sequences, get array of lengths and pad with 0s to max length
    input_lengths = [len(s) for s in input_seqs]
    input_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs]
    target_lengths = [len(s) for s in target_seqs]
    target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs]

    # Turn padded arrays into (batch x seq) tensors, transpose into (seq x batch)
    input_var = Variable(torch.LongTensor(input_padded)).transpose(0, 1)
    target_var = Variable(torch.LongTensor(target_padded)).transpose(0, 1)

    if USE_CUDA:
        input_var = input_var.cuda()
        target_var = target_var.cuda()

    return input_var, input_lengths, target_var, target_lengths
Example 14: eval_by_batch
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def eval_by_batch(self, Xi, Xv, y, x_size):
    total_loss = 0.0
    y_pred = []
    if self.use_ffm:
        batch_size = 16384 * 2
    else:
        batch_size = 16384
    batch_iter = x_size // batch_size
    criterion = F.binary_cross_entropy_with_logits
    model = self.eval()
    for i in range(batch_iter + 1):
        offset = i * batch_size
        end = min(x_size, offset + batch_size)
        if offset == end:
            break
        batch_xi = Variable(torch.LongTensor(Xi[offset:end]))
        batch_xv = Variable(torch.FloatTensor(Xv[offset:end]))
        batch_y = Variable(torch.FloatTensor(y[offset:end]))
        if self.use_cuda:
            batch_xi, batch_xv, batch_y = batch_xi.cuda(), batch_xv.cuda(), batch_y.cuda()
        outputs = model(batch_xi, batch_xv)
        pred = F.sigmoid(outputs).cpu()
        y_pred.extend(pred.data.numpy())
        loss = criterion(outputs, batch_y)
        total_loss += loss.data[0] * (end - offset)
    total_metric = self.eval_metric(y, y_pred)
    return total_loss / x_size, total_metric
Example 15: validate
# Required imports: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cuda [as alias]
def validate(val_loader, model, criterion, location, num_epochs, print_freq):
    batch_time = AverageMeter()
    losses = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (image, target, _) in enumerate(val_loader):
        image_var = Variable(image, volatile=True)
        target_var = Variable(target, volatile=True)
        if 'cuda' in location:
            image_var = image_var.cuda()
            target_var = target_var.cuda()

        # compute output
        output = model(image_var)
        loss = criterion(output, target_var)
        losses.update(loss.data[0], image.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
                      i, len(val_loader), batch_time=batch_time, loss=losses))

    print(' * Val Loss {loss.avg:.3f}'.format(loss=losses))

    return loss