本文整理汇总了Python中torch.autograd.Variable.to方法的典型用法代码示例。如果您正苦于以下问题:Python Variable.to方法的具体用法?Python Variable.to怎么用?Python Variable.to使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.autograd.Variable
的用法示例。
在下文中一共展示了Variable.to方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: train
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import to [as 别名]
def train(model, trn_loader, optimizer, loss_func, device):
    """Run a single training epoch over *trn_loader*.

    Returns a (mean_loss, mean_accuracy) pair averaged over the batches
    seen this epoch. The model is left in train() mode.
    """
    model.train()
    loss_sum = 0.0
    acc_sum = 0.0
    n_batches = 0
    for batch, labels in trn_loader:
        batch = Variable(batch.to(device))
        labels = Variable(labels.to(device))
        # Forward pass and loss.
        scores = model(batch)
        batch_loss = loss_func(scores, labels)
        loss_sum += batch_loss.item()
        # Backward pass and parameter update.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        # Batch accuracy, computed on CPU numpy copies via sklearn.
        hard_preds = torch.argmax(scores.data, 1)
        acc_sum += accuracy_score(labels.data.cpu().numpy(),
                                  hard_preds.data.cpu().numpy())
        n_batches += 1
    return loss_sum / float(n_batches), acc_sum / float(n_batches)
示例2: get_mask
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import to [as 别名]
def get_mask(attentions, lengths):
    """Build a 0/1 mask over padded timesteps, based on *lengths*.

    For each row i, positions at index >= lengths[i] are zeroed; the
    mask has the same shape as *attentions* and lives on the module
    global DEVICE.
    """
    longest = max(lengths.data)
    mask = Variable(torch.ones(attentions.size())).detach().to(DEVICE)
    for row, length in enumerate(lengths.data):
        if length < longest:
            # Zero out everything past this sequence's true length.
            mask[row, length:] = 0
    return mask
示例3: validate_network
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import to [as 别名]
def validate_network(data, model, criterion, device):
    """Evaluate *model* over the validation loader.

    Args:
        data: dict exposing data['loader']['valid'], an iterable of
            (inputs, labels) batches.
        model: network whose outputs are treated as log-probabilities
            (torch.exp is applied before taking the argmax).
        criterion: loss function applied per batch.
        device: device the model already lives on; batches are moved there.

    Returns:
        (validation_loss, accuracy): per-batch sums, as Python floats.
    """
    model.eval()
    accuracy = 0.0
    validation_loss = 0.0
    # Inference only: no_grad() stops autograd graph construction. The
    # original accumulated loss *tensors* (each retaining its graph),
    # which grows memory unboundedly across the validation pass.
    with torch.no_grad():
        for inputs, labels in iter(data['loader']['valid']):
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            validation_loss += criterion(outputs, labels).item()
            probabilities = torch.exp(outputs)
            # Count exact matches between predicted class and label.
            equality = (labels == probabilities.max(1)[1])
            accuracy += equality.float().mean().item()
    model.train()
    return validation_loss, accuracy
示例4: validation
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import to [as 别名]
def validation():
    """Run one validation pass over the module-global `loader_val`.

    Relies on module globals: loader_val, net, loss, device, epoch.
    Prints running loss/accuracy roughly 5 times per pass and returns
    the overall accuracy (0.0 when the loader yielded no samples).
    """
    loss_all = 0.0
    loss_cur = 0.0
    n_batches = len(loader_val)
    # Guard against small loaders: the original `n_batches // 5` is 0
    # when there are fewer than 5 batches, and `% print_every` below
    # would raise ZeroDivisionError.
    print_every = max(1, n_batches // 5)
    correct = 0
    total = 0
    for i, data in enumerate(loader_val):
        inputs, targets = data
        inputs, targets = Variable(inputs.to(device)), Variable(targets.to(device))
        outputs = net(inputs)
        loss_size = loss(outputs, targets)
        loss_all += float(loss_size.data)
        loss_cur += float(loss_size.data)
        _, vpredicted = torch.max(outputs, 1)
        # Tally per-sample hits for the accuracy figure.
        for pi, predicted in enumerate(vpredicted):
            expected = int(targets[pi])
            if expected == predicted:
                correct += 1
            total += 1
        if (i + 1) % print_every == 0:
            avg_loss_all = loss_all / (i + 1)
            avg_loss_cur = loss_cur / print_every
            loss_cur = 0.0
            acc = correct / total
            print("validation: {}, progress: {:.02f}% loss: {:.04f}/{:.04f}, acc: {:.04f}".format(
                epoch,
                (100 * (i + 1) / n_batches),
                avg_loss_cur,
                avg_loss_all,
                acc,
            ))
    if total == 0:
        return 0.0
    else:
        return correct / total
示例5: train_model
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import to [as 别名]
def train_model(dataset=dataset, save_dir=save_dir, num_classes=num_classes, lr=lr,
num_epochs=nEpochs, save_epoch=snapshot, useTest=useTest, test_interval=nTestInterval):
"""
Args:
num_classes (int): Number of classes in the data
num_epochs (int, optional): Number of epochs to train for.
"""
if modelName == 'C3D':
model = C3D_model.C3D(num_classes=num_classes, pretrained=False)
train_params = [{'params': C3D_model.get_1x_lr_params(model), 'lr': lr},
{'params': C3D_model.get_10x_lr_params(model), 'lr': lr * 10}]
elif modelName == 'R2Plus1D':
model = R2Plus1D_model.R2Plus1DClassifier(num_classes=num_classes, layer_sizes=(2, 2, 2, 2))
train_params = [{'params': R2Plus1D_model.get_1x_lr_params(model), 'lr': lr},
{'params': R2Plus1D_model.get_10x_lr_params(model), 'lr': lr * 10}]
elif modelName == 'R3D':
model = R3D_model.R3DClassifier(num_classes=num_classes, layer_sizes=(3, 4, 6, 3))
# model = resnet.ResNet(num_classes=num_classes, layers=(3, 4, 6, 3), sample_size=112, sample_duration=16)
train_params = model.parameters()
elif modelName == 'R2D':
model = R2Dnet.R2DClassifier(group_num_classes=num_classes, pretrained=True)
# model = resnet.ResNet(num_classes=num_classes, layers=(3, 4, 6, 3), sample_size=112, sample_duration=16)
train_params = model.parameters()
else:
print('We only implemented C3D and R2Plus1D models.')
raise NotImplementedError
criterion = nn.CrossEntropyLoss() # standard crossentropy loss for classification
optimizer = optim.SGD(train_params, lr=lr, momentum=0.9, weight_decay=5e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10,
gamma=0.1) # the scheduler divides the lr by 10 every 10 epochs
model.to(device) #move here because resume need .cuda()
if resume_epoch == 0:
print("Training {} from scratch...".format(modelName))
else:
checkpoint = torch.load(os.path.join(save_dir, 'models', saveName + '_epoch-' + str(resume_epoch - 1) + '.pth.tar'),
map_location=lambda storage, loc: storage) # Load all tensors onto the CPU
print("Initializing weights from: {}...".format(
os.path.join(save_dir, 'models', saveName + '_epoch-' + str(resume_epoch - 1) + '.pth.tar')))
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['opt_dict'])
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
# model.to(device)
criterion.to(device)
log_dir = os.path.join(save_dir, 'models', datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
writer = SummaryWriter(log_dir=log_dir)
print('Training model on {} dataset...'.format(dataset))
train_dataloader = DataLoader(VolleyballDataset(dataset=dataset, split='train',clip_len=16), batch_size=4, shuffle=True, \
num_workers=0)
val_dataloader = DataLoader(VolleyballDataset(dataset=dataset, split='val', clip_len=16), batch_size=4, num_workers=0)
test_dataloader = DataLoader(VolleyballDataset(dataset=dataset, split='test', clip_len=16), batch_size=4, num_workers=0)
trainval_loaders = {'train': train_dataloader, 'val': val_dataloader}
trainval_sizes = {x: len(trainval_loaders[x].dataset) for x in ['train', 'val']}
test_size = len(test_dataloader.dataset)
for epoch in range(resume_epoch, num_epochs):
# each epoch has a training and validation step
for phase in ['train', 'val']:
start_time = timeit.default_timer()
# reset the running loss and corrects
running_loss = 0.0
running_corrects = 0.0
# set model to train() or eval() mode depending on whether it is trained
# or being validated. Primarily affects layers such as BatchNorm or Dropout.
if phase == 'train':
# scheduler.step() is to be called once every epoch during training
scheduler.step()
model.train()
else:
model.eval()
torch.backends.cudnn.benchmark=False
# for inputs, bbox_inputs, labels, adjacent_matrix in tqdm(trainval_loaders[phase]):
# for inputs, labels in tqdm(trainval_loaders[phase]):
for inputs, labels, dists in tqdm(trainval_loaders[phase]):
# move inputs and labels to the device the training is taking place on
inputs = Variable(inputs, requires_grad=True).to(device)
# bbox_inputs = Variable(bbox_inputs, requires_grad=True).to(device)
# adjacent_matrix = Variable(adjacent_matrix, requires_grad=True).to(device)
labels = Variable(labels).to(device)
dists = Variable(dists, requires_grad = True).to(device)
# dist_num = Variable(dist_num).to(device)
optimizer.zero_grad()
if phase == 'train':
outputs = model(inputs, dists)
# outputs = model(inputs, bbox_inputs, adjacent_matrix)
else:
with torch.no_grad():
# outputs = model(inputs, bbox_inputs, adjacent_matrix)
outputs = model(inputs, dists)
probs = nn.Softmax(dim=1)(outputs)
preds = torch.max(probs, 1)[1]
#.........这里部分代码省略.........
示例6: range
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import to [as 别名]
exit_notification = False
for critic_iter in range(CRITIC_MULTIPLE_UPDATES):
# random noise z
#noise_z = torch.randn(BATCH_SIZE, 3, 4, 4)
noise_z = torch.randn(BATCH_SIZE, 100)
inputs = inputs['image']
if is_gpu_mode:
#inputs = Variable(torch.from_numpy(input_img).float().cuda())
inputs = Variable(inputs.float().cuda())
noise_z = Variable(noise_z.cuda())
# multi gpu
inputs = inputs.to(device)
noise_z = noise_z.to(device)
else:
#inputs = Variable(torch.from_numpy(input_img).float())
noise_z = Variable(noise_z)
# feedforward the inputs. generator
outputs_gen = gen_model(noise_z)
# pseudo zero-center
#print ('max inputs =', torch.max(inputs))
#print ('max ouputs_gen =', torch.max(outputs_gen))
#print ('min inputs =', torch.min(inputs))
#print ('min ouputs_gen =', torch.min(outputs_gen))
inputs = inputs - MEAN_VALUE_FOR_ZERO_CENTERED
示例7: network_worker
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import to [as 别名]
def network_worker(mode, data, model, criterion, optimizer, epochs, device, print_every):
    """Train or validate the network.

    mode 'train' iterates data['loader']['train']; mode 'test' iterates
    data['loader']['test'] and additionally reports validation metrics
    every *print_every* steps. Any other mode raises ValueError.
    """
    model.to(device)
    if mode not in ('train', 'test'):
        raise ValueError('Please choose \'train\' or \'test\' for parameter mode.')
    loader = data['loader'][mode]
    print("Start of training" if mode == 'train' else "Start of validation")
    for epoch in range(epochs):
        model.train()
        running_loss = 0
        steps = 0
        start = time()
        for inputs, labels in iter(loader):
            steps += 1
            optimizer.zero_grad()
            # Move the batch onto the compute device.
            batch_in = Variable(inputs).to(device)
            batch_lbl = Variable(labels).to(device)
            step_loss = criterion(model.forward(batch_in), batch_lbl)
            step_loss.backward()
            optimizer.step()
            running_loss += step_loss.item()
            if steps % print_every == 0:
                header = "Epoch: {}/{} ({})... ".format(epoch + 1, epochs, steps)
                if mode == 'train':
                    print(header,
                          "Training Loss: {:.4f}...".format(running_loss / print_every),
                          "Device: {}...Time: {:.3f}s".format(device, (time() - start) / 3))
                else:  # 'test' --> also run a validation pass
                    validation_loss, accuracy = validate_network(data, model, criterion, device)
                    n_valid = len(data['loader']['valid'])
                    print(header,
                          "Testing Loss: {:.3f}.. ".format(running_loss / print_every),
                          "Validation Loss: {:.3f}.. ".format(validation_loss / n_valid),
                          "Validation Accuracy: {:.3f}".format(accuracy / n_valid),
                          "Device: {}...Time: {:.3f}s".format(device, (time() - start) / 3))
                running_loss = 0
                start = time()
    print("End of training" if mode == 'train' else "End of validation")
示例8: len
# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import to [as 别名]
if total == 0:
return 0.0
else:
return correct / total
n_batches = len(loader_train)
print_every = n_batches // 5
for epoch in range(n_epochs):
loss_all = 0.0
loss_cur = 0.0
print("epoch: {}".format(epoch))
for i, data in enumerate(loader_train):
inputs, targets = data
inputs, targets = Variable(inputs.to(device)), Variable(targets.to(device))
optimizer.zero_grad()
outputs = net(inputs)
loss_size = loss(outputs, targets)
loss_size.backward()
optimizer.step()
loss_all += float(loss_size.data)
loss_cur += float(loss_size.data)
if (i + 1 ) % print_every == 0:
avg_loss_all = loss_all / (i + 1)
avg_loss_cur = loss_cur / print_every
loss_cur = 0.0
print("epoch: {}, progress: {:.02f}% loss: {:.04f}/{:.04f}".format(