This article collects typical usage examples of the Python method torch.autograd.Variable.cpu. If you have been wondering how Variable.cpu is used in practice, the curated examples below should help. You can also explore other usage of the containing class, torch.autograd.Variable.
The following presents 15 code examples of Variable.cpu, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
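Before the examples, a minimal sketch may help frame what Variable.cpu does: it returns a copy of the wrapped data in host (CPU) memory, leaving the original untouched, which is what conversions such as .numpy() and plotting utilities require. Note that Variable belongs to the legacy pre-0.4 PyTorch API; since PyTorch 0.4 tensors track gradients themselves and .cpu() is called on the tensor directly. The tensor values below are illustrative only and are not taken from any example on this page.
import torch
from torch.autograd import Variable  # legacy wrapper; modern tensors expose .cpu() directly

x = Variable(torch.randn(2, 3))  # wrap a tensor for autograd (pre-0.4 style)
if torch.cuda.is_available():
    x = x.cuda()  # move to the GPU when one is available
x_host = x.cpu()  # copy back to host memory; a no-op if x is already on the CPU
print(x_host.data.numpy())  # .numpy() only works on CPU tensors
The same pattern, calling .cpu() immediately before .numpy() or a plotting call, recurs throughout the examples below.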
Example 1: visualizeModel
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def visualizeModel(model, numImages=6):
    wasTraining = model.training
    model.eval()
    imagesSoFar = 0
    fig = plt.figure()
    for i, (inputs, labels) in enumerate(dataloaders['val']):
        if use_gpu:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)
        for j in range(inputs.size(0)):
            imagesSoFar += 1
            nCols = 2
            ax = plt.subplot(numImages // nCols, nCols, imagesSoFar)
            ax.axis('off')
            ax.set_title('predicted: {}'.format(class_names[preds[j]]))
            imshow(inputs.cpu().data[j])  # bring the image back to host memory for plotting
            if imagesSoFar == numImages:
                model.train(mode=wasTraining)  # restore the original training mode
                return
    model.train(mode=wasTraining)
Example 2: train
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def train(epoch):
    iters = []
    lrs = []
    train_losses = []
    val_losses = []
    val_accuracies = []
    model.train()
    # train loop
    for batch_idx, batch in enumerate(train_loader):
        # prepare data
        images = Variable(batch[0])
        targets = Variable(batch[1])
        if args.cuda:
            images, targets = images.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        if args.vis and batch_idx % args.log_interval == 0 and images.shape[0] == 1:
            # copy outputs/targets to the CPU before converting to numpy for display
            cv2.imshow('output: ', outputs.cpu().data.numpy()[0][0])
            cv2.imshow('target: ', targets.cpu().data.numpy()[0][0])
            cv2.waitKey(10)
        # Learning rate decay.
        if epoch % args.step_interval == 0 and epoch != 1 and batch_idx == 0:
            if args.lr_decay != 1:
                global lr, optimizer
                lr *= args.lr_decay
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
                print('Learning rate decayed to %f.' % lr)
        if batch_idx % args.log_interval == 0:
            val_loss, val_acc = evaluate('val', n_batches=80)
            train_loss = loss.item()
            iters.append(len(train_loader.dataset) * (epoch - 1) + batch_idx)
            lrs.append(lr)
            train_losses.append(train_loss)
            val_losses.append(val_loss)
            val_accuracies.append(val_acc)
            examples_this_epoch = batch_idx * len(images)
            epoch_progress = 100. * batch_idx / len(train_loader)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\t'
                  'Train Loss: {:.6f}\tVal Loss: {:.6f}\tVal Acc: {}'.format(
                      epoch, examples_this_epoch, len(train_loader.dataset),
                      epoch_progress, train_loss, val_loss, val_acc))
    return iters, train_losses, val_losses, val_accuracies
Example 3: __eval_function__
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def __eval_function__(self, data, metrics):
    test_x, test_y = data
    if self.cuda:
        test_x = test_x.cuda()
        test_y = test_y.cuda()
    test_x = Variable(test_x)
    test_y = Variable(test_y)
    outputs = self.model(test_x)
    for metric in metrics:
        # the metrics expect CPU tensors, so copy both predictions and labels back
        metric.update(outputs.cpu().data, test_y.cpu().data)
Example 4: validate
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def validate(val_loader, model, encoder_learn):
    batch_time = metrics.AverageMeter()
    psnr = metrics.AverageMeter()
    # switch to evaluate mode
    model.cuda()
    model.eval()
    # binarize weights
    if encoder_learn:
        model.module.measurements.binarization()
    end = time.time()
    for i, (video_frames, pad_frame_size, patch_shape) in enumerate(val_loader):
        # legacy API: async= was renamed non_blocking= and volatile= was removed in PyTorch 0.4
        video_input = Variable(video_frames.cuda(async=True), volatile=True)
        print(val_loader.dataset.videos[i])
        # compute output
        model.module.pad_frame_size = pad_frame_size.numpy()
        model.module.patch_shape = patch_shape.numpy()
        reconstructed_video, y = model(video_input)
        # copy both videos to host memory for the numpy-based PSNR computation
        reconstructed_video = reconstructed_video.cpu().data.numpy()
        original_video = video_input.cpu().data.numpy()
        # measure accuracy and record loss
        psnr_video = metrics.psnr_accuracy(reconstructed_video, original_video)
        psnr.update(psnr_video, video_frames.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        logging.info('Test: [{0}/{1}]\t'
                     'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                     'PSNR {psnr.val:.3f} ({psnr.avg:.3f})'.format(
                         i + 1, len(val_loader), batch_time=batch_time,
                         psnr=psnr))
    # restore real-valued weights
    if encoder_learn:
        model.module.measurements.restore()
    print(' * PSNR {psnr.avg:.3f}'.format(psnr=psnr))
    return psnr.avg
Example 5: test_local_var_unary_methods
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def test_local_var_unary_methods(self):
    '''Unit tests for methods mentioned in issue 1385
    https://github.com/OpenMined/PySyft/issues/1385'''
    x = Var(torch.FloatTensor([1, 2, -3, 4, 5]))
    assert torch.equal(x.abs(), Var(torch.FloatTensor([1, 2, 3, 4, 5])))
    assert torch.equal(x.abs_(), Var(torch.FloatTensor([1, 2, 3, 4, 5])))
    x = Var(torch.FloatTensor([1, 2, -3, 4, 5]))
    assert torch.equal(x.cos().int(), Var(torch.IntTensor([0, 0, 0, 0, 0])))
    x = Var(torch.FloatTensor([1, 2, -3, 4, 5]))
    assert torch.equal(x.cos_().int(), Var(torch.IntTensor([0, 0, 0, 0, 0])))
    x = Var(torch.FloatTensor([1, 2, -3, 4, 5]))
    assert torch.equal(x.ceil(), x)
    assert torch.equal(x.ceil_(), x)
    # cpu() on a variable that already lives in host memory is a no-op
    assert torch.equal(x.cpu(), x)
Example 6: test_remote_var_unary_methods
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def test_remote_var_unary_methods(self):
    '''Unit tests for methods mentioned in issue 1385
    https://github.com/OpenMined/PySyft/issues/1385'''
    hook = TorchHook(verbose=False)
    local = hook.local_worker
    remote = VirtualWorker(id=2, hook=hook)
    local.add_worker(remote)
    x = Var(torch.FloatTensor([1, 2, -3, 4, 5])).send(remote)
    assert torch.equal(x.abs().get(), Var(torch.FloatTensor([1, 2, 3, 4, 5])))
    assert torch.equal(x.abs_().get(), Var(torch.FloatTensor([1, 2, 3, 4, 5])))
    assert torch.equal(x.cos().int().get(), Var(torch.IntTensor([0, 0, 0, 0, 0])))
    assert torch.equal(x.cos_().int().get(), Var(torch.IntTensor([0, 0, 0, 0, 0])))
    x = Var(torch.FloatTensor([1, 2, -3, 4, 5])).send(remote)
    assert torch.equal(x.ceil().get(), Var(torch.FloatTensor([1, 2, -3, 4, 5])))
    assert torch.equal(x.ceil_().get(), Var(torch.FloatTensor([1, 2, -3, 4, 5])))
    assert torch.equal(x.cpu().get(), Var(torch.FloatTensor([1, 2, -3, 4, 5])))
Example 7: __train_func__
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def __train_func__(self, data, optimizer, loss_fn, train_metrics, running_loss, epoch, batch_num):
    optimizer.zero_grad()
    train_x, train_y = data
    batch_size = train_x.size(0)
    if self.cuda:
        train_x = train_x.cuda()
        train_y = train_y.cuda()
    train_x = Variable(train_x)
    train_y = Variable(train_y)
    outputs = self.model(train_x)
    loss = loss_fn(outputs, train_y)
    loss.backward()
    optimizer.step()
    # accumulate the batch loss on the CPU, weighted by batch size
    running_loss.add_(loss.cpu() * batch_size)
    for metric in train_metrics:
        metric.update(outputs.cpu().data, train_y.cpu().data)
Example 8: visualize_model
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def visualize_model(model, num_images=6):
    images_so_far = 0
    fig = plt.figure()
    for i, data in enumerate(dataloaders['val']):
        inputs, labels = data
        if use_gpu:
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
        else:
            inputs, labels = Variable(inputs), Variable(labels)
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)
        for j in range(inputs.size()[0]):
            images_so_far += 1
            ax = plt.subplot(num_images // 2, 2, images_so_far)
            ax.axis('off')
            ax.set_title('predicted: {}'.format(class_names[preds[j]]))
            imshow(inputs.cpu().data[j])  # bring the image back to host memory for plotting
            if images_so_far == num_images:
                return
Example 9: visualize_stn
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def visualize_stn():
    # Get a batch of training data
    data, _ = next(iter(test_loader))
    data = Variable(data, volatile=True)  # legacy API: volatile= was removed in PyTorch 0.4
    if use_cuda:
        data = data.cuda()
    input_tensor = data.cpu().data
    transformed_input_tensor = model.stn(data).cpu().data
    in_grid = convert_image_np(
        torchvision.utils.make_grid(input_tensor))
    out_grid = convert_image_np(
        torchvision.utils.make_grid(transformed_input_tensor))
    # Plot the results side by side
    f, axarr = plt.subplots(1, 2)
    axarr[0].imshow(in_grid)
    axarr[0].set_title('Dataset Images')
    axarr[1].imshow(out_grid)
    axarr[1].set_title('Transformed Images')
Example 10: train
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def train(self):
    if self.T - self.target_sync_T > self.args.target:
        self.sync_target_network()
        self.target_sync_T = self.T
    info = {}
    for _ in range(self.args.iters):
        self.dqn.eval()
        # TODO: Use a named tuple for experience replay
        n_step_sample = self.args.n_step
        batch, indices, is_weights = self.replay.Sample_N(self.args.batch_size, n_step_sample, self.args.gamma)
        columns = list(zip(*batch))
        states = Variable(torch.from_numpy(np.array(columns[0])).float().transpose_(1, 3))
        actions = Variable(torch.LongTensor(columns[1]))
        terminal_states = Variable(torch.FloatTensor(columns[5]))
        rewards = Variable(torch.FloatTensor(columns[2]))
        # Have to clip rewards for DQN
        rewards = torch.clamp(rewards, -1, 1)
        steps = Variable(torch.FloatTensor(columns[4]))
        new_states = Variable(torch.from_numpy(np.array(columns[3])).float().transpose_(1, 3))
        target_dqn_qvals = self.target_dqn(new_states).cpu()
        # Make a new variable with those values so that they are treated as constants
        target_dqn_qvals_data = Variable(target_dqn_qvals.data)
        q_value_targets = Variable(torch.ones(terminal_states.size()[0])) - terminal_states
        inter = Variable(torch.ones(terminal_states.size()[0]) * self.args.gamma)
        q_value_targets = q_value_targets * torch.pow(inter, steps)
        if self.args.double:
            # Double Q-learning: select actions with the online network,
            # evaluate them with the target network
            new_states_qvals = self.dqn(new_states).cpu()
            new_states_qvals_data = Variable(new_states_qvals.data)
            q_value_targets = q_value_targets * target_dqn_qvals_data.gather(1, new_states_qvals_data.max(1)[1])
        else:
            q_value_targets = q_value_targets * target_dqn_qvals_data.max(1)[0]
        q_value_targets = q_value_targets + rewards
        self.dqn.train()
        one_hot_actions = torch.zeros(self.args.batch_size, self.args.actions)
        for i in range(self.args.batch_size):
            one_hot_actions[i][actions[i].data] = 1
        if self.args.gpu:
            actions = actions.cuda()
            one_hot_actions = one_hot_actions.cuda()
            q_value_targets = q_value_targets.cuda()
            new_states = new_states.cuda()
        model_predictions_q_vals, model_predictions_state = self.dqn(states, Variable(one_hot_actions))
        model_predictions = model_predictions_q_vals.gather(1, actions.view(-1, 1))
        td_error = model_predictions - q_value_targets
        info["TD_Error"] = td_error.mean().data[0]
        # Update the priorities
        if not self.args.density_priority:
            self.replay.Update_Indices(indices, td_error.cpu().data.numpy(), no_pseudo_in_priority=self.args.count_td_priority)
        # If using prioritised replay we need to weight the td_error
        if self.args.prioritized and self.args.prioritized_is:
            weights_tensor = torch.from_numpy(is_weights).float()
            weights_tensor = Variable(weights_tensor)
            if self.args.gpu:
                weights_tensor = weights_tensor.cuda()
            td_error = td_error * weights_tensor
        # Model 1-step state transition error: save the predicted frames every x steps
        if self.T % self.args.model_save_image == 0:
            os.makedirs("{}/transition_model/{}".format(self.args.log_path, self.T))
            for ii, image, action, next_state, current_state in zip(range(self.args.batch_size), model_predictions_state.cpu().data, actions.data, new_states.cpu().data, states.cpu().data):
                image = image.numpy()[0]
                image = np.clip(image, 0, 1)
                next_state = next_state.numpy()[0]
                current_state = current_state.numpy()[0]
                black_bars = np.zeros_like(next_state[:1, :])
                joined_image = np.concatenate((current_state, black_bars, image, black_bars, next_state), axis=0)
                joined_image = np.transpose(joined_image)
                self.log_image("{}/transition_model/{}/{}_____Action_{}".format(self.args.log_path, self.T, ii + 1, action), joined_image * 255)
#......... remaining code omitted .........
Example 11: str
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
print('(discriminator out-real) = ', output_disc_real[0:4])
print('(discriminator out-fake) = ', output_disc_fake[0:4])
print('(discriminator out-fake2) = ', output_disc_fake_2[0:4])
# tf-board (scalar)
logger.scalar_summary('loss-generator', loss_gen, total_idx)
logger.scalar_summary('loss-discriminator', loss_disc_total, total_idx)
# logger.scalar_summary('disc-out-for-real', output_disc_real[0], total_idx)
# logger.scalar_summary('disc-out-for-fake', output_disc_fake[0], total_idx)
inputs = inputs + MEAN_VALUE_FOR_ZERO_CENTERED
outputs_gen = outputs_gen + MEAN_VALUE_FOR_ZERO_CENTERED
# tf-board (images - first few samples)
output_imgs_temp = outputs_gen.cpu().data.numpy()[0:6]
input_imgs_temp = inputs.cpu().data.numpy()[0:4]
# logger.an_image_summary('generated', output_img, i)
# rgb to bgr
#output_imgs_temp = output_imgs_temp[:, [2, 1, 0], ...]
#input_imgs_temp = input_imgs_temp[:, [2, 1, 0], ...]
logger.image_summary('generated', output_imgs_temp, total_idx)
logger.image_summary('real', input_imgs_temp, total_idx)
# save the model
if total_idx % MODEL_SAVING_FREQUENCY == 0:
    torch.save(gen_model.state_dict(),
               MODEL_SAVING_DIRECTORY + 'wgan_gp_pytorch_iter_' + str(total_idx) + '.pt')
Example 12:
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
# print('(discriminator_b out-fake) = ', output_disc_fake_b[0])  # debug print, disabled in the source
# tf-board (scalar)
logger.scalar_summary(color + ':loss(generator_a)', loss_gen_lsgan_a, i)
logger.scalar_summary(color + ':loss(generator_b)', loss_gen_lsgan_b, i)
logger.scalar_summary(color + ':loss-rec_a-l1', l1_loss_rec_a, i)
logger.scalar_summary(color + ':loss-rec_b-l1', l1_loss_rec_b, i)
logger.scalar_summary(color + ':loss-identity_a-l1', l1_loss_identity_a, i)
logger.scalar_summary(color + ':loss-identity_b-l1', l1_loss_identity_b, i)
logger.scalar_summary(color + ':loss(discriminator_a)', loss_disc_a_lsgan, i)
logger.scalar_summary(color + ':loss(discriminator_b)', loss_disc_b_lsgan, i)
# tf-board (images - first sample of the batch)
DISPLAY_SIZE = 1
inputs_imgs_temp = inputs.cpu().data.numpy()[0:DISPLAY_SIZE]
output_imgs_temp_b = outputs_gen_a_to_b.cpu().data.numpy()[0:DISPLAY_SIZE]
output_imgs_temp_a = outputs_gen_b_to_a.cpu().data.numpy()[0:DISPLAY_SIZE]
answer_imgs_temp = answers.cpu().data.numpy()[0:DISPLAY_SIZE]
reconstructed_a_temp = reconstructed_a.cpu().data.numpy()[0:DISPLAY_SIZE]
reconstructed_b_temp = reconstructed_b.cpu().data.numpy()[0:DISPLAY_SIZE]
# lab to bgr
output_img_opencv[:, :, 0] = inputs_imgs_temp[0][0, :, :]
output_img_opencv[:, :, 1] = inputs_imgs_temp[0][1, :, :]
output_img_opencv[:, :, 2] = inputs_imgs_temp[0][2, :, :]
output_img_opencv = cv2.cvtColor(output_img_opencv, cv2.COLOR_LAB2BGR)
output_img_opencv = output_img_opencv[..., [2,1,0]]
inputs_imgs_temp[0][0, :, :] = output_img_opencv[:, :, 0]
inputs_imgs_temp[0][1, :, :] = output_img_opencv[:, :, 1]
inputs_imgs_temp[0][2, :, :] = output_img_opencv[:, :, 2]
Example 13: forward
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
#......... some code omitted here .........
            ], self._imageft[i:i+1, :,
               int(round(roi[0].item())):int(round(roi[3].item())),
               int(round(roi[1].item())):int(round(roi[4].item())),
               int(round(roi[2].item())):int(round(roi[5].item()))] if cfg.USE_IMAGES else None))
            mask_pred_batch.append(mask_pred)
        self._predictions['mask_pred'] = mask_pred_batch
        self._add_losses()
    elif mode == 'TEST':
        with torch.no_grad():
            self.eval()
            self._mode = 'TEST'
            self._scene = blobs['data'].cuda()
            self._gt_bbox = blobs['gt_box']
            self._gt_mask = blobs['gt_mask'] if cfg.USE_MASK else None
            if cfg.USE_IMAGES:
                grid_shape = blobs['data'].shape[-3:]
                self._imageft = []
                for i in range(self.batch_size):
                    num_images = blobs['nearest_images']['images'][i].shape[0]
                    if cfg.USE_IMAGES_GT:
                        with torch.no_grad():
                            imageft = Variable(blobs['nearest_images']['images'][i].cuda())
                    else:
                        with torch.no_grad():
                            imageft = self.image_enet_fixed(Variable(blobs['nearest_images']['images'][i].cuda()))
                        imageft = self.image_enet_trainable(imageft)
                    proj3d = Variable(blobs['proj_ind_3d'][i])
                    proj2d = Variable(blobs['proj_ind_2d'][i])
                    # fall back to the CPU when the volume or image count exceeds the GPU budget
                    if blobs['data'].shape[2] * blobs['data'].shape[3] * blobs['data'].shape[4] > cfg.MAX_VOLUME or len(proj3d) > cfg.MAX_IMAGE:
                        print('on cpu')
                        imageft = imageft.cpu()
                        proj3d = proj3d.cpu()
                        proj2d = proj2d.cpu()
                    # project 2d to 3d
                    counter = 0
                    init = True
                    for ft, ind3d, ind2d in zip(imageft, proj3d, proj2d):
                        counter += 1
                        if counter - 1 in killing_inds:
                            continue
                        imageft_temp = Projection.apply(ft, ind3d, ind2d, grid_shape)[:, :, :, :].contiguous()
                        sz = imageft_temp.shape
                        if init:
                            imageft = imageft_temp.view(sz[0], sz[1], sz[2], sz[3])
                            init = False
                            continue
                        imageft = torch.stack([imageft, imageft_temp], dim=4)
                        # reshape to max pool over features
                        imageft = imageft.view(sz[0], -1, 2)
                        imageft = torch.nn.MaxPool1d(kernel_size=2)(imageft)
                        imageft = imageft.view(sz[0], sz[1], sz[2], sz[3])
                    imageft = imageft.view(sz[0], sz[1], sz[2], sz[3], self.batch_size)
                    self._imageft = imageft.permute(4, 0, 3, 2, 1)
                    self._imageft = self._imageft.cuda()
                del proj3d
                del proj2d
                torch.cuda.empty_cache()
Example 14: train_model
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def train_model(trainloader, model, criterion, optimizer, scheduler, loss_threshold=0.3, epochs=25):
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        model.cuda()
    model.train(True)  # Set model to training mode
    since = time.time()
    switched_opt = False
    # Here we store the best model
    model_file = 's_trained.pth'
    best_model_wts = copy.deepcopy(model.state_dict())
    # Statistics
    best_acc = 0.0
    best_loss = 5.0
    total_step = 0.0
    for epoch in range(epochs):
        print('Epoch {}/{}'.format(epoch + 1, epochs))
        print('-' * 10)
        # scheduler.step()
        running_loss = 0.0
        running_corrects = 0
        # Iterate over data.
        for step, data in enumerate(trainloader, 0):
            # get the input batch
            inputs, labels = data
            # wrap them in Variable
            if use_gpu:
                inputs = Variable(inputs.cuda())
                labels = Variable(labels.cuda())
            else:
                inputs, labels = Variable(inputs), Variable(labels)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # predictions come back to the CPU so they can be compared with the labels
            preds = outputs.cpu().data.max(1)[1]
            # statistics (legacy API: loss.data[0] is loss.item() in PyTorch >= 0.4)
            running_loss += loss.data[0]
            # compute accuracy
            batch_correct = preds.eq(labels.cpu().data).sum()
            batch_size = labels.size(0)
            running_corrects += batch_correct / batch_size
            if step % 1000 == 999:  # print every 1000 mini-batches
                step_loss = running_loss / 1000
                step_acc = running_corrects / 1000
                print('Epoch: {} Step: {} Loss: {:.3f} Acc: {:.3f}'.format(
                    epoch + 1, step + 1, step_loss, step_acc))
                total_step += step + 1
                # ============ Logging ============
                # (1) Log the scalar values
                info = {
                    'loss': step_loss,
                    'accuracy': step_acc
                }
                # (2) Log CSV file
                log_csv(total_step, info['accuracy'], info['loss'])
                # (3) Tensorboard-specific logging
                # tensorboard_log(total_step, model, info)
                # for each epoch, save the best model
                if best_loss > step_loss:
                    print('loss improved from %.3f to %.3f' % (best_loss, step_loss))
                    best_loss = step_loss
                    best_acc = step_acc
                if (epoch + 1) % 5 == 0:
                    print("Testing...")
                    print('Saving model to ' + model_file + "...\n")
                    best_model_wts = copy.deepcopy(model.state_dict())
                    torch.save(model.state_dict(), model_file)
                    torch.save(model, "dump_model.pth")
                # Switch to SGD + Nesterov momentum once the loss is low enough
                if best_loss <= 0.6 and not switched_opt:
                    print('Switching to SGD with Nesterov momentum...')
                    # optimizer = optim.SGD(model.parameters(), lr=1e-4, momentum=0.9, nesterov=True)
                    switched_opt = True
#......... remaining code omitted .........
Example 15: main
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import cpu [as alias]
def main():
    global args
    args = parser.parse_args()
    # massage args
    block_opts = args.block_opts
    block_opts.append(args.block_overlap)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # create model
    print("=> using pre-trained model '{}'".format(args.arch))
    model = models.__dict__[args.arch](
        block_opts, pretrained=args.pretrained_net, mask_path=None, mean=args.mean, std=args.std,
        noise=args.noise, K=args.layers_k)
    model = torch.nn.DataParallel(model, device_ids=[args.gpu_id]).cuda()
    # switch to evaluate mode
    model.eval()
    cudnn.benchmark = True
    # Data loading code
    testdir = os.path.join(args.data)
    test_loader = torch.utils.data.DataLoader(
        datasets.videocs.VideoCS(testdir, block_opts, transforms.Compose([
            transforms.ToTensor(),
        ])),
        batch_size=1, shuffle=False,
        num_workers=0, pin_memory=True)
    batch_time = metrics.AverageMeter()
    psnr = metrics.AverageMeter()
    # binarize weights if they are not already 0/1
    model_weights = model.module.measurements.weight.data
    if not ((model_weights == 0) | (model_weights == 1)).all():
        model.module.measurements.binarization()
    end = time.time()
    for i, (video_frames, pad_frame_size, patch_shape) in enumerate(test_loader):
        # legacy API: async= was renamed non_blocking= and volatile= was removed in PyTorch 0.4
        video_input = Variable(video_frames.cuda(async=True), volatile=True)
        print(test_loader.dataset.videos[i])
        # compute output
        model.module.pad_frame_size = pad_frame_size.numpy()
        model.module.patch_shape = patch_shape.numpy()
        reconstructed_video, y = model(video_input)
        # copy both videos to host memory for the numpy-based PSNR computation
        reconstructed_video = reconstructed_video.cpu().data.numpy()
        original_video = video_input.cpu().data.numpy()
        # measure accuracy and record loss
        psnr_video = metrics.psnr_accuracy(reconstructed_video, original_video)
        psnr.update(psnr_video, video_frames.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        print('Test: [{0}/{1}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'PSNR {psnr.val:.3f} ({psnr.avg:.3f})'.format(
                  i + 1, len(test_loader), batch_time=batch_time,
                  psnr=psnr))
        if args.save_videos is not None:
            save_path = os.path.join(args.save_videos, args.save_format)
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            y_repeat = torch.zeros(
                *y.size()).unsqueeze(2).repeat(1, 1, args.block_opts[0], 1, 1)
            for j in range(y.size(1)):
                y_repeat[:, j, :, :, :] = y[:, j, :, :].repeat(
                    1, args.block_opts[0], 1, 1).data
            y_repeat = y_repeat.numpy()
            original_video = np.reshape(
                original_video, (original_video.shape[0] * original_video.shape[1] * original_video.shape[2], original_video.shape[3], original_video.shape[4]))
            reconstructed_video = np.reshape(reconstructed_video, (reconstructed_video.shape[0] * reconstructed_video.shape[1] *
                reconstructed_video.shape[2], reconstructed_video.shape[3], reconstructed_video.shape[4])) / np.max(reconstructed_video)
            y_repeat = np.reshape(y_repeat, (y_repeat.shape[0] * y_repeat.shape[1] *
                y_repeat.shape[2], y_repeat.shape[3], y_repeat.shape[4])) / np.max(y_repeat)
            write_video(save_path, test_loader.dataset.videos[i], np.dstack(
                (original_video, y_repeat, reconstructed_video)), args.save_format)
    print(' * PSNR {psnr.avg:.3f}'.format(psnr=psnr))