This article collects typical usage examples of the Python method torch.autograd.Variable.numpy. If you are wondering how Variable.numpy is used in practice, the hand-picked code examples below should help. You can also browse further usage examples for the containing class, torch.autograd.Variable.
The following shows 4 code examples of the Variable.numpy method, sorted by popularity by default. Upvoting the examples you find useful helps the system recommend better Python code samples.
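Before the examples, a minimal sketch of the conversion itself. It assumes PyTorch 0.4 or later, where Variable is a thin alias for Tensor, so .numpy() can be called directly on a Variable that does not require gradients; the array values are made up for the demo:

import numpy as np
import torch
from torch.autograd import Variable

a = np.arange(6, dtype=np.float32).reshape(2, 3)
v = Variable(torch.from_numpy(a))   # wraps the array without copying
b = v.numpy()                       # back to a NumPy view of the same memory
assert (a == b).all()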
Example 1: loss_fn
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import numpy [as alias]
loss = loss_fn(y_pred, ypt)
optimizer.zero_grad()
loss.backward()
optimizer.step()

y_te = model(Variable(torch.from_numpy(Xte)))
if t % 1000 == 0:
    for i in range(100):
        y_te += model(Variable(torch.from_numpy(Xte)))
    y_te /= 101.
    print(t,
          'loss = %.3f' % loss.item(),
          'acc_tr = %.3f' % np.mean(y_pred.argmax(1).numpy() == ypt.numpy()),
          'acc_te = %.3f' % np.mean(y_te.argmax(1).numpy() == Variable(torch.from_numpy(ytr)).long().numpy()))

ygreed = model(Variable(torch.from_numpy(greed)))
for i in range(100):
    ygreed += model(Variable(torch.from_numpy(greed)))

plt.scatter(greed[:, 0], greed[:, 1], c=ygreed.argmax(1).numpy(), alpha=0.5)
plt.axis('equal')
plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.title('Accuracy = %.3f' % np.mean(y_pred.argmax(1).numpy() == ypt.numpy()))
plt.xlabel('sigma 1')
plt.ylabel('sigma 2')
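A side note that applies to this example: since Variable and Tensor were merged in PyTorch 0.4, .numpy() refuses to run on a value that still requires grad, so intermediate results sometimes need .detach() first. A small self-contained illustration (the tensors here are made up for the demo):

import torch

x = torch.randn(4, 2, requires_grad=True)
pred = (x * 2).argmax(1)        # argmax output carries no grad, so .numpy() is fine
pred_np = pred.numpy()
s = (x * 2).sum()               # still tracks gradients...
s_np = s.detach().numpy()       # ...so detach before converting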
Example 2: transform
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import numpy [as alias]
if opt.color_histogram_matching:
    styleImg = transform(util.open_and_resize_image(opt.style_image, 256))      # 1x3x512x512
    contentImg = transform(util.open_and_resize_image(opt.content_image, 256))  # 1x3x512x512
    styleImg = styleImg.unsqueeze(0)
    contentImg = contentImg.unsqueeze(0)
    styleImg = util.match_color_histogram(styleImg.numpy(), contentImg.numpy())
    styleImg = Variable(torch.from_numpy(styleImg))
    contentImg = Variable(contentImg)
elif opt.luminance_only:
    styleImg = transform(util.open_and_resize_image(opt.style_image, 256))      # 1x3x512x512
    contentImg = transform(util.open_and_resize_image(opt.content_image, 256))  # 1x3x512x512
    styleImg = styleImg.unsqueeze(0)
    contentImg = contentImg.unsqueeze(0)
    styleImg, contentImg, content_iq = util.luminance_transfer(styleImg.numpy(), contentImg.numpy())
    styleImg = Variable(torch.from_numpy(styleImg))
    contentImg = Variable(torch.from_numpy(contentImg))
else:
    styleImg = load_image(opt.style_image)      # 1x3x512x512
    contentImg = load_image(opt.content_image)  # 1x3x512x512

if opt.cuda:
    styleImg = styleImg.cuda()
    contentImg = contentImg.cuda()

############### MODEL ####################
vgg = VGG()
vgg.load_state_dict(torch.load(opt.vgg_dir))
for param in vgg.parameters():
    param.requires_grad = False
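util.match_color_histogram is the project's own helper and its implementation is not shown here. For orientation only, a rough sketch of one common approach to the same idea (per-channel mean/std transfer on 1x3xHxW arrays, as used in colour-preserving style transfer); function and variable names are illustrative, not the project's API:

import numpy as np

def match_channel_stats(style, content, eps=1e-8):
    # Shift/scale each style channel so its mean and std match the content image.
    matched = np.empty_like(style)
    for c in range(style.shape[1]):
        s, t = style[:, c], content[:, c]
        matched[:, c] = (s - s.mean()) / (s.std() + eps) * t.std() + t.mean()
    return matched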
Example 3: linear
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import numpy [as alias]
optimizer.step()
# You can also do optimization at the low level as shown below.
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)
# Print out the loss after optimization.
pred = linear(x)
loss = criterion(pred, y)
print('loss after 1 step optimization: ', loss.item())
#======================== Loading data from numpy ========================#
a = np.array([[1,2], [3,4]])
b = torch.from_numpy(a) # convert numpy array to torch tensor
c = b.numpy() # convert torch tensor to numpy array
#===================== Implementing the input pipeline =====================#
# Download and construct dataset.
train_dataset = dsets.CIFAR10(root='../data/',
                              train=True,
                              transform=transforms.ToTensor(),
                              download=True)
# Select one data pair (read data from disk).
image, label = train_dataset[0]
print(image.size())
print(label)
# Data Loader (this provides queue and thread in a very simple way).
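The snippet stops just before the loader is built. A minimal sketch of what that line typically looks like with the standard torch.utils.data.DataLoader API; the batch size and worker count below are illustrative guesses, not values from the original script:

train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=100,
                                           shuffle=True,
                                           num_workers=2)
images, labels = next(iter(train_loader))   # fetch one mini-batch of CIFAR-10 tensors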
Example 4: print
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import numpy [as alias]
try:
    encoder.load_state_dict(torch.load(
        'colorizer.pkl', map_location=location))
except:
    print('ERROR: please make sure you have a model with name `colorizer.pkl` in your path')
encoder.eval()
# encoder.parameters()
outputs = []
images = []
labels = []
print(test_cases)
for c in test_cases:
    print('encoding ', c)
    image, _, label = test_dataset[c]
    image = Variable(torch.from_numpy(np.array([image.numpy()])), volatile=True)
    label = Variable(label, volatile=True)
    if 'cuda' in location:
        image = image.cuda()
        label = label.cuda()
    images.append(image)
    labels.append(label)
    output = encoder(image)
    outputs.append(output)
f, axarr = plt.subplots(len(test_cases), 3)
T = 0.38
q = 313 # number of colours
nnenc = NNEncode()
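The temperature T and the 313 colour bins suggest the annealed-mean decoding used in learning-based colorization, but the snippet ends before that step. The following is only a hedged sketch of how a temperature-scaled re-normalisation over the q bins is commonly written (NumPy only; it does not use NNEncode's actual interface):

import numpy as np

def annealed_softmax(logits, T=0.38):
    # logits: (num_pixels, q) scores over the colour bins
    z = logits / T
    z -= z.max(axis=1, keepdims=True)        # subtract the max for numerical stability
    p = np.exp(z)
    return p / p.sum(axis=1, keepdims=True)  # smaller T gives a sharper distribution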