This article collects typical usage examples of the data.to method from Python's torch.utils.data module. If you have been wondering what data.to does, how to call it, and what it looks like in real code, then congratulations: the hand-picked examples below should help. You can also explore the containing module, torch.utils.data, further.
The following shows 15 code examples of data.to, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
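Note that torch.utils.data itself does not actually export a function named to: in every example below, .to is the standard torch.Tensor.to method, called on batches yielded by a torch.utils.data.DataLoader to move them onto a CPU or GPU device. A minimal, self-contained sketch of that shared pattern (the toy dataset here is random data, not taken from any of the examples):

import torch
from torch.utils.data import TensorDataset, DataLoader

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# toy dataset: 100 flattened 28x28 "images" with integer class labels
dataset = TensorDataset(torch.randn(100, 784), torch.randint(0, 10, (100,)))
loader = DataLoader(dataset, batch_size=32, shuffle=True)
for data, target in loader:
    # this is the data.to / target.to call every example below revolves around
    data, target = data.to(device), target.to(device)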
Example 1: test
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def test(model, test_loader, device):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    logger.debug('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
Example 2: test
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def test(model, loader, criterion, device, dtype, child):
    model.eval()
    test_loss = 0
    correct1, correct5 = 0, 0
    enum_load = enumerate(loader) if child else enumerate(tqdm(loader))
    with torch.no_grad():
        for batch_idx, (data, target) in enum_load:
            data, target = data.to(device=device, dtype=dtype), target.to(device=device)
            output = model(data)
            test_loss += criterion(output, target).item()  # accumulate mean batch loss
            corr = correct(output, target, topk=(1, 5))
            correct1 += corr[0]
            correct5 += corr[1]
    test_loss /= len(loader)
    if not child:
        tqdm.write(
            '\nTest set: Average loss: {:.4f}, Top1: {}/{} ({:.2f}%), '
            'Top5: {}/{} ({:.2f}%)'.format(test_loss, int(correct1), len(loader.sampler),
                                           100. * correct1 / len(loader.sampler), int(correct5),
                                           len(loader.sampler), 100. * correct5 / len(loader.sampler)))
    return test_loss, correct1 / len(loader.sampler), correct5 / len(loader.sampler)
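Examples 2 and 4 call a correct(output, target, topk=...) helper that is not shown on this page. A plausible reconstruction, assuming it returns the number of top-k hits per requested k (the contract is inferred from the call sites, not taken from the original repository):

def correct(output, target, topk=(1,)):
    # Count predictions whose top-k candidates contain the true label,
    # one count per requested k (hypothetical helper, inferred from usage).
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()  # shape: (maxk, batch)
    hits = pred.eq(target.view(1, -1).expand_as(pred))
    return [hits[:k].reshape(-1).float().sum().item() for k in topk]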
Example 3: test
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def test(model, test_loader, device):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    accuracy = 100.0 * correct / len(test_loader.dataset)
    logger.debug(
        "Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
            test_loss, correct, len(test_loader.dataset), accuracy
        )
    )
    return accuracy
Example 4: test
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def test(model, loader, criterion, device, dtype):
    model.eval()
    test_loss = 0
    correct1, correct5 = 0, 0
    for batch_idx, (data, target) in enumerate(tqdm(loader)):
        data, target = data.to(device=device, dtype=dtype), target.to(device=device)
        with torch.no_grad():
            output = model(data)
            test_loss += criterion(output, target).item()  # accumulate mean batch loss
            corr = correct(output, target, topk=(1, 5))
            correct1 += corr[0]
            correct5 += corr[1]
    test_loss /= len(loader)
    tqdm.write(
        '\nTest set: Average loss: {:.4f}, Top1: {}/{} ({:.2f}%), '
        'Top5: {}/{} ({:.2f}%)'.format(test_loss, int(correct1), len(loader.dataset),
                                       100. * correct1 / len(loader.dataset), int(correct5),
                                       len(loader.dataset), 100. * correct5 / len(loader.dataset)))
    return test_loss, correct1 / len(loader.dataset), correct5 / len(loader.dataset)
Example 5: train
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def train(epoch):
    """ One training epoch """
    model.train()
    dataset_train.load_next_buffer()
    train_loss = 0
    for batch_idx, data in enumerate(train_loader):
        data = data.to(device)
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        loss = loss_function(recon_batch, data, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        if batch_idx % 20 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item() / len(data)))
    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_loader.dataset)))
Example 6: test
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def test():
    """ One test epoch """
    model.eval()
    dataset_test.load_next_buffer()
    test_loss = 0
    with torch.no_grad():
        for data in test_loader:
            data = data.to(device)
            recon_batch, mu, logvar = model(data)
            test_loss += loss_function(recon_batch, data, mu, logvar).item()
    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
    return test_loss

# check vae dir exists, if not, create it
Example 7: __init__
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def __init__(self):
    super(VAE, self).__init__()
    # ENCODER
    # 28 x 28 pixels = 784 input pixels, 400 outputs
    self.fc1 = nn.Linear(784, 400)
    # rectified linear unit layer from 400 to 400
    self.relu = nn.ReLU()
    self.fc21 = nn.Linear(400, ZDIMS)  # mu layer
    self.fc22 = nn.Linear(400, ZDIMS)  # logvariance layer
    # this last layer is the bottleneck of ZDIMS=20 connections
    # DECODER
    # from bottleneck to hidden 400
    self.fc3 = nn.Linear(ZDIMS, 400)
    # from hidden 400 to 784 outputs
    self.fc4 = nn.Linear(400, 784)
    self.sigmoid = nn.Sigmoid()
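The __init__ above only declares the layers; the rest of the class is not shown. Assuming the usual structure of the PyTorch VAE example this snippet follows, the companion methods would look roughly like this (a sketch, not the original code):

def encode(self, x):
    h1 = self.relu(self.fc1(x))
    return self.fc21(h1), self.fc22(h1)  # mu, logvar

def reparameterize(self, mu, logvar):
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)  # sample N(0, I) noise
    return mu + eps * std

def decode(self, z):
    h3 = self.relu(self.fc3(z))
    return self.sigmoid(self.fc4(h3))

def forward(self, x):
    mu, logvar = self.encode(x.view(-1, 784))
    z = self.reparameterize(mu, logvar)
    return self.decode(z), mu, logvar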
Example 8: loss_function
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def loss_function(recon_x, x, mu, logvar):
    # next 2 lines are equivalent
    BCE = -F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    # BCE = -F.binary_cross_entropy(recon_x, x.view(-1, 784), size_average=False)  # deprecated
    # for binary_cross_entropy, see https://pytorch.org/docs/stable/nn.html
    # KLD below is the *negative* Kullback-Leibler divergence -- KL measures how much
    # one learned distribution deviates from another, in this specific case the
    # learned distribution from the unit Gaussian
    # see Appendix B from VAE paper:
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) = -KL(q(z|x) || N(0, I))
    KLD = 0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    # JVS: Kingma's repo = https://github.com/dpkingma/examples/blob/master/vae/main.py
    # BCE tries to make our reconstruction as accurate as possible
    # KLD tries to push the distributions as close as possible to unit Gaussian
    ELBO = BCE + KLD
    loss = -ELBO
    return loss
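Since the KLD term above is the closed-form negative KL divergence, it can be sanity-checked against torch.distributions. A quick standalone check (not part of the original example; shapes are arbitrary):

import torch
from torch.distributions import Normal, kl_divergence

mu = torch.randn(4, 20)
logvar = torch.randn(4, 20)
# closed form used above, negated to give the actual KL(q(z|x) || N(0, I))
closed_form = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
analytic = kl_divergence(Normal(mu, torch.exp(0.5 * logvar)),
                         Normal(torch.zeros_like(mu), torch.ones_like(mu))).sum()
print(torch.allclose(closed_form, analytic, atol=1e-5))  # True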
Example 9: train
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def train(epoch):
    model.train()
    train_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):
        data = data.to(device)
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        loss = loss_function(recon_batch, data, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item() / len(data)))
    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_loader.dataset)))
Example 10: test
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def test(epoch):
    model.eval()
    test_loss = 0
    with torch.no_grad():
        for i, (data, _) in enumerate(test_loader):
            data = data.to(device)
            recon_batch, mu, logvar = model(data)
            test_loss += loss_function(recon_batch, data, mu, logvar).item()
            if i == 0:
                n = min(data.size(0), 8)
                comparison = torch.cat([data[:n],
                                        recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
                save_image(comparison.cpu(),
                           'results/reconstruction_' + str(epoch) + '.png', nrow=n)
    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
Example 11: train_epoch
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def train_epoch(data_loader, discriminator, device='cuda', args=None, epoch=1):
    optimizer = optim.Adam(discriminator.parameters(), lr=0.0001)
    discriminator.train_custom()
    for batch_idx, (data, target) in enumerate(data_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = discriminator(data)
        loss = F.nll_loss(output, target)
        loss.backward(retain_graph=True)
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Relu Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(data_loader.dataset),
                100. * batch_idx / len(data_loader), loss.item()))
Example 12: test_epoch
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def test_epoch(data_loader, discriminator, device='cuda', args=None):
    discriminator.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            output = discriminator(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(data_loader.dataset)
    print('\nRelu Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(data_loader.dataset),
        100. * correct / len(data_loader.dataset)))
Example 13: train
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def train(self):
    print("train:")
    self.model.train()
    train_loss = 0
    train_correct = 0
    total = 0
    for batch_num, (data, target) in enumerate(self.train_loader):
        data, target = data.to(self.device), target.to(self.device)
        self.optimizer.zero_grad()
        output = self.model(data)
        loss = self.criterion(output, target)
        loss.backward()
        self.optimizer.step()
        train_loss += loss.item()
        prediction = torch.max(output, 1)  # second param "1" represents the dimension to be reduced
        total += target.size(0)
        # train_correct incremented by one if predicted right
        train_correct += np.sum(prediction[1].cpu().numpy() == target.cpu().numpy())
        progress_bar(batch_num, len(self.train_loader), 'Loss: %.4f | Acc: %.3f%% (%d/%d)'
                     % (train_loss / (batch_num + 1), 100. * train_correct / total, train_correct, total))
    return train_loss, train_correct / total
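Examples 13 and 14 rely on a progress_bar helper that is not included in the snippets. A minimal stand-in with the same call signature (the real helper in CIFAR-style training repos typically draws a richer terminal bar with timing info):

import sys

def progress_bar(batch_num, total, msg):
    # Minimal stand-in: overwrite one terminal line with batch progress and stats.
    sys.stdout.write('\r[{}/{}] {}'.format(batch_num + 1, total, msg))
    sys.stdout.flush()
    if batch_num + 1 == total:
        sys.stdout.write('\n')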
Example 14: test
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def test(self):
    print("test:")
    self.model.eval()
    test_loss = 0
    test_correct = 0
    total = 0
    with torch.no_grad():
        for batch_num, (data, target) in enumerate(self.test_loader):
            data, target = data.to(self.device), target.to(self.device)
            output = self.model(data)
            loss = self.criterion(output, target)
            test_loss += loss.item()
            prediction = torch.max(output, 1)
            total += target.size(0)
            test_correct += np.sum(prediction[1].cpu().numpy() == target.cpu().numpy())
            progress_bar(batch_num, len(self.test_loader), 'Loss: %.4f | Acc: %.3f%% (%d/%d)'
                         % (test_loss / (batch_num + 1), 100. * test_correct / total, test_correct, total))
    return test_loss, test_correct / total
Example 15: test
# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import to [as alias]
def test(model, test_loader, device):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    logger.info('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))