當前位置: 首頁>>代碼示例>>Python>>正文


Python data.to方法代碼示例

本文整理匯總了Python中torch.utils.data.to方法的典型用法代碼示例。如果您正苦於以下問題:Python data.to方法的具體用法?Python data.to怎麽用?Python data.to使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在torch.utils.data的用法示例。


在下文中一共展示了data.to方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: test

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def test(model, test_loader, device):
    """Evaluate ``model`` on ``test_loader`` and log average loss and accuracy.

    Args:
        model: a module returning per-class log-probabilities (NLL-compatible).
        test_loader: DataLoader yielding ``(data, target)`` batches; must expose
            ``.dataset`` so the loss can be averaged per sample.
        device: torch device the batches are moved to.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # BUG FIX: `size_average=None` meant "use the default", i.e. *mean*
            # reduction — so the accumulator held a sum of per-batch means, and
            # dividing by len(dataset) below produced a wrongly scaled loss.
            # `reduction='sum'` accumulates per-sample losses as intended (and
            # replaces the long-deprecated `size_average` kwarg).
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    logger.debug('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset))) 
開發者ID:aws,項目名稱:sagemaker-pytorch-training-toolkit,代碼行數:18,代碼來源:mnist.py

示例2: test

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def test(model, loader, criterion, device, dtype, child):
    """Run one evaluation pass; return (avg loss, top-1 fraction, top-5 fraction)."""
    model.eval()
    loss_total = 0
    hits_top1, hits_top5 = 0, 0

    # Child processes skip the progress bar so their output doesn't interleave.
    batches = enumerate(loader) if child else enumerate(tqdm(loader))

    with torch.no_grad():
        for _, (inputs, labels) in batches:
            inputs = inputs.to(device=device, dtype=dtype)
            labels = labels.to(device=device)
            logits = model(inputs)
            loss_total += criterion(logits, labels).item()  # sum up batch loss
            top1, top5 = correct(logits, labels, topk=(1, 5))
            hits_top1 += top1
            hits_top5 += top5

    loss_total /= len(loader)
    sample_count = len(loader.sampler)
    if not child:
        tqdm.write(
            '\nTest set: Average loss: {:.4f}, Top1: {}/{} ({:.2f}%), '
            'Top5: {}/{} ({:.2f}%)'.format(loss_total, int(hits_top1), sample_count,
                                           100. * hits_top1 / sample_count, int(hits_top5),
                                           sample_count, 100. * hits_top5 / sample_count))
    return loss_total, hits_top1 / sample_count, hits_top5 / sample_count 
開發者ID:Randl,項目名稱:MobileNetV3-pytorch,代碼行數:26,代碼來源:run.py

示例3: test

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def test(model, test_loader, device):
    """Evaluate ``model`` on ``test_loader``; log and return accuracy (percent).

    Args:
        model: a module returning per-class log-probabilities (NLL-compatible).
        test_loader: DataLoader yielding ``(data, target)``; must expose ``.dataset``.
        device: torch device the batches are moved to.

    Returns:
        Accuracy over the whole test set as a percentage (float).
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # `size_average` has been deprecated since PyTorch 0.4;
            # `reduction='sum'` is the supported equivalent of size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    accuracy = 100.0 * correct / len(test_loader.dataset)

    logger.debug(
        "Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
            test_loss, correct, len(test_loader.dataset), accuracy
        )
    )

    return accuracy 
開發者ID:aws,項目名稱:sagemaker-python-sdk,代碼行數:24,代碼來源:mnist.py

示例4: test

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def test(model, loader, criterion, device, dtype):
    """Run one evaluation pass; return (avg loss, top-1 fraction, top-5 fraction)."""
    model.eval()
    loss_total = 0
    hits_top1, hits_top5 = 0, 0

    for inputs, labels in tqdm(loader):
        inputs = inputs.to(device=device, dtype=dtype)
        labels = labels.to(device=device)
        # no_grad wraps only the forward + loss, mirroring the original layout.
        with torch.no_grad():
            logits = model(inputs)
            loss_total += criterion(logits, labels).item()  # sum up batch loss
            top_hits = correct(logits, labels, topk=(1, 5))
        hits_top1 += top_hits[0]
        hits_top5 += top_hits[1]

    loss_total /= len(loader)
    sample_count = len(loader.dataset)

    tqdm.write(
        '\nTest set: Average loss: {:.4f}, Top1: {}/{} ({:.2f}%), '
        'Top5: {}/{} ({:.2f}%)'.format(loss_total, int(hits_top1), sample_count,
                                       100. * hits_top1 / sample_count, int(hits_top5),
                                       sample_count, 100. * hits_top5 / sample_count))
    return loss_total, hits_top1 / sample_count, hits_top5 / sample_count 
開發者ID:Randl,項目名稱:MobileNetV2-pytorch,代碼行數:24,代碼來源:run.py

示例5: train

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def train(epoch):
    """Run one VAE training epoch over the current rollout buffer."""
    model.train()
    dataset_train.load_next_buffer()
    running_loss = 0
    for batch_idx, batch in enumerate(train_loader):
        batch = batch.to(device)
        optimizer.zero_grad()
        reconstruction, mu, logvar = model(batch)
        batch_loss = loss_function(reconstruction, batch, mu, logvar)
        batch_loss.backward()
        running_loss += batch_loss.item()
        optimizer.step()
        # Progress report every 20 batches (per-sample loss for this batch).
        if batch_idx % 20 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(batch), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                batch_loss.item() / len(batch)))

    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, running_loss / len(train_loader.dataset))) 
開發者ID:ctallec,項目名稱:world-models,代碼行數:23,代碼來源:trainvae.py

示例6: test

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def test():
    """Evaluate the VAE over the current test buffer; return the avg loss."""
    model.eval()
    dataset_test.load_next_buffer()
    loss_sum = 0
    with torch.no_grad():
        for batch in test_loader:
            batch = batch.to(device)
            reconstruction, mu, logvar = model(batch)
            loss_sum += loss_function(reconstruction, batch, mu, logvar).item()

    loss_sum /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(loss_sum))
    return loss_sum

# check vae dir exists, if not, create it 
開發者ID:ctallec,項目名稱:world-models,代碼行數:18,代碼來源:trainvae.py

示例7: __init__

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def __init__(self):
    """Build the fully-connected encoder and decoder of the VAE."""
    super(VAE, self).__init__()

    # --- Encoder: 28x28 = 784 flattened pixels -> 400 hidden units ---
    self.fc1 = nn.Linear(784, 400)
    self.relu = nn.ReLU()  # shared non-linearity
    # Two parallel heads emit the latent Gaussian parameters (bottleneck).
    self.fc21 = nn.Linear(400, ZDIMS)  # mean head
    self.fc22 = nn.Linear(400, ZDIMS)  # log-variance head

    # --- Decoder: latent code -> 400 hidden -> 784 pixel outputs ---
    self.fc3 = nn.Linear(ZDIMS, 400)
    self.fc4 = nn.Linear(400, 784)
    self.sigmoid = nn.Sigmoid()  # squashes decoder output into (0, 1)
開發者ID:jgvfwstone,項目名稱:ArtificialIntelligenceEngines,代碼行數:20,代碼來源:main.py

示例8: loss_function

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def loss_function(recon_x, x, mu, logvar):
    """Return the negative ELBO for a Bernoulli-decoder VAE.

    Args:
        recon_x: decoder probabilities, values in (0, 1).
        x: input batch, flattened to shape (-1, 784) internally.
        mu: latent means from the encoder.
        logvar: latent log-variances from the encoder.

    Returns:
        Scalar loss tensor (the ELBO negated, so minimising it maximises the ELBO).
    """
    # Reconstruction term: log-likelihood of x under the decoder, i.e. the
    # negated summed binary cross entropy.
    log_px = -F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')

    # -KL(q(z|x) || N(0, I)) in closed form — Appendix B of Kingma & Welling,
    # "Auto-Encoding Variational Bayes" (ICLR 2014, arXiv:1312.6114):
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2).
    neg_kl = 0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())

    # ELBO = reconstruction term + (-KL); the training loss is its negation,
    # trading reconstruction accuracy against closeness to the unit Gaussian.
    elbo = log_px + neg_kl
    return -elbo 
開發者ID:jgvfwstone,項目名稱:ArtificialIntelligenceEngines,代碼行數:25,代碼來源:main.py

示例9: train

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def train(epoch):
    """Train the VAE for one epoch, printing interval and epoch-average losses."""
    model.train()
    epoch_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):  # labels are unused
        data = data.to(device)
        optimizer.zero_grad()
        recon, mu, logvar = model(data)
        batch_loss = loss_function(recon, data, mu, logvar)
        batch_loss.backward()
        epoch_loss += batch_loss.item()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                batch_loss.item() / len(data)))

    print('====> Epoch: {} Average loss: {:.4f}'.format(
          epoch, epoch_loss / len(train_loader.dataset))) 
開發者ID:pytorch,項目名稱:examples,代碼行數:21,代碼來源:main.py

示例10: test

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def test(epoch):
    """Compute the VAE test loss and save a reconstruction grid for the first batch."""
    model.eval()
    loss_sum = 0
    with torch.no_grad():
        for i, (data, _) in enumerate(test_loader):  # labels are unused
            data = data.to(device)
            recon, mu, logvar = model(data)
            loss_sum += loss_function(recon, data, mu, logvar).item()
            if i == 0:
                # Stack up to 8 originals above their reconstructions and save.
                n = min(data.size(0), 8)
                grid = torch.cat([data[:n],
                                  recon.view(args.batch_size, 1, 28, 28)[:n]])
                save_image(grid.cpu(),
                           'results/reconstruction_' + str(epoch) + '.png', nrow=n)

    loss_sum /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(loss_sum))
開發者ID:pytorch,項目名稱:examples,代碼行數:19,代碼來源:main.py

示例11: train_epoch

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def train_epoch(data_loader, discriminator, device='cuda', args=None, epoch=1):
    """Train the discriminator for one epoch with a fresh Adam(lr=1e-4) optimizer.

    NOTE(review): `args` must provide `log_interval`; passing the default None
    will raise at the first logging check — confirm callers always supply it.
    """
    optimizer = optim.Adam(discriminator.parameters(), lr=0.0001)
    discriminator.train_custom()

    for batch_idx, (inputs, labels) in enumerate(data_loader):
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        logits = discriminator(inputs)
        batch_loss = F.nll_loss(logits, labels)
        # retain_graph=True kept from the original — presumably parts of the
        # graph are shared across steps; verify before removing.
        batch_loss.backward(retain_graph=True)
        optimizer.step()

        if batch_idx % args.log_interval == 0:
            print('Relu Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(inputs), len(data_loader.dataset),
                       100. * batch_idx / len(data_loader), batch_loss.item())) 
開發者ID:uber-research,項目名稱:PPLM,代碼行數:20,代碼來源:gpt2tunediscrim.py

示例12: test_epoch

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def test_epoch(data_loader, discriminator, device='cuda', args=None):
    """Evaluate the discriminator; print average loss and accuracy."""
    discriminator.eval()
    loss_sum = 0
    hit_count = 0
    with torch.no_grad():
        for inputs, labels in data_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            logits = discriminator(inputs)
            loss_sum += F.nll_loss(logits, labels, reduction='sum').item()  # sum up batch loss
            preds = logits.argmax(dim=1, keepdim=True)  # index of max log-probability
            hit_count += preds.eq(labels.view_as(preds)).sum().item()

    loss_sum /= len(data_loader.dataset)

    print('\nRelu Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        loss_sum, hit_count, len(data_loader.dataset),
        100. * hit_count / len(data_loader.dataset))) 
開發者ID:uber-research,項目名稱:PPLM,代碼行數:19,代碼來源:gpt2tunediscrim.py

示例13: train

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def train(self):
    """Run one training epoch; return (cumulative loss, accuracy fraction)."""
    print("train:")
    self.model.train()
    loss_sum = 0
    hit_count = 0
    seen = 0

    for batch_num, (inputs, labels) in enumerate(self.train_loader):
        inputs, labels = inputs.to(self.device), labels.to(self.device)
        self.optimizer.zero_grad()
        logits = self.model(inputs)
        batch_loss = self.criterion(logits, labels)
        batch_loss.backward()
        self.optimizer.step()
        loss_sum += batch_loss.item()
        # torch.max over dim 1 returns (values, indices); indices = predictions.
        _, preds = torch.max(logits, 1)
        seen += labels.size(0)
        # Count predictions that match the ground-truth labels.
        hit_count += np.sum(preds.cpu().numpy() == labels.cpu().numpy())

        progress_bar(batch_num, len(self.train_loader), 'Loss: %.4f | Acc: %.3f%% (%d/%d)'
                     % (loss_sum / (batch_num + 1), 100. * hit_count / seen, hit_count, seen))

    return loss_sum, hit_count / seen 
開發者ID:icpm,項目名稱:pytorch-cifar10,代碼行數:27,代碼來源:main.py

示例14: test

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def test(self):
    """Evaluate on the test loader; return (cumulative loss, accuracy fraction)."""
    print("test:")
    self.model.eval()
    loss_sum = 0
    hit_count = 0
    seen = 0

    with torch.no_grad():
        for batch_num, (inputs, labels) in enumerate(self.test_loader):
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            logits = self.model(inputs)
            loss_sum += self.criterion(logits, labels).item()
            # torch.max over dim 1 returns (values, indices); indices = predictions.
            _, preds = torch.max(logits, 1)
            seen += labels.size(0)
            hit_count += np.sum(preds.cpu().numpy() == labels.cpu().numpy())

            progress_bar(batch_num, len(self.test_loader), 'Loss: %.4f | Acc: %.3f%% (%d/%d)'
                         % (loss_sum / (batch_num + 1), 100. * hit_count / seen, hit_count, seen))

    return loss_sum, hit_count / seen 
開發者ID:icpm,項目名稱:pytorch-cifar10,代碼行數:23,代碼來源:main.py

示例15: test

# 需要導入模塊: from torch.utils import data [as 別名]
# 或者: from torch.utils.data import to [as 別名]
def test(model, test_loader, device):
    """Evaluate ``model`` on ``test_loader`` and log average loss and accuracy.

    Args:
        model: a module returning per-class log-probabilities (NLL-compatible).
        test_loader: DataLoader yielding ``(data, target)``; must expose ``.dataset``.
        device: torch device the batches are moved to.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # `size_average` has been deprecated since PyTorch 0.4;
            # `reduction='sum'` is the supported equivalent of size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    logger.info('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset))) 
開發者ID:aws,項目名稱:aws-step-functions-data-science-sdk-python,代碼行數:18,代碼來源:mnist.py


注:本文中的torch.utils.data.to方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。