

Python data.cuda Method Code Examples

This article collects typical usage examples of the data.cuda method in Python's torch.utils.data context. If you are wondering how data.cuda is used in practice, what a concrete call looks like, or where to find real-world examples, the curated snippets below may help. Strictly speaking, torch.utils.data does not export a cuda function; in every example, the .cuda() method is called on tensors yielded by torch.utils.data loaders in order to move them onto the GPU. You can also explore further usage examples of torch.utils.data, the module these snippets revolve around.


Fifteen code examples of the data.cuda method are shown below, sorted by popularity by default.
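Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share (this snippet is illustrative only and is not taken from any of the projects below; the toy dataset, batch size, and variable names are assumptions): iterate over a torch.utils.data.DataLoader and move each batch onto the GPU with Tensor.cuda() when CUDA is available. Note that a few of the cited examples come from older PyTorch code (Variable, volatile=True, loss.data[0]); on current PyTorch you would use plain tensors and loss.item() instead.

import torch
from torch.utils.data import TensorDataset, DataLoader

# Toy dataset: 100 samples with 8 features each and binary labels (illustrative only).
features = torch.randn(100, 8)
labels = torch.randint(0, 2, (100,))
loader = DataLoader(TensorDataset(features, labels), batch_size=16,
                    shuffle=True, pin_memory=True)

use_cuda = torch.cuda.is_available()

for data, target in loader:
    if use_cuda:
        # Move the batch to the default GPU; non_blocking=True lets the copy
        # overlap with computation because the loader uses pinned host memory.
        data = data.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
    # ... forward pass, loss computation, and optimizer step would go here ...

Passing non_blocking=True only helps when the source tensor lives in pinned (page-locked) host memory, which is why pin_memory=True is set on the DataLoader in this sketch.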

Example 1: __iter__

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def __iter__(self):
        for output in self.dataloader:
            if self.dataset.training:
                data, target = output
            else:
                data, ids, ratio = output

            if torch.cuda.is_available():
                data = data.cuda(non_blocking=True)

            if self.dataset.training:
                if torch.cuda.is_available():
                    target = target.cuda(non_blocking=True)
                yield data, target
            else:
                if torch.cuda.is_available():
                    ids = ids.cuda(non_blocking=True)
                    ratio = ratio.cuda(non_blocking=True)
                yield data, ids, ratio 
Developer: NVIDIA; Project: retinanet-examples; Lines of code: 21; Source file: data.py

Example 2: train

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
            epoch, batch_idx * args.batch_size, len(loader.dataset),
            100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Developer: lianghongzhuo; Project: PointNetGPD; Lines of code: 27; Source file: main_1v.py

Example 3: test

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for data, target, obj_name in loader:
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output, _ = model(data) # N*C
        test_loss += F.nll_loss(output, target, size_average=False).cpu().item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Developer: lianghongzhuo; Project: PointNetGPD; Lines of code: 26; Source file: main_1v.py

Example 4: train

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
            epoch, batch_idx * len(data), len(loader.dataset),
            100. * batch_idx * len(data) / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Developer: lianghongzhuo; Project: PointNetGPD; Lines of code: 27; Source file: main_1v_gpd.py

Example 5: test

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for batch_idx, (data, target, obj_name) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output = model(data) # N*C
        test_loss += F.nll_loss(output, target, size_average=False).cpu().item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Developer: lianghongzhuo; Project: PointNetGPD; Lines of code: 26; Source file: main_fullv_gpd.py

Example 6: train

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def train(epoch):
    model.train()
    train_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):
        data = Variable(data)
        if args.cuda:
            data = data.cuda()
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        loss = loss_function(recon_batch, data, mu, logvar)
        loss.backward()
        train_loss += loss.data[0]
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.data[0] / len(data)))

    print('====> Epoch: {} Average loss: {:.4f}'.format(
          epoch, train_loss / len(train_loader.dataset))) 
Developer: eelxpeng; Project: UnsupervisedDeepLearning-Pytorch; Lines of code: 23; Source file: test_vae_pytorch_example.py

Example 7: test

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def test(epoch):
    model.eval()
    test_loss = 0
    for i, (data, _) in enumerate(test_loader):
        if args.cuda:
            data = data.cuda()
        data = Variable(data, volatile=True)
        recon_batch, mu, logvar = model(data)
        test_loss += loss_function(recon_batch, data, mu, logvar).data[0]
        if i == 0:
          n = min(data.size(0), 8)
          comparison = torch.cat([data[:n],
                                  recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
          save_image(comparison.data.cpu(),
                     'results/reconstruction_' + str(epoch) + '.png', nrow=n)

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss)) 
Developer: eelxpeng; Project: UnsupervisedDeepLearning-Pytorch; Lines of code: 20; Source file: test_vae_pytorch_example.py

Example 8: __init__

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def __init__(self, **kwargs):
        default_attr = dict(
            # eval options
            model=None, batch_size=32, stride=10,
            dataset_path=None, # val dir for imagenet, base dir for CIFAR-10-C
            nb_classes=None,
            # attack options
            attack=None,
            # Communication options
            fp16_allreduce=False,
            # Logging options
            logger=None)
        default_attr.update(kwargs)
        for k in default_attr:
            setattr(self, k, default_attr[k])
        if self.dataset not in ['imagenet', 'imagenet-c', 'cifar-10', 'cifar-10-c']:
            raise NotImplementedError
        self.cuda = True
        if self.cuda:
            self.model.cuda()
        self.attack = self.attack()
        self._init_loaders() 
Developer: ddkang; Project: advex-uar; Lines of code: 24; Source file: evaluator.py

Example 9: create_model

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def create_model(self):
        local_model = ResNet18(name='Local',
                    created_time=self.params['current_time'])
        local_model.cuda()
        target_model = ResNet18(name='Target',
                        created_time=self.params['current_time'])
        target_model.cuda()
        if self.params['resumed_model']:
            loaded_params = torch.load(f"saved_models/{self.params['resumed_model']}")
            target_model.load_state_dict(loaded_params['state_dict'])
            self.start_epoch = loaded_params['epoch']
            self.params['lr'] = loaded_params.get('lr', self.params['lr'])
            logger.info(f"Loaded parameters from saved model: LR is"
                        f" {self.params['lr']} and current epoch is {self.start_epoch}")
        else:
            self.start_epoch = 1

        self.local_model = local_model
        self.target_model = target_model 
Developer: ebagdasa; Project: backdoor_federated_learning; Lines of code: 21; Source file: image_helper.py

Example 10: build_model

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def build_model(self):
		# Create model from scratch or use a pretrained one
		print("=> using model '{}'".format(self._arch))
		self._model = models.__dict__[self._arch](num_classes=len(self._labels))
		print("=> loading checkpoint '{}'".format(self._ckp))
		if self._cuda:
			checkpoint = torch.load(self._ckp)
		else:
			# Load GPU model on CPU
			checkpoint = torch.load(self._ckp, map_location=lambda storage, loc: storage)
		# Load weights
		self._model.load_state_dict(checkpoint['state_dict'])

		if self._cuda:
			self._model.cuda()
		else:
			self._model.cpu()


	# Preprocess Images to be ImageNet-compliant 
Developer: floydhub; Project: imagenet; Lines of code: 22; Source file: imagenet_models.py

Example 11: half_mask

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def half_mask(self, inp, gpuID):
        mask = torch.FloatTensor(1, inp.size(1), inp.size(2), inp.size(3)).fill_(1.0)
        w = int(inp.size(2)/2)
        r = np.random.rand()
        if r < 0.25: # left
            mask[:,:,:, 0:w] = 0.0
        elif r < 0.5: # up
            mask[:,:,0:w,:] = 0.0
        elif r < 0.75: # right
            mask[:,:,:,w:inp.size(3)] = 0.0
        else: # bottom
            mask[:,:,w:inp.size(2),:] = 0.0

        mask = mask.cuda(gpuID)
        mask = Variable(mask)
        out = torch.mul(inp, mask)
        return out 
Developer: sg-nm; Project: Evolutionary-Autoencoders; Lines of code: 19; Source file: cnn_train.py

Example 12: batchify

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def batchify(data, bsz, args):
    # Work out how cleanly we can divide the dataset into bsz parts.
    nbatch = data.size(0) // bsz
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * bsz)
    # Evenly divide the data across the bsz batches.
    data = data.view(bsz, -1).t().contiguous()
    if args.gpu:
        data = data.cuda()
    return data 
Developer: ChenWu98; Project: Point-Then-Operate; Lines of code: 12; Source file: amazon.py
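As a quick check of what batchify returns, here is a hypothetical usage snippet (the toy corpus and the SimpleNamespace stand-in for the script's argparse args are assumptions, not part of the Point-Then-Operate project):

import torch
from types import SimpleNamespace

corpus = torch.arange(26)          # pretend corpus of 26 token ids
args = SimpleNamespace(gpu=False)  # stand-in for the argparse namespace used above

batched = batchify(corpus, 4, args)
# 26 // 4 = 6 full rows; the 2 leftover tokens are trimmed, so the result
# has shape (6, 4): sequence positions down the rows, batch streams across columns.
print(batched.shape)  # torch.Size([6, 4])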

Example 13: testing

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def testing(model, dataset_list, nn_paras):
    """ Training an epoch
    Args:
        model: autoencoder
        dataset_list: list of datasets for batch correction
        nn_paras: parameters for neural network training
    Returns:
        code_list: list of embedded codes
    """

    # load nn parameters
    cuda = nn_paras['cuda']

    data_loader_list = []
    num_cells = []
    for dataset in dataset_list:
        torch_dataset = torch.utils.data.TensorDataset(
            torch.FloatTensor(dataset['gene_exp'].transpose()), torch.LongTensor(dataset['cell_labels']))
        data_loader = torch.utils.data.DataLoader(torch_dataset, batch_size=len(dataset['cell_labels']),
                                                    shuffle=False)
        data_loader_list.append(data_loader)
        num_cells.append(len(dataset["cell_labels"]))

    model.eval()

    code_list = [] # list of embedded codes
    for i in range(len(data_loader_list)):
        idx = 0
        with torch.no_grad():
            for data, labels in data_loader_list[i]:
                if cuda:
                    data, labels = data.cuda(), labels.cuda()
                code_tmp, _ = model(data)
                code_tmp = code_tmp.cpu().numpy()
                if idx == 0:
                    code = np.zeros((code_tmp.shape[1], num_cells[i]))
                code[:, idx:idx + code_tmp.shape[0]] = code_tmp.T
                idx += code_tmp.shape[0]
        code_list.append(code)

    return code_list 
Developer: txWang; Project: BERMUDA; Lines of code: 43; Source file: BERMUDA.py

Example 14: reparametrize

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def reparametrize(self, mu, logvar):
        std = logvar.mul(0.5).exp_()
        if args.cuda:
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        eps = Variable(eps)
        return eps.mul(std).add_(mu) 
Developer: bobchennan; Project: VAE_NBP; Lines of code: 10; Source file: vae_dp.py

Example 15: sample

# Required imports: from torch.utils import data [as alias]
# Or alternatively: from torch.utils.data import cuda [as alias]
def sample(self, model, n):
        z = Variable(torch.from_numpy(model.sample(n)[0].astype(np.float32))).cuda()
        return self.decode(z) 
Developer: bobchennan; Project: VAE_NBP; Lines of code: 5; Source file: vae_dp.py


Note: The torch.utils.data.cuda method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use are subject to the license of the corresponding project. Do not reproduce without permission.