

Python data.size Method Code Examples

This article collects and summarizes typical usage examples of the Python data.size method as it appears in code built on torch.utils.data. If you are wondering what data.size does, how to call it, or want to see it used in practice, the hand-picked examples below may help; you can also explore further usage of torch.utils.data. Note, strictly speaking, that torch.utils.data itself defines no size method: every snippet below calls torch.Tensor.size() on a tensor yielded by a torch.utils.data.DataLoader or built inside a Dataset.


The following presents 15 code examples of the data.size method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
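Since the call being demonstrated is really torch.Tensor.size(), here is a minimal, self-contained sketch of the pattern shared by all the examples below (the toy dataset and shapes are illustrative):

import torch
from torch.utils.data import DataLoader, TensorDataset

# A toy dataset: 100 samples of 1x28x28 "images" with dummy labels.
dataset = TensorDataset(torch.randn(100, 1, 28, 28),
                        torch.zeros(100, dtype=torch.long))
loader = DataLoader(dataset, batch_size=32)

data, target = next(iter(loader))
print(data.size())   # torch.Size([32, 1, 28, 28])
print(data.size(0))  # 32 -- the batch dimension, queried throughout the examples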

Example 1: test

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def test(epoch):
    model.eval()
    test_loss = 0
    with torch.no_grad():
        # each `data` tensor holds a batch of BATCH_SIZE (default 128) samples
        for i, (data, _) in enumerate(test_loader):
            data = data.to(device)
            recon_batch, mu, logvar = model(data)
            test_loss += loss_function(recon_batch, data, mu, logvar).item()
            if i == 0:
                n = min(data.size(0), 8)
                # For the first batch of each epoch, save the first 8 input
                # digits with their reconstructions right below them.
                comparison = torch.cat([data[:n],
                                        recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
                save_image(comparison.cpu(),
                           'results/reconstruction_' + str(epoch) + '.png', nrow=n)

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))

Developer: jgvfwstone | Project: ArtificialIntelligenceEngines | Lines: 24 | Source: main.py
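Examples 1-3 call a loss_function(recon_batch, data, mu, logvar) defined elsewhere in their scripts. A minimal sketch of the usual VAE objective they appear to assume (reconstruction binary cross-entropy plus KL divergence; the 784 assumes flattened 28x28 MNIST images):

import torch
import torch.nn.functional as F

def loss_function(recon_x, x, mu, logvar):
    # Reconstruction term, summed over pixels and over the batch.
    BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    # KL divergence between N(mu, sigma^2) and the standard normal prior.
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return BCE + KLD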

Example 2: test

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def test(epoch):
    model.eval()
    test_loss = 0
    for i, (data, _) in enumerate(test_loader):
        if args.cuda:
            data = data.cuda()
        # Pre-0.4 PyTorch idiom: volatile=True disables autograd for inference
        # (superseded by torch.no_grad(); compare Example 3).
        data = Variable(data, volatile=True)
        recon_batch, mu, logvar = model(data)
        # .data[0] is the pre-0.4 equivalent of .item().
        test_loss += loss_function(recon_batch, data, mu, logvar).data[0]
        if i == 0:
            n = min(data.size(0), 8)
            comparison = torch.cat([data[:n],
                                    recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
            save_image(comparison.data.cpu(),
                       'results/reconstruction_' + str(epoch) + '.png', nrow=n)

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
Developer: eelxpeng | Project: UnsupervisedDeepLearning-Pytorch | Lines: 20 | Source: test_vae_pytorch_example.py

Example 3: test

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def test(epoch):
    model.eval()
    test_loss = 0
    with torch.no_grad():
        for i, (data, _) in enumerate(test_loader):
            data = data.to(device)
            recon_batch, mu, logvar = model(data)
            test_loss += loss_function(recon_batch, data, mu, logvar).item()
            if i == 0:
                n = min(data.size(0), 8)
                comparison = torch.cat([data[:n],
                                        recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
                save_image(comparison.cpu(),
                           'results/reconstruction_' + str(epoch) + '.png', nrow=n)

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss)) 
Developer: pytorch | Project: examples | Lines: 19 | Source: main.py

Example 4: calc_gradient_penalty

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def calc_gradient_penalty(netD, real_data, fake_data, device='cpu', pac=10, lambda_=10):
    alpha = torch.rand(real_data.size(0) // pac, 1, 1, device=device)
    alpha = alpha.repeat(1, pac, real_data.size(1))
    alpha = alpha.view(-1, real_data.size(1))

    interpolates = alpha * real_data + ((1 - alpha) * fake_data)

    # interpolates = torch.Variable(interpolates, requires_grad=True, device=device)

    disc_interpolates = netD(interpolates)

    gradients = torch.autograd.grad(
        outputs=disc_interpolates, inputs=interpolates,
        grad_outputs=torch.ones(disc_interpolates.size(), device=device),
        create_graph=True, retain_graph=True, only_inputs=True)[0]

    gradient_penalty = (
        (gradients.view(-1, pac * real_data.size(1)).norm(2, dim=1) - 1) ** 2).mean() * lambda_
    return gradient_penalty 
Developer: sdv-dev | Project: SDGym | Lines: 21 | Source: ctgan.py
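A hedged sketch of how calc_gradient_penalty is typically invoked in a WGAN-GP critic step (the names netD, optimizerD, real, and fake are illustrative, not from the SDGym source; with pac=10 the batch size must be divisible by 10):

# One critic (discriminator) update in a WGAN-GP training loop.
optimizerD.zero_grad()
loss_d = -(torch.mean(netD(real)) - torch.mean(netD(fake)))
penalty = calc_gradient_penalty(netD, real, fake, device=device, pac=10)
(loss_d + penalty).backward()
optimizerD.step()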

Example 5: __getitem__

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def __getitem__(self, index):
        # Get the anchor index for the current sample index; here the anchor
        # is set to the last sample in this group.
        minibatch_db = [self._roidb[index]]
        blobs = get_minibatch(minibatch_db, self._num_classes)
        np.random.shuffle(blobs['rois'])
        rois = torch.from_numpy(blobs['rois'][:self.max_rois_size])
        data = torch.from_numpy(blobs['data'])
        labels = torch.from_numpy(blobs['labels'])
        data_height, data_width = data.size(1), data.size(2)

        # NHWC -> NCHW, then drop the singleton batch dimension.
        data = data.permute(0, 3, 1, 2).contiguous().view(3, data_height, data_width)

        info = torch.Tensor([rois.size(0), data_height, data_width])

        return data, rois, labels, info
Developer: jd730 | Project: OICR-pytorch | Lines: 19 | Source: roibatchLoader.py

Example 6: train

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def train(self):
        self.model.train()

        train_loss = Average()
        train_acc = Accuracy()

        for data, target in self.train_loader:
            data = data.to(self.device)
            target = target.to(self.device)

            output = self.model(data)
            loss = F.cross_entropy(output, target)

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            train_loss.update(loss.item(), data.size(0))
            train_acc.update(output, target)

        return train_loss, train_acc 
Developer: narumiruna | Project: pytorch-distributed-example | Lines: 23 | Source: main.py
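Average and Accuracy in Examples 6-7 are small bookkeeping helpers defined in the same repository. A minimal sketch of the interface Average must provide, assuming update(value, number) accumulates a batch-mean weighted by batch size (an assumption, not the repository's exact code):

class Average:
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, number):
        # `value` is a batch-mean loss; weight it by the batch size `number`.
        self.sum += value * number
        self.count += number

    @property
    def average(self):
        return self.sum / self.count if self.count else 0.0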

Example 7: evaluate

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def evaluate(self):
        self.model.eval()

        test_loss = Average()
        test_acc = Accuracy()

        for data, target in self.test_loader:
            data = data.to(self.device)
            target = target.to(self.device)

            output = self.model(data)
            loss = F.cross_entropy(output, target)

            test_loss.update(loss.item(), data.size(0))
            test_acc.update(output, target)

        return test_loss, test_acc 
Developer: narumiruna | Project: pytorch-distributed-example | Lines: 19 | Source: main.py

Example 8: validate

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def validate(epoch, model, loader, prefix='Validation'):
    global global_step, writer

    model.eval()
    val_loss = 0

    pbar = tqdm(total=len(loader.dataset))
    pbar.set_description('Eval')
    for batch_idx, data in enumerate(loader):
        cond_data = None  # conditional input, when the dataset provides one
        if isinstance(data, list):
            if len(data) > 1:
                cond_data = data[1].float()
                cond_data = cond_data.to(device)
            data = data[0]
        data = data.to(device)
        with torch.no_grad():
            val_loss += -model.log_probs(data, cond_data).sum().item()  # sum up batch loss
        pbar.update(data.size(0))
        pbar.set_description('Val, Log likelihood in nats: {:.6f}'.format(
            -val_loss / pbar.n))

    writer.add_scalar('validation/LL', val_loss / len(loader.dataset), epoch)

    pbar.close()
    return val_loss / len(loader.dataset) 
Developer: ikostrikov | Project: pytorch-flows | Lines: 30 | Source: main.py

Example 9: __init__

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def __init__(self, mode, noisy_for_train, sentiment, direction):
        self.mode = mode
        self.root = os.path.join('../data', 'yelp')
        self.noisy = self.mode == 'train' and noisy_for_train

        # Load data from domain 0 and domain 1.
        path = os.path.join(self.root, 'sentiment.{}.{}'.format(mode, sentiment))

        # Load vocabulary.
        print('----- Loading vocab -----')
        self.vocab = Vocabulary('../data/amazon/amazon.vocab')
        print('vocabulary size:', self.vocab.size)
        self.pad = self.vocab.word2id['<pad>']
        self.go = self.vocab.word2id['<go>']
        self.eos = self.vocab.word2id['<eos>']
        self.unk = self.vocab.word2id['<unk>']

        # Tokenize file content
        with open(path, 'r') as f:
            ids = []
            for line in f:
                words = ['<go>'] + line.split() + ['<eos>']
                if direction == 'forward':
                    pass
                elif direction == 'backward':
                    words.reverse()
                else:
                    raise ValueError()
                for word in words:
                    ids.append(self.vocab.word2id[word] if word in self.vocab.word2id else self.unk)
        self.ids = torch.LongTensor(ids)  # (very_long, )
        self.ids = batchify(self.ids, config.batch_size, config)  # shape = (???, batch_size) 
Developer: ChenWu98 | Project: Point-Then-Operate | Lines: 34 | Source: amazon.py

Example 10: batchify

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def batchify(data, bsz, args):
    # Work out how cleanly we can divide the dataset into bsz parts.
    nbatch = data.size(0) // bsz
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * bsz)
    # Evenly divide the data across the bsz batches.
    data = data.view(bsz, -1).t().contiguous()
    if args.gpu:
        data = data.cuda()
    return data 
Developer: ChenWu98 | Project: Point-Then-Operate | Lines: 12 | Source: amazon.py
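After batchify, the tensor has shape (nbatch, bsz), and training windows are sliced along dimension 0. A hedged usage sketch in the style of the classic PyTorch word-language-model example (bptt and get_batch are illustrative names, not from this repository):

def get_batch(source, i, bptt=35):
    # Take a window of `bptt` rows starting at row i; the target is the
    # same window shifted one step ahead (next-token prediction).
    seq_len = min(bptt, len(source) - 1 - i)
    data = source[i:i + seq_len]
    target = source[i + 1:i + 1 + seq_len].reshape(-1)
    return data, target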

Example 11: __init__

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def __init__(self, mode, noisy_for_train, sentiment, direction):
        self.mode = mode
        self.root = os.path.join('../data', 'yelp')
        voc_f = os.path.join(self.root, 'yelp.vocab')
        self.noisy = self.mode == 'train' and noisy_for_train

        # Load data from domain 0 and domain 1.
        path = os.path.join(self.root, 'sentiment.{}.{}'.format(mode, sentiment))

        # Load vocabulary.
        print('----- Loading vocab -----')
        self.vocab = Vocabulary(voc_f)
        print('vocabulary size:', self.vocab.size)
        self.pad = self.vocab.word2id['<pad>']
        self.go = self.vocab.word2id['<go>']
        self.eos = self.vocab.word2id['<eos>']
        self.unk = self.vocab.word2id['<unk>']

        # Tokenize file content
        with open(path, 'r') as f:
            ids = []
            for line in f:
                words = ['<go>'] + line.split() + ['<eos>']
                if direction == 'forward':
                    pass
                elif direction == 'backward':
                    words.reverse()
                else:
                    raise ValueError()
                for word in words:
                    ids.append(self.vocab.word2id[word] if word in self.vocab.word2id else self.unk)
        self.ids = torch.LongTensor(ids)  # (very_long, )
        self.ids = batchify(self.ids, config.batch_size, config)  # shape = (nbatch, batch_size)
Developer: ChenWu98 | Project: Point-Then-Operate | Lines: 35 | Source: yelp.py

Example 12: extract_and_crop_patches_by_predicted_transform

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def extract_and_crop_patches_by_predicted_transform(patches, trans, crop_size=32):
    assert patches.size(0) == trans.size(0)
    st = int((patches.size(2) - crop_size) / 2)
    fin = st + crop_size
    # One base local affine frame (LAF) per patch, centered at (0.5, 0.5).
    rot_LAFs = Variable(torch.FloatTensor([[0.5, 0, 0.5], [0, 0.5, 0.5]]).unsqueeze(0).repeat(patches.size(0), 1, 1))
    if patches.is_cuda:
        rot_LAFs = rot_LAFs.cuda()
        trans = trans.cuda()
    # Compose the predicted transform with the base frames; keep the translation column.
    rot_LAFs1 = torch.cat([torch.bmm(trans, rot_LAFs[:, 0:2, 0:2]), rot_LAFs[:, 0:2, 2:]], dim=2)
    return extract_patches(patches, rot_LAFs1, PS=patches.size(2))[:, :, st:fin, st:fin].contiguous()
Developer: ducha-aiki | Project: affnet | Lines: 12 | Source: train_AffNet_test_on_graffity.py

Example 13: extract_random_LAF

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def extract_random_LAF(data, max_rot=math.pi, max_tilt=1.0, crop_size=32):
    st = int((data.size(2) - crop_size) / 2)
    fin = st + crop_size
    if type(max_rot) is float:
        rot_LAFs, inv_rotmat = get_random_rotation_LAFs(data, max_rot)
    else:
        rot_LAFs = max_rot
        inv_rotmat = None
    aff_LAFs, inv_TA = get_random_norm_affine_LAFs(data, max_tilt)
    aff_LAFs[:, 0:2, 0:2] = torch.bmm(rot_LAFs[:, 0:2, 0:2], aff_LAFs[:, 0:2, 0:2])
    data_aff = extract_patches(data, aff_LAFs, PS=data.size(2))
    data_affcrop = data_aff[:, :, st:fin, st:fin].contiguous()
    return data_affcrop, data_aff, rot_LAFs, inv_rotmat, inv_TA
Developer: ducha-aiki | Project: affnet | Lines: 15 | Source: train_AffNet_test_on_graffity.py

Example 14: load_grayscale_var

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def load_grayscale_var(fname):
    img = Image.open(fname).convert('RGB')
    img = np.mean(np.array(img), axis=2)
    # volatile=True is the pre-0.4 way of disabling autograd for inference
    # (replaced by torch.no_grad() in later PyTorch releases).
    var_image = torch.autograd.Variable(torch.from_numpy(img.astype(np.float32)), volatile=True)
    var_image_reshape = var_image.view(1, 1, var_image.size(0), var_image.size(1))
    if args.cuda:
        var_image_reshape = var_image_reshape.cuda()
    return var_image_reshape
Developer: ducha-aiki | Project: affnet | Lines: 10 | Source: train_AffNet_test_on_graffity.py
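Under PyTorch 0.4 and later, the same loader can be written without Variable/volatile by running inference inside torch.no_grad(). A hedged sketch (the name load_grayscale_tensor is hypothetical):

import numpy as np
import torch
from PIL import Image

def load_grayscale_tensor(fname, use_cuda=False):
    img = Image.open(fname).convert('RGB')
    img = np.mean(np.array(img), axis=2)    # average channels -> grayscale
    t = torch.from_numpy(img.astype(np.float32))
    t = t.view(1, 1, t.size(0), t.size(1))  # (1, 1, H, W)
    if use_cuda:
        t = t.cuda()
    return t  # run inference on it inside `with torch.no_grad():`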

Example 15: input_norm

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import size [as alias]
def input_norm(self, x):
        # Per-sample normalization: subtract the mean and divide by the standard
        # deviation computed over all elements of each sample.
        flat = x.view(x.size(0), -1)
        mp = torch.mean(flat, dim=1)
        sp = torch.std(flat, dim=1) + 1e-7
        mp = mp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)
        sp = sp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)
        return (x - mp) / sp
Developer: ducha-aiki | Project: affnet | Lines: 7 | Source: train_OriNet_test_on_graffity.py
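The chained unsqueeze/expand_as calls can be hard to read; a small illustrative check of the shapes involved:

import torch

x = torch.randn(4, 1, 32, 32)        # (batch, channels, H, W)
flat = x.view(x.size(0), -1)         # (4, 1024)
mp = torch.mean(flat, dim=1)         # (4,)
sp = torch.std(flat, dim=1) + 1e-7   # (4,)
# (4,) -> (4, 1, 1, 1) so the per-sample statistics broadcast over x.
normed = (x - mp.view(-1, 1, 1, 1)) / sp.view(-1, 1, 1, 1)
print(normed.view(4, -1).mean(dim=1))  # ~0 for every sample
print(normed.view(4, -1).std(dim=1))   # ~1 for every sample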


Note: The torch.utils.data.size method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and redistribution or use should follow the corresponding project's license. Do not republish without permission.