当前位置: 首页>>代码示例>>Python>>正文


Python Variable.size方法代码示例

本文整理汇总了Python中torch.autograd.Variable.size方法的典型用法代码示例。如果您正苦于以下问题:Python Variable.size方法的具体用法?Python Variable.size怎么用?Python Variable.size使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在torch.autograd.Variable的用法示例。


在下文中一共展示了Variable.size方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: forward

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
 def forward(self, X, X_mask):
     """Embed, mask-average, and classify a batch of documents.

     X: LongTensor [m, Tx] of word indices (m = batch, Tx = word count).
     X_mask: divisor tensor so the average covers only real words, not
     padding. Returns sigmoid scores of shape [m] for binary
     classification, otherwise softmax probabilities [m, num_classes].
     """
     batch_size = X.size()[0]
     seq_len = X.size()[1]

     # [m, Tx] -> [m, Tx, embedding_dim]
     embedded = self.embedding(X)
     assert embedded.size() == torch.Size([batch_size, seq_len, self.embedding_dim])

     # Sum over the word axis, then divide by per-document word counts so
     # padding positions do not dilute the document average.
     summed = torch.sum(embedded, 1)
     averaged = Variable(torch.div(summed.data, X_mask))
     # averaged: [m, embedding_dim]
     assert averaged.size() == torch.Size([batch_size, self.embedding_dim])

     logits = self.linear(averaged)
     # logits: [m, 1] (binary) or [m, num_classes]
     if self.num_classes == 2:
         assert logits.size() == torch.Size([batch_size, 1])
         scores = torch.squeeze(logits)
         scores = self.sigmoid(scores)
         # scores: [m]
         assert scores.size() == torch.Size([batch_size])
         return scores
     else:
         assert logits.size() == torch.Size([batch_size, self.num_classes])
         return F.softmax(logits)
开发者ID:mircean,项目名称:ML,代码行数:37,代码来源:models.py

示例2: NN

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
def NN(epoch, net, lemniscate, trainloader, testloader, recompute_memory=0):
    """Nearest-neighbour evaluation against the lemniscate feature memory bank.

    Embeds each test image with `net`, retrieves the single most similar
    stored training feature (dot-product similarity), and scores top-1
    accuracy of the retrieved training label. Returns correct / total.

    recompute_memory: when truthy, re-embed the full training set (using the
    test-time transform) to refresh the feature bank before evaluating.
    """
    net.eval()
    net_time = AverageMeter()
    cls_time = AverageMeter()
    losses = AverageMeter()
    correct = 0.
    total = 0
    testsize = testloader.dataset.__len__()

    # Feature bank transposed so each training image is one column.
    trainFeatures = lemniscate.memory.t()
    # ImageFolder-style datasets expose (path, label) pairs in .imgs;
    # otherwise fall back to .train_labels (old torchvision API).
    if hasattr(trainloader.dataset, 'imgs'):
        trainLabels = torch.LongTensor([y for (p, y) in trainloader.dataset.imgs]).cuda()
    else:
        trainLabels = torch.LongTensor(trainloader.dataset.train_labels).cuda()

    if recompute_memory:
        # Temporarily swap in the deterministic test transform so the
        # recomputed bank is not perturbed by train-time augmentation.
        transform_bak = trainloader.dataset.transform
        trainloader.dataset.transform = testloader.dataset.transform
        temploader = torch.utils.data.DataLoader(trainloader.dataset, batch_size=100, shuffle=False, num_workers=1)
        for batch_idx, (inputs, targets, indexes) in enumerate(temploader):
            inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs, volatile=True), Variable(targets)
            batchSize = inputs.size(0)
            features = net(inputs)
            # NOTE(review): the slice below indexes by batch_idx*batchSize,
            # which assumes every batch except possibly the last has the
            # same size -- holds with shuffle=False and a fixed batch_size.
            trainFeatures[:, batch_idx*batchSize:batch_idx*batchSize+batchSize] = features.data.t()
        trainLabels = torch.LongTensor(temploader.dataset.train_labels).cuda()
        trainloader.dataset.transform = transform_bak
    
    end = time.time()
    for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        batchSize = inputs.size(0)
        features = net(inputs)
        net_time.update(time.time() - end)
        end = time.time()

        # Similarity of each test feature to every stored training feature.
        dist = torch.mm(features.data, trainFeatures)

        # Single nearest neighbour per test sample (k=1).
        yd, yi = dist.topk(1, dim=1, largest=True, sorted=True)
        candidates = trainLabels.view(1,-1).expand(batchSize, -1)
        retrieval = torch.gather(candidates, 1, yi)

        retrieval = retrieval.narrow(1, 0, 1).clone().view(-1)
        yd = yd.narrow(1, 0, 1)

        total += targets.size(0)
        correct += retrieval.eq(targets.data).cpu().sum()
        
        cls_time.update(time.time() - end)
        end = time.time()

        print('Test [{}/{}]\t'
              'Net Time {net_time.val:.3f} ({net_time.avg:.3f})\t'
              'Cls Time {cls_time.val:.3f} ({cls_time.avg:.3f})\t'
              'Top1: {:.2f}'.format(
              total, testsize, correct*100./total, net_time=net_time, cls_time=cls_time))

    return correct/total
开发者ID:eglxiang,项目名称:lemniscate.pytorch,代码行数:61,代码来源:test.py

示例3: train

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
def train(train_loader, model, criterion, optimizer, epoch, opt):
    """Train the point-cloud classifier for one epoch.

    For each batch: forward pass, loss, top-1 accuracy bookkeeping,
    backward pass with gradient clipping, and an SGD step. Progress is
    printed every opt.print_freq batches.
    """
    batch_time = utils.AverageMeter()
    losses = utils.AverageMeter()
    top1 = utils.AverageMeter()

    # training mode
    model.train()

    end = time.time()
    for i, (input_points, labels) in enumerate(train_loader):
        # bz x 2048 x 3 -> bz x 3 x 2048 (channels-first for the network)
        input_points = Variable(input_points)
        input_points = input_points.transpose(2, 1)
        # labels arrive as bz x 1; keep only the class column
        labels = Variable(labels[:, 0])

        # shift data to GPU
        if opt.cuda:
            input_points = input_points.cuda()
            labels = labels.long().cuda()  # must be long cuda tensor

        # forward, backward optimize
        output, _ = model(input_points)
        loss = criterion(output, labels)

        # measure accuracy and record loss
        prec1 = utils.accuracy(output.data, labels.data, topk=(1,))[0]
        losses.update(loss.data[0], input_points.size(0))
        top1.update(prec1[0], input_points.size(0))

        # compute gradient and do sgd step, clipping gradients to
        # stabilize training
        optimizer.zero_grad()
        loss.backward()
        utils.clip_gradient(optimizer, opt.gradient_clip)

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % opt.print_freq == 0:
            # The accuracy label was mangled to "[email protected]" by an
            # e-mail-obfuscating scraper; restored to the conventional "Prec@1".
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      loss=losses, top1=top1))
开发者ID:ShichaoJin,项目名称:pointnet2.pytorch,代码行数:60,代码来源:main_cls.py

示例4: train_epoch

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
    def train_epoch(self, inputs, targets, optimizer, criterion,
                    epoch_no=0, batch_size=64, max_step=50, max_norm=5, eval_step=10):
        """Train the language model for one epoch over (inputs, targets).

        inputs/targets are token-id sequences chunked by get_batch into
        (batch_size, <=max_step) batches; the hidden state is carried across
        batches via truncated BPTT (re-wrapped in fresh Variables each step).
        Loss = cross-entropy + focal loss + 0.1 * pullaway loss.
        Every eval_step batches the loss is printed and a text sample is
        generated from a random snippet of the current batch.
        """
        hidden = self.model.init_hidden(batch_size)

        counter = 0
        x_generator = get_batch(inputs, batch_size, max_step)
        y_generator = get_batch(targets, batch_size, max_step)
        for x, y in zip(x_generator, y_generator):
            self.model.train()
            x = Variable(torch.from_numpy(np.array(x, dtype=np.float32))).long()
            y = Variable(torch.from_numpy(np.array(y, dtype=np.float32))).long()

            if CUDA_AVAILABLE:
                x = x.cuda()
                y = y.cuda()

            # Detach the hidden state from the previous batch's graph so
            # gradients do not flow across batch boundaries (truncated BPTT).
            if isinstance(hidden, tuple):
                hidden = tuple([Variable(each.data) for each in hidden])
            else:
                hidden = Variable(hidden.data)

            self.model.zero_grad()  # reset gradients
            output, hidden = self.model.forward(x, hidden)

            # Reshape output:
            #   [batch_size, step_size, vocab_size] -> [batch_size * step_size, vocab_size]
            # y only needs to be 1-D for the loss functions.
            step_size = x.size(1)  # a batch's sequences may be shorter than max_step
            cross_entropy_loss = criterion(
                output.view(batch_size * step_size, -1),
                y.view(batch_size * step_size).long()
            )
            focal_loss = FocalLoss(gamma=2)(
                output.view(batch_size * step_size, -1),
                y.view(batch_size * step_size).long()
            )
            ploss = pullaway_loss(output.view(batch_size * step_size, -1))
            loss = cross_entropy_loss + focal_loss + 0.1 * ploss

            loss.backward()
            torch.nn.utils.clip_grad_norm(self.model.parameters(), max_norm)
            optimizer.step()

            counter += 1
            if (counter % eval_step) == 0:
                print("Epoch: {}; Step: {}; Loss: {:.4f}".format(
                    epoch_no + 1, counter, loss.data[0]
                ))

                # Pick a random snippet from x to seed text generation.
                # NOTE(review): x.view(-1)[...].data.numpy() below fails on
                # CUDA tensors (needs .cpu() first) -- confirm this code
                # path only runs on CPU.
                pos = np.random.randint(0, mul(*x.size()) - 2)
                length = np.random.randint(1, min(5, mul(*x.size()) - pos - 1))
                start_tokens = x.view(-1)[pos:pos + length].data.numpy()
                start_text = ''.join(self.vectorizer.inverse_transform([start_tokens])[0]).strip()
                if start_text:
                    result = self.generate(start_text, max_len=100)
                    print("[%s]: %r" % (start_text, result))

示例5: dec

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
    def dec(self, encoders, decoder_inputs, is_teacher_forcing, max_question_len):
        
        '''
        Decode a question sequence conditioned on the encoder summary.

        encoders (batch, hidden_size)
        if is_teacher_forcing: decoder_inputs (batch, max_question_len)
        if not is_teacher_forcing: decoder_inputs (batch, 1)

        Returns (logits, predictions): logits (qn_steps, batch, voc_size),
        predictions a numpy array of token ids (batch, qn_steps).
        '''
        decoder_inputs = Variable(decoder_inputs).long().cuda()
        decoder_inputs = self.embedding(decoder_inputs)
        # Make the sequence dimension leading: (steps, batch, emb).
        decoder_inputs = decoder_inputs.transpose(0, 1)
        
        # Tile the encoder summary along the step dimension and feed it
        # together with each decoder input token.
        encoders = encoders.expand(decoder_inputs.size(0), encoders.size(0), self.hidden_size*2)
        inputs = torch.cat([decoder_inputs, encoders], -1)
        
        if is_teacher_forcing:
            # Run the whole gold sequence through the decoder RNN at once.
            outputs, hidden = self.dec_net(inputs)
            outputs = self.dropout(outputs)
            logits = self.fc_net(outputs) # qn_steps, batch, voc_size
            
            _, predictions = torch.max(logits.transpose(0, 1), -1) #batch, qn_steps
            predictions = predictions.cpu().data.numpy()
            
        else:
            # Greedy step-by-step decoding: feed each step's argmax token
            # back in as the next input.
            logits = [0 for i in range(max_question_len)]
            predictions = [0 for i in range(max_question_len)]
            
            output, hidden = self.dec_net(inputs)
            output = self.dropout(output)
            logits[0] = self.fc_net(output)
            
            # NOTE(review): torch.max with a single tensor argument returns
            # only the maximum value, not (values, indices); this 2-tuple
            # unpacking looks broken -- probably needs torch.max(..., -1).
            _, index = torch.max(logits[0])
            
            logits[0] = logits[0].view(1, decoder_inputs.size(1), self.voc_size) # 1,batch_size, voc_size
            predictions[0] = index.cpu().data.numpy() # batch_size
            
            for i in range(1, max_question_len):
                
                # NOTE(review): predictions[i-1] is a numpy array here;
                # Variable() normally requires a tensor (torch.from_numpy) --
                # confirm against the PyTorch version this was written for.
                prev_output = Variable(predictions[i-1]).long().cuda()
                prev_output = self.embedding(prev_output)
                inputs = torch.cat([prev_output, encoders[0]], -1)
                
                output, hidden = self.dec_net(inputs, hidden)
                output = self.dropout(output)
                logits[i] = self.fc_net(output)

                _, index = torch.max(logits[i])
                
                # NOTE(review): uses decoder_inputs.size(0) here but size(1)
                # for logits[0] above -- after the transpose these differ
                # (steps vs batch); one of the two is likely wrong.
                logits[i] = logits[i].view(1, decoder_inputs.size(0), self.voc_size) # 1,batch_size, voc_size
                predictions[i] = index.cpu().data.numpy() # batch_size
            
            logits = torch.cat(logits)# qn_steps, batch, voc_size
            predictions = np.array(predictions).transpose(1, 0)
            
        return logits, predictions
开发者ID:xuwenshen,项目名称:Reading_Comprehension,代码行数:57,代码来源:understand_passage.py

示例6: train

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
def train(epoch):
    """Run one training epoch over train_loader, logging progress lines.

    Relies on module-level globals: model, optimizer, train_loader, and
    log_interval.
    """
    model.train()
    samples_seen = 0
    for batch_data, batch_target in train_loader:
        batch_data, batch_target = Variable(batch_data), Variable(batch_target)
        optimizer.zero_grad()
        predictions = model(batch_data)
        batch_loss = F.cross_entropy(predictions, batch_target)
        batch_loss.backward()
        optimizer.step()
        samples_seen += batch_data.size(0)
        # Derive the batch index from the running sample count for logging.
        if (samples_seen // batch_data.size(0)) % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, samples_seen, len(train_loader.dataset),
                100. * samples_seen / len(train_loader.dataset), batch_loss.item()))

示例7: predict

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
 def predict(self, x, get_raw_results=False, **kwargs):
     """Return predicted class indices for x (or raw scores if requested)."""
     if not isinstance(x, Variable):
         x = Variable(torch.from_numpy(np.asarray(x, dtype=np.float32)))
     # Promote a single sample vector to a 1-row batch.
     if len(x.size()) == 1:
         x = x.view(1, -1)
     raw_scores = self._get_prediction(x).data.numpy()
     if get_raw_results:
         return raw_scores
     return np.argmax(raw_scores, axis=1)

示例8: get_input_from_batch

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
def get_input_from_batch(batch, use_cuda):
    """Unpack a Batch object into the encoder-side tensors for one step.

    Returns (enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab,
    extra_zeros, c_t_1, coverage). The pointer-generator and coverage
    entries are None when the corresponding config flags are off.
    """
    batch_size = len(batch.enc_lens)

    enc_batch = Variable(torch.from_numpy(batch.enc_batch).long())
    enc_padding_mask = Variable(torch.from_numpy(batch.enc_padding_mask)).float()
    enc_lens = batch.enc_lens

    # Pointer-generator extras: extended-vocab ids, plus zero score slots
    # for in-article OOV words (max_art_oovs is the max over the batch).
    enc_batch_extend_vocab = None
    extra_zeros = None
    if config.pointer_gen:
        enc_batch_extend_vocab = Variable(torch.from_numpy(batch.enc_batch_extend_vocab).long())
        if batch.max_art_oovs > 0:
            extra_zeros = Variable(torch.zeros((batch_size, batch.max_art_oovs)))

    # Initial decoder context vector.
    c_t_1 = Variable(torch.zeros((batch_size, 2 * config.hidden_dim)))

    # Coverage vector starts at zero over the encoder positions.
    coverage = Variable(torch.zeros(enc_batch.size())) if config.is_coverage else None

    if use_cuda:
        enc_batch = enc_batch.cuda()
        enc_padding_mask = enc_padding_mask.cuda()
        if enc_batch_extend_vocab is not None:
            enc_batch_extend_vocab = enc_batch_extend_vocab.cuda()
        if extra_zeros is not None:
            extra_zeros = extra_zeros.cuda()
        c_t_1 = c_t_1.cuda()
        if coverage is not None:
            coverage = coverage.cuda()

    return enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage
开发者ID:sa7i,项目名称:pointer_summarizer,代码行数:37,代码来源:train_util.py

示例9: train

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
def train(train_loader, net, criterion, optimizer, epoch, train_args):
    """Run one training epoch for the segmentation network.

    Tracks the per-sample loss in an AverageMeter, logs every iteration to
    TensorBoard under 'train_loss', and prints a summary every
    train_args['print_freq'] iterations.
    """
    loss_meter = AverageMeter()
    curr_iter = (epoch - 1) * len(train_loader)
    for step, batch in enumerate(train_loader):
        images, masks = batch
        # Images and label maps must agree spatially.
        assert images.size()[2:] == masks.size()[1:]
        batch_n = images.size(0)
        images = Variable(images).cuda()
        masks = Variable(masks).cuda()

        optimizer.zero_grad()
        predictions = net(images)
        # Network output must match label geometry and predict one score
        # per class.
        assert predictions.size()[2:] == masks.size()[1:]
        assert predictions.size()[1] == voc.num_classes

        # Normalize by batch size so the recorded value is per-sample.
        batch_loss = criterion(predictions, masks) / batch_n
        batch_loss.backward()
        optimizer.step()

        loss_meter.update(batch_loss.data[0], batch_n)

        curr_iter += 1
        writer.add_scalar('train_loss', loss_meter.avg, curr_iter)

        if (step + 1) % train_args['print_freq'] == 0:
            print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
                epoch, step + 1, len(train_loader), loss_meter.avg
            ))
开发者ID:codes-kzhan,项目名称:pytorch-semantic-segmentation,代码行数:30,代码来源:train.py

示例10: forward

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
 def forward(self, images):
     """Extract image feature vectors.

     The ResNet activations are detached (via Variable(...data)), so only
     the linear projection and batch-norm layers receive gradients.
     """
     backbone_out = self.resnet(images)
     detached = Variable(backbone_out.data)
     flat = detached.view(detached.size(0), -1)
     return self.bn(self.linear(flat))
开发者ID:AbhinavJain13,项目名称:pytorch-tutorial,代码行数:9,代码来源:model.py

示例11: _validate_args

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
    def _validate_args(self, inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio):
        if self.use_attention:
            if encoder_outputs is None:
                raise ValueError("Argument encoder_outputs cannot be None when attention is used.")

        # inference batch size
        if inputs is None and encoder_hidden is None:
            batch_size = 1
        else:
            if inputs is not None:
                batch_size = inputs.size(0)
            else:
                if self.rnn_cell is nn.LSTM:
                    batch_size = encoder_hidden[0].size(1)
                elif self.rnn_cell is nn.GRU:
                    batch_size = encoder_hidden.size(1)

        # set default input and max decoding length
        if inputs is None:
            if teacher_forcing_ratio > 0:
                raise ValueError("Teacher forcing has to be disabled (set 0) when no inputs is provided.")
            inputs = Variable(torch.LongTensor([self.sos_id] * batch_size),
                                    volatile=True).view(batch_size, 1)
            if torch.cuda.is_available():
                inputs = inputs.cuda()
            max_length = self.max_length
        else:
            max_length = inputs.size(1) - 1 # minus the start of sequence symbol

        return inputs, batch_size, max_length
开发者ID:xuwenshen,项目名称:Machine-Translation,代码行数:32,代码来源:DecoderRNN.py

示例12: evaluate

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
def evaluate(attention_model, x_test, y_test):
    """Compute test-set accuracy of the attention model.

    Args:
        attention_model : model object; attention_model.type selects
                          multi-class (truthy) vs binary (falsy) scoring
        x_test          : numpy array of input token ids
        y_test          : numpy array of labels

    Returns:
        fraction of correctly classified test examples
    """
    # Re-size the model's recurrent state to the evaluation batch.
    attention_model.batch_size = x_test.shape[0]
    attention_model.hidden_state = attention_model.init_hidden()

    x_var = Variable(torch.from_numpy(x_test).type(torch.LongTensor))
    y_scores, _ = attention_model(x_var)

    if bool(attention_model.type):
        # Multi-class: argmax over class scores.
        predictions = torch.max(y_scores, 1)[1]
        y_var = Variable(torch.from_numpy(y_test).type(torch.LongTensor))
    else:
        # Binary: round the sigmoid output to 0/1.
        predictions = torch.round(y_scores.type(torch.DoubleTensor).squeeze(1))
        y_var = Variable(torch.from_numpy(y_test).type(torch.DoubleTensor))

    return torch.eq(predictions, y_var).data.sum() / x_var.size(0)
开发者ID:daiyongya,项目名称:Structured-Self-Attention,代码行数:30,代码来源:train.py

示例13: forward

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
    def forward(self, x):
        """Reconstruct a video from compressive measurements.

        x: 5-D video tensor; height and width are read from dims 3 and 4
        (so layout is presumably (batch, channels, frames, H, W) -- TODO
        confirm against the measurements module).
        Returns (reconstructed_video, y) where y is the measurement
        operator's secondary output.
        """

        height = x.size(3)
        width = x.size(4)

        # Compute measurements
        x, y = self.measurements(x, self.pad_frame_size, self.patch_shape)

        # When the encoder is fixed, cut it out of the autograd graph so
        # it receives no gradients.
        if self.encoder_learn is False:
            x = x.detach()
            y = y.detach()

        # Map measurements to video blocks: one reconstruction pass per
        # block index along dim 1, written into a preallocated buffer.
        out = Variable(torch.zeros(
            x.size(0), x.size(1), self.vectorized)).cuda()
        for i in range(x.size(1)):
            out[:, i, :] = self.reconstruction(x[:, i, :])

        # Regroup the flat per-block vectors into the patch grid expected
        # by measurements.fold (permute reorders to its axis convention).
        output_patches = out.view(out.size(0), self.measurements.patches_size[0],
                                  self.measurements.patches_size[1], self.measurements.patches_size[2], self.temporal_size, self.spatial_size, self.spatial_size).permute(0, 1, 4, 2, 3, 6, 5)

        # Reconstruct video blocks to video
        reconstructed_video = self.measurements.fold(output_patches)[0]

        # Crop the padding added for patching back to the original frame size.
        reconstructed_video = reconstructed_video[:, :, :, 0:height, 0:width]

        return reconstructed_video, y
开发者ID:jingfuzhifu,项目名称:DeepVideoCS,代码行数:30,代码来源:fc.py

示例14: visualizeModel

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
def visualizeModel(model, numImages=6):
    """Plot predictions for a handful of validation images.

    Temporarily switches the model to eval mode and restores its original
    train/eval state before returning. Relies on module-level globals:
    dataloaders, use_gpu, class_names, and imshow.
    """
    wasTraining = model.training
    model.eval()
    shown = 0
    fig = plt.figure()

    for inputs, labels in dataloaders['val']:
        if use_gpu:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)

        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)

        for j in range(inputs.size(0)):
            shown += 1
            nCols = 2
            ax = plt.subplot(numImages // nCols, nCols, shown)
            ax.axis('off')
            ax.set_title('predicted: {}'.format(class_names[preds[j]]))
            imshow(inputs.cpu().data[j])

            if shown == numImages:
                # Restore the original mode before the early return.
                model.train(mode=wasTraining)
                return
    model.train(mode=wasTraining)

示例15: __val

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import size [as 别名]
    def __val(self):
        """
          Validation function during the train phase: computes the pixel
          loss over the validation loader, checkpoints the network, then
          logs and resets the timing/loss meters.
        """
        self.seg_net.eval()
        start_time = time.time()

        for j, data_tuple in enumerate(self.val_loader):
            # Change the data type. `async` has been a reserved keyword
            # since Python 3.7 (SyntaxError); PyTorch renamed the argument
            # to `non_blocking` in 0.4. volatile=True marks inference-only
            # Variables (pre-0.4 API, kept for consistency with this file).
            inputs = Variable(data_tuple[0].cuda(non_blocking=True), volatile=True)
            targets = Variable(data_tuple[1].cuda(non_blocking=True), volatile=True)
            # Forward pass.
            outputs = self.seg_net(inputs)
            # Compute the loss of the val batch.
            loss_pixel = self.pixel_loss(outputs, targets)
            loss = loss_pixel

            self.val_losses.update(loss.data[0], inputs.size(0))

            # Update the vars of the val phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()

        self.module_utilizer.save_net(self.seg_net, self.iters)
        # Print the log info & reset the states.
        Log.info(
            'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
            'Loss {loss.avg:.8f}\n'.format(
            batch_time=self.batch_time, loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.seg_net.train()


注:本文中的torch.autograd.Variable.size方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。