当前位置: 首页>>代码示例>>Python>>正文


Python Variable.contiguous方法代码示例

本文整理汇总了Python中torch.autograd.Variable.contiguous方法的典型用法代码示例。如果您正苦于以下问题:Python Variable.contiguous方法的具体用法?Python Variable.contiguous怎么用?Python Variable.contiguous使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在torch.autograd.Variable的用法示例。


在下文中一共展示了Variable.contiguous方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: compare_grid_sample

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import contiguous [as 别名]
def compare_grid_sample():
    """Gradient-check the custom RoICropFunction against F.grid_sample.

    Builds a random CUDA input/grid pair, back-propagates the same random
    upstream gradient through both the official ``F.grid_sample`` autograd
    path and the project-local ``RoICropFunction``, and returns the summed
    elementwise difference between the two input gradients (~0 when the
    implementations agree).

    Requires a CUDA device and the project-local ``RoICropFunction``.
    """
    # Randomize batch size only; C/H/W stay fixed to keep the check fast.
    N = random.randint(1, 8)
    C = 2  # random.randint(1, 8)
    H = 5  # random.randint(1, 8)
    W = 4  # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    # Reference path: official grid_sample forward + autograd backward.
    out_official = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_official.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_official, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]

    # Custom path. The last grid dimension is swapped (1, 0) before the call —
    # presumably RoICropFunction expects (y, x) ordering; TODO confirm.
    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:, :, :, 1], grid_clone.data[:, :, :, 0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]

    # Fix: the leftover pdb.set_trace() breakpoint is removed, and the
    # comparison result is returned instead of being silently discarded.
    delta = (grad_input_off.data - grad_input_stn).sum()
    return delta
开发者ID:XiongweiWu,项目名称:faster-rcnn.pytorch,代码行数:29,代码来源:net_utils.py

示例2: get_loss

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import contiguous [as 别名]
 def get_loss(self, logits, labels):
     """Flatten per-step logits and (time-major) labels, then return the
     mean of ``self.cost_func`` over all positions.

     ``logits`` arrives as a list of 2-D tensors; each element is reshaped
     in place to carry a leading time axis before concatenation.
     """
     # Reshape every list element in place to (1, steps, vocab) so that
     # torch.cat below stacks them along a new leading dimension.
     for idx in range(len(logits)):
         step = logits[idx]
         logits[idx] = step.contiguous().view(1, step.size(0), step.size(1))
     merged = torch.cat(logits)

     # Labels come in batch-major; move to CUDA, make time-major, flatten.
     flat_labels = Variable(labels).long().cuda().transpose(0, 1).contiguous().view(-1)
     flat_logits = merged.contiguous().view(-1, merged.size(-1))

     return torch.mean(self.cost_func(flat_logits, flat_labels))
开发者ID:xuwenshen,项目名称:Machine-Translation,代码行数:17,代码来源:net.py

示例3: get_loss

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import contiguous [as 别名]
 def get_loss(self, logits, labels):
     """Return the mean of ``self.cost_func`` over all (position, label)
     pairs, flattening logits to (N, vocab) and labels to (N,).

     Labels arrive batch-major and are transposed to time-major to line up
     with the logits before flattening.
     """
     flat_labels = Variable(labels).long().cuda().transpose(0, 1).contiguous().view(-1)
     flat_logits = logits.contiguous().view(-1, logits.size(-1))
     return torch.mean(self.cost_func(flat_logits, flat_labels))
 
     
     
     
开发者ID:xuwenshen,项目名称:Reading_Comprehension,代码行数:13,代码来源:understand_passage.py

示例4: fold

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import contiguous [as 别名]
    def fold(self, patches):
        """Inverse of the unfold operation: scatter-add the (possibly
        overlapping) patches back into the padded video volume and average
        the overlap counts.

        Returns a 4-tuple ``(video, None, None, None)`` — the trailing Nones
        presumably match an autograd-backward calling convention; confirm
        against the caller.
        """
        # Build the flat index of every element of the padded input, then
        # unfold it exactly like the forward pass so each patch position maps
        # to its source location.
        flat_idx = patches.data.new().long()
        torch.arange(0, self.input_padded_numel, out=flat_idx)
        flat_idx = (flat_idx.view(self.input_padded_size)
                    .unfold(3, self.spatial_size, self.step)
                    .unfold(4, self.spatial_size, self.step)
                    .contiguous().view(-1))

        # Accumulators: summed patch values and per-position overlap counts.
        accum = Variable(patches.data.new(self.input_padded_numel).zero_())
        counts = Variable(patches.data.new(self.input_padded_numel).zero_())
        overlap_ones = Variable(torch.ones(patches.size())).cuda()

        accum.index_add_(0, Variable(flat_idx), patches.contiguous().view(-1))
        counts.index_add_(0, Variable(flat_idx), overlap_ones.contiguous().view(-1))

        # Normalize by overlap count and restore the padded shape.
        return (accum / counts).view(self.input_padded_size), None, None, None
开发者ID:jingfuzhifu,项目名称:DeepVideoCS,代码行数:20,代码来源:measurements.py

示例5: train

# 需要导入模块: from torch.autograd import Variable [as 别名]
# 或者: from torch.autograd.Variable import contiguous [as 别名]
def train(args):
  """Train the convcap captioning model (with optional fine-tuning of the
  VGG16 image features) on the COCO train split.

  NOTE(review): the original docstring said "args.nepochs (default = 30)"
  but the loop below reads args.epochs — confirm which attribute the arg
  parser actually defines.

  Side effects: moves both models to CUDA, prints per-epoch debug lines,
  and (in code omitted past this excerpt) saves checkpoints to
  args.model_dir.
  """

  t_start = time.time()
  train_data = coco_loader(args.coco_root, split='train', ncap_per_img=args.ncap_per_img)
  print('[DEBUG] Loading train data ... %f secs' % (time.time() - t_start))

  train_data_loader = DataLoader(dataset=train_data, num_workers=args.nthreads,\
    batch_size=args.batchsize, shuffle=True, drop_last=True)

  # Load the pre-trained image CNN (VGG16 feature extractor) in train mode.
  model_imgcnn = Vgg16Feats()
  model_imgcnn.cuda()
  model_imgcnn.train(True)

  # Convcap captioning model.
  model_convcap = convcap(train_data.numwords, args.num_layers, is_attention=args.attention)
  model_convcap.cuda()
  model_convcap.train(True)

  optimizer = optim.RMSprop(model_convcap.parameters(), lr=args.learning_rate)
  scheduler = lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=.1)
  # The CNN optimizer is created lazily once epoch == args.finetune_after.
  img_optimizer = None

  batchsize = args.batchsize
  ncap_per_img = args.ncap_per_img
  # Each image contributes ncap_per_img captions, so the effective caption
  # batch size is batchsize * ncap_per_img.
  batchsize_cap = batchsize*ncap_per_img
  max_tokens = train_data.max_tokens
  nbatches = np.int_(np.floor((len(train_data.ids)*1.)/batchsize))
  bestscore = .0

  for epoch in range(args.epochs):
    loss_train = 0.

    # Start fine-tuning the image CNN once this epoch is reached.
    if(epoch == args.finetune_after):
      img_optimizer = optim.RMSprop(model_imgcnn.parameters(), lr=1e-5)
      img_scheduler = lr_scheduler.StepLR(img_optimizer, step_size=args.lr_step_size, gamma=.1)

    # NOTE(review): calling scheduler.step() before the epoch's optimizer
    # steps is the pre-1.1 PyTorch ordering; newer versions expect it after.
    scheduler.step()
    if(img_optimizer):
      img_scheduler.step()

    # One epoch of training.
    for batch_idx, (imgs, captions, wordclass, mask, _) in \
      tqdm(enumerate(train_data_loader), total=nbatches):

      imgs = imgs.view(batchsize, 3, 224, 224)
      wordclass = wordclass.view(batchsize_cap, max_tokens)
      mask = mask.view(batchsize_cap, max_tokens)

      imgs_v = Variable(imgs).cuda()
      wordclass_v = Variable(wordclass).cuda()

      optimizer.zero_grad()
      if(img_optimizer):
        img_optimizer.zero_grad()

      # Extract image features, then repeat them per caption so each of the
      # ncap_per_img captions sees the same image encoding.
      imgsfeats, imgsfc7 = model_imgcnn(imgs_v)
      imgsfeats, imgsfc7 = repeat_img_per_cap(imgsfeats, imgsfc7, ncap_per_img)
      _, _, feat_h, feat_w = imgsfeats.size()

      if(args.attention == True):
        wordact, attn = model_convcap(imgsfeats, imgsfc7, wordclass_v)
        attn = attn.view(batchsize_cap, max_tokens, feat_h, feat_w)
      else:
        wordact, _ = model_convcap(imgsfeats, imgsfc7, wordclass_v)

      # Shift by one step: predictions at t are scored against token t+1.
      wordact = wordact[:,:,:-1]
      wordclass_v = wordclass_v[:,1:]
      mask = mask[:,1:].contiguous()

      wordact_t = wordact.permute(0, 2, 1).contiguous().view(\
        batchsize_cap*(max_tokens-1), -1)
      wordclass_t = wordclass_v.contiguous().view(\
        batchsize_cap*(max_tokens-1), 1)

      # Flat indices of unmasked positions; the loss is computed on these only.
      maskids = torch.nonzero(mask.view(-1)).numpy().reshape(-1)

      if(args.attention == True):
        # Cross-entropy loss plus the attention regularizer of
        # Show, Attend and Tell (attention weights should sum to ~1 per cell).
        loss = F.cross_entropy(wordact_t[maskids, ...], \
          wordclass_t[maskids, ...].contiguous().view(maskids.shape[0])) \
          + (torch.sum(torch.pow(1. - torch.sum(attn, 1), 2)))\
          /(batchsize_cap*feat_h*feat_w)
      else:
        loss = F.cross_entropy(wordact_t[maskids, ...], \
          wordclass_t[maskids, ...].contiguous().view(maskids.shape[0]))

      # NOTE(review): loss.data[0] is pre-0.4 PyTorch; use loss.item() on >=0.4.
      loss_train = loss_train + loss.data[0]

      loss.backward()

      optimizer.step()
      if(img_optimizer):
        img_optimizer.step()

    # NOTE(review): batch_idx here is the index of the LAST batch (nbatches-1),
    # so this divides by one fewer than the number of batches — likely an
    # off-by-one; the mean over batches would divide by batch_idx + 1.
    loss_train = (loss_train*1.)/(batch_idx)
    print('[DEBUG] Training epoch %d has loss %f' % (epoch, loss_train))

    modelfn = osp.join(args.model_dir, 'model.pth')
#.........这里部分代码省略.........
开发者ID:reem94,项目名称:convcap,代码行数:103,代码来源:train.py


注:本文中的torch.autograd.Variable.contiguous方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。