

Python autograd.Variable Method Code Examples

This article collects typical usage examples of Python's torch.autograd.Variable. If you are wondering what autograd.Variable does, how to call it, or how it is used in practice, the curated code examples below should help. You can also explore further usage examples from its parent module, torch.autograd.


Fifteen code examples of autograd.Variable are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
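
Before diving into the examples, here is a minimal sketch of the canonical pattern: wrap a tensor in a Variable, run a computation, and call backward() to populate gradients. Note that since PyTorch 0.4 Variable has been merged into Tensor (wrapping is a no-op there); most of the examples below target the older pre-0.4 API.

import torch
from torch.autograd import Variable

# Wrap a tensor so autograd tracks operations on it (pre-0.4 API;
# on >= 0.4 this is equivalent to torch.ones(2, 2, requires_grad=True)).
x = Variable(torch.ones(2, 2), requires_grad=True)
y = (x * 3).sum()

y.backward()   # populates x.grad with dy/dx
print(x.grad)  # 2x2 tensor filled with 3.0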

Example 1: run_batch

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def run_batch(inputs, outputs_gt, model, criterion, optimizer, train):
    """Train or validate on a batch (inputs + outputs)."""
    if train:
        model.train()
    else:
        model.eval()
    val = not train
    inputs, outputs_gt = torch.from_numpy(inputs), torch.from_numpy(outputs_gt)
    # volatile=True disables graph construction during validation (pre-0.4 API)
    inputs, outputs_gt = Variable(inputs, volatile=val), Variable(outputs_gt)
    if GPU >= 0:
        inputs = inputs.cuda(GPU)
        outputs_gt = outputs_gt.cuda(GPU)
    if train:
        optimizer.zero_grad()
    outputs_pred = model(inputs)
    loss = criterion(outputs_pred, outputs_gt)
    if train:
        loss.backward()
        optimizer.step()
    return loss.data[0]  # scalar loss value (pre-0.4 indexing; loss.item() on >= 0.4)
Author: aleju, Project: cat-bbs, Lines: 22, Source: train.py
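
Note that volatile=True and loss.data[0] are pre-0.4 idioms. As a sketch (not from the original repo), the same control flow on PyTorch >= 0.4 would use torch.set_grad_enabled and loss.item():

import torch

def run_batch_modern(inputs, outputs_gt, model, criterion, optimizer, train):
    """Sketch of run_batch for PyTorch >= 0.4."""
    model.train() if train else model.eval()
    with torch.set_grad_enabled(train):  # replaces volatile=not train
        outputs_pred = model(inputs)
        loss = criterion(outputs_pred, outputs_gt)
    if train:
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return loss.item()  # replaces loss.data[0]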

Example 2: _anchor_target_layer

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def _anchor_target_layer(self, rpn_cls_score):
    rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, \
        rpn_bbox_outside_weights, rpn_loss_weights = anchor_target_layer(
            rpn_cls_score.data,
            self._gt_boxes.data.cpu().numpy(),
            self._pseudo_proposals['gt_scores'].data.cpu().numpy(),
            self._im_info,
            self._feat_stride,
            self._anchors.data.cpu().numpy(),
            self._num_anchors)

    rpn_labels = Variable(torch.from_numpy(rpn_labels).float().cuda()) #.set_shape([1, 1, None, None])
    rpn_bbox_targets = Variable(torch.from_numpy(rpn_bbox_targets).float().cuda())#.set_shape([1, None, None, self._num_anchors * 4])
    rpn_bbox_inside_weights = Variable(torch.from_numpy(rpn_bbox_inside_weights).float().cuda())#.set_shape([1, None, None, self._num_anchors * 4])
    rpn_bbox_outside_weights = Variable(torch.from_numpy(rpn_bbox_outside_weights).float().cuda())#.set_shape([1, None, None, self._num_anchors * 4])
    rpn_loss_weights = Variable(torch.from_numpy(rpn_loss_weights).float().cuda())#.set_shape([self._num_anchors])
    
    rpn_labels = rpn_labels.long()
    self._anchor_targets['rpn_labels'] = rpn_labels
    self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets
    self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights
    self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights
    self._anchor_targets['rpn_loss_weights'] = rpn_loss_weights
    
    for k in self._anchor_targets.keys():
      self._score_summaries[k] = self._anchor_targets[k]

    return rpn_labels 
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 24, Source: network.py

Example 3: update

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def update(self, gt, pred):
    """
    gt, pred are tensors of size (..., C, H, W) with values in the range [0, 1].
    """
    C, H, W = gt.size()[-3:]
    if isinstance(gt, torch.Tensor):
      gt = Variable(gt)
    if isinstance(pred, torch.Tensor):
      pred = Variable(pred)

    mse_score = self.mse_loss(pred, gt)
    eps = 1e-4
    # Clamp predictions away from 0 and 1 so the BCE log terms stay finite.
    pred.data[pred.data < eps] = eps
    pred.data[pred.data > 1 - eps] = 1 - eps
    bce_score = self.bce_loss(pred, gt)
    bce_score = bce_score.item() * C * H * W
    mse_score = mse_score.item() * C * H * W
    self.bce_results.append(bce_score)
    self.mse_results.append(mse_score) 
Author: jthsieh, Project: DDPAE-video-prediction, Lines: 21, Source: metrics.py
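
The clamping to [eps, 1 - eps] above guards the BCE term against log(0). A quick standalone check (not from the original repo) shows why it is needed:

import torch

pred = torch.tensor([0.0, 1.0])  # fully saturated probabilities
# The raw BCE terms -log(p) and -log(1 - p) diverge at the boundaries:
print(-torch.log(pred[0]), -torch.log(1 - pred[1]))        # inf, inf
eps = 1e-4
clamped = pred.clamp(eps, 1 - eps)
print(-torch.log(clamped[0]), -torch.log(1 - clamped[1]))  # ~9.21, ~9.21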

Example 4: pose_inv_full

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def pose_inv_full(pose):
  '''
  param pose: N x 6
  Invert the 2x3 affine transformation matrix.
  '''
  N, _ = pose.size()
  b = pose.view(N, 2, 3)[:, :, 2:]
  # A^{-1}
  # Calculate determinant
  determinant = (pose[:, 0] * pose[:, 4] - pose[:, 1] * pose[:, 3] + 1e-8).view(N, 1)
  indices = Variable(torch.LongTensor([4, 1, 3, 0]).cuda())
  scale = Variable(torch.Tensor([1, -1, -1, 1]).cuda())
  A_inv = torch.index_select(pose, 1, indices) * scale / determinant
  A_inv = A_inv.view(N, 2, 2)
  # b' = - A^{-1} b
  b_inv = - A_inv.matmul(b).view(N, 2, 1)
  transformer_inv = torch.cat([A_inv, b_inv], dim=2)
  return transformer_inv 
Author: jthsieh, Project: DDPAE-video-prediction, Lines: 20, Source: DDPAE_utils.py
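
A quick sanity check of the algebra: composing pose_inv_full(pose) with the original transform should yield the identity. A minimal sketch, assuming a CUDA build (pose_inv_full calls .cuda() internally); random poses can be near-singular, so loosen atol if the assert trips:

import torch

def compose(p, q):
    # Compose two N x 2 x 3 affine transforms: x -> A_p (A_q x + b_q) + b_p.
    A_p, b_p = p[:, :, :2], p[:, :, 2:]
    A_q, b_q = q[:, :, :2], q[:, :, 2:]
    return torch.cat([A_p.matmul(A_q), A_p.matmul(b_q) + b_p], dim=2)

pose = torch.randn(4, 6).cuda()
inv = pose_inv_full(pose)                    # N x 2 x 3, defined above
composed = compose(inv, pose.view(4, 2, 3))  # should be [I | 0]
identity = torch.eye(2).unsqueeze(0).expand(4, 2, 2).cuda()
assert torch.allclose(composed[:, :, :2], identity, atol=1e-3)
assert composed[:, :, 2].abs().max() < 1e-3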

Example 5: train

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def train(self, input, output):
    '''
    param input: video of size (batch_size, n_frames_input, C, H, W)
    param output: video of size (batch_size, self.n_frames_output, C, H, W)
    Return video_dict, loss_dict
    '''
    input = Variable(input.cuda(), requires_grad=False)
    output = Variable(output.cuda(), requires_grad=False)
    assert input.size(1) == self.n_frames_input

    # SVI
    batch_size, _, C, H, W = input.size()
    numel = batch_size * self.n_frames_total * C * H * W
    loss_dict = {}
    for name, svi in self.svis.items():
      # loss = svi.step(input, output)
      # Note: backward() is already called in loss_and_grads.
      loss = svi.loss_and_grads(svi.model, svi.guide, input, output)
      loss_dict[name] = loss / numel

    # Update parameters
    self.optimizer.step()
    self.optimizer.zero_grad()

    return {}, loss_dict 
Author: jthsieh, Project: DDPAE-video-prediction, Lines: 27, Source: DDPAE.py

Example 6: test

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def test(self, input, output):
    '''
    Return decoded output.
    '''
    input = Variable(input.cuda())
    batch_size, _, _, H, W = input.size()
    output = Variable(output.cuda())
    gt = torch.cat([input, output], dim=1)

    latent = self.encode(input, sample=False)
    decoded_output, components = self.decode(latent, input.size(0))
    decoded_output = decoded_output.view(*gt.size())
    components = components.view(batch_size, self.n_frames_total, self.total_components,
                                 self.n_channels, H, W)
    latent['components'] = components
    decoded_output = decoded_output.clamp(0, 1)

    self.save_visuals(gt, decoded_output, components, latent)
    return decoded_output.cpu(), latent 
Author: jthsieh, Project: DDPAE-video-prediction, Lines: 21, Source: DDPAE.py

Example 7: _make_layers

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.ReLU(inplace=True)]
                in_channels = x
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size()) 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 19, Source: vgg.py
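
For context, cfg mixes conv output-channel counts with 'M' markers that insert 2x2 max-pooling. The VGG11 entry in this family of CIFAR implementations is typically:

cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
}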

Example 8: forward

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def forward(self, x):
        out = self.pre_layers(x)
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)
        out = self.a5(out)
        out = self.b5(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out

# net = GoogLeNet()
# x = torch.randn(1,3,32,32)
# y = net(Variable(x))
# print(y.size()) 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 24, Source: googlenet.py

Example 9: find_bbs

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def find_bbs(img, model, conf_threshold, input_size):
    """Find bounding boxes in an image."""
    # pad image so that it's square
    img_pad, (pad_top, pad_right, pad_bottom, pad_left) = to_aspect_ratio_add(img, 1.0, return_paddings=True)

    # resize padded image to desired input size
    # "linear" interpolation seems to be enough here for 400x400 or larger images
    # change to "area" or "cubic" for marginally better quality
    img_rs = ia.imresize_single_image(img_pad, (input_size, input_size), interpolation="linear")

    # convert to torch-ready input variable
    inputs_np = (np.array([img_rs])/255.0).astype(np.float32).transpose(0, 3, 1, 2)
    inputs = torch.from_numpy(inputs_np)
    inputs = Variable(inputs, volatile=True)
    if GPU >= 0:
        inputs = inputs.cuda(GPU)

    # apply model and measure the model's time
    time_start = time.time()
    outputs_pred = model(inputs)
    time_req = time.time() - time_start

    # process the model's output (i.e. convert heatmaps to BBs)
    result = ModelResult(
        outputs_pred,
        inputs_np,
        img,
        (pad_top, pad_right, pad_bottom, pad_left)
    )
    bbs = result.get_bbs()

    return bbs, time_req 
Author: aleju, Project: cat-bbs, Lines: 34, Source: predict_video.py

Example 10: generate_video_image

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def generate_video_image(batch_idx, examples, model):
    """Generate frames for a video of the training progress.
    Each frame contains N examples shown in a grid. Each example shows
    the input image and the main heatmap predicted by the model."""
    start_time = time.time()
    #print("A", time.time() - start_time)
    model.eval()

    # fw through network
    inputs, outputs_gt = examples_to_batch(examples, iaa.Noop())
    inputs_torch = torch.from_numpy(inputs)
    inputs_torch = Variable(inputs_torch, volatile=True)
    if GPU >= 0:
        inputs_torch = inputs_torch.cuda(GPU)
    outputs_pred_torch = model(inputs_torch)
    #print("B", time.time() - start_time)

    outputs_pred = outputs_pred_torch.cpu().data.numpy()
    inputs = (inputs * 255).astype(np.uint8).transpose(0, 2, 3, 1)
    #print("C", time.time() - start_time)
    heatmaps = []
    for i in range(inputs.shape[0]):
        hm_drawn = draw_heatmap(inputs[i], np.squeeze(outputs_pred[i][0]), alpha=0.5)
        heatmaps.append(hm_drawn)
    #print("D", time.time() - start_time)
    grid = ia.draw_grid(heatmaps, cols=11, rows=6).astype(np.uint8)
    #grid_rs = misc.imresize(grid, (720-32, 1280-32))
    # pad by 42 for the text and to get the image to 720p aspect ratio
    grid_pad = np.pad(grid, ((0, 42), (0, 0), (0, 0)), mode="constant")
    grid_pad_text = ia.draw_text(
        grid_pad,
        x=grid_pad.shape[1]-220,
        y=grid_pad.shape[0]-35,
        text="Batch %05d" % (batch_idx,),
        color=[255, 255, 255]
    )
    #print("E", time.time() - start_time)
    return grid_pad_text 
Author: aleju, Project: cat-bbs, Lines: 40, Source: train.py

Example 11: _crop_pool_layer

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def _crop_pool_layer(self, bottom, rois, max_pool=True): # done
    # implement it using stn
    # box to affine
    # input (x1,y1,x2,y2)
    """
    [  x2-x1             x1 + x2 - W + 1  ]
    [  -----      0      ---------------  ]
    [  W - 1                  W - 1       ]
    [                                     ]
    [           y2-y1    y1 + y2 - H + 1  ]
    [    0      -----    ---------------  ]
    [           H - 1         H - 1       ]
    """
    rois = rois.detach()

    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0

    height = bottom.size(2)
    width = bottom.size(3)

    # affine theta
    theta = Variable(rois.data.new(rois.size(0), 2, 3).zero_())
    theta[:, 0, 0] = (x2 - x1) / (width - 1)
    theta[:, 0, 2] = (x1 + x2 - width + 1) / (width - 1)
    theta[:, 1, 1] = (y2 - y1) / (height - 1)
    theta[:, 1, 2] = (y1 + y2 - height + 1) / (height - 1)

    if max_pool:
      pre_pool_size = cfg.POOLING_SIZE * 2
      grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, pre_pool_size, pre_pool_size)))
      crops = F.grid_sample(bottom.expand(rois.size(0), bottom.size(1), bottom.size(2), bottom.size(3)), grid)
      crops = F.max_pool2d(crops, 2, 2)
    else:
      grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, cfg.POOLING_SIZE, cfg.POOLING_SIZE)))
      crops = F.grid_sample(bottom.expand(rois.size(0), bottom.size(1), bottom.size(2), bottom.size(3)), grid)
    
    return crops 
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 42, Source: network.py
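
The theta entries implement the docstring's matrix: they map the output grid's normalized [-1, 1] coordinates onto the box region of the input. A standalone sketch on a toy tensor (on PyTorch >= 1.3, pass align_corners=True to match the (W - 1)/(H - 1) convention used here):

import torch
import torch.nn.functional as F

feat = torch.arange(36, dtype=torch.float32).view(1, 1, 6, 6)
H, W = feat.size(2), feat.size(3)
x1, y1, x2, y2 = 1.0, 1.0, 4.0, 4.0  # box in feature-map pixel coordinates

theta = torch.zeros(1, 2, 3)
theta[:, 0, 0] = (x2 - x1) / (W - 1)
theta[:, 0, 2] = (x1 + x2 - W + 1) / (W - 1)
theta[:, 1, 1] = (y2 - y1) / (H - 1)
theta[:, 1, 2] = (y1 + y2 - H + 1) / (H - 1)

grid = F.affine_grid(theta, torch.Size((1, 1, 4, 4)), align_corners=True)
crop = F.grid_sample(feat, grid, align_corners=True)
print(crop.squeeze())  # equals the window feat[0, 0, 1:5, 1:5]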

Example 12: _anchor_component

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def _anchor_component(self, height, width):
    # just to get the shape right
    #height = int(math.ceil(self._im_info.data[0, 0] / self._feat_stride[0]))
    #width = int(math.ceil(self._im_info.data[0, 1] / self._feat_stride[0]))
    anchors, anchor_length = generate_anchors_pre(
        height, width, self._feat_stride, self._anchor_scales, self._anchor_ratios)
    self._anchors = Variable(torch.from_numpy(anchors).cuda())
    self._anchor_length = anchor_length 
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 11, Source: network.py

Example 13: _generate_pseudo_gtbox

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def _generate_pseudo_gtbox(self, fuse_prob, boxes):  # inputs are two Variables
    # Returns gt_boxes as Variable(torch.from_numpy(gt_boxes).cuda()),
    # size: gt_num x (x1, y1, x2, y2, class)
    gt_boxes, proposals = generate_pseudo_gtbox(boxes, fuse_prob, self._labels)
    return gt_boxes, proposals 
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 6, Source: network.py

Example 14: extract_head

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def extract_head(self, image): 
    feat = self._layers["head"](Variable(
        torch.from_numpy(image.transpose([0, 3, 1, 2])).cuda(), volatile=True))
    return feat

  # only useful during testing mode 
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 7, Source: network.py

Example 15: forward

# Required import: from torch import autograd [as alias]
# Or: from torch.autograd import Variable [as alias]
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        outputs = Variable(torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)).cuda()

        for roi_ind, roi in enumerate(rois):
            batch_ind = int(roi[0].data[0])
            roi_start_w, roi_start_h, roi_end_w, roi_end_h = np.round(
                roi[1:].data.cpu().numpy() * self.spatial_scale).astype(int)
            roi_width = max(roi_end_w - roi_start_w + 1, 1)
            roi_height = max(roi_end_h - roi_start_h + 1, 1)
            bin_size_w = float(roi_width) / float(self.pooled_width)
            bin_size_h = float(roi_height) / float(self.pooled_height)

            for ph in range(self.pooled_height):
                hstart = int(np.floor(ph * bin_size_h))
                hend = int(np.ceil((ph + 1) * bin_size_h))
                hstart = min(data_height, max(0, hstart + roi_start_h))
                hend = min(data_height, max(0, hend + roi_start_h))
                for pw in range(self.pooled_width):
                    wstart = int(np.floor(pw * bin_size_w))
                    wend = int(np.ceil((pw + 1) * bin_size_w))
                    wstart = min(data_width, max(0, wstart + roi_start_w))
                    wend = min(data_width, max(0, wend + roi_start_w))

                    is_empty = (hend <= hstart) or (wend <= wstart)
                    if is_empty:
                        outputs[roi_ind, :, ph, pw] = 0
                    else:
                        data = features[batch_ind]
                        outputs[roi_ind, :, ph, pw] = torch.max(
                            torch.max(data[:, hstart:hend, wstart:wend], 1)[0], 2)[0].view(-1)

        return outputs 
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 36, Source: roi_pool_py.py
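
This pure-Python loop is a readable reference implementation; torchvision now ships the same operation natively. A minimal sketch of the built-in equivalent (rois rows are (batch_index, x1, y1, x2, y2) in input-image coordinates, as above; rounding at bin boundaries may differ slightly):

import torch
from torchvision.ops import roi_pool

features = torch.randn(1, 256, 38, 50)
rois = torch.tensor([[0., 64., 64., 256., 192.]])
pooled = roi_pool(features, rois, output_size=(7, 7), spatial_scale=1.0 / 16)
print(pooled.shape)  # torch.Size([1, 256, 7, 7])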


Note: The torch.autograd.Variable examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's License before using or distributing the code; do not republish without permission.