

Python torch.stack Method Code Examples

This article collects typical usage examples of the torch.stack method in Python. If you are wondering how exactly to use torch.stack, or are looking for concrete examples of it in practice, the curated code samples below may help. You can also explore further usage examples from the torch package, where the method lives.


The following shows 15 code examples of the torch.stack method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
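
Before diving in, a minimal sketch of what torch.stack itself does (values invented for illustration): it joins a sequence of same-shaped tensors along a new dimension, unlike torch.cat, which joins along an existing one.

import torch

a = torch.tensor([1.0, 2.0])
b = torch.tensor([3.0, 4.0])

s0 = torch.stack([a, b], dim=0)  # shape (2, 2): rows a and b
s1 = torch.stack([a, b], dim=1)  # shape (2, 2): a and b as columns
c = torch.cat([a, b], dim=0)     # shape (4,): no new dimension
print(s0.shape, s1.shape, c.shape)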

Example 1: bbox_transform

# Required import: import torch
# Or: from torch import stack
def bbox_transform(ex_rois, gt_rois):
  ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
  ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
  ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
  ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights

  gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
  gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
  gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
  gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights

  targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
  targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
  targets_dw = torch.log(gt_widths / ex_widths)
  targets_dh = torch.log(gt_heights / ex_heights)

  targets = torch.stack(
    (targets_dx, targets_dy, targets_dw, targets_dh), 1)
  return targets 
Author: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 21 | Source: bbox_transform.py
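
A hedged usage sketch of bbox_transform above, with two hypothetical anchor/ground-truth box pairs in (x1, y1, x2, y2) format (values invented for illustration):

import torch

ex_rois = torch.tensor([[0., 0., 9., 9.], [10., 10., 29., 29.]])
gt_rois = torch.tensor([[1., 1., 10., 10.], [8., 12., 30., 28.]])
targets = bbox_transform(ex_rois, gt_rois)
print(targets.shape)  # torch.Size([2, 4]): one (dx, dy, dw, dh) row per box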

Example 2: clip_boxes

# Required import: import torch
# Or: from torch import stack
def clip_boxes(boxes, im_shape):
  """
  Clip boxes to image boundaries.
  boxes must be tensor or Variable, im_shape can be anything but Variable
  """

  boxes = boxes.view(boxes.size(0), -1, 4)
  boxes = torch.stack(
    [boxes[:, :, 0].clamp(0, im_shape[1] - 1),
     boxes[:, :, 1].clamp(0, im_shape[0] - 1),
     boxes[:, :, 2].clamp(0, im_shape[1] - 1),
     boxes[:, :, 3].clamp(0, im_shape[0] - 1)], 2).view(boxes.size(0), -1)

  return boxes 
Author: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 19 | Source: bbox_transform.py
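
A hedged check of clip_boxes with one hypothetical out-of-bounds box; im_shape is (height, width), so x is clamped to [0, 99] and y to [0, 63]:

import torch

boxes = torch.tensor([[-5., -5., 120., 80.]])
print(clip_boxes(boxes, (64, 100)))  # tensor([[ 0.,  0., 99., 63.]])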

Example 3: centerness_target

# Required import: import torch
# Or: from torch import stack
def centerness_target(self, anchors, bbox_targets):
        # Only compute centerness targets for positive samples; otherwise NaNs may appear.
        gts = self.bbox_coder.decode(anchors, bbox_targets)
        anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
        anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
        l_ = anchors_cx - gts[:, 0]
        t_ = anchors_cy - gts[:, 1]
        r_ = gts[:, 2] - anchors_cx
        b_ = gts[:, 3] - anchors_cy

        left_right = torch.stack([l_, r_], dim=1)
        top_bottom = torch.stack([t_, b_], dim=1)
        centerness = torch.sqrt(
            (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
            (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
        assert not torch.isnan(centerness).any()
        return centerness 
Author: open-mmlab | Project: mmdetection | Lines: 19 | Source: atss_head.py
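
The centerness formula above, evaluated in isolation on one hypothetical anchor whose center sits 3 px from the left edge of its box, 7 px from the right, and 5 px from top and bottom; it yields sqrt((3/7) * (5/5)) ≈ 0.655:

import torch

l_, r_, t_, b_ = (torch.tensor([v]) for v in (3., 7., 5., 5.))
left_right = torch.stack([l_, r_], dim=1)
top_bottom = torch.stack([t_, b_], dim=1)
centerness = torch.sqrt(
    (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
    (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
print(centerness)  # tensor([0.6547])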

Example 4: offset_to_pts

# Required import: import torch
# Or: from torch import stack
def offset_to_pts(self, center_list, pred_list):
        """Change from point offset to point coordinate."""
        pts_list = []
        for i_lvl in range(len(self.point_strides)):
            pts_lvl = []
            for i_img in range(len(center_list)):
                pts_center = center_list[i_img][i_lvl][:, :2].repeat(
                    1, self.num_points)
                pts_shift = pred_list[i_lvl][i_img]
                yx_pts_shift = pts_shift.permute(1, 2, 0).view(
                    -1, 2 * self.num_points)
                y_pts_shift = yx_pts_shift[..., 0::2]
                x_pts_shift = yx_pts_shift[..., 1::2]
                xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
                xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
                pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
                pts_lvl.append(pts)
            pts_lvl = torch.stack(pts_lvl, 0)
            pts_list.append(pts_lvl)
        return pts_list 
Author: open-mmlab | Project: mmdetection | Lines: 22 | Source: reppoints_head.py
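
The yx-to-xy de-interleaving trick used above, shown in isolation with a hypothetical flat offset vector (y values at even indices, x values at odd):

import torch

num_points = 3
yx = torch.arange(2 * num_points, dtype=torch.float).unsqueeze(0)  # (1, 6)
y, x = yx[..., 0::2], yx[..., 1::2]
xy = torch.stack([x, y], -1).view(*yx.shape[:-1], -1)
print(xy)  # tensor([[1., 0., 3., 2., 5., 4.]])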

Example 5: roi_rescale

# Required import: import torch
# Or: from torch import stack
def roi_rescale(self, rois, scale_factor):
        """Scale RoI coordinates by scale factor.

        Args:
            rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
            scale_factor (float): Scale factor that RoI will be multiplied by.

        Returns:
            torch.Tensor: Scaled RoI.
        """

        cx = (rois[:, 1] + rois[:, 3]) * 0.5
        cy = (rois[:, 2] + rois[:, 4]) * 0.5
        w = rois[:, 3] - rois[:, 1]
        h = rois[:, 4] - rois[:, 2]
        new_w = w * scale_factor
        new_h = h * scale_factor
        x1 = cx - new_w * 0.5
        x2 = cx + new_w * 0.5
        y1 = cy - new_h * 0.5
        y2 = cy + new_h * 0.5
        new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
        return new_rois 
Author: open-mmlab | Project: mmdetection | Lines: 25 | Source: base_roi_extractor.py
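
Since roi_rescale never reads self, it can be exercised standalone for a quick sanity check (hypothetical RoI; the first column is the batch index):

import torch

rois = torch.tensor([[0., 10., 10., 30., 30.]])
print(roi_rescale(None, rois, scale_factor=2.0))
# tensor([[ 0.,  0.,  0., 40., 40.]]) -- same center (20, 20), doubled size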

Example 6: distance2bbox

# Required import: import torch
# Or: from torch import stack
def distance2bbox(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        distance (Tensor): Distance from the given point to 4
            boundaries (left, top, right, bottom).
        max_shape (tuple): Shape of the image.

    Returns:
        Tensor: Decoded bboxes.
    """
    x1 = points[:, 0] - distance[:, 0]
    y1 = points[:, 1] - distance[:, 1]
    x2 = points[:, 0] + distance[:, 2]
    y2 = points[:, 1] + distance[:, 3]
    if max_shape is not None:
        x1 = x1.clamp(min=0, max=max_shape[1])
        y1 = y1.clamp(min=0, max=max_shape[0])
        x2 = x2.clamp(min=0, max=max_shape[1])
        y2 = y2.clamp(min=0, max=max_shape[0])
    return torch.stack([x1, y1, x2, y2], -1) 
Author: open-mmlab | Project: mmdetection | Lines: 24 | Source: transforms.py
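
A hedged one-box check of distance2bbox; max_shape is (height, width), and the distances are (left, top, right, bottom) from the point:

import torch

points = torch.tensor([[50., 40.]])
distance = torch.tensor([[10., 5., 20., 15.]])
print(distance2bbox(points, distance, max_shape=(60, 80)))
# tensor([[40., 35., 70., 55.]])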

Example 7: r_duvenaud

# Required import: import torch
# Or: from torch import stack
def r_duvenaud(self, h):
        # layers
        aux = []
        for l in range(len(h)):
            param_sz = self.learn_args[l].size()
            parameter_mat = torch.t(self.learn_args[l])[None, ...].expand(
                h[l].size(0), param_sz[1], param_sz[0])

            aux.append(torch.transpose(torch.bmm(parameter_mat, torch.transpose(h[l], 1, 2)), 1, 2))

            for j in range(0, aux[l].size(1)):
                # Mask out whole-zero vectors so padded entries stay zero.
                mask = (torch.sum(aux[l][:, j, :] != 0, 1) > 0)
                aux[l][:, j, :] = (nn.Softmax(dim=1)(aux[l][:, j, :].clone()) *
                                   mask.expand_as(aux[l][:, j, :]).type_as(aux[l]))

        aux = torch.sum(torch.sum(torch.stack(aux, 3), 3), 1)
        return self.learn_modules[0](torch.squeeze(aux)) 
Author: priba | Project: nmp_qc | Lines: 18 | Source: ReadoutFunction.py

Example 8: forward

# Required import: import torch
# Or: from torch import stack
def forward(self, query, key):
        querys = self.W_query(query)  # [N, T_q, num_units]
        keys = self.W_key(key)  # [N, T_k, num_units]
        values = self.W_value(key)

        split_size = self.num_units // self.num_heads
        querys = torch.stack(torch.split(querys, split_size, dim=2), dim=0)  # [h, N, T_q, num_units/h]
        keys = torch.stack(torch.split(keys, split_size, dim=2), dim=0)  # [h, N, T_k, num_units/h]
        values = torch.stack(torch.split(values, split_size, dim=2), dim=0)  # [h, N, T_k, num_units/h]

        # score = softmax(QK^T / (d_k ** 0.5))
        scores = torch.matmul(querys, keys.transpose(2, 3))  # [h, N, T_q, T_k]
        scores = scores / (self.key_dim ** 0.5)
        scores = F.softmax(scores, dim=3)

        # out = score * V
        out = torch.matmul(scores, values)  # [h, N, T_q, num_units/h]
        out = torch.cat(torch.split(out, 1, dim=0), dim=3).squeeze(0)  # [N, T_q, num_units]

        return out 
Author: KinglittleQ | Project: GST-Tacotron | Lines: 22 | Source: GST.py
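
The head-splitting idiom from the forward pass above, isolated with hypothetical sizes: torch.split cuts the feature dimension into num_heads chunks, and torch.stack adds a new leading head dimension:

import torch

N, T, num_units, num_heads = 2, 5, 8, 4
x = torch.randn(N, T, num_units)
heads = torch.stack(torch.split(x, num_units // num_heads, dim=2), dim=0)
print(heads.shape)  # torch.Size([4, 2, 5, 2]) == [h, N, T, num_units/h]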

Example 9: __next__

# Required import: import torch
# Or: from torch import stack
def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, interp=cv2.INTER_LINEAR)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Normalize RGB
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB
        img = np.ascontiguousarray(img, dtype=np.float16 if self.half else np.float32)  # uint8 to fp16/fp32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0

        return self.sources, img, img0, None 
Author: zbyuan | Project: pruning_yolov3 | Lines: 21 | Source: datasets.py

Example 10: forward

# Required import: import torch
# Or: from torch import stack
def forward(self, input, target):
        features = self.feats(input)
        output_indices = list(range(target.size(1)))
        # Iterate over the fully connected layers for each imu, perform a
        # forward pass, and record the output.
        imu_out = []
        for i in self.imus:
            imu_i = getattr(self, 'imu{}'.format(i))
            imu_out.append(imu_i(features))
        # Add a singleton dim at 1 for sequence length, which is always 1 in
        # this model, then L2-normalize each imu prediction.
        output = torch.stack(imu_out, dim=1).unsqueeze(1)
        output /= output.norm(2, 3, keepdim=True)
        return output, target, torch.LongTensor(output_indices) 
Author: ehsanik | Project: dogTorch | Lines: 18 | Source: resnet18_image2imu_regress.py

Example 11: forward

# Required import: import torch
# Or: from torch import stack
def forward(self, input, target):

        input = input[:, :self.args.input_length * 3]
        target = target[:, -self.args.output_length:]

        features = self.resnet_features(input)
        output_indices = list(
            range(self.args.sequence_length - self.args.output_length,
                  self.args.sequence_length))
        # Iterate over fully connecteds for each imu, perform forward pass and
        # record the output.

        all_output = []
        for imu_id in range(self.args.output_length):
            imu_out = []
            for i in self.imus:
                imu_i = getattr(self, 'imu{}'.format(i))
                imu_out.append(imu_i(features))
            # Add a singleton dim at 1 for sequence length, which is 1 for
            # each individual prediction step in this model.
            output = torch.stack(imu_out, dim=1).unsqueeze(1)
            all_output.append(output)
        all_output = torch.cat(all_output, dim=1)
        return all_output, target, torch.LongTensor(output_indices) 
Author: ehsanik | Project: dogTorch | Lines: 26 | Source: resnet_one_tower_prediction.py

Example 12: __getitem__

# Required import: import torch
# Or: from torch import stack
def __getitem__(self, idx):
        fid = self.data_set_list[idx]
        if self.read_features:
            features = []
            for i in range(self.sequence_length):
                feature_path = os.path.join(
                    self.features_dir,
                    self.frames_metadata[fid + i]['cur_frame'] + '.pytar')
                features.append(torch.load(feature_path))
            input = torch.stack(features)
        else:
            image = self.load_and_resize(
                os.path.join(self.root_dir, 'images', fid))
            segment = self.load_and_resize_segmentation(
                os.path.join(self.root_dir, 'walkable', fid))

        # The two 0s are just placeholders; they can be replaced by any values.
        # Note that this return only covers the else-branch: when
        # read_features is True, `image` and `segment` are undefined here and
        # the stacked `input` is never returned (a bug in the original code).
        return (image, segment, 0, 0, ['images' + fid]) 
Author: ehsanik | Project: dogTorch | Lines: 20 | Source: nyu_walkable_surface_dataset.py

Example 13: generate_embedding

# Required import: import torch
# Or: from torch import stack
def generate_embedding(bert_model, labels):
    """Generate bert's embedding from fine-tuned model."""
    batch_size, time = labels.shape

    cls_ids = torch.full(
        (batch_size, 1), bert_model.bert_text_encoder.cls_idx, dtype=labels.dtype, device=labels.device)
    bert_labels = torch.cat([cls_ids, labels], 1)
    # replace eos with sep
    eos_idx = bert_model.bert_text_encoder.eos_idx
    sep_idx = bert_model.bert_text_encoder.sep_idx
    bert_labels[bert_labels == eos_idx] = sep_idx

    embedding, _ = bert_model.bert(bert_labels, output_all_encoded_layers=True)
    # sum over all layers embedding
    embedding = torch.stack(embedding).sum(0)
    # get rid of cls
    embedding = embedding[:, 1:]

    assert labels.shape == embedding.shape[:-1]

    return embedding 
Author: Alexander-H-Liu | Project: End-to-end-ASR-Pytorch | Lines: 23 | Source: bert_embedding.py
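
The layer-summing pattern from generate_embedding, in isolation with hypothetical sizes (12 encoder layers, each of shape (batch, time, dim)):

import torch

layers = [torch.randn(2, 7, 16) for _ in range(12)]
summed = torch.stack(layers).sum(0)  # stack to (12, 2, 7, 16), sum over layers
print(summed.shape)  # torch.Size([2, 7, 16])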

Example 14: bbox_transform

# Required import: import torch
# Or: from torch import stack
def bbox_transform(ex_rois, gt_rois):
    ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
    ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights

    gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
    gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights

    targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
    targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
    targets_dw = torch.log(gt_widths / ex_widths)
    targets_dh = torch.log(gt_heights / ex_heights)

    targets = torch.stack(
        (targets_dx, targets_dy, targets_dw, targets_dh), 1)

    return targets 
Author: guoruoqian | Project: cascade-rcnn_Pytorch | Lines: 22 | Source: bbox_transform.py

Example 15: forward

# Required import: import torch
# Or: from torch import stack
def forward(self, inputs, hidden=None):
        if hidden is None and self.mode != "jordan":
            batch_size = inputs.size(0)
            hidden = torch.autograd.Variable(torch.zeros(batch_size,
                                                         self.hidden_size))
            if self.cuda:
                hidden = hidden.cuda()
        output_forward, hidden_forward = self._forward(inputs, hidden)
        output_forward = torch.stack(output_forward, dim=0)
        if not self.bidirectional:
            if self.batch_first:
                output_forward = output_forward.transpose(0, 1)
            return output_forward, hidden_forward

        output_reversed, hidden_reversed = self._reversed_forward(inputs, hidden)
        hidden = torch.cat([hidden_forward, hidden_reversed], dim=hidden_forward.dim() - 1)
        output_reversed = torch.stack(output_reversed, dim=0)
        output = torch.cat([output_forward, output_reversed],
                           dim=output_reversed.data.dim() - 1)
        if self.batch_first:
            output = output.transpose(0, 1)
        return output, hidden 
Author: llhthinker | Project: slot-filling | Lines: 27 | Source: rnn.py
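
The same collect-then-stack idiom in its simplest form: per-timestep RNN outputs gathered in a list, stacked along a new time axis, then transposed for batch_first (hypothetical sizes):

import torch

steps = [torch.randn(4, 32) for _ in range(10)]  # 10 timesteps, batch 4
output = torch.stack(steps, dim=0)               # (10, 4, 32)
output = output.transpose(0, 1)                  # batch_first: (4, 10, 32)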


Note: The torch.stack method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any use or distribution should follow the corresponding project's license. Do not repost without permission.