

Python image_list.to_image_list Method Code Examples

This article collects typical usage examples of the Python method maskrcnn_benchmark.structures.image_list.to_image_list, gathered from open-source projects. If you are unsure what image_list.to_image_list does, how to call it, or where to find working examples, the curated samples below should help. You can also explore further usage examples from the module it lives in, maskrcnn_benchmark.structures.image_list.


The 12 code examples below demonstrate image_list.to_image_list, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
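
Before diving into the examples, here is a minimal sketch of what to_image_list does: it batches a list of image tensors of different sizes into a single padded ImageList. The tensor sizes below are made up for illustration.

import torch
from maskrcnn_benchmark.structures.image_list import to_image_list

# two images of different sizes are padded to a common (H, W) and stacked;
# size_divisible=32 rounds the padded H and W up to multiples of 32
images = [torch.rand(3, 480, 640), torch.rand(3, 512, 600)]
image_list = to_image_list(images, 32)
print(image_list.tensors.shape)   # torch.Size([2, 3, 512, 640])
print(image_list.image_sizes)     # the original (H, W) of each input image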

Example 1: im_detect_bbox_hflip

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def im_detect_bbox_hflip(model, images, target_scale, target_max_size, device):
    """
    Performs bbox detection on the horizontally flipped image.
    Function signature is the same as for im_detect_bbox.
    """
    transform = TT.Compose([
        T.Resize(target_scale, target_max_size),
        TT.RandomHorizontalFlip(1.0),
        TT.ToTensor(),
        T.Normalize(
            mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255
        )
    ])
    images = [transform(image) for image in images]
    images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY)
    boxlists = model(images.to(device))

    # Invert the detections computed on the flipped image
    boxlists_inv = [boxlist.transpose(0) for boxlist in boxlists]
    return boxlists_inv 
Author: simaiden, Project: Clothing-Detection, Lines: 22, Source: bbox_aug.py
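
The inversion step above relies on BoxList.transpose(0), where method 0 is FLIP_LEFT_RIGHT. A hedged, self-contained illustration of that mapping (the box coordinates and image size are made up):

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList

# a box detected on a horizontally flipped 200x100 image ...
boxes = torch.tensor([[10.0, 20.0, 50.0, 80.0]])   # xyxy
boxlist = BoxList(boxes, (200, 100), mode="xyxy")  # image size is (width, height)
# ... maps back to the unflipped image by mirroring its x-coordinates
print(boxlist.transpose(0).bbox)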

Example 2: forward

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def forward(self, images, targets=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        images = to_image_list(images)
        features = self.backbone(images.tensors)
        proposals, proposal_losses = self.rpn(images, features, targets)
        if self.cfg.MODEL.RPN_ONLY:
            x = features
            result = proposals
            detector_losses = {}
        else:
            x, result, detector_losses = self.roi_heads(features, proposals, targets)

        if self.training:
            losses = {}
            losses.update(detector_losses)
            losses.update(proposal_losses)
            return losses

        return result 
Author: mlperf, Project: training_results_v0.5, Lines: 27, Source: generalized_rcnn.py
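
The two return conventions in this forward method are easy to miss on the calling side. A hedged sketch, assuming `model`, `images`, and `targets` were built elsewhere (e.g. from a config and a data loader):

import torch

# training mode: the model returns a dict of losses to backpropagate
model.train()
loss_dict = model(images, targets)
total_loss = sum(loss for loss in loss_dict.values())

# eval mode: the model returns one BoxList of detections per image
model.eval()
with torch.no_grad():
    detections = model(images)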

Example 3: forward

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def forward(self, images, targets=None, features=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[BoxList]): ground-truth boxes present in the image (optional)
            features (list[Tensor]): encoder output features (optional)

        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] whose entries contain additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).

        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        if features is None:
            images = to_image_list(images)
            features = self.encoder(images.tensors)
        return self.decoder(images, features, targets) 
Author: Lausannen, Project: NAS-FCOS, Lines: 22, Source: single_stage_detector.py

Example 4: __call__

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def __call__(self, batch):
        transposed_batch = list(zip(*batch))
        images = to_image_list(transposed_batch[0], self.size_divisible)
        targets = transposed_batch[1]
        img_ids = transposed_batch[2]
        return images, targets, img_ids 
Author: Res2Net, Project: Res2Net-maskrcnn, Lines: 8, Source: collate_batch.py
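
In maskrcnn_benchmark this __call__ belongs to the BatchCollator class, which is passed to a torch DataLoader as its collate_fn. A hedged sketch with a stand-in dataset (DummyDataset is an assumption, standing in for a real dataset that yields (image, target, image_id) tuples):

import torch
from torch.utils.data import DataLoader, Dataset
from maskrcnn_benchmark.data.collate_batch import BatchCollator

class DummyDataset(Dataset):
    def __len__(self):
        return 4
    def __getitem__(self, idx):
        # image, target, image_id; a real dataset returns a BoxList target
        return torch.rand(3, 480, 640), None, idx

loader = DataLoader(DummyDataset(), batch_size=2,
                    collate_fn=BatchCollator(size_divisible=32))
for images, targets, img_ids in loader:
    print(images.tensors.shape, img_ids)  # padded ImageList batch plus ids
    break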

Example 5: forward

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def forward(self, images, targets=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[BoxList]): ground-truth boxes present in the image (optional)

        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] whose entries contain additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).

        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        images = to_image_list(images)
        features = self.backbone(images.tensors)
        proposals, proposal_losses = self.rpn(images, features, targets)
        if self.roi_heads:
            x, result, detector_losses = self.roi_heads(features, proposals, targets)
        else:
            # RPN-only models don't have roi_heads
            x = features
            result = proposals
            detector_losses = {}

        if self.training:
            losses = {}
            losses.update(detector_losses)
            losses.update(proposal_losses)
            return losses

        return result 
Author: Res2Net, Project: Res2Net-maskrcnn, Lines: 35, Source: generalized_rcnn.py

Example 6: compute_prediction

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def compute_prediction(self, original_image):
        """
        Arguments:
            original_image (np.ndarray): an image as returned by OpenCV

        Returns:
            prediction (BoxList): the detected objects. Additional information
                of the detection properties can be found in the fields of
                the BoxList via `prediction.fields()`
        """
        # apply pre-processing to image
        image = self.transforms(original_image)
        # convert to an ImageList, padded so that it is divisible by
        # cfg.DATALOADER.SIZE_DIVISIBILITY
        image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
        image_list = image_list.to(self.device)
        # compute predictions
        with torch.no_grad():
            predictions = self.model(image_list)
        predictions = [o.to(self.cpu_device) for o in predictions]

        # always single image is passed at a time
        prediction = predictions[0]

        # reshape prediction (a BoxList) into the original image size
        height, width = original_image.shape[:-1]
        prediction = prediction.resize((width, height))

        if prediction.has_field("mask"):
            # if we have masks, paste the masks in the right position
            # in the image, as defined by the bounding boxes
            masks = prediction.get_field("mask")
            # always single image is passed at a time
            masks = self.masker([masks], [prediction])[0]
            prediction.add_field("mask", masks)
        return prediction 
Author: Res2Net, Project: Res2Net-maskrcnn, Lines: 38, Source: predictor.py
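
A hypothetical call pattern for this method; `coco_demo` stands in for an instance of the predictor class this method belongs to (COCODemo in maskrcnn_benchmark's demo code), and the image path is a placeholder:

import cv2

image = cv2.imread("example.jpg")           # BGR ndarray, as the docstring expects
prediction = coco_demo.compute_prediction(image)
print(prediction.fields())                  # e.g. ['labels', 'scores', 'mask']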

Example 7: compute_prediction

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def compute_prediction(self, original_image):
        """
        Arguments:
            original_image (np.ndarray): an image as returned by OpenCV

        Returns:
            prediction (BoxList): the detected objects. Additional information
                of the detection properties can be found in the fields of
                the BoxList via `prediction.fields()`
        """
        # apply pre-processing to image
        image = self.transforms(original_image)
        # convert to an ImageList, padded so that it is divisible by
        # cfg.DATALOADER.SIZE_DIVISIBILITY
        image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
        image_list = image_list.to(self.device)
        # compute predictions
        with torch.no_grad():
            predictions = self.model(image_list)
        predictions = [o.to(self.cpu_device) for o in predictions]

        # always single image is passed at a time
        prediction = predictions[0]

        # reshape prediction (a BoxList) into the original image size
        height, width = original_image.shape[:-1]
        prediction = prediction.resize((width, height))

        return prediction 
Author: Xiangyu-CAS, Project: R2CNN.pytorch, Lines: 31, Source: inference_engine.py

Example 8: im_detect_bbox

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def im_detect_bbox(model, images, target_scale, target_max_size, device):
    """
    Performs bbox detection on the original image.
    """
    transform = TT.Compose([
        T.Resize(target_scale, target_max_size),
        TT.ToTensor(),
        T.Normalize(
            mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255
        )
    ])
    images = [transform(image) for image in images]
    images = to_image_list(images, cfg.DATALOADER.SIZE_DIVISIBILITY)
    return model(images.to(device)) 
Author: simaiden, Project: Clothing-Detection, Lines: 16, Source: bbox_aug.py
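
In test-time bbox augmentation, the per-transform passes (original and flipped, as in Examples 1 and 8) are typically merged image by image before final post-processing. A hedged sketch, assuming `model`, `images`, and `device` are set up as in the functions above; cat_boxlist comes from maskrcnn_benchmark.structures.boxlist_ops, and the scale values are illustrative:

from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist

boxlists = im_detect_bbox(model, images, 800, 1333, device)
boxlists_hf = im_detect_bbox_hflip(model, images, 800, 1333, device)
# concatenate the original and flipped detections for each image
merged = [cat_boxlist([b, bf]) for b, bf in zip(boxlists, boxlists_hf)]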

Example 9: forward

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def forward(self, images, targets=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[BoxList]): ground-truth boxes present in the image (optional)

        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] whose entries contain additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).

        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        images = to_image_list(images)
        features = self.backbone(images.tensors)
        proposals, proposal_losses = self.rpn(images, features, targets)
        if self.roi_heads:
            x, result, detector_losses = self.roi_heads(features, proposals, targets)
        else:
            # RPN-only models don't have roi_heads
            x = features
            result = proposals
            detector_losses = {}

        if self.training:
            losses = {}
            losses.update(detector_losses)
            losses.update(proposal_losses)
            return losses

        return result
Author: simaiden, Project: Clothing-Detection, Lines: 43, Source: generalized_rcnn.py

Example 10: compute_features_from_bbox

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def compute_features_from_bbox(self, original_image, gt_boxes):
        """
        Extracts features given the ground-truth boxes
        assume ground-truth boxes are list of boxes in xyxy format
        Arguments:
            original_image (np.ndarray): an image as returned by OpenCV
        Returns:
            features (BoxList): the ground truth boxes with features
            accessible using features.get_field()
        """
        # Convert gt boxes to BoxList
        gt_box_list = BoxList(
            gt_boxes, (original_image.shape[1], original_image.shape[0]), mode='xyxy').to(self.device)
        # Convert image as in `run_on_opencv_image`
        image = self.transforms(original_image)
        # Convert gt boxes for a single image to a list
        gt_box_list = [gt_box_list.resize((image.size(2), image.size(1)))]
        image_list = to_image_list(
            image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
        image_list = image_list.to(self.device)
        with torch.no_grad():
            features = self.feat_extractor(image_list, gt_box_list)
        return features[0].cpu().detach().numpy()[0] 
Author: simaiden, Project: Clothing-Detection, Lines: 31, Source: DetectronModels.py
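
The gt_box_list.resize call above rescales the ground-truth boxes from the original image size to the transformed one. A small self-contained illustration of that step (the sizes are made up):

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList

boxes = BoxList(torch.tensor([[0.0, 0.0, 100.0, 50.0]]), (640, 480), mode="xyxy")
resized = boxes.resize((1280, 960))  # target (width, height): here a 2x upscale
print(resized.bbox)                  # tensor([[  0.,   0., 200., 100.]])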

Example 11: forward

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def forward(self, images, targets=None, rngs=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[BoxList]): ground-truth boxes present in the image (optional)

        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] whose entries contain additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).

        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        images = to_image_list(images)
        if rngs is None:
            features = self.backbone(images.tensors)
        else:
            features = self.backbone(images.tensors, rngs)
            features = self.fpn(features)
        proposals, proposal_losses = self.rpn(images, features, targets)
        if self.roi_heads:
            x, result, detector_losses = self.roi_heads(features, proposals, targets)
        else:
            # RPN-only models don't have roi_heads
            x = features
            result = proposals
            detector_losses = {}

        if self.training:
            losses = {}
            losses.update(detector_losses)
            losses.update(proposal_losses)
            return losses

        return result 
Author: megvii-model, Project: DetNAS, Lines: 39, Source: generalized_rcnn.py

Example 12: forward

# Required import: from maskrcnn_benchmark.structures import image_list [as alias]
# Or: from maskrcnn_benchmark.structures.image_list import to_image_list [as alias]
def forward(self, images, targets=None):
        """
        Arguments:
            images (list[Tensor] or ImageList): images to be processed
            targets (list[BoxList]): ground-truth boxes present in the image (optional)

        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns a list[BoxList] whose entries contain additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).

        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        images = to_image_list(images)
        features = self.backbone(images.tensors)

        if self.fp4p_on:
            # feed only the last feature map (the C4 stage) to the RPN
            proposals, proposal_losses = self.rpn(images, (features[-1],), targets)
        else:
            proposals, proposal_losses = self.rpn(images, features, targets)

        if self.roi_heads:
            x, result, detector_losses = self.roi_heads(features, proposals, targets)
        else:
            # RPN-only models don't have roi_heads
            x = features
            result = proposals
            detector_losses = {}

        if self.training:
            losses = {}
            losses.update(detector_losses)
            losses.update(proposal_losses)
            return losses

        return result 
Author: clw5180, Project: remote_sensing_object_detection_2019, Lines: 43, Source: generalized_rrpn_rcnn.py


Note: The maskrcnn_benchmark.structures.image_list.to_image_list examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's license before distributing or using the code; do not reproduce without permission.