

Python bounding_box.BoxList Usage Examples

This article collects typical usage examples of maskrcnn_benchmark.structures.bounding_box.BoxList in Python. If you are wondering how BoxList is constructed and used in practice, the curated examples below should help. You can also explore the rest of the maskrcnn_benchmark.structures.bounding_box module for related usage.


The following shows 15 code examples of bounding_box.BoxList, sorted by popularity by default. You can upvote the examples you find useful, which helps surface better Python code samples.
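Before the individual examples, here is a minimal sketch of how a BoxList is typically constructed and used; the box coordinates, labels, and scores below are made-up values for illustration only.

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList

# two boxes in (xmin, ymin, xmax, ymax) format for a 640x480 image (made-up values)
boxes = torch.tensor([[10.0, 20.0, 100.0, 200.0],
                      [50.0, 60.0, 300.0, 400.0]])
boxlist = BoxList(boxes, (640, 480), mode="xyxy")

# attach per-box annotations as extra fields
boxlist.add_field("labels", torch.tensor([1, 3]))
boxlist.add_field("scores", torch.tensor([0.90, 0.75]))

# convert between box formats, index into the list, and read fields back
xywh = boxlist.convert("xywh")
top = boxlist[torch.tensor([0])]
print(boxlist.bbox.shape, boxlist.get_field("labels"))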

Example 1: eval_detection_voc

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
# This example also needs: import numpy as np
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
    """Evaluate on voc dataset.
    Args:
        pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
        gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
        iou_thresh: iou thresh
        use_07_metric: boolean
    Returns:
        dict represents the results
    """
    assert len(gt_boxlists) == len(
        pred_boxlists
    ), "Length of gt and pred lists need to be same."
    prec, rec = calc_detection_voc_prec_rec(
        pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
    )
    ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
    return {"ap": ap, "map": np.nanmean(ap)} 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines: 20, Source: voc_eval.py
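For orientation, a minimal sketch of how eval_detection_voc might be called with toy inputs; the import path of the evaluation helper and all box values are assumptions for illustration, not part of the original snippet.

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
# assumed location of the helper shown above; adjust to where voc_eval.py lives in your checkout
from maskrcnn_benchmark.data.datasets.evaluation.voc.voc_eval import eval_detection_voc

gt = BoxList(torch.tensor([[0.0, 0.0, 50.0, 50.0]]), (100, 100), mode="xyxy")
gt.add_field("labels", torch.tensor([1]))
gt.add_field("difficult", torch.tensor([0]))  # VOC-style evaluation expects a `difficult` field

pred = BoxList(torch.tensor([[2.0, 2.0, 48.0, 52.0]]), (100, 100), mode="xyxy")
pred.add_field("labels", torch.tensor([1]))
pred.add_field("scores", torch.tensor([0.9]))

result = eval_detection_voc([pred], [gt], iou_thresh=0.5, use_07_metric=True)
print(result["map"])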

Example 2: keep_only_positive_boxes

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def keep_only_positive_boxes(boxes):
    """
    Given a list of BoxList objects containing the `labels` field,
    return, per image, a BoxList restricted to the entries with `labels > 0`,
    along with the boolean masks of the kept indices.

    Arguments:
        boxes (list of BoxList)
    """
    assert isinstance(boxes, (list, tuple))
    assert isinstance(boxes[0], BoxList)
    assert boxes[0].has_field("labels")
    positive_boxes = []
    positive_inds = []
    num_boxes = 0
    for boxes_per_image in boxes:
        labels = boxes_per_image.get_field("labels")
        inds_mask = labels > 0
        inds = inds_mask.nonzero().squeeze(1)
        positive_boxes.append(boxes_per_image[inds])
        positive_inds.append(inds_mask)
    return positive_boxes, positive_inds 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines: 23, Source: mask_head.py

Example 3: add_gt_proposals

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def add_gt_proposals(self, proposals, targets):
        """
        Arguments:
            proposals: list[BoxList]
            targets: list[BoxList]
        """
        # Get the device we're operating on
        device = proposals[0].bbox.device

        gt_boxes = [target.copy_with_fields([]) for target in targets]

        # The later cat of BoxLists requires every box to carry the same fields,
        # so add a dummy `objectness` field to the ground-truth boxes.
        for gt_box in gt_boxes:
            gt_box.add_field("objectness", torch.ones(len(gt_box), device=device))

        proposals = [
            cat_boxlist((proposal, gt_box))
            for proposal, gt_box in zip(proposals, gt_boxes)
        ]

        return proposals 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines: 24, Source: inference.py

Example 4: prepare_boxlist

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def prepare_boxlist(self, boxes, scores, image_shape):
        """
        Returns BoxList from `boxes` and adds probability scores information
        as an extra field
        `boxes` has shape (#detections, 4 * #classes), where each row represents
        a list of predicted bounding boxes for each of the object classes in the
        dataset (including the background class). The detections in each row
        originate from the same object proposal.
        `scores` has shape (#detection, #classes), where each row represents a list
        of object detection confidence scores for each of the object classes in the
        dataset (including the background class). `scores[i, j]`` corresponds to the
        box at `boxes[i, j * 4:(j + 1) * 4]`.
        """
        boxes = boxes.reshape(-1, 4)
        scores = scores.reshape(-1)
        boxlist = BoxList(boxes, image_shape, mode="xyxy")
        boxlist.add_field("scores", scores)
        # Optionally keep the original index of each score:
        # inds = torch.arange(0, scores.shape[0])
        # boxlist.add_field("orig_inds", inds)
        return boxlist 
Developer: simaiden, Project: Clothing-Detection, Lines: 23, Source: inference.py
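To make the flattened per-class layout in the docstring concrete, here is a small illustrative sketch; the tensor sizes are made up, and only the indexing relationship described above is demonstrated.

import torch

num_det, num_classes = 2, 3  # made-up sizes for illustration
boxes = torch.rand(num_det, 4 * num_classes)   # (#detections, 4 * #classes)
scores = torch.rand(num_det, num_classes)      # (#detections, #classes)

# the box paired with scores[i, j] occupies columns j*4 .. j*4+3 of row i
i, j = 0, 2
box_ij = boxes[i, j * 4:(j + 1) * 4]

# after boxes.reshape(-1, 4) and scores.reshape(-1), the same pair sits at
# flat index i * num_classes + j
flat_idx = i * num_classes + j
assert torch.equal(boxes.reshape(-1, 4)[flat_idx], box_ij)
assert scores.reshape(-1)[flat_idx] == scores[i, j]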

Example 5: run_on_opencv_image

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def run_on_opencv_image(self, image):
        """
        Arguments:
            image (np.ndarray): an image as returned by OpenCV

        Returns:
            prediction (BoxList): the detected objects. Additional information
                of the detection properties can be found in the fields of
                the BoxList via `prediction.fields()`
        """
        predictions = self.compute_prediction(image)
        top_predictions = self.select_top_predictions(predictions)

        result = image.copy()
        if self.show_mask_heatmaps:
            return self.create_mask_montage(result, top_predictions)
        result = self.overlay_boxes(result, top_predictions)
        if self.cfg.MODEL.MASK_ON:
            result = self.overlay_mask(result, top_predictions)
        if self.cfg.MODEL.KEYPOINT_ON:
            result = self.overlay_keypoints(result, top_predictions)
        result = self.overlay_class_names(result, top_predictions)

        return result 
Developer: simaiden, Project: Clothing-Detection, Lines: 26, Source: DetectronModels.py

Example 6: select_top_predictions

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def select_top_predictions(self, predictions):
        """
        Select only the predictions with a `score` greater than self.confidence_threshold,
        and return them sorted in descending order of score.

        Arguments:
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `scores`.

        Returns:
            prediction (BoxList): the detected objects. Additional information
                of the detection properties can be found in the fields of
                the BoxList via `prediction.fields()`
        """
        scores = predictions.get_field("scores")
        keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)
        predictions = predictions[keep]
        scores = predictions.get_field("scores")
        _, idx = scores.sort(0, descending=True)
        return predictions[idx] 
Developer: simaiden, Project: Clothing-Detection, Lines: 22, Source: DetectronModels.py

Example 7: overlay_boxes

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def overlay_boxes(self, image, predictions):
        """
        Adds the predicted boxes on top of the image

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `labels`.
        """
        labels = predictions.get_field("labels")
        boxes = predictions.bbox

        colors = self.compute_colors_for_labels(labels).tolist()

        for box, color in zip(boxes, colors):
            box = box.to(torch.int64)
            top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
            image = cv2.rectangle(
                image, tuple(top_left), tuple(bottom_right), tuple(color), 1
            )

        return image 
Developer: simaiden, Project: Clothing-Detection, Lines: 24, Source: DetectronModels.py

Example 8: overlay_class_names

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def overlay_class_names(self, image, predictions):
        """
        Adds detected class names and scores in the positions defined by the
        top-left corner of the predicted bounding box

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the fields `scores` and `labels`.
        """
        scores = predictions.get_field("scores").tolist()
        labels = predictions.get_field("labels").tolist()
        labels = [self.CATEGORIES[i] for i in labels]
        boxes = predictions.bbox

        template = "{}: {:.2f}"
        for box, score, label in zip(boxes, scores, labels):
            x, y = box[:2]
            s = template.format(label, score)
            cv2.putText(
                image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1
            )

        return image 
Developer: simaiden, Project: Clothing-Detection, Lines: 26, Source: DetectronModels.py

Example 9: prepare_boxlist

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def prepare_boxlist(self, boxes, scores, image_shape):
        """
        Returns BoxList from `boxes` and adds probability scores information
        as an extra field
        `boxes` has shape (#detections, 4 * #classes), where each row represents
        a list of predicted bounding boxes for each of the object classes in the
        dataset (including the background class). The detections in each row
        originate from the same object proposal.
        `scores` has shape (#detection, #classes), where each row represents a list
        of object detection confidence scores for each of the object classes in the
        dataset (including the background class). `scores[i, j]`` corresponds to the
        box at `boxes[i, j * 4:(j + 1) * 4]`.
        """
        boxes = boxes.reshape(-1, 4)
        scores = scores.reshape(-1)
        boxlist = BoxList(boxes, image_shape, mode="xyxy")
        boxlist.add_field("scores", scores)
        return boxlist 
Developer: megvii-model, Project: DetNAS, Lines: 20, Source: inference.py

Example 10: __getitem__

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def __getitem__(self, index):
        img_id = self.ids[index]

        im_path = os.path.join(self.root, img_id + '.jpg')
        img = Image.open(im_path).convert("RGB")
        im = cv2.imread(im_path)
        anno = self.get_groundtruth(index)
        anno["im_info"] = [im.shape[0], im.shape[1]]
        height, width = anno["im_info"]
        target = BoxList(anno["boxes"], (width, height), mode="xyxy")
        target.add_field("labels", anno["labels"])
        target.add_field("difficult", anno["difficult"])

        target = target.clip_to_image(remove_empty=True)

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, index 
Developer: clw5180, Project: remote_sensing_object_detection_2019, Lines: 21, Source: icdar_series.py

Example 11: __getitem__

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img.size)
        target.add_field("masks", masks)

        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = PersonKeypoints(keypoints, img.size)
            target.add_field("keypoints", keypoints)

        target = target.clip_to_image(remove_empty=True)

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, idx 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines: 33, Source: coco.py

Example 12: get_groundtruth

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def get_groundtruth(self, index):
        img_id = self.ids[index]
        anno = ET.parse(self._annopath % img_id).getroot()
        anno = self._preprocess_annotation(anno)

        height, width = anno["im_info"]
        target = BoxList(anno["boxes"], (width, height), mode="xyxy")
        target.add_field("labels", anno["labels"])
        target.add_field("difficult", anno["difficult"])
        return target 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines: 12, Source: voc.py

Example 13: __getitem__

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def __getitem__(self, item):
        img = Image.open(self.image_lists[item]).convert("RGB")

        # dummy target
        w, h = img.size
        target = BoxList([[0, 0, w, h]], img.size, mode="xyxy")

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines: 13, Source: list_dataset.py

Example 14: forward

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def forward(self, x, boxes):
        mask_prob = x

        scores = None
        if self.keypointer:
            mask_prob, scores = self.keypointer(x, boxes)

        assert len(boxes) == 1, "Only non-batched inference supported for now"
        boxes_per_image = [box.bbox.size(0) for box in boxes]
        mask_prob = mask_prob.split(boxes_per_image, dim=0)
        scores = scores.split(boxes_per_image, dim=0)

        results = []
        for prob, box, score in zip(mask_prob, boxes, scores):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            prob = PersonKeypoints(prob, box.size)
            prob.add_field("logits", score)
            bbox.add_field("keypoints", prob)
            results.append(bbox)

        return results


# TODO remove and use only the Keypointer 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines: 28, Source: inference.py

Example 15: __call__

# Required imports: from maskrcnn_benchmark.structures import bounding_box [as alias]
# Or: from maskrcnn_benchmark.structures.bounding_box import BoxList [as alias]
def __call__(self, masks, boxes):
        # TODO do this properly
        if isinstance(boxes, BoxList):
            boxes = [boxes]
        assert len(boxes) == 1

        result, scores = heatmaps_to_keypoints(
            masks.detach().cpu().numpy(), boxes[0].bbox.cpu().numpy()
        )
        return torch.from_numpy(result).to(masks.device), torch.as_tensor(scores, device=masks.device) 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines: 12, Source: inference.py


Note: The maskrcnn_benchmark.structures.bounding_box.BoxList examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.