

Python blob.serialize Method Code Examples

This article collects typical usage examples of the Python method utils.blob.serialize. If you are wondering how blob.serialize works, how to call it, or what real-world uses of it look like, the curated code examples below may help. You can also browse further usage examples of the containing module, utils.blob.


Five code examples of the blob.serialize method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
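
For context: in Detectron-style codebases, utils.blob.serialize pickles an arbitrary Python object into a flat uint8 NumPy array so that it can ride along in the blob dictionary like any other array, and a matching deserialize reverses the step. The following is a minimal sketch of that pattern, not the verbatim library code:

import pickle
import numpy as np

def serialize(obj):
    """Pickle a Python object into a flat uint8 NumPy array."""
    return np.frombuffer(pickle.dumps(obj), dtype=np.uint8)

def deserialize(arr):
    """Recover the original Python object from a serialized uint8 array."""
    return pickle.loads(np.asarray(arr, dtype=np.uint8).tobytes())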

Example 1: __getitem__

# Required import: from utils import blob [as alias]
# Or: from utils.blob import serialize [as alias]
# (The snippets below also assume numpy imported as np, blob_utils aliased to
#  utils.blob, and a project-level get_minibatch helper.)
def __getitem__(self, index_tuple):
        index, ratio = index_tuple
        single_db = [self._roidb[index]]
        blobs, valid = get_minibatch(single_db)
        # TODO: Check whether the minibatch is valid; if not, abandon it.
        # Need to change _worker_loop in torch.utils.data.dataloader.py.

        # Squeeze batch dim
        for key in blobs:
            if key != 'roidb':
                blobs[key] = blobs[key].squeeze(axis=0)

        if self._roidb[index]['need_crop']:
            self.crop_data(blobs, ratio)
            # Check bounding box
            entry = blobs['roidb'][0]
            boxes = entry['boxes']
            invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
            valid_inds = np.nonzero(~ invalid)[0]
            if len(valid_inds) < len(boxes):
                for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps', 'is_crowd',
                            'box_to_gt_ind_map', 'gt_keypoints']:
                    if key in entry:
                        entry[key] = entry[key][valid_inds]
                entry['segms'] = [entry['segms'][ind] for ind in valid_inds]

        blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn

        return blobs 
Author: roytseng-tw, Project: Detectron.pytorch, Lines: 31, Source: loader.py
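
The in-code remark "maybe we can serialize in collate_fn" points at how the serialized roidb is treated when per-image samples are batched: PyTorch's default collation cannot stack variable-length Python structures, so a custom collate function typically keeps the serialized roidb arrays in a plain list while collating everything else normally. A sketch of such a function is shown below; the name collate_minibatch and the exact key handling are illustrative assumptions, not necessarily this project's implementation.

from torch.utils.data.dataloader import default_collate

def collate_minibatch(list_of_blobs):
    """Collate per-image blob dicts into one minibatch dict.

    The serialized 'roidb' arrays have different lengths per image, so they
    are kept in a Python list; all remaining keys use the default collation.
    """
    roidb_list = [blobs.pop('roidb') for blobs in list_of_blobs]
    batch = {key: default_collate([blobs[key] for blobs in list_of_blobs])
             for key in list_of_blobs[0]}
    batch['roidb'] = roidb_list
    return batch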

Example 2: __getitem__

# Required import: from utils import blob [as alias]
# Or: from utils.blob import serialize [as alias]
def __getitem__(self, index_tuple):
        index, ratio = index_tuple
        single_db = [self._roidb[index]]

        ## _add_proposals(xxx)
        blobs, valid = get_minibatch(single_db)
        # TODO: Check whether the minibatch is valid; if not, abandon it.
        # Need to change _worker_loop in torch.utils.data.dataloader.py.

        # Squeeze batch dim
        for key in blobs:
            if key != 'roidb':
                blobs[key] = blobs[key].squeeze(axis=0)

        if self._roidb[index]['need_crop']:
            self.crop_data(blobs, ratio)
            # Check bounding box
            entry = blobs['roidb'][0]
            boxes = entry['boxes']
            invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
            valid_inds = np.nonzero(~ invalid)[0]
            if len(valid_inds) < len(boxes):
                for key in ['boxes', 'precomp_keypoints', 'gt_classes', 'seg_areas', 'gt_overlaps', 'is_crowd',
                            'box_to_gt_ind_map', 'gt_keypoints', 'gt_actions', 'gt_role_id']:
                    if key in entry:
                        entry[key] = entry[key][valid_inds]
                entry['segms'] = [entry['segms'][ind] for ind in valid_inds]

        blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn

        return blobs 
Author: bobwan1995, Project: PMFNet, Lines: 33, Source: loader.py
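
Examples 1 and 2 repeat the same cropping clean-up: after crop_data, any box whose width or height collapsed to zero is dropped, together with every per-box annotation that indexes it. That pattern can be factored into a small helper, sketched below under the assumption that the caller passes the project-specific list of per-box keys:

import numpy as np

def filter_degenerate_boxes(entry, per_box_keys):
    """Drop zero-width or zero-height boxes from a roidb entry, in place."""
    boxes = entry['boxes']
    invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
    valid_inds = np.nonzero(~invalid)[0]
    if len(valid_inds) < len(boxes):
        for key in per_box_keys:
            if key in entry:
                entry[key] = entry[key][valid_inds]
        entry['segms'] = [entry['segms'][ind] for ind in valid_inds]
    return entry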

Example 3: __getitem__

# Required import: from utils import blob [as alias]
# Or: from utils.blob import serialize [as alias]
def __getitem__(self, index_tuple):
        index, ratio = index_tuple
        single_db = [self._roidb[index]]
        blobs, valid = get_minibatch(single_db)
        # TODO: Check whether the minibatch is valid; if not, abandon it.
        # Need to change _worker_loop in torch.utils.data.dataloader.py.        

        # Squeeze batch dim
        for key in blobs:
            if key != 'roidb':
                # print('%s %s' % (key, type(blobs[key])))
                # print(blobs[key].shape)
                blobs[key] = blobs[key].squeeze(axis=0)

        if self._roidb[index]['need_crop']:
            self.crop_data(blobs, ratio)
            # Check bounding box
            entry = blobs['roidb'][0]
            boxes = entry['boxes']
            invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
            valid_inds = np.nonzero(~ invalid)[0]

            if len(valid_inds) < len(boxes):
                for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps', 'is_crowd',
                            'box_to_gt_ind_map', 'gt_keypoints', 'gt_scores', 'dataset_id', 
                            'gt_source']: # EDIT: gt_scores
                    if key in entry:
                        entry[key] = entry[key][valid_inds]
                entry['segms'] = [entry['segms'][ind] for ind in valid_inds]

        blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn

        return blobs 
Author: AruniRC, Project: detectron-self-train, Lines: 35, Source: loader.py

Example 4: __getitem__

# Required import: from utils import blob [as alias]
# Or: from utils.blob import serialize [as alias]
def __getitem__(self, index_tuple):
        index, ratio = index_tuple
        single_db = [self._roidb[index]]
        blobs, valid = get_minibatch(single_db)
        # TODO: Check whether the minibatch is valid; if not, abandon it.
        # Need to change _worker_loop in torch.utils.data.dataloader.py.

        # Squeeze batch dim
        for key in blobs:
            if key != 'roidb':
                blobs[key] = blobs[key].squeeze(axis=0)

        if self._roidb[index]['need_crop']:
            self.crop_data(blobs, ratio)
            # Check bounding box
            entry = blobs['roidb'][0]
            boxes = entry['boxes']
            invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
            valid_inds = np.nonzero(~ invalid)[0]
            if len(valid_inds) < len(boxes):
                for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps', 'is_crowd',
                            'box_to_gt_ind_map', 'gt_keypoints']:
                    if key in entry:
                        entry[key] = entry[key][valid_inds]
                entry['segms'] = [entry['segms'][ind] for ind in valid_inds]
            # for rel sanity check
            sbj_gt_boxes = entry['sbj_gt_boxes']
            obj_gt_boxes = entry['obj_gt_boxes']
            sbj_invalid = (sbj_gt_boxes[:, 0] == sbj_gt_boxes[:, 2]) | (sbj_gt_boxes[:, 1] == sbj_gt_boxes[:, 3])
            obj_invalid = (obj_gt_boxes[:, 0] == obj_gt_boxes[:, 2]) | (obj_gt_boxes[:, 1] == obj_gt_boxes[:, 3])
            rel_invalid = sbj_invalid | obj_invalid
            rel_valid_inds = np.nonzero(~rel_invalid)[0]
            if len(rel_valid_inds) < len(sbj_gt_boxes):
                for key in ['sbj_gt_boxes', 'sbj_gt_classes', 'obj_gt_boxes', 'obj_gt_classes', 'prd_gt_classes',
                            'sbj_gt_overlaps', 'obj_gt_overlaps', 'prd_gt_overlaps', 'pair_to_gt_ind_map',
                            'width', 'height']:
                    if key in entry:
                        entry[key] = entry[key][rel_valid_inds]

        blobs['roidb'] = blob_utils.serialize(blobs['roidb'])  # CHECK: maybe we can serialize in collate_fn

        return blobs 
Author: jz462, Project: Large-Scale-VRD.pytorch, Lines: 44, Source: loader_rel.py
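
On the consuming side (for instance inside the model's forward pass), the serialized roidb has to be turned back into Python dictionaries before its boxes and labels can be used. Assuming utils.blob also exposes the matching deserialize, as in Detectron-style code, the round trip looks roughly like this (unpack_roidb is an illustrative helper name):

import utils.blob as blob_utils

def unpack_roidb(serialized_roidb_list):
    """Recover the per-image roidb dicts from their serialized form."""
    # Each element was produced by blob_utils.serialize(blobs['roidb']) in
    # __getitem__, where blobs['roidb'] holds a single entry, so deserializing
    # yields a one-element list per image.
    return [blob_utils.deserialize(arr)[0] for arr in serialized_roidb_list]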

Example 5: add_rpn_blobs

# Required import: from utils import blob [as alias]
# Or: from utils.blob import serialize [as alias]
# (This snippet also assumes the module-level cfg object and the private helpers
#  _get_field_of_anchors / _populate_rpn_blobs defined elsewhere in rpn.py.)
def add_rpn_blobs(blobs, im_scales, roidb):
    """Add blobs needed training RPN-only and end-to-end Faster R-CNN models."""
    # Temporal dimensions of the output
    T = roidb[0]['boxes'].shape[-1] // 4
    # The following vars are only used in the FPN case, but they are kept
    # outside the "if" condition so that _populate_rpn_blobs can still be
    # called (it will receive these dummy values and simply not use them).
    foas = []
    k_max = cfg.FPN.RPN_MAX_LEVEL
    k_min = cfg.FPN.RPN_MIN_LEVEL
    if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_RPN:
        # RPN applied to many feature levels, as in the FPN paper
        for lvl in range(k_min, k_max + 1):
            field_stride = 2. ** lvl
            anchor_sizes = (
                cfg.FPN.RPN_ANCHOR_START_SIZE * 2. ** (lvl - k_min), )
            anchor_aspect_ratios = cfg.FPN.RPN_ASPECT_RATIOS
            foa = _get_field_of_anchors(
                field_stride, anchor_sizes, anchor_aspect_ratios, T)
            foas.append(foa)
        all_anchors = np.concatenate([f.field_of_anchors for f in foas])
    else:
        foa = _get_field_of_anchors(
            cfg.RPN.STRIDE, cfg.RPN.SIZES, cfg.RPN.ASPECT_RATIOS, T)
        all_anchors = foa.field_of_anchors

    for im_i, entry in enumerate(roidb):
        _populate_rpn_blobs(entry, im_scales[im_i], blobs, all_anchors, foas,
                            foa, k_min, k_max)

    for k, v in blobs.items():
        if isinstance(v, list) and len(v) > 0:
            blobs[k] = np.concatenate(v)

    valid_keys = [
        'has_visible_keypoints', 'boxes', 'segms', 'seg_areas', 'gt_classes',
        'gt_overlaps', 'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']
    minimal_roidb = [{} for _ in range(len(roidb))]
    for i, e in enumerate(roidb):
        for k in valid_keys:
            if k in e:
                minimal_roidb[i][k] = e[k]
    blobs['roidb'] = blob_utils.serialize(minimal_roidb)

    # Always return valid=True, since RPN minibatches are valid by design
    return True 
Author: facebookresearch, Project: DetectAndTrack, Lines: 48, Source: rpn.py
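
In the FPN branch of Example 5, each pyramid level lvl gets a feature stride of 2 ** lvl and a single anchor size of RPN_ANCHOR_START_SIZE * 2 ** (lvl - k_min). As a quick worked illustration, assuming the common Detectron defaults RPN_MIN_LEVEL=2, RPN_MAX_LEVEL=6 and RPN_ANCHOR_START_SIZE=32 (configuration values, not taken from this page):

k_min, k_max = 2, 6           # cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL
anchor_start_size = 32        # cfg.FPN.RPN_ANCHOR_START_SIZE

for lvl in range(k_min, k_max + 1):
    field_stride = 2. ** lvl
    anchor_size = anchor_start_size * 2. ** (lvl - k_min)
    print('level %d: stride %.0f, anchor size %.0f' % (lvl, field_stride, anchor_size))

# level 2: stride 4, anchor size 32
# level 3: stride 8, anchor size 64
# level 4: stride 16, anchor size 128
# level 5: stride 32, anchor size 256
# level 6: stride 64, anchor size 512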


Note: The utils.blob.serialize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not reproduce this article without permission.