

Python torch.uint8 Code Examples

This article collects typical usage examples of torch.uint8 in Python. If you are unsure what torch.uint8 is, how to use it, or what real-world code that uses it looks like, the curated examples below should help. Strictly speaking, torch.uint8 is not a method but the 8-bit unsigned integer dtype exposed by the torch module; it is commonly used for image data, byte buffers, and (in older code) boolean masks. You can also explore further usage examples from the torch package.


The following presents 15 code examples of torch.uint8, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
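
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the most common ways torch.uint8 shows up in practice: constructing 8-bit unsigned tensors, round-tripping image-style data between uint8 and float32, and interoperating with NumPy's np.uint8.

import torch
import numpy as np

# Create a uint8 tensor directly; valid values are 0..255
t = torch.tensor([0, 127, 255], dtype=torch.uint8)
print(t.dtype)  # torch.uint8

# Typical image round trip: uint8 [0, 255] <-> float32 [0.0, 1.0]
img_u8 = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
img_f32 = img_u8.float().div(255)                    # to float for a network
img_back = (img_f32 * 255).round().to(torch.uint8)   # back to uint8 for saving/display

# torch.from_numpy preserves the NumPy dtype, so np.uint8 arrays become torch.uint8 tensors
arr = np.zeros((2, 2), dtype=np.uint8)
assert torch.from_numpy(arr).dtype == torch.uint8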

Example 1: test_bitmap_mask_resize

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def test_bitmap_mask_resize():
    # resize with empty bitmap masks
    raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    resized_masks = bitmap_masks.resize((56, 72))
    assert len(resized_masks) == 0
    assert resized_masks.height == 56
    assert resized_masks.width == 72

    # resize with bitmap masks containing 1 instance
    raw_masks = np.diag(np.ones(4, dtype=np.uint8))[np.newaxis, ...]
    bitmap_masks = BitmapMasks(raw_masks, 4, 4)
    resized_masks = bitmap_masks.resize((8, 8))
    assert len(resized_masks) == 1
    assert resized_masks.height == 8
    assert resized_masks.width == 8
    truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
                       [0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
                       [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0],
                       [0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1]]])
    assert (resized_masks.masks == truth).all() 
Developer: open-mmlab, Project: mmdetection, Lines of code: 23, Source file: test_masks.py

Example 2: sample

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
        """Directly returns the positive and negative indices  of samples.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results
            bboxes (torch.Tensor): Bounding boxes
            gt_bboxes (torch.Tensor): Ground truth boxes

        Returns:
            :obj:`SamplingResult`: sampler results
        """
        pos_inds = torch.nonzero(
            assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
        neg_inds = torch.nonzero(
            assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
        gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
        sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                                         assign_result, gt_flags)
        return sampling_result 
Developer: open-mmlab, Project: mmdetection, Lines of code: 21, Source file: pseudo_sampler.py
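
Many examples on this page, including the one above, use torch.uint8 tensors as boolean flags or masks. This is a legacy pattern: recent PyTorch releases prefer torch.bool for masks and emit a deprecation warning when a uint8 tensor is used for indexing. A minimal sketch (not part of mmdetection) of both styles:

import torch

scores = torch.tensor([0.9, 0.1, 0.7, 0.3])

# Legacy pattern, as in the sampler above: uint8 flags used as a mask
flags_u8 = torch.zeros(4, dtype=torch.uint8)
flags_u8[scores > 0.5] = 1

# Newer code converts to torch.bool before indexing to avoid the deprecation warning
flags_bool = flags_u8.bool()
print(scores[flags_bool])  # tensor([0.9000, 0.7000])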

Example 3: normalize_wav

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def normalize_wav(tensor: torch.Tensor) -> torch.Tensor:
    if tensor.dtype == torch.float32:
        pass
    elif tensor.dtype == torch.int32:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 2147483647.
        tensor[tensor < 0] /= 2147483648.
    elif tensor.dtype == torch.int16:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 32767.
        tensor[tensor < 0] /= 32768.
    elif tensor.dtype == torch.uint8:
        tensor = tensor.to(torch.float32) - 128
        tensor[tensor > 0] /= 127.
        tensor[tensor < 0] /= 128.
    return tensor 
Developer: pytorch, Project: audio, Lines of code: 18, Source file: wav_utils.py
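
As a quick usage sketch (assuming the normalize_wav function above is in scope), 8-bit PCM audio stores unsigned samples centered at 128, and the uint8 branch maps them into roughly [-1.0, 1.0] float32:

import torch

# Hypothetical 8-bit PCM samples: unsigned, centered at 128
pcm_u8 = torch.tensor([0, 64, 128, 192, 255], dtype=torch.uint8)

normalized = normalize_wav(pcm_u8)
print(normalized.dtype)  # torch.float32
print(bool(normalized.min() >= -1.0), bool(normalized.max() <= 1.0))  # True True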

Example 4: forward

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def forward(self, x, auto_recurse=0) -> Out:
        """
        :param x: image, NCHW, [0, 255]
        :param auto_recurse: int, how many times the last scales should be applied again.
        :return: Out
        """
        # Visualize input
        # if self._show_input:
        self.summarizer.register_images('train', {'input': x.to(torch.uint8)})
        forward_scales = list(range(self.scales)) + [-1 for _ in range(auto_recurse)]

        out = Out(targets_style='S' if self._rgb else 'bn',  # IF RGB baseline, use symbols as targets for loss
                  auto_recursive_from=self.scales if auto_recurse > 0 else None)
        out.append_input_image(x)

        x = self.sub_rgb_mean(x)  # something like -128..128 but not really
        if self._rgb:
            x = x.detach()
        self._forward_with_scales(out, x, forward_scales)

        return out 
Developer: fab-jul, Project: L3C-PyTorch, Lines of code: 23, Source file: multiscale_network.py

Example 5: getclassAccuracy

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def getclassAccuracy(output, target, nclasses, topk=(1,)):
    """
    Computes the top-k accuracy between output and target and aggregates it by class
    :param output: output vector from the network
    :param target: ground-truth
    :param nclasses: nclasses in the problem
    :param topk: Top-k results desired, i.e. top1, top2, top5
    :return: topk vectors aggregated by class
    """
    maxk = max(topk)

    score, label_index = output.topk(k=maxk, dim=1, largest=True, sorted=True)
    correct = label_index.eq(torch.unsqueeze(target, 1))

    ClassAccuracyRes = []
    for k in topk:
        ClassAccuracy = torch.zeros([1, nclasses], dtype=torch.uint8).cuda()
        correct_k = correct[:, :k].sum(1)
        for n in range(target.shape[0]):
            ClassAccuracy[0, target[n]] += correct_k[n].byte()
        ClassAccuracyRes.append(ClassAccuracy)

    return ClassAccuracyRes 
Developer: vpulab, Project: Semantic-Aware-Scene-Recognition, Lines of code: 25, Source file: utils.py
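
The .byte() call in the loop above is simply shorthand for .to(torch.uint8). A small illustrative sketch (not from the project):

import torch

correct = torch.tensor([True, False, True])

# .byte() and .to(torch.uint8) produce the same result
assert correct.byte().dtype == torch.uint8
assert torch.equal(correct.byte(), correct.to(torch.uint8))

# Note that uint8 counters can overflow past 255, so large per-class counts
# are usually accumulated in a wider dtype such as torch.long.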

Example 6: __call__

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def __call__(self, proposals, keypoint_logits):
        heatmaps = []
        valid = []
        for proposals_per_image in proposals:
            kp = proposals_per_image.get_field("keypoints")
            heatmaps_per_image, valid_per_image = project_keypoints_to_heatmap(
                kp, proposals_per_image, self.discretization_size
            )
            heatmaps.append(heatmaps_per_image.view(-1))
            valid.append(valid_per_image.view(-1))

        keypoint_targets = cat(heatmaps, dim=0)
        valid = cat(valid, dim=0).to(dtype=torch.uint8)
        valid = torch.nonzero(valid).squeeze(1)

        # torch.mean (in binary_cross_entropy_with_logits) doesn't
        # accept empty tensors, so handle it separately
        if keypoint_targets.numel() == 0 or len(valid) == 0:
            return keypoint_logits.sum() * 0

        N, K, H, W = keypoint_logits.shape
        keypoint_logits = keypoint_logits.view(N * K, H * W)

        keypoint_loss = F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid])
        return keypoint_loss 
Developer: Res2Net, Project: Res2Net-maskrcnn, Lines of code: 27, Source file: loss.py

Example 7: extract

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def extract(df):

    df_dic = {'df':df, 'locator':'None', 'dataset_id':'None'}
    feature_dic = {}
    n = df.shape[1]

    # topic vectors
    topic_features = extract_topic_features(df_dic)
    topic_vec = pad_vec(topic_features.loc[0,'table_topic'])
    feature_dic['topic'] = torch.FloatTensor(np.vstack((np.tile(topic_vec,(n,1)), np.zeros((MAX_COL_COUNT - n, topic_dim)))))


    # sherlock vectors
    sherlock_features = extract_sherlock_features(df_dic)
    for f_g in feature_group_cols:
        temp = sherlock_features[feature_group_cols[f_g]].to_numpy()
        temp = np.vstack((temp, np.zeros((MAX_COL_COUNT - n, temp.shape[1])))).astype('float')
        feature_dic[f_g] = torch.FloatTensor(temp)

    # dictionary of features, labels, masks
    return feature_dic, np.zeros(MAX_COL_COUNT), torch.tensor([1]*n + [0]*(MAX_COL_COUNT-n), dtype=torch.uint8) 
Developer: megagonlabs, Project: sato, Lines of code: 23, Source file: predict.py

Example 8: decode

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def decode(self, emissions: torch.Tensor,
               mask: Optional[torch.ByteTensor] = None) -> List[List[int]]:
        """Find the most likely tag sequence using Viterbi algorithm.

        Args:
            emissions (`~torch.Tensor`): Emission score tensor of size
                ``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
                ``(batch_size, seq_length, num_tags)`` otherwise.
            mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
                if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.

        Returns:
            List of list containing the best tag sequence for each batch.
        """
        self._validate(emissions, mask=mask)
        if mask is None:
            mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)

        if self.batch_first:
            emissions = emissions.transpose(0, 1)
            mask = mask.transpose(0, 1)

        return self._viterbi_decode(emissions, mask) 
Developer: megagonlabs, Project: sato, Lines of code: 25, Source file: __init__.py
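
A common way to build the uint8 mask that decode expects is from per-sequence lengths. The sketch below is illustrative only (the lengths tensor is hypothetical, not part of the project above) and assumes batch_first is False, i.e. a (seq_length, batch_size) mask:

import torch

lengths = torch.tensor([5, 3, 4])   # hypothetical sequence lengths, batch_size = 3
seq_length = int(lengths.max())

positions = torch.arange(seq_length).unsqueeze(1)           # (seq_length, 1)
mask = (positions < lengths.unsqueeze(0)).to(torch.uint8)   # (seq_length, batch_size)
print(mask)
# tensor([[1, 1, 1],
#         [1, 1, 1],
#         [1, 1, 1],
#         [1, 0, 1],
#         [1, 0, 0]], dtype=torch.uint8)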

Example 9: to_tensor

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    See ``ToTensor`` for more details.
    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
    Returns:
        Tensor: Converted image.
    """
    if not(_is_numpy_image(pic)):
        raise TypeError('pic should be ndarray. Got {}'.format(type(pic)))

    # handle numpy array
    img = torch.from_numpy(pic.transpose((2, 0, 1)))
    # backward compatibility
    if isinstance(img, torch.ByteTensor) or img.dtype==torch.uint8:
        return img.float().div(255)
    else:
        return img 
Developer: jbohnslav, Project: opencv_transforms, Lines of code: 20, Source file: functional.py
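
A short usage sketch (assuming the to_tensor function above and its _is_numpy_image helper are in scope): feeding it an HWC uint8 ndarray, such as one returned by cv2.imread, yields a CHW float32 tensor scaled to [0, 1]:

import numpy as np

# Hypothetical HWC uint8 image
img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)

tensor = to_tensor(img)
print(tensor.shape)                 # torch.Size([3, 32, 32])
print(tensor.dtype)                 # torch.float32
print(float(tensor.max()) <= 1.0)   # True: uint8 input is divided by 255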

Example 10: adjust_brightness

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.
    Returns:
        numpy ndarray: Brightness adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    table = np.array([ i*brightness_factor for i in range (0,256)]).clip(0,255).astype('uint8')
    # same thing but a bit slower
    # cv2.convertScaleAbs(img, alpha=brightness_factor, beta=0)
    if img.shape[2]==1:
        return cv2.LUT(img, table)[:,:,np.newaxis]
    else:
        return cv2.LUT(img, table) 
Developer: jbohnslav, Project: opencv_transforms, Lines of code: 21, Source file: functional.py

Example 11: adjust_contrast

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an mage.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.
    Returns:
        numpy ndarray: Contrast adjusted image.
    """
    # much faster to use the LUT construction than anything else I've tried
    # it's because you have to change dtypes multiple times
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    table = np.array([ (i-74)*contrast_factor+74 for i in range (0,256)]).clip(0,255).astype('uint8')
    # enhancer = ImageEnhance.Contrast(img)
    # img = enhancer.enhance(contrast_factor)
    if img.shape[2]==1:
        return cv2.LUT(img, table)[:,:,np.newaxis]
    else:
        return cv2.LUT(img,table) 
Developer: jbohnslav, Project: opencv_transforms, Lines of code: 23, Source file: functional.py

Example 12: to_tensor

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    See ``ToTensor`` for more details.
    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
    Returns:
        Tensor: Converted image.
    """
    if not(_is_numpy_image(pic)):
        raise TypeError('pic should be ndarray. Got {}'.format(type(pic)))

    # handle numpy array
    img = torch.from_numpy(pic.transpose((2, 0, 1)))
    # backward compatibility
    if isinstance(img, torch.ByteTensor) or img.dtype==torch.uint8:
        return img.float()
    else:
        return img 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines of code: 20, Source file: opencv_functional.py

Example 13: adjust_brightness

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.
    Returns:
        numpy ndarray: Brightness adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    table = np.array([ i*brightness_factor for i in range (0,256)]).clip(0,255).astype('uint8')
    # same thing but a bit slower
    # cv2.convertScaleAbs(img, alpha=brightness_factor, beta=0)
    if img.shape[2] == 1:
        return cv2.LUT(img, table)[:,:,np.newaxis]
    else:
        return cv2.LUT(img, table) 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines of code: 21, Source file: opencv_functional.py

Example 14: adjust_contrast

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an mage.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.
    Returns:
        numpy ndarray: Contrast adjusted image.
    """
    # much faster to use the LUT construction than anything else I've tried
    # it's because you have to change dtypes multiple times
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    table = np.array([ (i-74)*contrast_factor+74 for i in range (0,256)]).clip(0,255).astype('uint8')
    # enhancer = ImageEnhance.Contrast(img)
    # img = enhancer.enhance(contrast_factor)
    if img.shape[2] == 1:
        return cv2.LUT(img, table)[:,:,np.newaxis]
    else:
        return cv2.LUT(img, table) 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines of code: 23, Source file: opencv_functional.py

Example 15: collect_results_cpu

# Required import: import torch [as alias]
# Or: from torch import uint8 [as alias]
def collect_results_cpu(result_part, size, tmpdir=None):
    rank, world_size = get_dist_info()
    # create a tmp dir if it is not specified
    if tmpdir is None:
        MAX_LEN = 512
        # 32 is the ASCII code for the space character, used as padding
        dir_tensor = torch.full((MAX_LEN, ),
                                32,
                                dtype=torch.uint8,
                                device='cuda')
        if rank == 0:
            tmpdir = tempfile.mkdtemp()
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # dump the part result to the dir
    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    dist.barrier()
    # collect all parts
    if rank != 0:
        return None
    else:
        # load results of all parts from tmp dir
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_list.append(mmcv.load(part_file))
        # sort the results
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        # remove tmp dir
        shutil.rmtree(tmpdir)
        return ordered_results 
Developer: open-mmlab, Project: mmdetection, Lines of code: 42, Source file: test.py
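
The string-to-tensor trick used above to broadcast the temporary directory name can be shown in isolation. A minimal sketch (the path below is hypothetical): encode a string into a fixed-length uint8 buffer padded with spaces (ASCII 32), then decode it back:

import torch

MAX_LEN = 512
text = '/tmp/some_tmpdir'

buf = torch.full((MAX_LEN, ), 32, dtype=torch.uint8)
encoded = torch.tensor(bytearray(text.encode()), dtype=torch.uint8)
buf[:len(encoded)] = encoded

decoded = buf.numpy().tobytes().decode().rstrip()
assert decoded == text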


Note: the torch.uint8 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.