當前位置: 首頁>>代碼示例>>Python>>正文


Python torch.flip方法代碼示例

本文整理匯總了Python中torch.flip方法的典型用法代碼示例。如果您正苦於以下問題:Python torch.flip方法的具體用法?Python torch.flip怎麽用?Python torch.flip使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在torch的用法示例。


在下文中一共展示了torch.flip方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: _reverse_data_dict

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def _reverse_data_dict(self, data_dict):
        result = {}
        for k, x in data_dict.items():

            if not isinstance(x, torch.Tensor):
                result[k] = x
                continue

            new_x = torch.flip(x, [len(x.shape) - 1])

            # since direction_label_map, direction_multilabel_map will not appear in inputs, we omit the flipping
            if k == 'offsetmap_w':
                new_x = -new_x
            elif k == 'angle_map':
                new_x = x.clone()
                mask = (x > 0) & (x < 180)
                new_x[mask] = 180 - x[mask]
                mask = (x < 0) & (x > -180)
                new_x[mask] = - (180 + x[mask])

            result[k] = new_x

        return result 
開發者ID:openseg-group,項目名稱:openseg.pytorch,代碼行數:25,代碼來源:data_helper.py

示例2: flip_lr

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def flip_lr(image):
    """
    Flip image horizontally (mirror along the width axis).

    Parameters
    ----------
    image : torch.Tensor [B,3,H,W]
        Image to be flipped

    Returns
    -------
    image_flipped : torch.Tensor [B,3,H,W]
        Flipped image
    """
    assert image.dim() == 4, 'You need to provide a [B,C,H,W] image to flip'
    # dim -1 is the width axis of a [B,C,H,W] tensor
    return image.flip(-1)
開發者ID:TRI-ML,項目名稱:packnet-sfm,代碼行數:18,代碼來源:image.py

示例3: flip_model

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def flip_model(model, image, flip):
    """
    Run the model, optionally flipping the input image and un-flipping the outputs.

    Parameters
    ----------
    model : nn.Module
        Module to be used
    image : torch.Tensor [B,3,H,W]
        Input image
    flip : bool
        True if the flip is happening

    Returns
    -------
    inv_depths : list of torch.Tensor [B,1,H,W]
        List of predicted inverse depth maps
    """
    if not flip:
        return model(image)
    # flip input, predict, then flip each prediction back so it aligns with `image`
    predictions = model(flip_lr(image))
    return [flip_lr(pred) for pred in predictions]

######################################################################################################################## 
開發者ID:TRI-ML,項目名稱:packnet-sfm,代碼行數:26,代碼來源:image.py

示例4: conditionalUpwind

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def conditionalUpwind(self, u):
        """
        First-order upwind scheme:
        https://en.wikipedia.org/wiki/Upwind_scheme

        Per point, the derivative stencil is chosen by the sign of u:
        u > 0 uses the stencil in ``self.weight`` (u1), otherwise the
        mirrored-and-negated stencil (u2).

        Args:
            u (torch.Tensor): [B, C, H]
        Returns:
            grad_u: [B, C, H] upwind-selected gradient
        """
        u_shape = u.shape
        u = u.view(-1, 1, *u_shape[-1:])

        # stencil applied as-is (e.g. backward difference)
        u1 = F.conv1d(F.pad(u, self.padding, mode='circular'),
            self.weight, stride=1, padding=0, bias=None) / (self.dx)

        # mirrored, sign-flipped stencil (e.g. forward difference)
        u2 = F.conv1d(F.pad(u, self.padding, mode='circular'),
            -torch.flip(self.weight, dims=[-1]), stride=1, padding=0, bias=None) / (self.dx)

        grad_u = torch.where(u > 0, u1, u2)

        # BUG FIX: previously returned u2 unconditionally, which discarded the
        # torch.where upwind selection above; return the selected gradient.
        return grad_u.view(u_shape)
開發者ID:cics-nd,項目名稱:ar-pde-cnn,代碼行數:23,代碼來源:burgerFiniteDifference.py

示例5: _flip_path

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def _flip_path(path, path_lens):
    """Flips label sequence.
    This function rotates a label sequence and flips it.
    ``path[b, t]`` stores a label at time ``t`` in ``b``-th batch.
    The rotated matrix ``r`` is defined as
    ``r[b, t] = path[b, t + path_lens[b]]``
    .. ::
       a b c d .     . a b c d    d c b a .
       e f . . .  -> . . . e f -> f e . . .
       g h i j k     g h i j k    k j i h g

    Args:
        path (FloatTensor): `[B, 2*L+1]`
        path_lens (LongTensor): `[B]`
    Returns:
        FloatTensor: `[B, 2*L+1]`

    """
    bs = path.size(0)
    max_path_len = path.size(1)
    rotate = (torch.arange(max_path_len) + path_lens[:, None]) % max_path_len
    return torch.flip(path[torch.arange(bs, dtype=torch.int64)[:, None], rotate], dims=[1]) 
開發者ID:hirofumi0810,項目名稱:neural_sp,代碼行數:24,代碼來源:ctc.py

示例6: _flip_label_probability

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def _flip_label_probability(log_probs, xlens):
    """Flips a label probability matrix.
    This function rotates a label probability matrix and flips it.
    ``log_probs[i, b, l]`` stores log probability of label ``l`` at ``i``-th
    input in ``b``-th batch.
    The rotated matrix ``r`` is defined as
    ``r[i, b, l] = log_probs[i + xlens[b], b, l]``

    Args:
        cum_log_prob (FloatTensor): `[T, B, vocab]`
        xlens (LongTensor): `[B]`
    Returns:
        FloatTensor: `[T, B, vocab]`

    """
    xmax, bs, vocab = log_probs.size()
    rotate = (torch.arange(xmax, dtype=torch.int64)[:, None] + xlens) % xmax
    return torch.flip(log_probs[rotate[:, :, None],
                                torch.arange(bs, dtype=torch.int64)[None, :, None],
                                torch.arange(vocab, dtype=torch.int64)[None, None, :]], dims=[0]) 
開發者ID:hirofumi0810,項目名稱:neural_sp,代碼行數:22,代碼來源:ctc.py

示例7: _flip_path_probability

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def _flip_path_probability(cum_log_prob, xlens, path_lens):
    """Flips a path probability matrix.
    This function returns a path probability matrix and flips it.
    ``cum_log_prob[i, b, t]`` stores log probability at ``i``-th input and
    at time ``t`` in a output sequence in ``b``-th batch.
    The rotated matrix ``r`` is defined as
    ``r[i, j, k] = cum_log_prob[i + xlens[j], j, k + path_lens[j]]``

    Args:
        cum_log_prob (FloatTensor): `[T, B, 2*L+1]`
        xlens (LongTensor): `[B]`
        path_lens (LongTensor): `[B]`
    Returns:
        FloatTensor: `[T, B, 2*L+1]`

    """
    xmax, bs, max_path_len = cum_log_prob.size()
    rotate_input = ((torch.arange(xmax, dtype=torch.int64)[:, None] + xlens) % xmax)
    rotate_label = ((torch.arange(max_path_len, dtype=torch.int64) + path_lens[:, None]) % max_path_len)
    return torch.flip(cum_log_prob[rotate_input[:, :, None],
                                   torch.arange(bs, dtype=torch.int64)[None, :, None],
                                   rotate_label], dims=[0, 2]) 
開發者ID:hirofumi0810,項目名稱:neural_sp,代碼行數:24,代碼來源:ctc.py

示例8: forward

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def forward(ctx, input):
        '''
        Row-wise log-cumulative-sum-exp, accumulated from tail to head.

        In the forward pass we receive a context object and a Tensor containing
        the input; we return a Tensor containing the output. ``ctx`` is used to
        stash (via ctx.save_for_backward) whatever the backward pass needs.

        :param ctx: autograd context object
        :param input: i.e., batch_preds of [batch, ranking_size], each row represents the relevance predictions for documents within a ltr_adhoc
        :return: [batch, ranking_size], each row represents the log_cumsum_exp value
        '''
        # subtract the per-row max for numerical stability of exp()
        row_max, _ = torch.max(input, dim=1, keepdim=True)
        shifted = torch.exp(input - row_max)
        # flip -> cumsum -> flip back == suffix (tail-to-head) cumulative sum
        tail_cumsum = torch.flip(torch.cumsum(torch.flip(shifted, dims=[1]), dim=1), dims=[1])
        # add row_max back, undoing the stability shift
        fd_output = torch.log(tail_cumsum) + row_max

        ctx.save_for_backward(input, fd_output)

        return fd_output
開發者ID:pt-ranking,項目名稱:pt-ranking.github.io,代碼行數:22,代碼來源:listmle.py

示例9: _get_strided

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def _get_strided(waveform: Tensor, window_size: int, window_shift: int, snip_edges: bool) -> Tensor:
    r"""Given a waveform (1D tensor of size ``num_samples``), it returns a 2D tensor (m, ``window_size``)
    representing how the window is shifted along the waveform. Each row is a frame.

    Args:
        waveform (Tensor): Tensor of size ``num_samples``
        window_size (int): Frame length
        window_shift (int): Frame shift
        snip_edges (bool): If True, end effects will be handled by outputting only frames that completely fit
            in the file, and the number of frames depends on the frame_length.  If False, the number of frames
            depends only on the frame_shift, and we reflect the data at the ends.

    Returns:
        Tensor: 2D tensor of size (m, ``window_size``) where each row is a frame
    """
    assert waveform.dim() == 1
    num_samples = waveform.size(0)
    strides = (window_shift * waveform.stride(0), waveform.stride(0))

    if snip_edges:
        if num_samples < window_size:
            return torch.empty((0, 0), dtype=waveform.dtype, device=waveform.device)
        else:
            m = 1 + (num_samples - window_size) // window_shift
    else:
        reversed_waveform = torch.flip(waveform, [0])
        m = (num_samples + (window_shift // 2)) // window_shift
        pad = window_size // 2 - window_shift // 2
        pad_right = reversed_waveform
        if pad > 0:
            # torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'
            # but we want [2, 1, 0, 0, 1, 2]
            pad_left = reversed_waveform[-pad:]
            waveform = torch.cat((pad_left, waveform, pad_right), dim=0)
        else:
            # pad is negative so we want to trim the waveform at the front
            waveform = torch.cat((waveform[-pad:], pad_right), dim=0)

    sizes = (m, window_size)
    return waveform.as_strided(sizes, strides) 
開發者ID:pytorch,項目名稱:audio,代碼行數:42,代碼來源:kaldi.py

示例10: inference_model

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def inference_model(model, loader, device, use_flip):
    """Run mask inference over ``loader``, keyed by image id.

    When ``use_flip`` is set, predictions are averaged with a horizontal-flip
    TTA pass: flip the batch on its width axis (NCHW dim 3), predict, un-flip
    the resulting masks (numpy axis 2), and take the mean of both passes.
    """
    predictions = {}
    for image_ids, images in tqdm(loader):
        masks = inference_image(model, images, device)
        if use_flip:
            mirrored = torch.flip(images, dims=(3,))
            mirrored_masks = inference_image(model, mirrored, device)
            # un-flip so the TTA masks align with the originals before averaging
            masks = (masks + np.flip(mirrored_masks, axis=2)) / 2
        for image_id, mask in zip(image_ids, masks):
            predictions[image_id] = mask.astype(np.float32)
    return predictions
開發者ID:sneddy,項目名稱:pneumothorax-segmentation,代碼行數:14,代碼來源:Inference.py

示例11: apply_tta

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def apply_tta(input):
    """Build the 8 test-time-augmentation variants of a [B,C,H,W] batch.

    Order: identity, flip on dim 2, flip on dim 3, rot90 k=1/2/3,
    then rot90 k=1 and k=3 of the dim-2 flip.
    """
    hflip = torch.flip(input, dims=[2])
    variants = [input, hflip, torch.flip(input, dims=[3])]
    variants.extend(torch.rot90(input, k=k, dims=[2, 3]) for k in (1, 2, 3))
    variants.append(torch.rot90(hflip, k=1, dims=[2, 3]))
    variants.append(torch.rot90(hflip, k=3, dims=[2, 3]))
    return variants
開發者ID:4uiiurz1,項目名稱:kaggle-aptos2019-blindness-detection,代碼行數:13,代碼來源:test.py

示例12: flip

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def flip(image, label=None):
    """With probability 0.5, flip ``image`` (and ``label`` when given) on dim 3 (width)."""
    do_flip = np.random.rand() < 0.5
    if do_flip:
        image = torch.flip(image, [3])
        label = None if label is None else torch.flip(label, [3])
    return image, label
開發者ID:XiaLiPKU,項目名稱:EMANet,代碼行數:8,代碼來源:dataset.py

示例13: _get_item

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def _get_item(self, image_id):
        """Load one (image, label) pair and run the train-time augmentation chain.

        Returns the first image of the fetched batch dim and the label as a
        long tensor (indexed [0, 0]), matching the original behaviour.
        """
        jpg_path = osp.join(self.data_root, 'JPEGImages', image_id + '.jpg')
        png_path = osp.join(self.data_root, 'SegmentationClassAug', 
                              image_id + '.png')

        sample = fetch(jpg_path, png_path)
        # each step takes and returns an (image, label) pair
        for transform in (scale, pad, crop, flip):
            sample = transform(*sample)
        image, label = sample

        return image[0], label[0, 0].long()
開發者ID:XiaLiPKU,項目名稱:EMANet,代碼行數:14,代碼來源:dataset.py

示例14: __call__

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def __call__(self, sample):
        """With probability 0.5, flip every tensor in ``sample`` along its last axis.

        For keys containing 'flow', the horizontal flow component (channel 0 of
        the last dim) changes sign under a left-right flip.
        """
        if random.random() >= 0.5:
            return sample
        for key, value in sample.items():
            if value is None or not isinstance(value, torch.Tensor):
                continue
            flipped = torch.flip(value, [-1])
            # if flow flipped, negate its horizontal component
            if 'flow' in key:
                flipped[:, :, 0] *= -1
            sample[key] = flipped

        return sample
開發者ID:DeepMotionAIResearch,項目名稱:DenseMatchingBenchmark,代碼行數:12,代碼來源:flow_trans.py

示例15: __call__

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import flip [as 別名]
def __call__(self, tensor):
        """Flip ``tensor`` along ``self.dims`` with probability ``self.p``."""
        if random.random() >= self.p:
            return tensor
        return torch.flip(tensor, dims=self.dims)
開發者ID:kakaobrain,項目名稱:autoclint,代碼行數:6,代碼來源:transforms.py


注:本文中的torch.flip方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。