

Python torch.cumsum Method Code Examples

This article collects typical usage examples of the Python method torch.cumsum. If you are wondering what torch.cumsum does, how to call it, or where it is used in practice, the curated code examples below should help. You can also explore other usage examples from the torch package.


Below are 15 code examples of torch.cumsum, sorted by popularity by default. Upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
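
Before the examples, a minimal sketch of torch.cumsum itself: it returns the running sum of a tensor's elements along a given dimension, leaving the shape unchanged.

import torch

x = torch.tensor([[1., 2., 3.],
                  [4., 5., 6.]])

# Running sum along the last dimension.
print(torch.cumsum(x, dim=-1))  # tensor([[ 1.,  3.,  6.], [ 4.,  9., 15.]])

# Running sum down the rows.
print(torch.cumsum(x, dim=0))   # tensor([[1., 2., 3.], [5., 7., 9.]])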

Example 1: _get_cdf_from_pr

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def _get_cdf_from_pr(pr):
    """
    :param pr: NHWL
    :return: NHW(L+1) as int16 on CPU!
    """
    N, H, W, _ = pr.shape

    precision = 16

    cdf = torch.cumsum(pr, -1)
    cdf = cdf.mul_(2**precision)
    cdf = cdf.round()
    cdf = torch.cat((torch.zeros((N, H, W, 1), dtype=cdf.dtype, device=cdf.device),
                     cdf), dim=-1)
    cdf = cdf.to('cpu', dtype=torch.int16, non_blocking=True)

    return cdf 
Author: fab-jul, Project: L3C-PyTorch, Lines: 19, Source: bitcoding.py
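
A hedged usage sketch for the function above (the NHWL probability tensor here is synthetic; in L3C it would come from the model's softmax output):

import torch

N, H, W, L = 2, 4, 4, 8
pr = torch.softmax(torch.randn(N, H, W, L), dim=-1)  # probabilities over L symbols
cdf = _get_cdf_from_pr(pr)
print(cdf.shape)   # torch.Size([2, 4, 4, 9]), i.e. NHW(L+1)
print(cdf.device)  # cpu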

Example 2: split

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def split(data, batch):
    node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)
    node_slice = torch.cat([torch.tensor([0]), node_slice])

    row, _ = data.edge_index
    edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)
    edge_slice = torch.cat([torch.tensor([0]), edge_slice])

    # Edge indices should start at zero for every graph.
    data.edge_index -= node_slice[batch[row]].unsqueeze(0)
    data.__num_nodes__ = torch.bincount(batch).tolist()

    slices = {'edge_index': edge_slice}
    if data.x is not None:
        slices['x'] = node_slice
    if data.edge_attr is not None:
        slices['edge_attr'] = edge_slice
    if data.y is not None:
        if data.y.size(0) == batch.size(0):
            slices['y'] = node_slice
        else:
            slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)

    return data, slices 
Author: rusty1s, Project: pytorch_geometric, Lines: 26, Source: tu.py
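
The core trick isolated: a cumulative sum over a bincount of the batch vector yields CSR-style slice boundaries. A minimal sketch with a toy node-to-graph assignment:

import numpy as np
import torch

batch = np.array([0, 0, 0, 1, 1, 2])  # node -> graph assignment
node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)
node_slice = torch.cat([torch.tensor([0]), node_slice])
print(node_slice)  # tensor([0, 3, 5, 6])
# The nodes of graph g occupy rows node_slice[g] : node_slice[g + 1].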

Example 3: _merge

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def _merge(actions, h_l, c_l, h_r, c_r, h_p, c_p, mask):
        """
        This method merges left and right TreeLSTM states. It reuses the already precomputed states
        for the parent node, but still has to apply the correct masking.
        """
        cumsum = torch.cumsum(actions, dim=-1)
        mask_l = (1.0 - cumsum)[..., None]
        mask_r = (cumsum - actions)[..., None]
        mask = mask[..., None]
        actions = actions[..., None]
        # If the row of the mask matrix is zero, ignore everything calculated so far and copy the
        # corresponding left hidden states from the previous layer (the assumption is that padding
        # tokens are added on the right side, and an action that uses a padding token cannot be
        # sampled while the corresponding row of the mask is a nonzero vector).
        # Eventually, the leftmost state on top will contain the required value.
        h_p = (mask_l * h_l + actions * h_p + mask_r * h_r) * mask + h_l * (1. - mask)
        c_p = (mask_l * c_l + actions * c_p + mask_r * c_r) * mask + c_l * (1. - mask)
        return h_p, c_p 
Author: facebookresearch, Project: latent-treelstm, Lines: 19, Source: BinaryTreeBasedModule.py
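
The cumsum trick in isolation: for a one-hot action vector marking the merge position, 1 - cumsum masks everything to the left of that position and cumsum - actions everything to the right. A tiny sketch:

import torch

actions = torch.tensor([0., 0., 1., 0.])  # merge happens at position 2
cumsum = torch.cumsum(actions, dim=-1)    # tensor([0., 0., 1., 1.])
print(1.0 - cumsum)                       # tensor([1., 1., 0., 0.]) -> keep left states
print(cumsum - actions)                   # tensor([0., 0., 0., 1.]) -> keep right states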

Example 4: safe_cumprod

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def safe_cumprod(tensor, dim: int, eps: float = 1e-10):
    """
    An implementation of cumprod to prevent precision issue.
    cumprod(x)
    = [x1, x1x2, x1x2x3, ....]
    = [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...]
    = exp(cumsum(log(x)))
    """

    if (tensor + eps < 0).any().item():
        raise RuntimeError(
            "Safe cumprod can only take non-negative tensors as input. "
            "Consider using torch.cumprod if you want to calculate negative values."
        )

    log_tensor = torch.log(tensor + eps)
    cumsum_log_tensor = torch.cumsum(log_tensor, dim)
    exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor)
    return exp_cumsum_log_tensor 
Author: pytorch, Project: fairseq, Lines: 21, Source: functions.py
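
A quick usage sketch comparing safe_cumprod against the direct torch.cumprod on arbitrary non-negative values:

import torch

x = torch.tensor([0.5, 0.9, 0.1, 0.8])
print(safe_cumprod(x, dim=0))   # ~tensor([0.5000, 0.4500, 0.0450, 0.0360])
print(torch.cumprod(x, dim=0))  # same values, computed without the log-space detour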

Example 5: reduced_sequential

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def reduced_sequential(module, data, indices, out=None, dim_size=None):
  packed, pack_indices, counts = pack(data, indices)
  result, hidden = module(packed)
  last = torch.cumsum(counts, dim=0) - 1

  if dim_size is None:
    dim_size = indices.max() + 1
  if out is None:
    out = torch.zeros(
      dim_size, *result.shape[1:],
      dtype=data.dtype, device=data.device
    )
  out_hidden = torch.zeros_like(out)
  out[pack_indices] += result.data[last]
  out_hidden[pack_indices] += hidden.data[0]

  return out, out_hidden 
Author: mjendrusch, Project: torchsupport, Lines: 19, Source: scatter.py

Example 6: pairwise_no_pad

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def pairwise_no_pad(op, data, indices):
  unique, counts = indices.unique(return_counts=True)
  # For every element, count how many elements follow it within its group
  # (indices are assumed to be sorted by group).
  expansion = torch.cumsum(counts, dim=0)
  expansion = torch.repeat_interleave(expansion, counts)
  offset = torch.arange(0, counts.sum(), device=data.device)
  expansion = expansion - offset - 1
  # Repeat each element once per partner that follows it in its group.
  expanded = torch.repeat_interleave(data, expansion.to(data.device), dim=0)

  # For every pair, compute the index of its second (later) element.
  expansion_offset = counts.roll(1)
  expansion_offset[0] = 0
  expansion_offset = expansion_offset.cumsum(dim=0)
  expansion_offset = torch.repeat_interleave(expansion_offset, counts)
  expansion_offset = torch.repeat_interleave(expansion_offset, expansion)
  off_start = torch.repeat_interleave(
    torch.repeat_interleave(counts, counts) - expansion, expansion)
  access = torch.arange(expansion.sum(), device=data.device)
  access = (access
            - torch.repeat_interleave(expansion.roll(1).cumsum(dim=0), expansion)
            + off_start + expansion_offset)

  result = op(expanded, data[access.to(data.device)])
  return result, torch.repeat_interleave(indices, expansion, dim=0) 
Author: mjendrusch, Project: torchsupport, Lines: 21, Source: scatter.py
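
A usage sketch, traced by hand for a toy input: with subtraction as op, the function emits every ordered within-group pair without any padding.

import torch

data = torch.tensor([[1.], [2.], [3.], [10.], [20.]])
indices = torch.tensor([0, 0, 0, 1, 1])
result, pair_idx = pairwise_no_pad(lambda a, b: a - b, data, indices)
print(result.squeeze(-1))  # tensor([ -1.,  -2.,  -1., -10.]), i.e. (1-2), (1-3), (2-3), (10-20)
print(pair_idx)            # tensor([0, 0, 0, 1]), the group id of each pair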

Example 7: max

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def max(X):
        seq_len, n_batch, n_states = X.shape
        X_sorted, _ = torch.sort(X, dim=2, descending=True)
        cssv = torch.cumsum(X_sorted, dim=2) - 1
        ind = torch.arange(1, n_states + 1, dtype=X.dtype, device=X.device)
        cond = X_sorted - cssv / ind > 0
        rho = cond.long().sum(dim=2)
        cssv = cssv.view(-1, n_states)
        rho = rho.view(-1)

        tau = (torch.gather(cssv, dim=1, index=rho[:, None] - 1)[:, 0]
               / rho.type(X.type()))
        tau = tau.view(seq_len, n_batch)
        A = torch.clamp(X - tau[:, :, None], min=0)
        # A /= A.sum(dim=2, keepdim=True)

        M = torch.sum(A * (X - .5 * A), dim=2)

        return M.squeeze(), A.squeeze() 
Author: arthurmensch, Project: didyprog, Lines: 23, Source: viterbi.py
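
A hedged usage sketch; X is a synthetic (seq_len, n_batch, n_states) score tensor, and each row of A is a sparsemax-style projection onto the simplex, so it sums to one:

import torch

X = torch.randn(5, 3, 4)
M, A = max(X)         # the function above, which shadows the builtin max
print(M.shape)        # torch.Size([5, 3])
print(A.sum(dim=-1))  # all ones: each projection lands on the simplex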

Example 8: _compute_metrics

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def _compute_metrics(self, rank_hist, suffix=""):
        """Computes desired matrix from rank histogram"""
        metrics = {}
        n = torch.sum(rank_hist).item()

        ranks = torch.arange(1, self.dataset.num_entities() + 1).float().to(self.device)
        metrics["mean_rank" + suffix] = (
            (torch.sum(rank_hist * ranks).item() / n) if n > 0.0 else 0.0
        )

        reciprocal_ranks = 1.0 / ranks
        metrics["mean_reciprocal_rank" + suffix] = (
            (torch.sum(rank_hist * reciprocal_ranks).item() / n) if n > 0.0 else 0.0
        )

        hits_at_k = (
            (torch.cumsum(rank_hist[: max(self.hits_at_k_s)], dim=0) / n).tolist()
            if n > 0.0
            else [0.0] * max(self.hits_at_k_s)
        )

        for i, k in enumerate(self.hits_at_k_s):
            metrics["hits_at_{}{}".format(k, suffix)] = hits_at_k[k - 1]

        return metrics 
Author: uma-pi1, Project: kge, Lines: 27, Source: entity_ranking.py
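
The cumsum line in isolation: a rank histogram counts how many test predictions landed at each rank, and the running sum over the first k bins, divided by the total, is Hits@k. A toy sketch:

import torch

rank_hist = torch.tensor([4., 2., 1., 0., 3.])  # counts of ranks 1..5
hits = torch.cumsum(rank_hist, dim=0) / rank_hist.sum()
print(hits)  # tensor([0.4000, 0.6000, 0.7000, 0.7000, 1.0000]); hits[k-1] == Hits@k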

Example 9: pick_mixture_component

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def pick_mixture_component(w, seed=None):
        '''Randomly choose mixture component indices with probability given by
        the component weights w. Works on batches of component weights.

        w:      Weights of the mixture components, must be positive and sum to one
        seed:   Optional RNG seed for consistent decisions'''

        w_thresholds = torch.cumsum(w, dim=1)
        # Prepare local random number generator
        rng = torch.Generator(device=w.device)
        if isinstance(seed, int):
            rng = rng.manual_seed(seed)
        else:
            rng.seed()
        # Draw one uniform random number per batch row and compare against thresholds
        u = torch.rand(w.shape[0], 1, device=w.device, generator=rng)
        indices = torch.sum(u > w_thresholds, dim=1).int()
        # Return mixture component indices
        return indices 
Author: VLL-HD, Project: FrEIA, Lines: 21, Source: gaussian_mixture.py
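
A usage sketch with synthetic mixture weights (each row non-negative and summing to one, as the docstring requires):

import torch

w = torch.softmax(torch.randn(6, 3), dim=1)  # batch of 6 weight vectors over 3 components
idx = pick_mixture_component(w, seed=0)
print(idx)  # int32 tensor of shape (6,), values in {0, 1, 2}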

Example 10: quantiles

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def quantiles(self, quantiles, old_style=False):
        if self.size == 0:
            return torch.full((self.depth, len(quantiles)), torch.nan)
        summary, weights = self._weighted_summary()
        cumweights = torch.cumsum(weights, dim=-1) - weights / 2
        if old_style:
            # For consistency with numpy.percentile's old-style behavior
            cumweights -= cumweights[:,0:1].clone()
            cumweights /= cumweights[:,-1:].clone()
        else:
            cumweights /= torch.sum(weights, dim=-1, keepdim=True)
        result = torch.zeros(self.depth, len(quantiles),
                dtype=self.dtype, device=self.device)
        # numpy is needed for interpolation
        if not hasattr(quantiles, 'cpu'):
            quantiles = torch.Tensor(quantiles)
        nq = quantiles.cpu().numpy()
        ncw = cumweights.cpu().numpy()
        nsm = summary.cpu().numpy()
        for d in range(self.depth):
            result[d] = torch.tensor(numpy.interp(nq, ncw[d], nsm[d]),
                    dtype=self.dtype, device=self.device)
        return result 
Author: CSAILVision, Project: gandissect, Lines: 25, Source: runningstats.py
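
The cumsum construction in isolation: it places each summary sample at the midpoint of its weight mass, producing a weighted empirical CDF to interpolate against. A toy sketch:

import torch

weights = torch.tensor([[1., 2., 1.]])
cumweights = torch.cumsum(weights, dim=-1) - weights / 2
cumweights /= torch.sum(weights, dim=-1, keepdim=True)
print(cumweights)  # tensor([[0.1250, 0.5000, 0.8750]])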

Example 11: normalize

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def normalize(self, data):
        '''
        Given input data as taken from the training distribution,
        normalizes every channel to reflect quantile values,
        uniformly distributed, within [0, 1].
        '''
        assert self.size > 0
        assert data.shape[0] == self.depth
        summary, weights = self._weighted_summary()
        cumweights = torch.cumsum(weights, dim=-1) - weights / 2
        cumweights /= torch.sum(weights, dim=-1, keepdim=True)
        result = torch.zeros_like(data).float()
        # numpy is needed for interpolation
        ndata = data.cpu().numpy().reshape((data.shape[0], -1))
        ncw = cumweights.cpu().numpy()
        nsm = summary.cpu().numpy()
        for d in range(self.depth):
            normed = torch.tensor(numpy.interp(ndata[d], nsm[d], ncw[d]),
                dtype=torch.float, device=data.device).clamp_(0.0, 1.0)
            if len(data.shape) > 1:
                normed = normed.view(*(data.shape[1:]))
            result[d] = normed
        return result 
Author: CSAILVision, Project: gandissect, Lines: 25, Source: runningstats.py

Example 12: thermometer_encoding

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def thermometer_encoding(samples=None, level=None, device=None):
    """
    the help function to encode the samples using the thermometer encoding schema
    :param samples:
    :param level:
    :param device:
    :return:
    """
    assert level is not None and isinstance(level, int), 'level should be specified as an integer'
    assert torch.is_tensor(samples), "input samples must be a PyTorch tensor"
    if len(samples.shape) >= 4 and (samples.shape[1] == 1 or samples.shape[1] == 3):
        samples = samples.permute(0, 2, 3, 1)

    # convert one hot encoding to thermometer encoding
    one_hot_samples = one_hot_encoding(samples=samples, level=level, device=device)
    therm_samples = torch.cumsum(one_hot_samples, dim=-1)

    # the returned samples are NumPy data with shape [BatchSize, Channel * Level, Height, Width]
    shape = samples.shape
    therm_samples_numpy = torch.reshape(therm_samples, (shape[0], shape[1], shape[2], shape[3] * level))
    therm_samples_numpy = therm_samples_numpy.permute(0, 3, 1, 2).cpu().numpy()

    return therm_samples_numpy 
Author: kleincup, Project: DEEPSEC, Lines: 25, Source: TE.py
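
The cumsum step is what turns a one-hot code into a thermometer code: summing along the level axis switches on every position at or after the hot index. A self-contained sketch that does not depend on the one_hot_encoding helper above:

import torch
import torch.nn.functional as F

level = 4
x = torch.tensor([0.1, 0.4, 0.8])  # pixel values in [0, 1]
buckets = torch.clamp((x * level).long(), max=level - 1)  # quantization buckets [0, 1, 3]
therm = torch.cumsum(F.one_hot(buckets, num_classes=level).float(), dim=-1)
print(therm)
# tensor([[1., 1., 1., 1.],
#         [0., 1., 1., 1.],
#         [0., 0., 0., 1.]])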

Example 13: __adjust_last_row_sp0_m_ge_n

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def __adjust_last_row_sp0_m_ge_n(
        arr, lshape_map, last_diag_pr, row_inds, row_per_proc_list, tile_columns
    ):
        """

        Need to adjust the size of last row if arr.split == 0 and the diagonal ends before the
        last tile. This should only be run if arr,split == 0 and last_diag_pr < arr.comm.size - 1.
        """
        # need to find the amount of data after the diagonal
        lshape_cumsum = torch.cumsum(lshape_map[..., 0], dim=0)
        diff = lshape_cumsum[last_diag_pr] - arr.gshape[1]
        if diff > torch.true_divide(lshape_map[last_diag_pr, 0], 2):  # todo: tune this?
            # if the shape diff is > half the data on the process
            #   then add a row after the diagonal, todo: is multiple rows faster?
            row_inds.insert(tile_columns, diff)
            row_per_proc_list[last_diag_pr] += 1
        else:
            # if the diff is < half the data on the process
            #   then extend the last row inds to be the end of the process
            row_inds[tile_columns - 1] += diff 
Author: helmholtz-analytics, Project: heat, Lines: 22, Source: tiling.py

Example 14: forward

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def forward(ctx, input):
		'''
		In the forward pass we receive a context object and a Tensor containing the input;
		we must return a Tensor containing the output, and we can use the context object to cache objects for use in the backward pass.
		Specifically, ctx is a context object that can be used to stash information for backward computation.
		You can cache arbitrary objects for use in the backward pass using the ctx.save_for_backward method.
		:param ctx:
		:param input: i.e., batch_preds of [batch, ranking_size]; each row holds the relevance predictions for the documents of one query (the ltr_adhoc setting)
		:return: [batch, ranking_size], each row represents the log_cumsum_exp value
		'''

		m, _ = torch.max(input, dim=1, keepdim=True)  # max-shift for numerical stability when exponentiating
		y = torch.exp(input - m)
		y_cumsum_t2h = torch.flip(torch.cumsum(torch.flip(y, dims=[1]), dim=1), dims=[1])  # row-wise cumulative sum, from tail to head
		fd_output = torch.log(y_cumsum_t2h) + m  # add m back, undoing the earlier shift

		ctx.save_for_backward(input, fd_output)

		return fd_output 
Author: pt-ranking, Project: pt-ranking.github.io, Lines: 22, Source: listmle.py

Example 15: backward

# Required import: import torch [as alias]
# Alternatively: from torch import cumsum [as alias]
def backward(ctx, grad_output):
		'''
		In the backward pass we receive the context object and
		a Tensor containing the gradient of the loss with respect to the output produced during the forward pass (i.e., forward's output).
		We can retrieve cached data from the context object, and
		must compute and return the gradient of the loss with respect to the input to the forward function.
		Namely, grad_output is the gradient of the loss w.r.t. forward's output. Here we first compute the gradient (denoted as grad_out_wrt_in) of forward's output w.r.t. forward's input.
		Based on the chain rule, grad_output * grad_out_wrt_in would be the desired output, i.e., the gradient of the loss w.r.t. forward's input
		:param ctx:
		:param grad_output:
		:return:
		'''

		input, fd_output = ctx.saved_tensors
		#chain rule
		bk_output = grad_output * (torch.exp(input) * torch.cumsum(torch.exp(-fd_output), dim=1))

		return bk_output 
Author: pt-ranking, Project: pt-ranking.github.io, Lines: 20, Source: listmle.py
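
A hedged usage sketch that assembles the two methods above into a torch.autograd.Function subclass (the class name LogCumsumExp is an assumption; in pt-ranking these methods back the ListMLE loss) and checks the forward pass against the built-in torch.logcumsumexp (PyTorch >= 1.7). Note that the hand-written backward reproduces the true gradient when grad_output is constant along the ranking dimension, as it is for ListMLE's summed loss:

import torch

class LogCumsumExp(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        m, _ = torch.max(input, dim=1, keepdim=True)
        y = torch.exp(input - m)
        fd_output = torch.log(torch.flip(torch.cumsum(torch.flip(y, dims=[1]), dim=1), dims=[1])) + m
        ctx.save_for_backward(input, fd_output)
        return fd_output

    @staticmethod
    def backward(ctx, grad_output):
        input, fd_output = ctx.saved_tensors
        return grad_output * (torch.exp(input) * torch.cumsum(torch.exp(-fd_output), dim=1))

batch_preds = torch.randn(2, 5, requires_grad=True)
out = LogCumsumExp.apply(batch_preds)
ref = torch.flip(torch.logcumsumexp(torch.flip(batch_preds, dims=[1]), dim=1), dims=[1])
print(torch.allclose(out, ref, atol=1e-6))  # True
(out - batch_preds).sum().backward()        # ListMLE-style scalar loss; gradient flows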


Note: The torch.cumsum method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code belongs to the original authors. Please consult the corresponding project's License before distributing or using the code; do not repost without permission.