

Python torch.int64 Code Examples

This article collects typical, real-world usage examples of torch.int64 in Python. If you are wondering what torch.int64 is for, how to use it, or what code that uses it looks like in practice, the curated examples below should help. Strictly speaking, torch.int64 is a dtype object exposed by the torch module (equivalent to torch.long) rather than a method; the examples show it being passed wherever PyTorch expects a dtype. You can also explore further usage examples from the torch package.


The sections below present 15 code examples of torch.int64, ordered by popularity by default.

Example 1: calculate_outputs_and_gradients

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def calculate_outputs_and_gradients(inputs, model, target_label_idx, cuda=False):
    # do the pre-processing
    predict_idx = None
    gradients = []
    for input in inputs:
        input = pre_processing(input, cuda)
        output = model(input)
        output = F.softmax(output, dim=1)
        if target_label_idx is None:
            target_label_idx = torch.argmax(output, 1).item()
        index = np.ones((output.size()[0], 1)) * target_label_idx
        index = torch.tensor(index, dtype=torch.int64)
        if cuda:
            index = index.cuda()
        output = output.gather(1, index)
        # clear grad
        model.zero_grad()
        output.backward()
        gradient = input.grad.detach().cpu().numpy()[0]
        gradients.append(gradient)
    gradients = np.array(gradients)
    return gradients, target_label_idx 
Author: TianhongDai, Project: integrated-gradient-pytorch, Lines: 24, Source: utils.py
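
The index tensor handed to output.gather(1, index) above has to be integer-typed. A minimal, self-contained sketch of that requirement (unrelated to the project above, values made up):

import torch

probs = torch.tensor([[0.1, 0.7, 0.2],
                      [0.5, 0.3, 0.2]])               # e.g. softmax outputs, shape (2, 3)
target = torch.tensor([[1], [0]], dtype=torch.int64)  # one class index per row
picked = probs.gather(1, target)                      # tensor([[0.7000], [0.5000]])
# probs.gather(1, target.float()) would raise a RuntimeError, which is why the
# example converts the NumPy index array to dtype=torch.int64 before gathering.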

Example 2: mu_law_encoding

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def mu_law_encoding(
        x: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Encode signal based on mu-law companding.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This algorithm assumes the signal has been scaled to between -1 and 1 and
    returns a signal encoded with values from 0 to quantization_channels - 1.

    Args:
        x (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law encoding
    """
    mu = quantization_channels - 1.0
    if not x.is_floating_point():
        x = x.to(torch.float)
    mu = torch.tensor(mu, dtype=x.dtype)
    x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
    x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
    return x_mu 
Author: pytorch, Project: audio, Lines: 26, Source: functional.py
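
A short usage sketch for mu_law_encoding as defined above (the input waveform is made up):

import torch

waveform = torch.linspace(-1.0, 1.0, steps=5)   # already scaled to [-1, 1]
encoded = mu_law_encoding(waveform, quantization_channels=256)
print(encoded.dtype)  # torch.int64
print(encoded)        # tensor([  0,  16, 128, 239, 255])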

Example 3: overlay_boxes

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def overlay_boxes(self, image, predictions):
        """
        Adds the predicted boxes on top of the image

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `labels`.
        """
        labels = predictions.get_field("labels")
        boxes = predictions.bbox

        colors = self.compute_colors_for_labels(labels).tolist()

        for box, color in zip(boxes, colors):
            box = box.to(torch.int64)
            top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
            image = cv2.rectangle(
                image, tuple(top_left), tuple(bottom_right), tuple(color), 1
            )

        return image 
Author: Res2Net, Project: Res2Net-maskrcnn, Lines: 24, Source: predictor.py
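
A reduced sketch (independent of BoxList and OpenCV) of the dtype conversion used above: the predicted boxes are float tensors, but cv2.rectangle needs plain integer pixel coordinates, so each box is cast to int64 and unpacked with .tolist():

import torch

box = torch.tensor([12.7, 30.2, 45.9, 80.1])  # x1, y1, x2, y2 as floats
box = box.to(torch.int64)                     # tensor([12, 30, 45, 80]); truncates toward zero
top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
print(top_left, bottom_right)                 # [12, 30] [45, 80]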

Example 4: __init__

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def __init__(self, vocab, used_concepts):
        super().__init__()
        self.vocab = vocab
        self.used_concepts = used_concepts

        self.output_dims = [None, 0, 4]

        self.register_buffer('global2local', torch.zeros(len(self.vocab), dtype=torch.int64))
        for k, v in self.used_concepts.items():
            if v['type'] != 'attribute':
                continue

            self.output_dims[1] += len(v['values'])

            v = v['values']
            self.register_buffer('local2global_{}'.format(k), torch.zeros(len(v), dtype=torch.int64))
            for i, vv in enumerate(v):
                self.global2local[vocab.word2idx[vv]] = i
                getattr(self, 'local2global_{}'.format(k))[i] = vocab.word2idx[vv] 
Author: vacancy, Project: NSCL-PyTorch-Release, Lines: 21, Source: scene_graph_groundtruth.py
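
A reduced sketch of the same pattern with a hypothetical vocabulary (not the NSCL one): an int64 lookup table is registered as a buffer so it moves with the module across devices without being treated as a trainable parameter:

import torch
import torch.nn as nn

class TinyMapping(nn.Module):
    def __init__(self, vocab_size, concept_indices):
        super().__init__()
        self.register_buffer('global2local', torch.zeros(vocab_size, dtype=torch.int64))
        for local_idx, global_idx in enumerate(concept_indices):
            self.global2local[global_idx] = local_idx

m = TinyMapping(vocab_size=10, concept_indices=[7, 2, 5])
print(m.global2local)  # tensor([0, 0, 1, 0, 0, 2, 0, 0, 0, 0])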

Example 5: _get_epoch_indices

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def _get_epoch_indices(self, generator):
        """
        Create a list of dataset indices (with repeats) to use for one epoch.

        Args:
            generator (torch.Generator): pseudo random number generator used for
                stochastic rounding.

        Returns:
            torch.Tensor: list of dataset indices to use in one epoch. Each index
                is repeated based on its calculated repeat factor.
        """
        # Since repeat factors are fractional, we use stochastic rounding so
        # that the target repeat factor is achieved in expectation over the
        # course of training
        rands = torch.rand(len(self._frac_part), generator=generator)
        rep_factors = self._int_part + (rands < self._frac_part).float()
        # Construct a list of indices in which we repeat images as specified
        indices = []
        for dataset_index, rep_factor in enumerate(rep_factors):
            indices.extend([dataset_index] * int(rep_factor.item()))
        return torch.tensor(indices, dtype=torch.int64) 
Author: soeaver, Project: Parsing-R-CNN, Lines: 24, Source: repeat_factor.py
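
The stochastic rounding above realizes a fractional repeat factor f as floor(f) plus a Bernoulli draw with probability frac(f), so its expectation over epochs equals f. A self-contained sketch with made-up repeat factors:

import torch

rep_factors = torch.tensor([1.0, 2.3, 0.7])
int_part = torch.trunc(rep_factors)
frac_part = rep_factors - int_part

generator = torch.Generator().manual_seed(0)
rands = torch.rand(len(frac_part), generator=generator)
rounded = int_part + (rands < frac_part).float()   # each entry is floor(f) or floor(f) + 1

indices = []
for dataset_index, rep_factor in enumerate(rounded):
    indices.extend([dataset_index] * int(rep_factor.item()))
print(torch.tensor(indices, dtype=torch.int64))    # e.g. tensor([0, 1, 1, 2]) or tensor([0, 1, 1, 1, 2])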

Example 6: parsing_on_boxes

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def parsing_on_boxes(parsing, rois, heatmap_size):
    device = rois.device
    rois = rois.to(torch.device("cpu"))
    parsing_list = []
    for i in range(rois.shape[0]):
        parsing_ins = parsing[i].cpu().numpy()
        xmin, ymin, xmax, ymax = torch.round(rois[i]).int()
        cropped_parsing = parsing_ins[ymin:ymax, xmin:xmax]
        resized_parsing = cv2.resize(
            cropped_parsing,
            (heatmap_size[1], heatmap_size[0]),
            interpolation=cv2.INTER_NEAREST
        )
        parsing_list.append(torch.from_numpy(resized_parsing))

    if len(parsing_list) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.stack(parsing_list, dim=0).to(device, dtype=torch.int64) 
Author: soeaver, Project: Parsing-R-CNN, Lines: 20, Source: parsing.py
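
A minimal sketch of the return pattern above: the per-box list is stacked into one tensor and moved to the target device as int64, and an empty int64 tensor keeps the dtype consistent when there are no boxes (the dummy parsing maps here are made up):

import torch

parsing_list = [torch.zeros(4, 4), torch.ones(4, 4)]
if len(parsing_list) == 0:
    result = torch.empty(0, dtype=torch.int64)
else:
    result = torch.stack(parsing_list, dim=0).to('cpu', dtype=torch.int64)
print(result.shape, result.dtype)  # torch.Size([2, 4, 4]) torch.int64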

Example 7: parsing_on_boxes

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def parsing_on_boxes(parsing, rois, heatmap_size):
    device = rois.device
    rois = rois.to(torch.device("cpu"))
    parsing_list = []
    for i in range(rois.shape[0]):
        parsing_ins = parsing[i].cpu().numpy()
        xmin, ymin, xmax, ymax = torch.round(rois[i]).int()
        cropped_parsing = parsing_ins[max(0, ymin):ymax, max(0, xmin):xmax]
        resized_parsing = cv2.resize(
            cropped_parsing, (heatmap_size[1], heatmap_size[0]), interpolation=cv2.INTER_NEAREST
        )
        parsing_list.append(torch.from_numpy(resized_parsing))

    if len(parsing_list) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.stack(parsing_list, dim=0).to(device, dtype=torch.int64) 
Author: soeaver, Project: Parsing-R-CNN, Lines: 18, Source: loss.py

Example 8: forward

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def forward(self, belief, state):
    B, H, Z = belief.size(0), belief.size(1), state.size(1)
    belief, state = belief.unsqueeze(dim=1).expand(B, self.candidates, H).reshape(-1, H), state.unsqueeze(dim=1).expand(B, self.candidates, Z).reshape(-1, Z)
    # Initialize factorized belief over action sequences q(a_t:t+H) ~ N(0, I)
    action_mean, action_std_dev = torch.zeros(self.planning_horizon, B, 1, self.action_size, device=belief.device), torch.ones(self.planning_horizon, B, 1, self.action_size, device=belief.device)
    for _ in range(self.optimisation_iters):
      # Evaluate J action sequences from the current belief (over entire sequence at once, batched over particles)
      actions = (action_mean + action_std_dev * torch.randn(self.planning_horizon, B, self.candidates, self.action_size, device=action_mean.device)).view(self.planning_horizon, B * self.candidates, self.action_size)  # Sample actions (time x (batch x candidates) x actions)
      actions.clamp_(min=self.min_action, max=self.max_action)  # Clip action range
      # Sample next states
      beliefs, states, _, _ = self.transition_model(state, actions, belief)
      # Calculate expected returns (technically sum of rewards over planning horizon)
      returns = self.reward_model(beliefs.view(-1, H), states.view(-1, Z)).view(self.planning_horizon, -1).sum(dim=0)
      # Re-fit belief to the K best action sequences
      _, topk = returns.reshape(B, self.candidates).topk(self.top_candidates, dim=1, largest=True, sorted=False)
      topk += self.candidates * torch.arange(0, B, dtype=torch.int64, device=topk.device).unsqueeze(dim=1)  # Fix indices for unrolled actions
      best_actions = actions[:, topk.view(-1)].reshape(self.planning_horizon, B, self.top_candidates, self.action_size)
      # Update belief with new means and standard deviations
      action_mean, action_std_dev = best_actions.mean(dim=2, keepdim=True), best_actions.std(dim=2, unbiased=False, keepdim=True)
    # Return first action mean µ_t
    return action_mean[0].squeeze(dim=1) 
Author: Kaixhin, Project: PlaNet, Lines: 23, Source: planner.py
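
The line that adds candidates * torch.arange(0, B, dtype=torch.int64) converts per-row top-k indices into indices over the flattened (batch x candidates) dimension. A standalone sketch with made-up returns:

import torch

B, candidates, top_candidates = 2, 4, 2
returns = torch.tensor([[0.1, 0.9, 0.3, 0.5],
                        [0.8, 0.2, 0.7, 0.4]])
_, topk = returns.topk(top_candidates, dim=1, largest=True, sorted=False)
topk = topk + candidates * torch.arange(0, B, dtype=torch.int64).unsqueeze(dim=1)
flat = topk.view(-1)  # indices into a tensor whose first dimension has size B * candidates
print(flat)           # columns {1, 3} of row 0 and {0, 2} of row 1, offset to {1, 3} and {4, 6}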

Example 9: bdd_message_func

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def bdd_message_func(self, edges):
        """Message function for block-diagonal-decomposition regularizer"""
        if edges.src['h'].dtype == th.int64 and len(edges.src['h'].shape) == 1:
            raise TypeError('Block decomposition does not allow integer ID feature.')

        # calculate msg @ W_r before put msg into edge
        if self.low_mem:
            etypes = th.unique(edges.data['type'])
            msg = th.empty((edges.src['h'].shape[0], self.out_feat),
                           device=edges.src['h'].device)
            for etype in etypes:
                loc = edges.data['type'] == etype
                w = self.weight[etype].view(self.num_bases, self.submat_in, self.submat_out)
                src = edges.src['h'][loc].view(-1, self.num_bases, self.submat_in)
                sub_msg = th.einsum('abc,bcd->abd', src, w)
                sub_msg = sub_msg.reshape(-1, self.out_feat)
                msg[loc] = sub_msg
        else:
            weight = self.weight.index_select(0, edges.data['type']).view(
                -1, self.submat_in, self.submat_out)
            node = edges.src['h'].view(-1, 1, self.submat_in)
            msg = th.bmm(node, weight).view(-1, self.out_feat)
        if 'norm' in edges.data:
            msg = msg * edges.data['norm']
        return {'msg': msg} 
Author: dmlc, Project: dgl, Lines: 27, Source: relgraphconv.py
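
The initial dtype check above treats a 1-D int64 feature as raw node IDs, which block decomposition cannot handle. For the low-memory branch itself, here is a self-contained sketch: each source feature is split into num_bases blocks and multiplied by the matching block of a block-diagonal weight via einsum (all shapes are made up for illustration):

import torch as th

num_edges, num_bases, submat_in, submat_out = 5, 2, 3, 4
src = th.randn(num_edges, num_bases, submat_in)
w = th.randn(num_bases, submat_in, submat_out)
sub_msg = th.einsum('abc,bcd->abd', src, w)        # (num_edges, num_bases, submat_out)
msg = sub_msg.reshape(-1, num_bases * submat_out)  # (num_edges, out_feat)
print(msg.shape)                                   # torch.Size([5, 8])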

Example 10: torch_dtype_to_np_dtype

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
            torch.bool    : np.dtype(np.bool),
            torch.uint8   : np.dtype(np.uint8),
            torch.int8    : np.dtype(np.int8),
            torch.int16   : np.dtype(np.int16),
            torch.short   : np.dtype(np.int16),
            torch.int32   : np.dtype(np.int32),
            torch.int     : np.dtype(np.int32),
            torch.int64   : np.dtype(np.int64),
            torch.long    : np.dtype(np.int64),
            torch.float16 : np.dtype(np.float16),
            torch.half    : np.dtype(np.float16),
            torch.float32 : np.dtype(np.float32),
            torch.float   : np.dtype(np.float32),
            torch.float64 : np.dtype(np.float64),
            torch.double  : np.dtype(np.float64),
            }
    return dtype_dict[dtype]


# ---------------------- InferenceEngine internal types ------------------------ 
Author: pfnet-research, Project: chainer-compiler, Lines: 24, Source: types.py
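
A usage sketch for the mapping above. Note that it relies on the np.bool alias, which newer NumPy releases (1.24 and later) removed; there, np.bool_ or plain bool would be needed in the first dictionary entry:

import numpy as np
import torch

np_dtype = torch_dtype_to_np_dtype(torch.int64)
print(np_dtype)                        # int64
print(np_dtype == np.dtype(np.int64))  # True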

Example 11: __init__

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def __init__(self, dim, X, y, kernel, variance=1.0, N_max=None):
        super(GP, self).__init__()

        self.dim = torch.tensor([dim], requires_grad=False)
        self.kernel = kernel
        self.variance = torch.nn.Parameter(
                                transform_backward(torch.tensor([variance])))

        if torch.is_tensor(X):
            self.X = X
        else:
            self.X = torch.tensor(X, requires_grad=False, dtype=dtype)

        self.N_max = N_max
        self.N = self.X.size()[0]

        if isinstance(y, Sparse1DTensor):
            self.y = y
            ix = torch.tensor([k for k in y.ix.keys()], dtype=torch.int64)
            self.get_batch = BatchIndices(None, ix, self.N_max)
        else:
            # NOTE: see (1)
            self.y = torch.tensor(y.squeeze(), dtype=dtype,
                                  requires_grad=False)
            self.get_batch = BatchIndices(self.N, None, self.N_max) 
Author: rsheth80, Project: pmf-automl, Lines: 27, Source: gplvm.py
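
A reduced sketch of the sparse-target branch above (Sparse1DTensor is specific to that project, so a plain dict stands in for it here): the observed row indices are collected into an int64 tensor that can later be used for indexing:

import torch

observed = {3: 0.5, 7: -1.2, 9: 2.0}  # hypothetical stand-in: row index -> target value
ix = torch.tensor(list(observed.keys()), dtype=torch.int64)
print(ix)  # tensor([3, 7, 9])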

Example 12: grp_range_torch

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def grp_range_torch(a,dev):
    idx = torch.cumsum(a,0)
    id_arr = torch.ones(idx[-1],dtype = torch.int64,device=dev)
    id_arr[0] = 0
    id_arr[idx[:-1]] = -a[:-1]+1
    return torch.cumsum(id_arr,0) 
Author: edwardzhou130, Project: PolarSeg, Lines: 8, Source: ptBEV.py
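
A worked example of the helper above: given group sizes a, it returns, for every element, its position within its own group, as an int64 tensor:

import torch

a = torch.tensor([3, 2, 4])
out = grp_range_torch(a, torch.device('cpu'))
print(out)  # tensor([0, 1, 2, 0, 1, 0, 1, 2, 3])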

Example 13: save

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def save(filepath: str, src: Tensor, sample_rate: int, precision: int = 16, channels_first: bool = True) -> None:
    r"""See torchaudio.save"""

    ch_idx, len_idx = (0, 1) if channels_first else (1, 0)

    # check if save directory exists
    abs_dirpath = os.path.dirname(os.path.abspath(filepath))
    if not os.path.isdir(abs_dirpath):
        raise OSError("Directory does not exist: {}".format(abs_dirpath))
    # check that src is a CPU tensor
    _misc_ops.check_input(src)
    # Check/Fix shape of source data
    if src.dim() == 1:
        # 1d tensors as assumed to be mono signals
        src.unsqueeze_(ch_idx)
    elif src.dim() > 2 or src.size(ch_idx) > 16:
        # assumes num_channels < 16
        raise ValueError(
            "Expected format where C < 16, but found {}".format(src.size()))

    if channels_first:
        src = src.t()

    if src.dtype == torch.int64:
        # Soundfile doesn't support int64
        src = src.type(torch.int32)

    precision = "PCM_S8" if precision == 8 else "PCM_" + str(precision)

    return soundfile.write(filepath, src, sample_rate, precision) 
Author: pytorch, Project: audio, Lines: 32, Source: soundfile_backend.py
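
A minimal sketch of the dtype handling above: int64 audio is downcast to int32 before being handed to soundfile, because libsndfile has no 64-bit integer PCM subtype (the waveform here is random, purely for illustration):

import torch

src = torch.randint(-2**15, 2**15, (16000, 1), dtype=torch.int64)
if src.dtype == torch.int64:
    src = src.type(torch.int32)
print(src.dtype)  # torch.int32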

Example 14: _train_one_step

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def _train_one_step(self, X_tokens, label, X_mask):
        """Train the classifier for one optimization step.

        :param X_tokens: Tokenized and embedded training example
        :type X_tokens: torch.int64
        :param label: Label of the training example
        :type label: torch.int64
        :param X_mask: Mask differentiating tokens vs not tokens
        :type X_mask: torch.FloatTensor
        :return: losses, classifier prediction logits
        :rtype: tuple
        """
        self.opt.zero_grad()
        self.model.zero_grad()

        cls_predict_logits, _, _ = self.model(
            X_tokens, attention_mask=X_mask
        )  # dimensions: (batch_size, hidden_dim, sequence_length)

        sup_loss = torch.mean(self.loss_func(cls_predict_logits, label))
        losses = {"g_sup_loss": sup_loss.cpu().data}
        sup_loss.backward()

        # Clip the norm of the gradients to 1.0.
        # This is to help prevent the "exploding gradients" problem.
        # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)

        self.opt.step()
        return losses, cls_predict_logits 
Author: interpretml, Project: interpret-text, Lines: 31, Source: components.py
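
A reduced sketch of why the label tensor is typed torch.int64: when the classification loss is a cross-entropy over class indices, PyTorch expects those indices as int64 (the loss function and shapes here are assumptions, not taken from the project above):

import torch
import torch.nn as nn

loss_func = nn.CrossEntropyLoss()
logits = torch.randn(4, 3)                             # (batch_size, num_classes)
label = torch.tensor([0, 2, 1, 2], dtype=torch.int64)  # one class index per example
loss = loss_func(logits, label)
print(loss)  # a scalar tensor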

Example 15: generate_data

# Required import: import torch [as alias]
# Or: from torch import int64 [as alias]
def generate_data(batch, use_cuda):
    """Create a formatted and ordered data batch to use in the
    three player model.

    :param batch: A pandas dataframe containing the tokens, masks, counts, and
        labels associated with a batch of data
    :type batch: DataFrame
    :param use_cuda: whether to use CUDA
    :type use_cuda: bool
    :return: formatted and ordered tokens (x), masks (m), and
        labels (y) associated with a batch of data
    :rtype: dict
    """
    # sort for rnn happiness
    batch.sort_values("counts", inplace=True, ascending=False)

    x_mask = np.stack(batch["mask"], axis=0)
    # drop all zero columns
    zero_col_idxs = np.argwhere(np.all(x_mask[..., :] == 0, axis=0))
    x_mask = np.delete(x_mask, zero_col_idxs, axis=1)

    x_mat = np.stack(batch["tokens"], axis=0)
    # drop all zero columns
    x_mat = np.delete(x_mat, zero_col_idxs, axis=1)

    y_vec = np.stack(batch["labels"], axis=0)

    batch_x_ = Variable(torch.from_numpy(x_mat)).to(torch.int64)
    batch_m_ = Variable(torch.from_numpy(x_mask)).type(torch.FloatTensor)
    batch_y_ = Variable(torch.from_numpy(y_vec)).to(torch.int64)

    if use_cuda:
        batch_x_ = batch_x_.cuda()
        batch_m_ = batch_m_.cuda()
        batch_y_ = batch_y_.cuda()

    return {"x": batch_x_, "m": batch_m_, "y": batch_y_} 
Author: interpretml, Project: interpret-text, Lines: 39, Source: utils_introspective_rationale.py
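
A condensed sketch of the dtype handling above: token and label matrices coming from NumPy are cast to torch.int64, the integer type expected for embedding lookups and class-index losses. (Variable is kept in the original for legacy reasons; since PyTorch 0.4 it is deprecated and simply returns Tensors.)

import numpy as np
import torch

x_mat = np.array([[5, 2, 9], [1, 4, 0]])            # token ids (made up)
batch_x_ = torch.from_numpy(x_mat).to(torch.int64)  # int64 regardless of the NumPy default
print(batch_x_.dtype)  # torch.int64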


Note: The torch.int64 examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors, who retain copyright to the source code; please consult each project's license before redistributing or reusing it. Do not reproduce this article without permission.