

Python torch.arange Method Code Examples

This article collects typical usage examples of the Python torch.arange method. If you are wondering how torch.arange is called, what its arguments look like in practice, or simply want real-world examples, the curated snippets below may help. You can also explore further usage examples from the torch module that the method belongs to.


The following presents 15 code examples of the torch.arange method, sorted by popularity by default.
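
Before diving into the examples, here is a minimal sketch of the torch.arange call itself; the values in the comments follow directly from the documented semantics (end is exclusive, and integer arguments default to int64):

import torch

# torch.arange(end) counts 0, 1, ..., end - 1
idx = torch.arange(5)                      # tensor([0, 1, 2, 3, 4]), dtype=torch.int64

# torch.arange(start, end, step) excludes end, like Python's range()
odd = torch.arange(1, 10, 2)               # tensor([1, 3, 5, 7, 9])

# dtype and device can be set explicitly
frac = torch.arange(0.0, 1.0, 0.25)        # tensor([0.0000, 0.2500, 0.5000, 0.7500])
gpu_idx = torch.arange(3, device='cuda') if torch.cuda.is_available() else torch.arange(3)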

Example 1: _get_uncertainty

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def _get_uncertainty(self, mask_pred, labels):
        """Estimate uncertainty based on pred logits.

        We estimate uncertainty as L1 distance between 0.0 and the logits
        prediction in 'mask_pred' for the foreground class in `classes`.

        Args:
            mask_pred (Tensor): mask prediction logits, shape (num_rois,
                num_classes, mask_height, mask_width).

            labels (list[Tensor]): Either predicted or ground truth label for
                each predicted mask, of length num_rois.

        Returns:
            scores (Tensor): Uncertainty scores with the most uncertain
                locations having the highest uncertainty score,
                shape (num_rois, 1, mask_height, mask_width)
        """
        if mask_pred.shape[1] == 1:
            gt_class_logits = mask_pred.clone()
        else:
            inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
            gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
        return -torch.abs(gt_class_logits) 
Developer: open-mmlab, Project: mmdetection, Lines: 26, Source: mask_point_head.py
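
The arange call above drives the common "pick each ROI's own class channel" indexing pattern: a fresh 0..num_rois-1 index paired with the per-ROI label selects one slice per row. A standalone sketch of just that pattern, with made-up shapes rather than anything from mmdetection:

import torch

num_rois, num_classes, h, w = 4, 3, 2, 2
mask_pred = torch.randn(num_rois, num_classes, h, w)   # per-class mask logits
labels = torch.tensor([2, 0, 1, 2])                     # one class id per ROI

inds = torch.arange(num_rois)                           # [0, 1, 2, 3]
selected = mask_pred[inds, labels]                      # advanced indexing -> (num_rois, h, w)
uncertainty = -selected.abs().unsqueeze(1)              # (num_rois, 1, h, w), as in the docstring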

Example 2: __iter__

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def __iter__(self):
        # deterministically shuffle based on epoch
        if self.shuffle:
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()

        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices) 
Developer: open-mmlab, Project: mmdetection, Lines: 20, Source: distributed_sampler.py
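
The slice indices[self.rank:self.total_size:self.num_replicas] deals each replica a disjoint, interleaved share of the padded index list. A hypothetical toy run with 10 samples and 3 replicas shows the split:

import torch

dataset_len, num_replicas = 10, 3
num_samples = (dataset_len + num_replicas - 1) // num_replicas   # 4 indices per replica
total_size = num_samples * num_replicas                          # 12 after padding

indices = torch.arange(dataset_len).tolist()         # [0, 1, ..., 9]
indices += indices[:total_size - len(indices)]       # pad by repeating the first samples

for rank in range(num_replicas):
    shard = indices[rank:total_size:num_replicas]
    print(rank, shard)  # 0 -> [0, 3, 6, 9], 1 -> [1, 4, 7, 0], 2 -> [2, 5, 8, 1]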

Example 3: test

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def test(self, dataset):
        self.model.eval()
        with torch.no_grad():
            total_loss = 0.0
            predictions = torch.zeros(len(dataset), dtype=torch.float, device='cpu')
            indices = torch.arange(1, dataset.num_classes + 1, dtype=torch.float, device='cpu')
            for idx in tqdm(range(len(dataset)), desc='Testing epoch  ' + str(self.epoch) + ''):
                ltree, linput, rtree, rinput, label = dataset[idx]
                target = utils.map_label_to_target(label, dataset.num_classes)
                linput, rinput = linput.to(self.device), rinput.to(self.device)
                target = target.to(self.device)
                output = self.model(ltree, linput, rtree, rinput)
                loss = self.criterion(output, target)
                total_loss += loss.item()
                output = output.squeeze().to('cpu')
                predictions[idx] = torch.dot(indices, torch.exp(output))
        return total_loss / len(dataset), predictions 
Developer: dasguptar, Project: treelstm.pytorch, Lines: 19, Source: trainer.py
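
Here torch.arange(1, num_classes + 1) supplies the similarity scale 1..K, and the dot product with the exponentiated log-probabilities turns the predicted distribution into an expected score. A small sketch of just that step, using made-up numbers:

import torch

num_classes = 5
log_probs = torch.log(torch.tensor([0.05, 0.10, 0.20, 0.40, 0.25]))   # stand-in model output

indices = torch.arange(1, num_classes + 1, dtype=torch.float)          # [1., 2., 3., 4., 5.]
expected_score = torch.dot(indices, torch.exp(log_probs))              # 1*0.05 + 2*0.10 + ... = 3.70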

Example 4: forward

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def forward(self, x):
        # x is of shape: batchSize x dimInFeatures x numberNodesIn
        B = x.shape[0]
        F = x.shape[1]
        Nin = x.shape[2]
        # And now we add the zero padding
        if Nin < self.N:
            x = torch.cat((x,
                           torch.zeros(B, F, self.N-Nin)\
                                   .type(x.dtype).to(x.device)
                          ), dim = 2)
        # Compute the filter output
        u = LSIGF(self.weight, self.S, x, self.bias)
        # So far, u is of shape batchSize x dimOutFeatures x numberNodes
        # And we want to return a tensor of shape
        # batchSize x dimOutFeatures x numberNodesIn
        # since the nodes between numberNodesIn and numberNodes are not required
        if Nin < self.N:
            u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
        return u 
Developer: alelab-upenn, Project: graph-neural-networks, Lines: 22, Source: graphML.py
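
torch.index_select with a 0..Nin-1 arange keeps only the first Nin node columns, undoing the zero padding added before the filter. A minimal standalone sketch with illustrative shapes:

import torch

B, F_out, N, Nin = 2, 4, 6, 5            # padded to N nodes, only Nin are real
u = torch.randn(B, F_out, N)             # stand-in for the filter output

keep = torch.arange(Nin, device=u.device)        # [0, 1, 2, 3, 4]
u_trimmed = torch.index_select(u, 2, keep)       # shape (B, F_out, Nin)
assert u_trimmed.shape == (B, F_out, Nin)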

Example 5: plot_wh_methods

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200) 
Developer: zbyuan, Project: pruning_yolov3, Lines: 20, Source: utils.py

Example 6: get_entropy

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def get_entropy(self, pred, label):
        n, c, h, w = pred.size()
        label = label.unsqueeze(3).long()
        pred = F.softmax(pred, 1).permute(0, 2, 3, 1)
        one_hot_label = ((torch.arange(c)).cuda() == label).float()

        if self.eps == 0:
            prior = 0
        else:
            if self.priorType == 'gaussian':
                tensor = (torch.arange(c).cuda() - label).float()
                prior = NormalDist(tensor, c / 10)
            elif self.priorType == 'uniform':
                prior = 1 / (c-1)

        smoothed_label = (1 - self.eps) * one_hot_label + self.eps * prior * (1 - one_hot_label)
        entropy = smoothed_label * safe_log(pred) + (1 - smoothed_label) * safe_log(1 - pred)
        return -entropy 
Developer: miraiaroha, Project: ACAN, Lines: 20, Source: losses.py
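
The comparison (torch.arange(c) == label) builds the one-hot map by broadcasting the class axis against the label tensor. A CPU-only sketch of that trick with toy shapes (the prior term above is omitted):

import torch

n, c, h, w = 1, 4, 2, 2
label = torch.randint(0, c, (n, h, w)).unsqueeze(3).long()   # (n, h, w, 1) class ids

one_hot = (torch.arange(c) == label).float()                 # broadcast -> (n, h, w, c)
assert one_hot.shape == (n, h, w, c)
assert bool(torch.all(one_hot.sum(dim=-1) == 1))             # exactly one class per pixel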

Example 7: __init__

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def __init__(self, thresh=1e-8, projDim=8192, input_dim=512):
         super(CBP, self).__init__()
         self.thresh = thresh
         self.projDim = projDim
         self.input_dim = input_dim
         self.output_dim = projDim
         torch.manual_seed(1)
         self.h_ = [
                 torch.randint(0, self.output_dim, (self.input_dim,),dtype=torch.long),
                 torch.randint(0, self.output_dim, (self.input_dim,),dtype=torch.long)
         ]
         self.weights_ = [
             (2 * torch.randint(0, 2, (self.input_dim,)) - 1).float(),
             (2 * torch.randint(0, 2, (self.input_dim,)) - 1).float()
         ]

         indices1 = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                               self.h_[0].reshape(1, -1)), dim=0)
         indices2 = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                               self.h_[1].reshape(1, -1)), dim=0)

         self.sparseM = [
             torch.sparse.FloatTensor(indices1, self.weights_[0], torch.Size([self.input_dim, self.output_dim])).to_dense(),
             torch.sparse.FloatTensor(indices2, self.weights_[1], torch.Size([self.input_dim, self.output_dim])).to_dense(),
         ] 
Developer: jiangtaoxie, Project: fast-MPN-COV, Lines: 27, Source: CBP.py
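
In this Count Sketch setup, torch.arange(input_dim) provides the row coordinates and the random h_ vectors the column coordinates, so every input feature lands in exactly one random output bucket with a random ±1 sign. A small sketch of that property, using torch.sparse_coo_tensor (the current equivalent of the torch.sparse.FloatTensor constructor above) and illustrative dimensions:

import torch

input_dim, output_dim = 8, 32
torch.manual_seed(0)
h = torch.randint(0, output_dim, (input_dim,), dtype=torch.long)   # one bucket per feature
s = (2 * torch.randint(0, 2, (input_dim,)) - 1).float()            # random +-1 sign

indices = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                     h.reshape(1, -1)), dim=0)                     # (2, input_dim) coordinates
proj = torch.sparse_coo_tensor(indices, s, (input_dim, output_dim)).to_dense()

assert bool(torch.all((proj != 0).sum(dim=1) == 1))                # one nonzero bucket per row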

Example 8: _feature_window_function

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def _feature_window_function(window_type: str,
                             window_size: int,
                             blackman_coeff: float,
                             device: torch.device,
                             dtype: int,
                             ) -> Tensor:
    r"""Returns a window function with the given type and size
    """
    if window_type == HANNING:
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
    elif window_type == HAMMING:
        return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype)
    elif window_type == POVEY:
        # like hanning but goes to zero at edges
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85)
    elif window_type == RECTANGULAR:
        return torch.ones(window_size, device=device, dtype=dtype)
    elif window_type == BLACKMAN:
        a = 2 * math.pi / (window_size - 1)
        window_function = torch.arange(window_size, device=device, dtype=dtype)
        # can't use torch.blackman_window as they use different coefficients
        return (blackman_coeff - 0.5 * torch.cos(a * window_function) +
                (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)).to(device=device, dtype=dtype)
    else:
        raise Exception('Invalid window type ' + window_type) 
Developer: pytorch, Project: audio, Lines: 27, Source: kaldi.py
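
The BLACKMAN branch evaluates w[n] = blackman_coeff - 0.5*cos(a*n) + (0.5 - blackman_coeff)*cos(2*a*n), with torch.arange supplying the sample index n. A short sketch of that branch in isolation, using the classic Blackman coefficient 0.42 as an assumed value:

import math
import torch

window_size = 16
blackman_coeff = 0.42                                 # classic Blackman coefficient (assumed here)
a = 2 * math.pi / (window_size - 1)

n = torch.arange(window_size, dtype=torch.float32)    # sample indices 0 .. window_size - 1
window = (blackman_coeff
          - 0.5 * torch.cos(a * n)
          + (0.5 - blackman_coeff) * torch.cos(2 * a * n))

assert window.shape == (window_size,)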

Example 9: _test_istft_of_sine

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def _test_istft_of_sine(self, amplitude, L, n):
        # stft of amplitude*sin(2*pi/L*n*x) with the hop length and window size equaling L
        x = torch.arange(2 * L + 1, dtype=torch.get_default_dtype())
        sound = amplitude * torch.sin(2 * math.pi / L * x * n)
        # stft = torch.stft(sound, L, hop_length=L, win_length=L,
        #                   window=torch.ones(L), center=False, normalized=False)
        stft = torch.zeros((L // 2 + 1, 2, 2))
        stft_largest_val = (amplitude * L) / 2.0
        if n < stft.size(0):
            stft[n, :, 1] = -stft_largest_val

        if 0 <= L - n < stft.size(0):
            # symmetric about L // 2
            stft[L - n, :, 1] = stft_largest_val

        estimate = torchaudio.functional.istft(stft, L, hop_length=L, win_length=L,
                                               window=torch.ones(L), center=False, normalized=False)
        # There is a larger error due to the scaling of amplitude
        _compare_estimate(sound, estimate, atol=1e-3) 
Developer: pytorch, Project: audio, Lines: 21, Source: functional_cpu_test.py

Example 10: _test_get_strided_helper

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def _test_get_strided_helper(self, num_samples, window_size, window_shift, snip_edges):
        waveform = torch.arange(num_samples).float()
        output = kaldi._get_strided(waveform, window_size, window_shift, snip_edges)

        # from NumFrames in feature-window.cc
        n = window_size
        if snip_edges:
            m = 0 if num_samples < window_size else 1 + (num_samples - window_size) // window_shift
        else:
            m = (num_samples + (window_shift // 2)) // window_shift

        self.assertTrue(output.dim() == 2)
        self.assertTrue(output.shape[0] == m and output.shape[1] == n)

        window = torch.empty((m, window_size))

        for r in range(m):
            extract_window(window, waveform, r, window_size, window_shift, snip_edges)
        torch.testing.assert_allclose(window, output) 
Developer: pytorch, Project: audio, Lines: 21, Source: test_compliance_kaldi.py

Example 11: _create_data_set

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def _create_data_set(self):
        # used to generate the dataset to test on. this is not used in testing (offline procedure)
        test_filepath = common_utils.get_asset_path('kaldi_file.wav')
        sr = 16000
        x = torch.arange(0, 20).float()
        # between [-6,6]
        y = torch.cos(2 * math.pi * x) + 3 * torch.sin(math.pi * x) + 2 * torch.cos(x)
        # between [-2^30, 2^30]
        y = (y / 6 * (1 << 30)).long()
        # clear the last 16 bits because they aren't used anyways
        y = ((y >> 16) << 16).float()
        torchaudio.save(test_filepath, y, sr)
        sound, sample_rate = torchaudio.load(test_filepath, normalization=False)
        print(y >> 16)
        self.assertTrue(sample_rate == sr)
        torch.testing.assert_allclose(y, sound) 
Developer: pytorch, Project: audio, Lines: 18, Source: test_compliance_kaldi.py

Example 12: get_ts_loss

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def get_ts_loss(self, temporal_scores, ts_labels,  answer_indices):
        """
        Args:
            temporal_scores: (N, 5, Li, 2)
            ts_labels: dict(st=(N, ), ed=(N, ))
            answer_indices: (N, )

        Returns:

        """
        bsz = len(answer_indices)
        # compute loss
        ca_temporal_scores_st_ed = \
            temporal_scores[torch.arange(bsz, dtype=torch.long), answer_indices]  # (N, Li, 2)
        loss_st = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 0], ts_labels["st"])
        loss_ed = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 1], ts_labels["ed"])
        return (loss_st + loss_ed) / 2. 
Developer: jayleicn, Project: TVQAplus, Lines: 19, Source: stage.py
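
torch.arange(bsz) again acts as a batch index, so row i keeps only the temporal scores of its own answer. A toy sketch of the selection; shapes follow the docstring, values are random:

import torch

N, num_answers, Li = 3, 5, 7
temporal_scores = torch.randn(N, num_answers, Li, 2)
answer_indices = torch.tensor([4, 0, 2])                # correct answer id per sample

batch_idx = torch.arange(N, dtype=torch.long)           # [0, 1, 2]
ca_scores = temporal_scores[batch_idx, answer_indices]  # (N, Li, 2): one answer per sample
assert ca_scores.shape == (N, Li, 2)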

Example 13: decimate

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def decimate(tensor, m):
    """
    Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value.

    This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size.

    :param tensor: tensor to be decimated
    :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension
    :return: decimated tensor
    """
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            tensor = tensor.index_select(dim=d,
                                         index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long())

    return tensor 
Developer: zzzDavid, Project: ICDAR-2019-SROIE, Lines: 19, Source: utils.py
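
decimate relies on torch.arange(start=0, end=size, step=m) to keep every m-th index along a dimension. A minimal sketch of the same stepped-index trick on a made-up tensor:

import torch

t = torch.arange(24).reshape(4, 6)        # 4x6 toy tensor, values 0..23

# keep every 2nd row and every 3rd column
rows = torch.arange(start=0, end=t.size(0), step=2).long()   # [0, 2]
cols = torch.arange(start=0, end=t.size(1), step=3).long()   # [0, 3]

out = t.index_select(0, rows).index_select(1, cols)
# out == tensor([[ 0,  3],
#                [12, 15]])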

Example 14: forward

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def forward(self, batch_token_ids):
        batch_size, sent_len = batch_token_ids.size()
        device = batch_token_ids.device

        batch_pos_ids = torch.arange(
            sent_len, dtype=torch.long, device=device, requires_grad=False
        )
        batch_pos_ids = batch_pos_ids.unsqueeze(0).expand_as(batch_token_ids)

        batch_token_emb = self.token_embedding(batch_token_ids)
        batch_pos_emb = self.pos_embedding(batch_pos_ids)

        batch_token_emb = batch_token_emb + batch_pos_emb

        batch_token_out = self.layer_norm(batch_token_emb)
        batch_token_out = self.dropout(batch_token_out)

        return batch_token_out 
Developer: dolphin-zs, Project: Doc2EDAG, Lines: 20, Source: ner_model.py
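
torch.arange(sent_len) generates the position ids 0..sent_len-1 once, and unsqueeze(0).expand_as broadcasts them over the batch without copying. A standalone sketch of the id construction alone (the embedding layers are omitted):

import torch

batch_size, sent_len = 2, 6
batch_token_ids = torch.randint(0, 100, (batch_size, sent_len))

pos_ids = torch.arange(sent_len, dtype=torch.long, device=batch_token_ids.device)
batch_pos_ids = pos_ids.unsqueeze(0).expand_as(batch_token_ids)   # (batch_size, sent_len)

assert batch_pos_ids.shape == batch_token_ids.shape               # every row is [0, 1, 2, 3, 4, 5]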

Example 15: forward

# Required import: import torch [as alias]
# Or: from torch import arange [as alias]
def forward(self, batch_elem_emb, sent_pos_ids=None):
        if sent_pos_ids is None:
            num_elem = batch_elem_emb.size(-2)
            sent_pos_ids = torch.arange(
                num_elem, dtype=torch.long, device=batch_elem_emb.device, requires_grad=False
            )
        elif not isinstance(sent_pos_ids, torch.Tensor):
            sent_pos_ids = torch.tensor(
                sent_pos_ids, dtype=torch.long, device=batch_elem_emb.device, requires_grad=False
            )

        batch_pos_emb = self.embedding(sent_pos_ids)
        out = batch_elem_emb + batch_pos_emb
        out = self.dropout(self.layer_norm(out))

        return out 
Developer: dolphin-zs, Project: Doc2EDAG, Lines: 18, Source: dee_model.py


Note: The torch.arange examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please refer to each project's license before distributing or reusing the code, and do not reproduce this article without permission.