

Python torch.arange Function Code Examples

This article collects and summarizes typical usage examples of the torch.arange function in Python. If you have been wondering how the arange function works, how to call it, or what real-world usage looks like, the curated code examples below may help.


Fifteen code examples of the arange function are shown below, sorted by popularity by default.
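Before the project examples, here is a minimal, self-contained sketch of the basic torch.arange call forms (standard PyTorch behavior; the printed values are illustrative):

import torch

# torch.arange(end): integers in the half-open range [0, end)
print(torch.arange(5))          # tensor([0, 1, 2, 3, 4])

# torch.arange(start, end, step): values in [start, end) with the given step
print(torch.arange(1, 10, 2))   # tensor([1, 3, 5, 7, 9])

# The dtype follows the arguments: integer arguments give an integer tensor,
# a floating-point argument (or an explicit dtype=) gives a float tensor.
print(torch.arange(0., 5.))                  # tensor([0., 1., 2., 3., 4.])
print(torch.arange(5, dtype=torch.float32))  # tensor([0., 1., 2., 3., 4.])

Because the end value is excluded, several examples below write torch.arange(0, x) to get exactly x consecutive indices.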

Example 1: test_broadcast_subspace

 def test_broadcast_subspace(self):
     a = zeros((100, 100))
     v = Variable(torch.arange(0, 100))[:, None]
     b = Variable(torch.arange(99, -1, -1).long())
     a[b] = v
     expected = b.double().unsqueeze(1).expand(100, 100)
     self.assertEqual(a, expected)
Developer: bhuWenDongchao, Project: pytorch, Lines: 7, Source: test_indexing.py

Example 2: __call__

    def __call__(self, spec_f):

        spec_f, is_variable = _check_is_variable(spec_f)
        n_fft = spec_f.size(2)

        m_min = 0. if self.f_min == 0 else 2595 * np.log10(1. + (self.f_min / 700))
        m_max = 2595 * np.log10(1. + (self.f_max / 700))

        m_pts = torch.linspace(m_min, m_max, self.n_mels + 2)
        f_pts = (700 * (10**(m_pts / 2595) - 1))

        bins = torch.floor(((n_fft - 1) * 2) * f_pts / self.sr).long()

        fb = torch.zeros(n_fft, self.n_mels)
        for m in range(1, self.n_mels + 1):
            f_m_minus = bins[m - 1].item()
            f_m = bins[m].item()
            f_m_plus = bins[m + 1].item()

            if f_m_minus != f_m:
                fb[f_m_minus:f_m, m - 1] = (torch.arange(f_m_minus, f_m) - f_m_minus) / (f_m - f_m_minus)
            if f_m != f_m_plus:
                fb[f_m:f_m_plus, m - 1] = (f_m_plus - torch.arange(f_m, f_m_plus)) / (f_m_plus - f_m)

        fb = Variable(fb)
        spec_m = torch.matmul(spec_f, fb)  # (c, l, n_fft) dot (n_fft, n_mels) -> (c, l, n_mels)
        return spec_m if is_variable else spec_m.data
Developer: SsnL, Project: audio, Lines: 27, Source: transforms.py

Example 3: __call__

    def __call__(self, grid):
        batch_size, _, grid_dimX, grid_dimY, grid_dimZ = grid.size()

        k = 1.0

        x_coords = 2.0 * k * torch.arange(grid_dimX, dtype=torch.float32).unsqueeze(1).unsqueeze(1
            ).expand(grid_dimX, grid_dimY, grid_dimZ) / (grid_dimX - 1.0) - 1.0
        y_coords = 2.0 * k * torch.arange(grid_dimY, dtype=torch.float32).unsqueeze(1).unsqueeze(0
            ).expand(grid_dimX, grid_dimY, grid_dimZ) / (grid_dimY - 1.0) - 1.0
        z_coords = 2.0 * k * torch.arange(grid_dimZ, dtype=torch.float32).unsqueeze(0).unsqueeze(0
            ).expand(grid_dimX, grid_dimY, grid_dimZ) / (grid_dimZ - 1.0) - 1.0

        coords = torch.stack((x_coords, y_coords, z_coords), dim=0)

        if self.with_r:
            rs = ((x_coords ** 2) + (y_coords ** 2) + (z_coords ** 2)) ** 0.5
            rs = k * rs / torch.max(rs)
            rs = torch.unsqueeze(rs, dim=0)
            coords = torch.cat((coords, rs), dim=0)

        coords = torch.unsqueeze(coords, dim=0).repeat(batch_size, 1, 1, 1, 1)

        grid = torch.cat((coords.to(grid.device), grid), dim=1)

        return grid
Developer: caskeep, Project: 3D-SIS, Lines: 25, Source: coord_conv3d.py

Example 4: meshgrid

def meshgrid(x, y, row_major=True):
    '''Return meshgrid in range x & y.

    Args:
      x: (int) first dim range.
      y: (int) second dim range.
      row_major: (bool) row major or column major.

    Returns:
      (tensor) meshgrid, sized [x*y,2]

    Example:
    >> meshgrid(3,2)
    0  0
    1  0
    2  0
    0  1
    1  1
    2  1
    [torch.FloatTensor of size 6x2]

    >> meshgrid(3,2,row_major=False)
    0  0
    0  1
    0  2
    1  0
    1  1
    1  2
    [torch.FloatTensor of size 6x2]
    '''
    a = torch.arange(0,x)
    b = torch.arange(0,y)
    xx = a.repeat(y).view(-1,1)
    yy = b.view(-1,1).repeat(1,x).view(-1,1)
    return torch.cat([xx,yy],1) if row_major else torch.cat([yy,xx],1)
Developer: hopstone, Project: pytorch-retinanet, Lines: 35, Source: utils.py

Example 5: make_positions

def make_positions(tensor, padding_idx, left_pad, onnx_trace=False):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1.

    Padding symbols are ignored, but it is necessary to specify whether padding
    is added on the left side (left_pad=True) or right side (left_pad=False).
    """
    if onnx_trace:
        range_buf = torch._dim_arange(like=tensor, dim=1) + padding_idx + 1
        mask = tensor.ne(padding_idx)
        positions = range_buf.expand_as(tensor)
        if left_pad:
            positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
        return positions * mask.long() + padding_idx * (1 - mask.long())

    max_pos = padding_idx + 1 + tensor.size(1)
    if not hasattr(make_positions, 'range_buf'):
        make_positions.range_buf = tensor.new()
    make_positions.range_buf = make_positions.range_buf.type_as(tensor)
    if make_positions.range_buf.numel() < max_pos:
        torch.arange(padding_idx + 1, max_pos, out=make_positions.range_buf)
    mask = tensor.ne(padding_idx)
    positions = make_positions.range_buf[:tensor.size(1)].expand_as(tensor)
    if left_pad:
        positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
    return tensor.clone().masked_scatter_(mask, positions[mask])
Developer: fyabc, Project: fairseq, Lines: 27, Source: utils.py
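As a quick illustration of what make_positions computes, here is a minimal sketch (not taken from fairseq itself; it assumes right-side padding, left_pad=False, and padding_idx=1):

import torch

padding_idx = 1
# Two right-padded sequences; the value 1 is the padding symbol.
tensor = torch.tensor([[5, 6, 7, 1],
                       [8, 9, 1, 1]])

mask = tensor.ne(padding_idx)
# Position numbers start at padding_idx + 1 and are built with torch.arange.
positions = torch.arange(padding_idx + 1,
                         padding_idx + 1 + tensor.size(1)).expand_as(tensor)
print(tensor.clone().masked_scatter_(mask, positions[mask]))
# tensor([[2, 3, 4, 1],
#         [2, 3, 1, 1]])

Non-padding symbols are replaced by their position numbers, while padding positions keep the value padding_idx.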

Example 6: test_cuda_small_tensors

    def test_cuda_small_tensors(self):
        # Check multiple small tensors which will likely use the same
        # underlying cached allocation
        ctx = mp.get_context('spawn')
        tensors = []
        for i in range(5):
            device = i % 2
            tensors += [torch.arange(i * 5, (i + 1) * 5).cuda(device)]

        inq = ctx.Queue()
        outq = ctx.Queue()
        inq.put(tensors)
        p = ctx.Process(target=sum_tensors, args=(inq, outq))
        p.start()

        results = []
        for i in range(5):
            results.append(outq.get())
        p.join()

        for i, tensor in enumerate(tensors):
            v, device, tensor_size, storage_size = results[i]
            self.assertEqual(v, torch.arange(i * 5, (i + 1) * 5).sum())
            self.assertEqual(device, i % 2)
            self.assertEqual(tensor_size, 5)
            self.assertEqual(storage_size, 5)
Developer: Northrend, Project: pytorch, Lines: 26, Source: test_multiprocessing.py

Example 7: test_int_assignment

    def test_int_assignment(self):
        x = Variable(torch.arange(0, 4).view(2, 2))
        x[1] = 5
        self.assertEqual(x.data.tolist(), [[0, 1], [5, 5]])

        x = Variable(torch.arange(0, 4).view(2, 2))
        x[1] = Variable(torch.arange(5, 7))
        self.assertEqual(x.data.tolist(), [[0, 1], [5, 6]])
Developer: bhuWenDongchao, Project: pytorch, Lines: 8, Source: test_indexing.py

Example 8: test_int_assignment

    def test_int_assignment(self):
        x = torch.arange(0, 4).view(2, 2)
        x[1] = 5
        self.assertEqual(x.tolist(), [[0, 1], [5, 5]])

        x = torch.arange(0, 4).view(2, 2)
        x[1] = torch.arange(5, 7)
        self.assertEqual(x.tolist(), [[0, 1], [5, 6]])
Developer: RichieMay, Project: pytorch, Lines: 8, Source: test_indexing.py

Example 9: test_byte_tensor_assignment

 def test_byte_tensor_assignment(self):
     x = Variable(torch.arange(0, 16).view(4, 4))
     b = Variable(torch.ByteTensor([True, False, True, False]))
     value = Variable(torch.Tensor([3, 4, 5, 6]))
     x[b] = value
     self.assertEqual(x[0], value)
     self.assertEqual(x[1].data, torch.arange(4, 8))
     self.assertEqual(x[2], value)
     self.assertEqual(x[3].data, torch.arange(12, 16))
Developer: bhuWenDongchao, Project: pytorch, Lines: 9, Source: test_indexing.py

Example 10: enumerate_support

 def enumerate_support(self):
     total_count = int(self.total_count.max())
     if not self.total_count.min() == total_count:
         raise NotImplementedError("Inhomogeneous total count not supported by `enumerate_support`.")
     values = self._new(1 + total_count,)
     torch.arange(1 + total_count, out=values)
     values = values.view((-1,) + (1,) * len(self._batch_shape))
     values = values.expand((-1,) + self._batch_shape)
     return values
Developer: lewisKit, Project: pyro, Lines: 9, Source: binomial.py

Example 11: test_byte_tensor_assignment

 def test_byte_tensor_assignment(self):
     x = torch.arange(0., 16).view(4, 4)
     b = torch.ByteTensor([True, False, True, False])
     value = torch.tensor([3., 4., 5., 6.])
     x[b] = value
     self.assertEqual(x[0], value)
     self.assertEqual(x[1], torch.arange(4, 8))
     self.assertEqual(x[2], value)
     self.assertEqual(x[3], torch.arange(12, 16))
Developer: RichieMay, Project: pytorch, Lines: 9, Source: test_indexing.py

Example 12: __init__

 def __init__(self, train_size, batch_size):
     self.num_data = train_size
     self.num_per_batch = int(train_size / batch_size)
     self.batch_size = batch_size
     self.range = torch.arange(0,batch_size).view(1, batch_size).long()
     self.leftover_flag = False
     if train_size % batch_size:
         self.leftover = torch.arange(self.num_per_batch*batch_size, train_size).long()
         self.leftover_flag = True
Developer: lianDaniel, Project: R-FCN.pytorch, Lines: 9, Source: trainval_net.py

Example 13: backward

 def backward(ctx, grad_output):
     idx = grad_output.data.new().long()
     torch.arange(0, ctx.input_numel, out=idx)
     idx = idx.view(ctx.input_size)
     idx_unfolded = idx.unfold(ctx.dim, ctx.size, ctx.step)
     idx_unfolded = idx_unfolded.contiguous().view(-1)
     grad_input = Variable(grad_output.data.new(ctx.input_numel).zero_())
     grad_output = grad_output.contiguous().view(-1)
     grad_input = grad_input.index_add(0, Variable(idx_unfolded), grad_output)
     return grad_input.view(ctx.input_size), None, None, None
Developer: athiwatp, Project: pytorch, Lines: 10, Source: tensor.py

Example 14: __call__

    def __call__(self, image):

        x_coords = 2.0 * torch.arange(self.image_height).unsqueeze(
            1).expand(self.image_height, self.image_width) / 255.0 - 1.0
        y_coords = 2.0 * torch.arange(self.image_width).unsqueeze(
            0).expand(self.image_height, self.image_width) / 255.0 - 1.0
        coords = torch.stack((x_coords, y_coords), dim=0)

        image = torch.cat((coords, image), dim=0)

        return image
Developer: davnov134, Project: instance-segmentation-pytorch, Lines: 11, Source: preprocess.py

Example 15: __init__

    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = torch.nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0., max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0., d_model, 2) * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer("pe", pe)
Developer: daixiangau, Project: naacl2019-select-pretraining-data-for-ner, Lines: 11, Source: transformer.py


Note: The torch.arange examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow each project's license. Do not reproduce this article without permission.