

Python torch.Size Method Code Examples

This article collects typical usage examples of the torch.Size method in Python. If you are wondering how torch.Size works in practice, or want to see concrete examples of it in real code, the curated snippets below should help. You can also explore further usage examples from the torch package that this method belongs to.


The text below presents 15 code examples of the torch.Size method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
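As a quick reminder before the examples: torch.Size is the tuple subclass that PyTorch uses to describe tensor shapes, so it supports indexing, unpacking, and comparison against plain tuples. The short sketch below was written for this article (it is not taken from any of the projects that follow) and only illustrates these basics.

import torch

# torch.Size is an immutable tuple of dimension sizes.
shape = torch.Size([2, 3, 4])
x = torch.zeros(shape)             # factory functions accept a Size directly

print(x.shape)                     # torch.Size([2, 3, 4])
print(x.shape == (2, 3, 4))        # True: a Size compares equal to a plain tuple
print(x.shape[0], x.shape[-1])     # 2 4: ordinary tuple indexing

batch, channels, length = x.shape  # tuple unpacking also works
print(batch * channels * length)   # 24, the total number of elements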

Example 1: image_to_object

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def image_to_object(images, pose, object_size):
  '''
  Inverse pose, crop and transform image patches.
  param images: (... x C x H x W) tensor
  param pose: (N x 3) or (N x 6) tensor
  '''
  N, pose_size = pose.size()
  n_channels, H, W = images.size()[-3:]
  images = images.view(N, n_channels, H, W)
  if pose_size == 3:
    transformer_inv = expand_pose(pose_inv(pose))
  elif pose_size == 6:
    transformer_inv = pose_inv_full(pose)

  grid = F.affine_grid(transformer_inv,
                       torch.Size((N, n_channels, object_size, object_size)))
  obj = F.grid_sample(images, grid)
  return obj 
Developer ID: jthsieh, Project: DDPAE-video-prediction, Lines of code: 20, Source: DDPAE_utils.py

Example 2: object_to_image

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def object_to_image(objects, pose, image_size):
  '''
  param objects: (N x C x H x W) tensor
  param pose: (N x 3) or (N x 6) tensor
  '''
  N, pose_size = pose.size()
  _, n_channels, _, _ = objects.size()
  if pose_size == 3:
    transformer = expand_pose(pose)
  elif pose_size == 6:
    transformer = pose.view(N, 2, 3)

  grid = F.affine_grid(transformer,
                       torch.Size((N, n_channels, image_size, image_size)))
  components = F.grid_sample(objects, grid)
  return components 
Developer ID: jthsieh, Project: DDPAE-video-prediction, Lines of code: 18, Source: DDPAE_utils.py

Example 3: generate_grid

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def generate_grid(num_grid, size, device):
    """Generate regular square grid of points in [0, 1] x [0, 1] coordinate
    space.

    Args:
        num_grid (int): The number of grids to sample, one for each region.
        size (tuple(int, int)): The side size of the regular grid.
        device (torch.device): Desired device of returned tensor.

    Returns:
        (torch.Tensor): A tensor of shape (num_grid, size[0]*size[1], 2) that
            contains coordinates for the regular grids.
    """

    affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device)
    grid = F.affine_grid(
        affine_trans, torch.Size((1, 1, *size)), align_corners=False)
    grid = normalize(grid)
    return grid.view(1, -1, 2).expand(num_grid, -1, -1) 
Developer ID: open-mmlab, Project: mmdetection, Lines of code: 21, Source: point_sample.py

Example 4: test_resnext_backbone

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def test_resnext_backbone():
    with pytest.raises(KeyError):
        # ResNeXt depth should be in [50, 101, 152]
        ResNeXt(depth=18)

    # Test ResNeXt with group 32, base_width 4
    model = ResNeXt(depth=50, groups=32, base_width=4)
    for m in model.modules():
        if is_block(m):
            assert m.conv2.groups == 32
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7]) 
Developer ID: open-mmlab, Project: mmdetection, Lines of code: 22, Source: test_backbones.py

Example 5: test_res2net_backbone

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def test_res2net_backbone():
    with pytest.raises(KeyError):
        # Res2Net depth should be in [50, 101, 152]
        Res2Net(depth=18)

    # Test Res2Net with scales 4, base_width 26
    model = Res2Net(depth=50, scales=4, base_width=26)
    for m in model.modules():
        if is_block(m):
            assert m.scales == 4
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7]) 
Developer ID: open-mmlab, Project: mmdetection, Lines of code: 22, Source: test_backbones.py

Example 6: _affine_grid_gen

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def _affine_grid_gen(rois, input_size, grid_size):

    rois = rois.detach()
    x1 = rois[:, 1::4] / 16.0
    y1 = rois[:, 2::4] / 16.0
    x2 = rois[:, 3::4] / 16.0
    y2 = rois[:, 4::4] / 16.0

    height = input_size[0]
    width = input_size[1]

    zero = Variable(rois.data.new(rois.size(0), 1).zero_())
    theta = torch.cat([\
      (x2 - x1) / (width - 1),
      zero,
      (x1 + x2 - width + 1) / (width - 1),
      zero,
      (y2 - y1) / (height - 1),
      (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)

    grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, grid_size, grid_size)))

    return grid 
Developer ID: guoruoqian, Project: cascade-rcnn_Pytorch, Lines of code: 25, Source: net_utils.py

Example 7: __init__

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def __init__(self, thresh=1e-8, projDim=8192, input_dim=512):
    super(CBP, self).__init__()
    self.thresh = thresh
    self.projDim = projDim
    self.input_dim = input_dim
    self.output_dim = projDim
    torch.manual_seed(1)
    self.h_ = [
        torch.randint(0, self.output_dim, (self.input_dim,), dtype=torch.long),
        torch.randint(0, self.output_dim, (self.input_dim,), dtype=torch.long)
    ]
    self.weights_ = [
        (2 * torch.randint(0, 2, (self.input_dim,)) - 1).float(),
        (2 * torch.randint(0, 2, (self.input_dim,)) - 1).float()
    ]

    indices1 = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                          self.h_[0].reshape(1, -1)), dim=0)
    indices2 = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                          self.h_[1].reshape(1, -1)), dim=0)

    self.sparseM = [
        torch.sparse.FloatTensor(indices1, self.weights_[0],
                                 torch.Size([self.input_dim, self.output_dim])).to_dense(),
        torch.sparse.FloatTensor(indices2, self.weights_[1],
                                 torch.Size([self.input_dim, self.output_dim])).to_dense(),
    ]
Developer ID: jiangtaoxie, Project: fast-MPN-COV, Lines of code: 27, Source: CBP.py
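The CBP constructor above passes a torch.Size as the dense shape of a sparse projection matrix. The standalone sketch below was written for this article (it uses the current torch.sparse_coo_tensor constructor rather than the project's legacy torch.sparse.FloatTensor call) and shows the same pattern in isolation.

import torch

# Three non-zero entries of a 3 x 4 matrix, given as (row, col) coordinates.
indices = torch.tensor([[0, 1, 2],
                        [3, 0, 2]])
values = torch.tensor([1.0, -1.0, 1.0])

# torch.Size specifies the dense shape that the sparse tensor represents.
shape = torch.Size([3, 4])
sparse = torch.sparse_coo_tensor(indices, values, shape)

print(sparse.to_dense().shape)  # torch.Size([3, 4])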

Example 8: size_getter

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def size_getter(shape: Union[int, Tuple[int, ...], torch.Size]) -> torch.Size:
    """
    Helper function for defining a size object.
    :param shape: The shape
    :return: Size object
    """

    if shape is None:
        return torch.Size([])
    elif isinstance(shape, torch.Size):
        return shape
    elif isinstance(shape, int):
        return torch.Size([shape])

    return torch.Size(shape)


# NB: This is basically the same as original, but we include the prior as well 
Developer ID: tingiskhan, Project: pyfilter, Lines of code: 20, Source: parameter.py
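For reference, here is what each branch of the helper above returns; the outputs follow directly from its branches, and the snippet assumes size_getter from the example is in scope.

import torch

print(size_getter(None))             # torch.Size([])
print(size_getter(5))                # torch.Size([5])
print(size_getter((2, 3)))           # torch.Size([2, 3])
print(size_getter(torch.Size([4])))  # torch.Size([4]), returned unchanged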

Example 9: __init__

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def __init__(self, theta, initial_dist, dt, num_steps=10):
        """
        Implements an SIR model where the number of sick individuals has been replaced by the fraction of the
        population that is sick. Model taken from this article: https://arxiv.org/pdf/2004.06680.pdf
        :param theta: The parameters (beta, gamma, sigma)
        """

        if initial_dist.event_shape != torch.Size([3]):
            raise NotImplementedError('Must be of size 3!')

        def g(x, beta, gamma, sigma):
            g1 = -sigma * x[..., 0] * x[..., 1]
            g3 = torch.zeros_like(g1)

            return concater(g1, -g1, g3)

        inc_dist = Independent(Normal(torch.zeros(1), math.sqrt(dt) * torch.ones(1)), 1)

        super().__init__((f, g), theta, initial_dist, inc_dist, dt=dt, num_steps=num_steps) 
Developer ID: tingiskhan, Project: pyfilter, Lines of code: 21, Source: sir.py

Example 10: test_UnscentedTransform2D

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def test_UnscentedTransform2D(self):
        # ===== 2D model ===== #
        mat = torch.eye(2)
        scale = torch.diag(mat)

        norm = Normal(0., 1.)
        mvn = MultivariateNormal(torch.zeros(2), torch.eye(2))
        mvnlinear = AffineProcess((fmvn, g), (mat, scale), mvn, mvn)
        mvnoblinear = AffineObservations((fomvn, gomvn), (1.,), norm)

        mvnmodel = StateSpaceModel(mvnlinear, mvnoblinear)

        # ===== Perform unscented transform ===== #
        uft = UnscentedFilterTransform(mvnmodel)
        res = uft.initialize(3000)
        p = uft.predict(res)
        c = uft.correct(0., p)

        assert isinstance(c.x_dist(), MultivariateNormal) and c.x_dist().mean.shape == torch.Size([3000, 2]) 
Developer ID: tingiskhan, Project: pyfilter, Lines of code: 21, Source: utils.py

Example 11: test_LinearNoBatch

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def test_LinearNoBatch(self):
        norm = Normal(0., 1.)
        linear = AffineProcess((f, g), (1., 1.), norm, norm)

        # ===== Initialize ===== #
        x = linear.i_sample()

        # ===== Propagate ===== #
        num = 100
        samps = [x]
        for t in range(num):
            samps.append(linear.propagate(samps[-1]))

        samps = torch.stack(samps)
        self.assertEqual(samps.size(), torch.Size([num + 1]))

        # ===== Sample path ===== #
        path = linear.sample_path(num + 1)
        self.assertEqual(samps.shape, path.shape) 
Developer ID: tingiskhan, Project: pyfilter, Lines of code: 21, Source: timeseries.py

Example 12: test_LinearBatch

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def test_LinearBatch(self):
        norm = Normal(0., 1.)
        linear = AffineProcess((f, g), (1., 1.), norm, norm)

        # ===== Initialize ===== #
        shape = 1000, 100
        x = linear.i_sample(shape)

        # ===== Propagate ===== #
        num = 100
        samps = [x]
        for t in range(num):
            samps.append(linear.propagate(samps[-1]))

        samps = torch.stack(samps)
        self.assertEqual(samps.size(), torch.Size([num + 1, *shape]))

        # ===== Sample path ===== #
        path = linear.sample_path(num + 1, shape)
        self.assertEqual(samps.shape, path.shape) 
Developer ID: tingiskhan, Project: pyfilter, Lines of code: 22, Source: timeseries.py

Example 13: test_BatchedParameter

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def test_BatchedParameter(self):
        norm = Normal(0., 1.)
        shape = 1000, 100

        a = torch.ones((shape[0], 1))

        init = Normal(a, 1.)
        linear = AffineProcess((f, g), (a, 1.), init, norm)

        # ===== Initialize ===== #
        x = linear.i_sample(shape)

        # ===== Propagate ===== #
        num = 100
        samps = [x]
        for t in range(num):
            samps.append(linear.propagate(samps[-1]))

        samps = torch.stack(samps)
        self.assertEqual(samps.size(), torch.Size([num + 1, *shape]))

        # ===== Sample path ===== #
        path = linear.sample_path(num + 1, shape)
        self.assertEqual(samps.shape, path.shape) 
Developer ID: tingiskhan, Project: pyfilter, Lines of code: 26, Source: timeseries.py

Example 14: test_MultiDimensional

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def test_MultiDimensional(self):
        mu = torch.zeros(2)
        scale = torch.ones_like(mu)

        shape = 1000, 100

        mvn = Independent(Normal(mu, scale), 1)
        mvn = AffineProcess((f, g), (1., 1.), mvn, mvn)

        # ===== Initialize ===== #
        x = mvn.i_sample(shape)

        # ===== Propagate ===== #
        num = 100
        samps = [x]
        for t in range(num):
            samps.append(mvn.propagate(samps[-1]))

        samps = torch.stack(samps)
        self.assertEqual(samps.size(), torch.Size([num + 1, *shape, *mu.shape]))

        # ===== Sample path ===== #
        path = mvn.sample_path(num + 1, shape)
        self.assertEqual(samps.shape, path.shape) 
Developer ID: tingiskhan, Project: pyfilter, Lines of code: 26, Source: timeseries.py

Example 15: test_SDE

# Required module: import torch [as alias]
# Or: from torch import Size [as alias]
def test_SDE(self):
        shape = 1000, 100

        a = 1e-2 * torch.ones((shape[0], 1))
        dt = 0.1
        norm = Normal(0., math.sqrt(dt))

        init = Normal(a, 1.)
        sde = AffineEulerMaruyama((f_sde, g_sde), (a, 0.15), init, norm, dt=dt, num_steps=10)

        # ===== Initialize ===== #
        x = sde.i_sample(shape)

        # ===== Propagate ===== #
        num = 100
        samps = [x]
        for t in range(num):
            samps.append(sde.propagate(samps[-1]))

        samps = torch.stack(samps)
        self.assertEqual(samps.size(), torch.Size([num + 1, *shape]))

        # ===== Sample path ===== #
        path = sde.sample_path(num + 1, shape)
        self.assertEqual(samps.shape, path.shape) 
Developer ID: tingiskhan, Project: pyfilter, Lines of code: 27, Source: timeseries.py


Note: The torch.Size examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.