

Python torch.rand_like Method Code Examples

This article collects typical usage examples of the Python method torch.rand_like. If you are unsure how torch.rand_like is used in practice, the selected code examples below may help. You can also browse further usage examples for the torch module.


The following presents 15 code examples of the torch.rand_like method, sorted by popularity by default.
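
A minimal self-contained sketch (not taken from any of the projects below) of the basic behavior of torch.rand_like: it returns a new tensor with the same shape, dtype, and device as its input, filled with random numbers drawn uniformly from [0, 1).

import torch

# A reference tensor; rand_like copies its shape, dtype, and device.
x = torch.zeros(2, 3, dtype=torch.float32)

# noise matches x in shape, dtype, and device, with entries uniform in [0, 1).
noise = torch.rand_like(x)
print(noise.shape)   # torch.Size([2, 3])
print(noise.dtype)   # torch.float32
print(bool((noise >= 0).all() and (noise < 1).all()))  # True

Many of the examples below rely on this to add dequantization noise to image tensors or to draw random values matching the shape of an existing batch without spelling out its dimensions.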

Example 1: test_parallel_transport0_preserves_inner_products

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def test_parallel_transport0_preserves_inner_products(a, k):
    man = lorentz.Lorentz(k=k)
    a = man.projx(a)

    v_0 = torch.rand_like(a) + 1e-5
    u_0 = torch.rand_like(a) + 1e-5

    zero = torch.ones_like(a)
    d = zero.size(1) - 1
    zero = torch.cat(
        (zero.narrow(1, 0, 1) * torch.sqrt(k), zero.narrow(1, 1, d) * 0.0), dim=1
    )

    v_0 = man.proju(zero, v_0)  # project on tangent plane
    u_0 = man.proju(zero, u_0)  # project on tangent plane

    v_a = man.transp0(a, v_0)
    u_a = man.transp0(a, u_0)

    vu_0 = man.inner(v_0, u_0, keepdim=True)
    vu_a = man.inner(v_a, u_a, keepdim=True)
    np.testing.assert_allclose(vu_a, vu_0, atol=1e-5, rtol=1e-5) 
Developer ID: geoopt, Project: geoopt, Lines of code: 24, Source: test_lorentz_math.py

Example 2: test_parallel_transport0_back

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def test_parallel_transport0_back(a, b, k):
    man = lorentz.Lorentz(k=k)
    a = man.projx(a)
    b = man.projx(b)

    v_0 = torch.rand_like(a) + 1e-5
    v_0 = man.proju(a, v_0)  # project on tangent plane

    zero = torch.ones_like(a)
    d = zero.size(1) - 1
    zero = torch.cat(
        (zero.narrow(1, 0, 1) * torch.sqrt(k), zero.narrow(1, 1, d) * 0.0), dim=1
    )

    v_t = man.transp0back(a, v_0)
    v_t = man.transp0(b, v_t)

    v_s = man.transp(a, zero, v_0)
    v_s = man.transp(zero, b, v_s)

    np.testing.assert_allclose(v_t, v_s, atol=1e-5, rtol=1e-5) 
Developer ID: geoopt, Project: geoopt, Lines of code: 23, Source: test_lorentz_math.py

Example 3: test_weighted_midpoint_weighted_zero_sum

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def test_weighted_midpoint_weighted_zero_sum(_k, lincomb):
    manifold = stereographic.Stereographic(_k, learnable=True)
    a = manifold.expmap0(torch.eye(3, 10)).detach().requires_grad_(True)
    weights = torch.rand_like(a[..., 0])
    weights = weights - weights.sum() / weights.numel()
    mid = manifold.weighted_midpoint(
        a, lincomb=lincomb, weights=weights, posweight=True
    )
    if _k == 0 and lincomb:
        np.testing.assert_allclose(
            mid.detach(),
            torch.cat([weights, torch.zeros(a.size(-1) - a.size(0))]),
            atol=1e-6,
        )
    assert mid.shape == a.shape[-1:]
    assert torch.isfinite(mid).all()
    mid.sum().backward()
    assert torch.isfinite(a.grad).all() 
Developer ID: geoopt, Project: geoopt, Lines of code: 20, Source: test_gyrovector_math.py

Example 4: load_mnist

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def load_mnist(train=True, batch_size=1, num_workers=0):
    """Rescale and preprocess MNIST dataset."""
    mnist_transform = torchvision.transforms.Compose([
        # convert PIL image to tensor:
        torchvision.transforms.ToTensor(),
        # flatten:
        torchvision.transforms.Lambda(lambda x: x.view(-1)),
        # add uniform noise:
        torchvision.transforms.Lambda(lambda x: (x + torch.rand_like(x).div_(256.))),
        # rescale to [0,1]:
        torchvision.transforms.Lambda(lambda x: rescale(x, 0., 1.))
    ])
    return data.DataLoader(
        torchvision.datasets.MNIST(root="./datasets/mnist", train=train, transform=mnist_transform, download=False),
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=CUDA,
        drop_last=train
    )
Developer ID: paultsw, Project: nice_pytorch, Lines of code: 20, Source: train.py

Example 5: load_cifar10

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def load_cifar10(train=True, batch_size=1, num_workers=0):
    """Rescale and preprocess CIFAR10 dataset."""
    # check if ZCA matrix exists on dataset yet:
    assert os.path.exists("./datasets/cifar/zca_matrix.pt"), \
        "[load_cifar10] ZCA whitening matrix not built! Run `python make_datasets.py` first."
    zca_matrix = torch.load("./datasets/cifar/zca_matrix.pt")

    cifar10_transform = torchvision.transforms.Compose([
        # convert PIL image to tensor:
        torchvision.transforms.ToTensor(),
        # flatten:
        torchvision.transforms.Lambda(lambda x: x.view(-1)),
        # add uniform noise ~ [-1/256, +1/256]:
        torchvision.transforms.Lambda(lambda x: (x + torch.rand_like(x).div_(128.).add_(-1./256.))),
        # rescale to [-1,1]:
        torchvision.transforms.Lambda(lambda x: rescale(x,-1.,1.)),
        # exact ZCA:
        torchvision.transforms.LinearTransformation(zca_matrix)
    ])
    return data.DataLoader(
        torchvision.datasets.CIFAR10(root="./datasets/cifar", train=train, transform=cifar10_transform, download=False),
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=CUDA,
        drop_last=train
    )
Developer ID: paultsw, Project: nice_pytorch, Lines of code: 27, Source: train.py

Example 6: reset_parameters

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def reset_parameters(self, pairwise_idx=None):
        if pairwise_idx is None:
            idxs = range(len(self.messengers))
            if not self.fixed_weighting:
                self.unary_weight.data.fill_(self.init_unary_weight)
        else:
            idxs = [pairwise_idx]

        for i in idxs:
            self.messengers[i].reset_parameters()
            if isinstance(self.messengers[i], nn.Conv2d):
                # TODO: gaussian initialization for XY kernels?
                pass
            if self.compat[i] is not None:
                self.compat[i].weight.data[:, :, 0, 0] = 1.0 - th.eye(self.channels, dtype=th.float32)
                if self.perturbed_init:
                    perturb_range = 0.001
                    self.compat[i].weight.data.add_((th.rand_like(self.compat[i].weight.data) - 0.5) * perturb_range)
            self.pairwise_weights[i].data = th.ones_like(self.pairwise_weights[i]) * self.init_pairwise_weights[i] 
Developer ID: openseg-group, Project: openseg.pytorch, Lines of code: 21, Source: paccrf.py

Example 7: test_combination_invariant_loss_sdr

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def test_combination_invariant_loss_sdr():
    n_batch = 40
    n_samples = 16000
    n_sources = 2

    references = torch.randn(n_batch, n_samples, n_sources)

    noise_amount = [0.01, 0.05, 0.1, 0.5, 1.0]
    LossCPIT = ml.train.loss.CombinationInvariantLoss(
        loss_function=ml.train.loss.SISDRLoss())
    LossSDR = ml.train.loss.SISDRLoss()

    for n in noise_amount:
        estimates = references + n * torch.randn(n_batch, n_samples, n_sources)
        _loss_a = LossSDR(estimates, references).item()

        for shift in range(n_sources):
            sources_a = estimates[..., shift:]
            sources_b = estimates[..., :shift]
            sources_c = torch.rand_like(estimates)
            shifted_sources = torch.cat(
                [sources_a, sources_b, sources_c], dim=-1)
            _loss_b = LossCPIT(shifted_sources, references).item()
            assert np.allclose(_loss_a, _loss_b, atol=1e-4) 
Developer ID: nussl, Project: nussl, Lines of code: 26, Source: test_loss.py

Example 8: test_balance_by_size_latent

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def test_balance_by_size_latent():
    class Expand(nn.Module):
        def __init__(self, times):
            super().__init__()
            self.times = times

        def forward(self, x):
            for i in range(self.times):
                x = x + torch.rand_like(x, requires_grad=True)
            return x

    sample = torch.rand(10, 100, 100)

    model = nn.Sequential(*[Expand(i) for i in [1, 2, 3, 4, 5, 6]])
    balance = balance_by_size(2, model, sample)
    assert balance == [4, 2]

    model = nn.Sequential(*[Expand(i) for i in [6, 5, 4, 3, 2, 1]])
    balance = balance_by_size(2, model, sample)
    assert balance == [2, 4] 
Developer ID: kakaobrain, Project: torchgpipe, Lines of code: 22, Source: test_balance.py

Example 9: test_dopri5_adjoint_against_dopri5

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def test_dopri5_adjoint_against_dopri5(self):
        func, y0, t_points = self.problem()
        ys = torchdiffeq.odeint_adjoint(func, y0, t_points, method='dopri5')
        gradys = torch.rand_like(ys) * 0.1
        ys.backward(gradys)

        adj_y0_grad = y0.grad
        adj_t_grad = t_points.grad
        adj_A_grad = func.A.grad
        self.assertEqual(max_abs(func.unused_module.weight.grad), 0)
        self.assertEqual(max_abs(func.unused_module.bias.grad), 0)

        func, y0, t_points = self.problem()
        ys = torchdiffeq.odeint(func, y0, t_points, method='dopri5')
        ys.backward(gradys)

        self.assertLess(max_abs(y0.grad - adj_y0_grad), 3e-4)
        self.assertLess(max_abs(t_points.grad - adj_t_grad), 1e-4)
        self.assertLess(max_abs(func.A.grad - adj_A_grad), 2e-3) 
Developer ID: rtqichen, Project: torchdiffeq, Lines of code: 21, Source: gradient_tests.py

Example 10: test_adams_adjoint_against_dopri5

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def test_adams_adjoint_against_dopri5(self):
        func, y0, t_points = self.problem()
        ys_ = torchdiffeq.odeint_adjoint(func, y0, t_points, method='adams')
        gradys = torch.rand_like(ys_) * 0.1
        ys_.backward(gradys)

        adj_y0_grad = y0.grad
        adj_t_grad = t_points.grad
        adj_A_grad = func.A.grad
        self.assertEqual(max_abs(func.unused_module.weight.grad), 0)
        self.assertEqual(max_abs(func.unused_module.bias.grad), 0)

        func, y0, t_points = self.problem()
        ys = torchdiffeq.odeint(func, y0, t_points, method='dopri5')
        ys.backward(gradys)

        self.assertLess(max_abs(y0.grad - adj_y0_grad), 5e-2)
        self.assertLess(max_abs(t_points.grad - adj_t_grad), 5e-4)
        self.assertLess(max_abs(func.A.grad - adj_A_grad), 2e-2) 
Developer ID: rtqichen, Project: torchdiffeq, Lines of code: 21, Source: gradient_tests.py

Example 11: _pre_process

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def _pre_process(self, x):
        """Dequantize the input image `x` and convert to logits.

        Args:
            x (torch.Tensor): Input image.

        Returns:
            y (torch.Tensor): Dequantized logits of `x`.

        See Also:
            - Dequantization: https://arxiv.org/abs/1511.01844, Section 3.1
            - Modeling logits: https://arxiv.org/abs/1605.08803, Section 4.1
        """
        y = (x * 255. + torch.rand_like(x)) / 256.
        y = (2 * y - 1) * self.data_constraint
        y = (y + 1) / 2
        y = y.log() - (1. - y).log()

        # Save log-determinant of Jacobian of initial transform
        ldj = F.softplus(y) + F.softplus(-y) \
            - F.softplus((1. - self.data_constraint).log() - self.data_constraint.log())
        sldj = ldj.view(ldj.size(0), -1).sum(-1)

        return y, sldj 
Developer ID: chrischute, Project: real-nvp, Lines of code: 26, Source: real_nvp.py

Example 12: compute_rewards

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def compute_rewards(weights, dataset, epsilon=0.0):
    """
    Perform inference using epsilon-greedy contextual bandit (without updates).
    """
    context, rewards = dataset
    context = context.type(torch.float32)

    # compute scores:
    scores = torch.matmul(weights, context.t()).squeeze()
    explore = (torch.rand(scores.shape[1]) < epsilon).type(torch.float32)
    rand_scores = torch.rand_like(scores)
    scores.mul_(1 - explore).add_(rand_scores.mul(explore))

    # select arm and observe reward:
    selected_arms = scores.argmax(dim=0)
    return rewards[range(rewards.shape[0]), selected_arms] 
Developer ID: facebookresearch, Project: CrypTen, Lines of code: 18, Source: membership_inference.py

Example 13: test_sce_equals_ce

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def test_sce_equals_ce(self):
        # Does soft ce loss match classic ce loss when labels are one-hot?
        Y_golds = torch.LongTensor([0, 1, 2])
        Y_golds_probs = torch.Tensor(preds_to_probs(Y_golds.numpy(), num_classes=4))

        Y_probs = torch.rand_like(Y_golds_probs)
        Y_probs = Y_probs / Y_probs.sum(dim=1).reshape(-1, 1)

        ce_loss = F.cross_entropy(Y_probs, Y_golds, reduction="none")
        ces_loss = cross_entropy_with_probs(Y_probs, Y_golds_probs, reduction="none")
        np.testing.assert_equal(ce_loss.numpy(), ces_loss.numpy())

        ce_loss = F.cross_entropy(Y_probs, Y_golds, reduction="sum")
        ces_loss = cross_entropy_with_probs(Y_probs, Y_golds_probs, reduction="sum")
        np.testing.assert_equal(ce_loss.numpy(), ces_loss.numpy())

        ce_loss = F.cross_entropy(Y_probs, Y_golds, reduction="mean")
        ces_loss = cross_entropy_with_probs(Y_probs, Y_golds_probs, reduction="mean")
        np.testing.assert_equal(ce_loss.numpy(), ces_loss.numpy()) 
Developer ID: snorkel-team, Project: snorkel, Lines of code: 21, Source: test_loss.py

Example 14: test_init

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def test_init(self):
        x = torch.randn(self.batch_size, *self.inp_size_linear)
        x = x * torch.rand_like(x) + torch.randn_like(x)
        y = self.net_linear(x)
        # Channel-wise mean should be zero
        self.assertTrue(torch.allclose(y.transpose(0,1).contiguous().view(self.inp_size_linear[0], -1).mean(dim=-1),
                                       torch.zeros(self.inp_size_linear[0]), atol=1e-06))
        # Channel-wise std should be one
        self.assertTrue(torch.allclose(y.transpose(0,1).contiguous().view(self.inp_size_linear[0], -1).std(dim=-1),
                                       torch.ones(self.inp_size_linear[0]), atol=1e-06))

        x = torch.randn(self.batch_size, *self.inp_size_conv)
        x = x * torch.rand_like(x) + torch.randn_like(x)
        y = self.net_conv(x)
        # Channel-wise mean should be zero
        self.assertTrue(torch.allclose(y.transpose(0,1).contiguous().view(self.inp_size_conv[0], -1).mean(dim=-1),
                                       torch.zeros(self.inp_size_conv[0]), atol=1e-06))
        # Channel-wise std should be one
        self.assertTrue(torch.allclose(y.transpose(0,1).contiguous().view(self.inp_size_conv[0], -1).std(dim=-1),
                                       torch.ones(self.inp_size_conv[0]), atol=1e-06)) 
Developer ID: VLL-HD, Project: FrEIA, Lines of code: 22, Source: invertible_resnet.py

Example 15: select_action

# Required module: import torch [as alias]
# Alternatively: from torch import rand_like [as alias]
def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):

        # Assuming agent_inputs is a batch of Q-Values for each agent bav
        self.epsilon = self.schedule.eval(t_env)

        if test_mode:
            # Greedy action selection only
            self.epsilon = 0.0

        # mask actions that are excluded from selection
        masked_q_values = agent_inputs.clone()
        masked_q_values[avail_actions == 0.0] = -float("inf")  # should never be selected!

        random_numbers = th.rand_like(agent_inputs[:, :, 0])
        pick_random = (random_numbers < self.epsilon).long()
        random_actions = Categorical(avail_actions.float()).sample().long()

        picked_actions = pick_random * random_actions + (1 - pick_random) * masked_q_values.max(dim=2)[1]
        return picked_actions 
Developer ID: oxwhirl, Project: pymarl, Lines of code: 21, Source: action_selectors.py


Note: The torch.rand_like examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.