

Python torch.rand Function Code Examples

This article collects typical usage examples of the torch.rand function in Python. If you are wondering what exactly torch.rand does, how to call it, or what it looks like in real code, the curated examples below may help.


Fifteen code examples of the rand function are shown below, sorted by popularity by default.
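Before the collected examples, here is a minimal sketch of how torch.rand itself is typically called (size arguments, dtype, device, and seeding). This snippet is illustrative only and assumes a standard PyTorch installation:

import torch

torch.manual_seed(0)                          # make the draws reproducible
x = torch.rand(5, 3)                          # uniform samples in [0, 1), shape (5, 3)
y = torch.rand((2, 4), dtype=torch.float64)   # the size may also be given as a tuple, with an explicit dtype
z = torch.rand(3, device="cpu")               # tensors can be created directly on a given device
print(x.shape, y.dtype, z.device)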

Example 1: setUp

    def setUp(self, size=(2, 5), batch=3, dtype=torch.float64, device=None,
              seed=None, mu=None, cov=None, A=None, b=None):
        '''Test the correctness of the batch implementation of mean().

        This function will stack `[1 * mu, 2 * mu, ..., batch * mu]`.
        Then, it will see whether the batch output is accurate or not.

        Args:
            size: Tuple size of matrix A.
            batch: The batch size (must be > 0).
            dtype: Data type of the tensors.
            device: Device on which to create the tensors.
            seed: Seed for the random number generator.
            mu: To test a specific mean mu.
            cov: To test a specific covariance matrix.
            A: To test a specific A matrix.
            b: To test a specific bias b.
        '''
        if seed is not None:
            torch.manual_seed(seed)
        if A is None:
            A = torch.rand(size, dtype=dtype, device=device)
        if b is None:
            b = torch.rand(size[0], dtype=dtype, device=device)
        if mu is None:
            mu = torch.rand(size[1], dtype=dtype, device=device)
        if cov is None:
            # `rand` here is the project's own random-matrix helper module (not
            # torch.rand); `definite` draws a random positive-definite matrix.
            cov = rand.definite(size[1], dtype=dtype, device=device,
                                positive=True, semi=False, norm=10**2)
        self.A = A
        self.b = b
        var = torch.diag(cov)
        self.batch_mean = torch.stack([(i + 1) * mu for i in range(batch)])
        self.batch_cov = torch.stack([(i + 1) * cov for i in range(batch)])
        self.batch_var = torch.stack([(i + 1) * var for i in range(batch)])
Author: ModarTensai, Project: network_moments, Lines of code: 35, Source file: tests.py

Example 2: unit_test

def unit_test(args):
    ''' test different (kinds of) predicate detectors '''
    print("Torch uninitialized 5x3 matrix:")
    x_t = torch.Tensor(5, 3)
    print(x_t)

    print("Torch randomly initialized 5x3 matrix X:")
    x_t = torch.rand(5, 3)
    if args.verbose:
        print(x_t)
        print("size:", x_t.size())

    print("Torch randomly initialized 5x3 matrix Y:")
    y_t = torch.rand(5, 3)
    if args.verbose:
        print(y_t)
    print("X + Y:")
    z_t = torch.add(x_t, y_t)
    print(z_t)


    print("slice (X + Y)[:, 1]:")
    print(z_t[:, 1])

    num_wrong = 0
    print("unit_test:  num_tests:", 1,
          " num_wrong:", num_wrong, " -- ", "FAIL" if num_wrong else "PASS")
Author: sprax, Project: python, Lines of code: 27, Source file: ptt.py

Example 3: test_FixedNoiseMultiTaskGP_single_output

    def test_FixedNoiseMultiTaskGP_single_output(self, cuda=False):
        for double in (False, True):
            tkwargs = {
                "device": torch.device("cuda") if cuda else torch.device("cpu"),
                "dtype": torch.double if double else torch.float,
            }
            model = _get_fixed_noise_model_single_output(**tkwargs)
            self.assertIsInstance(model, FixedNoiseMultiTaskGP)
            self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(
                model.task_covar_module.covar_factor.shape[-1], model._rank
            )

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            mll = fit_gpytorch_model(mll, options={"maxiter": 1})

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)
Author: saschwan, Project: botorch, Lines of code: 35, Source file: test_multitask.py

Example 4: test_FixedNoiseGP

    def test_FixedNoiseGP(self, cuda=False):
        for batch_shape in (torch.Size([]), torch.Size([2])):
            for num_outputs in (1, 2):
                for double in (False, True):
                    tkwargs = {
                        "device": torch.device("cuda") if cuda else torch.device("cpu"),
                        "dtype": torch.double if double else torch.float,
                    }
                    model = self._get_model(
                        batch_shape=batch_shape,
                        num_outputs=num_outputs,
                        n=10,
                        **tkwargs
                    )
                    self.assertIsInstance(model, FixedNoiseGP)
                    self.assertIsInstance(
                        model.likelihood, FixedNoiseGaussianLikelihood
                    )
                    self.assertIsInstance(model.mean_module, ConstantMean)
                    self.assertIsInstance(model.covar_module, ScaleKernel)
                    matern_kernel = model.covar_module.base_kernel
                    self.assertIsInstance(matern_kernel, MaternKernel)
                    self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)

                    # test model fitting
                    mll = ExactMarginalLogLikelihood(model.likelihood, model)
                    mll = fit_gpytorch_model(mll, options={"maxiter": 1})

                    # Test forward
                    test_x = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                    posterior = model(test_x)
                    self.assertIsInstance(posterior, MultivariateNormal)

                    # TODO: Pass observation noise into posterior
                    # posterior_obs = model.posterior(test_x, observation_noise=True)
                    # self.assertTrue(
                    #     torch.allclose(
                    #         posterior_f.variance + 0.01,
                    #         posterior_obs.variance
                    #     )
                    # )

                    # test posterior
                    # test non batch evaluation
                    X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape, batch_shape + torch.Size([3, num_outputs])
                    )
                    # test batch evaluation
                    X = torch.rand(
                        torch.Size([2]) + batch_shape + torch.Size([3, 1]), **tkwargs
                    )
                    posterior = model.posterior(X)
                    self.assertIsInstance(posterior, GPyTorchPosterior)
                    self.assertEqual(
                        posterior.mean.shape,
                        torch.Size([2]) + batch_shape + torch.Size([3, num_outputs]),
                    )
Author: saschwan, Project: botorch, Lines of code: 60, Source file: test_gp_regression.py

Example 5: sample_relax

def sample_relax(logits):
    # Sample z with the Gumbel-max trick.
    u = torch.rand(B, C).clamp(1e-12, 1. - 1e-12)  # .cuda()
    gumbels = -torch.log(-torch.log(u))
    z = logits + gumbels
    b = torch.argmax(z, dim=1)

    cat = Categorical(logits=logits)
    logprob = cat.log_prob(b).view(B, 1)

    # Sample z_tilde conditioned on the sampled class b.
    v_k = torch.rand(B, 1).clamp(1e-12, 1. - 1e-12)
    z_tilde_b = -torch.log(-torch.log(v_k))
    # This alternative seems biased even though it shouldn't be:
    # v_k = torch.gather(input=u, dim=1, index=b.view(B, 1))
    # z_tilde_b = torch.gather(input=z, dim=1, index=b.view(B, 1))

    v = torch.rand(B, C).clamp(1e-12, 1. - 1e-12)  # .cuda()
    probs = torch.softmax(logits, dim=1).repeat(B, 1)
    z_tilde = -torch.log((-torch.log(v) / probs) - torch.log(v_k))
    z_tilde.scatter_(dim=1, index=b.view(B, 1), src=z_tilde_b)

    return z, b, logprob, z_tilde
Author: chriscremer, Project: Other_Code, Lines of code: 34, Source file: plotting_cat_grads_dist_4.py

Example 6: test_forward_works_on_higher_order_input

 def test_forward_works_on_higher_order_input(self):
     params = Params({
             "words": {
                     "type": "embedding",
                     "num_embeddings": 20,
                     "embedding_dim": 2,
                     },
             "characters": {
                     "type": "character_encoding",
                     "embedding": {
                             "embedding_dim": 4,
                             "num_embeddings": 15,
                             },
                     "encoder": {
                             "type": "cnn",
                             "embedding_dim": 4,
                             "num_filters": 10,
                             "ngram_filter_sizes": [3],
                             },
                     }
             })
     token_embedder = BasicTextFieldEmbedder.from_params(self.vocab, params)
     inputs = {
             'words': Variable(torch.rand(3, 4, 5, 6) * 20).long(),
             'characters': Variable(torch.rand(3, 4, 5, 6, 7) * 15).long(),
             }
     assert token_embedder(inputs, num_wrapping_dims=2).size() == (3, 4, 5, 6, 12)
Author: Jordan-Sauchuk, Project: allennlp, Lines of code: 27, Source file: basic_text_field_embedder_test.py

Example 7: visualize_results

    def visualize_results(self, epoch, fix=True):
        self.G.eval()

        if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
            os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)

        tot_num_samples = min(self.sample_num, self.batch_size)
        image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))

        if fix:
            """ fixed noise """
            samples = self.G(self.sample_z_)
        else:
            """ random noise """
            if self.gpu_mode:
                sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
            else:
                sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)

            samples = self.G(sample_z_)

        if self.gpu_mode:
            samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
        else:
            samples = samples.data.numpy().transpose(0, 2, 3, 1)

        utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                          self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')
Author: zbxzc35, Project: pytorch-generative-model-collections, Lines of code: 28, Source file: GAN.py

Example 8: sample_relax_given_class

def sample_relax_given_class(logits, samp):

    cat = Categorical(logits=logits)

    u = torch.rand(B,C).clamp(1e-8, 1.-1e-8)
    gumbels = -torch.log(-torch.log(u))
    z = logits + gumbels

    b = samp #torch.argmax(z, dim=1)
    logprob = cat.log_prob(b).view(B,1)


    u_b = torch.gather(input=u, dim=1, index=b.view(B,1))
    z_tilde_b = -torch.log(-torch.log(u_b))
    
    z_tilde = -torch.log((- torch.log(u) / torch.softmax(logits, dim=1)) - torch.log(u_b))
    z_tilde.scatter_(dim=1, index=b.view(B,1), src=z_tilde_b)


    z = z_tilde

    u_b = torch.gather(input=u, dim=1, index=b.view(B,1))
    z_tilde_b = -torch.log(-torch.log(u_b))
    
    u = torch.rand(B,C).clamp(1e-8, 1.-1e-8)
    z_tilde = -torch.log((- torch.log(u) / torch.softmax(logits, dim=1)) - torch.log(u_b))
    z_tilde.scatter_(dim=1, index=b.view(B,1), src=z_tilde_b)

    return z, z_tilde, logprob
Author: chriscremer, Project: Other_Code, Lines of code: 29, Source file: plotting_cat_grads_dist.py

Example 9: test_forward_runs_with_non_bijective_mapping

 def test_forward_runs_with_non_bijective_mapping(self):
     elmo_fixtures_path = self.FIXTURES_ROOT / 'elmo'
     options_file = str(elmo_fixtures_path / 'options.json')
     weight_file = str(elmo_fixtures_path / 'lm_weights.hdf5')
     params = Params({
             "token_embedders": {
                     "words": {
                             "type": "embedding",
                             "num_embeddings": 20,
                             "embedding_dim": 2,
                             },
                     "elmo": {
                             "type": "elmo_token_embedder",
                             "options_file": options_file,
                             "weight_file": weight_file
                             },
                     },
             "embedder_to_indexer_map": {"words": ["words"], "elmo": ["elmo", "words"]}
             })
     token_embedder = BasicTextFieldEmbedder.from_params(self.vocab, params)
     inputs = {
             'words': (torch.rand(3, 6) * 20).long(),
             'elmo': (torch.rand(3, 6, 50) * 15).long(),
             }
     token_embedder(inputs)
Author: apmoore1, Project: allennlp, Lines of code: 25, Source file: basic_text_field_embedder_test.py

Example 10: grad2

def grad2():
    W = Variable(torch.rand(2, 2), requires_grad=True)
    W2 = Variable(torch.rand(2, 1), requires_grad=True)
    x1 = Variable(torch.rand(1, 2), requires_grad=True)
    x2 = Variable(torch.rand(1, 2), requires_grad=True)

    print("w: ")
    print(W)
    print("x1: ")
    print(x1)
    print("x2: ")
    print(x2)
    print("--------------------")

    y1 = torch.matmul(torch.matmul(x1, W), W2)
    print(torch.matmul(W, W2))
    # y = Variable(y, requires_grad=True)
    # print("y1:")
    # print(y1)

    y1.backward()
    # print(W.grad)
    print(x1.grad)

    # W.grad.data.zero_()
    # x1.grad.data.zero_()
    y2 = torch.matmul(torch.matmul(x2, W), W2)
    y2.backward()
    # print("y2: ")
    # print(y2)
    # print(W.grad)
    print(x2.grad)
Author: gonglixue, Project: PRML_Python, Lines of code: 32, Source file: gradient.py

Example 11: sample_relax

    def sample_relax(probs):
        # Sample z with the Gumbel-max trick.
        u = torch.rand(B, C)
        gumbels = -torch.log(-torch.log(u))
        z = torch.log(probs) + gumbels

        b = torch.argmax(z, dim=1)
        logprob = cat.log_prob(b)

        # Sample z_tilde conditioned on the sampled class b.
        u_b = torch.rand(B, 1)
        z_tilde_b = -torch.log(-torch.log(u_b))
        u = torch.rand(B, C)
        z_tilde = -torch.log((-torch.log(u) / probs) - torch.log(u_b))
        z_tilde[:, b] = z_tilde_b

        return z, b, logprob, z_tilde
Author: chriscremer, Project: Other_Code, Lines of code: 30, Source file: is_pz_grad_dependent_on_theta_2.py

Example 12: test_add_output_dim

 def test_add_output_dim(self, cuda=False):
     for double in (False, True):
         tkwargs = {
             "device": torch.device("cuda") if cuda else torch.device("cpu"),
             "dtype": torch.double if double else torch.float,
         }
         original_batch_shape = torch.Size([2])
         # check exception is raised
         X = torch.rand(2, 1, **tkwargs)
         with self.assertRaises(ValueError):
             add_output_dim(X=X, original_batch_shape=original_batch_shape)
         # test no new batch dims
         X = torch.rand(2, 2, 1, **tkwargs)
         X_out, output_dim_idx = add_output_dim(
             X=X, original_batch_shape=original_batch_shape
         )
         self.assertTrue(torch.equal(X_out, X.unsqueeze(0)))
         self.assertEqual(output_dim_idx, 0)
         # test new batch dims
         X = torch.rand(3, 2, 2, 1, **tkwargs)
         X_out, output_dim_idx = add_output_dim(
             X=X, original_batch_shape=original_batch_shape
         )
         self.assertTrue(torch.equal(X_out, X.unsqueeze(1)))
         self.assertEqual(output_dim_idx, 1)
Author: saschwan, Project: botorch, Lines of code: 25, Source file: test_utils.py

Example 13: run_test_argmax

def run_test_argmax():
    test_argmax = TestArgMax()
    k = torch.rand(4)
    v = torch.rand(4)
    y = torch.rand(4)
    loss = test_argmax(k, v, y)
    loss.backward()
Author: tianzhiliang, Project: test, Lines of code: 7, Source file: argmax_gradient.py

Example 14: visualize_results

    def visualize_results(self, epoch, fix=True):
        self.G.eval()

        if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
            os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)

        image_frame_dim = int(np.floor(np.sqrt(self.sample_num)))

        if fix:
            """ fixed noise """
            samples = self.G(self.sample_z_, self.sample_y_)
        else:
            """ random noise """
            temp = torch.LongTensor(self.batch_size, 1).random_() % 10
            sample_y_ = torch.FloatTensor(self.batch_size, 10)
            sample_y_.zero_()
            sample_y_.scatter_(1, temp, 1)
            if self.gpu_mode:
                sample_z_, sample_y_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True), \
                                       Variable(sample_y_.cuda(), volatile=True)
            else:
                sample_z_, sample_y_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True), \
                                       Variable(sample_y_, volatile=True)

            samples = self.G(sample_z_, sample_y_)

        if self.gpu_mode:
            samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
        else:
            samples = samples.data.numpy().transpose(0, 2, 3, 1)

        utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                          self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')
Author: zbxzc35, Project: pytorch-generative-model-collections, Lines of code: 33, Source file: ACGAN.py

Example 15: setUp

  def setUp(self):
    # Tests will use 3 filters and image width, height = 2 X 2

    # Batch size 1
    x = torch.ones((1, 3, 2, 2))
    x[0, 0, 1, 0] = 1.1
    x[0, 0, 1, 1] = 1.2
    x[0, 1, 0, 1] = 1.2
    x[0, 2, 1, 0] = 1.3
    self.x = x
    self.gradient = torch.rand(x.shape)

    # Batch size 2
    x = torch.ones((2, 3, 2, 2))
    x[0, 0, 1, 0] = 1.1
    x[0, 0, 1, 1] = 1.2
    x[0, 1, 0, 1] = 1.2
    x[0, 2, 1, 0] = 1.3

    x[1, 0, 0, 0] = 1.4
    x[1, 1, 0, 0] = 1.5
    x[1, 1, 0, 1] = 1.6
    x[1, 2, 1, 1] = 1.7
    self.x2 = x
    self.gradient2 = torch.rand(x.shape)

    # All equal
    self.dutyCycle = torch.zeros((1, 3, 1, 1))
    self.dutyCycle[:] = 1.0 / 3.0
Author: rhyolight, Project: nupic.research, Lines of code: 29, Source file: k_winners_cnn_test.py


Note: The torch.rand examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.