

Python torch.full Method Code Examples

This article collects typical code examples of the torch.full method in Python. If you are wondering what exactly torch.full does, how it is called in practice, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the torch module to which the method belongs.


The following presents 15 code examples of the torch.full method, sorted by popularity by default.
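
Before the examples, here is a minimal sketch (not drawn from any of the projects below) of the call pattern they all rely on: torch.full(size, fill_value, dtype=..., device=...) returns a new tensor of the given shape with every element set to fill_value.

import torch

# A 2x3 float tensor where every element is 7.0.
x = torch.full((2, 3), 7.0)

# The examples below usually pass dtype and device explicitly as well,
# e.g. a column of integer padding indices placed on the CPU.
pad = torch.full((4, 1), 0, dtype=torch.long, device=torch.device('cpu'))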

Example 1: generate_embedding

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def generate_embedding(bert_model, labels):
    """Generate bert's embedding from fine-tuned model."""
    batch_size, time = labels.shape

    cls_ids = torch.full(
        (batch_size, 1), bert_model.bert_text_encoder.cls_idx, dtype=labels.dtype, device=labels.device)
    bert_labels = torch.cat([cls_ids, labels], 1)
    # replace eos with sep
    eos_idx = bert_model.bert_text_encoder.eos_idx
    sep_idx = bert_model.bert_text_encoder.sep_idx
    bert_labels[bert_labels == eos_idx] = sep_idx

    embedding, _ = bert_model.bert(bert_labels, output_all_encoded_layers=True)
    # sum the embeddings over all layers
    embedding = torch.stack(embedding).sum(0)
    # get rid of cls
    embedding = embedding[:, 1:]

    assert labels.shape == embedding.shape[:-1]

    return embedding 
Author: Alexander-H-Liu, Project: End-to-end-ASR-Pytorch, Lines: 23, Source: bert_embedding.py

Example 2: __init__

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def __init__(self, size, device=None):

        self.size = size
        self._done = False

        # The score for each translation on the beam.
        self.scores = torch.zeros((size,), dtype=torch.float, device=device)
        self.all_scores = []

        # The backpointers at each time-step.
        self.prev_ks = []

        # The outputs at each time-step.
        self.next_ys = [torch.full((size,), Constants.PAD, dtype=torch.long, device=device)]
        self.next_ys[0][0] = Constants.SOS
        self.finished = [False for _ in range(size)] 
Author: ConvLab, Project: ConvLab, Lines: 18, Source: Beam.py

Example 3: test_forward

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_forward(self):
        batch_size = 10
        shape = [2, 3, 4]
        inputs = torch.randn(batch_size, *shape)

        def test_case(scale, shift, true_outputs, true_logabsdet):
            with self.subTest(scale=scale, shift=shift):
                transform = standard.AffineScalarTransform(scale=scale, shift=shift)
                outputs, logabsdet = transform(inputs)
                self.assert_tensor_is_good(outputs, [batch_size] + shape)
                self.assert_tensor_is_good(logabsdet, [batch_size])
                self.assertEqual(outputs, true_outputs)
                self.assertEqual(logabsdet,
                                 torch.full([batch_size], true_logabsdet * np.prod(shape)))

        self.eps = 1e-6
        test_case(None, 2., inputs + 2., 0)
        test_case(2., None, inputs * 2., np.log(2.))
        test_case(2., 2., inputs * 2. + 2., np.log(2.)) 
Author: bayesiains, Project: nsf, Lines: 21, Source: standard_test.py

Example 4: test_inverse

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_inverse(self):
        batch_size = 10
        shape = [2, 3, 4]
        inputs = torch.randn(batch_size, *shape)

        def test_case(scale, shift, true_outputs, true_logabsdet):
            with self.subTest(scale=scale, shift=shift):
                transform = standard.AffineScalarTransform(scale=scale, shift=shift)
                outputs, logabsdet = transform.inverse(inputs)
                self.assert_tensor_is_good(outputs, [batch_size] + shape)
                self.assert_tensor_is_good(logabsdet, [batch_size])
                self.assertEqual(outputs, true_outputs)
                self.assertEqual(logabsdet,
                                 torch.full([batch_size], true_logabsdet * np.prod(shape)))

        self.eps = 1e-6
        test_case(None, 2., inputs - 2., 0)
        test_case(2., None, inputs / 2., -np.log(2.))
        test_case(2., 2., (inputs - 2.) / 2., -np.log(2.)) 
Author: bayesiains, Project: nsf, Lines: 21, Source: standard_test.py

Example 5: barabasi_albert_graph

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def barabasi_albert_graph(num_nodes, num_edges):
    r"""Returns the :obj:`edge_index` of a Barabasi-Albert preferential
    attachment model, where a graph of :obj:`num_nodes` nodes grows by
    attaching new nodes with :obj:`num_edges` edges that are preferentially
    attached to existing nodes with high degree.

    Args:
        num_nodes (int): The number of nodes.
        num_edges (int): The number of edges from a new node to existing nodes.
    """

    assert num_edges > 0 and num_edges < num_nodes

    row, col = torch.arange(num_edges), torch.randperm(num_edges)

    for i in range(num_edges, num_nodes):
        row = torch.cat([row, torch.full((num_edges, ), i, dtype=torch.long)])
        choice = np.random.choice(torch.cat([row, col]).numpy(), num_edges)
        col = torch.cat([col, torch.from_numpy(choice)])

    edge_index = torch.stack([row, col], dim=0)
    edge_index, _ = remove_self_loops(edge_index)
    edge_index = to_undirected(edge_index, num_nodes)

    return edge_index 
Author: rusty1s, Project: pytorch_geometric, Lines: 27, Source: random.py
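
As a usage sketch (assuming the barabasi_albert_graph function above is available together with the torch_geometric.utils helpers it calls), building a small preferential-attachment graph might look like this; the node and edge counts are hypothetical values chosen for illustration:

import torch
import numpy as np
from torch_geometric.utils import remove_self_loops, to_undirected

# Hypothetical parameters: a 100-node graph where each new node attaches via 3 edges.
edge_index = barabasi_albert_graph(num_nodes=100, num_edges=3)
print(edge_index.shape)  # torch.Size([2, E]), one column per directed edge entry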

Example 6: test_clip_grad_norm_

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_clip_grad_norm_(self):
        params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False)
        grad_norm = utils.clip_grad_norm_(params, 1.0)
        self.assertTrue(torch.is_tensor(grad_norm))
        self.assertEqual(grad_norm, 0.0)

        params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)]
        for p in params:
            p.grad = torch.full((5,), fill_value=2.)
        grad_norm = utils.clip_grad_norm_(params, 1.0)
        exp_grad_norm = torch.full((15,), fill_value=2.).norm()
        self.assertTrue(torch.is_tensor(grad_norm))
        self.assertEqual(grad_norm, exp_grad_norm)

        grad_norm = utils.clip_grad_norm_(params, 1.0)
        self.assertAlmostEqual(grad_norm, torch.tensor(1.0)) 
Author: pytorch, Project: fairseq, Lines: 18, Source: test_utils.py

Example 7: load_dataset

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.
        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        if self.args.max_sentences is not None:
            bsz = self.args.max_sentences
        else:
            bsz = max(1, self.args.max_tokens // self.args.tokens_per_sample)
        self.datasets[split] = DummyDataset(
            {
                'id': 1,
                'net_input': {
                    'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]),
                    'src_lengths': torch.full(
                        (bsz, ), self.args.tokens_per_sample, dtype=torch.long
                    ),
                },
                'target': torch.stack([self.dummy_tgt for _ in range(bsz)]),
                'nsentences': bsz,
                'ntokens': bsz * self.args.tokens_per_sample,
            },
            num_items=self.args.dataset_size,
            item_size=self.args.tokens_per_sample,
        ) 
Author: pytorch, Project: fairseq, Lines: 27, Source: dummy_lm.py

Example 8: test_std_share_network_output_values

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_std_share_network_output_values(input_dim, output_dim, hidden_sizes):
    module = GaussianMLPTwoHeadedModule(
        input_dim=input_dim,
        output_dim=output_dim,
        hidden_sizes=hidden_sizes,
        hidden_nonlinearity=None,
        std_parameterization='exp',
        hidden_w_init=nn.init.ones_,
        output_w_init=nn.init.ones_)

    dist = module(torch.ones(input_dim))

    exp_mean = torch.full(
        (output_dim, ), input_dim * (torch.Tensor(hidden_sizes).prod().item()))
    exp_variance = (
        input_dim * torch.Tensor(hidden_sizes).prod()).exp().pow(2).item()

    assert dist.mean.equal(exp_mean)
    assert dist.variance.equal(torch.full((output_dim, ), exp_variance))
    assert dist.rsample().shape == (output_dim, ) 
Author: rlworkgroup, Project: garage, Lines: 22, Source: test_gaussian_mlp_module.py

Example 9: test_std_share_network_output_values_with_batch

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_std_share_network_output_values_with_batch(input_dim, output_dim,
                                                    hidden_sizes):
    module = GaussianMLPTwoHeadedModule(
        input_dim=input_dim,
        output_dim=output_dim,
        hidden_sizes=hidden_sizes,
        hidden_nonlinearity=None,
        std_parameterization='exp',
        hidden_w_init=nn.init.ones_,
        output_w_init=nn.init.ones_)

    batch_size = 5
    dist = module(torch.ones([batch_size, input_dim]))

    exp_mean = torch.full(
        (batch_size, output_dim),
        input_dim * (torch.Tensor(hidden_sizes).prod().item()))
    exp_variance = (
        input_dim * torch.Tensor(hidden_sizes).prod()).exp().pow(2).item()

    assert dist.mean.equal(exp_mean)
    assert dist.variance.equal(
        torch.full((batch_size, output_dim), exp_variance))
    assert dist.rsample().shape == (batch_size, output_dim) 
Author: rlworkgroup, Project: garage, Lines: 26, Source: test_gaussian_mlp_module.py

Example 10: test_std_network_output_values

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_std_network_output_values(input_dim, output_dim, hidden_sizes):
    init_std = 2.

    module = GaussianMLPModule(
        input_dim=input_dim,
        output_dim=output_dim,
        hidden_sizes=hidden_sizes,
        init_std=init_std,
        hidden_nonlinearity=None,
        std_parameterization='exp',
        hidden_w_init=nn.init.ones_,
        output_w_init=nn.init.ones_)

    dist = module(torch.ones(input_dim))

    exp_mean = torch.full(
        (output_dim, ), input_dim * (torch.Tensor(hidden_sizes).prod().item()))
    exp_variance = init_std**2

    assert dist.mean.equal(exp_mean)
    assert dist.variance.equal(torch.full((output_dim, ), exp_variance))
    assert dist.rsample().shape == (output_dim, ) 
Author: rlworkgroup, Project: garage, Lines: 24, Source: test_gaussian_mlp_module.py

Example 11: test_softplus_std_network_output_values

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_softplus_std_network_output_values(input_dim, output_dim,
                                            hidden_sizes):
    init_std = 2.

    module = GaussianMLPModule(
        input_dim=input_dim,
        output_dim=output_dim,
        hidden_sizes=hidden_sizes,
        init_std=init_std,
        hidden_nonlinearity=None,
        std_parameterization='softplus',
        hidden_w_init=nn.init.ones_,
        output_w_init=nn.init.ones_)

    dist = module(torch.ones(input_dim))

    exp_mean = input_dim * torch.Tensor(hidden_sizes).prod().item()
    exp_variance = torch.Tensor([init_std]).exp().add(1.).log()**2

    assert dist.mean.equal(torch.full((output_dim, ), exp_mean))
    assert dist.variance.equal(torch.full((output_dim, ), exp_variance[0]))
    assert dist.rsample().shape == (output_dim, ) 
Author: rlworkgroup, Project: garage, Lines: 24, Source: test_gaussian_mlp_module.py

Example 12: test_exp_min_std

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_exp_min_std(input_dim, output_dim, hidden_sizes):
    min_value = 10.

    module = GaussianMLPModule(
        input_dim=input_dim,
        output_dim=output_dim,
        hidden_sizes=hidden_sizes,
        init_std=1.,
        min_std=min_value,
        hidden_nonlinearity=None,
        std_parameterization='exp',
        hidden_w_init=nn.init.zeros_,
        output_w_init=nn.init.zeros_)

    dist = module(torch.ones(input_dim))

    exp_variance = min_value**2

    assert dist.variance.equal(torch.full((output_dim, ), exp_variance)) 
Author: rlworkgroup, Project: garage, Lines: 21, Source: test_gaussian_mlp_module.py

Example 13: test_exp_max_std

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_exp_max_std(input_dim, output_dim, hidden_sizes):
    max_value = 1.

    module = GaussianMLPModule(
        input_dim=input_dim,
        output_dim=output_dim,
        hidden_sizes=hidden_sizes,
        init_std=10.,
        max_std=max_value,
        hidden_nonlinearity=None,
        std_parameterization='exp',
        hidden_w_init=nn.init.zeros_,
        output_w_init=nn.init.zeros_)

    dist = module(torch.ones(input_dim))

    exp_variance = max_value**2

    assert dist.variance.equal(torch.full((output_dim, ), exp_variance)) 
Author: rlworkgroup, Project: garage, Lines: 21, Source: test_gaussian_mlp_module.py

Example 14: test_softplus_min_std

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_softplus_min_std(input_dim, output_dim, hidden_sizes):
    min_value = 2.

    module = GaussianMLPModule(
        input_dim=input_dim,
        output_dim=output_dim,
        hidden_sizes=hidden_sizes,
        init_std=1.,
        min_std=min_value,
        hidden_nonlinearity=None,
        std_parameterization='softplus',
        hidden_w_init=nn.init.zeros_,
        output_w_init=nn.init.zeros_)

    dist = module(torch.ones(input_dim))

    exp_variance = torch.Tensor([min_value]).exp().add(1.).log()**2

    assert dist.variance.equal(torch.full((output_dim, ), exp_variance[0])) 
Author: rlworkgroup, Project: garage, Lines: 21, Source: test_gaussian_mlp_module.py

Example 15: test_softplus_max_std

# Required module import: import torch [as alias]
# Or: from torch import full [as alias]
def test_softplus_max_std(input_dim, output_dim, hidden_sizes):
    max_value = 1.

    module = GaussianMLPModule(
        input_dim=input_dim,
        output_dim=output_dim,
        hidden_sizes=hidden_sizes,
        init_std=10,
        max_std=max_value,
        hidden_nonlinearity=None,
        std_parameterization='softplus',
        hidden_w_init=nn.init.ones_,
        output_w_init=nn.init.ones_)

    dist = module(torch.ones(input_dim))

    exp_variance = torch.Tensor([max_value]).exp().add(1.).log()**2

    assert torch.equal(dist.variance,
                       torch.full((output_dim, ), exp_variance[0])) 
Author: rlworkgroup, Project: garage, Lines: 22, Source: test_gaussian_mlp_module.py


Note: The torch.full method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.