

Python torch.qr Method Code Examples

This article collects typical usage examples of the torch.qr method in Python. If you are wondering what torch.qr does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples from the torch module.


The following presents 15 code examples of the torch.qr method, drawn from open-source projects and sorted by popularity by default.
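
Before the project examples, here is a minimal standalone sketch of what torch.qr computes (note that recent PyTorch releases deprecate torch.qr in favor of torch.linalg.qr):

import torch

# Factor A into an orthonormal Q and an upper-triangular R.
A = torch.randn(4, 4)
Q, R = torch.qr(A)
print(torch.allclose(Q @ R, A, atol=1e-5))                 # True: Q @ R reconstructs A
print(torch.allclose(Q.t() @ Q, torch.eye(4), atol=1e-5))  # True: Q is orthonormal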

Example 1: __init__

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import qr [as 別名]
def __init__(self, in_channel):
        super().__init__()

        weight = np.random.randn(in_channel, in_channel)
        q, _ = la.qr(weight)
        w_p, w_l, w_u = la.lu(q.astype(np.float32))
        w_s = np.diag(w_u)
        w_u = np.triu(w_u, 1)
        u_mask = np.triu(np.ones_like(w_u), 1)
        l_mask = u_mask.T

        w_p = torch.from_numpy(w_p)
        w_l = torch.from_numpy(w_l)
        w_s = torch.from_numpy(w_s)
        w_u = torch.from_numpy(w_u)

        self.register_buffer('w_p', w_p)
        self.register_buffer('u_mask', torch.from_numpy(u_mask))
        self.register_buffer('l_mask', torch.from_numpy(l_mask))
        self.register_buffer('s_sign', torch.sign(w_s))
        self.register_buffer('l_eye', torch.eye(l_mask.shape[0]))
        self.w_l = nn.Parameter(w_l)
        self.w_s = nn.Parameter(logabs(w_s))
        self.w_u = nn.Parameter(w_u) 
Author: rosinality, Project: glow-pytorch, Lines: 26, Source: model.py
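
As a standalone sketch of why this parameterization works (variable names here mirror the snippet but are illustrative): P, L, the strictly upper part of U, and the diagonal s exactly reconstruct the original orthogonal matrix, which is what lets the layer rebuild its weight from learnable triangular factors:

import numpy as np
from scipy import linalg as la

q, _ = la.qr(np.random.randn(4, 4))
w_p, w_l, w_u = la.lu(q.astype(np.float32))
w_s = np.diag(w_u)            # diagonal of U (stored above via sign and log-abs)
w_u_strict = np.triu(w_u, 1)  # strictly upper triangle (a learnable parameter above)
recon = w_p @ w_l @ (w_u_strict + np.diag(w_s))
print(np.allclose(recon, q, atol=1e-5))  # True: P @ L @ (U_strict + diag(s)) == Q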

Example 2: factor_orthogonalize

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def factor_orthogonalize(self, mu):
        """
        Pushes the factor's non-orthogonal part to its corresponding core.

        This method works in place.

        :param mu: an int between 0 and N-1
        """

        if self.Us[mu] is None:
            return
        Q, R = torch.qr(self.Us[mu])
        self.Us[mu] = Q

        if self.batch:
            if self.cores[mu].dim() == 3:
                self.cores[mu] = torch.einsum('bjk,baj->bak', (self.cores[mu], R))
            else:
                self.cores[mu] = torch.einsum('bijk,baj->biak', (self.cores[mu], R))
        else:
            if self.cores[mu].dim() == 2:
                self.cores[mu] = torch.einsum('jk,aj->ak', (self.cores[mu], R))
            else:
                self.cores[mu] = torch.einsum('ijk,aj->iak', (self.cores[mu], R)) 
Author: rballester, Project: tntorch, Lines: 26, Source: tensor.py
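
A standalone sketch of the invariant this method relies on (plain tensors with illustrative shapes): since Us[mu] = Q @ R, absorbing R into the core leaves the represented tensor unchanged:

import torch

U = torch.randn(6, 3)   # stand-in for a factor Us[mu]
C = torch.randn(3, 4)   # stand-in for the matching core slice
Q, R = torch.qr(U)
# U @ C == Q @ (R @ C): orthogonalizing U and pushing R into the core
# preserves the product, which is what the einsum calls above do mode-wise.
print(torch.allclose(U @ C, Q @ (R @ C), atol=1e-5))  # True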

Example 3: random_orthogonal

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def random_orthogonal(size):
    """
    Returns a random orthogonal matrix as a 2-dim tensor of shape [size, size].
    """

    # Use the QR decomposition of a random Gaussian matrix.
    x = torch.randn(size, size)
    q, _ = torch.qr(x)
    return q 
Author: bayesiains, Project: nsf, Lines: 11, Source: torchutils.py
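
A quick check of the result (assuming the function above is in scope):

import torch

q = random_orthogonal(5)
print(torch.allclose(q.t() @ q, torch.eye(5), atol=1e-5))  # True: columns are orthonormal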

Example 4: forward

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def forward(self, features: Dict[str, Tensor]):
        ft_all_layers = features['all_layer_embeddings']
        org_device = ft_all_layers[0].device
        all_layer_embedding = torch.stack(ft_all_layers).transpose(1,0)
        all_layer_embedding = all_layer_embedding[:, self.layer_start:, :, :]  # Start from the 4th layer's output

        # torch.qr is slow on the GPU (see https://github.com/pytorch/pytorch/issues/22573),
        # so compute it on the CPU until the issue is fixed
        all_layer_embedding = all_layer_embedding.cpu()

        attention_mask = features['attention_mask'].cpu().numpy()
        unmask_num = np.array([sum(mask) for mask in attention_mask]) - 1  # Not considering the last item
        embedding = []

        # One sentence at a time
        for sent_index in range(len(unmask_num)):
            sentence_feature = all_layer_embedding[sent_index, :, :unmask_num[sent_index], :]
            one_sentence_embedding = []
            # Process each token
            for token_index in range(sentence_feature.shape[1]):
                token_feature = sentence_feature[:, token_index, :]
                # 'Unified Word Representation'
                token_embedding = self.unify_token(token_feature)
                one_sentence_embedding.append(token_embedding)

            features.update({'sentence_embedding': features['cls_token_embeddings']})  # Note: redundant; overwritten by the final update below

            one_sentence_embedding = torch.stack(one_sentence_embedding)
            sentence_embedding = self.unify_sentence(sentence_feature, one_sentence_embedding)
            embedding.append(sentence_embedding)

        output_vector = torch.stack(embedding).to(org_device)

        features.update({'sentence_embedding': output_vector})

        return features 
Author: UKPLab, Project: sentence-transformers, Lines: 37, Source: WKPooling.py

Example 5: unify_token

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def unify_token(self, token_feature):
        """
            Unify Token Representation
        """
        window_size = self.context_window_size

        alpha_alignment = torch.zeros(token_feature.size()[0], device=token_feature.device)
        alpha_novelty = torch.zeros(token_feature.size()[0], device=token_feature.device)

        for k in range(token_feature.size()[0]):
            left_window = token_feature[k - window_size:k, :]
            right_window = token_feature[k + 1:k + window_size + 1, :]
            window_matrix = torch.cat([left_window, right_window, token_feature[k, :][None, :]])
            Q, R = torch.qr(window_matrix.T)

            r = R[:, -1]
            alpha_alignment[k] = torch.mean(self.norm_vector(R[:-1, :-1], dim=0), dim=1).matmul(R[:-1, -1]) / torch.norm(r[:-1])
            alpha_alignment[k] = 1 / (alpha_alignment[k] * window_matrix.size()[0] * 2)
            alpha_novelty[k] = torch.abs(r[-1]) / torch.norm(r)

        # Sum Norm
        alpha_alignment = alpha_alignment / torch.sum(alpha_alignment)  # Normalization Choice
        alpha_novelty = alpha_novelty / torch.sum(alpha_novelty)

        alpha = alpha_novelty + alpha_alignment
        alpha = alpha / torch.sum(alpha)  # Normalize

        out_embedding = torch.mv(token_feature.t(), alpha)
        return out_embedding 
Author: UKPLab, Project: sentence-transformers, Lines: 31, Source: WKPooling.py
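
A reading of the QR step above: the columns of window_matrix are the context embeddings plus the current token, so the last column of R holds the token's coordinates in an orthonormal basis of that window. Its final entry r[-1] is the component orthogonal to the context (hence the novelty score |r[-1]| / ||r||), while the remaining entries measure how well the token aligns with the context subspace.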

Example 6: __init__

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
                                    bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:,0] = -1*W[:,0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
Author: alphacep, Project: tn2-wg, Lines: 15, Source: glow.py
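
A standalone check of the initialization trick above: the Q factor returned by torch.qr is orthonormal, so |det Q| = 1, and negating one column flips a -1 determinant to +1 without losing orthonormality:

import torch

W = torch.qr(torch.randn(8, 8))[0]
if torch.det(W) < 0:
    W[:, 0] = -W[:, 0]
print(torch.allclose(W.t() @ W, torch.eye(8), atol=1e-5))  # True: still orthonormal
print(torch.det(W))  # ~ +1.0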

Example 7: orthogonal

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
# Note: this legacy snippet also assumes: from torch.autograd import Variable
def orthogonal(tensor, gain=1):
    """Fills the input Tensor or Variable with a (semi) orthogonal matrix, as described in "Exact solutions to the
    nonlinear dynamics of learning in deep linear neural networks" - Saxe, A. et al. (2013). The input tensor must have
    at least 2 dimensions, and for tensors with more than 2 dimensions the trailing dimensions are flattened.

    Args:
        tensor: an n-dimensional torch.Tensor or autograd.Variable, where n >= 2
        gain: optional scaling factor

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.orthogonal(w)
    """
    if isinstance(tensor, Variable):
        orthogonal(tensor.data, gain=gain)
        return tensor

    if tensor.ndimension() < 2:
        raise ValueError("Only tensors with 2 or more dimensions are supported")

    rows = tensor.size(0)
    cols = tensor[0].numel()
    flattened = torch.Tensor(rows, cols).normal_(0, 1)
    # Compute the qr factorization
    q, r = torch.qr(flattened)
    # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
    d = torch.diag(r, 0)
    ph = d.sign()
    q *= ph.expand_as(q)
    # Pad zeros to Q (if rows smaller than cols)
    if rows < cols:
        padding = torch.zeros(rows, cols - rows)
        if q.is_cuda:
            q = torch.cat([q, padding.cuda()], 1)
        else:
            q = torch.cat([q, padding], 1)

    tensor.view_as(q).copy_(q)
    tensor.mul_(gain)
    return tensor 
Author: magic282, Project: SEASS, Lines: 42, Source: xinit.py
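
A minimal usage sketch of the initializer above, mirroring its docstring (assumes the function and its legacy Variable import are in scope):

import torch

w = torch.Tensor(3, 5)
orthogonal(w)
# With rows < cols, the rows of w are orthonormal, so w @ w.t() is the identity.
print(torch.allclose(w @ w.t(), torch.eye(3), atol=1e-5))  # True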

Example 8: _init_cache_for_constant_diag

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def _init_cache_for_constant_diag(self, eye, batch_shape, n, k):
        # We can factor out the noise for both QR and solves.
        self._noise = self._noise.narrow(-2, 0, 1)
        self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self, self._noise.sqrt() * eye), dim=-2))
        self._q_cache = self._q_cache[..., :n, :]

        # Use the matrix determinant lemma for the logdet, using the fact that R'R = L_k'L_k + s*I
        logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)
        logdet = logdet + (n - k) * self._noise.squeeze(-2).squeeze(-1).log()
        self._precond_logdet_cache = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze() 
Author: cornellius-gp, Project: gpytorch, Lines: 12, Source: added_diag_lazy_tensor.py
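
A sketch of the identity behind the logdet above (notation from the comments: L_k is the n-by-k pivoted-Cholesky factor, s the constant noise). QR-factorizing the stacked matrix [L_k; sqrt(s) I_k] yields R with R'R = L_k'L_k + s I_k, and the matrix determinant lemma gives

    log det(L_k L_k' + s I_n) = log det(L_k' L_k + s I_k) + (n - k) log s
                              = 2 * sum_i log |R_ii| + (n - k) log s,

which is exactly what the two logdet lines compute.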

Example 9: _init_cache_for_non_constant_diag

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def _init_cache_for_non_constant_diag(self, eye, batch_shape, n):
        # With non-constant diagonals, we can't factor out the noise as easily
        self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))
        self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()

        logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)
        logdet -= (1.0 / self._noise).log().sum([-1, -2])
        self._precond_logdet_cache = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze() 
Author: cornellius-gp, Project: gpytorch, Lines: 10, Source: added_diag_lazy_tensor.py

Example 10: _inv_matmul_preconditioner

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def _inv_matmul_preconditioner(self):
        """
        (Optional) define a preconditioner that can be used for linear systems, but not necessarily
        for log determinants. By default, this can call :meth:`~gpytorch.lazy.LazyTensor._preconditioner`.

        Returns:
            function: a function on x which performs P^{-1}(x)
        """
        base_precond, _, _ = self._preconditioner()

        if base_precond is not None:
            return base_precond
        elif gpytorch.beta_features.default_preconditioner.on():
            if hasattr(self, "_default_preconditioner_cache"):
                U, S, V = self._default_preconditioner_cache
            else:
                precond_basis_size = min(gpytorch.settings.max_preconditioner_size.value(), self.size(-1))
                random_basis = torch.randn(
                    self.batch_shape + torch.Size((self.size(-2), precond_basis_size)),
                    device=self.device,
                    dtype=self.dtype,
                )
                projected_mat = self._matmul(random_basis)
                proj_q, _ = torch.qr(projected_mat)  # unpack: keep only the orthonormal Q factor
                orthog_projected_mat = self._matmul(proj_q).transpose(-2, -1)
                U, S, V = torch.svd(orthog_projected_mat)
                U = proj_q.matmul(U)

                self._default_preconditioner_cache = (U, S, V)

            def preconditioner(v):
                res = V.transpose(-2, -1).matmul(v)
                res = (1 / S).unsqueeze(-1) * res
                res = U.matmul(res)
                return res

            return preconditioner
        else:
            return None 
Author: cornellius-gp, Project: gpytorch, Lines: 41, Source: lazy_tensor.py
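
A standalone sketch of the randomized factorization this preconditioner builds, using plain tensors instead of lazy tensors (sizes are illustrative):

import torch

M = torch.randn(60, 12)
A = M @ M.t()                     # rank-12 symmetric test matrix
omega = torch.randn(60, 12)       # random probe basis
Q, _ = torch.qr(A @ omega)        # orthonormal basis approximating range(A)
U, S, V = torch.svd((A @ Q).t())  # small SVD of the projected matrix
U = Q @ U                         # lift back to the original space
# A is (approximately) U @ diag(S) @ V^T, so U @ diag(1/S) @ V^T acts as an
# approximate inverse -- the role of the preconditioner closure above.
print(torch.norm(A - U @ torch.diag(S) @ V.t()) / torch.norm(A))  # ~ 0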

Example 11: forward

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def forward(self, A):
        # Old-style autograd.Function: forward is an instance method, and the
        # tensors needed by the backward pass are stashed with self.save_for_backward.
        Q, R = torch.qr(A)
        self.save_for_backward(A, Q, R)
        return Q, R 
Author: wangleiphy, Project: tensorgrad, Lines: 6, Source: qr.py

Example 12: test_fisher_matrix_matrix_matmul

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def test_fisher_matrix_matrix_matmul(self):
        model = torch.nn.Sequential(
            torch.nn.Linear(1, 400),
            torch.nn.ELU(),
            torch.nn.Linear(400, 400),
            torch.nn.ELU(),
            torch.nn.Linear(400, 1),
        )

        data = torch.randn(1500, 1)

        fvp = FVPR_FD(model, data)

        numpars = 0
        for p in model.parameters():
            numpars += p.numel()

        orthmat, _ = torch.qr(torch.randn(numpars, 80))
        emat = 1e-2 * torch.randn(80, 2)

        full_matmul = fvp.matmul(orthmat @ emat)
        split_matmul = fvp.matmul(orthmat) @ emat

        # check that F (Vy) = FV y
        self.assertLess(
            torch.norm(full_matmul - split_matmul) / split_matmul.norm(), 1e-2
        )

        # check that matrix columns work
        self.assertLess(
            torch.norm(full_matmul[:, 0] - fvp.matmul(orthmat @ emat[:, 0])), 1e-5
        ) 
Author: amzn, Project: xfer, Lines: 34, Source: test_fisherreg_fd.py

Example 13: __init__

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:, 0] = -1 * W[:, 0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
Author: NVIDIA, Project: NeMo, Lines: 14, Source: waveglow.py

Example 14: __init__

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def __init__(self, C_t):
        # center and orthogonalize
        self.Q_t, _ = torch.qr(C_t - C_t.mean(0))
        self.dof = C_t.shape[0] - 2 - C_t.shape[1] 
Author: broadinstitute, Project: tensorqtl, Lines: 6, Source: core.py
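
A standalone sketch of how an orthonormal covariate basis like Q_t is typically used, namely to project the covariate space out of a data vector (shapes are illustrative):

import torch

C = torch.randn(100, 5)          # covariates: samples x covariates
Q, _ = torch.qr(C - C.mean(0))   # centered and orthonormalized, as in __init__ above
x = torch.randn(100)
x_res = x - Q @ (Q.t() @ x)      # residual is orthogonal to the covariate space
print(torch.allclose(Q.t() @ x_res, torch.zeros(5), atol=1e-5))  # True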

Example 15: __init__

# Required import: import torch [as alias]
# Or: from torch import qr [as alias]
def __init__(self, c):
        super(Invertible1x1Conv, self).__init__()
        self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
                                    bias=False)

        # Sample a random orthonormal matrix to initialize weights
        W = torch.qr(torch.FloatTensor(c, c).normal_())[0]

        # Ensure determinant is 1.0 not -1.0
        if torch.det(W) < 0:
            W[:, 0] = -1*W[:, 0]
        W = W.view(c, c, 1)
        self.conv.weight.data = W 
Author: xcmyz, Project: FastSpeech, Lines: 15, Source: glow.py


Note: the torch.qr examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Do not republish without permission.