

Python torch.diagonal Method Code Examples

This article collects typical usage examples of the Python torch.diagonal method. If you are wondering what torch.diagonal does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the torch module.


The following presents 15 code examples of the torch.diagonal method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
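
Before the collected examples, a minimal primer (not taken from any of the projects below) on what torch.diagonal does: it returns a view of the diagonals of the input taken with respect to dim1 and dim2, shifted by offset, with the extracted diagonal appended as the last dimension of the result.

import torch

A = torch.arange(9.0).reshape(3, 3)
print(torch.diagonal(A))            # tensor([0., 4., 8.]) -- the main diagonal
print(torch.diagonal(A, offset=1))  # tensor([1., 5.]) -- the first superdiagonal

# For batched matrices, dim1=-2 and dim2=-1 extract the diagonal of each
# trailing n x n matrix, giving a result of shape batch_shape x n.
B = torch.eye(3).expand(4, 3, 3)
print(torch.diagonal(B, dim1=-2, dim2=-1).shape)  # torch.Size([4, 3])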

Example 1: test_lkj_covariance_prior_log_prob_hetsd

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def test_lkj_covariance_prior_log_prob_hetsd(self, cuda=False):
    # helpers such as exp, SmoothedBoxPrior, LKJCovariancePrior and
    # approx_equal come from math and gpytorch's prior/test utilities
    device = torch.device("cuda") if cuda else torch.device("cpu")
    a = torch.tensor([exp(-1), exp(-2)], device=device)
    b = torch.tensor([exp(1), exp(2)], device=device)
    sd_prior = SmoothedBoxPrior(a, b)
    prior = LKJCovariancePrior(2, torch.tensor(0.5, device=device), sd_prior)
    S = torch.eye(2, device=device)
    self.assertAlmostEqual(prior.log_prob(S).item(), -4.71958, places=4)
    S = torch.stack([S, torch.tensor([[1.0, 0.5], [0.5, 1]], device=S.device)])
    self.assertTrue(approx_equal(prior.log_prob(S), torch.tensor([-4.71958, -4.57574], device=S.device)))
    with self.assertRaises(ValueError):
        prior.log_prob(torch.eye(3, device=device))

    # For eta=1.0 log_prob is flat over all covariance matrices
    prior = LKJCovariancePrior(2, torch.tensor(1.0, device=device), sd_prior)
    marginal_sd = torch.diagonal(S, dim1=-2, dim2=-1).sqrt()
    log_prob_expected = prior.correlation_prior.C + prior.sd_prior.log_prob(marginal_sd)
    self.assertTrue(approx_equal(prior.log_prob(S), log_prob_expected))
Author: cornellius-gp | Project: gpytorch | Source: test_lkj_prior.py

Example 2: norm_to_lognorm

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
# assumes: from typing import Tuple; from torch import Tensor
def norm_to_lognorm(mu: Tensor, Cov: Tensor) -> Tuple[Tensor, Tensor]:
    """Compute mean and covariance of a log-MVN from its MVN sufficient statistics

    If `X ~ N(mu, Cov)` and `Y = exp(X)`, then `Y` is log-normal with

        mu_ln_{i} = exp(mu_{i} + 0.5 * Cov_{ii})
        Cov_ln_{ij} = exp(mu_{i} + mu_{j} + 0.5 * (Cov_{ii} + Cov_{jj})) *
            (exp(Cov_{ij}) - 1)

    Args:
        mu: A `batch_shape x n` mean vector of the Normal distribution.
        Cov: A `batch_shape x n x n` covariance matrix of the Normal distribution.

    Returns:
        A two-tuple containing:

        - The `batch_shape x n` mean vector of the log-Normal distribution.
        - The `batch_shape x n x n` covariance matrix of the log-Normal
            distribution.
    """
    diag = torch.diagonal(Cov, dim1=-1, dim2=-2)
    b = mu + 0.5 * diag
    mu_ln = torch.exp(b)
    Cov_ln = (torch.exp(Cov) - 1) * torch.exp(b.unsqueeze(-1) + b.unsqueeze(-2))
    return mu_ln, Cov_ln 
Author: pytorch | Project: botorch | Source: utils.py
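
As a quick, hedged sanity check of norm_to_lognorm above (not part of the botorch source; the tensors are illustrative): sample the underlying MVN, exponentiate, and compare empirical moments with the analytic ones.

import torch

torch.manual_seed(0)
mu = torch.tensor([0.1, -0.2])
A = torch.tensor([[0.5, 0.1], [0.0, 0.4]])
Cov = A @ A.t()  # a valid 2 x 2 covariance matrix

mu_ln, Cov_ln = norm_to_lognorm(mu, Cov)

# Empirical moments of Y = exp(X), X ~ N(mu, Cov)
X = torch.distributions.MultivariateNormal(mu, Cov).sample((200000,))
Y = X.exp()
print(torch.allclose(Y.mean(0), mu_ln, atol=1e-2))          # approximately True
print(torch.allclose(torch.cov(Y.t()), Cov_ln, atol=1e-2))  # approximately True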

Example 3: _components

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def _components(self) -> Dict[Tuple[str, str, str], Tuple[Tensor, Tensor]]:
    # assumes: from collections import defaultdict; from typing import Dict, Tuple
    states_per_measure = defaultdict(list)
    for state_belief in self.state_beliefs:
        for m, measure in enumerate(self.design.measures):
            H = state_belief.H[:, m, :].data
            mean = H * state_belief.means.data
            std = H * torch.diagonal(state_belief.covs.data, dim1=-2, dim2=-1).sqrt()
            states_per_measure[measure].append((mean, std))

    out = {}
    for measure, means_and_stds in states_per_measure.items():
        means, stds = zip(*means_and_stds)
        means = torch.stack(means).permute(1, 0, 2)
        stds = torch.stack(stds).permute(1, 0, 2)
        for s, (process_name, state_element) in enumerate(self.design.state_elements):
            if ~torch.isclose(means[:, :, s].abs().max(), torch.zeros(1)):
                out[(measure, process_name, state_element)] = (means[:, :, s], stds[:, :, s])
    return out
Author: strongio | Project: torch-kalman | Source: over_time.py

Example 4: test_lkj_covariance_prior_log_prob

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def test_lkj_covariance_prior_log_prob(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    sd_prior = SmoothedBoxPrior(exp(-1), exp(1))
    if cuda:
        sd_prior = sd_prior.cuda()
    prior = LKJCovariancePrior(2, torch.tensor(0.5, device=device), sd_prior)
    S = torch.eye(2, device=device)
    self.assertAlmostEqual(prior.log_prob(S).item(), -3.59981, places=4)
    S = torch.stack([S, torch.tensor([[1.0, 0.5], [0.5, 1]], device=S.device)])
    self.assertTrue(approx_equal(prior.log_prob(S), torch.tensor([-3.59981, -3.45597], device=S.device)))
    with self.assertRaises(ValueError):
        prior.log_prob(torch.eye(3, device=device))

    # For eta=1.0 log_prob is flat over all covariance matrices
    prior = LKJCovariancePrior(2, torch.tensor(1.0, device=device), sd_prior)
    marginal_sd = torch.diagonal(S, dim1=-2, dim2=-1).sqrt()
    log_prob_expected = prior.correlation_prior.C + prior.sd_prior.log_prob(marginal_sd)
    self.assertTrue(approx_equal(prior.log_prob(S), log_prob_expected))
Author: cornellius-gp | Project: gpytorch | Source: test_lkj_prior.py

Example 5: _is_valid_correlation_matrix

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def _is_valid_correlation_matrix(Sigma, tol=1e-6):
    """Check if the supplied matrix is a valid correlation matrix

    A matrix is a valid correlation matrix if it is positive semidefinite
    (the check below requires positive definiteness) and all of its diagonal
    elements are equal to 1.

    Args:
        Sigma: An n x n correlation matrix, or a batch of b correlation matrices
            with shape b x n x n
        tol: The tolerance with which to check unit value of the diagonal elements

    Returns:
        True if Sigma is a valid correlation matrix, False otherwise (in batch
            mode, all matrices in the batch need to be valid correlation matrices)

    """
    # assumes: from torch.distributions import constraints
    pdef = torch.all(constraints.positive_definite.check(Sigma))
    return pdef and all(torch.all(torch.abs(S.diag() - 1) < tol) for S in Sigma.view(-1, *Sigma.shape[-2:]))
Author: cornellius-gp | Project: gpytorch | Source: lkj_prior.py
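
A small illustrative call of the helper above (a hedged sketch; it assumes `from torch.distributions import constraints` is in scope, as in the gpytorch source):

import torch
from torch.distributions import constraints

valid = torch.tensor([[1.0, 0.3], [0.3, 1.0]])
bad_diag = torch.tensor([[2.0, 0.3], [0.3, 1.0]])  # positive definite, but diagonal != 1
print(_is_valid_correlation_matrix(valid))     # True
print(_is_valid_correlation_matrix(bad_diag))  # False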

Example 6: _is_valid_correlation_matrix_cholesky_factor

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def _is_valid_correlation_matrix_cholesky_factor(L, tol=1e-6):
    """Check if the supplied matrix is a Cholesky factor of a valid correlation matrix

    A matrix is a Cholesky factor of a valid correlation matrix if it is lower
    triangular, has a positive diagonal, and each of its rows has unit
    Euclidean norm (so that the reconstructed matrix L L^T has unit diagonal)

    Args:
        L: An n x n lower-triangular matrix, or a batch of b lower-triangular
            matrices with shape b x n x n
        tol: The tolerance with which to check positivity of the diagonal and
            unit norm of the rows

    Returns:
        True if L is a Cholesky factor of a valid correlation matrix, False
            otherwise (in batch mode, all matrices in the batch need to be
            Cholesky factors of valid correlation matrices)

    """
    unit_row_length = torch.all((torch.norm(L, dim=-1) - 1).abs() < tol)
    return unit_row_length and torch.all(constraints.lower_cholesky.check(L))
Author: cornellius-gp | Project: gpytorch | Source: lkj_prior.py
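
A hedged usage sketch: the Cholesky factor of any valid correlation matrix has unit-norm rows, since the diagonal of L L^T is exactly the squared row norms of L.

import torch

C = torch.tensor([[1.0, 0.3], [0.3, 1.0]])
L = torch.linalg.cholesky(C)
print(torch.norm(L, dim=-1))                            # tensor([1., 1.])
print(_is_valid_correlation_matrix_cholesky_factor(L))  # tensor(True)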

Example 7: deprecate_task_noise_corr

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def deprecate_task_noise_corr(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
    if prefix + "task_noise_corr_factor" in state_dict:
        # Remove after 1.0
        warnings.warn(
            "Loading a deprecated parameterization of _MultitaskGaussianLikelihoodBase. Consider re-saving your model.",
            OldVersionWarning,
        )
        # construct the task correlation matrix from the factors using the old parameterization
        corr_factor = state_dict.pop(prefix + "task_noise_corr_factor").squeeze(0)
        corr_diag = state_dict.pop(prefix + "task_noise_corr_diag").squeeze(0)
        num_tasks, rank = corr_factor.shape[-2:]
        M = corr_factor.matmul(corr_factor.transpose(-1, -2))
        idx = torch.arange(M.shape[-1], dtype=torch.long, device=M.device)
        M[..., idx, idx] += corr_diag
        # standardize M into a correlation matrix: C_ij = M_ij / sqrt(M_ii * M_jj)
        sem_inv = 1 / torch.diagonal(M, dim1=-2, dim2=-1).sqrt().unsqueeze(-1)
        C = M * sem_inv.matmul(sem_inv.transpose(-1, -2))
        # perform a Cholesky decomposition and extract the required entries
        # (torch.cholesky is deprecated in newer PyTorch in favor of torch.linalg.cholesky)
        L = torch.cholesky(C)
        tidcs = torch.tril_indices(num_tasks, rank)[:, 1:]
        task_noise_corr = L[..., tidcs[0], tidcs[1]]
        state_dict[prefix + "task_noise_corr"] = task_noise_corr 
Author: cornellius-gp | Project: gpytorch | Source: multitask_gaussian_likelihood.py
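
The normalization step in the middle of this hook is useful on its own. A minimal, hedged sketch (the function name is illustrative) of converting a batched covariance matrix into a correlation matrix with torch.diagonal:

import torch

def cov_to_corr(cov: torch.Tensor) -> torch.Tensor:
    # inv_sd has shape batch_shape x n x 1; C_ij = Cov_ij / sqrt(Cov_ii * Cov_jj)
    inv_sd = 1 / torch.diagonal(cov, dim1=-2, dim2=-1).sqrt().unsqueeze(-1)
    return cov * inv_sd * inv_sd.transpose(-1, -2)

cov = torch.tensor([[4.0, 1.0], [1.0, 9.0]])
print(cov_to_corr(cov))
# tensor([[1.0000, 0.1667],
#         [0.1667, 1.0000]])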

Example 8: lognorm_to_norm

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def lognorm_to_norm(mu: Tensor, Cov: Tensor) -> Tuple[Tensor, Tensor]:
    """Compute mean and covariance of a MVN from those of the associated log-MVN

    If `Y` is log-normal with mean mu_ln and covariance Cov_ln, then
    `X ~ N(mu_n, Cov_n)` with

        Cov_n_{ij} = log(1 + Cov_ln_{ij} / (mu_ln_{i} * mu_ln_{j}))
        mu_n_{i} = log(mu_ln_{i}) - 0.5 * log(1 + Cov_ln_{ii} / mu_ln_{i}**2)

    Args:
        mu: A `batch_shape x n` mean vector of the log-Normal distribution.
        Cov: A `batch_shape x n x n` covariance matrix of the log-Normal
            distribution.

    Returns:
        A two-tuple containing:

        - The `batch_shape x n` mean vector of the Normal distribution
        - The `batch_shape x n x n` covariance matrix of the Normal distribution
    """
    Cov_n = torch.log(1 + Cov / (mu.unsqueeze(-1) * mu.unsqueeze(-2)))
    mu_n = torch.log(mu) - 0.5 * torch.diagonal(Cov_n, dim1=-1, dim2=-2)
    return mu_n, Cov_n 
Author: pytorch | Project: botorch | Source: utils.py
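
Because this function inverts norm_to_lognorm from Example 2, a short, hedged round-trip check (assuming both functions are in scope):

import torch

mu_n = torch.tensor([0.25, -0.5])
Cov_n = torch.tensor([[0.3, 0.05], [0.05, 0.2]])
mu_ln, Cov_ln = norm_to_lognorm(mu_n, Cov_n)
mu_back, Cov_back = lognorm_to_norm(mu_ln, Cov_ln)
print(torch.allclose(mu_back, mu_n, atol=1e-5))    # True
print(torch.allclose(Cov_back, Cov_n, atol=1e-5))  # True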

Example 9: exact_matrix_logarithm_trace

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def exact_matrix_logarithm_trace(Fx, x):
    """
    Computes the exact (slow) Tr(Ln(d(Fx)/dx))
    :param Fx: output of f(x)
    :param x: input
    :return: Tr(Ln(I + df/dx))
    """
    # assumes: import numpy as np; from scipy.linalg import logm; and
    # zero_gradients from torch.autograd.gradcheck (removed in newer PyTorch)
    bs = Fx.size(0)
    outVector = torch.sum(Fx, 0).view(-1)
    outdim = outVector.size()[0]
    indim = x.view(bs, -1).size()
    jac = torch.empty([bs, outdim, indim[1]], dtype=torch.float)
    # for each output Fx[i] compute d(Fx[i])/d(x); the view below assumes a
    # square Jacobian (outdim == indim[1]), as holds for invertible ResNets
    for i in range(outdim):
        zero_gradients(x)
        jac[:, i, :] = torch.autograd.grad(outVector[i], x,
                                           retain_graph=True)[0].view(bs, outdim)
    jac = jac.cpu().numpy()
    iden = np.eye(jac.shape[1])
    log_jac = np.stack([logm(jac[i] + iden) for i in range(bs)])
    trace_jac = np.diagonal(log_jac, axis1=1, axis2=2).sum(1)
    return trace_jac
Author: jhjacobsen | Project: invertible-resnet | Source: matrix_utils.py

Example 10: power_series_full_jac_exact_trace

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def power_series_full_jac_exact_trace(Fx, x, k):
    """
    Fast Tr(Ln(d(Fx)/dx)) using a power-series approximation with the full
    Jacobian and an exact trace

    :param Fx: output of f(x)
    :param x: input
    :param k: number of power-series terms to use
    :return: Tr(Ln(I + df/dx))
    """
    # compute_log_det is a helper from the same repository that returns the
    # full Jacobian; the loop accumulates log(I + J) = J - J^2/2 + J^3/3 - ...
    _, jac = compute_log_det(x, Fx)
    jacPower = jac
    summand = torch.zeros_like(jacPower)
    for i in range(1, k + 1):
        if i > 1:
            jacPower = torch.matmul(jacPower, jac)
        if (i + 1) % 2 == 0:
            summand += jacPower / float(i)
        else:
            summand -= jacPower / float(i)
    trace = torch.diagonal(summand, dim1=1, dim2=2).sum(1)
    return trace
Author: jhjacobsen | Project: invertible-resnet | Source: matrix_utils.py
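
The last two lines above implement a batched trace. As a standalone hedged check, torch.diagonal followed by a sum agrees with an einsum-based trace:

import torch

batch = torch.randn(4, 5, 5)
t1 = torch.diagonal(batch, dim1=1, dim2=2).sum(1)
t2 = torch.einsum('bii->b', batch)
print(torch.allclose(t1, t2))  # True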

Example 11: _apply_loss

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def _apply_loss(d, d_gt):
    """
    Loss calculation for the batch

    Arguments:
    ----------
        - d: Computed displacements
        - d_gt: Ground truth displacements

    Returns:
    --------
        - loss: the loss computed according to the formula in the paper

    """

    # Zero out all pixel entries whose ground-truth displacement magnitude exceeds 10px
    pixel_thresh = 10
    dispMagnitude = torch.sqrt(torch.pow(d_gt[:, :, 0], 2) + torch.pow(d_gt[:, :, 1], 2)).unsqueeze(-1).expand(-1, -1, 2)
    idx = dispMagnitude > pixel_thresh
    z = torch.zeros(dispMagnitude.shape)
    d = torch.where(idx, z, d)
    d_gt = torch.where(idx, z, d_gt)

    # The diagonal of the Gram matrix (d - d_gt)(d - d_gt)^T holds the squared
    # per-pixel error norms; sqrt and a sum over pixels gives the loss
    return torch.sum(torch.sqrt(torch.diagonal(torch.bmm(d - d_gt, (d - d_gt).permute(0, 2, 1)), dim1=-2, dim2=-1)), dim=1)
Author: yannadani | Project: dlgm | Source: main.py
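
Materializing the full Gram matrix just to read off its diagonal costs O(n^2) memory in the number of pixels; a hedged sketch of an equivalent direct computation:

import torch

d = torch.randn(2, 7, 2)
d_gt = torch.randn(2, 7, 2)
diff = d - d_gt
via_gram = torch.sqrt(torch.diagonal(torch.bmm(diff, diff.permute(0, 2, 1)), dim1=-2, dim2=-1)).sum(1)
direct = diff.norm(dim=-1).sum(1)
print(torch.allclose(via_gram, direct))  # True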

Example 12: __init__

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def __init__(
    self,
    num_tasks,
    rank=0,
    task_correlation_prior=None,
    batch_shape=torch.Size(),
    noise_prior=None,
    noise_constraint=None,
):
    """
    Args:
        num_tasks (int): Number of tasks.

        rank (int): The rank of the task noise covariance matrix to fit. If `rank` is set to 0,
        then a diagonal covariance matrix is fit.

        task_correlation_prior (:obj:`gpytorch.priors.Prior`): Prior to use over the task noise correlation matrix.
        Only used when `rank` > 0.

    """
    if noise_constraint is None:
        noise_constraint = GreaterThan(1e-4)

    noise_covar = MultitaskHomoskedasticNoise(
        num_tasks=num_tasks, noise_prior=noise_prior, noise_constraint=noise_constraint, batch_shape=batch_shape
    )
    super().__init__(
        num_tasks=num_tasks,
        noise_covar=noise_covar,
        rank=rank,
        task_correlation_prior=task_correlation_prior,
        batch_shape=batch_shape,
    )

    self.register_parameter(name="raw_noise", parameter=torch.nn.Parameter(torch.zeros(*batch_shape, 1)))
    self.register_constraint("raw_noise", noise_constraint)
Author: cornellius-gp | Project: gpytorch | Source: multitask_gaussian_likelihood.py

Example 13: tobit_probs

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def tobit_probs(mean: Tensor,
                cov: Tensor,
                lower: Optional[Tensor] = None,
                upper: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
    # CDF not well behaved at tails, truncate
    clamp = lambda z: torch.clamp(z, -5., 5.)

    if upper is None:
        upper = torch.empty_like(mean)
        upper[:] = float('inf')
    if lower is None:
        lower = torch.empty_like(mean)
        lower[:] = float('-inf')

    # marginal standard deviations from the diagonal variances
    # (assumes std_normal is a module-level torch.distributions.Normal(0., 1.),
    # and typing.Optional/Tuple and torch.Tensor are imported)
    std = torch.diagonal(cov, dim1=-2, dim2=-1).sqrt()
    probs_up = torch.zeros_like(mean)
    is_cens_up = torch.isfinite(upper)
    upper_z = (upper[is_cens_up] - mean[is_cens_up]) / std[is_cens_up]
    probs_up[is_cens_up] = 1. - std_normal.cdf(clamp(upper_z))

    probs_lo = torch.zeros_like(mean)
    is_cens_lo = torch.isfinite(lower)
    lower_z = (lower[is_cens_lo] - mean[is_cens_lo]) / std[is_cens_lo]
    probs_lo[is_cens_lo] = std_normal.cdf(clamp(lower_z))

    return probs_lo, probs_up 
Author: strongio | Project: torch-kalman | Source: utils.py

Example 14: forward

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def forward(self, src, adj, tgt_seq, binary_tgt, return_attns=False, int_preds=False):
    batch_size = src[0].size(0)
    src_seq, src_pos = src
    if self.decoder_type in ['sa_m', 'rnn_m']:
        tgt_seq = tgt_seq[:, :-1]

    enc_output, *enc_self_attns = self.encoder(src_seq, adj, src_pos, return_attns=return_attns)
    dec_output, *dec_output2 = self.decoder(tgt_seq, src_seq, enc_output, return_attns=return_attns, int_preds=int_preds)

    if self.decoder_type in ('rnn_m', 'mlp'):
        seq_logit = dec_output
    else:
        seq_logit = self.tgt_word_proj(dec_output)
        if self.decoder_type == 'graph':
            # take the per-position diagonal of the (position x label) score matrix
            seq_logit = torch.diagonal(seq_logit, 0, 1, 2)
    if int_preds:
        intermediate_preds = []
        tgt_word_proj_copy = self.tgt_word_proj.linear.weight.data.detach().repeat(batch_size, 1, 1)
        for int_idx, int_out in enumerate(dec_output2[0][:-1]):
            int_out = torch.bmm(int_out, tgt_word_proj_copy.transpose(1, 2))
            intermediate_preds += [torch.diagonal(int_out, 0, 1, 2)]
        return seq_logit.view(-1, seq_logit.size(-1)), enc_output, intermediate_preds
    elif return_attns:
        return seq_logit.view(-1, seq_logit.size(-1)), enc_output, enc_self_attns, dec_output2
    else:
        return seq_logit.view(-1, seq_logit.size(-1)), enc_output, None
Author: QData | Project: LaMP | Source: Models.py

Example 15: generate_labels

# Required module: import torch [as alias]
# Or: from torch import diagonal [as alias]
def generate_labels(pairs, rel_dict):
    # assumes: from multiprocessing.pool import ThreadPool; import numpy as np;
    # cfg is the project's configuration object
    pool = ThreadPool(8)
    def func(pair):
        tmp = rel_dict.get((pair[0], pair[1]), [0])
        out = np.zeros(cfg.MODEL.NUM_RELATIONS, dtype=np.int32)
        if pair[0] != -1 and pair[1] != -1:  # a (-1, -1) pair is a diagonal pair and needs no labels
            out[tmp] = 1
        return out
    results = pool.map(func, pairs)
    pool.close()
    pool.join()
    results = np.stack(results)
    return results
Author: ruotianluo | Project: Context-aware-ZSR | Source: rel_heads.py


Note: The torch.diagonal examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by the community; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding projects' licenses. Do not republish without permission.