This article collects typical usage examples of the Python method scipy.linalg.pinvh. If you have been wondering what linalg.pinvh does, what it is for, or how to use it, the curated code examples below should help. You can also explore further usage examples from the containing module, scipy.linalg.
The following presents 10 code examples of linalg.pinvh, ordered by popularity.
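Before the examples, here is a minimal, self-contained sketch of what pinvh does: it computes the Moore-Penrose pseudo-inverse of a symmetric (Hermitian) matrix via its eigendecomposition, agreeing with the general-purpose pinv on such inputs. The matrices below are made up purely for illustration.

import numpy as np
from scipy import linalg

# A symmetric positive-definite matrix: pinvh coincides with the true inverse.
A = np.array([[2.0, 1.0, 1.0],
              [1.0, 2.0, 1.0],
              [1.0, 1.0, 2.0]])
assert np.allclose(linalg.pinvh(A) @ A, np.eye(3))

# A singular symmetric matrix: pinvh returns the pseudo-inverse,
# matching the SVD-based pinv.
B = np.array([[1.0, 1.0],
              [1.0, 1.0]])  # rank 1
assert np.allclose(linalg.pinvh(B), linalg.pinv(B))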
Example 1: _set_covariance
# Required import: from scipy import linalg
# Or: from scipy.linalg import pinvh
def _set_covariance(self, covariance):
    """Saves the covariance and precision estimates.

    Storage is done according to `self.store_precision`.
    The precision is stored only if the covariance is invertible.

    Parameters
    ----------
    covariance : 2D ndarray, shape (n_features, n_features)
        Estimated covariance matrix to be stored, and from which the
        precision is computed.
    """
    covariance = check_array(covariance)
    # set covariance
    self.covariance_ = covariance
    # set precision
    if self.store_precision:
        self.precision_ = linalg.pinvh(covariance)
    else:
        self.precision_ = None
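To see this store-or-defer pattern in isolation, here is a hypothetical, minimal class (not part of scikit-learn) that precomputes the precision matrix with pinvh only when caching is requested:

import numpy as np
from scipy import linalg

class SimpleCovariance:
    """Hypothetical minimal mimic of the store_precision pattern above."""

    def __init__(self, store_precision=True):
        self.store_precision = store_precision

    def set_covariance(self, covariance):
        self.covariance_ = np.asarray(covariance)
        # Precision is precomputed only when caching is requested.
        if self.store_precision:
            self.precision_ = linalg.pinvh(self.covariance_)
        else:
            self.precision_ = None

est = SimpleCovariance()
est.set_covariance(np.eye(2) * 2.0)
assert np.allclose(est.precision_, np.eye(2) * 0.5)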
Example 2: assoc_test
# Required import: from scipy import linalg
# Or: from scipy.linalg import pinvh
def assoc_test(weights, gwas, ldmat, heterogeneity=False):
    """
    TWAS association test.

    :param weights: numpy.ndarray of eQTL weights
    :param gwas: pyfocus.GWAS object
    :param ldmat: numpy.ndarray LD matrix
    :param heterogeneity: bool, whether to estimate the variance under a
        multiplicative random-effects model
    :return: tuple of (beta, se)
    """
    p = ldmat.shape[0]
    assoc = np.dot(weights, gwas.Z)
    if heterogeneity:
        resid = assoc - gwas.Z
        resid_var = mdot([resid, lin.pinvh(ldmat), resid]) / p
    else:
        resid_var = 1

    se = np.sqrt(resid_var * mdot([weights, ldmat, weights]))

    return assoc, se
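A toy invocation of assoc_test, assuming the function above is pasted alongside these imports (it expects the aliases np, mdot, and lin in scope). SimpleNamespace stands in for the pyfocus.GWAS object, of which only the Z attribute is used here:

import numpy as np
from numpy.linalg import multi_dot as mdot
from scipy import linalg as lin
from types import SimpleNamespace

rng = np.random.default_rng(0)
weights = rng.normal(size=5)
gwas = SimpleNamespace(Z=rng.normal(size=5))  # stand-in for pyfocus.GWAS
ldmat = np.eye(5)                             # identity LD purely for illustration

beta, se = assoc_test(weights, gwas, ldmat, heterogeneity=True)
z = beta / se                                 # TWAS z-score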
Example 3: pinvh
# Required import: from scipy import linalg
# Or: from scipy.linalg import pinvh
def pinvh(a, cond=None, rcond=None, lower=True):
    return linalg.pinvh(a, cond, rcond, lower)
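Note that this thin wrapper forwards the older SciPy signature: in more recent SciPy releases the cond and rcond arguments of pinvh were deprecated and later removed in favour of atol/rtol. A version-tolerant sketch (the helper name pinvh_compat is made up for illustration):

import inspect
from scipy import linalg

def pinvh_compat(a, tol=None, lower=True):
    """Call scipy.linalg.pinvh with whichever tolerance argument this SciPy accepts."""
    if "atol" in inspect.signature(linalg.pinvh).parameters:   # newer SciPy
        return linalg.pinvh(a, atol=tol, lower=lower)
    return linalg.pinvh(a, cond=tol, lower=lower)              # older SciPy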
Example 4: get_precision
# Required import: from scipy import linalg
# Or: from scipy.linalg import pinvh
def get_precision(self):
    """Getter for the precision matrix.

    Returns
    -------
    precision_ : array-like
        The precision matrix associated to the current covariance object.
    """
    if self.store_precision:
        precision = self.precision_
    else:
        precision = linalg.pinvh(self.covariance_)
    return precision
Example 5: get_resid
# Required import: from scipy import linalg
# Or: from scipy.linalg import pinvh
def get_resid(zscores, swld, wcor):
    """
    Regress out the average pleiotropic signal tagged by TWAS at the region.

    :param zscores: numpy.ndarray TWAS z-scores
    :param swld: numpy.ndarray intercept variable
    :param wcor: numpy.ndarray predicted expression correlation
    :return: tuple of (residual TWAS z-scores, intercept z-score)
    """
    m, _ = wcor.shape
    m, p = swld.shape

    # create mean factor
    intercept = swld.dot(np.ones(p))

    # estimate under the null for variance components, i.e. V = SW LD SW
    wcor_inv, rank = lin.pinvh(wcor, return_rank=True)

    numer = mdot([intercept.T, wcor_inv, zscores])
    denom = mdot([intercept.T, wcor_inv, intercept])
    alpha = numer / denom
    resid = zscores - intercept * alpha

    s2 = mdot([resid, wcor_inv, resid]) / (rank - 1)
    inter_se = np.sqrt(s2 / denom)
    inter_z = alpha / inter_se

    return resid, inter_z
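The return_rank=True flag used above also reports the effective rank detected during inversion, which the code then uses as the degrees of freedom. A small illustration on a deliberately rank-deficient correlation matrix:

import numpy as np
from scipy import linalg as lin

# Two perfectly correlated variables plus one independent one -> rank 2.
wcor = np.array([[1.0, 1.0, 0.0],
                 [1.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
wcor_inv, rank = lin.pinvh(wcor, return_rank=True)
assert rank == 2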
Example 6: _get_covars
# Required import: from scipy import linalg
# Or: from scipy.linalg import pinvh
def _get_covars(self):
    return [pinvh(c) for c in self._get_precisions()]
Example 7: __init__
# Required import: from scipy import linalg
# Or: from scipy.linalg import pinvh
def __init__(self, ctr, cov):
    self.n = len(ctr)  # dimension
    self.ctr = np.array(ctr)  # center coordinates
    self.cov = np.array(cov)  # covariance matrix
    self.am = lalg.pinvh(cov)  # precision matrix (inverse of covariance)
    self.axes = lalg.cholesky(cov, lower=True)  # transformation axes

    # Volume of ellipsoid is the volume of an n-sphere divided
    # by the (determinant of the) Jacobian associated with the
    # transformation, which by definition is the precision matrix.
    detsign, detln = linalg.slogdet(self.am)  # here `linalg` is numpy.linalg
    self.vol = np.exp(logvol_prefactor(self.n) - 0.5 * detln)

    # The eigenvalues (l) of `cov` are (a^2, b^2, ...) where
    # (a, b, ...) are the lengths of the principal axes.
    # The eigenvectors (v) are the normalized principal axes.
    l, v = lalg.eigh(self.cov)
    if np.all((l > 0.) & (np.isfinite(l))):
        self.axlens = np.sqrt(l)
    else:
        raise ValueError("The input covariance matrix defining the "
                         "ellipsoid {0} is apparently singular with "
                         "l={1} and v={2}.".format(self.cov, l, v))

    # Scaled eigenvectors are the principal axes, where `paxes[:, i]` is the
    # i-th axis. Multiplying this matrix by a vector will transform a
    # point in the unit n-sphere into a point in the ellipsoid.
    self.paxes = np.dot(v, np.diag(self.axlens))

    # Amount by which volume was increased after initialization (i.e.
    # cumulative factor from `scale_to_vol`).
    self.expand = 1.
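To sanity-check the volume computation, the sketch below reproduces it for a 2-D ellipse, where the exact area is pi * a * b. It assumes logvol_prefactor(n) returns the log-volume of the unit n-sphere, so a stand-in with that meaning is defined here:

import numpy as np
from scipy import linalg as lalg
from scipy.special import gammaln

def logvol_prefactor(n):
    # log volume of the unit n-sphere: (n/2) * log(pi) - log Gamma(n/2 + 1)
    return 0.5 * n * np.log(np.pi) - gammaln(0.5 * n + 1.0)

# Ellipse with semi-axes a=3, b=2, so the area is pi * 3 * 2.
cov = np.diag([9.0, 4.0])      # eigenvalues are a^2, b^2
am = lalg.pinvh(cov)           # precision matrix
_, detln = np.linalg.slogdet(am)
vol = np.exp(logvol_prefactor(2) - 0.5 * detln)
assert np.isclose(vol, np.pi * 3 * 2)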
Example 8: fit
# Required import: from scipy import linalg
# Or: from scipy.linalg import pinvh
def fit(self, X, y=None):
    """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training data, where n_samples is the number of samples
        and n_features is the number of features.
    y : ignored, present for API consistency.

    Returns
    -------
    self : object
    """
    X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
    random_state = check_random_state(self.random_state)
    n_samples, n_features = X.shape

    # check that the empirical covariance is full rank
    if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
        warnings.warn("The covariance matrix associated to your dataset "
                      "is not full rank")

    # compute and store raw estimates
    raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
        X, support_fraction=self.support_fraction,
        cov_computation_method=self._nonrobust_covariance,
        random_state=random_state)

    if self.assume_centered:
        raw_location = np.zeros(n_features)
        raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                    assume_centered=True)
        # get precision matrix in an optimized way
        precision = linalg.pinvh(raw_covariance)
        raw_dist = np.sum(np.dot(X, precision) * X, 1)

    self.raw_location_ = raw_location
    self.raw_covariance_ = raw_covariance
    self.raw_support_ = raw_support
    self.location_ = raw_location
    self.support_ = raw_support
    self.dist_ = raw_dist

    # obtain consistency at normal models
    self.correct_covariance(X)

    # re-weight estimator
    self.reweight_covariance(X)

    return self
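For context, the public scikit-learn entry point for this estimator is sklearn.covariance.MinCovDet; a minimal usage sketch on synthetic data:

import numpy as np
from sklearn.covariance import MinCovDet

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0.0, 0.0],
                            cov=[[1.0, 0.3], [0.3, 1.0]], size=500)
mcd = MinCovDet(random_state=0).fit(X)
# get_precision() falls back to pinvh(covariance_) when the precision
# matrix is not cached.
robust_precision = mcd.get_precision()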
Example 9: test_bayesian_ridge_score_values
# Required import: from scipy import linalg
# Or: from scipy.linalg import pinvh
def test_bayesian_ridge_score_values():
    """Check value of score on toy example.

    Compute the log marginal likelihood with equation (36) in Sparse Bayesian
    Learning and the Relevance Vector Machine (Tipping, 2001):

    - 0.5 * (log |Id/alpha + X.X^T/lambda| +
             y^T.(Id/alpha + X.X^T/lambda)^-1.y + n * log(2 * pi))
    + lambda_1 * log(lambda) - lambda_2 * lambda
    + alpha_1 * log(alpha) - alpha_2 * alpha

    and check equality with the score computed during training.
    """
    X, y = diabetes.data, diabetes.target
    n_samples = X.shape[0]

    # check with initial values of alpha and lambda (see code for the values)
    eps = np.finfo(np.float64).eps
    alpha_ = 1. / (np.var(y) + eps)
    lambda_ = 1.

    # value of the parameters of the Gamma hyperpriors
    alpha_1 = 0.1
    alpha_2 = 0.1
    lambda_1 = 0.1
    lambda_2 = 0.1

    # compute score using the formula in the docstring
    score = lambda_1 * log(lambda_) - lambda_2 * lambda_
    score += alpha_1 * log(alpha_) - alpha_2 * alpha_
    M = 1. / alpha_ * np.eye(n_samples) + 1. / lambda_ * np.dot(X, X.T)
    M_inv = pinvh(M)
    score += - 0.5 * (fast_logdet(M) + np.dot(y.T, np.dot(M_inv, y)) +
                      n_samples * log(2 * np.pi))

    # compute score with BayesianRidge
    clf = BayesianRidge(alpha_1=alpha_1, alpha_2=alpha_2,
                        lambda_1=lambda_1, lambda_2=lambda_2,
                        n_iter=1, fit_intercept=False, compute_score=True)
    clf.fit(X, y)

    assert_almost_equal(clf.scores_[0], score, decimal=9)
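The core of the reference computation is the Gaussian log marginal likelihood term -0.5 * (log|M| + y^T M^-1 y + n log(2 pi)) with M = I/alpha + X X^T / lambda. A tiny self-contained version on made-up data, using NumPy's slogdet in place of scikit-learn's fast_logdet helper:

import numpy as np
from scipy.linalg import pinvh

rng = np.random.RandomState(0)
X = rng.normal(size=(10, 3))
y = rng.normal(size=10)
alpha_, lambda_ = 2.0, 1.5
n = X.shape[0]

M = np.eye(n) / alpha_ + X @ X.T / lambda_
_, logdet_M = np.linalg.slogdet(M)  # equivalent to fast_logdet for a PD matrix
ll = -0.5 * (logdet_M + y @ pinvh(M) @ y + n * np.log(2 * np.pi))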
Example 10: _train_gblup
# Required import: from scipy import linalg
# Or: from scipy.linalg import pinvh
def _train_gblup(y, Z, X, include_ses=False, p_threshold=0.01):
    log = logging.getLogger(pyfocus.LOG)

    try:
        from limix.qc import normalise_covariance
    except ImportError:
        log.error("Training submodule requires limix>=2.0.0 and sklearn to be installed.")
        raise

    from numpy.linalg import multi_dot as mdot
    from scipy.linalg import pinvh

    log.debug("Initializing GBLUP model")

    attrs = dict()

    # estimate heritability using limix
    K_cis = np.dot(Z, Z.T)
    K_cis = normalise_covariance(K_cis)
    fe_var, s2u, s2e, logl, fixed_betas, pval = _fit_cis_herit(y, K_cis, X)
    yresid = y - np.dot(X, fixed_betas)

    if pval > p_threshold:
        log.info("h2g pvalue {} greater than threshold {}. Skipping".format(pval, p_threshold))
        return None

    attrs["h2g"] = s2u / (fe_var + s2u + s2e)
    attrs["h2g.logl"] = logl
    attrs["h2g.pvalue"] = pval

    # Total variance
    n, p = Z.shape

    # ridge solution (i.e. rrBLUP)
    # this will be slower than normal GBLUP when p > n but is a little bit more flexible
    ZtZpDinv = pinvh(np.dot(Z.T, Z) + np.eye(p) * (s2e / s2u))
    betas = mdot([ZtZpDinv, Z.T, yresid])

    if include_ses:
        # TODO: come back to this with matrix operations rather than list comprehensions
        # jack-knife standard errors over the fast leave-one-out estimates using rrBLUP
        """
        h = np.array([mdot([Z[i], ZtZpDinv, Z[i]]) for i in range(n)])
        e = yresid - np.dot(Z, betas)
        beta_jk = [betas - np.dot(ZtZpDinv, Z[i] * e[i]) / (1 - h[i]) for i in range(n)]
        ses = np.sqrt(np.mean(beta_jk, axis=0) * (n - 1))
        """
        ses = None
    else:
        ses = None

    return betas, ses, attrs
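The ridge step above is the usual closed-form solution beta = (Z^T Z + (s2e/s2u) I)^-1 Z^T y, computed with pinvh because the regularized Gram matrix is symmetric positive definite. A toy check against numpy.linalg.solve, with a made-up shrinkage value standing in for s2e / s2u:

import numpy as np
from numpy.linalg import multi_dot as mdot
from scipy.linalg import pinvh

rng = np.random.RandomState(0)
Z = rng.normal(size=(50, 10))
y = rng.normal(size=50)
shrink = 0.5  # stand-in for s2e / s2u

A = Z.T @ Z + np.eye(10) * shrink
betas = mdot([pinvh(A), Z.T, y])
assert np.allclose(betas, np.linalg.solve(A, Z.T @ y))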