This article collects typical usage examples of the Python function scipy.linalg.pinvh. If you are wondering what exactly pinvh does, how to use it, or where to find examples of it in real code, the hand-picked samples below should help.
Fifteen pinvh code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
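Before the examples, a minimal sketch of what pinvh computes: the Moore-Penrose pseudo-inverse of a symmetric (real) or Hermitian (complex) matrix, obtained through a symmetric eigendecomposition rather than the general SVD used by scipy.linalg.pinv.

import numpy as np
from scipy.linalg import pinvh

# build a symmetric positive semidefinite matrix of rank 3
a = np.random.randn(5, 3)
s = a @ a.T
s_pinv = pinvh(s)
# Moore-Penrose condition: s @ s_pinv @ s == s (up to round-off)
assert np.allclose(s @ s_pinv @ s, s)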
Example 1: _update_precisions
def _update_precisions(self, X, z):
    """Update the variational distributions for the precisions"""
    n_features = X.shape[1]
    if self.covariance_type == 'spherical':
        self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
        for k in range(self.n_components):
            # could be more memory efficient ?
            sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
            self.scale_[k] = 1.
            self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
            self.bound_prec_[k] = (
                0.5 * n_features * (
                    digamma(self.dof_[k]) - np.log(self.scale_[k])))
        self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
    elif self.covariance_type == 'diag':
        for k in range(self.n_components):
            self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
            sq_diff = (X - self.means_[k]) ** 2  # see comment above
            self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                z.T[k], (sq_diff + 1))
            self.precs_[k] = self.dof_[k] / self.scale_[k]
            self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                               - np.log(self.scale_[k]))
            self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
    elif self.covariance_type == 'tied':
        self.dof_ = 2 + X.shape[0] + n_features
        self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
        for k in range(self.n_components):
            diff = X - self.means_[k]
            self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
        self.scale_ = pinvh(self.scale_)
        self.precs_ = self.dof_ * self.scale_
        self.det_scale_ = linalg.det(self.scale_)
        self.bound_prec_ = 0.5 * wishart_log_det(
            self.dof_, self.scale_, self.det_scale_, n_features)
        self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
    elif self.covariance_type == 'full':
        for k in range(self.n_components):
            sum_resp = np.sum(z.T[k])
            self.dof_[k] = 2 + sum_resp + n_features
            self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
            diff = X - self.means_[k]
            self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_[k] = pinvh(self.scale_[k])
            self.precs_[k] = self.dof_[k] * self.scale_[k]
            self.det_scale_[k] = linalg.det(self.scale_[k])
            self.bound_prec_[k] = 0.5 * wishart_log_det(
                self.dof_[k], self.scale_[k], self.det_scale_[k],
                n_features)
            self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                self.scale_[k])
Example 2: _init_params
def _init_params(self, X):
    '''
    Initialise parameters
    '''
    d = X.shape[1]
    # initialise prior on means & precision matrices
    if 'means' in self.init_params:
        means0 = self.init_params['means']
    else:
        kms = KMeans(n_init=self.n_init, n_clusters=self.n_components)
        means0 = kms.fit(X).cluster_centers_
    if 'covar' in self.init_params:
        scale_inv0 = self.init_params['covar']
        scale0 = pinvh(scale_inv0)
    else:
        # heuristics to define broad prior over precision matrix
        diag_els = np.abs(np.max(X, 0) - np.min(X, 0)) / 2
        scale_inv0 = np.diag(diag_els)
        scale0 = np.diag(1. / diag_els)
    if 'weights' in self.init_params:
        weights0 = np.asarray(self.init_params['weights'])
    else:
        weights0 = np.ones(self.n_components) / self.n_components
    if 'dof' in self.init_params:
        dof0 = self.init_params['dof']
    else:
        dof0 = d
    if 'beta' in self.init_params:
        beta0 = self.init_params['beta']
    else:
        beta0 = 1e-3
    # clusters that are not pruned
    self.active = np.ones(self.n_components, dtype=bool)
    # checks initialisation errors in case parameters are user defined
    assert dof0 >= d, ('Degrees of freedom should be larger than '
                       'dimensionality of data')
    assert means0.shape[0] == self.n_components, ('Number of centroids defined '
                                                  'should be equal to number of components')
    assert means0.shape[1] == d, ('Dimensionality of means and data '
                                  'should be the same')
    assert weights0.shape[0] == self.n_components, ('Number of weights should be '
                                                    'equal to number of components')
    # At first iteration these parameters are equal to priors, but they change
    # at each iteration of the mean field approximation
    scale = np.array([np.copy(scale0) for _ in range(self.n_components)])
    means = np.copy(means0)
    weights = np.copy(weights0)
    dof = dof0 * np.ones(self.n_components)
    beta = beta0 * np.ones(self.n_components)
    init_ = [means0, scale0, scale_inv0, beta0, dof0, weights0]
    iter_ = [means, scale, scale_inv0, beta, dof, weights]
    return init_, iter_
Example 3: fit
def fit(self, X=None, y=None):
    """
    The Gaussian Process model fitting method.

    Parameters
    ----------
    X : double array_like
        An array with shape (n_samples, n_features) with the inputs at
        which observations were made.
    y : array_like, shape (n_samples, 3)
        An array with the observations of the output to be predicted.

    Returns
    -------
    gp : self
        A fitted Gaussian Process model object awaiting data to perform
        predictions.
    """
    if X is not None:
        K_list = self.calc_scalar_kernel_matrices(X)
    else:
        K_list = self.calc_scalar_kernel_matrices()
    # add diagonal noise to each scalar kernel matrix
    K_list = [K + self.nugget * sp.eye(K.shape[0]) for K in K_list]
    Kglob = None
    for K, ivs, iv_corr in zip(K_list, self.ivs, self.iv_corr):
        # make the outer product tensor of shape (N_ls, N_ls, 3, 3)
        # and multiply it with the scalar kernel
        K3D = iv_corr * K[:, :, None, None] * rotmat_multi(ivs, ivs)
        if Kglob is None:
            Kglob = K3D
        else:
            Kglob += K3D
    # reshape tensor onto a 2D array tiled with 3x3 matrix blocks;
    # all channels merged into one covariance matrix:
    # K^{glob}_{ij} = \sum_{k = 1}^{N_{IVs}} w_k D_{k, ij} |v_k^i\rangle \langle v_k^j |
    Kglob = my_tensor_reshape(Kglob)
    try:
        inv = LA.pinv2(Kglob)
    except LA.LinAlgError as err:
        print("pinv2 failed: %s. Switching to pinvh" % err)
        try:
            inv = LA.pinvh(Kglob)
        except LA.LinAlgError as err:
            print("pinvh failed as well: %s. No pseudo-inverse available" % err)
            inv = None
    # alpha is the vector of regression coefficients of GaussianProcess
    alpha = sp.dot(inv, self.y.ravel())
    if not self.low_memory:
        self.inverse = inv
        self.Kglob = Kglob
    self.alpha = sp.array(alpha)
Example 4: __init__
def __init__(self, xs, ys, noise=0.001, l=1, K=K_SE):
    self.xs = xs
    self.l = l
    self.K = K
    Kxx = self.K(xs, l=self.l)
    self.KxxI = pinvh(Kxx + (noise**2) * eye_like(Kxx))
    self.KxxI_ys = self.KxxI.dot(ys)
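The constructor caches KxxI_ys so that a later prediction reduces to one kernel product. The helpers K_SE and eye_like are not shown on this page, so the sketch below substitutes a minimal squared-exponential kernel of its own (an assumption) to illustrate the posterior-mean step the cached vector enables:

import numpy as np
from scipy.linalg import pinvh

def k_se(a, b, l=1.0):
    # minimal squared-exponential kernel, standing in for K_SE
    d = a[:, None] - b[None, :]
    return np.exp(-0.5 * (d / l) ** 2)

xs = np.linspace(0.0, 5.0, 20)
ys = np.sin(xs)
noise = 0.001
KxxI = pinvh(k_se(xs, xs) + noise**2 * np.eye(len(xs)))
KxxI_ys = KxxI.dot(ys)              # what the constructor caches

xs_new = np.array([1.5, 2.5])
mu = k_se(xs_new, xs).dot(KxxI_ys)  # GP posterior mean at the new inputs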
Example 5: fit
def fit(self, evidence_approx_method="fixed-point", max_iter=100):
    '''
    Fits Bayesian linear regression; finds posterior mean and precision
    of parameters.

    Parameters
    ----------
    max_iter: int
        Maximum number of iterations
    evidence_approx_method: str (DEFAULT = 'fixed-point')
        Method for approximating evidence, either 'fixed-point' or 'EM'

    Theory Note
    -----------
    This code implements two methods to fit type II ML Bayesian Linear
    Regression: Expectation Maximization and fixed-point iterations.
    Expectation Maximization is generally slower, so fixed-point is the
    default.
    '''
    # use type II maximum likelihood to find hyperparameters alpha and beta
    self._evidence_approx(max_iter=max_iter, method=evidence_approx_method)
    # find parameters of posterior distribution after last update of alpha & beta
    self.w_mu, self.w_precision = self._posterior_params(self.alpha, self.beta)
    self.D = pinvh(self.w_precision)
Example 6: test_simple_complex
def test_simple_complex(self):
    a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
         + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
    a = np.dot(a, a.conj().T)
    a_pinv = pinvh(a)
    assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
Example 7: nll
def nll(l):  # negative log likelihood
    #if l < 0.001: return 1e10
    Kxx = K(xs, l=l)
    Kxx += (noise**2) * eye_like(Kxx)
    res = (ys.T).dot(pinvh(Kxx)).dot(ys) + slogdet(Kxx)[1]
    res = squeeze(res)
    #print l, res
    return res
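In context, nll is presumably handed to a scalar optimizer to select the length-scale l; the call below is an assumed usage, not shown in the source:

from scipy.optimize import minimize_scalar

# hypothetical usage; assumes xs, ys, noise, K are in scope for nll
res = minimize_scalar(nll, bounds=(1e-3, 10.0), method='bounded')
l_best = res.x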
Example 8: test_nonpositive
def test_nonpositive(self):
    a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
    a = np.dot(a, a.T)
    u, s, vt = np.linalg.svd(a)
    s[0] *= -1
    a = np.dot(u * s, vt)  # a is now symmetric non-positive and singular
    a_pinv = pinv2(a)
    a_pinvh = pinvh(a)
    assert_array_almost_equal(a_pinv, a_pinvh)
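Note that scipy.linalg.pinv2, used for the comparison above, was deprecated in SciPy 1.5 and removed in SciPy 1.7; on current SciPy, scipy.linalg.pinv (or numpy.linalg.pinv) provides the same SVD-based pseudo-inverse for this comparison.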
Example 9: laplacian_sc_pinv
def laplacian_sc_pinv(G, observed_nodelist, unobserved_nodelist,
                      weight='weight'):
    """
    Pseudo-inverse of the Laplacian Schur complement.
    """
    sc = laplacian_schur_complement(G, observed_nodelist,
                                    unobserved_nodelist, weight=weight)
    return pinvh(sc)
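laplacian_schur_complement is a helper not shown on this page; presumably it eliminates the unobserved block of the graph Laplacian, L_oo - L_ou L_uu^{-1} L_uo. A self-contained sketch under that assumption:

import numpy as np
import networkx as nx
from scipy.linalg import pinvh

G = nx.path_graph(5)
observed, unobserved = [0, 4], [1, 2, 3]
L = nx.laplacian_matrix(G, nodelist=observed + unobserved).toarray().astype(float)
k = len(observed)
# Schur complement onto the observed block: L_oo - L_ou L_uu^{-1} L_uo
sc = L[:k, :k] - L[:k, k:] @ np.linalg.inv(L[k:, k:]) @ L[k:, :k]
sc_pinv = pinvh(sc)  # the Schur complement is symmetric, so pinvh applies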
Example 10: error_matrix
def error_matrix(self):
    """
    Covariance matrix.
    """
    try:
        mask = self.flat_hess_.mask
    except AttributeError:
        mask = None
    return self._reshape_matrix(
        -np.ma.array(pinvh(self.flat_hess_.data), mask=mask))
Example 11: update_sigma
def update_sigma(X, alpha_, lambda_, keep_lambda, n_samples):
    # posterior weight covariance via the Woodbury identity:
    # (A + alpha X^T X)^{-1}
    #     = A^{-1} - A^{-1} X^T (I/alpha + X A^{-1} X^T)^{-1} X A^{-1}
    # with A = diag(lambda_), so only an (n_samples, n_samples) matrix
    # is inverted
    sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                   np.dot(X[:, keep_lambda] *
                          np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                          X[:, keep_lambda].T))
    sigma_ = np.dot(sigma_, X[:, keep_lambda] *
                    np.reshape(1. / lambda_[keep_lambda], [1, -1]))
    sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
                      X[:, keep_lambda].T, sigma_)
    sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
    return sigma_
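The chain of operations above is the Woodbury identity applied to the ARD posterior covariance (diag(lambda_) + alpha_ * X^T X)^{-1}, so that only an (n_samples, n_samples) matrix is inverted. A quick sanity check of that equivalence (my own sketch, not from the source) against the direct inversion:

import numpy as np
from scipy.linalg import pinvh

rng = np.random.RandomState(0)
X = rng.randn(5, 3)
alpha_, lambda_ = 2.0, rng.uniform(0.5, 2.0, 3)
keep = np.ones(3, dtype=bool)

sigma_woodbury = update_sigma(X, alpha_, lambda_, keep, n_samples=5)
sigma_direct = pinvh(np.diag(lambda_) + alpha_ * X.T @ X)
assert np.allclose(sigma_woodbury, sigma_direct)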
Example 12: nll_prime
def nll_prime(l):
    Kxx, Kps = K(xs, l=l, deriv=True)
    Kxx += (noise**2) * eye_like(Kxx)
    KxxI = pinvh(Kxx)
    a = KxxI.dot(ys)
    aaT = outer(a, a)    # a . a^T
    KI_aaT = KxxI - aaT  # K^-1 - a.a^T
    res = []
    for Kp in Kps:
        grad = trace_prod(KI_aaT, Kp)
        res.append(grad)
    return asarray(res)
Example 13: _init_params
def _init_params(self, *args):
    '''
    Initialise parameters of Bayesian Gaussian HMM
    '''
    d, X = args
    pr_start, pr_trans = super(VBGaussianHMM, self)._init_params()
    # initialise prior on means & precision matrices;
    # if the user did not define initialisation parameters, use KMeans
    if 'means' in self.init_params:
        means0 = check_array(self.init_params['means'])
    else:
        kms = KMeans(n_init=2, n_clusters=self.n_hidden)
        means0 = kms.fit(X).cluster_centers_
    if 'covar' in self.init_params:
        scale_inv0 = self.init_params['covar']
        scale0 = pinvh(scale_inv0)
    else:
        # heuristics to define broad prior over precision matrix
        diag_els = np.abs(np.max(X, 0) - np.min(X, 0))
        scale_inv0 = np.diag(diag_els)
        scale0 = np.diag(1. / diag_els)
    if 'dof' in self.init_params:
        dof0 = self.init_params['dof']
    else:
        dof0 = d
    if 'beta' in self.init_params:
        beta0 = self.init_params['beta']
    else:
        beta0 = 1e-3
    # checks initialisation errors in case parameters are user defined
    if dof0 < d:
        raise ValueError('Degrees of freedom should be larger than '
                         'dimensionality of data')
    if means0.shape[0] != self.n_hidden:
        raise ValueError('Number of centroids defined should '
                         'be equal to number of components')
    if means0.shape[1] != d:
        raise ValueError('Dimensionality of means and data '
                         'should be the same')
    scale = np.array([np.copy(scale0) for _ in range(self.n_hidden)])
    dof = dof0 * np.ones(self.n_hidden)
    beta = beta0 * np.ones(self.n_hidden)
    return pr_start, pr_trans, {'means': means0, 'scale': scale, 'beta': beta,
                                'dof': dof, 'scale_inv0': scale_inv0}
Example 14: get_precision
def get_precision(self):
    """Getter for the precision matrix.

    Returns
    -------
    precision_ : array-like
        The precision matrix associated with the current covariance object.
    """
    if self.store_precision:
        precision = self.precision_
    else:
        precision = linalg.pinvh(self.covariance_)
    return precision
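This getter follows the pattern of scikit-learn's covariance estimators. A hedged usage sketch, assuming the enclosing class behaves like sklearn.covariance.EmpiricalCovariance:

import numpy as np
from sklearn.covariance import EmpiricalCovariance

X = np.random.randn(100, 4)
cov = EmpiricalCovariance().fit(X)
# returns the stored precision, or linalg.pinvh(covariance_) otherwise
precision = cov.get_precision()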
Example 15: _update_params
def _update_params(self, Nk, Xk, Sk, beta0, means0, dof0, scale_inv0,
                   beta, means, dof, scale):
    ''' Updates distribution of means and precisions '''
    for k in range(self.n_active):
        # update mean and precision for each cluster
        beta[k] = beta0 + Nk[k]
        means[k] = (beta0 * means0[k, :] + Xk[k]) / beta[k]
        dof[k] = dof0 + Nk[k] + 1
        # precision calculation is ugly but prevents overflow & underflow
        scale[k, :, :] = pinvh(scale_inv0 + (beta0 * Sk[k] + Nk[k] * Sk[k] -
                               np.outer(Xk[k], Xk[k]) -
                               beta0 * np.outer(means0[k, :] - Xk[k], means0[k, :])) /
                               (beta0 + Nk[k]))
    return beta, means, dof, scale