This article collects and organizes typical usage examples of Python's scipy.linalg.inv function. If you have been struggling with questions like "what exactly does inv do?", "how do I call it?", or "what does it look like in real code?", then the hand-picked code samples below may help.
The following presents 15 code examples of the inv function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
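Before the curated examples, here is a minimal sketch of the basic call pattern: scipy.linalg.inv takes a square array and returns its inverse. The matrix A below is made up purely for illustration.

import numpy as np
from scipy.linalg import inv

# A small, clearly invertible matrix (hypothetical data)
A = np.array([[2.0, 1.0],
              [1.0, 3.0]])
A_inv = inv(A)

# inv(A) @ A should be (numerically) the identity matrix
print(np.allclose(A_inv @ A, np.eye(2)))  # True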
Example 1: sample_transition_within_subspace
def sample_transition_within_subspace(model, state, hyperparams):
"""
MCMC iteration (Gibbs sampling) for transition matrix and covariance
within the constrained subspace
"""
# Calculate sufficient statistics
suffStats = smp.evaluate_transition_sufficient_statistics(state)
# Convert to Givens factorisation form
U,D = model.convert_to_givens_form()
# Sample a new projected transition matrix and transition covariance
rank = model.parameters['rank'][0]
nu0 = rank
Psi0 = rank*hyperparams['rPsi0']
nu,Psi,M,V = smp.hyperparam_update_degenerate_mniw_transition(
suffStats, U,
nu0,
Psi0,
hyperparams['M0'],
hyperparams['V0'])
D = la.inv(smp.sample_wishart(nu, la.inv(Psi)))
FU = smp.sample_matrix_normal(M, D, V)
# Project out
Fold = model.parameters['F']
F = smp.project_degenerate_transition_matrix(Fold, FU, U)
model.parameters['F'] = F
# Convert back to eigen-decomposition form
model.update_from_givens_form(U, D)
return model
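The line D = la.inv(smp.sample_wishart(nu, la.inv(Psi))) draws from an inverse-Wishart distribution via a Wishart draw, using the standard fact that if W ~ Wishart(nu, Psi^-1) then W^-1 ~ Inverse-Wishart(nu, Psi). Below is a self-contained sketch of the same trick using scipy.stats; smp.sample_wishart above is a project-specific helper, and the nu and Psi values here are made up.

import numpy as np
from scipy import linalg as la
from scipy.stats import wishart

nu, Psi = 5, np.diag([2.0, 3.0])  # hypothetical degrees of freedom and scale

# Draw W ~ Wishart(nu, Psi^-1); its inverse follows Inverse-Wishart(nu, Psi)
W = wishart.rvs(df=nu, scale=la.inv(Psi), random_state=0)
D = la.inv(W)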
Example 2: mix_parameters
def mix_parameters(self, Pibra, Piket):
r"""Mix the two parameter sets :math:`\Pi_i` and :math:`\Pi_j`
from the 'bra' and the 'ket' wavepackets :math:`\Phi\left[\Pi_i\right]`
and :math:`\Phi^\prime\left[\Pi_j\right]`.
:param Pibra: The parameter set :math:`\Pi_i` from the bra part wavepacket.
:param Piket: The parameter set :math:`\Pi_j` from the ket part wavepacket.
:return: The mixed parameters :math:`q_0` and :math:`Q_S`. (See the theory for details.)
"""
# <Pibra | ... | Piket>
qr, pr, Qr, Pr = Pibra
qc, pc, Qc, Pc = Piket
# Mix the parameters
Gr = dot(Pr, inv(Qr))
Gc = dot(Pc, inv(Qc))
r = imag(Gc - conjugate(Gr.T))
s = imag(dot(Gc, qc) - dot(conjugate(Gr.T), qr))
q0 = dot(inv(r), s)
Q0 = 0.5 * r
# Here we cannot avoid the matrix square root by using an SVD
Qs = inv(sqrtm(Q0))
return (q0, Qs)
Example 3: get_precision
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
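The matrix inversion lemma the docstring refers to is the Woodbury identity, (A + U C V)^-1 = A^-1 - A^-1 U (C^-1 + V A^-1 U)^-1 V A^-1, which avoids inverting a full n_features x n_features covariance when it is low-rank plus isotropic noise. A quick numerical check of the identity; all matrices below are random stand-ins.

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
n, k = 6, 2                      # n features, rank-k component
U = rng.standard_normal((n, k))
C = np.eye(k)
A = 0.5 * np.eye(n)              # isotropic noise term, cheap to invert

direct = linalg.inv(A + U @ C @ U.T)
A_inv = linalg.inv(A)
woodbury = A_inv - A_inv @ U @ linalg.inv(
    linalg.inv(C) + U.T @ A_inv @ U) @ U.T @ A_inv
print(np.allclose(direct, woodbury))  # True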
Example 4: sample_transition_covariance_within_subspace
def sample_transition_covariance_within_subspace(model, state, hyperparams, pseudo_dof=None):
"""
MCMC iteration (Gibbs sampling) for transition matrix and covariance
within the constrained subspace
"""
# Calculate sufficient statistics
suffStats = smp.evaluate_transition_sufficient_statistics(state)
# Convert to Givens factorisation form
U,D = model.convert_to_givens_form()
# Sample a pseudo-observation to constrain the size of the move
if pseudo_dof is not None:
extra_nu = pseudo_dof
extra_Psi = smp.sample_wishart(pseudo_dof, D)
else:
extra_nu = 0
extra_Psi = 0
# Sample a new projected transition matrix and transition covariance
rank = model.parameters['rank'][0]
nu0 = rank + extra_nu
Psi0 = rank*hyperparams['rPsi0'] + np.dot(U, np.dot(extra_Psi, U.T))
nu,Psi = smp.hyperparam_update_degenerate_iw_transition_covariance(
suffStats, U,
model.parameters['F'],
nu0,
Psi0)
D = la.inv(smp.sample_wishart(nu, la.inv(Psi)))
# Convert back to eigen-decomposition form
model.update_from_givens_form(U, D)
return model
Example 5: RImat
def RImat(WI, mx):
""" R matrix
Parameters
----------
WI : numpy 3d array
array of inverted interaction arrays, as returned from WImat
mx : int
matching point in inward and outward solutions (bound states only)
Returns
-------
RI : numpy 3d array
R matrix of the Johnson method
"""
oo, n, m = WI.shape
I = np.identity(n)
RI = np.zeros_like(WI)
U = 12*WI-I*10
for i in range(1, mx+1):
RI[i] = linalg.inv(U[i]-RI[i-1])
for i in range(oo-2, mx, -1):
RI[i] = linalg.inv(U[i]-RI[i+1])
return RI
Example 6: dataNorm
def dataNorm(self):
SXX = np.cov(self.X)
U, l, Ut = LA.svd(SXX, full_matrices=True)
H = np.dot(LA.sqrtm(LA.inv(np.diag(l))),Ut)
self.nX = np.dot(H,self.X)
#print np.cov(self.nX)
#print "mean:"
#print np.mean(self.nX)
SYY = np.cov(self.Y)
U, l, Ut = LA.svd(SYY, full_matrices=True)
H = np.dot(LA.sqrtm(LA.inv(np.diag(l))),Ut)
#print "H"
#print H
self.nY = np.dot(H,self.Y)
#print np.cov(self.nY)
print "dataNorm_X:"
for i in range(len(self.nX)):
print(self.nX[i])
print("---")
print "dataNorm_Y:"
for i in range(len(self.nY)):
print(self.nY[i])
print("---")
Example 7: write_ctf_comp
def write_ctf_comp(fid, comps):
"""Write the CTF compensation data into a fif file
Parameters
----------
fid: file
The open FIF file descriptor
comps: list
The compensation data to write
"""
if len(comps) <= 0:
return
# This is very simple in fact
start_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
for comp in comps:
start_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
# Write the compensation kind
write_int(fid, FIFF.FIFF_MNE_CTF_COMP_KIND, comp['ctfkind'])
write_int(fid, FIFF.FIFF_MNE_CTF_COMP_CALIBRATED,
comp['save_calibrated'])
# Write an uncalibrated or calibrated matrix
        # Undo the calibration: data = diag(rowcals)^-1 @ data @ diag(colcals)^-1
        comp['data']['data'] = np.dot(
            linalg.inv(np.diag(comp['rowcals'].ravel())),
            np.dot(comp['data']['data'],
                   linalg.inv(np.diag(comp['colcals'].ravel()))))
write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp['data'])
end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
end_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
Example 8: update_matrices
def update_matrices(self):
if hasattr(self,"dataImg"):
mScale = self._stack_scale_mat()
invM = inv(np.dot(self.modelView,mScale))
self.dev.writeBuffer(self.invMBuf,invM.flatten().astype(np.float32))
invP = inv(self.projection)
self.dev.writeBuffer(self.invPBuf,invP.flatten().astype(np.float32))
Example 9: gp_likelihood_cplt2
def gp_likelihood_cplt2(self, target, D_nm, D_mm, logtheta, logdelta, rglr):
y = target
Len = len(y)
Len_rr = len(D_mm[0])
num_of_feature = len(logdelta)
theta = np.exp(logtheta)
delta = []
for i in range(0, num_of_feature):
delta.append(np.exp(logdelta[i]))
K_nm = GaussOperation.kernel_gauss_gp_cplt(D_nm, theta, delta)
K_mm = GaussOperation.kernel_gauss_gp_cplt(D_mm, theta, delta)
I = np.identity(Len, dtype=np.float)
I_rr = np.identity(Len_rr, dtype=np.float)
L = cholesky(K_mm + rglr * I_rr)
inv_L = inv(L)
Q = np.dot(K_nm, np.transpose(inv_L))
inv_Q = np.real(inv(rglr * I_rr + np.dot(np.transpose(Q), Q)))
inv_K = 1 / rglr * I - 1 / rglr * np.dot(Q, np.dot(inv_Q, np.transpose(Q)))
part1 = 0.5 * np.dot(np.transpose(y), np.dot(inv_K, y))
part2 = 0.5 * Len * np.log(rglr) + 0.5 * np.log(det(I_rr + 1 / rglr * np.dot(np.transpose(Q), Q)))
part3 = Len / 2 * np.log(2 * pi)
LLH = part1 + part2 + part3
return LLH
Example 10: kcca
def kcca(self, X, Y, kernel_x=gaussian_kernel, kernel_y=gaussian_kernel, eta=1.0):
n, p = X.shape
n, q = Y.shape
Kx = DIST.squareform(DIST.pdist(X, kernel_x))
Ky = DIST.squareform(DIST.pdist(Y, kernel_y))
J = np.eye(n) - np.ones((n, n)) / n
M = np.dot(np.dot(Kx.T, J), Ky) / n
L = np.dot(np.dot(Kx.T, J), Kx) / n + eta * Kx
N = np.dot(np.dot(Ky.T, J), Ky) / n + eta * Ky
sqx = SLA.sqrtm(SLA.inv(L))
sqy = SLA.sqrtm(SLA.inv(N))
a = np.dot(np.dot(sqx, M), sqy.T)
A, s, Bh = SLA.svd(a, full_matrices=False)
B = Bh.T
# U = np.dot(np.dot(A.T, sqx), X).T
# V = np.dot(np.dot(B.T, sqy), Y).T
print(s.shape)
print(A.shape)
print(B.shape)
return s, A, B
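The J = I - 1/n matrix used here is the usual centering matrix: multiplying by J removes the mean, which is how the kernel matrices get centred in feature space. A minimal illustration on a plain vector; the data is made up.

import numpy as np

n = 5
J = np.eye(n) - np.ones((n, n)) / n
x = np.array([1.0, 2.0, 3.0, 4.0, 10.0])

# J @ x subtracts the mean of x from every entry
print(np.allclose(J @ x, x - x.mean()))  # True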
Example 11: get_corr_pred
def get_corr_pred(self, sctx, eps_app_eng, d_eps, tn, tn1):
'''
Corrector predictor computation.
@param eps_app_eng input variable - engineering strain
'''
delta_gamma = 0.
if sctx.update_state_on:
# print "in us"
eps_n = eps_app_eng - d_eps
sigma, f_trial, epsilon_p, q_1, q_2 = self._get_state_variables(
sctx, eps_n)
sctx.mats_state_array[:3] = epsilon_p
sctx.mats_state_array[3] = q_1
sctx.mats_state_array[4:] = q_2
diff1s = zeros([3])
sigma, f_trial, epsilon_p, q_1, q_2 = self._get_state_variables(
sctx, eps_app_eng)
# Note: the state variables are not needed here, just gamma
diff2ss = self.yf.get_diff2ss(eps_app_eng, self.E, self.nu, sctx)
Xi_mtx = inv(inv(self.D_el) + delta_gamma * diff2ss * f_trial)
N_mtx_denom = sqrt(dot(dot(diff1s, Xi_mtx), diff1s))
if N_mtx_denom == 0.:
N_mtx = zeros(3)
else:
N_mtx = dot(Xi_mtx, self.diff1s) / N_mtx_denom
D_mtx = Xi_mtx - outer(N_mtx, N_mtx)
# print "sigma ",sigma
# print "D_mtx ",D_mtx
return sigma, D_mtx
Example 12: __init__
def __init__(self,data,cov_matrix=False,loc=None):
"""Parameters
----------
data : array of data, shape=(number points,number dim)
If cov_matrix is True then data is the covariance matrix (see below)
Keywords
--------
cov_matrix : bool (optional)
If True data is treated as a covariance matrix with shape=(number dim, number dim)
loc : the mean of the data if a covariance matrix is given, shape=(number dim)
"""
if cov_matrix:
self.dim=data.shape[0]
self.n=None
self.data_t=None
self.mu=loc
self.evec,eval,V=sl.svd(data,full_matrices=False)
self.sigma=sqrt(eval)
self.Sigma=diag(1./self.sigma)
self.B=dot(self.evec,self.Sigma)
self.Binv=sl.inv(self.B)
else:
self.n,self.dim=data.shape #the shape of input data
self.mu=data.mean(axis=0) #the mean of the data
self.data_t=data-self.mu #remove the mean
self.evec,eval,V=sl.svd(self.data_t.T,full_matrices=False) #get the eigenvectors (axes of the ellipsoid)
data_p=dot(self.data_t,self.evec) #project the data onto the eigenvectors
self.sigma=data_p.std(axis=0) #get the spread of the distribution (the axis ratios for the ellipsoid)
self.Sigma=diag(1./self.sigma) #the eigenvalue matrix for the ellipsoid equation
self.B=dot(self.evec,self.Sigma) #used in the ellipsoid equation
self.Binv=sl.inv(self.B) #also useful to have around
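The B = V Sigma^-1 matrix is what turns the ellipsoidal point cloud into a unit ball: projecting the centred data through B gives unit spread along every axis. A quick check on synthetic data; the skewing matrix below is made up.

import numpy as np
from scipy import linalg as sl

rng = np.random.default_rng(0)
data = rng.standard_normal((1000, 2)) @ np.array([[3.0, 0.0], [1.0, 0.5]])

mu = data.mean(axis=0)
data_t = data - mu
evec, eigval, V = sl.svd(data_t.T, full_matrices=False)
sigma = np.dot(data_t, evec).std(axis=0)
B = np.dot(evec, np.diag(1.0 / sigma))

print(np.dot(data_t, B).std(axis=0))  # ~ [1. 1.]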
Example 13: propose
def propose(self):
ee = np.asarray(self.Y.value) - np.asarray(self.muY.value)
H = (np.exp(self.LH.value)**(-0.5))[1:]
K = np.asarray(self.Y.value).shape[1]
b_new = np.empty_like(self.stochastic.value)
# auxiliary variables to pick the right subvector/submatrix for the equations
lb = 0
ub = 1
for j in range(1, K):
z = np.expand_dims(H[:, j], 1)*np.expand_dims(ee[:, j], 1) # LHS variable in the regression
Z = np.expand_dims(-H[:, j], 1)*ee[:, :j] # RHS variables in the regression
b_prior = np.asarray([self.b_bar[lb:ub]])
Vinv_prior = inv(self.Pb_bar[lb:ub, lb:ub])
V_post = inv(Vinv_prior + Z.T @ Z)
b_post = V_post @ (Vinv_prior @ b_prior.T + Z.T @ z)
b_new[lb:ub] = pm.rmv_normal_cov(b_post.ravel(), V_post)
lb = ub
ub += j+1
self.stochastic.value = b_new
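The two inv calls implement the standard conjugate update for a linear regression with unit noise variance: V_post = (V_prior^-1 + Z'Z)^-1 and b_post = V_post (V_prior^-1 b_prior + Z'z). A standalone sketch of that update with made-up data:

import numpy as np
from scipy.linalg import inv

rng = np.random.default_rng(0)
Z = rng.standard_normal((50, 3))  # regressors
z = Z @ np.array([1.0, -2.0, 0.5]) + rng.standard_normal(50)  # noisy target

b_prior = np.zeros(3)
Vinv_prior = inv(np.eye(3) * 10.0)  # weak Gaussian prior

V_post = inv(Vinv_prior + Z.T @ Z)
b_post = V_post @ (Vinv_prior @ b_prior + Z.T @ z)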
Example 14: kernel_embedding_model
def kernel_embedding_model(self, yita, alpha, Beta, K_tp, K_ts, K_tt, A, R, logtheta, logeta, Len_sr, lda):
I_sr = np.identity(Len_sr, dtype = float)
L = cholesky((K_tt + lda * I_sr))
inv_L = inv(L).transpose()
Q = np.dot(np.transpose(K_ts), inv_L)
Pi = inv( np.dot(np.transpose(Q), Q) + lda * I_sr)
H = np.dot( K_tp, Q)
BLK = np.dot(K_tp,Beta)-K_ts
Delta = R + np.dot(np.transpose(BLK),alpha)
W = np.exp(Delta/yita)
C = sum(W[:,0])/len(W[:,0])
W = W/C
Lambda = np.diag(W[:,0])
#Lambda = np.identity(len(W[:,0]),dtype = float)
Gamma = np.dot( np.dot(K_ts, Lambda), np.transpose(K_ts))
X = inv(Gamma + lda * K_tt + lda**2 * I_sr)
Y = np.dot(K_ts, A)
M = np.dot(X, Y)
return inv_L, Q, Pi, H, Lambda, Gamma, X, Y, M, C
Example 15: __init__
def __init__(self, rng, n_samples=500, n_components=2, n_features=2,
scale=50):
self.n_samples = n_samples
self.n_components = n_components
self.n_features = n_features
self.weights = rng.rand(n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.rand(n_components, n_features) * scale
self.covariances = {
'spherical': .5 + rng.rand(n_components),
'diag': (.5 + rng.rand(n_components, n_features)) ** 2,
'tied': make_spd_matrix(n_features, random_state=rng),
'full': np.array([
make_spd_matrix(n_features, random_state=rng) * .5
for _ in range(n_components)])}
self.precisions = {
'spherical': 1. / self.covariances['spherical'],
'diag': 1. / self.covariances['diag'],
'tied': linalg.inv(self.covariances['tied']),
'full': np.array([linalg.inv(covariance)
for covariance in self.covariances['full']])}
self.X = dict(zip(COVARIANCE_TYPE, [generate_data(
n_samples, n_features, self.weights, self.means, self.covariances,
covar_type) for covar_type in COVARIANCE_TYPE]))
self.Y = np.hstack([np.full(int(np.round(w * n_samples)), k,
dtype=int)
for k, w in enumerate(self.weights)])
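For the 'tied' and 'full' covariance types above, the precisions are exact matrix inverses, so multiplying a precision by its covariance should recover the identity. A quick sanity check, reusing sklearn's make_spd_matrix as in the example:

import numpy as np
from scipy import linalg
from sklearn.datasets import make_spd_matrix

cov = make_spd_matrix(3, random_state=0)
prec = linalg.inv(cov)
print(np.allclose(prec @ cov, np.eye(3)))  # True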