This article collects typical usage examples of the scipy.linalg.pinv function in Python. If you have been asking yourself what pinv does, how to call it, and what it looks like in real projects, the curated examples below should help.
In total, 15 code examples of the pinv function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
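As a quick orientation before the project excerpts, here is a minimal, self-contained sketch (not taken from any of the projects below) of what scipy.linalg.pinv computes: the Moore-Penrose pseudo-inverse of a rectangular or rank-deficient matrix. The matrix values and variable names are arbitrary illustration data.

import numpy as np
from scipy import linalg

# A rectangular (hence non-invertible) matrix; the values are arbitrary.
A = np.array([[1.0, 2.0],
              [3.0, 4.0],
              [5.0, 6.0]])

A_pinv = linalg.pinv(A)  # Moore-Penrose pseudo-inverse, shape (2, 3)

# The pseudo-inverse satisfies the Moore-Penrose conditions.
assert np.allclose(A, A @ A_pinv @ A)
assert np.allclose(A_pinv, A_pinv @ A @ A_pinv)

# Typical use: minimum-norm least-squares solution of A x = b.
b = np.array([1.0, 0.0, 1.0])
x = A_pinv @ b
print(x)

Several of the examples below use the same A @ pinv(A) @ A round-trip check to validate their own pseudo-inverse computations against SciPy.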
Example 1: test_pinv_Z_alt
def test_pinv_Z_alt():
    ## Test pseudo-inverse computation of a complex double precision distributed matrix
    m, n = 7, 4

    gA = np.random.standard_normal((m, n)).astype(np.float64)
    gA = gA + 1.0J * np.random.standard_normal((m, n)).astype(np.float64)
    gA = np.dot(gA, gA.T.conj())
    assert np.linalg.matrix_rank(gA) < gA.shape[0]  # not full rank
    gA = np.asfortranarray(gA)
    m, n = gA.shape

    dA = core.DistributedMatrix.from_global_array(gA, rank=0)
    pinvA = rt.pinv(dA)
    gpinvA = pinvA.to_global_array()
    gpinvA = gpinvA[:n, :]

    if rank == 0:
        assert not allclose(gA, np.dot(gA, np.dot(gpinvA, gA)))
        assert not allclose(gpinvA, np.dot(gpinvA, np.dot(gA, gpinvA)))
        spinvA = la.pinv(gA)
        assert allclose(gA, np.dot(gA, np.dot(spinvA, gA)))
        assert allclose(spinvA, np.dot(spinvA, np.dot(gA, spinvA)))
        assert not allclose(gpinvA, la.pinv(gA))  # compare with the scipy result
Example 2: __m__
def __m__(self, C1_ts, C2_ts, P_ts, P_t_tm1s, P_tm1s, G_ts):
    # As in the E-step, the initial observation is used to compute the prior.
    n = self.n_observations() + 1
    C1_sum = np.sum(C1_ts, axis=2)
    P_t_sum = np.sum(P_ts, axis=2)
    P_t_tm1_sum_1tT = np.sum(P_t_tm1s[:, :, :], axis=2)
    P_tm1_sum_1tT = np.sum(P_tm1s[:, :, :], axis=2)
    # Output matrix fit.
    C = C1_sum @ pinv(P_t_sum)
    # Observation covariance fit.
    R = np.zeros((self.observations_size, self.observations_size))
    for t in range(n):
        R += C2_ts[:, :, t] - C @ G_ts[:, :, t]
    R *= 1.0 / n
    # State dynamics.
    A = P_t_tm1_sum_1tT @ pinv(P_tm1_sum_1tT)
    # Hidden noise.
    Q = 1.0 / (n - 1) * (np.sum(P_ts[:, :, 1:], axis=2) - A @ P_t_tm1_sum_1tT)
    # Control signal.
    B = self.Bs[:, :, 0]
    D = self.Ds[:, :, 0]
    # Initial state.
    init_mu = self.mus[:, :, 0]
    init_V = P_ts[:, :, 0] - init_mu @ init_mu.T
    return (A, B, C, D, Q, R), init_mu, init_V
Example 3: _fit
def _fit(self, cov_a, cov_b):
    """Aux Function (modifies cov_a and cov_b in-place)."""
    cov_a /= np.trace(cov_a)
    cov_b /= np.trace(cov_b)
    # compute the eigenvalues
    lambda_, u = linalg.eigh(cov_a + cov_b)
    # sort them in descending order
    ind = np.argsort(lambda_)[::-1]
    lambda2_ = lambda_[ind]
    u = u[:, ind]
    p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)
    # compute the generalized eigenvalue problem
    w_a = np.dot(np.dot(p, cov_a), p.T)
    w_b = np.dot(np.dot(p, cov_b), p.T)
    # and solve it
    vals, vecs = linalg.eigh(w_a, w_b)
    # sort vectors by discriminative power using the eigenvalues
    ind = np.argsort(np.maximum(vals, 1.0 / vals))[::-1]
    vecs = vecs[:, ind]
    # and project
    w = np.dot(vecs.T, p)
    self.filters_ = w
    self.patterns_ = linalg.pinv(w).T
Example 4: __init__
def __init__(self, U, Y, statedim, reg=None):
    if size(shape(U)) == 1:
        U = reshape(U, (-1, 1))
    if size(shape(Y)) == 1:
        Y = reshape(Y, (-1, 1))
    if reg is None:
        reg = 0

    yDim = size(Y, 1)
    uDim = size(U, 1)

    self.output_size = size(Y, 1)  # placeholder

    # number of samples of past/future we'll mash together into a 'state'
    width = 1
    # total number of past/future pairings we get as a result
    K = size(U, 0) - 2 * width + 1

    # build hankel matrices containing pasts and futures
    U_p = array([ravel(U[t:t + width]) for t in range(K)]).T
    U_f = array([ravel(U[t + width:t + 2 * width]) for t in range(K)]).T
    Y_p = array([ravel(Y[t:t + width]) for t in range(K)]).T
    Y_f = array([ravel(Y[t + width:t + 2 * width]) for t in range(K)]).T

    # solve the eigenvalue problem
    YfUfT = dot(Y_f, U_f.T)
    YfUpT = dot(Y_f, U_p.T)
    YfYpT = dot(Y_f, Y_p.T)
    UfUpT = dot(U_f, U_p.T)
    UfYpT = dot(U_f, Y_p.T)
    UpYpT = dot(U_p, Y_p.T)
    F = bmat([[None, YfUfT, YfUpT, YfYpT],
              [YfUfT.T, None, UfUpT, UfYpT],
              [YfUpT.T, UfUpT.T, None, UpYpT],
              [YfYpT.T, UfYpT.T, UpYpT.T, None]])
    Ginv = bmat([[pinv(dot(Y_f, Y_f.T)), None, None, None],
                 [None, pinv(dot(U_f, U_f.T)), None, None],
                 [None, None, pinv(dot(U_p, U_p.T)), None],
                 [None, None, None, pinv(dot(Y_p, Y_p.T))]])
    F = F - eye(size(F, 0)) * reg

    # take the smallest eigenvalues
    _, W = eigs(Ginv.dot(F), k=statedim, which='SR')

    # state sequence is a weighted combination of the past
    W_U_p = W[width * (yDim + uDim):width * (yDim + uDim + uDim), :]
    W_Y_p = W[width * (yDim + uDim + uDim):, :]
    X_hist = dot(W_U_p.T, U_p) + dot(W_Y_p.T, Y_p)

    # regress; trim inputs to match the states we retrieved
    R = concatenate((X_hist[:, :-1], U[width:-width].T), 0)
    L = concatenate((X_hist[:, 1:], Y[width:-width].T), 0)
    RRi = pinv(dot(R, R.T))
    RL = dot(R, L.T)
    Sys = dot(RRi, RL).T
    self.A = Sys[:statedim, :statedim]
    self.B = Sys[:statedim, statedim:]
    self.C = Sys[statedim:, :statedim]
    self.D = Sys[statedim:, statedim:]
Example 5: _update_precisions
def _update_precisions(self, X, z):
    """Update the variational distributions for the precisions"""
    n_features = X.shape[1]
    if self.covariance_type == 'spherical':
        self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
        for k in xrange(self.n_components):
            # could be more memory efficient ?
            sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
            self.scale_[k] = 1.
            self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
            self.bound_prec_[k] = (
                0.5 * n_features * (
                    digamma(self.dof_[k]) - np.log(self.scale_[k])))
        self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
    elif self.covariance_type == 'diag':
        for k in xrange(self.n_components):
            self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
            sq_diff = (X - self.means_[k]) ** 2  # see comment above
            self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                z.T[k], (sq_diff + 1))
            self.precs_[k] = self.dof_[k] / self.scale_[k]
            self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                               - np.log(self.scale_[k]))
            self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
    elif self.covariance_type == 'tied':
        self.dof_ = 2 + X.shape[0] + n_features
        self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
        for k in xrange(self.n_components):
            diff = X - self.means_[k]
            self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
        self.scale_ = linalg.pinv(self.scale_)
        self.precs_ = self.dof_ * self.scale_
        self.det_scale_ = linalg.det(self.scale_)
        self.bound_prec_ = 0.5 * wishart_log_det(
            self.dof_, self.scale_, self.det_scale_, n_features)
        self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
    elif self.covariance_type == 'full':
        for k in xrange(self.n_components):
            sum_resp = np.sum(z.T[k])
            self.dof_[k] = 2 + sum_resp + n_features
            self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
            diff = X - self.means_[k]
            self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_[k] = linalg.pinv(self.scale_[k])
            self.precs_[k] = self.dof_[k] * self.scale_[k]
            self.det_scale_[k] = linalg.det(self.scale_[k])
            self.bound_prec_[k] = 0.5 * wishart_log_det(self.dof_[k],
                                                        self.scale_[k],
                                                        self.det_scale_[k],
                                                        n_features)
            self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                self.scale_[k])
Example 6: ICC_rep_anova
def ICC_rep_anova(Y):
    # The data Y are entered as a 'table', i.e. subjects are in rows and
    # repeated measures in columns.
    # ------------------------------------------------------------------
    # One-sample repeated measures ANOVA
    # Y = XB + E with X = [Factor / Subject]
    # ------------------------------------------------------------------
    [nb_subjects, nb_conditions] = Y.shape
    # print(nb_subjects, nb_conditions)
    df = nb_conditions - 1
    dfe = nb_subjects * nb_conditions - nb_subjects - df
    dfmodel = nb_subjects - df

    # create the design matrix for the different levels
    # -------------------------------------------------
    x = kron(eye(nb_conditions), ones((nb_subjects, 1)))  # effect
    x0 = tile(eye(nb_subjects), (nb_conditions, 1))       # subject
    X = hstack([x, x0])

    # compute the repeated measure effect
    # -----------------------------------
    Y = Y.flatten('F')  # column-major order, as in the original Y.flatten(1)

    # Sum Square Total
    SST = dot((Y.reshape(-1, 1) - tile(mean(Y), (Y.shape[0], 1))).T,
              (Y.reshape(-1, 1) - tile(mean(Y), (Y.shape[0], 1))))

    # Sum Square Subject (error in the ANOVA model)
    M = dot(dot(X, pinv(dot(X.T, X))), X.T)
    R = eye(Y.shape[0]) - M
    SSS = dot(dot(Y.T, R), Y)
    MSS = SSS / dfe

    # Sum Square effect (repeated measure)
    Betas = dot(pinv(x), Y)  # compute without cst/subjects
    yhat = dot(x, Betas)
    SSE = diag(dot((yhat.reshape(-1, 1) - tile(mean(yhat), (yhat.shape[0], 1))).T,
                   (yhat.reshape(-1, 1) - tile(mean(yhat), (yhat.shape[0], 1)))))
    # MSE = SSE / df

    # Sum Square error
    SSError = SST - SSS - SSE
    MSError = SSError / dfmodel

    # ICC(3,1) = (mean square subject - mean square error) /
    #            (mean square subject + (k - 1) * mean square error)
    return -((MSS - MSError) / (MSS + df * MSError))

# Y = array([[1, 2], [2, 3.5]])
# print(ICC_rep_anova(Y))
Example 7: bayesian_regression
def bayesian_regression(X, Y, K):
    d = K.shape[0]
    alpha = model_evidence(X, Y, K)
    V1_inv = np.eye(d)
    V2_inv = K
    # beta = (V1inv + alpha*V2inv) \ (V1inv*Y'*X) / (X'*X)
    YTX = np.dot(Y.T, X)
    XTX = np.dot(X.T, X)
    beta = np.dot(np.dot(linalg.pinv(V1_inv + alpha * V2_inv),
                         np.dot(V1_inv, YTX)), linalg.pinv(XTX))
    return beta
Example 8: _cp3
def _cp3(X, n_components, tol, max_iter, init_type, random_state=None):
    """
    3 dimensional CANDECOMP/PARAFAC decomposition.

    This code is meant to be a tutorial/testing example... in general _cpN
    should be more compact and equivalent mathematically.
    """
    if len(X.shape) != 3:
        raise ValueError("CP3 decomposition only supports 3 dimensions!")
    if init_type == "random":
        A, B, C = _random_init(X, n_components, random_state)
    elif init_type == "hosvd":
        A, B, C = _hosvd_init(X, n_components)
    grams = [np.dot(arr.T, arr) for arr in (A, B, C)]
    err = 1E10

    for itr in range(max_iter):
        err_old = err
        A = matricize(X, 0).dot(kr(C, B)).dot(linalg.pinv(grams[1] * grams[2]))
        if itr == 0:
            normalization = np.sqrt((A ** 2).sum(axis=0))
        else:
            normalization = A.max(axis=0)
            normalization[normalization < 1] = 1
        A /= normalization
        grams[0] = np.dot(A.T, A)

        B = matricize(X, 1).dot(kr(C, A)).dot(linalg.pinv(grams[0] * grams[2]))
        if itr == 0:
            normalization = np.sqrt((B ** 2).sum(axis=0))
        else:
            normalization = B.max(axis=0)
            normalization[normalization < 1] = 1
        B /= normalization
        grams[1] = np.dot(B.T, B)

        C = matricize(X, 2).dot(kr(B, A)).dot(linalg.pinv(grams[0] * grams[1]))
        if itr == 0:
            normalization = np.sqrt((C ** 2).sum(axis=0))
        else:
            normalization = C.max(axis=0)
            normalization[normalization < 1] = 1
        C /= normalization
        grams[2] = np.dot(C.T, C)

        err = linalg.norm(matricize(X, 0) - np.dot(A, kr(C, B).T)) ** 2
        thresh = np.abs(err - err_old) / err_old
        if (thresh < tol) or (itr > max_iter):
            break
    return A, B, C
Example 9: _nipals_twoblocks_inner_loop
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
                                 norm_y_weights=False):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y); returns the first left and right
    singular vectors of X'Y. See PLS for the meaning of the parameters. It is
    similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
    """
    y_score = Y[:, [0]]
    x_weights_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    eps = np.finfo(X.dtype).eps
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                X_pinv = linalg.pinv(X)  # compute once pinv(X)
            x_weights = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # 1.2 Normalize u
        x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, x_weights)
        # 2.1 Update y_weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = linalg.pinv(Y)  # compute once pinv(Y)
            y_weights = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        # 2.2 Normalize y_weights
        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
        # y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached')
            break
        x_weights_old = x_weights
        ite += 1
    return x_weights, y_weights, ite
Example 10: _update_precisions
def _update_precisions(self):
    """Update the variational distributions for the precisions"""
    if self.cvtype == "spherical":
        self._a = 0.5 * self.n_features * np.sum(self._z, axis=0)
        for k in xrange(self.n_components):
            # XXX: how to avoid this huge temporary matrix in memory
            dif = self._X - self._means[k]
            self._b[k] = 1.0
            d = np.sum(dif * dif, axis=1)
            self._b[k] += 0.5 * np.sum(self._z.T[k] * (d + self.n_features))
            self._bound_prec[k] = 0.5 * self.n_features * (
                digamma(self._a[k]) - np.log(self._b[k]))
        self._precs = self._a / self._b
    elif self.cvtype == "diag":
        for k in xrange(self.n_components):
            self._a[k].fill(1.0 + 0.5 * np.sum(self._z.T[k], axis=0))
            ddif = self._X - self._means[k]  # see comment above
            for d in xrange(self.n_features):
                self._b[k, d] = 1.0
                dd = ddif.T[d] * ddif.T[d]
                self._b[k, d] += 0.5 * np.sum(self._z.T[k] * (dd + 1))
            self._precs[k] = self._a[k] / self._b[k]
            self._bound_prec[k] = 0.5 * np.sum(digamma(self._a[k])
                                               - np.log(self._b[k]))
            self._bound_prec[k] -= 0.5 * np.sum(self._precs[k])
    elif self.cvtype == "tied":
        self._a = 2 + self._X.shape[0] + self.n_features
        self._B = (self._X.shape[0] + 1) * np.identity(self.n_features)
        for i in xrange(self._X.shape[0]):
            for k in xrange(self.n_components):
                dif = self._X[i] - self._means[k]
                self._B += self._z[i, k] * np.dot(dif.reshape((-1, 1)),
                                                  dif.reshape((1, -1)))
        self._B = linalg.pinv(self._B)
        self._precs = self._a * self._B
        self._detB = linalg.det(self._B)
        self._bound_prec = 0.5 * detlog_wishart(
            self._a, self._B, self._detB, self.n_features)
        self._bound_prec -= 0.5 * self._a * np.trace(self._B)
    elif self.cvtype == "full":
        for k in xrange(self.n_components):
            T = np.sum(self._z.T[k])
            self._a[k] = 2 + T + self.n_features
            self._B[k] = (T + 1) * np.identity(self.n_features)
            for i in xrange(self._X.shape[0]):
                dif = self._X[i] - self._means[k]
                self._B[k] += self._z[i, k] * np.dot(dif.reshape((-1, 1)),
                                                     dif.reshape((1, -1)))
            self._B[k] = linalg.pinv(self._B[k])
            self._precs[k] = self._a[k] * self._B[k]
            self._detB[k] = linalg.det(self._B[k])
            self._bound_prec[k] = 0.5 * detlog_wishart(
                self._a[k], self._B[k], self._detB[k], self.n_features)
            self._bound_prec[k] -= 0.5 * self._a[k] * np.trace(self._B[k])
Example 11: __init__
def __init__(self, X, Y, rank, reg=None):
    if size(shape(X)) == 1:
        X = reshape(X, (-1, 1))
    if size(shape(Y)) == 1:
        Y = reshape(Y, (-1, 1))
    if reg is None:
        reg = 0
    self.rank = rank

    CXX = dot(X.T, X) + reg * eye(size(X, 1))
    CXY = dot(X.T, Y)
    _U, _S, V = svd(dot(CXY.T, dot(pinv(CXX), CXY)))
    self.W = V[0:rank, :].T
    self.A = dot(pinv(CXX), dot(CXY, self.W)).T
Example 12: _nipals_twoblocks_inner_loop
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y), i.e. returns the first left and
    right singular vectors of X'Y. See PLS for the meaning of the parameters.
    It is similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
    """
    y_score = Y[:, [0]]
    u_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                X_pinv = linalg.pinv(X)  # compute once pinv(X)
            u = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            u = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # 1.2 Normalize u
        u /= np.sqrt(np.dot(u.T, u))
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, u)
        # 2.1 Update v: the Y weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = linalg.pinv(Y)  # compute once pinv(Y)
            v = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            v = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        # 2.2 Normalize v
        v /= np.sqrt(np.dot(v.T, v))
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, v)
        u_diff = u - u_old
        if np.dot(u_diff.T, u_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached')
            break
        u_old = u
        ite += 1
    return u, v
Example 13: preProcess
def preProcess(u, y, NumDict):
    NumInputs = u.shape[0]
    NumOutputs = y.shape[0]
    NumRows = NumDict['Rows']
    NumCols = NumDict['Columns']
    NSig = NumDict['Dimension']

    UPast, UFuture = getHankelMatrices(u, NumRows, NumCols)
    YPast, YFuture = getHankelMatrices(y, NumRows, NumCols)
    Data = np.vstack((UPast, UFuture, YPast))
    L = la.lstsq(Data.T, YFuture.T)[0].T
    Z = np.dot(L, Data)

    DataShift = np.vstack((UPast, UFuture[NumInputs:], YPast))
    LShift = la.lstsq(DataShift.T, YFuture[NumOutputs:].T)[0].T
    ZShift = np.dot(LShift, DataShift)

    L1 = L[:, :NumInputs * NumRows]
    L3 = L[:, 2 * NumInputs * NumRows:]
    LPast = np.hstack((L1, L3))
    DataPast = np.vstack((UPast, YPast))

    U, S, Vt = la.svd(np.dot(LPast, DataPast))
    Sig = np.diag(S[:NSig])
    SigRt = np.diag(np.sqrt(S[:NSig]))
    Gamma = np.dot(U[:, :NSig], SigRt)
    GammaLess = Gamma[:-NumOutputs]
    GammaPinv = la.pinv(Gamma)
    GammaLessPinv = la.pinv(GammaLess)

    GamShiftSolve = la.lstsq(GammaLess, ZShift)[0]
    GamSolve = la.lstsq(Gamma, Z)[0]
    GamData = np.vstack((GamSolve, UFuture))
    GamYData = np.vstack((GamShiftSolve, YFuture[:NumOutputs]))

    # Should probably move to a better output structure,
    # one that doesn't depend so heavily on ordering.
    GammaDict = {'Data': GamData,
                 'DataLess': GammaLess,
                 'DataY': GamYData,
                 'Pinv': GammaPinv,
                 'LessPinv': GammaLessPinv}
    return GammaDict, S
Example 14: calculateGradient
def calculateGradient(self):
    # normalize rewards
    # self.dataset.data['reward'] /= max(ravel(abs(self.dataset.data['reward'])))

    # initialize variables
    R = ones((self.dataset.getNumSequences(), 1), float)
    X = ones((self.dataset.getNumSequences(),
              self.loglh.getDimension('loglh') + 1), float)

    # collect sufficient statistics
    print(self.dataset.getNumSequences())
    for n in range(self.dataset.getNumSequences()):
        _state, _action, reward = self.dataset.getSequence(n)
        seqidx = ravel(self.dataset['sequence_index'])
        if n == self.dataset.getNumSequences() - 1:
            # last sequence until end of dataset
            loglh = self.loglh['loglh'][seqidx[n]:, :]
        else:
            loglh = self.loglh['loglh'][seqidx[n]:seqidx[n + 1], :]
        X[n, :-1] = sum(loglh, 0)
        R[n, 0] = sum(reward, 0)

    # linear regression
    beta = dot(pinv(X), R)
    return beta[:-1]
Example 15: test_simple_complex
def test_simple_complex(self):
    a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
         + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
    a_pinv = pinv(a)
    assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
    a_pinv = pinv2(a)
    assert_array_almost_equal(dot(a, a_pinv), np.eye(3))