This page collects typical usage examples of the Python function scipy.linalg.lstsq, drawn from real projects. If you are wondering what lstsq does, how to call it, or where it is useful, the hand-picked examples below should help.
The following 15 code examples of lstsq are shown, ordered by popularity by default.
Example 1: partial_corr
import numpy as np
from scipy import linalg, stats

def partial_corr(C):
    """
    Returns the sample linear partial correlation coefficients between pairs of variables in C,
    controlling for the remaining variables in C.

    Parameters
    ----------
    C : array-like, shape (n, p)
        Array with the different variables. Each column of C is taken as a variable.

    Returns
    -------
    P : array-like, shape (p, p)
        P[i, j] contains the partial correlation of C[:, i] and C[:, j] controlling
        for the remaining variables in C.
    """
    C = np.asarray(C)
    p = C.shape[1]
    P_corr = np.zeros((p, p), dtype=float)  # np.float was removed in NumPy >= 1.24
    for i in range(p):
        P_corr[i, i] = 1
        for j in range(i + 1, p):
            idx = np.ones(p, dtype=bool)
            idx[i] = False
            idx[j] = False
            # regress each variable on the remaining ones, then correlate the residuals
            beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
            beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
            res_j = C[:, j] - C[:, idx].dot(beta_i)
            res_i = C[:, i] - C[:, idx].dot(beta_j)
            corr = stats.pearsonr(res_i, res_j)[0]
            P_corr[i, j] = corr
            P_corr[j, i] = corr
    return P_corr
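A minimal usage sketch (the data and variable names are illustrative, not from the original project): two variables driven by a shared third one correlate strongly, but their partial correlation controlling for the driver is near zero.

rng = np.random.default_rng(0)
z = rng.standard_normal(500)
x = z + 0.1 * rng.standard_normal(500)
y = z + 0.1 * rng.standard_normal(500)
C = np.column_stack([x, y, z])
P = partial_corr(C)
print(np.corrcoef(x, y)[0, 1])  # close to 1
print(P[0, 1])                  # close to 0 once z is controlled for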
Example 2: bilinear_least_squares
import numpy as np
from scipy.linalg import lstsq

def bilinear_least_squares(X, y, b0=None, n_iter=10, fit_intercept=True):
    """Assumes X.shape = (n_samples, n_matrices, h, w) and does linear
    regression with each coefficient matrix constrained to rank 1."""
    if X.ndim == 3:
        X = X[:, np.newaxis]
    n_samples, n_matrices, n_feat_a, n_feat_b = X.shape
    if b0 is None:
        b0 = np.ones((n_matrices, n_feat_b)) / n_feat_b
    b = b0.copy()
    if fit_intercept:
        X_mean, y_mean = X.mean(0), y.mean()
        X = X - X_mean
        y = y - y_mean
    # alternate between solving for a with b fixed and for b with a fixed
    for i in range(n_iter):
        a_estimation_matrix = np.einsum(
            "ijkl, jl -> ijk", X, b).reshape(n_samples, -1)
        a = lstsq(a_estimation_matrix, y)[0].reshape(n_matrices, n_feat_a)
        b_estimation_matrix = np.einsum(
            "ijkl, jk -> ijl", X, a).reshape(n_samples, -1)
        b = lstsq(b_estimation_matrix, y)[0].reshape(n_matrices, n_feat_b)
    if fit_intercept:
        intercept = y_mean - np.einsum("jkl, jk, jl", X_mean, a, b)
        return a, b, intercept
    return a, b
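A quick synthetic check of the alternating scheme (all names here are illustrative). Since a and b are only identified up to a per-matrix scale, compare the reconstructed rank-1 coefficient matrices rather than the factors themselves.

rng = np.random.default_rng(1)
X = rng.standard_normal((200, 2, 4, 5))
a_true = rng.standard_normal((2, 4))
b_true = rng.standard_normal((2, 5))
y = np.einsum("ijkl, jk, jl -> i", X, a_true, b_true) + 3.0
a, b, intercept = bilinear_least_squares(X, y, n_iter=50)
err = np.abs(np.einsum("jk, jl -> jkl", a, b)
             - np.einsum("jk, jl -> jkl", a_true, b_true)).max()
print(err, intercept)  # err shrinks toward 0 as n_iter grows; intercept ~ 3.0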
Example 3: pcor
import math
import warnings
import numpy as SP  # the original used scipy's now-removed NumPy re-exports
import scipy.linalg as LA
import scipy.stats as STATS
from scipy.stats import t

def pcor(X, Y, Z):
    """
    Computes the correlation of X and Y conditioning on Z.
    """
    if X.ndim == 1: X = X[:, SP.newaxis]
    if Y.ndim == 1: Y = Y[:, SP.newaxis]
    if Z is None: return STATS.pearsonr(X[:, 0], Y[:, 0])
    if Z.ndim == 1: Z = Z[:, SP.newaxis]
    nSamples = X.shape[0]
    # regress Z out of X and Y, then correlate the residuals
    betaX, _, _, _ = LA.lstsq(Z, X)
    betaY, _, _, _ = LA.lstsq(Z, Y)
    Xres = X - SP.dot(Z, betaX)
    Yres = Y - SP.dot(Z, betaY)
    corr_cond = SP.corrcoef(Xres[:, 0], Yres[:, 0])[0, 1]
    dz = Z.shape[1]                  # dimension of conditioning variable
    df = max(nSamples - dz - 2, 0)   # degrees of freedom
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        tstat = corr_cond / SP.sqrt(1.0 - corr_cond ** 2)  # t statistic
    tstat = math.sqrt(df) * tstat
    pv_cond = 2 * t.cdf(-abs(tstat), df, loc=0, scale=1)   # p value
    return corr_cond, pv_cond
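Illustrative call on synthetic confounded data (not from the original project):

rng = SP.random.default_rng(2)
Z = rng.standard_normal(300)
X = Z + 0.5 * rng.standard_normal(300)
Y = Z + 0.5 * rng.standard_normal(300)
r, p = pcor(X, Y, Z)
print(r, p)  # r is near 0 once the shared driver Z is conditioned on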
Example 4: pcorParallel
import numpy as SP  # stands in for scipy's now-removed NumPy re-exports
import scipy.linalg as LA

def pcorParallel(X, Z, Y=None):
    """
    Computes the correlation matrix between X and Y conditioning on Z.
    """
    # pcorParallelSym and corrParallel are helpers defined elsewhere in the
    # same project
    if Y is None: return pcorParallelSym(X, Z)
    if Z is None: return corrParallel(X, Y)
    if Z.ndim == 1: Z = Z[SP.newaxis, :]
    # put samples in rows
    X = X.T
    Y = Y.T
    Z = Z.T
    # regress the covariates Z out of both X and Y
    beta, _, _, _ = LA.lstsq(Z, Y)
    Yres = Y - SP.dot(Z, beta)
    beta, _, _, _ = LA.lstsq(Z, X)
    Xres = X - SP.dot(Z, beta)
    nSamples = Z.shape[0]
    nCovs = Z.shape[1]
    df = max(nSamples - 2 - nCovs, 0)  # degrees of freedom
    return corrParallel(Xres.T, Yres.T, df=df)
Example 5: process
from numpy import eye
from scipy.linalg import cholesky, lstsq

def process(self, data_in, obs_vec):
    """
    Generate function network model.

    :param data_in: Training data matrix :math:`\\mathcal{X}\\in\\mathbb{R}^{d\\times n}`
    :type data_in: numpy array
    :param obs_vec: Observation vector :math:`y\\in\\mathbb{R}^{1\\times n}`
    :type obs_vec: numpy array
    :return: none
    :rtype: none
    """
    # check consistency of data
    obs_num = obs_vec.shape[1]
    data_num = data_in.shape[1]
    if obs_num != data_num:
        raise Exception("Number of samples for data and observations must be the same")
    else:
        # initialize variables
        self.data = data_in
        self.data_dim = data_in.shape[0]
        nsamp = data_num
        # peel off parameters
        ki = self.k_type
        bandwidth = ki.params[0]
        # compute regularized kernel matrix (kernel is defined elsewhere in the project)
        kmat = kernel(self.data, self.data, self.k_type) + (pow(self.noise, 2)) * eye(nsamp)
        # Cholesky factorization, then two triangular solves for the mean
        # vector (numerically stabler than forming the inverse); lstsq
        # returns a tuple, so the solution is element [0], and the 1 x n
        # observation row is transposed into a column for the solves
        self.lmat = cholesky(kmat).transpose()
        self.mean_vec = lstsq(self.lmat, obs_vec.T)[0]
        self.mean_vec = lstsq(self.lmat.transpose(), self.mean_vec)[0]
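Since self.lmat is triangular, scipy.linalg.solve_triangular is the more direct tool for these two solves than lstsq. A minimal self-contained sketch of the same computation (kmat, y and the shapes are stand-ins, not the original project's data):

import numpy as np
from scipy.linalg import cholesky, solve_triangular

rng = np.random.default_rng(7)
B = rng.standard_normal((5, 5))
kmat = B @ B.T + 5 * np.eye(5)        # SPD kernel-matrix stand-in
y = rng.standard_normal((5, 1))
lmat = cholesky(kmat).T               # lower-triangular factor, as in process()
v = solve_triangular(lmat, y, lower=True)            # solve L v = y
mean_vec = solve_triangular(lmat.T, v, lower=False)  # solve L^T m = v
print(np.allclose(kmat @ mean_vec, y))  # True: m = kmat^{-1} y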
Example 6: pcorr
import numpy as np
import pandas as pd
from scipy import linalg, stats

def pcorr(C, k):
    """Partial correlations between the columns of DataFrame C, controlling
    only for the columns indexed by k (plus an intercept)."""
    val = list(C.columns.values)
    C[u'ones'] = np.ones(C.shape[0])  # intercept column
    C = np.asarray(C)
    p = C.shape[1]
    P_corr = np.zeros((p - 1, p - 1), dtype=float)
    idx = np.zeros(p, dtype=bool)
    for kk in k:
        idx[kk] = True
    idx[p - 1] = True  # always keep the intercept
    for i in range(p - 1):
        P_corr[i, i] = 1
        for j in range(i + 1, p - 1):
            beta_i = linalg.lstsq(C[:, idx], C[:, i])[0]
            beta_j = linalg.lstsq(C[:, idx], C[:, j])[0]
            res_j = C[:, j] - C[:, idx].dot(beta_j)
            res_i = C[:, i] - C[:, idx].dot(beta_i)
            corr = stats.pearsonr(res_i, res_j)[0]
            P_corr[i, j] = corr
            P_corr[j, i] = corr
    return pd.DataFrame(P_corr, index=val, columns=val)
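Illustrative call (a hypothetical DataFrame, not from the original project). Entries whose row or column is itself a control variable are meaningless, since that variable gets regressed on itself.

rng = np.random.default_rng(6)
df = pd.DataFrame(rng.standard_normal((200, 3)), columns=["a", "b", "z"])
df["a"] += 2 * df["z"]
df["b"] += 2 * df["z"]
print(pcorr(df, k=[2]))  # controls for column index 2 ("z");
                         # the ("a", "b") entry is near 0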
Example 7: divide
from scipy import linalg

def divide(self, x, mode):
    # mode == 1: solve self.diag @ y = x; otherwise solve against its
    # conjugate transpose
    if mode == 1:
        y = linalg.lstsq(self.diag, x)
    else:
        y = linalg.lstsq(self.diag.conj().T, x)
    return y[0]
Example 8: solveQ3
import numpy as np
from numpy import dot
from scipy.linalg import lstsq

def solveQ3(trainData, testData, columnLabel):
    # trainingData, testingData and dataMEDVCol are globals defined elsewhere
    # in the original script (Boston housing data; MEDV is the target column)
    target = trainingData[:, dataMEDVCol:dataMEDVCol + 1]
    targetTest = testingData[:, dataMEDVCol:dataMEDVCol + 1]
    Ones = np.ones((len(target), 1))
    # fitting the parameters: theta = (X'X)^-1 X'y, via least squares
    Xtrain = np.hstack((Ones, trainData.reshape(len(Ones), 1)))
    mTheta = lstsq(Xtrain, target)[0]
    target_pred = dot(Xtrain, mTheta)
    msePred = sum((target - target_pred) ** 2) / len(target)
    meanTarget = sum(target) / len(target)
    varianceTarget = sum((target - meanTarget) ** 2) / len(target)
    FVU = msePred / varianceTarget  # fraction of variance unexplained
    OnesTest = np.ones((len(targetTest), 1))
    Xtest = np.hstack((OnesTest, testData.reshape(len(OnesTest), 1)))
    # use theta from the training set, not one refit on the testing set
    target_pred_test = dot(Xtest, mTheta)
    msePredTest = sum((targetTest - target_pred_test) ** 2) / len(targetTest)
    meanTargetTest = sum(targetTest) / len(targetTest)
    varianceTargetTest = sum((targetTest - meanTargetTest) ** 2) / len(targetTest)
    FVUTest = msePredTest / varianceTargetTest
    print('###', columnLabel, '###')
    print('MSE training set:', msePred)
    print('MSE testing set:', msePredTest)
    print('R2 of testing set against theta from training set:', 1 - FVUTest, '\n')
Example 9: divide
import scipy.linalg as sci  # `sci` presumably refers to scipy.linalg

def divide(self, b, mode):
    # mode == 1: solve self.matrix @ y = b; otherwise solve against its
    # conjugate transpose
    if mode == 1:
        y = sci.lstsq(self.matrix, b)
    else:
        y = sci.lstsq(self.matrix.conj().T, b)
    return y[0]
Example 10: backgroundCorrectPSFWF
def backgroundCorrectPSFWF(d):
    import numpy as np
    from scipy import linalg

    zf = d.shape[2] // 2  # central z-slice (integer index)

    # subtract a linear background in x
    # (assumes a square x-y footprint, as is typical for a PSF stack)
    Ax = np.vstack([np.ones(d.shape[0]), np.arange(d.shape[0])]).T
    bgxf = (d[0, :, zf] + d[-1, :, zf]) / 2
    gx = linalg.lstsq(Ax, bgxf)[0]
    d = d - np.dot(Ax, gx)[:, None, None]

    # do the same in y
    Ay = np.vstack([np.ones(d.shape[1]), np.arange(d.shape[1])]).T
    bgyf = (d[:, 0, zf] + d[:, -1, zf]) / 2
    gy = linalg.lstsq(Ay, bgyf)[0]
    d = d - np.dot(Ay, gy)[None, :, None]

    # estimate background on central slice as mean of rim pixels
    # bgr = (d[0,:,zf].mean() + d[-1,:,zf].mean() + d[:,0,zf].mean() + d[:,-1,zf].mean())/4
    # the sum over all pixels (and hence the mean) should be preserved over z
    # (for a widefield PSF)
    dm = d.mean(1).mean(0)
    bg = dm - dm[zf]
    return np.maximum(d - bg[None, None, :], 0) + 1e-5
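A smoke test on a synthetic stack (purely illustrative data):

import numpy as np
d = np.random.default_rng(3).random((32, 32, 16)) + 10.0
corrected = backgroundCorrectPSFWF(d)
print(corrected.shape, corrected.min() > 0)  # (32, 32, 16) True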
Example 11: trialFunFit_constrained
import numpy as np
import scipy.linalg as la

def trialFunFit_constrained(self, s, arr, alphas, pairs, zerostart=False):
    # method of a vector-fitting class; `self` is not used in the body
    deg = len(alphas)
    carr = np.concatenate((arr.real, arr.imag))
    # construct matrix for the extended fitting problem
    A = np.concatenate((1. / (s[:, None] + alphas[None, :]),
                        arr[:, None] / (s[:, None] + alphas[None, :])), axis=1)
    # implement the complex-conjugate-pair constraint
    pairsnew = np.concatenate((pairs, pairs))
    for i, p in enumerate(pairsnew):
        if p:
            x1 = A[:, i] + A[:, i+1]
            x2 = 1j * (A[:, i] - A[:, i+1])
            A[:, i] = x1
            A[:, i+1] = x2
    A = np.concatenate((A.real, A.imag), axis=0)
    # find auxiliary residues
    c = la.lstsq(A, carr)[0][-len(alphas):]
    # find zeros of the fitted auxiliary function
    a = np.diag(alphas)
    b = np.ones(deg)
    # implement similarity transform
    for i, p in enumerate(pairs):
        if p:
            a[i:i+2, i:i+2] = np.array([[alphas[i].real, alphas[i].imag],
                                        [-alphas[i].imag, alphas[i].real]])
            b[i:i+2] = np.array([2, 0])
    H = a.real - np.dot(b[:, None], c[None, :])
    alphanew = np.linalg.eig(H)[0]
    inds = np.argsort(alphanew)
    alphanew = alphanew[inds]
    # indicates where pairs of complex conjugate poles occur
    auxarr = np.abs((np.abs(alphanew[:-1]) - np.abs(alphanew[1:])) / np.abs(alphanew[:-1]))
    auxarr2 = np.abs(alphas.imag) > 1e-15
    pairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
    # find residues
    Anew = 1. / (s[:, None] + alphanew[None, :])
    for i, p in enumerate(pairs):
        if p:
            x1 = Anew[:, i] + Anew[:, i+1]
            x2 = 1j * (Anew[:, i] - Anew[:, i+1])
            Anew[:, i] = x1
            Anew[:, i+1] = x2
    Anew = np.concatenate((Anew.real, Anew.imag), axis=0)
    if zerostart:
        # enforce the K(t=0) = 0 constraint with an extra row; the row must
        # have as many entries as Anew has columns (deg)
        row1 = np.ones(deg)
        for i, p in enumerate(pairs):
            if p:
                row1[i+1] = 0
        Anew = np.concatenate((row1[None, :], Anew), axis=0)
        carr = np.concatenate((np.zeros(1), carr))
    cnew = la.lstsq(Anew, carr)[0]
    # recast cnew to complex values
    cnew = np.array(cnew, dtype=complex)
    for i, p in enumerate(pairs):
        if p:
            cnew[i:i+2] = np.array([cnew[i] + 1j * cnew[i+1], cnew[i] - 1j * cnew[i+1]])
    return alphanew, cnew, pairs
Example 12: partial_corr
import numpy as np
from scipy import linalg, stats

def partial_corr(C):
    """
    Partial Correlation in Python (clone of Matlab's partialcorr)
    from https://gist.github.com/fabianp/9396204419c7b638d38f

    This uses the linear regression approach to compute the partial
    correlation (might be slow for a huge number of variables). The
    algorithm is detailed here:
    http://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression

    Taking X and Y as the two variables of interest and Z the matrix with all
    the variables minus {X, Y}, the algorithm can be summarized as:
    1) perform a normal linear least-squares regression with X as the target and Z as the predictor
    2) calculate the residuals in Step #1
    3) perform a normal linear least-squares regression with Y as the target and Z as the predictor
    4) calculate the residuals in Step #3
    5) calculate the correlation coefficient between the residuals from Steps #2 and #4
    The result is the partial correlation between X and Y while controlling for the effect of Z.

    Returns the sample linear partial correlation coefficients between pairs of variables in C,
    controlling for the remaining variables in C.

    Parameters
    ----------
    C : array-like, shape (n, p)
        Array with the different variables. Each column of C is taken as a variable.

    Returns
    -------
    P : array-like, shape (p, p)
        P[i, j] contains the partial correlation of C[:, i] and C[:, j] controlling
        for the remaining variables in C.
    """
    C = np.asarray(C)
    p = C.shape[1]
    P_corr = np.zeros((p, p), dtype=float)
    for i in range(p):
        P_corr[i, i] = 1
        for j in range(i + 1, p):
            idx = np.ones(p, dtype=bool)
            idx[i] = False
            idx[j] = False
            beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
            beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
            res_j = C[:, j] - C[:, idx].dot(beta_i)
            res_i = C[:, i] - C[:, idx].dot(beta_j)
            corr = stats.pearsonr(res_i, res_j)[0]
            P_corr[i, j] = corr
            P_corr[j, i] = corr
    return P_corr
Example 13: fit_values
import numpy as np
from scipy.linalg import lstsq

def fit_values(self, s, x, damp=0.0):
    # complete_polynomial builds a complete polynomial basis of degree
    # self.d (provided by the surrounding project)
    Phi = complete_polynomial(s.T, self.d).T
    self.Phi = Phi
    if damp == 0.0:
        self.coefs = np.ascontiguousarray(lstsq(Phi, x)[0])
    else:
        # damped update: requires self.coefs from a previous fit
        new_coefs = np.ascontiguousarray(lstsq(Phi, x)[0])
        self.coefs = (1 - damp) * new_coefs + damp * self.coefs
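The same damped-fit update can be sketched without the project's basis helper; here np.polynomial.polynomial.polyvander stands in for complete_polynomial on a 1-D state (an assumption for illustration, not the original API):

import numpy as np
from scipy.linalg import lstsq

s = np.linspace(0.0, 1.0, 50)                    # 1-D state grid
Phi = np.polynomial.polynomial.polyvander(s, 4)  # degree-4 polynomial basis
coefs = lstsq(Phi, np.sin(3 * s))[0]             # initial fit
new_coefs = lstsq(Phi, np.cos(3 * s))[0]         # fit against updated values
damp = 0.5
coefs = (1 - damp) * new_coefs + damp * coefs    # damped update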
Example 14: time_lstsq
import numpy as np
import scipy.linalg as sl

def time_lstsq(self, dtype, size, lapack_driver):
    # asv-style benchmark method: self.A and self.b are prepared in setup()
    # for each (dtype, size) combination
    if lapack_driver == 'numpy':
        np.linalg.lstsq(self.A, self.b,
                        rcond=np.finfo(self.A.dtype).eps * 100)
    else:
        sl.lstsq(self.A, self.b, cond=None, overwrite_a=False,
                 overwrite_b=False, check_finite=False,
                 lapack_driver=lapack_driver)
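scipy.linalg.lstsq accepts the lapack_driver values 'gelsd' (the default), 'gelsy' and 'gelss'; a standalone timing sketch along the same lines:

import numpy as np
from scipy.linalg import lstsq
from timeit import timeit

A = np.random.default_rng(4).standard_normal((500, 100))
b = A @ np.ones(100)
for driver in ('gelsd', 'gelsy', 'gelss'):
    dt = timeit(lambda: lstsq(A, b, lapack_driver=driver), number=20)
    print(driver, dt)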
Example 15: _unscented_correct
import numpy as np
import numpy.ma as ma
from scipy import linalg

def _unscented_correct(cross_sigma, mu_pred, sigma2_pred, obs_mu_pred, obs_sigma2_pred, z):
    """Correct predicted state estimates with an observation

    Parameters
    ----------
    cross_sigma : [n_dim_state, n_dim_obs] array
        cross-covariance between the state at time t given all observations
        from timesteps [0, t-1] and the observation at time t
    mu_pred : [n_dim_state] array
        mean of state at time t given observations from timesteps [0, t-1]
    sigma2_pred : [n_dim_state, n_dim_state] array
        square root of covariance of state at time t given observations from
        timesteps [0, t-1]
    obs_mu_pred : [n_dim_obs] array
        mean of observation at time t given observations from times [0, t-1]
    obs_sigma2_pred : [n_dim_obs, n_dim_obs] array
        square root of covariance of observation at time t given observations
        from times [0, t-1]
    z : [n_dim_obs] array
        observation at time t

    Returns
    -------
    mu_filt : [n_dim_state] array
        mean of state at time t given observations from time steps [0, t]
    sigma2_filt : [n_dim_state, n_dim_state] array
        square root of covariance of state at time t given observations from
        time steps [0, t]
    """
    n_dim_state = len(mu_pred)
    n_dim_obs = len(obs_mu_pred)

    if not np.any(ma.getmask(z)):
        ##############################################
        # Same as this, but more stable (supposedly) #
        ##############################################
        # K = cross_sigma.dot(
        #     linalg.pinv(
        #         obs_sigma2_pred.T.dot(obs_sigma2_pred)
        #     )
        # )
        ##############################################
        # equivalent to this MATLAB code
        # K = (cross_sigma / obs_sigma2_pred.T) / obs_sigma2_pred
        K = linalg.lstsq(obs_sigma2_pred, cross_sigma.T)[0]
        K = linalg.lstsq(obs_sigma2_pred.T, K)[0]
        K = K.T

        # correct mu, sigma
        mu_filt = mu_pred + K.dot(z - obs_mu_pred)
        U = K.dot(obs_sigma2_pred)
        # cholupdate is a rank-1 Cholesky downdate helper defined elsewhere
        # in the same module
        sigma2_filt = cholupdate(sigma2_pred, U.T, -1.0)
    else:
        # no corrections to be made
        mu_filt = mu_pred
        sigma2_filt = sigma2_pred
    return (mu_filt, sigma2_filt)
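For a square, invertible obs_sigma2_pred the two successive lstsq solves compute the gain K = cross_sigma @ inv(S @ S.T) with S = obs_sigma2_pred; a toy numeric check (shapes and names are illustrative only):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(5)
S = np.triu(rng.standard_normal((3, 3))) + 3 * np.eye(3)  # sqrt-covariance stand-in
C = rng.standard_normal((4, 3))                           # cross-covariance stand-in
K = linalg.lstsq(S, C.T)[0]
K = linalg.lstsq(S.T, K)[0].T
print(np.allclose(K, C @ linalg.inv(S @ S.T)))  # True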