This page collects typical usage examples of the scipy.cov function in Python. If you are unsure what cov does, how to call it, or what it looks like in real code, the curated examples below may help.
15 code examples of the cov function are shown below, ordered by popularity by default.
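Before the extracted examples, here is a minimal self-contained sketch of the call itself. In the older SciPy releases these examples target, sp.cov (like sp.rand and sp.randn used below) is a re-export of the corresponding NumPy function; the data here is invented for illustration.

import scipy as sp

X = sp.randn(100, 3)        # 100 observations of 3 variables
C = sp.cov(X, rowvar=0)     # rows are observations -> 3x3 covariance matrix
C2 = sp.cov(X.T)            # equivalent: variables along the first axis
assert C.shape == (3, 3)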
Example 1: __init__

def __init__(self, Y=None, Xr=None, Rg=None, Ug=None, Sg=None, factr=1e7, debug=False):
    """
    Args:
        Y: [N, P] phenotype matrix
        Xr: [N, S] genotype data of the set component
        Rg: [N, N] relatedness matrix of the genetic background
            (used to compute Ug and Sg when they are not provided)
        factr: parameter that determines the accuracy of the solution
            (see scipy.optimize.fmin_l_bfgs_b for more details)
    """
    # assert Xr
    Xr -= Xr.mean(0)
    Xr /= Xr.std(0)
    Xr /= sp.sqrt(Xr.shape[1])
    self.Y = Y
    self.Xr = Xr
    if Sg is None or Ug is None:
        Sg, Ug = la.eigh(Rg)
    self.Rg = Rg
    self.Ug = Ug
    self.Sg = Sg
    self.covY = sp.cov(Y.T)
    self.factr = factr
    self.debug = debug
    self.gp = {}
    self.info = {}
    #_trRr = sp.diagonal(sp.dot(self.Ug, sp.dot(sp.diag(self.Sg), self.Ug.T))).sum()
    self.trRg = ((self.Ug * self.Sg**0.5)**2).sum()
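The last line of Example 1 recovers trace(Rg) from the cached eigendecomposition instead of the matrix itself. A quick sketch of why this works, on synthetic data (names are illustrative):

import numpy as np

N = 50
G = np.random.randn(N, N)
Rg = G.dot(G.T) + 1e-6 * np.eye(N)   # a positive-definite relatedness-like matrix
Sg, Ug = np.linalg.eigh(Rg)
# sum of squares of Ug * sqrt(Sg) is sum_j Sg_j * ||Ug[:, j]||^2 = trace(Rg)
assert np.allclose(((Ug * Sg**0.5)**2).sum(), np.trace(Rg))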
Example 2: __init__

def __init__(self, Y=None, Xr=None, F=None, Rr=None, factr=1e7, debug=False):
    """
    Args:
        Y: [N, P] phenotype matrix
        Xr: [N, S] genotype data of the set component
        Rr: [N, N] covariance of the set component
            (computed as Xr Xr^T when not provided and Xr is not low rank)
        factr: parameter that determines the accuracy of the solution
            (see scipy.optimize.fmin_l_bfgs_b for more details)
    """
    # avoid SVD failure by adding some jitter
    Xr += 2e-6 * (sp.rand(*Xr.shape) - 0.5)
    # make sure it is normalised
    Xr -= Xr.mean(0)
    Xr /= Xr.std(0)
    Xr /= sp.sqrt(Xr.shape[1])
    self.Y = Y
    self.F = F
    self.Xr = Xr
    self.covY = sp.cov(Y.T)
    self.factr = factr
    self.debug = debug
    self.gp = {}
    self.info = {}
    self.lowrank = Xr.shape[1] < Xr.shape[0]
    if Rr is not None:
        self.Rr = Rr
    else:
        if self.lowrank: self.Rr = None
        else: self.Rr = sp.dot(Xr, Xr.T)
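The three-step normalisation in Example 2 (center, standardise, divide by sqrt(S)) scales Xr so that the set covariance Rr = Xr Xr^T has trace N. A sketch on made-up data:

import numpy as np

X = np.random.randn(200, 500)                   # N=200 samples, S=500 variants
X += 2e-6 * (np.random.rand(*X.shape) - 0.5)    # same jitter trick as above
X -= X.mean(0)
X /= X.std(0)
X /= np.sqrt(X.shape[1])
assert np.isclose(np.trace(X.dot(X.T)), 200)    # trace(Rr) == N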
Example 3: _maximum_likelihood

def _maximum_likelihood(self, X):
    n_samples, n_features = X.shape if X.ndim > 1 else (1, X.shape[0])
    n_components = self.n_components

    # Predict mean
    mu = X.mean(axis=0)

    # Predict covariance
    cov = sp.cov(X, rowvar=0)
    eigvals, eigvecs = self._eig_decomposition(cov)
    sigma2 = ((sp.sum(cov.diagonal()) - eigvals.sum()) /
              (n_features - n_components))  # FIXME: M < D?

    weight = sp.dot(eigvecs, sp.diag(sp.sqrt(eigvals - sigma2)))
    M = sp.dot(weight.T, weight) + sigma2 * sp.eye(n_components)
    inv_M = spla.inv(M)

    self.eigvals = eigvals
    self.eigvecs = eigvecs
    self.predict_mean = mu
    self.predict_cov = sp.dot(weight, weight.T) + sigma2 * sp.eye(n_features)
    self.latent_mean = sp.transpose(sp.dot(inv_M, sp.dot(weight.T, X.T - mu[:, sp.newaxis])))
    self.latent_cov = sigma2 * inv_M
    self.sigma2 = sigma2  # FIXME!
    self.weight = weight
    self.inv_M = inv_M

    return self.latent_mean
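The sigma2 expression in Example 3 is the maximum-likelihood noise variance of probabilistic PCA (Tipping & Bishop): the average of the discarded eigenvalues. A small numerical check, with hypothetical dimensions:

import numpy as np

D, q = 10, 3
X = np.random.randn(500, D)
cov = np.cov(X, rowvar=False)
evals = np.sort(np.linalg.eigvalsh(cov))[::-1]    # descending order
sigma2 = (np.trace(cov) - evals[:q].sum()) / (D - q)
assert np.isclose(sigma2, evals[q:].mean())       # mean of the discarded eigenvalues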
Example 4: learn_gmm

def learn_gmm(self, x, y, tau=None):
    '''
    Function that learns the GMM from training samples.
    It is possible to add a regularizer term: Sigma = Sigma + tau*I
    Input:
        x : the training samples
        y : the labels
        tau : the value of the regularizer; if tau = None (default), no regularization
    Output:
        the mean, covariance and proportion of each class
    '''
    ## Get information from the data
    C = int(y.max(0))  # Number of classes
    n = x.shape[0]     # Number of samples
    d = x.shape[1]     # Number of variables

    ## Initialization
    self.ni = sp.empty((C, 1))      # Vector of number of samples for each class
    self.prop = sp.empty((C, 1))    # Vector of proportions
    self.mean = sp.empty((C, d))    # Vector of means
    self.cov = sp.empty((C, d, d))  # Matrix of covariances

    ## Learn the parameters of the model for each class
    for i in range(C):
        j = sp.where(y == (i + 1))[0]
        self.ni[i] = float(j.size)
        self.prop[i] = self.ni[i] / n
        self.mean[i, :] = sp.mean(x[j, :], axis=0)
        self.cov[i, :, :] = sp.cov(x[j, :], bias=1, rowvar=0)  # Normalize by ni to be consistent with the update formulae

    if tau is not None:
        self.tau = tau * sp.eye(d)
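Example 4 passes bias=1 so that each class covariance is normalised by ni rather than ni - 1, matching the comment about the update formulae. The equivalence, on made-up samples:

import numpy as np

x = np.random.randn(100, 4)
S = np.cov(x, rowvar=0, bias=1)                  # divide by n instead of n - 1
xc = x - x.mean(0)
assert np.allclose(S, xc.T.dot(xc) / x.shape[0])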
Example 5: plot_covariance

def plot_covariance(history, dist_X):
    for dist_name in list(history.keys()):
        nTypes = len(history[dist_name].keys())
        errors = sp.zeros((2, nTypes))
        fig = plt.figure()
        fig.set_size_inches(6 * nTypes, 5)
        plt.subplot(1, nTypes + 1, 1)
        plt.imshow(dist_X.corr_matrix, cmap=plt.cm.gray, interpolation='none')
        counter = 0
        for samp_name in list(history[dist_name].keys()):
            counter += 1
            hist_single = history[dist_name][samp_name]
            nsteps = len(hist_single)
            nbatch = hist_single[-1]['X'].shape[1]
            N = hist_single[0]['X'].shape[0]
            X = sp.zeros((N, nbatch, nsteps))
            P = sp.zeros((N, nbatch, nsteps))
            for tt in range(nsteps):
                X[:, :, tt] = hist_single[tt]['X']
                P[:, :, tt] = hist_single[tt]['P']
            ax = plt.subplot(1, nTypes + 1, counter + 1)
            inv_var_diags = sp.diag(10.**sp.linspace(-dist_X.log_conditioning, 0, N))**.5
            corr_matrix_calc = sp.dot(sp.dot(inv_var_diags**.5, sp.cov(X.reshape(N, nbatch * nsteps), rowvar=1)), inv_var_diags**.5)
            plt.imshow(corr_matrix_calc, cmap=plt.cm.gray, interpolation='none')
            print(corr_matrix_calc)
    plt.show()
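The key move in Example 5 is flattening the batch and time axes into a single sample axis before calling sp.cov, so that the N state variables stay in rows. A stripped-down sketch with invented shapes:

import numpy as np

N, nbatch, nsteps = 5, 20, 100
X = np.random.randn(N, nbatch, nsteps)
C = np.cov(X.reshape(N, nbatch * nsteps), rowvar=1)   # N x N across all pooled samples
assert C.shape == (5, 5)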
Example 6: pca

def pca(data, dim):
    """ Return the first dim principal components as columns of a matrix.
    Every row of the matrix represents a point in the data space.
    """
    assert dim <= data.shape[1], \
        "dim must be less than or equal to the original dimension"
    # We have to make a copy of the original data and subtract the mean
    # of every entry
    data = makeCentered(data)
    cm = cov(data.T)
    # OPT: only calculate the first dim eigenvectors here
    # The following calculation may seem a bit "weird" but is correct:
    # the eigenvectors with the dim highest eigenvalues have to be selected.
    # We keep track of the indexes via enumerate to restore the right
    # ordering later.
    eigval, eigvec = eig(cm)
    eigval = [(val, ind) for ind, val in enumerate(eigval)]
    eigval.sort()
    eigval[:-dim] = []  # remove all but the dim highest elements
    # now we have to bring them back into the right order
    eig_indexes = [(ind, val) for val, ind in eigval]
    eig_indexes.sort(reverse=True)
    eig_indexes = [ind for ind, val in eig_indexes]
    return eigvec.take(eig_indexes, 1).T
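Because a covariance matrix is symmetric, the same selection can be done without the bookkeeping in Example 6 by using eigh, whose eigenvalues come back sorted in ascending order. A minimal sketch (makeCentered is replaced by explicit centering here):

import numpy as np

data = np.random.randn(100, 5)
data = data - data.mean(axis=0)            # center every column
eigval, eigvec = np.linalg.eigh(np.cov(data.T))
dim = 2
components = eigvec[:, ::-1][:, :dim].T    # rows: the dim leading principal components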
Example 7: fit

def fit(self, data):
    """Fit VAR model to data.

    Parameters
    ----------
    data : array, shape (trials, channels, samples) or (channels, samples)
        Epoched or continuous data set.

    Returns
    -------
    self : :class:`VAR`
        The :class:`VAR` object to facilitate method chaining (see usage
        example).
    """
    data = atleast_3d(data)

    if self.delta == 0 or self.delta is None:
        # ordinary least squares
        x, y = self._construct_eqns(data)
    else:
        # regularized least squares (ridge regression)
        x, y = self._construct_eqns_rls(data)

    b, res, rank, s = sp.linalg.lstsq(x, y)

    self.coef = b.transpose()

    self.residuals = data - self.predict(data)
    self.rescov = sp.cov(cat_trials(self.residuals[:, :, self.p:]))

    return self
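The residual covariance computed at the end of Example 7 is the noise covariance of the fitted VAR model. A toy least-squares fit of a VAR(1) on a single continuous trial shows the same pattern (data and shapes are invented):

import numpy as np

y = np.random.randn(500, 3)                 # samples x channels
X, Y = y[:-1], y[1:]                        # one-step-back regressors and targets
B, *_ = np.linalg.lstsq(X, Y, rcond=None)   # 3 x 3 coefficient matrix
resid = Y - X.dot(B)
rescov = np.cov(resid, rowvar=False)        # residual noise covariance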
Example 8: _initParams_fast

def _initParams_fast(self):
    """
    initialize the gp parameters
        1) project Y on the known factor X0 -> Y0
           (the average variance of Y0 is used to initialize the variance explained by X0)
        2) consider the residual Y1 = Y - Y0 (this is equivalent to regressing out X0)
        3) perform PCA on cov(Y1) and use the first k PCs to initialize X
        4) the variance of all other PCs is used to initialize the noise
        5) the variance explained by interaction is set to a small random number
    """
    Xd = LA.pinv(self.X0)
    Y0 = self.X0.dot(Xd.dot(self.Y))
    Y1 = self.Y - Y0
    YY = SP.cov(Y1)
    S, U = LA.eigh(YY)
    X = U[:, -self.k:] * SP.sqrt(S[-self.k:])
    a = SP.array([SP.sqrt(Y0.var(0).mean())])
    b = 1e-3 * SP.randn(1)
    c = SP.array([SP.sqrt((YY - SP.dot(X, X.T)).diagonal().mean())])
    # gp hyper params
    params = limix.CGPHyperParams()
    if self.interaction:
        params['covar'] = SP.concatenate([a, X.reshape(self.N * self.k, order='F'), SP.ones(1), b])
    else:
        params['covar'] = SP.concatenate([a, X.reshape(self.N * self.k, order='F')])
    params['lik'] = c
    return params
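Steps 1-4 of the docstring in Example 8 can be reproduced in a few lines on synthetic data. Note that SP.cov(Y1) with the default rowvar treats the P phenotype columns as observations, so it yields an N x N matrix whose leading scaled eigenvectors initialize the [N, k] factor X (all names below are illustrative):

import numpy as np

N, P, k = 100, 30, 2
X0 = np.random.randn(N, 1)                # known factor
Y = np.random.randn(N, P)
Y0 = X0.dot(np.linalg.pinv(X0).dot(Y))    # step 1: project Y onto X0
Y1 = Y - Y0                               # step 2: regress X0 out
S, U = np.linalg.eigh(np.cov(Y1))         # step 3: N x N covariance of the residual
X = U[:, -k:] * np.sqrt(S[-k:])           # top-k PCs initialize X
noise0 = np.sqrt((np.cov(Y1) - X.dot(X.T)).diagonal().mean())   # step 4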
Example 9: fit

def fit(self, data):
    """ Fit VAR model to data.

    Parameters
    ----------
    data : array-like, shape = [n_samples, n_channels, n_trials] or [n_samples, n_channels]
        Continuous or segmented data set.

    Returns
    -------
    self : :class:`VAR`
        The :class:`VAR` object to facilitate method chaining (see usage example).
    """
    data = sp.atleast_3d(data)

    if self.delta == 0 or self.delta is None:
        # ordinary least squares
        (x, y) = self._construct_eqns(data)
    else:
        # regularized least squares (ridge regression)
        (x, y) = self._construct_eqns_rls(data)

    (b, res, rank, s) = sp.linalg.lstsq(x, y)

    self.coef = b.transpose()

    self.residuals = data - self.predict(data)
    self.rescov = sp.cov(cat_trials(self.residuals), rowvar=False)

    return self
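Examples 7 and 9 differ mainly in data layout: Example 7 feeds channels-by-samples data to sp.cov with the default rowvar, while Example 9 has samples in rows and therefore passes rowvar=False. The two conventions are interchangeable:

import numpy as np

A = np.random.randn(1000, 4)   # samples x channels
assert np.allclose(np.cov(A, rowvar=False), np.cov(A.T))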
Example 10: _init_params

def _init_params(self, X):
    init = self.init
    n_samples, n_features = X.shape
    n_components = self.n_components

    if (init == 'kmeans'):
        km = Kmeans(n_components)
        clusters, mean, cov = km.cluster(X)
        coef = sp.array([c.shape[0] / n_samples for c in clusters])
        comps = [multivariate_normal(mean[i], cov[i], allow_singular=True)
                 for i in range(n_components)]
    elif (init == 'rand'):
        coef = sp.absolute(sprand.randn(n_components))
        coef = coef / coef.sum()
        means = X[sprand.permutation(n_samples)[0: n_components]]
        clusters = [[] for i in range(n_components)]
        for x in X:
            idx = sp.argmin([spla.norm(x - mean) for mean in means])
            clusters[idx].append(x)
        comps = []
        for k in range(n_components):
            mean = means[k]
            cov = sp.cov(clusters[k], rowvar=0, ddof=0)
            comps.append(multivariate_normal(mean, cov, allow_singular=True))

    self.coef = coef
    self.comps = comps
Example 11: cluster

def cluster(self, X):
    self.fit(X)
    cluster = [X[sp.argmax(self.responsibility, axis=1) == k] for k in range(self.n_classes)]
    mean = self.center
    cov = [sp.cov(c, rowvar=0, ddof=0) for c in cluster]
    return cluster, mean, cov
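Examples 10 and 11 both feed hard cluster assignments into sp.cov with ddof=0 and then build frozen scipy.stats.multivariate_normal components; allow_singular=True protects against clusters too small to yield a full-rank covariance. A condensed sketch with a fake assignment rule:

import numpy as np
from scipy.stats import multivariate_normal

X = np.random.randn(60, 2)
labels = (X[:, 0] > 0).astype(int)      # a made-up hard assignment
comps = []
for k in range(2):
    c = X[labels == k]
    comps.append(multivariate_normal(c.mean(0), np.cov(c, rowvar=0, ddof=0),
                                     allow_singular=True))
print(comps[0].pdf([0.0, 0.0]))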
Example 12: getEmpTraitCovar

def getEmpTraitCovar(self):
    """
    Returns the empirical trait covariance matrix
    """
    if self.P == 1:
        out = self.Y[self.Iok].var()
    else:
        out = SP.cov(self.Y[self.Iok].T)
    return out
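The branch in Example 12 exists because sp.cov degenerates to a scalar for a single trait. Note that cov defaults to ddof=1 while var() defaults to ddof=0, so the two branches use slightly different normalisations; the unbiased counterpart of the P == 1 case is:

import numpy as np

Y = np.random.randn(100, 1)
assert np.isclose(np.cov(Y.T), Y[:, 0].var(ddof=1))   # np.cov defaults to ddof=1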
Example 13: fit

def fit(self, X):
    cov = sp.cov(X, rowvar=0)
    eigvals, eigvecs = self._eig_decomposition(cov)
    self.eigvals = eigvals
    self.eigvecs = eigvecs
    self.mean = X.mean(axis=0)

    return sp.dot(X, eigvecs)
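Example 13 stores the mean but projects the raw X onto the eigenvectors. A common variant (sketched here, not the library's code) centers before projecting so that the scores have zero mean:

import numpy as np

X = np.random.randn(200, 4)
evals, evecs = np.linalg.eigh(np.cov(X, rowvar=0))
scores = (X - X.mean(axis=0)).dot(evecs[:, ::-1])   # leading component first
assert np.allclose(scores.mean(axis=0), 0)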
Example 14: _initParams

def _initParams(self, init_method=None):
    """ this function initializes the parameters and Ifilter """
    if self.P == 1:
        if self.bgRE:
            params0 = {'Cg': SP.sqrt(0.5) * SP.ones(1), 'Cn': SP.sqrt(0.5) * SP.ones(1)}
            Ifilter = None
        else:
            params0 = {'Cr': 1e-9 * SP.ones(1), 'Cn': SP.ones(1)}
            Ifilter = {'Cr': SP.zeros(1, dtype=bool), 'Cn': SP.ones(1, dtype=bool)}
    else:
        if self.bgRE:
            if self.colCovarType == 'freeform':
                if init_method == 'pairwise':
                    _RV = fitPairwiseModel(self.Y, XX=self.XX, S_XX=self.S_XX, U_XX=self.U_XX, verbose=False)
                    params0 = {'Cg': _RV['params0_Cg'], 'Cn': _RV['params0_Cn']}
                elif init_method == 'random':
                    params0 = {'Cg': SP.randn(self.Cg.getNumberParams()), 'Cn': SP.randn(self.Cn.getNumberParams())}
                else:
                    cov = 0.5 * SP.cov(self.Y.T) + 1e-4 * SP.eye(self.P)
                    chol = LA.cholesky(cov, lower=True)
                    params = chol[SP.tril_indices(self.P)]
                    params0 = {'Cg': params.copy(), 'Cn': params.copy()}
            Ifilter = None
        else:
            if self.colCovarType == 'freeform':
                cov = SP.cov(self.Y.T) + 1e-4 * SP.eye(self.P)
                chol = LA.cholesky(cov, lower=True)
                params = chol[SP.tril_indices(self.P)]
            #else:
            #    S, U = LA.eigh(cov)
            #    a = SP.sqrt(S[-self.rank_r:])[:, SP.newaxis] * U[:, -self.rank_r:]
            #    if self.colCovarType == 'lowrank_id':
            #        c = SP.sqrt(S[:-self.rank_r].mean()) * SP.ones(1)
            #    else:
            #        c = SP.sqrt(S[:-self.rank_r].mean()) * SP.ones(self.P)
            #    params0_Cn = SP.concatenate([a.T.ravel(), c])
            params0 = {'Cr': 1e-9 * SP.ones(self.P), 'Cn': params}
            Ifilter = {'Cr': SP.zeros(self.P, dtype=bool),
                       'Cn': SP.ones(params.shape[0], dtype=bool)}

    if self.mean.F is not None and self.bgRE:
        params0['mean'] = 1e-6 * SP.randn(self.mean.getParams().shape[0])
        if Ifilter is not None:
            Ifilter['mean'] = SP.ones(self.mean.getParams().shape[0], dtype=bool)

    return params0, Ifilter
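The freeform branch of Example 14 parameterizes a covariance by the lower triangle of its Cholesky factor, after shrinking the empirical sp.cov toward the identity with a 1e-4 jitter. Round-tripping that parameterization on made-up data:

import numpy as np
from scipy import linalg as LA

P = 4
Y = np.random.randn(500, P)
cov = 0.5 * np.cov(Y.T) + 1e-4 * np.eye(P)   # jittered empirical covariance
chol = LA.cholesky(cov, lower=True)
params = chol[np.tril_indices(P)]            # the free parameters
L = np.zeros((P, P))
L[np.tril_indices(P)] = params               # rebuild the factor ...
assert np.allclose(L.dot(L.T), cov)          # ... and recover the covariance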
Example 15: infer_full_post

def infer_full_post(self, X_i, D_i):
    class MJMError(Exception):
        pass

    [m, V] = self.infer_full(X_i, D_i)
    ns = X_i.shape[0]
    cv = sp.zeros([ns, ns])
    for i in range(self.size):   # xrange in the original Python 2 source
        cv += V[ns * i:ns * (i + 1), :]
    cv = cv / self.size + sp.cov(m, rowvar=0, bias=1)
    return [sp.mean(m, axis=0).reshape([1, ns]), cv]
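Example 15 is the law of total variance applied to an ensemble posterior: the pooled covariance is the average within-member covariance plus the biased covariance of the member means. The same computation in vectorized form (shapes are invented):

import numpy as np

size, ns = 8, 5
m = np.random.randn(size, ns)      # per-member posterior means
V = np.stack([np.eye(ns)] * size)  # per-member covariances
cv = V.mean(axis=0) + np.cov(m, rowvar=0, bias=1)
assert cv.shape == (ns, ns)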