This article collects typical usage examples of the scipy.linalg.pinv method in Python. If you have been wondering what exactly linalg.pinv does and how to use it, the curated code examples below may help. You can also explore further usage examples for the module it belongs to, scipy.linalg.
The following shows 15 code examples of linalg.pinv, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
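Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what scipy.linalg.pinv computes: the Moore-Penrose pseudo-inverse of a possibly non-square matrix, which satisfies a @ B @ a == a and B @ a @ B == B.

import numpy as np
from scipy.linalg import pinv

rng = np.random.default_rng(0)
a = rng.standard_normal((9, 6))          # a rectangular matrix
B = pinv(a)                              # Moore-Penrose pseudo-inverse, shape (6, 9)

print(B.shape)                           # (6, 9)
print(np.allclose(a, a @ B @ a))         # True
print(np.allclose(B, B @ a @ B))         # True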
Example 1: _orthogonalize
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def _orthogonalize(X):
    """Orthogonalize every column of design `X` w.r.t preceding columns
    Parameters
    ----------
    X : array of shape(n, p)
        the data to be orthogonalized
    Returns
    -------
    X : array of shape(n, p)
        the data after orthogonalization
    Notes
    -----
    X is changed in place. The columns are not normalized.
    """
    if X.size == X.shape[0]:
        return X
    from scipy.linalg import pinv, norm
    for i in range(1, X.shape[1]):
        X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
        # X[:, i] /= norm(X[:, i])
    return X
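A minimal usage sketch for the helper above (my own illustration, not part of the original project; it assumes numpy is imported as np in the same module):

import numpy as np

X = np.array([[1., 1.],
              [1., 2.],
              [1., 3.]])
X = _orthogonalize(X)           # modifies X in place and returns it
print(X[:, 0] @ X[:, 1])        # ~0.0: column 1 is now orthogonal to column 0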
Example 2: _orthogonalize
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def _orthogonalize(X):
    """ Orthogonalize every column of design `X` w.r.t preceding columns
    Parameters
    ----------
    X: array of shape(n, p)
       the data to be orthogonalized
    Returns
    -------
    X: array of shape(n, p)
       the data after orthogonalization
    Notes
    -----
    X is changed in place. The columns are not normalized
    """
    if X.size == X.shape[0]:
        return X
    from scipy.linalg import pinv
    for i in range(1, X.shape[1]):
        X[:, i] -= np.dot(np.dot(X[:, i], X[:, :i]), pinv(X[:, :i]))
    return X
Example 3: bb_shift_vector
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def bb_shift_vector(self) -> Vector:
    """The shift necessary to place the BB at the radiation isocenter.
    The values are in the coordinates defined in the documentation.
    The shift is based on the paper by Low et al. See online documentation for more.
    """
    A = np.empty([2 * len(self.images), 3])
    epsilon = np.empty([2 * len(self.images), 1])
    for idx, img in enumerate(self.images):
        g = img.gantry_angle
        c = img.couch_angle_varian_scale
        A[2 * idx:2 * idx + 2, :] = np.array([[-cos(c), -sin(c), 0],
                                              [-cos(g) * sin(c), cos(g) * cos(c), -sin(g)],
                                              ])  # equation 6 (minus delta)
        epsilon[2 * idx:2 * idx + 2] = np.array([[img.cax2bb_vector.y], [img.cax2bb_vector.x]])  # equation 7
    B = linalg.pinv(A)
    delta = B.dot(epsilon)  # equation 9
    return Vector(x=delta[1][0], y=-delta[0][0], z=-delta[2][0])
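The method above stacks two rows per image into an overdetermined system A·delta ≈ epsilon and solves it with the pseudo-inverse (equation 9). A standalone sketch of just that solve step, with random stand-ins for A and the measured offsets (all names here are illustrative, not pylinac's):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(1)
A = rng.standard_normal((16, 3))                    # 2 rows per image, 8 images
delta_true = np.array([[0.5], [-0.2], [0.8]])
epsilon = A @ delta_true + 0.01 * rng.standard_normal((16, 1))   # noisy offsets

delta = linalg.pinv(A).dot(epsilon)                 # least-squares shift, as in equation 9
print(delta.ravel())                                # close to [0.5, -0.2, 0.8]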
Example 4: inverse
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def inverse(self):
    return la.pinv(self._Sigma)
Example 5: mpinv
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def mpinv(self):
    return linalg.pinv(self.m)
Example 6: xpinv
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def xpinv(self):
    return linalg.pinv(self.x)
Example 7: pinv
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def pinv(a, cond=None, rcond=None):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.
    Calculate a generalized inverse of a matrix using a least-squares
    solver.
    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be pseudo-inverted
    cond, rcond : float
        Cutoff for 'small' singular values in the least-squares solver.
        Singular values smaller than rcond*largest_singular_value are
        considered zero.
    Returns
    -------
    B : array, shape (N, M)
    Raises LinAlgError if computation does not converge
    Examples
    --------
    >>> from numpy import *
    >>> a = random.randn(9, 6)
    >>> B = linalg.pinv(a)
    >>> allclose(a, dot(a, dot(B, a)))
    True
    >>> allclose(B, dot(B, dot(a, B)))
    True
    """
    a = asarray_chkfinite(a)
    b = numpy.identity(a.shape[0], dtype=a.dtype)
    if rcond is not None:
        cond = rcond
    return lstsq(a, b, cond=cond)[0]
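A quick check (my own sketch, not from the original module) that this least-squares formulation — solving a·x ≈ e_i for each column e_i of the identity — agrees with the SVD-based pseudo-inverse:

import numpy as np
from scipy.linalg import lstsq

a = np.random.randn(9, 6)
b = np.identity(a.shape[0], dtype=a.dtype)
B_lstsq = lstsq(a, b)[0]                          # same computation as the function above
print(np.allclose(B_lstsq, np.linalg.pinv(a)))    # True, up to numerical tolerance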
Example 8: unscented_correct
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def unscented_correct(cross_sigma, moments_pred, obs_moments_pred, z):
    '''Correct predicted state estimates with an observation
    Parameters
    ----------
    cross_sigma : [n_dim_state, n_dim_obs] array
        cross-covariance between the state at time t given all observations
        from timesteps [0, t-1] and the observation at time t
    moments_pred : [n_dim_state] Moments
        mean and covariance of state at time t given observations from
        timesteps [0, t-1]
    obs_moments_pred : [n_dim_obs] Moments
        mean and covariance of observation at time t given observations from
        times [0, t-1]
    z : [n_dim_obs] array
        observation at time t
    Returns
    -------
    moments_filt : [n_dim_state] Moments
        mean and covariance of state at time t given observations from time
        steps [0, t]
    '''
    mu_pred, sigma_pred = moments_pred
    obs_mu_pred, obs_sigma_pred = obs_moments_pred
    n_dim_state = len(mu_pred)
    n_dim_obs = len(obs_mu_pred)
    if not np.any(ma.getmask(z)):
        # calculate Kalman gain
        K = cross_sigma.dot(linalg.pinv(obs_sigma_pred))
        # correct mu, sigma
        mu_filt = mu_pred + K.dot(z - obs_mu_pred)
        sigma_filt = sigma_pred - K.dot(cross_sigma.T)
    else:
        # no corrections to be made
        mu_filt = mu_pred
        sigma_filt = sigma_pred
    return Moments(mu_filt, sigma_filt)
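A toy call of the helper above on a one-dimensional state (a sketch only: the Moments namedtuple is defined here to mimic the module's two-field mean/covariance container, and the module-level np, ma and linalg imports used by the function are assumed to be in scope):

import numpy as np
import numpy.ma as ma
from collections import namedtuple
from scipy import linalg

Moments = namedtuple('Moments', ['mean', 'covariance'])   # stand-in for the module's type

moments_pred = Moments(np.array([0.0]), np.array([[2.0]]))       # predicted state
obs_moments_pred = Moments(np.array([0.0]), np.array([[2.5]]))   # predicted observation
cross_sigma = np.array([[2.0]])                                  # state/observation cross-covariance
z = np.array([1.0])                                              # actual observation

filt = unscented_correct(cross_sigma, moments_pred, obs_moments_pred, z)
print(filt.mean, filt.covariance)    # mean pulled toward z, covariance reduced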
Example 9: _em_observation_matrix
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def _em_observation_matrix(observations, observation_offsets,
                           smoothed_state_means, smoothed_state_covariances):
    r"""Apply the EM algorithm to parameter `observation_matrix`
    Maximize expected log likelihood of observations with respect to the
    observation matrix `observation_matrix`.
    .. math::
        C &= ( \sum_{t=0}^{T-1} (z_t - d_t) \mathbb{E}[x_t]^T )
             ( \sum_{t=0}^{T-1} \mathbb{E}[x_t x_t^T] )^{-1}
    """
    _, n_dim_state = smoothed_state_means.shape
    n_timesteps, n_dim_obs = observations.shape
    res1 = np.zeros((n_dim_obs, n_dim_state))
    res2 = np.zeros((n_dim_state, n_dim_state))
    for t in range(n_timesteps):
        if not np.any(np.ma.getmask(observations[t])):
            observation_offset = _last_dims(observation_offsets, t, ndims=1)
            res1 += np.outer(observations[t] - observation_offset,
                             smoothed_state_means[t])
            res2 += (
                smoothed_state_covariances[t]
                + np.outer(smoothed_state_means[t], smoothed_state_means[t])
            )
    return np.dot(res1, linalg.pinv(res2))
Example 10: _em_transition_matrix
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def _em_transition_matrix(transition_offsets, smoothed_state_means,
                          smoothed_state_covariances, pairwise_covariances):
    r"""Apply the EM algorithm to parameter `transition_matrix`
    Maximize expected log likelihood of observations with respect to the state
    transition matrix `transition_matrix`.
    .. math::
        A &= ( \sum_{t=1}^{T-1} \mathbb{E}[x_t x_{t-1}^{T}]
               - b_{t-1} \mathbb{E}[x_{t-1}]^T )
             ( \sum_{t=1}^{T-1} \mathbb{E}[x_{t-1} x_{t-1}^T] )^{-1}
    """
    n_timesteps, n_dim_state, _ = smoothed_state_covariances.shape
    res1 = np.zeros((n_dim_state, n_dim_state))
    res2 = np.zeros((n_dim_state, n_dim_state))
    for t in range(1, n_timesteps):
        transition_offset = _last_dims(transition_offsets, t - 1, ndims=1)
        res1 += (
            pairwise_covariances[t]
            + np.outer(smoothed_state_means[t],
                       smoothed_state_means[t - 1])
            - np.outer(transition_offset, smoothed_state_means[t - 1])
        )
        res2 += (
            smoothed_state_covariances[t - 1]
            + np.outer(smoothed_state_means[t - 1],
                       smoothed_state_means[t - 1])
        )
    return np.dot(res1, linalg.pinv(res2))
Example 11: initialize
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def initialize(self, design):
    # PLEASE don't assume we have a constant...
    # TODO: handle case for noconstant regression
    self.design = design
    self.whitened_design = self.whiten(self.design)
    self.calc_beta = spl.pinv(self.whitened_design)
    self.normalized_cov_beta = np.dot(self.calc_beta,
                                      np.transpose(self.calc_beta))
    self.df_total = self.whitened_design.shape[0]
    eps = np.abs(self.design).sum() * np.finfo(np.float64).eps  # np.float was removed in recent NumPy; use np.float64
    self.df_model = matrix_rank(self.design, eps)
    self.df_residuals = self.df_total - self.df_model
Example 12: _fit
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def _fit(self, X, compute_sources=False):
    """Fit the model
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.
    compute_sources : bool
        If False, sources are not computed but only the rotation matrix.
        This can save memory when working with big data. Defaults to False.
    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    fun_args = {} if self.fun_args is None else self.fun_args
    whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
        X=X, n_components=self.n_components, algorithm=self.algorithm,
        whiten=self.whiten, fun=self.fun, fun_args=fun_args,
        max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
        random_state=self.random_state, return_X_mean=True,
        compute_sources=compute_sources, return_n_iter=True)
    if self.whiten:
        self.components_ = np.dot(unmixing, whitening)
        self.mean_ = X_mean
        self.whitening_ = whitening
    else:
        self.components_ = unmixing
    self.mixing_ = linalg.pinv(self.components_)
    if compute_sources:
        self.__sources = sources
    return sources
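From the user's side, the relevant detail is that the fitted mixing_ attribute is the pseudo-inverse of components_. A small sketch using scikit-learn's public FastICA on toy data (exact numerical agreement and default warnings depend on the sklearn version):

import numpy as np
from scipy import linalg
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
S = rng.laplace(size=(500, 2))            # independent, non-Gaussian sources
X = S @ rng.randn(2, 2).T                 # mixed observations

ica = FastICA(n_components=2, random_state=0).fit(X)
print(np.allclose(ica.mixing_, linalg.pinv(ica.components_)))   # expected: True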
Example 13: _run_modeling
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def _run_modeling(cls, dataset_reader, **kwargs):
    if 'subject_rejection' in kwargs and kwargs['subject_rejection'] is True:
        assert False, 'SubjectAwareGenerativeModel must not and need not ' \
                      'apply subject rejection.'
    ret = cls._get_opinion_score_2darray_with_preprocessing(dataset_reader, **kwargs)
    score_mtx = ret['opinion_score_2darray']
    num_video, num_subject = score_mtx.shape
    A = np.zeros([num_video * num_subject, num_video + num_subject])
    for idx_video in range(num_video):
        for idx_subject in range(num_subject):
            cur_row = idx_video * num_subject + idx_subject
            A[cur_row][idx_subject] = 1.0
            A[cur_row][num_subject + idx_video] = 1.0
    y = np.array(score_mtx.ravel())
    # add the extra constraint that the first ref video has score MOS
    mos = pd.DataFrame(score_mtx).mean(axis=1)
    row = np.zeros(num_subject + num_video)
    row[num_subject + 0] = 1
    score = mos[0]
    A = np.vstack([A, row])
    y = np.hstack([y, [score]])
    b_q = np.dot(linalg.pinv(A), y)
    b = b_q[:num_subject]
    q = b_q[num_subject:]
    result = {
        'quality_scores': list(q),
        'observer_bias': list(b),
    }
    return result
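The core of this example is an ordinary least-squares decomposition of raw opinion scores into per-subject biases and per-video quality scores. A stripped-down standalone sketch of that solve (toy score matrix; this is not the project's API):

import numpy as np
from scipy import linalg

score_mtx = np.array([[4.0, 5.0, 4.5],      # rows: videos, columns: subjects
                      [2.0, 3.0, 2.5]])
num_video, num_subject = score_mtx.shape

A = np.zeros([num_video * num_subject, num_video + num_subject])
for idx_video in range(num_video):
    for idx_subject in range(num_subject):
        cur_row = idx_video * num_subject + idx_subject
        A[cur_row][idx_subject] = 1.0                 # subject bias term
        A[cur_row][num_subject + idx_video] = 1.0     # video quality term
y = score_mtx.ravel()

b_q = np.dot(linalg.pinv(A), y)           # least-squares biases and qualities
print(b_q[:num_subject])                  # observer biases
print(b_q[num_subject:])                  # quality scores

Note that without the extra MOS-anchoring row used in the original, the split between bias and quality is only determined up to an additive constant; pinv then returns the minimum-norm solution, which is exactly why the original adds that constraint.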
Example 14: get_pinvs
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def get_pinvs(adj_matrix, vals_org, dim):
    """ Precomputes the pseudo-inverse matrices for every dimension.
    :param adj_matrix: sp.spmatrix
        The graph represented as a sparse scipy matrix
    :param vals_org: np.ndarray, shape [n]
        The generalized eigenvalues of the clean graph
    :param dim: int
        Embedding dimension
    :return: np.ndarray, shape [k, n, n]
        Pseudo-inverse matrices for every dimension
    """
    deg_matrix = sp.diags(adj_matrix.sum(0).A1)
    pinvs = []
    for k in range(dim):
        print(k)
        try:
            pinvs.append(-np.linalg.pinv((adj_matrix - vals_org[k] * deg_matrix).toarray()))
        except np.linalg.LinAlgError:
            print('error')
            pinvs.append(-spl.pinv((adj_matrix - vals_org[k] * deg_matrix).toarray()))
    return np.stack(pinvs)
Example 15: __MR_affinity_matrix
# Required imports: from scipy import linalg [as alias]
# Or: from scipy.linalg import pinv [as alias]
def __MR_affinity_matrix(self, img, labels):
    W, D = self.__MR_W_D_matrix(img, labels)
    aff = pinv(D - self.weight_parameters['alpha'] * W)
    aff[sp.eye(sp.amax(labels) + 1).astype(bool)] = 0.0  # diagonal elements to 0
    return aff