This article collects typical usage examples of the scipy.linalg.pinv2 function in Python. If you have been wondering what exactly pinv2 does, or how to call it, the hand-picked code examples below should help.
The following 15 code examples of pinv2 are shown, sorted by popularity by default.
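Before diving into the examples, here is a minimal sketch of the basic call. Note that pinv2 computes the Moore-Penrose pseudo-inverse via SVD; it was deprecated in SciPy 1.7 and removed in later releases, where scipy.linalg.pinv provides the same computation.

import numpy as np
from scipy.linalg import pinv2  # on recent SciPy, use scipy.linalg.pinv instead

A = np.array([[1., 2.], [3., 4.], [5., 6.]])      # rectangular: no ordinary inverse
A_pinv = pinv2(A)                                 # Moore-Penrose pseudo-inverse via SVD
print(A_pinv.shape)                               # (2, 3)
print(np.allclose(np.dot(A_pinv, A), np.eye(2)))  # left inverse, since A has full column rank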
Example 1: train
def train(arduino_serial, is_right_side):
    training_values = collect_points(arduino_serial, is_right_side)
    [M1, M2, M3, M4] = populate_matrices(training_values)
    # find pseudo-inverses using singular value decomposition
    M1inv = linalg.pinv2(M1)
    M2inv = linalg.pinv2(M2)
    M3inv = linalg.pinv2(M3)
    M4inv = linalg.pinv2(M4)
    print(M1inv.shape)
    print(x.shape)  # x and y are target coordinate vectors defined at module level
    # find coefficients; the operands appear to be np.matrix instances,
    # so * performs matrix multiplication here
    xCoeff1 = M1inv * x
    xCoeff2 = M2inv * x
    xCoeff3 = M3inv * x
    xCoeff4 = M4inv * x
    print(xCoeff1)
    yCoeff1 = M1inv * y
    yCoeff2 = M2inv * y
    yCoeff3 = M3inv * y
    yCoeff4 = M4inv * y
    print(yCoeff1)
    return [xCoeff1, xCoeff2, xCoeff3, xCoeff4, yCoeff1, yCoeff2, yCoeff3, yCoeff4]
Example 2: _nipals_twoblocks_inner_loop
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
                                 norm_y_weights=False):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y); returns the first left and right
    singular vectors of X'Y. See PLS for the meaning of the parameters. It is
    similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
    """
    y_score = Y[:, [0]]
    x_weights_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    eps = np.finfo(X.dtype).eps
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                # We use slower pinv2 (same as np.linalg.pinv) for stability
                # reasons
                X_pinv = pinv2(X, check_finite=False)
            x_weights = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # If y_score only has zeros x_weights will only have zeros. In
        # this case add an epsilon to converge to a more acceptable
        # solution
        if np.dot(x_weights.T, x_weights) < eps:
            x_weights += eps
        # 1.2 Normalize u
        x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, x_weights)
        # 2.1 Update y_weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = pinv2(Y, check_finite=False)  # compute once pinv(Y)
            y_weights = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        # 2.2 Normalize y_weights
        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
        # y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached',
                          ConvergenceWarning)
            break
        x_weights_old = x_weights
        ite += 1
    return x_weights, y_weights, ite
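As the docstring claims, the loop converges to the first singular pair of X'Y. A quick numerical check, sketched under the assumption that numpy/scipy and the function above are importable:

# Sanity-check sketch (not from the original source): compare the NIPALS
# x_weights against the first left singular vector of X'Y, up to sign.
import numpy as np
from scipy.linalg import svd

rng = np.random.RandomState(0)
X, Y = rng.randn(50, 5), rng.randn(50, 3)
x_w, y_w, n_iter = _nipals_twoblocks_inner_loop(X, Y, mode="A")
U = svd(np.dot(X.T, Y), full_matrices=False)[0]
print(np.abs(np.abs(x_w.ravel()) - np.abs(U[:, 0])).max())  # should be ~0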
Example 3: add_fit
def add_fit(self, X):
    n_samples = X.shape[0]
    # old state: recover M = (H'H + I)^-1 and the current output weights
    first = safe_sparse_dot(self.hidden_activations_.T, self.hidden_activations_)
    M = pinv2(first + 1 * np.identity(first.shape[0]))
    beta = self.coef_output_
    # new batch: hidden activations for the incoming samples
    H = self._get_hidden_activations(X)
    # update M and the output weights with a rank update (online ELM)
    first = pinv2(1 * np.identity(n_samples) + safe_sparse_dot(safe_sparse_dot(H, M), H.T))
    second = safe_sparse_dot(safe_sparse_dot(safe_sparse_dot(safe_sparse_dot(M, H.T), first), H), M)
    M = M - second
    self.coef_output_ = beta + safe_sparse_dot(safe_sparse_dot(M, H.T), (X - safe_sparse_dot(H, beta)))
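The chain of safe_sparse_dot calls above is a recursive least-squares update (note the source regresses onto X itself, autoencoder-style). For readability, roughly the same step written with dense numpy operators, as a sketch assuming dense arrays and an explicit target matrix T:

import numpy as np

def os_elm_update(M, beta, H, T):
    # Woodbury-style rank update: avoids re-inverting the full Gram matrix.
    # M is the running inverse term, H the new hidden activations,
    # T the new targets, beta the current output weights.
    K = np.linalg.pinv(np.eye(H.shape[0]) + H @ M @ H.T)
    M = M - M @ H.T @ K @ H @ M
    beta = beta + M @ H.T @ (T - H @ beta)
    return M, beta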
Example 4: test_simple_rows
def test_simple_rows(self):
    a = array([[1, 2], [3, 4], [5, 6]], dtype=float)
    a_pinv = pinv(a)
    a_pinv2 = pinv2(a)
    a_pinv3 = pinv3(a)
    assert_array_almost_equal(a_pinv, a_pinv2)
    assert_array_almost_equal(a_pinv, a_pinv3)
Example 5: test_simple_cols
def test_simple_cols(self):
    a = array([[1, 2, 3], [4, 5, 6]], dtype=float)
    a_pinv = pinv(a)
    a_pinv2 = pinv2(a)
    a_pinv3 = pinv3(a)
    assert_array_almost_equal(a_pinv, a_pinv2)
    assert_array_almost_equal(a_pinv, a_pinv3)
Example 6: _compute_eloreta_inv
def _compute_eloreta_inv(G, W, n_orient, n_nzero, lambda2, force_equal):
    """Invert weights and compute M."""
    W_inv = np.empty_like(W)
    n_src = W_inv.shape[0]
    if n_orient == 1 or force_equal:
        W_inv[:] = 1. / W
    else:
        for ii in range(n_src):
            # Here we use a single-precision-suitable `rcond` (given our
            # 3x3 matrix size) because the inv could be saved in single
            # precision.
            W_inv[ii] = linalg.pinv2(W[ii], rcond=1e-7)

    # Weight the gain matrix
    W_inv_Gt = np.empty_like(G).T
    for ii in range(n_src):
        sl = slice(n_orient * ii, n_orient * (ii + 1))
        W_inv_Gt[sl, :] = np.dot(W_inv[ii], G[:, sl].T)

    # Compute the inverse, normalizing by the trace
    G_W_inv_Gt = np.dot(G, W_inv_Gt)
    G_W_inv_Gt *= n_nzero / np.trace(G_W_inv_Gt)
    u, s, v = linalg.svd(G_W_inv_Gt)
    s = s / (s ** 2 + lambda2)
    M = np.dot(v.T[:, :n_nzero] * s[:n_nzero], u.T[:n_nzero])
    return M, W_inv
Example 7: fit
def fit(self, X, y):
    if self.activation is None:
        # Useful to quantify the impact of the non-linearity
        self._activate = lambda x: x
    else:
        self._activate = self.activations[self.activation]
    rng = check_random_state(self.random_state)

    # one-of-K coding for output values
    self.classes_ = unique_labels(y)
    Y = label_binarize(y, self.classes_)

    # set hidden layer parameters randomly
    n_features = X.shape[1]
    if self.rank is None:
        if self.density == 1:
            self.weights_ = rng.randn(n_features, self.n_hidden)
        else:
            self.weights_ = sparse_random_matrix(
                self.n_hidden, n_features, density=self.density,
                random_state=rng).T
    else:
        # Low rank weight matrix
        self.weights_u_ = rng.randn(n_features, self.rank)
        self.weights_v_ = rng.randn(self.rank, self.n_hidden)
    self.biases_ = rng.randn(self.n_hidden)

    # map the input data through the hidden layer
    H = self.transform(X)

    # fit the linear model on the hidden layer activation
    self.beta_ = np.dot(pinv2(H), Y)
    return self
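The corresponding predict step is not shown in the source, but such a model typically classifies by mapping new inputs through the same hidden layer and taking the largest score per row. A hedged sketch, where clf and X_test are placeholders and the attribute names simply mirror the fit above:

# Hypothetical decision function matching the fit above (not from the source)
scores = np.dot(clf.transform(X_test), clf.beta_)
y_pred = clf.classes_[np.argmax(scores, axis=1)]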
Example 8: test_pinv_array
def test_pinv_array(self):
    from scipy.linalg import pinv2

    tests = []
    tests.append(np.random.rand(1, 1, 1))
    tests.append(np.random.rand(3, 1, 1))
    tests.append(np.random.rand(1, 2, 2))
    tests.append(np.random.rand(3, 2, 2))
    tests.append(np.random.rand(1, 3, 3))
    tests.append(np.random.rand(3, 3, 3))
    A = np.random.rand(1, 3, 3)
    A[0, 0, :] = A[0, 1, :]  # make a singular 3x3 block
    tests.append(A)

    tests.append(np.random.rand(1, 1, 1) + 1.0j*np.random.rand(1, 1, 1))
    tests.append(np.random.rand(3, 1, 1) + 1.0j*np.random.rand(3, 1, 1))
    tests.append(np.random.rand(1, 2, 2) + 1.0j*np.random.rand(1, 2, 2))
    tests.append(np.random.rand(3, 2, 2) + 1.0j*np.random.rand(3, 2, 2))
    tests.append(np.random.rand(1, 3, 3) + 1.0j*np.random.rand(1, 3, 3))
    tests.append(np.random.rand(3, 3, 3) + 1.0j*np.random.rand(3, 3, 3))
    A = np.random.rand(1, 3, 3) + 1.0j*np.random.rand(1, 3, 3)
    A[0, 0, :] = A[0, 1, :]  # make a singular complex 3x3 block
    tests.append(A)

    for test in tests:
        pinv_test = np.zeros_like(test)
        for i in range(pinv_test.shape[0]):
            pinv_test[i] = pinv2(test[i])

        pinv_array(test)  # operates in place
        assert_array_almost_equal(test, pinv_test, decimal=4)
Example 9: fit
def fit(self, X=None, y=None):
    """
    The Gaussian Process model fitting method.

    Parameters
    ----------
    X : double array_like
        An array with shape (n_samples, n_features) with the input at which
        observations were made.
    y : array_like, shape (n_samples, 3)
        An array with shape (n_samples, 3) with the observations of the
        output to be predicted.

    Returns
    -------
    gp : self
        A fitted Gaussian Process model object awaiting data to perform
        predictions.
    """
    if X is not None:  # `if X:` would be ambiguous for arrays
        K_list = self.calc_scalar_kernel_matrices(X)
    else:
        K_list = self.calc_scalar_kernel_matrices()

    # add diagonal noise to each scalar kernel matrix
    # (sp.eye restricts the nugget to the diagonal, as the comment states)
    K_list = [K + self.nugget * sp.eye(K.shape[0]) for K in K_list]

    Kglob = None
    # outer_iv = [sp.outer(iv, iv.T) for iv in self.ivs]  # NO, wrong
    for K, ivs, iv_corr in zip(K_list, self.ivs, self.iv_corr):
        # make the outer product tensor of shape (N_ls, N_ls, 3, 3) and
        # multiply it with the scalar kernel
        K3D = iv_corr * K[:, :, None, None] * rotmat_multi(ivs, ivs)
        if Kglob is None:
            Kglob = K3D
        else:
            Kglob += K3D
    # reshape the tensor onto a 2D array tiled with 3x3 matrix blocks;
    # all channels merged into one covariance matrix:
    # K^{glob}_{ij} = \sum_{k = 1}^{N_{IVs}} w_k D_{k, ij} |v_k^i\rangle \langle v_k^j |
    Kglob = my_tensor_reshape(Kglob)

    try:
        inv = LA.pinv2(Kglob)
    except LA.LinAlgError as err:
        print("pinv2 failed: %s. Switching to pinvh" % err)
        try:
            inv = LA.pinvh(Kglob)
        except LA.LinAlgError as err:
            print("pinvh also failed: %s. Giving up." % err)
            inv = None

    # alpha is the vector of regression coefficients of GaussianProcess
    alpha = sp.dot(inv, self.y.ravel())
    if not self.low_memory:
        self.inverse = inv
        self.Kglob = Kglob
    self.alpha = sp.array(alpha)
Example 10: test_simple_complex
def test_simple_complex(self):
    a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
         + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
    a_pinv = pinv(a)
    assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
    a_pinv = pinv2(a)
    assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
Example 11: _pseudo_inverse_dense
def _pseudo_inverse_dense(L, rhoss, method='direct', **pseudo_args):
    """
    Internal function for computing the pseudo inverse of a Liouvillian using
    dense matrix methods. See pseudo_inverse for details.
    """
    if method == 'direct':
        rho_vec = np.transpose(mat2vec(rhoss.full()))
        tr_mat = tensor([identity(n) for n in L.dims[0][0]])
        tr_vec = np.transpose(mat2vec(tr_mat.full()))
        N = np.prod(L.dims[0][0])
        I = np.identity(N * N)
        P = np.kron(np.transpose(rho_vec), tr_vec)
        Q = I - P
        LIQ = np.linalg.solve(L.full(), Q)
        R = np.dot(Q, LIQ)
        return Qobj(R, dims=L.dims)
    elif method == 'numpy':
        return Qobj(np.linalg.pinv(L.full()), dims=L.dims)
    elif method == 'scipy':
        return Qobj(la.pinv(L.full()), dims=L.dims)
    elif method == 'scipy2':
        return Qobj(la.pinv2(L.full()), dims=L.dims)
    else:
        raise ValueError("Unsupported method '%s'. Use 'direct', 'numpy', "
                         "'scipy' or 'scipy2'" % method)
Example 12: unwhiten
def unwhiten(X, comp):
    """
    Inverse process of whitening.

    comp is assumed to be column-wise.
    """
    uw = la.pinv2(comp)
    return np.dot(X, uw)
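As a quick sanity check, whitening and then calling unwhiten should round-trip the data. A minimal sketch, assuming whitening was applied as a right matrix multiplication with a full-rank comp (the matrices here are hypothetical):

import numpy as np

X = np.random.randn(100, 10)
comp = np.random.randn(10, 10)                   # hypothetical full-rank components
X_white = np.dot(X, comp)
print(np.allclose(unwhiten(X_white, comp), X))   # True when comp has full rank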
Example 13: _evaluateNet
def _evaluateNet(self):
    wtRatio = 1. / 3.
    inputs = self.dataset.getField('input')
    targets = self.dataset.getField('target')
    training_start = int(wtRatio * len(inputs))
    washout_inputs = inputs[:training_start]
    training_inputs = inputs[training_start:]
    training_targets = targets[training_start:]

    phis = []
    self.model.network.reset()
    self.model.washout(washout_inputs)
    phis.append(self.model.washout(training_inputs))
    PHI = concatenate(phis).T
    PHI_INV = pinv2(PHI)
    TARGET = concatenate(training_targets).T
    W = dot(TARGET, PHI_INV)
    self.model.setOutputWeightMatrix(W)

    self.model.activate(washout_inputs)
    outputs = self.model.activate(training_inputs)
    OUTPUT = concatenate(outputs)
    TARGET = TARGET.T
    fitness = self.evalfunc(OUTPUT, TARGET)
    return fitness
Example 14: test_check_finite
def test_check_finite(self):
    a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]])
    a_pinv = pinv(a, check_finite=False)
    assert_array_almost_equal(dot(a, a_pinv), [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    a_pinv = pinv2(a, check_finite=False)
    assert_array_almost_equal(dot(a, a_pinv), [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    a_pinv = pinv3(a, check_finite=False)
    assert_array_almost_equal(dot(a, a_pinv), [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
Example 15: neurons
def neurons(x, y, nb_neurons):
    n = x.shape[1]
    # random generation of the neuron parameters
    w = st.norm.rvs(size=(n, nb_neurons))
    b = st.norm.rvs(size=(1, nb_neurons))
    h = H(w, b, x)  # activation matrix computation (H is defined elsewhere)
    beta_chapeau = dot(la.pinv2(h), y)  # Moore-Penrose inversion
    return w, b, beta_chapeau
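To predict with the fitted layer, new inputs go through the same activation matrix. A hedged usage sketch, assuming the same H helper and dot/la imports as above; x_train, y_train, and x_test are placeholder names, not from the original source:

# Hypothetical usage of the ELM fit above
w, b, beta_chapeau = neurons(x_train, y_train, nb_neurons=50)
y_pred = dot(H(w, b, x_test), beta_chapeau)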