This article collects typical usage examples of the PCA.inverse_transform method from Python's sklearn.decomposition module. If you have been wondering what PCA.inverse_transform does, how to call it, or what it looks like in practice, the curated examples below may help. You can also read more about the containing class, sklearn.decomposition.PCA.
The following shows 15 code examples of PCA.inverse_transform, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
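Before the examples, a minimal sketch of the round trip may help: transform projects data onto the fitted principal components, and inverse_transform maps those scores back into the original feature space, exactly if all components were kept and only approximately otherwise. The data below is synthetic and purely illustrative.

import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(0).randn(100, 5)   # 100 samples, 5 features
pca = PCA(n_components=2).fit(X)
scores = pca.transform(X)                    # shape (100, 2): coordinates in PC space
X_approx = pca.inverse_transform(scores)     # shape (100, 5): best rank-2 reconstruction
# For whiten=False this equals: scores @ pca.components_ + pca.mean_
print(np.mean((X - X_approx) ** 2))          # mean squared reconstruction error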
Example 1: do_pca_analysis
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def do_pca_analysis(profiles, lens, name=''):
    L = lens - np.mean(lens)
    lx = np.linspace(np.min(L), np.max(L), 100)
    pr = []
    for i, p in enumerate(profiles):
        profile = UnivariateSpline(p[0], np.log(p[1]), s=1000)
        x = np.linspace(0, 0.9, 90)
        pr.append(profile(x))
    y = np.array(pr)
    pca = PCA(n_components=2)
    pca.fit(y)
    # print(pca.explained_variance_ratio_)
    yp = pca.transform(y)
    x = np.linspace(0, 0.9, y.shape[1])
    plt.subplot(231)
    plt.scatter(yp[:, 0], yp[:, 1], c=L / float(np.max(L)), cmap=plt.get_cmap('jet'))
    plt.subplot(232)
    m, b, r1, p1, s = stats.linregress(L, yp[:, 0])
    plt.scatter(L, yp[:, 0])
    plt.plot(lx, m * lx + b, color='r')
    plt.fill_between(lx, (m - s) * lx + b, (m + s) * lx + b, alpha=0.3, color='r')
    plt.title("pc1 r:{0:.2f},p:{1:.2e}".format(r1, p1))
    plt.subplot(233)
    m, b, r2, p2, s = stats.linregress(L, yp[:, 1])
    plt.scatter(L, yp[:, 1])
    plt.plot(lx, m * lx + b, color='r')
    plt.fill_between(lx, (m - s) * lx + b, (m + s) * lx + b, alpha=0.3, color='r')
    plt.title("pc2 r:{0:.2f},p:{1:.2e}".format(r2, p2))
    plt.subplot(234)
    plt.plot(x, y.T, alpha=0.5)
    plt.title('data')
    plt.subplot(235)
    n_samples = 50
    # Sample synthetic scores along PC1 only, then map back to profile space
    s = np.random.normal(scale=np.std(yp[:, 0]), size=n_samples)
    v = np.vstack([s, np.zeros(n_samples)]).T
    yt = pca.inverse_transform(v)
    plt.plot(x, yt.T, alpha=0.5)
    plt.title('pc1')
    plt.subplot(236)
    n_samples = 50
    s = np.random.normal(scale=np.std(yp[:, 1]), size=n_samples)
    v = np.vstack([np.zeros(n_samples), s]).T
    yt = pca.inverse_transform(v)
    plt.plot(x, yt.T, alpha=0.5)
    plt.title('pc2')
    plt.suptitle('session {0} n:{1}'.format(name, L.shape[0]))
    plt.savefig(ensure_dir("plots/2XA/pca_{0}.pdf".format(name)))
    plt.clf()
    if L.shape[0] > 11:
        print(name, L.shape[0], p1, r1, p2, r2, np.std(L))
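Subplots 235 and 236 above use a common inverse_transform idiom: draw synthetic scores along a single principal axis, zero out the others, and map them back to see what shape variation each component encodes. A self-contained sketch of that idiom, with random data standing in for the spline profiles:

import numpy as np
from sklearn.decomposition import PCA

y = np.random.RandomState(1).randn(40, 90)   # e.g. 40 profiles of 90 points each
pca = PCA(n_components=2).fit(y)
yp = pca.transform(y)

scores = np.zeros((50, 2))                   # 50 synthetic score vectors
scores[:, 0] = np.random.normal(scale=np.std(yp[:, 0]), size=50)  # vary PC1 only
curves = pca.inverse_transform(scores)       # each row: a profile shaped by PC1 alone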
Example 2: _plot_energy
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def _plot_energy(self, num_samples=25, path_length=20):
    """
    Plots the energy function of the network.

    num_samples   The number of samples to be used in the computation of the
                  energy function. The greater the number of samples, the higher
                  the accuracy of the resultant plot.
    path_length   The number of steps to compute in calculating each sample's
                  path of convergence toward the network's attractors.
    """
    attractors = self.training_data
    states = [[np.random.choice([-1, 1]) for i in range(self.num_neurons)]
              for j in range(num_samples)]
    pca = PCA(n_components=2)
    pca.fit(attractors)
    paths = [attractors]
    for i in range(path_length):
        states = self.learn(states, steps=1)
        paths.append(states)
    # Build a grid in the 2-D PCA plane and lift it back into state space
    x = y = linspace(-1, 1, 100)
    X, Y = meshgrid(x, y)
    meshpts = array([[x, y] for x, y in zip(np.ravel(X), np.ravel(Y))])
    mesh = pca.inverse_transform(meshpts)
    grid = vstack((mesh, vstack(paths)))
    energies = array([self.energy(point) for point in grid])
    grid = pca.transform(grid)
    gmin, gmax = grid.min(), grid.max()
    xi, yi = np.mgrid[gmin:gmax:100j, gmin:gmax:100j]
    zi = gd(grid, energies, (xi, yi), method='nearest')  # gd: presumably scipy.interpolate.griddata
    self.energy_diagram.plot_surface(xi, yi, zi, cmap=cm.coolwarm, linewidth=1)
    self.contour_diagram.contour(xi, yi, zi)
Example 3: plot_all_pcs
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def plot_all_pcs(profiles):
    pr = []
    for i, p in enumerate(profiles):
        # Fill NaN gaps by linear interpolation before smoothing
        mask = np.isnan(p)
        p[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), p[~mask])
        av, va = moving_average(np.log(p + 0.001), 46, 100)
        pr.append(av)
    y = np.array(pr)
    pca = PCA(n_components=6)
    pca.fit(y)
    yp = pca.transform(y)
    x = np.linspace(0, 0.9, y.shape[1])
    n_samples = 50
    plt.figure(figsize=(8, 9))
    for i in range(6):
        plt.subplot(3, 2, i + 1)
        # Vary one component at a time, zero the rest, and map back
        s = np.random.normal(scale=np.std(yp[:, i]), size=n_samples)
        v = np.zeros((6, n_samples))
        v[i] = s
        yt = pca.inverse_transform(v.T)
        plt.plot(x, np.exp(yt.T), c='b', alpha=0.3)
        plt.ylabel('PC{0:d}'.format(i + 1))
        plt.xlabel('AP position (x/L)')
    plt.tight_layout()
    plt.savefig('plots/SI_pcgrid.pdf')
    plt.clf()
Example 4: _compute_group_mean
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def _compute_group_mean(args):
    # Unpacked explicitly: Python 3 removed tuple parameters in signatures
    (cache_dir, images, normalization_name, preprocess_file, method) = args
    try:
        import numpy as np
        from cpa.profiling.cache import Cache
        from cpa.profiling.normalization import normalizations
        from scipy.stats import norm as Gaussian
        cache = Cache(cache_dir)
        normalization = normalizations[normalization_name]
        data, colnames, _ = cache.load(images, normalization=normalization)
        cellcount = np.ones(1) * data.shape[0]
        if method == 'cellcount':
            return cellcount
        if len(data) == 0:
            return np.empty(len(colnames)) * np.nan
        data = data[~np.isnan(np.sum(data, 1)), :]
        if len(data) == 0:
            return np.empty(len(colnames)) * np.nan
        if preprocess_file:
            preprocessor = cpa.util.unpickle1(preprocess_file)
            data = preprocessor(data)
        if method == 'mean':
            return np.mean(data, axis=0)
        elif method == 'mean+std':
            return np.hstack((np.mean(data, axis=0), np.std(data, axis=0)))
        elif method == 'mode':
            return mode(data, axis=0)
        elif method == 'median':
            return np.median(data, axis=0)
        elif method == 'median+mad':
            c = Gaussian.ppf(3 / 4.)
            d = np.median(data, axis=0)
            return np.hstack((d, np.median(np.fabs(data - d) / c, axis=0)))
        elif method == 'gmm2':
            max_sample_size = 2000
            if data.shape[0] > max_sample_size:
                data = data[np.random.randint(0, data.shape[0], size=max_sample_size), :]
            from sklearn.decomposition import PCA
            from sklearn.mixture import GMM  # legacy API, removed in scikit-learn 0.20
            pca = PCA(n_components=0.99).fit(data)
            pca_data = pca.transform(data)
            # gmm = GMM(2, covariance_type='full', n_iter=100000, thresh=1e-7).fit(pca_data)
            gmm = GMM(2, covariance_type='full').fit(pca_data)
            # Map the two mixture means back into the original feature space
            return pca.inverse_transform(gmm.means_).flatten()
        elif method == 'deciles':
            return np.hstack([np.percentile(data, d, axis=0) for d in range(10, 100, 10)])
        elif method == 'mean+deciles':
            return np.hstack((np.mean(data, axis=0),
                              np.hstack([np.percentile(data, d, axis=0)
                                         for d in range(10, 100, 10)])))
    except:  # catch *all* exceptions
        from traceback import print_exc
        import sys
        print_exc(None, sys.stderr)
        return None
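The gmm2 branch above depends on sklearn.mixture.GMM, which was removed in scikit-learn 0.20. On a current install, the same pattern (reduce with PCA, fit a two-component mixture, and push the component means back through inverse_transform) would look roughly like this sketch; the data here is a stand-in:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.mixture import GaussianMixture

data = np.random.RandomState(0).randn(500, 20)          # placeholder feature matrix
pca = PCA(n_components=0.99).fit(data)                  # keep 99% of the variance
pca_data = pca.transform(data)
gmm = GaussianMixture(n_components=2, covariance_type='full').fit(pca_data)
centers = pca.inverse_transform(gmm.means_).flatten()   # mixture means in feature space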
Example 5: evaluate
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def evaluate(self, idx, sampling=4):
    sub_data = self.data.loc[idx]
    if not np.isfinite(sub_data['a']):
        return
    start, stop = sub_data[['start', 'stop']].astype(int)
    if self.method == 'polar':
        thetas = np.linspace(sub_data.theta_i, sub_data.theta_f,
                             self.size * sampling)
        rhos = ellipsis_radius(thetas, sub_data.a, sub_data.b, sub_data.phi_y)
        xs = rhos * np.cos(thetas) + sub_data.x0
        ys = rhos * np.sin(thetas) + sub_data.y0
    elif self.method == 'cartesian':
        t0 = int(self.segment.loc[start, 't'])
        t1 = int(self.segment.loc[stop, 't'])
        ts = np.linspace(t0, t1, self.size * sampling)
        xs, ys = ellipsis_cartes(ts, sub_data.a, sub_data.b,
                                 sub_data.omega, sub_data.phi_x,
                                 sub_data.phi_y, sub_data.x0, sub_data.y0)
    zs = np.ones_like(xs) * sub_data.z0
    # Fit the PCA on the raw segment, then rotate the fitted ellipse
    # from the principal plane back into the original coordinates
    segdata = self.segment.loc[start:stop][self.coords].dropna()
    pca = PCA()
    pca.fit(segdata)
    ellipsis_fit = pca.inverse_transform(np.vstack((xs, ys, zs)).T)
    return ellipsis_fit
Example 6: get_reconstruction_pca
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def get_reconstruction_pca(X_train, k):
    pca = PCA(n_components=k)
    pca = pca.fit(X_train)
    X_pca = pca.transform(X_train)  # M_train x k
    X_reconstruct = pca.inverse_transform(X_pca)
    U = pca.components_
    return X_reconstruct, pca, U
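A typical use of a helper like get_reconstruction_pca is to check how much structure a rank-k projection keeps. A brief usage sketch with placeholder data:

import numpy as np

X_train = np.random.RandomState(0).randn(200, 30)
X_rec, pca, U = get_reconstruction_pca(X_train, k=5)
mse = np.mean((X_train - X_rec) ** 2)   # error of the rank-5 reconstruction
print(mse, pca.explained_variance_ratio_.sum())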
Example 7: get_reconstruction
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def get_reconstruction(X_train, k):
    pca = PCA(n_components=k)
    pca = pca.fit(X_train)
    X_pca = pca.transform(X_train)  # M_train x k
    X_reconstruct = pca.inverse_transform(X_pca)
    return X_reconstruct, pca
Example 8: plot_pca_illustration
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def plot_pca_illustration():
    rnd = np.random.RandomState(5)
    X_ = rnd.normal(size=(300, 2))
    X_blob = np.dot(X_, rnd.normal(size=(2, 2))) + rnd.normal(size=2)
    pca = PCA()
    pca.fit(X_blob)
    X_pca = pca.transform(X_blob)
    S = X_pca.std(axis=0)

    fig, axes = plt.subplots(2, 2, figsize=(10, 10))
    axes = axes.ravel()

    axes[0].set_title("Original data")
    axes[0].scatter(X_blob[:, 0], X_blob[:, 1], c=X_pca[:, 0], linewidths=0,
                    s=60, cmap='viridis')
    axes[0].set_xlabel("feature 1")
    axes[0].set_ylabel("feature 2")
    axes[0].arrow(pca.mean_[0], pca.mean_[1], S[0] * pca.components_[0, 0],
                  S[0] * pca.components_[0, 1], width=.1, head_width=.3,
                  color='k')
    axes[0].arrow(pca.mean_[0], pca.mean_[1], S[1] * pca.components_[1, 0],
                  S[1] * pca.components_[1, 1], width=.1, head_width=.3,
                  color='k')
    axes[0].text(-1.5, -.5, "Component 2", size=14)
    axes[0].text(-4, -4, "Component 1", size=14)
    axes[0].set_aspect('equal')

    axes[1].set_title("Transformed data")
    axes[1].scatter(X_pca[:, 0], X_pca[:, 1], c=X_pca[:, 0], linewidths=0,
                    s=60, cmap='viridis')
    axes[1].set_xlabel("First principal component")
    axes[1].set_ylabel("Second principal component")
    axes[1].set_aspect('equal')
    axes[1].set_ylim(-8, 8)

    pca = PCA(n_components=1)
    pca.fit(X_blob)
    X_inverse = pca.inverse_transform(pca.transform(X_blob))

    axes[2].set_title("Transformed data w/ second component dropped")
    axes[2].scatter(X_pca[:, 0], np.zeros(X_pca.shape[0]), c=X_pca[:, 0],
                    linewidths=0, s=60, cmap='viridis')
    axes[2].set_xlabel("First principal component")
    axes[2].set_aspect('equal')
    axes[2].set_ylim(-8, 8)

    axes[3].set_title("Back-rotation using only first component")
    axes[3].scatter(X_inverse[:, 0], X_inverse[:, 1], c=X_pca[:, 0],
                    linewidths=0, s=60, cmap='viridis')
    axes[3].set_xlabel("feature 1")
    axes[3].set_ylabel("feature 2")
    axes[3].set_aspect('equal')
    axes[3].set_xlim(-8, 4)
    axes[3].set_ylim(-8, 4)
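The "back-rotation" in the last panel is simply the rank-1 reconstruction: each point is replaced by its orthogonal projection onto the first principal axis. A quick numeric check of that equivalence, reusing the same synthetic blob:

import numpy as np
from sklearn.decomposition import PCA

rnd = np.random.RandomState(5)
X = np.dot(rnd.normal(size=(300, 2)), rnd.normal(size=(2, 2))) + rnd.normal(size=2)
pca = PCA(n_components=1).fit(X)
X_inv = pca.inverse_transform(pca.transform(X))
# The same thing written out with the first component directly
w = pca.components_[0]
X_proj = pca.mean_ + np.outer(np.dot(X - pca.mean_, w), w)
assert np.allclose(X_inv, X_proj)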
Example 9: perform_kde_with_pca
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def perform_kde_with_pca(data, params, num_comps):
    pca = PCA(n_components=num_comps, whiten=False)
    data = pca.fit_transform(data)
    grid = GridSearchCV(KernelDensity(), params)
    grid.fit(data)
    kde = grid.best_estimator_
    # Sample in the reduced space, then map the draw back to spectrum space
    new_data = kde.sample(1, random_state=0)
    new_data = pca.inverse_transform(new_data)
    quasar_plots.plot_spectrum(wavelengths, new_data[0])
    plt.title('KDE with PCA')
Example 10: libPCA
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def libPCA(X, precision=0.95):
    m, n = X.shape
    for k in range(1, n):
        pca = PCA(k)
        Z = pca.fit_transform(X)
        # Cumulative explained variance, not just the first component's share
        explained = np.sum(pca.explained_variance_ratio_)
        print('k: %s, explained variance: %s' % (k, explained))
        if explained > precision:
            break
    Xinv = pca.inverse_transform(Z)
    return k, Z, Xinv
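Refitting once per candidate k, as libPCA does, is wasteful: scikit-learn can choose the component count for a target explained-variance fraction in a single fit when n_components is a float between 0 and 1. A minimal equivalent, with X as in libPCA:

from sklearn.decomposition import PCA

pca = PCA(n_components=0.95)    # smallest k whose cumulative variance ratio exceeds 0.95
Z = pca.fit_transform(X)
Xinv = pca.inverse_transform(Z)
k = pca.n_components_           # the k that was actually selected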
Example 11: test_pca_inverse
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def test_pca_inverse():
    """Test that the projection of data can be inverted."""
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)    # spherical data
    X[:, 1] *= 0.00001     # make middle component relatively small
    X += [5, 4, 3]         # make a large mean

    # Check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    pca = PCA(n_components=2).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)

    # Same as above with whitening (approximate reconstruction)
    pca = PCA(n_components=2, whiten=True)
    pca.fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
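The whitened case still reconstructs X because inverse_transform also undoes the whitening: the scores are rescaled by the per-component standard deviations before being rotated back. A short sketch verifying that relationship against the public attributes:

import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(0).randn(50, 3)
pca = PCA(n_components=2, whiten=True).fit(X)
Y = pca.transform(X)
# Manual inverse: rescale, rotate back through the components, re-add the mean
X_manual = (Y * np.sqrt(pca.explained_variance_)) @ pca.components_ + pca.mean_
assert np.allclose(X_manual, pca.inverse_transform(Y))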
Example 12: get_pca
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def get_pca(**kwargs):
    from sklearn.decomposition import PCA
    data_input = kwargs['data_input']
    n_components = kwargs['n_components']
    pca = PCA(n_components=n_components)
    scores = pca.fit_transform(data_input)
    inverse_data = pca.inverse_transform(scores)
    return {'scores': scores, 'loadings': pca.components_, 'model_fit': inverse_data}
Example 13: test_randomized_pca_inverse
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def test_randomized_pca_inverse():
    # Test that randomized PCA is invertible on dense data
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)    # spherical data
    X[:, 1] *= 0.00001     # make middle component relatively small
    X += [5, 4, 3]         # make a large mean

    # Check that we can find the original data from the transformed signal
    # (since the data is almost of rank n_components)
    pca = PCA(n_components=2, svd_solver="randomized", random_state=0).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=2)

    # Same as above with whitening (approximate reconstruction)
    pca = PCA(n_components=2, whiten=True, svd_solver="randomized",
              random_state=0).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
    assert_less(relative_max_delta, 1e-5)
Example 14: Preprocess
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
class Preprocess(BaseEstimator, TransformerMixin):
    """ Class used for preprocessing dataset x.

    Standard scaling and PCA transformation are performed.

    Args:
        pca_n: if integer, pca_n main components are selected;
            if 0 < pca_n < 1, selects n components so that
            the fraction of explained variance is greater than pca_n
    Notes:
        Call fit then transform instead of fit_transform to get
        the explained variance printed.
    """
    def __init__(self, pca_n=None, scaler=Scaler(axis=1)):
        self.pca = PCA(pca_n)
        self.scaler = scaler

    def fit(self, x):
        self.pca.fit(self.scaler.fit_transform(x))
        print("Variance explained:", np.sum(self.pca.explained_variance_ratio_))

    def transform(self, x):
        return self.pca.transform(self.scaler.transform(x))

    def fit_transform(self, x):
        """ Override fit_transform to avoid calling transform
        twice on the standard scaler (in fit and transform).
        """
        return self.pca.fit_transform(self.scaler.fit_transform(x))

    def inverse_transform(self, x, only_pca=False):
        """ First undo the PCA transformation, then undo the scaling
        unless only_pca is set.
        """
        if only_pca:
            return self.pca.inverse_transform(x)
        return self.scaler.inverse_transform(self.pca.inverse_transform(x))
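The same scale-then-project composition, including the chained inverse, is also available from sklearn.pipeline.Pipeline, whose inverse_transform applies each step's inverse in reverse order. A sketch with StandardScaler standing in for the custom Scaler used above:

import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

pipe = Pipeline([('scale', StandardScaler()), ('pca', PCA(n_components=0.95))])
X = np.random.RandomState(0).randn(100, 10)
Z = pipe.fit_transform(X)
X_back = pipe.inverse_transform(Z)   # undoes the PCA, then undoes the scaling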
Example 15: run_PCA
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import inverse_transform [as alias]
def run_PCA(self):
    self.__normalize_col_array()
    copy_col_array = self._col_array.copy()
    # Zero out the response columns
    for col_name in copy_col_array:
        if self._model_info[col_name][WORD_TYPE] == RESPONSE:
            copy_col_array[col_name] = 0
            # raise NotImplementedError  # How are we dealing with NaNs?
        elif self._model_info[col_name][WORD_TYPE] == PREDICTOR:
            pass
        else:
            raise NameError
    model = PCA(n_components='mle', whiten=True)
    model.fit_transform(self._col_array)
    # Project the zeroed copy into the fitted PC space before inverting;
    # inverse_transform expects component scores, not original-dimension data
    self._results = model.inverse_transform(model.transform(copy_col_array))