本文整理汇总了Python中sklearn.decomposition.IncrementalPCA.fit方法的典型用法代码示例。如果您正苦于以下问题:Python IncrementalPCA.fit方法的具体用法?Python IncrementalPCA.fit怎么用?Python IncrementalPCA.fit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.decomposition.IncrementalPCA
的用法示例。
在下文中一共展示了IncrementalPCA.fit方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: ipca
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
def ipca(mov, components = 50, batch =1000):
    """Approximate the SVD of a movie via incremental PCA.

    Flattens each frame to a pixel vector, fits an IncrementalPCA on
    the (pixels, frames) matrix, and derives the reduced movie plus
    the temporal/spatial components.

    Returns
    -------
    eigenseries : temporal principal components (pixel time series)
    eigenframes : approximate spatial eigenframes
    proj_frame_vectors : movie reconstructed from the kept components
    """
    # Vectorize the images: one column per frame.
    n_frames, height, width = mov.shape
    n_pixels = height * width
    samples = np.reshape(mov, (n_frames, n_pixels)).T

    # Incremental PCA approximates the SVD of the pixel/time matrix.
    model = IncrementalPCA(n_components=components, batch_size=batch)
    model.fit(samples)

    # Reduced movie: project onto the components and map back.
    proj_frame_vectors = model.inverse_transform(model.transform(samples))

    # Temporal principal components (pixel time series).
    eigenseries = model.components_.T

    # Rows of eigenseries are approximately orthogonal, so multiplying
    # the projected frame matrix by this transpose on the right yields
    # approximate eigenframes.
    eigenframes = proj_frame_vectors @ eigenseries
    return eigenseries, eigenframes, proj_frame_vectors
示例2: test_incremental_pca_num_features_change
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
def test_incremental_pca_num_features_change():
    """Feeding partial_fit a different feature count must raise ValueError."""
    rng = np.random.RandomState(1999)
    sample_count = 100
    narrow = rng.randn(sample_count, 20)
    wide = rng.randn(sample_count, 50)
    model = IncrementalPCA(n_components=None)
    model.fit(narrow)
    # The model was fitted on 20 features; 50 features is incompatible.
    assert_raises(ValueError, model.partial_fit, wide)
示例3: PCA_Train
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
def PCA_Train(data, result_fold, n_components=128):
    """Fit an IncrementalPCA model on `data` and persist it to disk.

    Parameters
    ----------
    data : array-like of shape (n_samples, n_features)
        Training samples.
    result_fold : str
        Output directory for the serialized model.
    n_components : int, default 128
        Number of principal components to keep.

    Returns
    -------
    IncrementalPCA
        The fitted model; it is also dumped to ``<result_fold>/pca_model.m``.
    """
    import os

    print_info("PCA training (n_components=%d)..." % n_components)
    pca = IncrementalPCA(n_components=n_components)
    pca.fit(data)
    # os.path.join avoids the broken path the original plain string
    # concatenation produced when result_fold lacked a trailing separator.
    joblib.dump(pca, os.path.join(result_fold, "pca_model.m"))
    print_info("PCA done.")
    return pca
示例4: test_singular_values
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
def test_singular_values():
# Check that the IncrementalPCA output has the correct singular values
# NOTE(review): statement order below is behavior-critical — the same RNG
# object is consumed by make_low_rank_matrix and by the estimators, so
# reordering would change the drawn data.
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 100
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=10, random_state=rng)
# Fit an exact full-SVD PCA and an IncrementalPCA on the same data and
# compare their singular values (to ~2 decimals for the incremental one).
pca = PCA(n_components=10, svd_solver='full', random_state=rng).fit(X)
ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
# Compare to the Frobenius norm: sum of squared singular values should
# equal ||X_transformed||_F^2.
X_pca = pca.transform(X)
X_ipca = ipca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(ipca.singular_values_**2.0),
np.linalg.norm(X_ipca, "fro")**2.0, 2)
# Compare to the 2-norms of the score vectors: each singular value is the
# Euclidean norm of the corresponding projected column.
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(ipca.singular_values_,
np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2)
# Set the singular values and see what we get back: build a rank-3 matrix
# with known singular values (3.142, 2.718, 1.0) and check both estimators
# recover them exactly.
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=3, random_state=rng)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
ipca = IncrementalPCA(n_components=3, batch_size=100)
X_pca = pca.fit_transform(X)
# Normalize the scores, then scale the first two components to the
# target singular values before reconstructing X_hat.
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
ipca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
示例5: generate_pca_compression
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
def generate_pca_compression(X, n_components=16, batch_size=100):
    """
    Compresses the data using sklearn's IncrementalPCA implementation.
    :param X: Data (n_samples, n_features)
    :param n_components: Number of dimensions for PCA to keep
    :param batch_size: Batch size for incremental PCA
    :return: X_prime (the compressed representation), pca
    """
    reducer = IncrementalPCA(n_components=n_components, batch_size=batch_size)
    reducer.fit(X)
    compressed = reducer.transform(X)
    return compressed, reducer
示例6: test_incremental_pca_set_params
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
def test_incremental_pca_set_params():
    """Changing n_components via set_params after fitting must raise on partial_fit."""
    rng = np.random.RandomState(1999)
    n_samples, n_features = 100, 20
    X = rng.randn(n_samples, n_features)
    X2 = rng.randn(n_samples, n_features)
    X3 = rng.randn(n_samples, n_features)
    model = IncrementalPCA(n_components=20)
    model.fit(X)
    # Shrinking the component count is rejected on the next partial_fit.
    model.set_params(n_components=10)
    assert_raises(ValueError, model.partial_fit, X2)
    # Growing it (but not back to the fitted value) is rejected too.
    model.set_params(n_components=15)
    assert_raises(ValueError, model.partial_fit, X3)
    # Restoring the original setting lets fitting continue.
    model.set_params(n_components=20)
    model.partial_fit(X)
示例7: test_incremental_pca
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
def test_incremental_pca():
    """Incremental PCA on dense arrays."""
    X = iris.data
    chunk = X.shape[0] // 3
    ipca = IncrementalPCA(n_components=2, batch_size=chunk)
    pca = PCA(n_components=2)
    pca.fit_transform(X)
    projected = ipca.fit_transform(X)
    np.testing.assert_equal(projected.shape, (X.shape[0], 2))
    # Explained variance should roughly agree with the exact PCA solution.
    assert_almost_equal(ipca.explained_variance_ratio_.sum(),
                        pca.explained_variance_ratio_.sum(), 1)
    # Covariance and precision must be (approximate) inverses of each other.
    for k in [1, 2, X.shape[1]]:
        ipca = IncrementalPCA(k, batch_size=chunk)
        ipca.fit(X)
        product = np.dot(ipca.get_covariance(), ipca.get_precision())
        assert_array_almost_equal(product, np.eye(X.shape[1]))
示例8: IPCA
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
def IPCA(self, components = 50, batch =1000):
    '''
    Iterative Principal Component Analysis; see
    sklearn.decomposition.IncrementalPCA.

    Parameters:
    ------------
    components (default 50) = number of independent components to return
    batch (default 1000) = number of pixels to load into memory
        simultaneously in IPCA. More requires more memory but leads to a
        better fit.

    Returns
    -------
    eigenseries: principal components (pixel time series) and associated
        singular values
    eigenframes: obtained by multiplying the projected frame matrix by the
        projected movie (whitened frames?)
    proj_frame_vectors: the reduced version of the movie vectors using only
        the principal component projection
    '''
    # Vectorize the images: one column per frame.
    n_frames, height, width = np.shape(self)
    n_pixels = height * width
    samples = np.reshape(self, (n_frames, n_pixels)).T

    # Incremental PCA approximates the SVD of the pixel/time matrix.
    model = IncrementalPCA(n_components=components, batch_size=batch)
    model.fit(samples)

    # Reduced movie: project onto the components and map back.
    proj_frame_vectors = model.inverse_transform(model.transform(samples))

    # Temporal principal components (pixel time series).
    eigenseries = model.components_.T

    # Rows of eigenseries are approximately orthogonal, so multiplying the
    # projected frame matrix by this transpose on the right yields
    # approximate eigenframes.
    eigenframes = proj_frame_vectors @ eigenseries
    return eigenseries, eigenframes, proj_frame_vectors
示例9: PCALDA
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
class PCALDA(AbstractFeature):
    """Feature extractor chaining IncrementalPCA with LDA."""

    def __init__(self, options):
        # Copy every configuration option onto the instance as an attribute.
        for key in options:
            setattr(self, key, options[key])

    def compute(self, X, y):
        """Fit PCA then LDA on (X, y); return the LDA-projected training data."""
        if X.ndim == 3:
            X = X.reshape((X.shape[0], X.shape[1] * X.shape[2]))
        # Default PCA dimensionality: n_samples - n_classes.
        if not hasattr(self, "pca_dim"):
            self.pca_dim = len(X) - len(np.unique(y))
        # PCA stage.
        self.ipca = IncrementalPCA(n_components=self.pca_dim, batch_size=None)
        self.ipca.fit(X)
        X_pca = self.ipca.transform(X)
        print("PCA train shape")
        print(X_pca.shape)
        # LDA stage on the PCA-reduced data.
        self.lda = sklearn.lda.LDA()
        self.lda.fit(X_pca, y)
        return self.lda.transform(X_pca)

    def extract(self, x):
        """Project a single sample through the fitted PCA + LDA pipeline."""
        X = np.array([x])
        if X.ndim == 3:
            X = X.reshape((X.shape[0], X.shape[1] * X.shape[2]))
        projected = self.lda.transform(self.ipca.transform(X))
        return list(projected[0])

    def __repr__(self):
        return "PCALDA"
示例10: print
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
# NOTE(review): script fragment — `rowstst`, `Train_matrix` and
# `csv_pca_train_out_path` are defined earlier, outside this excerpt,
# and the final `with` block's body is cut off below.
Test_matrix = np.array(rowstst)
print('\nTest data loaded!\n')
print('#================================================================#')
print('#================================================================#')
print('\nshape of Training Matrix = ', Train_matrix.shape)
print('shape of Test Matrix = ', Test_matrix.shape,'\n')
print('#================================================================#')
#========================= Principal Component Analysis ==========================#
print ('\nRunning Incrmental PCA with 200 Componenets and 5000 batch size')
# Fit on the training matrix only, then project both train and test
# with the same fitted model.
pca = IncrementalPCA(n_components=200, batch_size = 5000)
pca.fit(Train_matrix)
Train_matrix = pca.transform(Train_matrix)
Test_matrix = pca.transform(Test_matrix)
parameters = pca.get_params()
# Per-component and cumulative explained-variance ratios.
variance = pca.explained_variance_ratio_
cumvariance = pca.explained_variance_ratio_.cumsum()
# NOTE(review): the second savetxt writes `variance`, not `cumvariance`,
# despite its filename — looks like a copy/paste slip; confirm if re-enabled.
#np.savetxt("pca_result_variance_200.csv", variance, delimiter=",")
#np.savetxt("pca_result_cum_variance_200.csv", variance, delimiter=",")
print ('\nPCA complete!\n')
print ('#================================================================#')
print('\nWriting transformed Train and Test matrices to CSV\n')
print('#================================================================#')
with open(csv_pca_train_out_path, 'w', newline='') as csvtrainoutfile:
示例11: calc_ipca
# 需要导入模块: from sklearn.decomposition import IncrementalPCA [as 别名]
# 或者: from sklearn.decomposition.IncrementalPCA import fit [as 别名]
def calc_ipca(r, key, xyz, N, title=None):
    """Fit an IncrementalPCA on flattened coordinate data.

    Flattens `xyz` from (n_frames, ...) to a 2-D matrix and fits an
    IncrementalPCA with default settings.  `r`, `key`, `N` and `title`
    are accepted for interface compatibility but are not used here.
    """
    flat_dim = np.prod(xyz.shape[1:])
    model = IncrementalPCA()
    flattened = xyz.reshape(len(xyz), flat_dim)
    model.fit(flattened)
    return model