本文整理匯總了Python中sklearn.decomposition.KernelPCA方法的典型用法代碼示例。如果您正苦於以下問題:Python decomposition.KernelPCA方法的具體用法?Python decomposition.KernelPCA怎麽用?Python decomposition.KernelPCA使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類sklearn.decomposition
的用法示例。
在下文中一共展示了decomposition.KernelPCA方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test_nested_circles
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def test_nested_circles():
    """Check that the first two RBF-KPCA components linearly separate nested circles."""
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)

    # In the raw 2D space the nested circles are not linearly separable.
    baseline = Perceptron(max_iter=5).fit(X, y)
    assert_less(baseline.score(X, y), 0.8)

    # Project the circles onto the first 2 components of an RBF Kernel PCA.
    # NOTE: the gamma value is data dependent. If this test breaks and gamma
    # has to be updated, the Kernel PCA example must be updated too.
    kpca = KernelPCA(kernel="rbf", n_components=2,
                     fit_inverse_transform=True, gamma=2.)
    X_kpca = kpca.fit_transform(X)

    # In the embedded space the data is perfectly linearly separable.
    embedded = Perceptron(max_iter=5).fit(X_kpca, y)
    assert_equal(embedded.score(X_kpca, y), 1.0)
示例2: test_kernel_pca_sparse
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def test_kernel_pca_sparse():
    """KernelPCA must accept scipy sparse matrices for fit/transform."""
    rng = np.random.RandomState(0)
    X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
    X_pred = sp.csr_matrix(rng.random_sample((2, 4)))

    for eigen_solver in ("auto", "arpack"):
        for kernel in ("linear", "rbf", "poly"):
            kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
                             fit_inverse_transform=False)

            # fit_transform and fit().transform() must agree up to sign flips.
            first = kpca.fit_transform(X_fit)
            second = kpca.fit(X_fit).transform(X_fit)
            assert_array_almost_equal(np.abs(first), np.abs(second))

            # Projecting unseen data keeps the learnt dimensionality.
            projected = kpca.transform(X_pred)
            assert_equal(projected.shape[1], first.shape[1])

            # Inverse transform: left disabled in the original test.
            # X_pred2 = kpca.inverse_transform(projected)
            # assert_equal(X_pred2.shape, X_pred.shape)
示例3: test_leave_zero_eig
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def test_leave_zero_eig():
    """fit().transform() must match fit_transform() when zero eigenvalues are
    kept (remove_zero_eig=False).

    Non-regression test for issue #12141 (PR #12143).
    """
    X_fit = np.array([[1, 1], [0, 0]])

    # Even with every numpy warning enabled there must be no div-by-zero.
    with pytest.warns(None) as record:
        with np.errstate(all='warn'):
            k = KernelPCA(n_components=2, remove_zero_eig=False,
                          eigen_solver="dense")
            A = k.fit(X_fit).transform(X_fit)  # fit, then transform
            B = k.fit_transform(X_fit)         # both at once
            assert_array_almost_equal(np.abs(A), np.abs(B))

    # Warnings about a badly conditioned kernel are acceptable, but numpy's
    # division-by-zero warnings are not. (Numpy's division-by-zero message
    # has many variants, but we know it is a RuntimeWarning, so check that.)
    for w in record:
        assert not issubclass(w.category, RuntimeWarning)
示例4: test_kernel_pca_precomputed
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def test_kernel_pca_precomputed():
    """A precomputed linear kernel must give the same embedding as built-in."""
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))

    # Gram matrices for the precomputed-kernel variants.
    K_fit = np.dot(X_fit, X_fit.T)
    K_pred = np.dot(X_pred, X_fit.T)

    for eigen_solver in ("dense", "arpack"):
        direct = KernelPCA(4, eigen_solver=eigen_solver).\
            fit(X_fit).transform(X_pred)
        precomputed = KernelPCA(
            4, eigen_solver=eigen_solver,
            kernel='precomputed').fit(K_fit).transform(K_pred)

        train_direct = KernelPCA(
            4, eigen_solver=eigen_solver,
            kernel='precomputed').fit_transform(K_fit)
        train_precomputed = KernelPCA(
            4, eigen_solver=eigen_solver,
            kernel='precomputed').fit(K_fit).transform(K_fit)

        # Embeddings agree up to component sign flips.
        assert_array_almost_equal(np.abs(direct), np.abs(precomputed))
        assert_array_almost_equal(np.abs(train_direct),
                                  np.abs(train_precomputed))
示例5: test_gridsearch_pipeline_precomputed
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def test_gridsearch_pipeline_precomputed():
    """Grid-search a Perceptron on top of a precomputed-kernel KPCA to
    separate the circles data."""
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)

    pipeline = Pipeline([
        ("kernel_pca", KernelPCA(kernel="precomputed", n_components=2)),
        ("Perceptron", Perceptron(max_iter=5)),
    ])
    grid_search = GridSearchCV(pipeline, cv=3,
                               param_grid=dict(Perceptron__max_iter=np.arange(1, 5)))

    # The pipeline consumes a precomputed RBF kernel, not raw samples.
    grid_search.fit(rbf_kernel(X, gamma=2.), y)
    assert_equal(grid_search.best_score_, 1)
# 0.23. warning about tol not having its correct default value.
示例6: dim_reduction_method
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def dim_reduction_method(self):
    """Return the dimensionality reduction estimator selected by
    ``self.dim_reduction``.

    Returns:
        A freshly constructed sklearn estimator for a recognised method
        name, or None when ``self.dim_reduction`` is None (dimensionality
        reduction disabled).

    Raises:
        ValueError: if ``self.dim_reduction`` is neither None nor one of
            the supported method names.
    """
    # Explicit identity check: '' or other falsy values must still raise.
    if self.dim_reduction is None:
        return None
    if self.dim_reduction == 'pca':
        return PCA()
    if self.dim_reduction == 'factor-analysis':
        return FactorAnalysis()
    if self.dim_reduction == 'fast-ica':
        return FastICA()
    if self.dim_reduction == 'kernel-pca':
        return KernelPCA()
    if self.dim_reduction == 'sparse-pca':
        return SparsePCA()
    if self.dim_reduction == 'truncated-svd':
        return TruncatedSVD()
    # Implicit string concatenation instead of a backslash continuation:
    # the original embedded a long run of spaces (and a missing quote after
    # "fast-ica") in the user-facing message.
    raise ValueError(
        '%s is not a supported dimensionality reduction method. '
        'Valid inputs are: "pca","factor-analysis","fast-ica",'
        '"kernel-pca","sparse-pca","truncated-svd".'
        % (self.dim_reduction))
示例7: __init__
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def __init__(self, options):
    """Build an RBF-kernel PCA estimator from the parsed search options."""
    self.handle_options(options)

    params = convert_params(
        options.get('params', {}),
        ints=['k', 'degree', 'alpha', 'max_iteration'],
        floats=['gamma', 'tolerance'],
        aliases={'k': 'n_components', 'tolerance': 'tol',
                 'max_iteration': 'max_iter'},
    )
    params['kernel'] = 'rbf'

    if 'n_components' not in params:
        # Default: at most 2 components, capped by the feature count.
        params['n_components'] = min(2, len(options['feature_variables']))
    elif params['n_components'] == 0:
        raise RuntimeError('k needs to be greater than zero.')

    self.estimator = _KPCA(**params)

# sklearn's KernelPCA.transform tries to form a complete kernel
# matrix of its input and the original data the model was fit
# on. Unfortunately, this might consume a colossal amount of
# memory for large inputs. We chunk the input to cut down on this.
示例8: test_objectmapper
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def test_objectmapper(self):
    """df.decomposition accessors must map to the sklearn.decomposition classes."""
    df = pdml.ModelFrame([])
    # Same identity checks as before, driven by a list of class names.
    for cls_name in ['PCA', 'IncrementalPCA', 'KernelPCA', 'FactorAnalysis',
                     'FastICA', 'TruncatedSVD', 'NMF', 'SparsePCA',
                     'MiniBatchSparsePCA', 'SparseCoder', 'DictionaryLearning',
                     'MiniBatchDictionaryLearning', 'LatentDirichletAllocation']:
        self.assertIs(getattr(df.decomposition, cls_name),
                      getattr(decomposition, cls_name))
示例9: test_Decompositions_KernelPCA
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def test_Decompositions_KernelPCA(self):
    """ModelFrame-wrapped KernelPCA must match plain sklearn KernelPCA."""
    iris = datasets.load_iris()
    df = pdml.ModelFrame(iris)

    frame_model = df.decomposition.KernelPCA()
    plain_model = decomposition.KernelPCA()
    df.fit(frame_model)
    plain_model.fit(iris.data, iris.target)

    result = df.transform(frame_model)
    expected = plain_model.transform(iris.data)

    self.assertIsInstance(result, pdml.ModelFrame)
    tm.assert_series_equal(df.target, result.target)
    # Compare the leading 40 columns, as in the original test.
    self.assert_numpy_array_almost_equal(result.data.values[:, :40],
                                         expected[:, :40])
示例10: to_uts
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def to_uts(mts, transformer):
    """Reduce a multivariate time series (MTS) to a univariate one (UTS).

    Args:
        mts (ndarray): multivariate time series.
        transformer (class): PCA, KernelPCA, or TSNE.

    Returns:
        ndarray: 1-D univariate series.
    """
    if transformer == KernelPCA:
        reducer = KernelPCA(n_components=1, kernel="rbf")
    elif transformer == TSNE:
        reducer = TSNE(n_components=1, perplexity=40, n_iter=300)
    else:
        # Any other value falls back to plain PCA, as before.
        reducer = PCA(n_components=1)
    return reducer.fit_transform(mts).reshape(-1)
示例11: reduce_KernelPCA
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def reduce_KernelPCA(x, **kwd_params):
    '''
    Fit a kernel PCA model on x and return the fitted estimator.
    kwd_params are forwarded to sklearn.decomposition.KernelPCA.
    '''
    estimator = dc.KernelPCA(**kwd_params)
    return estimator.fit(x)

# the file name of the dataset
# the file name of the dataset
示例12: reduce_KernelPCA
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def reduce_KernelPCA(x, **kwd_params):
    '''
    Dimensionality reduction with kernel PCA; returns the fitted model.
    Keyword parameters are passed straight to dc.KernelPCA.
    '''
    return dc.KernelPCA(**kwd_params).fit(x)

# get the sample
# get the sample
示例13: test_kernel_pca
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def test_kernel_pca():
    """Exercise KernelPCA across solvers and kernels, including a callable."""
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))

    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    for eigen_solver in ("auto", "dense", "arpack"):
        for kernel in ("linear", "rbf", "poly", histogram):
            # The histogram kernel produces a singular matrix inside
            # linalg.solve, so skip the inverse transform for it.
            # XXX use a least-squares approximation?
            inv = not callable(kernel)

            kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
                             fit_inverse_transform=inv)

            # fit_transform and fit().transform() agree up to sign flips.
            transformed = kpca.fit_transform(X_fit)
            refit = kpca.fit(X_fit).transform(X_fit)
            assert_array_almost_equal(np.abs(transformed), np.abs(refit))

            # Non-regression: gamma used to default to 0, forcing all
            # eigenvalues to 0 under the poly kernel.
            assert_not_equal(transformed.size, 0)

            # New data keeps the learnt dimensionality.
            projected = kpca.transform(X_pred)
            assert_equal(projected.shape[1], transformed.shape[1])

            if inv:
                reconstructed = kpca.inverse_transform(projected)
                assert_equal(reconstructed.shape, X_pred.shape)
示例14: test_kernel_pca_invalid_parameters
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def test_kernel_pca_invalid_parameters():
    """fit_inverse_transform cannot be combined with a precomputed kernel."""
    assert_raises(ValueError, KernelPCA, 10,
                  fit_inverse_transform=True, kernel='precomputed')
示例15: test_kernel_pca_consistent_transform
# 需要導入模塊: from sklearn import decomposition [as 別名]
# 或者: from sklearn.decomposition import KernelPCA [as 別名]
def test_kernel_pca_consistent_transform():
    """transform() must rely on the stored X_fit_, not on the caller's array."""
    state = np.random.RandomState(0)
    X = state.rand(10, 10)
    kpca = KernelPCA(random_state=state).fit(X)

    before = kpca.transform(X)
    X_copy = X.copy()
    X[:, 0] = 666  # mutate the original training array in place
    after = kpca.transform(X_copy)

    # Results are unchanged: the model kept its own unmodified copy.
    assert_array_almost_equal(before, after)