This article collects typical usage examples of the Python method sklearn.kernel_approximation.Nystroem. If you are wondering how kernel_approximation.Nystroem is used in practice, or looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the module this method belongs to, sklearn.kernel_approximation.
The following shows 15 code examples of kernel_approximation.Nystroem, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
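Before the collected test examples, here is a minimal, hedged usage sketch: approximate an RBF kernel feature map with Nystroem and train a linear classifier on the transformed features. The dataset, the gamma value, and the SGDClassifier downstream step are illustrative choices, not taken from the examples below.

from sklearn.datasets import load_digits
from sklearn.kernel_approximation import Nystroem
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import make_pipeline

# Load the digits dataset and scale pixel values into [0, 1].
X, y = load_digits(return_X_y=True)
X = X / 16.0

# Nystroem approximates the RBF kernel feature map using n_components landmark samples;
# a linear SGD classifier trained on the transformed features then mimics a kernelized model.
model = make_pipeline(
    Nystroem(kernel="rbf", gamma=0.2, n_components=300, random_state=0),
    SGDClassifier(max_iter=1000, tol=1e-3, random_state=0),
)
model.fit(X, y)
print("training accuracy: {:.3f}".format(model.score(X, y)))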
Example 1: test_nystroem_default_parameters
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_nystroem_default_parameters():
    rnd = np.random.RandomState(42)
    X = rnd.uniform(size=(10, 4))

    # rbf kernel should behave as gamma=None by default
    # aka gamma = 1 / n_features
    nystroem = Nystroem(n_components=10)
    X_transformed = nystroem.fit_transform(X)
    K = rbf_kernel(X, gamma=None)
    K2 = np.dot(X_transformed, X_transformed.T)
    assert_array_almost_equal(K, K2)

    # chi2 kernel should behave as gamma=1 by default
    nystroem = Nystroem(kernel='chi2', n_components=10)
    X_transformed = nystroem.fit_transform(X)
    K = chi2_kernel(X, gamma=1)
    K2 = np.dot(X_transformed, X_transformed.T)
    assert_array_almost_equal(K, K2)
Example 2: test_import_from_sklearn_pipeline_feature_union
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_import_from_sklearn_pipeline_feature_union(self):
    from sklearn.pipeline import FeatureUnion
    from sklearn.decomposition import PCA
    from sklearn.kernel_approximation import Nystroem
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.pipeline import make_pipeline
    union = FeatureUnion([("pca", PCA(n_components=1)), ("nys", Nystroem(n_components=2, random_state=42))])
    sklearn_pipeline = make_pipeline(union, KNeighborsClassifier())
    lale_pipeline = import_from_sklearn_pipeline(sklearn_pipeline)
    self.assertEqual(len(lale_pipeline.edges()), 3)
    from lale.lib.sklearn.pca import PCAImpl
    from lale.lib.sklearn.nystroem import NystroemImpl
    from lale.lib.lale.concat_features import ConcatFeaturesImpl
    from lale.lib.sklearn.k_neighbors_classifier import KNeighborsClassifierImpl
    self.assertEqual(lale_pipeline.edges()[0][0]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[0][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[1][0]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[1][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[2][0]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[2][1]._impl_class(), KNeighborsClassifierImpl)
    self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
Example 3: test_export_to_sklearn_pipeline3
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_export_to_sklearn_pipeline3(self):
    from lale.lib.lale import ConcatFeatures
    from lale.lib.sklearn import PCA
    from lale.lib.sklearn import KNeighborsClassifier, LogisticRegression, SVC
    from sklearn.feature_selection import SelectKBest
    from lale.lib.sklearn import Nystroem
    from sklearn.pipeline import FeatureUnion
    lale_pipeline = ((PCA() >> SelectKBest(k=2)) & (Nystroem(random_state=42) >> SelectKBest(k=3))
                     & (SelectKBest(k=3))) >> ConcatFeatures() >> SelectKBest(k=2) >> LogisticRegression()
    trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
    sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
    self.assertIsInstance(sklearn_pipeline.named_steps['featureunion'], FeatureUnion)
    self.assertIsInstance(sklearn_pipeline.named_steps['selectkbest'], SelectKBest)
    from sklearn.linear_model import LogisticRegression
    self.assertIsInstance(sklearn_pipeline.named_steps['logisticregression'], LogisticRegression)
    self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
Example 4: test_nystroem_approximation
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_nystroem_approximation():
    # some basic tests
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 4))

    # With n_components = n_samples this is exact
    X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
    K = rbf_kernel(X)
    assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)

    trans = Nystroem(n_components=2, random_state=rnd)
    X_transformed = trans.fit(X).transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 2))

    # test callable kernel
    def linear_kernel(X, Y):
        return np.dot(X, Y.T)

    trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
    X_transformed = trans.fit(X).transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 2))

    # test that available kernels fit and transform
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
        X_transformed = trans.fit(X).transform(X)
        assert_equal(X_transformed.shape, (X.shape[0], 2))
Example 5: test_nystroem_singular_kernel
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_nystroem_singular_kernel():
    # test that nystroem works with singular kernel matrix
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    X = np.vstack([X] * 2)  # duplicate samples

    gamma = 100
    N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
    X_transformed = N.transform(X)

    K = rbf_kernel(X, gamma=gamma)
    assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert np.all(np.isfinite(X_transformed))
Example 6: test_nystroem_poly_kernel_params
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_nystroem_poly_kernel_params():
    # Non-regression: Nystroem should pass other parameters besides gamma.
    rnd = np.random.RandomState(37)
    X = rnd.uniform(size=(10, 4))
    K = polynomial_kernel(X, degree=3.1, coef0=.1)
    nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
                        degree=3.1, coef0=.1)
    X_transformed = nystroem.fit_transform(X)
    assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
Example 7: test_nystroem_callable
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_nystroem_callable():
    # Test Nystroem on a callable.
    rnd = np.random.RandomState(42)
    n_samples = 10
    X = rnd.uniform(size=(n_samples, 4))

    def logging_histogram_kernel(x, y, log):
        """Histogram kernel that writes to a log."""
        log.append(1)
        return np.minimum(x, y).sum()

    kernel_log = []
    X = list(X)  # test input validation
    Nystroem(kernel=logging_histogram_kernel,
             n_components=(n_samples - 1),
             kernel_params={'log': kernel_log}).fit(X)
    assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)

    def linear_kernel(X, Y):
        return np.dot(X, Y.T)

    # if degree, gamma or coef0 is passed, we raise a ValueError
    msg = "Don't pass gamma, coef0 or degree to Nystroem"
    params = ({'gamma': 1}, {'coef0': 1}, {'degree': 2})
    for param in params:
        ny = Nystroem(kernel=linear_kernel, **param)
        with pytest.raises(ValueError, match=msg):
            ny.fit(X)
Example 8: test_comparison_with_scikit
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_comparison_with_scikit(self):
    import warnings
    warnings.filterwarnings("ignore")
    from lale.lib.sklearn import PCA
    import sklearn.datasets
    from lale.helpers import cross_val_score
    pca = PCA(n_components=3, random_state=42, svd_solver='arpack')
    nys = Nystroem(n_components=10, random_state=42)
    concat = ConcatFeatures()
    lr = LogisticRegression(random_state=42, C=0.1)
    trainable = (pca & nys) >> concat >> lr
    digits = sklearn.datasets.load_digits()
    X, y = sklearn.utils.shuffle(digits.data, digits.target, random_state=42)
    cv_results = cross_val_score(trainable, X, y)
    cv_results = ['{0:.1%}'.format(score) for score in cv_results]

    from sklearn.pipeline import make_pipeline, FeatureUnion
    from sklearn.decomposition import PCA as SklearnPCA
    from sklearn.kernel_approximation import Nystroem as SklearnNystroem
    from sklearn.linear_model import LogisticRegression as SklearnLR
    from sklearn.model_selection import cross_val_score
    union = FeatureUnion([("pca", SklearnPCA(n_components=3, random_state=42, svd_solver='arpack')),
                          ("nys", SklearnNystroem(n_components=10, random_state=42))])
    lr = SklearnLR(random_state=42, C=0.1)
    pipeline = make_pipeline(union, lr)
    scikit_cv_results = cross_val_score(pipeline, X, y, cv=5)
    scikit_cv_results = ['{0:.1%}'.format(score) for score in scikit_cv_results]
    self.assertEqual(cv_results, scikit_cv_results)
    warnings.resetwarnings()
Example 9: test_compose3
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_compose3(self):
    from lale.operators import make_pipeline
    nys = Nystroem(n_components=15)
    pca = PCA(n_components=10)
    lr = LogisticRegression(random_state=42)
    trainable = nys >> pca >> lr
    digits = sklearn.datasets.load_digits()
    trained = trainable.fit(digits.data, digits.target)
    predicted = trained.predict(digits.data)
Example 10: test_pca_nys_lr
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_pca_nys_lr(self):
    from lale.operators import make_union
    nys = Nystroem(n_components=15)
    pca = PCA(n_components=10)
    lr = LogisticRegression(random_state=42)
    trainable = make_union(nys, pca) >> lr
    digits = sklearn.datasets.load_digits()
    trained = trainable.fit(digits.data, digits.target)
    predicted = trained.predict(digits.data)
Example 11: test_compose4
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_compose4(self):
    from lale.operators import make_choice
    digits = sklearn.datasets.load_digits()
    ohe = OneHotEncoder(handle_unknown=OneHotEncoder.handle_unknown.ignore)
    ohe.get_params()
    no_op = NoOp()
    pca = PCA()
    nys = Nystroem()
    lr = LogisticRegression()
    knn = KNeighborsClassifier()
    step1 = ohe | no_op
    step2 = pca | nys
    step3 = lr | knn
    model_plan = step1 >> step2 >> step3
    # TODO: optimize on this plan and then fit and predict
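The TODO above leaves the optimization step open. As a hedged sketch only, assuming lale's Hyperopt wrapper (lale.lib.lale.Hyperopt) is available and using an illustrative max_evals value, the body of test_compose4 could continue along these lines:

    # Hedged sketch: assumes lale.lib.lale.Hyperopt; max_evals is an illustrative choice.
    # Search over the planned choices, then fit the best pipeline and predict with it.
    from lale.lib.lale import Hyperopt
    optimizer = Hyperopt(estimator=model_plan, max_evals=10)
    trained = optimizer.fit(digits.data, digits.target)
    predicted = trained.predict(digits.data)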
Example 12: test_import_from_sklearn_pipeline_nested_pipeline1
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_import_from_sklearn_pipeline_nested_pipeline1(self):
    from sklearn.pipeline import FeatureUnion, make_pipeline
    from sklearn.decomposition import PCA
    from sklearn.kernel_approximation import Nystroem
    from sklearn.feature_selection import SelectKBest
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.pipeline import make_pipeline
    union = FeatureUnion([("selectkbest_pca", make_pipeline(SelectKBest(k=3), FeatureUnion([('pca', PCA(n_components=1)), ('nested_pipeline', make_pipeline(SelectKBest(k=2), Nystroem()))]))), ("nys", Nystroem(n_components=2, random_state=42))])
    sklearn_pipeline = make_pipeline(union, KNeighborsClassifier())
    lale_pipeline = import_from_sklearn_pipeline(sklearn_pipeline)
    self.assertEqual(len(lale_pipeline.edges()), 8)
    # These assertions assume topological sort, which may not be unique. So the assertions are brittle.
    from lale.lib.sklearn.pca import PCAImpl
    from lale.lib.sklearn.nystroem import NystroemImpl
    from lale.lib.lale.concat_features import ConcatFeaturesImpl
    from lale.lib.sklearn.k_neighbors_classifier import KNeighborsClassifierImpl
    from lale.lib.sklearn.select_k_best import SelectKBestImpl
    self.assertEqual(lale_pipeline.edges()[0][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[0][1]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[1][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[1][1]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[2][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[2][1]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[3][0]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[3][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[4][0]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[4][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[5][0]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[5][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[6][0]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[6][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[7][0]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[7][1]._impl_class(), KNeighborsClassifierImpl)
    self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
Example 13: test_import_from_sklearn_pipeline_nested_pipeline2
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_import_from_sklearn_pipeline_nested_pipeline2(self):
    from sklearn.pipeline import FeatureUnion, make_pipeline
    from sklearn.decomposition import PCA
    from sklearn.kernel_approximation import Nystroem
    from sklearn.feature_selection import SelectKBest
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.pipeline import make_pipeline
    union = FeatureUnion([("selectkbest_pca", make_pipeline(SelectKBest(k=3), make_pipeline(SelectKBest(k=2), PCA()))), ("nys", Nystroem(n_components=2, random_state=42))])
    sklearn_pipeline = make_pipeline(union, KNeighborsClassifier())
    lale_pipeline = import_from_sklearn_pipeline(sklearn_pipeline)
    self.assertEqual(len(lale_pipeline.edges()), 5)
    from lale.lib.sklearn.pca import PCAImpl
    from lale.lib.sklearn.nystroem import NystroemImpl
    from lale.lib.lale.concat_features import ConcatFeaturesImpl
    from lale.lib.sklearn.k_neighbors_classifier import KNeighborsClassifierImpl
    from lale.lib.sklearn.select_k_best import SelectKBestImpl
    self.assertEqual(lale_pipeline.edges()[0][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[0][1]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[1][0]._impl_class(), SelectKBestImpl)
    self.assertEqual(lale_pipeline.edges()[1][1]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[2][0]._impl_class(), PCAImpl)
    self.assertEqual(lale_pipeline.edges()[2][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[3][0]._impl_class(), NystroemImpl)
    self.assertEqual(lale_pipeline.edges()[3][1]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[4][0]._impl_class(), ConcatFeaturesImpl)
    self.assertEqual(lale_pipeline.edges()[4][1]._impl_class(), KNeighborsClassifierImpl)
    self.assert_equal_predictions(sklearn_pipeline, lale_pipeline)
Example 14: test_export_to_sklearn_pipeline2
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_export_to_sklearn_pipeline2(self):
    from lale.lib.lale import ConcatFeatures
    from lale.lib.sklearn import PCA
    from lale.lib.sklearn import KNeighborsClassifier
    from sklearn.feature_selection import SelectKBest
    from lale.lib.sklearn import Nystroem
    from sklearn.pipeline import FeatureUnion
    lale_pipeline = (((PCA(svd_solver='randomized', random_state=42) & SelectKBest(k=3)) >> ConcatFeatures()) & Nystroem(random_state=42)) >> ConcatFeatures() >> KNeighborsClassifier()
    trained_lale_pipeline = lale_pipeline.fit(self.X_train, self.y_train)
    sklearn_pipeline = trained_lale_pipeline.export_to_sklearn_pipeline()
    self.assertIsInstance(sklearn_pipeline.named_steps['featureunion'], FeatureUnion)
    from sklearn.neighbors import KNeighborsClassifier
    self.assertIsInstance(sklearn_pipeline.named_steps['kneighborsclassifier'], KNeighborsClassifier)
    self.assert_equal_predictions(sklearn_pipeline, trained_lale_pipeline)
Example 15: test_two_estimators_predict
# Required imports: from sklearn import kernel_approximation [as alias]
# Or: from sklearn.kernel_approximation import Nystroem [as alias]
def test_two_estimators_predict(self):
    pipeline = StandardScaler() >> (PCA() & Nystroem() & LogisticRegression()) >> ConcatFeatures() >> NoOp() >> LogisticRegression()
    trained = pipeline.fit(self.X_train, self.y_train)
    trained.predict(self.X_test)