This article collects typical usage examples of the Python class autosklearn.pipeline.classification.SimpleClassificationPipeline. If you are wondering what SimpleClassificationPipeline is for or how to use it, the curated examples below should help.
A total of 15 code examples of the SimpleClassificationPipeline class are shown, sorted by popularity by default.
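Before the examples, here is a minimal usage sketch, not taken from the examples below, showing the basic fit / predict / predict_proba pattern they exercise. It assumes auto-sklearn and scikit-learn are installed; the iris dataset is loaded via scikit-learn purely for illustration.
import sklearn.datasets
import sklearn.metrics
from autosklearn.pipeline.classification import SimpleClassificationPipeline

# Load a small toy dataset (illustrative only, not part of the original examples).
X, y = sklearn.datasets.load_iris(return_X_y=True)

# Build the pipeline with its default configuration and train it.
pipeline = SimpleClassificationPipeline()
pipeline.fit(X, y)

# Predict labels and class probabilities, as the examples below do.
predictions = pipeline.predict(X)
probabilities = pipeline.predict_proba(X)
print(sklearn.metrics.accuracy_score(y, predictions))
print(probabilities.shape)  # (n_samples, n_classes)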
Example 1: test_predict_proba_batched
def test_predict_proba_batched(self):
    # Multiclass
    cls = SimpleClassificationPipeline(include={'classifier': ['sgd']})
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    # The object behind the last step in the pipeline
    cls_predict = unittest.mock.Mock(wraps=cls.steps[-1][1].predict_proba)
    cls.steps[-1][-1].predict_proba = cls_predict
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(84, cls_predict.call_count)
    assert_array_almost_equal(prediction_, prediction)

    # Multilabel
    cls = SimpleClassificationPipeline(include={'classifier': ['lda']})
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
    Y_train = np.array(list([(list([1 if i != y else 0 for i in range(10)]))
                             for y in Y_train]))
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    # The object behind the last step in the pipeline
    cls_predict = unittest.mock.Mock(wraps=cls.steps[-1][1].predict_proba)
    cls.steps[-1][-1].predict_proba = cls_predict
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(84, cls_predict.call_count)
    assert_array_almost_equal(prediction_, prediction)
Example 2: test_default_configuration
def test_default_configuration(self):
    for i in range(2):
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='iris')
        auto = SimpleClassificationPipeline()
        auto = auto.fit(X_train, Y_train)
        predictions = auto.predict(X_test)
        self.assertAlmostEqual(0.94,
                               sklearn.metrics.accuracy_score(predictions, Y_test))
        scores = auto.predict_proba(X_test)
Example 3: test_default_configuration_iterative_fit
def test_default_configuration_iterative_fit(self):
    classifier = SimpleClassificationPipeline(
        include={'classifier': ['random_forest'],
                 'preprocessor': ['no_preprocessing']})
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='iris')
    XT = classifier.fit_transformer(X_train, Y_train)
    for i in range(1, 11):
        classifier.iterative_fit(X_train, Y_train)
        self.assertEqual(classifier.steps[-1][-1].choice.estimator.n_estimators,
                         i)
Example 4: test_default_configuration_multilabel
def test_default_configuration_multilabel(self):
    for i in range(2):
        cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
            dataset_properties={"multilabel": True})
        default = cs.get_default_configuration()
        X_train, Y_train, X_test, Y_test = get_dataset(dataset="iris",
                                                       make_multilabel=True)
        auto = SimpleClassificationPipeline(default)
        auto = auto.fit(X_train, Y_train)
        predictions = auto.predict(X_test)
        self.assertAlmostEqual(0.9599999999999995,
                               sklearn.metrics.accuracy_score(predictions, Y_test))
        scores = auto.predict_proba(X_test)
Example 5: test_get_hyperparameter_search_space_preprocessor_contradicts_default_classifier
def test_get_hyperparameter_search_space_preprocessor_contradicts_default_classifier(self):
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        include={"preprocessor": ["densifier"]},
        dataset_properties={"sparse": True})
    self.assertEqual(cs.get_hyperparameter("classifier:__choice__").default,
                     "qda")

    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        include={"preprocessor": ["nystroem_sampler"]})
    self.assertEqual(cs.get_hyperparameter("classifier:__choice__").default,
                     "sgd")
Example 6: test_get_hyperparameter_search_space_preprocessor_contradicts_default_classifier
def test_get_hyperparameter_search_space_preprocessor_contradicts_default_classifier(self):
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        include={'preprocessor': ['densifier']},
        dataset_properties={'sparse': True})
    self.assertEqual(cs.get_hyperparameter('classifier:__choice__').default,
                     'qda')

    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        include={'preprocessor': ['nystroem_sampler']})
    self.assertEqual(cs.get_hyperparameter('classifier:__choice__').default,
                     'sgd')
Example 7: test_configurations_sparse
def test_configurations_sparse(self):
    # Use a limit of ~4GiB
    limit = 4000 * 1024 * 1024
    resource.setrlimit(resource.RLIMIT_AS, (limit, limit))

    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={'sparse': True})
    print(cs)
    for i in range(10):
        config = cs.sample_configuration()
        config._populate_values()
        if 'classifier:passive_aggressive:n_iter' in config and \
                config['classifier:passive_aggressive:n_iter'] is not None:
            config._values['classifier:passive_aggressive:n_iter'] = 5
        if 'classifier:sgd:n_iter' in config and \
                config['classifier:sgd:n_iter'] is not None:
            config._values['classifier:sgd:n_iter'] = 5

        print(config)
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
                                                       make_sparse=True)
        cls = SimpleClassificationPipeline(config, random_state=1)
        try:
            cls.fit(X_train, Y_train)
            predictions = cls.predict(X_test)
        except ValueError as e:
            if "Floating-point under-/overflow occurred at epoch" in \
                    e.args[0] or \
                    "removed all features" in e.args[0] or \
                    "all features are discarded" in e.args[0]:
                continue
            else:
                print(config)
                traceback.print_tb(sys.exc_info()[2])
                raise e
        except RuntimeWarning as e:
            if "invalid value encountered in sqrt" in e.args[0]:
                continue
            elif "divide by zero encountered in" in e.args[0]:
                continue
            elif "invalid value encountered in divide" in e.args[0]:
                continue
            elif "invalid value encountered in true_divide" in e.args[0]:
                continue
            else:
                print(config)
                raise e
        except UserWarning as e:
            if "FastICA did not converge" in e.args[0]:
                continue
            else:
                print(config)
                raise e
Example 8: _get_classification_configuration_space
def _get_classification_configuration_space(info, include):
    task_type = info['task']

    multilabel = False
    multiclass = False
    sparse = False

    if task_type == MULTILABEL_CLASSIFICATION:
        multilabel = True
    if task_type == REGRESSION:
        raise NotImplementedError()
    if task_type == MULTICLASS_CLASSIFICATION:
        multiclass = True
    if task_type == BINARY_CLASSIFICATION:
        pass

    if info['is_sparse'] == 1:
        sparse = True

    dataset_properties = {
        'multilabel': multilabel,
        'multiclass': multiclass,
        'sparse': sparse
    }

    return SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties=dataset_properties,
        include=include)
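A hedged usage sketch of the helper above: it assumes the task constants are importable from autosklearn.constants (as in the project this snippet was taken from), and passes include=None so the search space keeps every available component.
from autosklearn.constants import MULTICLASS_CLASSIFICATION

# Describe a dense multiclass task using the keys the helper reads.
info = {'task': MULTICLASS_CLASSIFICATION, 'is_sparse': 0}

# Build the configuration space without restricting any components.
cs = _get_classification_configuration_space(info, include=None)
print(cs.get_hyperparameter('classifier:__choice__'))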
Example 9: test_add_classifier
def test_add_classifier(self):
    self.assertEqual(len(classification_components._addons.components), 0)
    classification_components.add_classifier(DummyClassifier)
    self.assertEqual(len(classification_components._addons.components), 1)
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space()
    self.assertIn("DummyClassifier", str(cs))
    del classification_components._addons.components["DummyClassifier"]
Example 10: test_add_preprocessor
def test_add_preprocessor(self):
    self.assertEqual(len(preprocessing_components._addons.components), 0)
    preprocessing_components.add_preprocessor(DummyPreprocessor)
    self.assertEqual(len(preprocessing_components._addons.components), 1)
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space()
    self.assertIn("DummyPreprocessor", str(cs))
    del preprocessing_components._addons.components["DummyPreprocessor"]
Example 11: test_predict_batched_sparse
def test_predict_batched_sparse(self):
    cls = SimpleClassificationPipeline(dataset_properties={'sparse': True},
                                       include={'classifier': ['sgd']})

    # Multiclass
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
                                                   make_sparse=True)
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    # The object behind the last step in the pipeline
    cls_predict = unittest.mock.Mock(wraps=cls.steps[-1][1].predict_proba)
    cls.steps[-1][-1].predict_proba = cls_predict
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(84, cls_predict.call_count)
    assert_array_almost_equal(prediction_, prediction)
Example 12: test_get_hyperparameter_search_space_include_exclude_models
def test_get_hyperparameter_search_space_include_exclude_models(self):
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        include={'classifier': ['libsvm_svc']})
    self.assertEqual(cs.get_hyperparameter('classifier:__choice__'),
                     CategoricalHyperparameter('classifier:__choice__',
                                               ['libsvm_svc']))

    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        exclude={'classifier': ['libsvm_svc']})
    self.assertNotIn('libsvm_svc', str(cs))

    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        include={'preprocessor': ['select_percentile_classification']})
    self.assertEqual(cs.get_hyperparameter('preprocessor:__choice__'),
                     CategoricalHyperparameter('preprocessor:__choice__',
                                               ['select_percentile_classification']))

    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        exclude={'preprocessor': ['select_percentile_classification']})
    self.assertNotIn('select_percentile_classification', str(cs))
Example 13: test_get_hyperparameter_search_space_dataset_properties
def test_get_hyperparameter_search_space_dataset_properties(self):
    cs_mc = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={"multiclass": True})
    self.assertNotIn("bernoulli_nb", str(cs_mc))

    cs_ml = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={"multilabel": True})
    self.assertNotIn("k_nearest_neighbors", str(cs_ml))
    self.assertNotIn("liblinear", str(cs_ml))
    self.assertNotIn("libsvm_svc", str(cs_ml))
    self.assertNotIn("sgd", str(cs_ml))

    cs_sp = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={"sparse": True})
    self.assertIn("extra_trees", str(cs_sp))
    self.assertIn("gradient_boosting", str(cs_sp))
    self.assertIn("random_forest", str(cs_sp))

    cs_mc_ml = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={"multilabel": True, "multiclass": True})
    self.assertEqual(cs_ml, cs_mc_ml)
Example 14: test_get_hyperparameter_search_space_dataset_properties
def test_get_hyperparameter_search_space_dataset_properties(self):
    cs_mc = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={'multiclass': True})
    self.assertNotIn('bernoulli_nb', str(cs_mc))

    cs_ml = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={'multilabel': True})
    self.assertNotIn('k_nearest_neighbors', str(cs_ml))
    self.assertNotIn('liblinear', str(cs_ml))
    self.assertNotIn('libsvm_svc', str(cs_ml))
    self.assertNotIn('sgd', str(cs_ml))

    cs_sp = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={'sparse': True})
    self.assertIn('extra_trees', str(cs_sp))
    self.assertIn('gradient_boosting', str(cs_sp))
    self.assertIn('random_forest', str(cs_sp))

    cs_mc_ml = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={'multilabel': True, 'multiclass': True})
    self.assertEqual(cs_ml, cs_mc_ml)
Example 15: test_configurations_signed_data
def test_configurations_signed_data(self):
    # Use a limit of ~4GiB
    limit = 4000 * 1024 * 1024
    resource.setrlimit(resource.RLIMIT_AS, (limit, limit))

    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={'signed': True})
    print(cs)
    for i in range(10):
        config = cs.sample_configuration()
        config._populate_values()
        if config['classifier:passive_aggressive:n_iter'] is not None:
            config._values['classifier:passive_aggressive:n_iter'] = 5
        if config['classifier:sgd:n_iter'] is not None:
            config._values['classifier:sgd:n_iter'] = 5

        X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
        cls = SimpleClassificationPipeline(config, random_state=1)
        print(config)
        try:
            cls.fit(X_train, Y_train)
            X_test_ = X_test.copy()
            predictions = cls.predict(X_test)
            self.assertIsInstance(predictions, np.ndarray)
            predicted_probabilities = cls.predict_proba(X_test_)
            self.assertIsInstance(predicted_probabilities, np.ndarray)
        except ValueError as e:
            if "Floating-point under-/overflow occurred at epoch" in \
                    e.args[0] or \
                    "removed all features" in e.args[0] or \
                    "all features are discarded" in e.args[0]:
                continue
            else:
                print(config)
                print(traceback.format_exc())
                raise e
        except RuntimeWarning as e:
            if "invalid value encountered in sqrt" in e.args[0]:
                continue
            elif "divide by zero encountered in" in e.args[0]:
                continue
            elif "invalid value encountered in divide" in e.args[0]:
                continue
            elif "invalid value encountered in true_divide" in e.args[0]:
                continue
            else:
                print(config)
                print(traceback.format_exc())
                raise e
        except UserWarning as e:
            if "FastICA did not converge" in e.args[0]:
                continue
            else:
                print(config)
                print(traceback.format_exc())
                raise e
        except MemoryError:
            continue