This article collects typical usage examples of the Python method autosklearn.pipeline.classification.SimpleClassificationPipeline.fit: what the method does, how to call it, and how it is used in practice. The curated examples below should help answer these questions; you can also look further into the containing class, autosklearn.pipeline.classification.SimpleClassificationPipeline.
The following shows 15 code examples of SimpleClassificationPipeline.fit, sorted by popularity by default.
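Before the test-suite examples, here is a minimal usage sketch of the fit/predict workflow. It is not taken from the examples below: it substitutes scikit-learn's load_digits and train_test_split for the tests' get_dataset helper, and assumes a SimpleClassificationPipeline that can be constructed with its default configuration (as in Example 11); constructor arguments may differ between auto-sklearn versions.

import sklearn.datasets
import sklearn.metrics
from sklearn.model_selection import train_test_split

from autosklearn.pipeline.classification import SimpleClassificationPipeline

# A small dense classification dataset in place of the tests' get_dataset helper.
X, y = sklearn.datasets.load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

# Build the pipeline with its default configuration and fit it
# (SimpleClassificationPipeline.fit is the method documented in this article).
pipeline = SimpleClassificationPipeline()
pipeline.fit(X_train, y_train)

# Hard predictions and class probabilities, as used throughout the examples below.
predictions = pipeline.predict(X_test)
probabilities = pipeline.predict_proba(X_test)
print(sklearn.metrics.accuracy_score(y_test, predictions))
print(probabilities.shape)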
Example 1: test_predict_batched
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_predict_batched(self):
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space()
    default = cs.get_default_configuration()
    cls = SimpleClassificationPipeline(default)

    # Multiclass
    X_train, Y_train, X_test, Y_test = get_dataset(dataset="digits")
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict(X_test_)
    cls_predict = mock.Mock(wraps=cls.pipeline_)
    cls.pipeline_ = cls_predict
    prediction = cls.predict(X_test, batch_size=20)
    self.assertEqual((1647,), prediction.shape)
    self.assertEqual(83, cls_predict.predict.call_count)
    assert_array_almost_equal(prediction_, prediction)

    # Multilabel
    X_train, Y_train, X_test, Y_test = get_dataset(dataset="digits")
    Y_train = np.array(list([(list([1 if i != y else 0 for i in range(10)])) for y in Y_train]))
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict(X_test_)
    cls_predict = mock.Mock(wraps=cls.pipeline_)
    cls.pipeline_ = cls_predict
    prediction = cls.predict(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(83, cls_predict.predict.call_count)
    assert_array_almost_equal(prediction_, prediction)
Example 2: test_configurations_signed_data
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_configurations_signed_data(self):
    # Use a limit of ~4GiB
    limit = 4000 * 1024 * 1024
    resource.setrlimit(resource.RLIMIT_AS, (limit, limit))

    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={'signed': True})
    print(cs)
    for i in range(10):
        config = cs.sample_configuration()
        config._populate_values()
        if config['classifier:passive_aggressive:n_iter'] is not None:
            config._values['classifier:passive_aggressive:n_iter'] = 5
        if config['classifier:sgd:n_iter'] is not None:
            config._values['classifier:sgd:n_iter'] = 5

        X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
        cls = SimpleClassificationPipeline(config, random_state=1)
        print(config)
        try:
            cls.fit(X_train, Y_train)
            X_test_ = X_test.copy()
            predictions = cls.predict(X_test)
            self.assertIsInstance(predictions, np.ndarray)
            predicted_probabilities = cls.predict_proba(X_test_)
            self.assertIsInstance(predicted_probabilities, np.ndarray)
        except ValueError as e:
            if "Floating-point under-/overflow occurred at epoch" in \
                    e.args[0] or \
                    "removed all features" in e.args[0] or \
                    "all features are discarded" in e.args[0]:
                continue
            else:
                print(config)
                print(traceback.format_exc())
                raise e
        except RuntimeWarning as e:
            if "invalid value encountered in sqrt" in e.args[0]:
                continue
            elif "divide by zero encountered in" in e.args[0]:
                continue
            elif "invalid value encountered in divide" in e.args[0]:
                continue
            elif "invalid value encountered in true_divide" in e.args[0]:
                continue
            else:
                print(config)
                print(traceback.format_exc())
                raise e
        except UserWarning as e:
            if "FastICA did not converge" in e.args[0]:
                continue
            else:
                print(config)
                print(traceback.format_exc())
                raise e
        except MemoryError as e:
            continue
Example 3: test_predict_proba_batched
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_predict_proba_batched(self):
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space()
    default = cs.get_default_configuration()

    # Multiclass
    cls = SimpleClassificationPipeline(default)
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    # The object behind the last step in the pipeline
    cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
    cls.pipeline_.steps[-1] = ("estimator", cls_predict)
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(84, cls_predict.predict_proba.call_count)
    assert_array_almost_equal(prediction_, prediction)

    # Multilabel
    cls = SimpleClassificationPipeline(default)
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
    Y_train_ = np.zeros((Y_train.shape[0], 10))
    for i, y in enumerate(Y_train):
        Y_train_[i][y] = 1
    Y_train = Y_train_
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
    cls.pipeline_.steps[-1] = ("estimator", cls_predict)
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertIsInstance(prediction, np.ndarray)
    self.assertEqual(prediction.shape, ((1647, 10)))
    self.assertEqual(84, cls_predict.predict_proba.call_count)
    assert_array_almost_equal(prediction_, prediction)
Example 4: test_predict_proba_batched
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_predict_proba_batched(self):
    # Multiclass
    cls = SimpleClassificationPipeline(include={'classifier': ['sgd']})
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    # The object behind the last step in the pipeline
    cls_predict = unittest.mock.Mock(wraps=cls.steps[-1][1].predict_proba)
    cls.steps[-1][-1].predict_proba = cls_predict
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(84, cls_predict.call_count)
    assert_array_almost_equal(prediction_, prediction)

    # Multilabel
    cls = SimpleClassificationPipeline(include={'classifier': ['lda']})
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
    Y_train = np.array(list([(list([1 if i != y else 0 for i in range(10)]))
                             for y in Y_train]))
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    # The object behind the last step in the pipeline
    cls_predict = unittest.mock.Mock(wraps=cls.steps[-1][1].predict_proba)
    cls.steps[-1][-1].predict_proba = cls_predict
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(84, cls_predict.call_count)
    assert_array_almost_equal(prediction_, prediction)
Example 5: test_predict_proba_batched
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_predict_proba_batched(self):
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space()
    default = cs.get_default_configuration()

    # Multiclass
    cls = SimpleClassificationPipeline(default)
    X_train, Y_train, X_test, Y_test = get_dataset(dataset="digits")
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    # The object behind the last step in the pipeline
    cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
    cls.pipeline_.steps[-1] = ("estimator", cls_predict)
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(84, cls_predict.predict_proba.call_count)
    assert_array_almost_equal(prediction_, prediction)

    # Multilabel
    cls = SimpleClassificationPipeline(default)
    X_train, Y_train, X_test, Y_test = get_dataset(dataset="digits")
    Y_train = np.array(list([(list([1 if i != y else 0 for i in range(10)])) for y in Y_train]))
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
    cls.pipeline_.steps[-1] = ("estimator", cls_predict)
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertIsInstance(prediction, np.ndarray)
    self.assertEqual(prediction.shape, ((1647, 10)))
    self.assertEqual(84, cls_predict.predict_proba.call_count)
    assert_array_almost_equal(prediction_, prediction)
Example 6: test_predict_proba_batched_sparse
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_predict_proba_batched_sparse(self):
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={'sparse': True})
    config = Configuration(cs,
                           values={"balancing:strategy": "none",
                                   "classifier:__choice__": "random_forest",
                                   "imputation:strategy": "mean",
                                   "one_hot_encoding:minimum_fraction": 0.01,
                                   "one_hot_encoding:use_minimum_fraction": 'True',
                                   "preprocessor:__choice__": "no_preprocessing",
                                   'classifier:random_forest:bootstrap': 'True',
                                   'classifier:random_forest:criterion': 'gini',
                                   'classifier:random_forest:max_depth': 'None',
                                   'classifier:random_forest:min_samples_split': 2,
                                   'classifier:random_forest:min_samples_leaf': 2,
                                   'classifier:random_forest:min_weight_fraction_leaf': 0.0,
                                   'classifier:random_forest:max_features': 0.5,
                                   'classifier:random_forest:max_leaf_nodes': 'None',
                                   'classifier:random_forest:n_estimators': 100,
                                   "rescaling:__choice__": "min/max"})

    # Multiclass
    cls = SimpleClassificationPipeline(config)
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
                                                   make_sparse=True)
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    # The object behind the last step in the pipeline
    cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
    cls.pipeline_.steps[-1] = ("estimator", cls_predict)
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(84, cls_predict.predict_proba.call_count)
    assert_array_almost_equal(prediction_, prediction)

    # Multilabel
    cls = SimpleClassificationPipeline(config)
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
                                                   make_sparse=True)
    Y_train_ = np.zeros((Y_train.shape[0], 10))
    for i, y in enumerate(Y_train):
        Y_train_[i][y] = 1
    Y_train = Y_train_
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
    cls.pipeline_.steps[-1] = ("estimator", cls_predict)
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual(prediction.shape, ((1647, 10)))
    self.assertIsInstance(prediction, np.ndarray)
    self.assertEqual(84, cls_predict.predict_proba.call_count)
    assert_array_almost_equal(prediction_, prediction)
Example 7: test_predict_proba_batched_sparse
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_predict_proba_batched_sparse(self):
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(dataset_properties={"sparse": True})
    config = Configuration(
        cs,
        values={
            "balancing:strategy": "none",
            "classifier:__choice__": "random_forest",
            "imputation:strategy": "mean",
            "one_hot_encoding:minimum_fraction": 0.01,
            "one_hot_encoding:use_minimum_fraction": "True",
            "preprocessor:__choice__": "no_preprocessing",
            "classifier:random_forest:bootstrap": "True",
            "classifier:random_forest:criterion": "gini",
            "classifier:random_forest:max_depth": "None",
            "classifier:random_forest:min_samples_split": 2,
            "classifier:random_forest:min_samples_leaf": 2,
            "classifier:random_forest:min_weight_fraction_leaf": 0.0,
            "classifier:random_forest:max_features": 0.5,
            "classifier:random_forest:max_leaf_nodes": "None",
            "classifier:random_forest:n_estimators": 100,
            "rescaling:__choice__": "min/max",
        },
    )

    # Multiclass
    cls = SimpleClassificationPipeline(config)
    X_train, Y_train, X_test, Y_test = get_dataset(dataset="digits", make_sparse=True)
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    # The object behind the last step in the pipeline
    cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
    cls.pipeline_.steps[-1] = ("estimator", cls_predict)
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(84, cls_predict.predict_proba.call_count)
    assert_array_almost_equal(prediction_, prediction)

    # Multilabel
    cls = SimpleClassificationPipeline(config)
    X_train, Y_train, X_test, Y_test = get_dataset(dataset="digits", make_sparse=True)
    Y_train = np.array(list([(list([1 if i != y else 0 for i in range(10)])) for y in Y_train]))
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
    cls.pipeline_.steps[-1] = ("estimator", cls_predict)
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual(prediction.shape, ((1647, 10)))
    self.assertIsInstance(prediction, np.ndarray)
    self.assertEqual(84, cls_predict.predict_proba.call_count)
    assert_array_almost_equal(prediction_, prediction)
Example 8: test_configurations_sparse
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_configurations_sparse(self):
    # Use a limit of ~4GiB
    limit = 4000 * 1024 * 1024
    resource.setrlimit(resource.RLIMIT_AS, (limit, limit))

    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={'sparse': True})
    print(cs)
    for i in range(10):
        config = cs.sample_configuration()
        config._populate_values()
        if 'classifier:passive_aggressive:n_iter' in config and \
                config['classifier:passive_aggressive:n_iter'] is not None:
            config._values['classifier:passive_aggressive:n_iter'] = 5
        if 'classifier:sgd:n_iter' in config and \
                config['classifier:sgd:n_iter'] is not None:
            config._values['classifier:sgd:n_iter'] = 5

        print(config)
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
                                                       make_sparse=True)
        cls = SimpleClassificationPipeline(config, random_state=1)
        try:
            cls.fit(X_train, Y_train)
            predictions = cls.predict(X_test)
        except ValueError as e:
            if "Floating-point under-/overflow occurred at epoch" in \
                    e.args[0] or \
                    "removed all features" in e.args[0] or \
                    "all features are discarded" in e.args[0]:
                continue
            else:
                print(config)
                traceback.print_tb(sys.exc_info()[2])
                raise e
        except RuntimeWarning as e:
            if "invalid value encountered in sqrt" in e.args[0]:
                continue
            elif "divide by zero encountered in" in e.args[0]:
                continue
            elif "invalid value encountered in divide" in e.args[0]:
                continue
            elif "invalid value encountered in true_divide" in e.args[0]:
                continue
            else:
                print(config)
                raise e
        except UserWarning as e:
            if "FastICA did not converge" in e.args[0]:
                continue
            else:
                print(config)
                raise e
Example 9: test_predict_batched_sparse
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_predict_batched_sparse(self):
    cs = SimpleClassificationPipeline.get_hyperparameter_search_space(
        dataset_properties={'sparse': True})
    config = Configuration(cs,
                           values={"balancing:strategy": "none",
                                   "classifier:__choice__": "random_forest",
                                   "imputation:strategy": "mean",
                                   "one_hot_encoding:minimum_fraction": 0.01,
                                   "one_hot_encoding:use_minimum_fraction": "True",
                                   "preprocessor:__choice__": "no_preprocessing",
                                   'classifier:random_forest:bootstrap': 'True',
                                   'classifier:random_forest:criterion': 'gini',
                                   'classifier:random_forest:max_depth': 'None',
                                   'classifier:random_forest:min_samples_split': 2,
                                   'classifier:random_forest:min_samples_leaf': 2,
                                   'classifier:random_forest:max_features': 0.5,
                                   'classifier:random_forest:max_leaf_nodes': 'None',
                                   'classifier:random_forest:n_estimators': 100,
                                   'classifier:random_forest:min_weight_fraction_leaf': 0.0,
                                   "rescaling:__choice__": "min/max"})
    cls = SimpleClassificationPipeline(config)

    # Multiclass
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
                                                   make_sparse=True)
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict(X_test_)
    cls_predict = mock.Mock(wraps=cls.pipeline_)
    cls.pipeline_ = cls_predict
    prediction = cls.predict(X_test, batch_size=20)
    self.assertEqual((1647,), prediction.shape)
    self.assertEqual(83, cls_predict.predict.call_count)
    assert_array_almost_equal(prediction_, prediction)

    # Multilabel
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
                                                   make_sparse=True)
    Y_train = np.array([(y, 26 - y) for y in Y_train])
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict(X_test_)
    cls_predict = mock.Mock(wraps=cls.pipeline_)
    cls.pipeline_ = cls_predict
    prediction = cls.predict(X_test, batch_size=20)
    self.assertEqual((1647, 2), prediction.shape)
    self.assertEqual(83, cls_predict.predict.call_count)
    assert_array_almost_equal(prediction_, prediction)
Example 10: test_predict_batched_sparse
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_predict_batched_sparse(self):
    cls = SimpleClassificationPipeline(dataset_properties={'sparse': True},
                                       include={'classifier': ['sgd']})

    # Multiclass
    X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
                                                   make_sparse=True)
    cls.fit(X_train, Y_train)
    X_test_ = X_test.copy()
    prediction_ = cls.predict_proba(X_test_)
    # The object behind the last step in the pipeline
    cls_predict = unittest.mock.Mock(wraps=cls.steps[-1][1].predict_proba)
    cls.steps[-1][-1].predict_proba = cls_predict
    prediction = cls.predict_proba(X_test, batch_size=20)
    self.assertEqual((1647, 10), prediction.shape)
    self.assertEqual(84, cls_predict.call_count)
    assert_array_almost_equal(prediction_, prediction)
Example 11: test_default_configuration
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_default_configuration(self):
    for i in range(2):
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='iris')
        auto = SimpleClassificationPipeline()
        auto = auto.fit(X_train, Y_train)
        predictions = auto.predict(X_test)
        self.assertAlmostEqual(0.94,
                               sklearn.metrics.accuracy_score(predictions, Y_test))
        scores = auto.predict_proba(X_test)
Example 12: test_default_configuration_multilabel
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_default_configuration_multilabel(self):
    for i in range(2):
        cs = SimpleClassificationPipeline.get_hyperparameter_search_space(dataset_properties={"multilabel": True})
        default = cs.get_default_configuration()
        X_train, Y_train, X_test, Y_test = get_dataset(dataset="iris", make_multilabel=True)
        auto = SimpleClassificationPipeline(default)
        auto = auto.fit(X_train, Y_train)
        predictions = auto.predict(X_test)
        self.assertAlmostEqual(0.9599999999999995, sklearn.metrics.accuracy_score(predictions, Y_test))
        scores = auto.predict_proba(X_test)
Example 13: test_default_configuration_multilabel
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_default_configuration_multilabel(self):
    for i in range(2):
        dataset_properties = {'multilabel': True}
        classifier = SimpleClassificationPipeline(
            dataset_properties=dataset_properties)
        cs = classifier.get_hyperparameter_search_space()
        default = cs.get_default_configuration()
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='iris',
                                                       make_multilabel=True)
        classifier.set_hyperparameters(default)
        classifier = classifier.fit(X_train, Y_train)
        predictions = classifier.predict(X_test)
        self.assertAlmostEqual(0.94,
                               sklearn.metrics.accuracy_score(predictions,
                                                              Y_test))
        scores = classifier.predict_proba(X_test)
Example 14: _test_configurations
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def _test_configurations(self, configurations_space, make_sparse=False,
                         data=None, init_params=None,
                         dataset_properties=None):
    # Use a limit of ~3GiB
    limit = 3072 * 1024 * 1024
    resource.setrlimit(resource.RLIMIT_AS, (limit, limit))

    print(configurations_space)
    for i in range(10):
        config = configurations_space.sample_configuration()
        config._populate_values()

        # Restrict configurations which could take too long on travis-ci
        restrictions = {'classifier:passive_aggressive:n_iter': 5,
                        'classifier:sgd:n_iter': 5,
                        'classifier:adaboost:n_estimators': 50,
                        'classifier:adaboost:max_depth': 1,
                        'preprocessor:kernel_pca:n_components': 10,
                        'preprocessor:kitchen_sinks:n_components': 50,
                        'classifier:proj_logit:max_epochs': 1,
                        'classifier:libsvm_svc:degree': 2,
                        'regressor:libsvm_svr:degree': 2,
                        'preprocessor:truncatedSVD:target_dim': 10,
                        'preprocessor:polynomial:degree': 2,
                        'classifier:lda:n_components': 10,
                        'preprocessor:nystroem_sampler:n_components': 50,
                        'preprocessor:feature_agglomeration:n_clusters': 2,
                        'classifier:gradient_boosting:max_depth': 2,
                        'classifier:gradient_boosting:n_estimators': 50}
        for restrict_parameter in restrictions:
            restrict_to = restrictions[restrict_parameter]
            if restrict_parameter in config and \
                    config[restrict_parameter] is not None:
                config._values[restrict_parameter] = restrict_to

        print(config)
        if data is None:
            X_train, Y_train, X_test, Y_test = get_dataset(
                dataset='digits', make_sparse=make_sparse, add_NaNs=True)
        else:
            X_train = data['X_train'].copy()
            Y_train = data['Y_train'].copy()
            X_test = data['X_test'].copy()
            Y_test = data['Y_test'].copy()

        init_params_ = copy.deepcopy(init_params)
        cls = SimpleClassificationPipeline(random_state=1,
                                           dataset_properties=dataset_properties,
                                           init_params=init_params_)
        cls.set_hyperparameters(config, init_params=init_params_)
        try:
            cls.fit(X_train, Y_train)
            predictions = cls.predict(X_test.copy())
            predictions = cls.predict_proba(X_test)
        except MemoryError as e:
            continue
        except ValueError as e:
            if "Floating-point under-/overflow occurred at epoch" in \
                    e.args[0]:
                continue
            elif "removed all features" in e.args[0]:
                continue
            elif "all features are discarded" in e.args[0]:
                continue
            elif "Numerical problems in QDA" in e.args[0]:
                continue
            elif 'Bug in scikit-learn' in e.args[0]:
                continue
            elif 'The condensed distance matrix must contain only finite ' \
                 'values.' in e.args[0]:
                continue
            else:
                print(config)
                print(traceback.format_exc())
                raise e
        except RuntimeWarning as e:
            if "invalid value encountered in sqrt" in e.args[0]:
                continue
            elif "divide by zero encountered in" in e.args[0]:
                continue
            elif "invalid value encountered in divide" in e.args[0]:
                continue
            elif "invalid value encountered in true_divide" in e.args[0]:
                continue
            else:
                print(traceback.format_exc())
                print(config)
                raise e
        except UserWarning as e:
            if "FastICA did not converge" in e.args[0]:
                continue
            else:
                print(traceback.format_exc())
                print(config)
                raise e
Example 15: test_weighting_effect
# Required import: from autosklearn.pipeline.classification import SimpleClassificationPipeline [as alias]
# Or: from autosklearn.pipeline.classification.SimpleClassificationPipeline import fit [as alias]
def test_weighting_effect(self):
    data = sklearn.datasets.make_classification(
        n_samples=200, n_features=10, n_redundant=2, n_informative=2,
        n_repeated=2, n_clusters_per_class=2, weights=[0.8, 0.2],
        random_state=1)

    for name, clf, acc_no_weighting, acc_weighting, places in \
            [('adaboost', AdaboostClassifier, 0.810, 0.735, 3),
             ('decision_tree', DecisionTree, 0.780, 0.643, 3),
             ('extra_trees', ExtraTreesClassifier, 0.780, 0.8, 3),
             ('gradient_boosting', GradientBoostingClassifier,
              0.737, 0.684, 3),
             ('random_forest', RandomForest, 0.780, 0.789, 3),
             ('libsvm_svc', LibSVM_SVC, 0.769, 0.72, 3),
             ('liblinear_svc', LibLinear_SVC, 0.762, 0.735, 3),
             ('passive_aggressive', PassiveAggressive, 0.642, 0.449, 3),
             ('sgd', SGD, 0.818, 0.575, 2)
             ]:
        for strategy, acc in [
            ('none', acc_no_weighting),
            ('weighting', acc_weighting)
        ]:
            # Fit
            data_ = copy.copy(data)
            X_train = data_[0][:100]
            Y_train = data_[1][:100]
            X_test = data_[0][100:]
            Y_test = data_[1][100:]

            include = {'classifier': [name],
                       'preprocessor': ['no_preprocessing']}
            classifier = SimpleClassificationPipeline(
                random_state=1, include=include)
            cs = classifier.get_hyperparameter_search_space()
            default = cs.get_default_configuration()
            default._values['balancing:strategy'] = strategy
            classifier = SimpleClassificationPipeline(
                default, random_state=1, include=include)
            predictor = classifier.fit(X_train, Y_train)
            predictions = predictor.predict(X_test)
            self.assertAlmostEqual(
                sklearn.metrics.f1_score(predictions, Y_test), acc,
                places=places, msg=(name, strategy))

            # fit_transformer and fit_estimator
            data_ = copy.copy(data)
            X_train = data_[0][:100]
            Y_train = data_[1][:100]
            X_test = data_[0][100:]
            Y_test = data_[1][100:]
            classifier = SimpleClassificationPipeline(
                default, random_state=1, include=include)
            classifier.set_hyperparameters(configuration=default)
            Xt, fit_params = classifier.fit_transformer(X_train, Y_train)
            classifier.fit_estimator(Xt, Y_train, **fit_params)
            predictions = classifier.predict(X_test)
            self.assertAlmostEqual(
                sklearn.metrics.f1_score(predictions, Y_test), acc,
                places=places)

    for name, pre, acc_no_weighting, acc_weighting in \
            [('extra_trees_preproc_for_classification',
              ExtraTreesPreprocessorClassification, 0.810, 0.563),
             ('liblinear_svc_preprocessor', LibLinear_Preprocessor,
              0.837, 0.567)]:
        for strategy, acc in [('none', acc_no_weighting),
                              ('weighting', acc_weighting)]:
            data_ = copy.copy(data)
            X_train = data_[0][:100]
            Y_train = data_[1][:100]
            X_test = data_[0][100:]
            Y_test = data_[1][100:]

            include = {'classifier': ['sgd'], 'preprocessor': [name]}
            classifier = SimpleClassificationPipeline(
                random_state=1, include=include)
            cs = classifier.get_hyperparameter_search_space()
            default = cs.get_default_configuration()
            default._values['balancing:strategy'] = strategy
            classifier.set_hyperparameters(default)
            predictor = classifier.fit(X_train, Y_train)
            predictions = predictor.predict(X_test)
            self.assertAlmostEqual(
                sklearn.metrics.f1_score(predictions, Y_test), acc,
                places=3, msg=(name, strategy))

            # fit_transformer and fit_estimator
            data_ = copy.copy(data)
            X_train = data_[0][:100]
            Y_train = data_[1][:100]
            X_test = data_[0][100:]
            Y_test = data_[1][100:]
            default._values['balancing:strategy'] = strategy
            classifier = SimpleClassificationPipeline(
                default, random_state=1, include=include)
            Xt, fit_params = classifier.fit_transformer(X_train, Y_train)
            classifier.fit_estimator(Xt, Y_train, **fit_params)
            # ......... the rest of this example is omitted here .........