This article collects typical usage examples of the ConfigurationSpace.add_configuration_space method from the Python module ConfigSpace.configuration_space. If you are wondering how ConfigurationSpace.add_configuration_space is used in practice, or are looking for working examples of it, the curated code samples below may help. You can also explore further usage examples of the class ConfigSpace.configuration_space.ConfigurationSpace itself.
Two code examples of ConfigurationSpace.add_configuration_space are shown below, sorted by popularity by default.
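Before the examples, here is a minimal, self-contained sketch of what add_configuration_space does: it copies the hyperparameters of a child ConfigurationSpace into the parent under a name prefix (joined with ':' by default). The names 'preprocessor' and 'n_components' below are purely illustrative and assume a reasonably recent ConfigSpace release.

from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformIntegerHyperparameter

# Child space with a single hyperparameter
child = ConfigurationSpace()
child.add_hyperparameter(
    UniformIntegerHyperparameter('n_components', 2, 50, default_value=10))

# Copy the child space into the parent under the prefix 'preprocessor'
parent = ConfigurationSpace()
parent.add_configuration_space('preprocessor', child)

# The child's hyperparameters now live under 'prefix:name'
print(parent.get_hyperparameter_names())   # ['preprocessor:n_components']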
Example 1: get_hyperparameter_search_space
# Required import: from ConfigSpace.configuration_space import ConfigurationSpace [as alias]
# Or: from ConfigSpace.configuration_space.ConfigurationSpace import add_configuration_space [as alias]
# This example also needs: from ConfigSpace.hyperparameters import CategoricalHyperparameter
def get_hyperparameter_search_space(self, dataset_properties=None,
                                    default=None,
                                    include=None,
                                    exclude=None):
    if dataset_properties is None:
        dataset_properties = {}

    if include is not None and exclude is not None:
        raise ValueError("The arguments include_estimators and "
                         "exclude_estimators cannot be used together.")

    cs = ConfigurationSpace()

    # Compile a list of all estimator objects for this problem
    available_estimators = self.get_available_components(
        dataset_properties=dataset_properties,
        include=include,
        exclude=exclude)

    if len(available_estimators) == 0:
        raise ValueError("No classifiers found")

    if default is None:
        defaults = ['random_forest', 'liblinear_svc', 'sgd',
                    'libsvm_svc'] + list(available_estimators.keys())
        for default_ in defaults:
            if default_ in available_estimators:
                if include is not None and default_ not in include:
                    continue
                if exclude is not None and default_ in exclude:
                    continue
                default = default_
                break

    estimator = CategoricalHyperparameter('__choice__',
                                          list(available_estimators.keys()),
                                          default_value=default)
    cs.add_hyperparameter(estimator)

    for estimator_name in available_estimators.keys():
        estimator_configuration_space = available_estimators[estimator_name].\
            get_hyperparameter_search_space(dataset_properties)
        parent_hyperparameter = {'parent': estimator,
                                 'value': estimator_name}
        cs.add_configuration_space(estimator_name,
                                   estimator_configuration_space,
                                   parent_hyperparameter=parent_hyperparameter)

    self.configuration_space_ = cs
    self.dataset_properties_ = dataset_properties
    return cs
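Example 1 attaches one sub-space per estimator to the '__choice__' hyperparameter via parent_hyperparameter, so a sampled configuration only contains the hyperparameters of the selected estimator. Below is a stripped-down sketch of the same pattern outside of auto-sklearn; the estimator names and hyperparameter ranges are illustrative, not taken from the example above.

from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (CategoricalHyperparameter,
                                         UniformFloatHyperparameter,
                                         UniformIntegerHyperparameter)

cs = ConfigurationSpace()
choice = CategoricalHyperparameter('__choice__', ['random_forest', 'libsvm_svc'],
                                   default_value='random_forest')
cs.add_hyperparameter(choice)

# Sub-space that is only active when __choice__ == 'random_forest'
rf = ConfigurationSpace()
rf.add_hyperparameter(
    UniformIntegerHyperparameter('n_estimators', 10, 500, default_value=100))
cs.add_configuration_space('random_forest', rf,
                           parent_hyperparameter={'parent': choice,
                                                  'value': 'random_forest'})

# Sub-space that is only active when __choice__ == 'libsvm_svc'
svc = ConfigurationSpace()
svc.add_hyperparameter(
    UniformFloatHyperparameter('C', 0.01, 100.0, default_value=1.0, log=True))
cs.add_configuration_space('libsvm_svc', svc,
                           parent_hyperparameter={'parent': choice,
                                                  'value': 'libsvm_svc'})

# Each sample contains only the hyperparameters of the chosen estimator
for config in cs.sample_configuration(3):
    print(config)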
Example 2: get_hyperspace
# Required import: from ConfigSpace.configuration_space import ConfigurationSpace [as alias]
# Or: from ConfigSpace.configuration_space.ConfigurationSpace import add_configuration_space [as alias]
# This example additionally relies on numpy (as np), the task-type constants
# (MULTILABEL_CLASSIFICATION, MULTICLASS_CLASSIFICATION, CLASSIFICATION_TASKS,
# REGRESSION_TASKS) and the project-internal helpers get_pipeline,
# check_pipeline and find_active_choices.
def get_hyperspace(data_info,
                   include_estimators=None, include_preprocessors=None):

    if data_info is None or not isinstance(data_info, dict):
        data_info = dict()

    if 'is_sparse' not in data_info:
        # This dataset is probably dense
        data_info['is_sparse'] = False
    sparse = data_info['is_sparse']

    task_type = data_info['task']
    multilabel = (task_type == MULTILABEL_CLASSIFICATION)
    multiclass = (task_type == MULTICLASS_CLASSIFICATION)

    if task_type in CLASSIFICATION_TASKS:
        data_info['multilabel'] = multilabel
        data_info['multiclass'] = multiclass
        data_info['target_type'] = 'classification'
        pipe_type = 'classifier'

        # Components to be matched for forbidden combinations
        components_ = ["adaboost", "decision_tree", "extra_trees",
                       "gradient_boosting", "k_nearest_neighbors",
                       "libsvm_svc", "random_forest", "gaussian_nb"]
        feature_learning_ = ["kitchen_sinks", "nystroem_sampler"]

    elif task_type in REGRESSION_TASKS:
        data_info['target_type'] = 'regression'
        pipe_type = 'regressor'

        # Components to be matched for forbidden combinations
        components_ = ["adaboost", "decision_tree", "extra_trees",
                       "gaussian_process", "gradient_boosting",
                       "k_nearest_neighbors", "random_forest"]
        feature_learning_ = ["kitchen_sinks", "kernel_pca", "nystroem_sampler"]

    else:
        raise NotImplementedError()

    include, exclude = dict(), dict()
    if include_preprocessors is not None:
        include["preprocessor"] = include_preprocessors
    if include_estimators is not None:
        include[pipe_type] = include_estimators

    cs = ConfigurationSpace()

    # Construct pipeline
    # FIXME: OrderedDict?
    pipeline = get_pipeline(data_info['task'])

    # TODO: include, exclude, pipeline
    keys = [pair[0] for pair in pipeline]
    for key in include:
        if key not in keys:
            raise ValueError('Invalid key in include: %s; should be one '
                             'of %s' % (key, keys))
    for key in exclude:
        if key not in keys:
            raise ValueError('Invalid key in exclude: %s; should be one '
                             'of %s' % (key, keys))

    # Construct hyperspace
    # TODO: what does 'signed' stand for?
    if 'signed' not in data_info:
        # This dataset probably contains unsigned data
        data_info['signed'] = False

    match = check_pipeline(pipeline, data_info,
                           include=include, exclude=exclude)

    # Now we have only legal combinations at this step of the pipeline
    # Simple sanity checks
    assert np.sum(match) != 0, "No valid pipeline found."
    assert np.sum(match) <= np.size(match), \
        "'matches' is not binary; %s <= %d, %s" % \
        (str(np.sum(match)), np.size(match), str(match.shape))

    # Iterate over each dimension of the matches array (each step of the
    # pipeline) to see if we can add hyperparameters for that step
    for node_idx, n_ in enumerate(pipeline):
        node_name, node = n_
        is_choice = hasattr(node, "get_available_components")

        # If the node is not a choice, we can add it immediately because it
        # must be active (otherwise np.sum(match) would be zero).
        if not is_choice:
            cs.add_configuration_space(
                node_name,
                node.get_hyperparameter_search_space(data_info))
        # If the node is a choice, we have to figure out which of its
        # choices are actually legal choices.
        else:
            choices_list = find_active_choices(match, node, node_idx, data_info,
                                               include=include.get(node_name),
                                               exclude=exclude.get(node_name))
            cs.add_configuration_space(
                node_name,
                node.get_hyperparameter_search_space(data_info,
                                                     include=choices_list))
#......... the rest of this example is omitted .........
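Note the difference between the two branches in Example 2: non-choice pipeline nodes are added without a parent_hyperparameter, so their hyperparameters are unconditionally active, while choice nodes have their sub-space narrowed to the legal components via include=choices_list. The omitted remainder presumably builds the forbidden combinations hinted at by components_ and feature_learning_, but that part is not shown here.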