This article collects typical usage examples of the Python function hpbandster.core.result.logged_results_to_HBS_result. If you are unsure what result.logged_results_to_HBS_result does or how to call it, the selected code examples below may help; you can also look at the module it lives in, hpbandster.core.result, for further details.
Six code examples of result.logged_results_to_HBS_result are shown below, sorted by popularity by default.
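Before the examples, here is a minimal sketch of the basic call pattern. It assumes a directory (the placeholder "./bohb_logs") containing the configs.json and results.json files written by hpbandster's JSON result logger; logged_results_to_HBS_result reads them and returns a Result object.

from hpbandster.core.result import logged_results_to_HBS_result

# "./bohb_logs" is a placeholder for a directory containing the configs.json
# and results.json produced by hpbandster's json result logger.
res = logged_results_to_HBS_result("./bohb_logs")

id2config = res.get_id2config_mapping()   # config_id -> {'config': ..., 'config_info': ...}
incumbent_id = res.get_incumbent_id()     # configuration with the best loss on the largest budget
print("Incumbent configuration:", id2config[incumbent_id]['config'])

# Each run is one (configuration, budget) evaluation with loss, budget and info fields.
for run in res.get_all_runs():
    print(run.config_id, run.budget, run.loss)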
Example 1: fit
# Required import: from hpbandster.core import result [as alias]
# Or: from hpbandster.core.result import logged_results_to_HBS_result [as alias]
def fit(self, pipeline_config, optimized_hyperparameter_config, budget, loss, info, refit=None):
    if refit or pipeline_config["ensemble_size"] == 0 or pipeline_config["task_id"] not in [-1, 1]:
        return {"optimized_hyperparameter_config": optimized_hyperparameter_config, "budget": budget}

    filename = os.path.join(pipeline_config["result_logger_dir"], 'predictions_for_ensemble.npy')
    optimize_metric = self.pipeline[MetricSelector.get_name()].metrics[pipeline_config["optimize_metric"]]
    y_transform = self.pipeline[OneHotEncoding.get_name()].complete_y_tranformation
    result = logged_results_to_HBS_result(pipeline_config["result_logger_dir"])

    all_predictions, labels, model_identifiers, _ = read_ensemble_prediction_file(filename=filename, y_transform=y_transform)
    ensemble_selection, ensemble_configs = build_ensemble(result=result,
        optimize_metric=optimize_metric, ensemble_size=pipeline_config["ensemble_size"],
        all_predictions=all_predictions, labels=labels, model_identifiers=model_identifiers,
        only_consider_n_best=pipeline_config["ensemble_only_consider_n_best"],
        sorted_initialization_n_best=pipeline_config["ensemble_sorted_initialization_n_best"])

    return {"optimized_hyperparameter_config": optimized_hyperparameter_config, "budget": budget,
            "ensemble": ensemble_selection,
            "ensemble_configs": ensemble_configs,
            "loss": loss,
            "info": info}
Example 2: fit
# Required import: from hpbandster.core import result [as alias]
# Or: from hpbandster.core.result import logged_results_to_HBS_result [as alias]
def fit(self, pipeline_config, autonet, run_result_dir, optimize_metric, trajectories):
    ensemble_log_file = os.path.join(run_result_dir, "ensemble_log.json")
    test_log_file = os.path.join(run_result_dir, "test_result.json")
    if not pipeline_config["enable_ensemble"] or optimize_metric is None or \
            (not os.path.exists(ensemble_log_file) and not os.path.exists(test_log_file)):
        return {"trajectories": trajectories, "optimize_metric": optimize_metric}

    try:
        started = logged_results_to_HBS_result(run_result_dir).HB_config["time_ref"]
    except:
        return {"trajectories": trajectories, "optimize_metric": optimize_metric}

    metrics = autonet.pipeline[MetricSelector.get_name()].metrics
    ensemble_trajectories = dict()
    test_trajectories = dict()
    if os.path.exists(ensemble_log_file):
        ensemble_trajectories = get_ensemble_trajectories(ensemble_log_file, started, metrics)
    if os.path.exists(test_log_file):
        test_trajectories = get_ensemble_trajectories(test_log_file, started, metrics, prefix="", only_test=True)

    return {"trajectories": dict(trajectories, **ensemble_trajectories, **test_trajectories), "optimize_metric": optimize_metric}
Example 3: test_incumbent_trajectory
# Required import: from hpbandster.core import result [as alias]
# Or: from hpbandster.core.result import logged_results_to_HBS_result [as alias]
def test_incumbent_trajectory(self):
    """ Load example result and check incumbent_trajectory generation for general errors (whitebox-test)"""
    result = logged_results_to_HBS_result(self.result_path)
    # All budgets
    traj = get_incumbent_trajectory(result, result.HB_config['budgets'], mode='racing')
    traj = get_incumbent_trajectory(result, result.HB_config['budgets'], mode='minimum')
    traj = get_incumbent_trajectory(result, result.HB_config['budgets'], mode='prefer_higher_budget')
    # Single budgets
    traj = get_incumbent_trajectory(result, [result.HB_config['budgets'][0]], mode='racing')
    traj = get_incumbent_trajectory(result, [result.HB_config['budgets'][0]], mode='minimum')
    traj = get_incumbent_trajectory(result, [result.HB_config['budgets'][0]], mode='prefer_higher_budget')
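Note that the get_incumbent_trajectory used in this test appears to be CAVE's helper (it takes a list of budgets and a mode) rather than the method of the same name on hpbandster's Result class; the hpbandster pieces here are the Result object and its HB_config['budgets'] list. A small sketch of what that list exposes, using only the public Result API (the path is a placeholder and the budget values are just an illustration):

from hpbandster.core.result import logged_results_to_HBS_result

res = logged_results_to_HBS_result("path/to/bohb/run")   # placeholder path
budgets = res.HB_config['budgets']                       # e.g. [9.0, 27.0, 81.0]

# Group all finished runs by budget.
runs_per_budget = {b: [r for r in res.get_all_runs() if r.budget == b] for b in budgets}
for b, runs in runs_per_budget.items():
    print("budget %s: %d runs" % (b, len(runs)))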
Example 4: parse_results
# Required import: from hpbandster.core import result [as alias]
# Or: from hpbandster.core.result import logged_results_to_HBS_result [as alias]
def parse_results(self, pipeline_config):
    """Parse the results of the optimization run

    Arguments:
        pipeline_config {dict} -- The configuration of the pipeline.

    Raises:
        RuntimeError: An error occurred when parsing the results.

    Returns:
        dict -- Dictionary summarizing the results
    """
    try:
        res = logged_results_to_HBS_result(pipeline_config["result_logger_dir"])
        id2config = res.get_id2config_mapping()
        incumbent_trajectory = res.get_incumbent_trajectory(bigger_is_better=False, non_decreasing_budget=False)
    except Exception as e:
        raise RuntimeError("Error parsing results. Check results.json and output for more details. An empty results.json is usually caused by a misconfiguration of AutoNet.")

    if len(incumbent_trajectory['config_ids']) == 0:
        return dict()

    final_config_id = incumbent_trajectory['config_ids'][-1]
    final_budget = incumbent_trajectory['budgets'][-1]
    best_run = [r for r in res.get_runs_by_id(final_config_id) if r.budget == final_budget][0]
    return {'optimized_hyperparameter_config': id2config[final_config_id]['config'],
            'budget': final_budget,
            'loss': best_run.loss,
            'info': best_run.info}
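For context on what parse_results unpacks: hpbandster's Result.get_incumbent_trajectory returns a dictionary of parallel lists under the keys 'config_ids', 'times_finished', 'budgets' and 'losses', with one entry per improvement of the incumbent. A minimal sketch (the directory path is a placeholder):

from hpbandster.core.result import logged_results_to_HBS_result

res = logged_results_to_HBS_result("path/to/result_logger_dir")   # placeholder path
traj = res.get_incumbent_trajectory(bigger_is_better=False, non_decreasing_budget=False)

# 'config_ids', 'times_finished', 'budgets' and 'losses' are parallel lists.
if traj['config_ids']:
    print("final incumbent:", traj['config_ids'][-1],
          "budget:", traj['budgets'][-1],
          "loss:", traj['losses'][-1])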
Example 5: convert
# Required import: from hpbandster.core import result [as alias]
# Or: from hpbandster.core.result import logged_results_to_HBS_result [as alias]
def convert(self, folders, ta_exec_dirs=None, output_dir=None, converted_dest='converted_input_data'):
    try:
        from hpbandster.core.result import Result as HPBResult
        from hpbandster.core.result import logged_results_to_HBS_result
    except ImportError as e:
        raise ImportError("To analyze BOHB-data, please install hpbandster (e.g. `pip install hpbandster`)")

    self.logger.debug("Converting BOHB-data to SMAC3-data. Called with: folders=%s, ta_exec_dirs=%s, output_dir=%s,"
                      " converted_dest=%s", str(folders), str(ta_exec_dirs), str(output_dir), str(converted_dest))

    # Use temporary files for the intermediate smac-result-like format if no output_dir is specified
    if not output_dir:
        output_dir = tempfile.mkdtemp()
        self.logger.debug("Temporary directory for intermediate SMAC3-results: %s", output_dir)

    if ta_exec_dirs is None or len(ta_exec_dirs) == 0:
        ta_exec_dirs = ['.']
    if len(ta_exec_dirs) != len(folders):
        ta_exec_dirs = [ta_exec_dirs[0] for _ in folders]

    # Get a list with alternative interpretations of the configspace-file
    # (if it's a .pcs-file; for .json-files it's a length-one list)
    cs_interpretations = self.load_configspace(folders[0])
    self.logger.debug("Loading with %d configspace alternative options...", len(cs_interpretations))

    self.logger.info("Assuming BOHB treats target algorithms as deterministic (and does not re-evaluate)")

    #####################
    # Actual conversion #
    #####################
    folder_basenames = get_folder_basenames(folders)
    result = OrderedDict()
    for f, f_base, ta_exec_dir in zip(folders, folder_basenames, ta_exec_dirs):  # these are the parallel runs
        converted_folder_path = os.path.join(output_dir, converted_dest, f_base)
        self.logger.debug("Processing folder=%s, f_base=%s, ta_exec_dir=%s. Saving to %s.",
                          f, f_base, ta_exec_dir, converted_folder_path)
        if not os.path.exists(converted_folder_path):
            self.logger.debug("%s doesn't exist. Creating...", converted_folder_path)
            os.makedirs(converted_folder_path)

        # Original hpbandster-formatted result-object
        hp_result = logged_results_to_HBS_result(f)
        result[f] = self.hpbandster2smac(f, hp_result, cs_interpretations, converted_folder_path)

    return result
Example 6: build_run_trajectories
# Required import: from hpbandster.core import result [as alias]
# Or: from hpbandster.core.result import logged_results_to_HBS_result [as alias]
def build_run_trajectories(results_folder, autonet_config, metrics, log_functions):
    # parse results
    try:
        res = logged_results_to_HBS_result(results_folder)
        incumbent_trajectory = res.get_incumbent_trajectory(bigger_is_better=False, non_decreasing_budget=False)
    except:
        print("No incumbent trajectory found")
        return dict()

    # prepare
    metric_name = autonet_config["optimize_metric"]
    all_metrics = autonet_config["additional_metrics"] + [metric_name]
    additional_metric_names = [("val_" + m, metrics[m]) for m in all_metrics]
    additional_metric_names += [("train_" + m, metrics[m]) for m in all_metrics]
    additional_metric_names += [(l, log_functions[l]) for l in autonet_config["additional_logs"]]

    # initialize incumbent trajectories
    incumbent_trajectories = dict()

    # save incumbent trajectories
    for name, obj in additional_metric_names:
        tj = copy(incumbent_trajectory)
        log_available = [name in run["info"] for config_id, budget in zip(tj["config_ids"], tj["budgets"])
                         for run in res.get_runs_by_id(config_id)
                         if run["budget"] == budget]
        tj["values"] = [run["info"][name] for config_id, budget in zip(tj["config_ids"], tj["budgets"])
                        for run in res.get_runs_by_id(config_id)
                        if run["budget"] == budget and name in run["info"]]
        tj["losses"] = [obj.loss_transform(x) for x in tj["values"]]

        for key, value_list in tj.items():
            if key in ["losses"]:
                continue
            tj[key] = [value for i, value in enumerate(value_list) if log_available[i]]
        if tj["losses"]:
            incumbent_trajectories[name] = tj

    # assume the first random config has been evaluated already at time 0
    for name, trajectory in incumbent_trajectories.items():
        for key, value_list in trajectory.items():
            if not isinstance(value_list, (list, tuple)):
                continue
            trajectory[key] = [value_list[0] if key != "times_finished" else 0] + value_list

    return incumbent_trajectories
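The trajectories above are filled from the 'info' dictionary that each worker reports alongside its loss; on the hpbandster side this payload is available per run. A rough sketch of reading it back (the path is a placeholder, 'val_accuracy' is only a hypothetical info key, and the info payload is whatever the worker returned, so it need not be a dict in general):

from hpbandster.core.result import logged_results_to_HBS_result

res = logged_results_to_HBS_result("path/to/results_folder")   # placeholder path
for run in res.get_runs_by_id(res.get_incumbent_id()):
    # run.info holds whatever the worker returned under 'info';
    # 'val_accuracy' is a hypothetical key used for illustration.
    info = run.info if isinstance(run.info, dict) else {}
    print(run.budget, run.loss, info.get("val_accuracy"))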