

Python result.json_result_logger Method Code Examples

This article collects typical usage examples of the json_result_logger method from the Python module hpbandster.core.result. If you are unsure what result.json_result_logger does, how to call it, or want to see it in context, the curated examples below should help. You can also explore the other members of hpbandster.core.result for further usage examples.


Three code examples of result.json_result_logger are shown below, sorted by popularity by default.
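Before the examples, here is a minimal sketch of how the method is typically wired up (the directory name 'my_bohb_run' is an arbitrary choice for illustration): json_result_logger is instantiated with a target directory and handed to an hpbandster optimizer via its result_logger argument, after which every sampled configuration is appended to configs.json and every evaluation result to results.json in that directory.

from hpbandster.core.result import json_result_logger

# Creates the directory if necessary. With the default overwrite=False the
# logger refuses to clobber the configs.json/results.json pair left by a
# previous run; overwrite=True replaces them.
result_logger = json_result_logger(directory='my_bohb_run', overwrite=True)

# The logger is then passed to an optimizer such as BOHB via its
# result_logger argument; see the full examples below.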

Example 1: generate_bohb_data

# Required import: from hpbandster.core import result [as alias]
# Or: from hpbandster.core.result import json_result_logger [as alias]
def generate_bohb_data():
    import warnings
    import hpbandster.core.nameserver as hpns
    import hpbandster.core.result as hpres
    from hpbandster.optimizers import BOHB as BOHB

    run_id = '0'  # Every run has to have a unique (at runtime) id.
    NS = hpns.NameServer(run_id=run_id, host='localhost', port=0)
    ns_host, ns_port = NS.start()

    from neural_opt import MyWorker, get_configspace

    w = MyWorker(nameserver=ns_host,
                 nameserver_port=ns_port,
                 run_id=run_id,  # same as the nameserver's
                 )
    w.run(background=True)

    # Log the optimization results for later analysis
    result_logger = hpres.json_result_logger(directory='test/general_example/results/bohb_full_configspace',
                                             overwrite=True)

    bohb = BOHB(configspace=get_configspace(),
                run_id=run_id,  # same as nameserver's
                eta=2, min_budget=5, max_budget=100,  # Hyperband parameters
                nameserver=ns_host, nameserver_port=ns_port,
                result_logger=result_logger,
                )

    # Then start the optimizer. The n_iterations parameter specifies
    # the number of iterations to be performed in this run
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = bohb.run(n_iterations=2)

    # After the run is finished, the services started above need to be shutdown.
    # This ensures that the worker, the nameserver and the master all properly exit
    # and no (daemon) threads keep running afterwards.
    # In particular we shutdown the optimizer (which shuts down all workers) and the nameserver.
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown() 
Developer: automl; Project: CAVE; Lines: 43; Source file: extensive_tests.py
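
Example 1 imports MyWorker and get_configspace from a local neural_opt module that is not shown on this page, so what that worker actually computes is unknown here. As a purely hypothetical stand-in that satisfies the same interface: an hpbandster worker only needs to subclass Worker and implement a compute method that returns a dict with 'loss' and 'info' keys.

import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from hpbandster.core.worker import Worker

class MyWorker(Worker):
    def compute(self, config, budget, **kwargs):
        # Toy objective for illustration only: hpbandster minimizes 'loss'.
        # A real worker would train a model under the given budget and
        # report its validation error.
        return {'loss': (config['x'] - 0.5) ** 2, 'info': {'budget': budget}}

def get_configspace():
    cs = CS.ConfigurationSpace()
    cs.add_hyperparameter(CSH.UniformFloatHyperparameter('x', lower=0.0, upper=1.0))
    return cs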

Example 2: fit

# Required import: from hpbandster.core import result [as alias]
# Or: from hpbandster.core.result import json_result_logger [as alias]
def fit(self, pipeline_config, X_train, Y_train, X_valid, Y_valid, refit=False):

        autonet_logger = logging.getLogger('autonet')
        hpbandster_logger = logging.getLogger('hpbandster')

        level = self.logger_settings[pipeline_config['log_level']]
        autonet_logger.setLevel(level)
        hpbandster_logger.setLevel(level)

        autonet_logger.info("Start autonet with config:\n" + str(pipeline_config))
        result_logger = []
        if not refit:
            result_logger = json_result_logger(directory=pipeline_config["result_logger_dir"], overwrite=True)
        return {'X_train': X_train, 'Y_train': Y_train, 'X_valid': X_valid, 'Y_valid': Y_valid,
                'result_loggers': [result_logger], 'shutdownables': []}
Developer: automl; Project: Auto-PyTorch; Lines: 17; Source file: autonet_settings.py
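
Both examples above only write the log files. To analyze a finished run, the companion function logged_results_to_HBS_result from the same hpbandster.core.result module rebuilds a Result object from the configs.json/results.json pair that json_result_logger produced. A short sketch, reusing the directory from Example 1:

import hpbandster.core.result as hpres

result = hpres.logged_results_to_HBS_result('test/general_example/results/bohb_full_configspace')

id2config = result.get_id2config_mapping()
inc_id = result.get_incumbent_id()  # config id with the best loss on the largest budget
print('Incumbent configuration:', id2config[inc_id]['config'])

# Inspect every evaluation of the incumbent across budgets.
for run in result.get_runs_by_id(inc_id):
    print(run.budget, run.loss)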

Example 3: test_optimizer

# Required import: from hpbandster.core import result [as alias]
# Or: from hpbandster.core.result import json_result_logger [as alias]
def test_optimizer(self):

        class ResultNode(PipelineNode):
            def fit(self, X_train, Y_train):
                return {'loss': X_train.shape[1], 'info': {'train_a': X_train.shape[1], 'train_b': Y_train.shape[1]}}

            def get_hyperparameter_search_space(self, **pipeline_config):
                cs = CS.ConfigurationSpace()
                cs.add_hyperparameter(CSH.UniformIntegerHyperparameter('hyper', lower=0, upper=30))
                return cs
            
            def get_pipeline_config_options(self):
                return [
                    ConfigOption("result_logger_dir", default=".", type="directory"),
                    ConfigOption("optimize_metric", default="a", type=str),
                ]

        logger = logging.getLogger('hpbandster')
        logger.setLevel(logging.ERROR)
        logger = logging.getLogger('autonet')
        logger.setLevel(logging.ERROR)

        pipeline = Pipeline([
            OptimizationAlgorithm([
                ResultNode()
            ])
        ])

        pipeline_config = pipeline.get_pipeline_config(num_iterations=1, budget_type='epochs', result_logger_dir=".")
        pipeline.fit_pipeline(pipeline_config=pipeline_config, X_train=np.random.rand(15, 10), Y_train=np.random.rand(15, 5),
            X_valid=None, Y_valid=None, result_loggers=[json_result_logger(directory=".", overwrite=True)],
            dataset_info=None, shutdownables=[])

        result_of_opt_pipeline = pipeline[OptimizationAlgorithm.get_name()].fit_output['optimized_hyperparameter_config']
        print(pipeline[OptimizationAlgorithm.get_name()].fit_output)

        self.assertIn(result_of_opt_pipeline[ResultNode.get_name() + ConfigWrapper.delimiter + 'hyper'], list(range(0, 31))) 
Developer: automl; Project: Auto-PyTorch; Lines: 38; Source file: test_optimization_algorithm.py


Note: the hpbandster.core.result.json_result_logger examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before redistributing or using the code; do not reproduce without permission.