

Python result.json_result_logger Method Code Examples

This article collects typical usage examples of the Python method hpbandster.core.result.json_result_logger. If you are wondering what result.json_result_logger does, how to call it, or what working usage looks like, the curated code examples below may help. You can also explore further usage examples from its containing module, hpbandster.core.result.


Below are 3 code examples of the result.json_result_logger method, ordered by popularity by default.
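Before the full examples, here is a minimal sketch of the basic pattern: json_result_logger is constructed with a target directory and handed to an hpbandster optimizer via its result_logger argument; the run is streamed to JSON files in that directory and can later be loaded back for analysis. The directory name 'logs/my_run' is an arbitrary example.

import hpbandster.core.result as hpres

# Create a logger that streams the run to JSON files inside the given
# directory (configs.json and results.json, as written by hpbandster).
result_logger = hpres.json_result_logger(directory='logs/my_run', overwrite=True)

# After an optimizer has been run with result_logger=result_logger,
# the logged files can be loaded back into a Result object:
res = hpres.logged_results_to_HBS_result('logs/my_run')
print(res.get_incumbent_id())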

Example 1: generate_bohb_data

# Required module import: from hpbandster.core import result [as alias]
# Or alternatively: from hpbandster.core.result import json_result_logger [as alias]
def generate_bohb_data():
    import warnings
    import hpbandster.core.nameserver as hpns
    import hpbandster.core.result as hpres
    from hpbandster.optimizers import BOHB as BOHB

    run_id = '0'  # Every run has to have a unique (at runtime) id.
    NS = hpns.NameServer(run_id=run_id, host='localhost', port=0)
    ns_host, ns_port = NS.start()

    from neural_opt import MyWorker, get_configspace

    w = MyWorker(nameserver=ns_host,
                 nameserver_port=ns_port,
                 run_id=run_id,  # same as the nameserver's
                 )
    w.run(background=True)

    # Log the optimization results for later analysis
    result_logger = hpres.json_result_logger(directory='test/general_example/results/bohb_full_configspace',
                                             overwrite=True)

    bohb = BOHB(configspace=get_configspace(),
                run_id=run_id,  # same as nameserver's
                eta=2, min_budget=5, max_budget=100,  # Hyperband parameters
                nameserver=ns_host, nameserver_port=ns_port,
                result_logger=result_logger,
                )

    # Then start the optimizer. The n_iterations parameter specifies
    # the number of iterations to be performed in this run
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = bohb.run(n_iterations=2)

    # After the run is finished, the services started above need to be shutdown.
    # This ensures that the worker, the nameserver and the master all properly exit
    # and no (daemon) threads keep running afterwards.
    # In particular we shutdown the optimizer (which shuts down all workers) and the nameserver.
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown() 
Developer: automl | Project: CAVE | Lines: 43 | Source: extensive_tests.py
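As a possible follow-up to this example (not part of the original snippet), the run logged above can be reloaded with hpbandster's logged_results_to_HBS_result and inspected for the incumbent configuration:

import hpbandster.core.result as hpres

# Load the JSON files written by json_result_logger back into a Result object.
res = hpres.logged_results_to_HBS_result('test/general_example/results/bohb_full_configspace')

# The incumbent is the configuration with the best observed loss.
id2config = res.get_id2config_mapping()
incumbent = res.get_incumbent_id()
print('Best found configuration:', id2config[incumbent]['config'])

# Each logged run records its config id, budget, and loss.
for run in res.get_all_runs():
    print(run.config_id, run.budget, run.loss)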

Example 2: fit

# Required module import: from hpbandster.core import result [as alias]
# Or alternatively: from hpbandster.core.result import json_result_logger [as alias]
def fit(self, pipeline_config, X_train, Y_train, X_valid, Y_valid, refit=False):
    autonet_logger = logging.getLogger('autonet')
    hpbandster_logger = logging.getLogger('hpbandster')

    level = self.logger_settings[pipeline_config['log_level']]
    autonet_logger.setLevel(level)
    hpbandster_logger.setLevel(level)

    autonet_logger.info("Start autonet with config:\n" + str(pipeline_config))
    result_logger = []
    if not refit:
        result_logger = json_result_logger(directory=pipeline_config["result_logger_dir"], overwrite=True)
    return {'X_train': X_train, 'Y_train': Y_train, 'X_valid': X_valid, 'Y_valid': Y_valid,
            'result_loggers': [result_logger], 'shutdownables': []}
Developer: automl | Project: Auto-PyTorch | Lines: 17 | Source: autonet_settings.py
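For context (an assumption based on hpbandster's logger, not shown in this snippet): json_result_logger appends one JSON object per line to configs.json and results.json in the configured directory, so the log written to result_logger_dir can be read back with the standard json module. The log_dir value here is hypothetical.

import json
import os

log_dir = '.'  # hypothetical: whatever pipeline_config["result_logger_dir"] pointed to

# configs.json holds one [config_id, config, config_info] entry per line.
with open(os.path.join(log_dir, 'configs.json')) as fh:
    for line in fh:
        config_id, config, config_info = json.loads(line)
        print(config_id, config)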

Example 3: test_optimizer

# Required module import: from hpbandster.core import result [as alias]
# Or alternatively: from hpbandster.core.result import json_result_logger [as alias]
def test_optimizer(self):
    class ResultNode(PipelineNode):
        def fit(self, X_train, Y_train):
            return {'loss': X_train.shape[1], 'info': {'train_a': X_train.shape[1], 'train_b': Y_train.shape[1]}}

        def get_hyperparameter_search_space(self, **pipeline_config):
            cs = CS.ConfigurationSpace()
            cs.add_hyperparameter(CSH.UniformIntegerHyperparameter('hyper', lower=0, upper=30))
            return cs

        def get_pipeline_config_options(self):
            return [
                ConfigOption("result_logger_dir", default=".", type="directory"),
                ConfigOption("optimize_metric", default="a", type=str),
            ]

    logger = logging.getLogger('hpbandster')
    logger.setLevel(logging.ERROR)
    logger = logging.getLogger('autonet')
    logger.setLevel(logging.ERROR)

    pipeline = Pipeline([
        OptimizationAlgorithm([
            ResultNode()
        ])
    ])

    pipeline_config = pipeline.get_pipeline_config(num_iterations=1, budget_type='epochs', result_logger_dir=".")
    pipeline.fit_pipeline(pipeline_config=pipeline_config, X_train=np.random.rand(15, 10), Y_train=np.random.rand(15, 5),
                          X_valid=None, Y_valid=None,
                          result_loggers=[json_result_logger(directory=".", overwrite=True)],
                          dataset_info=None, shutdownables=[])

    result_of_opt_pipeline = pipeline[OptimizationAlgorithm.get_name()].fit_output['optimized_hyperparameter_config']
    print(pipeline[OptimizationAlgorithm.get_name()].fit_output)

    self.assertIn(result_of_opt_pipeline[ResultNode.get_name() + ConfigWrapper.delimiter + 'hyper'], list(range(0, 31)))
Developer: automl | Project: Auto-PyTorch | Lines: 38 | Source: test_optimization_algorithm.py
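To see the same logger outside the Auto-PyTorch pipeline machinery, here is a minimal self-contained sketch in plain hpbandster, assuming a toy objective; ToyWorker, get_toy_configspace, and the 'toy_results' directory are hypothetical names used for illustration only.

import ConfigSpace as CS
import hpbandster.core.nameserver as hpns
import hpbandster.core.result as hpres
from hpbandster.core.worker import Worker
from hpbandster.optimizers import BOHB


class ToyWorker(Worker):
    # Hypothetical worker: hpbandster calls compute() for each sampled config.
    def compute(self, config, budget, **kwargs):
        # hpbandster expects a dict with 'loss' (to be minimized) and 'info'.
        return {'loss': (config['x'] - 0.5) ** 2, 'info': {'budget': budget}}


def get_toy_configspace():
    cs = CS.ConfigurationSpace()
    cs.add_hyperparameter(CS.UniformFloatHyperparameter('x', lower=0.0, upper=1.0))
    return cs


if __name__ == '__main__':
    NS = hpns.NameServer(run_id='toy', host='localhost', port=0)
    host, port = NS.start()

    worker = ToyWorker(run_id='toy', nameserver=host, nameserver_port=port)
    worker.run(background=True)

    # The logger streams every evaluation to JSON files in 'toy_results'.
    logger = hpres.json_result_logger(directory='toy_results', overwrite=True)
    bohb = BOHB(configspace=get_toy_configspace(), run_id='toy',
                nameserver=host, nameserver_port=port,
                min_budget=1, max_budget=9, result_logger=logger)
    bohb.run(n_iterations=2)

    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()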


Note: The hpbandster.core.result.json_result_logger examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please consult each project's License before distributing or using the code; do not reproduce without permission.