

Python hp.loguniform Method Code Examples

This article collects typical usage examples of the hyperopt.hp.loguniform method in Python. If you are unsure what hp.loguniform does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore other usage examples of the hyperopt.hp module for further context.


The following presents 12 code examples of the hp.loguniform method, ordered by popularity by default.
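The snippets are excerpted from larger modules, so their import statements are not shown. A minimal preamble that the LightGBM-based examples below appear to assume (inferred from the calls they make, not taken from the original projects) would look like this; project-specific helpers such as data_split_by_time, Config and log are not covered:

import numpy as np
import pandas as pd
import lightgbm as lgb
import hyperopt
from hyperopt import hp, tpe, Trials, STATUS_OK, STATUS_FAIL, space_eval
from sklearn.metrics import roc_auc_score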

Example 1: hyperopt_lightgbm_basic

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def hyperopt_lightgbm_basic(X, y, params, config, max_evals=50):
    X_train, X_test, y_train, y_test = data_split_by_time(X, y, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X, y, test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    val_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        #"forgetting_factor": hp.loguniform("forgetting_factor", 0.01, 0.1)
        #"max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 100,
                          val_data, early_stopping_rounds=30, verbose_eval=0)
        pred = model.predict(X_test)
        score = roc_auc_score(y_test, pred)
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
Developer: DominickZhang, Project: KDDCup2019_admin, Lines of code: 37, Source file: automl.py

Example 2: hyperopt_lightgbm

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def hyperopt_lightgbm(X_train: pd.DataFrame, y_train: pd.Series, params: Dict, config: Config, max_evals=10):
    X_train, X_test, y_train, y_test = data_split_by_time(X_train, y_train, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X_train, y_train, test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        #"max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        if config.time_left() < 50:
            return {'status': STATUS_FAIL}
        else:
            model = lgb.train({**params, **hyperparams}, train_data, 100,
                              valid_data, early_stopping_rounds=10, verbose_eval=0)
            pred = model.predict(X_test)
            score = roc_auc_score(y_test, pred)

            #score = model.best_score["valid_0"][params["metric"]]

            # hyperopt minimizes the loss, so return the negated AUC (higher AUC is better)
            return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
Developer: DominickZhang, Project: KDDCup2019_admin, Lines of code: 43, Source file: automl.py

Example 3: __init__

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def __init__(self):
        self.search_space = {
          'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
          'L1_flag': hp.choice('L1_flag', [True, False]),
          'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256),1)),
          'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
          'margin': hp.uniform('margin', 0.0, 10.0),
          'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
          'epochs': hp.choice('epochs', [500]) # always train for 500 epochs.
        } 
Developer: Sujit-O, Project: pykg2vec, Lines of code: 12, Source file: hyperparams.py

Example 4: __init__

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def __init__(self, train_file, test_file, gpu_mode, model_out_dir, log_dir, max_epochs, batch_size, num_workers):
        """
        Initialize the object
        :param train_file: A train CSV file containing train data set information
        :param test_file: A test CSV file containing test data set information
        :param gpu_mode: If true, GPU will be used to train and test the models
        :param model_out_dir: Directory to save the model
        :param log_dir: Directory to save the log
        """
        # the hyper-parameter space is defined here
        self.space = {
            # hp.loguniform returns a value drawn according to exp(uniform(low, high)) so that the logarithm of the
            # return value is uniformly distributed.
            'lr': hp.loguniform('lr', -8, -4),
            'l2': hp.loguniform('l2', -12, -4)
        }
        self.train_file = train_file
        self.test_file = test_file
        self.gpu_mode = gpu_mode
        self.log_directory = log_dir
        self.model_out_dir = model_out_dir
        self.max_epochs = max_epochs
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.hidden_size = TrainOptions.HIDDEN_SIZE
        self.gru_layers = TrainOptions.GRU_LAYERS 
Developer: kishwarshafin, Project: helen, Lines of code: 28, Source file: hyperbandInterface.py
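As the comment in Example 4 notes, hp.loguniform('lr', -8, -4) draws lr = exp(u) with u ~ Uniform(-8, -4), so lr spans roughly exp(-8) ≈ 3.4e-4 to exp(-4) ≈ 1.8e-2. A quick way to sanity-check the scale of such a space is to sample from it directly (a standalone sketch, not part of the helen project):

import numpy as np
from hyperopt import hp
from hyperopt.pyll.stochastic import sample

space = {
    'lr': hp.loguniform('lr', -8, -4),
    'l2': hp.loguniform('l2', -12, -4),
}

print([round(sample(space)['lr'], 5) for _ in range(5)])  # values between ~0.00034 and ~0.018
print(np.exp(-8), np.exp(-4))  # the effective bounds: ~0.000335 and ~0.0183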

Example 5: hyperopt_lightgbm

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def hyperopt_lightgbm(X: pd.DataFrame, y: pd.Series, params: Dict, config: Config):
    X_train, X_val, y_train, y_val = data_split(X, y, test_size=0.5)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "max_depth": hp.choice("max_depth", np.arange(2, 10, 1, dtype=int)),
        # smaller than 2^(max_depth)
        "num_leaves": hp.choice("num_leaves", np.arange(4, 200, 4, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.2, 0.8, 0.1),
        # "bagging_fraction": hp.quniform("bagging_fraction", 0.2, 0.8, 0.1),
        # "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 10, 2, dtype=int)),
        # "scale_pos_weight":hp.uniform('scale_pos_weight',1.0, 10.0),
        # "colsample_by_tree":hp.uniform("colsample_bytree",0.5,1.0),
        "min_child_weight": hp.quniform('min_child_weight', 2, 50, 2),
        "reg_alpha": hp.uniform("reg_alpha", 2.0, 8.0),
        "reg_lambda": hp.uniform("reg_lambda", 2.0, 8.0),
        "learning_rate": hp.quniform("learning_rate", 0.05, 0.4, 0.01),
        # "learning_rate": hp.loguniform("learning_rate", np.log(0.04), np.log(0.5)),
        #
        "min_data_in_leaf": hp.choice('min_data_in_leaf', np.arange(200, 2000, 100, dtype=int)),
        #"is_unbalance": hp.choice("is_unbalance", [True])
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 300,
                          valid_data, early_stopping_rounds=45, verbose_eval=0)

        score = model.best_score["valid_0"][params["metric"]]

        # hyperopt minimizes the loss, so negate the score (higher is better)
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=150, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
Developer: shuyao95, Project: kddcup2019-automl, Lines of code: 43, Source file: automl.py

Example 6: predict

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def predict(self, x):
        data = xgb.DMatrix(x)
        pred = self.model.predict(data)
        return pred


# -----------------------------------
# Specification of the parameter space to search
# -----------------------------------
# hp.choice picks one value from a list of candidates
# hp.uniform samples from a uniform distribution between a lower and an upper bound; arguments are the bounds
# hp.quniform samples from a uniform distribution between the bounds at fixed intervals; arguments are lower bound, upper bound, and step
# hp.loguniform samples values whose logarithm is uniformly distributed between the bounds; arguments are the logs of the lower and upper bounds
Developer: ghmagazine, Project: kagglebook, Lines of code: 15, Source file: ch06-01-hopt.py
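A compact illustration of the four expressions described in the comments above (the parameter names and bounds here are illustrative, not taken from the book's code):

import numpy as np
from hyperopt import hp

space = {
    # hp.choice: pick one of the listed candidates
    'booster': hp.choice('booster', ['gbtree', 'dart']),
    # hp.uniform: uniform between a lower and an upper bound
    'subsample': hp.uniform('subsample', 0.6, 1.0),
    # hp.quniform: uniform between the bounds, rounded to a step of 0.1
    'colsample_bytree': hp.quniform('colsample_bytree', 0.6, 1.0, 0.1),
    # hp.loguniform: pass the logs of the bounds; samples span 0.01 to 0.5
    'learning_rate': hp.loguniform('learning_rate', np.log(0.01), np.log(0.5)),
}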

Example 7: test_read_loguniform

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def test_read_loguniform(self):
        # 0 float
        # 1   hyperopt_param
        # 2     Literal{colnorm_thresh}
        # 3     loguniform
        # 4       Literal{-20.7232658369}
        # 5       Literal{-6.90775527898}
        loguniform = hp.loguniform('colnorm_thresh', np.log(1e-9),
            np.log(1e-3)).inputs()[0].inputs()[1]
        ret = self.pyll_reader.read_loguniform(loguniform, 'colnorm_thresh')
        expected = configuration_space.UniformFloatHyperparameter(
            'colnorm_thresh', 1e-9, 1e-3, base=np.e)
        self.assertEqual(expected, ret) 
Developer: automl, Project: HPOlib, Lines of code: 15, Source file: test_pyll_util.py

Example 8: test_write_loguniform

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def test_write_loguniform(self):
        c = configuration_space.UniformFloatHyperparameter("c", 0.001, 1, base=np.e)
        expected = ("c", 'param_0 = hp.loguniform("c", -6.90775527898, 0.0)')
        value = self.pyll_writer.write_hyperparameter(c, None)
        self.assertEqual(expected, value) 
Developer: automl, Project: HPOlib, Lines of code: 7, Source file: test_pyll_util.py
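The constants appearing in Examples 7 and 8 are simply the natural logarithms of the distribution bounds, so the two ways of writing the same loguniform hyperparameter line up as follows (a sketch for illustration; the label 'c_alt' is made up to avoid reusing a hyperopt label):

import numpy as np
from hyperopt import hp

print(np.log(1e-3))  # -6.907755278982137, the lower bound used in Example 8
print(np.log(1e-9))  # -20.72326583694641, the lower bound shown in Example 7

c1 = hp.loguniform('c', np.log(0.001), np.log(1.0))  # bounds given as log(low), log(high)
c2 = hp.loguniform('c_alt', -6.90775527898, 0.0)     # same distribution, pre-computed logs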

Example 9: tpe_configspace

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def tpe_configspace(self):
        from hyperopt import hp
        import numpy as np
        space = {
            'l_rate': hp.loguniform('l_rate', np.log(1e-6), np.log(1e-1)),
            'burn_in': hp.uniform('burn_in', 0, .8),
            'n_units_1': hp.qloguniform('n_units_1', np.log(16), np.log(512), 1),
            'n_units_2': hp.qloguniform('n_units_2', np.log(16), np.log(512), 1),
            'mdecay': hp.uniform('mdecay', 0, 1)
        }
        return(space) 
Developer: automl, Project: BOAH, Lines of code: 13, Source file: bnn_worker.py

Example 10: tpe_configspace

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def tpe_configspace(self):

        import numpy as np
        from hyperopt import hp

        space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(1e-7), np.log(1e-1)),
            'batch_size': hp.qloguniform('batch_size', np.log(8), np.log(256), 1),
            'n_units_1': hp.qloguniform('n_units_1', np.log(8), np.log(128), 1),
            'n_units_2': hp.qloguniform('n_units_2', np.log(8), np.log(128), 1),
            'discount': hp.uniform('discount', 0, 1),
            'likelihood_ratio_clipping': hp.uniform('likelihood_ratio_clipping', 0, 1),
            'entropy_regularization': hp.uniform('entropy_regularization', 0, 1)
        }
        return(space) 
Developer: automl, Project: BOAH, Lines of code: 17, Source file: cartpole_worker.py

Example 11: get_hyperopt_dimensions

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def get_hyperopt_dimensions(api_config):
        """Help routine to setup hyperopt search space in constructor.

        Take api_config as argument so this can be static.
        """
        # The ordering of iteration probably makes no difference, but just to be
        # safe and consistent with space.py, sort the parameters.
        param_list = sorted(api_config.keys())

        space = {}
        round_to_values = {}
        for param_name in param_list:
            param_config = api_config[param_name]

            param_type = param_config["type"]

            param_space = param_config.get("space", None)
            param_range = param_config.get("range", None)
            param_values = param_config.get("values", None)

            # Some setup for case that whitelist of values is provided:
            values_only_type = param_type in ("cat", "ordinal")
            if (param_values is not None) and (not values_only_type):
                assert param_range is None
                param_values = np.unique(param_values)
                param_range = (param_values[0], param_values[-1])
                round_to_values[param_name] = interp1d(
                    param_values, param_values, kind="nearest", fill_value="extrapolate"
                )

            if param_type == "int":
                low, high = param_range
                if param_space in ("log", "logit"):
                    space[param_name] = hp.qloguniform(param_name, np.log(low), np.log(high), 1)
                else:
                    space[param_name] = hp.quniform(param_name, low, high, 1)
            elif param_type == "bool":
                assert param_range is None
                assert param_values is None
                space[param_name] = hp.choice(param_name, (False, True))
            elif param_type in ("cat", "ordinal"):
                assert param_range is None
                space[param_name] = hp.choice(param_name, param_values)
            elif param_type == "real":
                low, high = param_range
                if param_space in ("log", "logit"):
                    space[param_name] = hp.loguniform(param_name, np.log(low), np.log(high))
                else:
                    space[param_name] = hp.uniform(param_name, low, high)
            else:
                assert False, "type %s not handled in API" % param_type

        return space, round_to_values 
Developer: uber, Project: bayesmark, Lines of code: 55, Source file: hyperopt_optimizer.py
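A hypothetical api_config showing how the routine above maps parameter descriptions onto hyperopt expressions; the dict layout follows the keys the code reads (type, space, range, values), but the concrete parameters are invented for illustration:

api_config = {
    'learning_rate': {'type': 'real', 'space': 'log', 'range': (1e-4, 1e-1)},  # -> hp.loguniform
    'n_estimators': {'type': 'int', 'space': 'linear', 'range': (10, 500)},    # -> hp.quniform(..., 1)
    'use_bagging': {'type': 'bool'},                                           # -> hp.choice over (False, True)
    'booster': {'type': 'cat', 'values': ['gbtree', 'dart']},                  # -> hp.choice over the values
}

# space, round_to_values = get_hyperopt_dimensions(api_config)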

Example 12: visitSearchSpaceNumber

# Required imports: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import loguniform [as alias]
def visitSearchSpaceNumber(self, space:SearchSpaceNumber, path:str, counter=None):
        label = self.mk_label(path, counter)

        if space.pgo is not None:
            return scope.pgo_sample(space.pgo, hp.quniform(label, 0, len(space.pgo)-1, 1))

        dist = "uniform"
        if space.distribution:
            dist = space.distribution

        if space.maximum is None:
            raise SearchSpaceError(path, f"maximum not specified for a number with distribution {dist}")
        max = space.getInclusiveMax()

        # These distributions need only a maximum
        if dist == "integer":
            if not space.discrete:
                raise SearchSpaceError(path, "integer distribution specified for a non discrete numeric type")
            return hp.randint(label, max)

        if space.minimum is None:
            raise SearchSpaceError(path, f"minimum not specified for a number with distribution {dist}")
        min = space.getInclusiveMin()

        if dist == "uniform":
            if space.discrete:
                return scope.int(hp.quniform(label, min, max, 1))
            else:
                return hp.uniform(label, min, max)
        elif dist == "loguniform":
            # for log distributions, hyperopt requires that we provide the log of the min/max
            if min <= 0:
                raise SearchSpaceError(path, f"minimum of 0 specified with a {dist} distribution.  This is not allowed; please set it (possibly using minimumForOptimizer) to be positive")
            if min > 0:
                min = math.log(min)
            if max > 0:
                max = math.log(max)
            if space.discrete:
                return scope.int(hp.qloguniform(label, min, max, 1))
            else:
                return hp.loguniform(label, min, max)

        else:
            raise SearchSpaceError(path, f"Unknown distribution type: {dist}") 
Developer: IBM, Project: lale, Lines of code: 46, Source file: lale_hyperopt.py
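Stripped of the lale-specific plumbing, the loguniform branch above reduces to the following transformation (a standalone sketch; minimum, maximum and discrete stand in for space.getInclusiveMin(), space.getInclusiveMax() and space.discrete):

import math
from hyperopt import hp
from hyperopt.pyll import scope

def loguniform_expr(label, minimum, maximum, discrete=False):
    # hyperopt expects the logs of the bounds, and the bounds must be positive
    if minimum <= 0:
        raise ValueError(f"{label}: minimum must be positive for a loguniform distribution")
    low, high = math.log(minimum), math.log(maximum)
    if discrete:
        return scope.int(hp.qloguniform(label, low, high, 1))
    return hp.loguniform(label, low, high)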


Note: The hyperopt.hp.loguniform examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow each project's license. Please do not republish without permission.