

Python hp.uniform Method Code Examples

This article collects typical usage examples of the Python method hyperopt.hp.uniform. If you are wondering what exactly hp.uniform does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the hyperopt.hp module.


A total of 15 code examples of the hp.uniform method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
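
Before diving into the examples, here is a minimal, self-contained usage sketch that is not taken from any of the projects below: hp.uniform defines a continuous search range and fmin with the TPE algorithm minimizes a toy objective over it.

from hyperopt import fmin, hp, tpe

# hp.uniform(label, low, high) defines a continuous uniform prior on [low, high]
space = hp.uniform("x", -10, 10)

best = fmin(
    fn=lambda x: (x - 3) ** 2,   # toy objective to minimize
    space=space,
    algo=tpe.suggest,            # Tree-structured Parzen Estimator
    max_evals=100,
)
print(best)                      # e.g. {'x': 3.01...}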

Example 1: __init__

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def __init__(self):
        self.lmbda = [0.1, 0.2]
        self.feature_map_dropout = [0.1, 0.2, 0.5]
        self.input_dropout = [0.1, 0.2, 0.5]
        self.hidden_dropout = [0.1, 0.2, 0.5]
        self.use_bias = [True, False]
        self.label_smoothing = [0.1, 0.2, 0.5]
        self.lr_decay = [0.95, 0.9, 0.8]
        self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
        self.L1_flag = [True, False]
        self.hidden_size = [8, 16]
        self.batch_size = [256, 512]
        self.epochs = [2, 5, 10]
        self.margin = [0.4, 1.0, 2.0]
        self.optimizer = ["adam", "sgd", 'rms']
        self.sampling = ["uniform", "bern"] 
Developer: Sujit-O, Project: pykg2vec, Lines of code: 18, Source file: hyperparams.py
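
Example 1 only enumerates candidate values and does not call hp.uniform itself. As a purely illustrative sketch (not part of pykg2vec), the same attributes could be expressed as a hyperopt search space, using hp.uniform / hp.loguniform for continuous ranges and hp.choice for discrete options:

import numpy as np
from hyperopt import hp

search_space = {
    "lmbda": hp.uniform("lmbda", 0.1, 0.2),                          # continuous range
    "hidden_dropout": hp.choice("hidden_dropout", [0.1, 0.2, 0.5]),  # discrete candidates
    "learning_rate": hp.loguniform("learning_rate", np.log(1e-5), np.log(1.0)),
    "hidden_size": hp.choice("hidden_size", [8, 16]),
    "optimizer": hp.choice("optimizer", ["adam", "sgd", "rms"]),
}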

Example 2: test_operator_in

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def test_operator_in(self):
        a_or_b = configuration_space.CategoricalHyperparameter("a_or_b", ["a", "b"])
        cond_a = configuration_space.UniformFloatHyperparameter(
            'cond_a', 0, 1, conditions=[['a_or_b == a']])
        cond_b = configuration_space.UniformFloatHyperparameter(
            'cond_b', 0, 3, q=0.1, conditions=[['a_or_b == b']])
        e = configuration_space.UniformFloatHyperparameter("e", 0, 5,
                                     conditions=[['a_or_b in {a,b}']])
        conditional_space_operator_in = {"a_or_b": a_or_b, "cond_a": cond_a,
                                 "cond_b": cond_b, "e": e}
        cs = self.pyll_writer.write(conditional_space_operator_in)
        expected = StringIO.StringIO()
        expected.write('from hyperopt import hp\nimport hyperopt.pyll as pyll')
        expected.write('\n\n')
        expected.write('param_0 = hp.uniform("cond_a", 0.0, 1.0)\n')
        expected.write('param_1 = hp.quniform("cond_b", -0.0499, 3.05, 0.1)\n')
        expected.write('param_2 = hp.uniform("e", 0.0, 5.0)\n')
        expected.write('param_3 = hp.choice("a_or_b", [\n')
        expected.write('    {"a_or_b": "a", "cond_a": param_0, "e": param_2, '
                       '},\n')
        expected.write('    {"a_or_b": "b", "cond_b": param_1, "e": param_2, '
                       '},\n')
        expected.write('    ])\n\n')
        expected.write('space = {"a_or_b": param_3}\n')
        self.assertEqual(expected.getvalue(), cs) 
Developer: automl, Project: HPOlib, Lines of code: 27, Source file: test_pyll_util.py

Example 3: test_write_uniform

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def test_write_uniform(self):
        a = configuration_space.UniformFloatHyperparameter("a", 0, 1)
        expected = ('a', 'param_0 = hp.uniform("a", 0.0, 1.0)')
        value = self.pyll_writer.write_hyperparameter(a, None)
        self.assertEqual(expected, value)

        # The hyperparameter name has to be converted separately because
        # otherwise the parameter values are converted at object construction
        # time
        a = configuration_space.UniformFloatHyperparameter("a", 1, 10, base=10)
        a.name = self.pyll_writer.convert_name(a)
        expected = ('LOG10_a', 'param_1 = hp.uniform("LOG10_a", 0.0, 1.0)')
        value = self.pyll_writer.write_hyperparameter(a, None)
        self.assertEqual(expected, value)

        nhid1 = configuration_space.UniformFloatHyperparameter(
            "nhid1", 16, 1024, q=16, base=np.e)
        expected = ('nhid1', 'param_2 = hp.qloguniform('
                    '"nhid1", 2.0794540416, 6.93925394604, 16.0)')
        value = self.pyll_writer.write_hyperparameter(nhid1, None)
        self.assertEqual(expected, value) 
Developer: automl, Project: HPOlib, Lines of code: 23, Source file: test_pyll_util.py

Example 4: test_write_uniform_int

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def test_write_uniform_int(self):
        a_int = configuration_space.UniformIntegerHyperparameter("a_int", 0, 1)
        expected = ('a_int', 'param_0 = pyll.scope.int(hp.quniform('
                             '"a_int", -0.49999, 1.5, 1.0))')
        value = self.pyll_writer.write_hyperparameter(a_int, None)
        self.assertEqual(expected, value)

        # Test for the problem that if a parameter has Q not None and is on
        # log scale, the Q must not be in the hp object, but the
        # hyperparameter name. If this is done the other way round,
        # the log-value of the hyperparameter is quantized
        a_int = configuration_space.UniformIntegerHyperparameter(
            "a_int", 1, 1000, base=10)
        a_int.name = self.pyll_writer.convert_name(a_int)
        expected = ('LOG10_Q1_a_int', 'param_1 = hp.uniform('
                    '"LOG10_Q1_a_int", -0.301021309861, 3.00021709297)')
        value = self.pyll_writer.write_hyperparameter(a_int, None)
        self.assertEqual(expected, value) 
Developer: automl, Project: HPOlib, Lines of code: 20, Source file: test_pyll_util.py

Example 5: set_basic_conf

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def set_basic_conf(self):
        space = {
            "x": hp.uniform("x", 0, 10),
            "y": hp.uniform("y", -10, 10),
            "z": hp.uniform("z", -10, 0)
        }

        def cost(space, reporter):
            loss = space["x"]**2 + space["y"]**2 + space["z"]**2
            reporter(loss=loss)

        search_alg = HyperOptSearch(
            space,
            metric="loss",
            mode="min",
            random_state_seed=5,
            n_initial_points=1,
            max_concurrent=1000  # Here to avoid breaking back-compat.
        )
        return search_alg, cost 
Developer: ray-project, Project: ray, Lines of code: 22, Source file: test_tune_restore.py
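
For context, a hypothetical driver for the configuration above might look as follows, assuming a Ray Tune release contemporary with this test (one whose function API still accepts a (config, reporter) trainable and whose HyperOptSearch takes max_concurrent); the calls themselves are standard Ray Tune APIs from that era:

from ray import tune

search_alg, cost = set_basic_conf()   # as defined above (a method of the test class in the original)
analysis = tune.run(cost, search_alg=search_alg, num_samples=20)
print(analysis.get_best_config(metric="loss", mode="min"))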

Example 6: score

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def score(infile, outfile, classifier, xgb_autotune, apply_weights, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, ss_score_filter):
    """
    Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data. 
    """

    if outfile is None:
        outfile = infile

    # Prepare XGBoost-specific parameters
    xgb_hyperparams = {'autotune': xgb_autotune, 'autotune_num_rounds': 10, 'num_boost_round': 100,
                       'early_stopping_rounds': 10, 'test_size': 0.33}

    xgb_params = {'eta': 0.3, 'gamma': 0, 'max_depth': 6, 'min_child_weight': 1, 'subsample': 1,
                  'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1,
                  'lambda': 1, 'alpha': 0, 'scale_pos_weight': 1, 'silent': 1,
                  'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}

    xgb_params_space = {'eta': hp.uniform('eta', 0.0, 0.3), 'gamma': hp.uniform('gamma', 0.0, 0.5),
                        'max_depth': hp.quniform('max_depth', 2, 8, 1),
                        'min_child_weight': hp.quniform('min_child_weight', 1, 5, 1),
                        'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1,
                        'lambda': hp.uniform('lambda', 0.0, 1.0), 'alpha': hp.uniform('alpha', 0.0, 1.0),
                        'scale_pos_weight': 1.0, 'silent': 1, 'objective': 'binary:logitraw',
                        'nthread': 1, 'eval_metric': 'auc'}

    if not apply_weights:
        PyProphetLearner(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, ss_score_filter).run()
    else:
        PyProphetWeightApplier(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, apply_weights, ss_score_filter).run()


# IPF 
Developer: PyProphet, Project: pyprophet, Lines of code: 26, Source file: main.py
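
A common pitfall with a space like xgb_params_space above is that hp.quniform returns floats, while XGBoost expects integers for parameters such as max_depth. The following sketch is not part of pyprophet; tune_xgb, dtrain, dvalid and y_valid are hypothetical placeholders. It only illustrates one way such a space could be fed to hyperopt.fmin, with the quantized parameters cast back to int:

import xgboost as xgb
from hyperopt import fmin, tpe
from sklearn.metrics import roc_auc_score

def tune_xgb(dtrain, dvalid, y_valid, params_space, max_evals=25):
    def objective(params):
        params = dict(params)
        params["max_depth"] = int(params["max_depth"])                # hp.quniform yields floats
        params["min_child_weight"] = int(params["min_child_weight"])
        booster = xgb.train(params, dtrain, num_boost_round=100)
        preds = booster.predict(dvalid)                               # raw scores are fine for AUC
        return 1.0 - roc_auc_score(y_valid, preds)                    # fmin minimizes
    return fmin(fn=objective, space=params_space, algo=tpe.suggest, max_evals=max_evals)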

Example 7: full_hyper_space

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def full_hyper_space(self):
        from hyperopt import hp

        space_1, choices_1 = self.query_model1.hyper_space()
        space_2, choices_2 = self.query_model2.hyper_space()
        parameter_space = {}
        hyper_choices = {}
        for key, value in space_1.items():
            new_key = "qry_" + self.strategy_1 + key[4:]
            parameter_space[new_key] = value
            hyper_choices[new_key] = choices_1[key]

        for key, value in space_2.items():
            new_key = "qry_" + self.strategy_2 + key[4:]
            parameter_space[new_key] = value
            hyper_choices[new_key] = choices_2[key]

        parameter_space["qry_mix_ratio"] = hp.uniform(
            "qry_mix_ratio", 0, 1)

        return parameter_space, hyper_choices 
Developer: asreview, Project: asreview, Lines of code: 23, Source file: mixed.py

Example 8: target

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def target(args):
        w1, w2, w3 = args
        r = a + b * w1 + c * w2 + d * w3
        result = r.topk(5, 1)[1]
        predict_label_and_marked_label_list = [[_1, _2] for _1, _2 in zip(result, true_labels)]
        score, _, _, _ = get_score(predict_label_and_marked_label_list)
        print(args, score, _)  # list_space = [hp.uniform('a', 0, 1), hp.uniform('b', 0, 1)]
        return -score
Developer: chenyuntc, Project: PyTorchText, Lines of code: 10, Source file: search2.py

Example 9: target

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def target(args):
    r = 0
    for r_, k_ in zip(args, probs):
        r = r + r_ * k_
    result = r.topk(5, 1)[1]
    predict_label_and_marked_label_list = [[_1, _2] for _1, _2 in zip(result, true_labels)]
    score, _, _, _ = get_score(predict_label_and_marked_label_list)
    print(args, score, _)  # list_space = [hp.uniform('a', 0, 1), hp.uniform('b', 0, 1)]
    return -score
Developer: chenyuntc, Project: PyTorchText, Lines of code: 11, Source file: search_paris.py

Example 10: target

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def target(args):
    r = 0
    for r_, k_ in enumerate(args):
        if r_ < model_num:
            r += k_ * probs[r_]
        else:
            tmp = t.load(files_path[r_]).cuda().float()
            r = r + k_ * tmp.cpu()
    result = r.topk(5, 1)[1]
    predict_label_and_marked_label_list = [[_1, _2] for _1, _2 in zip(result, true_labels)]
    score, _, _, rrs = get_score(predict_label_and_marked_label_list)
    print(args, score, rrs)  # list_space = [hp.uniform('a', 0, 1), hp.uniform('b', 0, 1)]
    return -rrs[0]
Developer: chenyuntc, Project: PyTorchText, Lines of code: 15, Source file: search_all.py

Example 11: target

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def target(args):
    r = 0
    for r_, k_ in enumerate(args):
        if r_ < model_num:
            r += k_ * probs[r_]
        else:
            tmp = t.load(files_path[r_]).cuda().float()
            r = r + k_ * tmp.cpu()
    result = r.topk(5, 1)[1]
    predict_label_and_marked_label_list = [[_1, _2] for _1, _2 in zip(result, true_labels)]
    score, _, _, _ = get_score(predict_label_and_marked_label_list)
    print(args, score, _)  # list_space = [hp.uniform('a', 0, 1), hp.uniform('b', 0, 1)]
    return -score
Developer: chenyuntc, Project: PyTorchText, Lines of code: 15, Source file: searchstack_new.py

Example 12: target

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def target(args):
    r = 0
    for r_, k_ in enumerate(args):
        if r_ < model_num:
            r += k_ * probs[r_]
        else:
            tmp = t.load(files_path[r_]).cuda().float()
            r = r + k_ * tmp.cpu()
    result = r.topk(5, 1)[1]
    predict_label_and_marked_label_list = [[_1, _2] for _1, _2 in zip(result, true_labels)]
    score, _, _, rrs = get_score(predict_label_and_marked_label_list)
    print(args, score, rrs)  # list_space = [hp.uniform('a', 0, 1), hp.uniform('b', 0, 1)]
    return -rrs[0]
Developer: chenyuntc, Project: PyTorchText, Lines of code: 15, Source file: search_aug_noMultimodel_weight1.py

Example 13: hyperopt_lightgbm_basic

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def hyperopt_lightgbm_basic(X, y, params, config, max_evals=50):
    X_train, X_test, y_train, y_test = data_split_by_time(X, y, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X_train, y_train, test_size=0.3)  # split from the training portion so the validation set does not overlap X_test
    train_data = lgb.Dataset(X_train, label=y_train)
    val_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        #"forgetting_factor": hp.loguniform("forgetting_factor", 0.01, 0.1)
        #"max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 100,
                        val_data, early_stopping_rounds=30, verbose_eval=0)
        pred = model.predict(X_test)
        score = roc_auc_score(y_test, pred)
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
Developer: DominickZhang, Project: KDDCup2019_admin, Lines of code: 37, Source file: automl.py

Example 14: hyperopt_lightgbm

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def hyperopt_lightgbm(X_train: pd.DataFrame, y_train: pd.Series, params: Dict, config: Config, max_evals=10):
    X_train, X_test, y_train, y_test = data_split_by_time(X_train, y_train, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X_train, y_train, test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        #"max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        if config.time_left() < 50:
            return {'status': STATUS_FAIL}
        else:
            model = lgb.train({**params, **hyperparams}, train_data, 100,
                          valid_data, early_stopping_rounds=10, verbose_eval=0)
            pred = model.predict(X_test)
            score = roc_auc_score(y_test, pred)

            #score = model.best_score["valid_0"][params["metric"]]

            # in classification, less is better
            return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
Developer: DominickZhang, Project: KDDCup2019_admin, Lines of code: 43, Source file: automl.py

Example 15: hyperopt_lightgbm

# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import uniform [as alias]
def hyperopt_lightgbm(X: pd.DataFrame, y: pd.Series, params: Dict, config: Config):
    X_train, X_val, y_train, y_val = data_split(X, y, test_size=0.5)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "max_depth": hp.choice("max_depth", np.arange(2, 10, 1, dtype=int)),
        # smaller than 2^(max_depth)
        "num_leaves": hp.choice("num_leaves", np.arange(4, 200, 4, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.2, 0.8, 0.1),
        # "bagging_fraction": hp.quniform("bagging_fraction", 0.2, 0.8, 0.1),
        # "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 10, 2, dtype=int)),
        # "scale_pos_weight":hp.uniform('scale_pos_weight',1.0, 10.0),
        # "colsample_by_tree":hp.uniform("colsample_bytree",0.5,1.0),
        "min_child_weight": hp.quniform('min_child_weight', 2, 50, 2),
        "reg_alpha": hp.uniform("reg_alpha", 2.0, 8.0),
        "reg_lambda": hp.uniform("reg_lambda", 2.0, 8.0),
        "learning_rate": hp.quniform("learning_rate", 0.05, 0.4, 0.01),
        # "learning_rate": hp.loguniform("learning_rate", np.log(0.04), np.log(0.5)),
        #
        "min_data_in_leaf": hp.choice('min_data_in_leaf', np.arange(200, 2000, 100, dtype=int)),
        #"is_unbalance": hp.choice("is_unbalance", [True])
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 300,
                          valid_data, early_stopping_rounds=45, verbose_eval=0)

        score = model.best_score["valid_0"][params["metric"]]

        # in classification, less is better
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=150, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
Developer: shuyao95, Project: kddcup2019-automl, Lines of code: 43, Source file: automl.py


Note: The hyperopt.hp.uniform examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not republish without permission.