This article collects typical code examples of the hyperopt.hp.quniform method in Python. If you are trying to work out what hp.quniform does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples from the hyperopt.hp module.
The following shows 15 code examples of hp.quniform, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
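Before the examples, a minimal self-contained sketch of the typical hp.quniform workflow may be useful. It is not taken from any of the repositories below, and the parameter names and toy objective are purely illustrative. hp.quniform(label, low, high, q) samples uniformly between low and high and rounds the result to a multiple of q, but still returns a float, so it is usually cast to int inside the objective.

from hyperopt import hp, fmin, tpe, Trials

# Hypothetical search space: a quantized integer-like parameter and a float.
space = {
    'n_estimators': hp.quniform('n_estimators', 50, 500, 10),
    'learning_rate': hp.uniform('learning_rate', 0.01, 0.3),
}

def objective(params):
    # quniform returns a float (e.g. 240.0); cast before passing to an estimator.
    n_estimators = int(params['n_estimators'])
    # Toy loss standing in for a real cross-validation score.
    return (n_estimators - 200) ** 2 * params['learning_rate']

trials = Trials()
best = fmin(fn=objective, space=space, algo=tpe.suggest,
            max_evals=20, trials=trials)
print(best)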
Example 1: test_read_quniform
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def test_read_quniform(self):
    # TODO scope.int(hp.quniform('liblinear:LOG2_C', -5, 15, 1))
    # 0 float
    # 1 hyperopt_param
    # 2 Literal{l0eg_fsize}
    # 3 quniform
    # 4 Literal{2.50001}
    # 5 Literal{8.5}
    # 6 Literal{1}
    quniform = hp.quniform('l0eg_fsize', 2.50001, 8.5, 1). \
        inputs()[0].inputs()[1]
    ret = self.pyll_reader.read_quniform(quniform, 'l0eg_fsize')
    expected = configuration_space.UniformIntegerHyperparameter(
        'l0eg_fsize', 3, 8)
    self.assertEqual(expected, ret)
    l2_out_lp_psize = hp.quniform("l2_out_lp_psize", 0.50001, 5.5, 1). \
        inputs()[0].inputs()[1]
    ret = self.pyll_reader.read_quniform(l2_out_lp_psize, "l2_out_lp_psize")
    expected = configuration_space.UniformIntegerHyperparameter(
        "l2_out_lp_psize", 1, 5)
    self.assertEqual(expected, ret)
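The TODO at the top of this test hints at the common pattern of wrapping hp.quniform in pyll's scope.int so that the sampled value is an integer rather than a float. A minimal sketch of that pattern follows, with sampling code added purely for illustration (the parameter name is taken from the TODO):

from hyperopt import hp
from hyperopt.pyll import scope
import hyperopt.pyll.stochastic

# scope.int casts the quantized draw, so samples are ints between -5 and 15.
log2_c = scope.int(hp.quniform('liblinear:LOG2_C', -5, 15, 1))
print(hyperopt.pyll.stochastic.sample(log2_c))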
Example 2: test_convert_conditional_space
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def test_convert_conditional_space(self):
    a_or_b = configuration_space.CategoricalHyperparameter("a_or_b", ["a", "b"])
    cond_a = configuration_space.UniformFloatHyperparameter(
        'cond_a', 0, 1, conditions=[['a_or_b == a']])
    cond_b = configuration_space.UniformFloatHyperparameter(
        'cond_b', 0, 3, q=0.1, conditions=[['a_or_b == b']])
    conditional_space = {"a_or_b": a_or_b, "cond_a": cond_a, "cond_b": cond_b}
    cs = self.pyll_writer.write(conditional_space)
    expected = StringIO.StringIO()
    expected.write('from hyperopt import hp\nimport hyperopt.pyll as pyll')
    expected.write('\n\n')
    expected.write('param_0 = hp.uniform("cond_a", 0.0, 1.0)\n')
    expected.write('param_1 = hp.quniform("cond_b", -0.0499, 3.05, 0.1)\n')
    expected.write('param_2 = hp.choice("a_or_b", [\n')
    expected.write(' {"a_or_b": "a", "cond_a": param_0, },\n')
    expected.write(' {"a_or_b": "b", "cond_b": param_1, },\n')
    expected.write(' ])\n\n')
    expected.write('space = {"a_or_b": param_2}\n')
    self.assertEqual(expected.getvalue(), cs)
Example 3: test_operator_in
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def test_operator_in(self):
    a_or_b = configuration_space.CategoricalHyperparameter("a_or_b", ["a", "b"])
    cond_a = configuration_space.UniformFloatHyperparameter(
        'cond_a', 0, 1, conditions=[['a_or_b == a']])
    cond_b = configuration_space.UniformFloatHyperparameter(
        'cond_b', 0, 3, q=0.1, conditions=[['a_or_b == b']])
    e = configuration_space.UniformFloatHyperparameter(
        "e", 0, 5, conditions=[['a_or_b in {a,b}']])
    conditional_space_operator_in = {"a_or_b": a_or_b, "cond_a": cond_a,
                                     "cond_b": cond_b, "e": e}
    cs = self.pyll_writer.write(conditional_space_operator_in)
    expected = StringIO.StringIO()
    expected.write('from hyperopt import hp\nimport hyperopt.pyll as pyll')
    expected.write('\n\n')
    expected.write('param_0 = hp.uniform("cond_a", 0.0, 1.0)\n')
    expected.write('param_1 = hp.quniform("cond_b", -0.0499, 3.05, 0.1)\n')
    expected.write('param_2 = hp.uniform("e", 0.0, 5.0)\n')
    expected.write('param_3 = hp.choice("a_or_b", [\n')
    expected.write(' {"a_or_b": "a", "cond_a": param_0, "e": param_2, '
                   '},\n')
    expected.write(' {"a_or_b": "b", "cond_b": param_1, "e": param_2, '
                   '},\n')
    expected.write(' ])\n\n')
    expected.write('space = {"a_or_b": param_3}\n')
    self.assertEqual(expected.getvalue(), cs)
Example 4: test_write_quniform_int
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def test_write_quniform_int(self):
    b_int_1 = configuration_space.UniformIntegerHyperparameter("b_int", 0, 3, q=1.0)
    expected = ("b_int", 'param_0 = pyll.scope.int(hp.quniform('
                '"b_int", -0.49999, 3.5, 1.0))')
    value = self.pyll_writer.write_hyperparameter(b_int_1, None)
    self.assertEqual(expected, value)
    # TODO: trying to add the same parameter name a second time, maybe an
    # error should be raised!
    b_int_2 = configuration_space.UniformIntegerHyperparameter("b_int", 0, 3, q=2.0)
    expected = ("b_int", 'param_1 = pyll.scope.int(hp.quniform('
                '"b_int", -0.49999, 3.5, 2.0))')
    value = self.pyll_writer.write_hyperparameter(b_int_2, None)
    self.assertEqual(expected, value)
    b_int_3 = configuration_space.UniformIntegerHyperparameter(
        "b_int", 1, 100, base=10)
    b_int_3.name = self.pyll_writer.convert_name(b_int_3)
    # TODO: this is an example of non-uniform integer sampling!
    expected = ('LOG10_Q1_b_int', 'param_1 = hp.uniform('
                '"LOG10_Q1_b_int", -0.301021309861, 2.00216606176)')
    value = self.pyll_writer.write_hyperparameter(b_int_3, None)
    self.assertEqual(expected, value)
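The -0.49999 and 3.5 bounds in the expected strings implement the usual half-step widening trick: after rounding, every integer from 0 to 3 is drawn with (almost) equal probability, instead of the endpoints being sampled half as often. A quick hedged check of that behavior (the sampling code is illustrative and not part of the test):

import collections
from hyperopt import hp
from hyperopt.pyll import scope
import hyperopt.pyll.stochastic

# round(uniform(-0.49999, 3.5)) covers 0, 1, 2, 3 nearly uniformly.
b_int = scope.int(hp.quniform('b_int', -0.49999, 3.5, 1.0))
samples = [hyperopt.pyll.stochastic.sample(b_int) for _ in range(10000)]
print(collections.Counter(samples))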
Example 5: set_default_hyperparameters
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def set_default_hyperparameters(self):
    """
    Set the default hyperparameter space, used when no space is provided.
    """
    self.hyperparameter_space = {
        'scale_X': hp.choice('scale_X', ['std', 'mm01', 'mm11', None]),
        'scale_y': hp.choice('scale_y', ['std', 'mm01', 'mm11', None]),
    }
    if self.input_obj.keywords['pes_format'] == 'interatomics':
        self.set_hyperparameter('morse_transform', hp.choice('morse_transform',
            [{'morse': True, 'morse_alpha': hp.quniform('morse_alpha', 1, 2, 0.1)},
             {'morse': False}]))
    else:
        self.set_hyperparameter('morse_transform', hp.choice('morse_transform', [{'morse': False}]))
    if self.pip:
        val = hp.choice('pip', [{'pip': True,
                                 'degree_reduction': hp.choice('degree_reduction', [True, False])}])
        self.set_hyperparameter('pip', val)
    else:
        self.set_hyperparameter('pip', hp.choice('pip', [{'pip': False}]))
    if self.input_obj.keywords['gp_ard'] == 'opt':
        # automatic relevance determination (independent length scales for each feature)
        self.set_hyperparameter('ARD', hp.choice('ARD', [True, False]))
    # TODO add optional space inclusions, something like:
    # if option: self.hyperparameter_space['newoption'] = hp.choice(..)
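The nested hp.choice / hp.quniform structure built above is conditional: morse_alpha only exists when morse is True. A reduced, hedged sketch of just that sub-space (names mirror the example; the sampling loop is added for illustration) shows how such a space can be inspected by drawing random configurations:

from hyperopt import hp
import hyperopt.pyll.stochastic

space = {
    'morse_transform': hp.choice('morse_transform', [
        {'morse': True, 'morse_alpha': hp.quniform('morse_alpha', 1, 2, 0.1)},
        {'morse': False},
    ]),
}

# Each sample either contains morse_alpha (when morse is True) or omits it.
for _ in range(3):
    print(hyperopt.pyll.stochastic.sample(space))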
Example 6: score
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def score(infile, outfile, classifier, xgb_autotune, apply_weights, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, ss_score_filter):
    """
    Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data.
    """
    if outfile is None:
        outfile = infile
    else:
        outfile = outfile
    # Prepare XGBoost-specific parameters
    xgb_hyperparams = {'autotune': xgb_autotune, 'autotune_num_rounds': 10, 'num_boost_round': 100, 'early_stopping_rounds': 10, 'test_size': 0.33}
    xgb_params = {'eta': 0.3, 'gamma': 0, 'max_depth': 6, 'min_child_weight': 1, 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': 1, 'alpha': 0, 'scale_pos_weight': 1, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}
    xgb_params_space = {'eta': hp.uniform('eta', 0.0, 0.3), 'gamma': hp.uniform('gamma', 0.0, 0.5), 'max_depth': hp.quniform('max_depth', 2, 8, 1), 'min_child_weight': hp.quniform('min_child_weight', 1, 5, 1), 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': hp.uniform('lambda', 0.0, 1.0), 'alpha': hp.uniform('alpha', 0.0, 1.0), 'scale_pos_weight': 1.0, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'}
    if not apply_weights:
        PyProphetLearner(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, ss_score_filter).run()
    else:
        PyProphetWeightApplier(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, apply_weights, ss_score_filter).run()
Example 7: run
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def run():
    param_space = {
        'w0': 1.0,
        'w1': hp.quniform('w1', 0.01, 2.0, 0.01),
        'max_evals': 800
    }
    trial_counter = 0
    trials = Trials()
    objective = lambda p: hyperopt_wrapper(p)
    best_params = fmin(objective, param_space, algo=tpe.suggest,
                       trials=trials, max_evals=param_space["max_evals"])
    print('best parameters: ')
    for k, v in best_params.items():
        print("%s: %s" % (k, v))
    trial_loss = np.asarray(trials.losses(), dtype=float)
    best_loss = min(trial_loss)
    print('best loss: ', best_loss)
Example 8: full_hyper_space
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def full_hyper_space(self):
    from hyperopt import hp
    eps = 1e-7
    hyper_space, hyper_choices = super(Doc2Vec, self).full_hyper_space()
    hyper_space.update({
        "fex_vector_size": hp.quniform(
            "fex_vector_size", 31.5, 127.5-eps, 8),
        "fex_epochs": hp.quniform("fex_epochs", 20, 50, 1),
        "fex_min_count": hp.quniform("fex_min_count", 0.5, 2.499999, 1),
        "fex_window": hp.quniform("fex_window", 4.5, 9.4999999, 1),
        "fex_dm_concat": hp.randint("fex_dm_concat", 2),
        "fex_dm": hp.randint("fex_dm", 3),
        "fex_dbow_words": hp.randint("fex_dbow_words", 2),
    })
    return hyper_space, hyper_choices
Example 9: hyperopt_lightgbm_basic
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def hyperopt_lightgbm_basic(X, y, params, config, max_evals=50):
    X_train, X_test, y_train, y_test = data_split_by_time(X, y, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X, y, test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    val_data = lgb.Dataset(X_val, label=y_val)
    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        # "forgetting_factor": hp.loguniform("forgetting_factor", 0.01, 0.1)
        # "max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 100,
                          val_data, early_stopping_rounds=30, verbose_eval=0)
        pred = model.predict(X_test)
        score = roc_auc_score(y_test, pred)
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))
    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams
Example 10: hyperopt_lightgbm
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def hyperopt_lightgbm(X_train: pd.DataFrame, y_train: pd.Series, params: Dict, config: Config, max_evals=10):
    X_train, X_test, y_train, y_test = data_split_by_time(X_train, y_train, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X_train, y_train, test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)
    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        # "max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        if config.time_left() < 50:
            return {'status': STATUS_FAIL}
        else:
            model = lgb.train({**params, **hyperparams}, train_data, 100,
                              valid_data, early_stopping_rounds=10, verbose_eval=0)
            pred = model.predict(X_test)
            score = roc_auc_score(y_test, pred)
            # score = model.best_score["valid_0"][params["metric"]]
            # in classification, less is better
            return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))
    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams
Example 11: hyperopt_lightgbm
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def hyperopt_lightgbm(X: pd.DataFrame, y: pd.Series, params: Dict, config: Config):
    X_train, X_val, y_train, y_val = data_split(X, y, test_size=0.5)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)
    space = {
        "max_depth": hp.choice("max_depth", np.arange(2, 10, 1, dtype=int)),
        # smaller than 2^(max_depth)
        "num_leaves": hp.choice("num_leaves", np.arange(4, 200, 4, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.2, 0.8, 0.1),
        # "bagging_fraction": hp.quniform("bagging_fraction", 0.2, 0.8, 0.1),
        # "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 10, 2, dtype=int)),
        # "scale_pos_weight": hp.uniform('scale_pos_weight', 1.0, 10.0),
        # "colsample_by_tree": hp.uniform("colsample_bytree", 0.5, 1.0),
        "min_child_weight": hp.quniform('min_child_weight', 2, 50, 2),
        "reg_alpha": hp.uniform("reg_alpha", 2.0, 8.0),
        "reg_lambda": hp.uniform("reg_lambda", 2.0, 8.0),
        "learning_rate": hp.quniform("learning_rate", 0.05, 0.4, 0.01),
        # "learning_rate": hp.loguniform("learning_rate", np.log(0.04), np.log(0.5)),
        "min_data_in_leaf": hp.choice('min_data_in_leaf', np.arange(200, 2000, 100, dtype=int)),
        # "is_unbalance": hp.choice("is_unbalance", [True])
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 300,
                          valid_data, early_stopping_rounds=45, verbose_eval=0)
        score = model.best_score["valid_0"][params["metric"]]
        # in classification, less is better
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=150, verbose=1,
                         rstate=np.random.RandomState(1))
    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams
Example 12: options
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def options(self):
    return {
        'fast_len': hp.quniform('fast_len', 1, 30, 1),
        'slow_len': hp.quniform('slow_len', 1, 30, 1),
    }
Example 13: predict
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def predict(self, x):
    data = xgb.DMatrix(x)
    pred = self.model.predict(data)
    return pred

# -----------------------------------
# Defining the hyperparameter search space
# -----------------------------------
# hp.choice picks one value from a set of discrete options.
# hp.uniform samples from a uniform distribution between a lower and an upper bound; its arguments are the two bounds.
# hp.quniform samples from a uniform distribution between a lower and an upper bound, quantized to a fixed step; its arguments are the lower bound, the upper bound and the step.
# hp.loguniform samples a value whose logarithm is uniformly distributed; its arguments are the logs of the lower and upper bounds.
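To make the comments above concrete, here is a small sketch (not part of the original snippet; parameter names are illustrative) that defines one parameter with each of the four expressions and draws a random configuration:

import numpy as np
import hyperopt.pyll.stochastic
from hyperopt import hp

space = {
    # hp.choice: pick one of a set of discrete options
    'booster': hp.choice('booster', ['gbtree', 'dart']),
    # hp.uniform: any float between 0.0 and 1.0
    'subsample': hp.uniform('subsample', 0.0, 1.0),
    # hp.quniform: values between 1 and 9 in steps of 1 (still floats; cast to int before use)
    'max_depth': hp.quniform('max_depth', 1, 9, 1),
    # hp.loguniform: the log of the value is uniform between log(1e-8) and log(1.0)
    'gamma': hp.loguniform('gamma', np.log(1e-8), np.log(1.0)),
}

print(hyperopt.pyll.stochastic.sample(space))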
Example 14: test_convert_configuration_space
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def test_convert_configuration_space(self):
    a = configuration_space.UniformFloatHyperparameter("a", 0, 1)
    b = configuration_space.UniformFloatHyperparameter("b", 0, 3, q=0.1)
    expected = StringIO.StringIO()
    expected.write('from hyperopt import hp\nimport hyperopt.pyll as pyll')
    expected.write('\n\n')
    expected.write('param_0 = hp.uniform("a", 0.0, 1.0)\n')
    expected.write('param_1 = hp.quniform("b", -0.0499, 3.05, 0.1)\n\n')
    expected.write('space = {"a": param_0, "b": param_1}\n')
    simple_space = {"a": a, "b": b}
    cs = self.pyll_writer.write(simple_space)
    self.assertEqual(expected.getvalue(), cs)
Example 15: test_convert_complex_space
# Required import: from hyperopt import hp [as alias]
# Or: from hyperopt.hp import quniform [as alias]
def test_convert_complex_space(self):
    cs = self.pyll_writer.write(config_space)
    expected = StringIO.StringIO()
    expected.write('from hyperopt import hp\nimport hyperopt.pyll as pyll')
    expected.write('\n\n')
    expected.write('param_0 = hp.uniform("LOG2_C", -5.0, 15.0)\n')
    expected.write('param_1 = hp.uniform("LOG2_gamma", -14.9999800563, '
                   '3.0)\n')
    expected.write('param_2 = hp.choice("kernel", [\n')
    expected.write(' {"kernel": "linear", },\n')
    expected.write(' {"kernel": "rbf", "LOG2_gamma": param_1, },\n')
    expected.write(' ])\n')
    expected.write('param_3 = hp.uniform("lr", 0.0001, 1.0)\n')
    expected.write('param_4 = pyll.scope.int(hp.quniform('
                   '"neurons", 15.50001, 1024.5, 16.0))\n')
    expected.write('param_5 = hp.choice("classifier", [\n')
    expected.write(' {"classifier": "nn", "lr": param_3, "neurons": '
                   'param_4, },\n')
    expected.write(' {"classifier": "svm", "LOG2_C": param_0, '
                   '"kernel": param_2, },\n')
    expected.write(' ])\n')
    expected.write('param_6 = hp.choice("preprocessing", [\n')
    expected.write(' {"preprocessing": "None", },\n')
    expected.write(' {"preprocessing": "pca", },\n')
    expected.write(' ])\n\n')
    expected.write('space = {"classifier": param_5, '
                   '"preprocessing": param_6}\n')
    self.assertEqual(expected.getvalue(), cs)

    self.pyll_writer.reset_hyperparameter_countr()
    expected.seek(0)
    cs = self.pyll_writer.write(config_space_2)
    self.assertEqual(expected.getvalue().replace("gamma", "gamma_2"), cs)