This article collects typical usage examples of the hyperopt.fmin method in Python. If you have been asking yourself what exactly hyperopt.fmin does, how to use it, or where to find worked examples of it, the curated code samples below may help. You can also explore further usage examples from the hyperopt module that the method belongs to.
The following shows 15 code examples of the hyperopt.fmin method, sorted by popularity by default.
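Before the project examples, here is a minimal, self-contained sketch of a typical hyperopt.fmin call (written for this overview, not taken from any of the projects below): it minimizes a simple quadratic over a uniform search space with the TPE algorithm and records every evaluation in a Trials object.

from hyperopt import fmin, tpe, hp, Trials, STATUS_OK

def objective(x):
    # fmin minimizes the returned 'loss'; STATUS_OK marks the trial as valid
    return {'loss': (x - 1.0) ** 2, 'status': STATUS_OK}

trials = Trials()
best = fmin(fn=objective,
            space=hp.uniform('x', -5, 5),
            algo=tpe.suggest,
            max_evals=50,
            trials=trials)
print(best)  # e.g. {'x': 1.003...}: the best point found during the search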

Example 1: test_compilefn_train_test_split
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def test_compilefn_train_test_split(tmpdir):
    db_name = "test"
    exp_name = "test2"
    fn = CompileFN(db_name, exp_name,
                   data_fn=data.data,
                   model_fn=model.build_model,
                   optim_metric="acc",
                   optim_metric_mode="max",
                   # eval
                   valid_split=.5,
                   stratified=False,
                   random_state=True,
                   save_dir="/tmp/")
    hyper_params = {
        "data": {},
        "shared": {"max_features": 100, "maxlen": 20},
        "model": {"filters": hp.choice("m_filters", (2, 5)),
                  "hidden_dims": 3,
                  },
        "fit": {"epochs": 1}
    }
    fn_test(fn, hyper_params, tmp_dir=str(tmpdir))
    trials = Trials()
    best = fmin(fn, hyper_params, trials=trials, algo=tpe.suggest, max_evals=2)
    assert isinstance(best, dict)

Example 2: optimize_hyperparam
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def optimize_hyperparam(self, X, y, test_size=.2, n_eval=100):
    X_trn, X_val, y_trn, y_val = train_test_split(X, y, test_size=test_size, shuffle=self.shuffle)

    def objective(hyperparams):
        model = XGBModel(n_estimators=self.n_est, **self.params, **hyperparams)
        model.fit(X=X_trn, y=y_trn,
                  eval_set=[(X_val, y_val)],
                  eval_metric=self.metric,
                  early_stopping_rounds=self.n_stop,
                  verbose=False)
        score = model.evals_result()['validation_0'][self.metric][model.best_iteration] * self.loss_sign
        return {'loss': score, 'status': STATUS_OK, 'model': model}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=self.space, trials=trials,
                         algo=tpe.suggest, max_evals=n_eval, verbose=1,
                         rstate=self.random_state)
    hyperparams = space_eval(self.space, best)
    return hyperparams, trials
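A hypothetical usage sketch for the method above (the optimizer instance opt and its n_est and params attributes are assumptions inferred from the snippet, not code from the original project): the returned hyperparams feed directly into a final model fit, while trials keeps the full search history.

# Hypothetical caller; `opt` is an instance of the class owning optimize_hyperparam.
hyperparams, trials = opt.optimize_hyperparam(X, y, test_size=0.2, n_eval=100)
final_model = XGBModel(n_estimators=opt.n_est, **opt.params, **hyperparams)
final_model.fit(X, y)
print(f"best validation loss: {min(trials.losses()):.4f}")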

Example 3: run
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def run(self):
    logger_all.debug("Instantiating MongoTrials object.")
    trials = MongoTrials(
        as_mongo_str(os.path.join(self.mongo_url, "jobs")), exp_key=self.exp_key
    )
    logger_all.debug("Calling fmin.")
    fmin(
        fn=self.objective_hyperopt,
        space=self.space,
        algo=self.algo,
        max_evals=self.max_evals,
        trials=trials,
        show_progressbar=self.show_progressbar,
    )
    # queue.put uses pickle, so remove the attribute containing a thread lock
    if hasattr(trials, "handle"):
        logger_all.debug("fmin returned. Deleting Trial handle for pickling.")
        del trials.handle
    logger_all.debug("Putting Trials in Queue.")
    self.queue.put(trials)

Example 4: optimize
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def optimize(self):
    """Function that performs Bayesian optimization."""
    trials = Trials()
    self._best_result = fmin(fn=self._get_loss, space=self.search_space, trials=trials,
                             algo=tpe.suggest, max_evals=self.max_evals)
    columns = list(self.search_space.keys())
    results = pd.DataFrame(columns=['iteration'] + columns + ['loss'])
    for idx, trial in enumerate(trials.trials):
        row = [idx]
        translated_eval = space_eval(self.search_space, {k: v[0] for k, v in trial['misc']['vals'].items()})
        for k in columns:
            row.append(translated_eval[k])
        row.append(trial['result']['loss'])
        results.loc[idx] = row
    path = self.config_local.path_result / self.model_name
    path.mkdir(parents=True, exist_ok=True)
    results.to_csv(str(path / "trials.csv"), index=False)
    self._logger.info(results)
    self._logger.info('Found golden setting:')
    self._logger.info(space_eval(self.search_space, self._best_result))
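A quick aside on the {k: v[0] for ...} translation above, shown as a standalone sketch (the search space below is illustrative, not from the original project): Trials stores raw node values, e.g. the index picked by hp.choice, and space_eval maps them back to the actual parameter values.

from hyperopt import hp, space_eval

space = {'units': hp.choice('units', [32, 64, 128])}
raw_vals = {'units': [2]}  # shape of trial['misc']['vals']: the chosen index, wrapped in a list
print(space_eval(space, {k: v[0] for k, v in raw_vals.items()}))  # -> {'units': 128}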

Example 5: _fmin
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def _fmin(self, trials):
    # new version of hyperopt has keyword argument `show_progressbar` that
    # breaks doctests, so here's a workaround
    fmin_kwargs = dict(
        fn=self._run,
        space=self._params.hyper_space,
        algo=hyperopt.tpe.suggest,
        max_evals=self._num_runs,
        trials=trials
    )
    try:
        hyperopt.fmin(
            **fmin_kwargs,
            show_progressbar=False
        )
    except TypeError:
        hyperopt.fmin(**fmin_kwargs)
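An alternative to the try/except probe in Example 5, shown as a sketch rather than the original project's code: inspect fmin's signature up front and pass show_progressbar only when the installed hyperopt version supports it.

import inspect
import hyperopt

def fmin_quiet(**fmin_kwargs):
    # Older hyperopt releases raise TypeError on the unknown keyword, so only
    # pass show_progressbar=False when the signature actually declares it.
    if 'show_progressbar' in inspect.signature(hyperopt.fmin).parameters:
        fmin_kwargs['show_progressbar'] = False
    return hyperopt.fmin(**fmin_kwargs)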

Example 6: run
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def run(self):
    trials = Trials()
    best = fmin(self._obj, self.model_param_space._build_space(),
                tpe.suggest, self.max_evals, trials)
    best_params = space_eval(self.model_param_space._build_space(), best)
    best_params = self.model_param_space._convert_into_param(best_params)
    trial_loss = np.asarray(trials.losses(), dtype=float)
    best_ind = np.argmin(trial_loss)
    best_ap = trial_loss[best_ind]
    best_loss = trials.trial_attachments(trials.trials[best_ind])["loss"]
    best_acc = trials.trial_attachments(trials.trials[best_ind])["acc"]
    self.logger.info("-" * 50)
    self.logger.info("Best Average Precision: %.3f" % best_ap)
    self.logger.info("with Loss %.3f, Accuracy %.3f" % (best_loss, best_acc))
    self.logger.info("Best Param:")
    self.task._print_param_dict(best_params)
    self.logger.info("-" * 50)

Example 7: params_search
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def params_search(self):
    """
    Function to search for optimal strategy parameters.
    """
    def objective(args):
        logger.info(f"Params : {args}")
        try:
            self.params = args
            self.exchange = BitMexBackTest()
            self.exchange.on_update(self.bin_size, self.strategy)
            profit_factor = self.exchange.win_profit/self.exchange.lose_loss
            logger.info(f"Profit Factor : {profit_factor}")
            ret = {
                'status': STATUS_OK,
                'loss': 1/profit_factor
            }
        except Exception as e:
            ret = {
                'status': STATUS_FAIL
            }
        return ret

    trials = Trials()
    best_params = fmin(objective, self.options(), algo=tpe.suggest, trials=trials, max_evals=200)
    logger.info(f"Best params is {best_params}")
    logger.info(f"Best profit factor is {1/trials.best_trial['result']['loss']}")

Example 8: run
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def run(self):
    trials = hyperopt.Trials()
    hyperopt.fmin(fn=lambda kwargs: {'loss': self.train(kwargs), 'status': hyperopt.STATUS_OK},
                  space=self.search_space,
                  algo=hyperopt.tpe.suggest,
                  max_evals=self.num_eval,
                  trials=trials,
                  verbose=10)
    # From the trials, collect the sampled value of every parameter; the number
    # of iterations is set to None (Hyperopt does not change it), and each
    # parameter dict is zipped with its loss.
    self.history.extend(zip(
        [({name: val[0] for name, val in params["misc"]["vals"].items()}, None)
         for params in trials.trials],
        trials.losses()))
    return self.history[int(np.argmin([val[1] for val in self.history]))]

Example 9: test_compilefn_cross_val
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def test_compilefn_cross_val(tmpdir):
    db_name = "test"
    exp_name = "test2"
    fn = CompileFN(db_name, exp_name,
                   cv_n_folds=3,
                   stratified=False,
                   random_state=True,
                   data_fn=data.data,
                   model_fn=model.build_model,
                   optim_metric="loss",
                   optim_metric_mode="min",
                   save_dir="/tmp/")
    hyper_params = {
        "data": {},
        "shared": {"max_features": 100, "maxlen": 20},
        "model": {"filters": hp.choice("m_filters", (2, 5)),
                  "hidden_dims": 3,
                  },
        "fit": {"epochs": 1}
    }
    fn_test(fn, hyper_params, tmp_dir=str(tmpdir))
    trials = Trials()
    best = fmin(fn, hyper_params, trials=trials, algo=tpe.suggest, max_evals=2)
    assert isinstance(best, dict)

Example 10: run
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def run():
    param_space = {
        'w0': 1.0,
        'w1': hp.quniform('w1', 0.01, 2.0, 0.01),
        'max_evals': 800
    }
    trial_counter = 0
    trials = Trials()
    objective = lambda p: hyperopt_wrapper(p)
    best_params = fmin(objective, param_space, algo=tpe.suggest,
                       trials=trials, max_evals=param_space["max_evals"])
    print('best parameters: ')
    for k, v in best_params.items():
        print("%s: %s" % (k, v))
    trial_loss = np.asarray(trials.losses(), dtype=float)
    best_loss = min(trial_loss)
    print('best loss: ', best_loss)

Example 11: run_hyperopt
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def run_hyperopt(self, max_eval, space):
    """
    Runs the hyperopt trainer.

    :param max_eval: (int) max evaluations to carry out when running hyperopt
    :param space: (dict) dictionary of the hyperparameter space to explore
    :return: dictionary of best-fit models by dna
    """
    # Reset run parameters
    self._max_eval = max_eval
    self._results = {}
    self._eval_idx = 0

    # Hyperopt is picky about the function handle
    def model_handle(params):
        return self.model(params)

    # Run the hyperparameter optimization
    _ = fmin(fn=model_handle, space=space, algo=tpe.suggest, max_evals=max_eval)
    return self._results

Example 12: recommendNextParameters
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def recommendNextParameters(self, hyperparameterSpace, results, currentTrials, lockedValues=None):
    if lockedValues is None:
        lockedValues = {}
    rstate = numpy.random.RandomState(seed=int(random.randint(1, 2 ** 32 - 1)))
    trials = self.convertResultsToTrials(hyperparameterSpace, results)
    space = Hyperparameter(hyperparameterSpace).createHyperoptSpace(lockedValues)
    params = {}

    def sample(parameters):
        nonlocal params
        params = parameters
        return {"loss": 0.5, 'status': 'ok'}

    hyperopt.fmin(fn=sample,
                  space=space,
                  algo=functools.partial(hyperopt.rand.suggest),
                  max_evals=1,
                  trials=trials,
                  rstate=rstate,
                  show_progressbar=False)
    return params

Example 13: recommendNextParameters
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def recommendNextParameters(self, hyperparameterSpace, results, currentTrials, lockedValues=None):
    if lockedValues is None:
        lockedValues = {}
    rstate = numpy.random.RandomState(seed=int(random.randint(1, 2 ** 32 - 1)))
    trials = self.convertResultsToTrials(hyperparameterSpace, results)
    space = Hyperparameter(hyperparameterSpace).createHyperoptSpace(lockedValues)
    params = {}

    def sample(parameters):
        nonlocal params
        params = parameters
        return {"loss": 0.5, 'status': 'ok'}

    hyperopt.fmin(fn=sample,
                  space=space,
                  algo=functools.partial(hyperopt.tpe.suggest, n_EI_candidates=24, gamma=0.25),
                  max_evals=1,
                  trials=trials,
                  rstate=rstate,
                  show_progressbar=False)
    return params

Example 14: hyperopt_lightgbm_basic
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def hyperopt_lightgbm_basic(X, y, params, config, max_evals=50):
    X_train, X_test, y_train, y_test = data_split_by_time(X, y, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X, y, test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    val_data = lgb.Dataset(X_val, label=y_val)
    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        #"forgetting_factor": hp.loguniform("forgetting_factor", 0.01, 0.1)
        #"max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 100,
                          val_data, early_stopping_rounds=30, verbose_eval=0)
        pred = model.predict(X_test)
        score = roc_auc_score(y_test, pred)
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))
    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams

Example 15: hyperopt_lightgbm
# Required import: import hyperopt [as alias]
# Or: from hyperopt import fmin [as alias]
def hyperopt_lightgbm(X_train: pd.DataFrame, y_train: pd.Series, params: Dict, config: Config, max_evals=10):
    X_train, X_test, y_train, y_test = data_split_by_time(X_train, y_train, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X_train, y_train, test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)
    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        #"max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        if config.time_left() < 50:
            return {'status': STATUS_FAIL}
        else:
            model = lgb.train({**params, **hyperparams}, train_data, 100,
                              valid_data, early_stopping_rounds=10, verbose_eval=0)
            pred = model.predict(X_test)
            score = roc_auc_score(y_test, pred)
            #score = model.best_score["valid_0"][params["metric"]]
            # in classification, less is better
            return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))
    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams