This article collects typical usage examples of the Python hyperopt.Trials class: what the Trials class is for and how it is used in practice. The 15 examples below are drawn from open-source projects and are ordered by popularity by default.
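Before the examples, here is a minimal sketch of the pattern most of them follow: a Trials object is passed to fmin so that every evaluation (sampled parameters, loss, status) can be inspected after the search finishes. The objective and search space below are illustrative placeholders, not taken from any of the examples.

from hyperopt import fmin, tpe, hp, Trials, space_eval

def objective(params):
    # toy loss to minimize: (x - 3)^2
    return (params['x'] - 3.0) ** 2

space = {'x': hp.uniform('x', -10, 10)}

trials = Trials()
best = fmin(objective, space, algo=tpe.suggest, max_evals=50, trials=trials)

print(space_eval(space, best))  # best parameters mapped back into the search space
print(min(trials.losses()))     # best loss recorded in the Trials object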
Example 1: optimize_model_pytorch
def optimize_model_pytorch(device, args, train_GWAS, train_y, test_GWAS, test_y, out_folder="", startupJobs=40, maxevals=200, noOut=False):
    global numTrials_pytorch
    numTrials_pytorch = 0

    trials = Trials()
    trial_wrapper = partial(trial_pytorch, device=device, args=args, train_GWAS=train_GWAS, train_y=train_y, test_GWAS=test_GWAS, test_y=test_y)
    best_pars = fmin(trial_wrapper, parameter_space_pytorch(), algo=partial(tpe.suggest, n_startup_jobs=startupJobs), max_evals=maxevals, trials=trials)

    # Print the selected 'best' hyperparameters.
    if not noOut:
        print('\nBest hyperparameter settings: ', space_eval(parameter_space_pytorch(), best_pars), '\n')

    # Plot the optimization trace for each hyperparameter, using the index keys stored in the first trial.
    regression = True
    for p in trials.trials[0]['misc']['idxs']:
        plot_optimization_pytorch(trials, p, regression, out_folder=out_folder)

    best_pars = space_eval(parameter_space_pytorch(), best_pars)  # map the chosen indices back to actual values in the parameter space

    # Override the number of epochs with the early-stopping epoch stored as an attachment on the best trial.
    lowestLossIndex = np.argmin(trials.losses())
    best_pars['earlyStopEpochs'] = trials.trial_attachments(trials.trials[lowestLossIndex])['highestAcc_epoch']
    best_pars['earlyStopEpochs'] += 1  # epochs are 0-based otherwise
    best_pars['epochs'] = best_pars['earlyStopEpochs']
    if best_pars['epochs'] <= 0:
        best_pars['epochs'] = 1  # we don't want a network without any training, as that would break deep dreaming
    return best_pars
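The call to trials.trial_attachments above only works if the objective stored an attachment when it ran. In hyperopt, an objective that returns a dictionary may include an 'attachments' entry; its values are kept outside the main trial document and retrieved later exactly as Example 1 does. Below is a minimal sketch of that pattern, where train_and_evaluate and the 'highestAcc_epoch' key are stand-ins for whatever trial_pytorch actually computes.

from hyperopt import STATUS_OK

def trial_with_attachment(params):
    # hypothetical training helper that returns the loss and the epoch with the best validation accuracy
    loss, best_epoch = train_and_evaluate(params)
    return {'loss': loss,
            'status': STATUS_OK,
            # stored separately from the trial document,
            # retrievable later via trials.trial_attachments(trials.trials[i])['highestAcc_epoch']
            'attachments': {'highestAcc_epoch': best_epoch}}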
Example 2: notest_opt_qn_normal
def notest_opt_qn_normal(f=hp_normal):
    bandit = Bandit(
        {'loss': scope.sum([f('v%i' % ii, 0, 1)
                            for ii in range(25)]) ** 2},
        loss_target=0)
    algo = TreeParzenEstimator(bandit,
                               prior_weight=.5,
                               n_startup_jobs=0,
                               n_EI_candidates=1,
                               gamma=0.15)
    trials = Trials()
    # note: this snippet targets the old Python 2 hyperopt test API; `async` is a
    # reserved word in Python 3.7+, so it cannot be used as a keyword argument there
    experiment = Experiment(trials, algo, async=False)
    experiment.max_queue_len = 1
    experiment.run(40)
    print 'sorted losses:', list(sorted(trials.losses()))
    idxs, vals = miscs_to_idxs_vals(trials.miscs)
    if 1:
        import hyperopt.plotting
        hyperopt.plotting.main_plot_vars(trials, bandit, do_show=1)
    else:
        import matplotlib.pyplot as plt
        begin = [v[:10] for k, v in vals.items()]
        end = [v[-10:] for k, v in vals.items()]
        plt.subplot(2, 1, 1)
        plt.title('before')
        plt.hist(np.asarray(begin).flatten())
        plt.subplot(2, 1, 2)
        plt.title('after')
        plt.hist(np.asarray(end).flatten())
        plt.show()
Example 3: main
def main():
    usage = "%prog text.json labels.csv feature_dir output_dir"
    parser = OptionParser(usage=usage)
    parser.add_option('-m', dest='max_iter', default=4,
                      help='Maximum iterations of Bayesian optimization; default=%default')

    (options, args) = parser.parse_args()
    max_iter = int(options.max_iter)

    global data_filename, label_filename, feature_dir, output_dir, log_filename

    data_filename = args[0]
    label_filename = args[1]
    feature_dir = args[2]
    output_dir = args[3]

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    log_filename = os.path.join(output_dir, 'log.txt')
    with open(log_filename, 'w') as logfile:
        logfile.write(','.join([data_filename, label_filename, feature_dir, output_dir]))

    trials = Trials()
    best = fmin(call_experiment,
                space=space,
                algo=tpe.suggest,
                max_evals=max_iter,
                trials=trials)

    print(space_eval(space, best))
    print(trials.losses())
Example 4: run_all_dl
def run_all_dl(csvfile=saving_fp,
               space=[hp.quniform('h1', 100, 550, 1),
                      hp.quniform('h2', 100, 550, 1),
                      hp.quniform('h3', 100, 550, 1),
                      #hp.choice('activation', ["RectifierWithDropout", "TanhWithDropout"]),
                      hp.uniform('hdr1', 0.001, 0.3),
                      hp.uniform('hdr2', 0.001, 0.3),
                      hp.uniform('hdr3', 0.001, 0.3),
                      hp.uniform('rho', 0.9, 0.999),
                      hp.uniform('epsilon', 1e-10, 1e-4)]):
    # maxout works well with dropout (Goodfellow et al. 2013), and the rectifier has worked well for image recognition (LeCun et al. 1998)
    start_save(csvfile=csvfile)
    trials = Trials()
    print("Deep learning...")
    best = fmin(objective,
                space=space,
                algo=tpe.suggest,
                max_evals=evals,
                trials=trials)
    print(best)
    print(trials.losses())
    # pickle needs binary-mode file handles
    with open('output/dlbest.pkl', 'wb') as output:
        pickle.dump(best, output, -1)
    with open('output/dltrials.pkl', 'wb') as output:
        pickle.dump(trials, output, -1)
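Example 4 pickles the Trials object to disk. A useful property of fmin is that it can be warm-started from such a saved Trials object: if the reloaded object already contains evaluations, fmin only runs the remaining ones up to max_evals. The sketch below assumes the same 'output/dltrials.pkl' file, objective and space as the example above; the paths and the extra budget of 50 evaluations are illustrative.

import pickle
from hyperopt import fmin, tpe

# reload the previously saved Trials and continue the search where it stopped
with open('output/dltrials.pkl', 'rb') as f:
    trials = pickle.load(f)

best = fmin(objective,                           # same objective as in the example above
            space=space,                         # same search space as in the example above
            algo=tpe.suggest,
            max_evals=len(trials.trials) + 50,   # run 50 additional evaluations
            trials=trials)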
Example 5: run
def run(self):
    start = time.time()
    trials = Trials()
    best = fmin(self._obj, self.model_param_space._build_space(), tpe.suggest, self.max_evals, trials)
    best_params = space_eval(self.model_param_space._build_space(), best)
    best_params = self.model_param_space._convert_int_param(best_params)
    trial_rmses = np.asarray(trials.losses(), dtype=float)
    best_ind = np.argmin(trial_rmses)
    best_rmse_mean = trial_rmses[best_ind]
    best_rmse_std = trials.trial_attachments(trials.trials[best_ind])["std"]
    self.logger.info("-" * 50)
    self.logger.info("Best RMSE")
    self.logger.info("  Mean: %.6f" % best_rmse_mean)
    self.logger.info("  std: %.6f" % best_rmse_std)
    self.logger.info("Best param")
    self.task._print_param_dict(best_params)
    end = time.time()
    _sec = end - start
    _min = int(_sec / 60.)
    self.logger.info("Time")
    if _min > 0:
        self.logger.info("  %d mins" % _min)
    else:
        self.logger.info("  %d secs" % _sec)
    self.logger.info("-" * 50)
Example 6: optimize
def optimize(obj_function, inputs, key_file, space, max_eval):
    trials = Trials()
    f = partial(obj_function, inputs, key_file)
    best = fmin(f, space=space, algo=tpe.suggest, max_evals=max_eval,
                trials=trials)
    LOGGER.info("{}\t{}".format(best, 1 - min(trials.losses())))
Example 7: main
def main():
    usage = "%prog"
    parser = OptionParser(usage=usage)
    parser.add_option('-o', dest='output_dirname', default='bayes_opt_rnn_chars',
                      help='Output directory name')
    parser.add_option('--reuse', dest='reuse', action="store_true", default=False,
                      help='Use reusable holdout; default=%default')

    (options, args) = parser.parse_args()

    global output_dirname, output_filename, reuse, search_alpha, space
    reuse = options.reuse
    output_dirname = options.output_dirname

    if reuse:
        output_dirname += '_reuse'

    output_filename = fh.make_filename(defines.exp_dir, fh.get_basename_wo_ext(output_dirname), 'log')

    with codecs.open(output_filename, 'w') as output_file:
        output_file.write(output_dirname + '\n')
        #output_file.write('reuse = ' + str(reuse) + '\n')

    trials = Trials()
    best = fmin(call_experiment,
                space=space,
                algo=tpe.suggest,
                max_evals=100,
                trials=trials)

    print(space_eval(space, best))
    print(trials.losses())
Example 8: work
def work(self):
    bandit = self.bandit
    random_algo = Random(bandit)
    # build an experiment of 10 trials
    trials = Trials()
    exp = Experiment(trials, random_algo)
    #print random_algo.s_specs_idxs_vals
    exp.run(10)
    ids = trials.tids
    assert len(ids) == 10

    tpe_algo = TreeParzenEstimator(bandit)
    #print pyll.as_apply(tpe_algo.post_idxs)
    #print pyll.as_apply(tpe_algo.post_vals)
    argmemo = {}

    print(trials.miscs)
    idxs, vals = miscs_to_idxs_vals(trials.miscs)
    argmemo[tpe_algo.observed['idxs']] = idxs
    argmemo[tpe_algo.observed['vals']] = vals
    argmemo[tpe_algo.observed_loss['idxs']] = trials.tids
    argmemo[tpe_algo.observed_loss['vals']] = trials.losses()
    stuff = pyll.rec_eval([tpe_algo.post_below['idxs'],
                           tpe_algo.post_below['vals']],
                          memo=argmemo)
    print(stuff)
Example 9: optimize_model_parameter_split
def optimize_model_parameter_split(x, y, model_name=None, loss_function="accuracy", parameter=None, max_evals=100, n_folds=5, isWrite=True, times=1, problem_pattern="classification"):
    """
    hyperopt model tuning
    """
    if model_name is None and parameter is None:
        print("you must set parameter or model_name")
        return None
    elif parameter is not None:
        param = parameter
    elif model_name is not None:
        param = parameter_dictionary[model_name]
    else:
        return None

    x_trains = []
    x_tests = []
    y_trains = []
    y_tests = []

    # use `_` as the loop variable so the `time` module (needed below for the csv filename) is not shadowed
    for _ in range(times):
        x_train, x_test, y_train, y_test = cross_validation.train_test_split(
            x, y, test_size=0.0125)
        x_trains.append(x_train)
        x_tests.append(x_test)
        y_trains.append(y_train)
        y_tests.append(y_test)

    trials = Trials()
    function = lambda param: optimize_model_function_split(
        param, x_trains, x_tests, y_trains, y_tests, loss_function)

    print(param)
    print("========================================================================")
    best_param = fmin(function, param,
                      algo=tpe.suggest, max_evals=max_evals, trials=trials)
    print("========================================================================")
    print("write result to csv files")

    # write the csv file
    if isWrite:
        datas = []
        for trial_data in trials.trials:
            print(trial_data)
            trial_parameter_dictionary = {}
            trial_parameter_dictionary['model'] = model_name
            trial_parameter_dictionary['tid'] = trial_data['misc']['tid']
            for key, value in trial_data['misc']['vals'].items():
                print(key, value[0])
                trial_parameter_dictionary[key] = value[0]
            trial_parameter_dictionary['loss'] = trial_data['result']['loss']
            trial_parameter_dictionary['status'] = trial_data['result']['status']
            datas.append(trial_parameter_dictionary)
        filename = str(time.time()) + ".csv"
        dictionary_in_list_convert_to_csv(datas, filename)

    print(trials.statuses())
    return best_param
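Example 9 flattens trials.trials into a list of dictionaries by hand. Each element of trials.trials is a dict whose 'misc'/'vals' entry holds the sampled values (as one-element lists) and whose 'result' entry holds the loss and status, so the same export can be written compactly with pandas. The sketch below assumes pandas is available; the helper name and column layout are illustrative, not part of the example's project.

import pandas as pd

def trials_to_dataframe(trials):
    # one row per trial: tid, every sampled hyperparameter, loss and status
    rows = []
    for trial in trials.trials:
        row = {'tid': trial['tid']}
        for key, values in trial['misc']['vals'].items():
            row[key] = values[0] if values else None  # vals are stored as (possibly empty) lists
        row['loss'] = trial['result'].get('loss')
        row['status'] = trial['result'].get('status')
        rows.append(row)
    return pd.DataFrame(rows)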
Example 10: test_basic
def test_basic(self):
    bandit = self._bandit_cls()
    #print 'bandit params', bandit.params, bandit
    #print 'algo params', algo.vh.params
    trials = Trials()
    fmin(lambda x: x, bandit.expr,
         trials=trials,
         algo=suggest,
         max_evals=self._n_steps)
    assert trials.average_best_error(bandit) - bandit.loss_target < .2
Example 11: test_basic
def test_basic(self):
    domain = self._domain_cls()
    # print 'domain params', domain.params, domain
    # print 'algo params', algo.vh.params
    trials = Trials()
    fmin(lambda x: x, domain.expr,
         trials=trials,
         algo=suggest,
         max_evals=self._n_steps)
    assert trials.average_best_error(domain) - domain.loss_target < .2
Example 12: test_basic
def test_basic(self):
    bandit = self._bandit_cls()
    algo = Random(bandit)
    trials = Trials()
    experiment = Experiment(trials, algo, async=False)
    experiment.max_queue_len = 50
    experiment.run(self._n_steps)
    print
    print self._bandit_cls
    print bandit.loss_target
    print trials.average_best_error(bandit)
    assert trials.average_best_error(bandit) - bandit.loss_target < .2
    print
Example 13: main
def main():
    set_globals()
    trials = Trials()
    best = fmin(call_experiment,
                space=space,
                algo=tpe.suggest,
                max_evals=max_iter,
                trials=trials)
    print(space_eval(space, best))
    print("losses:", [-l for l in trials.losses()])
    print('the best loss: ', max([-l for l in trials.losses()]))
    print("number of trials: " + str(len(trials.trials)))
Example 14: TunningParamter
def TunningParamter(param, data, features, feature, source_name, real_value, int_boolean):
    data = data[~pd.isnull(all_data[feature])]
    print(data.shape)

    ISOTIMEFORMAT = '%Y-%m-%d %X'
    start = time.strftime(ISOTIMEFORMAT, time.localtime())

    trials = Trials()
    objective = lambda p: trainModel(p, data, features, feature, source_name, real_value, int_boolean)
    best_parameters = fmin(objective, param, algo=tpe.suggest, max_evals=param['max_evals'], trials=trials)

    # now we need to get the best param
    trials_loss = np.asanyarray(trials.losses(), dtype=float)
    best_loss = min(trials_loss)
    ind = np.where(trials_loss == best_loss)[0][0]
    best_loss_std = trials.trial_attachments(trials.trials[ind])['std']

    end = time.strftime(ISOTIMEFORMAT, time.localtime())
    dumpMessage(best_parameters, best_loss, best_loss_std, param['task'], source_name, start, end)
Example 15: test_basic
def test_basic(self):
    bandit = self._bandit_cls()
    print 'bandit params', bandit.params
    algo = Random(bandit)
    print 'algo params', algo.vh.params
    trials = Trials()
    experiment = Experiment(trials, algo, async=False)
    experiment.catch_bandit_exceptions = False
    experiment.max_queue_len = 50
    experiment.run(self._n_steps)
    print
    print self._bandit_cls
    print bandit.loss_target
    print trials.average_best_error(bandit)
    assert trials.average_best_error(bandit) - bandit.loss_target < .2
    print