This article collects typical usage examples of the Python method ray.tune.run_experiments. If you are unsure what tune.run_experiments does, how to call it, or where it is useful, the curated examples below should help. You can also explore further usage examples from the ray.tune module, where this method lives.
The following presents 15 code examples of tune.run_experiments, sorted by popularity by default.
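Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share. It assumes an older Ray Tune release in which run_experiments accepts a dict of experiment specs and function trainables report metrics through a reporter callback; the experiment name "quick_start" and the "lr" config entry are placeholders, not taken from any example below.

import ray
from ray.tune import register_trainable, run_experiments

def train(config, reporter):
    # Report one result per "timestep"; the last reported result is what
    # the returned Trial objects expose via trial.last_result.
    for i in range(100):
        reporter(timesteps_total=i)

ray.init()
register_trainable("f1", train)
trials = run_experiments({
    "quick_start": {             # experiment name (placeholder)
        "run": "f1",             # name under which the trainable was registered
        "config": {"lr": 0.01},  # placeholder hyperparameter passed to train()
    }
})
for trial in trials:
    print(trial.status, trial.last_result["timesteps_total"])

As Examples 2 and 15 show, run_experiments also accepts a single Experiment object or a list of them in place of the dict spec; newer Ray releases expose the same functionality through tune.run (compare Example 7), so treat this sketch as illustrative of the older API used throughout this page.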
Example 1: testDict
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testDict(self):
    def train(config, reporter):
        for i in range(100):
            reporter(timesteps_total=i)

    register_trainable("f1", train)
    trials = run_experiments({
        "foo": {
            "run": "f1",
        },
        "bar": {
            "run": "f1",
        }
    })
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
        self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
Example 2: testExperimentList
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testExperimentList(self):
    def train(config, reporter):
        for i in range(100):
            reporter(timesteps_total=i)

    register_trainable("f1", train)
    exp1 = Experiment(**{
        "name": "foo",
        "run": "f1",
    })
    exp2 = Experiment(**{
        "name": "bar",
        "run": "f1",
    })
    trials = run_experiments([exp1, exp2])
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
        self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
Example 3: testAutoregisterTrainable
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testAutoregisterTrainable(self):
    def train(config, reporter):
        for i in range(100):
            reporter(timesteps_total=i)

    class B(Trainable):
        def step(self):
            return {"timesteps_this_iter": 1, "done": True}

    register_trainable("f1", train)
    trials = run_experiments({
        "foo": {
            "run": train,
        },
        "bar": {
            "run": B
        }
    })
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
Example 4: testCheckpointAtEnd
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testCheckpointAtEnd(self):
    class train(Trainable):
        def step(self):
            return {"timesteps_this_iter": 1, "done": True}

        def save_checkpoint(self, path):
            checkpoint = os.path.join(path, "checkpoint")
            with open(checkpoint, "w") as f:
                f.write("OK")
            return checkpoint

    trials = run_experiments({
        "foo": {
            "run": train,
            "checkpoint_at_end": True
        }
    })
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
        self.assertTrue(trial.has_checkpoint())
Example 5: testInvalidExportFormats
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testInvalidExportFormats(self):
    class train(Trainable):
        def step(self):
            return {"timesteps_this_iter": 1, "done": True}

        def _export_model(self, export_formats, export_dir):
            ExportFormat.validate(export_formats)
            return {}

    def fail_trial():
        run_experiments({
            "foo": {
                "run": train,
                "export_formats": ["format"]
            }
        })

    self.assertRaises(TuneError, fail_trial)
Example 6: testCustomResources
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testCustomResources(self):
    ray.shutdown()
    ray.init(resources={"hi": 3})

    class train(Trainable):
        def step(self):
            return {"timesteps_this_iter": 1, "done": True}

    trials = run_experiments({
        "foo": {
            "run": train,
            "resources_per_trial": {
                "cpu": 1,
                "custom_resources": {
                    "hi": 2
                }
            }
        }
    })
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
Example 7: testTrainableCallable
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testTrainableCallable(self):
    def dummy_fn(config, reporter, steps):
        reporter(timesteps_total=steps, done=True)

    from functools import partial
    steps = 500
    register_trainable("test", partial(dummy_fn, steps=steps))
    [trial] = run_experiments({
        "foo": {
            "run": "test",
        }
    })
    self.assertEqual(trial.status, Trial.TERMINATED)
    self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], steps)
    [trial] = tune.run(partial(dummy_fn, steps=steps)).trials
    self.assertEqual(trial.status, Trial.TERMINATED)
    self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], steps)
Example 8: testLogdir
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testLogdir(self):
    def train(config, reporter):
        assert os.path.join(ray.utils.get_user_temp_dir(), "logdir",
                            "foo") in os.getcwd(), os.getcwd()
        reporter(timesteps_total=1)

    register_trainable("f1", train)
    run_experiments({
        "foo": {
            "run": "f1",
            "local_dir": os.path.join(ray.utils.get_user_temp_dir(),
                                      "logdir"),
            "config": {
                "a": "b"
            },
        }
    })
Example 9: testLongFilename
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testLongFilename(self):
    def train(config, reporter):
        assert os.path.join(ray.utils.get_user_temp_dir(), "logdir",
                            "foo") in os.getcwd(), os.getcwd()
        reporter(timesteps_total=1)

    register_trainable("f1", train)
    run_experiments({
        "foo": {
            "run": "f1",
            "local_dir": os.path.join(ray.utils.get_user_temp_dir(),
                                      "logdir"),
            "config": {
                "a" * 50: tune.sample_from(lambda spec: 5.0 / 7),
                "b" * 50: tune.sample_from(lambda spec: "long" * 40),
            },
        }
    })
Example 10: testBadStoppingReturn
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testBadStoppingReturn(self):
    def train(config, reporter):
        reporter()

    register_trainable("f1", train)

    def f():
        run_experiments({
            "foo": {
                "run": "f1",
                "stop": {
                    "time": 10
                },
            }
        })

    self.assertRaises(TuneError, f)
Example 11: testIterationCounter
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testIterationCounter(self):
    def train(config, reporter):
        for i in range(100):
            reporter(itr=i, timesteps_this_iter=1)

    register_trainable("exp", train)
    config = {
        "my_exp": {
            "run": "exp",
            "config": {
                "iterations": 100,
            },
            "stop": {
                "timesteps_total": 100
            },
        }
    }
    [trial] = run_experiments(config)
    self.assertEqual(trial.status, Trial.TERMINATED)
    self.assertEqual(trial.last_result[TRAINING_ITERATION], 100)
    self.assertEqual(trial.last_result["itr"], 99)
Example 12: testTrialReuseEnabledError
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testTrialReuseEnabledError(self):
    def run():
        run_experiments(
            {
                "foo": {
                    "run": create_resettable_class(),
                    "max_failures": 1,
                    "num_samples": 4,
                    "config": {
                        "fake_reset_not_supported": True
                    },
                }
            },
            reuse_actors=True,
            scheduler=FrequentPausesScheduler())

    self.assertRaises(TuneError, lambda: run())
Example 13: run_example_local
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def run_example_local(example_module_name, example_argv, local_mode=False):
    """Run example locally, potentially parallelizing across cpus/gpus."""
    example_module = importlib.import_module(example_module_name)
    example_args = example_module.get_parser().parse_args(example_argv)

    variant_spec = example_module.get_variant_spec(example_args)
    trainable_class = example_module.get_trainable_class(example_args)

    # Build a single {experiment_id: experiment_spec} dict for run_experiments.
    experiment_id, experiment = generate_experiment(
        trainable_class, variant_spec, example_args)
    experiments = {experiment_id: experiment}

    ray.init(
        num_cpus=example_args.cpus,
        num_gpus=example_args.gpus,
        resources=example_args.resources or {},
        local_mode=local_mode,
        include_webui=example_args.include_webui,
        temp_dir=example_args.temp_dir)

    tune.run_experiments(
        experiments,
        with_server=example_args.with_server,
        server_port=4321,
        scheduler=None)
Example 14: run
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def run(config_file, tunable_id, local_dir):
    register_trainable(tunable_id, TrainTunable)
    lm_config = config_util.load(config_file)

    def easydict_to_dict(config):
        if isinstance(config, EasyDict):
            config = dict(config)
        for key, value in config.items():
            if isinstance(value, EasyDict):
                value = dict(value)
                easydict_to_dict(value)
            config[key] = value
        return config

    tune_space = easydict_to_dict(lm_config['TUNE_SPACE'])
    tune_spec = easydict_to_dict(lm_config['TUNE_SPEC'])
    tune_spec['run'] = tunable_id
    tune_spec['config'] = {'lm_config': os.path.join(os.getcwd(), config_file)}
    tune_spec['local_dir'] = local_dir
    tune_spec['trial_name_creator'] = ray.tune.function(trial_str_creator)

    # Expecting use of gpus to do parameter search
    ray.init(num_cpus=multiprocessing.cpu_count() // 2,
             num_gpus=max(get_num_gpu(), 1))

    # HyperOpt proposes configurations from TUNE_SPACE; Async HyperBand stops
    # underperforming trials early based on mean_accuracy.
    algo = HyperOptSearch(tune_space, max_concurrent=4,
                          reward_attr="mean_accuracy")
    scheduler = AsyncHyperBandScheduler(time_attr="training_iteration",
                                        reward_attr="mean_accuracy", max_t=200)
    trials = run_experiments(experiments={'exp_tune': tune_spec},
                             search_alg=algo,
                             scheduler=scheduler)
    print("The best result is",
          get_best_result(trials, metric="mean_accuracy", param='config'))
Example 15: testExperiment
# Required import: from ray import tune [as alias]
# Or: from ray.tune import run_experiments [as alias]
def testExperiment(self):
    def train(config, reporter):
        for i in range(100):
            reporter(timesteps_total=i)

    register_trainable("f1", train)
    exp1 = Experiment(**{
        "name": "foo",
        "run": "f1",
    })
    [trial] = run_experiments(exp1)
    self.assertEqual(trial.status, Trial.TERMINATED)
    self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)