This article collects typical usage examples of the Python ray.tune.Trainable class. If you are wondering what tune.Trainable does, how it is used, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples of the enclosing module, ray.tune.
The following presents 11 code examples of tune.Trainable, ordered by popularity by default.
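Before diving into the examples, here is a minimal sketch of the typical pattern: subclass tune.Trainable, implement setup and step (plus save_checkpoint/load_checkpoint if you need checkpointing), and pass the class to tune.run. The class name MyTrainable and the "width" hyperparameter below are invented for illustration and do not appear in the examples that follow.

from ray import tune

class MyTrainable(tune.Trainable):
    def setup(self, config):
        # Called once per trial with that trial's sampled hyperparameters.
        self.width = config.get("width", 1)
        self.step_count = 0

    def step(self):
        # Called repeatedly; return a dict of metrics for this iteration.
        # Returning "done": True terminates the trial.
        self.step_count += 1
        return {"score": self.width * self.step_count,
                "done": self.step_count >= 5}

analysis = tune.run(MyTrainable,
                    config={"width": tune.grid_search([1, 2, 3])})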
Example 1: testAutoregisterTrainable
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def testAutoregisterTrainable(self):
    def train(config, reporter):
        for i in range(100):
            reporter(timesteps_total=i)

    class B(Trainable):
        def step(self):
            return {"timesteps_this_iter": 1, "done": True}

    # Explicit registration works, but passing the function or class
    # directly to "run" also registers it automatically.
    register_trainable("f1", train)
    trials = run_experiments({
        "foo": {
            "run": train,
        },
        "bar": {
            "run": B
        }
    })
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
Example 2: testCheckpointAtEnd
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def testCheckpointAtEnd(self):
    class train(Trainable):
        def step(self):
            return {"timesteps_this_iter": 1, "done": True}

        def save_checkpoint(self, path):
            # Write a dummy checkpoint file and return its path.
            checkpoint = os.path.join(path, "checkpoint")
            with open(checkpoint, "w") as f:
                f.write("OK")
            return checkpoint

    trials = run_experiments({
        "foo": {
            "run": train,
            "checkpoint_at_end": True
        }
    })
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
        self.assertTrue(trial.has_checkpoint())
Example 3: testExportFormats
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def testExportFormats(self):
    class train(Trainable):
        def step(self):
            return {"timesteps_this_iter": 1, "done": True}

        def _export_model(self, export_formats, export_dir):
            path = os.path.join(export_dir, "exported")
            with open(path, "w") as f:
                f.write("OK")
            return {export_formats[0]: path}

    trials = run_experiments({
        "foo": {
            "run": train,
            "export_formats": ["format"]
        }
    })
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
        self.assertTrue(
            os.path.exists(os.path.join(trial.logdir, "exported")))
Example 4: testInvalidExportFormats
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def testInvalidExportFormats(self):
    class train(Trainable):
        def step(self):
            return {"timesteps_this_iter": 1, "done": True}

        def _export_model(self, export_formats, export_dir):
            # Raises because "format" is not a recognized export format.
            ExportFormat.validate(export_formats)
            return {}

    def fail_trial():
        run_experiments({
            "foo": {
                "run": train,
                "export_formats": ["format"]
            }
        })

    self.assertRaises(TuneError, fail_trial)
Example 5: testCustomResources
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def testCustomResources(self):
    ray.shutdown()
    ray.init(resources={"hi": 3})

    class train(Trainable):
        def step(self):
            return {"timesteps_this_iter": 1, "done": True}

    trials = run_experiments({
        "foo": {
            "run": train,
            "resources_per_trial": {
                "cpu": 1,
                "custom_resources": {
                    "hi": 2
                }
            }
        }
    })
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
Example 6: testResetTrial
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def testResetTrial(self):
    """Tests that reset works as expected."""

    class B(Trainable):
        def step(self):
            return dict(timesteps_this_iter=1, done=True)

        def reset_config(self, config):
            self.config = config
            return True

    trials = self.generate_trials({
        "run": B,
        "config": {
            "foo": 0
        },
    }, "grid_search")
    trial = trials[0]
    self.trial_executor.start_trial(trial)
    exists = self.trial_executor.reset_trial(trial, {"hi": 1},
                                             "modified_mock")
    self.assertEqual(exists, True)
    self.assertEqual(trial.config.get("hi"), 1)
    self.assertEqual(trial.experiment_tag, "modified_mock")
    self.assertEqual(Trial.RUNNING, trial.status)
Example 7: create_resettable_class
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def create_resettable_class():
    class MyResettableClass(Trainable):
        def setup(self, config):
            self.config = config
            self.num_resets = 0
            self.iter = 0

        def step(self):
            self.iter += 1
            return {"num_resets": self.num_resets, "done": self.iter > 1}

        def save_checkpoint(self, chkpt_dir):
            return {"iter": self.iter}

        def load_checkpoint(self, item):
            self.iter = item["iter"]

        def reset_config(self, new_config):
            if "fake_reset_not_supported" in self.config:
                return False
            self.num_resets += 1
            return True

    return MyResettableClass
Example 8: setUp
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def setUp(self):
    class MockTrainable(Trainable):
        # Pre-defined score sequences, keyed by trial id.
        scores_dict = {
            0: [5, 4, 4, 4, 4, 4, 4, 4, 0],
            1: [4, 3, 3, 3, 3, 3, 3, 3, 1],
            2: [2, 1, 1, 1, 1, 1, 1, 1, 8],
            3: [9, 7, 7, 7, 7, 7, 7, 7, 6],
            4: [7, 5, 5, 5, 5, 5, 5, 5, 3]
        }

        def setup(self, config):
            self.id = config["id"]
            self.idx = 0

        def step(self):
            val = self.scores_dict[self.id][self.idx]
            self.idx += 1
            return {"score": val}

        def save_checkpoint(self, checkpoint_dir):
            pass

        def load_checkpoint(self, checkpoint_path):
            pass

    self.MockTrainable = MockTrainable
    ray.init(local_mode=False, num_cpus=1)
Example 9: testCheckpointing
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def testCheckpointing(self):
    pbt = self.basicSetup(perturbation_interval=2)

    class train(tune.Trainable):
        def step(self):
            return {"mean_accuracy": self.training_iteration}

        def save_checkpoint(self, path):
            checkpoint = os.path.join(path, "checkpoint")
            with open(checkpoint, "w") as f:
                f.write("OK")
            return checkpoint

    trial_hyperparams = {
        "float_factor": 2.0,
        "const_factor": 3,
        "int_factor": 10,
        "id_factor": 0
    }
    analysis = tune.run(
        train,
        num_samples=3,
        scheduler=pbt,
        checkpoint_freq=3,
        config=trial_hyperparams,
        stop={"training_iteration": 30})
    for trial in analysis.trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
        self.assertTrue(trial.has_checkpoint())
Example 10: testCheckpointDict
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def testCheckpointDict(self):
    pbt = self.basicSetup(perturbation_interval=2)

    class train_dict(tune.Trainable):
        def setup(self, config):
            self.state = {"hi": 1}

        def step(self):
            return {"mean_accuracy": self.training_iteration}

        def save_checkpoint(self, path):
            # Returning a dict lets Tune handle serialization itself.
            return self.state

        def load_checkpoint(self, state):
            self.state = state

    trial_hyperparams = {
        "float_factor": 2.0,
        "const_factor": 3,
        "int_factor": 10,
        "id_factor": 0
    }
    analysis = tune.run(
        train_dict,
        num_samples=3,
        scheduler=pbt,
        checkpoint_freq=3,
        config=trial_hyperparams,
        stop={"training_iteration": 30})
    for trial in analysis.trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
        self.assertTrue(trial.has_checkpoint())
Example 11: TuneTrainable
# Required import: from ray import tune
# Alternatively: from ray.tune import Trainable
def TuneTrainable(train_fn):
    """Helper function for getting a trainable to use with Tune.

    The function expects a train_fn which takes a config as input and
    returns four items:

    - model: the TensorFlow estimator.
    - train_spec: training specification.
    - eval_spec: evaluation specification.
    - reporter: a function which returns the metrics given the evaluation output.

    The resulting trainable reports the metrics whenever a checkpoint is
    saved; the report frequency is controlled by the checkpoint frequency,
    and the metrics are determined by the reporter.
    """
    import os
    from ray.tune import Trainable
    from tensorflow.train import CheckpointSaverListener

    class _tuneStoper(CheckpointSaverListener):
        def after_save(self, session, global_step_value):
            # Returning True stops training after each checkpoint save,
            # handing control back to Tune for one report per checkpoint.
            return True

    class TuneTrainable(Trainable):
        def _setup(self, config):
            # Assumes tensorflow is imported at module level as `tf`.
            tf.logging.set_verbosity(tf.logging.ERROR)
            self.config = config
            model, train_spec, eval_spec, reporter = train_fn(config)
            self.model = model
            self.train_spec = train_spec
            self.eval_spec = eval_spec
            self.reporter = reporter

        def _train(self):
            import warnings
            index_warning = 'Converting sparse IndexedSlices'
            warnings.filterwarnings('ignore', index_warning)
            model = self.model
            model.train(input_fn=self.train_spec.input_fn,
                        max_steps=self.train_spec.max_steps,
                        hooks=self.train_spec.hooks,
                        saving_listeners=[_tuneStoper()])
            eval_out = model.evaluate(input_fn=self.eval_spec.input_fn,
                                      steps=self.eval_spec.steps,
                                      hooks=self.eval_spec.hooks)
            metrics = self.reporter(eval_out)
            return metrics

        def _save(self, checkpoint_dir):
            # Persist only the path to the latest estimator checkpoint.
            latest_checkpoint = self.model.latest_checkpoint()
            chkpath = os.path.join(checkpoint_dir, 'path.txt')
            with open(chkpath, 'w') as f:
                f.write(latest_checkpoint)
            return chkpath

        def _restore(self, checkpoint_path):
            with open(checkpoint_path) as f:
                chkpath = f.readline().strip()
            self.model, _, _, _ = train_fn(self.config, chkpath)

    return TuneTrainable
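As a complement to the docstring in Example 11, the sketch below shows what a conforming train_fn might look like and how the resulting trainable is passed to tune.run. It assumes TensorFlow 1.x estimator APIs (matching the tf.logging and CheckpointSaverListener usage above); the name my_train_fn, the "lr" config key, and the toy regression data are invented for illustration.

import tensorflow as tf
from ray import tune

def my_train_fn(config, checkpoint_path=None):
    # Build a tiny linear-regression estimator; warm-start from a previous
    # checkpoint path when Tune restores the trial.
    model = tf.estimator.LinearRegressor(
        feature_columns=[tf.feature_column.numeric_column("x")],
        optimizer=tf.train.AdamOptimizer(config["lr"]),
        warm_start_from=checkpoint_path)

    def input_fn():
        features = {"x": [[1.0], [2.0], [3.0]]}
        labels = [[2.0], [4.0], [6.0]]
        return tf.data.Dataset.from_tensor_slices(
            (features, labels)).repeat().batch(3)

    train_spec = tf.estimator.TrainSpec(input_fn=input_fn, max_steps=100)
    eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=1)

    def reporter(eval_out):
        # Map the estimator's evaluation output to Tune metrics.
        return {"mean_loss": eval_out["loss"]}

    return model, train_spec, eval_spec, reporter

trainable_cls = TuneTrainable(my_train_fn)
analysis = tune.run(trainable_cls,
                    config={"lr": tune.grid_search([1e-2, 1e-3])},
                    checkpoint_freq=1,
                    stop={"training_iteration": 3})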