This page collects typical usage examples of the Python method tensor2tensor.utils.registry.model. If you are unsure what registry.model does, what it is used for, or how to call it, the curated code samples below should help. You can also read more about the module it belongs to, tensor2tensor.utils.registry.
The sections below show 15 code examples of registry.model, sorted by popularity by default.
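Before the examples, a minimal sketch of the usual pattern may help: registry.model looks up a model class that was previously registered with @registry.register_model, and the returned class is then instantiated like any T2TModel. The model name "transformer" and hparams set "transformer_base" below are common tensor2tensor names used purely for illustration; in practice you would usually also pass problem_hparams, as several examples below do.

import tensorflow as tf

from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib

# Look up the registered model class by its snake_case name.
model_cls = registry.model("transformer")
hparams = trainer_lib.create_hparams("transformer_base")
# Instantiate it like any T2TModel subclass.
model = model_cls(hparams, mode=tf.estimator.ModeKeys.TRAIN)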
Example 1: evaluate_world_model
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def evaluate_world_model(simulated_problem_name, problem_name, hparams,
                         world_model_dir, epoch_data_dir, tmp_dir):
  """Generate simulated environment data and return reward accuracy."""
  gym_simulated_problem = registry.problem(simulated_problem_name)
  sim_steps = hparams.simulated_env_generator_num_steps
  gym_simulated_problem.settable_num_steps = sim_steps
  with temporary_flags({
      "problem": problem_name,
      "model": hparams.generative_model,
      "hparams_set": hparams.generative_model_params,
      "data_dir": epoch_data_dir,
      "output_dir": world_model_dir,
  }):
    gym_simulated_problem.generate_data(epoch_data_dir, tmp_dir)
  n = max(1., gym_simulated_problem.statistics.number_of_dones)
  model_reward_accuracy = (
      gym_simulated_problem.statistics.successful_episode_reward_predictions
      / float(n))
  old_path = os.path.join(epoch_data_dir, "debug_frames_sim")
  new_path = os.path.join(epoch_data_dir, "debug_frames_sim_eval")
  if not tf.gfile.Exists(new_path):
    tf.gfile.Rename(old_path, new_path)
  return model_reward_accuracy
Example 2: make_estimator_model_fn
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def make_estimator_model_fn(model_name,
                            hparams,
                            decode_hparams=None,
                            use_tpu=False):
  model_cls = registry.model(model_name)

  def wrapping_model_fn(features, labels, mode, params=None, config=None):
    return model_cls.estimator_model_fn(
        hparams,
        features,
        labels,
        mode,
        config=config,
        params=params,
        decode_hparams=decode_hparams,
        use_tpu=use_tpu)

  return wrapping_model_fn
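The returned wrapping_model_fn is meant to be plugged into a tf.estimator.Estimator. A rough usage sketch, assuming hparams was built with trainer_lib.create_hparams and using a placeholder model_dir:

model_fn = make_estimator_model_fn("transformer", hparams)
estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir="/tmp/t2t_estimator",  # placeholder directory
    config=tf.estimator.RunConfig())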
Example 3: __init__
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def __init__(self,
             hparams,
             mode=tf.estimator.ModeKeys.TRAIN,
             problem_hparams=None,
             data_parallelism=None,
             decode_hparams=None):
  assert hparams.distill_phase in ["train", "distill"]

  if hparams.distill_phase == "train" and hparams.teacher_learning_rate:
    hparams.learning_rate = hparams.teacher_learning_rate
  elif hparams.distill_phase == "distill" and hparams.student_learning_rate:
    hparams.learning_rate = hparams.student_learning_rate

  self.teacher_hparams = registry.hparams(hparams.teacher_hparams)
  self.teacher_model = registry.model(
      hparams.teacher_model)(self.teacher_hparams, mode, problem_hparams,
                             data_parallelism, decode_hparams)
  self.student_hparams = registry.hparams(hparams.student_hparams)
  self.student_model = registry.model(
      hparams.student_model)(self.student_hparams, mode, problem_hparams,
                             data_parallelism, decode_hparams)
  super(Distillation, self).__init__(hparams, mode, problem_hparams,
                                     data_parallelism, decode_hparams)
Example 4: ppo_original_params
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def ppo_original_params():
  """Parameters based on the original PPO paper."""
  hparams = ppo_atari_base()
  hparams.learning_rate_constant = 2.5e-4
  hparams.gae_gamma = 0.99
  hparams.gae_lambda = 0.95
  hparams.clipping_coef = 0.1
  hparams.value_loss_coef = 1
  hparams.entropy_loss_coef = 0.01
  hparams.eval_every_epochs = 200
  hparams.dropout_ppo = 0.1
  # The parameters below are modified to accommodate short epoch_length (which
  # is needed for model based rollouts).
  hparams.epoch_length = 50
  hparams.optimization_batch_size = 20
  return hparams
Example 5: get_mnist_random_output
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def get_mnist_random_output(self, model_name, hparams_set=None,
                            mode=tf.estimator.ModeKeys.TRAIN):
  hparams_set = hparams_set or model_name
  x = np.random.randint(256, size=(1, 28, 28, 1))
  y = np.random.randint(10, size=(1, 1))
  features = {
      "targets": tf.constant(x, dtype=tf.int32),
      "inputs": tf.constant(y, dtype=tf.int32),
  }
  hparams = trainer_lib.create_hparams(
      hparams_set, problem_name="image_mnist_rev", data_dir=".")
  model = registry.model(model_name)(hparams, mode)
  tf.train.create_global_step()
  logits, _ = model(features)
  with self.test_session() as session:
    session.run(tf.global_variables_initializer())
    res = session.run(logits)
  return res
Example 6: __init__
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def __init__(self,
             hparams,
             mode=tf.estimator.ModeKeys.TRAIN,
             problem_hparams=None,
             data_parallelism=None,
             decode_hparams=None,
             **kwargs):
  assert hparams.distill_phase in ["train", "distill"]

  if hparams.distill_phase == "train" and hparams.teacher_learning_rate:
    hparams.learning_rate = hparams.teacher_learning_rate
  elif hparams.distill_phase == "distill" and hparams.student_learning_rate:
    hparams.learning_rate = hparams.student_learning_rate

  self.teacher_hparams = registry.hparams(hparams.teacher_hparams)
  self.teacher_model = registry.model(
      hparams.teacher_model)(self.teacher_hparams, mode, problem_hparams,
                             data_parallelism, decode_hparams)
  self.student_hparams = registry.hparams(hparams.student_hparams)
  self.student_model = registry.model(
      hparams.student_model)(self.student_hparams, mode, problem_hparams,
                             data_parallelism, decode_hparams)
  super(Distillation,
        self).__init__(hparams, mode, problem_hparams, data_parallelism,
                       decode_hparams, **kwargs)
Example 7: train_autoencoder
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def train_autoencoder(problem_name, data_dir, output_dir, hparams, epoch):
  """Train autoencoder on problem_name."""
  train_steps = hparams.autoencoder_train_steps * (epoch + 2)
  with temporary_flags({
      "problem": problem_name,
      "data_dir": data_dir,
      "output_dir": output_dir,
      "model": "autoencoder_ordered_discrete",
      "hparams_set": "autoencoder_discrete_pong",
      "train_steps": train_steps,
      "eval_steps": 100,
  }):
    t2t_trainer.main([])
Example 8: train_agent
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def train_agent(problem_name, agent_model_dir,
                event_dir, world_model_dir, epoch_data_dir, hparams, epoch=0):
  """Train the PPO agent in the simulated environment."""
  gym_problem = registry.problem(problem_name)
  ppo_hparams = trainer_lib.create_hparams(hparams.ppo_params)
  ppo_params_names = ["epochs_num", "epoch_length",
                      "learning_rate", "num_agents",
                      "optimization_epochs"]
  for param_name in ppo_params_names:
    ppo_param_name = "ppo_" + param_name
    if ppo_param_name in hparams:
      ppo_hparams.set_hparam(param_name, hparams.get(ppo_param_name))
  ppo_epochs_num = hparams.ppo_epochs_num
  ppo_hparams.save_models_every_epochs = ppo_epochs_num
  ppo_hparams.world_model_dir = world_model_dir
  ppo_hparams.add_hparam("force_beginning_resets", True)
  # Add model hparams for model-specific adjustments.
  model_hparams = trainer_lib.create_hparams(hparams.generative_model_params)
  ppo_hparams.add_hparam("model_hparams", model_hparams)
  environment_spec = copy.copy(gym_problem.environment_spec)
  environment_spec.simulation_random_starts = hparams.simulation_random_starts
  environment_spec.intrinsic_reward_scale = hparams.intrinsic_reward_scale
  ppo_hparams.add_hparam("environment_spec", environment_spec)
  with temporary_flags({
      "problem": problem_name,
      "model": hparams.generative_model,
      "hparams_set": hparams.generative_model_params,
      "output_dir": world_model_dir,
      "data_dir": epoch_data_dir,
  }):
    rl_trainer_lib.train(ppo_hparams, event_dir, agent_model_dir, epoch=epoch)
Example 9: train_world_model
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def train_world_model(problem_name, data_dir, output_dir, hparams, epoch):
  """Train the world model on problem_name."""
  train_steps = hparams.model_train_steps * (epoch + 2)
  with temporary_flags({
      "data_dir": data_dir,
      "output_dir": output_dir,
      "problem": problem_name,
      "model": hparams.generative_model,
      "hparams_set": hparams.generative_model_params,
      "eval_steps": 100,
      "train_steps": train_steps,
  }):
    t2t_trainer.main([])
Example 10: rl_modelrl_base
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def rl_modelrl_base():
  return tf.contrib.training.HParams(
      epochs=3,
      # Total frames used for training =
      #   steps * (1 - 1/11) * epochs
      # (1/11 of the steps are used for evaluation data).
      # 100k frames for training = 36666
      true_env_generator_num_steps=36666,
      generative_model="next_frame_basic",
      generative_model_params="next_frame",
      ppo_params="ppo_pong_base",
      autoencoder_train_steps=0,
      model_train_steps=50000,
      simulated_env_generator_num_steps=2000,
      simulation_random_starts=True,
      intrinsic_reward_scale=0.,
      ppo_epochs_num=200,  # This should be enough to see something.
      # Our simulated envs do not know how to reset.
      # Set ppo_time_limit to the number of steps for which you believe the
      # simulated env still produces reasonable output.
      ppo_time_limit=200,  # TODO(blazej): this param is unused.
      # It makes sense to have ppo_time_limit=ppo_epoch_length,
      # though it is not necessary.
      ppo_epoch_length=60,
      ppo_num_agents=16,
      # Whether the PPO agent should be restored from the previous iteration,
      # or should start fresh each time.
      ppo_continue_training=True,
      game="wrapped_long_pong",
      # Whether to evaluate the world model in each iteration of the loop to
      # get the model_reward_accuracy metric.
      eval_world_model=True,
  )
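The frame-budget comment in the example above can be checked with a line of arithmetic: with true_env_generator_num_steps=36666, one eleventh of the steps held out for evaluation, and epochs=3, the training budget is 36666 * (1 - 1/11) * 3 ≈ 99998 frames, i.e. roughly the stated 100k. A trivial check (not part of the original code):

steps, epochs = 36666, 3
train_frames = steps * (1 - 1. / 11) * epochs
print(int(train_frames))  # -> 99998, i.e. about 100k training frames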
Example 11: rl_modelrl_base_stochastic
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def rl_modelrl_base_stochastic():
  """Base setting with a stochastic next-frame model."""
  hparams = rl_modelrl_base()
  hparams.generative_model = "next_frame_stochastic"
  hparams.generative_model_params = "next_frame_stochastic_cutoff"
  return hparams
Example 12: rl_modelrl_tiny_stochastic
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def rl_modelrl_tiny_stochastic():
  """Tiny setting with a stochastic next-frame model."""
  hparams = rl_modelrl_tiny()
  hparams.generative_model = "next_frame_stochastic"
  hparams.generative_model_params = "next_frame_stochastic_tiny"
  return hparams
Example 13: simulate
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def simulate(self, action):
  with tf.name_scope("environment/simulate"):
    actions = tf.concat([tf.expand_dims(action, axis=1)] * self._num_frames,
                        axis=1)
    history = self.history_buffer.get_all_elements()
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
      model_output = self._model.infer(
          {"inputs": history, "input_action": actions})
    observ = tf.to_float(tf.squeeze(model_output["targets"], axis=1))
    reward = tf.to_float(model_output["target_reward"])
    reward = tf.reshape(reward, shape=(self.length,)) + self._min_reward
    if self._intrinsic_reward_scale:
      # Use the model's uncertainty about its prediction as an intrinsic
      # reward. The uncertainty is measured by the log probability of the
      # predicted pixel value.
      if "targets_logits" not in model_output:
        raise ValueError("The use of intrinsic rewards requires access to "
                         "the logits. Ensure that model.infer returns "
                         "'targets_logits'")
      uncertainty_reward = compute_uncertainty_reward(
          model_output["targets_logits"], model_output["targets"])
      uncertainty_reward = tf.minimum(
          1., self._intrinsic_reward_scale * uncertainty_reward)
      uncertainty_reward = tf.Print(uncertainty_reward, [uncertainty_reward],
                                    message="uncertainty_reward", first_n=1,
                                    summarize=8)
      reward += uncertainty_reward
    done = tf.constant(False, tf.bool, shape=(self.length,))
    with tf.control_dependencies([observ]):
      with tf.control_dependencies(
          [self._observ.assign(observ),
           self.history_buffer.move_by_one_element(observ)]):
        return tf.identity(reward), tf.identity(done)
Example 14: main
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)

  if FLAGS.score_file:
    filename = os.path.expanduser(FLAGS.score_file)
    if not tf.gfile.Exists(filename):
      raise ValueError("The file to score doesn't exist: %s" % filename)
    results = score_file(filename)
    if not FLAGS.decode_to_file:
      raise ValueError("To score a file, specify --decode_to_file for results.")
    write_file = open(os.path.expanduser(FLAGS.decode_to_file), "w")
    for score in results:
      write_file.write("%.6f\n" % score)
    write_file.close()
    return

  hp = create_hparams()
  decode_hp = create_decode_hparams()
  estimator = trainer_lib.create_estimator(
      FLAGS.model,
      hp,
      t2t_trainer.create_run_config(hp),
      decode_hparams=decode_hp,
      use_tpu=FLAGS.use_tpu)
  decode(estimator, hp, decode_hp)
Example 15: testT2TModelRegistration
# Required import: from tensor2tensor.utils import registry [as alias]
# Or: from tensor2tensor.utils.registry import model [as alias]
def testT2TModelRegistration(self):

  @registry.register_model
  class MyModel1(t2t_model.T2TModel):
    pass

  model = registry.model("my_model1")
  self.assertTrue(model is MyModel1)