This article collects typical usage examples of the Python method tensor2tensor.utils.usr_dir.import_usr_dir. If you are unsure what usr_dir.import_usr_dir does, how to call it, or want to see it used in real code, the curated examples below should help. You can also look further into the containing module, tensor2tensor.utils.usr_dir, for more detail.
A total of 15 code examples of usr_dir.import_usr_dir are shown below, sorted by popularity by default.
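Before the examples, here is a minimal, self-contained sketch of what import_usr_dir is for: it imports a user-supplied Python package (normally passed via the --t2t_usr_dir flag) so that any problems, models, or hparams sets registered in that package become visible to the tensor2tensor registry before training, decoding, or serving. The directory path and problem name below are hypothetical placeholders, not taken from the examples that follow.

from tensor2tensor.utils import registry
from tensor2tensor.utils import usr_dir

# Hypothetical path to a package whose __init__.py registers custom
# components via @registry.register_problem / @registry.register_model.
MY_USR_DIR = "/path/to/my_usr_dir"

# Importing the directory executes its __init__.py, which populates the
# tensor2tensor registry with the user-defined components.
usr_dir.import_usr_dir(MY_USR_DIR)

# After the import, registered names resolve as usual, e.g.:
# problem = registry.problem("my_custom_problem")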
Example 1: create_teacher_experiment
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def create_teacher_experiment(run_config, hparams, argv):
  """Creates experiment function."""
  tf.logging.info("training teacher")
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  t2t_trainer.maybe_log_registry_and_exit()
  if FLAGS.cloud_mlengine:
    return cloud_mlengine.launch()
  if FLAGS.generate_data:
    t2t_trainer.generate_data()
  if cloud_mlengine.job_dir():
    FLAGS.output_dir = cloud_mlengine.job_dir()
  if argv:
    t2t_trainer.set_hparams_from_args(argv[1:])
  with t2t_trainer.maybe_cloud_tpu():
    hparams.distill_phase = "train"
    exp_fn = t2t_trainer.create_experiment_fn()
    exp = exp_fn(run_config, hparams)
    return exp
Example 2: main
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  ckpt_dir = os.path.expanduser(FLAGS.output_dir)
  hparams = create_hparams()
  hparams.no_data_parallelism = True  # To clear the devices
  run_config = t2t_trainer.create_run_config(hparams)
  estimator = create_estimator(run_config, hparams)
  problem = hparams.problem
  strategy = trainer_lib.create_export_strategy(problem, hparams)
  export_dir = os.path.join(ckpt_dir, "export", strategy.name)
  strategy.export(
      estimator,
      export_dir,
      checkpoint_path=tf.train.latest_checkpoint(ckpt_dir))
Example 3: main
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  validate_flags()
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  problem = registry.problem(FLAGS.problem)
  hparams = tf.contrib.training.HParams(
      data_dir=os.path.expanduser(FLAGS.data_dir))
  problem.get_hparams(hparams)
  request_fn = make_request_fn()
  while True:
    inputs = FLAGS.inputs_once if FLAGS.inputs_once else input(">> ")
    outputs = serving_utils.predict([inputs], problem, request_fn)
    outputs, = outputs
    output, score = outputs
    print_str = """
Input:
{inputs}
Output (Score {score:.3f}):
{output}
"""
    print(print_str.format(inputs=inputs, output=output, score=score))
    if FLAGS.inputs_once:
      break
Example 4: create_teacher_experiment
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def create_teacher_experiment(run_config, hparams, argv):
  """Creates experiment function."""
  tf.logging.info("training teacher")
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  t2t_trainer.maybe_log_registry_and_exit()
  if FLAGS.cloud_mlengine:
    return cloud_mlengine.launch()
  if FLAGS.generate_data:
    t2t_trainer.generate_data()
  if cloud_mlengine.job_dir():
    FLAGS.output_dir = cloud_mlengine.job_dir()
  if argv:
    t2t_trainer.set_hparams_from_args(argv[1:])
  hparams.distill_phase = "train"
  exp_fn = t2t_trainer.create_experiment_fn()
  exp = exp_fn(run_config, hparams)
  return exp
Example 5: create_student_experiment
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def create_student_experiment(run_config, hparams, argv):
  """Creates experiment function."""
  tf.logging.info("training student")
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  t2t_trainer.maybe_log_registry_and_exit()
  if FLAGS.cloud_mlengine:
    return cloud_mlengine.launch()
  if FLAGS.generate_data:
    t2t_trainer.generate_data()
  if cloud_mlengine.job_dir():
    FLAGS.output_dir = cloud_mlengine.job_dir()
  if argv:
    t2t_trainer.set_hparams_from_args(argv[1:])
  hparams.add_hparam("teacher_dir", FLAGS.teacher_dir)
  hparams.distill_phase = "distill"
  exp_fn = t2t_trainer.create_experiment_fn()
  exp = exp_fn(run_config, hparams)
  return exp
Example 6: create_student_experiment
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def create_student_experiment(run_config, hparams, argv):
  """Creates experiment function."""
  tf.logging.info("training student")
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  t2t_trainer.maybe_log_registry_and_exit()
  if FLAGS.cloud_mlengine:
    return cloud_mlengine.launch()
  if FLAGS.generate_data:
    t2t_trainer.generate_data()
  if cloud_mlengine.job_dir():
    FLAGS.output_dir = cloud_mlengine.job_dir()
  if argv:
    t2t_trainer.set_hparams_from_args(argv[1:])
  with t2t_trainer.maybe_cloud_tpu():
    hparams.add_hparam("teacher_dir", FLAGS.teacher_dir)
    hparams.distill_phase = "distill"
    exp_fn = t2t_trainer.create_experiment_fn()
    exp = exp_fn(run_config, hparams)
    return exp
Example 7: main
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def main(argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  maybe_log_registry_and_exit()
  if FLAGS.cloud_mlengine:
    cloud_mlengine.launch()
    return
  if FLAGS.generate_data:
    generate_data()
  if cloud_mlengine.job_dir():
    FLAGS.output_dir = cloud_mlengine.job_dir()
  if argv:
    set_hparams_from_args(argv[1:])
  hparams = create_hparams()
  with maybe_cloud_tpu():
    exp_fn = create_experiment_fn()
    exp = exp_fn(create_run_config(hparams), hparams)
    if is_chief():
      save_metadata(hparams)
    execute_schedule(exp)
Example 8: __init__
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def __init__(self, processor_configuration):
  """Creates the Transformer estimator.

  Args:
    processor_configuration: A ProcessorConfiguration protobuffer with the
      transformer fields populated.
  """
  # Do the pre-setup tensor2tensor requires for flags and configurations.
  transformer_config = processor_configuration["transformer"]
  FLAGS.output_dir = transformer_config["model_dir"]
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  data_dir = os.path.expanduser(transformer_config["data_dir"])
  # Create the basic hyper parameters.
  self.hparams = trainer_lib.create_hparams(
      transformer_config["hparams_set"],
      transformer_config["hparams"],
      data_dir=data_dir,
      problem_name=transformer_config["problem"])
  decode_hp = decoding.decode_hparams()
  decode_hp.add_hparam("shards", 1)
  decode_hp.add_hparam("shard_id", 0)
  # Create the estimator and final hyper parameters.
  self.estimator = trainer_lib.create_estimator(
      transformer_config["model"],
      self.hparams,
      t2t_trainer.create_run_config(self.hparams),
      decode_hparams=decode_hp, use_tpu=False)
  # Fetch the vocabulary and other helpful variables for decoding.
  self.source_vocab = self.hparams.problem_hparams.vocabulary["inputs"]
  self.targets_vocab = self.hparams.problem_hparams.vocabulary["targets"]
  self.const_array_size = 10000
  # Prepare the Transformer's debug data directory.
  run_dirs = sorted(glob.glob(os.path.join("/tmp/t2t_server_dump", "run_*")))
  for run_dir in run_dirs:
    shutil.rmtree(run_dir)
Example 9: create_student_experiment
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def create_student_experiment(run_config, hparams, argv):
  """Creates experiment function."""
  tf.logging.info("training student")
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  t2t_trainer.maybe_log_registry_and_exit()
  if FLAGS.cloud_mlengine:
    return cloud_mlengine.launch()
  if FLAGS.generate_data:
    t2t_trainer.generate_data()
  if cloud_mlengine.job_dir():
    FLAGS.output_dir = cloud_mlengine.job_dir()
  if argv:
    t2t_trainer.set_hparams_from_args(argv[1:])
  hparams.add_hparam("teacher_dir", FLAGS.teacher_dir)
  hparams.add_hparam("student_dir", FLAGS.student_dir)
  hparams.distill_phase = "distill"
  exp_fn = t2t_trainer.create_experiment_fn()
  exp = exp_fn(run_config, hparams)
  return exp
Example 10: main
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  hparams = trainer_lib.create_hparams(
      FLAGS.hparams_set, FLAGS.hparams, data_dir=FLAGS.data_dir,
      problem_name=FLAGS.problem)
  # set appropriate dataset-split, if flags.eval_use_test_set.
  dataset_split = "test" if FLAGS.eval_use_test_set else None
  dataset_kwargs = {"dataset_split": dataset_split}
  eval_input_fn = hparams.problem.make_estimator_input_fn(
      tf.estimator.ModeKeys.EVAL, hparams, dataset_kwargs=dataset_kwargs)
  config = t2t_trainer.create_run_config(hparams)
  # summary-hook in tf.estimator.EstimatorSpec requires
  # hparams.model_dir to be set.
  hparams.add_hparam("model_dir", config.model_dir)
  estimator = trainer_lib.create_estimator(
      FLAGS.model, hparams, config, use_tpu=FLAGS.use_tpu)
  ckpt_iter = trainer_lib.next_checkpoint(
      hparams.model_dir, FLAGS.eval_timeout_mins)
  for ckpt_path in ckpt_iter:
    predictions = estimator.evaluate(
        eval_input_fn, steps=FLAGS.eval_steps, checkpoint_path=ckpt_path)
    tf.logging.info(predictions)
Example 11: main
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  if FLAGS.score_file:
    filename = os.path.expanduser(FLAGS.score_file)
    if not tf.gfile.Exists(filename):
      raise ValueError("The file to score doesn't exist: %s" % filename)
    results = score_file(filename)
    if not FLAGS.decode_to_file:
      raise ValueError("To score a file, specify --decode_to_file for results.")
    write_file = tf.gfile.Open(os.path.expanduser(FLAGS.decode_to_file), "w")
    for score in results:
      write_file.write("%.6f\n" % score)
    write_file.close()
    return
  hp = create_hparams()
  decode_hp = create_decode_hparams()
  run_config = t2t_trainer.create_run_config(hp)
  if FLAGS.disable_grappler_optimizations:
    run_config.session_config.graph_options.rewrite_options.disable_meta_optimizer = True
  # summary-hook in tf.estimator.EstimatorSpec requires
  # hparams.model_dir to be set.
  hp.add_hparam("model_dir", run_config.model_dir)
  estimator = trainer_lib.create_estimator(
      FLAGS.model,
      hp,
      run_config,
      decode_hparams=decode_hp,
      use_tpu=FLAGS.use_tpu)
  decode(estimator, hp, decode_hp)
Example 12: main
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  if FLAGS.checkpoint_path:
    checkpoint_path = FLAGS.checkpoint_path
    ckpt_dir = os.path.dirname(checkpoint_path)
  else:
    ckpt_dir = os.path.expanduser(FLAGS.output_dir)
    checkpoint_path = tf.train.latest_checkpoint(ckpt_dir)
  hparams = create_hparams()
  hparams.no_data_parallelism = True  # To clear the devices
  problem = hparams.problem
  decode_hparams = decoding.decode_hparams(FLAGS.decode_hparams)
  export_dir = FLAGS.export_dir or os.path.join(ckpt_dir, "export")
  if FLAGS.export_as_tfhub:
    checkpoint_path = tf.train.latest_checkpoint(ckpt_dir)
    export_as_tfhub_module(FLAGS.model, hparams, decode_hparams, problem,
                           checkpoint_path, export_dir)
    return
  run_config = t2t_trainer.create_run_config(hparams)
  estimator = create_estimator(run_config, hparams)
  exporter = tf.estimator.FinalExporter(
      "exporter",
      lambda: problem.serving_input_fn(hparams, decode_hparams, FLAGS.use_tpu),
      as_text=FLAGS.as_text)
  exporter.export(
      estimator,
      export_dir,
      checkpoint_path=checkpoint_path,
      eval_result=None,
      is_the_final_export=True)
Example 13: main
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  if FLAGS.score_file:
    filename = os.path.expanduser(FLAGS.score_file)
    if not tf.gfile.Exists(filename):
      raise ValueError("The file to score doesn't exist: %s" % filename)
    results = score_file(filename)
    if not FLAGS.decode_to_file:
      raise ValueError("To score a file, specify --decode_to_file for results.")
    write_file = tf.gfile.Open(os.path.expanduser(FLAGS.decode_to_file), "w")
    for score in results:
      write_file.write("%.6f\n" % score)
    write_file.close()
    return
  hp = create_hparams()
  decode_hp = create_decode_hparams()
  estimator = trainer_lib.create_estimator(
      FLAGS.model,
      hp,
      t2t_trainer.create_run_config(hp),
      decode_hparams=decode_hp,
      use_tpu=FLAGS.use_tpu)
  decode(estimator, hp, decode_hp)
Example 14: main
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  if FLAGS.checkpoint_path:
    checkpoint_path = FLAGS.checkpoint_path
    ckpt_dir = os.path.dirname(checkpoint_path)
  else:
    ckpt_dir = os.path.expanduser(FLAGS.output_dir)
    checkpoint_path = tf.train.latest_checkpoint(ckpt_dir)
  hparams = create_hparams()
  hparams.no_data_parallelism = True  # To clear the devices
  problem = hparams.problem
  export_dir = FLAGS.export_dir or os.path.join(ckpt_dir, "export")
  if FLAGS.export_as_tfhub:
    checkpoint_path = tf.train.latest_checkpoint(ckpt_dir)
    decode_hparams = decoding.decode_hparams(FLAGS.decode_hparams)
    export_as_tfhub_module(FLAGS.model, hparams, decode_hparams, problem,
                           checkpoint_path, export_dir)
    return
  run_config = t2t_trainer.create_run_config(hparams)
  estimator = create_estimator(run_config, hparams)
  exporter = tf.estimator.FinalExporter(
      "exporter", lambda: problem.serving_input_fn(hparams), as_text=True)
  exporter.export(
      estimator,
      export_dir,
      checkpoint_path=checkpoint_path,
      eval_result=None,
      is_the_final_export=True)
Example 15: main
# Required module import: from tensor2tensor.utils import usr_dir [as alias]
# Or: from tensor2tensor.utils.usr_dir import import_usr_dir [as alias]
def main(argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  # If we just have to print the registry, do that and exit early.
  maybe_log_registry_and_exit()
  # Create HParams.
  if argv:
    set_hparams_from_args(argv[1:])
  hparams = create_hparams()
  if FLAGS.schedule == "train" or FLAGS.schedule == "train_eval_and_decode":
    mlperf_log.transformer_print(key=mlperf_log.RUN_START, hparams=hparams)
  if FLAGS.schedule == "run_std_server":
    run_std_server()
  mlperf_log.transformer_print(
      key=mlperf_log.RUN_SET_RANDOM_SEED, value=FLAGS.random_seed,
      hparams=hparams)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  if FLAGS.cloud_mlengine:
    cloud_mlengine.launch()
    return
  if FLAGS.generate_data:
    generate_data()
  if cloud_mlengine.job_dir():
    FLAGS.output_dir = cloud_mlengine.job_dir()
  exp_fn = create_experiment_fn()
  exp = exp_fn(create_run_config(hparams), hparams)
  if is_chief():
    save_metadata(hparams)
  execute_schedule(exp)
  if FLAGS.schedule != "train":
    mlperf_log.transformer_print(key=mlperf_log.RUN_FINAL,
                                 hparams=hparams)