本文整理汇总了Python中absl.flags.mark_flags_as_required方法的典型用法代码示例。如果您正苦于以下问题:Python flags.mark_flags_as_required方法的具体用法?Python flags.mark_flags_as_required怎么用?Python flags.mark_flags_as_required使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类absl.flags
的用法示例。
在下文中一共展示了flags.mark_flags_as_required方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from absl import flags [as 别名]
# 或者: from absl.flags import mark_flags_as_required [as 别名]
def main(_):
  """Cache the requested t5 Tasks with a Beam pipeline.

  Requires --output_cache_dir. Runs the caching pipeline and then drops an
  empty COMPLETED marker file into every output directory it produced.
  """
  flags.mark_flags_as_required(["output_cache_dir"])
  _import_modules(FLAGS.module_import)

  # Make both the new cache dir and any extra dirs visible to t5.
  cache_dirs = [FLAGS.output_cache_dir] + FLAGS.tasks_additional_cache_dirs
  t5.data.add_global_cache_dirs(cache_dirs)

  completed_dirs = []
  opts = beam.options.pipeline_options.PipelineOptions(FLAGS.pipeline_options)
  with beam.Pipeline(options=opts) as root:
    tf.io.gfile.makedirs(FLAGS.output_cache_dir)
    completed_dirs = run_pipeline(
        root, FLAGS.tasks, FLAGS.output_cache_dir,
        FLAGS.max_input_examples, FLAGS.excluded_tasks, FLAGS.module_import,
        FLAGS.overwrite)

  # TODO(adarob): Figure out a way to write these when each task completes.
  for cache_dir in completed_dirs:
    marker_path = os.path.join(cache_dir, "COMPLETED")
    with tf.io.gfile.GFile(marker_path, "w") as marker:
      marker.write("")
示例2: main
# 需要导入模块: from absl import flags [as 别名]
# 或者: from absl.flags import mark_flags_as_required [as 别名]
def main(argv):
  """Build per-split TFRecords of tf.train.Examples from a dataset CSV.

  Requires --csv and --output_directory. Groups (midi, wav) path pairs by the
  CSV's 'split' column, validates the splits against --expected_splits, and
  writes one sharded TFRecord file per split via a Beam pipeline.
  """
  del argv
  flags.mark_flags_as_required(['csv', 'output_directory'])
  tf.io.gfile.makedirs(FLAGS.output_directory)

  # Group (midi_path, wav_path) pairs by their split label.
  grouped = collections.defaultdict(list)
  with tf.io.gfile.GFile(FLAGS.csv) as csv_file:
    for record in csv.DictReader(csv_file):
      pair = (os.path.join(FLAGS.midi_dir, record['midi_filename']),
              os.path.join(FLAGS.wav_dir, record['audio_filename']))
      grouped[record['split']].append(pair)

  if sorted(grouped.keys()) != sorted(FLAGS.expected_splits.split(',')):
    raise ValueError('Got unexpected set of splits: %s' % list(grouped.keys()))

  opts = beam.options.pipeline_options.PipelineOptions(FLAGS.pipeline_options)
  with beam.Pipeline(options=opts) as root:
    for split in grouped:
      stage = root | 'prepare_split_%s' % split >> beam.Create(grouped[split])
      stage |= 'create_examples_%s' % split >> beam.ParDo(
          CreateExampleDoFn(FLAGS.wav_dir, FLAGS.midi_dir, FLAGS.add_wav_glob))
      stage |= 'write_%s' % split >> beam.io.WriteToTFRecord(
          os.path.join(FLAGS.output_directory, '%s.tfrecord' % split),
          coder=beam.coders.ProtoCoder(tf.train.Example),
          num_shards=FLAGS.num_shards)
示例3: define_flags
# 需要导入模块: from absl import flags [as 别名]
# 或者: from absl.flags import mark_flags_as_required [as 别名]
def define_flags():
  """Define the command-line flags used by the evaluator."""
  # GPU devices visible to this process (mirrors CUDA_VISIBLE_DEVICES).
  flags.DEFINE_string(name='gpu', default='',
                      help='same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string(name='config', default=None,
                      help='path to yaml config file')
  flags.DEFINE_enum(name='mode', default='eval',
                    enum_values=['eval', 'infer', 'eval_and_infer'],
                    help='eval or infer')
  flags.DEFINE_bool(name='debug', default=False, help='debug mode')
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode'])
示例4: main
# 需要导入模块: from absl import flags [as 别名]
# 或者: from absl.flags import mark_flags_as_required [as 别名]
def main(_):
  """Print examples from a registered t5 Task using --format_string.

  Requires --task. Iterates the task's dataset shards, formats each example's
  "inputs"/"targets" fields (tokenized or as plain text, per --tokenize), and
  prints up to --max_examples formatted lines.
  """
  flags.mark_flags_as_required(["task"])
  if FLAGS.module_import:
    import_modules(FLAGS.module_import)
  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
  total_examples = 0
  tf.enable_eager_execution()
  task = t5.data.TaskRegistry.get(FLAGS.task)
  files = task.tfds_dataset.files(FLAGS.split)

  def _example_to_string(ex):
    """Render one example dict via FLAGS.format_string."""
    key_to_string = {}
    for k in ("inputs", "targets"):
      if k in ex:
        v = ex[k].numpy()
        key_to_string[k] = (
            " ".join(str(i) for i in v) if FLAGS.tokenize
            else v.decode("utf-8"))
      else:
        # BUG FIX: was `v[k] = ""`, which assigned into an undefined (or
        # stale) local `v` instead of supplying an empty substitution for
        # the missing key, so format(**key_to_string) would fail below.
        key_to_string[k] = ""
    return FLAGS.format_string.format(**key_to_string)

  for shard_path in files:
    logging.info("Processing shard: %s", shard_path)
    ds = task.tfds_dataset.load_shard(shard_path)
    ds = task.preprocess_text(ds)
    if FLAGS.tokenize:
      ds = t5.data.encode_string_features(
          ds, task.output_features, keys=task.output_features,
          copy_plaintext=True)
      ds = task.preprocess_tokens(ds, sequence_length())
    for ex in ds:
      print(_example_to_string(ex))
      total_examples += 1
      if total_examples == FLAGS.max_examples:
        return
示例5: main
# 需要导入模块: from absl import flags [as 别名]
# 或者: from absl.flags import mark_flags_as_required [as 别名]
def main(unused_argv):
  """Dispatch to the selected run mode.

  Requires --bucket_name and --mode. Runs the CC evaluator for 'cc', or the
  TPU evaluator for 'tpu'/'tpu_nr' (the latter with resignation disabled).
  """
  flags.mark_flags_as_required(['bucket_name', 'mode'])
  mode = FLAGS.mode
  if mode == 'cc':
    run_cc()
  elif mode in ('tpu', 'tpu_nr'):
    # 'tpu_nr' is the no-resign variant of the TPU run.
    run_tpu(no_resign=(mode == 'tpu_nr'))
示例6: define_flags
# 需要导入模块: from absl import flags [as 别名]
# 或者: from absl.flags import mark_flags_as_required [as 别名]
def define_flags():
  """Construct flags for the server.

  Defines the flags consumed by the async data-generation subprocess and
  marks --data_dir and --cache_id as required.
  """
  flags.DEFINE_integer(name="num_workers", default=multiprocessing.cpu_count(),
                       help="Size of the negative generation worker pool.")
  flags.DEFINE_string(name="data_dir", default=None,
                      help="The data root. (used to construct cache paths.)")
  flags.DEFINE_string(name="cache_id", default=None,
                      help="The cache_id generated in the main process.")
  # BUG FIX: the implicit string concatenation below was missing a space
  # ("This sets" + "how" rendered as "setshow").
  flags.DEFINE_integer(name="num_readers", default=4,
                       help="Number of reader datasets in training. This sets "
                            "how the epoch files are sharded.")
  # BUG FIX: "The Number" -> "The number" (help-text typo).
  flags.DEFINE_integer(name="num_neg", default=None,
                       help="The number of negative instances to pair with a "
                            "positive instance.")
  flags.DEFINE_integer(name="num_train_positives", default=None,
                       help="The number of positive training examples.")
  flags.DEFINE_integer(name="num_items", default=None,
                       help="Number of items from which to select negatives.")
  flags.DEFINE_integer(name="num_users", default=None,
                       help="The number of unique users. Used for evaluation.")
  # BUG FIX: missing space ("produce" + "at" rendered as "produceat").
  flags.DEFINE_integer(name="epochs_per_cycle", default=1,
                       help="The number of epochs of training data to produce "
                            "at a time.")
  flags.DEFINE_integer(name="num_cycles", default=None,
                       help="The number of cycles to produce training data "
                            "for.")
  flags.DEFINE_integer(name="train_batch_size", default=None,
                       help="The batch size with which training TFRecords will "
                            "be chunked.")
  flags.DEFINE_integer(name="eval_batch_size", default=None,
                       help="The batch size with which evaluation TFRecords "
                            "will be chunked.")
  flags.DEFINE_boolean(name="redirect_logs", default=False,
                       help="Catch logs and write them to a file. "
                            "(Useful if this is run as a subprocess)")
  flags.DEFINE_boolean(name="use_tf_logging", default=False,
                       help="Use tf.logging instead of log file.")
  flags.DEFINE_integer(name="seed", default=None,
                       help="NumPy random seed to set at startup. If not "
                            "specified, a seed will not be set.")
  flags.DEFINE_boolean(name="ml_perf", default=None,
                       help="Match MLPerf. See ncf_main.py for details.")
  flags.DEFINE_bool(name="output_ml_perf_compliance_logging", default=None,
                    help="Output the MLPerf compliance logging. See "
                         "ncf_main.py for details.")

  flags.mark_flags_as_required(["data_dir", "cache_id"])
开发者ID:isobar-us,项目名称:multilabel-image-classification-tensorflow,代码行数:49,代码来源:data_async_generation.py