

Python flags.mark_flags_as_required method code examples

This article collects typical usage examples of the Python method absl.flags.mark_flags_as_required. If you are unsure what flags.mark_flags_as_required does or how to use it, the selected code examples below may help. You can also explore further usage examples from the absl.flags module.


The following presents 6 code examples of flags.mark_flags_as_required, sorted by popularity by default.
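
Before the examples, here is a minimal self-contained sketch of the typical pattern (the flag name "my_input" is made up for illustration): flags.mark_flags_as_required registers a validator for each listed flag that rejects a value of None, so a run that omits any of those flags fails when absl parses the command line.

from absl import app
from absl import flags

FLAGS = flags.FLAGS

# A flag defined with default=None; it becomes mandatory once marked
# as required below. The name "my_input" is illustrative only.
flags.DEFINE_string("my_input", None, "Path to the input file.")


def main(argv):
  del argv  # Unused.
  print("my_input =", FLAGS.my_input)


if __name__ == "__main__":
  # Marking the flag as required before app.run() makes flag parsing
  # fail with an error if --my_input is not supplied.
  flags.mark_flags_as_required(["my_input"])
  app.run(main)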

Example 1: main

# Module to import: from absl import flags [as alias]
# Or: from absl.flags import mark_flags_as_required [as alias]
def main(_):
  flags.mark_flags_as_required(["output_cache_dir"])

  _import_modules(FLAGS.module_import)

  t5.data.add_global_cache_dirs(
      [FLAGS.output_cache_dir] + FLAGS.tasks_additional_cache_dirs)

  output_dirs = []
  pipeline_options = beam.options.pipeline_options.PipelineOptions(
      FLAGS.pipeline_options)
  with beam.Pipeline(options=pipeline_options) as pipeline:
    tf.io.gfile.makedirs(FLAGS.output_cache_dir)
    output_dirs = run_pipeline(
        pipeline, FLAGS.tasks, FLAGS.output_cache_dir,
        FLAGS.max_input_examples, FLAGS.excluded_tasks, FLAGS.module_import,
        FLAGS.overwrite)

  # TODO(adarob): Figure out a way to write these when each task completes.
  for output_dir in output_dirs:
    with tf.io.gfile.GFile(os.path.join(output_dir, "COMPLETED"), "w") as f:
      f.write("") 
Author: google-research | Project: text-to-text-transfer-transformer | Lines: 24 | Source: cache_tasks_main.py

Example 2: main

# Module to import: from absl import flags [as alias]
# Or: from absl.flags import mark_flags_as_required [as alias]
def main(argv):
  del argv

  flags.mark_flags_as_required(['csv', 'output_directory'])

  tf.io.gfile.makedirs(FLAGS.output_directory)

  with tf.io.gfile.GFile(FLAGS.csv) as f:
    reader = csv.DictReader(f)

    splits = collections.defaultdict(list)
    for row in reader:
      splits[row['split']].append(
          (os.path.join(FLAGS.midi_dir, row['midi_filename']),
           os.path.join(FLAGS.wav_dir, row['audio_filename'])))

  if sorted(splits.keys()) != sorted(FLAGS.expected_splits.split(',')):
    raise ValueError('Got unexpected set of splits: %s' % list(splits.keys()))

  pipeline_options = beam.options.pipeline_options.PipelineOptions(
      FLAGS.pipeline_options)
  with beam.Pipeline(options=pipeline_options) as p:
    for split in splits:
      split_p = p | 'prepare_split_%s' % split >> beam.Create(splits[split])
      split_p |= 'create_examples_%s' % split >> beam.ParDo(
          CreateExampleDoFn(FLAGS.wav_dir, FLAGS.midi_dir, FLAGS.add_wav_glob))
      split_p |= 'write_%s' % split >> beam.io.WriteToTFRecord(
          os.path.join(FLAGS.output_directory, '%s.tfrecord' % split),
          coder=beam.coders.ProtoCoder(tf.train.Example),
          num_shards=FLAGS.num_shards) 
Author: magenta | Project: magenta | Lines: 33 | Source: onsets_frames_transcription_create_tfrecords.py

Example 3: define_flags

# Module to import: from absl import flags [as alias]
# Or: from absl.flags import mark_flags_as_required [as alias]
def define_flags():
  """Define flags for the evaluator."""
  # The GPU devices which are visible for current process
  flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
  flags.DEFINE_string('config', None, help='path to yaml config file')
  flags.DEFINE_enum('mode', 'eval', ['eval', 'infer', 'eval_and_infer'],
                    'eval or infer')
  flags.DEFINE_bool('debug', False, 'debug mode')
  # https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
  flags.mark_flags_as_required(['config', 'mode']) 
Author: didi | Project: delta | Lines: 12 | Source: run_saved_model.py

Example 4: main

# Module to import: from absl import flags [as alias]
# Or: from absl.flags import mark_flags_as_required [as alias]
def main(_):
  flags.mark_flags_as_required(["task"])

  if FLAGS.module_import:
    import_modules(FLAGS.module_import)

  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)

  total_examples = 0
  tf.enable_eager_execution()
  task = t5.data.TaskRegistry.get(FLAGS.task)
  files = task.tfds_dataset.files(FLAGS.split)
  def _example_to_string(ex):
    key_to_string = {}
    for k in ("inputs", "targets"):
      if k in ex:
        v = ex[k].numpy()
        key_to_string[k] = (
            " ".join(str(i) for i in v) if FLAGS.tokenize
            else v.decode("utf-8"))
      else:
        key_to_string[k] = ""
    return FLAGS.format_string.format(**key_to_string)

  for shard_path in files:
    logging.info("Processing shard: %s", shard_path)
    ds = task.tfds_dataset.load_shard(shard_path)
    ds = task.preprocess_text(ds)
    if FLAGS.tokenize:
      ds = t5.data.encode_string_features(
          ds, task.output_features, keys=task.output_features,
          copy_plaintext=True)
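      # Note: import_modules() (called above) and sequence_length() (below) are
      # module-level helpers in the original dump_task.py, omitted from this excerpt.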
      ds = task.preprocess_tokens(ds, sequence_length())

    for ex in ds:
      print(_example_to_string(ex))
      total_examples += 1
      if total_examples == FLAGS.max_examples:
        return 
Author: google-research | Project: text-to-text-transfer-transformer | Lines: 41 | Source: dump_task.py

Example 5: main

# Module to import: from absl import flags [as alias]
# Or: from absl.flags import mark_flags_as_required [as alias]
def main(unused_argv):
    flags.mark_flags_as_required(['bucket_name', 'mode'])
    if FLAGS.mode == 'cc':
        run_cc()
    elif FLAGS.mode == 'tpu':
        run_tpu(no_resign=False)
    elif FLAGS.mode == 'tpu_nr':
        run_tpu(no_resign=True) 
Author: mlperf | Project: training | Lines: 10 | Source: selfplay.py

Example 6: define_flags

# Module to import: from absl import flags [as alias]
# Or: from absl.flags import mark_flags_as_required [as alias]
def define_flags():
  """Construct flags for the server."""
  flags.DEFINE_integer(name="num_workers", default=multiprocessing.cpu_count(),
                       help="Size of the negative generation worker pool.")
  flags.DEFINE_string(name="data_dir", default=None,
                      help="The data root. (used to construct cache paths.)")
  flags.DEFINE_string(name="cache_id", default=None,
                      help="The cache_id generated in the main process.")
  flags.DEFINE_integer(name="num_readers", default=4,
                       help="Number of reader datasets in training. This sets"
                            "how the epoch files are sharded.")
  flags.DEFINE_integer(name="num_neg", default=None,
                       help="The Number of negative instances to pair with a "
                            "positive instance.")
  flags.DEFINE_integer(name="num_train_positives", default=None,
                       help="The number of positive training examples.")
  flags.DEFINE_integer(name="num_items", default=None,
                       help="Number of items from which to select negatives.")
  flags.DEFINE_integer(name="num_users", default=None,
                       help="The number of unique users. Used for evaluation.")
  flags.DEFINE_integer(name="epochs_per_cycle", default=1,
                       help="The number of epochs of training data to produce"
                            "at a time.")
  flags.DEFINE_integer(name="num_cycles", default=None,
                       help="The number of cycles to produce training data "
                            "for.")
  flags.DEFINE_integer(name="train_batch_size", default=None,
                       help="The batch size with which training TFRecords will "
                            "be chunked.")
  flags.DEFINE_integer(name="eval_batch_size", default=None,
                       help="The batch size with which evaluation TFRecords "
                            "will be chunked.")
  flags.DEFINE_boolean(name="redirect_logs", default=False,
                       help="Catch logs and write them to a file. "
                            "(Useful if this is run as a subprocess)")
  flags.DEFINE_boolean(name="use_tf_logging", default=False,
                       help="Use tf.logging instead of log file.")
  flags.DEFINE_integer(name="seed", default=None,
                       help="NumPy random seed to set at startup. If not "
                            "specified, a seed will not be set.")
  flags.DEFINE_boolean(name="ml_perf", default=None,
                       help="Match MLPerf. See ncf_main.py for details.")
  flags.DEFINE_bool(name="output_ml_perf_compliance_logging", default=None,
                    help="Output the MLPerf compliance logging. See "
                         "ncf_main.py for details.")

  flags.mark_flags_as_required(["data_dir", "cache_id"]) 
Author: isobar-us | Project: multilabel-image-classification-tensorflow | Lines: 49 | Source: data_async_generation.py


Note: The absl.flags.mark_flags_as_required examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright belongs to the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.