

Python gfile.Glob Method Code Examples

This article collects typical usage examples of the tensorflow.gfile.Glob method in Python. If you are wondering what exactly gfile.Glob does, how to use it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples from the enclosing tensorflow.gfile module.


Fifteen code examples of the gfile.Glob method are shown below, sorted by popularity by default.
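
Before the examples, a minimal sketch of a bare gfile.Glob call may be useful (TF 1.x API; the /tmp/data path below is hypothetical):

import tensorflow as tf
from tensorflow import gfile

# Glob accepts standard wildcard patterns and works on local paths as well
# as GCS (gs://...) paths; it returns a list of matching file names.
files = gfile.Glob('/tmp/data/train-*.tfrecord')
print('matched %d files' % len(files))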

Example 1: create_filename_queue

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def create_filename_queue(coordinates_file_pattern, shuffle=True):
  """Creates a queue for reading coordinates from coordinate file.

  Args:
    coordinates_file_pattern: File pattern for TFRecords of
                              input examples of the form of a glob
                              pattern or path@shards.
    shuffle: Whether to shuffle the coordinate file list. Note that the expanded
             coordinates_file_pattern is not guaranteed to be sorted
             alphabetically.

  Returns:
    A TensorFlow queue with coordinate filenames.
  """
  m = re.search(r'@(\d{1,})', coordinates_file_pattern)
  if m:
    num_shards = int(m.group(1))
    coord_file_list = [
      re.sub(r'@(\d{1,})', '-%.5d-of-%.5d' % (i, num_shards),
             coordinates_file_pattern)
      for i in range(num_shards)]
  else:
    coord_file_list = gfile.Glob(coordinates_file_pattern)
  return tf.train.string_input_producer(coord_file_list, shuffle=shuffle) 
Developer: google, Project: ffn, Lines of code: 26, Source file: inputs.py
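
A hedged usage sketch of the function above (both patterns are hypothetical): a path@shards pattern is expanded arithmetically into -NNNNN-of-NNNNN shard names, while any other pattern falls through to gfile.Glob:

# Expands to coords-00000-of-00003 ... coords-00002-of-00003; no globbing.
queue = create_filename_queue('/data/coords@3', shuffle=False)

# A plain wildcard pattern is resolved with gfile.Glob instead.
queue = create_filename_queue('/data/coords-*-of-00003', shuffle=True)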

Example 2: read_df_from_gcs

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def read_df_from_gcs(file_pattern):
  """Read data from Google Cloud Storage, split into train and validation sets.

  Assume that the data on GCS is in csv format without header.
  The column names will be provided through metadata

  Args:
    file_pattern: (string) pattern of the files containing training data.
    For example: [gs://bucket/folder_name/prefix]

  Returns:
    pandas.DataFrame
  """

  # Read each matching file via gfile (works for both local and GCS paths).
  df_list = []

  for filepath in gfile.Glob(file_pattern):
    with gfile.Open(filepath, 'r') as f:
      # Assume there is no header
      df_list.append(pd.read_csv(f, names=metadata.CSV_COLUMNS))

  data_df = pd.concat(df_list)

  return data_df 
Developer: GoogleCloudPlatform, Project: cloudml-samples, Lines of code: 27, Source file: utils.py
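
A usage sketch under the function's stated assumptions (headerless CSVs, column names supplied by metadata.CSV_COLUMNS); the bucket path is hypothetical:

# Reads every matching CSV from GCS and concatenates them into one DataFrame.
train_df = read_df_from_gcs('gs://my-bucket/folder_name/prefix*')
print(train_df.shape)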

Example 3: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=4 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda, Project: youtube-8m, Lines of code: 21, Source file: inference-combine-tfrecords-frame.py

Example 4: get_input_evaluation_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      print(data_pattern, files)
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda, Project: youtube-8m, Lines of code: 22, Source file: check_distillation.py

Example 5: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=4 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda, Project: youtube-8m, Lines of code: 21, Source file: inference-pre-ensemble.py

Example 6: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda, Project: youtube-8m, Lines of code: 21, Source file: inference-combine-tfrecords-video.py

Example 7: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda, Project: youtube-8m, Lines of code: 21, Source file: inference.py
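
All of the queue-based input functions in these YouTube-8M examples rely on TF 1.x queue runners. A hedged consumption sketch follows (the reader object and glob pattern are placeholders, not part of the original code):

data_pattern = '/data/eval/*.tfrecord'  # hypothetical TFRecord glob
eval_batch = get_input_data_tensors(reader, data_pattern, batch_size=256)

with tf.Session() as sess:
  # num_epochs=1 creates a local variable, so local init is required.
  sess.run([tf.global_variables_initializer(),
            tf.local_variables_initializer()])
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  try:
    while not coord.should_stop():
      batch = sess.run(eval_batch)  # one batch of prepared examples
  except tf.errors.OutOfRangeError:
    pass  # the single epoch has been exhausted
  finally:
    coord.request_stop()
    coord.join(threads)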

Example 8: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256,
                           num_epochs=None):
  logging.info("Using batch size of " + str(batch_size) + " for training.")
  with tf.name_scope("train_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find training files. data_pattern='" +
                    data_pattern + "'.")
    logging.info("Number of training files: %s.", str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=num_epochs, shuffle=False)
    training_data = reader.prepare_reader(filename_queue)

    return tf.train.batch(
        training_data,
        batch_size=batch_size,
        capacity=FLAGS.batch_size * 4,  # note: reads the flag, not the batch_size argument
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda, Project: youtube-8m, Lines of code: 24, Source file: train.py

Example 9: get_input_evaluation_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=1024):

  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda, Project: youtube-8m, Lines of code: 22, Source file: eval.py

Example 10: validate

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def validate(
        working_dir: 'tf.estimator working directory',
        *tf_record_dirs: 'Directories where holdout data are',
        checkpoint_name: 'Which checkpoint to evaluate (None=latest)'=None,
        validate_name: 'Name for validation set (i.e., selfplay or human)'=None):
    qmeas.start_time('validate')
    tf_records = []
    with timer("Building lists of holdout files"):
        for record_dir in tf_record_dirs:
            tf_records.extend(gfile.Glob(os.path.join(record_dir, '*.zz')))

    first_record = os.path.basename(tf_records[0])
    last_record = os.path.basename(tf_records[-1])
    with timer("Validating from {} to {}".format(first_record, last_record)):
        dual_net.validate(
            working_dir, tf_records, checkpoint_name=checkpoint_name,
            name=validate_name)
    qmeas.stop_time('validate') 
Developer: mlperf, Project: training_results_v0.5, Lines of code: 20, Source file: main.py
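
A hypothetical invocation of this validation entry point (the directories are placeholders; each holdout directory is expected to contain *.zz TFRecord files):

validate('/tmp/estimator_dir', '/data/holdout/0', '/data/holdout/1',
         checkpoint_name=None, validate_name='selfplay')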

Example 11: train

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def train():
    model_version, model_name = get_latest_model()
    logger.info("Training on gathered game data, initializing from {}".format(model_name))
    new_model_name = generate(model_version + 1)
    logger.info("New model will be {}".format(new_model_name))
    save_file = os.path.join(PATHS.MODELS_DIR, new_model_name)

    try:
        logger.info("Getting tf_records")
        tf_records = sorted(gfile.Glob(os.path.join(PATHS.TRAINING_CHUNK_DIR, '*.tfrecord.zz')))
        tf_records = tf_records[
                     -1 * (GLOBAL_PARAMETER_STORE.WINDOW_SIZE // GLOBAL_PARAMETER_STORE.EXAMPLES_PER_RECORD):]

        print("Training from:", tf_records[0], "to", tf_records[-1])

        with timer("Training"):
            network.train(PATHS.ESTIMATOR_WORKING_DIR, tf_records, model_version+1)
            network.export_latest_checkpoint_model(PATHS.ESTIMATOR_WORKING_DIR, save_file)

    except Exception:  # avoid a bare except, which would also swallow KeyboardInterrupt
        logger.info("Got an error training")
        logging.exception("Train error") 
Developer: PacktPublishing, Project: Python-Reinforcement-Learning-Projects, Lines of code: 24, Source file: controller.py

Example 12: get_existing_corners

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def get_existing_corners(segmentation_dir):
  corners = []
  # Legacy path format.
  for path in gfile.Glob(os.path.join(segmentation_dir, 'seg-*_*_*.npz')):
    corners.append(get_corner_from_path(path))
  for path in gfile.Glob(os.path.join(segmentation_dir, '*/*/seg-*_*_*.npz')):
    corners.append(get_corner_from_path(path))
  return corners 
Developer: google, Project: ffn, Lines of code: 10, Source file: storage.py
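
A brief usage sketch (the directory is hypothetical; get_corner_from_path, defined elsewhere in storage.py, extracts the corner coordinates from each seg-*_*_*.npz filename):

corners = get_existing_corners('/results/segmentation')
print('found %d existing subvolume corners' % len(corners))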

Example 13: main

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  if not FLAGS.json_prediction_files_pattern:
    raise ValueError(
        "The flag --json_prediction_files_pattern must be specified.")

  if not FLAGS.csv_output_file:
    raise ValueError("The flag --csv_output_file must be specified.")

  logging.info("Looking for prediction files with pattern: %s", 
               FLAGS.json_prediction_files_pattern)

  file_paths = gfile.Glob(FLAGS.json_prediction_files_pattern)  
  logging.info("Found files: %s", file_paths)

  logging.info("Writing submission file to: %s", FLAGS.csv_output_file)
  with gfile.Open(FLAGS.csv_output_file, "w+") as output_file:
    output_file.write(get_csv_header())

    for file_path in file_paths:
      logging.info("processing file: %s", file_path)

      with gfile.Open(file_path) as input_file:

        for line in input_file: 
          json_data = json.loads(line)
          output_file.write(to_csv_row(json_data))

    output_file.flush()
  logging.info("done") 
Developer: antoine77340, Project: Youtube-8M-WILLOW, Lines of code: 33, Source file: convert_prediction_from_json_to_csv.py

Example 14: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def get_input_data_tensors(reader, data_pattern, batch_size, num_readers=1):
  """Creates the section of the graph which reads the input data.

  Args:
    reader: A class which parses the input data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  with tf.name_scope("input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find input files. data_pattern='" +
                    data_pattern + "'")
    logging.info("number of input files: " + str(len(files)))
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=1, shuffle=False)
    examples_and_labels = [reader.prepare_reader(filename_queue)
                           for _ in range(num_readers)]

    video_id_batch, video_batch, unused_labels, num_frames_batch = (
        tf.train.batch_join(examples_and_labels,
                            batch_size=batch_size,
                            allow_smaller_final_batch=True,
                            enqueue_many=True))
    return video_id_batch, video_batch, num_frames_batch 
Developer: antoine77340, Project: Youtube-8M-WILLOW, Lines of code: 36, Source file: inference.py

Example 15: get_input_evaluation_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=1024,
                                 num_readers=1):
  """Creates the section of the graph which reads the evaluation data.

  Args:
    reader: A class which parses the training data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = [
        reader.prepare_reader(filename_queue) for _ in range(num_readers)
    ]
    return tf.train.batch_join(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: antoine77340, Project: Youtube-8M-WILLOW, Lines of code: 39, Source file: eval.py
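
Unlike the single-reader variants above, this version dequeues with tf.train.batch_join, which merges the outputs of num_readers parallel prepare_reader pipelines. A hedged call sketch (the reader object and pattern are placeholders):

# Four parallel I/O threads feeding one evaluation batch queue.
eval_batch = get_input_evaluation_tensors(
    reader, 'gs://bucket/eval/*.tfrecord', batch_size=1024, num_readers=4)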


Note: The tensorflow.gfile.Glob examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before distributing or reusing the code; do not republish without permission.