

Python gfile.Glob Method Code Examples

This article collects typical usage examples of the tensorflow.gfile.Glob method in Python. If you are wondering what gfile.Glob does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples from tensorflow.gfile, the module this method belongs to.


The sections below present 15 code examples of the gfile.Glob method, sorted by popularity by default.
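
Before diving into the examples, here is a minimal sketch of the API itself (the pattern and paths below are placeholders): gfile.Glob takes a glob-style pattern string and returns a list of matching file paths, for local filesystems as well as remote ones such as Google Cloud Storage (gs:// paths). In TensorFlow 2.x the equivalent call is tf.io.gfile.glob.

from tensorflow import gfile

# Expand a glob pattern into a list of matching file paths.
# The same call accepts GCS patterns such as 'gs://bucket/prefix-*'.
files = gfile.Glob('/tmp/data/train-*.tfrecord')
print('%d files matched' % len(files))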

Example 1: create_filename_queue

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import re; import tensorflow as tf
def create_filename_queue(coordinates_file_pattern, shuffle=True):
  """Creates a queue for reading coordinates from coordinate file.

  Args:
    coordinates_file_pattern: File pattern for TFRecords of
                              input examples of the form of a glob
                              pattern or path@shards.
    shuffle: Whether to shuffle the coordinate file list. Note that the expanded
             coordinates_file_pattern is not guaranteed to be sorted
             alphabetically.

  Returns:
    TensorFlow queue with coordinate filenames.
  """
  m = re.search(r'@(\d{1,})', coordinates_file_pattern)
  if m:
    num_shards = int(m.group(1))
    coord_file_list = [
      re.sub(r'@(\d{1,})', '-%.5d-of-%.5d' % (i, num_shards),
             coordinates_file_pattern)
      for i in range(num_shards)]
  else:
    coord_file_list = gfile.Glob(coordinates_file_pattern)
  return tf.train.string_input_producer(coord_file_list, shuffle=shuffle) 
Developer: google | Project: ffn | Lines of code: 26 | Source file: inputs.py
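
To make the path@shards branch above concrete, here is a small standalone sketch of the same regex substitution, using a hypothetical pattern (/data/coords@2); a plain glob pattern such as /data/coords-* would instead fall through to gfile.Glob:

import re

pattern = '/data/coords@2'  # hypothetical sharded-file pattern
m = re.search(r'@(\d+)', pattern)
num_shards = int(m.group(1))
shards = [re.sub(r'@(\d+)', '-%.5d-of-%.5d' % (i, num_shards), pattern)
          for i in range(num_shards)]
print(shards)
# ['/data/coords-00000-of-00002', '/data/coords-00001-of-00002']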

Example 2: read_df_from_gcs

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import pandas as pd; metadata is a module from the sample project
def read_df_from_gcs(file_pattern):
  """Read data from Google Cloud Storage, split into train and validation sets.

  Assume that the data on GCS is in csv format without header.
  The column names will be provided through metadata

  Args:
    file_pattern: (string) pattern of the files containing training data.
    For example: [gs://bucket/folder_name/prefix]

  Returns:
    pandas.DataFrame
  """

  # Download the files to local /tmp/ folder
  df_list = []

  for filepath in gfile.Glob(file_pattern):
    with gfile.Open(filepath, 'r') as f:
      # Assume there is no header
      df_list.append(pd.read_csv(f, names=metadata.CSV_COLUMNS))

  data_df = pd.concat(df_list)

  return data_df 
Developer: GoogleCloudPlatform | Project: cloudml-samples | Lines of code: 27 | Source file: utils.py
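
A minimal usage sketch of the function above; the bucket path is a placeholder, and metadata.CSV_COLUMNS must be supplied by the surrounding sample project:

# Hypothetical pattern; matches every CSV shard under the prefix.
train_df = read_df_from_gcs('gs://my-bucket/folder_name/train-*')
print(train_df.shape)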

Example 3: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import tensorflow as tf; from tensorflow import logging
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=4 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda | Project: youtube-8m | Lines of code: 21 | Source file: inference-combine-tfrecords-frame.py

Example 4: get_input_evaluation_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import tensorflow as tf; from tensorflow import logging
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      print(data_pattern, files)
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda | Project: youtube-8m | Lines of code: 22 | Source file: check_distillation.py

Example 5: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import tensorflow as tf; from tensorflow import logging
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=4 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda | Project: youtube-8m | Lines of code: 21 | Source file: inference-pre-ensemble.py

Example 6: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import tensorflow as tf; from tensorflow import logging
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda | Project: youtube-8m | Lines of code: 21 | Source file: inference-combine-tfrecords-video.py

Example 7: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import tensorflow as tf; from tensorflow import logging
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256):
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda | Project: youtube-8m | Lines of code: 21 | Source file: inference.py

Example 8: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import tensorflow as tf; from tensorflow import logging; FLAGS comes from the script's flag definitions
def get_input_data_tensors(reader,
                           data_pattern,
                           batch_size=256,
                           num_epochs=None):
  logging.info("Using batch size of " + str(batch_size) + " for training.")
  with tf.name_scope("train_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find training files. data_pattern='" +
                    data_pattern + "'.")
    logging.info("Number of training files: %s.", str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=num_epochs, shuffle=False)
    training_data = reader.prepare_reader(filename_queue)

    return tf.train.batch(
        training_data,
        batch_size=batch_size,
        capacity=FLAGS.batch_size * 4,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda | Project: youtube-8m | Lines of code: 24 | Source file: train.py

Example 9: get_input_evaluation_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import tensorflow as tf; from tensorflow import logging
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=1024):

  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    files.sort()
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = reader.prepare_reader(filename_queue)
    return tf.train.batch(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: wangheda | Project: youtube-8m | Lines of code: 22 | Source file: eval.py

Example 10: validate

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import os; qmeas, dual_net, and the timer helper come from the project
def validate(
        working_dir: 'tf.estimator working directory',
        *tf_record_dirs: 'Directories where holdout data are',
        checkpoint_name: 'Which checkpoint to evaluate (None=latest)'=None,
        validate_name: 'Name for validation set (e.g., selfplay or human)'=None):
    qmeas.start_time('validate')
    tf_records = []
    with timer("Building lists of holdout files"):
        for record_dir in tf_record_dirs:
            tf_records.extend(gfile.Glob(os.path.join(record_dir, '*.zz')))

    first_record = os.path.basename(tf_records[0])
    last_record = os.path.basename(tf_records[-1])
    with timer("Validating from {} to {}".format(first_record, last_record)):
        dual_net.validate(
            working_dir, tf_records, checkpoint_name=checkpoint_name,
            name=validate_name)
    qmeas.stop_time('validate') 
Developer: mlperf | Project: training_results_v0.5 | Lines of code: 20 | Source file: main.py

Example 11: train

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import os, logging; logger, timer, network, PATHS, GLOBAL_PARAMETER_STORE, get_latest_model, and generate come from the project
def train():
    model_version, model_name = get_latest_model()
    logger.info("Training on gathered game data, initializing from {}".format(model_name))
    new_model_name = generate(model_version + 1)
    logger.info("New model will be {}".format(new_model_name))
    save_file = os.path.join(PATHS.MODELS_DIR, new_model_name)

    try:
        logger.info("Getting tf_records")
        tf_records = sorted(gfile.Glob(os.path.join(PATHS.TRAINING_CHUNK_DIR, '*.tfrecord.zz')))
        tf_records = tf_records[-(GLOBAL_PARAMETER_STORE.WINDOW_SIZE //
                                  GLOBAL_PARAMETER_STORE.EXAMPLES_PER_RECORD):]

        print("Training from:", tf_records[0], "to", tf_records[-1])

        with timer("Training"):
            network.train(PATHS.ESTIMATOR_WORKING_DIR, tf_records, model_version+1)
            network.export_latest_checkpoint_model(PATHS.ESTIMATOR_WORKING_DIR, save_file)

    except Exception:
        logger.info("Got an error training")
        logging.exception("Train error") 
Developer: PacktPublishing | Project: Python-Reinforcement-Learning-Projects | Lines of code: 24 | Source file: controller.py

Example 12: get_existing_corners

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import os; get_corner_from_path is defined elsewhere in the project
def get_existing_corners(segmentation_dir):
  corners = []
  # Legacy path format.
  for path in gfile.Glob(os.path.join(segmentation_dir, 'seg-*_*_*.npz')):
    corners.append(get_corner_from_path(path))
  for path in gfile.Glob(os.path.join(segmentation_dir, '*/*/seg-*_*_*.npz')):
    corners.append(get_corner_from_path(path))
  return corners 
Developer: google | Project: ffn | Lines of code: 10 | Source file: storage.py

Example 13: main

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import json; import tensorflow as tf; from tensorflow import logging; FLAGS, get_csv_header, and to_csv_row are defined elsewhere in the script
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  if not FLAGS.json_prediction_files_pattern:
    raise ValueError(
        "The flag --json_prediction_files_pattern must be specified.")

  if not FLAGS.csv_output_file:
    raise ValueError("The flag --csv_output_file must be specified.")

  logging.info("Looking for prediction files with pattern: %s", 
               FLAGS.json_prediction_files_pattern)

  file_paths = gfile.Glob(FLAGS.json_prediction_files_pattern)  
  logging.info("Found files: %s", file_paths)

  logging.info("Writing submission file to: %s", FLAGS.csv_output_file)
  with gfile.Open(FLAGS.csv_output_file, "w+") as output_file:
    output_file.write(get_csv_header())

    for file_path in file_paths:
      logging.info("processing file: %s", file_path)

      with gfile.Open(file_path) as input_file:

        for line in input_file: 
          json_data = json.loads(line)
          output_file.write(to_csv_row(json_data))

    output_file.flush()
  logging.info("done") 
Developer: antoine77340 | Project: Youtube-8M-WILLOW | Lines of code: 33 | Source file: convert_prediction_from_json_to_csv.py

Example 14: get_input_data_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import tensorflow as tf; from tensorflow import logging
def get_input_data_tensors(reader, data_pattern, batch_size, num_readers=1):
  """Creates the section of the graph which reads the input data.

  Args:
    reader: A class which parses the input data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  with tf.name_scope("input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find input files. data_pattern='" +
                    data_pattern + "'")
    logging.info("number of input files: " + str(len(files)))
    filename_queue = tf.train.string_input_producer(
        files, num_epochs=1, shuffle=False)
    examples_and_labels = [reader.prepare_reader(filename_queue)
                           for _ in range(num_readers)]

    video_id_batch, video_batch, unused_labels, num_frames_batch = (
        tf.train.batch_join(examples_and_labels,
                            batch_size=batch_size,
                            allow_smaller_final_batch=True,
                            enqueue_many=True))
    return video_id_batch, video_batch, num_frames_batch 
Developer: antoine77340 | Project: Youtube-8M-WILLOW | Lines of code: 36 | Source file: inference.py

Example 15: get_input_evaluation_tensors

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Glob [as alias]
# This snippet additionally uses: import tensorflow as tf; from tensorflow import logging
def get_input_evaluation_tensors(reader,
                                 data_pattern,
                                 batch_size=1024,
                                 num_readers=1):
  """Creates the section of the graph which reads the evaluation data.

  Args:
    reader: A class which parses the training data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  logging.info("Using batch size of " + str(batch_size) + " for evaluation.")
  with tf.name_scope("eval_input"):
    files = gfile.Glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: " + str(len(files)))
    filename_queue = tf.train.string_input_producer(
        files, shuffle=False, num_epochs=1)
    eval_data = [
        reader.prepare_reader(filename_queue) for _ in range(num_readers)
    ]
    return tf.train.batch_join(
        eval_data,
        batch_size=batch_size,
        capacity=3 * batch_size,
        allow_smaller_final_batch=True,
        enqueue_many=True) 
Developer: antoine77340 | Project: Youtube-8M-WILLOW | Lines of code: 39 | Source file: eval.py


Note: The tensorflow.gfile.Glob method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce without permission.