

Python gfile.ListDirectory Method Code Examples

This article collects typical usage examples of the tensorflow.gfile.ListDirectory method in Python. If you are wondering what gfile.ListDirectory does or how to use it, the curated code examples below may help. You can also explore further usage examples from the tensorflow.gfile module.


The following presents 11 code examples of the gfile.ListDirectory method, sorted by popularity by default.
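Before the examples, here is a minimal sketch of the API they all build on (the directory path is a hypothetical placeholder; in TF 2.x the equivalent call is tf.io.gfile.listdir). Note that gfile.ListDirectory returns entry names rather than full paths, which is why the examples below join results with os.path.join, and that unlike os.listdir it also works on remote filesystems such as gs:// paths; on some backends directory entries may carry a trailing slash, hence the model_dir.strip('/') in Examples 10 and 11.

from tensorflow import gfile
import os

base = "/tmp/example_dir"  # hypothetical; a gs://bucket/prefix also works
for name in gfile.ListDirectory(base):
    full = os.path.join(base, name)  # entries come back without the base path
    print(full, gfile.IsDirectory(full))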

Example 1: _load_data

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def _load_data(self):
    # Cars3D: one .mat mesh file per car model (183 total), each contributing
    # 4 * 24 factor combinations of 64x64 RGB images.
    dataset = np.zeros((24 * 4 * 183, 64, 64, 3))
    all_files = [x for x in gfile.ListDirectory(CARS3D_PATH) if ".mat" in x]
    for i, filename in enumerate(all_files):
      data_mesh = _load_mesh(filename)
      factor1 = np.array(list(range(4)))
      factor2 = np.array(list(range(24)))
      all_factors = np.transpose([
          np.tile(factor1, len(factor2)),
          np.repeat(factor2, len(factor1)),
          np.tile(i,
                  len(factor1) * len(factor2))
      ])
      indexes = self.index.features_to_index(all_factors)
      dataset[indexes] = data_mesh
    return dataset 
Developer: google-research, Project: disentanglement_lib, Lines: 18, Source: cars3d.py

Example 2: aggregate_json_results

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def aggregate_json_results(base_path):
  """Aggregates all the result files in a directory into a namespaced dict.

  Args:
    base_path: String with the directory containing JSON files that only contain
      dictionaries.

  Returns:
    Namespaced dictionary with the results.
  """
  result = {}
  compiled_pattern = re.compile(r"(.*)\.json")
  for filename in gfile.ListDirectory(base_path):
    match = compiled_pattern.match(filename)
    if match:
      path = os.path.join(base_path, filename)
      with tf.gfile.GFile(path, "r") as f:
        result[match.group(1)] = json.load(f)
  return namespaced_dict(**result) 
Developer: google-research, Project: disentanglement_lib, Lines: 21, Source: results.py
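A hypothetical usage sketch for the function above, assuming a results directory containing train.json and eval.json (namespaced_dict is defined elsewhere in results.py):

results = aggregate_json_results("/tmp/experiment_results")  # hypothetical path
train_metrics = results["train"]  # keys are the file stems captured by the regex
eval_metrics = results["eval"]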

Example 3: format_input

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def format_input(input_path, size):
  """Reads input path, randomly selects a sub-sample and concatenates them.

  Args:
    input_path: `str`, directory to read files from.
    size: `int`, number of files to read.

  Returns:
    List of `str` containing independent text reviews.
  """

  files = [path for path in gfile.ListDirectory(input_path)
           if path.endswith(constants.FILE_EXTENSION)]
  files = np.random.choice(files, size, replace=False)
  files = [os.path.join(input_path, filename) for filename in files]
  return get_prediction_input(files) 
Developer: GoogleCloudPlatform, Project: professional-services, Lines: 18, Source: scoring.py
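One caveat: np.random.choice with replace=False raises a ValueError if size exceeds the number of matching files, so a caller may want to guard the sample size. A hypothetical guarded call (directory and size are placeholders):

matching = [p for p in gfile.ListDirectory("/tmp/reviews")
            if p.endswith(constants.FILE_EXTENSION)]
reviews = format_input("/tmp/reviews", size=min(100, len(matching)))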

Example 4: validate_holdout_selfplay

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def validate_holdout_selfplay():
    """Validate on held-out selfplay data."""
    holdout_dirs = (os.path.join(fsdb.holdout_dir(), d)
                    for d in reversed(gfile.ListDirectory(fsdb.holdout_dir()))
                    if gfile.IsDirectory(os.path.join(fsdb.holdout_dir(), d))
                    for f in gfile.ListDirectory(os.path.join(fsdb.holdout_dir(), d)))

    # This is a roundabout way of computing how many hourly directories we need
    # to read in order to encompass 20,000 holdout games.
    holdout_dirs = set(itertools.islice(holdout_dirs, 20000))
    cmd = ['python3', 'validate.py'] + list(holdout_dirs) + [
        '--use_tpu',
        '--tpu_name={}'.format(TPU_NAME),
        '--flagfile=rl_loop/distributed_flags',
        '--expand_validation_dirs']
    mask_flags.run(cmd) 
Developer: mlperf, Project: training, Lines: 18, Source: train_and_validate.py
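The generator above yields each hourly directory once per game file it contains, so slicing off the first 20,000 items and deduplicating with set() selects just enough recent directories to cover roughly 20,000 holdout games. A more explicit (hypothetical) equivalent of that selection:

def dirs_covering_n_games(n=20000):
    selected, games = set(), 0
    for d in reversed(gfile.ListDirectory(fsdb.holdout_dir())):
        full = os.path.join(fsdb.holdout_dir(), d)
        if not gfile.IsDirectory(full):
            continue
        selected.add(full)
        games += len(gfile.ListDirectory(full))  # one game file per entry
        if games >= n:
            break
    return selected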

Example 5: load_config

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def load_config(config_path, config=None):
    """Loads configs from (possibly multiple) file(s).

    A config file can be either a Python file (with suffix '.py')
    or a YAML file. If the filename is not suffixed with '.py', the file is
    parsed as YAML.

    Args:
        config_path: Paths to configuration files. This can be a `list` of
            config file names, or a path to a directory in which all files
            are loaded, or a string of multiple file names separated by commas.
        config (dict, optional): A config dict to which new configurations are
            added. If `None`, a new config dict is created.

    Returns:
        A `dict` of configurations.
    """
    fnames = []
    if isinstance(config_path, (list, tuple)):
        fnames = list(config_path)
    elif gfile.IsDirectory(config_path):
        for fname in gfile.ListDirectory(config_path):
            fname = os.path.join(config_path, fname)
            if not gfile.IsDirectory(fname):
                fnames.append(fname)
    else:
        for fname in config_path.split(","):
            fname = fname.strip()
            if not fname:
                continue
            fnames.append(fname)

    if config is None:
        config = {}

    for fname in fnames:
        config = load_config_single(fname, config)

    return config

# pylint: disable=too-many-locals 
Developer: qkaren, Project: Counterfactual-StoryRW, Lines: 43, Source: utils_io.py
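A hypothetical illustration of the three accepted forms of config_path (all filenames are placeholders; load_config_single is defined in the same utils_io.py):

config = load_config(["base.yml", "override.py"])   # explicit list of files
config = load_config("/etc/myapp/conf.d")           # every file in a directory
config = load_config("base.yml, override.yml")      # comma-separated string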

Example 6: test_capture

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def test_capture(self):
    global_step = tf.contrib.framework.get_or_create_global_step()
    # Some test computation
    some_weights = tf.get_variable("weights", [2, 128])
    computation = tf.nn.softmax(some_weights)

    hook = hooks.MetadataCaptureHook(
        params={"step": 5}, model_dir=self.model_dir,
        run_config=tf.contrib.learn.RunConfig())
    hook.begin()

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      #pylint: disable=W0212
      mon_sess = monitored_session._HookedSession(sess, [hook])
      # Should not trigger for step 0
      sess.run(tf.assign(global_step, 0))
      mon_sess.run(computation)
      self.assertEqual(gfile.ListDirectory(self.model_dir), [])
      # Should trigger *after* step 5
      sess.run(tf.assign(global_step, 5))
      mon_sess.run(computation)
      self.assertEqual(gfile.ListDirectory(self.model_dir), [])
      mon_sess.run(computation)
      self.assertEqual(
          set(gfile.ListDirectory(self.model_dir)),
          set(["run_meta", "tfprof_log", "timeline.json"])) 
Developer: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines: 29, Source: hooks_test.py

Example 7: test_capture

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def test_capture(self):
    global_step = tf.contrib.framework.get_or_create_global_step()
    # Some test computation
    some_weights = tf.get_variable("weights", [2, 128])
    computation = tf.nn.softmax(some_weights)

    hook = hooks.MetadataCaptureHook(
        params={"step": 5}, model_dir=self.model_dir)
    hook.begin()

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      #pylint: disable=W0212
      mon_sess = monitored_session._HookedSession(sess, [hook])
      # Should not trigger for step 0
      sess.run(tf.assign(global_step, 0))
      mon_sess.run(computation)
      self.assertEqual(gfile.ListDirectory(self.model_dir), [])
      # Should trigger *after* step 5
      sess.run(tf.assign(global_step, 5))
      mon_sess.run(computation)
      self.assertEqual(gfile.ListDirectory(self.model_dir), [])
      mon_sess.run(computation)
      self.assertEqual(
          set(gfile.ListDirectory(self.model_dir)),
          set(["run_meta", "tfprof_log", "timeline.json"])) 
Developer: pandegroup, Project: reaction_prediction_seq2seq, Lines: 28, Source: hooks_test.py

Example 8: get_sgf_names

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def get_sgf_names(model):
    game_dir = HOLDOUT_PATH.format(FLAGS.base_dir, model)
    tf_records = map(os.path.basename, gfile.ListDirectory(game_dir))
    sgfs = [record.replace('.tfrecord.zz', '.sgf') for record in tf_records]
    return [PATH_TEMPLATE.format(FLAGS.base_dir, model, sgf) for sgf in sgfs] 
Developer: mlperf, Project: training, Lines: 7, Source: prepare_bigquery.py

Example 9: get_hour_dirs

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def get_hour_dirs(root=None):
    """Gets the directories under selfplay_dir that match YYYY-MM-DD-HH."""
    root = root or selfplay_dir()
    return list(filter(lambda s: re.match(r"\d{4}-\d{2}-\d{2}-\d{2}", s),
                       gfile.ListDirectory(root))) 
Developer: mlperf, Project: training, Lines: 7, Source: fsdb.py

Example 10: gather

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def gather(
        input_directory: 'where to look for games'='data/selfplay/',
        output_directory: 'where to put collected games'='data/training_chunks/',
        examples_per_record: 'how many tf.examples to gather in each chunk'=EXAMPLES_PER_RECORD):
    qmeas.start_time('gather')
    _ensure_dir_exists(output_directory)
    models = [model_dir.strip('/')
              for model_dir in sorted(gfile.ListDirectory(input_directory))[-50:]]
    with timer("Finding existing tfrecords..."):
        model_gamedata = {
            model: gfile.Glob(
                os.path.join(input_directory, model, '*.tfrecord.zz'))
            for model in models
        }
    print("Found %d models" % len(models))
    for model_name, record_files in sorted(model_gamedata.items()):
        print("    %s: %s files" % (model_name, len(record_files)))

    meta_file = os.path.join(output_directory, 'meta.txt')
    try:
        with gfile.GFile(meta_file, 'r') as f:
            already_processed = set(f.read().split())
    except tf.errors.NotFoundError:
        already_processed = set()

    num_already_processed = len(already_processed)

    for model_name, record_files in sorted(model_gamedata.items()):
        if set(record_files) <= already_processed:
            continue
        print("Gathering files for %s:" % model_name)
        for i, example_batch in enumerate(
                tqdm(preprocessing.shuffle_tf_examples(examples_per_record, record_files))):
            output_record = os.path.join(output_directory,
                                         '{}-{}.tfrecord.zz'.format(model_name, str(i)))
            preprocessing.write_tf_examples(
                output_record, example_batch, serialize=False)
        already_processed.update(record_files)

    print("Processed %s new files" %
          (len(already_processed) - num_already_processed))
    with gfile.GFile(meta_file, 'w') as f:
        f.write('\n'.join(sorted(already_processed)))
    qmeas.stop_time('gather') 
Developer: mlperf, Project: training_results_v0.5, Lines: 46, Source: main.py

Example 11: aggregate

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import ListDirectory [as alias]
def aggregate():
    logger.info("Gathering game results")

    os.makedirs(PATHS.TRAINING_CHUNK_DIR, exist_ok=True)
    os.makedirs(PATHS.SELFPLAY_DIR, exist_ok=True)
    models = [model_dir.strip('/')
              for model_dir in sorted(gfile.ListDirectory(PATHS.SELFPLAY_DIR))[-50:]]

    with timer("Finding existing tfrecords..."):
        model_gamedata = {
            model: gfile.Glob(
                os.path.join(PATHS.SELFPLAY_DIR, model, '*.zz'))
            for model in models
        }
    logger.info("Found %d models" % len(models))
    for model_name, record_files in sorted(model_gamedata.items()):
        logger.info("    %s: %s files" % (model_name, len(record_files)))

    meta_file = os.path.join(PATHS.TRAINING_CHUNK_DIR, 'meta.txt')
    try:
        with gfile.GFile(meta_file, 'r') as f:
            already_processed = set(f.read().split())
    except tf.errors.NotFoundError:
        already_processed = set()

    num_already_processed = len(already_processed)

    for model_name, record_files in sorted(model_gamedata.items()):
        if set(record_files) <= already_processed:
            continue
        logger.info("Gathering files for %s:" % model_name)
        for i, example_batch in enumerate(
                tqdm(preprocessing.shuffle_tf_examples(GLOBAL_PARAMETER_STORE.EXAMPLES_PER_RECORD, record_files))):
            output_record = os.path.join(PATHS.TRAINING_CHUNK_DIR,
                                         '{}-{}.tfrecord.zz'.format(model_name, str(i)))
            preprocessing.write_tf_examples(
                output_record, example_batch, serialize=False)
        already_processed.update(record_files)

    logger.info("Processed %s new files" %
                (len(already_processed) - num_already_processed))
    with gfile.GFile(meta_file, 'w') as f:
        f.write('\n'.join(sorted(already_processed))) 
Developer: PacktPublishing, Project: Python-Reinforcement-Learning-Projects, Lines: 45, Source: controller.py
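Examples 10 and 11 share the same bookkeeping pattern: record already-processed inputs in a metadata file so reruns skip completed work. Reduced to its core (meta_path, inputs, and process() are hypothetical stand-ins):

try:
    with gfile.GFile(meta_path, 'r') as f:
        done = set(f.read().split())
except tf.errors.NotFoundError:
    done = set()

for item in inputs:
    if item in done:
        continue
    process(item)   # whatever per-item work is needed
    done.add(item)

with gfile.GFile(meta_path, 'w') as f:
    f.write('\n'.join(sorted(done)))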


Note: The tensorflow.gfile.ListDirectory method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's License before redistributing or using the code. Do not reproduce without permission.