

Python gfile.GFile Method Code Examples

This article collects typical code examples of the tensorflow.gfile.GFile method in Python. If you are looking for concrete examples of how gfile.GFile is used in practice, the curated snippets below should help. You can also browse further usage examples from the tensorflow.gfile module.


Fifteen code examples of the gfile.GFile method are shown below, sorted by popularity by default.
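Before the examples, a quick orientation: gfile.GFile is TensorFlow 1.x's file-like wrapper that works transparently over local paths and remote filesystems such as Google Cloud Storage (gs://...); in TensorFlow 2.x the same class is available as tf.io.gfile.GFile. A minimal usage sketch (the path below is a placeholder):

from tensorflow import gfile

# Write text; gs:// and other supported filesystem paths work the same way.
with gfile.GFile('/tmp/example.txt', 'w') as f:
    f.write('hello\nworld\n')

# Read it back; GFile objects support line iteration like ordinary files.
with gfile.GFile('/tmp/example.txt') as f:
    lines = [line.strip() for line in f]

assert lines == ['hello', 'world']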

Example 1: _parse_lines

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def _parse_lines(path):
  """Parses lines from IWSLT17 dataset."""
  lines = []
  with gfile.GFile(path) as fp:
    for line in fp:
      line = line.strip()
      # Skip lines that are tags entirely.
      if _WHOLE_TAG_REGEX.match(line):
        continue
      # Try to parse as content between an opening and closing tags.
      match = _FLAT_HTML_REGEX.match(line)
      # Always append text not contained between the tags.
      if match is None:
        lines.append(line)
      elif (match.group(1) == match.group(3) and
            match.group(1).lower() in _ALLOWED_TAGS):
        lines.append(match.group(2).strip())
  return lines 
Developer: google-research, Project: language, Lines: 20, Source: identify_overlap_iwslt17.py
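The module-level constants used above are not shown in the snippet. A hypothetical reconstruction consistent with how they are used (the real patterns live in the google-research/language source) might be:

import re

# Hypothetical definitions, inferred from usage only.
_WHOLE_TAG_REGEX = re.compile(r'^<[^>]*>$')                   # line that is a single tag
_FLAT_HTML_REGEX = re.compile(r'^<(\w+)[^>]*>(.*)</(\w+)>$')  # open tag, content, close tag
_ALLOWED_TAGS = {'title', 'description', 'seg'}               # assumed tag whitelist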

Example 2: get_prediction_input

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def get_prediction_input(files):
  """Reads and concatenates text files in input directory.

  Args:
    files: List of `str` containing the absolute paths of the files to read.

  Returns:
    List of `str` containing independent text reviews.

  Raises:
    ValueError: If input files are empty.
  """

  instances = []
  for path in files:
    with gfile.GFile(path, 'r') as lines:
      instances += lines
  if not instances:
    raise ValueError('No review found in input files.')
  return instances 
Developer: GoogleCloudPlatform, Project: professional-services, Lines: 22, Source: scoring.py

Example 3: get_csv_data

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def get_csv_data(filename):
  """Parse csv and return Dataset object with data and targets.

  Assumes the first column of the csv contains the targets.
  Args:
    filename: complete path of the csv file
  Returns:
    Dataset object
  """
  f = gfile.GFile(filename, 'r')
  mat = []
  for l in f:
    row = l.strip()
    row = row.replace('"', '')
    row = row.split(',')
    row = [float(x) for x in row]
    mat.append(row)
  mat = np.array(mat)
  y = mat[:, 0]
  X = mat[:, 1:]
  data = Dataset(X, y)
  return data 
Developer: google, Project: active-learning, Lines: 24, Source: create_data.py
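The Dataset container is defined elsewhere in the active-learning repo. To run this snippet standalone, a minimal stand-in (an assumption, not the repo's actual class) suffices:

import collections

# Hypothetical stand-in: positional fields match the Dataset(X, y) call above.
Dataset = collections.namedtuple('Dataset', ['data', 'target'])

data = get_csv_data('/tmp/example.csv')  # placeholder path
print(data.data.shape, data.target.shape)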

Example 4: _build_embedding_matrix

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def _build_embedding_matrix(self):
    """Builds the embedding matrix for the model.

    Returns:
      words: a list of strings representing the words in the vocabulary.
      embeddings: a float32 array of shape [vocab_size, embeddings_dim].
    """
    logging.info('Loading GloVe embeddings.')
    words = []
    embeddings = []
    with gfile.GFile(FLAGS.glove_path) as f:
      for line in f:
        values = line.split()
        words.append(values[0])
        embeddings.append(np.asarray(values[1:], dtype='float32'))

    logging.info('Found %s word vectors.', len(embeddings))
    return words, np.array(embeddings) 
Developer: google, Project: active-qa, Lines: 20, Source: selector_keras.py

Example 5: load_word2vec

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def load_word2vec(filename, vocab, word_vecs):
    """Loads embeddings in the word2vec binary format which has a header line
    containing the number of vectors and their dimensionality (two integers),
    followed with number-of-vectors lines each of which is formatted as
    '<word-string> <embedding-vector>'.

    Args:
        filename (str): Path to the embedding file.
        vocab (dict): A dictionary that maps token strings to integer index.
            Tokens not in :attr:`vocab` are not read.
        word_vecs: A 2D numpy array of shape `[vocab_size, embed_dim]`
            which is updated as reading from the file.

    Returns:
        The updated :attr:`word_vecs`.
    """
    with gfile.GFile(filename, "rb") as fin:
        header = fin.readline()
        vocab_size, vector_size = [int(s) for s in header.split()]
        if vector_size != word_vecs.shape[1]:
            raise ValueError("Inconsistent word vector sizes: %d vs %d" %
                             (vector_size, word_vecs.shape[1]))
        binary_len = np.dtype('float32').itemsize * vector_size
        for _ in np.arange(vocab_size):
            chars = []
            while True:
                char = fin.read(1)
                if char == b' ':
                    break
                if char != b'\n':
                    chars.append(char)
            word = b''.join(chars)
            word = tf.compat.as_text(word)
            if word in vocab:
                # np.frombuffer replaces the deprecated np.fromstring for binary data.
                word_vecs[vocab[word]] = np.frombuffer(
                    fin.read(binary_len), dtype='float32')
            else:
                fin.read(binary_len)
    return word_vecs 
Developer: qkaren, Project: Counterfactual-StoryRW, Lines: 41, Source: embedding.py
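To exercise load_word2vec without downloading real embeddings, a small helper (not part of the original module) can write a toy file in the binary format described in the docstring:

import numpy as np

def write_toy_word2vec(path, vectors):
    """Writes {word: 1-D float32 array} in the word2vec binary format."""
    dim = len(next(iter(vectors.values())))
    with open(path, 'wb') as fout:
        fout.write(('%d %d\n' % (len(vectors), dim)).encode('utf-8'))
        for word, vec in vectors.items():
            fout.write(word.encode('utf-8') + b' ')
            fout.write(np.asarray(vec, dtype='float32').tobytes())
            fout.write(b'\n')  # separator; the loader above skips it

vecs = {'hello': np.zeros(4, 'float32'), 'world': np.ones(4, 'float32')}
write_toy_word2vec('/tmp/toy.bin', vecs)
word_vecs = np.zeros((2, 4), dtype='float32')
load_word2vec('/tmp/toy.bin', {'hello': 0, 'world': 1}, word_vecs)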

Example 6: load_glove

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def load_glove(filename, vocab, word_vecs):
    """Loads embeddings in the glove text format in which each line is
    '<word-string> <embedding-vector>'. Dimensions of the embedding vector
    are separated with whitespace characters.

    Args:
        filename (str): Path to the embedding file.
        vocab (dict): A dictionary that maps token strings to integer index.
            Tokens not in :attr:`vocab` are not read.
        word_vecs: A 2D numpy array of shape `[vocab_size, embed_dim]`
            which is updated as reading from the file.

    Returns:
        The updated :attr:`word_vecs`.
    """
    with gfile.GFile(filename) as fin:
        for line in fin:
            vec = line.strip().split()
            if len(vec) == 0:
                continue
            word, vec = vec[0], vec[1:]
            word = tf.compat.as_text(word)
            if word not in vocab:
                continue
            if len(vec) != word_vecs.shape[1]:
                raise ValueError("Inconsistent word vector sizes: %d vs %d" %
                                 (len(vec), word_vecs.shape[1]))
            word_vecs[vocab[word]] = np.array([float(v) for v in vec])
    return word_vecs 
Developer: qkaren, Project: Counterfactual-StoryRW, Lines: 31, Source: embedding.py

Example 7: _load_config_yaml

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def _load_config_yaml(fname):
    with gfile.GFile(fname) as config_file:
        # yaml.load without an explicit Loader is unsafe and deprecated in
        # PyYAML >= 5.1; safe_load is the drop-in replacement here.
        config = yaml.safe_load(config_file)
    return config 
Developer: qkaren, Project: Counterfactual-StoryRW, Lines: 6, Source: utils_io.py

Example 8: overwrite_tf_flags_with_config

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def overwrite_tf_flags_with_config(flags, config_paths):
    """Load flags from config file

    Adapted from:
    https://github.com/google/seq2seq/blob/7f485894d412e8d81ce0e07977831865e44309ce/bin/train.py#L244
    """
    final_config = {}
    if not config_paths:
        return
    for config_path in config_paths.split(","):
        config_path = config_path.strip()
        if not config_path:
            continue
        config_path = os.path.abspath(config_path)
        tf.logging.info("Loading config from %s", config_path)
        with gfile.GFile(config_path.strip()) as config_file:
            config_flags = yaml.safe_load(config_file)  # avoid unsafe yaml.load
            final_config = _deep_merge_dict(final_config, config_flags)

    # Merge flags with config values
    for flag_key, flag_value in final_config.items():
        if hasattr(flags, flag_key) and isinstance(getattr(flags, flag_key), dict):
            merged_value = _deep_merge_dict(flag_value, getattr(flags, flag_key))
            setattr(flags, flag_key, merged_value)
        elif hasattr(flags, flag_key):
            setattr(flags, flag_key, flag_value)
        else:
            tf.logging.warning("Ignoring config flag: %s", flag_key) 
Developer: merantix, Project: imitation-learning, Lines: 30, Source: tf_flags_util.py
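_deep_merge_dict is defined elsewhere in the repo. A plausible implementation, reconstructed from the call sites above (the precedence convention, second argument wins, is an assumption):

def _deep_merge_dict(base, overrides):
    """Recursively merges two dicts; on conflicts, values from `overrides` win.

    Hypothetical reconstruction of the helper used above.
    """
    merged = dict(base)
    for key, value in overrides.items():
        if isinstance(merged.get(key), dict) and isinstance(value, dict):
            merged[key] = _deep_merge_dict(merged[key], value)
        else:
            merged[key] = value
    return merged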

Example 9: selfplay

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def selfplay(
        load_file: "The path to the network model files",
        output_dir: "Where to write the games"="data/selfplay",
        holdout_dir: "Where to write the games"="data/holdout",
        output_sgf: "Where to write the sgfs"="sgf/",
        readouts: 'How many simulations to run per move'=100,
        verbose: '>=2 will print debug info, >=3 will print boards' = 1,
        resign_threshold: 'absolute value of threshold to resign at' = 0.95,
        holdout_pct: 'how many games to hold out for validation' = 0.05):
    qmeas.start_time('selfplay')
    clean_sgf = os.path.join(output_sgf, 'clean')
    full_sgf = os.path.join(output_sgf, 'full')
    _ensure_dir_exists(clean_sgf)
    _ensure_dir_exists(full_sgf)
    _ensure_dir_exists(output_dir)
    _ensure_dir_exists(holdout_dir)

    with timer("Loading weights from %s ... " % load_file):
        network = dual_net.DualNetwork(load_file)

    with timer("Playing game"):
        player = selfplay_mcts.play(
            network, readouts, resign_threshold, verbose)

    output_name = '{}-{}'.format(int(time.time() * 1000 * 1000), socket.gethostname())
    game_data = player.extract_data()
    with gfile.GFile(os.path.join(clean_sgf, '{}.sgf'.format(output_name)), 'w') as f:
        f.write(player.to_sgf(use_comments=False))
    with gfile.GFile(os.path.join(full_sgf, '{}.sgf'.format(output_name)), 'w') as f:
        f.write(player.to_sgf())

    tf_examples = preprocessing.make_dataset_from_selfplay(game_data)

    # Hold out 5% of games for evaluation.
    if random.random() < holdout_pct:
        fname = os.path.join(holdout_dir, "{}.tfrecord.zz".format(output_name))
    else:
        fname = os.path.join(output_dir, "{}.tfrecord.zz".format(output_name))

    preprocessing.write_tf_examples(fname, tf_examples)
    qmeas.stop_time('selfplay') 
Developer: mlperf, Project: training_results_v0.5, Lines: 43, Source: main.py

Example 10: selfplay_cache_model

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def selfplay_cache_model(
        network: "The path to the network model files",
        output_dir: "Where to write the games"="data/selfplay",
        holdout_dir: "Where to write the games"="data/holdout",
        output_sgf: "Where to write the sgfs"="sgf/",
        readouts: 'How many simulations to run per move'=100,
        verbose: '>=2 will print debug info, >=3 will print boards' = 1,
        resign_threshold: 'absolute value of threshold to resign at' = 0.95,
        holdout_pct: 'how many games to hold out for validation' = 0.05):
    qmeas.start_time('selfplay')
    clean_sgf = os.path.join(output_sgf, 'clean')
    full_sgf = os.path.join(output_sgf, 'full')
    _ensure_dir_exists(clean_sgf)
    _ensure_dir_exists(full_sgf)
    _ensure_dir_exists(output_dir)
    _ensure_dir_exists(holdout_dir)

    with timer("Playing game"):
        player = selfplay_mcts.play(
            network, readouts, resign_threshold, verbose)

    output_name = '{}-{}'.format(int(time.time() * 1000 * 1000), socket.gethostname())
    game_data = player.extract_data()
    with gfile.GFile(os.path.join(clean_sgf, '{}.sgf'.format(output_name)), 'w') as f:
        f.write(player.to_sgf(use_comments=False))
    with gfile.GFile(os.path.join(full_sgf, '{}.sgf'.format(output_name)), 'w') as f:
        f.write(player.to_sgf())

    tf_examples = preprocessing.make_dataset_from_selfplay(game_data)

    # Hold out 5% of games for evaluation.
    if random.random() < holdout_pct:
        fname = os.path.join(holdout_dir, "{}.tfrecord.zz".format(output_name))
    else:
        fname = os.path.join(output_dir, "{}.tfrecord.zz".format(output_name))

    preprocessing.write_tf_examples(fname, tf_examples)
    qmeas.stop_time('selfplay') 
Developer: mlperf, Project: training_results_v0.5, Lines: 40, Source: main.py

Example 11: _load

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def _load(path):
  with gfile.GFile(path) as f:
    result = json.load(f)
  result["path"] = path
  return result 
Developer: google-research, Project: disentanglement_lib, Lines: 7, Source: aggregate_results.py

Example 12: _get_unk_mapping

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def _get_unk_mapping(filename):
  """Reads a file that specifies a mapping from source to target tokens.
  The file must contain lines of the form "<source>\t<target>".

  Args:
    filename: path to the mapping file

  Returns:
    A dictionary that maps from source -> target tokens.
  """
  with gfile.GFile(filename, "r") as mapping_file:
    lines = mapping_file.readlines()
    mapping = dict([_.split("\t")[0:2] for _ in lines])
    mapping = {k.strip(): v.strip() for k, v in mapping.items()}
  return mapping 
Developer: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines: 17, Source: decode_text.py
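A quick usage sketch (the temporary file and its contents are illustrative):

import tempfile

# Illustrative mapping file with <source>\t<target> lines.
with tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False) as tmp:
    tmp.write('colour\tcolor\nlabour\tlabor\n')

mapping = _get_unk_mapping(tmp.name)
assert mapping == {'colour': 'color', 'labour': 'labor'}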

Example 13: get_vocab_info

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def get_vocab_info(vocab_path):
  """Creates a `VocabInfo` instance that contains the vocabulary size and
    the special vocabulary for the given file.

  Args:
    vocab_path: Path to a vocabulary file with one word per line.

  Returns:
    A VocabInfo tuple.
  """
  with gfile.GFile(vocab_path) as file:
    vocab_size = sum(1 for _ in file)
  special_vocab = get_special_vocab(vocab_size)
  return VocabInfo(vocab_path, vocab_size, special_vocab) 
Developer: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines: 16, Source: vocab.py

Example 14: dump

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def dump(self, model_dir):
    """Dumps the options to a file in the model directory.

    Args:
      model_dir: Path to the model directory. The options will be
      dumped into a file in this directory.
    """
    gfile.MakeDirs(model_dir)
    options_dict = {
        "model_class": self.model_class,
        "model_params": self.model_params,
    }

    with gfile.GFile(TrainOptions.path(model_dir), "wb") as file:
      file.write(json.dumps(options_dict).encode("utf-8")) 
Developer: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines: 17, Source: utils.py

Example 15: load

# Required import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import GFile [as alias]
def load(model_dir):
    """ Loads options from the given model directory.

    Args:
      model_dir: Path to the model directory.
    """
    with gfile.GFile(TrainOptions.path(model_dir), "rb") as file:
      options_dict = json.loads(file.read().decode("utf-8"))
    options_dict = defaultdict(None, options_dict)

    return TrainOptions(
        model_class=options_dict["model_class"],
        model_params=options_dict["model_params"]) 
Developer: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines: 15, Source: utils.py
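Examples 14 and 15 form a serialization round trip. Assuming a TrainOptions class whose constructor takes model_class and model_params and whose static path() helper maps a model directory to a JSON file (both assumptions consistent with the snippets above), usage would look like:

# All names and values below are illustrative assumptions.
opts = TrainOptions(model_class='BasicSeq2Seq',
                    model_params={'embedding.dim': 128})
opts.dump('/tmp/model_dir')                    # writes the options JSON
restored = TrainOptions.load('/tmp/model_dir')
assert restored.model_params['embedding.dim'] == 128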


Note: The tensorflow.gfile.GFile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.