

Python gfile.GFile Method Code Examples

This article collects typical usage examples of the gfile.GFile method from tensorflow.python.platform in Python. If you are wondering what gfile.GFile does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples for the module it belongs to, tensorflow.python.platform.gfile.


The following presents 15 code examples of the gfile.GFile method, sorted by popularity by default.
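Before the examples, here is a minimal sketch of the most common pattern, assuming a placeholder local path: gfile.GFile is opened as a context manager and mirrors the built-in file object API, while also handling remote filesystems (e.g. gs:// paths).

from tensorflow.python.platform import gfile

# Write text to a path (local here; gfile also handles remote schemes such as gs://).
with gfile.GFile('/tmp/example.txt', 'w') as f:
  f.write('hello gfile\n')

# Read it back; GFile objects support line iteration like ordinary files.
with gfile.GFile('/tmp/example.txt', 'r') as f:
  for line in f:
    print(line.strip())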

Example 1: main

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def main(_):
  if not FLAGS.output_file:
    raise ValueError('You must supply the path to save to with --output_file')
  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default() as graph:
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
                                          FLAGS.dataset_dir)
    network_fn = nets_factory.get_network_fn(
        FLAGS.model_name,
        num_classes=(dataset.num_classes - FLAGS.labels_offset),
        is_training=FLAGS.is_training)
    if hasattr(network_fn, 'default_image_size'):
      image_size = network_fn.default_image_size
    else:
      image_size = FLAGS.default_image_size
    placeholder = tf.placeholder(name='input', dtype=tf.float32,
                                 shape=[1, image_size, image_size, 3])
    network_fn(placeholder)
    graph_def = graph.as_graph_def()
    with gfile.GFile(FLAGS.output_file, 'wb') as f:
      f.write(graph_def.SerializeToString()) 
Developer: ringringyi, Project: DOTA_models, Lines: 23, Source: export_inference_graph.py

Example 2: main

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def main(_):
  if not FLAGS.output_file:
    raise ValueError('You must supply the path to save to with --output_file')
  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default() as graph:
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
                                          FLAGS.dataset_dir)
    network_fn = nets_factory.get_network_fn(
        FLAGS.model_name,
        num_classes=(dataset.num_classes - FLAGS.labels_offset),
        is_training=FLAGS.is_training)
    image_size = FLAGS.image_size or network_fn.default_image_size
    placeholder = tf.placeholder(name='input', dtype=tf.float32,
                                 shape=[FLAGS.batch_size, image_size,
                                        image_size, 3])
    network_fn(placeholder)
    graph_def = graph.as_graph_def()
    with gfile.GFile(FLAGS.output_file, 'wb') as f:
      f.write(graph_def.SerializeToString()) 
Developer: yuantailing, Project: ctw-baseline, Lines: 21, Source: export_inference_graph.py

Example 3: maybe_download

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def maybe_download(filename, work_directory, source_url):
  """Download the data from source url, unless it's already here.

  Args:
      filename: string, name of the file in the directory.
      work_directory: string, path to working directory.
      source_url: url to download from if file doesn't exist.

  Returns:
      Path to resulting file.
  """
  if not gfile.Exists(work_directory):
    gfile.MakeDirs(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not gfile.Exists(filepath):
    temp_file_name, _ = urlretrieve_with_retry(source_url)
    gfile.Copy(temp_file_name, filepath)
    with gfile.GFile(filepath) as f:
      size = f.size()
    print('Successfully downloaded', filename, size, 'bytes.')
  return filepath 
Developer: ryfeus, Project: lambda-packs, Lines: 23, Source: base.py
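For illustration, a hypothetical call to the helper above might look like the following (the filename, directory, and URL are placeholders, not taken from the original project):

train_images = maybe_download('train-images-idx3-ubyte.gz', '/tmp/mnist-data',
                              'https://example.com/train-images-idx3-ubyte.gz')
print('Data available at', train_images)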

Example 4: main

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def main(_):
  if not FLAGS.output_file:
    raise ValueError('You must supply the path to save to with --output_file')
  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default() as graph:
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
                                          FLAGS.dataset_dir)
    network_fn = nets_factory.get_network_fn(
        FLAGS.model_name,
        num_classes=(dataset.num_classes - FLAGS.labels_offset),
        is_training=FLAGS.is_training)
    image_size = FLAGS.image_size or network_fn.default_image_size
    placeholder = tf.placeholder(name='input', dtype=tf.float32,
                                 shape=[FLAGS.batch_size, image_size,
                                        image_size, 3])
    network_fn(placeholder)

    if FLAGS.quantize:
      tf.contrib.quantize.create_eval_graph()

    graph_def = graph.as_graph_def()
    with gfile.GFile(FLAGS.output_file, 'wb') as f:
      f.write(graph_def.SerializeToString()) 
Developer: andrewekhalel, Project: edafa, Lines: 25, Source: export_inference_graph.py

Example 5: print_output

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def print_output(output_file, use_text_format, use_gold_segmentation, output):
  """Writes a set of sentences in CoNLL format.

  Args:
    output_file: The file to write to.
    use_text_format: Whether this computation used text-format input.
    use_gold_segmentation: Whether this computation used gold segmentation.
    output: A list of sentences to write to the output file.
  """
  with gfile.GFile(output_file, 'w') as f:
    f.write('## tf:{}\n'.format(use_text_format))
    f.write('## gs:{}\n'.format(use_gold_segmentation))
    for serialized_sentence in output:
      sentence = sentence_pb2.Sentence()
      sentence.ParseFromString(serialized_sentence)
      f.write('# text = {}\n'.format(sentence.text.encode('utf-8')))
      for i, token in enumerate(sentence.token):
        head = token.head + 1
        f.write('%s\t%s\t_\t_\t_\t_\t%d\t%s\t_\t_\n' %
                (i + 1, token.word.encode('utf-8'), head,
                 token.label.encode('utf-8')))
      f.write('\n') 
Developer: rky0930, Project: yolo_v2, Lines: 24, Source: parse_to_conll.py

Example 6: get_word_freqs

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def get_word_freqs(path, counter, norm_digits=True):
    """Extract word-frequency mapping from file given by path.
    
    Args:
        path: data file of words we wish to extract vocab counts from.
        counter: collections.Counter object for mapping word -> frequency.
        norm_digits: Boolean; if true, all digits are replaced by 0s.
    
    Returns:
        The counter (dict), updated with mappings from word -> frequency. 
    """

    print("Creating vocabulary for data", path)
    with gfile.GFile(path, mode="rb") as f:
        for i, line in enumerate(f):
            if (i + 1) % 100000 == 0:
                print("\tProcessing line", (i + 1))
            line = tf.compat.as_bytes(line)
            tokens = basic_tokenizer(line)
            # Update word frequency counts in vocab counter dict.
            for w in tokens:
                word = _DIGIT_RE.sub(b"0", w) if norm_digits else w
                counter[word] += 1
        return counter 
Developer: mckinziebrandon, Project: DeepChatModels, Lines: 26, Source: io_utils.py

Example 7: get_vocab_dicts

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def get_vocab_dicts(vocabulary_path):
    """Returns word_to_idx, idx_to_word dictionaries given vocabulary.

    Args:
      vocabulary_path: path to the file containing the vocabulary.

    Returns:
      a pair: the vocabulary (a dictionary mapping string to integers), and
      the reversed vocabulary (a list, which reverses the vocabulary mapping).

    Raises:
      ValueError: if the provided vocabulary_path does not exist.
    """
    if gfile.Exists(vocabulary_path):
        rev_vocab = []
        with gfile.GFile(vocabulary_path, mode="rb") as f:
            rev_vocab.extend(f.readlines())
        rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
        vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
        return vocab, rev_vocab
    else:
        raise ValueError("Vocabulary file %s not found.", vocabulary_path) 
Developer: mckinziebrandon, Project: DeepChatModels, Lines: 24, Source: io_utils.py

Example 8: data_to_token_ids

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def data_to_token_ids(data_path, target_path, vocabulary_path, normalize_digits=True):
    """Tokenize data file and turn into token-ids using given vocabulary file.

    This function loads data line-by-line from data_path, calls the above
    sentence_to_token_ids, and saves the result to target_path.

    Args:
      data_path: path to the data file in one-sentence-per-line format.
      target_path: path where the file with token-ids will be created.
      vocabulary_path: path to the vocabulary file.
      normalize_digits: Boolean; if true, all digits are replaced by 0s.
    """
    if not gfile.Exists(target_path):
        print("Tokenizing data in %s" % data_path)
        vocab, _ = get_vocab_dicts(vocabulary_path=vocabulary_path)
        with gfile.GFile(data_path, mode="rb") as data_file:
            with gfile.GFile(target_path, mode="w") as tokens_file:
                counter = 0
                for line in data_file:
                    counter += 1
                    if counter % 100000 == 0:
                        print("  tokenizing line %d" % counter)
                    token_ids = sentence_to_token_ids(
                        tf.compat.as_bytes(line), vocab, normalize_digits)
                    tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n") 
Developer: mckinziebrandon, Project: DeepChatModels, Lines: 27, Source: io_utils.py

Example 9: maybe_download_and_extract

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def maybe_download_and_extract(filename, data_dir, source_url):
  """Maybe download and extract a file."""
  if not gfile.Exists(data_dir):
    gfile.MakeDirs(data_dir)

  filepath = os.path.join(data_dir, filename)

  if not gfile.Exists(filepath):
    print('Downloading from {}'.format(source_url))
    temp_file_name, _ = urllib.request.urlretrieve(source_url)
    gfile.Copy(temp_file_name, filepath)
    with gfile.GFile(filepath) as f:
      size = f.size()
    print('Successfully downloaded \'{}\' of {} bytes'.format(filename, size))

  if filename.endswith('.zip'):
    print('Extracting {}'.format(filename))
    zipfile.ZipFile(file=filepath, mode='r').extractall(data_dir) 
Developer: GoogleCloudPlatform, Project: solutions-vision-search, Lines: 20, Source: task.py

Example 10: map_chars

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def map_chars(file_chars, chars=None):
  """Creates character-index mapping. The mapping needs to be constant for
    training and inference.
  """
  if not os.path.exists(file_chars):
    tf.logging.info('WARNING!!!! regenerating %s', file_chars)
    idx_to_char = {i + 1: c for i, c in enumerate(chars)}
    # 0 is not used, dense to sparse array
    idx_to_char[0] = ''
    # null label
    idx_to_char[len(idx_to_char)] = '_'

    with gfile.GFile(file_chars, 'w') as fp:
      for i, c in idx_to_char.items():
        fp.write('%d,%s\n' % (i, c))
  else:
    with gfile.GFile(file_chars, 'r') as fp:
      reader = csv.reader(fp, delimiter=',')
      idx_to_char = {int(i): c for i, c in reader}

  char_to_idx = {c: i for i, c in idx_to_char.items()}
  return idx_to_char, char_to_idx 
Developer: huschen, Project: kaggle_speech_recognition, Lines: 24, Source: util_data.py

Example 11: create_vocabulary

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
                      tokenizer=None, normalize_digits=True):
  """Create vocabulary file (if it does not exist yet) from data file.

  Data file is assumed to contain one sentence per line. Each sentence is
  tokenized and digits are normalized (if normalize_digits is set).
  Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
  We write it to vocabulary_path in a one-token-per-line format, so that later
  the token in the first line gets id=0, the token in the second line gets id=1,
  and so on.

  Args:
    vocabulary_path: path where the vocabulary will be created.
    data_path: data file that will be used to create vocabulary.
    max_vocabulary_size: limit on the size of the created vocabulary.
    tokenizer: a function to use to tokenize each data sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, all digits are replaced by 0s.
  """
  if not gfile.Exists(vocabulary_path):
    print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
    vocab = {}
    with gfile.GFile(data_path, mode="rb") as f:
      counter = 0
      for line in f:
        counter += 1
        if counter % 100000 == 0:
          print("  processing line %d" % counter)
        line = tf.compat.as_bytes(line)
        tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
        for w in tokens:
          word = _DIGIT_RE.sub(b"0", w) if normalize_digits else w
          if word in vocab:
            vocab[word] += 1
          else:
            vocab[word] = 1
      vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
      if len(vocab_list) > max_vocabulary_size:
        vocab_list = vocab_list[:max_vocabulary_size]
      with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
        for w in vocab_list:
          vocab_file.write(w + b"\n") 
Developer: ringringyi, Project: DOTA_models, Lines: 43, Source: data_utils.py

Example 12: initialize_vocabulary

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def initialize_vocabulary(vocabulary_path):
  """Initialize vocabulary from file.

  We assume the vocabulary is stored one-item-per-line, so a file:
    dog
    cat
  will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
  also return the reversed-vocabulary ["dog", "cat"].

  Args:
    vocabulary_path: path to the file containing the vocabulary.

  Returns:
    a pair: the vocabulary (a dictionary mapping string to integers), and
    the reversed vocabulary (a list, which reverses the vocabulary mapping).

  Raises:
    ValueError: if the provided vocabulary_path does not exist.
  """
  if gfile.Exists(vocabulary_path):
    rev_vocab = []
    with gfile.GFile(vocabulary_path, mode="rb") as f:
      rev_vocab.extend(f.readlines())
    rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
    vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
    return vocab, rev_vocab
  else:
    raise ValueError("Vocabulary file %s not found.", vocabulary_path) 
Developer: ringringyi, Project: DOTA_models, Lines: 30, Source: data_utils.py

Example 13: data_to_token_ids

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def data_to_token_ids(data_path, target_path, vocabulary_path,
                      tokenizer=None, normalize_digits=True):
  """Tokenize data file and turn into token-ids using given vocabulary file.

  This function loads data line-by-line from data_path, calls the above
  sentence_to_token_ids, and saves the result to target_path. See comment
  for sentence_to_token_ids on the details of token-ids format.

  Args:
    data_path: path to the data file in one-sentence-per-line format.
    target_path: path where the file with token-ids will be created.
    vocabulary_path: path to the vocabulary file.
    tokenizer: a function to use to tokenize each sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, all digits are replaced by 0s.
  """
  if not gfile.Exists(target_path):
    print("Tokenizing data in %s" % data_path)
    vocab, _ = initialize_vocabulary(vocabulary_path)
    with gfile.GFile(data_path, mode="rb") as data_file:
      with gfile.GFile(target_path, mode="w") as tokens_file:
        counter = 0
        for line in data_file:
          counter += 1
          if counter % 100000 == 0:
            print("  tokenizing line %d" % counter)
          token_ids = sentence_to_token_ids(tf.compat.as_bytes(line), vocab,
                                            tokenizer, normalize_digits)
          tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n") 
Developer: ringringyi, Project: DOTA_models, Lines: 31, Source: data_utils.py
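The two helpers from data_utils.py above (create_vocabulary in Example 11 and data_to_token_ids in Example 13) are typically used together; a hypothetical invocation with placeholder paths and vocabulary size might look like:

# Build a 40k-token vocabulary from the training data, then convert the data to token ids.
create_vocabulary('/tmp/vocab40000.en', '/tmp/train.en', 40000)
data_to_token_ids('/tmp/train.en', '/tmp/train.ids40000.en', '/tmp/vocab40000.en')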

Example 14: write_image

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def write_image(image_path, rgb):
  ext = os.path.splitext(image_path)[1]
  with gfile.GFile(image_path, 'w') as f:
    img_str = cv2.imencode(ext, rgb[:,:,::-1])[1].tostring()
    f.write(img_str) 
Developer: ringringyi, Project: DOTA_models, Lines: 7, Source: file_utils.py

Example 15: optimize_graph

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import GFile [as alias]
def optimize_graph(graph):
    """Strips unused subgraphs and save it as another frozen TF model."""
    gdef = strip_unused_lib.strip_unused(
            input_graph_def = graph.as_graph_def(),
            input_node_names = [input_node],
            output_node_names = [bbox_output_node, class_output_node],
            placeholder_type_enum = dtypes.float32.as_datatype_enum)

    with gfile.GFile(frozen_model_file, "wb") as f:
        f.write(gdef.SerializeToString())


# Load the original graph and remove anything we don't need. 
Developer: hollance, Project: coreml-survival-guide, Lines: 15, Source: ssdlite.py


Note: The tensorflow.python.platform.gfile.GFile examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult the corresponding project's license before distributing or reusing the code, and do not reproduce this article without permission.