

Python gfile.Exists Method Code Examples

This article collects typical usage examples of the gfile.Exists method from tensorflow.python.platform in Python. If you have been wondering what exactly gfile.Exists does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples for the containing module, tensorflow.python.platform.gfile.


The sections below present 15 code examples of the gfile.Exists method, ordered by popularity by default.
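Before diving in, here is a minimal, self-contained sketch of the check-before-read pattern that most of the snippets below share (the path is purely illustrative, not taken from any project below):

from tensorflow.python.platform import gfile

path = '/tmp/example.txt'  # illustrative path
if gfile.Exists(path):
    with gfile.GFile(path, 'r') as f:
        print(f.read())
else:
    print('File %s does not exist' % path)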

Example 1: create_bottleneck_file

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  print('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, bottleneck_tensor)
  except Exception as e:
    raise RuntimeError('Error during processing file %s (%s)' % (image_path,
                                                                 str(e)))

  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string) 
Author: ArunMichaelDsouza | Project: tensorflow-image-detection | Lines: 21 | Source: retrain.py
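As a companion to Example 1: the bottleneck file written above is a single comma-separated line of floats, so it can be read back as sketched below (the helper name is illustrative; retrain.py ships its own caching logic):

def load_bottleneck_values(bottleneck_path):
    # Read one cached bottleneck file back into a list of floats.
    with open(bottleneck_path, 'r') as bottleneck_file:
        bottleneck_string = bottleneck_file.read()
    return [float(x) for x in bottleneck_string.split(',')]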

Example 2: Load

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def Load(self):
    """Loads new values.

    The watcher will load from one path at a time; as soon as that path stops
    yielding events, it will move on to the next path. We assume that old paths
    are never modified after a newer path has been written. As a result, Load()
    can be called multiple times in a row without losing events that have not
    been yielded yet. In other words, we guarantee that every event will be
    yielded exactly once.

    Yields:
      All values that have not been yielded yet.

    Raises:
      DirectoryDeletedError: If the directory has been permanently deleted
        (as opposed to being temporarily unavailable).
    """
    try:
      for event in self._LoadInternal():
        yield event
    except errors.OpError:
      if not gfile.Exists(self._directory):
        raise DirectoryDeletedError(
            'Directory %s has been permanently deleted' % self._directory) 
Author: ryfeus | Project: lambda-packs | Lines: 26 | Source: directory_watcher.py
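A minimal usage sketch for Example 2, inferred from its docstring (assumes `watcher` is an already-constructed directory watcher; `handle_event` is a placeholder):

# Load() may be called repeatedly; the docstring guarantees each event
# is yielded exactly once across calls.
for event in watcher.Load():
    handle_event(event)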

Example 3: maybe_download

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def maybe_download(filename, work_directory, source_url):
  """Download the data from source url, unless it's already here.

  Args:
      filename: string, name of the file in the directory.
      work_directory: string, path to working directory.
      source_url: url to download from if file doesn't exist.

  Returns:
      Path to resulting file.
  """
  if not gfile.Exists(work_directory):
    gfile.MakeDirs(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not gfile.Exists(filepath):
    temp_file_name, _ = urlretrieve_with_retry(source_url)
    gfile.Copy(temp_file_name, filepath)
    with gfile.GFile(filepath) as f:
      size = f.size()
    print('Successfully downloaded', filename, size, 'bytes.')
  return filepath 
Author: ryfeus | Project: lambda-packs | Lines: 23 | Source: base.py
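A usage sketch for Example 3 (the file name and URL are purely illustrative):

DATA_URL = 'https://example.com/data/mnist.npz'  # illustrative URL
filepath = maybe_download('mnist.npz', '/tmp/mnist_data', DATA_URL)
print('Training data available at', filepath)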

Example 4: testPathsWithParse

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def testPathsWithParse(self):
    base_dir = os.path.join(test.get_temp_dir(), "paths_parse")
    self.assertFalse(gfile.Exists(base_dir))
    for p in xrange(3):
      gfile.MakeDirs(os.path.join(base_dir, "%d" % p))
    # add a base_directory to ignore
    gfile.MakeDirs(os.path.join(base_dir, "ignore"))

    # create a simple parser that pulls the export_version from the directory.
    def parser(path):
      match = re.match("^" + base_dir + "/(\\d+)$", path.path)
      if not match:
        return None
      return path._replace(export_version=int(match.group(1)))

    self.assertEqual(
        gc.get_paths(
            base_dir, parser=parser), [
                gc.Path(os.path.join(base_dir, "0"), 0),
                gc.Path(os.path.join(base_dir, "1"), 1),
                gc.Path(os.path.join(base_dir, "2"), 2)
            ]) 
Author: abhisuri97 | Project: auto-alt-text-lambda-api | Lines: 24 | Source: gc_test.py

Example 5: create_image_lists

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def create_image_lists(image_dir):
    if not gfile.Exists(image_dir):
        print("Image directory '" + image_dir + "' not found.")
        return None
    image_list = []

    file_list = []
    file_glob = os.path.join(image_dir, "images", "*.jpg")
    file_list.extend(glob.glob(file_glob))

    if not file_list:
        print('No files found')
    else:
        image_list = file_list

    random.shuffle(image_list)
    no_of_images = len(image_list)
    print('No. of Image files: %d' % no_of_images)

    return image_list 
Author: shekkizh | Project: Colorization.tensorflow | Lines: 22 | Source: read_LaMemDataset.py

Example 6: get_vocab_dicts

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def get_vocab_dicts(vocabulary_path):
    """Returns word_to_idx, idx_to_word dictionaries given vocabulary.

    Args:
      vocabulary_path: path to the file containing the vocabulary.

    Returns:
      a pair: the vocabulary (a dictionary mapping string to integers), and
      the reversed vocabulary (a list, which reverses the vocabulary mapping).

    Raises:
      ValueError: if the provided vocabulary_path does not exist.
    """
    if gfile.Exists(vocabulary_path):
        rev_vocab = []
        with gfile.GFile(vocabulary_path, mode="rb") as f:
            rev_vocab.extend(f.readlines())
        rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
        vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
        return vocab, rev_vocab
    else:
        raise ValueError("Vocabulary file %s not found." % vocabulary_path)
Author: mckinziebrandon | Project: DeepChatModels | Lines: 24 | Source: io_utils.py

Example 7: data_to_token_ids

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def data_to_token_ids(data_path, target_path, vocabulary_path, normalize_digits=True):
    """Tokenize data file and turn into token-ids using given vocabulary file.

    This function loads data line-by-line from data_path, calls the above
    sentence_to_token_ids, and saves the result to target_path.

    Args:
      data_path: path to the data file in one-sentence-per-line format.
      target_path: path where the file with token-ids will be created.
      vocabulary_path: path to the vocabulary file.
      normalize_digits: Boolean; if true, all digits are replaced by 0s.
    """
    if not gfile.Exists(target_path):
        print("Tokenizing data in %s" % data_path)
        vocab, _ = get_vocab_dicts(vocabulary_path=vocabulary_path)
        with gfile.GFile(data_path, mode="rb") as data_file:
            with gfile.GFile(target_path, mode="w") as tokens_file:
                counter = 0
                for line in data_file:
                    counter += 1
                    if counter % 100000 == 0:
                        print("  tokenizing line %d" % counter)
                    token_ids = sentence_to_token_ids(
                        tf.compat.as_bytes(line), vocab, normalize_digits)
                    tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n") 
Author: mckinziebrandon | Project: DeepChatModels | Lines: 27 | Source: io_utils.py

Example 8: create_bottleneck_file

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file."""
  tf.logging.info('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    bottleneck_values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, decoded_image_tensor,
        resized_input_tensor, bottleneck_tensor)
  except Exception as e:
    raise RuntimeError('Error during processing file %s (%s)' % (image_path,
                                                                 str(e)))
  bottleneck_string = ','.join(str(x) for x in bottleneck_values)
  with open(bottleneck_path, 'w') as bottleneck_file:
    bottleneck_file.write(bottleneck_string) 
Author: Spidy20 | Project: Music_player_with_Emotions_recognition | Lines: 23 | Source: retrain.py

Example 9: testLoadExistingVariablesDifferentShapeDefaultDoesNotAllowReshape

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def testLoadExistingVariablesDifferentShapeDefaultDoesNotAllowReshape(self):
    model_dir = tempfile.mkdtemp('load_existing_vars_no_reshape')
    if gfile.Exists(model_dir):
      gfile.DeleteRecursively(model_dir)

    init_value0 = [[10.0, 11.0]]
    init_value1 = 20.0
    var_names_to_values = {'v0': init_value0, 'v1': init_value1}

    with self.cached_session() as sess:
      model_path = self.create_checkpoint_from_values(var_names_to_values,
                                                      model_dir)
      var0 = variables_lib2.variable('my_var0', shape=[2, 1])
      var1 = variables_lib2.variable('my_var1', shape=[])

      vars_to_restore = {'v0': var0, 'v1': var1}
      init_fn = variables_lib2.assign_from_checkpoint_fn(
          model_path, vars_to_restore)

      # Initialize the variables.
      sess.run(variables_lib.global_variables_initializer())

      # Perform the assignment.
      with self.assertRaises(errors_impl.InvalidArgumentError):
        init_fn(sess) 
Author: google-research | Project: tf-slim | Lines: 27 | Source: variables_test.py

Example 10: testReturnsSingleCheckpointIfOneCheckpointFound

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def testReturnsSingleCheckpointIfOneCheckpointFound(self):
    checkpoint_dir = tempfile.mkdtemp('one_checkpoint_found')
    if not gfile.Exists(checkpoint_dir):
      gfile.MakeDirs(checkpoint_dir)

    global_step = variables.get_or_create_global_step()
    saver = saver_lib.Saver()  # Saves the global step.

    with self.cached_session() as session:
      session.run(variables_lib.global_variables_initializer())
      save_path = os.path.join(checkpoint_dir, 'model.ckpt')
      saver.save(session, save_path, global_step=global_step)

    num_found = 0
    for _ in evaluation.checkpoints_iterator(checkpoint_dir, timeout=0):
      num_found += 1
    self.assertEqual(num_found, 1) 
Author: google-research | Project: tf-slim | Lines: 19 | Source: evaluation_test.py

Example 11: testExportInferenceGraph

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def testExportInferenceGraph(self):
    tmpdir = self.get_temp_dir()
    output_file = os.path.join(tmpdir, 'inception_v3.pb')
    flags = tf.app.flags.FLAGS
    flags.output_file = output_file
    flags.model_name = 'inception_v3'
    flags.dataset_dir = tmpdir
    export_inference_graph.main(None)
    self.assertTrue(gfile.Exists(output_file)) 
Author: ringringyi | Project: DOTA_models | Lines: 11 | Source: export_inference_graph_test.py

Example 12: get_wmt_enfr_train_set

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def get_wmt_enfr_train_set(directory):
  """Download the WMT en-fr training corpus to directory unless it's there."""
  train_path = os.path.join(directory, "giga-fren.release2.fixed")
  if not (gfile.Exists(train_path + ".fr") and gfile.Exists(train_path + ".en")):
    corpus_file = maybe_download(directory, "training-giga-fren.tar",
                                 _WMT_ENFR_TRAIN_URL)
    print("Extracting tar file %s" % corpus_file)
    with tarfile.open(corpus_file, "r") as corpus_tar:
      corpus_tar.extractall(directory)
    gunzip_file(train_path + ".fr.gz", train_path + ".fr")
    gunzip_file(train_path + ".en.gz", train_path + ".en")
  return train_path 
Author: ringringyi | Project: DOTA_models | Lines: 14 | Source: data_utils.py

Example 13: get_wmt_enfr_dev_set

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def get_wmt_enfr_dev_set(directory):
  """Download the WMT en-fr training corpus to directory unless it's there."""
  dev_name = "newstest2013"
  dev_path = os.path.join(directory, dev_name)
  if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")):
    dev_file = maybe_download(directory, "dev-v2.tgz", _WMT_ENFR_DEV_URL)
    print("Extracting tgz file %s" % dev_file)
    with tarfile.open(dev_file, "r:gz") as dev_tar:
      fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
      en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
      fr_dev_file.name = dev_name + ".fr"  # Extract without "dev/" prefix.
      en_dev_file.name = dev_name + ".en"
      dev_tar.extract(fr_dev_file, directory)
      dev_tar.extract(en_dev_file, directory)
  return dev_path 
Author: ringringyi | Project: DOTA_models | Lines: 17 | Source: data_utils.py

Example 14: create_vocabulary

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,
                      tokenizer=None, normalize_digits=True):
  """Create vocabulary file (if it does not exist yet) from data file.

  Data file is assumed to contain one sentence per line. Each sentence is
  tokenized and digits are normalized (if normalize_digits is set).
  Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
  We write it to vocabulary_path in a one-token-per-line format, so that the
  token in the first line gets id=0, the token in the second line gets id=1,
  and so on.

  Args:
    vocabulary_path: path where the vocabulary will be created.
    data_path: data file that will be used to create vocabulary.
    max_vocabulary_size: limit on the size of the created vocabulary.
    tokenizer: a function to use to tokenize each data sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, all digits are replaced by 0s.
  """
  if not gfile.Exists(vocabulary_path):
    print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path))
    vocab = {}
    with gfile.GFile(data_path, mode="rb") as f:
      counter = 0
      for line in f:
        counter += 1
        if counter % 100000 == 0:
          print("  processing line %d" % counter)
        line = tf.compat.as_bytes(line)
        tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)
        for w in tokens:
          word = _DIGIT_RE.sub(b"0", w) if normalize_digits else w
          if word in vocab:
            vocab[word] += 1
          else:
            vocab[word] = 1
      vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
      if len(vocab_list) > max_vocabulary_size:
        vocab_list = vocab_list[:max_vocabulary_size]
      with gfile.GFile(vocabulary_path, mode="wb") as vocab_file:
        for w in vocab_list:
          vocab_file.write(w + b"\n") 
Author: ringringyi | Project: DOTA_models | Lines: 43 | Source: data_utils.py

Example 15: initialize_vocabulary

# Required import: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Exists [as alias]
def initialize_vocabulary(vocabulary_path):
  """Initialize vocabulary from file.

  We assume the vocabulary is stored one-item-per-line, so a file:
    dog
    cat
  will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
  also return the reversed-vocabulary ["dog", "cat"].

  Args:
    vocabulary_path: path to the file containing the vocabulary.

  Returns:
    a pair: the vocabulary (a dictionary mapping string to integers), and
    the reversed vocabulary (a list, which reverses the vocabulary mapping).

  Raises:
    ValueError: if the provided vocabulary_path does not exist.
  """
  if gfile.Exists(vocabulary_path):
    rev_vocab = []
    with gfile.GFile(vocabulary_path, mode="rb") as f:
      rev_vocab.extend(f.readlines())
    rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
    vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
    return vocab, rev_vocab
  else:
    raise ValueError("Vocabulary file %s not found." % vocabulary_path)
Author: ringringyi | Project: DOTA_models | Lines: 30 | Source: data_utils.py
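To make the vocab/rev_vocab contract of Examples 6 and 15 concrete, here is a plain-Python illustration of the two return values (not code from the source project):

rev_vocab = [b'dog', b'cat']  # one token per line, in file order
vocab = dict((x, y) for (y, x) in enumerate(rev_vocab))
assert vocab == {b'dog': 0, b'cat': 1}     # token -> id
assert rev_vocab[vocab[b'cat']] == b'cat'  # id -> token round-trip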


Note: The tensorflow.python.platform.gfile.Exists examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their original authors; copyright remains with those authors, and redistribution or use should follow each project's license. Do not repost without permission.