

Python gfile.Glob Method Code Examples

This article collects typical usage examples of the gfile.Glob method from tensorflow.python.platform in Python. If you are wondering what gfile.Glob does, how to use it, or where to find real-world examples, the curated code samples below should help. You can also explore other usage examples from the containing module, tensorflow.python.platform.gfile.


The sections below present 15 code examples of the gfile.Glob method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
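
Before diving into the examples, a quick orientation: gfile.Glob takes a shell-style glob pattern (wildcards *, ?, and [...]) and returns a list of matching path strings, working uniformly across local paths and any remote filesystems TensorFlow has registered (for example gs:// buckets). A minimal sketch, assuming a hypothetical local directory of .txt files:

from tensorflow.python.platform import gfile

# Glob returns a list of matching paths; ordering is not guaranteed,
# so sort the result when deterministic output matters.
matches = gfile.Glob('/tmp/example_dir/*.txt')  # hypothetical directory
for path in sorted(matches):
    print(path)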

Example 1: get_maxiter_weights

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
import re

import numpy as np
from tensorflow.python.framework.errors import NotFoundError
from tensorflow.python.platform import gfile

def get_maxiter_weights(dir):
    try:
        filenames = gfile.Glob(dir + '/model*')
    except NotFoundError:
        print('nothing found at ', dir + '/model*')
        return None
    iternums = []
    if len(filenames) != 0:
        for f in filenames:
            try:
                # checkpoint names are assumed to end in their iteration number
                iternums.append(int(re.match('.*?([0-9]+)$', f).group(1)))
            except (AttributeError, ValueError):  # no trailing digits in this name
                iternums.append(-1)
        iternums = np.array(iternums)
        return filenames[np.argmax(iternums)].split('.')[0]  # drop the suffix after the '.'
    else:
        return None
Developer: SudeepDasari, Project: visual_foresight, Lines of code: 19, Source: setup_predictor.py
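
A hypothetical call of Example 1, assuming a checkpoint directory containing files such as model48000.index:

# returns the checkpoint prefix with the largest trailing iteration
# number, or None if nothing matched the 'model*' pattern
restore_path = get_maxiter_weights('/tmp/checkpoints')  # hypothetical directory
if restore_path is not None:
    print('restoring from', restore_path)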

Example 2: convert

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
import os

import numpy as np
import tensorflow as tf
from PIL import Image
# imsave import assumed from the source project; scipy.misc.imsave is
# removed in recent SciPy (imageio.imwrite is the usual replacement)
from scipy.misc import imsave
from tensorflow.python.platform import gfile

def convert(data_path):
    # iterate through the data splits
    for data_split in ['train', 'test']:
        os.makedirs(os.path.join(data_path, data_split))
        data_split_path = os.path.join(data_path, 'softmotion30_44k', data_split)
        data_split_files = gfile.Glob(os.path.join(data_split_path, '*'))
        # iterate through the TF records
        for f in data_split_files:
            print('Current file: ' + f)
            ind = int(f.split('/')[-1].split('_')[1])  # starting video index
            # iterate through the sequences in this TF record
            for serialized_example in tf.python_io.tf_record_iterator(f):
                os.makedirs(os.path.join(data_path, data_split, str(ind)))
                example = tf.train.Example()
                example.ParseFromString(serialized_example)
                # iterate through the sequence
                for i in range(30):
                    image_name = str(i) + '/image_aux1/encoded'
                    byte_str = example.features.feature[image_name].bytes_list.value[0]
                    img = Image.frombytes('RGB', (64, 64), byte_str)
                    img = np.array(img.getdata()).reshape(img.size[1], img.size[0], 3) / 255.
                    imsave(os.path.join(data_path, data_split, str(ind), str(i) + '.png'), img)
                print('     Finished processing sequence ' + str(ind))
                ind += 1
Developer: joelouismarino, Project: amortized-variational-filtering, Lines of code: 26, Source: convert_bair.py
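
A hypothetical invocation of Example 2, assuming the BAIR pushing dataset has been extracted so that data_path contains the softmotion30_44k/train and softmotion30_44k/test subdirectories:

convert('/tmp/bair')  # hypothetical path; writes PNG frames under /tmp/bair/{train,test}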

Example 3: build_image_input

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
import os

import tensorflow as tf
from tensorflow.python.platform import gfile

def build_image_input(train=True, novel=True):
  """Create input tfrecord tensors.

  Args:
    train: whether to read from the training set.
    novel: whether or not to grab novel or seen images.
  Returns:
    list of tensors corresponding to images. The images
    tensor is 5D, batch x time x height x width x channels.
  Raises:
    RuntimeError: if no files found.
  """
  # note: all three branches currently resolve to the same directory
  if train:
    data_dir = os.path.expanduser('~/Downloads/google_brainrobotdata_push')
  elif novel:
    data_dir = os.path.expanduser('~/Downloads/google_brainrobotdata_push')
  else:
    data_dir = os.path.expanduser('~/Downloads/google_brainrobotdata_push')
  filenames = gfile.Glob(os.path.join(data_dir, '*'))
  print(filenames)
  if not filenames:
    raise RuntimeError('No data files found.')
  filename_queue = tf.train.string_input_producer(filenames, shuffle=False)
  reader = tf.TFRecordReader()
  _, serialized_example = reader.read(filename_queue)
Developer: jhu-lcsr, Project: costar_plan, Lines of code: 26, Source: push_dataset_grab_train_images.py

Example 4: create_data_list

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
import os

import numpy as np
from PIL import Image
from tensorflow.python.platform import gfile

# IMAGE_WIDTH and IMAGE_HEIGHT are module-level constants in the source project
def create_data_list(image_dir):
  if not gfile.Exists(image_dir):
    print("Image directory '" + image_dir + "' not found.")
    return None
  extensions = ['jpg', 'JPG', 'jpeg', 'JPEG', 'png', 'PNG']
  print("Looking for images in '" + image_dir + "'")
  file_list = []
  for extension in extensions:
    file_glob = os.path.join(image_dir, '*.' + extension)
    file_list.extend(gfile.Glob(file_glob))
  if not file_list:
    print("No files found in '" + image_dir + "'")
    return None
  images = []
  labels = []
  for file_name in file_list:
    image = Image.open(file_name)
    image_gray = image.convert('L')  # grayscale
    image_resize = image_gray.resize(size=(IMAGE_WIDTH, IMAGE_HEIGHT))
    input_img = np.array(image_resize, dtype='int16')
    image.close()
    label_name = os.path.basename(file_name).split('_')[0]
    images.append(input_img)
    labels.append(label_name)
  return zip(images, labels)  # an iterator under Python 3
Developer: PatrickLib, Project: captcha_recognize, Lines of code: 27, Source: captcha_records.py
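
A hypothetical usage sketch for Example 4. Note that under Python 3 the returned zip object is a single-pass iterator, so materialize it if you need to traverse it more than once:

pairs = create_data_list('/tmp/captcha_images')  # hypothetical directory
if pairs is not None:
    images, labels = zip(*pairs)  # unzip into two parallel tuples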

Example 5: prepare_background_data

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def prepare_background_data(self):
    """Searches a folder for background noise audio, and loads it into memory.

    It's expected that the background audio samples will be in a subdirectory
    named '_background_noise_' inside the 'data_dir' folder, as .wavs that match
    the sample rate of the training data, but can be much longer in duration.

    If the '_background_noise_' folder doesn't exist at all, this isn't an
    error, it's just taken to mean that no background noise augmentation should
    be used. If the folder does exist, but it's empty, that's treated as an
    error.

    Returns:
      List of raw PCM-encoded audio samples of background noise.

    Raises:
      Exception: If files aren't found in the folder.
    """
    self.background_data = []
    background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)
    if not os.path.exists(background_dir):
      return self.background_data
    with tf.Session(graph=tf.Graph()) as sess:
      wav_filename_placeholder = tf.placeholder(tf.string, [])
      wav_loader = io_ops.read_file(wav_filename_placeholder)
      wav_decoder = contrib_audio.decode_wav(wav_loader, desired_channels=1)
      search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME,
                                 '*.wav')
      for wav_path in gfile.Glob(search_path):
        wav_data = sess.run(
            wav_decoder,
            feed_dict={wav_filename_placeholder: wav_path}).audio.flatten()
        self.background_data.append(wav_data)
      if not self.background_data:
        raise Exception('No background wav files were found in ' + search_path) 
Developer: nesl, Project: adversarial_audio, Lines of code: 37, Source: input_data.py

Example 6: get_data_files

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def get_data_files(data_sources):
  """Get data_files from data_sources.

  Args:
    data_sources: a list/tuple of files or the location of the data, i.e.
      /path/to/train@128, /path/to/train* or /tmp/.../train*

  Returns:
    a list of data_files.

  Raises:
    ValueError: if no data files are found

  """
  if isinstance(data_sources, (list, tuple)):
    data_files = []
    for source in data_sources:
      data_files += get_data_files(source)
  else:
    if '*' in data_sources or '?' in data_sources or '[' in data_sources:
      data_files = gfile.Glob(data_sources)
    else:
      data_files = [data_sources]
  if not data_files:
    raise ValueError('No data files found in %s' % (data_sources,))
  return data_files 
Developer: ryfeus, Project: lambda-packs, Lines of code: 28, Source: parallel_reader.py
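
A hypothetical usage sketch for Example 6 showing both accepted input forms:

# a pattern containing *, ? or [ is expanded via gfile.Glob ...
files = get_data_files('/tmp/data/train-*')  # hypothetical pattern
# ... while plain paths (alone or in a list/tuple) are returned as-is
files = get_data_files(['/tmp/data/a.tfrecord', '/tmp/data/b.tfrecord'])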

Example 7: _get_file_names

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def _get_file_names(file_pattern, randomize_input):
  """Parse list of file names from pattern, optionally shuffled.

  Args:
    file_pattern: File glob pattern, or list of glob patterns.
    randomize_input: Whether to shuffle the order of file names.

  Returns:
    List of file names matching `file_pattern`.

  Raises:
    ValueError: If `file_pattern` is empty, or pattern matches no files.
  """
  if isinstance(file_pattern, list):
    if not file_pattern:
      raise ValueError("File pattern is empty.")
    file_names = []
    for entry in file_pattern:
      file_names.extend(gfile.Glob(entry))
  else:
    file_names = list(gfile.Glob(file_pattern))

  if not file_names:
    raise ValueError("No files match %s." % file_pattern)

  # Sort files so it will be deterministic for unit tests.
  if not randomize_input:
    file_names = sorted(file_names)
  return file_names 
Developer: ryfeus, Project: lambda-packs, Lines of code: 31, Source: dataset_ops.py

Example 8: _expand_file_names

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def _expand_file_names(filepatterns):
  """Takes a list of file patterns and returns a list of resolved file names."""
  if not isinstance(filepatterns, (list, tuple, set)):
    filepatterns = [filepatterns]
  filenames = set()
  for filepattern in filepatterns:
    names = set(gfile.Glob(filepattern))
    filenames |= names
  return list(filenames) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 11, Source: tensorflow_dataframe.py
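
A hypothetical usage sketch for Example 8. Because matches are accumulated in a set, files matched by overlapping patterns are de-duplicated, and the order of the returned list is arbitrary:

names = _expand_file_names(['/tmp/data/a*', '/tmp/data/*.txt'])  # hypothetical patterns
names = _expand_file_names('/tmp/data/part-*')  # a bare string also works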

Example 9: _get_file_names

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def _get_file_names(file_pattern, randomize_input):
  """Parse list of file names from pattern, optionally shuffled.

  Args:
    file_pattern: File glob pattern, or list of glob patterns.
    randomize_input: Whether to shuffle the order of file names.

  Returns:
    List of file names matching `file_pattern`.

  Raises:
    ValueError: If `file_pattern` is empty, or pattern matches no files.
  """
  if isinstance(file_pattern, list):
    if not file_pattern:
      raise ValueError('No files given to dequeue_examples.')
    file_names = []
    for entry in file_pattern:
      file_names.extend(gfile.Glob(entry))
  else:
    file_names = list(gfile.Glob(file_pattern))

  if not file_names:
    raise ValueError('No files match %s.' % file_pattern)

  # Sort files so it will be deterministic for unit tests. They'll be shuffled
  # in `string_input_producer` if `randomize_input` is enabled.
  if not randomize_input:
    file_names = sorted(file_names)
  return file_names 
Developer: ryfeus, Project: lambda-packs, Lines of code: 32, Source: graph_io.py

Example 10: _get_file_names

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def _get_file_names(file_pattern, randomize_input):
  """Parse list of file names from pattern, optionally shuffled.

  Args:
    file_pattern: File glob pattern, or list of strings.
    randomize_input: Whether to shuffle the order of file names.

  Returns:
    List of file names matching `file_pattern`.

  Raises:
    ValueError: If `file_pattern` is empty, or pattern matches no files.
  """
  if isinstance(file_pattern, list):
    file_names = file_pattern
    if not file_names:
      raise ValueError('No files given to dequeue_examples.')
  else:
    file_names = list(gfile.Glob(file_pattern))
    if not file_names:
      raise ValueError('No files match %s.' % file_pattern)

  # Sort files so it will be deterministic for unit tests. They'll be shuffled
  # in `string_input_producer` if `randomize_input` is enabled.
  if not randomize_input:
    file_names = sorted(file_names)
  return file_names 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 29, Source: graph_io.py

Example 11: test_example1

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def test_example1(self):
    n_frames = 5
    convert_videos_to_tfrecord(source_path=in_path, destination_path=out_path,
                               n_videos_in_record=n_videos_per_record,
                               n_frames_per_video=n_frames,
                               dense_optical_flow=True,
                               file_suffix="*.mp4")

    filenames = gfile.Glob(os.path.join(out_path, "*.tfrecords"))
    n_files = len(filenames)

    self.assertTrue(filenames)
    self.assertEqual(n_files * n_videos_per_record,
                     get_number_of_records(filenames, n_frames)) 
Developer: ferreirafabio, Project: video2tfrecord, Lines of code: 16, Source: test.py

Example 12: minibatch

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def minibatch(self, dataset, subset, cache_data=False):

    with tf.compat.v1.name_scope('batch_processing'):

      glob_pattern = dataset.tf_record_pattern(subset)
      file_names = gfile.Glob(glob_pattern)
      if not file_names:
        raise ValueError('Found no files in --data_dir matching: {}'
                         .format(glob_pattern))
      ds = tf.data.TFRecordDataset.list_files(file_names)

      ds = ds.apply(
        parallel_interleave(
          tf.data.TFRecordDataset, cycle_length=self.num_cores, block_length=5,
          sloppy=True,
          buffer_output_elements=10000, prefetch_input_elements=10000))

      if cache_data:
        ds = ds.take(1).cache().repeat()

      ds = ds.prefetch(buffer_size=10000)

      # cap the number of parallel batches at 56
      max_num_parallel_batches = min(56, 2 * self.num_cores)
      ds = ds.apply(
        map_and_batch(
          map_func=self.parse_and_preprocess,
          batch_size=self.batch_size,
          num_parallel_batches=max_num_parallel_batches,
          num_parallel_calls=None))

      ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)  # this number can be tuned

      ds_iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
      images, labels = ds_iterator.get_next()
      # reshape
      labels = tf.reshape(labels, [self.batch_size])

      return images, labels 
Developer: IntelAI, Project: models, Lines of code: 41, Source: preprocessing.py

Example 13: minibatch

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def minibatch(self, dataset, subset, cache_data=False):

    with tf.compat.v1.name_scope('batch_processing'):

      glob_pattern = dataset.tf_record_pattern(subset)
      file_names = gfile.Glob(glob_pattern)
      if not file_names:
        raise ValueError('Found no files in --data_dir matching: {}'
                         .format(glob_pattern))
      ds = tf.data.TFRecordDataset.list_files(file_names)

      ds = ds.apply(
        parallel_interleave(
          tf.data.TFRecordDataset, cycle_length=self.num_cores, block_length=5,
          sloppy=True,
          buffer_output_elements=10000, prefetch_input_elements=10000))

      if cache_data:
        ds = ds.take(1).cache().repeat()

      ds = ds.prefetch(buffer_size=10000)

      # cap the number of parallel batches at 56
      max_num_parallel_batches = min(56, 2*self.num_cores)
      ds = ds.apply(
        map_and_batch(
          map_func=self.parse_and_preprocess,
          batch_size=self.batch_size,
          num_parallel_batches=max_num_parallel_batches,
          num_parallel_calls=None))  # this number should be tuned

      ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)  # this number can be tuned

      ds_iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
      images, _ = ds_iterator.get_next()

      return images 
Developer: IntelAI, Project: models, Lines of code: 39, Source: preprocessing_benchmark.py

Example 14: minibatch

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def minibatch(self, dataset, subset, cache_data=False):

    with tf.compat.v1.name_scope('batch_processing'):

      glob_pattern = dataset.tf_record_pattern(subset)
      file_names = gfile.Glob(glob_pattern)
      if not file_names:
        raise ValueError('Found no files in --data_dir matching: {}'
                         .format(glob_pattern))
      ds = tf.data.TFRecordDataset.list_files(file_names)

      ds = ds.apply(
        parallel_interleave(
          tf.data.TFRecordDataset, cycle_length=self.num_cores, block_length=5,
          sloppy=True,
          buffer_output_elements=10000, prefetch_input_elements=10000))

      if cache_data:
        ds = ds.take(1).cache().repeat()

      ds = ds.prefetch(buffer_size=10000)
      #ds = ds.prefetch(buffer_size=self.batch_size)

      # cap the number of parallel batches at 56
      max_num_parallel_batches = min(56, 2 * self.num_cores)
      ds = ds.apply(
        map_and_batch(
          map_func=self.parse_and_preprocess,
          batch_size=self.batch_size,
          num_parallel_batches=max_num_parallel_batches,
          num_parallel_calls=None))

      ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

      ds_iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
      images, labels, filename = ds_iterator.get_next()
      # reshape
      labels = tf.reshape(labels, [self.batch_size])
      filename = tf.reshape(filename, [self.batch_size])

      return images, labels, filename 
Developer: IntelAI, Project: models, Lines of code: 43, Source: preprocessing.py

Example 15: minibatch

# Required module: from tensorflow.python.platform import gfile [as alias]
# Or: from tensorflow.python.platform.gfile import Glob [as alias]
def minibatch(self, dataset, subset, cache_data=False):

    with tf.compat.v1.name_scope('batch_processing'):

      glob_pattern = dataset.tf_record_pattern(subset)
      file_names = gfile.Glob(glob_pattern)
      if not file_names:
        raise ValueError('Found no files in --data_dir matching: {}'
                         .format(glob_pattern))
      ds = tf.data.TFRecordDataset.list_files(file_names)

      ds = ds.apply(
        parallel_interleave(
          tf.data.TFRecordDataset, cycle_length=self.num_cores, block_length=5,
          sloppy=True,
          buffer_output_elements=10000, prefetch_input_elements=10000))

      if cache_data:
        ds = ds.take(1).cache().repeat()

      ds = ds.prefetch(buffer_size=10000)
      # ds = ds.prefetch(buffer_size=self.batch_size)

      # cap the number of parallel batches at 56
      max_num_parallel_batches = min(56, 2*self.num_cores)
      ds = ds.apply(
        map_and_batch(
          map_func=self.parse_and_preprocess,
          batch_size=self.batch_size,
          num_parallel_batches=max_num_parallel_batches,
          num_parallel_calls=None))  # this number should be tuned

      ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)  # this number can be tuned

      ds_iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
      images, _ = ds_iterator.get_next()

      return images 
Developer: IntelAI, Project: models, Lines of code: 40, Source: preprocessing_benchmark.py
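
Finally, note that tensorflow.python.platform.gfile is an internal TensorFlow module; in TensorFlow 2 the supported public equivalent is tf.io.gfile.glob, which accepts the same pattern syntax. A minimal sketch:

import tensorflow as tf

matches = tf.io.gfile.glob('/tmp/data/*.tfrecords')  # same semantics as gfile.Glob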


Note: The tensorflow.python.platform.gfile.Glob examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not republish without permission.