

Python io_ops.TFRecordReader Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.io_ops.TFRecordReader method in Python. If you are wondering what io_ops.TFRecordReader does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the usage of other members of the tensorflow.python.ops.io_ops module.


The following presents 12 code examples of the io_ops.TFRecordReader method, sorted by popularity by default.
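
Before the examples, here is a minimal sketch of the classic queue-based input pipeline they all build on. It is not taken from the examples below; it assumes TensorFlow 1.x graph mode, the public tf.TFRecordReader alias of io_ops.TFRecordReader, and a hypothetical file train.tfrecord containing Example protos with an int64 'label' feature.

import tensorflow as tf

filename_queue = tf.train.string_input_producer(['train.tfrecord'], num_epochs=1)
reader = tf.TFRecordReader()  # public alias of io_ops.TFRecordReader
key, serialized = reader.read(filename_queue)  # one record per sess.run
features = tf.parse_single_example(
    serialized, features={'label': tf.FixedLenFeature([1], tf.int64)})

with tf.Session() as sess:
  # num_epochs is tracked in a local variable, so initialize local variables.
  sess.run(tf.local_variables_initializer())
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  try:
    while not coord.should_stop():
      print(sess.run([key, features['label']]))
  except tf.errors.OutOfRangeError:
    pass  # one full epoch has been read
  finally:
    coord.request_stop()
    coord.join(threads)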

Example 1: TFRecordSource

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def TFRecordSource(file_names,
                   reader_kwargs=None,
                   enqueue_size=1,
                   batch_size=1,
                   queue_capacity=None,
                   shuffle=False,
                   min_after_dequeue=None,
                   num_threads=1,
                   seed=None):
  return ReaderSource(io_ops.TFRecordReader,
                      work_units=file_names,
                      reader_kwargs=reader_kwargs,
                      enqueue_size=enqueue_size,
                      batch_size=batch_size,
                      queue_capacity=queue_capacity,
                      shuffle=shuffle,
                      min_after_dequeue=min_after_dequeue,
                      num_threads=num_threads,
                      seed=seed) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 21, Source file: reader_source.py
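
A hedged usage sketch for the wrapper above: the call below simply exercises the signature shown (the file names and parameter values are made up for illustration); what the returned source yields is determined by ReaderSource in the same module, which enqueues (key, record) string pairs produced by the reader.

# Hypothetical call site; file names and values are illustrative only.
source = TFRecordSource(
    file_names=['data-00000-of-00002.tfrecord',
                'data-00001-of-00002.tfrecord'],
    batch_size=32,
    queue_capacity=256,
    shuffle=True,
    min_after_dequeue=128,
    num_threads=2,
    seed=42)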

Example 2: testTFRecordReader

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def testTFRecordReader(self):
    with self.test_session():
      self._tfrecord_paths = test_utils.create_tfrecord_files(
          self.get_temp_dir(), num_files=3)

    key, value = parallel_reader.parallel_read(
        self._tfrecord_paths, reader_class=io_ops.TFRecordReader, num_readers=3)

    sv = supervisor.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)

      flowers = 0
      num_reads = 100
      for _ in range(num_reads):
        current_key, _ = sess.run([key, value])
        if 'flowers' in str(current_key):
          flowers += 1
      self.assertGreater(flowers, 0)
      self.assertEqual(flowers, num_reads)
Developer ID: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 22, Source file: parallel_reader_test.py
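
Outside the test harness, a plausible call to parallel_read looks like the sketch below (the glob pattern is hypothetical). In tf-slim's parallel_reader, key and value come back as scalar string tensors fed from a common queue, so queue runners must be started before evaluating them.

# Hedged sketch; 'train-*.tfrecord' is an assumed file pattern.
key, value = parallel_reader.parallel_read(
    'train-*.tfrecord',
    reader_class=io_ops.TFRecordReader,
    num_readers=4,
    shuffle=True)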

Example 3: testOutOfRangeError

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def testOutOfRangeError(self):
    with self.test_session():
      [tfrecord_path] = test_utils.create_tfrecord_files(
          self.get_temp_dir(), num_files=1)

    key, value = parallel_reader.single_pass_read(
        tfrecord_path, reader_class=io_ops.TFRecordReader)
    init_op = variables.local_variables_initializer()

    with self.test_session() as sess:
      sess.run(init_op)
      with queues.QueueRunners(sess):
        num_reads = 11
        with self.assertRaises(errors_impl.OutOfRangeError):
          for _ in range(num_reads):
            sess.run([key, value]) 
Developer ID: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 18, Source file: parallel_reader_test.py
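
The idiomatic way to consume a single_pass_read pipeline is to read until OutOfRangeError rather than hard-coding a count, as in this condensed sketch of the loop the test provokes (key, value, sess, and errors_impl are as in the example above):

try:
  while True:
    sess.run([key, value])
except errors_impl.OutOfRangeError:
  pass  # the single-epoch filename queue is exhausted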

Example 4: testTFRecordReader

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def testTFRecordReader(self):
    with self.cached_session():
      self._tfrecord_paths = test_utils.create_tfrecord_files(
          tempfile.mkdtemp(), num_files=3)

    key, value = parallel_reader.parallel_read(
        self._tfrecord_paths, reader_class=io_ops.TFRecordReader, num_readers=3)

    sv = supervisor.Supervisor(logdir=tempfile.mkdtemp())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)

      flowers = 0
      num_reads = 100
      for _ in range(num_reads):
        current_key, _ = sess.run([key, value])
        if 'flowers' in str(current_key):
          flowers += 1
      self.assertGreater(flowers, 0)
      self.assertEqual(flowers, num_reads) 
Developer ID: google-research, Project: tf-slim, Lines of code: 22, Source file: parallel_reader_test.py

Example 5: testOutOfRangeError

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def testOutOfRangeError(self):
    with self.cached_session():
      [tfrecord_path] = test_utils.create_tfrecord_files(
          tempfile.mkdtemp(), num_files=1)

    key, value = parallel_reader.single_pass_read(
        tfrecord_path, reader_class=io_ops.TFRecordReader)
    init_op = variables.local_variables_initializer()

    with self.cached_session() as sess:
      sess.run(init_op)
      with queues.QueueRunners(sess):
        num_reads = 11
        with self.assertRaises(errors_impl.OutOfRangeError):
          for _ in range(num_reads):
            sess.run([key, value]) 
Developer ID: google-research, Project: tf-slim, Lines of code: 18, Source file: parallel_reader_test.py

Example 6: _verify_all_data_sources_read

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def _verify_all_data_sources_read(self, shared_queue):
    with self.test_session():
      tfrecord_paths = test_utils.create_tfrecord_files(
          self.get_temp_dir(), num_files=3)

    num_readers = len(tfrecord_paths)
    p_reader = parallel_reader.ParallelReader(
        io_ops.TFRecordReader, shared_queue, num_readers=num_readers)

    data_files = parallel_reader.get_data_files(tfrecord_paths)
    filename_queue = input_lib.string_input_producer(data_files)
    key, value = p_reader.read(filename_queue)

    count0 = 0
    count1 = 0
    count2 = 0

    num_reads = 50

    sv = supervisor.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)

      for _ in range(num_reads):
        current_key, _ = sess.run([key, value])
        if '0-of-3' in str(current_key):
          count0 += 1
        if '1-of-3' in str(current_key):
          count1 += 1
        if '2-of-3' in str(current_key):
          count2 += 1

    self.assertGreater(count0, 0)
    self.assertGreater(count1, 0)
    self.assertGreater(count2, 0)
    self.assertEqual(count0 + count1 + count2, num_reads)
Developer ID: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 38, Source file: parallel_reader_test.py
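
For context, ParallelReader's first two arguments are the reader class and a common queue that collects (key, value) string pairs from all readers. The shared_queue passed into this helper is typically constructed along these lines (the capacities are illustrative, not taken from the test):

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import data_flow_ops

# FIFO preserves arrival order across readers; RandomShuffleQueue mixes records.
fifo_queue = data_flow_ops.FIFOQueue(
    capacity=256, dtypes=[dtypes.string, dtypes.string])
shuffle_queue = data_flow_ops.RandomShuffleQueue(
    capacity=256, min_after_dequeue=128,
    dtypes=[dtypes.string, dtypes.string])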

Example 7: _create_tfrecord_dataset

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def _create_tfrecord_dataset(tmpdir):
  if not gfile.Exists(tmpdir):
    gfile.MakeDirs(tmpdir)

  data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)

  keys_to_features = {
      'image/encoded':
          parsing_ops.FixedLenFeature(
              shape=(), dtype=dtypes.string, default_value=''),
      'image/format':
          parsing_ops.FixedLenFeature(
              shape=(), dtype=dtypes.string, default_value='jpeg'),
      'image/class/label':
          parsing_ops.FixedLenFeature(
              shape=[1],
              dtype=dtypes.int64,
              default_value=array_ops.zeros(
                  [1], dtype=dtypes.int64))
  }

  items_to_handlers = {
      'image': tfexample_decoder.Image(),
      'label': tfexample_decoder.Tensor('image/class/label'),
  }

  decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                               items_to_handlers)

  return dataset.Dataset(
      data_sources=data_sources,
      reader=io_ops.TFRecordReader,
      decoder=decoder,
      num_samples=100,
      items_to_descriptions=None) 
Developer ID: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 37, Source file: dataset_data_provider_test.py
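
A hedged sketch of consuming this dataset with tf-slim's DatasetDataProvider (the dataset_data_provider import is assumed alongside the modules already shown; the temp directory is arbitrary):

data = _create_tfrecord_dataset(tempfile.mkdtemp())
provider = dataset_data_provider.DatasetDataProvider(data)
image, label = provider.get(['image', 'label'])  # decoded per items_to_handlers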

Example 8: _verify_all_data_sources_read

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def _verify_all_data_sources_read(self, shared_queue):
    with self.cached_session():
      tfrecord_paths = test_utils.create_tfrecord_files(
          tempfile.mkdtemp(), num_files=3)

    num_readers = len(tfrecord_paths)
    p_reader = parallel_reader.ParallelReader(
        io_ops.TFRecordReader, shared_queue, num_readers=num_readers)

    data_files = parallel_reader.get_data_files(tfrecord_paths)
    filename_queue = input_lib.string_input_producer(data_files)
    key, value = p_reader.read(filename_queue)

    count0 = 0
    count1 = 0
    count2 = 0

    num_reads = 50

    sv = supervisor.Supervisor(logdir=tempfile.mkdtemp())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)

      for _ in range(num_reads):
        current_key, _ = sess.run([key, value])
        if '0-of-3' in str(current_key):
          count0 += 1
        if '1-of-3' in str(current_key):
          count1 += 1
        if '2-of-3' in str(current_key):
          count2 += 1

    self.assertGreater(count0, 0)
    self.assertGreater(count1, 0)
    self.assertGreater(count2, 0)
    self.assertEqual(count0 + count1 + count2, num_reads) 
Developer ID: google-research, Project: tf-slim, Lines of code: 38, Source file: parallel_reader_test.py

Example 9: read_batch_record_features

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def read_batch_record_features(file_pattern, batch_size, features,
                               randomize_input=True, num_epochs=None,
                               queue_capacity=10000, reader_num_threads=1,
                               name='dequeue_record_examples'):
  """Reads TFRecord, queues, batches and parses `Example` proto.

  See more detailed description in `read_examples`.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If None, cycles through the dataset forever. NOTE - If specified,
      creates a variable that must be initialized, so call
      tf.local_variables_initializer() as shown in the tests.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    name: Name of resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects, one for each key in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  return read_batch_features(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=features,
      reader=io_ops.TFRecordReader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      reader_num_threads=reader_num_threads,
      name=name) 
Developer ID: tobegit3hub, Project: deep_image_model, Lines of code: 41, Source file: graph_io.py
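
A hedged call-site sketch for the function above; the file pattern and feature spec are made up, and parsing_ops/dtypes are the same modules used elsewhere in these examples:

feature_map = read_batch_record_features(
    file_pattern='train-*.tfrecord',
    batch_size=32,
    features={
        'image/encoded': parsing_ops.FixedLenFeature([], dtypes.string),
        'image/class/label': parsing_ops.FixedLenFeature([1], dtypes.int64),
    })
# feature_map['image/encoded'] is a [32] string Tensor once queue runners run.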

Example 10: read_batch_record_features

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def read_batch_record_features(file_pattern,
                               batch_size,
                               features,
                               randomize_input=True,
                               num_epochs=None,
                               queue_capacity=10000,
                               reader_num_threads=1,
                               name='dequeue_record_examples'):
  """Reads TFRecord, queues, batches and parses `Example` proto.

  See more detailed description in `read_examples`.

  Args:
    file_pattern: List of files or patterns of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If None, cycles through the dataset forever. NOTE - If specified,
      creates a variable that must be initialized, so call
      tf.local_variables_initializer() and run the op in a session.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples. In order to have
      a predictable and repeatable order of reading and enqueueing, such as in
      prediction and evaluation mode, `reader_num_threads` should be 1.
    name: Name of resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects, one for each key in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  return read_batch_features(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=features,
      reader=io_ops.TFRecordReader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      reader_num_threads=reader_num_threads,
      name=name) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 47, Source file: graph_io.py

Example 11: read_batch_record_features

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def read_batch_record_features(file_pattern,
                               batch_size,
                               features,
                               randomize_input=True,
                               num_epochs=None,
                               queue_capacity=10000,
                               reader_num_threads=1,
                               name='dequeue_record_examples'):
  """Reads TFRecord, queues, batches and parses `Example` proto.

  See more detailed description in `read_examples`.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If None, cycles through the dataset forever. NOTE - If specified,
      creates a variable that must be initialized, so call
      tf.local_variables_initializer() as shown in the tests.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    name: Name of resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects, one for each key in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  return read_batch_features(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=features,
      reader=io_ops.TFRecordReader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      reader_num_threads=reader_num_threads,
      name=name) 
Developer ID: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 45, Source file: graph_io.py

Example 12: _verify_read_up_to_out

# Required import: from tensorflow.python.ops import io_ops [as alias]
# Or alternatively: from tensorflow.python.ops.io_ops import TFRecordReader [as alias]
def _verify_read_up_to_out(self, shared_queue):
    with self.cached_session():
      num_files = 3
      num_records_per_file = 7
      tfrecord_paths = test_utils.create_tfrecord_files(
          tempfile.mkdtemp(),
          num_files=num_files,
          num_records_per_file=num_records_per_file)

    p_reader = parallel_reader.ParallelReader(
        io_ops.TFRecordReader, shared_queue, num_readers=5)

    data_files = parallel_reader.get_data_files(tfrecord_paths)
    filename_queue = input_lib.string_input_producer(data_files, num_epochs=1)
    key, value = p_reader.read_up_to(filename_queue, 4)

    count0 = 0
    count1 = 0
    count2 = 0
    all_keys_count = 0
    all_values_count = 0

    sv = supervisor.Supervisor(logdir=tempfile.mkdtemp())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      while True:
        try:
          current_keys, current_values = sess.run([key, value])
          self.assertEqual(len(current_keys), len(current_values))
          all_keys_count += len(current_keys)
          all_values_count += len(current_values)
          for current_key in current_keys:
            if '0-of-3' in str(current_key):
              count0 += 1
            if '1-of-3' in str(current_key):
              count1 += 1
            if '2-of-3' in str(current_key):
              count2 += 1
        except errors_impl.OutOfRangeError:
          break

    self.assertEqual(count0, num_records_per_file)
    self.assertEqual(count1, num_records_per_file)
    self.assertEqual(count2, num_records_per_file)
    self.assertEqual(
        all_keys_count,
        num_files * num_records_per_file)
    self.assertEqual(all_values_count, all_keys_count)
    self.assertEqual(
        count0 + count1 + count2,
        all_keys_count) 
Developer ID: google-research, Project: tf-slim, Lines of code: 53, Source file: parallel_reader_test.py
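
Design note: unlike read(), which returns one record per step, read_up_to(queue, n) dequeues vectors of at most n keys and values, so the final batch from each file may be shorter than n; the try/except loop above is the standard way to drain it. A condensed sketch of the same consumption pattern (names as in the example above):

key, value = p_reader.read_up_to(filename_queue, 4)  # up to 4 records per run
try:
  while True:
    keys, values = sess.run([key, value])  # len(keys) <= 4
except errors_impl.OutOfRangeError:
  pass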


Note: The tensorflow.python.ops.io_ops.TFRecordReader method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not republish without permission.